Merge branch 'main' of github.com:mir-protocol/plonky2 into non-inv

Dmitry Vagner 2022-12-12 11:06:16 -08:00
commit 2e2007eede
91 changed files with 1881 additions and 976 deletions

View File

@ -46,6 +46,7 @@ jobs:
env:
RUSTFLAGS: -Copt-level=3 -Cdebug-assertions -Coverflow-checks=y -Cdebuginfo=0 -Cprefer-dynamic=y
CARGO_INCREMENTAL: 1
RUST_BACKTRACE: 1
lints:
name: Formatting and Clippy

View File

@ -8,7 +8,7 @@ use plonky2::field::types::{Field, PrimeField};
use plonky2::hash::hash_types::RichField;
use plonky2::iop::generator::{GeneratedValues, SimpleGenerator};
use plonky2::iop::target::{BoolTarget, Target};
use plonky2::iop::witness::PartitionWitness;
use plonky2::iop::witness::{PartitionWitness, WitnessWrite};
use plonky2::plonk::circuit_builder::CircuitBuilder;
use crate::curve::glv::{decompose_secp256k1_scalar, GLV_BETA, GLV_S};

View File

@ -8,7 +8,7 @@ use plonky2::field::types::{Field, PrimeField};
use plonky2::hash::hash_types::RichField;
use plonky2::iop::generator::{GeneratedValues, SimpleGenerator};
use plonky2::iop::target::{BoolTarget, Target};
use plonky2::iop::witness::PartitionWitness;
use plonky2::iop::witness::{PartitionWitness, WitnessWrite};
use plonky2::plonk::circuit_builder::CircuitBuilder;
use plonky2::util::ceil_div_usize;
use plonky2_u32::gadgets::arithmetic_u32::{CircuitBuilderU32, U32Target};

View File

@ -76,6 +76,18 @@ pub enum Table {
pub(crate) const NUM_TABLES: usize = Table::Memory as usize + 1;
impl Table {
pub(crate) fn all() -> [Self; NUM_TABLES] {
[
Self::Cpu,
Self::Keccak,
Self::KeccakSponge,
Self::Logic,
Self::Memory,
]
}
}
pub(crate) fn all_cross_table_lookups<F: Field>() -> Vec<CrossTableLookup<F>> {
let mut ctls = vec![ctl_keccak(), ctl_logic(), ctl_memory(), ctl_keccak_sponge()];
// TODO: Some CTLs temporarily disabled while we get them working.

View File

@ -97,46 +97,10 @@ pub(crate) struct CpuLogicView<T: Copy> {
#[derive(Copy, Clone)]
pub(crate) struct CpuJumpsView<T: Copy> {
/// `input0` is `mem_channel[0].value`. It's the top stack value at entry (for jumps, the
/// address; for `EXIT_KERNEL`, the address and new privilege level).
/// `input1` is `mem_channel[1].value`. For `JUMPI`, it's the second stack value (the
/// predicate). For `JUMP`, 1.
/// Inverse of `input0[1] + ... + input0[7]`, if one exists; otherwise, an arbitrary value.
/// Needed to prove that `input0` is nonzero.
pub(crate) input0_upper_sum_inv: T,
/// 1 if `input0[1..7]` is zero; else 0.
pub(crate) input0_upper_zero: T,
/// 1 if `input0[0]` is the address of a valid jump destination (i.e. `JUMPDEST` that is not
/// part of a `PUSH` immediate); else 0. Note that the kernel is allowed to jump anywhere it
/// wants, so this flag is computed but ignored in kernel mode.
/// NOTE: this flag only considers `input0[0]`, the low 32 bits of the 256-bit register. Even if
/// this flag is 1, `input0` will still be an invalid address if the high 224 bits are not 0.
pub(crate) dst_valid: T, // TODO: populate this (check for JUMPDEST)
/// 1 if either `dst_valid` is 1 or we are in kernel mode; else 0. (Just a logical OR.)
pub(crate) dst_valid_or_kernel: T,
/// 1 if `dst_valid_or_kernel` and `input0_upper_zero` are both 1; else 0. In other words, we
/// are allowed to jump to `input0[0]` because either it's a valid address or we're in kernel
/// mode (`dst_valid_or_kernel`), and also `input0[1..7]` are all 0 so `input0[0]` is in fact
/// the whole address (we're not being asked to jump to an address that would overflow).
pub(crate) input0_jumpable: T,
/// Inverse of `input1[0] + ... + input1[7]`, if one exists; otherwise, an arbitrary value.
/// Needed to prove that `input1` is nonzero.
pub(crate) input1_sum_inv: T,
/// Note that the below flags are mutually exclusive.
/// 1 if the JUMPI falls though (because input1 is 0); else 0.
pub(crate) should_continue: T,
/// 1 if the JUMP/JUMPI does in fact jump to `input0`; else 0. This requires `input0` to be a
/// valid destination (`input0[0]` is a `JUMPDEST` not in an immediate or we are in kernel mode
/// and also `input0[1..7]` is 0) and `input1` to be nonzero.
// A flag.
pub(crate) should_jump: T,
/// 1 if the JUMP/JUMPI faults; else 0. This happens when `input0` is not a valid destination
/// (`input0[0]` is not `JUMPDEST` that is not in an immediate while we are in user mode, or
/// `input0[1..7]` is nonzero) and `input1` is nonzero.
pub(crate) should_trap: T,
// Pseudoinverse of `cond.iter().sum()`. Used to check `should_jump`.
pub(crate) cond_sum_pinv: T,
}
#[derive(Copy, Clone)]
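The removed `input0_upper_sum_inv`/`input0_upper_zero` pair and the new `cond_sum_pinv` column both use the standard pseudoinverse trick for zero tests inside an algebraic constraint system: to show a sum `x` is (non)zero with low-degree constraints, the prover also commits to a purported inverse. A minimal sketch of the witness side, not part of this diff, using plonky2's `GoldilocksField` (the helper name is illustrative):

use plonky2::field::goldilocks_field::GoldilocksField as F;
use plonky2::field::types::Field;

/// For a value `x`, return `(pinv, is_zero)` so that the degree-2 identities
///   x * pinv == 1 - is_zero   and   x * is_zero == 0
/// both hold. `pinv` is the inverse of `x` when one exists; otherwise any value works.
fn zero_test_witness(x: F) -> (F, F) {
    match x.try_inverse() {
        Some(inv) => (inv, F::ZERO), // x != 0
        None => (F::ZERO, F::ONE),   // x == 0
    }
}

fn main() {
    for x in [F::ZERO, F::from_canonical_u64(7)] {
        let (pinv, is_zero) = zero_test_witness(x);
        // These are the per-row polynomial identities the STARK enforces.
        assert_eq!(x * pinv, F::ONE - is_zero);
        assert_eq!(x * is_zero, F::ZERO);
    }
}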

evm/src/cpu/contextops.rs (new file, 82 lines)
View File

@ -0,0 +1,82 @@
use plonky2::field::extension::Extendable;
use plonky2::field::packed::PackedField;
use plonky2::hash::hash_types::RichField;
use plonky2::iop::ext_target::ExtensionTarget;
use plonky2::plonk::circuit_builder::CircuitBuilder;
use crate::constraint_consumer::{ConstraintConsumer, RecursiveConstraintConsumer};
use crate::cpu::columns::CpuColumnsView;
use crate::cpu::membus::NUM_GP_CHANNELS;
fn eval_packed_get<P: PackedField>(
lv: &CpuColumnsView<P>,
yield_constr: &mut ConstraintConsumer<P>,
) {
let filter = lv.op.get_context;
let push_channel = lv.mem_channels[NUM_GP_CHANNELS - 1];
yield_constr.constraint(filter * (push_channel.value[0] - lv.context));
for &limb in &push_channel.value[1..] {
yield_constr.constraint(filter * limb);
}
}
fn eval_ext_circuit_get<F: RichField + Extendable<D>, const D: usize>(
builder: &mut CircuitBuilder<F, D>,
lv: &CpuColumnsView<ExtensionTarget<D>>,
yield_constr: &mut RecursiveConstraintConsumer<F, D>,
) {
let filter = lv.op.get_context;
let push_channel = lv.mem_channels[NUM_GP_CHANNELS - 1];
{
let diff = builder.sub_extension(push_channel.value[0], lv.context);
let constr = builder.mul_extension(filter, diff);
yield_constr.constraint(builder, constr);
}
for &limb in &push_channel.value[1..] {
let constr = builder.mul_extension(filter, limb);
yield_constr.constraint(builder, constr);
}
}
fn eval_packed_set<P: PackedField>(
lv: &CpuColumnsView<P>,
nv: &CpuColumnsView<P>,
yield_constr: &mut ConstraintConsumer<P>,
) {
let filter = lv.op.set_context;
let pop_channel = lv.mem_channels[0];
yield_constr.constraint_transition(filter * (pop_channel.value[0] - nv.context));
}
fn eval_ext_circuit_set<F: RichField + Extendable<D>, const D: usize>(
builder: &mut CircuitBuilder<F, D>,
lv: &CpuColumnsView<ExtensionTarget<D>>,
nv: &CpuColumnsView<ExtensionTarget<D>>,
yield_constr: &mut RecursiveConstraintConsumer<F, D>,
) {
let filter = lv.op.set_context;
let pop_channel = lv.mem_channels[0];
let diff = builder.sub_extension(pop_channel.value[0], nv.context);
let constr = builder.mul_extension(filter, diff);
yield_constr.constraint_transition(builder, constr);
}
pub fn eval_packed<P: PackedField>(
lv: &CpuColumnsView<P>,
nv: &CpuColumnsView<P>,
yield_constr: &mut ConstraintConsumer<P>,
) {
eval_packed_get(lv, yield_constr);
eval_packed_set(lv, nv, yield_constr);
}
pub fn eval_ext_circuit<F: RichField + Extendable<D>, const D: usize>(
builder: &mut CircuitBuilder<F, D>,
lv: &CpuColumnsView<ExtensionTarget<D>>,
nv: &CpuColumnsView<ExtensionTarget<D>>,
yield_constr: &mut RecursiveConstraintConsumer<F, D>,
) {
eval_ext_circuit_get(builder, lv, yield_constr);
eval_ext_circuit_set(builder, lv, nv, yield_constr);
}
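Every constraint in this new file is multiplied by an opcode filter (`lv.op.get_context` or `lv.op.set_context`), so it only has to vanish on rows that actually execute that opcode. A tiny numeric illustration of the pattern, not taken from the repo, with made-up row values:

use plonky2::field::goldilocks_field::GoldilocksField as F;
use plonky2::field::types::Field;

fn main() {
    // `filter * (push_value - context)` must be zero on every row: either the
    // GET_CONTEXT filter is 0 (the limb is unconstrained), or it is 1 and the
    // pushed limb must equal the current context.
    let rows = [
        (F::ONE, F::from_canonical_u64(3), F::from_canonical_u64(3)),  // a GET_CONTEXT row
        (F::ZERO, F::from_canonical_u64(9), F::from_canonical_u64(3)), // any other opcode
    ];
    for (filter, push_value, context) in rows {
        assert_eq!(filter * (push_value - context), F::ZERO);
    }
}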

View File

@ -11,8 +11,8 @@ use plonky2::hash::hash_types::RichField;
use crate::constraint_consumer::{ConstraintConsumer, RecursiveConstraintConsumer};
use crate::cpu::columns::{CpuColumnsView, COL_MAP, NUM_CPU_COLUMNS};
use crate::cpu::{
bootstrap_kernel, control_flow, decode, dup_swap, jumps, membus, modfp254, shift, simple_logic,
stack, stack_bounds, syscalls,
bootstrap_kernel, contextops, control_flow, decode, dup_swap, jumps, membus, memio, modfp254,
pc, shift, simple_logic, stack, stack_bounds, syscalls,
};
use crate::cross_table_lookup::Column;
use crate::memory::segments::Segment;
@ -141,15 +141,18 @@ impl<F: RichField + Extendable<D>, const D: usize> Stark<F, D> for CpuStark<F, D
// TODO: Some failing constraints temporarily disabled by using this dummy consumer.
let mut dummy_yield_constr = ConstraintConsumer::new(vec![], P::ZEROS, P::ZEROS, P::ZEROS);
bootstrap_kernel::eval_bootstrap_kernel(vars, yield_constr);
contextops::eval_packed(local_values, next_values, yield_constr);
control_flow::eval_packed_generic(local_values, next_values, yield_constr);
decode::eval_packed_generic(local_values, yield_constr);
decode::eval_packed_generic(local_values, &mut dummy_yield_constr);
dup_swap::eval_packed(local_values, yield_constr);
jumps::eval_packed(local_values, next_values, &mut dummy_yield_constr);
jumps::eval_packed(local_values, next_values, yield_constr);
membus::eval_packed(local_values, yield_constr);
memio::eval_packed(local_values, yield_constr);
modfp254::eval_packed(local_values, yield_constr);
pc::eval_packed(local_values, yield_constr);
shift::eval_packed(local_values, yield_constr);
simple_logic::eval_packed(local_values, yield_constr);
stack::eval_packed(local_values, yield_constr);
stack::eval_packed(local_values, &mut dummy_yield_constr);
stack_bounds::eval_packed(local_values, &mut dummy_yield_constr);
syscalls::eval_packed(local_values, next_values, yield_constr);
}
@ -167,15 +170,18 @@ impl<F: RichField + Extendable<D>, const D: usize> Stark<F, D> for CpuStark<F, D
let mut dummy_yield_constr =
RecursiveConstraintConsumer::new(zero, vec![], zero, zero, zero);
bootstrap_kernel::eval_bootstrap_kernel_circuit(builder, vars, yield_constr);
contextops::eval_ext_circuit(builder, local_values, next_values, yield_constr);
control_flow::eval_ext_circuit(builder, local_values, next_values, yield_constr);
decode::eval_ext_circuit(builder, local_values, yield_constr);
decode::eval_ext_circuit(builder, local_values, &mut dummy_yield_constr);
dup_swap::eval_ext_circuit(builder, local_values, yield_constr);
jumps::eval_ext_circuit(builder, local_values, next_values, &mut dummy_yield_constr);
jumps::eval_ext_circuit(builder, local_values, next_values, yield_constr);
membus::eval_ext_circuit(builder, local_values, yield_constr);
memio::eval_ext_circuit(builder, local_values, yield_constr);
modfp254::eval_ext_circuit(builder, local_values, yield_constr);
pc::eval_ext_circuit(builder, local_values, yield_constr);
shift::eval_ext_circuit(builder, local_values, yield_constr);
simple_logic::eval_ext_circuit(builder, local_values, yield_constr);
stack::eval_ext_circuit(builder, local_values, yield_constr);
stack::eval_ext_circuit(builder, local_values, &mut dummy_yield_constr);
stack_bounds::eval_ext_circuit(builder, local_values, &mut dummy_yield_constr);
syscalls::eval_ext_circuit(builder, local_values, next_values, yield_constr);
}

View File

@ -1,4 +1,3 @@
use once_cell::sync::Lazy;
use plonky2::field::extension::Extendable;
use plonky2::field::packed::PackedField;
use plonky2::field::types::Field;
@ -7,10 +6,8 @@ use plonky2::iop::ext_target::ExtensionTarget;
use crate::constraint_consumer::{ConstraintConsumer, RecursiveConstraintConsumer};
use crate::cpu::columns::CpuColumnsView;
use crate::cpu::kernel::aggregator::KERNEL;
static INVALID_DST_HANDLER_ADDR: Lazy<usize> =
Lazy::new(|| KERNEL.global_labels["fault_exception"]);
use crate::cpu::membus::NUM_GP_CHANNELS;
use crate::memory::segments::Segment;
pub fn eval_packed_exit_kernel<P: PackedField>(
lv: &CpuColumnsView<P>,
@ -58,99 +55,65 @@ pub fn eval_packed_jump_jumpi<P: PackedField>(
yield_constr: &mut ConstraintConsumer<P>,
) {
let jumps_lv = lv.general.jumps();
let input0 = lv.mem_channels[0].value;
let input1 = lv.mem_channels[1].value;
let dst = lv.mem_channels[0].value;
let cond = lv.mem_channels[1].value;
let filter = lv.op.jump + lv.op.jumpi; // `JUMP` or `JUMPI`
let jumpdest_flag_channel = lv.mem_channels[NUM_GP_CHANNELS - 1];
// If `JUMP`, re-use the `JUMPI` logic, but setting the second input (the predicate) to be 1.
// In other words, we implement `JUMP(addr)` as `JUMPI(addr, cond=1)`.
yield_constr.constraint(lv.op.jump * (input1[0] - P::ONES));
for &limb in &input1[1..] {
// In other words, we implement `JUMP(dst)` as `JUMPI(dst, cond=1)`.
yield_constr.constraint(lv.op.jump * (cond[0] - P::ONES));
for &limb in &cond[1..] {
// Set all limbs (other than the least-significant limb) to 0.
// NB: Technically, they don't have to be 0, as long as the sum
// `input1[0] + ... + input1[7]` cannot overflow.
// `cond[0] + ... + cond[7]` cannot overflow.
yield_constr.constraint(lv.op.jump * limb);
}
// Check `input0_upper_zero`
// `input0_upper_zero` is either 0 or 1.
yield_constr
.constraint(filter * jumps_lv.input0_upper_zero * (jumps_lv.input0_upper_zero - P::ONES));
// The below sum cannot overflow due to the limb size.
let input0_upper_sum: P = input0[1..].iter().copied().sum();
// `input0_upper_zero` = 1 implies `input0_upper_sum` = 0.
yield_constr.constraint(filter * jumps_lv.input0_upper_zero * input0_upper_sum);
// `input0_upper_zero` = 0 implies `input0_upper_sum_inv * input0_upper_sum` = 1, which can only
// happen when `input0_upper_sum` is nonzero.
yield_constr.constraint(
filter
* (jumps_lv.input0_upper_sum_inv * input0_upper_sum + jumps_lv.input0_upper_zero
- P::ONES),
);
// Check `dst_valid_or_kernel` (this is just a logical OR)
yield_constr.constraint(
filter
* (jumps_lv.dst_valid + lv.is_kernel_mode
- jumps_lv.dst_valid * lv.is_kernel_mode
- jumps_lv.dst_valid_or_kernel),
);
// Check `input0_jumpable` (this is just `dst_valid_or_kernel` AND `input0_upper_zero`)
yield_constr.constraint(
filter
* (jumps_lv.dst_valid_or_kernel * jumps_lv.input0_upper_zero
- jumps_lv.input0_jumpable),
);
// Make sure that `should_continue`, `should_jump`, `should_trap` are all binary and exactly one
// is set.
yield_constr
.constraint(filter * jumps_lv.should_continue * (jumps_lv.should_continue - P::ONES));
// Check `should_jump`:
yield_constr.constraint(filter * jumps_lv.should_jump * (jumps_lv.should_jump - P::ONES));
yield_constr.constraint(filter * jumps_lv.should_trap * (jumps_lv.should_trap - P::ONES));
let cond_sum: P = cond.into_iter().sum();
yield_constr.constraint(filter * (jumps_lv.should_jump - P::ONES) * cond_sum);
yield_constr.constraint(filter * (jumps_lv.cond_sum_pinv * cond_sum - jumps_lv.should_jump));
// If we're jumping, then the high 7 limbs of the destination must be 0.
let dst_hi_sum: P = dst[1..].iter().copied().sum();
yield_constr.constraint(filter * jumps_lv.should_jump * dst_hi_sum);
// Check that the destination address holds a `JUMPDEST` instruction. Note that this constraint
// does not need to be conditioned on `should_jump` because no read takes place if we're not
// jumping, so we're free to set the channel to 1.
yield_constr.constraint(filter * (jumpdest_flag_channel.value[0] - P::ONES));
// Make sure that the JUMPDEST flag channel is constrained.
// Only need to read if we're about to jump and we're not in kernel mode.
yield_constr.constraint(
filter * (jumps_lv.should_continue + jumps_lv.should_jump + jumps_lv.should_trap - P::ONES),
);
// Validate `should_continue`
// This sum cannot overflow (due to limb size).
let input1_sum: P = input1.into_iter().sum();
// `should_continue` = 1 implies `input1_sum` = 0.
yield_constr.constraint(filter * jumps_lv.should_continue * input1_sum);
// `should_continue` = 0 implies `input1_sum * input1_sum_inv` = 1, which can only happen if
// input1_sum is nonzero.
yield_constr.constraint(
filter * (input1_sum * jumps_lv.input1_sum_inv + jumps_lv.should_continue - P::ONES),
);
// Validate `should_jump` and `should_trap` by splitting on `input0_jumpable`.
// Note that `should_jump` = 1 and `should_trap` = 1 both imply that `should_continue` = 0, so
// `input1` is nonzero.
yield_constr.constraint(filter * jumps_lv.should_jump * (jumps_lv.input0_jumpable - P::ONES));
yield_constr.constraint(filter * jumps_lv.should_trap * jumps_lv.input0_jumpable);
// Handle trap
// Set program counter and kernel flag
yield_constr
.constraint_transition(filter * jumps_lv.should_trap * (nv.is_kernel_mode - P::ONES));
yield_constr.constraint_transition(
filter
* jumps_lv.should_trap
* (nv.program_counter - P::Scalar::from_canonical_usize(*INVALID_DST_HANDLER_ADDR)),
* (jumpdest_flag_channel.used - jumps_lv.should_jump * (P::ONES - lv.is_kernel_mode)),
);
yield_constr.constraint(filter * (jumpdest_flag_channel.is_read - P::ONES));
yield_constr.constraint(filter * (jumpdest_flag_channel.addr_context - lv.context));
yield_constr.constraint(
filter
* (jumpdest_flag_channel.addr_segment
- P::Scalar::from_canonical_u64(Segment::JumpdestBits as u64)),
);
yield_constr.constraint(filter * (jumpdest_flag_channel.addr_virtual - dst[0]));
// Handle continue and jump
let continue_or_jump = jumps_lv.should_continue + jumps_lv.should_jump;
// Keep kernel mode.
yield_constr
.constraint_transition(filter * continue_or_jump * (nv.is_kernel_mode - lv.is_kernel_mode));
// Set program counter depending on whether we're continuing or jumping.
// Disable unused memory channels
for &channel in &lv.mem_channels[2..NUM_GP_CHANNELS - 1] {
yield_constr.constraint(filter * channel.used);
}
// Channel 1 is unused by the `JUMP` instruction.
yield_constr.constraint(lv.op.jump * lv.mem_channels[1].used);
// Finally, set the next program counter.
let fallthrough_dst = lv.program_counter + P::ONES;
let jump_dest = dst[0];
yield_constr.constraint_transition(
filter * jumps_lv.should_continue * (nv.program_counter - lv.program_counter - P::ONES),
filter * (jumps_lv.should_jump - P::ONES) * (nv.program_counter - fallthrough_dst),
);
yield_constr
.constraint_transition(filter * jumps_lv.should_jump * (nv.program_counter - input0[0]));
.constraint_transition(filter * jumps_lv.should_jump * (nv.program_counter - jump_dest));
}
pub fn eval_ext_circuit_jump_jumpi<F: RichField + Extendable<D>, const D: usize>(
@ -160,178 +123,124 @@ pub fn eval_ext_circuit_jump_jumpi<F: RichField + Extendable<D>, const D: usize>
yield_constr: &mut RecursiveConstraintConsumer<F, D>,
) {
let jumps_lv = lv.general.jumps();
let input0 = lv.mem_channels[0].value;
let input1 = lv.mem_channels[1].value;
let dst = lv.mem_channels[0].value;
let cond = lv.mem_channels[1].value;
let filter = builder.add_extension(lv.op.jump, lv.op.jumpi); // `JUMP` or `JUMPI`
let jumpdest_flag_channel = lv.mem_channels[NUM_GP_CHANNELS - 1];
// If `JUMP`, re-use the `JUMPI` logic, but setting the second input (the predicate) to be 1.
// In other words, we implement `JUMP(addr)` as `JUMPI(addr, cond=1)`.
// In other words, we implement `JUMP(dst)` as `JUMPI(dst, cond=1)`.
{
let constr = builder.mul_sub_extension(lv.op.jump, input1[0], lv.op.jump);
let constr = builder.mul_sub_extension(lv.op.jump, cond[0], lv.op.jump);
yield_constr.constraint(builder, constr);
}
for &limb in &input1[1..] {
for &limb in &cond[1..] {
// Set all limbs (other than the least-significant limb) to 0.
// NB: Technically, they don't have to be 0, as long as the sum
// `input1[0] + ... + input1[7]` cannot overflow.
// `cond[0] + ... + cond[7]` cannot overflow.
let constr = builder.mul_extension(lv.op.jump, limb);
yield_constr.constraint(builder, constr);
}
// Check `input0_upper_zero`
// `input0_upper_zero` is either 0 or 1.
// Check `should_jump`:
{
let constr = builder.mul_sub_extension(
jumps_lv.input0_upper_zero,
jumps_lv.input0_upper_zero,
jumps_lv.input0_upper_zero,
jumps_lv.should_jump,
jumps_lv.should_jump,
jumps_lv.should_jump,
);
let constr = builder.mul_extension(filter, constr);
yield_constr.constraint(builder, constr);
}
let cond_sum = builder.add_many_extension(cond);
{
// The below sum cannot overflow due to the limb size.
let input0_upper_sum = builder.add_many_extension(input0[1..].iter());
// `input0_upper_zero` = 1 implies `input0_upper_sum` = 0.
let constr = builder.mul_extension(jumps_lv.input0_upper_zero, input0_upper_sum);
let constr = builder.mul_sub_extension(cond_sum, jumps_lv.should_jump, cond_sum);
let constr = builder.mul_extension(filter, constr);
yield_constr.constraint(builder, constr);
// `input0_upper_zero` = 0 implies `input0_upper_sum_inv * input0_upper_sum` = 1, which can
// only happen when `input0_upper_sum` is nonzero.
let constr = builder.mul_add_extension(
jumps_lv.input0_upper_sum_inv,
input0_upper_sum,
jumps_lv.input0_upper_zero,
);
let constr = builder.mul_sub_extension(filter, constr, filter);
yield_constr.constraint(builder, constr);
};
// Check `dst_valid_or_kernel` (this is just a logical OR)
}
{
let constr = builder.mul_add_extension(
jumps_lv.dst_valid,
let constr =
builder.mul_sub_extension(jumps_lv.cond_sum_pinv, cond_sum, jumps_lv.should_jump);
let constr = builder.mul_extension(filter, constr);
yield_constr.constraint(builder, constr);
}
// If we're jumping, then the high 7 limbs of the destination must be 0.
let dst_hi_sum = builder.add_many_extension(&dst[1..]);
{
let constr = builder.mul_extension(jumps_lv.should_jump, dst_hi_sum);
let constr = builder.mul_extension(filter, constr);
yield_constr.constraint(builder, constr);
}
// Check that the destination address holds a `JUMPDEST` instruction. Note that this constraint
// does not need to be conditioned on `should_jump` because no read takes place if we're not
// jumping, so we're free to set the channel to 1.
{
let constr = builder.mul_sub_extension(filter, jumpdest_flag_channel.value[0], filter);
yield_constr.constraint(builder, constr);
}
// Make sure that the JUMPDEST flag channel is constrained.
// Only need to read if we're about to jump and we're not in kernel mode.
{
let constr = builder.mul_sub_extension(
jumps_lv.should_jump,
lv.is_kernel_mode,
jumps_lv.dst_valid_or_kernel,
);
let constr = builder.sub_extension(jumps_lv.dst_valid, constr);
let constr = builder.add_extension(lv.is_kernel_mode, constr);
let constr = builder.mul_extension(filter, constr);
yield_constr.constraint(builder, constr);
}
// Check `input0_jumpable` (this is just `dst_valid_or_kernel` AND `input0_upper_zero`)
{
let constr = builder.mul_sub_extension(
jumps_lv.dst_valid_or_kernel,
jumps_lv.input0_upper_zero,
jumps_lv.input0_jumpable,
);
let constr = builder.mul_extension(filter, constr);
yield_constr.constraint(builder, constr);
}
// Make sure that `should_continue`, `should_jump`, `should_trap` are all binary and exactly one
// is set.
for flag in [
jumps_lv.should_continue,
jumps_lv.should_jump,
jumps_lv.should_trap,
] {
let constr = builder.mul_sub_extension(flag, flag, flag);
let constr = builder.mul_extension(filter, constr);
yield_constr.constraint(builder, constr);
}
{
let constr = builder.add_extension(jumps_lv.should_continue, jumps_lv.should_jump);
let constr = builder.add_extension(constr, jumps_lv.should_trap);
let constr = builder.mul_sub_extension(filter, constr, filter);
yield_constr.constraint(builder, constr);
}
// Validate `should_continue`
{
// This sum cannot overflow (due to limb size).
let input1_sum = builder.add_many_extension(input1.into_iter());
// `should_continue` = 1 implies `input1_sum` = 0.
let constr = builder.mul_extension(jumps_lv.should_continue, input1_sum);
let constr = builder.mul_extension(filter, constr);
yield_constr.constraint(builder, constr);
// `should_continue` = 0 implies `input1_sum * input1_sum_inv` = 1, which can only happen if
// input1_sum is nonzero.
let constr = builder.mul_add_extension(
input1_sum,
jumps_lv.input1_sum_inv,
jumps_lv.should_continue,
);
let constr = builder.mul_sub_extension(filter, constr, filter);
yield_constr.constraint(builder, constr);
}
// Validate `should_jump` and `should_trap` by splitting on `input0_jumpable`.
// Note that `should_jump` = 1 and `should_trap` = 1 both imply that `should_continue` = 0, so
// `input1` is nonzero.
{
let constr = builder.mul_sub_extension(
jumps_lv.should_jump,
jumps_lv.input0_jumpable,
jumps_lv.should_jump,
);
let constr = builder.add_extension(jumpdest_flag_channel.used, constr);
let constr = builder.mul_extension(filter, constr);
yield_constr.constraint(builder, constr);
}
{
let constr = builder.mul_extension(jumps_lv.should_trap, jumps_lv.input0_jumpable);
let constr = builder.mul_sub_extension(filter, jumpdest_flag_channel.is_read, filter);
yield_constr.constraint(builder, constr);
}
{
let constr = builder.sub_extension(jumpdest_flag_channel.addr_context, lv.context);
let constr = builder.mul_extension(filter, constr);
yield_constr.constraint(builder, constr);
}
// Handle trap
{
let trap_filter = builder.mul_extension(filter, jumps_lv.should_trap);
// Set kernel flag
let constr = builder.mul_sub_extension(trap_filter, nv.is_kernel_mode, trap_filter);
yield_constr.constraint_transition(builder, constr);
// Set program counter
let constr = builder.arithmetic_extension(
F::ONE,
-F::from_canonical_usize(*INVALID_DST_HANDLER_ADDR),
trap_filter,
nv.program_counter,
trap_filter,
-F::from_canonical_u64(Segment::JumpdestBits as u64),
filter,
jumpdest_flag_channel.addr_segment,
filter,
);
yield_constr.constraint_transition(builder, constr);
yield_constr.constraint(builder, constr);
}
{
let constr = builder.sub_extension(jumpdest_flag_channel.addr_virtual, dst[0]);
let constr = builder.mul_extension(filter, constr);
yield_constr.constraint(builder, constr);
}
// Handle continue and jump
// Disable unused memory channels
for &channel in &lv.mem_channels[2..NUM_GP_CHANNELS - 1] {
let constr = builder.mul_extension(filter, channel.used);
yield_constr.constraint(builder, constr);
}
// Channel 1 is unused by the `JUMP` instruction.
{
// Keep kernel mode.
let continue_or_jump =
builder.add_extension(jumps_lv.should_continue, jumps_lv.should_jump);
let constr = builder.sub_extension(nv.is_kernel_mode, lv.is_kernel_mode);
let constr = builder.mul_extension(continue_or_jump, constr);
let constr = builder.mul_extension(filter, constr);
let constr = builder.mul_extension(lv.op.jump, lv.mem_channels[1].used);
yield_constr.constraint(builder, constr);
}
// Finally, set the next program counter.
let fallthrough_dst = builder.add_const_extension(lv.program_counter, F::ONE);
let jump_dest = dst[0];
{
let constr_a = builder.mul_sub_extension(filter, jumps_lv.should_jump, filter);
let constr_b = builder.sub_extension(nv.program_counter, fallthrough_dst);
let constr = builder.mul_extension(constr_a, constr_b);
yield_constr.constraint_transition(builder, constr);
}
// Set program counter depending on whether we're continuing...
{
let constr = builder.sub_extension(nv.program_counter, lv.program_counter);
let constr =
builder.mul_sub_extension(jumps_lv.should_continue, constr, jumps_lv.should_continue);
let constr = builder.mul_extension(filter, constr);
yield_constr.constraint_transition(builder, constr);
}
// ...or jumping.
{
let constr = builder.sub_extension(nv.program_counter, input0[0]);
let constr = builder.mul_extension(jumps_lv.should_jump, constr);
let constr = builder.mul_extension(filter, constr);
let constr_a = builder.mul_extension(filter, jumps_lv.should_jump);
let constr_b = builder.sub_extension(nv.program_counter, jump_dest);
let constr = builder.mul_extension(constr_a, constr_b);
yield_constr.constraint_transition(builder, constr);
}
}
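These rewritten constraints collapse the old `should_continue`/`should_trap` machinery into a single `should_jump` flag driven by the pseudoinverse `cond_sum_pinv`, plus a JUMPDEST bit read through the last memory channel. A sketch, not the repo's actual witness generator, of how a prover would fill the new cells for one JUMP/JUMPI row:

use plonky2::field::goldilocks_field::GoldilocksField as F;
use plonky2::field::types::Field;

/// Illustrative only: `cond` is the 8-limb predicate (for JUMP the kernel fixes it
/// to 1), `dst` the 8-limb destination, `pc` the current program counter.
fn fill_jump_row(cond: [F; 8], dst: [F; 8], pc: F) -> (F, F, F) {
    let cond_sum = cond.into_iter().fold(F::ZERO, |a, b| a + b);
    // Satisfies (should_jump - 1) * cond_sum == 0 and cond_sum_pinv * cond_sum == should_jump.
    let (cond_sum_pinv, should_jump) = match cond_sum.try_inverse() {
        Some(inv) => (inv, F::ONE),
        None => (F::ZERO, F::ZERO),
    };
    // Next program counter: the low limb of `dst` when jumping, else the fallthrough pc + 1.
    let next_pc = if should_jump == F::ONE { dst[0] } else { pc + F::ONE };
    (cond_sum_pinv, should_jump, next_pc)
}

In the full constraints the row must also show that the high limbs of `dst` are zero when jumping, and the JUMPDEST flag channel is only marked `used` when `should_jump * (1 - is_kernel_mode)` is 1, since kernel code may jump anywhere.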

View File

@ -48,6 +48,7 @@ pub(crate) fn combined_kernel() -> Kernel {
include_str!("asm/memory/memcpy.asm"),
include_str!("asm/memory/metadata.asm"),
include_str!("asm/memory/packing.asm"),
include_str!("asm/memory/syscalls.asm"),
include_str!("asm/memory/txn_fields.asm"),
include_str!("asm/mpt/accounts.asm"),
include_str!("asm/mpt/delete/delete.asm"),
@ -71,8 +72,9 @@ pub(crate) fn combined_kernel() -> Kernel {
include_str!("asm/ripemd/main.asm"),
include_str!("asm/ripemd/memory.asm"),
include_str!("asm/ripemd/update.asm"),
include_str!("asm/rlp/encode.asm"),
include_str!("asm/rlp/decode.asm"),
include_str!("asm/rlp/encode.asm"),
include_str!("asm/rlp/encode_rlp_string.asm"),
include_str!("asm/rlp/num_bytes.asm"),
include_str!("asm/rlp/read_to_memory.asm"),
include_str!("asm/sha2/compression.asm"),

View File

@ -4,7 +4,7 @@ global balance:
// stack: account_ptr, retdest
DUP1 ISZERO %jumpi(retzero) // If the account pointer is null, return 0.
%add_const(1)
// stack: balance_ptr
// stack: balance_ptr, retdest
%mload_trie_data
// stack: balance, retdest
SWAP1 JUMP

View File

@ -45,7 +45,9 @@ global delegate_call:
-> (0, 0, value, sender, self, address, gas)
%jump(call_common)
call_common:
// Pre stack: static, should_transfer_value, value, sender, address, code_addr, gas, args_offset, args_size, ret_offset, ret_size, retdest
// Post stack: success, leftover_gas
global call_common:
// stack: static, should_transfer_value, value, sender, address, code_addr, gas, args_offset, args_size, ret_offset, ret_size, retdest
%create_context
// Store the static flag in metadata.
@ -108,3 +110,4 @@ after_call:
// stack: new_ctx, ret_offset, ret_size, retdest
// TODO: Set RETURNDATA.
// TODO: Return to caller w/ EXIT_KERNEL.
// TODO: Return leftover gas

View File

@ -5,6 +5,7 @@
global get_nonce:
// stack: address, retdest
// TODO: Replace with actual implementation.
POP
JUMP
// Convenience macro to call get_nonce and return where we left off.

View File

@ -3,39 +3,58 @@
// TODO: Save checkpoints in @CTX_METADATA_STATE_TRIE_CHECKPOINT_PTR and @SEGMENT_STORAGE_TRIE_CHECKPOINT_PTRS.
// Pre stack: retdest
// Post stack: (empty)
global process_normalized_txn:
// stack: (empty)
// stack: retdest
PUSH validate
%jump(intrinsic_gas)
validate:
// stack: intrinsic_gas
// TODO: Check gas >= intrinsic_gas.
// TODO: Check sender_balance >= intrinsic_gas + value.
global validate:
// stack: intrinsic_gas, retdest
// TODO: Check signature? (Or might happen in type_0.asm etc.)
// TODO: Assert nonce is correct.
// TODO: Assert sender has no code.
POP // TODO: Assert gas_limit >= intrinsic_gas.
// stack: retdest
buy_gas:
// TODO: Deduct gas from sender (some may be refunded later).
global charge_gas:
// TODO: Deduct gas limit from sender (some gas may be refunded later).
increment_nonce:
// TODO: Increment nonce.
PUSH 0 // TODO: Push sender.
%increment_nonce
process_based_on_type:
global process_based_on_type:
%is_contract_creation
%jumpi(process_contract_creation_txn)
%jump(process_message_txn)
process_contract_creation_txn:
// stack: (empty)
global process_contract_creation_txn:
// stack: retdest
// Push the code address & length onto the stack, then call `create`.
%mload_txn_field(@TXN_FIELD_DATA_LEN)
// stack: code_len
// stack: code_len, retdest
PUSH 0
// stack: code_offset, code_len
// stack: code_offset, code_len, retdest
PUSH @SEGMENT_TXN_DATA
// stack: code_segment, code_offset, code_len
// stack: code_segment, code_offset, code_len, retdest
PUSH 0 // context
// stack: CODE_ADDR, code_len
// stack: CODE_ADDR, code_len, retdest
%jump(create)
process_message_txn:
// TODO
global process_message_txn:
// stack: retdest
%mload_txn_field(@TXN_FIELD_VALUE)
%mload_txn_field(@TXN_FIELD_TO)
%mload_txn_field(@TXN_FIELD_ORIGIN)
// stack: from, to, amount, retdest
%transfer_eth
// stack: transfer_eth_status, retdest
%jumpi(process_message_txn_insufficient_balance)
// stack: retdest
// TODO: If code is non-empty, execute it in a new context.
JUMP
global process_message_txn_insufficient_balance:
// stack: retdest
PANIC // TODO
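Putting the labels above together, the routing (much of it still TODO in the asm) is: charge intrinsic gas, bump the sender's nonce, then either create a contract from the transaction data or transfer `value` to `to` and eventually execute its code. A rough sketch of that branching in Rust; every helper below is a hypothetical stub standing in for the kernel label of the same name, not a real API:

type Address = [u8; 20];

struct Txn { to: Option<Address>, value: u64, data: Vec<u8>, origin: Address }

fn intrinsic_gas(_t: &Txn) -> u64 { 21_000 }                 // stub for `intrinsic_gas`
fn increment_nonce(_addr: Address) {}                        // stub for `%increment_nonce`
fn create(_sender: Address, _code: &[u8], _value: u64) {}    // stub for `create`
fn transfer_eth(_from: Address, _to: Address, _amount: u64) -> Result<(), ()> { Ok(()) } // stub

fn process_normalized_txn(txn: &Txn) -> Result<(), &'static str> {
    let _intrinsic = intrinsic_gas(txn); // `validate` and `charge_gas` are still TODO above
    increment_nonce(txn.origin);
    match txn.to {
        // Contract creation: the code is the txn data (SEGMENT_TXN_DATA in the kernel).
        None => { create(txn.origin, &txn.data, txn.value); Ok(()) }
        // Message call: move the value, then (TODO above) run the callee's code in a new context.
        Some(to) => transfer_eth(txn.origin, to, txn.value)
            .map_err(|_| "process_message_txn_insufficient_balance"),
    }
}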

View File

@ -7,7 +7,6 @@ global sys_signextend:
global sys_slt:
global sys_sgt:
global sys_sar:
global sys_keccak256:
global sys_address:
global sys_balance:
global sys_origin:
@ -33,9 +32,6 @@ global sys_gaslimit:
global sys_chainid:
global sys_selfbalance:
global sys_basefee:
global sys_mload:
global sys_mstore:
global sys_mstore8:
global sys_sload:
global sys_sstore:
global sys_msize:

View File

@ -1,15 +1,20 @@
// Transfers some ETH from one address to another. The amount is given in wei.
// Pre stack: from, to, amount, retdest
// Post stack: (empty)
// Post stack: status (0 indicates success)
global transfer_eth:
// stack: from, to, amount, retdest
%stack (from, to, amount, retdest)
-> (from, amount, to, amount)
-> (from, amount, to, amount, retdest)
%deduct_eth
// TODO: Handle exception from %deduct_eth?
// stack: deduct_eth_status, to, amount, retdest
%jumpi(transfer_eth_failure)
// stack: to, amount, retdest
%add_eth
// stack: retdest
global transfer_eth_3:
%stack (retdest) -> (retdest, 0)
JUMP
global transfer_eth_failure:
%stack (to, amount, retdest) -> (retdest, 1)
JUMP
// Convenience macro to call transfer_eth and return where we left off.
@ -31,11 +36,31 @@ global transfer_eth:
%%after:
%endmacro
// Returns 0 on success, or 1 if addr has insufficient balance. Panics if addr isn't found in the trie.
// Pre stack: addr, amount, retdest
// Post stack: status (0 indicates success)
global deduct_eth:
// stack: addr, amount, retdest
%jump(mpt_read_state_trie)
deduct_eth_after_read:
PANIC // TODO
%mpt_read_state_trie
// stack: account_ptr, amount, retdest
DUP1 ISZERO %jumpi(panic) // If the account pointer is null, panic, since the account should exist.
%add_const(1)
// stack: balance_ptr, amount, retdest
DUP1 %mload_trie_data
// stack: balance, balance_ptr, amount, retdest
DUP1 DUP4 GT
// stack: amount > balance, balance, balance_ptr, amount, retdest
%jumpi(deduct_eth_insufficient_balance)
%stack (balance, balance_ptr, amount, retdest) -> (balance, amount, balance_ptr, retdest, 0)
SUB
SWAP1
// stack: balance_ptr, balance - amount, retdest, 0
%mstore_trie_data
// stack: retdest, 0
JUMP
global deduct_eth_insufficient_balance:
%stack (balance, balance_ptr, amount, retdest) -> (retdest, 1)
JUMP
// Convenience macro to call deduct_eth and return where we left off.
%macro deduct_eth
@ -44,8 +69,40 @@ deduct_eth_after_read:
%%after:
%endmacro
// Pre stack: addr, amount, retdest
// Post stack: (empty)
global add_eth:
PANIC // TODO
// stack: addr, amount, retdest
DUP1 %mpt_read_state_trie
// stack: account_ptr, addr, amount, retdest
DUP1 ISZERO %jumpi(add_eth_new_account) // If the account pointer is null, we need to create the account.
%add_const(1)
// stack: balance_ptr, addr, amount, retdest
DUP1 %mload_trie_data
// stack: balance, balance_ptr, addr, amount, retdest
%stack (balance, balance_ptr, addr, amount) -> (amount, balance, addr, balance_ptr)
ADD
// stack: new_balance, addr, balance_ptr, retdest
SWAP1 %mstore_trie_data
// stack: addr, retdest
POP JUMP
global add_eth_new_account:
// TODO: Skip creation if amount == 0?
// stack: null_account_ptr, addr, amount, retdest
POP
%get_trie_data_size // pointer to new account we're about to create
// stack: new_account_ptr, addr, amount, retdest
SWAP2
// stack: amount, addr, new_account_ptr, retdest
PUSH 0 %append_to_trie_data // nonce
%append_to_trie_data // balance
// stack: addr, new_account_ptr, retdest
PUSH 0 %append_to_trie_data // storage root pointer
PUSH @EMPTY_STRING_HASH %append_to_trie_data // code hash
// stack: addr, new_account_ptr, retdest
%addr_to_state_key
// stack: key, new_account_ptr, retdest
%jump(mpt_insert_state_trie)
// Convenience macro to call add_eth and return where we left off.
%macro add_eth
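The new `deduct_eth`/`add_eth` paths read the account leaf (nonce, balance, storage root pointer, code hash), adjust the balance at offset 1, and return a status word (0 for success, 1 for insufficient balance), creating a fresh account with the empty-code hash when crediting an unknown address. The same contract over a plain map instead of the state trie, as an illustrative sketch only:

use std::collections::HashMap;

type Address = [u8; 20];

// Stand-in for an account leaf; the kernel stores (nonce, balance, storage_root_ptr, code_hash).
#[derive(Default)]
struct Account { nonce: u64, balance: u128 }

/// Mirrors `deduct_eth`: 0 on success, 1 on insufficient balance; panics if the
/// account is missing, as the asm does.
fn deduct_eth(state: &mut HashMap<Address, Account>, addr: Address, amount: u128) -> u64 {
    let acct = state.get_mut(&addr).expect("account should exist");
    if amount > acct.balance {
        return 1;
    }
    acct.balance -= amount;
    0
}

/// Mirrors `add_eth`: credit the balance, creating the account (zero nonce,
/// empty-code hash in the real kernel) if it does not exist yet.
fn add_eth(state: &mut HashMap<Address, Account>, addr: Address, amount: u128) {
    state.entry(addr).or_default().balance += amount;
}

/// Mirrors `transfer_eth`: deduct first, and only credit the recipient on success.
fn transfer_eth(state: &mut HashMap<Address, Account>, from: Address, to: Address, amount: u128) -> u64 {
    if deduct_eth(state, from, amount) != 0 {
        return 1;
    }
    add_eth(state, to, amount);
    0
}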

View File

@ -11,7 +11,7 @@ hash_initial_tries:
%mpt_hash_txn_trie %mstore_global_metadata(@GLOBAL_METADATA_TXN_TRIE_DIGEST_BEFORE)
%mpt_hash_receipt_trie %mstore_global_metadata(@GLOBAL_METADATA_RECEIPT_TRIE_DIGEST_BEFORE)
txn_loop:
global txn_loop:
// If the prover has no more txns for us to process, halt.
PROVER_INPUT(end_of_txns)
%jumpi(hash_final_tries)
@ -20,7 +20,7 @@ txn_loop:
PUSH txn_loop
%jump(route_txn)
hash_final_tries:
global hash_final_tries:
%mpt_hash_state_trie %mstore_global_metadata(@GLOBAL_METADATA_STATE_TRIE_DIGEST_AFTER)
%mpt_hash_txn_trie %mstore_global_metadata(@GLOBAL_METADATA_TXN_TRIE_DIGEST_AFTER)
%mpt_hash_receipt_trie %mstore_global_metadata(@GLOBAL_METADATA_RECEIPT_TRIE_DIGEST_AFTER)

View File

@ -0,0 +1,82 @@
global sys_mload:
// stack: kexit_info, offset
PUSH 0 // acc = 0
// stack: acc, kexit_info, offset
DUP3 %add_const( 0) %mload_current(@SEGMENT_MAIN_MEMORY) %shl_const(0xf8) ADD
DUP3 %add_const( 1) %mload_current(@SEGMENT_MAIN_MEMORY) %shl_const(0xf0) ADD
DUP3 %add_const( 2) %mload_current(@SEGMENT_MAIN_MEMORY) %shl_const(0xe8) ADD
DUP3 %add_const( 3) %mload_current(@SEGMENT_MAIN_MEMORY) %shl_const(0xe0) ADD
DUP3 %add_const( 4) %mload_current(@SEGMENT_MAIN_MEMORY) %shl_const(0xd8) ADD
DUP3 %add_const( 5) %mload_current(@SEGMENT_MAIN_MEMORY) %shl_const(0xd0) ADD
DUP3 %add_const( 6) %mload_current(@SEGMENT_MAIN_MEMORY) %shl_const(0xc8) ADD
DUP3 %add_const( 7) %mload_current(@SEGMENT_MAIN_MEMORY) %shl_const(0xc0) ADD
DUP3 %add_const( 8) %mload_current(@SEGMENT_MAIN_MEMORY) %shl_const(0xb8) ADD
DUP3 %add_const( 9) %mload_current(@SEGMENT_MAIN_MEMORY) %shl_const(0xb0) ADD
DUP3 %add_const(10) %mload_current(@SEGMENT_MAIN_MEMORY) %shl_const(0xa8) ADD
DUP3 %add_const(11) %mload_current(@SEGMENT_MAIN_MEMORY) %shl_const(0xa0) ADD
DUP3 %add_const(12) %mload_current(@SEGMENT_MAIN_MEMORY) %shl_const(0x98) ADD
DUP3 %add_const(13) %mload_current(@SEGMENT_MAIN_MEMORY) %shl_const(0x90) ADD
DUP3 %add_const(14) %mload_current(@SEGMENT_MAIN_MEMORY) %shl_const(0x88) ADD
DUP3 %add_const(15) %mload_current(@SEGMENT_MAIN_MEMORY) %shl_const(0x80) ADD
DUP3 %add_const(16) %mload_current(@SEGMENT_MAIN_MEMORY) %shl_const(0x78) ADD
DUP3 %add_const(17) %mload_current(@SEGMENT_MAIN_MEMORY) %shl_const(0x70) ADD
DUP3 %add_const(18) %mload_current(@SEGMENT_MAIN_MEMORY) %shl_const(0x68) ADD
DUP3 %add_const(19) %mload_current(@SEGMENT_MAIN_MEMORY) %shl_const(0x60) ADD
DUP3 %add_const(20) %mload_current(@SEGMENT_MAIN_MEMORY) %shl_const(0x58) ADD
DUP3 %add_const(21) %mload_current(@SEGMENT_MAIN_MEMORY) %shl_const(0x50) ADD
DUP3 %add_const(22) %mload_current(@SEGMENT_MAIN_MEMORY) %shl_const(0x48) ADD
DUP3 %add_const(23) %mload_current(@SEGMENT_MAIN_MEMORY) %shl_const(0x40) ADD
DUP3 %add_const(24) %mload_current(@SEGMENT_MAIN_MEMORY) %shl_const(0x38) ADD
DUP3 %add_const(25) %mload_current(@SEGMENT_MAIN_MEMORY) %shl_const(0x30) ADD
DUP3 %add_const(26) %mload_current(@SEGMENT_MAIN_MEMORY) %shl_const(0x28) ADD
DUP3 %add_const(27) %mload_current(@SEGMENT_MAIN_MEMORY) %shl_const(0x20) ADD
DUP3 %add_const(28) %mload_current(@SEGMENT_MAIN_MEMORY) %shl_const(0x18) ADD
DUP3 %add_const(29) %mload_current(@SEGMENT_MAIN_MEMORY) %shl_const(0x10) ADD
DUP3 %add_const(30) %mload_current(@SEGMENT_MAIN_MEMORY) %shl_const(0x08) ADD
DUP3 %add_const(31) %mload_current(@SEGMENT_MAIN_MEMORY) %shl_const(0x00) ADD
%stack (acc, kexit_info, offset) -> (kexit_info, acc)
EXIT_KERNEL
global sys_mstore:
// stack: kexit_info, offset, value
DUP3 PUSH 0 BYTE DUP3 %add_const( 0) %mstore_current(@SEGMENT_MAIN_MEMORY)
DUP3 PUSH 1 BYTE DUP3 %add_const( 1) %mstore_current(@SEGMENT_MAIN_MEMORY)
DUP3 PUSH 2 BYTE DUP3 %add_const( 2) %mstore_current(@SEGMENT_MAIN_MEMORY)
DUP3 PUSH 3 BYTE DUP3 %add_const( 3) %mstore_current(@SEGMENT_MAIN_MEMORY)
DUP3 PUSH 4 BYTE DUP3 %add_const( 4) %mstore_current(@SEGMENT_MAIN_MEMORY)
DUP3 PUSH 5 BYTE DUP3 %add_const( 5) %mstore_current(@SEGMENT_MAIN_MEMORY)
DUP3 PUSH 6 BYTE DUP3 %add_const( 6) %mstore_current(@SEGMENT_MAIN_MEMORY)
DUP3 PUSH 7 BYTE DUP3 %add_const( 7) %mstore_current(@SEGMENT_MAIN_MEMORY)
DUP3 PUSH 8 BYTE DUP3 %add_const( 8) %mstore_current(@SEGMENT_MAIN_MEMORY)
DUP3 PUSH 9 BYTE DUP3 %add_const( 9) %mstore_current(@SEGMENT_MAIN_MEMORY)
DUP3 PUSH 10 BYTE DUP3 %add_const(10) %mstore_current(@SEGMENT_MAIN_MEMORY)
DUP3 PUSH 11 BYTE DUP3 %add_const(11) %mstore_current(@SEGMENT_MAIN_MEMORY)
DUP3 PUSH 12 BYTE DUP3 %add_const(12) %mstore_current(@SEGMENT_MAIN_MEMORY)
DUP3 PUSH 13 BYTE DUP3 %add_const(13) %mstore_current(@SEGMENT_MAIN_MEMORY)
DUP3 PUSH 14 BYTE DUP3 %add_const(14) %mstore_current(@SEGMENT_MAIN_MEMORY)
DUP3 PUSH 15 BYTE DUP3 %add_const(15) %mstore_current(@SEGMENT_MAIN_MEMORY)
DUP3 PUSH 16 BYTE DUP3 %add_const(16) %mstore_current(@SEGMENT_MAIN_MEMORY)
DUP3 PUSH 17 BYTE DUP3 %add_const(17) %mstore_current(@SEGMENT_MAIN_MEMORY)
DUP3 PUSH 18 BYTE DUP3 %add_const(18) %mstore_current(@SEGMENT_MAIN_MEMORY)
DUP3 PUSH 19 BYTE DUP3 %add_const(19) %mstore_current(@SEGMENT_MAIN_MEMORY)
DUP3 PUSH 20 BYTE DUP3 %add_const(20) %mstore_current(@SEGMENT_MAIN_MEMORY)
DUP3 PUSH 21 BYTE DUP3 %add_const(21) %mstore_current(@SEGMENT_MAIN_MEMORY)
DUP3 PUSH 22 BYTE DUP3 %add_const(22) %mstore_current(@SEGMENT_MAIN_MEMORY)
DUP3 PUSH 23 BYTE DUP3 %add_const(23) %mstore_current(@SEGMENT_MAIN_MEMORY)
DUP3 PUSH 24 BYTE DUP3 %add_const(24) %mstore_current(@SEGMENT_MAIN_MEMORY)
DUP3 PUSH 25 BYTE DUP3 %add_const(25) %mstore_current(@SEGMENT_MAIN_MEMORY)
DUP3 PUSH 26 BYTE DUP3 %add_const(26) %mstore_current(@SEGMENT_MAIN_MEMORY)
DUP3 PUSH 27 BYTE DUP3 %add_const(27) %mstore_current(@SEGMENT_MAIN_MEMORY)
DUP3 PUSH 28 BYTE DUP3 %add_const(28) %mstore_current(@SEGMENT_MAIN_MEMORY)
DUP3 PUSH 29 BYTE DUP3 %add_const(29) %mstore_current(@SEGMENT_MAIN_MEMORY)
DUP3 PUSH 30 BYTE DUP3 %add_const(30) %mstore_current(@SEGMENT_MAIN_MEMORY)
DUP3 PUSH 31 BYTE DUP3 %add_const(31) %mstore_current(@SEGMENT_MAIN_MEMORY)
%stack (kexit_info, offset, value) -> (kexit_info)
EXIT_KERNEL
global sys_mstore8:
// stack: kexit_info, offset, value
%stack (kexit_info, offset, value) -> (offset, value, kexit_info)
%mstore_current(@SEGMENT_MAIN_MEMORY)
// stack: kexit_info
EXIT_KERNEL
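`sys_mload` assembles the 256-bit result big-endian: the byte at `offset + i` is shifted left by `0xf8 - 8*i` bits and added into the accumulator, and `sys_mstore` inverts this with the `BYTE` opcode, which indexes from the most significant byte. The same packing over plain byte arrays, as a small reference sketch (four u64 limbs, most significant first, stand in for the 256-bit word):

/// Pack 32 memory bytes into a big-endian 256-bit word, exactly as the unrolled
/// DUP3 / %shl_const / ADD sequence above does.
fn mload_word(bytes: &[u8; 32]) -> [u64; 4] {
    let mut word = [0u64; 4];
    for (i, &b) in bytes.iter().enumerate() {
        // Byte i contributes b << (0xf8 - 8*i) overall, i.e. it lands in limb i / 8.
        word[i / 8] |= (b as u64) << (56 - 8 * (i % 8));
    }
    word
}

/// Unpack the word back into 32 big-endian bytes, mirroring sys_mstore's
/// `PUSH i BYTE` extraction.
fn mstore_word(word: [u64; 4]) -> [u8; 32] {
    let mut bytes = [0u8; 32];
    for (i, byte) in bytes.iter_mut().enumerate() {
        *byte = (word[i / 8] >> (56 - 8 * (i % 8))) as u8;
    }
    bytes
}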

View File

@ -1,6 +1,8 @@
// Insertion logic specific to a particular trie.
// Mutate the state trie, inserting the given key-value pair.
// Pre stack: key, value_ptr, retdest
// Post stack: (empty)
global mpt_insert_state_trie:
// stack: key, value_ptr, retdest
%stack (key, value_ptr)

View File

@ -8,7 +8,20 @@ global encode_rlp_scalar:
%gt_const(0x7f)
%jumpi(encode_rlp_scalar_medium)
// This is the "small" case, where the value is its own encoding.
// Else, if scalar != 0, this is the "small" case, where the value is its own encoding.
DUP2 %jumpi(encode_rlp_scalar_small)
// scalar = 0, so BE(scalar) is the empty string, which RLP encodes as a single byte 0x80.
// stack: pos, scalar, retdest
%stack (pos, scalar) -> (pos, 0x80, pos)
%mstore_rlp
// stack: pos, retdest
%increment
// stack: pos', retdest
SWAP1
JUMP
encode_rlp_scalar_small:
// stack: pos, scalar, retdest
%stack (pos, scalar) -> (pos, scalar, pos)
// stack: pos, scalar, pos, retdest
@ -127,14 +140,8 @@ encode_rlp_multi_byte_string_prefix_large:
// stack: pos, len_of_len, str_len, retdest
%increment
// stack: pos', len_of_len, str_len, retdest
%stack (pos, len_of_len, str_len)
-> (pos, str_len, len_of_len,
encode_rlp_multi_byte_string_prefix_large_done_writing_len)
%stack (pos, len_of_len, str_len) -> (pos, str_len, len_of_len)
%jump(mstore_unpacking_rlp)
encode_rlp_multi_byte_string_prefix_large_done_writing_len:
// stack: pos'', retdest
SWAP1
JUMP
%macro encode_rlp_multi_byte_string_prefix
%stack (pos, str_len) -> (pos, str_len, %%after)
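The new branch handles scalar 0: its big-endian representation is the empty string, which RLP encodes as the single byte 0x80. Scalars up to 0x7f are their own encoding, and anything larger gets a `0x80 + len` prefix followed by the minimal big-endian bytes (a 256-bit scalar is at most 32 bytes, so the short form always suffices). A compact reference version of the same rule, shown for u128 rather than the kernel's 256-bit values:

/// RLP-encode an unsigned scalar.
fn encode_rlp_scalar(x: u128) -> Vec<u8> {
    if x == 0 {
        vec![0x80] // BE(0) is the empty string, which encodes as 0x80
    } else if x <= 0x7f {
        vec![x as u8] // the "small" case: the value is its own encoding
    } else {
        // the "medium" case: 0x80 + len prefix, then the minimal big-endian bytes
        let be: Vec<u8> = x.to_be_bytes().iter().copied().skip_while(|&b| b == 0).collect();
        let mut out = vec![0x80 + be.len() as u8];
        out.extend_from_slice(&be);
        out
    }
}

// e.g. encode_rlp_scalar(0) == [0x80], encode_rlp_scalar(0x7f) == [0x7f],
//      encode_rlp_scalar(0x400) == [0x82, 0x04, 0x00].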

View File

@ -0,0 +1,72 @@
// Encodes an arbitrary string, given a pointer and length.
// Pre stack: pos, ADDR: 3, len, retdest
// Post stack: pos'
global encode_rlp_string:
// stack: pos, ADDR: 3, len, retdest
DUP5 %eq_const(1)
// stack: len == 1, pos, ADDR: 3, len, retdest
DUP5 DUP5 DUP5 // ADDR: 3
MLOAD_GENERAL
// stack: first_byte, len == 1, pos, ADDR: 3, len, retdest
%lt_const(128)
MUL // cheaper than AND
// stack: single_small_byte, pos, ADDR: 3, len, retdest
%jumpi(encode_rlp_string_small_single_byte)
// stack: pos, ADDR: 3, len, retdest
DUP5 %gt_const(55)
// stack: len > 55, pos, ADDR: 3, len, retdest
%jumpi(encode_rlp_string_large)
global encode_rlp_string_small:
// stack: pos, ADDR: 3, len, retdest
DUP5 // len
%add_const(0x80)
// stack: first_byte, pos, ADDR: 3, len, retdest
DUP2
// stack: pos, first_byte, pos, ADDR: 3, len, retdest
%mstore_rlp
// stack: pos, ADDR: 3, len, retdest
%increment
// stack: pos', ADDR: 3, len, retdest
DUP5 DUP2 ADD // pos'' = pos' + len
// stack: pos'', pos', ADDR: 3, len, retdest
%stack (pos2, pos1, ADDR: 3, len, retdest)
-> (0, @SEGMENT_RLP_RAW, pos1, ADDR, len, retdest, pos2)
%jump(memcpy)
global encode_rlp_string_small_single_byte:
// stack: pos, ADDR: 3, len, retdest
%stack (pos, ADDR: 3, len) -> (ADDR, pos)
MLOAD_GENERAL
// stack: byte, pos, retdest
DUP2
%mstore_rlp
// stack: pos, retdest
%increment
JUMP
global encode_rlp_string_large:
// stack: pos, ADDR: 3, len, retdest
DUP5 %num_bytes
// stack: len_of_len, pos, ADDR: 3, len, retdest
SWAP1
DUP2 // len_of_len
%add_const(0xb7)
// stack: first_byte, pos, len_of_len, ADDR: 3, len, retdest
DUP2
// stack: pos, first_byte, pos, len_of_len, ADDR: 3, len, retdest
%mstore_rlp
// stack: pos, len_of_len, ADDR: 3, len, retdest
%increment
// stack: pos', len_of_len, ADDR: 3, len, retdest
%stack (pos, len_of_len, ADDR: 3, len)
-> (pos, len, len_of_len, encode_rlp_string_large_after_writing_len, ADDR, len)
%jump(mstore_unpacking_rlp)
global encode_rlp_string_large_after_writing_len:
// stack: pos'', ADDR: 3, len, retdest
DUP5 DUP2 ADD // pos''' = pos'' + len
// stack: pos''', pos'', ADDR: 3, len, retdest
%stack (pos3, pos2, ADDR: 3, len, retdest)
-> (0, @SEGMENT_RLP_RAW, pos2, ADDR, len, retdest, pos3)
%jump(memcpy)
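`encode_rlp_string` distinguishes the three RLP string forms the labels above correspond to: a single byte below 0x80 is its own encoding, a payload of at most 55 bytes gets a one-byte `0x80 + len` prefix, and a longer payload gets `0xb7 + len_of_len` followed by the big-endian length. A compact reference version (the kernel writes into the RLP_RAW segment and memcpys the payload; this just returns a Vec):

fn encode_rlp_string(payload: &[u8]) -> Vec<u8> {
    let mut out = Vec::new();
    if payload.len() == 1 && payload[0] < 0x80 {
        // encode_rlp_string_small_single_byte
        out.push(payload[0]);
    } else if payload.len() <= 55 {
        // encode_rlp_string_small
        out.push(0x80 + payload.len() as u8);
        out.extend_from_slice(payload);
    } else {
        // encode_rlp_string_large: 0xb7 + len_of_len, then BE(len), then the payload.
        let len_be: Vec<u8> = (payload.len() as u64)
            .to_be_bytes()
            .iter()
            .copied()
            .skip_while(|&b| b == 0)
            .collect();
        out.push(0xb7 + len_be.len() as u8);
        out.extend_from_slice(&len_be);
        out.extend_from_slice(payload);
    }
    out
}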

View File

@ -127,7 +127,92 @@ parse_r:
%mstore_txn_field(@TXN_FIELD_S)
// stack: retdest
// TODO: Write the signed txn data to memory, where it can be hashed and
// checked against the signature.
type_0_compute_signed_data:
// If a chain_id is present in v, the signed data is
// keccak256(rlp([nonce, gas_price, gas_limit, to, value, data, chain_id, 0, 0]))
// otherwise, it is
// keccak256(rlp([nonce, gas_price, gas_limit, to, value, data]))
%mload_txn_field(@TXN_FIELD_NONCE)
// stack: nonce, retdest
PUSH 9 // We start at 9 to leave room to prepend the largest possible RLP list header.
// stack: rlp_pos, nonce, retdest
%encode_rlp_scalar
// stack: rlp_pos, retdest
%mload_txn_field(@TXN_FIELD_MAX_FEE_PER_GAS)
SWAP1 %encode_rlp_scalar
// stack: rlp_pos, retdest
%mload_txn_field(@TXN_FIELD_GAS_LIMIT)
SWAP1 %encode_rlp_scalar
// stack: rlp_pos, retdest
%mload_txn_field(@TXN_FIELD_TO)
SWAP1 %encode_rlp_160
// stack: rlp_pos, retdest
%mload_txn_field(@TXN_FIELD_VALUE)
SWAP1 %encode_rlp_scalar
// stack: rlp_pos, retdest
// Encode txn data.
%mload_txn_field(@TXN_FIELD_DATA_LEN)
PUSH 0 // ADDR.virt
PUSH @SEGMENT_TXN_DATA
PUSH 0 // ADDR.context
// stack: ADDR: 3, len, rlp_pos, retdest
PUSH after_serializing_txn_data
// stack: after_serializing_txn_data, ADDR: 3, len, rlp_pos, retdest
SWAP5
// stack: rlp_pos, ADDR: 3, len, after_serializing_txn_data, retdest
%jump(encode_rlp_string)
after_serializing_txn_data:
// stack: rlp_pos, retdest
%mload_txn_field(@TXN_FIELD_CHAIN_ID_PRESENT)
ISZERO %jumpi(finish_rlp_list)
// stack: rlp_pos, retdest
%mload_txn_field(@TXN_FIELD_CHAIN_ID)
SWAP1 %encode_rlp_scalar
// stack: rlp_pos, retdest
PUSH 0
SWAP1 %encode_rlp_scalar
// stack: rlp_pos, retdest
PUSH 0
SWAP1 %encode_rlp_scalar
// stack: rlp_pos, retdest
finish_rlp_list:
%prepend_rlp_list_prefix
// stack: start_pos, rlp_len, retdest
PUSH @SEGMENT_RLP_RAW
PUSH 0 // context
// stack: ADDR: 3, rlp_len, retdest
KECCAK_GENERAL
// stack: hash, retdest
%mload_txn_field(@TXN_FIELD_S)
%mload_txn_field(@TXN_FIELD_R)
%mload_txn_field(@TXN_FIELD_Y_PARITY) %add_const(27) // ecrecover interprets v as y_parity + 27
PUSH store_origin
// stack: store_origin, v, r, s, hash, retdest
SWAP4
// stack: hash, v, r, s, store_origin, retdest
%jump(ecrecover)
store_origin:
// stack: address, retdest
// If ecrecover returned u256::MAX, that indicates failure.
DUP1
%eq_const(0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff)
%jumpi(panic)
// stack: address, retdest
%mstore_txn_field(@TXN_FIELD_ORIGIN)
// stack: retdest
%jump(process_normalized_txn)
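The signed data built above is the EIP-155 preimage: the RLP list of the six legacy fields, extended with `(chain_id, 0, 0)` when a chain id was recovered from `v`, and then hashed. A sketch of the same computation off-chain, assuming the `rlp` crate together with the RLP impls that `ethereum-types` ships (only `keccak_hash::keccak` is already used in this crate); this is an illustration, not the kernel's code path:

use ethereum_types::{H160, H256, U256};
use keccak_hash::keccak;
use rlp::RlpStream;

/// Signing hash for a legacy (type-0) transaction, per EIP-155.
fn type_0_signing_hash(
    nonce: U256,
    gas_price: U256,
    gas_limit: U256,
    to: H160,
    value: U256,
    data: &[u8],
    chain_id: Option<u64>,
) -> H256 {
    let mut s = RlpStream::new_list(if chain_id.is_some() { 9 } else { 6 });
    s.append(&nonce);
    s.append(&gas_price);
    s.append(&gas_limit);
    s.append(&to);
    s.append(&value);
    s.append(&data.to_vec());
    if let Some(id) = chain_id {
        s.append(&id).append(&0u8).append(&0u8);
    }
    keccak(s.out())
}

The recovered `v` is then passed to ecrecover as `y_parity + 27`, exactly as the asm above does, and a result of u256::MAX signals a failed recovery.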

View File

@ -1,3 +1,14 @@
global sys_keccak256:
// stack: kexit_info, offset, len
%stack (kexit_info, offset, len) -> (offset, len, kexit_info)
PUSH @SEGMENT_MAIN_MEMORY
GET_CONTEXT
// stack: ADDR: 3, len, kexit_info
KECCAK_GENERAL
// stack: hash, kexit_info
SWAP1
EXIT_KERNEL
// Computes Keccak256(input_word). Clobbers @SEGMENT_KERNEL_GENERAL.
//
// Pre stack: input_word

View File

@ -1,7 +1,7 @@
use std::collections::HashMap;
use ethereum_types::U256;
use itertools::izip;
use itertools::{izip, Itertools};
use keccak_hash::keccak;
use log::debug;
@ -28,6 +28,7 @@ pub struct Kernel {
pub(crate) code_hash: [u32; 8],
pub(crate) global_labels: HashMap<String, usize>,
pub(crate) ordered_labels: Vec<String>,
/// Map from `PROVER_INPUT` offsets to their corresponding `ProverInputFn`.
pub(crate) prover_inputs: HashMap<usize, ProverInputFn>,
@ -43,18 +44,30 @@ impl Kernel {
let code_hash = std::array::from_fn(|i| {
u32::from_le_bytes(std::array::from_fn(|j| code_hash_bytes[i * 4 + j]))
});
let ordered_labels = global_labels
.keys()
.cloned()
.sorted_by_key(|label| global_labels[label])
.collect();
Self {
code,
code_hash,
global_labels,
ordered_labels,
prover_inputs,
}
}
/// Get a string representation of the current offset for debugging purposes.
pub(crate) fn offset_name(&self, offset: usize) -> String {
self.offset_label(offset)
.unwrap_or_else(|| offset.to_string())
match self
.ordered_labels
.binary_search_by_key(&offset, |label| self.global_labels[label])
{
Ok(idx) => self.ordered_labels[idx].clone(),
Err(0) => offset.to_string(),
Err(idx) => format!("{}, below {}", offset, self.ordered_labels[idx - 1]),
}
}
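`offset_name` leans on the contract of `slice::binary_search_by_key`: `Ok(idx)` is an exact label hit, while `Err(idx)` is the insertion point, so `ordered_labels[idx - 1]` (when `idx > 0`) is the nearest label at or below the offset. A standalone demonstration with made-up offsets (the label names are just examples):

fn main() {
    // Sorted (label, offset) pairs, standing in for `ordered_labels` keyed by `global_labels`.
    let labels = [("main", 8usize), ("fault_exception", 40), ("txn_loop", 100)];

    // Exact hit: report the label itself.
    assert_eq!(labels.binary_search_by_key(&40, |&(_, off)| off), Ok(1));
    // Between labels: Err(2), so labels[1] is the label below, giving "55, below fault_exception".
    assert_eq!(labels.binary_search_by_key(&55, |&(_, off)| off), Err(2));
    // Before the first label: Err(0), so offset_name falls back to the bare offset string.
    assert_eq!(labels.binary_search_by_key(&3, |&(_, off)| off), Err(0));
}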
pub(crate) fn offset_label(&self, offset: usize) -> Option<String> {

View File

@ -49,7 +49,12 @@ pub fn evm_constants() -> HashMap<String, U256> {
c
}
const HASH_CONSTANTS: [(&str, [u8; 32]); 1] = [
const HASH_CONSTANTS: [(&str, [u8; 32]); 2] = [
// Hash of an empty string: keccak(b'').hex()
(
"EMPTY_STRING_HASH",
hex!("c5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470"),
),
// Hash of an empty node: keccak(rlp.encode(b'')).hex()
(
"EMPTY_NODE_HASH",

View File

@ -17,10 +17,11 @@ pub(crate) enum NormalizedTxnField {
YParity = 9,
R = 10,
S = 11,
Origin = 12,
}
impl NormalizedTxnField {
pub(crate) const COUNT: usize = 12;
pub(crate) const COUNT: usize = 13;
pub(crate) fn all() -> [Self; Self::COUNT] {
[
@ -36,6 +37,7 @@ impl NormalizedTxnField {
Self::YParity,
Self::R,
Self::S,
Self::Origin,
]
}
@ -54,6 +56,7 @@ impl NormalizedTxnField {
NormalizedTxnField::YParity => "TXN_FIELD_Y_PARITY",
NormalizedTxnField::R => "TXN_FIELD_R",
NormalizedTxnField::S => "TXN_FIELD_S",
NormalizedTxnField::Origin => "TXN_FIELD_ORIGIN",
}
}
}

View File

@ -16,7 +16,7 @@ use crate::generation::prover_input::ProverInputFn;
use crate::generation::state::GenerationState;
use crate::generation::GenerationInputs;
use crate::memory::segments::Segment;
use crate::witness::memory::{MemoryContextState, MemorySegmentState, MemoryState};
use crate::witness::memory::{MemoryAddress, MemoryContextState, MemorySegmentState, MemoryState};
use crate::witness::util::stack_peek;
type F = GoldilocksField;
@ -26,22 +26,11 @@ const DEFAULT_HALT_OFFSET: usize = 0xdeadbeef;
impl MemoryState {
fn mload_general(&self, context: usize, segment: Segment, offset: usize) -> U256 {
let value = self.contexts[context].segments[segment as usize].get(offset);
assert!(
value.bits() <= segment.bit_range(),
"Value read from memory exceeds expected range of {:?} segment",
segment
);
value
self.get(MemoryAddress::new(context, segment, offset))
}
fn mstore_general(&mut self, context: usize, segment: Segment, offset: usize, value: U256) {
assert!(
value.bits() <= segment.bit_range(),
"Value written to memory exceeds expected range of {:?} segment",
segment
);
self.contexts[context].segments[segment as usize].set(offset, value)
self.set(MemoryAddress::new(context, segment, offset), value);
}
}
@ -744,13 +733,6 @@ impl<'a> Interpreter<'a> {
let segment = Segment::all()[self.pop().as_usize()];
let offset = self.pop().as_usize();
let value = self.pop();
assert!(
value.bits() <= segment.bit_range(),
"Value {} exceeds {:?} range of {} bits",
value,
segment,
segment.bit_range()
);
self.generation_state
.memory
.mstore_general(context, segment, offset, value);

evm/src/cpu/memio.rs (new file, 171 lines)
View File

@ -0,0 +1,171 @@
use itertools::izip;
use plonky2::field::extension::Extendable;
use plonky2::field::packed::PackedField;
use plonky2::hash::hash_types::RichField;
use plonky2::iop::ext_target::ExtensionTarget;
use crate::constraint_consumer::{ConstraintConsumer, RecursiveConstraintConsumer};
use crate::cpu::columns::CpuColumnsView;
use crate::cpu::membus::NUM_GP_CHANNELS;
fn get_addr<T: Copy>(lv: &CpuColumnsView<T>) -> (T, T, T) {
let addr_context = lv.mem_channels[0].value[0];
let addr_segment = lv.mem_channels[1].value[0];
let addr_virtual = lv.mem_channels[2].value[0];
(addr_context, addr_segment, addr_virtual)
}
fn eval_packed_load<P: PackedField>(
lv: &CpuColumnsView<P>,
yield_constr: &mut ConstraintConsumer<P>,
) {
let filter = lv.op.mload_general;
let (addr_context, addr_segment, addr_virtual) = get_addr(lv);
let load_channel = lv.mem_channels[3];
let push_channel = lv.mem_channels[NUM_GP_CHANNELS - 1];
yield_constr.constraint(filter * (load_channel.used - P::ONES));
yield_constr.constraint(filter * (load_channel.is_read - P::ONES));
yield_constr.constraint(filter * (load_channel.addr_context - addr_context));
yield_constr.constraint(filter * (load_channel.addr_segment - addr_segment));
yield_constr.constraint(filter * (load_channel.addr_virtual - addr_virtual));
for (load_limb, push_limb) in izip!(load_channel.value, push_channel.value) {
yield_constr.constraint(filter * (load_limb - push_limb));
}
// Disable remaining memory channels, if any.
for &channel in &lv.mem_channels[4..NUM_GP_CHANNELS - 1] {
yield_constr.constraint(filter * channel.used);
}
}
fn eval_ext_circuit_load<F: RichField + Extendable<D>, const D: usize>(
builder: &mut plonky2::plonk::circuit_builder::CircuitBuilder<F, D>,
lv: &CpuColumnsView<ExtensionTarget<D>>,
yield_constr: &mut RecursiveConstraintConsumer<F, D>,
) {
let filter = lv.op.mload_general;
let (addr_context, addr_segment, addr_virtual) = get_addr(lv);
let load_channel = lv.mem_channels[3];
let push_channel = lv.mem_channels[NUM_GP_CHANNELS - 1];
{
let constr = builder.mul_sub_extension(filter, load_channel.used, filter);
yield_constr.constraint(builder, constr);
}
{
let constr = builder.mul_sub_extension(filter, load_channel.is_read, filter);
yield_constr.constraint(builder, constr);
}
for (channel_field, target) in izip!(
[
load_channel.addr_context,
load_channel.addr_segment,
load_channel.addr_virtual,
],
[addr_context, addr_segment, addr_virtual]
) {
let diff = builder.sub_extension(channel_field, target);
let constr = builder.mul_extension(filter, diff);
yield_constr.constraint(builder, constr);
}
for (load_limb, push_limb) in izip!(load_channel.value, push_channel.value) {
let diff = builder.sub_extension(load_limb, push_limb);
let constr = builder.mul_extension(filter, diff);
yield_constr.constraint(builder, constr);
}
// Disable remaining memory channels, if any.
for &channel in &lv.mem_channels[4..NUM_GP_CHANNELS - 1] {
let constr = builder.mul_extension(filter, channel.used);
yield_constr.constraint(builder, constr);
}
}
fn eval_packed_store<P: PackedField>(
lv: &CpuColumnsView<P>,
yield_constr: &mut ConstraintConsumer<P>,
) {
let filter = lv.op.mstore_general;
let (addr_context, addr_segment, addr_virtual) = get_addr(lv);
let value_channel = lv.mem_channels[3];
let store_channel = lv.mem_channels[4];
yield_constr.constraint(filter * (store_channel.used - P::ONES));
yield_constr.constraint(filter * store_channel.is_read);
yield_constr.constraint(filter * (store_channel.addr_context - addr_context));
yield_constr.constraint(filter * (store_channel.addr_segment - addr_segment));
yield_constr.constraint(filter * (store_channel.addr_virtual - addr_virtual));
for (value_limb, store_limb) in izip!(value_channel.value, store_channel.value) {
yield_constr.constraint(filter * (value_limb - store_limb));
}
// Disable remaining memory channels, if any.
for &channel in &lv.mem_channels[5..] {
yield_constr.constraint(filter * channel.used);
}
}
fn eval_ext_circuit_store<F: RichField + Extendable<D>, const D: usize>(
builder: &mut plonky2::plonk::circuit_builder::CircuitBuilder<F, D>,
lv: &CpuColumnsView<ExtensionTarget<D>>,
yield_constr: &mut RecursiveConstraintConsumer<F, D>,
) {
let filter = lv.op.mstore_general;
let (addr_context, addr_segment, addr_virtual) = get_addr(lv);
let value_channel = lv.mem_channels[3];
let store_channel = lv.mem_channels[4];
{
let constr = builder.mul_sub_extension(filter, store_channel.used, filter);
yield_constr.constraint(builder, constr);
}
{
let constr = builder.mul_extension(filter, store_channel.is_read);
yield_constr.constraint(builder, constr);
}
for (channel_field, target) in izip!(
[
store_channel.addr_context,
store_channel.addr_segment,
store_channel.addr_virtual,
],
[addr_context, addr_segment, addr_virtual]
) {
let diff = builder.sub_extension(channel_field, target);
let constr = builder.mul_extension(filter, diff);
yield_constr.constraint(builder, constr);
}
for (value_limb, store_limb) in izip!(value_channel.value, store_channel.value) {
let diff = builder.sub_extension(value_limb, store_limb);
let constr = builder.mul_extension(filter, diff);
yield_constr.constraint(builder, constr);
}
// Disable remaining memory channels, if any.
for &channel in &lv.mem_channels[5..] {
let constr = builder.mul_extension(filter, channel.used);
yield_constr.constraint(builder, constr);
}
}
pub fn eval_packed<P: PackedField>(
lv: &CpuColumnsView<P>,
yield_constr: &mut ConstraintConsumer<P>,
) {
eval_packed_load(lv, yield_constr);
eval_packed_store(lv, yield_constr);
}
pub fn eval_ext_circuit<F: RichField + Extendable<D>, const D: usize>(
builder: &mut plonky2::plonk::circuit_builder::CircuitBuilder<F, D>,
lv: &CpuColumnsView<ExtensionTarget<D>>,
yield_constr: &mut RecursiveConstraintConsumer<F, D>,
) {
eval_ext_circuit_load(builder, lv, yield_constr);
eval_ext_circuit_store(builder, lv, yield_constr);
}
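For readers comparing the packed and circuit forms above: assuming `mul_sub_extension(a, b, c)` computes `a*b - c` (as its name suggests), passing the filter as both the first and last argument gives `filter * (x - 1)`, the recursive analogue of the packed `filter * (channel.used - P::ONES)`. A minimal sketch of the correspondence, with `builder`, `yield_constr`, `filter` and `used` assumed to be in scope:

// Packed side: filter * (used - 1) forces used == 1 whenever the op is active
// (filter == 1) and imposes nothing otherwise.
//
// Circuit side: mul_sub_extension(filter, used, filter) = filter*used - filter
//                                                       = filter * (used - 1),
// i.e. the same polynomial, built from extension targets.
let constr = builder.mul_sub_extension(filter, used, filter);
yield_constr.constraint(builder, constr);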

View File

@ -1,5 +1,6 @@
pub(crate) mod bootstrap_kernel;
pub(crate) mod columns;
mod contextops;
pub(crate) mod control_flow;
pub mod cpu_stark;
pub(crate) mod decode;
@ -7,7 +8,9 @@ mod dup_swap;
mod jumps;
pub mod kernel;
pub(crate) mod membus;
mod memio;
mod modfp254;
mod pc;
mod shift;
pub(crate) mod simple_logic;
mod stack;

38
evm/src/cpu/pc.rs Normal file
View File

@ -0,0 +1,38 @@
use plonky2::field::extension::Extendable;
use plonky2::field::packed::PackedField;
use plonky2::hash::hash_types::RichField;
use plonky2::iop::ext_target::ExtensionTarget;
use crate::constraint_consumer::{ConstraintConsumer, RecursiveConstraintConsumer};
use crate::cpu::columns::CpuColumnsView;
use crate::cpu::membus::NUM_GP_CHANNELS;
pub fn eval_packed<P: PackedField>(
lv: &CpuColumnsView<P>,
yield_constr: &mut ConstraintConsumer<P>,
) {
let filter = lv.op.pc;
let push_value = lv.mem_channels[NUM_GP_CHANNELS - 1].value;
yield_constr.constraint(filter * (push_value[0] - lv.program_counter));
for &limb in &push_value[1..] {
yield_constr.constraint(filter * limb);
}
}
pub fn eval_ext_circuit<F: RichField + Extendable<D>, const D: usize>(
builder: &mut plonky2::plonk::circuit_builder::CircuitBuilder<F, D>,
lv: &CpuColumnsView<ExtensionTarget<D>>,
yield_constr: &mut RecursiveConstraintConsumer<F, D>,
) {
let filter = lv.op.pc;
let push_value = lv.mem_channels[NUM_GP_CHANNELS - 1].value;
{
let diff = builder.sub_extension(push_value[0], lv.program_counter);
let constr = builder.mul_extension(filter, diff);
yield_constr.constraint(builder, constr);
}
for &limb in &push_value[1..] {
let constr = builder.mul_extension(filter, limb);
yield_constr.constraint(builder, constr);
}
}

View File

@ -64,20 +64,56 @@ const STACK_BEHAVIORS: OpsColumnsView<Option<StackBehavior>> = OpsColumnsView {
keccak_general: None, // TODO
prover_input: None, // TODO
pop: None, // TODO
jump: None, // TODO
jumpi: None, // TODO
pc: None, // TODO
gas: None, // TODO
jumpdest: None, // TODO
push: None, // TODO
jump: Some(StackBehavior {
num_pops: 1,
pushes: false,
disable_other_channels: false,
}),
jumpi: Some(StackBehavior {
num_pops: 2,
pushes: false,
disable_other_channels: false,
}),
pc: Some(StackBehavior {
num_pops: 0,
pushes: true,
disable_other_channels: true,
}),
gas: None, // TODO
jumpdest: Some(StackBehavior {
num_pops: 0,
pushes: false,
disable_other_channels: true,
}),
push: None, // TODO
dup: None,
swap: None,
get_context: None, // TODO
set_context: None, // TODO
consume_gas: None, // TODO
exit_kernel: None, // TODO
mload_general: None, // TODO
mstore_general: None, // TODO
get_context: Some(StackBehavior {
num_pops: 0,
pushes: true,
disable_other_channels: true,
}),
set_context: Some(StackBehavior {
num_pops: 1,
pushes: false,
disable_other_channels: true,
}),
consume_gas: None, // TODO
exit_kernel: Some(StackBehavior {
num_pops: 1,
pushes: false,
disable_other_channels: true,
}),
mload_general: Some(StackBehavior {
num_pops: 3,
pushes: true,
disable_other_channels: false,
}),
mstore_general: Some(StackBehavior {
num_pops: 4,
pushes: false,
disable_other_channels: false,
}),
syscall: Some(StackBehavior {
num_pops: 0,
pushes: true,

View File

@ -72,8 +72,8 @@ pub fn eval_packed<P: PackedField>(
// This memory channel is constrained in `stack.rs`.
let output = lv.mem_channels[NUM_GP_CHANNELS - 1].value;
// Push current PC to stack
yield_constr.constraint(filter * (output[0] - lv.program_counter));
// Push current PC + 1 to stack
yield_constr.constraint(filter * (output[0] - (lv.program_counter + P::ONES)));
// Push current kernel flag to stack (share register with PC)
yield_constr.constraint(filter * (output[1] - lv.is_kernel_mode));
// Zero the rest of that register
@ -180,9 +180,10 @@ pub fn eval_ext_circuit<F: RichField + Extendable<D>, const D: usize>(
// This memory channel is constrained in `stack.rs`.
let output = lv.mem_channels[NUM_GP_CHANNELS - 1].value;
// Push current PC to stack
// Push current PC + 1 to stack
{
let diff = builder.sub_extension(output[0], lv.program_counter);
let pc_plus_1 = builder.add_const_extension(lv.program_counter, F::ONE);
let diff = builder.sub_extension(output[0], pc_plus_1);
let constr = builder.mul_extension(filter, diff);
yield_constr.constraint(builder, constr);
}
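These constraints treat the 256-bit value as 32-bit limbs: limb 0 carries the return PC (the PC after the syscall instruction) and limb 1 carries the kernel flag. On the generation side, later in this diff, the same information sits in the low 64-bit limb of a `U256` as `pc + (is_kernel << 32)`. A small self-contained sketch of that packing, with hypothetical helper names:

// Hypothetical helpers mirroring the packing described above: the low 32 bits of
// the first 64-bit limb hold the return PC, and bit 32 holds the kernel flag.
fn pack_kexit_info(program_counter: u32, is_kernel: bool) -> u64 {
    (program_counter as u64) + ((is_kernel as u64) << 32)
}

fn unpack_kexit_info(limb0: u64) -> (u32, bool) {
    (limb0 as u32, (limb0 >> 32) as u32 == 1)
}

fn main() {
    let packed = pack_kexit_info(0x1234, true);
    assert_eq!(unpack_kexit_info(packed), (0x1234, true));
}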

View File

@ -125,11 +125,12 @@ fn simulate_cpu<F: RichField + Extendable<D>, const D: usize>(state: &mut Genera
log::info!("CPU halted after {} cycles", state.traces.clock());
}
already_in_halt_loop |= in_halt_loop;
transition(state);
if already_in_halt_loop && state.traces.clock().is_power_of_two() {
log::info!("CPU trace padded to {} cycles", state.traces.clock());
break;
}
transition(state);
}
}

View File

@ -16,6 +16,17 @@ pub struct AccountRlp {
pub code_hash: H256,
}
impl Default for AccountRlp {
fn default() -> Self {
Self {
nonce: U256::zero(),
balance: U256::zero(),
storage_root: PartialTrie::Empty.calc_hash(),
code_hash: keccak([]),
}
}
}
pub(crate) fn all_mpt_prover_inputs_reversed(trie_inputs: &TrieInputs) -> Vec<U256> {
let mut inputs = all_mpt_prover_inputs(trie_inputs);
inputs.reverse();

View File

@ -68,25 +68,6 @@ impl MemoryOp {
}
}
fn get_max_range_check(memory_ops: &[MemoryOp]) -> usize {
memory_ops
.iter()
.tuple_windows()
.map(|(curr, next)| {
if curr.address.context != next.address.context {
next.address.context - curr.address.context - 1
} else if curr.address.segment != next.address.segment {
next.address.segment - curr.address.segment - 1
} else if curr.address.virt != next.address.virt {
next.address.virt - curr.address.virt - 1
} else {
next.timestamp - curr.timestamp
}
})
.max()
.unwrap_or(0)
}
/// Generates the `_FIRST_CHANGE` columns and the `RANGE_CHECK` column in the trace.
pub fn generate_first_change_flags_and_rc<F: RichField>(trace_rows: &mut [[F; NUM_COLUMNS]]) {
let num_ops = trace_rows.len();
@ -126,6 +107,12 @@ pub fn generate_first_change_flags_and_rc<F: RichField>(trace_rows: &mut [[F; NU
} else {
next_timestamp - timestamp
};
assert!(
row[RANGE_CHECK].to_canonical_u64() < num_ops as u64,
"Range check of {} is too large. Bug in fill_gaps?",
row[RANGE_CHECK]
);
}
}
@ -133,17 +120,15 @@ impl<F: RichField + Extendable<D>, const D: usize> MemoryStark<F, D> {
/// Generate most of the trace rows. Excludes a few columns like `COUNTER`, which are generated
/// later, after transposing to column-major form.
fn generate_trace_row_major(&self, mut memory_ops: Vec<MemoryOp>) -> Vec<[F; NUM_COLUMNS]> {
memory_ops.sort_by_key(|op| {
(
op.address.context,
op.address.segment,
op.address.virt,
op.timestamp,
)
});
// fill_gaps expects an ordered list of operations.
memory_ops.sort_by_key(MemoryOp::sorting_key);
Self::fill_gaps(&mut memory_ops);
Self::pad_memory_ops(&mut memory_ops);
// fill_gaps may have added operations at the end which break the order, so sort again.
memory_ops.sort_by_key(MemoryOp::sorting_key);
let mut trace_rows = memory_ops
.into_par_iter()
.map(|op| op.into_row())
@ -164,26 +149,64 @@ impl<F: RichField + Extendable<D>, const D: usize> MemoryStark<F, D> {
trace_col_vecs[COUNTER_PERMUTED] = permuted_table;
}
fn pad_memory_ops(memory_ops: &mut Vec<MemoryOp>) {
let num_ops = memory_ops.len();
let max_range_check = get_max_range_check(memory_ops);
let num_ops_padded = num_ops.max(max_range_check + 1).next_power_of_two();
let to_pad = num_ops_padded - num_ops;
/// This memory STARK orders rows by `(context, segment, virt, timestamp)`. To enforce the
/// ordering, it range checks the delta of the first field that changed.
///
/// This method adds some dummy operations to ensure that none of these range checks will be too
/// large, i.e. that they will all be smaller than the number of rows, allowing them to be
/// checked easily with a single lookup.
///
/// For example, say there are 32 memory operations, and a particular address is accessed at
/// timestamps 20 and 100. 80 would fail the range check, so this method would add two dummy
/// reads to the same address, say at timestamps 50 and 80.
fn fill_gaps(memory_ops: &mut Vec<MemoryOp>) {
let max_rc = memory_ops.len().next_power_of_two() - 1;
for (mut curr, next) in memory_ops.clone().into_iter().tuple_windows() {
if curr.address.context != next.address.context
|| curr.address.segment != next.address.segment
{
// We won't bother to check if there's a large context gap, because there can't be
// more than 500 contexts or so, as explained here:
// https://notes.ethereum.org/@vbuterin/proposals_to_adjust_memory_gas_costs
// Similarly, the number of possible segments is a small constant, so any gap must
// be small. max_rc will always be much larger, as just bootstrapping the kernel will
// trigger thousands of memory operations.
} else if curr.address.virt != next.address.virt {
while next.address.virt - curr.address.virt - 1 > max_rc {
let mut dummy_address = curr.address;
dummy_address.virt += max_rc + 1;
let dummy_read = MemoryOp::new_dummy_read(dummy_address, 0);
memory_ops.push(dummy_read);
curr = dummy_read;
}
} else {
while next.timestamp - curr.timestamp > max_rc {
let dummy_read =
MemoryOp::new_dummy_read(curr.address, curr.timestamp + max_rc);
memory_ops.push(dummy_read);
curr = dummy_read;
}
}
}
}
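To make the doc comment's example concrete: with 32 operations, `max_rc` is 31, so a timestamp gap from 20 to 100 is closed by dummy reads at timestamps 51 and 82 (the "50 and 80" above is only illustrative). A standalone sketch of just the timestamp branch of the loop:

fn main() {
    // 32 operations in the trace, so range-checked deltas are kept at most max_rc.
    let max_rc = 32usize.next_power_of_two() - 1; // 31
    let (mut curr_ts, next_ts) = (20usize, 100usize);
    let mut dummy_timestamps = vec![];
    while next_ts - curr_ts > max_rc {
        // Each dummy read reuses the current address and advances time by max_rc.
        curr_ts += max_rc;
        dummy_timestamps.push(curr_ts);
    }
    assert_eq!(dummy_timestamps, vec![51, 82]);
    assert!(next_ts - curr_ts <= max_rc); // the remaining delta now passes the range check
}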
fn pad_memory_ops(memory_ops: &mut Vec<MemoryOp>) {
let last_op = *memory_ops.last().expect("No memory ops?");
// We essentially repeat the last operation until our operation list has the desired size,
// with a few changes:
// - We change its filter to 0 to indicate that this is a dummy operation.
// - We increment its timestamp in order to pass the ordering check.
// - We make sure it's a read, sine dummy operations must be reads.
for i in 0..to_pad {
memory_ops.push(MemoryOp {
filter: false,
timestamp: last_op.timestamp + i + 1,
kind: Read,
..last_op
});
// - We make sure it's a read, since dummy operations must be reads.
let padding_op = MemoryOp {
filter: false,
kind: Read,
..last_op
};
let num_ops = memory_ops.len();
let num_ops_padded = num_ops.next_power_of_two();
for _ in num_ops..num_ops_padded {
memory_ops.push(padding_op);
}
}

View File

@ -1,6 +1,7 @@
use std::any::type_name;
use anyhow::{ensure, Result};
use itertools::Itertools;
use maybe_rayon::*;
use plonky2::field::extension::Extendable;
use plonky2::field::packable::Packable;
@ -53,7 +54,11 @@ where
[(); LogicStark::<F, D>::COLUMNS]:,
[(); MemoryStark::<F, D>::COLUMNS]:,
{
let (traces, public_values) = generate_traces(all_stark, inputs, config, timing);
let (traces, public_values) = timed!(
timing,
"generate all traces",
generate_traces(all_stark, inputs, config, timing)
);
prove_with_traces(all_stark, config, traces, public_values, timing)
}
@ -80,19 +85,24 @@ where
let trace_commitments = timed!(
timing,
"compute trace commitments",
"compute all trace commitments",
trace_poly_values
.iter()
.map(|trace| {
PolynomialBatch::<F, C, D>::from_values(
// TODO: Cloning this isn't great; consider having `from_values` accept a reference,
// or having `compute_permutation_z_polys` read trace values from the `PolynomialBatch`.
trace.clone(),
rate_bits,
false,
cap_height,
.zip_eq(Table::all())
.map(|(trace, table)| {
timed!(
timing,
None,
&format!("compute trace commitment for {:?}", table),
PolynomialBatch::<F, C, D>::from_values(
// TODO: Cloning this isn't great; consider having `from_values` accept a reference,
// or having `compute_permutation_z_polys` read trace values from the `PolynomialBatch`.
trace.clone(),
rate_bits,
false,
cap_height,
timing,
None,
)
)
})
.collect::<Vec<_>>()
@ -107,66 +117,30 @@ where
challenger.observe_cap(cap);
}
let ctl_data_per_table = cross_table_lookup_data::<F, C, D>(
config,
&trace_poly_values,
&all_stark.cross_table_lookups,
&mut challenger,
let ctl_data_per_table = timed!(
timing,
"compute CTL data",
cross_table_lookup_data::<F, C, D>(
config,
&trace_poly_values,
&all_stark.cross_table_lookups,
&mut challenger,
)
);
let cpu_proof = prove_single_table(
&all_stark.cpu_stark,
config,
&trace_poly_values[Table::Cpu as usize],
&trace_commitments[Table::Cpu as usize],
&ctl_data_per_table[Table::Cpu as usize],
&mut challenger,
let stark_proofs = timed!(
timing,
)?;
let keccak_proof = prove_single_table(
&all_stark.keccak_stark,
config,
&trace_poly_values[Table::Keccak as usize],
&trace_commitments[Table::Keccak as usize],
&ctl_data_per_table[Table::Keccak as usize],
&mut challenger,
timing,
)?;
let keccak_sponge_proof = prove_single_table(
&all_stark.keccak_sponge_stark,
config,
&trace_poly_values[Table::KeccakSponge as usize],
&trace_commitments[Table::KeccakSponge as usize],
&ctl_data_per_table[Table::KeccakSponge as usize],
&mut challenger,
timing,
)?;
let logic_proof = prove_single_table(
&all_stark.logic_stark,
config,
&trace_poly_values[Table::Logic as usize],
&trace_commitments[Table::Logic as usize],
&ctl_data_per_table[Table::Logic as usize],
&mut challenger,
timing,
)?;
let memory_proof = prove_single_table(
&all_stark.memory_stark,
config,
&trace_poly_values[Table::Memory as usize],
&trace_commitments[Table::Memory as usize],
&ctl_data_per_table[Table::Memory as usize],
&mut challenger,
timing,
)?;
let stark_proofs = [
cpu_proof,
keccak_proof,
keccak_sponge_proof,
logic_proof,
memory_proof,
];
"compute all proofs given commitments",
prove_with_commitments(
all_stark,
config,
trace_poly_values,
trace_commitments,
ctl_data_per_table,
&mut challenger,
timing
)?
);
Ok(AllProof {
stark_proofs,
@ -174,6 +148,99 @@ where
})
}
fn prove_with_commitments<F, C, const D: usize>(
all_stark: &AllStark<F, D>,
config: &StarkConfig,
trace_poly_values: [Vec<PolynomialValues<F>>; NUM_TABLES],
trace_commitments: Vec<PolynomialBatch<F, C, D>>,
ctl_data_per_table: [CtlData<F>; NUM_TABLES],
challenger: &mut Challenger<F, C::Hasher>,
timing: &mut TimingTree,
) -> Result<[StarkProof<F, C, D>; NUM_TABLES]>
where
F: RichField + Extendable<D>,
C: GenericConfig<D, F = F>,
[(); C::Hasher::HASH_SIZE]:,
[(); CpuStark::<F, D>::COLUMNS]:,
[(); KeccakStark::<F, D>::COLUMNS]:,
[(); KeccakSpongeStark::<F, D>::COLUMNS]:,
[(); LogicStark::<F, D>::COLUMNS]:,
[(); MemoryStark::<F, D>::COLUMNS]:,
{
let cpu_proof = timed!(
timing,
"prove CPU STARK",
prove_single_table(
&all_stark.cpu_stark,
config,
&trace_poly_values[Table::Cpu as usize],
&trace_commitments[Table::Cpu as usize],
&ctl_data_per_table[Table::Cpu as usize],
challenger,
timing,
)?
);
let keccak_proof = timed!(
timing,
"prove Keccak STARK",
prove_single_table(
&all_stark.keccak_stark,
config,
&trace_poly_values[Table::Keccak as usize],
&trace_commitments[Table::Keccak as usize],
&ctl_data_per_table[Table::Keccak as usize],
challenger,
timing,
)?
);
let keccak_sponge_proof = timed!(
timing,
"prove Keccak sponge STARK",
prove_single_table(
&all_stark.keccak_sponge_stark,
config,
&trace_poly_values[Table::KeccakSponge as usize],
&trace_commitments[Table::KeccakSponge as usize],
&ctl_data_per_table[Table::KeccakSponge as usize],
challenger,
timing,
)?
);
let logic_proof = timed!(
timing,
"prove logic STARK",
prove_single_table(
&all_stark.logic_stark,
config,
&trace_poly_values[Table::Logic as usize],
&trace_commitments[Table::Logic as usize],
&ctl_data_per_table[Table::Logic as usize],
challenger,
timing,
)?
);
let memory_proof = timed!(
timing,
"prove memory STARK",
prove_single_table(
&all_stark.memory_stark,
config,
&trace_poly_values[Table::Memory as usize],
&trace_commitments[Table::Memory as usize],
&ctl_data_per_table[Table::Memory as usize],
challenger,
timing,
)?
);
Ok([
cpu_proof,
keccak_proof,
keccak_sponge_proof,
logic_proof,
memory_proof,
])
}
/// Compute proof for a single STARK table.
pub(crate) fn prove_single_table<F, C, S, const D: usize>(
stark: &S,

View File

@ -18,7 +18,7 @@ use plonky2::plonk::proof::{ProofWithPublicInputs, ProofWithPublicInputsTarget};
use plonky2::util::reducing::ReducingFactorTarget;
use plonky2::with_context;
use crate::all_stark::{AllStark, Table, NUM_TABLES};
use crate::all_stark::{all_cross_table_lookups, AllStark, Table, NUM_TABLES};
use crate::config::StarkConfig;
use crate::constraint_consumer::RecursiveConstraintConsumer;
use crate::cpu::cpu_stark::CpuStark;
@ -162,7 +162,6 @@ impl<F: RichField + Extendable<D>, C: GenericConfig<D, F = F>, const D: usize>
builder: &mut CircuitBuilder<F, D>,
recursive_all_proof_target: RecursiveAllProofTargetWithData<D>,
verifier_data: &[VerifierCircuitData<F, C, D>; NUM_TABLES],
cross_table_lookups: Vec<CrossTableLookup<F>>,
inner_config: &StarkConfig,
) where
[(); C::Hasher::HASH_SIZE]:,
@ -219,7 +218,7 @@ impl<F: RichField + Extendable<D>, C: GenericConfig<D, F = F>, const D: usize>
let degrees_bits = std::array::from_fn(|i| verifier_data[i].common.degree_bits());
verify_cross_table_lookups_circuit::<F, C, D>(
builder,
cross_table_lookups,
all_cross_table_lookups(),
pis.map(|p| p.ctl_zs_last),
degrees_bits,
ctl_challenges,
@ -842,7 +841,7 @@ pub(crate) mod tests {
use plonky2::hash::hash_types::RichField;
use plonky2::hash::hashing::SPONGE_WIDTH;
use plonky2::iop::challenger::RecursiveChallenger;
use plonky2::iop::witness::{PartialWitness, Witness};
use plonky2::iop::witness::{PartialWitness, WitnessWrite};
use plonky2::plonk::circuit_builder::CircuitBuilder;
use plonky2::plonk::circuit_data::{CircuitConfig, VerifierCircuitData};
use plonky2::plonk::config::{AlgebraicHasher, GenericConfig, Hasher};

View File

@ -3,7 +3,7 @@ use plonky2::field::extension::{Extendable, FieldExtension};
use plonky2::field::polynomial::{PolynomialCoeffs, PolynomialValues};
use plonky2::field::types::{Field, Sample};
use plonky2::hash::hash_types::RichField;
use plonky2::iop::witness::{PartialWitness, Witness};
use plonky2::iop::witness::{PartialWitness, WitnessWrite};
use plonky2::plonk::circuit_builder::CircuitBuilder;
use plonky2::plonk::circuit_data::CircuitConfig;
use plonky2::plonk::config::{GenericConfig, Hasher};

View File

@ -144,11 +144,3 @@ pub(crate) fn biguint_to_u256(x: BigUint) -> U256 {
let bytes = x.to_bytes_le();
U256::from_little_endian(&bytes)
}
pub(crate) fn u256_saturating_cast_usize(x: U256) -> usize {
if x > usize::MAX.into() {
usize::MAX
} else {
x.as_usize()
}
}

View File

@ -11,7 +11,6 @@ pub enum MemoryChannel {
use MemoryChannel::{Code, GeneralPurpose};
use crate::memory::segments::Segment;
use crate::util::u256_saturating_cast_usize;
impl MemoryChannel {
pub fn index(&self) -> usize {
@ -42,10 +41,17 @@ impl MemoryAddress {
}
pub(crate) fn new_u256s(context: U256, segment: U256, virt: U256) -> Self {
assert!(context.bits() <= 32, "context too large: {}", context);
assert!(
segment < Segment::COUNT.into(),
"segment too large: {}",
segment
);
assert!(virt.bits() <= 32, "virt too large: {}", virt);
Self {
context: u256_saturating_cast_usize(context),
segment: u256_saturating_cast_usize(segment),
virt: u256_saturating_cast_usize(virt),
context: context.as_usize(),
segment: segment.as_usize(),
virt: virt.as_usize(),
}
}
@ -87,6 +93,25 @@ impl MemoryOp {
value,
}
}
pub(crate) fn new_dummy_read(address: MemoryAddress, timestamp: usize) -> Self {
Self {
filter: false,
timestamp,
address,
kind: MemoryOpKind::Read,
value: U256::zero(),
}
}
pub(crate) fn sorting_key(&self) -> (usize, usize, usize, usize) {
(
self.address.context,
self.address.segment,
self.address.virt,
self.timestamp,
)
}
}
#[derive(Clone, Debug)]
@ -117,10 +142,27 @@ impl MemoryState {
}
pub fn get(&self, address: MemoryAddress) -> U256 {
self.contexts[address.context].segments[address.segment].get(address.virt)
let segment = Segment::all()[address.segment];
let val = self.contexts[address.context].segments[address.segment].get(address.virt);
assert!(
val.bits() <= segment.bit_range(),
"Value {} exceeds {:?} range of {} bits",
val,
segment,
segment.bit_range()
);
val
}
pub fn set(&mut self, address: MemoryAddress, val: U256) {
let segment = Segment::all()[address.segment];
assert!(
val.bits() <= segment.bit_range(),
"Value {} exceeds {:?} range of {} bits",
val,
segment,
segment.bit_range()
);
self.contexts[address.context].segments[address.segment].set(address.virt, val);
}
}

View File

@ -5,11 +5,11 @@ use plonky2::field::types::Field;
use crate::cpu::columns::CpuColumnsView;
use crate::cpu::kernel::aggregator::KERNEL;
use crate::cpu::kernel::assembler::BYTES_PER_OFFSET;
use crate::cpu::membus::NUM_GP_CHANNELS;
use crate::cpu::simple_logic::eq_iszero::generate_pinv_diff;
use crate::generation::state::GenerationState;
use crate::memory::segments::Segment;
use crate::util::u256_saturating_cast_usize;
use crate::witness::errors::ProgramError;
use crate::witness::memory::MemoryAddress;
use crate::witness::util::{
@ -20,9 +20,6 @@ use crate::{arithmetic, logic};
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
pub(crate) enum Operation {
Push(u8),
Dup(u8),
Swap(u8),
Iszero,
Not,
Byte,
@ -39,6 +36,9 @@ pub(crate) enum Operation {
Pc,
Gas,
Jumpdest,
Push(u8),
Dup(u8),
Swap(u8),
GetContext,
SetContext,
ConsumeGas,
@ -186,11 +186,37 @@ pub(crate) fn generate_jump<F: Field>(
mut row: CpuColumnsView<F>,
) -> Result<(), ProgramError> {
let [(dst, log_in0)] = stack_pop_with_log_and_fill::<1, _>(state, &mut row)?;
let dst: u32 = dst
.try_into()
.map_err(|_| ProgramError::InvalidJumpDestination)?;
let (jumpdest_bit, jumpdest_bit_log) = mem_read_gp_with_log_and_fill(
NUM_GP_CHANNELS - 1,
MemoryAddress::new(state.registers.context, Segment::JumpdestBits, dst as usize),
state,
&mut row,
);
if state.registers.is_kernel {
// Don't actually do the read, just set the address, etc.
let mut channel = &mut row.mem_channels[NUM_GP_CHANNELS - 1];
channel.used = F::ZERO;
channel.value[0] = F::ONE;
row.mem_channels[1].value[0] = F::ONE;
} else {
if jumpdest_bit != U256::one() {
return Err(ProgramError::InvalidJumpDestination);
}
state.traces.push_memory(jumpdest_bit_log);
}
// Extra fields required by the constraints.
row.general.jumps_mut().should_jump = F::ONE;
row.general.jumps_mut().cond_sum_pinv = F::ONE;
state.traces.push_memory(log_in0);
state.traces.push_cpu(row);
state.registers.program_counter = u256_saturating_cast_usize(dst);
// TODO: Set other cols like input0_upper_sum_inv.
state.registers.program_counter = dst as usize;
Ok(())
}
@ -200,15 +226,92 @@ pub(crate) fn generate_jumpi<F: Field>(
) -> Result<(), ProgramError> {
let [(dst, log_in0), (cond, log_in1)] = stack_pop_with_log_and_fill::<2, _>(state, &mut row)?;
let should_jump = !cond.is_zero();
if should_jump {
row.general.jumps_mut().should_jump = F::ONE;
let cond_sum_u64 = cond
.0
.into_iter()
.map(|limb| ((limb as u32) as u64) + (limb >> 32))
.sum();
let cond_sum = F::from_canonical_u64(cond_sum_u64);
row.general.jumps_mut().cond_sum_pinv = cond_sum.inverse();
let dst: u32 = dst
.try_into()
.map_err(|_| ProgramError::InvalidJumpiDestination)?;
state.registers.program_counter = dst as usize;
} else {
row.general.jumps_mut().should_jump = F::ZERO;
row.general.jumps_mut().cond_sum_pinv = F::ZERO;
state.registers.program_counter += 1;
}
let (jumpdest_bit, jumpdest_bit_log) = mem_read_gp_with_log_and_fill(
NUM_GP_CHANNELS - 1,
MemoryAddress::new(
state.registers.context,
Segment::JumpdestBits,
dst.low_u32() as usize,
),
state,
&mut row,
);
if !should_jump || state.registers.is_kernel {
// Don't actually do the read, just set the address, etc.
let mut channel = &mut row.mem_channels[NUM_GP_CHANNELS - 1];
channel.used = F::ZERO;
channel.value[0] = F::ONE;
} else {
if jumpdest_bit != U256::one() {
return Err(ProgramError::InvalidJumpiDestination);
}
state.traces.push_memory(jumpdest_bit_log);
}
state.traces.push_memory(log_in0);
state.traces.push_memory(log_in1);
state.traces.push_cpu(row);
state.registers.program_counter = if cond.is_zero() {
state.registers.program_counter + 1
} else {
u256_saturating_cast_usize(dst)
};
// TODO: Set other cols like input0_upper_sum_inv.
Ok(())
}
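The `cond_sum_pinv` value set above is the field inverse of a fold of the condition: each 64-bit limb is reduced to its low 32 bits plus its high 32 bits, and the four reductions are summed. The sum is at most roughly 2^35, well below the Goldilocks modulus, and it is zero exactly when the condition is zero, so the inverse exists iff the jump is taken. A small check of that fold, assuming the `ethereum_types::U256` limb layout used above:

use ethereum_types::U256;

// Fold each u64 limb to (low 32 bits) + (high 32 bits), then sum the four folds.
fn cond_fold_sum(cond: U256) -> u64 {
    cond.0
        .into_iter()
        .map(|limb| ((limb as u32) as u64) + (limb >> 32))
        .sum()
}

fn main() {
    assert_eq!(cond_fold_sum(U256::zero()), 0);
    assert_ne!(cond_fold_sum(U256::one()), 0);
    assert_ne!(cond_fold_sum(U256::one() << 255), 0);
}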
pub(crate) fn generate_pc<F: Field>(
state: &mut GenerationState<F>,
mut row: CpuColumnsView<F>,
) -> Result<(), ProgramError> {
let write = stack_push_log_and_fill(state, &mut row, state.registers.program_counter.into())?;
state.traces.push_memory(write);
state.traces.push_cpu(row);
Ok(())
}
pub(crate) fn generate_jumpdest<F: Field>(
state: &mut GenerationState<F>,
row: CpuColumnsView<F>,
) -> Result<(), ProgramError> {
state.traces.push_cpu(row);
Ok(())
}
pub(crate) fn generate_get_context<F: Field>(
state: &mut GenerationState<F>,
mut row: CpuColumnsView<F>,
) -> Result<(), ProgramError> {
let ctx = state.registers.context.into();
let write = stack_push_log_and_fill(state, &mut row, ctx)?;
state.traces.push_memory(write);
state.traces.push_cpu(row);
Ok(())
}
pub(crate) fn generate_set_context<F: Field>(
state: &mut GenerationState<F>,
mut row: CpuColumnsView<F>,
) -> Result<(), ProgramError> {
let [(ctx, log_in)] = stack_pop_with_log_and_fill::<1, _>(state, &mut row)?;
state.registers.context = ctx.as_usize();
state.traces.push_memory(log_in);
state.traces.push_cpu(row);
Ok(())
}
@ -386,7 +489,9 @@ pub(crate) fn generate_syscall<F: Field>(
mut row: CpuColumnsView<F>,
) -> Result<(), ProgramError> {
let handler_jumptable_addr = KERNEL.global_labels["syscall_jumptable"];
let handler_addr_addr = handler_jumptable_addr + (opcode as usize);
let handler_addr_addr =
handler_jumptable_addr + (opcode as usize) * (BYTES_PER_OFFSET as usize);
assert_eq!(BYTES_PER_OFFSET, 3, "Code below assumes 3 bytes per offset");
let (handler_addr0, log_in0) = mem_read_gp_with_log_and_fill(
0,
MemoryAddress::new(0, Segment::Code, handler_addr_addr),
@ -409,11 +514,12 @@ pub(crate) fn generate_syscall<F: Field>(
let handler_addr = (handler_addr0 << 16) + (handler_addr1 << 8) + handler_addr2;
let new_program_counter = handler_addr.as_usize();
let syscall_info = U256::from(state.registers.program_counter)
let syscall_info = U256::from(state.registers.program_counter + 1)
+ (U256::from(u64::from(state.registers.is_kernel)) << 32);
let log_out = stack_push_log_and_fill(state, &mut row, syscall_info)?;
state.registers.program_counter = new_program_counter;
log::debug!("Syscall to {}", KERNEL.offset_name(new_program_counter));
state.registers.is_kernel = true;
state.traces.push_memory(log_in0);
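The fix above accounts for each jumptable entry being a `BYTES_PER_OFFSET` = 3-byte offset, and the three bytes read back are recombined most-significant first. Worked arithmetic with placeholder numbers (the real base address comes from the kernel's `syscall_jumptable` label):

fn main() {
    let bytes_per_offset = 3usize;
    let handler_jumptable_addr = 0x1000usize; // placeholder, not the real label address
    let opcode = 0xf1u8; // one of the opcodes routed to Operation::Syscall
    let handler_addr_addr = handler_jumptable_addr + (opcode as usize) * bytes_per_offset;
    assert_eq!(handler_addr_addr, 0x1000 + 0x2d3); // 0xf1 * 3 = 0x2d3

    // The three bytes at that address are reassembled big-endian.
    let (b0, b1, b2) = (0x01u64, 0x02u64, 0x03u64);
    let handler_addr = (b0 << 16) + (b1 << 8) + b2;
    assert_eq!(handler_addr, 0x010203);
}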
@ -448,14 +554,19 @@ pub(crate) fn generate_exit_kernel<F: Field>(
mut row: CpuColumnsView<F>,
) -> Result<(), ProgramError> {
let [(kexit_info, log_in)] = stack_pop_with_log_and_fill::<1, _>(state, &mut row)?;
let kexit_info_u64: [u64; 4] = kexit_info.0;
let program_counter = kexit_info_u64[0] as usize;
let is_kernel_mode_val = (kexit_info_u64[1] >> 32) as u32;
let kexit_info_u64 = kexit_info.0[0];
let program_counter = kexit_info_u64 as u32 as usize;
let is_kernel_mode_val = (kexit_info_u64 >> 32) as u32;
assert!(is_kernel_mode_val == 0 || is_kernel_mode_val == 1);
let is_kernel_mode = is_kernel_mode_val != 0;
state.registers.program_counter = program_counter;
state.registers.is_kernel = is_kernel_mode;
log::debug!(
"Exiting to {}, is_kernel={}",
KERNEL.offset_name(program_counter),
is_kernel_mode
);
state.traces.push_memory(log_in);
state.traces.push_cpu(row);
@ -470,14 +581,19 @@ pub(crate) fn generate_mload_general<F: Field>(
let [(context, log_in0), (segment, log_in1), (virt, log_in2)] =
stack_pop_with_log_and_fill::<3, _>(state, &mut row)?;
let val = state
.memory
.get(MemoryAddress::new_u256s(context, segment, virt));
let (val, log_read) = mem_read_gp_with_log_and_fill(
3,
MemoryAddress::new_u256s(context, segment, virt),
state,
&mut row,
);
let log_out = stack_push_log_and_fill(state, &mut row, val)?;
state.traces.push_memory(log_in0);
state.traces.push_memory(log_in1);
state.traces.push_memory(log_in2);
state.traces.push_memory(log_read);
state.traces.push_memory(log_out);
state.traces.push_cpu(row);
Ok(())

View File

@ -4,6 +4,7 @@ use itertools::Itertools;
use plonky2::field::extension::Extendable;
use plonky2::field::polynomial::PolynomialValues;
use plonky2::hash::hash_types::RichField;
use plonky2::timed;
use plonky2::util::timing::TimingTree;
use crate::all_stark::{AllStark, NUM_TABLES};
@ -131,18 +132,32 @@ impl<T: Copy> Traces<T> {
let cpu_rows = cpu.into_iter().map(|x| x.into()).collect();
let cpu_trace = trace_rows_to_poly_values(cpu_rows);
let keccak_trace =
let keccak_trace = timed!(
timing,
"generate Keccak trace",
all_stark
.keccak_stark
.generate_trace(keccak_inputs, cap_elements, timing);
let keccak_sponge_trace =
.generate_trace(keccak_inputs, cap_elements, timing)
);
let keccak_sponge_trace = timed!(
timing,
"generate Keccak sponge trace",
all_stark
.keccak_sponge_stark
.generate_trace(keccak_sponge_ops, cap_elements, timing);
let logic_trace = all_stark
.logic_stark
.generate_trace(logic_ops, cap_elements, timing);
let memory_trace = all_stark.memory_stark.generate_trace(memory_ops, timing);
.generate_trace(keccak_sponge_ops, cap_elements, timing)
);
let logic_trace = timed!(
timing,
"generate logic trace",
all_stark
.logic_stark
.generate_trace(logic_ops, cap_elements, timing)
);
let memory_trace = timed!(
timing,
"generate memory trace",
all_stark.memory_stark.generate_trace(memory_ops, timing)
);
[
cpu_trace,

View File

@ -113,6 +113,10 @@ fn decode(registers: RegistersState, opcode: u8) -> Result<Operation, ProgramErr
(0xa2, _) => Ok(Operation::Syscall(opcode)),
(0xa3, _) => Ok(Operation::Syscall(opcode)),
(0xa4, _) => Ok(Operation::Syscall(opcode)),
(0xa5, _) => panic!(
"Kernel panic at {}",
KERNEL.offset_name(registers.program_counter)
),
(0xf0, _) => Ok(Operation::Syscall(opcode)),
(0xf1, _) => Ok(Operation::Syscall(opcode)),
(0xf2, _) => Ok(Operation::Syscall(opcode)),
@ -128,7 +132,10 @@ fn decode(registers: RegistersState, opcode: u8) -> Result<Operation, ProgramErr
(0xfc, true) => Ok(Operation::MstoreGeneral),
(0xfd, _) => Ok(Operation::Syscall(opcode)),
(0xff, _) => Ok(Operation::Syscall(opcode)),
_ => Err(ProgramError::InvalidOpcode),
_ => {
log::warn!("Invalid opcode: {}", opcode);
Err(ProgramError::InvalidOpcode)
}
}
}
@ -201,11 +208,11 @@ fn perform_op<F: Field>(
Operation::Pop => generate_pop(state, row)?,
Operation::Jump => generate_jump(state, row)?,
Operation::Jumpi => generate_jumpi(state, row)?,
Operation::Pc => todo!(),
Operation::Pc => generate_pc(state, row)?,
Operation::Gas => todo!(),
Operation::Jumpdest => todo!(),
Operation::GetContext => todo!(),
Operation::SetContext => todo!(),
Operation::Jumpdest => generate_jumpdest(state, row)?,
Operation::GetContext => generate_get_context(state, row)?,
Operation::SetContext => generate_set_context(state, row)?,
Operation::ConsumeGas => todo!(),
Operation::ExitKernel => generate_exit_kernel(state, row)?,
Operation::MloadGeneral => generate_mload_general(state, row)?,
@ -219,12 +226,6 @@ fn perform_op<F: Field>(
_ => 1,
};
if let Some(label) = KERNEL.offset_label(state.registers.program_counter) {
if !label.starts_with("halt_pc") {
log::debug!("At {label}");
}
}
Ok(())
}
@ -239,19 +240,39 @@ fn try_perform_instruction<F: Field>(state: &mut GenerationState<F>) -> Result<(
let opcode = read_code_memory(state, &mut row);
let op = decode(state.registers, opcode)?;
let pc = state.registers.program_counter;
log::trace!("\nCycle {}", state.traces.clock());
log::trace!(
"Stack: {:?}",
log_instruction(state, op);
fill_op_flag(op, &mut row);
perform_op(state, op, row)
}
fn log_instruction<F: Field>(state: &mut GenerationState<F>, op: Operation) {
let pc = state.registers.program_counter;
let is_interesting_offset = KERNEL
.offset_label(pc)
.filter(|label| !label.starts_with("halt_pc"))
.is_some();
let level = if is_interesting_offset {
log::Level::Debug
} else {
log::Level::Trace
};
log::log!(
level,
"Cycle {}, pc={}, instruction={:?}, stack={:?}",
state.traces.clock(),
KERNEL.offset_name(pc),
op,
(0..state.registers.stack_len)
.map(|i| stack_peek(state, i).unwrap())
.collect_vec()
);
log::trace!("Executing {:?} at {}", op, KERNEL.offset_name(pc));
fill_op_flag(op, &mut row);
perform_op(state, op, row)
if state.registers.is_kernel && pc >= KERNEL.code.len() {
panic!("Kernel PC is out of range: {}", pc);
}
}
fn handle_error<F: Field>(_state: &mut GenerationState<F>) {
@ -270,7 +291,8 @@ pub(crate) fn transition<F: Field>(state: &mut GenerationState<F>) {
}
Err(e) => {
if state.registers.is_kernel {
panic!("exception in kernel mode: {:?}", e);
let offset_name = KERNEL.offset_name(state.registers.program_counter);
panic!("exception in kernel mode at {}: {:?}", offset_name, e);
}
state.rollback(checkpoint);
handle_error(state)

View File

@ -1,4 +1,5 @@
use std::collections::HashMap;
use std::time::Duration;
use env_logger::{try_init_from_env, Env, DEFAULT_FILTER_ENV};
use eth_trie_utils::partial_trie::PartialTrie;
@ -49,7 +50,7 @@ fn test_empty_txn_list() -> anyhow::Result<()> {
let mut timing = TimingTree::new("prove", log::Level::Debug);
let proof = prove::<F, C, D>(&all_stark, &config, inputs, &mut timing)?;
timing.print();
timing.filter(Duration::from_millis(100)).print();
assert_eq!(
proof.public_values.trie_roots_before.state_root,
@ -80,5 +81,5 @@ fn test_empty_txn_list() -> anyhow::Result<()> {
}
fn init_logger() {
let _ = try_init_from_env(Env::default().filter_or(DEFAULT_FILTER_ENV, "debug"));
let _ = try_init_from_env(Env::default().filter_or(DEFAULT_FILTER_ENV, "info"));
}

View File

@ -1,12 +1,17 @@
use std::collections::HashMap;
use std::time::Duration;
use eth_trie_utils::partial_trie::PartialTrie;
use env_logger::{try_init_from_env, Env, DEFAULT_FILTER_ENV};
use eth_trie_utils::partial_trie::{Nibbles, PartialTrie};
use ethereum_types::U256;
use hex_literal::hex;
use keccak_hash::keccak;
use plonky2::field::goldilocks_field::GoldilocksField;
use plonky2::plonk::config::PoseidonGoldilocksConfig;
use plonky2::util::timing::TimingTree;
use plonky2_evm::all_stark::AllStark;
use plonky2_evm::config::StarkConfig;
use plonky2_evm::generation::mpt::AccountRlp;
use plonky2_evm::generation::{GenerationInputs, TrieInputs};
use plonky2_evm::proof::BlockMetadata;
use plonky2_evm::prover::prove;
@ -18,28 +23,95 @@ type C = PoseidonGoldilocksConfig;
/// Test a simple token transfer to a new address.
#[test]
#[ignore] // TODO: Won't work until txn parsing, storage, etc. are implemented.
fn test_simple_transfer() -> anyhow::Result<()> {
init_logger();
let all_stark = AllStark::<F, D>::default();
let config = StarkConfig::standard_fast_config();
let block_metadata = BlockMetadata::default();
let sender = hex!("2c7536e3605d9c16a7a3d7b1898e529396a65c23");
let to = hex!("a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0");
let sender_state_key = keccak(sender);
let to_state_key = keccak(to);
let sender_nibbles = Nibbles::from(sender_state_key);
let to_nibbles = Nibbles::from(to_state_key);
let value = U256::from(100u32);
let txn = hex!("f85f050a82520894000000000000000000000000000000000000000064801ca0fa56df5d988638fad8798e5ef75a1e1125dc7fb55d2ac4bce25776a63f0c2967a02cb47a5579eb5f83a1cabe4662501c0059f1b58e60ef839a1b0da67af6b9fb38");
let sender_account_before = AccountRlp {
nonce: 5.into(),
balance: eth_to_wei(100_000.into()),
storage_root: PartialTrie::Empty.calc_hash(),
code_hash: keccak([]),
};
let state_trie_before = PartialTrie::Leaf {
nibbles: sender_nibbles,
value: rlp::encode(&sender_account_before).to_vec(),
};
let tries_before = TrieInputs {
state_trie: state_trie_before,
transactions_trie: PartialTrie::Empty,
receipts_trie: PartialTrie::Empty,
storage_tries: vec![],
};
// Generated using a little py-evm script.
let txn = hex!("f861050a8255f094a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0648242421ba02c89eb757d9deeb1f5b3859a9d4d679951ef610ac47ad4608dc142beb1b7e313a05af7e9fbab825455d36c36c7f4cfcafbeafa9a77bdff936b52afb36d4fe4bcdd");
let block_metadata = BlockMetadata::default();
let inputs = GenerationInputs {
signed_txns: vec![txn.to_vec()],
tries: TrieInputs {
state_trie: PartialTrie::Empty,
transactions_trie: PartialTrie::Empty,
receipts_trie: PartialTrie::Empty,
storage_tries: vec![],
},
tries: tries_before,
contract_code: HashMap::new(),
block_metadata,
};
let proof = prove::<F, C, D>(&all_stark, &config, inputs, &mut TimingTree::default())?;
let mut timing = TimingTree::new("prove", log::Level::Debug);
let proof = prove::<F, C, D>(&all_stark, &config, inputs, &mut timing)?;
timing.filter(Duration::from_millis(100)).print();
let expected_state_trie_after = {
let sender_account_after = AccountRlp {
balance: sender_account_before.balance - value, // TODO: Also subtract gas_used * price.
// nonce: sender_account_before.nonce + 1, // TODO
..sender_account_before
};
let to_account_after = AccountRlp {
balance: value,
..AccountRlp::default()
};
let mut children = std::array::from_fn(|_| PartialTrie::Empty.into());
children[sender_nibbles.get_nibble(0) as usize] = PartialTrie::Leaf {
nibbles: sender_nibbles.truncate_n_nibbles_front(1),
value: rlp::encode(&sender_account_after).to_vec(),
}
.into();
children[to_nibbles.get_nibble(0) as usize] = PartialTrie::Leaf {
nibbles: to_nibbles.truncate_n_nibbles_front(1),
value: rlp::encode(&to_account_after).to_vec(),
}
.into();
PartialTrie::Branch {
children,
value: vec![],
}
};
assert_eq!(
proof.public_values.trie_roots_after.state_root,
expected_state_trie_after.calc_hash()
);
verify_proof(all_stark, proof, &config)
}
fn eth_to_wei(eth: U256) -> U256 {
// 1 ether = 10^18 wei.
eth * U256::from(10).pow(18.into())
}
fn init_logger() {
let _ = try_init_from_env(Env::default().filter_or(DEFAULT_FILTER_ENV, "info"));
}

View File

@ -14,7 +14,7 @@ use plonky2::iop::ext_target::ExtensionTarget;
use plonky2::iop::generator::{GeneratedValues, SimpleGenerator, WitnessGenerator};
use plonky2::iop::target::Target;
use plonky2::iop::wire::Wire;
use plonky2::iop::witness::{PartitionWitness, Witness};
use plonky2::iop::witness::{PartitionWitness, Witness, WitnessWrite};
use plonky2::plonk::circuit_builder::CircuitBuilder;
use plonky2::plonk::vars::{EvaluationTargets, EvaluationVars, EvaluationVarsBase};

View File

@ -12,7 +12,7 @@ use log::{info, Level, LevelFilter};
use maybe_rayon::rayon;
use plonky2::gates::noop::NoopGate;
use plonky2::hash::hash_types::RichField;
use plonky2::iop::witness::{PartialWitness, Witness};
use plonky2::iop::witness::{PartialWitness, WitnessWrite};
use plonky2::plonk::circuit_builder::CircuitBuilder;
use plonky2::plonk::circuit_data::{
CircuitConfig, CommonCircuitData, VerifierCircuitTarget, VerifierOnlyCircuitData,

View File

@ -1,6 +1,6 @@
use anyhow::Result;
use plonky2::field::types::Field;
use plonky2::iop::witness::{PartialWitness, Witness};
use plonky2::iop::witness::{PartialWitness, WitnessWrite};
use plonky2::plonk::circuit_builder::CircuitBuilder;
use plonky2::plonk::circuit_data::CircuitConfig;
use plonky2::plonk::config::{GenericConfig, PoseidonGoldilocksConfig};
@ -35,7 +35,7 @@ fn main() -> Result<()> {
let proof = data.prove(pw)?;
println!(
"Factorial starting at {} is {}!",
"Factorial starting at {} is {}",
proof.public_inputs[0], proof.public_inputs[1]
);

View File

@ -1,6 +1,6 @@
use anyhow::Result;
use plonky2::field::types::Field;
use plonky2::iop::witness::{PartialWitness, Witness};
use plonky2::iop::witness::{PartialWitness, WitnessWrite};
use plonky2::plonk::circuit_builder::CircuitBuilder;
use plonky2::plonk::circuit_data::CircuitConfig;
use plonky2::plonk::config::{GenericConfig, PoseidonGoldilocksConfig};

View File

@ -5,7 +5,7 @@ use plonky2::field::types::{PrimeField, Sample};
use plonky2::hash::hash_types::RichField;
use plonky2::iop::generator::{GeneratedValues, SimpleGenerator};
use plonky2::iop::target::Target;
use plonky2::iop::witness::{PartialWitness, PartitionWitness, Witness};
use plonky2::iop::witness::{PartialWitness, PartitionWitness, Witness, WitnessWrite};
use plonky2::plonk::circuit_builder::CircuitBuilder;
use plonky2::plonk::circuit_data::CircuitConfig;
use plonky2::plonk::config::{GenericConfig, PoseidonGoldilocksConfig};

View File

@ -3,7 +3,7 @@ use itertools::Itertools;
use crate::field::extension::Extendable;
use crate::fri::proof::{FriProof, FriProofTarget};
use crate::hash::hash_types::RichField;
use crate::iop::witness::Witness;
use crate::iop::witness::WitnessWrite;
use crate::plonk::config::AlgebraicHasher;
/// Set the targets in a `FriProofTarget` to their corresponding values in a `FriProof`.
@ -13,7 +13,7 @@ pub fn set_fri_proof_target<F, W, H, const D: usize>(
fri_proof: &FriProof<F, H, D>,
) where
F: RichField + Extendable<D>,
W: Witness<F> + ?Sized,
W: WitnessWrite<F> + ?Sized,
H: AlgebraicHasher<F>,
{
witness.set_target(fri_proof_target.pow_witness, fri_proof.pow_witness);

View File

@ -9,7 +9,7 @@ use crate::gates::exponentiation::ExponentiationGate;
use crate::hash::hash_types::RichField;
use crate::iop::generator::{GeneratedValues, SimpleGenerator};
use crate::iop::target::{BoolTarget, Target};
use crate::iop::witness::{PartitionWitness, Witness};
use crate::iop::witness::{PartitionWitness, Witness, WitnessWrite};
use crate::plonk::circuit_builder::CircuitBuilder;
impl<F: RichField + Extendable<D>, const D: usize> CircuitBuilder<F, D> {

View File

@ -10,7 +10,7 @@ use crate::hash::hash_types::RichField;
use crate::iop::ext_target::{ExtensionAlgebraTarget, ExtensionTarget};
use crate::iop::generator::{GeneratedValues, SimpleGenerator};
use crate::iop::target::Target;
use crate::iop::witness::{PartitionWitness, Witness};
use crate::iop::witness::{PartitionWitness, Witness, WitnessWrite};
use crate::plonk::circuit_builder::CircuitBuilder;
use crate::util::bits_u64;
@ -573,7 +573,7 @@ mod tests {
use crate::field::extension::algebra::ExtensionAlgebra;
use crate::field::types::Sample;
use crate::iop::ext_target::ExtensionAlgebraTarget;
use crate::iop::witness::{PartialWitness, Witness};
use crate::iop::witness::{PartialWitness, WitnessWrite};
use crate::plonk::circuit_builder::CircuitBuilder;
use crate::plonk::circuit_data::CircuitConfig;
use crate::plonk::config::{GenericConfig, KeccakGoldilocksConfig, PoseidonGoldilocksConfig};
@ -588,7 +588,7 @@ mod tests {
let config = CircuitConfig::standard_recursion_config();
let mut pw = PartialWitness::new();
let mut pw = PartialWitness::<F>::new();
let mut builder = CircuitBuilder::<F, D>::new(config);
let vs = FF::rand_vec(3);

View File

@ -5,7 +5,7 @@ use crate::field::extension::Extendable;
use crate::hash::hash_types::RichField;
use crate::iop::generator::{GeneratedValues, SimpleGenerator};
use crate::iop::target::{BoolTarget, Target};
use crate::iop::witness::{PartitionWitness, Witness};
use crate::iop::witness::{PartitionWitness, Witness, WitnessWrite};
use crate::plonk::circuit_builder::CircuitBuilder;
impl<F: RichField + Extendable<D>, const D: usize> CircuitBuilder<F, D> {

View File

@ -41,7 +41,7 @@ mod tests {
use anyhow::Result;
use crate::field::types::Sample;
use crate::iop::witness::{PartialWitness, Witness};
use crate::iop::witness::{PartialWitness, WitnessWrite};
use crate::plonk::circuit_builder::CircuitBuilder;
use crate::plonk::circuit_data::CircuitConfig;
use crate::plonk::config::{GenericConfig, PoseidonGoldilocksConfig};
@ -54,7 +54,7 @@ mod tests {
type F = <C as GenericConfig<D>>::F;
type FF = <C as GenericConfig<D>>::FE;
let config = CircuitConfig::standard_recursion_config();
let mut pw = PartialWitness::new();
let mut pw = PartialWitness::<F>::new();
let mut builder = CircuitBuilder::<F, D>::new(config);
let (x, y) = (FF::rand(), FF::rand());

View File

@ -10,7 +10,7 @@ use crate::gates::base_sum::BaseSumGate;
use crate::hash::hash_types::RichField;
use crate::iop::generator::{GeneratedValues, SimpleGenerator};
use crate::iop::target::{BoolTarget, Target};
use crate::iop::witness::{PartitionWitness, Witness};
use crate::iop::witness::{PartitionWitness, Witness, WitnessWrite};
use crate::plonk::circuit_builder::CircuitBuilder;
use crate::util::log_floor;

View File

@ -6,7 +6,7 @@ use crate::gates::base_sum::BaseSumGate;
use crate::hash::hash_types::RichField;
use crate::iop::generator::{GeneratedValues, SimpleGenerator};
use crate::iop::target::{BoolTarget, Target};
use crate::iop::witness::{PartitionWitness, Witness};
use crate::iop::witness::{PartitionWitness, Witness, WitnessWrite};
use crate::plonk::circuit_builder::CircuitBuilder;
use crate::util::ceil_div_usize;

View File

@ -12,7 +12,7 @@ use crate::hash::hash_types::RichField;
use crate::iop::ext_target::ExtensionTarget;
use crate::iop::generator::{GeneratedValues, SimpleGenerator, WitnessGenerator};
use crate::iop::target::Target;
use crate::iop::witness::{PartitionWitness, Witness};
use crate::iop::witness::{PartitionWitness, Witness, WitnessWrite};
use crate::plonk::circuit_builder::CircuitBuilder;
use crate::plonk::circuit_data::CircuitConfig;
use crate::plonk::vars::{

View File

@ -11,7 +11,7 @@ use crate::hash::hash_types::RichField;
use crate::iop::ext_target::ExtensionTarget;
use crate::iop::generator::{GeneratedValues, SimpleGenerator, WitnessGenerator};
use crate::iop::target::Target;
use crate::iop::witness::{PartitionWitness, Witness};
use crate::iop::witness::{PartitionWitness, Witness, WitnessWrite};
use crate::plonk::circuit_builder::CircuitBuilder;
use crate::plonk::circuit_data::CircuitConfig;
use crate::plonk::vars::{EvaluationTargets, EvaluationVars, EvaluationVarsBase};

View File

@ -14,7 +14,7 @@ use crate::hash::hash_types::RichField;
use crate::iop::ext_target::ExtensionTarget;
use crate::iop::generator::{GeneratedValues, SimpleGenerator, WitnessGenerator};
use crate::iop::target::Target;
use crate::iop::witness::{PartitionWitness, Witness};
use crate::iop::witness::{PartitionWitness, Witness, WitnessWrite};
use crate::plonk::circuit_builder::CircuitBuilder;
use crate::plonk::circuit_data::CircuitConfig;
use crate::plonk::plonk_common::{reduce_with_powers, reduce_with_powers_ext_circuit};

View File

@ -16,7 +16,7 @@ use crate::iop::ext_target::ExtensionTarget;
use crate::iop::generator::{GeneratedValues, SimpleGenerator, WitnessGenerator};
use crate::iop::target::Target;
use crate::iop::wire::Wire;
use crate::iop::witness::{PartitionWitness, Witness};
use crate::iop::witness::{PartitionWitness, Witness, WitnessWrite};
use crate::plonk::circuit_builder::CircuitBuilder;
use crate::plonk::circuit_data::CircuitConfig;
use crate::plonk::vars::{

View File

@ -8,7 +8,7 @@ use crate::field::polynomial::{PolynomialCoeffs, PolynomialValues};
use crate::field::types::{Field, Sample};
use crate::gates::gate::Gate;
use crate::hash::hash_types::{HashOut, RichField};
use crate::iop::witness::{PartialWitness, Witness};
use crate::iop::witness::{PartialWitness, WitnessWrite};
use crate::plonk::circuit_builder::CircuitBuilder;
use crate::plonk::circuit_data::CircuitConfig;
use crate::plonk::config::GenericConfig;

View File

@ -18,7 +18,7 @@ use crate::iop::ext_target::ExtensionTarget;
use crate::iop::generator::{GeneratedValues, SimpleGenerator, WitnessGenerator};
use crate::iop::target::Target;
use crate::iop::wire::Wire;
use crate::iop::witness::{PartitionWitness, Witness};
use crate::iop::witness::{PartitionWitness, Witness, WitnessWrite};
use crate::plonk::circuit_builder::CircuitBuilder;
use crate::plonk::vars::{EvaluationTargets, EvaluationVars, EvaluationVarsBase};

View File

@ -19,7 +19,7 @@ use crate::iop::ext_target::ExtensionTarget;
use crate::iop::generator::{GeneratedValues, SimpleGenerator, WitnessGenerator};
use crate::iop::target::Target;
use crate::iop::wire::Wire;
use crate::iop::witness::{PartitionWitness, Witness};
use crate::iop::witness::{PartitionWitness, Witness, WitnessWrite};
use crate::plonk::circuit_builder::CircuitBuilder;
use crate::plonk::vars::{EvaluationTargets, EvaluationVars, EvaluationVarsBase};

View File

@ -11,7 +11,7 @@ use crate::hash::hash_types::RichField;
use crate::iop::ext_target::ExtensionTarget;
use crate::iop::generator::{GeneratedValues, SimpleGenerator, WitnessGenerator};
use crate::iop::target::Target;
use crate::iop::witness::{PartitionWitness, Witness};
use crate::iop::witness::{PartitionWitness, Witness, WitnessWrite};
use crate::plonk::circuit_builder::CircuitBuilder;
use crate::plonk::circuit_data::CircuitConfig;
use crate::plonk::vars::{EvaluationTargets, EvaluationVars, EvaluationVarsBase};

View File

@ -17,7 +17,7 @@ use crate::iop::ext_target::ExtensionTarget;
use crate::iop::generator::{GeneratedValues, SimpleGenerator, WitnessGenerator};
use crate::iop::target::Target;
use crate::iop::wire::Wire;
use crate::iop::witness::{PartitionWitness, Witness};
use crate::iop::witness::{PartitionWitness, Witness, WitnessWrite};
use crate::plonk::circuit_builder::CircuitBuilder;
use crate::plonk::vars::{EvaluationTargets, EvaluationVars, EvaluationVarsBase};
@ -514,7 +514,7 @@ mod tests {
use crate::hash::poseidon::Poseidon;
use crate::iop::generator::generate_partial_witness;
use crate::iop::wire::Wire;
use crate::iop::witness::{PartialWitness, Witness};
use crate::iop::witness::{PartialWitness, Witness, WitnessWrite};
use crate::plonk::circuit_builder::CircuitBuilder;
use crate::plonk::circuit_data::CircuitConfig;
use crate::plonk::config::{GenericConfig, PoseidonGoldilocksConfig};

View File

@ -16,7 +16,7 @@ use crate::hash::poseidon::Poseidon;
use crate::iop::ext_target::{ExtensionAlgebraTarget, ExtensionTarget};
use crate::iop::generator::{GeneratedValues, SimpleGenerator, WitnessGenerator};
use crate::iop::target::Target;
use crate::iop::witness::{PartitionWitness, Witness};
use crate::iop::witness::{PartitionWitness, Witness, WitnessWrite};
use crate::plonk::circuit_builder::CircuitBuilder;
use crate::plonk::vars::{EvaluationTargets, EvaluationVars, EvaluationVarsBase};

View File

@ -17,7 +17,7 @@ use crate::iop::ext_target::ExtensionTarget;
use crate::iop::generator::{GeneratedValues, SimpleGenerator, WitnessGenerator};
use crate::iop::target::Target;
use crate::iop::wire::Wire;
use crate::iop::witness::{PartitionWitness, Witness};
use crate::iop::witness::{PartitionWitness, Witness, WitnessWrite};
use crate::plonk::circuit_builder::CircuitBuilder;
use crate::plonk::circuit_data::CircuitConfig;
use crate::plonk::vars::{

View File

@ -11,7 +11,7 @@ use crate::hash::hash_types::RichField;
use crate::iop::ext_target::ExtensionTarget;
use crate::iop::generator::{GeneratedValues, SimpleGenerator, WitnessGenerator};
use crate::iop::target::Target;
use crate::iop::witness::{PartitionWitness, Witness};
use crate::iop::witness::{PartitionWitness, Witness, WitnessWrite};
use crate::plonk::circuit_builder::CircuitBuilder;
use crate::plonk::vars::{EvaluationTargets, EvaluationVars, EvaluationVarsBase};

View File

@ -11,7 +11,7 @@ use crate::hash::hash_types::RichField;
use crate::iop::ext_target::ExtensionTarget;
use crate::iop::generator::{GeneratedValues, SimpleGenerator, WitnessGenerator};
use crate::iop::target::Target;
use crate::iop::witness::{PartitionWitness, Witness};
use crate::iop::witness::{PartitionWitness, Witness, WitnessWrite};
use crate::plonk::circuit_builder::CircuitBuilder;
use crate::plonk::vars::{EvaluationTargets, EvaluationVars, EvaluationVarsBase};

View File

@ -2,6 +2,7 @@ use alloc::vec;
use alloc::vec::Vec;
use anyhow::{ensure, Result};
use itertools::Itertools;
use serde::{Deserialize, Serialize};
use crate::field::extension::Extendable;
@ -145,6 +146,12 @@ impl<F: RichField + Extendable<D>, const D: usize> CircuitBuilder<F, D> {
self.connect(x.elements[i], y.elements[i]);
}
}
pub fn connect_merkle_caps(&mut self, x: &MerkleCapTarget, y: &MerkleCapTarget) {
for (h0, h1) in x.0.iter().zip_eq(&y.0) {
self.connect_hashes(*h0, *h1);
}
}
}
#[cfg(test)]
@ -156,7 +163,7 @@ mod tests {
use super::*;
use crate::field::types::Field;
use crate::hash::merkle_tree::MerkleTree;
use crate::iop::witness::{PartialWitness, Witness};
use crate::iop::witness::{PartialWitness, WitnessWrite};
use crate::plonk::circuit_builder::CircuitBuilder;
use crate::plonk::circuit_data::CircuitConfig;
use crate::plonk::config::{GenericConfig, PoseidonGoldilocksConfig};

View File

@ -3,13 +3,13 @@ use alloc::vec::Vec;
use core::fmt::Debug;
use core::marker::PhantomData;
use crate::field::extension::{Extendable, FieldExtension};
use crate::field::extension::Extendable;
use crate::field::types::Field;
use crate::hash::hash_types::{HashOut, HashOutTarget, RichField};
use crate::hash::hash_types::RichField;
use crate::iop::ext_target::ExtensionTarget;
use crate::iop::target::{BoolTarget, Target};
use crate::iop::target::Target;
use crate::iop::wire::Wire;
use crate::iop::witness::{PartialWitness, PartitionWitness, Witness};
use crate::iop::witness::{PartialWitness, PartitionWitness, Witness, WitnessWrite};
use crate::plonk::circuit_data::{CommonCircuitData, ProverOnlyCircuitData};
use crate::plonk::config::GenericConfig;
@ -120,6 +120,12 @@ impl<F: Field> From<Vec<(Target, F)>> for GeneratedValues<F> {
}
}
impl<F: Field> WitnessWrite<F> for GeneratedValues<F> {
fn set_target(&mut self, target: Target, value: F) {
self.target_values.push((target, value));
}
}
impl<F: Field> GeneratedValues<F> {
pub fn with_capacity(capacity: usize) -> Self {
Vec::with_capacity(capacity).into()
@ -137,10 +143,6 @@ impl<F: Field> GeneratedValues<F> {
vec![(target, value)].into()
}
pub fn clear(&mut self) {
self.target_values.clear();
}
pub fn singleton_extension_target<const D: usize>(
et: ExtensionTarget<D>,
value: F::Extension,
@ -152,56 +154,6 @@ impl<F: Field> GeneratedValues<F> {
witness.set_extension_target(et, value);
witness
}
pub fn set_target(&mut self, target: Target, value: F) {
self.target_values.push((target, value))
}
pub fn set_bool_target(&mut self, target: BoolTarget, value: bool) {
self.set_target(target.target, F::from_bool(value))
}
pub fn set_hash_target(&mut self, ht: HashOutTarget, value: HashOut<F>) {
ht.elements
.iter()
.zip(value.elements)
.for_each(|(&t, x)| self.set_target(t, x));
}
pub fn set_extension_target<const D: usize>(
&mut self,
et: ExtensionTarget<D>,
value: F::Extension,
) where
F: RichField + Extendable<D>,
{
let limbs = value.to_basefield_array();
(0..D).for_each(|i| {
self.set_target(et.0[i], limbs[i]);
});
}
pub fn set_wire(&mut self, wire: Wire, value: F) {
self.set_target(Target::Wire(wire), value)
}
pub fn set_wires<W>(&mut self, wires: W, values: &[F])
where
W: IntoIterator<Item = Wire>,
{
// If we used itertools, we could use zip_eq for extra safety.
for (wire, &value) in wires.into_iter().zip(values) {
self.set_wire(wire, value);
}
}
pub fn set_ext_wires<W, const D: usize>(&mut self, wires: W, value: F::Extension)
where
F: RichField + Extendable<D>,
W: IntoIterator<Item = Wire>,
{
self.set_wires(wires, &value.to_basefield_array());
}
}
/// A generator which runs once after a list of dependencies is present in the witness.

View File

@ -17,71 +17,9 @@ use crate::plonk::circuit_data::{VerifierCircuitTarget, VerifierOnlyCircuitData}
use crate::plonk::config::{AlgebraicHasher, GenericConfig};
use crate::plonk::proof::{Proof, ProofTarget, ProofWithPublicInputs, ProofWithPublicInputsTarget};
/// A witness holds information on the values of targets in a circuit.
pub trait Witness<F: Field> {
fn try_get_target(&self, target: Target) -> Option<F>;
pub trait WitnessWrite<F: Field> {
fn set_target(&mut self, target: Target, value: F);
fn get_target(&self, target: Target) -> F {
self.try_get_target(target).unwrap()
}
fn get_targets(&self, targets: &[Target]) -> Vec<F> {
targets.iter().map(|&t| self.get_target(t)).collect()
}
fn get_extension_target<const D: usize>(&self, et: ExtensionTarget<D>) -> F::Extension
where
F: RichField + Extendable<D>,
{
F::Extension::from_basefield_array(
self.get_targets(&et.to_target_array()).try_into().unwrap(),
)
}
fn get_extension_targets<const D: usize>(&self, ets: &[ExtensionTarget<D>]) -> Vec<F::Extension>
where
F: RichField + Extendable<D>,
{
ets.iter()
.map(|&et| self.get_extension_target(et))
.collect()
}
fn get_bool_target(&self, target: BoolTarget) -> bool {
let value = self.get_target(target.target);
if value.is_zero() {
return false;
}
if value.is_one() {
return true;
}
panic!("not a bool")
}
fn get_hash_target(&self, ht: HashOutTarget) -> HashOut<F> {
HashOut {
elements: self.get_targets(&ht.elements).try_into().unwrap(),
}
}
fn get_wire(&self, wire: Wire) -> F {
self.get_target(Target::Wire(wire))
}
fn try_get_wire(&self, wire: Wire) -> Option<F> {
self.try_get_target(Target::Wire(wire))
}
fn contains(&self, target: Target) -> bool {
self.try_get_target(target).is_some()
}
fn contains_all(&self, targets: &[Target]) -> bool {
targets.iter().all(|&t| self.contains(t))
}
fn set_hash_target(&mut self, ht: HashOutTarget, value: HashOut<F>) {
ht.elements
.iter()
@ -239,6 +177,70 @@ pub trait Witness<F: Field> {
}
}
/// A witness holds information on the values of targets in a circuit.
pub trait Witness<F: Field>: WitnessWrite<F> {
fn try_get_target(&self, target: Target) -> Option<F>;
fn get_target(&self, target: Target) -> F {
self.try_get_target(target).unwrap()
}
fn get_targets(&self, targets: &[Target]) -> Vec<F> {
targets.iter().map(|&t| self.get_target(t)).collect()
}
fn get_extension_target<const D: usize>(&self, et: ExtensionTarget<D>) -> F::Extension
where
F: RichField + Extendable<D>,
{
F::Extension::from_basefield_array(
self.get_targets(&et.to_target_array()).try_into().unwrap(),
)
}
fn get_extension_targets<const D: usize>(&self, ets: &[ExtensionTarget<D>]) -> Vec<F::Extension>
where
F: RichField + Extendable<D>,
{
ets.iter()
.map(|&et| self.get_extension_target(et))
.collect()
}
fn get_bool_target(&self, target: BoolTarget) -> bool {
let value = self.get_target(target.target);
if value.is_zero() {
return false;
}
if value.is_one() {
return true;
}
panic!("not a bool")
}
fn get_hash_target(&self, ht: HashOutTarget) -> HashOut<F> {
HashOut {
elements: self.get_targets(&ht.elements).try_into().unwrap(),
}
}
fn get_wire(&self, wire: Wire) -> F {
self.get_target(Target::Wire(wire))
}
fn try_get_wire(&self, wire: Wire) -> Option<F> {
self.try_get_target(Target::Wire(wire))
}
fn contains(&self, target: Target) -> bool {
self.try_get_target(target).is_some()
}
fn contains_all(&self, targets: &[Target]) -> bool {
targets.iter().all(|&t| self.contains(t))
}
}
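With the split, code that only populates values can bound on `WitnessWrite`, while readers take the full `Witness` (which implies the setters via the supertrait). A minimal sketch of the caller-side effect, assuming a `Target` obtained from a `CircuitBuilder` elsewhere (helper names are illustrative):
use plonky2::field::goldilocks_field::GoldilocksField as F;
use plonky2::field::types::Field;
use plonky2::iop::target::Target;
use plonky2::iop::witness::{PartialWitness, Witness, WitnessWrite};

// Needs only the write half of the old `Witness` trait.
fn fill(w: &mut impl WitnessWrite<F>, t: Target) {
    w.set_target(t, F::ONE);
}

// Readers bound on `Witness`, which now extends `WitnessWrite`.
fn read(w: &impl Witness<F>, t: Target) -> F {
    w.get_target(t)
}

fn roundtrip(t: Target) -> F {
    let mut pw = PartialWitness::<F>::new();
    fill(&mut pw, t);
    read(&pw, t)
}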
#[derive(Clone, Debug)]
pub struct MatrixWitness<F: Field> {
pub(crate) wire_values: Vec<Vec<F>>,
@ -263,23 +265,25 @@ impl<F: Field> PartialWitness<F> {
}
}
impl<F: Field> Witness<F> for PartialWitness<F> {
fn try_get_target(&self, target: Target) -> Option<F> {
self.target_values.get(&target).copied()
}
impl<F: Field> WitnessWrite<F> for PartialWitness<F> {
fn set_target(&mut self, target: Target, value: F) {
let opt_old_value = self.target_values.insert(target, value);
if let Some(old_value) = opt_old_value {
assert_eq!(
old_value, value,
"Target {:?} was set twice with different values",
target
value, old_value,
"Target {:?} was set twice with different values: {} != {}",
target, old_value, value
);
}
}
}
impl<F: Field> Witness<F> for PartialWitness<F> {
fn try_get_target(&self, target: Target) -> Option<F> {
self.target_values.get(&target).copied()
}
}
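The expanded assertion message now reports both conflicting values, which makes double-assignment bugs easier to track down. A tiny repro sketch, assuming a target `t` created elsewhere:
let mut pw = PartialWitness::<F>::new();
pw.set_target(t, F::ONE);
// Panics with: Target ... was set twice with different values: 1 != 2
pw.set_target(t, F::TWO);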
/// `PartitionWitness` holds a disjoint-set forest of the targets respecting a circuit's copy constraints.
/// The value of a target is defined to be the value of its root in the forest.
#[derive(Clone)]
@ -308,8 +312,8 @@ impl<'a, F: Field> PartitionWitness<'a, F> {
if let Some(old_value) = *rep_value {
assert_eq!(
value, old_value,
"Partition containing {:?} was set twice with different values",
target
"Partition containing {:?} was set twice with different values: {} != {}",
target, old_value, value
);
None
} else {
@ -337,13 +341,15 @@ impl<'a, F: Field> PartitionWitness<'a, F> {
}
}
impl<'a, F: Field> WitnessWrite<F> for PartitionWitness<'a, F> {
fn set_target(&mut self, target: Target, value: F) {
self.set_target_returning_rep(target, value);
}
}
impl<'a, F: Field> Witness<F> for PartitionWitness<'a, F> {
fn try_get_target(&self, target: Target) -> Option<F> {
let rep_index = self.representative_map[self.target_index(target)];
self.values[rep_index]
}
fn set_target(&mut self, target: Target, value: F) {
self.set_target_returning_rep(target, value);
}
}

View File

@ -40,7 +40,7 @@ use crate::plonk::circuit_data::{
CircuitConfig, CircuitData, CommonCircuitData, ProverCircuitData, ProverOnlyCircuitData,
VerifierCircuitData, VerifierCircuitTarget, VerifierOnlyCircuitData,
};
use crate::plonk::config::{GenericConfig, GenericHashOut, Hasher};
use crate::plonk::config::{AlgebraicHasher, GenericConfig, GenericHashOut, Hasher};
use crate::plonk::copy_constraint::CopyConstraint;
use crate::plonk::permutation_argument::Forest;
use crate::plonk::plonk_common::PlonkOracle;
@ -247,7 +247,7 @@ impl<F: RichField + Extendable<D>, const D: usize> CircuitBuilder<F, D> {
/// Adds a virtual `VerifierCircuitTarget`, registers its elements as public inputs, sets it as `self.verifier_data_public_input`, and returns it.
/// WARNING: Do not register any public input after calling this! TODO: relax this
pub fn add_verifier_data_public_inputs(&mut self) {
pub fn add_verifier_data_public_inputs(&mut self) -> VerifierCircuitTarget {
assert!(
self.verifier_data_public_input.is_none(),
"add_verifier_data_public_inputs only needs to be called once"
@ -263,7 +263,8 @@ impl<F: RichField + Extendable<D>, const D: usize> CircuitBuilder<F, D> {
self.register_public_inputs(&verifier_data.constants_sigmas_cap.0[i].elements);
}
self.verifier_data_public_input = Some(verifier_data);
self.verifier_data_public_input = Some(verifier_data.clone());
verifier_data
}
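Returning the `VerifierCircuitTarget` lets the caller keep a handle to it and assign the real verifier key at proving time, exactly as the cyclic-recursion test later in this diff does. A condensed sketch (builder and config setup elided; names illustrative):
let verifier_data_target = builder.add_verifier_data_public_inputs();
// ... add the rest of the circuit ...
let data = builder.build::<C>();

let mut pw = PartialWitness::new();
pw.set_verifier_data_target(&verifier_data_target, &data.verifier_only);
let proof = data.prove(pw)?;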
/// Adds a gate to the circuit, and returns its index.
@ -436,6 +437,19 @@ impl<F: RichField + Extendable<D>, const D: usize> CircuitBuilder<F, D> {
MerkleCapTarget(cap.0.iter().map(|h| self.constant_hash(*h)).collect())
}
pub fn constant_verifier_data<C: GenericConfig<D, F = F>>(
&mut self,
verifier_data: &VerifierOnlyCircuitData<C, D>,
) -> VerifierCircuitTarget
where
C::Hasher: AlgebraicHasher<F>,
{
VerifierCircuitTarget {
constants_sigmas_cap: self.constant_merkle_cap(&verifier_data.constants_sigmas_cap),
circuit_digest: self.constant_hash(verifier_data.circuit_digest),
}
}
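Where the inner verifier key is fixed at build time, `constant_verifier_data` avoids wiring it in as a witness value. A sketch, assuming an already-built inner circuit `inner: CircuitData<F, InnerC, D>` (names illustrative):
// Bake the inner verifier key into the outer circuit as constants.
let inner_vd = builder.constant_verifier_data::<InnerC>(&inner.verifier_only);
let inner_proof = builder.add_virtual_proof_with_pis::<InnerC>(&inner.common);
builder.verify_proof::<InnerC>(&inner_proof, &inner_vd, &inner.common);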
/// If the given target is a constant (i.e. it was created by the `constant(F)` method), returns
/// its constant value. Otherwise, returns `None`.
pub fn target_as_constant(&self, target: Target) -> Option<F> {

View File

@ -470,7 +470,7 @@ impl<F: RichField + Extendable<D>, const D: usize> CommonCircuitData<F, D> {
/// is intentionally missing certain fields, such as `CircuitConfig`, because we support only a
/// limited form of dynamic inner circuits. We can't practically make things like the wire count
/// dynamic, at least not without setting a maximum wire count and paying for the worst case.
#[derive(Clone)]
#[derive(Clone, Debug)]
pub struct VerifierCircuitTarget {
/// A commitment to each constant polynomial and each permutation polynomial.
pub constants_sigmas_cap: MerkleCapTarget,

View File

@ -31,6 +31,55 @@ impl<F: RichField + Extendable<D>, const D: usize> CircuitBuilder<F, D> {
) where
C::Hasher: AlgebraicHasher<F>,
{
let selected_proof =
self.select_proof_with_pis(condition, proof_with_pis0, proof_with_pis1);
let selected_verifier_data = VerifierCircuitTarget {
constants_sigmas_cap: self.select_cap(
condition,
&inner_verifier_data0.constants_sigmas_cap,
&inner_verifier_data1.constants_sigmas_cap,
),
circuit_digest: self.select_hash(
condition,
inner_verifier_data0.circuit_digest,
inner_verifier_data1.circuit_digest,
),
};
self.verify_proof::<C>(&selected_proof, &selected_verifier_data, inner_common_data);
}
/// Conditionally verify a proof; if `condition` is false, a newly generated dummy proof is verified instead.
pub fn conditionally_verify_proof_or_dummy<C: GenericConfig<D, F = F> + 'static>(
&mut self,
condition: BoolTarget,
proof_with_pis: &ProofWithPublicInputsTarget<D>,
inner_verifier_data: &VerifierCircuitTarget,
inner_common_data: &CommonCircuitData<F, D>,
) -> anyhow::Result<()>
where
C::Hasher: AlgebraicHasher<F>,
{
let (dummy_proof_with_pis_target, dummy_verifier_data_target) =
self.dummy_proof_and_vk::<C>(inner_common_data)?;
self.conditionally_verify_proof::<C>(
condition,
proof_with_pis,
inner_verifier_data,
&dummy_proof_with_pis_target,
&dummy_verifier_data_target,
inner_common_data,
);
Ok(())
}
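A usage sketch for the new helper: the caller supplies the real proof target, the verifier-data target, and a boolean, and the dummy branch is generated internally. (Setup of `builder` and `inner_common_data` is elided; names are illustrative.)
let condition = builder.add_virtual_bool_target_safe();
let proof_t = builder.add_virtual_proof_with_pis::<C>(&inner_common_data);
let inner_vd_t = VerifierCircuitTarget {
    constants_sigmas_cap: builder.add_virtual_cap(inner_common_data.config.fri_config.cap_height),
    circuit_digest: builder.add_virtual_hash(),
};
builder.conditionally_verify_proof_or_dummy::<C>(
    condition,
    &proof_t,
    &inner_vd_t,
    &inner_common_data,
)?;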
/// Computes `if b { proof_with_pis0 } else { proof_with_pis1 }`.
fn select_proof_with_pis(
&mut self,
b: BoolTarget,
proof_with_pis0: &ProofWithPublicInputsTarget<D>,
proof_with_pis1: &ProofWithPublicInputsTarget<D>,
) -> ProofWithPublicInputsTarget<D> {
let ProofWithPublicInputsTarget {
proof:
ProofTarget {
@ -53,20 +102,19 @@ impl<F: RichField + Extendable<D>, const D: usize> CircuitBuilder<F, D> {
},
public_inputs: public_inputs1,
} = proof_with_pis1;
let selected_proof = with_context!(self, "select proof", {
let selected_wires_cap = self.select_cap(condition, wires_cap0, wires_cap1);
with_context!(self, "select proof", {
let selected_wires_cap = self.select_cap(b, wires_cap0, wires_cap1);
let selected_plonk_zs_partial_products_cap = self.select_cap(
condition,
b,
plonk_zs_partial_products_cap0,
plonk_zs_partial_products_cap1,
);
let selected_quotient_polys_cap =
self.select_cap(condition, quotient_polys_cap0, quotient_polys_cap1);
let selected_openings = self.select_opening_set(condition, openings0, openings1);
self.select_cap(b, quotient_polys_cap0, quotient_polys_cap1);
let selected_openings = self.select_opening_set(b, openings0, openings1);
let selected_opening_proof =
self.select_opening_proof(condition, opening_proof0, opening_proof1);
let selected_public_inputs = self.select_vec(condition, public_inputs0, public_inputs1);
self.select_opening_proof(b, opening_proof0, opening_proof1);
let selected_public_inputs = self.select_vec(b, public_inputs0, public_inputs1);
ProofWithPublicInputsTarget {
proof: ProofTarget {
wires_cap: selected_wires_cap,
@ -77,52 +125,10 @@ impl<F: RichField + Extendable<D>, const D: usize> CircuitBuilder<F, D> {
},
public_inputs: selected_public_inputs,
}
});
let selected_verifier_data = VerifierCircuitTarget {
constants_sigmas_cap: self.select_cap(
condition,
&inner_verifier_data0.constants_sigmas_cap,
&inner_verifier_data1.constants_sigmas_cap,
),
circuit_digest: self.select_hash(
condition,
inner_verifier_data0.circuit_digest,
inner_verifier_data1.circuit_digest,
),
};
self.verify_proof::<C>(&selected_proof, &selected_verifier_data, inner_common_data);
}
/// Conditionally verify a proof with a new generated dummy proof.
pub fn conditionally_verify_proof_or_dummy<C: GenericConfig<D, F = F>>(
&mut self,
condition: BoolTarget,
proof_with_pis: &ProofWithPublicInputsTarget<D>,
inner_verifier_data: &VerifierCircuitTarget,
inner_common_data: &CommonCircuitData<F, D>,
) -> (ProofWithPublicInputsTarget<D>, VerifierCircuitTarget)
where
C::Hasher: AlgebraicHasher<F>,
{
let dummy_proof = self.add_virtual_proof_with_pis::<C>(inner_common_data);
let dummy_verifier_data = VerifierCircuitTarget {
constants_sigmas_cap: self
.add_virtual_cap(inner_common_data.config.fri_config.cap_height),
circuit_digest: self.add_virtual_hash(),
};
self.conditionally_verify_proof::<C>(
condition,
proof_with_pis,
inner_verifier_data,
&dummy_proof,
&dummy_verifier_data,
inner_common_data,
);
(dummy_proof, dummy_verifier_data)
})
}
/// Computes `if b { v0 } else { v1 }`.
fn select_vec(&mut self, b: BoolTarget, v0: &[Target], v1: &[Target]) -> Vec<Target> {
v0.iter()
.zip_eq(v1)
@ -130,6 +136,7 @@ impl<F: RichField + Extendable<D>, const D: usize> CircuitBuilder<F, D> {
.collect()
}
/// Computes `if b { h0 } else { h1 }`.
pub(crate) fn select_hash(
&mut self,
b: BoolTarget,
@ -141,6 +148,7 @@ impl<F: RichField + Extendable<D>, const D: usize> CircuitBuilder<F, D> {
}
}
/// Computes `if b { cap0 } else { cap1 }`.
fn select_cap(
&mut self,
b: BoolTarget,
@ -157,6 +165,7 @@ impl<F: RichField + Extendable<D>, const D: usize> CircuitBuilder<F, D> {
)
}
/// Computes `if b { v0 } else { v1 }`.
fn select_vec_cap(
&mut self,
b: BoolTarget,
@ -169,6 +178,7 @@ impl<F: RichField + Extendable<D>, const D: usize> CircuitBuilder<F, D> {
.collect()
}
/// Computes `if b { os0 } else { os1 }`.
fn select_opening_set(
&mut self,
b: BoolTarget,
@ -186,6 +196,7 @@ impl<F: RichField + Extendable<D>, const D: usize> CircuitBuilder<F, D> {
}
}
/// Computes `if b { v0 } else { v1 }`.
fn select_vec_ext(
&mut self,
b: BoolTarget,
@ -198,6 +209,7 @@ impl<F: RichField + Extendable<D>, const D: usize> CircuitBuilder<F, D> {
.collect()
}
/// Computes `if b { proof0 } else { proof1 }`.
fn select_opening_proof(
&mut self,
b: BoolTarget,
@ -224,6 +236,7 @@ impl<F: RichField + Extendable<D>, const D: usize> CircuitBuilder<F, D> {
}
}
/// Computes `if b { qr0 } else { qr1 }`.
fn select_query_round(
&mut self,
b: BoolTarget,
@ -240,6 +253,7 @@ impl<F: RichField + Extendable<D>, const D: usize> CircuitBuilder<F, D> {
}
}
/// Computes `if b { v0 } else { v1 }`.
fn select_vec_query_round(
&mut self,
b: BoolTarget,
@ -252,6 +266,7 @@ impl<F: RichField + Extendable<D>, const D: usize> CircuitBuilder<F, D> {
.collect()
}
/// Computes `if b { proof0 } else { proof1 }`.
fn select_initial_tree_proof(
&mut self,
b: BoolTarget,
@ -273,6 +288,7 @@ impl<F: RichField + Extendable<D>, const D: usize> CircuitBuilder<F, D> {
}
}
/// Computes `if b { proof0 } else { proof1 }`.
fn select_merkle_proof(
&mut self,
b: BoolTarget,
@ -289,6 +305,7 @@ impl<F: RichField + Extendable<D>, const D: usize> CircuitBuilder<F, D> {
}
}
/// Computes `if b { qs0 } else { qs1 }`.
fn select_query_step(
&mut self,
b: BoolTarget,
@ -301,6 +318,7 @@ impl<F: RichField + Extendable<D>, const D: usize> CircuitBuilder<F, D> {
}
}
/// Computes `if b { v0 } else { v1 }`.
fn select_vec_query_step(
&mut self,
b: BoolTarget,
@ -322,7 +340,7 @@ mod tests {
use super::*;
use crate::field::types::Sample;
use crate::gates::noop::NoopGate;
use crate::iop::witness::{PartialWitness, Witness};
use crate::iop::witness::{PartialWitness, WitnessWrite};
use crate::plonk::circuit_data::CircuitConfig;
use crate::plonk::config::{GenericConfig, PoseidonGoldilocksConfig};
use crate::recursion::dummy_circuit::{dummy_circuit, dummy_proof};

View File

@ -1,48 +1,17 @@
#![allow(clippy::int_plus_one)] // Makes more sense for some inequalities below.
use alloc::vec;
use anyhow::{ensure, Result};
use hashbrown::HashMap;
use itertools::Itertools;
use crate::field::extension::Extendable;
use crate::gates::noop::NoopGate;
use crate::hash::hash_types::{HashOut, HashOutTarget, MerkleCapTarget, RichField};
use crate::hash::merkle_tree::MerkleCap;
use crate::iop::target::{BoolTarget, Target};
use crate::iop::witness::{PartialWitness, Witness};
use crate::plonk::circuit_builder::CircuitBuilder;
use crate::plonk::circuit_data::{
CircuitData, CommonCircuitData, VerifierCircuitTarget, VerifierOnlyCircuitData,
CommonCircuitData, VerifierCircuitTarget, VerifierOnlyCircuitData,
};
use crate::plonk::config::{AlgebraicHasher, GenericConfig};
use crate::plonk::proof::{ProofWithPublicInputs, ProofWithPublicInputsTarget};
use crate::recursion::dummy_circuit::{dummy_circuit, dummy_proof};
pub struct CyclicRecursionData<
'a,
F: RichField + Extendable<D>,
C: GenericConfig<D, F = F>,
const D: usize,
> {
proof: &'a Option<ProofWithPublicInputs<F, C, D>>,
verifier_data: &'a VerifierOnlyCircuitData<C, D>,
common_data: &'a CommonCircuitData<F, D>,
}
pub struct CyclicRecursionTarget<F, C, const D: usize>
where
F: RichField + Extendable<D>,
C: GenericConfig<D, F = F>,
{
pub(crate) proof: ProofWithPublicInputsTarget<D>,
pub(crate) verifier_data: VerifierCircuitTarget,
pub(crate) dummy_proof: ProofWithPublicInputsTarget<D>,
pub(crate) dummy_verifier_data: VerifierCircuitTarget,
pub(crate) condition: BoolTarget,
pub(crate) dummy_circuit: CircuitData<F, C, D>,
}
impl<C: GenericConfig<D>, const D: usize> VerifierOnlyCircuitData<C, D> {
fn from_slice(slice: &[C::F], common_data: &CommonCircuitData<C::F, D>) -> Result<Self>
@ -98,7 +67,7 @@ impl VerifierCircuitTarget {
impl<F: RichField + Extendable<D>, const D: usize> CircuitBuilder<F, D> {
/// If `condition` is true, recursively verify a proof for the same circuit as the one we're
/// currently building.
/// currently building. Otherwise, verify `other_proof_with_pis`.
///
/// For a typical IVC use case, `condition` will be false for the very first proof in a chain,
/// i.e. the base case.
@ -110,12 +79,14 @@ impl<F: RichField + Extendable<D>, const D: usize> CircuitBuilder<F, D> {
/// that the verification key matches.
///
/// WARNING: Do not register any public input after calling this! TODO: relax this
pub fn cyclic_recursion<C: GenericConfig<D, F = F>>(
pub fn conditionally_verify_cyclic_proof<C: GenericConfig<D, F = F>>(
&mut self,
condition: BoolTarget,
proof_with_pis: &ProofWithPublicInputsTarget<D>,
cyclic_proof_with_pis: &ProofWithPublicInputsTarget<D>,
other_proof_with_pis: &ProofWithPublicInputsTarget<D>,
other_verifier_data: &VerifierCircuitTarget,
common_data: &CommonCircuitData<F, D>,
) -> Result<CyclicRecursionTarget<F, C, D>>
) -> Result<()>
where
C::Hasher: AlgebraicHasher<F>,
{
@ -123,131 +94,67 @@ impl<F: RichField + Extendable<D>, const D: usize> CircuitBuilder<F, D> {
.verifier_data_public_input
.clone()
.expect("Must call add_verifier_data_public_inputs before cyclic recursion");
self.goal_common_data = Some(common_data.clone());
let dummy_verifier_data = VerifierCircuitTarget {
constants_sigmas_cap: self.add_virtual_cap(self.config.fri_config.cap_height),
circuit_digest: self.add_virtual_hash(),
};
if let Some(existing_common_data) = self.goal_common_data.as_ref() {
assert_eq!(existing_common_data, common_data);
} else {
self.goal_common_data = Some(common_data.clone());
}
let dummy_proof = self.add_virtual_proof_with_pis::<C>(common_data);
let pis = VerifierCircuitTarget::from_slice::<F, C, D>(
&proof_with_pis.public_inputs,
let inner_cyclic_pis = VerifierCircuitTarget::from_slice::<F, C, D>(
&cyclic_proof_with_pis.public_inputs,
common_data,
)?;
// Connect previous verifier data to current one. This guarantees that every proof in the cycle uses the same verifier data.
self.connect_hashes(pis.circuit_digest, verifier_data.circuit_digest);
for (h0, h1) in pis
.constants_sigmas_cap
.0
.iter()
.zip_eq(&verifier_data.constants_sigmas_cap.0)
{
self.connect_hashes(*h0, *h1);
}
self.connect_hashes(
inner_cyclic_pis.circuit_digest,
verifier_data.circuit_digest,
);
self.connect_merkle_caps(
&inner_cyclic_pis.constants_sigmas_cap,
&verifier_data.constants_sigmas_cap,
);
// Verify the real proof if `condition` is set to true, otherwise verify the dummy proof.
// Verify the cyclic proof if `condition` is set to true, otherwise verify the other proof.
self.conditionally_verify_proof::<C>(
condition,
proof_with_pis,
cyclic_proof_with_pis,
&verifier_data,
&dummy_proof,
&dummy_verifier_data,
other_proof_with_pis,
other_verifier_data,
common_data,
);
// Make sure we have enough gates to match `common_data`.
while self.num_gates() < (common_data.degree() / 2) {
self.add_gate(NoopGate, vec![]);
}
// Make sure we include every gate that appears in `common_data`.
for g in &common_data.gates {
self.add_gate_to_gate_set(g.clone());
}
Ok(CyclicRecursionTarget {
proof: proof_with_pis.clone(),
verifier_data,
dummy_proof,
dummy_verifier_data,
Ok(())
}
pub fn conditionally_verify_cyclic_proof_or_dummy<C: GenericConfig<D, F = F> + 'static>(
&mut self,
condition: BoolTarget,
cyclic_proof_with_pis: &ProofWithPublicInputsTarget<D>,
common_data: &CommonCircuitData<F, D>,
) -> Result<()>
where
C::Hasher: AlgebraicHasher<F>,
{
let (dummy_proof_with_pis_target, dummy_verifier_data_target) =
self.dummy_proof_and_vk::<C>(common_data)?;
self.conditionally_verify_cyclic_proof::<C>(
condition,
dummy_circuit: dummy_circuit(common_data),
})
cyclic_proof_with_pis,
&dummy_proof_with_pis_target,
&dummy_verifier_data_target,
common_data,
)?;
Ok(())
}
}
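The intended call pattern, condensed from the test at the end of this file: build with `conditionally_verify_cyclic_proof_or_dummy`, then at proving time feed either a `cyclic_base_proof` (base case, `condition = false`) or the previous layer's proof (`condition = true`). A sketch (witness and circuit setup elided; names follow the test):
builder.conditionally_verify_cyclic_proof_or_dummy::<C>(
    condition,
    &inner_cyclic_proof_with_pis,
    &common_data,
)?;
let cyclic_circuit_data = builder.build::<C>();

// Base case.
pw.set_bool_target(condition, false);
pw.set_proof_with_pis_target::<C, D>(
    &inner_cyclic_proof_with_pis,
    &cyclic_base_proof(&common_data, &cyclic_circuit_data.verifier_only, initial_hash_pis),
);
pw.set_verifier_data_target(&verifier_data_target, &cyclic_circuit_data.verifier_only);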
/// Set the targets in a `CyclicRecursionTarget` to their corresponding values in a `CyclicRecursionData`.
/// The `public_inputs` parameter let the caller specify certain public inputs (identified by their
/// indices) which should be given specific values. The rest will default to zero.
pub fn set_cyclic_recursion_data_target<
F: RichField + Extendable<D>,
C: GenericConfig<D, F = F>,
const D: usize,
>(
pw: &mut PartialWitness<F>,
cyclic_recursion_data_target: &CyclicRecursionTarget<F, C, D>,
cyclic_recursion_data: &CyclicRecursionData<F, C, D>,
// Public inputs to set in the base case to seed some initial data.
mut public_inputs: HashMap<usize, F>,
) -> Result<()>
where
C::Hasher: AlgebraicHasher<F>,
{
if let Some(proof) = cyclic_recursion_data.proof {
pw.set_bool_target(cyclic_recursion_data_target.condition, true);
pw.set_proof_with_pis_target(&cyclic_recursion_data_target.proof, proof);
pw.set_verifier_data_target(
&cyclic_recursion_data_target.verifier_data,
cyclic_recursion_data.verifier_data,
);
pw.set_proof_with_pis_target(&cyclic_recursion_data_target.dummy_proof, proof);
pw.set_verifier_data_target(
&cyclic_recursion_data_target.dummy_verifier_data,
cyclic_recursion_data.verifier_data,
);
} else {
pw.set_bool_target(cyclic_recursion_data_target.condition, false);
let pis_len = cyclic_recursion_data_target
.dummy_circuit
.common
.num_public_inputs;
let cap_elements = cyclic_recursion_data
.common_data
.config
.fri_config
.num_cap_elements();
let start_vk_pis = pis_len - 4 - 4 * cap_elements;
// The circuit checks that the verifier data is the same throughout the cycle, so
// we set the verifier data to the "real" verifier data even though it's unused in the base case.
let verifier_data = &cyclic_recursion_data.verifier_data;
public_inputs.extend((start_vk_pis..).zip(verifier_data.circuit_digest.elements));
for i in 0..cap_elements {
let start = start_vk_pis + 4 + 4 * i;
public_inputs.extend((start..).zip(verifier_data.constants_sigmas_cap.0[i].elements));
}
let proof = dummy_proof(&cyclic_recursion_data_target.dummy_circuit, public_inputs)?;
pw.set_proof_with_pis_target(&cyclic_recursion_data_target.proof, &proof);
pw.set_verifier_data_target(
&cyclic_recursion_data_target.verifier_data,
cyclic_recursion_data.verifier_data,
);
let dummy_p = dummy_proof(&cyclic_recursion_data_target.dummy_circuit, HashMap::new())?;
pw.set_proof_with_pis_target(&cyclic_recursion_data_target.dummy_proof, &dummy_p);
pw.set_verifier_data_target(
&cyclic_recursion_data_target.dummy_verifier_data,
&cyclic_recursion_data_target.dummy_circuit.verifier_only,
);
}
Ok(())
}
/// Additional checks to be performed on a cyclic recursive proof, beyond verifying the proof itself.
/// Checks that the purported verifier data in the public inputs matches the real verifier data.
pub fn check_cyclic_proof_verifier_data<
@ -272,7 +179,6 @@ where
#[cfg(test)]
mod tests {
use anyhow::Result;
use hashbrown::HashMap;
use crate::field::extension::Extendable;
use crate::field::types::{Field, PrimeField64};
@ -280,13 +186,12 @@ mod tests {
use crate::hash::hash_types::{HashOutTarget, RichField};
use crate::hash::hashing::hash_n_to_hash_no_pad;
use crate::hash::poseidon::{PoseidonHash, PoseidonPermutation};
use crate::iop::witness::PartialWitness;
use crate::iop::witness::{PartialWitness, WitnessWrite};
use crate::plonk::circuit_builder::CircuitBuilder;
use crate::plonk::circuit_data::{CircuitConfig, CommonCircuitData, VerifierCircuitTarget};
use crate::plonk::config::{AlgebraicHasher, GenericConfig, PoseidonGoldilocksConfig};
use crate::recursion::cyclic_recursion::{
check_cyclic_proof_verifier_data, set_cyclic_recursion_data_target, CyclicRecursionData,
};
use crate::recursion::cyclic_recursion::check_cyclic_proof_verifier_data;
use crate::recursion::dummy_circuit::cyclic_base_proof;
// Generates `CommonCircuitData` usable for recursion.
fn common_data_for_recursion<
@ -341,8 +246,8 @@ mod tests {
let one = builder.one();
// Circuit that computes a repeated hash.
let initial_hash = builder.add_virtual_hash();
builder.register_public_inputs(&initial_hash.elements);
let initial_hash_target = builder.add_virtual_hash();
builder.register_public_inputs(&initial_hash_target.elements);
let current_hash_in = builder.add_virtual_hash();
let current_hash_out =
builder.hash_n_to_hash_no_pad::<PoseidonHash>(current_hash_in.elements.to_vec());
@ -350,97 +255,84 @@ mod tests {
let counter = builder.add_virtual_public_input();
let mut common_data = common_data_for_recursion::<F, C, D>();
builder.add_verifier_data_public_inputs();
let verifier_data_target = builder.add_verifier_data_public_inputs();
common_data.num_public_inputs = builder.num_public_inputs();
let condition = builder.add_virtual_bool_target_safe();
// Unpack inner proof's public inputs.
let inner_proof_with_pis = builder.add_virtual_proof_with_pis::<C>(&common_data);
let inner_pis = &inner_proof_with_pis.public_inputs;
let inner_initial_hash = HashOutTarget::try_from(&inner_pis[0..4]).unwrap();
let inner_latest_hash = HashOutTarget::try_from(&inner_pis[4..8]).unwrap();
let inner_counter = inner_pis[8];
let inner_cyclic_proof_with_pis = builder.add_virtual_proof_with_pis::<C>(&common_data);
let inner_cyclic_pis = &inner_cyclic_proof_with_pis.public_inputs;
let inner_cyclic_initial_hash = HashOutTarget::try_from(&inner_cyclic_pis[0..4]).unwrap();
let inner_cyclic_latest_hash = HashOutTarget::try_from(&inner_cyclic_pis[4..8]).unwrap();
let inner_cyclic_counter = inner_cyclic_pis[8];
// Connect our initial hash to that of our inner proof. (If there is no inner proof, the
// initial hash will be unconstrained, which is intentional.)
builder.connect_hashes(initial_hash, inner_initial_hash);
builder.connect_hashes(initial_hash_target, inner_cyclic_initial_hash);
// The input hash is the previous hash output if we have an inner proof, or the initial hash
// if this is the base case.
let actual_hash_in = builder.select_hash(condition, inner_latest_hash, initial_hash);
let actual_hash_in =
builder.select_hash(condition, inner_cyclic_latest_hash, initial_hash_target);
builder.connect_hashes(current_hash_in, actual_hash_in);
// Our chain length will be inner_cyclic_counter + 1 if we have an inner proof, or 1 if not.
let new_counter = builder.mul_add(condition.target, inner_counter, one);
let new_counter = builder.mul_add(condition.target, inner_cyclic_counter, one);
builder.connect(counter, new_counter);
let cyclic_data_target =
builder.cyclic_recursion::<C>(condition, &inner_proof_with_pis, &common_data)?;
builder.conditionally_verify_cyclic_proof_or_dummy::<C>(
condition,
&inner_cyclic_proof_with_pis,
&common_data,
)?;
let cyclic_circuit_data = builder.build::<C>();
let mut pw = PartialWitness::new();
let cyclic_recursion_data = CyclicRecursionData {
proof: &None, // Base case: We don't have a proof to put here yet.
verifier_data: &cyclic_circuit_data.verifier_only,
common_data: &cyclic_circuit_data.common,
};
let initial_hash = [F::ZERO, F::ONE, F::TWO, F::from_canonical_usize(3)];
let initial_hash_pis = initial_hash.into_iter().enumerate().collect();
set_cyclic_recursion_data_target(
&mut pw,
&cyclic_data_target,
&cyclic_recursion_data,
initial_hash_pis,
)?;
pw.set_bool_target(condition, false);
pw.set_proof_with_pis_target::<C, D>(
&inner_cyclic_proof_with_pis,
&cyclic_base_proof(
&common_data,
&cyclic_circuit_data.verifier_only,
initial_hash_pis,
),
);
pw.set_verifier_data_target(&verifier_data_target, &cyclic_circuit_data.verifier_only);
let proof = cyclic_circuit_data.prove(pw)?;
check_cyclic_proof_verifier_data(
&proof,
cyclic_recursion_data.verifier_data,
cyclic_recursion_data.common_data,
&cyclic_circuit_data.verifier_only,
&cyclic_circuit_data.common,
)?;
cyclic_circuit_data.verify(proof.clone())?;
// 1st recursive layer.
let mut pw = PartialWitness::new();
let cyclic_recursion_data = CyclicRecursionData {
proof: &Some(proof), // Input previous proof.
verifier_data: &cyclic_circuit_data.verifier_only,
common_data: &cyclic_circuit_data.common,
};
set_cyclic_recursion_data_target(
&mut pw,
&cyclic_data_target,
&cyclic_recursion_data,
HashMap::new(),
)?;
pw.set_bool_target(condition, true);
pw.set_proof_with_pis_target(&inner_cyclic_proof_with_pis, &proof);
pw.set_verifier_data_target(&verifier_data_target, &cyclic_circuit_data.verifier_only);
let proof = cyclic_circuit_data.prove(pw)?;
check_cyclic_proof_verifier_data(
&proof,
cyclic_recursion_data.verifier_data,
cyclic_recursion_data.common_data,
&cyclic_circuit_data.verifier_only,
&cyclic_circuit_data.common,
)?;
cyclic_circuit_data.verify(proof.clone())?;
// 2nd recursive layer.
let mut pw = PartialWitness::new();
let cyclic_recursion_data = CyclicRecursionData {
proof: &Some(proof), // Input previous proof.
verifier_data: &cyclic_circuit_data.verifier_only,
common_data: &cyclic_circuit_data.common,
};
set_cyclic_recursion_data_target(
&mut pw,
&cyclic_data_target,
&cyclic_recursion_data,
HashMap::new(),
)?;
pw.set_bool_target(condition, true);
pw.set_proof_with_pis_target(&inner_cyclic_proof_with_pis, &proof);
pw.set_verifier_data_target(&verifier_data_target, &cyclic_circuit_data.verifier_only);
let proof = cyclic_circuit_data.prove(pw)?;
check_cyclic_proof_verifier_data(
&proof,
cyclic_recursion_data.verifier_data,
cyclic_recursion_data.common_data,
&cyclic_circuit_data.verifier_only,
&cyclic_circuit_data.common,
)?;
// Verify that the proof correctly computes a repeated hash.

View File

@ -6,11 +6,47 @@ use plonky2_util::ceil_div_usize;
use crate::gates::noop::NoopGate;
use crate::hash::hash_types::RichField;
use crate::iop::witness::{PartialWitness, Witness};
use crate::iop::generator::{GeneratedValues, SimpleGenerator};
use crate::iop::target::Target;
use crate::iop::witness::{PartialWitness, PartitionWitness, WitnessWrite};
use crate::plonk::circuit_builder::CircuitBuilder;
use crate::plonk::circuit_data::{CircuitData, CommonCircuitData};
use crate::plonk::config::GenericConfig;
use crate::plonk::proof::ProofWithPublicInputs;
use crate::plonk::circuit_data::{
CircuitData, CommonCircuitData, VerifierCircuitTarget, VerifierOnlyCircuitData,
};
use crate::plonk::config::{AlgebraicHasher, GenericConfig};
use crate::plonk::proof::{ProofWithPublicInputs, ProofWithPublicInputsTarget};
/// Creates a dummy proof which is suitable for use as a base proof in a cyclic recursion tree.
/// Such a base proof will not actually be verified, so most of its data is arbitrary. However, its
/// public inputs which encode the cyclic verification key must be set properly, and this method
/// takes care of that. It also allows the user to specify any other public inputs which should be
/// set in this base proof.
pub fn cyclic_base_proof<F, C, const D: usize>(
common_data: &CommonCircuitData<F, D>,
verifier_data: &VerifierOnlyCircuitData<C, D>,
mut nonzero_public_inputs: HashMap<usize, F>,
) -> ProofWithPublicInputs<F, C, D>
where
F: RichField + Extendable<D>,
C: GenericConfig<D, F = F>,
C::Hasher: AlgebraicHasher<C::F>,
{
let pis_len = common_data.num_public_inputs;
let cap_elements = common_data.config.fri_config.num_cap_elements();
let start_vk_pis = pis_len - 4 - 4 * cap_elements;
// Add the cyclic verifier data public inputs.
nonzero_public_inputs.extend((start_vk_pis..).zip(verifier_data.circuit_digest.elements));
for i in 0..cap_elements {
let start = start_vk_pis + 4 + 4 * i;
nonzero_public_inputs
.extend((start..).zip(verifier_data.constants_sigmas_cap.0[i].elements));
}
// TODO: A bit wasteful to build a dummy circuit here. We could potentially use a proof that
// just consists of zeros, apart from public inputs.
dummy_proof(&dummy_circuit(common_data), nonzero_public_inputs).unwrap()
}
/// Generate a proof for a dummy circuit. The `public_inputs` parameter lets the caller specify
/// certain public inputs (identified by their indices) which should be given specific values.
@ -65,3 +101,59 @@ pub(crate) fn dummy_circuit<
assert_eq!(&circuit.common, common_data);
circuit
}
impl<F: RichField + Extendable<D>, const D: usize> CircuitBuilder<F, D> {
pub(crate) fn dummy_proof_and_vk<C: GenericConfig<D, F = F> + 'static>(
&mut self,
common_data: &CommonCircuitData<F, D>,
) -> anyhow::Result<(ProofWithPublicInputsTarget<D>, VerifierCircuitTarget)>
where
C::Hasher: AlgebraicHasher<F>,
{
let dummy_circuit = dummy_circuit::<F, C, D>(common_data);
let dummy_proof_with_pis = dummy_proof(&dummy_circuit, HashMap::new())?;
let dummy_proof_with_pis_target = self.add_virtual_proof_with_pis::<C>(common_data);
let dummy_verifier_data_target = VerifierCircuitTarget {
constants_sigmas_cap: self.add_virtual_cap(self.config.fri_config.cap_height),
circuit_digest: self.add_virtual_hash(),
};
self.add_simple_generator(DummyProofGenerator {
proof_with_pis_target: dummy_proof_with_pis_target.clone(),
proof_with_pis: dummy_proof_with_pis,
verifier_data_target: dummy_verifier_data_target.clone(),
verifier_data: dummy_circuit.verifier_only,
});
Ok((dummy_proof_with_pis_target, dummy_verifier_data_target))
}
}
#[derive(Debug)]
pub(crate) struct DummyProofGenerator<F, C, const D: usize>
where
F: RichField + Extendable<D>,
C: GenericConfig<D, F = F>,
{
pub(crate) proof_with_pis_target: ProofWithPublicInputsTarget<D>,
pub(crate) proof_with_pis: ProofWithPublicInputs<F, C, D>,
pub(crate) verifier_data_target: VerifierCircuitTarget,
pub(crate) verifier_data: VerifierOnlyCircuitData<C, D>,
}
impl<F, C, const D: usize> SimpleGenerator<F> for DummyProofGenerator<F, C, D>
where
F: RichField + Extendable<D>,
C: GenericConfig<D, F = F> + 'static,
C::Hasher: AlgebraicHasher<F>,
{
fn dependencies(&self) -> Vec<Target> {
vec![]
}
fn run_once(&self, _witness: &PartitionWitness<F>, out_buffer: &mut GeneratedValues<F>) {
out_buffer.set_proof_with_pis_target(&self.proof_with_pis_target, &self.proof_with_pis);
out_buffer.set_verifier_data_target(&self.verifier_data_target, &self.verifier_data);
}
}

View File

@ -1,4 +1,4 @@
pub mod conditional_recursive_verifier;
pub mod cyclic_recursion;
pub(crate) mod dummy_circuit;
pub mod dummy_circuit;
pub mod recursive_verifier;

View File

@ -191,7 +191,7 @@ mod tests {
use crate::fri::reduction_strategies::FriReductionStrategy;
use crate::fri::FriConfig;
use crate::gates::noop::NoopGate;
use crate::iop::witness::{PartialWitness, Witness};
use crate::iop::witness::{PartialWitness, WitnessWrite};
use crate::plonk::circuit_data::{CircuitConfig, VerifierOnlyCircuitData};
use crate::plonk::config::{GenericConfig, KeccakGoldilocksConfig, PoseidonGoldilocksConfig};
use crate::plonk::proof::{CompressedProofWithPublicInputs, ProofWithPublicInputs};

View File

@ -278,7 +278,7 @@ mod tests {
use super::*;
use crate::field::types::Sample;
use crate::iop::witness::{PartialWitness, Witness};
use crate::iop::witness::{PartialWitness, WitnessWrite};
use crate::plonk::circuit_data::CircuitConfig;
use crate::plonk::config::{GenericConfig, PoseidonGoldilocksConfig};
use crate::plonk::verifier::verify;

View File

@ -6,7 +6,7 @@ use plonky2::field::extension::{Extendable, FieldExtension};
use plonky2::field::polynomial::{PolynomialCoeffs, PolynomialValues};
use plonky2::field::types::{Field, Sample};
use plonky2::hash::hash_types::RichField;
use plonky2::iop::witness::{PartialWitness, Witness};
use plonky2::iop::witness::{PartialWitness, WitnessWrite};
use plonky2::plonk::circuit_builder::CircuitBuilder;
use plonky2::plonk::circuit_data::CircuitConfig;
use plonky2::plonk::config::{GenericConfig, Hasher};

View File

@ -14,7 +14,7 @@ use plonky2::iop::ext_target::ExtensionTarget;
use plonky2::iop::generator::{GeneratedValues, SimpleGenerator, WitnessGenerator};
use plonky2::iop::target::Target;
use plonky2::iop::wire::Wire;
use plonky2::iop::witness::{PartitionWitness, Witness};
use plonky2::iop::witness::{PartitionWitness, Witness, WitnessWrite};
use plonky2::plonk::circuit_builder::CircuitBuilder;
use plonky2::plonk::circuit_data::CircuitConfig;
use plonky2::plonk::vars::{EvaluationTargets, EvaluationVars, EvaluationVarsBase};

View File

@ -16,7 +16,7 @@ use plonky2::iop::ext_target::ExtensionTarget;
use plonky2::iop::generator::{GeneratedValues, SimpleGenerator, WitnessGenerator};
use plonky2::iop::target::Target;
use plonky2::iop::wire::Wire;
use plonky2::iop::witness::{PartitionWitness, Witness};
use plonky2::iop::witness::{PartitionWitness, Witness, WitnessWrite};
use plonky2::plonk::circuit_builder::CircuitBuilder;
use plonky2::plonk::circuit_data::CircuitConfig;
use plonky2::plonk::vars::{

View File

@ -15,7 +15,7 @@ use plonky2::iop::ext_target::ExtensionTarget;
use plonky2::iop::generator::{GeneratedValues, SimpleGenerator, WitnessGenerator};
use plonky2::iop::target::Target;
use plonky2::iop::wire::Wire;
use plonky2::iop::witness::{PartitionWitness, Witness};
use plonky2::iop::witness::{PartitionWitness, Witness, WitnessWrite};
use plonky2::plonk::circuit_builder::CircuitBuilder;
use plonky2::plonk::plonk_common::{reduce_with_powers, reduce_with_powers_ext_circuit};
use plonky2::plonk::vars::{

View File

@ -12,7 +12,7 @@ use plonky2::hash::hash_types::RichField;
use plonky2::iop::ext_target::ExtensionTarget;
use plonky2::iop::generator::{GeneratedValues, SimpleGenerator, WitnessGenerator};
use plonky2::iop::target::Target;
use plonky2::iop::witness::{PartitionWitness, Witness};
use plonky2::iop::witness::{PartitionWitness, Witness, WitnessWrite};
use plonky2::plonk::circuit_builder::CircuitBuilder;
use plonky2::plonk::plonk_common::{reduce_with_powers, reduce_with_powers_ext_circuit};
use plonky2::plonk::vars::{EvaluationTargets, EvaluationVars, EvaluationVarsBase};

View File

@ -15,7 +15,7 @@ use plonky2::iop::ext_target::ExtensionTarget;
use plonky2::iop::generator::{GeneratedValues, SimpleGenerator, WitnessGenerator};
use plonky2::iop::target::Target;
use plonky2::iop::wire::Wire;
use plonky2::iop::witness::{PartitionWitness, Witness};
use plonky2::iop::witness::{PartitionWitness, Witness, WitnessWrite};
use plonky2::plonk::circuit_builder::CircuitBuilder;
use plonky2::plonk::circuit_data::CircuitConfig;
use plonky2::plonk::vars::{

View File

@ -1,6 +1,6 @@
use plonky2::field::types::{Field, PrimeField64};
use plonky2::iop::generator::GeneratedValues;
use plonky2::iop::witness::Witness;
use plonky2::iop::witness::{Witness, WitnessWrite};
use crate::gadgets::arithmetic_u32::U32Target;

View File

@ -8,7 +8,7 @@ use plonky2::iop::ext_target::ExtensionTarget;
use plonky2::iop::generator::{GeneratedValues, SimpleGenerator, WitnessGenerator};
use plonky2::iop::target::Target;
use plonky2::iop::wire::Wire;
use plonky2::iop::witness::{PartitionWitness, Witness};
use plonky2::iop::witness::{PartitionWitness, Witness, WitnessWrite};
use plonky2::plonk::circuit_builder::CircuitBuilder;
use plonky2::plonk::plonk_common::{reduce_with_powers, reduce_with_powers_ext_circuit};
use plonky2::plonk::vars::{

View File

@ -9,7 +9,7 @@ use plonky2::iop::ext_target::ExtensionTarget;
use plonky2::iop::generator::{GeneratedValues, WitnessGenerator};
use plonky2::iop::target::Target;
use plonky2::iop::wire::Wire;
use plonky2::iop::witness::{PartitionWitness, Witness};
use plonky2::iop::witness::{PartitionWitness, Witness, WitnessWrite};
use plonky2::plonk::circuit_builder::CircuitBuilder;
use plonky2::plonk::circuit_data::CircuitConfig;
use plonky2::plonk::vars::{

View File

@ -6,7 +6,7 @@ use plonky2::field::types::Field;
use plonky2::hash::hash_types::RichField;
use plonky2::iop::generator::{GeneratedValues, SimpleGenerator};
use plonky2::iop::target::Target;
use plonky2::iop::witness::{PartitionWitness, Witness};
use plonky2::iop::witness::{PartitionWitness, Witness, WitnessWrite};
use plonky2::plonk::circuit_builder::CircuitBuilder;
use crate::bimap::bimap_from_lists;

View File

@ -6,7 +6,7 @@ use plonky2::field::types::Field;
use plonky2::hash::hash_types::RichField;
use plonky2::iop::generator::{GeneratedValues, SimpleGenerator};
use plonky2::iop::target::{BoolTarget, Target};
use plonky2::iop::witness::{PartitionWitness, Witness};
use plonky2::iop::witness::{PartitionWitness, Witness, WitnessWrite};
use plonky2::plonk::circuit_builder::CircuitBuilder;
use plonky2_util::ceil_div_usize;