Merge pull request #834 from mir-protocol/use_keccak_sponge

Use Keccak sponge table for bootloading
Commit a3d8ecc52c by Daniel Lubarov, 2022-12-03 12:25:34 -08:00 (committed by GitHub)
22 changed files with 249 additions and 1294 deletions

View File

@ -11,9 +11,9 @@ use crate::cpu::membus::NUM_GP_CHANNELS;
use crate::cross_table_lookup::{Column, CrossTableLookup, TableWithColumns};
use crate::keccak::keccak_stark;
use crate::keccak::keccak_stark::KeccakStark;
use crate::keccak_memory::columns::KECCAK_WIDTH_BYTES;
use crate::keccak_memory::keccak_memory_stark;
use crate::keccak_memory::keccak_memory_stark::KeccakMemoryStark;
use crate::keccak_sponge::columns::KECCAK_RATE_BYTES;
use crate::keccak_sponge::keccak_sponge_stark;
use crate::keccak_sponge::keccak_sponge_stark::{num_logic_ctls, KeccakSpongeStark};
use crate::logic;
use crate::logic::LogicStark;
use crate::memory::memory_stark;
@ -24,7 +24,7 @@ use crate::stark::Stark;
pub struct AllStark<F: RichField + Extendable<D>, const D: usize> {
pub cpu_stark: CpuStark<F, D>,
pub keccak_stark: KeccakStark<F, D>,
pub keccak_memory_stark: KeccakMemoryStark<F, D>,
pub keccak_sponge_stark: KeccakSpongeStark<F, D>,
pub logic_stark: LogicStark<F, D>,
pub memory_stark: MemoryStark<F, D>,
pub cross_table_lookups: Vec<CrossTableLookup<F>>,
@ -35,7 +35,7 @@ impl<F: RichField + Extendable<D>, const D: usize> Default for AllStark<F, D> {
Self {
cpu_stark: CpuStark::default(),
keccak_stark: KeccakStark::default(),
keccak_memory_stark: KeccakMemoryStark::default(),
keccak_sponge_stark: KeccakSpongeStark::default(),
logic_stark: LogicStark::default(),
memory_stark: MemoryStark::default(),
cross_table_lookups: all_cross_table_lookups(),
@ -48,7 +48,7 @@ impl<F: RichField + Extendable<D>, const D: usize> AllStark<F, D> {
[
self.cpu_stark.num_permutation_batches(config),
self.keccak_stark.num_permutation_batches(config),
self.keccak_memory_stark.num_permutation_batches(config),
self.keccak_sponge_stark.num_permutation_batches(config),
self.logic_stark.num_permutation_batches(config),
self.memory_stark.num_permutation_batches(config),
]
@ -58,7 +58,7 @@ impl<F: RichField + Extendable<D>, const D: usize> AllStark<F, D> {
[
self.cpu_stark.permutation_batch_size(),
self.keccak_stark.permutation_batch_size(),
self.keccak_memory_stark.permutation_batch_size(),
self.keccak_sponge_stark.permutation_batch_size(),
self.logic_stark.permutation_batch_size(),
self.memory_stark.permutation_batch_size(),
]
@ -69,19 +69,18 @@ impl<F: RichField + Extendable<D>, const D: usize> AllStark<F, D> {
pub enum Table {
Cpu = 0,
Keccak = 1,
KeccakMemory = 2,
KeccakSponge = 2,
Logic = 3,
Memory = 4,
}
pub(crate) const NUM_TABLES: usize = Table::Memory as usize + 1;
#[allow(unused)] // TODO: Should be used soon.
pub(crate) fn all_cross_table_lookups<F: Field>() -> Vec<CrossTableLookup<F>> {
let mut ctls = vec![ctl_keccak(), ctl_logic(), ctl_memory(), ctl_keccak_memory()];
let mut ctls = vec![ctl_keccak(), ctl_logic(), ctl_memory(), ctl_keccak_sponge()];
// TODO: Some CTLs temporarily disabled while we get them working.
disable_ctl(&mut ctls[0]);
disable_ctl(&mut ctls[1]);
disable_ctl(&mut ctls[1]); // Enable once we populate logic log in keccak_sponge_log.
disable_ctl(&mut ctls[2]);
disable_ctl(&mut ctls[3]);
ctls
@ -95,53 +94,52 @@ fn disable_ctl<F: Field>(ctl: &mut CrossTableLookup<F>) {
}
fn ctl_keccak<F: Field>() -> CrossTableLookup<F> {
let cpu_looking = TableWithColumns::new(
Table::Cpu,
cpu_stark::ctl_data_keccak(),
Some(cpu_stark::ctl_filter_keccak()),
let keccak_sponge_looking = TableWithColumns::new(
Table::KeccakSponge,
keccak_sponge_stark::ctl_looking_keccak(),
Some(keccak_sponge_stark::ctl_looking_keccak_filter()),
);
let keccak_memory_looking = TableWithColumns::new(
Table::KeccakMemory,
keccak_memory_stark::ctl_looking_keccak(),
Some(keccak_memory_stark::ctl_filter()),
let keccak_looked = TableWithColumns::new(
Table::Keccak,
keccak_stark::ctl_data(),
Some(keccak_stark::ctl_filter()),
);
CrossTableLookup::new(
vec![cpu_looking, keccak_memory_looking],
TableWithColumns::new(
Table::Keccak,
keccak_stark::ctl_data(),
Some(keccak_stark::ctl_filter()),
),
None,
)
CrossTableLookup::new(vec![keccak_sponge_looking], keccak_looked, None)
}
fn ctl_keccak_memory<F: Field>() -> CrossTableLookup<F> {
CrossTableLookup::new(
vec![TableWithColumns::new(
Table::Cpu,
cpu_stark::ctl_data_keccak_memory(),
Some(cpu_stark::ctl_filter_keccak_memory()),
)],
TableWithColumns::new(
Table::KeccakMemory,
keccak_memory_stark::ctl_looked_data(),
Some(keccak_memory_stark::ctl_filter()),
),
None,
)
fn ctl_keccak_sponge<F: Field>() -> CrossTableLookup<F> {
let cpu_looking = TableWithColumns::new(
Table::Cpu,
cpu_stark::ctl_data_keccak_sponge(),
Some(cpu_stark::ctl_filter_keccak_sponge()),
);
let keccak_sponge_looked = TableWithColumns::new(
Table::KeccakSponge,
keccak_sponge_stark::ctl_looked_data(),
Some(keccak_sponge_stark::ctl_looked_filter()),
);
CrossTableLookup::new(vec![cpu_looking], keccak_sponge_looked, None)
}
fn ctl_logic<F: Field>() -> CrossTableLookup<F> {
CrossTableLookup::new(
vec![TableWithColumns::new(
Table::Cpu,
cpu_stark::ctl_data_logic(),
Some(cpu_stark::ctl_filter_logic()),
)],
TableWithColumns::new(Table::Logic, logic::ctl_data(), Some(logic::ctl_filter())),
None,
)
let cpu_looking = TableWithColumns::new(
Table::Cpu,
cpu_stark::ctl_data_logic(),
Some(cpu_stark::ctl_filter_logic()),
);
let mut all_lookers = vec![cpu_looking];
for i in 0..num_logic_ctls() {
let keccak_sponge_looking = TableWithColumns::new(
Table::KeccakSponge,
keccak_sponge_stark::ctl_looking_logic(i),
// TODO: Double check, but I think it's the same filter for memory and logic?
Some(keccak_sponge_stark::ctl_looking_memory_filter(i)),
);
all_lookers.push(keccak_sponge_looking);
}
let logic_looked =
TableWithColumns::new(Table::Logic, logic::ctl_data(), Some(logic::ctl_filter()));
CrossTableLookup::new(all_lookers, logic_looked, None)
}
fn ctl_memory<F: Field>() -> CrossTableLookup<F> {
@ -157,674 +155,21 @@ fn ctl_memory<F: Field>() -> CrossTableLookup<F> {
Some(cpu_stark::ctl_filter_gp_memory(channel)),
)
});
let keccak_memory_reads = (0..KECCAK_WIDTH_BYTES).map(|i| {
let keccak_sponge_reads = (0..KECCAK_RATE_BYTES).map(|i| {
TableWithColumns::new(
Table::KeccakMemory,
keccak_memory_stark::ctl_looking_memory(i, true),
Some(keccak_memory_stark::ctl_filter()),
)
});
let keccak_memory_writes = (0..KECCAK_WIDTH_BYTES).map(|i| {
TableWithColumns::new(
Table::KeccakMemory,
keccak_memory_stark::ctl_looking_memory(i, false),
Some(keccak_memory_stark::ctl_filter()),
Table::KeccakSponge,
keccak_sponge_stark::ctl_looking_memory(i),
Some(keccak_sponge_stark::ctl_looking_memory_filter(i)),
)
});
let all_lookers = iter::once(cpu_memory_code_read)
.chain(cpu_memory_gp_ops)
.chain(keccak_memory_reads)
.chain(keccak_memory_writes)
.chain(keccak_sponge_reads)
.collect();
CrossTableLookup::new(
all_lookers,
TableWithColumns::new(
Table::Memory,
memory_stark::ctl_data(),
Some(memory_stark::ctl_filter()),
),
None,
)
}
#[cfg(test)]
mod tests {
use std::borrow::BorrowMut;
use anyhow::Result;
use ethereum_types::U256;
use itertools::Itertools;
use plonky2::field::polynomial::PolynomialValues;
use plonky2::field::types::{Field, PrimeField64};
use plonky2::iop::witness::PartialWitness;
use plonky2::plonk::circuit_builder::CircuitBuilder;
use plonky2::plonk::circuit_data::{CircuitConfig, VerifierCircuitData};
use plonky2::plonk::config::{GenericConfig, PoseidonGoldilocksConfig};
use plonky2::util::timing::TimingTree;
use rand::{thread_rng, Rng};
use crate::all_stark::{AllStark, NUM_TABLES};
use crate::config::StarkConfig;
use crate::cpu::cpu_stark::CpuStark;
use crate::cpu::kernel::aggregator::KERNEL;
use crate::cross_table_lookup::testutils::check_ctls;
use crate::keccak::keccak_stark::{KeccakStark, NUM_INPUTS, NUM_ROUNDS};
use crate::keccak_memory::keccak_memory_stark::KeccakMemoryStark;
use crate::logic::{self, LogicStark, Operation};
use crate::memory::memory_stark::tests::generate_random_memory_ops;
use crate::memory::memory_stark::MemoryStark;
use crate::memory::NUM_CHANNELS;
use crate::proof::{AllProof, PublicValues};
use crate::prover::prove_with_traces;
use crate::recursive_verifier::tests::recursively_verify_all_proof;
use crate::recursive_verifier::{
add_virtual_recursive_all_proof, all_verifier_data_recursive_stark_proof,
set_recursive_all_proof_target, RecursiveAllProof,
};
use crate::stark::Stark;
use crate::util::{limb_from_bits_le, trace_rows_to_poly_values};
use crate::verifier::verify_proof;
use crate::{cpu, keccak, memory};
const D: usize = 2;
type C = PoseidonGoldilocksConfig;
type F = <C as GenericConfig<D>>::F;
fn make_keccak_trace<R: Rng>(
num_keccak_perms: usize,
keccak_stark: &KeccakStark<F, D>,
config: &StarkConfig,
rng: &mut R,
) -> Vec<PolynomialValues<F>> {
let keccak_inputs = (0..num_keccak_perms)
.map(|_| [0u64; NUM_INPUTS].map(|_| rng.gen()))
.collect_vec();
keccak_stark.generate_trace(
keccak_inputs,
config.fri_config.num_cap_elements(),
&mut TimingTree::default(),
)
}
fn make_keccak_memory_trace(
keccak_memory_stark: &KeccakMemoryStark<F, D>,
config: &StarkConfig,
) -> Vec<PolynomialValues<F>> {
keccak_memory_stark.generate_trace(
vec![],
config.fri_config.num_cap_elements(),
&mut TimingTree::default(),
)
}
fn make_logic_trace<R: Rng>(
num_rows: usize,
logic_stark: &LogicStark<F, D>,
config: &StarkConfig,
rng: &mut R,
) -> Vec<PolynomialValues<F>> {
let all_ops = [logic::Op::And, logic::Op::Or, logic::Op::Xor];
let ops = (0..num_rows)
.map(|_| {
let op = all_ops[rng.gen_range(0..all_ops.len())];
let input0 = U256(rng.gen());
let input1 = U256(rng.gen());
Operation::new(op, input0, input1)
})
.collect();
logic_stark.generate_trace(
ops,
config.fri_config.num_cap_elements(),
&mut TimingTree::default(),
)
}
fn make_memory_trace<R: Rng>(
num_memory_ops: usize,
memory_stark: &MemoryStark<F, D>,
rng: &mut R,
) -> (Vec<PolynomialValues<F>>, usize) {
let memory_ops = generate_random_memory_ops(num_memory_ops, rng);
let trace = memory_stark.generate_trace(memory_ops, &mut TimingTree::default());
let num_ops = trace[0].values.len();
(trace, num_ops)
}
fn bits_from_opcode(opcode: u8) -> [F; 8] {
[
F::from_bool(opcode & (1 << 0) != 0),
F::from_bool(opcode & (1 << 1) != 0),
F::from_bool(opcode & (1 << 2) != 0),
F::from_bool(opcode & (1 << 3) != 0),
F::from_bool(opcode & (1 << 4) != 0),
F::from_bool(opcode & (1 << 5) != 0),
F::from_bool(opcode & (1 << 6) != 0),
F::from_bool(opcode & (1 << 7) != 0),
]
}
fn make_cpu_trace(
num_keccak_perms: usize,
num_logic_rows: usize,
num_memory_ops: usize,
cpu_stark: &CpuStark<F, D>,
keccak_trace: &[PolynomialValues<F>],
logic_trace: &[PolynomialValues<F>],
memory_trace: &mut [PolynomialValues<F>],
) -> Vec<PolynomialValues<F>> {
let keccak_input_limbs: Vec<[F; 2 * NUM_INPUTS]> = (0..num_keccak_perms)
.map(|i| {
(0..2 * NUM_INPUTS)
.map(|j| {
keccak::columns::reg_input_limb(j)
.eval_table(keccak_trace, (i + 1) * NUM_ROUNDS - 1)
})
.collect::<Vec<_>>()
.try_into()
.unwrap()
})
.collect();
let keccak_output_limbs: Vec<[F; 2 * NUM_INPUTS]> = (0..num_keccak_perms)
.map(|i| {
(0..2 * NUM_INPUTS)
.map(|j| {
keccak_trace[keccak::columns::reg_output_limb(j)].values
[(i + 1) * NUM_ROUNDS - 1]
})
.collect::<Vec<_>>()
.try_into()
.unwrap()
})
.collect();
let mut cpu_trace_rows: Vec<[F; CpuStark::<F, D>::COLUMNS]> = vec![];
let mut bootstrap_row: cpu::columns::CpuColumnsView<F> =
[F::ZERO; CpuStark::<F, D>::COLUMNS].into();
bootstrap_row.is_bootstrap_kernel = F::ONE;
cpu_trace_rows.push(bootstrap_row.into());
for i in 0..num_keccak_perms {
let mut row: cpu::columns::CpuColumnsView<F> =
[F::ZERO; CpuStark::<F, D>::COLUMNS].into();
row.is_keccak = F::ONE;
let keccak = row.general.keccak_mut();
for j in 0..2 * NUM_INPUTS {
keccak.input_limbs[j] = keccak_input_limbs[i][j];
keccak.output_limbs[j] = keccak_output_limbs[i][j];
}
cpu_stark.generate(row.borrow_mut());
cpu_trace_rows.push(row.into());
}
// Pad to `num_memory_ops` for memory testing.
for _ in cpu_trace_rows.len()..num_memory_ops {
let mut row: cpu::columns::CpuColumnsView<F> =
[F::ZERO; CpuStark::<F, D>::COLUMNS].into();
row.opcode_bits = bits_from_opcode(0x5b);
row.is_cpu_cycle = F::ONE;
row.is_kernel_mode = F::ONE;
row.program_counter = F::from_canonical_usize(KERNEL.global_labels["main"]);
cpu_stark.generate(row.borrow_mut());
cpu_trace_rows.push(row.into());
}
for i in 0..num_memory_ops {
let mem_timestamp: usize = memory_trace[memory::columns::TIMESTAMP].values[i]
.to_canonical_u64()
.try_into()
.unwrap();
let clock = mem_timestamp / NUM_CHANNELS;
let channel = mem_timestamp % NUM_CHANNELS;
let filter = memory_trace[memory::columns::FILTER].values[i];
assert!(filter.is_one() || filter.is_zero());
let is_actual_op = filter.is_one();
if is_actual_op {
let row: &mut cpu::columns::CpuColumnsView<F> = cpu_trace_rows[clock].borrow_mut();
row.clock = F::from_canonical_usize(clock);
dbg!(channel, row.mem_channels.len());
let channel = &mut row.mem_channels[channel];
channel.used = F::ONE;
channel.is_read = memory_trace[memory::columns::IS_READ].values[i];
channel.addr_context = memory_trace[memory::columns::ADDR_CONTEXT].values[i];
channel.addr_segment = memory_trace[memory::columns::ADDR_SEGMENT].values[i];
channel.addr_virtual = memory_trace[memory::columns::ADDR_VIRTUAL].values[i];
for j in 0..8 {
channel.value[j] = memory_trace[memory::columns::value_limb(j)].values[i];
}
}
}
for i in 0..num_logic_rows {
let mut row: cpu::columns::CpuColumnsView<F> =
[F::ZERO; CpuStark::<F, D>::COLUMNS].into();
row.is_cpu_cycle = F::ONE;
row.is_kernel_mode = F::ONE;
// Since these are the first cycle rows, we must start with PC=main then increment.
row.program_counter = F::from_canonical_usize(KERNEL.global_labels["main"] + i);
row.opcode_bits = bits_from_opcode(
if logic_trace[logic::columns::IS_AND].values[i] != F::ZERO {
0x16
} else if logic_trace[logic::columns::IS_OR].values[i] != F::ZERO {
0x17
} else if logic_trace[logic::columns::IS_XOR].values[i] != F::ZERO {
0x18
} else {
panic!()
},
);
let input0_bit_cols = logic::columns::limb_bit_cols_for_input(logic::columns::INPUT0);
for (col_cpu, limb_cols_logic) in
row.mem_channels[0].value.iter_mut().zip(input0_bit_cols)
{
*col_cpu = limb_from_bits_le(limb_cols_logic.map(|col| logic_trace[col].values[i]));
}
let input1_bit_cols = logic::columns::limb_bit_cols_for_input(logic::columns::INPUT1);
for (col_cpu, limb_cols_logic) in
row.mem_channels[1].value.iter_mut().zip(input1_bit_cols)
{
*col_cpu = limb_from_bits_le(limb_cols_logic.map(|col| logic_trace[col].values[i]));
}
for (col_cpu, col_logic) in row.mem_channels[2]
.value
.iter_mut()
.zip(logic::columns::RESULT)
{
*col_cpu = logic_trace[col_logic].values[i];
}
cpu_stark.generate(row.borrow_mut());
cpu_trace_rows.push(row.into());
}
// Trap to kernel
{
let mut row: cpu::columns::CpuColumnsView<F> =
[F::ZERO; CpuStark::<F, D>::COLUMNS].into();
let last_row: cpu::columns::CpuColumnsView<F> =
cpu_trace_rows[cpu_trace_rows.len() - 1].into();
row.is_cpu_cycle = F::ONE;
row.opcode_bits = bits_from_opcode(0x0a); // `EXP` is implemented in software
row.is_kernel_mode = F::ONE;
row.program_counter = last_row.program_counter + F::ONE;
row.mem_channels[0].value = [
row.program_counter,
F::ONE,
F::ZERO,
F::ZERO,
F::ZERO,
F::ZERO,
F::ZERO,
F::ZERO,
];
cpu_stark.generate(row.borrow_mut());
cpu_trace_rows.push(row.into());
}
// `EXIT_KERNEL` (to kernel)
{
let mut row: cpu::columns::CpuColumnsView<F> =
[F::ZERO; CpuStark::<F, D>::COLUMNS].into();
row.is_cpu_cycle = F::ONE;
row.opcode_bits = bits_from_opcode(0xf9);
row.is_kernel_mode = F::ONE;
row.program_counter = F::from_canonical_usize(KERNEL.global_labels["sys_exp"]);
row.mem_channels[0].value = [
F::from_canonical_u16(15682),
F::ONE,
F::ZERO,
F::ZERO,
F::ZERO,
F::ZERO,
F::ZERO,
F::ZERO,
];
cpu_stark.generate(row.borrow_mut());
cpu_trace_rows.push(row.into());
}
// `JUMP` (in kernel mode)
{
let mut row: cpu::columns::CpuColumnsView<F> =
[F::ZERO; CpuStark::<F, D>::COLUMNS].into();
row.is_cpu_cycle = F::ONE;
row.opcode_bits = bits_from_opcode(0x56);
row.is_kernel_mode = F::ONE;
row.program_counter = F::from_canonical_u16(15682);
row.mem_channels[0].value = [
F::from_canonical_u16(15106),
F::ZERO,
F::ZERO,
F::ZERO,
F::ZERO,
F::ZERO,
F::ZERO,
F::ZERO,
];
row.mem_channels[1].value = [
F::ONE,
F::ZERO,
F::ZERO,
F::ZERO,
F::ZERO,
F::ZERO,
F::ZERO,
F::ZERO,
];
row.general.jumps_mut().input0_upper_zero = F::ONE;
row.general.jumps_mut().dst_valid_or_kernel = F::ONE;
row.general.jumps_mut().input0_jumpable = F::ONE;
row.general.jumps_mut().input1_sum_inv = F::ONE;
row.general.jumps_mut().should_jump = F::ONE;
cpu_stark.generate(row.borrow_mut());
cpu_trace_rows.push(row.into());
}
// `EXIT_KERNEL` (to userspace)
{
let mut row: cpu::columns::CpuColumnsView<F> =
[F::ZERO; CpuStark::<F, D>::COLUMNS].into();
row.is_cpu_cycle = F::ONE;
row.opcode_bits = bits_from_opcode(0xf9);
row.is_kernel_mode = F::ONE;
row.program_counter = F::from_canonical_u16(15106);
row.mem_channels[0].value = [
F::from_canonical_u16(63064),
F::ZERO,
F::ZERO,
F::ZERO,
F::ZERO,
F::ZERO,
F::ZERO,
F::ZERO,
];
cpu_stark.generate(row.borrow_mut());
cpu_trace_rows.push(row.into());
}
// `JUMP` (taken)
{
let mut row: cpu::columns::CpuColumnsView<F> =
[F::ZERO; CpuStark::<F, D>::COLUMNS].into();
row.is_cpu_cycle = F::ONE;
row.opcode_bits = bits_from_opcode(0x56);
row.is_kernel_mode = F::ZERO;
row.program_counter = F::from_canonical_u16(63064);
row.mem_channels[0].value = [
F::from_canonical_u16(3754),
F::ZERO,
F::ZERO,
F::ZERO,
F::ZERO,
F::ZERO,
F::ZERO,
F::ZERO,
];
row.mem_channels[1].value = [
F::ONE,
F::ZERO,
F::ZERO,
F::ZERO,
F::ZERO,
F::ZERO,
F::ZERO,
F::ZERO,
];
row.general.jumps_mut().input0_upper_zero = F::ONE;
row.general.jumps_mut().dst_valid = F::ONE;
row.general.jumps_mut().dst_valid_or_kernel = F::ONE;
row.general.jumps_mut().input0_jumpable = F::ONE;
row.general.jumps_mut().input1_sum_inv = F::ONE;
row.general.jumps_mut().should_jump = F::ONE;
cpu_stark.generate(row.borrow_mut());
cpu_trace_rows.push(row.into());
}
// `JUMPI` (taken)
{
let mut row: cpu::columns::CpuColumnsView<F> =
[F::ZERO; CpuStark::<F, D>::COLUMNS].into();
row.is_cpu_cycle = F::ONE;
row.opcode_bits = bits_from_opcode(0x57);
row.is_kernel_mode = F::ZERO;
row.program_counter = F::from_canonical_u16(3754);
row.mem_channels[0].value = [
F::from_canonical_u16(37543),
F::ZERO,
F::ZERO,
F::ZERO,
F::ZERO,
F::ZERO,
F::ZERO,
F::ZERO,
];
row.mem_channels[1].value = [
F::ZERO,
F::ZERO,
F::ZERO,
F::ONE,
F::ZERO,
F::ZERO,
F::ZERO,
F::ZERO,
];
row.general.jumps_mut().input0_upper_zero = F::ONE;
row.general.jumps_mut().dst_valid = F::ONE;
row.general.jumps_mut().dst_valid_or_kernel = F::ONE;
row.general.jumps_mut().input0_jumpable = F::ONE;
row.general.jumps_mut().input1_sum_inv = F::ONE;
row.general.jumps_mut().should_jump = F::ONE;
cpu_stark.generate(row.borrow_mut());
cpu_trace_rows.push(row.into());
}
// `JUMPI` (not taken)
{
let mut row: cpu::columns::CpuColumnsView<F> =
[F::ZERO; CpuStark::<F, D>::COLUMNS].into();
row.is_cpu_cycle = F::ONE;
row.opcode_bits = bits_from_opcode(0x57);
row.is_kernel_mode = F::ZERO;
row.program_counter = F::from_canonical_u16(37543);
row.mem_channels[0].value = [
F::from_canonical_u16(37543),
F::ZERO,
F::ZERO,
F::ZERO,
F::ONE,
F::ZERO,
F::ZERO,
F::ZERO,
];
row.general.jumps_mut().input0_upper_sum_inv = F::ONE;
row.general.jumps_mut().dst_valid = F::ONE;
row.general.jumps_mut().dst_valid_or_kernel = F::ONE;
row.general.jumps_mut().input0_jumpable = F::ZERO;
row.general.jumps_mut().should_continue = F::ONE;
cpu_stark.generate(row.borrow_mut());
cpu_trace_rows.push(row.into());
}
// `JUMP` (trapping)
{
let mut row: cpu::columns::CpuColumnsView<F> =
[F::ZERO; CpuStark::<F, D>::COLUMNS].into();
let last_row: cpu::columns::CpuColumnsView<F> =
cpu_trace_rows[cpu_trace_rows.len() - 1].into();
row.is_cpu_cycle = F::ONE;
row.opcode_bits = bits_from_opcode(0x56);
row.is_kernel_mode = F::ZERO;
row.program_counter = last_row.program_counter + F::ONE;
row.mem_channels[0].value = [
F::from_canonical_u16(37543),
F::ZERO,
F::ZERO,
F::ZERO,
F::ONE,
F::ZERO,
F::ZERO,
F::ZERO,
];
row.mem_channels[1].value = [
F::ONE,
F::ZERO,
F::ZERO,
F::ZERO,
F::ZERO,
F::ZERO,
F::ZERO,
F::ZERO,
];
row.general.jumps_mut().input0_upper_sum_inv = F::ONE;
row.general.jumps_mut().dst_valid = F::ONE;
row.general.jumps_mut().dst_valid_or_kernel = F::ONE;
row.general.jumps_mut().input0_jumpable = F::ZERO;
row.general.jumps_mut().input1_sum_inv = F::ONE;
row.general.jumps_mut().should_trap = F::ONE;
cpu_stark.generate(row.borrow_mut());
cpu_trace_rows.push(row.into());
}
// Pad to a power of two.
for i in 0..cpu_trace_rows.len().next_power_of_two() - cpu_trace_rows.len() {
let mut row: cpu::columns::CpuColumnsView<F> =
[F::ZERO; CpuStark::<F, D>::COLUMNS].into();
row.opcode_bits = bits_from_opcode(0xff);
row.is_cpu_cycle = F::ONE;
row.is_kernel_mode = F::ONE;
row.program_counter =
F::from_canonical_usize(KERNEL.global_labels["fault_exception"] + i);
cpu_stark.generate(row.borrow_mut());
cpu_trace_rows.push(row.into());
}
// Ensure we finish in a halted state.
{
let num_rows = cpu_trace_rows.len();
let halt_label = F::from_canonical_usize(KERNEL.global_labels["halt_pc0"]);
let last_row: &mut cpu::columns::CpuColumnsView<F> =
cpu_trace_rows[num_rows - 1].borrow_mut();
last_row.program_counter = halt_label;
}
trace_rows_to_poly_values(cpu_trace_rows)
}
fn get_proof(config: &StarkConfig) -> Result<(AllStark<F, D>, AllProof<F, C, D>)> {
let all_stark = AllStark::default();
let num_logic_rows = 62;
let num_memory_ops = 1 << 5;
let mut rng = thread_rng();
let num_keccak_perms = 2;
let keccak_trace =
make_keccak_trace(num_keccak_perms, &all_stark.keccak_stark, config, &mut rng);
let keccak_memory_trace = make_keccak_memory_trace(&all_stark.keccak_memory_stark, config);
let logic_trace =
make_logic_trace(num_logic_rows, &all_stark.logic_stark, config, &mut rng);
let mem_trace = make_memory_trace(num_memory_ops, &all_stark.memory_stark, &mut rng);
let mut memory_trace = mem_trace.0;
let num_memory_ops = mem_trace.1;
let cpu_trace = make_cpu_trace(
num_keccak_perms,
num_logic_rows,
num_memory_ops,
&all_stark.cpu_stark,
&keccak_trace,
&logic_trace,
&mut memory_trace,
);
let traces = [
cpu_trace,
keccak_trace,
keccak_memory_trace,
logic_trace,
memory_trace,
];
check_ctls(&traces, &all_stark.cross_table_lookups);
let public_values = PublicValues::default();
let proof = prove_with_traces::<F, C, D>(
&all_stark,
config,
traces,
public_values,
&mut TimingTree::default(),
)?;
Ok((all_stark, proof))
}
#[test]
#[ignore] // Ignoring but not deleting so the test can serve as an API usage example
fn test_all_stark() -> Result<()> {
let config = StarkConfig::standard_fast_config();
let (all_stark, proof) = get_proof(&config)?;
verify_proof(all_stark, proof, &config)
}
#[test]
#[ignore] // Ignoring but not deleting so the test can serve as an API usage example
fn test_all_stark_recursive_verifier() -> Result<()> {
init_logger();
let config = StarkConfig::standard_fast_config();
let (all_stark, proof) = get_proof(&config)?;
verify_proof(all_stark.clone(), proof.clone(), &config)?;
recursive_proof(all_stark, proof, &config)
}
fn recursive_proof(
inner_all_stark: AllStark<F, D>,
inner_proof: AllProof<F, C, D>,
inner_config: &StarkConfig,
) -> Result<()> {
let circuit_config = CircuitConfig::standard_recursion_config();
let recursive_all_proof = recursively_verify_all_proof(
&inner_all_stark,
&inner_proof,
inner_config,
&circuit_config,
)?;
let verifier_data: [VerifierCircuitData<F, C, D>; NUM_TABLES] =
all_verifier_data_recursive_stark_proof(
&inner_all_stark,
inner_proof.degree_bits(inner_config),
inner_config,
&circuit_config,
);
let circuit_config = CircuitConfig::standard_recursion_config();
let mut builder = CircuitBuilder::<F, D>::new(circuit_config);
let mut pw = PartialWitness::new();
let recursive_all_proof_target =
add_virtual_recursive_all_proof(&mut builder, &verifier_data);
set_recursive_all_proof_target(&mut pw, &recursive_all_proof_target, &recursive_all_proof);
RecursiveAllProof::verify_circuit(
&mut builder,
recursive_all_proof_target,
&verifier_data,
inner_all_stark.cross_table_lookups,
inner_config,
);
let data = builder.build::<C>();
let proof = data.prove(pw)?;
data.verify(proof)
}
fn init_logger() {
let _ = env_logger::builder().format_timestamp(None).try_init();
}
let memory_looked = TableWithColumns::new(
Table::Memory,
memory_stark::ctl_data(),
Some(memory_stark::ctl_filter()),
);
CrossTableLookup::new(all_lookers, memory_looked, None)
}
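After this change the CPU no longer looks up Keccak permutations directly: the CPU looks into the KeccakSponge table (`ctl_keccak_sponge`), and the sponge table in turn looks into Keccak for the permutation (`ctl_keccak`), into Logic for the rate XORs (one looking table per 32-byte chunk), and into Memory for the per-byte reads. Every CTL enforces the same invariant: the filtered rows contributed by all looking tables form the same multiset as the filtered rows of the looked table. A minimal standalone sketch of that multiset check, with simplified row and filter types rather than the crate's `Column`/`TableWithColumns` machinery:

```rust
use std::collections::HashMap;

/// Sketch of the cross-table-lookup invariant: the multiset of filtered rows
/// drawn from all "looking" tables must equal the multiset of filtered rows
/// in the "looked" table. Types here are simplified stand-ins, not the
/// crate's own.
fn check_ctl_multisets(
    looking: &[(&[Vec<u64>], &[bool])], // (rows, per-row filter) per looking table
    looked: (&[Vec<u64>], &[bool]),     // (rows, per-row filter) for the looked table
) -> bool {
    let mut counts: HashMap<Vec<u64>, i64> = HashMap::new();
    for (rows, filter) in looking {
        for (row, &keep) in rows.iter().zip(filter.iter()) {
            if keep {
                *counts.entry(row.clone()).or_insert(0) += 1;
            }
        }
    }
    let (rows, filter) = looked;
    for (row, &keep) in rows.iter().zip(filter.iter()) {
        if keep {
            *counts.entry(row.clone()).or_insert(0) -= 1;
        }
    }
    counts.values().all(|&c| c == 0)
}

fn main() {
    // A CPU row looking up a sponge result, and the sponge table exposing it.
    let cpu_rows: Vec<Vec<u64>> = vec![vec![1, 2, 3], vec![9, 9, 9]];
    let cpu_filter = vec![true, false]; // only the first row performs a lookup
    let sponge_rows: Vec<Vec<u64>> = vec![vec![1, 2, 3]];
    let sponge_filter = vec![true];
    assert!(check_ctl_multisets(
        &[(&cpu_rows[..], &cpu_filter[..])],
        (&sponge_rows[..], &sponge_filter[..]),
    ));
}
```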

View File

@ -13,66 +13,47 @@ use plonky2::plonk::circuit_builder::CircuitBuilder;
use crate::constraint_consumer::{ConstraintConsumer, RecursiveConstraintConsumer};
use crate::cpu::columns::{CpuColumnsView, NUM_CPU_COLUMNS};
use crate::cpu::kernel::aggregator::KERNEL;
use crate::cpu::kernel::keccak_util::keccakf_u32s;
use crate::cpu::membus::NUM_GP_CHANNELS;
use crate::generation::state::GenerationState;
use crate::keccak_sponge::columns::KECCAK_RATE_U32S;
use crate::memory::segments::Segment;
use crate::vars::{StarkEvaluationTargets, StarkEvaluationVars};
use crate::witness::memory::MemoryAddress;
use crate::witness::util::mem_write_gp_log_and_fill;
/// We can't process more than `NUM_CHANNELS` bytes per row, since that's all the memory bandwidth
/// we have. We also can't process more than 4 bytes (or the number of bytes in a `u32`), since we
/// want them to fit in a single limb of Keccak input.
const BYTES_PER_ROW: usize = 4;
use crate::witness::util::{keccak_sponge_log, mem_write_gp_log_and_fill};
pub(crate) fn generate_bootstrap_kernel<F: Field>(state: &mut GenerationState<F>) {
let mut sponge_state = [0u32; 50];
let mut sponge_input_pos: usize = 0;
// Iterate through chunks of the code, such that we can write one chunk to memory per row.
for chunk in &KERNEL
.padded_code()
.iter()
.enumerate()
.chunks(BYTES_PER_ROW)
{
let mut current_cpu_row = CpuColumnsView::default();
current_cpu_row.is_bootstrap_kernel = F::ONE;
for chunk in &KERNEL.code.iter().enumerate().chunks(NUM_GP_CHANNELS) {
let mut cpu_row = CpuColumnsView::default();
cpu_row.clock = F::from_canonical_usize(state.traces.clock());
cpu_row.is_bootstrap_kernel = F::ONE;
// Write this chunk to memory, while simultaneously packing its bytes into a u32 word.
let mut packed_bytes: u32 = 0;
for (channel, (addr, &byte)) in chunk.enumerate() {
let address = MemoryAddress::new(0, Segment::Code, addr);
let write = mem_write_gp_log_and_fill(
channel,
address,
state,
&mut current_cpu_row,
byte.into(),
);
let write =
mem_write_gp_log_and_fill(channel, address, state, &mut cpu_row, byte.into());
state.traces.push_memory(write);
packed_bytes = (packed_bytes << 8) | byte as u32;
}
sponge_state[sponge_input_pos] = packed_bytes;
let keccak = current_cpu_row.general.keccak_mut();
keccak.input_limbs = sponge_state.map(F::from_canonical_u32);
sponge_input_pos = (sponge_input_pos + 1) % KECCAK_RATE_U32S;
// If we just crossed a multiple of KECCAK_RATE_LIMBS, then we've filled the Keccak input
// buffer, so it's time to absorb.
if sponge_input_pos == 0 {
current_cpu_row.is_keccak = F::ONE;
// TODO: Push sponge_state to Keccak inputs in traces.
keccakf_u32s(&mut sponge_state);
let keccak = current_cpu_row.general.keccak_mut();
keccak.output_limbs = sponge_state.map(F::from_canonical_u32);
}
state.traces.push_cpu(current_cpu_row);
state.traces.push_cpu(cpu_row);
}
let mut final_cpu_row = CpuColumnsView::default();
final_cpu_row.clock = F::from_canonical_usize(state.traces.clock());
final_cpu_row.is_bootstrap_kernel = F::ONE;
final_cpu_row.is_keccak_sponge = F::ONE;
// The Keccak sponge CTL uses memory value columns for its inputs and outputs.
final_cpu_row.mem_channels[0].value[0] = F::ZERO;
final_cpu_row.mem_channels[1].value[0] = F::from_canonical_usize(Segment::Code as usize);
final_cpu_row.mem_channels[2].value[0] = F::ZERO;
final_cpu_row.mem_channels[3].value[0] = F::from_canonical_usize(state.traces.clock());
final_cpu_row.mem_channels[4].value = KERNEL.code_hash.map(F::from_canonical_u32);
state.traces.push_cpu(final_cpu_row);
keccak_sponge_log(
state,
MemoryAddress::new(0, Segment::Code, 0),
KERNEL.code.clone(),
);
}
pub(crate) fn eval_bootstrap_kernel<F: Field, P: PackedField<Scalar = F>>(
@ -90,24 +71,29 @@ pub(crate) fn eval_bootstrap_kernel<F: Field, P: PackedField<Scalar = F>>(
let delta_is_bootstrap = next_is_bootstrap - local_is_bootstrap;
yield_constr.constraint_transition(delta_is_bootstrap * (delta_is_bootstrap + P::ONES));
// TODO: Constraints to enforce that, if IS_BOOTSTRAP_KERNEL,
// - If CLOCK is a multiple of KECCAK_RATE_LIMBS, activate the Keccak CTL, and ensure the output
// is copied to the next row (besides the first limb which will immediately be overwritten).
// - Otherwise, ensure that the Keccak input is copied to the next row (besides the next limb).
// - The next limb we add to the buffer is also written to memory.
// If this is a bootloading row and the i'th memory channel is used, it must have the right
// address, namely context = 0, segment = Code, virt = clock * NUM_GP_CHANNELS + i.
let code_segment = F::from_canonical_usize(Segment::Code as usize);
for (i, channel) in local_values.mem_channels.iter().enumerate() {
let filter = local_is_bootstrap * channel.used;
yield_constr.constraint(filter * channel.addr_context);
yield_constr.constraint(filter * (channel.addr_segment - code_segment));
let expected_virt = local_values.clock * F::from_canonical_usize(NUM_GP_CHANNELS)
+ F::from_canonical_usize(i);
yield_constr.constraint(filter * (channel.addr_virtual - expected_virt));
}
// If IS_BOOTSTRAP_KERNEL changed (from 1 to 0), check that
// - the clock is a multiple of KECCAK_RATE_LIMBS (TODO)
// If this is the final bootstrap row (i.e. delta_is_bootstrap = 1), check that
// - all memory channels are disabled (TODO)
// - the current kernel hash matches a precomputed one
for (&expected, actual) in KERNEL
.code_hash
.iter()
.zip(local_values.general.keccak().output_limbs)
.zip(local_values.mem_channels.last().unwrap().value)
{
let expected = P::from(F::from_canonical_u32(expected));
let _diff = expected - actual;
// TODO: Not working yet.
// yield_constr.constraint_transition(delta_is_bootstrap * diff);
let diff = expected - actual;
yield_constr.constraint_transition(delta_is_bootstrap * diff);
}
}
@ -131,24 +117,39 @@ pub(crate) fn eval_bootstrap_kernel_circuit<F: RichField + Extendable<D>, const
builder.mul_add_extension(delta_is_bootstrap, delta_is_bootstrap, delta_is_bootstrap);
yield_constr.constraint_transition(builder, constraint);
// TODO: Constraints to enforce that, if IS_BOOTSTRAP_KERNEL,
// - If CLOCK is a multiple of KECCAK_RATE_LIMBS, activate the Keccak CTL, and ensure the output
// is copied to the next row (besides the first limb which will immediately be overwritten).
// - Otherwise, ensure that the Keccak input is copied to the next row (besides the next limb).
// - The next limb we add to the buffer is also written to memory.
// If this is a bootloading row and the i'th memory channel is used, it must have the right
// address, namely context = 0, segment = Code, virt = clock * NUM_GP_CHANNELS + i.
let code_segment =
builder.constant_extension(F::Extension::from_canonical_usize(Segment::Code as usize));
for (i, channel) in local_values.mem_channels.iter().enumerate() {
let filter = builder.mul_extension(local_is_bootstrap, channel.used);
let constraint = builder.mul_extension(filter, channel.addr_context);
yield_constr.constraint(builder, constraint);
// If IS_BOOTSTRAP_KERNEL changed (from 1 to 0), check that
// - the clock is a multiple of KECCAK_RATE_LIMBS (TODO)
let segment_diff = builder.sub_extension(channel.addr_segment, code_segment);
let constraint = builder.mul_extension(filter, segment_diff);
yield_constr.constraint(builder, constraint);
let i_ext = builder.constant_extension(F::Extension::from_canonical_usize(i));
let num_gp_channels_f = F::from_canonical_usize(NUM_GP_CHANNELS);
let expected_virt =
builder.mul_const_add_extension(num_gp_channels_f, local_values.clock, i_ext);
let virt_diff = builder.sub_extension(channel.addr_virtual, expected_virt);
let constraint = builder.mul_extension(filter, virt_diff);
yield_constr.constraint(builder, constraint);
}
// If this is the final bootstrap row (i.e. delta_is_bootstrap = 1), check that
// - all memory channels are disabled (TODO)
// - the current kernel hash matches a precomputed one
for (&expected, actual) in KERNEL
.code_hash
.iter()
.zip(local_values.general.keccak().output_limbs)
.zip(local_values.mem_channels.last().unwrap().value)
{
let expected = builder.constant_extension(F::Extension::from_canonical_u32(expected));
let diff = builder.sub_extension(expected, actual);
let _constraint = builder.mul_extension(delta_is_bootstrap, diff);
// TODO: Not working yet.
// yield_constr.constraint_transition(builder, constraint);
let constraint = builder.mul_extension(delta_is_bootstrap, diff);
yield_constr.constraint_transition(builder, constraint);
}
}
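The constraints above pin down the bootstrap memory traffic: on a bootstrap row at cycle `clock`, a used general-purpose channel `i` must address context 0, Segment::Code, virtual address clock * NUM_GP_CHANNELS + i, and on the row where bootstrapping ends, the last memory channel must carry the kernel code hash. A small native-Rust sketch of just the addressing arithmetic; the NUM_GP_CHANNELS value below is a placeholder, not the crate's constant:

```rust
/// Placeholder value for the sketch; the real constant lives in cpu::membus.
const NUM_GP_CHANNELS: usize = 5;

/// On cycle `clock`, general-purpose channel `channel` writes one code byte to
/// context 0, Segment::Code, at this virtual address.
fn expected_virtual_address(clock: usize, channel: usize) -> usize {
    assert!(channel < NUM_GP_CHANNELS);
    clock * NUM_GP_CHANNELS + channel
}

fn main() {
    // Walk some code bytes the way the bootstrap rows would, and confirm the
    // flat byte index matches the (clock, channel) -> address mapping.
    let code: Vec<u8> = (0..=255).collect();
    for (addr, _byte) in code.iter().enumerate() {
        let clock = addr / NUM_GP_CHANNELS;
        let channel = addr % NUM_GP_CHANNELS;
        assert_eq!(expected_virtual_address(clock, channel), addr);
    }
}
```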

View File

@ -6,7 +6,6 @@ use std::mem::{size_of, transmute};
/// operation is occurring at this row.
#[derive(Clone, Copy)]
pub(crate) union CpuGeneralColumnsView<T: Copy> {
keccak: CpuKeccakView<T>,
arithmetic: CpuArithmeticView<T>,
logic: CpuLogicView<T>,
jumps: CpuJumpsView<T>,
@ -14,16 +13,6 @@ pub(crate) union CpuGeneralColumnsView<T: Copy> {
}
impl<T: Copy> CpuGeneralColumnsView<T> {
// SAFETY: Each view is a valid interpretation of the underlying array.
pub(crate) fn keccak(&self) -> &CpuKeccakView<T> {
unsafe { &self.keccak }
}
// SAFETY: Each view is a valid interpretation of the underlying array.
pub(crate) fn keccak_mut(&mut self) -> &mut CpuKeccakView<T> {
unsafe { &mut self.keccak }
}
// SAFETY: Each view is a valid interpretation of the underlying array.
pub(crate) fn arithmetic(&self) -> &CpuArithmeticView<T> {
unsafe { &self.arithmetic }
@ -94,12 +83,6 @@ impl<T: Copy> BorrowMut<[T; NUM_SHARED_COLUMNS]> for CpuGeneralColumnsView<T> {
}
}
#[derive(Copy, Clone)]
pub(crate) struct CpuKeccakView<T: Copy> {
pub(crate) input_limbs: [T; 50],
pub(crate) output_limbs: [T; 50],
}
#[derive(Copy, Clone)]
pub(crate) struct CpuArithmeticView<T: Copy> {
// TODO: Add "looking" columns for the arithmetic CTL.

View File

@ -69,11 +69,8 @@ pub struct CpuColumnsView<T: Copy> {
/// If CPU cycle: the opcode, broken up into bits in little-endian order.
pub opcode_bits: [T; 8],
/// Filter. 1 iff a Keccak lookup is performed on this row.
pub is_keccak: T,
/// Filter. 1 iff a Keccak memory lookup is performed on this row.
pub is_keccak_memory: T,
/// Filter. 1 iff a Keccak sponge lookup is performed on this row.
pub is_keccak_sponge: T,
pub(crate) general: CpuGeneralColumnsView<T>,

View File

@ -20,34 +20,28 @@ use crate::memory::{NUM_CHANNELS, VALUE_LIMBS};
use crate::stark::Stark;
use crate::vars::{StarkEvaluationTargets, StarkEvaluationVars};
pub fn ctl_data_keccak<F: Field>() -> Vec<Column<F>> {
let keccak = COL_MAP.general.keccak();
let mut res: Vec<_> = Column::singles(keccak.input_limbs).collect();
res.extend(Column::singles(keccak.output_limbs));
res
}
pub fn ctl_data_keccak_memory<F: Field>() -> Vec<Column<F>> {
pub fn ctl_data_keccak_sponge<F: Field>() -> Vec<Column<F>> {
// When executing KECCAK_GENERAL, the GP memory channels are used as follows:
// GP channel 0: stack[-1] = context
// GP channel 1: stack[-2] = segment
// GP channel 2: stack[-3] = virtual
// GP channel 2: stack[-3] = virt
// GP channel 3: stack[-4] = len
// GP channel 4: pushed = outputs
let context = Column::single(COL_MAP.mem_channels[0].value[0]);
let segment = Column::single(COL_MAP.mem_channels[1].value[0]);
let virt = Column::single(COL_MAP.mem_channels[2].value[0]);
let len = Column::single(COL_MAP.mem_channels[3].value[0]);
let num_channels = F::from_canonical_usize(NUM_CHANNELS);
let clock = Column::linear_combination([(COL_MAP.clock, num_channels)]);
let timestamp = Column::linear_combination([(COL_MAP.clock, num_channels)]);
vec![context, segment, virt, clock]
let mut cols = vec![context, segment, virt, len, timestamp];
cols.extend(COL_MAP.mem_channels[3].value.map(Column::single));
cols
}
pub fn ctl_filter_keccak<F: Field>() -> Column<F> {
Column::single(COL_MAP.is_keccak)
}
pub fn ctl_filter_keccak_memory<F: Field>() -> Column<F> {
Column::single(COL_MAP.is_keccak_memory)
pub fn ctl_filter_keccak_sponge<F: Field>() -> Column<F> {
Column::single(COL_MAP.is_keccak_sponge)
}
pub fn ctl_data_logic<F: Field>() -> Vec<Column<F>> {
@ -122,6 +116,7 @@ pub struct CpuStark<F, const D: usize> {
}
impl<F: RichField, const D: usize> CpuStark<F, D> {
// TODO: Remove?
pub fn generate(&self, local_values: &mut [F; NUM_CPU_COLUMNS]) {
let local_values: &mut CpuColumnsView<_> = local_values.borrow_mut();
decode::generate(local_values);

View File

@ -2,19 +2,17 @@ use std::collections::HashMap;
use ethereum_types::U256;
use itertools::izip;
use keccak_hash::keccak;
use log::debug;
use plonky2_util::ceil_div_usize;
use super::ast::PushTarget;
use crate::cpu::kernel::ast::Item::LocalLabelDeclaration;
use crate::cpu::kernel::ast::{File, Item, StackReplacement};
use crate::cpu::kernel::keccak_util::hash_kernel;
use crate::cpu::kernel::opcodes::{get_opcode, get_push_opcode};
use crate::cpu::kernel::optimizer::optimize_asm;
use crate::cpu::kernel::stack::stack_manipulation::expand_stack_manipulation;
use crate::cpu::kernel::utils::u256_to_trimmed_be_bytes;
use crate::generation::prover_input::ProverInputFn;
use crate::keccak_sponge::columns::KECCAK_RATE_BYTES;
/// The number of bytes to push when pushing an offset within the code (i.e. when assembling jumps).
/// Ideally we would automatically use the minimal number of bytes required, but that would be
@ -41,8 +39,10 @@ impl Kernel {
global_labels: HashMap<String, usize>,
prover_inputs: HashMap<usize, ProverInputFn>,
) -> Self {
let code_hash = hash_kernel(&Self::padded_code_helper(&code));
let code_hash_bytes = keccak(&code).0;
let code_hash = std::array::from_fn(|i| {
u32::from_le_bytes(std::array::from_fn(|j| code_hash_bytes[i * 4 + j]))
});
Self {
code,
code_hash,
@ -51,18 +51,6 @@ impl Kernel {
}
}
/// Zero-pads the code such that its length is a multiple of the Keccak rate.
pub(crate) fn padded_code(&self) -> Vec<u8> {
Self::padded_code_helper(&self.code)
}
fn padded_code_helper(code: &[u8]) -> Vec<u8> {
let padded_len = ceil_div_usize(code.len(), KECCAK_RATE_BYTES) * KECCAK_RATE_BYTES;
let mut padded_code = code.to_vec();
padded_code.resize(padded_len, 0);
padded_code
}
/// Get a string representation of the current offset for debugging purposes.
pub(crate) fn offset_name(&self, offset: usize) -> String {
self.offset_label(offset)

View File

@ -1,29 +1,6 @@
use tiny_keccak::keccakf;
use crate::keccak_memory::columns::KECCAK_WIDTH_BYTES;
use crate::keccak_sponge::columns::{KECCAK_RATE_BYTES, KECCAK_RATE_U32S, KECCAK_WIDTH_U32S};
/// A Keccak-f based hash.
///
/// This hash does not use standard Keccak padding, since we don't care about extra zeros at the
/// end of the code. It also uses an overwrite-mode sponge, rather than a standard sponge where
/// inputs are xor'ed in.
pub(crate) fn hash_kernel(code: &[u8]) -> [u32; 8] {
debug_assert_eq!(
code.len() % KECCAK_RATE_BYTES,
0,
"Code should have been padded to a multiple of the Keccak rate."
);
let mut state = [0u32; 50];
for chunk in code.chunks(KECCAK_RATE_BYTES) {
for i in 0..KECCAK_RATE_U32S {
state[i] = u32::from_le_bytes(std::array::from_fn(|j| chunk[i * 4 + j]));
}
keccakf_u32s(&mut state);
}
state[..8].try_into().unwrap()
}
use crate::keccak_sponge::columns::{KECCAK_WIDTH_BYTES, KECCAK_WIDTH_U32S};
/// Like tiny-keccak's `keccakf`, but deals with `u32` limbs instead of `u64` limbs.
pub(crate) fn keccakf_u32s(state_u32s: &mut [u32; KECCAK_WIDTH_U32S]) {
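`keccakf_u32s` wraps the standard 25-limb u64 permutation in a 50-limb u32 view of the state. A sketch of such a wrapper; the limb-packing order (lower-indexed u32 as the low half of each u64) is an assumption here and may differ from the in-tree implementation:

```rust
use tiny_keccak::keccakf;

/// Sketch of a u32-limbed wrapper around tiny-keccak's `keccakf`, which
/// operates on [u64; 25]. Assumption: little-endian limb order when packing.
fn keccakf_u32s_sketch(state_u32s: &mut [u32; 50]) {
    let mut state_u64s = [0u64; 25];
    for (i, limb) in state_u64s.iter_mut().enumerate() {
        *limb = state_u32s[2 * i] as u64 | ((state_u32s[2 * i + 1] as u64) << 32);
    }
    keccakf(&mut state_u64s);
    for (i, &limb) in state_u64s.iter().enumerate() {
        state_u32s[2 * i] = limb as u32;
        state_u32s[2 * i + 1] = (limb >> 32) as u32;
    }
}

fn main() {
    let mut state = [0u32; 50];
    keccakf_u32s_sketch(&mut state);
    // The permutation of the all-zero state is a fixed, nonzero value.
    assert_ne!(state, [0u32; 50]);
}
```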

View File

@ -694,6 +694,7 @@ pub(crate) mod testutils {
type MultiSet<F> = HashMap<Vec<F>, Vec<(Table, usize)>>;
/// Check that the provided traces and cross-table lookups are consistent.
#[allow(unused)] // TODO: used later?
pub(crate) fn check_ctls<F: Field>(
trace_poly_values: &[Vec<PolynomialValues<F>>],
cross_table_lookups: &[CrossTableLookup<F>],

View File

@ -22,7 +22,6 @@ impl From<Vec<String>> for ProverInputFn {
}
impl<F: Field> GenerationState<F> {
#[allow(unused)] // TODO: Should be used soon.
pub(crate) fn prover_input(&mut self, input_fn: &ProverInputFn) -> U256 {
match input_fn.0[0].as_str() {
"end_of_txns" => self.run_end_of_txns(),

View File

@ -1,29 +0,0 @@
pub(crate) const KECCAK_WIDTH_BYTES: usize = 200;
/// 1 if this row represents a real operation; 0 if it's a padding row.
pub(crate) const COL_IS_REAL: usize = 0;
// The address at which we will read inputs and write outputs.
pub(crate) const COL_CONTEXT: usize = 1;
pub(crate) const COL_SEGMENT: usize = 2;
pub(crate) const COL_VIRTUAL: usize = 3;
/// The timestamp at which inputs should be read from memory.
/// Outputs will be written at the following timestamp.
pub(crate) const COL_READ_TIMESTAMP: usize = 4;
const START_INPUT_LIMBS: usize = 5;
/// A byte of the input.
pub(crate) fn col_input_byte(i: usize) -> usize {
debug_assert!(i < KECCAK_WIDTH_BYTES);
START_INPUT_LIMBS + i
}
const START_OUTPUT_LIMBS: usize = START_INPUT_LIMBS + KECCAK_WIDTH_BYTES;
/// A byte of the output.
pub(crate) fn col_output_byte(i: usize) -> usize {
debug_assert!(i < KECCAK_WIDTH_BYTES);
START_OUTPUT_LIMBS + i
}
pub const NUM_COLUMNS: usize = START_OUTPUT_LIMBS + KECCAK_WIDTH_BYTES;

View File

@ -1,222 +0,0 @@
use std::marker::PhantomData;
use plonky2::field::extension::{Extendable, FieldExtension};
use plonky2::field::packed::PackedField;
use plonky2::field::polynomial::PolynomialValues;
use plonky2::field::types::Field;
use plonky2::hash::hash_types::RichField;
use plonky2::timed;
use plonky2::util::timing::TimingTree;
use crate::constraint_consumer::{ConstraintConsumer, RecursiveConstraintConsumer};
use crate::cross_table_lookup::Column;
use crate::keccak::keccak_stark::NUM_INPUTS;
use crate::keccak_memory::columns::*;
use crate::stark::Stark;
use crate::util::trace_rows_to_poly_values;
use crate::vars::{StarkEvaluationTargets, StarkEvaluationVars};
use crate::witness::memory::MemoryAddress;
pub(crate) fn ctl_looked_data<F: Field>() -> Vec<Column<F>> {
Column::singles([COL_CONTEXT, COL_SEGMENT, COL_VIRTUAL, COL_READ_TIMESTAMP]).collect()
}
pub(crate) fn ctl_looking_keccak<F: Field>() -> Vec<Column<F>> {
let input_cols = (0..50).map(|i| {
Column::le_bytes((0..4).map(|j| {
let byte_index = i * 4 + j;
col_input_byte(byte_index)
}))
});
let output_cols = (0..50).map(|i| {
Column::le_bytes((0..4).map(|j| {
let byte_index = i * 4 + j;
col_output_byte(byte_index)
}))
});
input_cols.chain(output_cols).collect()
}
pub(crate) fn ctl_looking_memory<F: Field>(i: usize, is_read: bool) -> Vec<Column<F>> {
let mut res = vec![Column::constant(F::from_bool(is_read))];
res.extend(Column::singles([COL_CONTEXT, COL_SEGMENT, COL_VIRTUAL]));
res.push(Column::single(col_input_byte(i)));
// Since we're reading or writing a single byte, the higher limbs must be zero.
res.extend((1..8).map(|_| Column::zero()));
// Since COL_READ_TIMESTAMP is the read time, we add 1 if this is a write.
let is_write_f = F::from_bool(!is_read);
res.push(Column::linear_combination_with_constant(
[(COL_READ_TIMESTAMP, F::ONE)],
is_write_f,
));
assert_eq!(
res.len(),
crate::memory::memory_stark::ctl_data::<F>().len()
);
res
}
/// CTL filter used for both directions (looked and looking).
pub(crate) fn ctl_filter<F: Field>() -> Column<F> {
Column::single(COL_IS_REAL)
}
/// Information about a Keccak memory operation needed for witness generation.
#[derive(Debug)]
pub(crate) struct KeccakMemoryOp {
/// The base address at which we will read inputs and write outputs.
pub(crate) address: MemoryAddress,
/// The timestamp at which inputs should be read from memory.
/// Outputs will be written at the following timestamp.
pub(crate) read_timestamp: usize,
/// The input that was read at that address.
pub(crate) input: [u64; NUM_INPUTS],
pub(crate) output: [u64; NUM_INPUTS],
}
#[derive(Copy, Clone, Default)]
pub struct KeccakMemoryStark<F, const D: usize> {
pub(crate) f: PhantomData<F>,
}
impl<F: RichField + Extendable<D>, const D: usize> KeccakMemoryStark<F, D> {
#[allow(unused)] // TODO: Should be used soon.
pub(crate) fn generate_trace(
&self,
operations: Vec<KeccakMemoryOp>,
min_rows: usize,
timing: &mut TimingTree,
) -> Vec<PolynomialValues<F>> {
// Generate the witness row-wise.
let trace_rows = timed!(
timing,
"generate trace rows",
self.generate_trace_rows(operations, min_rows)
);
let trace_polys = timed!(
timing,
"convert to PolynomialValues",
trace_rows_to_poly_values(trace_rows)
);
trace_polys
}
fn generate_trace_rows(
&self,
operations: Vec<KeccakMemoryOp>,
min_rows: usize,
) -> Vec<[F; NUM_COLUMNS]> {
let num_rows = operations.len().max(min_rows).next_power_of_two();
let mut rows = Vec::with_capacity(num_rows);
for op in operations {
rows.push(self.generate_row_for_op(op));
}
let padding_row = self.generate_padding_row();
for _ in rows.len()..num_rows {
rows.push(padding_row);
}
rows
}
fn generate_row_for_op(&self, op: KeccakMemoryOp) -> [F; NUM_COLUMNS] {
let mut row = [F::ZERO; NUM_COLUMNS];
row[COL_IS_REAL] = F::ONE;
row[COL_CONTEXT] = F::from_canonical_usize(op.address.context);
row[COL_SEGMENT] = F::from_canonical_usize(op.address.segment);
row[COL_VIRTUAL] = F::from_canonical_usize(op.address.virt);
row[COL_READ_TIMESTAMP] = F::from_canonical_usize(op.read_timestamp);
for i in 0..25 {
let input_u64 = op.input[i];
let output_u64 = op.output[i];
for j in 0..8 {
let byte_index = i * 8 + j;
row[col_input_byte(byte_index)] = F::from_canonical_u8(input_u64.to_le_bytes()[j]);
row[col_output_byte(byte_index)] =
F::from_canonical_u8(output_u64.to_le_bytes()[j]);
}
}
row
}
fn generate_padding_row(&self) -> [F; NUM_COLUMNS] {
// We just need COL_IS_REAL to be zero, which it is by default.
// The other fields will have no effect.
[F::ZERO; NUM_COLUMNS]
}
}
impl<F: RichField + Extendable<D>, const D: usize> Stark<F, D> for KeccakMemoryStark<F, D> {
const COLUMNS: usize = NUM_COLUMNS;
fn eval_packed_generic<FE, P, const D2: usize>(
&self,
vars: StarkEvaluationVars<FE, P, { Self::COLUMNS }>,
yield_constr: &mut ConstraintConsumer<P>,
) where
FE: FieldExtension<D2, BaseField = F>,
P: PackedField<Scalar = FE>,
{
// is_real must be 0 or 1.
let is_real = vars.local_values[COL_IS_REAL];
yield_constr.constraint(is_real * (is_real - P::ONES));
}
fn eval_ext_circuit(
&self,
builder: &mut plonky2::plonk::circuit_builder::CircuitBuilder<F, D>,
vars: StarkEvaluationTargets<D, { Self::COLUMNS }>,
yield_constr: &mut RecursiveConstraintConsumer<F, D>,
) {
// is_real must be 0 or 1.
let is_real = vars.local_values[COL_IS_REAL];
let constraint = builder.mul_sub_extension(is_real, is_real, is_real);
yield_constr.constraint(builder, constraint);
}
fn constraint_degree(&self) -> usize {
2
}
}
#[cfg(test)]
mod tests {
use anyhow::Result;
use plonky2::plonk::config::{GenericConfig, PoseidonGoldilocksConfig};
use crate::keccak_memory::keccak_memory_stark::KeccakMemoryStark;
use crate::stark_testing::{test_stark_circuit_constraints, test_stark_low_degree};
#[test]
fn test_stark_degree() -> Result<()> {
const D: usize = 2;
type C = PoseidonGoldilocksConfig;
type F = <C as GenericConfig<D>>::F;
type S = KeccakMemoryStark<F, D>;
let stark = S {
f: Default::default(),
};
test_stark_low_degree(stark)
}
#[test]
fn test_stark_circuit() -> Result<()> {
const D: usize = 2;
type C = PoseidonGoldilocksConfig;
type F = <C as GenericConfig<D>>::F;
type S = KeccakMemoryStark<F, D>;
let stark = S {
f: Default::default(),
};
test_stark_circuit_constraints::<F, C, S, D>(stark)
}
}

View File

@ -1,2 +0,0 @@
pub mod columns;
pub mod keccak_memory_stark;

View File

@ -23,7 +23,6 @@ use crate::util::trace_rows_to_poly_values;
use crate::vars::{StarkEvaluationTargets, StarkEvaluationVars};
use crate::witness::memory::MemoryAddress;
#[allow(unused)] // TODO: Should be used soon.
pub(crate) fn ctl_looked_data<F: Field>() -> Vec<Column<F>> {
let cols = KECCAK_SPONGE_COL_MAP;
let outputs = Column::singles(&cols.updated_state_u32s[..8]);
@ -31,14 +30,13 @@ pub(crate) fn ctl_looked_data<F: Field>() -> Vec<Column<F>> {
cols.context,
cols.segment,
cols.virt,
cols.timestamp,
cols.len,
cols.timestamp,
])
.chain(outputs)
.collect()
}
#[allow(unused)] // TODO: Should be used soon.
pub(crate) fn ctl_looking_keccak<F: Field>() -> Vec<Column<F>> {
let cols = KECCAK_SPONGE_COL_MAP;
Column::singles(
@ -52,7 +50,6 @@ pub(crate) fn ctl_looking_keccak<F: Field>() -> Vec<Column<F>> {
.collect()
}
#[allow(unused)] // TODO: Should be used soon.
pub(crate) fn ctl_looking_memory<F: Field>(i: usize) -> Vec<Column<F>> {
let cols = KECCAK_SPONGE_COL_MAP;
@ -81,14 +78,18 @@ pub(crate) fn ctl_looking_memory<F: Field>(i: usize) -> Vec<Column<F>> {
res
}
pub(crate) fn num_logic_ctls() -> usize {
const U8S_PER_CTL: usize = 32;
ceil_div_usize(KECCAK_RATE_BYTES, U8S_PER_CTL)
}
/// CTL for performing the `i`th logic CTL. Since we need to do 136 byte XORs, and the logic CTL can
/// XOR 32 bytes per CTL, there are 5 such CTLs.
#[allow(unused)] // TODO: Should be used soon.
pub(crate) fn ctl_looking_logic<F: Field>(i: usize) -> Vec<Column<F>> {
const U32S_PER_CTL: usize = 8;
const U8S_PER_CTL: usize = 32;
debug_assert!(i < ceil_div_usize(KECCAK_RATE_BYTES, U8S_PER_CTL));
debug_assert!(i < num_logic_ctls());
let cols = KECCAK_SPONGE_COL_MAP;
let mut res = vec![
@ -111,7 +112,7 @@ pub(crate) fn ctl_looking_logic<F: Field>(i: usize) -> Vec<Column<F>> {
.chunks(size_of::<u32>())
.map(|chunk| Column::le_bytes(chunk))
.chain(repeat(Column::zero()))
.take(U8S_PER_CTL),
.take(U32S_PER_CTL),
);
// The output contains the XOR'd rate part.
@ -124,14 +125,12 @@ pub(crate) fn ctl_looking_logic<F: Field>(i: usize) -> Vec<Column<F>> {
res
}
#[allow(unused)] // TODO: Should be used soon.
pub(crate) fn ctl_looked_filter<F: Field>() -> Column<F> {
// The CPU table is only interested in our final-block rows, since those contain the final
// sponge output.
Column::single(KECCAK_SPONGE_COL_MAP.is_final_block)
}
#[allow(unused)] // TODO: Should be used soon.
/// CTL filter for reading the `i`th byte of input from memory.
pub(crate) fn ctl_looking_memory_filter<F: Field>(i: usize) -> Column<F> {
// We perform the `i`th read if either
@ -141,6 +140,11 @@ pub(crate) fn ctl_looking_memory_filter<F: Field>(i: usize) -> Column<F> {
Column::sum(once(&cols.is_full_input_block).chain(&cols.is_final_input_len[i..]))
}
pub(crate) fn ctl_looking_keccak_filter<F: Field>() -> Column<F> {
let cols = KECCAK_SPONGE_COL_MAP;
Column::sum([cols.is_full_input_block, cols.is_final_block])
}
/// Information about a Keccak sponge operation needed for witness generation.
#[derive(Debug)]
pub(crate) struct KeccakSpongeOp {
@ -150,15 +154,12 @@ pub(crate) struct KeccakSpongeOp {
/// The timestamp at which inputs are read.
pub(crate) timestamp: usize,
/// The length of the input, in bytes.
pub(crate) len: usize,
/// The input that was read.
pub(crate) input: Vec<u8>,
}
#[derive(Copy, Clone, Default)]
pub(crate) struct KeccakSpongeStark<F, const D: usize> {
pub struct KeccakSpongeStark<F, const D: usize> {
f: PhantomData<F>,
}
@ -259,7 +260,7 @@ impl<F: RichField + Extendable<D>, const D: usize> KeccakSpongeStark<F, D> {
sponge_state: [u32; KECCAK_WIDTH_U32S],
final_inputs: &[u8],
) -> KeccakSpongeColumnsView<F> {
assert_eq!(already_absorbed_bytes + final_inputs.len(), op.len);
assert_eq!(already_absorbed_bytes + final_inputs.len(), op.input.len());
let mut row = KeccakSpongeColumnsView {
is_final_block: F::ONE,
@ -297,7 +298,7 @@ impl<F: RichField + Extendable<D>, const D: usize> KeccakSpongeStark<F, D> {
row.segment = F::from_canonical_usize(op.base_address.segment);
row.virt = F::from_canonical_usize(op.base_address.virt);
row.timestamp = F::from_canonical_usize(op.timestamp);
row.len = F::from_canonical_usize(op.len);
row.len = F::from_canonical_usize(op.input.len());
row.already_absorbed_bytes = F::from_canonical_usize(already_absorbed_bytes);
row.original_rate_u32s = sponge_state[..KECCAK_RATE_U32S]
@ -448,7 +449,6 @@ mod tests {
virt: 0,
},
timestamp: 0,
len: input.len(),
input,
};
let stark = S::default();
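`num_logic_ctls` is ceiling division of the 136-byte Keccak-256 rate by the 32 bytes (eight u32 limbs) a single logic CTL can XOR, giving 5 CTLs; the last one covers only 8 real bytes and pads the remaining positions with zero columns, as `ctl_looking_logic` does. A quick check of that arithmetic:

```rust
/// Keccak-256 absorbs 136 bytes per block (the rate); one logic CTL XORs 32
/// bytes (eight u32 limbs).
const KECCAK_RATE_BYTES: usize = 136;
const BYTES_PER_LOGIC_CTL: usize = 32;

/// Ceiling division: the number of logic CTLs needed per absorbed block.
fn num_logic_ctls() -> usize {
    (KECCAK_RATE_BYTES + BYTES_PER_LOGIC_CTL - 1) / BYTES_PER_LOGIC_CTL
}

fn main() {
    assert_eq!(num_logic_ctls(), 5);
    // The last CTL carries only 136 - 4 * 32 = 8 real bytes; the remaining
    // byte positions are filled with zero columns.
    let last_ctl_bytes = KECCAK_RATE_BYTES - (num_logic_ctls() - 1) * BYTES_PER_LOGIC_CTL;
    assert_eq!(last_ctl_bytes, 8);
}
```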

View File

@ -15,7 +15,6 @@ pub mod cross_table_lookup;
pub mod generation;
mod get_challenges;
pub mod keccak;
pub mod keccak_memory;
pub mod keccak_sponge;
pub mod logic;
pub mod lookup;

View File

@ -439,97 +439,11 @@ impl<F: RichField + Extendable<D>, const D: usize> Stark<F, D> for MemoryStark<F
#[cfg(test)]
pub(crate) mod tests {
use std::collections::{HashMap, HashSet};
use anyhow::Result;
use ethereum_types::U256;
use plonky2::plonk::config::{GenericConfig, PoseidonGoldilocksConfig};
use rand::prelude::SliceRandom;
use rand::Rng;
use crate::memory::memory_stark::{MemoryOp, MemoryStark};
use crate::memory::segments::Segment;
use crate::memory::NUM_CHANNELS;
use crate::memory::memory_stark::MemoryStark;
use crate::stark_testing::{test_stark_circuit_constraints, test_stark_low_degree};
use crate::witness::memory::MemoryAddress;
use crate::witness::memory::MemoryOpKind::{Read, Write};
pub(crate) fn generate_random_memory_ops<R: Rng>(num_ops: usize, rng: &mut R) -> Vec<MemoryOp> {
let mut memory_ops = Vec::new();
let mut current_memory_values: HashMap<(usize, Segment, usize), U256> = HashMap::new();
let num_cycles = num_ops / 2;
for clock in 0..num_cycles {
let mut used_indices = HashSet::new();
let mut new_writes_this_cycle = HashMap::new();
let mut has_read = false;
for _ in 0..2 {
let mut channel_index = rng.gen_range(0..NUM_CHANNELS);
while used_indices.contains(&channel_index) {
channel_index = rng.gen_range(0..NUM_CHANNELS);
}
used_indices.insert(channel_index);
let is_read = if clock == 0 {
false
} else {
!has_read && rng.gen()
};
has_read = has_read || is_read;
let (context, segment, virt, vals) = if is_read {
let written: Vec<_> = current_memory_values.keys().collect();
let &(mut context, mut segment, mut virt) =
written[rng.gen_range(0..written.len())];
while new_writes_this_cycle.contains_key(&(context, segment, virt)) {
(context, segment, virt) = *written[rng.gen_range(0..written.len())];
}
let &vals = current_memory_values
.get(&(context, segment, virt))
.unwrap();
(context, segment, virt, vals)
} else {
// TODO: with taller memory table or more padding (to enable range-checking bigger diffs),
// test larger address values.
let mut context = rng.gen_range(0..40);
let segments = [Segment::Code, Segment::Stack, Segment::MainMemory];
let mut segment = *segments.choose(rng).unwrap();
let mut virt = rng.gen_range(0..20);
while new_writes_this_cycle.contains_key(&(context, segment, virt)) {
context = rng.gen_range(0..40);
segment = *segments.choose(rng).unwrap();
virt = rng.gen_range(0..20);
}
let val = U256(rng.gen());
new_writes_this_cycle.insert((context, segment, virt), val);
(context, segment, virt, val)
};
let timestamp = clock * NUM_CHANNELS + channel_index;
memory_ops.push(MemoryOp {
filter: true,
timestamp,
address: MemoryAddress {
context,
segment: segment as usize,
virt,
},
kind: if is_read { Read } else { Write },
value: vals,
});
}
for (k, v) in new_writes_this_cycle {
current_memory_values.insert(k, v);
}
}
memory_ops
}
#[test]
fn test_stark_degree() -> Result<()> {

View File

@ -24,7 +24,7 @@ use crate::cpu::cpu_stark::CpuStark;
use crate::cross_table_lookup::{cross_table_lookup_data, CtlCheckVars, CtlData};
use crate::generation::{generate_traces, GenerationInputs};
use crate::keccak::keccak_stark::KeccakStark;
use crate::keccak_memory::keccak_memory_stark::KeccakMemoryStark;
use crate::keccak_sponge::keccak_sponge_stark::KeccakSpongeStark;
use crate::logic::LogicStark;
use crate::memory::memory_stark::MemoryStark;
use crate::permutation::{
@ -49,7 +49,7 @@ where
[(); C::Hasher::HASH_SIZE]:,
[(); CpuStark::<F, D>::COLUMNS]:,
[(); KeccakStark::<F, D>::COLUMNS]:,
[(); KeccakMemoryStark::<F, D>::COLUMNS]:,
[(); KeccakSpongeStark::<F, D>::COLUMNS]:,
[(); LogicStark::<F, D>::COLUMNS]:,
[(); MemoryStark::<F, D>::COLUMNS]:,
{
@ -71,7 +71,7 @@ where
[(); C::Hasher::HASH_SIZE]:,
[(); CpuStark::<F, D>::COLUMNS]:,
[(); KeccakStark::<F, D>::COLUMNS]:,
[(); KeccakMemoryStark::<F, D>::COLUMNS]:,
[(); KeccakSpongeStark::<F, D>::COLUMNS]:,
[(); LogicStark::<F, D>::COLUMNS]:,
[(); MemoryStark::<F, D>::COLUMNS]:,
{
@ -132,12 +132,12 @@ where
&mut challenger,
timing,
)?;
let keccak_memory_proof = prove_single_table(
&all_stark.keccak_memory_stark,
let keccak_sponge_proof = prove_single_table(
&all_stark.keccak_sponge_stark,
config,
&trace_poly_values[Table::KeccakMemory as usize],
&trace_commitments[Table::KeccakMemory as usize],
&ctl_data_per_table[Table::KeccakMemory as usize],
&trace_poly_values[Table::KeccakSponge as usize],
&trace_commitments[Table::KeccakSponge as usize],
&ctl_data_per_table[Table::KeccakSponge as usize],
&mut challenger,
timing,
)?;
@ -163,7 +163,7 @@ where
let stark_proofs = [
cpu_proof,
keccak_proof,
keccak_memory_proof,
keccak_sponge_proof,
logic_proof,
memory_proof,
];

View File

@ -27,7 +27,7 @@ use crate::cross_table_lookup::{
CtlCheckVarsTarget,
};
use crate::keccak::keccak_stark::KeccakStark;
use crate::keccak_memory::keccak_memory_stark::KeccakMemoryStark;
use crate::keccak_sponge::keccak_sponge_stark::KeccakSpongeStark;
use crate::logic::LogicStark;
use crate::memory::memory_stark::MemoryStark;
use crate::permutation::{
@ -332,7 +332,7 @@ pub fn all_verifier_data_recursive_stark_proof<
where
[(); CpuStark::<F, D>::COLUMNS]:,
[(); KeccakStark::<F, D>::COLUMNS]:,
[(); KeccakMemoryStark::<F, D>::COLUMNS]:,
[(); KeccakSpongeStark::<F, D>::COLUMNS]:,
[(); LogicStark::<F, D>::COLUMNS]:,
[(); MemoryStark::<F, D>::COLUMNS]:,
[(); C::Hasher::HASH_SIZE]:,
@ -356,9 +356,9 @@ where
circuit_config,
),
verifier_data_recursive_stark_proof(
Table::KeccakMemory,
all_stark.keccak_memory_stark,
degree_bits[Table::KeccakMemory as usize],
Table::KeccakSponge,
all_stark.keccak_sponge_stark,
degree_bits[Table::KeccakSponge as usize],
&all_stark.cross_table_lookups,
inner_config,
circuit_config,
@ -534,10 +534,10 @@ pub fn add_virtual_all_proof<F: RichField + Extendable<D>, const D: usize>(
),
add_virtual_stark_proof(
builder,
&all_stark.keccak_memory_stark,
&all_stark.keccak_sponge_stark,
config,
degree_bits[Table::KeccakMemory as usize],
nums_ctl_zs[Table::KeccakMemory as usize],
degree_bits[Table::KeccakSponge as usize],
nums_ctl_zs[Table::KeccakSponge as usize],
),
add_virtual_stark_proof(
builder,
@ -853,7 +853,7 @@ pub(crate) mod tests {
use crate::cpu::cpu_stark::CpuStark;
use crate::cross_table_lookup::{CrossTableLookup, CtlCheckVarsTarget};
use crate::keccak::keccak_stark::KeccakStark;
use crate::keccak_memory::keccak_memory_stark::KeccakMemoryStark;
use crate::keccak_sponge::keccak_sponge_stark::KeccakSpongeStark;
use crate::logic::LogicStark;
use crate::memory::memory_stark::MemoryStark;
use crate::permutation::{GrandProductChallenge, GrandProductChallengeSet};
@ -866,6 +866,7 @@ pub(crate) mod tests {
/// Recursively verify a Stark proof.
/// Outputs the recursive proof and the associated verifier data.
#[allow(unused)] // TODO: used later?
fn recursively_verify_stark_proof<
F: RichField + Extendable<D>,
C: GenericConfig<D, F = F>,
@ -965,6 +966,7 @@ pub(crate) mod tests {
}
/// Recursively verify every Stark proof in an `AllProof`.
#[allow(unused)] // TODO: used later?
pub fn recursively_verify_all_proof<
F: RichField + Extendable<D>,
C: GenericConfig<D, F = F>,
@ -978,7 +980,7 @@ pub(crate) mod tests {
where
[(); CpuStark::<F, D>::COLUMNS]:,
[(); KeccakStark::<F, D>::COLUMNS]:,
[(); KeccakMemoryStark::<F, D>::COLUMNS]:,
[(); KeccakSpongeStark::<F, D>::COLUMNS]:,
[(); LogicStark::<F, D>::COLUMNS]:,
[(); MemoryStark::<F, D>::COLUMNS]:,
[(); C::Hasher::HASH_SIZE]:,
@ -1013,9 +1015,9 @@ pub(crate) mod tests {
)?
.0,
recursively_verify_stark_proof(
Table::KeccakMemory,
all_stark.keccak_memory_stark,
&all_proof.stark_proofs[Table::KeccakMemory as usize],
Table::KeccakSponge,
all_stark.keccak_sponge_stark,
&all_proof.stark_proofs[Table::KeccakSponge as usize],
&all_stark.cross_table_lookups,
&ctl_challenges,
states[2],

View File

@ -14,7 +14,7 @@ use crate::constraint_consumer::ConstraintConsumer;
use crate::cpu::cpu_stark::CpuStark;
use crate::cross_table_lookup::{verify_cross_table_lookups, CtlCheckVars};
use crate::keccak::keccak_stark::KeccakStark;
use crate::keccak_memory::keccak_memory_stark::KeccakMemoryStark;
use crate::keccak_sponge::keccak_sponge_stark::KeccakSpongeStark;
use crate::logic::LogicStark;
use crate::memory::memory_stark::MemoryStark;
use crate::permutation::PermutationCheckVars;
@ -33,7 +33,7 @@ pub fn verify_proof<F: RichField + Extendable<D>, C: GenericConfig<D, F = F>, co
where
[(); CpuStark::<F, D>::COLUMNS]:,
[(); KeccakStark::<F, D>::COLUMNS]:,
[(); KeccakMemoryStark::<F, D>::COLUMNS]:,
[(); KeccakSpongeStark::<F, D>::COLUMNS]:,
[(); LogicStark::<F, D>::COLUMNS]:,
[(); MemoryStark::<F, D>::COLUMNS]:,
[(); C::Hasher::HASH_SIZE]:,
@ -48,7 +48,7 @@ where
let AllStark {
cpu_stark,
keccak_stark,
keccak_memory_stark,
keccak_sponge_stark,
logic_stark,
memory_stark,
cross_table_lookups,
@ -76,10 +76,10 @@ where
config,
)?;
verify_stark_proof_with_challenges(
keccak_memory_stark,
&all_proof.stark_proofs[Table::KeccakMemory as usize],
&stark_challenges[Table::KeccakMemory as usize],
&ctl_vars_per_table[Table::KeccakMemory as usize],
keccak_sponge_stark,
&all_proof.stark_proofs[Table::KeccakSponge as usize],
&stark_challenges[Table::KeccakSponge as usize],
&ctl_vars_per_table[Table::KeccakSponge as usize],
config,
)?;
verify_stark_proof_with_challenges(

View File

@ -5,20 +5,16 @@ use plonky2::field::types::Field;
use crate::cpu::columns::CpuColumnsView;
use crate::cpu::kernel::aggregator::KERNEL;
use crate::cpu::kernel::keccak_util::keccakf_u8s;
use crate::cpu::membus::NUM_GP_CHANNELS;
use crate::cpu::simple_logic::eq_iszero::generate_pinv_diff;
use crate::generation::state::GenerationState;
use crate::keccak_memory::columns::KECCAK_WIDTH_BYTES;
use crate::keccak_sponge::columns::KECCAK_RATE_BYTES;
use crate::keccak_sponge::keccak_sponge_stark::KeccakSpongeOp;
use crate::memory::segments::Segment;
use crate::util::u256_saturating_cast_usize;
use crate::witness::errors::ProgramError;
use crate::witness::memory::MemoryAddress;
use crate::witness::util::{
mem_read_code_with_log_and_fill, mem_read_gp_with_log_and_fill, mem_write_gp_log_and_fill,
stack_pop_with_log_and_fill, stack_push_log_and_fill,
keccak_sponge_log, mem_read_code_with_log_and_fill, mem_read_gp_with_log_and_fill,
mem_write_gp_log_and_fill, stack_pop_with_log_and_fill, stack_push_log_and_fill,
};
use crate::{arithmetic, logic};
@ -109,6 +105,7 @@ pub(crate) fn generate_keccak_general<F: Field>(
state: &mut GenerationState<F>,
mut row: CpuColumnsView<F>,
) -> Result<(), ProgramError> {
row.is_keccak_sponge = F::ONE;
let [(context, log_in0), (segment, log_in1), (base_virt, log_in2), (len, log_in3)] =
stack_pop_with_log_and_fill::<4, _>(state, &mut row)?;
let len = len.as_usize();
@ -128,33 +125,7 @@ pub(crate) fn generate_keccak_general<F: Field>(
let hash = keccak(&input);
let log_push = stack_push_log_and_fill(state, &mut row, hash.into_uint())?;
let mut input_blocks = input.chunks_exact(KECCAK_RATE_BYTES);
let mut sponge_state = [0u8; KECCAK_WIDTH_BYTES];
for block in input_blocks.by_ref() {
sponge_state[..KECCAK_RATE_BYTES].copy_from_slice(block);
state.traces.push_keccak_bytes(sponge_state);
keccakf_u8s(&mut sponge_state);
}
let final_inputs = input_blocks.remainder();
sponge_state[..final_inputs.len()].copy_from_slice(final_inputs);
// pad10*1 rule
sponge_state[final_inputs.len()..KECCAK_RATE_BYTES].fill(0);
if final_inputs.len() == KECCAK_RATE_BYTES - 1 {
// Both 1s are placed in the same byte.
sponge_state[final_inputs.len()] = 0b10000001;
} else {
sponge_state[final_inputs.len()] = 1;
sponge_state[KECCAK_RATE_BYTES - 1] = 0b10000000;
}
state.traces.push_keccak_bytes(sponge_state);
state.traces.push_keccak_sponge(KeccakSpongeOp {
base_address,
timestamp: state.traces.clock(),
len,
input,
});
keccak_sponge_log(state, base_address, input);
state.traces.push_memory(log_in0);
state.traces.push_memory(log_in1);
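// A small worked sketch of the block splitting that keccak_sponge_log (called
// above, defined in witness/util.rs) performs on the input: chunks_exact yields
// only full rate-sized blocks, and the remainder is what goes through the
// pad10*1 branch. The rate is re-stated locally as 136 bytes (the Keccak-256
// rate) purely for illustration.
#[test]
fn sketch_block_splitting() {
    const RATE: usize = 136;
    let input = vec![0u8; 300];
    let mut blocks = input.chunks_exact(RATE);
    assert_eq!(blocks.by_ref().count(), 2); // two full blocks get absorbed
    assert_eq!(blocks.remainder().len(), 300 - 2 * RATE); // 28 trailing bytes get padded
}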

View File

@ -9,8 +9,7 @@ use plonky2::util::timing::TimingTree;
use crate::all_stark::{AllStark, NUM_TABLES};
use crate::config::StarkConfig;
use crate::cpu::columns::CpuColumnsView;
use crate::keccak_memory::columns::KECCAK_WIDTH_BYTES;
use crate::keccak_memory::keccak_memory_stark::KeccakMemoryOp;
use crate::keccak_sponge::columns::KECCAK_WIDTH_BYTES;
use crate::keccak_sponge::keccak_sponge_stark::KeccakSpongeOp;
use crate::util::trace_rows_to_poly_values;
use crate::witness::memory::MemoryOp;
@ -31,7 +30,6 @@ pub(crate) struct Traces<T: Copy> {
pub(crate) arithmetic: Vec<arithmetic::Operation>,
pub(crate) memory_ops: Vec<MemoryOp>,
pub(crate) keccak_inputs: Vec<[u64; keccak::keccak_stark::NUM_INPUTS]>,
pub(crate) keccak_memory_inputs: Vec<KeccakMemoryOp>,
pub(crate) keccak_sponge_ops: Vec<KeccakSpongeOp>,
}
@ -43,7 +41,6 @@ impl<T: Copy> Traces<T> {
arithmetic: vec![],
memory_ops: vec![],
keccak_inputs: vec![],
keccak_memory_inputs: vec![],
keccak_sponge_ops: vec![],
}
}
@ -124,8 +121,7 @@ impl<T: Copy> Traces<T> {
arithmetic: _, // TODO
memory_ops,
keccak_inputs,
keccak_memory_inputs,
keccak_sponge_ops: _, // TODO
keccak_sponge_ops,
} = self;
let cpu_rows = cpu.into_iter().map(|x| x.into()).collect();
@ -134,11 +130,10 @@ impl<T: Copy> Traces<T> {
all_stark
.keccak_stark
.generate_trace(keccak_inputs, cap_elements, timing);
let keccak_memory_trace = all_stark.keccak_memory_stark.generate_trace(
keccak_memory_inputs,
cap_elements,
timing,
);
let keccak_sponge_trace =
all_stark
.keccak_sponge_stark
.generate_trace(keccak_sponge_ops, cap_elements, timing);
let logic_trace = all_stark
.logic_stark
.generate_trace(logic_ops, cap_elements, timing);
@ -147,7 +142,7 @@ impl<T: Copy> Traces<T> {
[
cpu_trace,
keccak_trace,
keccak_memory_trace,
keccak_sponge_trace,
logic_trace,
memory_trace,
]

View File

@ -225,6 +225,7 @@ fn perform_op<F: Field>(
fn try_perform_instruction<F: Field>(state: &mut GenerationState<F>) -> Result<(), ProgramError> {
let mut row: CpuColumnsView<F> = CpuColumnsView::default();
row.is_cpu_cycle = F::ONE;
row.clock = F::from_canonical_usize(state.traces.clock());
row.context = F::from_canonical_usize(state.registers.context);
row.program_counter = F::from_canonical_usize(state.registers.program_counter);
row.is_kernel_mode = F::from_bool(state.registers.is_kernel);

View File

@ -2,9 +2,12 @@ use ethereum_types::U256;
use plonky2::field::types::Field;
use crate::cpu::columns::CpuColumnsView;
use crate::cpu::kernel::keccak_util::keccakf_u8s;
use crate::cpu::membus::NUM_GP_CHANNELS;
use crate::cpu::stack_bounds::MAX_USER_STACK_SIZE;
use crate::generation::state::GenerationState;
use crate::keccak_sponge::columns::{KECCAK_RATE_BYTES, KECCAK_WIDTH_BYTES};
use crate::keccak_sponge::keccak_sponge_stark::KeccakSpongeOp;
use crate::memory::segments::Segment;
use crate::witness::errors::ProgramError;
use crate::witness::memory::{MemoryAddress, MemoryChannel, MemoryOp, MemoryOpKind};
@ -170,3 +173,40 @@ pub(crate) fn stack_push_log_and_fill<F: Field>(
Ok(res)
}
pub(crate) fn keccak_sponge_log<F: Field>(
state: &mut GenerationState<F>,
base_address: MemoryAddress,
input: Vec<u8>,
) {
let mut input_blocks = input.chunks_exact(KECCAK_RATE_BYTES);
let mut sponge_state = [0u8; KECCAK_WIDTH_BYTES];
for block in input_blocks.by_ref() {
sponge_state[..KECCAK_RATE_BYTES].copy_from_slice(block);
state.traces.push_keccak_bytes(sponge_state);
// TODO: Also push logic rows for XORs.
// TODO: Also push memory read rows.
keccakf_u8s(&mut sponge_state);
}
let final_inputs = input_blocks.remainder();
sponge_state[..final_inputs.len()].copy_from_slice(final_inputs);
// pad10*1 rule
sponge_state[final_inputs.len()..KECCAK_RATE_BYTES].fill(0);
if final_inputs.len() == KECCAK_RATE_BYTES - 1 {
// Both 1s are placed in the same byte.
sponge_state[final_inputs.len()] = 0b10000001;
} else {
sponge_state[final_inputs.len()] = 1;
sponge_state[KECCAK_RATE_BYTES - 1] = 0b10000000;
}
state.traces.push_keccak_bytes(sponge_state);
// TODO: Also push logic rows for XORs.
// TODO: Also push memory read rows.
state.traces.push_keccak_sponge(KeccakSpongeOp {
base_address,
timestamp: state.traces.clock(),
input,
});
}
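// A minimal, self-contained sketch of the pad10*1 behaviour above, assuming the
// usual Keccak-256 parameters (rate = 136 bytes). The constant and helper are
// illustrative only, not the crate's API.
#[cfg(test)]
mod pad_sketch {
    const RATE: usize = 136;

    // Builds the final rate-sized block from the leftover input bytes, mirroring
    // the two branches in keccak_sponge_log: when the remainder fills all but one
    // byte, both padding bits share that last byte; otherwise they land in
    // separate bytes.
    fn pad_last_block(remainder: &[u8]) -> [u8; RATE] {
        let mut block = [0u8; RATE];
        block[..remainder.len()].copy_from_slice(remainder);
        if remainder.len() == RATE - 1 {
            block[remainder.len()] = 0b1000_0001;
        } else {
            block[remainder.len()] = 1;
            block[RATE - 1] = 0b1000_0000;
        }
        block
    }

    #[test]
    fn padding_edge_cases() {
        // Empty remainder: leading 1 in the first byte, final 1 in the last byte.
        let b = pad_last_block(&[]);
        assert_eq!((b[0], b[RATE - 1]), (0x01, 0x80));

        // Remainder of RATE - 1 bytes: both padding bits collapse into 0x81.
        let b = pad_last_block(&[0xaa; RATE - 1]);
        assert_eq!(b[RATE - 1], 0x81);
    }
}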