Reduce visibility for a bunch of structs and methods in EVM crate (#1289)

* Reduce visibility for a bunch of structs and methods

* Remove redundant
This commit is contained in:
Robin Salen 2023-11-13 09:26:56 -05:00 committed by GitHub
parent 5800e6ad64
commit 88fcc32983
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
52 changed files with 278 additions and 265 deletions

View File

@ -26,14 +26,14 @@ use crate::stark::Stark;
/// Structure containing all STARKs and the cross-table lookups.
#[derive(Clone)]
pub struct AllStark<F: RichField + Extendable<D>, const D: usize> {
pub arithmetic_stark: ArithmeticStark<F, D>,
pub byte_packing_stark: BytePackingStark<F, D>,
pub cpu_stark: CpuStark<F, D>,
pub keccak_stark: KeccakStark<F, D>,
pub keccak_sponge_stark: KeccakSpongeStark<F, D>,
pub logic_stark: LogicStark<F, D>,
pub memory_stark: MemoryStark<F, D>,
pub cross_table_lookups: Vec<CrossTableLookup<F>>,
pub(crate) arithmetic_stark: ArithmeticStark<F, D>,
pub(crate) byte_packing_stark: BytePackingStark<F, D>,
pub(crate) cpu_stark: CpuStark<F, D>,
pub(crate) keccak_stark: KeccakStark<F, D>,
pub(crate) keccak_sponge_stark: KeccakSpongeStark<F, D>,
pub(crate) logic_stark: LogicStark<F, D>,
pub(crate) memory_stark: MemoryStark<F, D>,
pub(crate) cross_table_lookups: Vec<CrossTableLookup<F>>,
}
impl<F: RichField + Extendable<D>, const D: usize> Default for AllStark<F, D> {

View File

@ -116,7 +116,7 @@ pub(crate) fn ctl_arithmetic_rows<F: Field>() -> TableWithColumns<F> {
/// Structure representing the `Arithmetic` STARK, which carries out all the arithmetic operations.
#[derive(Copy, Clone, Default)]
pub struct ArithmeticStark<F, const D: usize> {
pub(crate) struct ArithmeticStark<F, const D: usize> {
pub f: PhantomData<F>,
}

View File

@ -91,7 +91,7 @@ pub(crate) fn ctl_looked_data<F: Field>() -> Vec<Column<F>> {
}
/// CTL filter for the `BytePackingStark` looked table.
pub fn ctl_looked_filter<F: Field>() -> Column<F> {
pub(crate) fn ctl_looked_filter<F: Field>() -> Column<F> {
// The CPU table is only interested in our sequence end rows,
// since those contain the final limbs of our packed int.
Column::single(SEQUENCE_END)
@ -136,7 +136,7 @@ pub(crate) struct BytePackingOp {
}
#[derive(Copy, Clone, Default)]
pub struct BytePackingStark<F, const D: usize> {
pub(crate) struct BytePackingStark<F, const D: usize> {
pub(crate) f: PhantomData<F>,
}

View File

@ -29,7 +29,7 @@ pub struct ConstraintConsumer<P: PackedField> {
}
impl<P: PackedField> ConstraintConsumer<P> {
pub fn new(
pub(crate) fn new(
alphas: Vec<P::Scalar>,
z_last: P,
lagrange_basis_first: P,
@ -44,17 +44,17 @@ impl<P: PackedField> ConstraintConsumer<P> {
}
}
pub fn accumulators(self) -> Vec<P> {
pub(crate) fn accumulators(self) -> Vec<P> {
self.constraint_accs
}
/// Add one constraint valid on all rows except the last.
pub fn constraint_transition(&mut self, constraint: P) {
pub(crate) fn constraint_transition(&mut self, constraint: P) {
self.constraint(constraint * self.z_last);
}
/// Add one constraint on all rows.
pub fn constraint(&mut self, constraint: P) {
pub(crate) fn constraint(&mut self, constraint: P) {
for (&alpha, acc) in self.alphas.iter().zip(&mut self.constraint_accs) {
*acc *= alpha;
*acc += constraint;
@ -63,13 +63,13 @@ impl<P: PackedField> ConstraintConsumer<P> {
/// Add one constraint, but first multiply it by a filter such that it will only apply to the
/// first row of the trace.
pub fn constraint_first_row(&mut self, constraint: P) {
pub(crate) fn constraint_first_row(&mut self, constraint: P) {
self.constraint(constraint * self.lagrange_basis_first);
}
/// Add one constraint, but first multiply it by a filter such that it will only apply to the
/// last row of the trace.
pub fn constraint_last_row(&mut self, constraint: P) {
pub(crate) fn constraint_last_row(&mut self, constraint: P) {
self.constraint(constraint * self.lagrange_basis_last);
}
}
@ -96,7 +96,7 @@ pub struct RecursiveConstraintConsumer<F: RichField + Extendable<D>, const D: us
}
impl<F: RichField + Extendable<D>, const D: usize> RecursiveConstraintConsumer<F, D> {
pub fn new(
pub(crate) fn new(
zero: ExtensionTarget<D>,
alphas: Vec<Target>,
z_last: ExtensionTarget<D>,
@ -113,12 +113,12 @@ impl<F: RichField + Extendable<D>, const D: usize> RecursiveConstraintConsumer<F
}
}
pub fn accumulators(self) -> Vec<ExtensionTarget<D>> {
pub(crate) fn accumulators(self) -> Vec<ExtensionTarget<D>> {
self.constraint_accs
}
/// Add one constraint valid on all rows except the last.
pub fn constraint_transition(
pub(crate) fn constraint_transition(
&mut self,
builder: &mut CircuitBuilder<F, D>,
constraint: ExtensionTarget<D>,
@ -128,7 +128,7 @@ impl<F: RichField + Extendable<D>, const D: usize> RecursiveConstraintConsumer<F
}
/// Add one constraint valid on all rows.
pub fn constraint(
pub(crate) fn constraint(
&mut self,
builder: &mut CircuitBuilder<F, D>,
constraint: ExtensionTarget<D>,
@ -140,7 +140,7 @@ impl<F: RichField + Extendable<D>, const D: usize> RecursiveConstraintConsumer<F
/// Add one constraint, but first multiply it by a filter such that it will only apply to the
/// first row of the trace.
pub fn constraint_first_row(
pub(crate) fn constraint_first_row(
&mut self,
builder: &mut CircuitBuilder<F, D>,
constraint: ExtensionTarget<D>,
@ -151,7 +151,7 @@ impl<F: RichField + Extendable<D>, const D: usize> RecursiveConstraintConsumer<F
/// Add one constraint, but first multiply it by a filter such that it will only apply to the
/// last row of the trace.
pub fn constraint_last_row(
pub(crate) fn constraint_last_row(
&mut self,
builder: &mut CircuitBuilder<F, D>,
constraint: ExtensionTarget<D>,

View File

@ -21,7 +21,7 @@ pub type MemValue<T> = [T; memory::VALUE_LIMBS];
/// View of the columns required for one memory channel.
#[repr(C)]
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
pub struct MemoryChannelView<T: Copy> {
pub(crate) struct MemoryChannelView<T: Copy> {
/// 1 if this row includes a memory operation in the `i`th channel of the memory bus, otherwise
/// 0.
pub used: T,
@ -40,7 +40,7 @@ pub struct MemoryChannelView<T: Copy> {
/// View of all the columns in `CpuStark`.
#[repr(C)]
#[derive(Clone, Copy, Eq, PartialEq, Debug)]
pub struct CpuColumnsView<T: Copy> {
pub(crate) struct CpuColumnsView<T: Copy> {
/// Filter. 1 if the row is part of bootstrapping the kernel code, 0 otherwise.
pub is_bootstrap_kernel: T,

View File

@ -7,7 +7,7 @@ use crate::util::transmute_no_compile_time_size_checks;
/// Structure representing the flags for the various opcodes.
#[repr(C)]
#[derive(Clone, Copy, Eq, PartialEq, Debug)]
pub struct OpsColumnsView<T: Copy> {
pub(crate) struct OpsColumnsView<T: Copy> {
/// Combines ADD, MUL, SUB, DIV, MOD, LT, GT and BYTE flags.
pub binary_op: T,
/// Combines ADDMOD, MULMOD and SUBMOD flags.

View File

@ -337,7 +337,7 @@ fn eval_ext_circuit_set<F: RichField + Extendable<D>, const D: usize>(
}
/// Evaluates the constraints for the GET and SET opcodes.
pub fn eval_packed<P: PackedField>(
pub(crate) fn eval_packed<P: PackedField>(
lv: &CpuColumnsView<P>,
nv: &CpuColumnsView<P>,
yield_constr: &mut ConstraintConsumer<P>,
@ -376,7 +376,7 @@ pub fn eval_packed<P: PackedField>(
/// Circuit version of `eval_packed`.
/// Evaluates the constraints for the GET and SET opcodes.
pub fn eval_ext_circuit<F: RichField + Extendable<D>, const D: usize>(
pub(crate) fn eval_ext_circuit<F: RichField + Extendable<D>, const D: usize>(
builder: &mut CircuitBuilder<F, D>,
lv: &CpuColumnsView<ExtensionTarget<D>>,
nv: &CpuColumnsView<ExtensionTarget<D>>,

View File

@ -43,7 +43,7 @@ pub(crate) fn get_start_pc<F: Field>() -> F {
}
/// Evaluates the constraints related to the flow of instructions.
pub fn eval_packed_generic<P: PackedField>(
pub(crate) fn eval_packed_generic<P: PackedField>(
lv: &CpuColumnsView<P>,
nv: &CpuColumnsView<P>,
yield_constr: &mut ConstraintConsumer<P>,
@ -83,7 +83,7 @@ pub fn eval_packed_generic<P: PackedField>(
/// Circuit version of `eval_packed`.
/// Evaluates the constraints related to the flow of instructions.
pub fn eval_ext_circuit<F: RichField + Extendable<D>, const D: usize>(
pub(crate) fn eval_ext_circuit<F: RichField + Extendable<D>, const D: usize>(
builder: &mut plonky2::plonk::circuit_builder::CircuitBuilder<F, D>,
lv: &CpuColumnsView<ExtensionTarget<D>>,
nv: &CpuColumnsView<ExtensionTarget<D>>,

View File

@ -26,7 +26,7 @@ use crate::stark::Stark;
/// Creates the vector of `Columns` corresponding to the General Purpose channels when calling the Keccak sponge:
/// the CPU reads the output of the sponge directly from the `KeccakSpongeStark` table.
pub fn ctl_data_keccak_sponge<F: Field>() -> Vec<Column<F>> {
pub(crate) fn ctl_data_keccak_sponge<F: Field>() -> Vec<Column<F>> {
// When executing KECCAK_GENERAL, the GP memory channels are used as follows:
// GP channel 0: stack[-1] = context
// GP channel 1: stack[-2] = segment
@ -47,7 +47,7 @@ pub fn ctl_data_keccak_sponge<F: Field>() -> Vec<Column<F>> {
}
/// CTL filter for a call to the Keccak sponge.
pub fn ctl_filter_keccak_sponge<F: Field>() -> Column<F> {
pub(crate) fn ctl_filter_keccak_sponge<F: Field>() -> Column<F> {
Column::single(COL_MAP.is_keccak_sponge)
}
@ -73,7 +73,7 @@ fn ctl_data_ternops<F: Field>() -> Vec<Column<F>> {
}
/// Creates the vector of columns corresponding to the opcode, the two inputs and the output of the logic operation.
pub fn ctl_data_logic<F: Field>() -> Vec<Column<F>> {
pub(crate) fn ctl_data_logic<F: Field>() -> Vec<Column<F>> {
// Instead of taking single columns, we reconstruct the entire opcode value directly.
let mut res = vec![Column::le_bits(COL_MAP.opcode_bits)];
res.extend(ctl_data_binops());
@ -81,12 +81,12 @@ pub fn ctl_data_logic<F: Field>() -> Vec<Column<F>> {
}
/// CTL filter for logic operations.
pub fn ctl_filter_logic<F: Field>() -> Column<F> {
pub(crate) fn ctl_filter_logic<F: Field>() -> Column<F> {
Column::single(COL_MAP.op.logic_op)
}
/// Returns the `TableWithColumns` for the CPU rows calling arithmetic operations.
pub fn ctl_arithmetic_base_rows<F: Field>() -> TableWithColumns<F> {
pub(crate) fn ctl_arithmetic_base_rows<F: Field>() -> TableWithColumns<F> {
// Instead of taking single columns, we reconstruct the entire opcode value directly.
let mut columns = vec![Column::le_bits(COL_MAP.opcode_bits)];
columns.extend(ctl_data_ternops());
@ -112,17 +112,17 @@ pub fn ctl_arithmetic_base_rows<F: Field>() -> TableWithColumns<F> {
/// Creates the vector of `Columns` corresponding to the contents of General Purpose channels when calling byte packing.
/// We use `ctl_data_keccak_sponge` because the `Columns` are the same as the ones computed for `KeccakSpongeStark`.
pub fn ctl_data_byte_packing<F: Field>() -> Vec<Column<F>> {
pub(crate) fn ctl_data_byte_packing<F: Field>() -> Vec<Column<F>> {
ctl_data_keccak_sponge()
}
/// CTL filter for the `MLOAD_32BYTES` operation.
pub fn ctl_filter_byte_packing<F: Field>() -> Column<F> {
pub(crate) fn ctl_filter_byte_packing<F: Field>() -> Column<F> {
Column::single(COL_MAP.op.mload_32bytes)
}
/// Creates the vector of `Columns` corresponding to the contents of General Purpose channels when calling byte unpacking.
pub fn ctl_data_byte_unpacking<F: Field>() -> Vec<Column<F>> {
pub(crate) fn ctl_data_byte_unpacking<F: Field>() -> Vec<Column<F>> {
// When executing MSTORE_32BYTES, the GP memory channels are used as follows:
// GP channel 0: stack[-1] = context
// GP channel 1: stack[-2] = segment
@ -145,7 +145,7 @@ pub fn ctl_data_byte_unpacking<F: Field>() -> Vec<Column<F>> {
}
/// CTL filter for the `MSTORE_32BYTES` operation.
pub fn ctl_filter_byte_unpacking<F: Field>() -> Column<F> {
pub(crate) fn ctl_filter_byte_unpacking<F: Field>() -> Column<F> {
Column::single(COL_MAP.op.mstore_32bytes)
}
@ -162,7 +162,7 @@ fn mem_time_and_channel<F: Field>(channel: usize) -> Column<F> {
}
/// Creates the vector of `Columns` corresponding to the contents of the code channel when reading code values.
pub fn ctl_data_code_memory<F: Field>() -> Vec<Column<F>> {
pub(crate) fn ctl_data_code_memory<F: Field>() -> Vec<Column<F>> {
let mut cols = vec![
Column::constant(F::ONE), // is_read
Column::single(COL_MAP.code_context), // addr_context
@ -182,7 +182,7 @@ pub fn ctl_data_code_memory<F: Field>() -> Vec<Column<F>> {
}
/// Creates the vector of `Columns` corresponding to the contents of General Purpose channels.
pub fn ctl_data_gp_memory<F: Field>(channel: usize) -> Vec<Column<F>> {
pub(crate) fn ctl_data_gp_memory<F: Field>(channel: usize) -> Vec<Column<F>> {
let channel_map = COL_MAP.mem_channels[channel];
let mut cols: Vec<_> = Column::singles([
channel_map.is_read,
@ -200,18 +200,18 @@ pub fn ctl_data_gp_memory<F: Field>(channel: usize) -> Vec<Column<F>> {
}
/// CTL filter for code read and write operations.
pub fn ctl_filter_code_memory<F: Field>() -> Column<F> {
pub(crate) fn ctl_filter_code_memory<F: Field>() -> Column<F> {
Column::sum(COL_MAP.op.iter())
}
/// CTL filter for General Purpose memory read and write operations.
pub fn ctl_filter_gp_memory<F: Field>(channel: usize) -> Column<F> {
pub(crate) fn ctl_filter_gp_memory<F: Field>(channel: usize) -> Column<F> {
Column::single(COL_MAP.mem_channels[channel].used)
}
/// Structure representing the CPU Stark.
#[derive(Copy, Clone, Default)]
pub struct CpuStark<F, const D: usize> {
pub(crate) struct CpuStark<F, const D: usize> {
pub f: PhantomData<F>,
}

View File

@ -74,7 +74,7 @@ const fn bits_from_opcode(opcode: u8) -> [bool; 8] {
}
/// Evaluates the constraints for opcode decoding.
pub fn eval_packed_generic<P: PackedField>(
pub(crate) fn eval_packed_generic<P: PackedField>(
lv: &CpuColumnsView<P>,
yield_constr: &mut ConstraintConsumer<P>,
) {
@ -181,7 +181,7 @@ pub fn eval_packed_generic<P: PackedField>(
/// Circuit version of `eval_packed_generic`.
/// Evaluates the constraints for opcode decoding.
pub fn eval_ext_circuit<F: RichField + Extendable<D>, const D: usize>(
pub(crate) fn eval_ext_circuit<F: RichField + Extendable<D>, const D: usize>(
builder: &mut plonky2::plonk::circuit_builder::CircuitBuilder<F, D>,
lv: &CpuColumnsView<ExtensionTarget<D>>,
yield_constr: &mut RecursiveConstraintConsumer<F, D>,

View File

@ -323,7 +323,7 @@ fn eval_ext_circuit_swap<F: RichField + Extendable<D>, const D: usize>(
}
/// Evaluates the constraints for the DUP and SWAP opcodes.
pub fn eval_packed<P: PackedField>(
pub(crate) fn eval_packed<P: PackedField>(
lv: &CpuColumnsView<P>,
nv: &CpuColumnsView<P>,
yield_constr: &mut ConstraintConsumer<P>,
@ -339,7 +339,7 @@ pub fn eval_packed<P: PackedField>(
/// Circuit version of `eval_packed`.
/// Evaluates the constraints for the DUP and SWAP opcodes.
pub fn eval_ext_circuit<F: RichField + Extendable<D>, const D: usize>(
pub(crate) fn eval_ext_circuit<F: RichField + Extendable<D>, const D: usize>(
builder: &mut CircuitBuilder<F, D>,
lv: &CpuColumnsView<ExtensionTarget<D>>,
nv: &CpuColumnsView<ExtensionTarget<D>>,

View File

@ -134,7 +134,7 @@ fn eval_packed_init<P: PackedField>(
}
/// Evaluate the gas constraints for the opcodes that cost a constant gas.
pub fn eval_packed<P: PackedField>(
pub(crate) fn eval_packed<P: PackedField>(
lv: &CpuColumnsView<P>,
nv: &CpuColumnsView<P>,
yield_constr: &mut ConstraintConsumer<P>,
@ -300,7 +300,7 @@ fn eval_ext_circuit_init<F: RichField + Extendable<D>, const D: usize>(
/// Circuit version of `eval_packed`.
/// Evaluate the gas constraints for the opcodes that cost a constant gas.
pub fn eval_ext_circuit<F: RichField + Extendable<D>, const D: usize>(
pub(crate) fn eval_ext_circuit<F: RichField + Extendable<D>, const D: usize>(
builder: &mut plonky2::plonk::circuit_builder::CircuitBuilder<F, D>,
lv: &CpuColumnsView<ExtensionTarget<D>>,
nv: &CpuColumnsView<ExtensionTarget<D>>,

View File

@ -12,7 +12,7 @@ use crate::cpu::columns::{CpuColumnsView, COL_MAP};
use crate::cpu::membus::NUM_GP_CHANNELS;
/// Evaluates constraints for the `halt` flag.
pub fn eval_packed<P: PackedField>(
pub(crate) fn eval_packed<P: PackedField>(
lv: &CpuColumnsView<P>,
nv: &CpuColumnsView<P>,
yield_constr: &mut ConstraintConsumer<P>,
@ -48,7 +48,7 @@ pub fn eval_packed<P: PackedField>(
/// Circuit version of `eval_packed`.
/// Evaluates constraints for the `halt` flag.
pub fn eval_ext_circuit<F: RichField + Extendable<D>, const D: usize>(
pub(crate) fn eval_ext_circuit<F: RichField + Extendable<D>, const D: usize>(
builder: &mut plonky2::plonk::circuit_builder::CircuitBuilder<F, D>,
lv: &CpuColumnsView<ExtensionTarget<D>>,
nv: &CpuColumnsView<ExtensionTarget<D>>,

View File

@ -10,7 +10,7 @@ use crate::cpu::membus::NUM_GP_CHANNELS;
use crate::memory::segments::Segment;
/// Evaluates constraints for EXIT_KERNEL.
pub fn eval_packed_exit_kernel<P: PackedField>(
pub(crate) fn eval_packed_exit_kernel<P: PackedField>(
lv: &CpuColumnsView<P>,
nv: &CpuColumnsView<P>,
yield_constr: &mut ConstraintConsumer<P>,
@ -29,7 +29,7 @@ pub fn eval_packed_exit_kernel<P: PackedField>(
/// Circuit version of `eval_packed_exit_kernel`.
/// Evaluates constraints for EXIT_KERNEL.
pub fn eval_ext_circuit_exit_kernel<F: RichField + Extendable<D>, const D: usize>(
pub(crate) fn eval_ext_circuit_exit_kernel<F: RichField + Extendable<D>, const D: usize>(
builder: &mut plonky2::plonk::circuit_builder::CircuitBuilder<F, D>,
lv: &CpuColumnsView<ExtensionTarget<D>>,
nv: &CpuColumnsView<ExtensionTarget<D>>,
@ -63,7 +63,7 @@ pub fn eval_ext_circuit_exit_kernel<F: RichField + Extendable<D>, const D: usize
}
/// Evaluates constraints jump operations: JUMP and JUMPI.
pub fn eval_packed_jump_jumpi<P: PackedField>(
pub(crate) fn eval_packed_jump_jumpi<P: PackedField>(
lv: &CpuColumnsView<P>,
nv: &CpuColumnsView<P>,
yield_constr: &mut ConstraintConsumer<P>,
@ -162,7 +162,7 @@ pub fn eval_packed_jump_jumpi<P: PackedField>(
/// Circuit version of `eval_packed_jumpi_jumpi`.
/// Evaluates constraints jump operations: JUMP and JUMPI.
pub fn eval_ext_circuit_jump_jumpi<F: RichField + Extendable<D>, const D: usize>(
pub(crate) fn eval_ext_circuit_jump_jumpi<F: RichField + Extendable<D>, const D: usize>(
builder: &mut plonky2::plonk::circuit_builder::CircuitBuilder<F, D>,
lv: &CpuColumnsView<ExtensionTarget<D>>,
nv: &CpuColumnsView<ExtensionTarget<D>>,
@ -360,7 +360,7 @@ pub fn eval_ext_circuit_jump_jumpi<F: RichField + Extendable<D>, const D: usize>
}
/// Evaluates constraints for EXIT_KERNEL, JUMP and JUMPI.
pub fn eval_packed<P: PackedField>(
pub(crate) fn eval_packed<P: PackedField>(
lv: &CpuColumnsView<P>,
nv: &CpuColumnsView<P>,
yield_constr: &mut ConstraintConsumer<P>,
@ -371,7 +371,7 @@ pub fn eval_packed<P: PackedField>(
/// Circuit version of `eval_packed`.
/// Evaluates constraints for EXIT_KERNEL, JUMP and JUMPI.
pub fn eval_ext_circuit<F: RichField + Extendable<D>, const D: usize>(
pub(crate) fn eval_ext_circuit<F: RichField + Extendable<D>, const D: usize>(
builder: &mut plonky2::plonk::circuit_builder::CircuitBuilder<F, D>,
lv: &CpuColumnsView<ExtensionTarget<D>>,
nv: &CpuColumnsView<ExtensionTarget<D>>,

View File

@ -1,4 +1,3 @@
#[allow(dead_code)]
#[derive(Copy, Clone, Eq, PartialEq, Hash, Ord, PartialOrd, Debug)]
pub(crate) enum JournalEntry {
AccountLoaded = 0,

View File

@ -18,7 +18,7 @@ pub(crate) mod trie_type;
pub(crate) mod txn_fields;
/// Constants that are accessible to our kernel assembly code.
pub fn evm_constants() -> HashMap<String, U256> {
pub(crate) fn evm_constants() -> HashMap<String, U256> {
let mut c = HashMap::new();
let hex_constants = MISC_CONSTANTS

View File

@ -1,5 +1,4 @@
/// These are normalized transaction fields, i.e. not specific to any transaction type.
#[allow(dead_code)]
#[derive(Copy, Clone, Eq, PartialEq, Hash, Ord, PartialOrd, Debug)]
pub(crate) enum NormalizedTxnField {
/// Whether a chain ID was present in the txn data. Type 0 transaction with v=27 or v=28 have

View File

@ -42,7 +42,7 @@ impl MemoryState {
}
}
pub struct Interpreter<'a> {
pub(crate) struct Interpreter<'a> {
pub(crate) kernel_mode: bool,
jumpdests: Vec<usize>,
pub(crate) context: usize,
@ -54,7 +54,7 @@ pub struct Interpreter<'a> {
opcode_count: [usize; 0x100],
}
pub fn run_interpreter(
pub(crate) fn run_interpreter(
initial_offset: usize,
initial_stack: Vec<U256>,
) -> anyhow::Result<Interpreter<'static>> {
@ -67,14 +67,14 @@ pub fn run_interpreter(
}
#[derive(Clone)]
pub struct InterpreterMemoryInitialization {
pub(crate) struct InterpreterMemoryInitialization {
pub label: String,
pub stack: Vec<U256>,
pub segment: Segment,
pub memory: Vec<(usize, Vec<U256>)>,
}
pub fn run_interpreter_with_memory(
pub(crate) fn run_interpreter_with_memory(
memory_init: InterpreterMemoryInitialization,
) -> anyhow::Result<Interpreter<'static>> {
let label = KERNEL.global_labels[&memory_init.label];
@ -93,7 +93,7 @@ pub fn run_interpreter_with_memory(
Ok(interpreter)
}
pub fn run<'a>(
pub(crate) fn run<'a>(
code: &'a [u8],
initial_offset: usize,
initial_stack: Vec<U256>,
@ -293,7 +293,7 @@ impl<'a> Interpreter<'a> {
.content
}
pub fn extract_kernel_memory(self, segment: Segment, range: Range<usize>) -> Vec<U256> {
pub(crate) fn extract_kernel_memory(self, segment: Segment, range: Range<usize>) -> Vec<U256> {
let mut output: Vec<U256> = vec![];
for i in range {
let term = self

View File

@ -10,7 +10,7 @@ use crate::cpu::kernel::ast::{File, Item, PushTarget, StackReplacement};
/// Parses EVM assembly code.
#[derive(pest_derive::Parser)]
#[grammar = "cpu/kernel/evm_asm.pest"]
pub struct AsmParser;
struct AsmParser;
pub(crate) fn parse(s: &str) -> File {
let file = AsmParser::parse(Rule::file, s)

View File

@ -58,7 +58,7 @@ fn apply_perm<T: Eq + Hash + Clone>(permutation: Vec<Vec<usize>>, mut lst: Vec<T
/// This function does STEP 1.
/// Given 2 lists A, B find a permutation P such that P . A = B.
pub fn find_permutation<T: Eq + Hash + Clone>(lst_a: &[T], lst_b: &[T]) -> Vec<Vec<usize>> {
pub(crate) fn find_permutation<T: Eq + Hash + Clone>(lst_a: &[T], lst_b: &[T]) -> Vec<Vec<usize>> {
// We should check to ensure that A and B are indeed rearrangements of each other.
assert!(is_permutation(lst_a, lst_b));
@ -210,7 +210,7 @@ fn transpositions_to_stack_ops(trans: Vec<usize>) -> Vec<StackOp> {
trans.into_iter().map(|i| StackOp::Swap(i as u8)).collect()
}
pub fn is_permutation<T: Eq + Hash + Clone>(a: &[T], b: &[T]) -> bool {
pub(crate) fn is_permutation<T: Eq + Hash + Clone>(a: &[T], b: &[T]) -> bool {
make_multiset(a) == make_multiset(b)
}

View File

@ -588,7 +588,7 @@ fn test_bloom_two_logs() -> Result<()> {
Ok(())
}
pub fn logs_bloom_bytes_fn(logs_list: Vec<(Vec<u8>, Vec<Vec<u8>>)>) -> [u8; 256] {
fn logs_bloom_bytes_fn(logs_list: Vec<(Vec<u8>, Vec<Vec<u8>>)>) -> [u8; 256] {
// The first element of logs_list.
let mut bloom = [0_u8; 256];

View File

@ -33,7 +33,7 @@ pub mod channel_indices {
pub const NUM_CHANNELS: usize = channel_indices::GP.end;
/// Evaluates constraints regarding the membus.
pub fn eval_packed<P: PackedField>(
pub(crate) fn eval_packed<P: PackedField>(
lv: &CpuColumnsView<P>,
yield_constr: &mut ConstraintConsumer<P>,
) {
@ -51,7 +51,7 @@ pub fn eval_packed<P: PackedField>(
/// Circuit version of `eval_packed`.
/// Evaluates constraints regarding the membus.
pub fn eval_ext_circuit<F: RichField + Extendable<D>, const D: usize>(
pub(crate) fn eval_ext_circuit<F: RichField + Extendable<D>, const D: usize>(
builder: &mut plonky2::plonk::circuit_builder::CircuitBuilder<F, D>,
lv: &CpuColumnsView<ExtensionTarget<D>>,
yield_constr: &mut RecursiveConstraintConsumer<F, D>,

View File

@ -327,7 +327,7 @@ fn eval_ext_circuit_store<F: RichField + Extendable<D>, const D: usize>(
}
/// Evaluates constraints for MLOAD_GENERAL and MSTORE_GENERAL.
pub fn eval_packed<P: PackedField>(
pub(crate) fn eval_packed<P: PackedField>(
lv: &CpuColumnsView<P>,
nv: &CpuColumnsView<P>,
yield_constr: &mut ConstraintConsumer<P>,
@ -338,7 +338,7 @@ pub fn eval_packed<P: PackedField>(
/// Circuit version of `eval_packed`.
/// Evaluates constraints for MLOAD_GENERAL and MSTORE_GENERAL.
pub fn eval_ext_circuit<F: RichField + Extendable<D>, const D: usize>(
pub(crate) fn eval_ext_circuit<F: RichField + Extendable<D>, const D: usize>(
builder: &mut plonky2::plonk::circuit_builder::CircuitBuilder<F, D>,
lv: &CpuColumnsView<ExtensionTarget<D>>,
nv: &CpuColumnsView<ExtensionTarget<D>>,

View File

@ -16,7 +16,7 @@ const P_LIMBS: [u32; 8] = [
];
/// Evaluates constraints to check the modulus in mem_channel[2].
pub fn eval_packed<P: PackedField>(
pub(crate) fn eval_packed<P: PackedField>(
lv: &CpuColumnsView<P>,
yield_constr: &mut ConstraintConsumer<P>,
) {
@ -34,7 +34,7 @@ pub fn eval_packed<P: PackedField>(
/// Circuit version of `eval_packed`.
/// Evaluates constraints to check the modulus in mem_channel[2].
pub fn eval_ext_circuit<F: RichField + Extendable<D>, const D: usize>(
pub(crate) fn eval_ext_circuit<F: RichField + Extendable<D>, const D: usize>(
builder: &mut plonky2::plonk::circuit_builder::CircuitBuilder<F, D>,
lv: &CpuColumnsView<ExtensionTarget<D>>,
yield_constr: &mut RecursiveConstraintConsumer<F, D>,

View File

@ -7,7 +7,7 @@ use crate::constraint_consumer::{ConstraintConsumer, RecursiveConstraintConsumer
use crate::cpu::columns::CpuColumnsView;
/// Evaluates constraints to check that we are storing the correct PC.
pub fn eval_packed<P: PackedField>(
pub(crate) fn eval_packed<P: PackedField>(
lv: &CpuColumnsView<P>,
nv: &CpuColumnsView<P>,
yield_constr: &mut ConstraintConsumer<P>,
@ -23,7 +23,7 @@ pub fn eval_packed<P: PackedField>(
/// Circuit version of `eval_packed`.
/// Evaluates constraints to check that we are storing the correct PC.
pub fn eval_ext_circuit<F: RichField + Extendable<D>, const D: usize>(
pub(crate) fn eval_ext_circuit<F: RichField + Extendable<D>, const D: usize>(
builder: &mut plonky2::plonk::circuit_builder::CircuitBuilder<F, D>,
lv: &CpuColumnsView<ExtensionTarget<D>>,
nv: &CpuColumnsView<ExtensionTarget<D>>,

View File

@ -7,7 +7,7 @@ use crate::constraint_consumer::{ConstraintConsumer, RecursiveConstraintConsumer
use crate::cpu::columns::CpuColumnsView;
/// Evaluates constraints to check that we are not pushing anything.
pub fn eval_packed<P: PackedField>(
pub(crate) fn eval_packed<P: PackedField>(
lv: &CpuColumnsView<P>,
nv: &CpuColumnsView<P>,
yield_constr: &mut ConstraintConsumer<P>,
@ -21,7 +21,7 @@ pub fn eval_packed<P: PackedField>(
/// Circuit version of `eval_packed`.
/// Evaluates constraints to check that we are not pushing anything.
pub fn eval_ext_circuit<F: RichField + Extendable<D>, const D: usize>(
pub(crate) fn eval_ext_circuit<F: RichField + Extendable<D>, const D: usize>(
builder: &mut plonky2::plonk::circuit_builder::CircuitBuilder<F, D>,
lv: &CpuColumnsView<ExtensionTarget<D>>,
nv: &CpuColumnsView<ExtensionTarget<D>>,

View File

@ -24,7 +24,7 @@ fn limbs(x: U256) -> [u32; 8] {
/// Then `diff @ x = num_unequal_limbs`, where `@` denotes the dot product. We set
/// `diff_pinv = num_unequal_limbs^-1 * x` if `num_unequal_limbs != 0` and 0 otherwise. We have
/// `diff @ diff_pinv = 1 - equal` as desired.
pub fn generate_pinv_diff<F: Field>(val0: U256, val1: U256, lv: &mut CpuColumnsView<F>) {
pub(crate) fn generate_pinv_diff<F: Field>(val0: U256, val1: U256, lv: &mut CpuColumnsView<F>) {
let val0_limbs = limbs(val0).map(F::from_canonical_u32);
let val1_limbs = limbs(val1).map(F::from_canonical_u32);
@ -43,7 +43,7 @@ pub fn generate_pinv_diff<F: Field>(val0: U256, val1: U256, lv: &mut CpuColumnsV
}
/// Evaluates the constraints for EQ and ISZERO.
pub fn eval_packed<P: PackedField>(
pub(crate) fn eval_packed<P: PackedField>(
lv: &CpuColumnsView<P>,
nv: &CpuColumnsView<P>,
yield_constr: &mut ConstraintConsumer<P>,
@ -101,7 +101,7 @@ pub fn eval_packed<P: PackedField>(
/// Circuit version of `eval_packed`.
/// Evaluates the constraints for EQ and ISZERO.
pub fn eval_ext_circuit<F: RichField + Extendable<D>, const D: usize>(
pub(crate) fn eval_ext_circuit<F: RichField + Extendable<D>, const D: usize>(
builder: &mut plonky2::plonk::circuit_builder::CircuitBuilder<F, D>,
lv: &CpuColumnsView<ExtensionTarget<D>>,
nv: &CpuColumnsView<ExtensionTarget<D>>,

View File

@ -10,7 +10,7 @@ use crate::constraint_consumer::{ConstraintConsumer, RecursiveConstraintConsumer
use crate::cpu::columns::CpuColumnsView;
/// Evaluates constraints for NOT, EQ and ISZERO.
pub fn eval_packed<P: PackedField>(
pub(crate) fn eval_packed<P: PackedField>(
lv: &CpuColumnsView<P>,
nv: &CpuColumnsView<P>,
yield_constr: &mut ConstraintConsumer<P>,
@ -21,7 +21,7 @@ pub fn eval_packed<P: PackedField>(
/// Circuit version of `eval_packed`.
/// Evaluates constraints for NOT, EQ and ISZERO.
pub fn eval_ext_circuit<F: RichField + Extendable<D>, const D: usize>(
pub(crate) fn eval_ext_circuit<F: RichField + Extendable<D>, const D: usize>(
builder: &mut plonky2::plonk::circuit_builder::CircuitBuilder<F, D>,
lv: &CpuColumnsView<ExtensionTarget<D>>,
nv: &CpuColumnsView<ExtensionTarget<D>>,

View File

@ -12,7 +12,7 @@ const LIMB_SIZE: usize = 32;
const ALL_1_LIMB: u64 = (1 << LIMB_SIZE) - 1;
/// Evaluates constraints for NOT.
pub fn eval_packed<P: PackedField>(
pub(crate) fn eval_packed<P: PackedField>(
lv: &CpuColumnsView<P>,
nv: &CpuColumnsView<P>,
yield_constr: &mut ConstraintConsumer<P>,
@ -33,7 +33,7 @@ pub fn eval_packed<P: PackedField>(
/// Circuit version of `eval_packed`.
/// Evaluates constraints for NOT.
pub fn eval_ext_circuit<F: RichField + Extendable<D>, const D: usize>(
pub(crate) fn eval_ext_circuit<F: RichField + Extendable<D>, const D: usize>(
builder: &mut plonky2::plonk::circuit_builder::CircuitBuilder<F, D>,
lv: &CpuColumnsView<ExtensionTarget<D>>,
nv: &CpuColumnsView<ExtensionTarget<D>>,

View File

@ -257,7 +257,7 @@ pub(crate) fn eval_packed_one<P: PackedField>(
}
/// Evaluates constraints for all opcodes' `StackBehavior`s.
pub fn eval_packed<P: PackedField>(
pub(crate) fn eval_packed<P: PackedField>(
lv: &CpuColumnsView<P>,
nv: &CpuColumnsView<P>,
yield_constr: &mut ConstraintConsumer<P>,
@ -535,7 +535,7 @@ pub(crate) fn eval_ext_circuit_one<F: RichField + Extendable<D>, const D: usize>
/// Circuit version of `eval_packed`.
/// Evaluates constraints for all opcodes' `StackBehavior`s.
pub fn eval_ext_circuit<F: RichField + Extendable<D>, const D: usize>(
pub(crate) fn eval_ext_circuit<F: RichField + Extendable<D>, const D: usize>(
builder: &mut plonky2::plonk::circuit_builder::CircuitBuilder<F, D>,
lv: &CpuColumnsView<ExtensionTarget<D>>,
nv: &CpuColumnsView<ExtensionTarget<D>>,

View File

@ -21,7 +21,7 @@ use crate::cpu::columns::CpuColumnsView;
pub const MAX_USER_STACK_SIZE: usize = 1024;
/// Evaluates constraints to check for stack overflows.
pub fn eval_packed<P: PackedField>(
pub(crate) fn eval_packed<P: PackedField>(
lv: &CpuColumnsView<P>,
yield_constr: &mut ConstraintConsumer<P>,
) {
@ -39,7 +39,7 @@ pub fn eval_packed<P: PackedField>(
/// Circuit version of `eval_packed`.
/// Evaluates constraints to check for stack overflows.
pub fn eval_ext_circuit<F: RichField + Extendable<D>, const D: usize>(
pub(crate) fn eval_ext_circuit<F: RichField + Extendable<D>, const D: usize>(
builder: &mut plonky2::plonk::circuit_builder::CircuitBuilder<F, D>,
lv: &CpuColumnsView<ExtensionTarget<D>>,
yield_constr: &mut RecursiveConstraintConsumer<F, D>,

View File

@ -20,7 +20,7 @@ const BYTES_PER_OFFSET: usize = crate::cpu::kernel::assembler::BYTES_PER_OFFSET
const_assert!(BYTES_PER_OFFSET < NUM_GP_CHANNELS); // Reserve one channel for stack push
/// Evaluates constraints for syscalls and exceptions.
pub fn eval_packed<P: PackedField>(
pub(crate) fn eval_packed<P: PackedField>(
lv: &CpuColumnsView<P>,
nv: &CpuColumnsView<P>,
yield_constr: &mut ConstraintConsumer<P>,
@ -124,7 +124,7 @@ pub fn eval_packed<P: PackedField>(
/// Circuit version of `eval_packed`.
/// Evaluates constraints for syscalls and exceptions.
pub fn eval_ext_circuit<F: RichField + Extendable<D>, const D: usize>(
pub(crate) fn eval_ext_circuit<F: RichField + Extendable<D>, const D: usize>(
builder: &mut plonky2::plonk::circuit_builder::CircuitBuilder<F, D>,
lv: &CpuColumnsView<ExtensionTarget<D>>,
nv: &CpuColumnsView<ExtensionTarget<D>>,

View File

@ -60,7 +60,7 @@ use crate::stark::Stark;
/// - a vector of `(usize, F)` corresponding to the column number and the associated multiplicand
/// - the constant of the linear combination.
#[derive(Clone, Debug)]
pub struct Column<F: Field> {
pub(crate) struct Column<F: Field> {
linear_combination: Vec<(usize, F)>,
next_row_linear_combination: Vec<(usize, F)>,
constant: F,
@ -68,7 +68,7 @@ pub struct Column<F: Field> {
impl<F: Field> Column<F> {
/// Returns the representation of a single column in the current row.
pub fn single(c: usize) -> Self {
pub(crate) fn single(c: usize) -> Self {
Self {
linear_combination: vec![(c, F::ONE)],
next_row_linear_combination: vec![],
@ -77,14 +77,14 @@ impl<F: Field> Column<F> {
}
/// Returns multiple single columns in the current row.
pub fn singles<I: IntoIterator<Item = impl Borrow<usize>>>(
pub(crate) fn singles<I: IntoIterator<Item = impl Borrow<usize>>>(
cs: I,
) -> impl Iterator<Item = Self> {
cs.into_iter().map(|c| Self::single(*c.borrow()))
}
/// Returns the representation of a single column in the next row.
pub fn single_next_row(c: usize) -> Self {
pub(crate) fn single_next_row(c: usize) -> Self {
Self {
linear_combination: vec![],
next_row_linear_combination: vec![(c, F::ONE)],
@ -93,14 +93,14 @@ impl<F: Field> Column<F> {
}
/// Returns multiple single columns for the next row.
pub fn singles_next_row<I: IntoIterator<Item = impl Borrow<usize>>>(
pub(crate) fn singles_next_row<I: IntoIterator<Item = impl Borrow<usize>>>(
cs: I,
) -> impl Iterator<Item = Self> {
cs.into_iter().map(|c| Self::single_next_row(*c.borrow()))
}
/// Returns a linear combination corresponding to a constant.
pub fn constant(constant: F) -> Self {
pub(crate) fn constant(constant: F) -> Self {
Self {
linear_combination: vec![],
next_row_linear_combination: vec![],
@ -109,17 +109,17 @@ impl<F: Field> Column<F> {
}
/// Returns a linear combination corresponding to 0.
pub fn zero() -> Self {
pub(crate) fn zero() -> Self {
Self::constant(F::ZERO)
}
/// Returns a linear combination corresponding to 1.
pub fn one() -> Self {
pub(crate) fn one() -> Self {
Self::constant(F::ONE)
}
/// Given an iterator of `(usize, F)` and a constant, returns the association linear combination of columns for the current row.
pub fn linear_combination_with_constant<I: IntoIterator<Item = (usize, F)>>(
pub(crate) fn linear_combination_with_constant<I: IntoIterator<Item = (usize, F)>>(
iter: I,
constant: F,
) -> Self {
@ -138,7 +138,9 @@ impl<F: Field> Column<F> {
}
/// Given an iterator of `(usize, F)` and a constant, returns the associated linear combination of columns for the current and the next rows.
pub fn linear_combination_and_next_row_with_constant<I: IntoIterator<Item = (usize, F)>>(
pub(crate) fn linear_combination_and_next_row_with_constant<
I: IntoIterator<Item = (usize, F)>,
>(
iter: I,
next_row_iter: I,
constant: F,
@ -166,19 +168,19 @@ impl<F: Field> Column<F> {
}
/// Returns a linear combination of columns, with no additional constant.
pub fn linear_combination<I: IntoIterator<Item = (usize, F)>>(iter: I) -> Self {
pub(crate) fn linear_combination<I: IntoIterator<Item = (usize, F)>>(iter: I) -> Self {
Self::linear_combination_with_constant(iter, F::ZERO)
}
/// Given an iterator of columns (c_0, ..., c_n) containing bits in little endian order:
/// returns the representation of c_0 + 2 * c_1 + ... + 2^n * c_n.
pub fn le_bits<I: IntoIterator<Item = impl Borrow<usize>>>(cs: I) -> Self {
pub(crate) fn le_bits<I: IntoIterator<Item = impl Borrow<usize>>>(cs: I) -> Self {
Self::linear_combination(cs.into_iter().map(|c| *c.borrow()).zip(F::TWO.powers()))
}
/// Given an iterator of columns (c_0, ..., c_n) containing bytes in little endian order:
/// returns the representation of c_0 + 256 * c_1 + ... + 256^n * c_n.
pub fn le_bytes<I: IntoIterator<Item = impl Borrow<usize>>>(cs: I) -> Self {
pub(crate) fn le_bytes<I: IntoIterator<Item = impl Borrow<usize>>>(cs: I) -> Self {
Self::linear_combination(
cs.into_iter()
.map(|c| *c.borrow())
@ -187,12 +189,12 @@ impl<F: Field> Column<F> {
}
/// Given an iterator of columns, returns the representation of their sum.
pub fn sum<I: IntoIterator<Item = impl Borrow<usize>>>(cs: I) -> Self {
pub(crate) fn sum<I: IntoIterator<Item = impl Borrow<usize>>>(cs: I) -> Self {
Self::linear_combination(cs.into_iter().map(|c| *c.borrow()).zip(repeat(F::ONE)))
}
/// Given the column values for the current row, returns the evaluation of the linear combination.
pub fn eval<FE, P, const D: usize>(&self, v: &[P]) -> P
pub(crate) fn eval<FE, P, const D: usize>(&self, v: &[P]) -> P
where
FE: FieldExtension<D, BaseField = F>,
P: PackedField<Scalar = FE>,
@ -205,7 +207,7 @@ impl<F: Field> Column<F> {
}
/// Given the column values for the current and next rows, evaluates the current and next linear combinations and returns their sum.
pub fn eval_with_next<FE, P, const D: usize>(&self, v: &[P], next_v: &[P]) -> P
pub(crate) fn eval_with_next<FE, P, const D: usize>(&self, v: &[P], next_v: &[P]) -> P
where
FE: FieldExtension<D, BaseField = F>,
P: PackedField<Scalar = FE>,
@ -223,7 +225,7 @@ impl<F: Field> Column<F> {
}
/// Evaluate on a row of a table given in column-major form.
pub fn eval_table(&self, table: &[PolynomialValues<F>], row: usize) -> F {
pub(crate) fn eval_table(&self, table: &[PolynomialValues<F>], row: usize) -> F {
let mut res = self
.linear_combination
.iter()
@ -245,7 +247,7 @@ impl<F: Field> Column<F> {
}
/// Circuit version of `eval`: Given a row's targets, returns their linear combination.
pub fn eval_circuit<const D: usize>(
pub(crate) fn eval_circuit<const D: usize>(
&self,
builder: &mut CircuitBuilder<F, D>,
v: &[ExtensionTarget<D>],
@ -269,7 +271,7 @@ impl<F: Field> Column<F> {
/// Circuit version of `eval_with_next`:
/// Given the targets of the current and next row, returns the sum of their linear combinations.
pub fn eval_with_next_circuit<const D: usize>(
pub(crate) fn eval_with_next_circuit<const D: usize>(
&self,
builder: &mut CircuitBuilder<F, D>,
v: &[ExtensionTarget<D>],
@ -304,7 +306,7 @@ impl<F: Field> Column<F> {
/// `filter_column` is used to determine the rows to select in `Table`.
/// `columns` represents linear combinations of the columns of `Table`.
#[derive(Clone, Debug)]
pub struct TableWithColumns<F: Field> {
pub(crate) struct TableWithColumns<F: Field> {
table: Table,
columns: Vec<Column<F>>,
pub(crate) filter_column: Option<Column<F>>,
@ -312,7 +314,11 @@ pub struct TableWithColumns<F: Field> {
impl<F: Field> TableWithColumns<F> {
/// Generates a new `TableWithColumns` given a `Table`, a linear combination of columns `columns` and a `filter_column`.
pub fn new(table: Table, columns: Vec<Column<F>>, filter_column: Option<Column<F>>) -> Self {
pub(crate) fn new(
table: Table,
columns: Vec<Column<F>>,
filter_column: Option<Column<F>>,
) -> Self {
Self {
table,
columns,
@ -324,7 +330,7 @@ impl<F: Field> TableWithColumns<F> {
/// Cross-table lookup data consisting in the lookup table (`looked_table`) and all the tables that look into `looked_table` (`looking_tables`).
/// Each `looking_table` corresponds to a STARK's table whose rows have been filtered out and whose columns have been through a linear combination (see `eval_table`). The concatenation of those smaller tables should result in the `looked_table`.
#[derive(Clone)]
pub struct CrossTableLookup<F: Field> {
pub(crate) struct CrossTableLookup<F: Field> {
/// Column linear combinations for all tables that are looking into the current table.
pub(crate) looking_tables: Vec<TableWithColumns<F>>,
/// Column linear combination for the current table.
@ -334,7 +340,7 @@ pub struct CrossTableLookup<F: Field> {
impl<F: Field> CrossTableLookup<F> {
/// Creates a new `CrossTableLookup` given some looking tables and a looked table.
/// All tables should have the same width.
pub fn new(
pub(crate) fn new(
looking_tables: Vec<TableWithColumns<F>>,
looked_table: TableWithColumns<F>,
) -> Self {
@ -361,7 +367,7 @@ impl<F: Field> CrossTableLookup<F> {
/// Cross-table lookup data for one table.
#[derive(Clone, Default)]
pub struct CtlData<F: Field> {
pub(crate) struct CtlData<F: Field> {
/// Data associated with all Z(x) polynomials for one table.
pub(crate) zs_columns: Vec<CtlZData<F>>,
}
@ -381,17 +387,17 @@ pub(crate) struct CtlZData<F: Field> {
impl<F: Field> CtlData<F> {
/// Returns the number of cross-table lookup polynomials.
pub fn len(&self) -> usize {
pub(crate) fn len(&self) -> usize {
self.zs_columns.len()
}
/// Returns whether there are no cross-table lookups.
pub fn is_empty(&self) -> bool {
pub(crate) fn is_empty(&self) -> bool {
self.zs_columns.is_empty()
}
/// Returns all the cross-table lookup polynomials.
pub fn z_polys(&self) -> Vec<PolynomialValues<F>> {
pub(crate) fn z_polys(&self) -> Vec<PolynomialValues<F>> {
self.zs_columns
.iter()
.map(|zs_columns| zs_columns.z.clone())
@ -452,7 +458,7 @@ pub(crate) struct GrandProductChallengeSet<T: Copy + Eq + PartialEq + Debug> {
}
impl GrandProductChallengeSet<Target> {
pub fn to_buffer(&self, buffer: &mut Vec<u8>) -> IoResult<()> {
pub(crate) fn to_buffer(&self, buffer: &mut Vec<u8>) -> IoResult<()> {
buffer.write_usize(self.challenges.len())?;
for challenge in &self.challenges {
buffer.write_target(challenge.beta)?;
@ -461,7 +467,7 @@ impl GrandProductChallengeSet<Target> {
Ok(())
}
pub fn from_buffer(buffer: &mut Buffer) -> IoResult<Self> {
pub(crate) fn from_buffer(buffer: &mut Buffer) -> IoResult<Self> {
let length = buffer.read_usize()?;
let mut challenges = Vec::with_capacity(length);
for _ in 0..length {
@ -616,7 +622,7 @@ fn partial_products<F: Field>(
/// Data necessary to check the cross-table lookups of a given table.
#[derive(Clone)]
pub struct CtlCheckVars<'a, F, FE, P, const D2: usize>
pub(crate) struct CtlCheckVars<'a, F, FE, P, const D2: usize>
where
F: Field,
FE: FieldExtension<D2, BaseField = F>,
@ -741,7 +747,7 @@ pub(crate) fn eval_cross_table_lookup_checks<F, FE, P, S, const D: usize, const
/// Circuit version of `CtlCheckVars`. Data necessary to check the cross-table lookups of a given table.
#[derive(Clone)]
pub struct CtlCheckVarsTarget<'a, F: Field, const D: usize> {
pub(crate) struct CtlCheckVarsTarget<'a, F: Field, const D: usize> {
/// Evaluation of the trace polynomials at point `zeta`.
pub(crate) local_z: ExtensionTarget<D>,
/// Evaluation of the trace polynomials at point `g * zeta`.

View File

@ -8,7 +8,7 @@ use rand::Rng;
use crate::extension_tower::{FieldExt, Fp12, Fp2, Fp6, Stack, BN254};
#[derive(Debug, Copy, Clone, PartialEq)]
pub struct Curve<T>
pub(crate) struct Curve<T>
where
T: FieldExt,
{
@ -17,7 +17,7 @@ where
}
impl<T: FieldExt> Curve<T> {
pub fn unit() -> Self {
pub(crate) fn unit() -> Self {
Curve {
x: T::ZERO,
y: T::ZERO,
@ -47,7 +47,7 @@ where
T: FieldExt,
Curve<T>: CyclicGroup,
{
pub fn int(z: i32) -> Self {
pub(crate) fn int(z: i32) -> Self {
Curve::<T>::GENERATOR * z
}
}
@ -195,7 +195,7 @@ impl CyclicGroup for Curve<Fp2<BN254>> {
}
// The tate pairing takes a point each from the curve and its twist and outputs an Fp12 element
pub fn bn_tate(p: Curve<BN254>, q: Curve<Fp2<BN254>>) -> Fp12<BN254> {
pub(crate) fn bn_tate(p: Curve<BN254>, q: Curve<Fp2<BN254>>) -> Fp12<BN254> {
let miller_output = bn_miller_loop(p, q);
bn_final_exponent(miller_output)
}
@ -203,7 +203,7 @@ pub fn bn_tate(p: Curve<BN254>, q: Curve<Fp2<BN254>>) -> Fp12<BN254> {
/// Standard code for miller loop, can be found on page 99 at this url:
/// https://static1.squarespace.com/static/5fdbb09f31d71c1227082339/t/5ff394720493bd28278889c6/1609798774687/PairingsForBeginners.pdf#page=107
/// where BN_EXP is a hardcoding of the array of Booleans that the loop traverses
pub fn bn_miller_loop(p: Curve<BN254>, q: Curve<Fp2<BN254>>) -> Fp12<BN254> {
pub(crate) fn bn_miller_loop(p: Curve<BN254>, q: Curve<Fp2<BN254>>) -> Fp12<BN254> {
let mut r = p;
let mut acc: Fp12<BN254> = Fp12::<BN254>::UNIT;
let mut line: Fp12<BN254>;
@ -222,14 +222,14 @@ pub fn bn_miller_loop(p: Curve<BN254>, q: Curve<Fp2<BN254>>) -> Fp12<BN254> {
}
/// The sloped line function for doubling a point
pub fn bn_tangent(p: Curve<BN254>, q: Curve<Fp2<BN254>>) -> Fp12<BN254> {
pub(crate) fn bn_tangent(p: Curve<BN254>, q: Curve<Fp2<BN254>>) -> Fp12<BN254> {
let cx = -BN254::new(3) * p.x * p.x;
let cy = BN254::new(2) * p.y;
bn_sparse_embed(p.y * p.y - BN254::new(9), q.x * cx, q.y * cy)
}
/// The sloped line function for adding two points
pub fn bn_cord(p1: Curve<BN254>, p2: Curve<BN254>, q: Curve<Fp2<BN254>>) -> Fp12<BN254> {
pub(crate) fn bn_cord(p1: Curve<BN254>, p2: Curve<BN254>, q: Curve<Fp2<BN254>>) -> Fp12<BN254> {
let cx = p2.y - p1.y;
let cy = p1.x - p2.x;
bn_sparse_embed(p1.y * p2.x - p2.y * p1.x, q.x * cx, q.y * cy)
@ -237,7 +237,7 @@ pub fn bn_cord(p1: Curve<BN254>, p2: Curve<BN254>, q: Curve<Fp2<BN254>>) -> Fp12
/// The tangent and cord functions output sparse Fp12 elements.
/// This map embeds the nonzero coefficients into an Fp12.
pub fn bn_sparse_embed(g000: BN254, g01: Fp2<BN254>, g11: Fp2<BN254>) -> Fp12<BN254> {
pub(crate) fn bn_sparse_embed(g000: BN254, g01: Fp2<BN254>, g11: Fp2<BN254>) -> Fp12<BN254> {
let g0 = Fp6 {
t0: Fp2 {
re: g000,
@ -256,7 +256,7 @@ pub fn bn_sparse_embed(g000: BN254, g01: Fp2<BN254>, g11: Fp2<BN254>) -> Fp12<BN
Fp12 { z0: g0, z1: g1 }
}
pub fn gen_bn_fp12_sparse<R: Rng + ?Sized>(rng: &mut R) -> Fp12<BN254> {
pub(crate) fn gen_bn_fp12_sparse<R: Rng + ?Sized>(rng: &mut R) -> Fp12<BN254> {
bn_sparse_embed(
rng.gen::<BN254>(),
rng.gen::<Fp2<BN254>>(),
@ -276,7 +276,7 @@ pub fn gen_bn_fp12_sparse<R: Rng + ?Sized>(rng: &mut R) -> Fp12<BN254> {
/// (p^4 - p^2 + 1)/N = p^3 + (a2)p^2 - (a1)p - a0
/// where 0 < a0, a1, a2 < p. Then the final power is given by
/// y = y_3 * (y^a2)_2 * (y^-a1)_1 * (y^-a0)
pub fn bn_final_exponent(f: Fp12<BN254>) -> Fp12<BN254> {
pub(crate) fn bn_final_exponent(f: Fp12<BN254>) -> Fp12<BN254> {
let mut y = f.frob(6) / f;
y = y.frob(2) * y;
let (y_a2, y_a1, y_a0) = get_bn_custom_powers(y);

View File

@ -29,7 +29,7 @@ pub const BN_BASE: U256 = U256([
]);
#[derive(Debug, Copy, Clone, PartialEq)]
pub struct BN254 {
pub(crate) struct BN254 {
pub val: U256,
}
@ -126,16 +126,16 @@ pub const BLS_BASE: U512 = U512([
]);
#[derive(Debug, Copy, Clone, PartialEq)]
pub struct BLS381 {
pub(crate) struct BLS381 {
pub val: U512,
}
impl BLS381 {
pub fn lo(self) -> U256 {
pub(crate) fn lo(self) -> U256 {
U256(self.val.0[..4].try_into().unwrap())
}
pub fn hi(self) -> U256 {
pub(crate) fn hi(self) -> U256 {
U256(self.val.0[4..].try_into().unwrap())
}
}
@ -260,7 +260,7 @@ impl Div for BLS381 {
/// The degree 2 field extension Fp2 is given by adjoining i, the square root of -1, to BN254
/// The arithmetic in this extension is standard complex arithmetic
#[derive(Debug, Copy, Clone, PartialEq)]
pub struct Fp2<T>
pub(crate) struct Fp2<T>
where
T: FieldExt,
{
@ -812,7 +812,7 @@ impl Adj for Fp2<BLS381> {
/// The degree 3 field extension Fp6 over Fp2 is given by adjoining t, where t^3 = 1 + i
/// Fp6 has basis 1, t, t^2 over Fp2
#[derive(Debug, Copy, Clone, PartialEq)]
pub struct Fp6<T>
pub(crate) struct Fp6<T>
where
T: FieldExt,
Fp2<T>: Adj,
@ -944,7 +944,7 @@ where
/// while the values of
/// t^(p^n) and t^(2p^n)
/// are precomputed in the constant arrays FROB_T1 and FROB_T2
pub fn frob(self, n: usize) -> Fp6<T> {
pub(crate) fn frob(self, n: usize) -> Fp6<T> {
let n = n % 6;
let frob_t1 = Fp2::<T>::FROB_T[0][n];
let frob_t2 = Fp2::<T>::FROB_T[1][n];
@ -1031,7 +1031,7 @@ where
/// The degree 2 field extension Fp12 over Fp6 is given by
/// adjoining z, where z^2 = t. It thus has basis 1, z over Fp6
#[derive(Debug, Copy, Clone, PartialEq)]
pub struct Fp12<T>
pub(crate) struct Fp12<T>
where
T: FieldExt,
Fp2<T>: Adj,
@ -1200,7 +1200,7 @@ where
/// which sends a + bz: Fp12 to
/// a^(p^n) + b^(p^n) * z^(p^n)
/// where the values of z^(p^n) are precomputed in the constant array FROB_Z
pub fn frob(self, n: usize) -> Fp12<T> {
pub(crate) fn frob(self, n: usize) -> Fp12<T> {
let n = n % 12;
Fp12 {
z0: self.z0.frob(n),

View File

@ -96,7 +96,7 @@ where
F: RichField + Extendable<D>,
C: GenericConfig<D, F = F>,
{
pub fn to_buffer(
fn to_buffer(
&self,
buffer: &mut Vec<u8>,
gate_serializer: &dyn GateSerializer<F, D>,
@ -114,7 +114,7 @@ where
Ok(())
}
pub fn from_buffer(
fn from_buffer(
buffer: &mut Buffer,
gate_serializer: &dyn GateSerializer<F, D>,
generator_serializer: &dyn WitnessGeneratorSerializer<F, D>,
@ -161,7 +161,7 @@ where
F: RichField + Extendable<D>,
C: GenericConfig<D, F = F>,
{
pub fn to_buffer(
fn to_buffer(
&self,
buffer: &mut Vec<u8>,
gate_serializer: &dyn GateSerializer<F, D>,
@ -175,7 +175,7 @@ where
Ok(())
}
pub fn from_buffer(
fn from_buffer(
buffer: &mut Buffer,
gate_serializer: &dyn GateSerializer<F, D>,
generator_serializer: &dyn WitnessGeneratorSerializer<F, D>,
@ -196,21 +196,21 @@ where
}
#[derive(Eq, PartialEq, Debug)]
pub struct AggregationChildTarget<const D: usize> {
struct AggregationChildTarget<const D: usize> {
is_agg: BoolTarget,
agg_proof: ProofWithPublicInputsTarget<D>,
evm_proof: ProofWithPublicInputsTarget<D>,
}
impl<const D: usize> AggregationChildTarget<D> {
pub fn to_buffer(&self, buffer: &mut Vec<u8>) -> IoResult<()> {
fn to_buffer(&self, buffer: &mut Vec<u8>) -> IoResult<()> {
buffer.write_target_bool(self.is_agg)?;
buffer.write_target_proof_with_public_inputs(&self.agg_proof)?;
buffer.write_target_proof_with_public_inputs(&self.evm_proof)?;
Ok(())
}
pub fn from_buffer(buffer: &mut Buffer) -> IoResult<Self> {
fn from_buffer(buffer: &mut Buffer) -> IoResult<Self> {
let is_agg = buffer.read_target_bool()?;
let agg_proof = buffer.read_target_proof_with_public_inputs()?;
let evm_proof = buffer.read_target_proof_with_public_inputs()?;
@ -221,7 +221,7 @@ impl<const D: usize> AggregationChildTarget<D> {
})
}
pub fn public_values<F: RichField + Extendable<D>>(
fn public_values<F: RichField + Extendable<D>>(
&self,
builder: &mut CircuitBuilder<F, D>,
) -> PublicValuesTarget {
@ -250,7 +250,7 @@ where
F: RichField + Extendable<D>,
C: GenericConfig<D, F = F>,
{
pub fn to_buffer(
fn to_buffer(
&self,
buffer: &mut Vec<u8>,
gate_serializer: &dyn GateSerializer<F, D>,
@ -265,7 +265,7 @@ where
Ok(())
}
pub fn from_buffer(
fn from_buffer(
buffer: &mut Buffer,
gate_serializer: &dyn GateSerializer<F, D>,
generator_serializer: &dyn WitnessGeneratorSerializer<F, D>,
@ -760,7 +760,7 @@ where
}
/// Connect the 256 block hashes between two blocks
pub fn connect_block_hashes(
fn connect_block_hashes(
builder: &mut CircuitBuilder<F, D>,
lhs: &ProofWithPublicInputsTarget<D>,
rhs: &ProofWithPublicInputsTarget<D>,
@ -1103,7 +1103,7 @@ where
C: GenericConfig<D, F = F>,
C::Hasher: AlgebraicHasher<F>,
{
pub fn to_buffer(
fn to_buffer(
&self,
buffer: &mut Vec<u8>,
gate_serializer: &dyn GateSerializer<F, D>,
@ -1117,7 +1117,7 @@ where
Ok(())
}
pub fn from_buffer(
fn from_buffer(
buffer: &mut Buffer,
gate_serializer: &dyn GateSerializer<F, D>,
generator_serializer: &dyn WitnessGeneratorSerializer<F, D>,
@ -1195,7 +1195,7 @@ where
C: GenericConfig<D, F = F>,
C::Hasher: AlgebraicHasher<F>,
{
pub fn to_buffer(
fn to_buffer(
&self,
buffer: &mut Vec<u8>,
gate_serializer: &dyn GateSerializer<F, D>,
@ -1222,7 +1222,7 @@ where
Ok(())
}
pub fn from_buffer(
fn from_buffer(
buffer: &mut Buffer,
gate_serializer: &dyn GateSerializer<F, D>,
generator_serializer: &dyn WitnessGeneratorSerializer<F, D>,

View File

@ -82,7 +82,7 @@ impl<F: Field> GenerationState<F> {
/// Updates `program_counter`, and potentially adds some extra handling if we're jumping to a
/// special location.
pub fn jump_to(&mut self, dst: usize) -> Result<(), ProgramError> {
pub(crate) fn jump_to(&mut self, dst: usize) -> Result<(), ProgramError> {
self.registers.program_counter = dst;
if dst == KERNEL.global_labels["observe_new_address"] {
let tip_u256 = stack_peek(self, 0)?;
@ -100,14 +100,14 @@ impl<F: Field> GenerationState<F> {
/// Observe the given address, so that we will be able to recognize the associated state key.
/// This is just for debugging purposes.
pub fn observe_address(&mut self, address: Address) {
pub(crate) fn observe_address(&mut self, address: Address) {
let state_key = keccak(address.0);
self.state_key_to_address.insert(state_key, address);
}
/// Observe the given code hash and store the associated code.
/// When called, the code corresponding to `codehash` should be stored in the return data.
pub fn observe_contract(&mut self, codehash: H256) -> Result<(), ProgramError> {
pub(crate) fn observe_contract(&mut self, codehash: H256) -> Result<(), ProgramError> {
if self.inputs.contract_code.contains_key(&codehash) {
return Ok(()); // Return early if the code hash has already been observed.
}
@ -131,14 +131,14 @@ impl<F: Field> GenerationState<F> {
Ok(())
}
pub fn checkpoint(&self) -> GenerationStateCheckpoint {
pub(crate) fn checkpoint(&self) -> GenerationStateCheckpoint {
GenerationStateCheckpoint {
registers: self.registers,
traces: self.traces.checkpoint(),
}
}
pub fn rollback(&mut self, checkpoint: GenerationStateCheckpoint) {
pub(crate) fn rollback(&mut self, checkpoint: GenerationStateCheckpoint) {
self.registers = checkpoint.registers;
self.traces.rollback(checkpoint.traces);
}

View File

@ -12,7 +12,7 @@ pub const fn reg_step(i: usize) -> usize {
/// Registers to hold permutation inputs.
/// `reg_input_limb(2*i) -> input[i] as u32`
/// `reg_input_limb(2*i+1) -> input[i] >> 32`
pub fn reg_input_limb<F: Field>(i: usize) -> Column<F> {
pub(crate) fn reg_input_limb<F: Field>(i: usize) -> Column<F> {
debug_assert!(i < 2 * NUM_INPUTS);
let i_u64 = i / 2; // The index of the 64-bit chunk.

View File

@ -34,31 +34,31 @@ pub(crate) const NUM_ROUNDS: usize = 24;
pub(crate) const NUM_INPUTS: usize = 25;
/// Create vector of `Columns` corresponding to the permutation input limbs.
pub fn ctl_data_inputs<F: Field>() -> Vec<Column<F>> {
pub(crate) fn ctl_data_inputs<F: Field>() -> Vec<Column<F>> {
let mut res: Vec<_> = (0..2 * NUM_INPUTS).map(reg_input_limb).collect();
res.push(Column::single(TIMESTAMP));
res
}
/// Create vector of `Columns` corresponding to the permutation output limbs.
pub fn ctl_data_outputs<F: Field>() -> Vec<Column<F>> {
pub(crate) fn ctl_data_outputs<F: Field>() -> Vec<Column<F>> {
let mut res: Vec<_> = Column::singles((0..2 * NUM_INPUTS).map(reg_output_limb)).collect();
res.push(Column::single(TIMESTAMP));
res
}
/// CTL filter for the first round of the Keccak permutation.
pub fn ctl_filter_inputs<F: Field>() -> Column<F> {
pub(crate) fn ctl_filter_inputs<F: Field>() -> Column<F> {
Column::single(reg_step(0))
}
/// CTL filter for the final round of the Keccak permutation.
pub fn ctl_filter_outputs<F: Field>() -> Column<F> {
pub(crate) fn ctl_filter_outputs<F: Field>() -> Column<F> {
Column::single(reg_step(NUM_ROUNDS - 1))
}
#[derive(Copy, Clone, Default)]
pub struct KeccakStark<F, const D: usize> {
pub(crate) struct KeccakStark<F, const D: usize> {
pub(crate) f: PhantomData<F>,
}
@ -231,7 +231,7 @@ impl<F: RichField + Extendable<D>, const D: usize> KeccakStark<F, D> {
row[out_reg_hi] = F::from_canonical_u64(row[in_reg_hi].to_canonical_u64() ^ rc_hi);
}
pub fn generate_trace(
pub(crate) fn generate_trace(
&self,
inputs: Vec<([u64; NUM_INPUTS], usize)>,
min_rows: usize,

View File

@ -231,7 +231,7 @@ pub(crate) struct KeccakSpongeOp {
/// Structure representing the `KeccakSponge` STARK, which carries out the sponge permutation.
#[derive(Copy, Clone, Default)]
pub struct KeccakSpongeStark<F, const D: usize> {
pub(crate) struct KeccakSpongeStark<F, const D: usize> {
f: PhantomData<F>,
}

View File

@ -3,6 +3,7 @@
#![allow(clippy::too_many_arguments)]
#![allow(clippy::type_complexity)]
#![allow(clippy::field_reassign_with_default)]
#![allow(unused)]
#![feature(let_chains)]
pub mod all_stark;
@ -27,12 +28,14 @@ pub mod proof;
pub mod prover;
pub mod recursive_verifier;
pub mod stark;
pub mod stark_testing;
pub mod util;
pub mod vanishing_poly;
pub mod verifier;
pub mod witness;
#[cfg(test)]
mod stark_testing;
use eth_trie_utils::partial_trie::HashedPartialTrie;
// Set up Jemalloc
#[cfg(not(target_env = "msvc"))]

View File

@ -47,7 +47,9 @@ pub(crate) mod columns {
pub const RESULT: Range<usize> = INPUT1.end..INPUT1.end + PACKED_LEN;
/// Returns the column range for each 32 bit chunk in the input.
pub fn limb_bit_cols_for_input(input_bits: Range<usize>) -> impl Iterator<Item = Range<usize>> {
pub(crate) fn limb_bit_cols_for_input(
input_bits: Range<usize>,
) -> impl Iterator<Item = Range<usize>> {
(0..PACKED_LEN).map(move |i| {
let start = input_bits.start + i * PACKED_LIMB_BITS;
let end = min(start + PACKED_LIMB_BITS, input_bits.end);
@ -60,7 +62,7 @@ pub(crate) mod columns {
}
/// Creates the vector of `Columns` corresponding to the opcode, the two inputs and the output of the logic operation.
pub fn ctl_data<F: Field>() -> Vec<Column<F>> {
pub(crate) fn ctl_data<F: Field>() -> Vec<Column<F>> {
// We scale each filter flag with the associated opcode value.
// If a logic operation is happening on the CPU side, the CTL
// will enforce that the reconstructed opcode value from the
@ -77,13 +79,13 @@ pub fn ctl_data<F: Field>() -> Vec<Column<F>> {
}
/// CTL filter for logic operations.
pub fn ctl_filter<F: Field>() -> Column<F> {
pub(crate) fn ctl_filter<F: Field>() -> Column<F> {
Column::sum([columns::IS_AND, columns::IS_OR, columns::IS_XOR])
}
/// Structure representing the Logic STARK, which computes all logic operations.
#[derive(Copy, Clone, Default)]
pub struct LogicStark<F, const D: usize> {
pub(crate) struct LogicStark<F, const D: usize> {
pub f: PhantomData<F>,
}

View File

@ -116,7 +116,7 @@ pub(crate) fn lookup_helper_columns<F: Field>(
helper_columns
}
pub struct LookupCheckVars<F, FE, P, const D2: usize>
pub(crate) struct LookupCheckVars<F, FE, P, const D2: usize>
where
F: Field,
FE: FieldExtension<D2, BaseField = F>,
@ -179,7 +179,7 @@ pub(crate) fn eval_packed_lookups_generic<F, FE, P, S, const D: usize, const D2:
}
}
pub struct LookupCheckVarsTarget<const D: usize> {
pub(crate) struct LookupCheckVarsTarget<const D: usize> {
pub(crate) local_values: Vec<ExtensionTarget<D>>,
pub(crate) next_values: Vec<ExtensionTarget<D>>,
pub(crate) challenges: Vec<Target>,

View File

@ -32,7 +32,7 @@ use crate::witness::memory::{MemoryAddress, MemoryOp};
/// - the address in memory of the element being read/written,
/// - the value being read/written,
/// - the timestamp at which the element is read/written.
pub fn ctl_data<F: Field>() -> Vec<Column<F>> {
pub(crate) fn ctl_data<F: Field>() -> Vec<Column<F>> {
let mut res =
Column::singles([IS_READ, ADDR_CONTEXT, ADDR_SEGMENT, ADDR_VIRTUAL]).collect_vec();
res.extend(Column::singles((0..8).map(value_limb)));
@ -41,12 +41,12 @@ pub fn ctl_data<F: Field>() -> Vec<Column<F>> {
}
/// CTL filter for memory operations.
pub fn ctl_filter<F: Field>() -> Column<F> {
pub(crate) fn ctl_filter<F: Field>() -> Column<F> {
Column::single(FILTER)
}
#[derive(Copy, Clone, Default)]
pub struct MemoryStark<F, const D: usize> {
pub(crate) struct MemoryStark<F, const D: usize> {
pub(crate) f: PhantomData<F>,
}
@ -76,7 +76,9 @@ impl MemoryOp {
}
/// Generates the `_FIRST_CHANGE` columns and the `RANGE_CHECK` column in the trace.
pub fn generate_first_change_flags_and_rc<F: RichField>(trace_rows: &mut [[F; NUM_COLUMNS]]) {
pub(crate) fn generate_first_change_flags_and_rc<F: RichField>(
trace_rows: &mut [[F; NUM_COLUMNS]],
) {
let num_ops = trace_rows.len();
for idx in 0..num_ops - 1 {
let row = trace_rows[idx].as_slice();

View File

@ -1,4 +1,3 @@
#[allow(dead_code)]
#[derive(Copy, Clone, Eq, PartialEq, Hash, Ord, PartialOrd, Debug)]
pub enum Segment {
/// Contains EVM bytecode.
@ -160,7 +159,6 @@ impl Segment {
}
}
#[allow(dead_code)]
pub(crate) fn bit_range(&self) -> usize {
match self {
Segment::Code => 8,

View File

@ -156,7 +156,7 @@ pub struct ExtraBlockData {
/// Memory values which are public.
/// Note: All the larger integers are encoded with 32-bit limbs in little-endian order.
#[derive(Eq, PartialEq, Debug)]
pub struct PublicValuesTarget {
pub(crate) struct PublicValuesTarget {
/// Trie hashes before the execution of the local state transition.
pub trie_roots_before: TrieRootsTarget,
/// Trie hashes after the execution of the local state transition.
@ -171,7 +171,7 @@ pub struct PublicValuesTarget {
impl PublicValuesTarget {
/// Serializes public value targets.
pub fn to_buffer(&self, buffer: &mut Vec<u8>) -> IoResult<()> {
pub(crate) fn to_buffer(&self, buffer: &mut Vec<u8>) -> IoResult<()> {
let TrieRootsTarget {
state_root: state_root_before,
transactions_root: transactions_root_before,
@ -244,7 +244,7 @@ impl PublicValuesTarget {
}
/// Deserializes public value targets.
pub fn from_buffer(buffer: &mut Buffer) -> IoResult<Self> {
pub(crate) fn from_buffer(buffer: &mut Buffer) -> IoResult<Self> {
let trie_roots_before = TrieRootsTarget {
state_root: buffer.read_target_array()?,
transactions_root: buffer.read_target_array()?,
@ -297,7 +297,7 @@ impl PublicValuesTarget {
/// Extracts public value `Target`s from the given public input `Target`s.
/// Public values are always the first public inputs added to the circuit,
/// so we can start extracting at index 0.
pub fn from_public_inputs(pis: &[Target]) -> Self {
pub(crate) fn from_public_inputs(pis: &[Target]) -> Self {
assert!(
pis.len()
> TrieRootsTarget::SIZE * 2
@ -335,7 +335,7 @@ impl PublicValuesTarget {
}
/// Returns the public values in `pv0` or `pv1` depening on `condition`.
pub fn select<F: RichField + Extendable<D>, const D: usize>(
pub(crate) fn select<F: RichField + Extendable<D>, const D: usize>(
builder: &mut CircuitBuilder<F, D>,
condition: BoolTarget,
pv0: Self,
@ -379,13 +379,13 @@ impl PublicValuesTarget {
/// Circuit version of `TrieRoots`.
/// `Target`s for trie hashes. Since a `Target` holds a 32-bit limb, each hash requires 8 `Target`s.
#[derive(Eq, PartialEq, Debug, Copy, Clone)]
pub struct TrieRootsTarget {
pub(crate) struct TrieRootsTarget {
/// Targets for the state trie hash.
pub state_root: [Target; 8],
pub(crate) state_root: [Target; 8],
/// Targets for the transactions trie hash.
pub transactions_root: [Target; 8],
pub(crate) transactions_root: [Target; 8],
/// Targets for the receipts trie hash.
pub receipts_root: [Target; 8],
pub(crate) receipts_root: [Target; 8],
}
impl TrieRootsTarget {
@ -394,7 +394,7 @@ impl TrieRootsTarget {
/// Extracts trie hash `Target`s for all tries from the provided public input `Target`s.
/// The provided `pis` should start with the trie hashes.
pub fn from_public_inputs(pis: &[Target]) -> Self {
pub(crate) fn from_public_inputs(pis: &[Target]) -> Self {
let state_root = pis[0..8].try_into().unwrap();
let transactions_root = pis[8..16].try_into().unwrap();
let receipts_root = pis[16..24].try_into().unwrap();
@ -408,7 +408,7 @@ impl TrieRootsTarget {
/// If `condition`, returns the trie hashes in `tr0`,
/// otherwise returns the trie hashes in `tr1`.
pub fn select<F: RichField + Extendable<D>, const D: usize>(
pub(crate) fn select<F: RichField + Extendable<D>, const D: usize>(
builder: &mut CircuitBuilder<F, D>,
condition: BoolTarget,
tr0: Self,
@ -432,7 +432,7 @@ impl TrieRootsTarget {
}
/// Connects the trie hashes in `tr0` and in `tr1`.
pub fn connect<F: RichField + Extendable<D>, const D: usize>(
pub(crate) fn connect<F: RichField + Extendable<D>, const D: usize>(
builder: &mut CircuitBuilder<F, D>,
tr0: Self,
tr1: Self,
@ -449,27 +449,27 @@ impl TrieRootsTarget {
/// Metadata contained in a block header. Those are identical between
/// all state transition proofs within the same block.
#[derive(Eq, PartialEq, Debug, Copy, Clone)]
pub struct BlockMetadataTarget {
pub(crate) struct BlockMetadataTarget {
/// `Target`s for the address of this block's producer.
pub block_beneficiary: [Target; 5],
pub(crate) block_beneficiary: [Target; 5],
/// `Target` for the timestamp of this block.
pub block_timestamp: Target,
pub(crate) block_timestamp: Target,
/// `Target` for the index of this block.
pub block_number: Target,
pub(crate) block_number: Target,
/// `Target` for the difficulty (before PoS transition) of this block.
pub block_difficulty: Target,
pub(crate) block_difficulty: Target,
/// `Target`s for the `mix_hash` value of this block.
pub block_random: [Target; 8],
pub(crate) block_random: [Target; 8],
/// `Target`s for the gas limit of this block.
pub block_gaslimit: [Target; 2],
pub(crate) block_gaslimit: [Target; 2],
/// `Target` for the chain id of this block.
pub block_chain_id: Target,
pub(crate) block_chain_id: Target,
/// `Target`s for the base fee of this block.
pub block_base_fee: [Target; 2],
pub(crate) block_base_fee: [Target; 2],
/// `Target`s for the gas used of this block.
pub block_gas_used: [Target; 2],
pub(crate) block_gas_used: [Target; 2],
/// `Target`s for the block bloom of this block.
pub block_bloom: [Target; 64],
pub(crate) block_bloom: [Target; 64],
}
impl BlockMetadataTarget {
@ -478,7 +478,7 @@ impl BlockMetadataTarget {
/// Extracts block metadata `Target`s from the provided public input `Target`s.
/// The provided `pis` should start with the block metadata.
pub fn from_public_inputs(pis: &[Target]) -> Self {
pub(crate) fn from_public_inputs(pis: &[Target]) -> Self {
let block_beneficiary = pis[0..5].try_into().unwrap();
let block_timestamp = pis[5];
let block_number = pis[6];
@ -506,7 +506,7 @@ impl BlockMetadataTarget {
/// If `condition`, returns the block metadata in `bm0`,
/// otherwise returns the block metadata in `bm1`.
pub fn select<F: RichField + Extendable<D>, const D: usize>(
pub(crate) fn select<F: RichField + Extendable<D>, const D: usize>(
builder: &mut CircuitBuilder<F, D>,
condition: BoolTarget,
bm0: Self,
@ -543,7 +543,7 @@ impl BlockMetadataTarget {
}
/// Connects the block metadata in `bm0` to the block metadata in `bm1`.
pub fn connect<F: RichField + Extendable<D>, const D: usize>(
pub(crate) fn connect<F: RichField + Extendable<D>, const D: usize>(
builder: &mut CircuitBuilder<F, D>,
bm0: Self,
bm1: Self,
@ -582,21 +582,21 @@ impl BlockMetadataTarget {
/// When the block number is less than 256, dummy values, i.e. `H256::default()`,
/// should be used for the additional block hashes.
#[derive(Eq, PartialEq, Debug, Copy, Clone)]
pub struct BlockHashesTarget {
pub(crate) struct BlockHashesTarget {
/// `Target`s for the previous 256 hashes to the current block. The leftmost hash, i.e. `prev_hashes[0..8]`,
/// is the oldest, and the rightmost, i.e. `prev_hashes[255 * 7..255 * 8]` is the hash of the parent block.
pub prev_hashes: [Target; 2048],
pub(crate) prev_hashes: [Target; 2048],
// `Target` for the hash of the current block.
pub cur_hash: [Target; 8],
pub(crate) cur_hash: [Target; 8],
}
impl BlockHashesTarget {
/// Number of `Target`s required for previous and current block hashes.
pub const BLOCK_HASHES_SIZE: usize = 2056;
pub(crate) const BLOCK_HASHES_SIZE: usize = 2056;
/// Extracts the previous and current block hash `Target`s from the public input `Target`s.
/// The provided `pis` should start with the block hashes.
pub fn from_public_inputs(pis: &[Target]) -> Self {
pub(crate) fn from_public_inputs(pis: &[Target]) -> Self {
Self {
prev_hashes: pis[0..2048].try_into().unwrap(),
cur_hash: pis[2048..2056].try_into().unwrap(),
@ -605,7 +605,7 @@ impl BlockHashesTarget {
/// If `condition`, returns the block hashes in `bm0`,
/// otherwise returns the block hashes in `bm1`.
pub fn select<F: RichField + Extendable<D>, const D: usize>(
pub(crate) fn select<F: RichField + Extendable<D>, const D: usize>(
builder: &mut CircuitBuilder<F, D>,
condition: BoolTarget,
bm0: Self,
@ -622,7 +622,7 @@ impl BlockHashesTarget {
}
/// Connects the block hashes in `bm0` to the block hashes in `bm1`.
pub fn connect<F: RichField + Extendable<D>, const D: usize>(
pub(crate) fn connect<F: RichField + Extendable<D>, const D: usize>(
builder: &mut CircuitBuilder<F, D>,
bm0: Self,
bm1: Self,
@ -640,7 +640,7 @@ impl BlockHashesTarget {
/// Additional block data that are specific to the local transaction being proven,
/// unlike `BlockMetadata`.
#[derive(Eq, PartialEq, Debug, Copy, Clone)]
pub struct ExtraBlockDataTarget {
pub(crate) struct ExtraBlockDataTarget {
/// `Target`s for the state trie digest of the genesis block.
pub genesis_state_trie_root: [Target; 8],
/// `Target` for the transaction count prior execution of the local state transition, starting
@ -668,7 +668,7 @@ impl ExtraBlockDataTarget {
/// Extracts the extra block data `Target`s from the public input `Target`s.
/// The provided `pis` should start with the extra vblock data.
pub fn from_public_inputs(pis: &[Target]) -> Self {
pub(crate) fn from_public_inputs(pis: &[Target]) -> Self {
let genesis_state_trie_root = pis[0..8].try_into().unwrap();
let txn_number_before = pis[8];
let txn_number_after = pis[9];
@ -690,7 +690,7 @@ impl ExtraBlockDataTarget {
/// If `condition`, returns the extra block data in `ed0`,
/// otherwise returns the extra block data in `ed1`.
pub fn select<F: RichField + Extendable<D>, const D: usize>(
pub(crate) fn select<F: RichField + Extendable<D>, const D: usize>(
builder: &mut CircuitBuilder<F, D>,
condition: BoolTarget,
ed0: Self,
@ -734,7 +734,7 @@ impl ExtraBlockDataTarget {
}
/// Connects the extra block data in `ed0` with the extra block data in `ed1`.
pub fn connect<F: RichField + Extendable<D>, const D: usize>(
pub(crate) fn connect<F: RichField + Extendable<D>, const D: usize>(
builder: &mut CircuitBuilder<F, D>,
ed0: Self,
ed1: Self,
@ -811,7 +811,7 @@ impl<F: RichField + Extendable<D>, C: GenericConfig<D, F = F>, const D: usize> S
/// Circuit version of `StarkProof`.
/// Merkle caps and openings that form the proof of a single STARK.
#[derive(Eq, PartialEq, Debug)]
pub struct StarkProofTarget<const D: usize> {
pub(crate) struct StarkProofTarget<const D: usize> {
/// `Target` for the Merkle cap if LDEs of trace values.
pub trace_cap: MerkleCapTarget,
/// `Target` for the Merkle cap of LDEs of lookup helper and CTL columns.
@ -826,7 +826,7 @@ pub struct StarkProofTarget<const D: usize> {
impl<const D: usize> StarkProofTarget<D> {
/// Serializes a STARK proof.
pub fn to_buffer(&self, buffer: &mut Vec<u8>) -> IoResult<()> {
pub(crate) fn to_buffer(&self, buffer: &mut Vec<u8>) -> IoResult<()> {
buffer.write_target_merkle_cap(&self.trace_cap)?;
buffer.write_target_merkle_cap(&self.auxiliary_polys_cap)?;
buffer.write_target_merkle_cap(&self.quotient_polys_cap)?;
@ -836,7 +836,7 @@ impl<const D: usize> StarkProofTarget<D> {
}
/// Deserializes a STARK proof.
pub fn from_buffer(buffer: &mut Buffer) -> IoResult<Self> {
pub(crate) fn from_buffer(buffer: &mut Buffer) -> IoResult<Self> {
let trace_cap = buffer.read_target_merkle_cap()?;
let auxiliary_polys_cap = buffer.read_target_merkle_cap()?;
let quotient_polys_cap = buffer.read_target_merkle_cap()?;
@ -853,7 +853,7 @@ impl<const D: usize> StarkProofTarget<D> {
}
/// Recover the length of the trace from a STARK proof and a STARK config.
pub fn recover_degree_bits(&self, config: &StarkConfig) -> usize {
pub(crate) fn recover_degree_bits(&self, config: &StarkConfig) -> usize {
let initial_merkle_proof = &self.opening_proof.query_round_proofs[0]
.initial_trees_proof
.evals_proofs[0]
@ -981,7 +981,7 @@ impl<F: RichField + Extendable<D>, const D: usize> StarkOpeningSet<F, D> {
/// Circuit version of `StarkOpeningSet`.
/// `Target`s for the purported values of each polynomial at the challenge point.
#[derive(Eq, PartialEq, Debug)]
pub struct StarkOpeningSetTarget<const D: usize> {
pub(crate) struct StarkOpeningSetTarget<const D: usize> {
/// `ExtensionTarget`s for the openings of trace polynomials at `zeta`.
pub local_values: Vec<ExtensionTarget<D>>,
/// `ExtensionTarget`s for the opening of trace polynomials at `g * zeta`.
@ -998,7 +998,7 @@ pub struct StarkOpeningSetTarget<const D: usize> {
impl<const D: usize> StarkOpeningSetTarget<D> {
/// Serializes a STARK's opening set.
pub fn to_buffer(&self, buffer: &mut Vec<u8>) -> IoResult<()> {
pub(crate) fn to_buffer(&self, buffer: &mut Vec<u8>) -> IoResult<()> {
buffer.write_target_ext_vec(&self.local_values)?;
buffer.write_target_ext_vec(&self.next_values)?;
buffer.write_target_ext_vec(&self.auxiliary_polys)?;
@ -1009,7 +1009,7 @@ impl<const D: usize> StarkOpeningSetTarget<D> {
}
/// Deserializes a STARK's opening set.
pub fn from_buffer(buffer: &mut Buffer) -> IoResult<Self> {
pub(crate) fn from_buffer(buffer: &mut Buffer) -> IoResult<Self> {
let local_values = buffer.read_target_ext_vec::<D>()?;
let next_values = buffer.read_target_ext_vec::<D>()?;
let auxiliary_polys = buffer.read_target_ext_vec::<D>()?;

View File

@ -110,7 +110,7 @@ where
C: GenericConfig<D, F = F>,
C::Hasher: AlgebraicHasher<F>,
{
pub fn to_buffer(
pub(crate) fn to_buffer(
&self,
buffer: &mut Vec<u8>,
gate_serializer: &dyn GateSerializer<F, D>,
@ -124,7 +124,7 @@ where
Ok(())
}
pub fn from_buffer(
pub(crate) fn from_buffer(
buffer: &mut Buffer,
gate_serializer: &dyn GateSerializer<F, D>,
generator_serializer: &dyn WitnessGeneratorSerializer<F, D>,

View File

@ -18,7 +18,11 @@ const WITNESS_SIZE: usize = 1 << 5;
/// Tests that the constraints imposed by the given STARK are low-degree by applying them to random
/// low-degree witness polynomials.
pub fn test_stark_low_degree<F: RichField + Extendable<D>, S: Stark<F, D>, const D: usize>(
pub(crate) fn test_stark_low_degree<
F: RichField + Extendable<D>,
S: Stark<F, D>,
const D: usize,
>(
stark: S,
) -> Result<()> {
let rate_bits = log2_ceil(stark.constraint_degree() + 1);
@ -70,7 +74,7 @@ pub fn test_stark_low_degree<F: RichField + Extendable<D>, S: Stark<F, D>, const
}
/// Tests that the circuit constraints imposed by the given STARK are coherent with the native constraints.
pub fn test_stark_circuit_constraints<
pub(crate) fn test_stark_circuit_constraints<
F: RichField + Extendable<D>,
C: GenericConfig<D, F = F>,
S: Stark<F, D>,

View File

@ -14,7 +14,7 @@ use plonky2::util::transpose;
use crate::witness::errors::ProgramError;
/// Construct an integer from its constituent bits (in little-endian order)
pub fn limb_from_bits_le<P: PackedField>(iter: impl IntoIterator<Item = P>) -> P {
pub(crate) fn limb_from_bits_le<P: PackedField>(iter: impl IntoIterator<Item = P>) -> P {
// TODO: This is technically wrong, as 1 << i won't be canonical for all fields...
iter.into_iter()
.enumerate()
@ -23,7 +23,7 @@ pub fn limb_from_bits_le<P: PackedField>(iter: impl IntoIterator<Item = P>) -> P
}
/// Construct an integer from its constituent bits (in little-endian order): recursive edition
pub fn limb_from_bits_le_recursive<F: RichField + Extendable<D>, const D: usize>(
pub(crate) fn limb_from_bits_le_recursive<F: RichField + Extendable<D>, const D: usize>(
builder: &mut plonky2::plonk::circuit_builder::CircuitBuilder<F, D>,
iter: impl IntoIterator<Item = ExtensionTarget<D>>,
) -> ExtensionTarget<D> {
@ -36,7 +36,7 @@ pub fn limb_from_bits_le_recursive<F: RichField + Extendable<D>, const D: usize>
}
/// A helper function to transpose a row-wise trace and put it in the format that `prove` expects.
pub fn trace_rows_to_poly_values<F: Field, const COLUMNS: usize>(
pub(crate) fn trace_rows_to_poly_values<F: Field, const COLUMNS: usize>(
trace_rows: Vec<[F; COLUMNS]>,
) -> Vec<PolynomialValues<F>> {
let trace_row_vecs = trace_rows.into_iter().map(|row| row.to_vec()).collect_vec();
@ -75,7 +75,6 @@ pub(crate) fn u256_to_usize(u256: U256) -> Result<usize, ProgramError> {
u256.try_into().map_err(|_| ProgramError::IntegerTooLarge)
}
#[allow(unused)] // TODO: Remove?
/// Returns the 32-bit little-endian limbs of a `U256`.
pub(crate) fn u256_limbs<F: Field>(u256: U256) -> [F; 8] {
u256.0
@ -91,7 +90,6 @@ pub(crate) fn u256_limbs<F: Field>(u256: U256) -> [F; 8] {
.unwrap()
}
#[allow(unused)]
/// Returns the 32-bit little-endian limbs of a `H256`.
pub(crate) fn h256_limbs<F: Field>(h256: H256) -> [F; 8] {
let mut temp_h256 = h256.0;
@ -105,7 +103,6 @@ pub(crate) fn h256_limbs<F: Field>(h256: H256) -> [F; 8] {
.unwrap()
}
#[allow(unused)]
/// Returns the 32-bit limbs of a `U160`.
pub(crate) fn h160_limbs<F: Field>(h160: H160) -> [F; 5] {
h160.0

View File

@ -1,6 +1,5 @@
use ethereum_types::U256;
#[allow(dead_code)]
#[derive(Debug)]
pub enum ProgramError {
OutOfGas,

View File

@ -17,7 +17,7 @@ use crate::witness::errors::ProgramError;
use crate::witness::errors::ProgramError::MemoryError;
impl MemoryChannel {
pub fn index(&self) -> usize {
pub(crate) fn index(&self) -> usize {
match *self {
Code => 0,
GeneralPurpose(n) => {
@ -29,7 +29,7 @@ impl MemoryChannel {
}
#[derive(Clone, Copy, Debug, Eq, PartialEq, Hash)]
pub struct MemoryAddress {
pub(crate) struct MemoryAddress {
pub(crate) context: usize,
pub(crate) segment: usize,
pub(crate) virt: usize,
@ -79,7 +79,7 @@ pub enum MemoryOpKind {
}
#[derive(Clone, Copy, Debug)]
pub struct MemoryOp {
pub(crate) struct MemoryOp {
/// true if this is an actual memory operation, or false if it's a padding row.
pub filter: bool,
pub timestamp: usize,
@ -101,7 +101,7 @@ pub static DUMMY_MEMOP: MemoryOp = MemoryOp {
};
impl MemoryOp {
pub fn new(
pub(crate) fn new(
channel: MemoryChannel,
clock: usize,
address: MemoryAddress,
@ -139,19 +139,19 @@ impl MemoryOp {
}
#[derive(Clone, Debug)]
pub struct MemoryState {
pub(crate) struct MemoryState {
pub(crate) contexts: Vec<MemoryContextState>,
}
impl MemoryState {
pub fn new(kernel_code: &[u8]) -> Self {
pub(crate) fn new(kernel_code: &[u8]) -> Self {
let code_u256s = kernel_code.iter().map(|&x| x.into()).collect();
let mut result = Self::default();
result.contexts[0].segments[Segment::Code as usize].content = code_u256s;
result
}
pub fn apply_ops(&mut self, ops: &[MemoryOp]) {
pub(crate) fn apply_ops(&mut self, ops: &[MemoryOp]) {
for &op in ops {
let MemoryOp {
address,
@ -165,7 +165,7 @@ impl MemoryState {
}
}
pub fn get(&self, address: MemoryAddress) -> U256 {
pub(crate) fn get(&self, address: MemoryAddress) -> U256 {
if address.context >= self.contexts.len() {
return U256::zero();
}
@ -182,7 +182,7 @@ impl MemoryState {
val
}
pub fn set(&mut self, address: MemoryAddress, val: U256) {
pub(crate) fn set(&mut self, address: MemoryAddress, val: U256) {
while address.context >= self.contexts.len() {
self.contexts.push(MemoryContextState::default());
}

View File

@ -19,7 +19,7 @@ use crate::witness::memory::MemoryOp;
use crate::{arithmetic, keccak, keccak_sponge, logic};
#[derive(Clone, Copy, Debug)]
pub struct TraceCheckpoint {
pub(crate) struct TraceCheckpoint {
pub(self) arithmetic_len: usize,
pub(self) byte_packing_len: usize,
pub(self) cpu_len: usize,
@ -41,7 +41,7 @@ pub(crate) struct Traces<T: Copy> {
}
impl<T: Copy> Traces<T> {
pub fn new() -> Self {
pub(crate) fn new() -> Self {
Traces {
arithmetic_ops: vec![],
byte_packing_ops: vec![],
@ -55,7 +55,7 @@ impl<T: Copy> Traces<T> {
/// Returns the actual trace lengths for each STARK module.
// Uses a `TraceCheckPoint` as return object for convenience.
pub fn get_lengths(&self) -> TraceCheckpoint {
pub(crate) fn get_lengths(&self) -> TraceCheckpoint {
TraceCheckpoint {
arithmetic_len: self
.arithmetic_ops
@ -85,7 +85,7 @@ impl<T: Copy> Traces<T> {
}
/// Returns the number of operations for each STARK module.
pub fn checkpoint(&self) -> TraceCheckpoint {
pub(crate) fn checkpoint(&self) -> TraceCheckpoint {
TraceCheckpoint {
arithmetic_len: self.arithmetic_ops.len(),
byte_packing_len: self.byte_packing_ops.len(),
@ -97,7 +97,7 @@ impl<T: Copy> Traces<T> {
}
}
pub fn rollback(&mut self, checkpoint: TraceCheckpoint) {
pub(crate) fn rollback(&mut self, checkpoint: TraceCheckpoint) {
self.arithmetic_ops.truncate(checkpoint.arithmetic_len);
self.byte_packing_ops.truncate(checkpoint.byte_packing_len);
self.cpu.truncate(checkpoint.cpu_len);
@ -108,35 +108,39 @@ impl<T: Copy> Traces<T> {
self.memory_ops.truncate(checkpoint.memory_len);
}
pub fn mem_ops_since(&self, checkpoint: TraceCheckpoint) -> &[MemoryOp] {
pub(crate) fn mem_ops_since(&self, checkpoint: TraceCheckpoint) -> &[MemoryOp] {
&self.memory_ops[checkpoint.memory_len..]
}
pub fn push_cpu(&mut self, val: CpuColumnsView<T>) {
pub(crate) fn push_cpu(&mut self, val: CpuColumnsView<T>) {
self.cpu.push(val);
}
pub fn push_logic(&mut self, op: logic::Operation) {
pub(crate) fn push_logic(&mut self, op: logic::Operation) {
self.logic_ops.push(op);
}
pub fn push_arithmetic(&mut self, op: arithmetic::Operation) {
pub(crate) fn push_arithmetic(&mut self, op: arithmetic::Operation) {
self.arithmetic_ops.push(op);
}
pub fn push_memory(&mut self, op: MemoryOp) {
pub(crate) fn push_memory(&mut self, op: MemoryOp) {
self.memory_ops.push(op);
}
pub fn push_byte_packing(&mut self, op: BytePackingOp) {
pub(crate) fn push_byte_packing(&mut self, op: BytePackingOp) {
self.byte_packing_ops.push(op);
}
pub fn push_keccak(&mut self, input: [u64; keccak::keccak_stark::NUM_INPUTS], clock: usize) {
pub(crate) fn push_keccak(
&mut self,
input: [u64; keccak::keccak_stark::NUM_INPUTS],
clock: usize,
) {
self.keccak_inputs.push((input, clock));
}
pub fn push_keccak_bytes(&mut self, input: [u8; KECCAK_WIDTH_BYTES], clock: usize) {
pub(crate) fn push_keccak_bytes(&mut self, input: [u8; KECCAK_WIDTH_BYTES], clock: usize) {
let chunks = input
.chunks(size_of::<u64>())
.map(|chunk| u64::from_le_bytes(chunk.try_into().unwrap()))
@ -146,15 +150,15 @@ impl<T: Copy> Traces<T> {
self.push_keccak(chunks, clock);
}
pub fn push_keccak_sponge(&mut self, op: KeccakSpongeOp) {
pub(crate) fn push_keccak_sponge(&mut self, op: KeccakSpongeOp) {
self.keccak_sponge_ops.push(op);
}
pub fn clock(&self) -> usize {
pub(crate) fn clock(&self) -> usize {
self.cpu.len()
}
pub fn into_tables<const D: usize>(
pub(crate) fn into_tables<const D: usize>(
self,
all_stark: &AllStark<T, D>,
config: &StarkConfig,