Merge branch 'main' of github.com:mir-protocol/plonky2 into ripeMD

This commit is contained in:
Dmitry Vagner 2022-09-30 09:41:04 -07:00
commit b26a28454f
23 changed files with 176 additions and 80 deletions

View File

@ -7,11 +7,11 @@ edition = "2021"
[dependencies]
plonky2 = { path = "../plonky2", default-features = false, features = ["rand", "timing"] }
plonky2_util = { path = "../util" }
eth-trie-utils = { git = "https://github.com/mir-protocol/eth-trie-utils.git", rev = "c52a04c9f349ac812b886f383a7306b27c8b96dc" }
eth-trie-utils = { git = "https://github.com/mir-protocol/eth-trie-utils.git", rev = "dd3595b4ba7923f8d465450d210f17a2b4e20f96" }
maybe_rayon = { path = "../maybe_rayon" }
anyhow = "1.0.40"
env_logger = "0.9.0"
ethereum-types = "0.13.1"
ethereum-types = "0.14.0"
hex = { version = "0.4.3", optional = true }
hex-literal = "0.3.4"
itertools = "0.10.3"

View File

@ -338,7 +338,7 @@ mod tests {
row.opcode_bits = bits_from_opcode(0x5b);
row.is_cpu_cycle = F::ONE;
row.is_kernel_mode = F::ONE;
row.program_counter = F::from_canonical_usize(KERNEL.global_labels["route_txn"]);
row.program_counter = F::from_canonical_usize(KERNEL.global_labels["main"]);
cpu_stark.generate(row.borrow_mut());
cpu_trace_rows.push(row.into());
}
@ -377,8 +377,8 @@ mod tests {
row.is_cpu_cycle = F::ONE;
row.is_kernel_mode = F::ONE;
// Since these are the first cycle rows, we must start with PC=route_txn then increment.
row.program_counter = F::from_canonical_usize(KERNEL.global_labels["route_txn"] + i);
// Since these are the first cycle rows, we must start with PC=main then increment.
row.program_counter = F::from_canonical_usize(KERNEL.global_labels["main"] + i);
row.opcode_bits = bits_from_opcode(
if logic_trace[logic::columns::IS_AND].values[i] != F::ZERO {
0x16

View File

@ -69,12 +69,12 @@ pub fn eval_packed_generic<P: PackedField>(
);
// If a non-CPU cycle row is followed by a CPU cycle row, then:
// - the `program_counter` of the CPU cycle row is `route_txn` (the entry point of our kernel),
// - the `program_counter` of the CPU cycle row is `main` (the entry point of our kernel),
// - execution is in kernel mode, and
// - the stack is empty.
let is_last_noncpu_cycle = (lv.is_cpu_cycle - P::ONES) * nv.is_cpu_cycle;
let pc_diff =
nv.program_counter - P::Scalar::from_canonical_usize(KERNEL.global_labels["route_txn"]);
nv.program_counter - P::Scalar::from_canonical_usize(KERNEL.global_labels["main"]);
yield_constr.constraint_transition(is_last_noncpu_cycle * pc_diff);
yield_constr.constraint_transition(is_last_noncpu_cycle * (nv.is_kernel_mode - P::ONES));
yield_constr.constraint_transition(is_last_noncpu_cycle * nv.stack_len);
@ -118,18 +118,18 @@ pub fn eval_ext_circuit<F: RichField + Extendable<D>, const D: usize>(
}
// If a non-CPU cycle row is followed by a CPU cycle row, then:
// - the `program_counter` of the CPU cycle row is `route_txn` (the entry point of our kernel),
// - the `program_counter` of the CPU cycle row is `main` (the entry point of our kernel),
// - execution is in kernel mode, and
// - the stack is empty.
{
let is_last_noncpu_cycle =
builder.mul_sub_extension(lv.is_cpu_cycle, nv.is_cpu_cycle, nv.is_cpu_cycle);
// Start at `route_txn`.
let route_txn = builder.constant_extension(F::Extension::from_canonical_usize(
KERNEL.global_labels["route_txn"],
// Start at `main`.
let main = builder.constant_extension(F::Extension::from_canonical_usize(
KERNEL.global_labels["main"],
));
let pc_diff = builder.sub_extension(nv.program_counter, route_txn);
let pc_diff = builder.sub_extension(nv.program_counter, main);
let pc_constr = builder.mul_extension(is_last_noncpu_cycle, pc_diff);
yield_constr.constraint_transition(builder, pc_constr);

View File

@ -33,6 +33,7 @@ pub(crate) fn combined_kernel() -> Kernel {
include_str!("asm/curve/secp256k1/moddiv.asm"),
include_str!("asm/exp.asm"),
include_str!("asm/halt.asm"),
include_str!("asm/main.asm"),
include_str!("asm/memory/core.asm"),
include_str!("asm/memory/memcpy.asm"),
include_str!("asm/memory/metadata.asm"),

View File

@ -1,8 +0,0 @@
// Computes the Keccak256 hash of some arbitrary bytes in memory.
// The given memory values should be in the range of a byte.
//
// Pre stack: ADDR, len, retdest
// Post stack: hash
global keccak_general:
// stack: ADDR, len
// TODO

View File

@ -0,0 +1,8 @@
// Kernel entry point. Runs the top-level txn loop: each iteration asks the
// prover whether any txns remain; if not, execution halts, otherwise the next
// txn is routed and control returns here (because `main` is pushed as the
// return address before jumping to `route_txn`).
global main:
// If the prover has no more txns for us to process, halt.
PROVER_INPUT(end_of_txns)
%jumpi(halt)
// Call route_txn, returning to main to continue the loop.
PUSH main
%jump(route_txn)

View File

@ -6,7 +6,7 @@
global read_rlp_to_memory:
// stack: retdest
PROVER_INPUT // Read the RLP blob length from the prover tape.
PROVER_INPUT(rlp) // Read the RLP blob length from the prover tape.
// stack: len, retdest
PUSH 0 // initial position
// stack: pos, len, retdest
@ -19,7 +19,7 @@ read_rlp_to_memory_loop:
// stack: pos == len, pos, len, retdest
%jumpi(read_rlp_to_memory_finish)
// stack: pos, len, retdest
PROVER_INPUT
PROVER_INPUT(rlp)
// stack: byte, pos, len, retdest
DUP2
// stack: pos, byte, pos, len, retdest

View File

@ -3,14 +3,14 @@
// jump to the appropriate transaction parsing method.
global route_txn:
// stack: (empty)
// stack: retdest
// First load transaction data into memory, where it will be parsed.
PUSH read_txn_from_memory
%jump(read_rlp_to_memory)
// At this point, the raw txn data is in memory.
read_txn_from_memory:
// stack: (empty)
// stack: retdest
// We will peek at the first byte to determine what type of transaction this is.
// Note that type 1 and 2 transactions have a first byte of 1 and 2, respectively.
@ -20,17 +20,17 @@ read_txn_from_memory:
PUSH 0
%mload_current(@SEGMENT_RLP_RAW)
%eq_const(1)
// stack: first_byte == 1
// stack: first_byte == 1, retdest
%jumpi(process_type_1_txn)
// stack: (empty)
// stack: retdest
PUSH 0
%mload_current(@SEGMENT_RLP_RAW)
%eq_const(2)
// stack: first_byte == 2
// stack: first_byte == 2, retdest
%jumpi(process_type_2_txn)
// stack: (empty)
// stack: retdest
// At this point, since it's not a type 1 or 2 transaction,
// it must be a legacy (aka type 0) transaction.
%jump(process_type_2_txn)
%jump(process_type_0_txn)

View File

@ -12,15 +12,15 @@
// keccak256(rlp([nonce, gas_price, gas_limit, to, value, data]))
global process_type_0_txn:
// stack: (empty)
// stack: retdest
PUSH 0 // initial pos
// stack: pos
// stack: pos, retdest
%decode_rlp_list_len
// We don't actually need the length.
%stack (pos, len) -> (pos)
// Decode the nonce and store it.
// stack: pos
// stack: pos, retdest
%decode_rlp_scalar
%stack (pos, nonce) -> (nonce, pos)
%mstore_txn_field(@TXN_FIELD_NONCE)
@ -29,38 +29,38 @@ global process_type_0_txn:
// For legacy transactions, we set both the
// TXN_FIELD_MAX_PRIORITY_FEE_PER_GAS and TXN_FIELD_MAX_FEE_PER_GAS
// fields to gas_price.
// stack: pos
// stack: pos, retdest
%decode_rlp_scalar
%stack (pos, gas_price) -> (gas_price, gas_price, pos)
%mstore_txn_field(@TXN_FIELD_MAX_PRIORITY_FEE_PER_GAS)
%mstore_txn_field(@TXN_FIELD_MAX_FEE_PER_GAS)
// Decode the gas limit and store it.
// stack: pos
// stack: pos, retdest
%decode_rlp_scalar
%stack (pos, gas_limit) -> (gas_limit, pos)
%mstore_txn_field(@TXN_FIELD_GAS_LIMIT)
// Decode the "to" field and store it.
// stack: pos
// stack: pos, retdest
%decode_rlp_scalar
%stack (pos, to) -> (to, pos)
%mstore_txn_field(@TXN_FIELD_TO)
// Decode the value field and store it.
// stack: pos
// stack: pos, retdest
%decode_rlp_scalar
%stack (pos, value) -> (value, pos)
%mstore_txn_field(@TXN_FIELD_VALUE)
// Decode the data length, store it, and compute new_pos after any data.
// stack: pos
// stack: pos, retdest
%decode_rlp_string_len
%stack (pos, data_len) -> (data_len, pos, data_len, pos, data_len)
%mstore_txn_field(@TXN_FIELD_DATA_LEN)
// stack: pos, data_len, pos, data_len
// stack: pos, data_len, pos, data_len, retdest
ADD
// stack: new_pos, pos, data_len
// stack: new_pos, pos, data_len, retdest
// Memcpy the txn data from @SEGMENT_RLP_RAW to @SEGMENT_TXN_DATA.
PUSH parse_v
@ -70,62 +70,62 @@ global process_type_0_txn:
PUSH 0
PUSH @SEGMENT_TXN_DATA
GET_CONTEXT
// stack: DST, SRC, data_len, parse_v, new_pos
// stack: DST, SRC, data_len, parse_v, new_pos, retdest
%jump(memcpy)
parse_v:
// stack: pos
// stack: pos, retdest
%decode_rlp_scalar
// stack: pos, v
// stack: pos, v, retdest
SWAP1
// stack: v, pos
// stack: v, pos, retdest
DUP1
%gt_const(28)
// stack: v > 28, v, pos
// stack: v > 28, v, pos, retdest
%jumpi(process_v_new_style)
// We have an old style v, so y_parity = v - 27.
// No chain ID is present, so we can leave TXN_FIELD_CHAIN_ID_PRESENT and
// TXN_FIELD_CHAIN_ID with their default values of zero.
// stack: v, pos
// stack: v, pos, retdest
%sub_const(27)
%stack (y_parity, pos) -> (y_parity, pos)
%mstore_txn_field(@TXN_FIELD_Y_PARITY)
// stack: pos
// stack: pos, retdest
%jump(parse_r)
process_v_new_style:
// stack: v, pos
// stack: v, pos, retdest
// We have a new style v, so chain_id_present = 1,
// chain_id = (v - 35) / 2, and y_parity = (v - 35) % 2.
%stack (v, pos) -> (1, v, pos)
%mstore_txn_field(@TXN_FIELD_CHAIN_ID_PRESENT)
// stack: v, pos
// stack: v, pos, retdest
%sub_const(35)
DUP1
// stack: v - 35, v - 35, pos
// stack: v - 35, v - 35, pos, retdest
%div_const(2)
// stack: chain_id, v - 35, pos
// stack: chain_id, v - 35, pos, retdest
%mstore_txn_field(@TXN_FIELD_CHAIN_ID)
// stack: v - 35, pos
// stack: v - 35, pos, retdest
%mod_const(2)
// stack: y_parity, pos
// stack: y_parity, pos, retdest
%mstore_txn_field(@TXN_FIELD_Y_PARITY)
parse_r:
// stack: pos
// stack: pos, retdest
%decode_rlp_scalar
%stack (pos, r) -> (r, pos)
%mstore_txn_field(@TXN_FIELD_R)
// stack: pos
// stack: pos, retdest
%decode_rlp_scalar
%stack (pos, s) -> (s)
%mstore_txn_field(@TXN_FIELD_S)
// stack: (empty)
// stack: retdest
// TODO: Write the signed txn data to memory, where it can be hashed and
// checked against the signature.

View File

@ -7,5 +7,5 @@
// data, access_list]))
global process_type_1_txn:
// stack: (empty)
// stack: retdest
PANIC // TODO: Unfinished

View File

@ -8,5 +8,5 @@
// access_list]))
global process_type_2_txn:
// stack: (empty)
// stack: retdest
PANIC // TODO: Unfinished

View File

@ -1,7 +1,7 @@
use std::collections::HashMap;
use anyhow::{anyhow, bail, ensure};
use ethereum_types::{BigEndianHash, U256, U512};
use ethereum_types::{U256, U512};
use keccak_hash::keccak;
use plonky2::field::goldilocks_field::GoldilocksField;
@ -263,7 +263,7 @@ impl<'a> Interpreter<'a> {
0x56 => self.run_jump(), // "JUMP",
0x57 => self.run_jumpi(), // "JUMPI",
0x58 => todo!(), // "GETPC",
0x59 => todo!(), // "MSIZE",
0x59 => self.run_msize(), // "MSIZE",
0x5a => todo!(), // "GAS",
0x5b => self.run_jumpdest(), // "JUMPDEST",
0x5c => todo!(), // "GET_STATE_ROOT",
@ -444,7 +444,7 @@ impl<'a> Interpreter<'a> {
})
.collect::<Vec<_>>();
let hash = keccak(bytes);
self.push(hash.into_uint());
self.push(U256::from_big_endian(hash.as_bytes()));
}
fn run_prover_input(&mut self) -> anyhow::Result<()> {
@ -511,6 +511,14 @@ impl<'a> Interpreter<'a> {
}
}
/// MSIZE (0x59): pushes the size, in bytes, of the current context's main
/// memory segment.
///
/// NOTE(review): this pushes the raw content length; real EVM MSIZE is
/// word-aligned (rounded up to a multiple of 32) — confirm this matches the
/// interpreter's memory model.
fn run_msize(&mut self) {
    let main_memory =
        &self.memory.context_memory[self.context].segments[Segment::MainMemory as usize];
    self.push(U256::from(main_memory.content.len()));
}
// JUMPDEST (0x5b) is a no-op marker for valid jump targets. The assertion
// enforces the invariant stated in its message: kernel code should never
// contain (and therefore never execute) a JUMPDEST.
fn run_jumpdest(&mut self) {
assert!(!self.kernel_mode, "JUMPDEST is not needed in kernel code");
}

View File

@ -12,7 +12,8 @@ fn process_type_0_txn() -> Result<()> {
let process_type_0_txn = KERNEL.global_labels["process_type_0_txn"];
let process_normalized_txn = KERNEL.global_labels["process_normalized_txn"];
let mut interpreter = Interpreter::new_with_kernel(process_type_0_txn, vec![]);
let retaddr = 0xDEADBEEFu32.into();
let mut interpreter = Interpreter::new_with_kernel(process_type_0_txn, vec![retaddr]);
// When we reach process_normalized_txn, we're done with parsing and normalizing.
// Processing normalized transactions is outside the scope of this test.

View File

@ -1,7 +1,7 @@
use std::collections::HashMap;
use eth_trie_utils::partial_trie::PartialTrie;
use ethereum_types::{Address, H256};
use ethereum_types::{Address, BigEndianHash, H256};
use plonky2::field::extension::Extendable;
use plonky2::field::polynomial::PolynomialValues;
use plonky2::field::types::Field;
@ -23,6 +23,7 @@ use crate::util::trace_rows_to_poly_values;
pub(crate) mod memory;
pub(crate) mod mpt;
pub(crate) mod prover_input;
pub(crate) mod rlp;
pub(crate) mod state;
#[derive(Clone, Debug, Deserialize, Serialize, Default)]
@ -86,14 +87,20 @@ pub(crate) fn generate_traces<F: RichField + Extendable<D>, const D: usize>(
};
let trie_roots_before = TrieRoots {
state_root: read_metadata(GlobalMetadata::StateTrieRootDigestBefore),
transactions_root: read_metadata(GlobalMetadata::TransactionsTrieRootDigestBefore),
receipts_root: read_metadata(GlobalMetadata::ReceiptsTrieRootDigestBefore),
state_root: H256::from_uint(&read_metadata(GlobalMetadata::StateTrieRootDigestBefore)),
transactions_root: H256::from_uint(&read_metadata(
GlobalMetadata::TransactionsTrieRootDigestBefore,
)),
receipts_root: H256::from_uint(&read_metadata(
GlobalMetadata::ReceiptsTrieRootDigestBefore,
)),
};
let trie_roots_after = TrieRoots {
state_root: read_metadata(GlobalMetadata::StateTrieRootDigestAfter),
transactions_root: read_metadata(GlobalMetadata::TransactionsTrieRootDigestAfter),
receipts_root: read_metadata(GlobalMetadata::ReceiptsTrieRootDigestAfter),
state_root: H256::from_uint(&read_metadata(GlobalMetadata::StateTrieRootDigestAfter)),
transactions_root: H256::from_uint(&read_metadata(
GlobalMetadata::TransactionsTrieRootDigestAfter,
)),
receipts_root: H256::from_uint(&read_metadata(GlobalMetadata::ReceiptsTrieRootDigestAfter)),
};
let GenerationState {

View File

@ -52,7 +52,7 @@ pub(crate) fn mpt_prover_inputs<F>(
prover_inputs.push((PartialTrieType::of(trie) as u32).into());
match trie {
PartialTrie::Empty => {}
PartialTrie::Hash(h) => prover_inputs.push(*h),
PartialTrie::Hash(h) => prover_inputs.push(U256::from_big_endian(h.as_bytes())),
PartialTrie::Branch { children, value } => {
for child in children {
mpt_prover_inputs(child, prover_inputs, parse_leaf);

View File

@ -24,12 +24,24 @@ impl<F: Field> GenerationState<F> {
/// Dispatches a prover-input request to the handler selected by the first
/// component of `input_fn` ("end_of_txns", "ff", "mpt" or "rlp").
///
/// `stack` is forwarded to handlers that take operands (currently only "ff").
///
/// Panics on an unrecognized function name.
#[allow(unused)] // TODO: Should be used soon.
pub(crate) fn prover_input(&mut self, stack: &[U256], input_fn: &ProverInputFn) -> U256 {
    match input_fn.0[0].as_str() {
        "end_of_txns" => self.run_end_of_txns(),
        "ff" => self.run_ff(stack, input_fn),
        "mpt" => self.run_mpt(),
        "rlp" => self.run_rlp(),
        // Name the offending function so a misconfigured kernel is easy to debug.
        other => panic!("Unrecognized prover input function: {}", other),
    }
}
/// Prover input `end_of_txns`: reports whether every signed txn has been
/// processed.
///
/// Returns 1 once `next_txn_index` has reached the number of signed txns;
/// otherwise advances `next_txn_index` and returns 0.
fn run_end_of_txns(&mut self) -> U256 {
    if self.next_txn_index == self.inputs.signed_txns.len() {
        U256::one()
    } else {
        self.next_txn_index += 1;
        U256::zero()
    }
}
/// Finite field operations.
fn run_ff(&self, stack: &[U256], input_fn: &ProverInputFn) -> U256 {
let field = EvmField::from_str(input_fn.0[1].as_str()).unwrap();
@ -44,6 +56,13 @@ impl<F: Field> GenerationState<F> {
.pop()
.unwrap_or_else(|| panic!("Out of MPT data"))
}
/// Returns the next prover-supplied RLP input.
///
/// `rlp_prover_inputs` is stored in reverse order, so `pop()` yields the
/// inputs in their original order.
///
/// Panics if the RLP input tape is exhausted.
fn run_rlp(&mut self) -> U256 {
    // `expect` replaces `unwrap_or_else(|| panic!(..))`: the message needs no
    // formatting, so the closure was just noise (clippy: expect_fun_call).
    self.rlp_prover_inputs.pop().expect("Out of RLP data")
}
}
enum EvmField {

18
evm/src/generation/rlp.rs Normal file
View File

@ -0,0 +1,18 @@
use ethereum_types::U256;
/// All RLP prover inputs for `signed_txns`, reversed so that a consumer can
/// retrieve them in order via `pop()`.
pub(crate) fn all_rlp_prover_inputs_reversed(signed_txns: &[Vec<u8>]) -> Vec<U256> {
    let mut reversed = all_rlp_prover_inputs(signed_txns);
    reversed.reverse();
    reversed
}
fn all_rlp_prover_inputs(signed_txns: &[Vec<u8>]) -> Vec<U256> {
let mut prover_inputs = vec![];
for txn in signed_txns {
prover_inputs.push(txn.len().into());
for &byte in txn {
prover_inputs.push(byte.into());
}
}
prover_inputs
}

View File

@ -7,6 +7,7 @@ use tiny_keccak::keccakf;
use crate::cpu::columns::{CpuColumnsView, NUM_CPU_COLUMNS};
use crate::generation::memory::MemoryState;
use crate::generation::mpt::all_mpt_prover_inputs_reversed;
use crate::generation::rlp::all_rlp_prover_inputs_reversed;
use crate::generation::GenerationInputs;
use crate::keccak_memory::keccak_memory_stark::KeccakMemoryOp;
use crate::memory::memory_stark::MemoryOp;
@ -19,6 +20,7 @@ use crate::{keccak, logic};
pub(crate) struct GenerationState<F: Field> {
#[allow(unused)] // TODO: Should be used soon.
pub(crate) inputs: GenerationInputs,
pub(crate) next_txn_index: usize,
pub(crate) cpu_rows: Vec<[F; NUM_CPU_COLUMNS]>,
pub(crate) current_cpu_row: CpuColumnsView<F>,
@ -32,14 +34,20 @@ pub(crate) struct GenerationState<F: Field> {
/// Prover inputs containing MPT data, in reverse order so that the next input can be obtained
/// via `pop()`.
pub(crate) mpt_prover_inputs: Vec<U256>,
/// Prover inputs containing RLP data, in reverse order so that the next input can be obtained
/// via `pop()`.
pub(crate) rlp_prover_inputs: Vec<U256>,
}
impl<F: Field> GenerationState<F> {
pub(crate) fn new(inputs: GenerationInputs) -> Self {
let mpt_prover_inputs = all_mpt_prover_inputs_reversed(&inputs.tries);
let rlp_prover_inputs = all_rlp_prover_inputs_reversed(&inputs.signed_txns);
Self {
inputs,
next_txn_index: 0,
cpu_rows: vec![],
current_cpu_row: [F::ZERO; NUM_CPU_COLUMNS].into(),
current_context: 0,
@ -48,6 +56,7 @@ impl<F: Field> GenerationState<F> {
keccak_memory_inputs: vec![],
logic_ops: vec![],
mpt_prover_inputs,
rlp_prover_inputs,
}
}

View File

@ -1,4 +1,4 @@
use ethereum_types::{Address, U256};
use ethereum_types::{Address, H256, U256};
use itertools::Itertools;
use maybe_rayon::*;
use plonky2::field::extension::{Extendable, FieldExtension};
@ -54,9 +54,9 @@ pub struct PublicValues {
#[derive(Debug, Clone, Default)]
pub struct TrieRoots {
pub state_root: U256,
pub transactions_root: U256,
pub receipts_root: U256,
pub state_root: H256,
pub transactions_root: H256,
pub receipts_root: H256,
}
#[derive(Debug, Clone, Default, Deserialize, Serialize)]

View File

@ -11,7 +11,6 @@ use plonky2::plonk::config::{AlgebraicHasher, GenericConfig};
use plonky2::util::reducing::ReducingFactorTarget;
use plonky2::with_context;
use crate::all_stark::{AllStark, Table};
use crate::config::StarkConfig;
use crate::constraint_consumer::RecursiveConstraintConsumer;
use crate::cpu::cpu_stark::CpuStark;
@ -27,9 +26,13 @@ use crate::proof::{
StarkProofChallengesTarget, StarkProofTarget, TrieRoots, TrieRootsTarget,
};
use crate::stark::Stark;
use crate::util::{h160_limbs, u256_limbs};
use crate::util::h160_limbs;
use crate::vanishing_poly::eval_vanishing_poly_circuit;
use crate::vars::StarkEvaluationTargets;
use crate::{
all_stark::{AllStark, Table},
util::h256_limbs,
};
pub fn verify_proof_circuit<
F: RichField + Extendable<D>,
@ -504,15 +507,15 @@ pub fn set_trie_roots_target<F, W, const D: usize>(
{
witness.set_target_arr(
trie_roots_target.state_root,
u256_limbs(trie_roots.state_root),
h256_limbs(trie_roots.state_root),
);
witness.set_target_arr(
trie_roots_target.transactions_root,
u256_limbs(trie_roots.transactions_root),
h256_limbs(trie_roots.transactions_root),
);
witness.set_target_arr(
trie_roots_target.receipts_root,
u256_limbs(trie_roots.receipts_root),
h256_limbs(trie_roots.receipts_root),
);
}

View File

@ -1,6 +1,6 @@
use std::mem::{size_of, transmute_copy, ManuallyDrop};
use ethereum_types::{H160, U256};
use ethereum_types::{H160, H256, U256};
use itertools::Itertools;
use plonky2::field::extension::Extendable;
use plonky2::field::packed::PackedField;
@ -59,6 +59,17 @@ pub(crate) fn u256_limbs<F: Field>(u256: U256) -> [F; 8] {
.unwrap()
}
/// Returns the 32-bit little-endian limbs of a `H256`, one field element per
/// limb.
pub(crate) fn h256_limbs<F: Field>(h256: H256) -> [F; 8] {
    let limbs: Vec<F> = h256
        .0
        .chunks(4)
        .map(|word| u32::from_le_bytes(word.try_into().unwrap()))
        .map(F::from_canonical_u32)
        .collect();
    // A 32-byte hash always splits into exactly eight 4-byte limbs.
    limbs.try_into().unwrap()
}
/// Returns the 32-bit limbs of an `H160`.
pub(crate) fn h160_limbs<F: Field>(h160: H160) -> [F; 5] {
h160.0

View File

@ -3,6 +3,7 @@ use std::ops::Range;
use plonky2_field::extension::Extendable;
use plonky2_field::packed::PackedField;
use plonky2_field::types::{Field, Field64};
use plonky2_util::log_floor;
use crate::gates::gate::Gate;
use crate::gates::packed_util::PackedEvaluableBase;
@ -32,7 +33,8 @@ impl<const B: usize> BaseSumGate<B> {
}
pub fn new_from_config<F: Field64>(config: &CircuitConfig) -> Self {
let num_limbs = F::BITS.min(config.num_routed_wires - Self::START_LIMBS);
let num_limbs =
log_floor(F::ORDER - 1, B as u64).min(config.num_routed_wires - Self::START_LIMBS);
Self::new(num_limbs)
}

View File

@ -38,6 +38,23 @@ pub fn log2_strict(n: usize) -> usize {
res as usize
}
/// Returns the largest integer `i` such that `base**i <= n`.
///
/// Panics if `n == 0` or `base <= 1`.
pub const fn log_floor(n: u64, base: u64) -> usize {
    assert!(n > 0);
    assert!(base > 1);
    let mut exponent = 0;
    let mut power: u64 = 1;
    // Invariant: power == base^exponent and power <= n.
    // `power <= n / base` (integer division) holds iff `power * base <= n`,
    // which also sidesteps any overflow in the multiplication.
    while power <= n / base {
        power *= base;
        exponent += 1;
    }
    exponent
}
/// Permutes `arr` such that each index is mapped to its reverse in binary.
pub fn reverse_index_bits<T: Copy>(arr: &[T]) -> Vec<T> {
let n = arr.len();