Merge branch 'main' into 'new-logup'.

Linda Guiga 2023-09-20 12:45:14 -04:00
parent ca44187201
commit f438d45f06
7 changed files with 113 additions and 138 deletions

View File

@@ -47,7 +47,7 @@ use plonky2::util::transpose;
use super::NUM_BYTES;
use crate::byte_packing::columns::{
index_bytes, value_bytes, ADDR_CONTEXT, ADDR_SEGMENT, ADDR_VIRTUAL, BYTE_INDICES_COLS, IS_READ,
NUM_COLUMNS, RANGE_COUNTER, RC_FREQUENCIES, SEQUENCE_END, SEQUENCE_LEN, TIMESTAMP,
NUM_COLUMNS, RANGE_COUNTER, RC_FREQUENCIES, SEQUENCE_END, TIMESTAMP,
};
use crate::constraint_consumer::{ConstraintConsumer, RecursiveConstraintConsumer};
use crate::cross_table_lookup::Column;
@@ -76,15 +76,16 @@ pub(crate) fn ctl_looked_data<F: Field>() -> Vec<Column<F>> {
})
.collect();
Column::singles([
ADDR_CONTEXT,
ADDR_SEGMENT,
ADDR_VIRTUAL,
SEQUENCE_LEN,
TIMESTAMP,
])
.chain(outputs)
.collect()
// This will correspond to the actual sequence length when the `SEQUENCE_END` flag is on.
let sequence_len: Column<F> = Column::linear_combination(
(0..NUM_BYTES).map(|i| (index_bytes(i), F::from_canonical_usize(i + 1))),
);
Column::singles([ADDR_CONTEXT, ADDR_SEGMENT, ADDR_VIRTUAL])
.chain([sequence_len])
.chain(Column::singles(&[TIMESTAMP]))
.chain(outputs)
.collect()
}
pub fn ctl_looked_filter<F: Field>() -> Column<F> {
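For reference, the new `sequence_len` column above is a linear combination over the one-hot `index_bytes` flags: it evaluates to i + 1 on any row whose active position is i, which equals the full sequence length precisely on the `SEQUENCE_END` row, as the comment notes. A minimal standalone sketch of that evaluation in plain u64 arithmetic (not the plonky2 `Column` API; the row below is hypothetical):

/// Evaluates the sequence-length linear combination on one row, given the
/// NUM_BYTES one-hot index flags of that row. If the i-th flag is set, the
/// result is i + 1; on a padding row (all flags zero) it is 0.
fn sequence_len_from_index_flags(index_flags: &[u64]) -> u64 {
    index_flags
        .iter()
        .enumerate()
        .map(|(i, &flag)| flag * (i as u64 + 1))
        .sum()
}

fn main() {
    // Hypothetical SEQUENCE_END row: the active position is byte 4 (the fifth
    // byte), so the combination reports a sequence length of 5.
    let mut flags = vec![0u64; 32];
    flags[4] = 1;
    assert_eq!(sequence_len_from_index_flags(&flags), 5);
}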
@@ -202,7 +203,6 @@ impl<F: RichField + Extendable<D>, const D: usize> BytePackingStark<F, D> {
row[ADDR_VIRTUAL] = F::from_canonical_usize(virt + bytes.len() - 1);
row[TIMESTAMP] = F::from_canonical_usize(timestamp);
row[SEQUENCE_LEN] = F::from_canonical_usize(bytes.len());
for (i, &byte) in bytes.iter().rev().enumerate() {
if i == bytes.len() - 1 {
@@ -356,27 +356,20 @@ impl<F: RichField + Extendable<D>, const D: usize> Stark<F, D> for BytePackingSt
current_sequence_end * next_filter * (next_sequence_start - one),
);
// The remaining length of a byte sequence must decrease by one or be zero.
let current_sequence_length = vars.local_values[SEQUENCE_LEN];
// The active position in a byte sequence must increase by one on every row
// or be one on the next row (i.e. at the start of a new sequence).
let current_position = self.get_active_position(vars.local_values);
let next_position = self.get_active_position(vars.next_values);
let current_remaining_length = current_sequence_length - current_position;
let next_sequence_length = vars.next_values[SEQUENCE_LEN];
let next_remaining_length = next_sequence_length - next_position;
yield_constr.constraint_transition(
current_remaining_length * (current_remaining_length - next_remaining_length - one),
next_filter * (next_position - one) * (next_position - current_position - one),
);
// At the start of a sequence, the remaining length must be equal to the starting length minus one
yield_constr.constraint(
current_sequence_start * (current_sequence_length - current_remaining_length - one),
);
// The last row must be the end of a sequence or a padding row.
yield_constr.constraint_last_row(current_filter * (current_sequence_end - one));
// The remaining length on the last row must be zero.
yield_constr.constraint_last_row(current_remaining_length);
// If the current remaining length is zero, the end flag must be one.
yield_constr.constraint(current_remaining_length * current_sequence_end);
// If the next position is one in an active row, the current end flag must be one.
yield_constr
.constraint_transition(next_filter * current_sequence_end * (next_position - one));
// The context, segment and timestamp fields must remain unchanged throughout a byte sequence.
// The virtual address must decrement by one at each step of a sequence.
@@ -486,36 +479,26 @@ impl<F: RichField + Extendable<D>, const D: usize> Stark<F, D> for BytePackingSt
let constraint = builder.mul_extension(next_filter, constraint);
yield_constr.constraint_transition(builder, constraint);
// The remaining length of a byte sequence must decrease by one or be zero.
let current_sequence_length = vars.local_values[SEQUENCE_LEN];
let next_sequence_length = vars.next_values[SEQUENCE_LEN];
// The active position in a byte sequence must increase by one on every row
// or be one on the next row (i.e. at the start of a new sequence).
let current_position = self.get_active_position_circuit(builder, vars.local_values);
let next_position = self.get_active_position_circuit(builder, vars.next_values);
let current_remaining_length =
builder.sub_extension(current_sequence_length, current_position);
let next_remaining_length = builder.sub_extension(next_sequence_length, next_position);
let length_diff = builder.sub_extension(current_remaining_length, next_remaining_length);
let constraint = builder.mul_sub_extension(
current_remaining_length,
length_diff,
current_remaining_length,
);
let position_diff = builder.sub_extension(next_position, current_position);
let is_new_or_inactive = builder.mul_sub_extension(next_filter, next_position, next_filter);
let constraint =
builder.mul_sub_extension(is_new_or_inactive, position_diff, is_new_or_inactive);
yield_constr.constraint_transition(builder, constraint);
// At the start of a sequence, the remaining length must be equal to the starting length minus one
let current_sequence_length = vars.local_values[SEQUENCE_LEN];
let length_diff = builder.sub_extension(current_sequence_length, current_remaining_length);
// The last row must be the end of a sequence or a padding row.
let constraint =
builder.mul_sub_extension(current_sequence_start, length_diff, current_sequence_start);
yield_constr.constraint(builder, constraint);
builder.mul_sub_extension(current_filter, current_sequence_end, current_filter);
yield_constr.constraint_last_row(builder, constraint);
// The remaining length on the last row must be zero.
yield_constr.constraint_last_row(builder, current_remaining_length);
// If the current remaining length is zero, the end flag must be one.
let constraint = builder.mul_extension(current_remaining_length, current_sequence_end);
yield_constr.constraint(builder, constraint);
// If the next position is one in an active row, the current end flag must be one.
let constraint = builder.mul_extension(next_filter, current_sequence_end);
let constraint = builder.mul_sub_extension(constraint, next_position, constraint);
yield_constr.constraint_transition(builder, constraint);
// The context, segment and timestamp fields must remain unchanged throughout a byte sequence.
// The virtual address must decrement by one at each step of a sequence.
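The remaining-length bookkeeping removed in these hunks is replaced, in both the native and recursive versions, by a single degree-3 transition constraint on the active position: next_filter * (next_position - 1) * (next_position - current_position - 1). A small sanity sketch over plain integers standing in for field elements (the helper is hypothetical, not part of the STARK):

/// Value of the position transition constraint on two consecutive rows.
/// It vanishes iff the next row is padding, starts a new sequence
/// (position 1), or advances the active position by exactly one.
fn position_transition_constraint(
    next_filter: i64,
    current_position: i64,
    next_position: i64,
) -> i64 {
    next_filter * (next_position - 1) * (next_position - current_position - 1)
}

fn main() {
    // Position advances by one within a sequence: the constraint vanishes.
    assert_eq!(position_transition_constraint(1, 3, 4), 0);
    // A new sequence starts on the next row (position resets to 1): vanishes.
    assert_eq!(position_transition_constraint(1, 7, 1), 0);
    // The next row is padding (filter is 0): vanishes regardless of positions.
    assert_eq!(position_transition_constraint(0, 3, 9), 0);
    // Skipping a position is rejected: the constraint is non-zero.
    assert_ne!(position_transition_constraint(1, 3, 5), 0);
}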

View File

@@ -16,7 +16,8 @@ pub(crate) const fn index_bytes(i: usize) -> usize {
BYTES_INDICES_START + i
}
// Note: Those are used as filter for distinguishing active vs padding rows.
// Note: Those are used as filter for distinguishing active vs padding rows,
// and also to obtain the length of a sequence of bytes being processed.
pub(crate) const BYTE_INDICES_COLS: Range<usize> =
BYTES_INDICES_START..BYTES_INDICES_START + NUM_BYTES;
@@ -25,12 +26,8 @@ pub(crate) const ADDR_SEGMENT: usize = ADDR_CONTEXT + 1;
pub(crate) const ADDR_VIRTUAL: usize = ADDR_SEGMENT + 1;
pub(crate) const TIMESTAMP: usize = ADDR_VIRTUAL + 1;
/// The total length of a sequence of bytes.
/// Cannot be greater than 32.
pub(crate) const SEQUENCE_LEN: usize = TIMESTAMP + 1;
// 32 byte limbs hold a total of 256 bits.
const BYTES_VALUES_START: usize = SEQUENCE_LEN + 1;
const BYTES_VALUES_START: usize = TIMESTAMP + 1;
pub(crate) const fn value_bytes(i: usize) -> usize {
debug_assert!(i < NUM_BYTES);
BYTES_VALUES_START + i
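As the updated comment notes, the byte-index flags now serve double duty: their weighted sum yields the sequence length, while their plain sum acts as the active-row filter. A tiny hedged sketch of the filter side, over plain integers (hypothetical helper):

/// A row is active iff exactly one byte-index flag is set; padding rows have
/// none set. Sketch of the filter role described in the comment above.
fn is_active_row(byte_index_flags: &[u64]) -> bool {
    byte_index_flags.iter().sum::<u64>() == 1
}

fn main() {
    assert!(is_active_row(&[0, 0, 1, 0]));
    assert!(!is_active_row(&[0, 0, 0, 0])); // padding row
}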

View File

@@ -8,41 +8,12 @@ global sys_mload:
// stack: expanded_num_bytes, kexit_info, offset
%update_mem_bytes
// stack: kexit_info, offset
PUSH 0 // acc = 0
// stack: acc, kexit_info, offset
DUP3 %add_const( 0) %mload_current(@SEGMENT_MAIN_MEMORY) %shl_const(0xf8) ADD
DUP3 %add_const( 1) %mload_current(@SEGMENT_MAIN_MEMORY) %shl_const(0xf0) ADD
DUP3 %add_const( 2) %mload_current(@SEGMENT_MAIN_MEMORY) %shl_const(0xe8) ADD
DUP3 %add_const( 3) %mload_current(@SEGMENT_MAIN_MEMORY) %shl_const(0xe0) ADD
DUP3 %add_const( 4) %mload_current(@SEGMENT_MAIN_MEMORY) %shl_const(0xd8) ADD
DUP3 %add_const( 5) %mload_current(@SEGMENT_MAIN_MEMORY) %shl_const(0xd0) ADD
DUP3 %add_const( 6) %mload_current(@SEGMENT_MAIN_MEMORY) %shl_const(0xc8) ADD
DUP3 %add_const( 7) %mload_current(@SEGMENT_MAIN_MEMORY) %shl_const(0xc0) ADD
DUP3 %add_const( 8) %mload_current(@SEGMENT_MAIN_MEMORY) %shl_const(0xb8) ADD
DUP3 %add_const( 9) %mload_current(@SEGMENT_MAIN_MEMORY) %shl_const(0xb0) ADD
DUP3 %add_const(10) %mload_current(@SEGMENT_MAIN_MEMORY) %shl_const(0xa8) ADD
DUP3 %add_const(11) %mload_current(@SEGMENT_MAIN_MEMORY) %shl_const(0xa0) ADD
DUP3 %add_const(12) %mload_current(@SEGMENT_MAIN_MEMORY) %shl_const(0x98) ADD
DUP3 %add_const(13) %mload_current(@SEGMENT_MAIN_MEMORY) %shl_const(0x90) ADD
DUP3 %add_const(14) %mload_current(@SEGMENT_MAIN_MEMORY) %shl_const(0x88) ADD
DUP3 %add_const(15) %mload_current(@SEGMENT_MAIN_MEMORY) %shl_const(0x80) ADD
DUP3 %add_const(16) %mload_current(@SEGMENT_MAIN_MEMORY) %shl_const(0x78) ADD
DUP3 %add_const(17) %mload_current(@SEGMENT_MAIN_MEMORY) %shl_const(0x70) ADD
DUP3 %add_const(18) %mload_current(@SEGMENT_MAIN_MEMORY) %shl_const(0x68) ADD
DUP3 %add_const(19) %mload_current(@SEGMENT_MAIN_MEMORY) %shl_const(0x60) ADD
DUP3 %add_const(20) %mload_current(@SEGMENT_MAIN_MEMORY) %shl_const(0x58) ADD
DUP3 %add_const(21) %mload_current(@SEGMENT_MAIN_MEMORY) %shl_const(0x50) ADD
DUP3 %add_const(22) %mload_current(@SEGMENT_MAIN_MEMORY) %shl_const(0x48) ADD
DUP3 %add_const(23) %mload_current(@SEGMENT_MAIN_MEMORY) %shl_const(0x40) ADD
DUP3 %add_const(24) %mload_current(@SEGMENT_MAIN_MEMORY) %shl_const(0x38) ADD
DUP3 %add_const(25) %mload_current(@SEGMENT_MAIN_MEMORY) %shl_const(0x30) ADD
DUP3 %add_const(26) %mload_current(@SEGMENT_MAIN_MEMORY) %shl_const(0x28) ADD
DUP3 %add_const(27) %mload_current(@SEGMENT_MAIN_MEMORY) %shl_const(0x20) ADD
DUP3 %add_const(28) %mload_current(@SEGMENT_MAIN_MEMORY) %shl_const(0x18) ADD
DUP3 %add_const(29) %mload_current(@SEGMENT_MAIN_MEMORY) %shl_const(0x10) ADD
DUP3 %add_const(30) %mload_current(@SEGMENT_MAIN_MEMORY) %shl_const(0x08) ADD
DUP3 %add_const(31) %mload_current(@SEGMENT_MAIN_MEMORY) %shl_const(0x00) ADD
%stack (acc, kexit_info, offset) -> (kexit_info, acc)
%stack(kexit_info, offset) -> (offset, 32, kexit_info)
PUSH @SEGMENT_MAIN_MEMORY
GET_CONTEXT
// stack: addr: 3, len, kexit_info
MLOAD_32BYTES
%stack (value, kexit_info) -> (kexit_info, value)
EXIT_KERNEL
global sys_mstore:
@@ -55,39 +26,12 @@ global sys_mstore:
// stack: expanded_num_bytes, kexit_info, offset, value
%update_mem_bytes
// stack: kexit_info, offset, value
DUP3 PUSH 0 BYTE DUP3 %add_const( 0) %mstore_current(@SEGMENT_MAIN_MEMORY)
DUP3 PUSH 1 BYTE DUP3 %add_const( 1) %mstore_current(@SEGMENT_MAIN_MEMORY)
DUP3 PUSH 2 BYTE DUP3 %add_const( 2) %mstore_current(@SEGMENT_MAIN_MEMORY)
DUP3 PUSH 3 BYTE DUP3 %add_const( 3) %mstore_current(@SEGMENT_MAIN_MEMORY)
DUP3 PUSH 4 BYTE DUP3 %add_const( 4) %mstore_current(@SEGMENT_MAIN_MEMORY)
DUP3 PUSH 5 BYTE DUP3 %add_const( 5) %mstore_current(@SEGMENT_MAIN_MEMORY)
DUP3 PUSH 6 BYTE DUP3 %add_const( 6) %mstore_current(@SEGMENT_MAIN_MEMORY)
DUP3 PUSH 7 BYTE DUP3 %add_const( 7) %mstore_current(@SEGMENT_MAIN_MEMORY)
DUP3 PUSH 8 BYTE DUP3 %add_const( 8) %mstore_current(@SEGMENT_MAIN_MEMORY)
DUP3 PUSH 9 BYTE DUP3 %add_const( 9) %mstore_current(@SEGMENT_MAIN_MEMORY)
DUP3 PUSH 10 BYTE DUP3 %add_const(10) %mstore_current(@SEGMENT_MAIN_MEMORY)
DUP3 PUSH 11 BYTE DUP3 %add_const(11) %mstore_current(@SEGMENT_MAIN_MEMORY)
DUP3 PUSH 12 BYTE DUP3 %add_const(12) %mstore_current(@SEGMENT_MAIN_MEMORY)
DUP3 PUSH 13 BYTE DUP3 %add_const(13) %mstore_current(@SEGMENT_MAIN_MEMORY)
DUP3 PUSH 14 BYTE DUP3 %add_const(14) %mstore_current(@SEGMENT_MAIN_MEMORY)
DUP3 PUSH 15 BYTE DUP3 %add_const(15) %mstore_current(@SEGMENT_MAIN_MEMORY)
DUP3 PUSH 16 BYTE DUP3 %add_const(16) %mstore_current(@SEGMENT_MAIN_MEMORY)
DUP3 PUSH 17 BYTE DUP3 %add_const(17) %mstore_current(@SEGMENT_MAIN_MEMORY)
DUP3 PUSH 18 BYTE DUP3 %add_const(18) %mstore_current(@SEGMENT_MAIN_MEMORY)
DUP3 PUSH 19 BYTE DUP3 %add_const(19) %mstore_current(@SEGMENT_MAIN_MEMORY)
DUP3 PUSH 20 BYTE DUP3 %add_const(20) %mstore_current(@SEGMENT_MAIN_MEMORY)
DUP3 PUSH 21 BYTE DUP3 %add_const(21) %mstore_current(@SEGMENT_MAIN_MEMORY)
DUP3 PUSH 22 BYTE DUP3 %add_const(22) %mstore_current(@SEGMENT_MAIN_MEMORY)
DUP3 PUSH 23 BYTE DUP3 %add_const(23) %mstore_current(@SEGMENT_MAIN_MEMORY)
DUP3 PUSH 24 BYTE DUP3 %add_const(24) %mstore_current(@SEGMENT_MAIN_MEMORY)
DUP3 PUSH 25 BYTE DUP3 %add_const(25) %mstore_current(@SEGMENT_MAIN_MEMORY)
DUP3 PUSH 26 BYTE DUP3 %add_const(26) %mstore_current(@SEGMENT_MAIN_MEMORY)
DUP3 PUSH 27 BYTE DUP3 %add_const(27) %mstore_current(@SEGMENT_MAIN_MEMORY)
DUP3 PUSH 28 BYTE DUP3 %add_const(28) %mstore_current(@SEGMENT_MAIN_MEMORY)
DUP3 PUSH 29 BYTE DUP3 %add_const(29) %mstore_current(@SEGMENT_MAIN_MEMORY)
DUP3 PUSH 30 BYTE DUP3 %add_const(30) %mstore_current(@SEGMENT_MAIN_MEMORY)
DUP3 PUSH 31 BYTE DUP3 %add_const(31) %mstore_current(@SEGMENT_MAIN_MEMORY)
%stack (kexit_info, offset, value) -> (kexit_info)
%stack(kexit_info, offset, value) -> (offset, value, 32, kexit_info)
PUSH @SEGMENT_MAIN_MEMORY
GET_CONTEXT
// stack: addr: 3, value, len, kexit_info
MSTORE_32BYTES
// stack: kexit_info
EXIT_KERNEL
global sys_mstore8:
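The 32 unrolled `%mload_current`/`%shl_const`/ADD steps (and their `BYTE`/`%mstore_current` counterparts) are collapsed into single `MLOAD_32BYTES`/`MSTORE_32BYTES` instructions that pack and unpack a big-endian 32-byte word. A scaled-down illustration of that packing in Rust, using u128 over 16 bytes since Rust has no native 256-bit integer (this is not the kernel's implementation):

/// Packs bytes most-significant first, as the unrolled shift-and-add chain
/// used to do and as a single MLOAD_32BYTES now does for the full word.
fn pack_big_endian(bytes: &[u8]) -> u128 {
    bytes.iter().fold(0u128, |acc, &b| (acc << 8) | u128::from(b))
}

/// The inverse direction, mirroring the unrolled BYTE/store chain that a
/// single MSTORE_32BYTES now replaces.
fn unpack_big_endian(value: u128, len: usize) -> Vec<u8> {
    (0..len).map(|i| (value >> (8 * (len - 1 - i))) as u8).collect()
}

fn main() {
    let bytes = [0xde, 0xad, 0xbe, 0xef];
    let packed = pack_big_endian(&bytes);
    assert_eq!(packed, 0xdead_beef);
    assert_eq!(unpack_big_endian(packed, 4), bytes);
}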

View File

@@ -184,7 +184,7 @@ impl<F: Field> Column<F> {
// If we access the next row at the last row, for sanity, we consider the next row's values to be 0.
// If CTLs are correctly written, the filter should be 0 in that case anyway.
if !self.next_row_linear_combination.is_empty() && row < table.len() - 1 {
if !self.next_row_linear_combination.is_empty() && row < table[0].values.len() - 1 {
res += self
.next_row_linear_combination
.iter()
@@ -624,7 +624,7 @@ pub(crate) fn eval_cross_table_lookup_checks<F, FE, P, S, const D: usize, const
.collect::<Vec<_>>();
let combined = challenges.combine(evals.iter());
let local_filter = if let Some(column) = filter_column {
column.eval(vars.local_values)
column.eval_with_next(vars.local_values, vars.next_values)
} else {
P::ONES
};
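The bound now uses `table[0].values.len()` (the column height) rather than the number of columns, and the filter is evaluated with `eval_with_next` so that filters referencing the next row are handled. A simplified standalone sketch of what evaluating such a column amounts to (this mirrors the idea only, not the crate's `Column` type):

/// A stand-in for a CTL column: a linear combination over the local row,
/// another over the next row, plus a constant term.
struct SimpleColumn {
    local: Vec<(usize, i64)>, // (column index, coefficient) on the local row
    next: Vec<(usize, i64)>,  // (column index, coefficient) on the next row
    constant: i64,
}

impl SimpleColumn {
    /// Without the next-row values, a filter that references the next row
    /// would evaluate incorrectly; hence the switch to `eval_with_next`.
    fn eval_with_next(&self, local_values: &[i64], next_values: &[i64]) -> i64 {
        self.local.iter().map(|&(c, k)| k * local_values[c]).sum::<i64>()
            + self.next.iter().map(|&(c, k)| k * next_values[c]).sum::<i64>()
            + self.constant
    }
}

fn main() {
    // Hypothetical filter: column 0 of the local row plus column 2 of the next row.
    let filter = SimpleColumn { local: vec![(0, 1)], next: vec![(2, 1)], constant: 0 };
    assert_eq!(filter.eval_with_next(&[1, 5, 7], &[0, 0, 1]), 2);
}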

View File

@@ -377,49 +377,49 @@ where
let arithmetic = RecursiveCircuitsForTable::new(
Table::Arithmetic,
&all_stark.arithmetic_stark,
degree_bits_ranges[0].clone(),
degree_bits_ranges[Table::Arithmetic as usize].clone(),
&all_stark.cross_table_lookups,
stark_config,
);
let byte_packing = RecursiveCircuitsForTable::new(
Table::BytePacking,
&all_stark.byte_packing_stark,
degree_bits_ranges[1].clone(),
degree_bits_ranges[Table::BytePacking as usize].clone(),
&all_stark.cross_table_lookups,
stark_config,
);
let cpu = RecursiveCircuitsForTable::new(
Table::Cpu,
&all_stark.cpu_stark,
degree_bits_ranges[2].clone(),
degree_bits_ranges[Table::Cpu as usize].clone(),
&all_stark.cross_table_lookups,
stark_config,
);
let keccak = RecursiveCircuitsForTable::new(
Table::Keccak,
&all_stark.keccak_stark,
degree_bits_ranges[3].clone(),
degree_bits_ranges[Table::Keccak as usize].clone(),
&all_stark.cross_table_lookups,
stark_config,
);
let keccak_sponge = RecursiveCircuitsForTable::new(
Table::KeccakSponge,
&all_stark.keccak_sponge_stark,
degree_bits_ranges[4].clone(),
degree_bits_ranges[Table::KeccakSponge as usize].clone(),
&all_stark.cross_table_lookups,
stark_config,
);
let logic = RecursiveCircuitsForTable::new(
Table::Logic,
&all_stark.logic_stark,
degree_bits_ranges[5].clone(),
degree_bits_ranges[Table::Logic as usize].clone(),
&all_stark.cross_table_lookups,
stark_config,
);
let memory = RecursiveCircuitsForTable::new(
Table::Memory,
&all_stark.memory_stark,
degree_bits_ranges[6].clone(),
degree_bits_ranges[Table::Memory as usize].clone(),
&all_stark.cross_table_lookups,
stark_config,
);
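Indexing `degree_bits_ranges` by the `Table` enum discriminant instead of bare literals keeps each table paired with its own range even if the table ordering changes. A minimal sketch of the pattern, with a made-up, trimmed-down enum and values:

#[allow(dead_code)]
enum Table {
    Arithmetic = 0,
    BytePacking = 1,
    Cpu = 2,
}

fn main() {
    let degree_bits_ranges = [16..20, 10..15, 18..22];
    // `Table::Cpu as usize` always selects the CPU entry, whereas a bare `2`
    // silently picks the wrong range if a table is ever inserted before it.
    let cpu_range = degree_bits_ranges[Table::Cpu as usize].clone();
    assert_eq!(cpu_range, 18..22);
}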

View File

@@ -74,32 +74,66 @@ impl Default for BlockHashes {
}
}
/// User-provided helper values to compute the `BLOCKHASH` opcode.
/// The proofs across consecutive blocks ensure that these values
/// are consistent (i.e. shifted by one to the left).
///
/// When the block number is less than 256, dummy values, i.e. `H256::default()`,
/// should be used for the additional block hashes.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct BlockHashes {
/// The previous 256 block hashes preceding the current block. The leftmost hash, i.e. `prev_hashes[0]`,
/// is the oldest, and the rightmost, i.e. `prev_hashes[255]`, is the hash of the parent block.
pub prev_hashes: Vec<H256>,
// The hash of the current block.
pub cur_hash: H256,
}
/// Metadata contained in a block header. Those are identical between
/// all state transition proofs within the same block.
#[derive(Debug, Clone, Default, Deserialize, Serialize)]
pub struct BlockMetadata {
/// The address of this block's producer.
pub block_beneficiary: Address,
/// The timestamp of this block.
pub block_timestamp: U256,
/// The index of this block.
pub block_number: U256,
/// The difficulty (before PoS transition) of this block.
pub block_difficulty: U256,
/// The gas limit of this block. It must fit in a `u32`.
pub block_gaslimit: U256,
/// The chain id of this block.
pub block_chain_id: U256,
/// The base fee of this block.
pub block_base_fee: U256,
/// The total gas used in this block. It must fit in a `u32`.
pub block_gas_used: U256,
/// The block bloom of this block, represented as the consecutive
/// 32-byte chunks of a block's final bloom filter string.
pub block_bloom: [U256; 8],
}
/// Additional block data that are specific to the local transaction being proven,
/// unlike `BlockMetadata`.
#[derive(Debug, Clone, Default, Deserialize, Serialize)]
pub struct ExtraBlockData {
/// The transaction count prior to execution of the local state transition, starting
/// at 0 for the initial transaction of a block.
pub txn_number_before: U256,
/// The transaction count after execution of the local state transition.
pub txn_number_after: U256,
/// The accumulated gas used prior to execution of the local state transition, starting
/// at 0 for the initial transaction of a block.
pub gas_used_before: U256,
/// The accumulated gas used after execution of the local state transition. It should
/// match the `block_gas_used` value after execution of the last transaction in a block.
pub gas_used_after: U256,
/// The accumulated bloom filter of this block prior to execution of the local state transition,
/// starting with all zeros for the initial transaction of a block.
pub block_bloom_before: [U256; 8],
/// The accumulated bloom filter after execution of the local state transition. It should
/// match the `block_bloom` value after execution of the last transaction in a block.
pub block_bloom_after: [U256; 8],
}
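Per the new doc comment on `BlockHashes`, blocks with fewer than 256 ancestors pad the missing entries with `H256::default()`, oldest hash first and parent hash last. A hedged sketch of that padding rule, assuming `H256` from the `ethereum-types` crate; the helper name is made up:

use ethereum_types::H256;

/// Hypothetical helper: builds the 256-entry `prev_hashes` list, filling the
/// slots before the oldest known ancestor with `H256::default()` dummies.
fn build_prev_hashes(known_ancestors: &[H256]) -> Vec<H256> {
    let keep = known_ancestors.len().min(256);
    let mut prev_hashes = vec![H256::default(); 256 - keep];
    // Oldest first, parent block last, matching the `prev_hashes` ordering.
    prev_hashes.extend_from_slice(&known_ancestors[known_ancestors.len() - keep..]);
    prev_hashes
}

fn main() {
    // Block 3 has only three ancestors; the remaining 253 slots stay as dummies.
    let ancestors = [H256::repeat_byte(1), H256::repeat_byte(2), H256::repeat_byte(3)];
    let hashes = build_prev_hashes(&ancestors);
    assert_eq!(hashes.len(), 256);
    assert_eq!(hashes[0], H256::default());
    assert_eq!(hashes[255], H256::repeat_byte(3)); // parent block hash
}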

View File

@@ -113,6 +113,29 @@ pub fn prove<F: RichField + Extendable<D>, C: GenericConfig<D, F = F>, const D:
inputs: PartialWitness<F>,
timing: &mut TimingTree,
) -> Result<ProofWithPublicInputs<F, C, D>>
where
C::Hasher: Hasher<F>,
C::InnerHasher: Hasher<F>,
{
let partition_witness = timed!(
timing,
&format!("run {} generators", prover_data.generators.len()),
generate_partial_witness(inputs, prover_data, common_data)
);
prove_with_partition_witness(prover_data, common_data, partition_witness, timing)
}
pub fn prove_with_partition_witness<
F: RichField + Extendable<D>,
C: GenericConfig<D, F = F>,
const D: usize,
>(
prover_data: &ProverOnlyCircuitData<F, C, D>,
common_data: &CommonCircuitData<F, D>,
mut partition_witness: PartitionWitness<F>,
timing: &mut TimingTree,
) -> Result<ProofWithPublicInputs<F, C, D>>
where
C::Hasher: Hasher<F>,
C::InnerHasher: Hasher<F>,
@@ -123,12 +146,6 @@ where
let quotient_degree = common_data.quotient_degree();
let degree = common_data.degree();
let mut partition_witness = timed!(
timing,
&format!("run {} generators", prover_data.generators.len()),
generate_partial_witness(inputs, prover_data, common_data)
);
set_lookup_wires(prover_data, common_data, &mut partition_witness);
let public_inputs = partition_witness.get_targets(&prover_data.public_inputs);
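The refactor above splits proving into two public steps: generating the partition witness and then proving from it, so callers can produce the witness themselves before invoking `prove_with_partition_witness`. A hedged usage sketch against the plonky2 API (the trivial circuit and exact import paths are assumptions for illustration, not part of this commit):

use anyhow::Result;
use plonky2::field::goldilocks_field::GoldilocksField;
use plonky2::field::types::Field;
use plonky2::iop::generator::generate_partial_witness;
use plonky2::iop::witness::{PartialWitness, WitnessWrite};
use plonky2::plonk::circuit_builder::CircuitBuilder;
use plonky2::plonk::circuit_data::CircuitConfig;
use plonky2::plonk::config::PoseidonGoldilocksConfig;
use plonky2::plonk::prover::prove_with_partition_witness;
use plonky2::util::timing::TimingTree;

fn main() -> Result<()> {
    const D: usize = 2;
    type C = PoseidonGoldilocksConfig;
    type F = GoldilocksField;

    // Trivial illustrative circuit: expose x + 1 as a public input.
    let mut builder = CircuitBuilder::<F, D>::new(CircuitConfig::standard_recursion_config());
    let x = builder.add_virtual_target();
    let one = builder.one();
    let y = builder.add(x, one);
    builder.register_public_input(y);
    let data = builder.build::<C>();

    let mut inputs = PartialWitness::new();
    inputs.set_target(x, F::from_canonical_u64(41));

    // The two steps that `prove` now performs internally.
    let partition_witness =
        generate_partial_witness(inputs, &data.prover_only, &data.common);
    let mut timing = TimingTree::default();
    let proof = prove_with_partition_witness(
        &data.prover_only,
        &data.common,
        partition_witness,
        &mut timing,
    )?;

    data.verify(proof)
}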