Robin Salen 9508b49090
Move byte packing / unpacking to a distinct table (#1212)
* Duplicate Memory trace into BytePacking one

* Add mload_32bytes instruction

* Use dedicated ops for byte packing trace

* Change witness generation to reduce memory reads for MLOAD_32BYTES

* Remove segments

* Fix stack

* Fix extra product when fixing CTL for byte_packing

* Write output value in trace

* Add constraints for BYTE_PACKING table

* Add recursive constraints for BYTE_PACKING table

* Fix constraints

* Add address in trace and constraints

* Add timestamp and batch inputs into BytePackingOp struct

* Add extra column

* Fix BytePackingStark CTL

* Tiny fix in witness generation

* Fix the Memory CTL

* Add constraints for the new columns

* Remove 1 column

* Remove limb columns

* Fix

* Fix recursive circuit of BytePackingTable

* Fix constraints

* Fix endianness

* Add MSTORE_32BYTES instruction and move decomposition to packing table

* Add missing constraint

* Add range-check for all bytes

* Add extra constraint

* Cleanup

* Remove REMAINING_LEN column

* Add corresponding implementations in interpreter

* Fix recursive version

* Remove debug assertion because of CI

* Remove FILTER column

* Update new test from rebasing

* Reorder STARK modules to match TraceCheckPoint ordering

* Address comments

* Pacify clippy

* Add documentation to the packing module

* Fix doctest
2023-09-13 04:45:37 +10:00


//! Byte packing registers.

use core::ops::Range;

use crate::byte_packing::NUM_BYTES;

/// 1 if this is a READ operation, and 0 if this is a WRITE operation.
pub(crate) const IS_READ: usize = 0;
/// 1 if this is the end of a sequence of bytes.
/// This is also used as a filter for the CTL.
pub(crate) const SEQUENCE_END: usize = IS_READ + 1;

pub(super) const BYTES_INDICES_START: usize = SEQUENCE_END + 1;
pub(crate) const fn index_bytes(i: usize) -> usize {
    debug_assert!(i < NUM_BYTES);
    BYTES_INDICES_START + i
}
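
// Illustrative note (not part of the original file), assuming NUM_BYTES == 32:
// `index_bytes(0)` maps to column 2 and `index_bytes(31)` to column 33, so the
// byte-index columns occupy the contiguous range given by BYTE_INDICES_COLS
// below.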

// Note: These are used as filters for distinguishing active vs. padding rows.
pub(crate) const BYTE_INDICES_COLS: Range<usize> =
    BYTES_INDICES_START..BYTES_INDICES_START + NUM_BYTES;

pub(crate) const ADDR_CONTEXT: usize = BYTES_INDICES_START + NUM_BYTES;
pub(crate) const ADDR_SEGMENT: usize = ADDR_CONTEXT + 1;
pub(crate) const ADDR_VIRTUAL: usize = ADDR_SEGMENT + 1;
pub(crate) const TIMESTAMP: usize = ADDR_VIRTUAL + 1;

/// The total length of a sequence of bytes.
/// Cannot be greater than 32.
pub(crate) const SEQUENCE_LEN: usize = TIMESTAMP + 1;

// 32 one-byte limbs hold a total of 256 bits.
const BYTES_VALUES_START: usize = SEQUENCE_LEN + 1;
pub(crate) const fn value_bytes(i: usize) -> usize {
    debug_assert!(i < NUM_BYTES);
    BYTES_VALUES_START + i
}
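
// Illustrative note (not part of the original file), assuming NUM_BYTES == 32:
// `value_bytes(0)` is column 39 and `value_bytes(31)` is column 70, so the
// per-byte value columns span columns 39..71, directly after SEQUENCE_LEN.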

// We need one column for the table, then two columns for every value
// that needs to be range checked in the trace (all written bytes),
// namely the permutation of the column and the permutation of the range.
// The two permutations associated with the byte in column i will be in
// columns RC_COLS[2i] and RC_COLS[2i+1].
pub(crate) const RANGE_COUNTER: usize = BYTES_VALUES_START + NUM_BYTES;
pub(crate) const NUM_RANGE_CHECK_COLS: usize = 1 + 2 * NUM_BYTES;
pub(crate) const RC_COLS: Range<usize> = RANGE_COUNTER + 1..RANGE_COUNTER + NUM_RANGE_CHECK_COLS;
pub(crate) const NUM_COLUMNS: usize = RANGE_COUNTER + NUM_RANGE_CHECK_COLS;
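
// A minimal sanity-check sketch (not part of the original file): given the
// layout above, these assertions confirm that the column indices are
// contiguous and that the range-check columns fill out the rest of the row.
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn column_layout_is_contiguous() {
        // The byte-index columns start right after the two flag columns.
        assert_eq!(BYTES_INDICES_START, 2);
        // The value columns follow the three address columns, the timestamp
        // column, and the sequence-length column.
        assert_eq!(BYTES_VALUES_START, BYTE_INDICES_COLS.end + 5);
        // The range counter comes right after the last value byte.
        assert_eq!(RANGE_COUNTER, value_bytes(NUM_BYTES - 1) + 1);
        // The permuted range-check columns end exactly at the row boundary.
        assert_eq!(RC_COLS.end, NUM_COLUMNS);
    }
}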