Mirror of https://github.com/logos-storage/plonky2.git

commit 82d0f08193 (parent 7293054062)

    clippies
@@ -102,7 +102,7 @@ pub(crate) fn generate_traces<F: RichField + Extendable<D>, const D: usize>(
     let tables = timed!(
         timing,
         "convert trace data to tables",
-        state.traces.to_tables(all_stark, config, timing)
+        state.traces.into_tables(all_stark, config, timing)
     );
     (tables, public_values)
 }
@@ -130,7 +130,7 @@ impl<F: RichField + Extendable<D>, const D: usize> KeccakMemoryStark<F, D> {
         let mut row = [F::ZERO; NUM_COLUMNS];
         row[COL_IS_REAL] = F::ONE;
         row[COL_CONTEXT] = F::from_canonical_usize(op.address.context);
-        row[COL_SEGMENT] = F::from_canonical_usize(op.address.segment as usize);
+        row[COL_SEGMENT] = F::from_canonical_usize(op.address.segment);
         row[COL_VIRTUAL] = F::from_canonical_usize(op.address.virt);
         row[COL_READ_TIMESTAMP] = F::from_canonical_usize(op.read_timestamp);
         for i in 0..25 {
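
Note: dropping the `as usize` here looks like a fix for clippy's `unnecessary_cast` lint, which fires when a value is cast to its own type (the address `segment` field is already a `usize`). A minimal sketch of the pattern, using a stand-in struct rather than the crate's real `MemoryAddress`:

    // Hypothetical stand-in; the real MemoryAddress lives in the EVM crate.
    struct MemoryAddress {
        segment: usize,
    }

    fn main() {
        let addr = MemoryAddress { segment: 3 };
        // Before the fix: `addr.segment as usize` casts a usize to usize,
        // which clippy::unnecessary_cast flags. After: use the value directly.
        let column_value = addr.segment;
        println!("{column_value}");
    }
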
@@ -294,7 +294,7 @@ impl<F: RichField + Extendable<D>, const D: usize> KeccakSpongeStark<F, D> {
         mut sponge_state: [u32; KECCAK_WIDTH_U32S],
     ) {
         row.context = F::from_canonical_usize(op.base_address.context);
-        row.segment = F::from_canonical_usize(op.base_address.segment as usize);
+        row.segment = F::from_canonical_usize(op.base_address.segment);
         row.virt = F::from_canonical_usize(op.base_address.virt);
         row.timestamp = F::from_canonical_usize(op.timestamp);
         row.len = F::from_canonical_usize(op.len);
@@ -2,6 +2,7 @@
 #![allow(clippy::needless_range_loop)]
 #![allow(clippy::too_many_arguments)]
 #![allow(clippy::type_complexity)]
+#![allow(clippy::field_reassign_with_default)]
 #![feature(let_chains)]
 #![feature(generic_const_exprs)]
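
Note: the newly allowed `field_reassign_with_default` lint fires on the pattern of constructing a value with `Default::default()` and then overwriting individual fields; the crate-level allow keeps that style permitted. A small illustration of what the lint flags, with a placeholder struct:

    #[derive(Default, Debug)]
    struct Columns {
        is_real: u64,
        context: u64,
    }

    fn main() {
        // clippy::field_reassign_with_default flags this two-step construction and
        // suggests `Columns { is_real: 1, ..Default::default() }` instead.
        let mut row = Columns::default();
        row.is_real = 1;
        println!("{:?} (context = {})", row, row.context);
    }
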
@@ -108,7 +108,7 @@ impl Operation {
         }
     }

-    fn to_row<F: Field>(&self) -> [F; NUM_COLUMNS] {
+    fn into_row<F: Field>(self) -> [F; NUM_COLUMNS] {
         let Operation {
             operator,
             input0,
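
Note: renaming `to_row(&self)` to `into_row(self)` (and `to_tables` to `into_tables` below) matches the Rust naming convention that `to_*` methods borrow while `into_*` methods consume the receiver, which is what clippy's `wrong_self_convention` lint checks; treating that lint as the motivation is an inference, since the commit message only says "clippies". A minimal sketch of the convention with a placeholder type:

    #[derive(Clone, Copy)]
    struct Operation {
        input0: u64,
    }

    impl Operation {
        // Consumes the receiver, so the `into_` prefix matches the naming
        // convention that clippy::wrong_self_convention enforces.
        fn into_row(self) -> [u64; 1] {
            [self.input0]
        }
    }

    fn main() {
        let op = Operation { input0: 7 };
        assert_eq!(op.into_row(), [7]);
    }
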
@@ -164,7 +164,7 @@ impl<F: RichField, const D: usize> LogicStark<F, D> {

         let mut rows = Vec::with_capacity(padded_len);
         for op in operations {
-            rows.push(op.to_row());
+            rows.push(op.into_row());
         }

         // Pad to a power of two.
@@ -48,11 +48,11 @@ impl MemoryOp {
     /// depend on the next operation, such as `CONTEXT_FIRST_CHANGE`; those are generated later.
     /// It also does not generate columns such as `COUNTER`, which are generated later, after the
     /// trace has been transposed into column-major form.
-    fn to_row<F: Field>(&self) -> [F; NUM_COLUMNS] {
+    fn into_row<F: Field>(self) -> [F; NUM_COLUMNS] {
         let mut row = [F::ZERO; NUM_COLUMNS];
         row[FILTER] = F::from_bool(self.filter);
         row[TIMESTAMP] = F::from_canonical_usize(self.timestamp);
-        row[IS_READ] = F::from_bool(self.op == Read);
+        row[IS_READ] = F::from_bool(self.kind == Read);
         let MemoryAddress {
             context,
             segment,
@@ -76,7 +76,7 @@ fn get_max_range_check(memory_ops: &[MemoryOp]) -> usize {
             if curr.address.context != next.address.context {
                 next.address.context - curr.address.context - 1
             } else if curr.address.segment != next.address.segment {
-                next.address.segment as usize - curr.address.segment as usize - 1
+                next.address.segment - curr.address.segment - 1
             } else if curr.address.virt != next.address.virt {
                 next.address.virt - curr.address.virt - 1
             } else {
@@ -146,7 +146,7 @@ impl<F: RichField + Extendable<D>, const D: usize> MemoryStark<F, D> {

         let mut trace_rows = memory_ops
             .into_par_iter()
-            .map(|op| op.to_row())
+            .map(|op| op.into_row())
             .collect::<Vec<_>>();
         generate_first_change_flags_and_rc(trace_rows.as_mut_slice());
         trace_rows
@@ -170,7 +170,7 @@ impl<F: RichField + Extendable<D>, const D: usize> MemoryStark<F, D> {
         let num_ops_padded = num_ops.max(max_range_check + 1).next_power_of_two();
         let to_pad = num_ops_padded - num_ops;

-        let last_op = memory_ops.last().expect("No memory ops?").clone();
+        let last_op = *memory_ops.last().expect("No memory ops?");

         // We essentially repeat the last operation until our operation list has the desired size,
         // with a few changes:
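
Note: replacing `.clone()` with a dereference is the usual fix for clippy's `clone_on_copy` lint: when the element type is `Copy`, dereferencing the `&T` returned by `last()` copies it without the noise of a `clone()` call. A reduced sketch with a stand-in `Copy` struct:

    #[derive(Clone, Copy, Debug, PartialEq)]
    struct MemoryOp {
        timestamp: usize,
    }

    fn main() {
        let memory_ops = vec![MemoryOp { timestamp: 1 }, MemoryOp { timestamp: 2 }];
        // `last()` returns Option<&MemoryOp>; dereferencing copies the Copy value,
        // which is what clippy::clone_on_copy suggests in place of `.clone()`.
        let last_op = *memory_ops.last().expect("No memory ops?");
        assert_eq!(last_op, MemoryOp { timestamp: 2 });
    }
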
@@ -181,7 +181,7 @@ impl<F: RichField + Extendable<D>, const D: usize> MemoryStark<F, D> {
             memory_ops.push(MemoryOp {
                 filter: false,
                 timestamp: last_op.timestamp + i + 1,
-                op: Read,
+                kind: Read,
                 ..last_op
             });
         }
@@ -519,7 +519,7 @@ pub(crate) mod tests {
                     segment: segment as usize,
                     virt,
                 },
-                op: if is_read { Read } else { Write },
+                kind: if is_read { Read } else { Write },
                 value: vals,
             });
         }
@@ -62,7 +62,7 @@ pub struct MemoryOp {
     pub filter: bool,
     pub timestamp: usize,
     pub address: MemoryAddress,
-    pub op: MemoryOpKind,
+    pub kind: MemoryOpKind,
     pub value: U256,
 }
@@ -71,7 +71,7 @@ impl MemoryOp {
         channel: MemoryChannel,
         clock: usize,
         address: MemoryAddress,
-        op: MemoryOpKind,
+        kind: MemoryOpKind,
         value: U256,
     ) -> Self {
         let timestamp = clock * NUM_CHANNELS + channel.index();
@@ -79,7 +79,7 @@ impl MemoryOp {
             filter: true,
             timestamp,
             address,
-            op,
+            kind,
             value,
         }
     }
@@ -101,9 +101,12 @@ impl MemoryState {
     pub fn apply_ops(&mut self, ops: &[MemoryOp]) {
         for &op in ops {
             let MemoryOp {
-                address, op, value, ..
+                address,
+                kind,
+                value,
+                ..
             } = op;
-            if op == MemoryOpKind::Write {
+            if kind == MemoryOpKind::Write {
                 self.set(address, value);
             }
         }
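
Note: the `op` → `kind` field rename is most visible here: destructuring `MemoryOp { address, op, .. } = op` rebinds `op` to the field and shadows the loop variable of the same name. Renaming the field removes the shadowing and reads more clearly. A reduced sketch of the resulting shape, with simplified stand-in types:

    #[derive(Clone, Copy, PartialEq)]
    enum MemoryOpKind {
        Read,
        Write,
    }

    #[derive(Clone, Copy)]
    struct MemoryOp {
        kind: MemoryOpKind,
        value: u64,
    }

    fn apply_ops(ops: &[MemoryOp]) -> u64 {
        let mut last_written = 0;
        for &op in ops {
            // With the field named `kind`, the loop variable `op` is not shadowed.
            let MemoryOp { kind, value } = op;
            if kind == MemoryOpKind::Write {
                last_written = value;
            }
        }
        last_written
    }

    fn main() {
        let ops = [
            MemoryOp { kind: MemoryOpKind::Read, value: 1 },
            MemoryOp { kind: MemoryOpKind::Write, value: 2 },
        ];
        assert_eq!(apply_ops(&ops), 2);
    }
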
@@ -396,7 +396,7 @@ pub(crate) fn generate_syscall<F: Field>(
     state: &mut GenerationState<F>,
     mut row: CpuColumnsView<F>,
 ) -> Result<(), ProgramError> {
-    let handler_jumptable_addr = KERNEL.global_labels["syscall_jumptable"] as usize;
+    let handler_jumptable_addr = KERNEL.global_labels["syscall_jumptable"];
     let handler_addr_addr = handler_jumptable_addr + (opcode as usize);
     let (handler_addr0, log_in0) = mem_read_gp_with_log_and_fill(
         0,
@@ -108,7 +108,7 @@ impl<T: Copy> Traces<T> {
         self.cpu.len()
     }

-    pub fn to_tables<const D: usize>(
+    pub fn into_tables<const D: usize>(
         self,
         all_stark: &AllStark<T, D>,
         config: &StarkConfig,
@@ -142,8 +142,7 @@ pub(crate) fn stack_pop_with_log_and_fill<const N: usize, F: Field>(
             Segment::Stack,
             state.registers.stack_len - 1 - i,
         );
-        let res = mem_read_gp_with_log_and_fill(i, address, state, row);
-        res
+        mem_read_gp_with_log_and_fill(i, address, state, row)
     });

     state.registers.stack_len -= N;
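
Note: collapsing `let res = ...; res` into the bare expression is the standard fix for clippy's `let_and_return` lint: a binding that is immediately returned adds nothing, so the closure can end with the expression itself. A tiny sketch with a hypothetical helper standing in for `mem_read_gp_with_log_and_fill`:

    // Hypothetical stand-in for mem_read_gp_with_log_and_fill.
    fn read_value(i: usize) -> usize {
        i * 2
    }

    fn main() {
        // After the fix, the closure body is the call expression itself rather
        // than `let res = read_value(i); res`, which clippy::let_and_return flags.
        let values: [usize; 4] = std::array::from_fn(read_value);
        assert_eq!(values, [0, 2, 4, 6]);
    }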