diff --git a/Stwo_wrapper/Cargo.toml b/Stwo_wrapper/Cargo.toml
deleted file mode 100644
index 0f314a4..0000000
--- a/Stwo_wrapper/Cargo.toml
+++ /dev/null
@@ -1,22 +0,0 @@
-[workspace]
-members = ["crates/prover"]
-resolver = "2"
-
-[workspace.package]
-version = "0.1.1"
-edition = "2021"
-
-[workspace.dependencies]
-blake2 = "0.10.6"
-blake3 = "1.5.0"
-educe = "0.5.0"
-hex = "0.4.3"
-itertools = "0.12.0"
-num-traits = "0.2.17"
-thiserror = "1.0.56"
-bytemuck = "1.14.3"
-tracing = "0.1.40"
-
-[profile.bench]
-codegen-units = 1
-lto = true
diff --git a/Stwo_wrapper/LICENSE b/Stwo_wrapper/LICENSE
deleted file mode 100644
index 2e0cecd..0000000
--- a/Stwo_wrapper/LICENSE
+++ /dev/null
@@ -1,201 +0,0 @@
- Apache License
- Version 2.0, January 2004
- http://www.apache.org/licenses/
-
- TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
- 1. Definitions.
-
- "License" shall mean the terms and conditions for use, reproduction,
- and distribution as defined by Sections 1 through 9 of this document.
-
- "Licensor" shall mean the copyright owner or entity authorized by
- the copyright owner that is granting the License.
-
- "Legal Entity" shall mean the union of the acting entity and all
- other entities that control, are controlled by, or are under common
- control with that entity. For the purposes of this definition,
- "control" means (i) the power, direct or indirect, to cause the
- direction or management of such entity, whether by contract or
- otherwise, or (ii) ownership of fifty percent (50%) or more of the
- outstanding shares, or (iii) beneficial ownership of such entity.
-
- "You" (or "Your") shall mean an individual or Legal Entity
- exercising permissions granted by this License.
-
- "Source" form shall mean the preferred form for making modifications,
- including but not limited to software source code, documentation
- source, and configuration files.
-
- "Object" form shall mean any form resulting from mechanical
- transformation or translation of a Source form, including but
- not limited to compiled object code, generated documentation,
- and conversions to other media types.
-
- "Work" shall mean the work of authorship, whether in Source or
- Object form, made available under the License, as indicated by a
- copyright notice that is included in or attached to the work
- (an example is provided in the Appendix below).
-
- "Derivative Works" shall mean any work, whether in Source or Object
- form, that is based on (or derived from) the Work and for which the
- editorial revisions, annotations, elaborations, or other modifications
- represent, as a whole, an original work of authorship. For the purposes
- of this License, Derivative Works shall not include works that remain
- separable from, or merely link (or bind by name) to the interfaces of,
- the Work and Derivative Works thereof.
-
- "Contribution" shall mean any work of authorship, including
- the original version of the Work and any modifications or additions
- to that Work or Derivative Works thereof, that is intentionally
- submitted to Licensor for inclusion in the Work by the copyright owner
- or by an individual or Legal Entity authorized to submit on behalf of
- the copyright owner. For the purposes of this definition, "submitted"
- means any form of electronic, verbal, or written communication sent
- to the Licensor or its representatives, including but not limited to
- communication on electronic mailing lists, source code control systems,
- and issue tracking systems that are managed by, or on behalf of, the
- Licensor for the purpose of discussing and improving the Work, but
- excluding communication that is conspicuously marked or otherwise
- designated in writing by the copyright owner as "Not a Contribution."
-
- "Contributor" shall mean Licensor and any individual or Legal Entity
- on behalf of whom a Contribution has been received by Licensor and
- subsequently incorporated within the Work.
-
- 2. Grant of Copyright License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- copyright license to reproduce, prepare Derivative Works of,
- publicly display, publicly perform, sublicense, and distribute the
- Work and such Derivative Works in Source or Object form.
-
- 3. Grant of Patent License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- (except as stated in this section) patent license to make, have made,
- use, offer to sell, sell, import, and otherwise transfer the Work,
- where such license applies only to those patent claims licensable
- by such Contributor that are necessarily infringed by their
- Contribution(s) alone or by combination of their Contribution(s)
- with the Work to which such Contribution(s) was submitted. If You
- institute patent litigation against any entity (including a
- cross-claim or counterclaim in a lawsuit) alleging that the Work
- or a Contribution incorporated within the Work constitutes direct
- or contributory patent infringement, then any patent licenses
- granted to You under this License for that Work shall terminate
- as of the date such litigation is filed.
-
- 4. Redistribution. You may reproduce and distribute copies of the
- Work or Derivative Works thereof in any medium, with or without
- modifications, and in Source or Object form, provided that You
- meet the following conditions:
-
- (a) You must give any other recipients of the Work or
- Derivative Works a copy of this License; and
-
- (b) You must cause any modified files to carry prominent notices
- stating that You changed the files; and
-
- (c) You must retain, in the Source form of any Derivative Works
- that You distribute, all copyright, patent, trademark, and
- attribution notices from the Source form of the Work,
- excluding those notices that do not pertain to any part of
- the Derivative Works; and
-
- (d) If the Work includes a "NOTICE" text file as part of its
- distribution, then any Derivative Works that You distribute must
- include a readable copy of the attribution notices contained
- within such NOTICE file, excluding those notices that do not
- pertain to any part of the Derivative Works, in at least one
- of the following places: within a NOTICE text file distributed
- as part of the Derivative Works; within the Source form or
- documentation, if provided along with the Derivative Works; or,
- within a display generated by the Derivative Works, if and
- wherever such third-party notices normally appear. The contents
- of the NOTICE file are for informational purposes only and
- do not modify the License. You may add Your own attribution
- notices within Derivative Works that You distribute, alongside
- or as an addendum to the NOTICE text from the Work, provided
- that such additional attribution notices cannot be construed
- as modifying the License.
-
- You may add Your own copyright statement to Your modifications and
- may provide additional or different license terms and conditions
- for use, reproduction, or distribution of Your modifications, or
- for any such Derivative Works as a whole, provided Your use,
- reproduction, and distribution of the Work otherwise complies with
- the conditions stated in this License.
-
- 5. Submission of Contributions. Unless You explicitly state otherwise,
- any Contribution intentionally submitted for inclusion in the Work
- by You to the Licensor shall be under the terms and conditions of
- this License, without any additional terms or conditions.
- Notwithstanding the above, nothing herein shall supersede or modify
- the terms of any separate license agreement you may have executed
- with Licensor regarding such Contributions.
-
- 6. Trademarks. This License does not grant permission to use the trade
- names, trademarks, service marks, or product names of the Licensor,
- except as required for reasonable and customary use in describing the
- origin of the Work and reproducing the content of the NOTICE file.
-
- 7. Disclaimer of Warranty. Unless required by applicable law or
- agreed to in writing, Licensor provides the Work (and each
- Contributor provides its Contributions) on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
- implied, including, without limitation, any warranties or conditions
- of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
- PARTICULAR PURPOSE. You are solely responsible for determining the
- appropriateness of using or redistributing the Work and assume any
- risks associated with Your exercise of permissions under this License.
-
- 8. Limitation of Liability. In no event and under no legal theory,
- whether in tort (including negligence), contract, or otherwise,
- unless required by applicable law (such as deliberate and grossly
- negligent acts) or agreed to in writing, shall any Contributor be
- liable to You for damages, including any direct, indirect, special,
- incidental, or consequential damages of any character arising as a
- result of this License or out of the use or inability to use the
- Work (including but not limited to damages for loss of goodwill,
- work stoppage, computer failure or malfunction, or any and all
- other commercial damages or losses), even if such Contributor
- has been advised of the possibility of such damages.
-
- 9. Accepting Warranty or Additional Liability. While redistributing
- the Work or Derivative Works thereof, You may choose to offer,
- and charge a fee for, acceptance of support, warranty, indemnity,
- or other liability obligations and/or rights consistent with this
- License. However, in accepting such obligations, You may act only
- on Your own behalf and on Your sole responsibility, not on behalf
- of any other Contributor, and only if You agree to indemnify,
- defend, and hold each Contributor harmless for any liability
- incurred by, or claims asserted against, such Contributor by reason
- of your accepting any such warranty or additional liability.
-
- END OF TERMS AND CONDITIONS
-
- APPENDIX: How to apply the Apache License to your work.
-
- To apply the Apache License to your work, attach the following
- boilerplate notice, with the fields enclosed by brackets "[]"
- replaced with your own identifying information. (Don't include
- the brackets!) The text should be enclosed in the appropriate
- comment syntax for the file format. We also recommend that a
- file or class name and description of purpose be included on the
- same "printed page" as the copyright notice for easier
- identification within third-party archives.
-
- Copyright 2024 StarkWare Industries Ltd.
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
\ No newline at end of file
diff --git a/Stwo_wrapper/README.md b/Stwo_wrapper/README.md
deleted file mode 100644
index ece38cc..0000000
--- a/Stwo_wrapper/README.md
+++ /dev/null
@@ -1,62 +0,0 @@
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-# Stwo
-
-## 🌟 About
-
-Stwo is a next generation implementation of a [CSTARK](https://eprint.iacr.org/2024/278) prover and verifier, written in Rust 🦀.
-
-> **Stwo is a work in progress.**
->
-> It is not recommended to use it in a production setting yet.
-
-## 🚀 Key Features
-
-- **Circle STARKs:** Based on the latest cryptographic research and innovations in the ZK field.
-- **High performance:** Stwo is designed to be extremely fast and efficient.
-- **Flexible:** Adaptable for various validity proof applications.
-
-## 📊 Benchmarks
-
-Run `poseidon_benchmark.sh` to execute a single-threaded Poseidon2 hash proof benchmark.
-
-Further benchmarks can be run using `cargo bench`.
-
-Visual representation of benchmarks can be found [here](https://starkware-libs.github.io/stwo/dev/bench/index.html).
-
-## 📜 License
-
-This project is licensed under the **Apache 2.0 license**.
-
-See [LICENSE](LICENSE) for more information.
-
-
-
-
-
diff --git a/Stwo_wrapper/WORKSPACE b/Stwo_wrapper/WORKSPACE
deleted file mode 100644
index e69de29..0000000
diff --git a/Stwo_wrapper/crates/prover/Cargo.toml b/Stwo_wrapper/crates/prover/Cargo.toml
deleted file mode 100644
index 587e655..0000000
--- a/Stwo_wrapper/crates/prover/Cargo.toml
+++ /dev/null
@@ -1,110 +0,0 @@
-[package]
-name = "stwo-prover"
-version.workspace = true
-edition.workspace = true
-
-[features]
-parallel = ["rayon"]
-slow-tests = []
-
-# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
-
-[dependencies]
-blake2.workspace = true
-blake3.workspace = true
-bytemuck = { workspace = true, features = ["derive", "extern_crate_alloc"] }
-cfg-if = "1.0.0"
-downcast-rs = "1.2"
-educe.workspace = true
-hex.workspace = true
-itertools.workspace = true
-num-traits.workspace = true
-rand = { version = "0.8.5", default-features = false, features = ["small_rng"] }
-starknet-crypto = "0.6.2"
-starknet-ff = "0.3.7"
-ark-bls12-381 = "0.4.0"
-ark-ff = "0.4.0"
-thiserror.workspace = true
-tracing.workspace = true
-rayon = { version = "1.10.0", optional = true }
-serde = { version = "1.0", features = ["derive"] }
-crypto-bigint = "0.5.5"
-ark-serialize = "0.4.0"
-serde_json = "1.0.116"
-
-[dev-dependencies]
-aligned = "0.4.2"
-test-log = { version = "0.2.15", features = ["trace"] }
-tracing-subscriber = "0.3.18"
-[target.'cfg(all(target_family = "wasm", not(target_os = "wasi")))'.dev-dependencies]
-wasm-bindgen-test = "0.3.43"
-
-[target.'cfg(not(target_arch = "wasm32"))'.dev-dependencies.criterion]
-features = ["html_reports"]
-version = "0.5.1"
-
-# Default features cause compile error:
-# "Rayon cannot be used when targeting wasi32. Try disabling default features."
-[target.'cfg(target_arch = "wasm32")'.dev-dependencies.criterion]
-default-features = false
-features = ["html_reports"]
-version = "0.5.1"
-
-[lib]
-bench = false
-crate-type = ["cdylib", "lib"]
-
-[lints.rust]
-warnings = "deny"
-future-incompatible = "deny"
-nonstandard-style = "deny"
-rust-2018-idioms = "deny"
-unused = "deny"
-
-[[bench]]
-harness = false
-name = "bit_rev"
-
-[[bench]]
-harness = false
-name = "eval_at_point"
-
-[[bench]]
-harness = false
-name = "fft"
-
-[[bench]]
-harness = false
-name = "field"
-
-[[bench]]
-harness = false
-name = "fri"
-
-[[bench]]
-harness = false
-name = "lookups"
-
-[[bench]]
-harness = false
-name = "matrix"
-
-[[bench]]
-harness = false
-name = "merkle"
-
-[[bench]]
-harness = false
-name = "poseidon"
-
-[[bench]]
-harness = false
-name = "prefix_sum"
-
-[[bench]]
-harness = false
-name = "quotients"
-
-[[bench]]
-harness = false
-name = "pcs"
diff --git a/Stwo_wrapper/crates/prover/benches/README.md b/Stwo_wrapper/crates/prover/benches/README.md
deleted file mode 100644
index 8e6d73f..0000000
--- a/Stwo_wrapper/crates/prover/benches/README.md
+++ /dev/null
@@ -1,2 +0,0 @@
-dev benchmark results can be seen at
-https://starkware-libs.github.io/stwo/dev/bench/index.html
diff --git a/Stwo_wrapper/crates/prover/benches/bit_rev.rs b/Stwo_wrapper/crates/prover/benches/bit_rev.rs
deleted file mode 100644
index 6e287e6..0000000
--- a/Stwo_wrapper/crates/prover/benches/bit_rev.rs
+++ /dev/null
@@ -1,39 +0,0 @@
-#![feature(iter_array_chunks)]
-
-use criterion::{criterion_group, criterion_main, BatchSize, Criterion};
-use itertools::Itertools;
-use stwo_prover::core::fields::m31::BaseField;
-
-pub fn cpu_bit_rev(c: &mut Criterion) {
- use stwo_prover::core::utils::bit_reverse;
- // TODO(andrew): Consider using same size for all.
- const SIZE: usize = 1 << 24;
- let data = (0..SIZE).map(BaseField::from).collect_vec();
- c.bench_function("cpu bit_rev 24bit", |b| {
- b.iter_batched(
- || data.clone(),
- |mut data| bit_reverse(&mut data),
- BatchSize::LargeInput,
- );
- });
-}
-
-pub fn simd_bit_rev(c: &mut Criterion) {
- use stwo_prover::core::backend::simd::bit_reverse::bit_reverse_m31;
- use stwo_prover::core::backend::simd::column::BaseColumn;
- const SIZE: usize = 1 << 26;
-    let data = (0..SIZE).map(BaseField::from).collect::<BaseColumn>();
- c.bench_function("simd bit_rev 26bit", |b| {
- b.iter_batched(
- || data.data.clone(),
- |mut data| bit_reverse_m31(&mut data),
- BatchSize::LargeInput,
- );
- });
-}
-
-criterion_group!(
- name = bit_rev;
- config = Criterion::default().sample_size(10);
- targets = simd_bit_rev, cpu_bit_rev);
-criterion_main!(bit_rev);
diff --git a/Stwo_wrapper/crates/prover/benches/eval_at_point.rs b/Stwo_wrapper/crates/prover/benches/eval_at_point.rs
deleted file mode 100644
index 64d1eec..0000000
--- a/Stwo_wrapper/crates/prover/benches/eval_at_point.rs
+++ /dev/null
@@ -1,35 +0,0 @@
-use criterion::{black_box, criterion_group, criterion_main, Criterion};
-use rand::rngs::SmallRng;
-use rand::{Rng, SeedableRng};
-use stwo_prover::core::backend::cpu::CpuBackend;
-use stwo_prover::core::backend::simd::SimdBackend;
-use stwo_prover::core::circle::CirclePoint;
-use stwo_prover::core::fields::m31::BaseField;
-use stwo_prover::core::poly::circle::{CirclePoly, PolyOps};
-
-const LOG_SIZE: u32 = 20;
-
-fn bench_eval_at_secure_point<B: PolyOps>(c: &mut Criterion, id: &str) {
- let poly = CirclePoly::new((0..1 << LOG_SIZE).map(BaseField::from).collect());
- let mut rng = SmallRng::seed_from_u64(0);
- let x = rng.gen();
- let y = rng.gen();
- let point = CirclePoint { x, y };
- c.bench_function(
- &format!("{id} eval_at_secure_field_point 2^{LOG_SIZE}"),
- |b| {
- b.iter(|| B::eval_at_point(black_box(&poly), black_box(point)));
- },
- );
-}
-
-fn eval_at_secure_point_benches(c: &mut Criterion) {
-    bench_eval_at_secure_point::<SimdBackend>(c, "simd");
-    bench_eval_at_secure_point::<CpuBackend>(c, "cpu");
-}
-
-criterion_group!(
- name = benches;
- config = Criterion::default().sample_size(10);
- targets = eval_at_secure_point_benches);
-criterion_main!(benches);
diff --git a/Stwo_wrapper/crates/prover/benches/fft.rs b/Stwo_wrapper/crates/prover/benches/fft.rs
deleted file mode 100644
index 35841d7..0000000
--- a/Stwo_wrapper/crates/prover/benches/fft.rs
+++ /dev/null
@@ -1,131 +0,0 @@
-#![feature(iter_array_chunks)]
-
-use std::hint::black_box;
-use std::mem::{size_of_val, transmute};
-
-use criterion::{criterion_group, criterion_main, BatchSize, BenchmarkId, Criterion, Throughput};
-use itertools::Itertools;
-use stwo_prover::core::backend::simd::column::BaseColumn;
-use stwo_prover::core::backend::simd::fft::ifft::{
- get_itwiddle_dbls, ifft, ifft3_loop, ifft_vecwise_loop,
-};
-use stwo_prover::core::backend::simd::fft::rfft::{fft, get_twiddle_dbls};
-use stwo_prover::core::backend::simd::fft::transpose_vecs;
-use stwo_prover::core::backend::simd::m31::PackedBaseField;
-use stwo_prover::core::fields::m31::BaseField;
-use stwo_prover::core::poly::circle::CanonicCoset;
-
-pub fn simd_ifft(c: &mut Criterion) {
- let mut group = c.benchmark_group("iffts");
-
- for log_size in 16..=28 {
- let domain = CanonicCoset::new(log_size).circle_domain();
- let twiddle_dbls = get_itwiddle_dbls(domain.half_coset);
- let twiddle_dbls_refs = twiddle_dbls.iter().map(|x| x.as_slice()).collect_vec();
- let values: BaseColumn = (0..domain.size()).map(BaseField::from).collect();
- group.throughput(Throughput::Bytes(size_of_val(&*values.data) as u64));
- group.bench_function(BenchmarkId::new("simd ifft", log_size), |b| {
- b.iter_batched(
- || values.clone().data,
- |mut data| unsafe {
- ifft(
- transmute(data.as_mut_ptr()),
- black_box(&twiddle_dbls_refs),
- black_box(log_size as usize),
- );
- },
- BatchSize::LargeInput,
- )
- });
- }
-}
-
-pub fn simd_ifft_parts(c: &mut Criterion) {
- const LOG_SIZE: u32 = 14;
-
- let domain = CanonicCoset::new(LOG_SIZE).circle_domain();
- let twiddle_dbls = get_itwiddle_dbls(domain.half_coset);
- let twiddle_dbls_refs = twiddle_dbls.iter().map(|x| x.as_slice()).collect_vec();
- let values: BaseColumn = (0..domain.size()).map(BaseField::from).collect();
-
- let mut group = c.benchmark_group("ifft parts");
-
- // Note: These benchmarks run only on 2^LOG_SIZE elements because of their parameters.
- // Increasing the figure above won't change the runtime of these benchmarks.
- group.throughput(Throughput::Bytes(4 << LOG_SIZE));
- group.bench_function(format!("simd ifft_vecwise_loop 2^{LOG_SIZE}"), |b| {
- b.iter_batched(
- || values.clone().data,
- |mut values| unsafe {
- ifft_vecwise_loop(
- transmute(values.as_mut_ptr()),
- black_box(&twiddle_dbls_refs),
- black_box(9),
- black_box(0),
- )
- },
- BatchSize::LargeInput,
- );
- });
- group.bench_function(format!("simd ifft3_loop 2^{LOG_SIZE}"), |b| {
- b.iter_batched(
- || values.clone().data,
- |mut values| unsafe {
- ifft3_loop(
- transmute(values.as_mut_ptr()),
- black_box(&twiddle_dbls_refs[3..]),
- black_box(7),
- black_box(4),
- black_box(0),
- )
- },
- BatchSize::LargeInput,
- );
- });
-
- const TRANSPOSE_LOG_SIZE: u32 = 20;
- let transpose_values: BaseColumn = (0..1 << TRANSPOSE_LOG_SIZE).map(BaseField::from).collect();
- group.throughput(Throughput::Bytes(4 << TRANSPOSE_LOG_SIZE));
- group.bench_function(format!("simd transpose_vecs 2^{TRANSPOSE_LOG_SIZE}"), |b| {
- b.iter_batched(
- || transpose_values.clone().data,
- |mut values| unsafe {
- transpose_vecs(
- transmute(values.as_mut_ptr()),
- black_box(TRANSPOSE_LOG_SIZE as usize - 4),
- )
- },
- BatchSize::LargeInput,
- );
- });
-}
-
-pub fn simd_rfft(c: &mut Criterion) {
- const LOG_SIZE: u32 = 20;
-
- let domain = CanonicCoset::new(LOG_SIZE).circle_domain();
- let twiddle_dbls = get_twiddle_dbls(domain.half_coset);
- let twiddle_dbls_refs = twiddle_dbls.iter().map(|x| x.as_slice()).collect_vec();
- let values: BaseColumn = (0..domain.size()).map(BaseField::from).collect();
-
- c.bench_function("simd rfft 20bit", |b| {
- b.iter_with_large_drop(|| unsafe {
-            let mut target = Vec::<PackedBaseField>::with_capacity(values.data.len());
- #[allow(clippy::uninit_vec)]
- target.set_len(values.data.len());
-
- fft(
- black_box(transmute(values.data.as_ptr())),
- transmute(target.as_mut_ptr()),
- black_box(&twiddle_dbls_refs),
- black_box(LOG_SIZE as usize),
- )
- });
- });
-}
-
-criterion_group!(
- name = benches;
- config = Criterion::default().sample_size(10);
- targets = simd_ifft, simd_ifft_parts, simd_rfft);
-criterion_main!(benches);
diff --git a/Stwo_wrapper/crates/prover/benches/field.rs b/Stwo_wrapper/crates/prover/benches/field.rs
deleted file mode 100644
index acb318c..0000000
--- a/Stwo_wrapper/crates/prover/benches/field.rs
+++ /dev/null
@@ -1,150 +0,0 @@
-use criterion::{criterion_group, criterion_main, Criterion};
-use num_traits::One;
-use rand::rngs::SmallRng;
-use rand::{Rng, SeedableRng};
-use stwo_prover::core::backend::simd::m31::{PackedBaseField, N_LANES};
-use stwo_prover::core::fields::cm31::CM31;
-use stwo_prover::core::fields::m31::{BaseField, M31};
-use stwo_prover::core::fields::qm31::SecureField;
-
-pub const N_ELEMENTS: usize = 1 << 16;
-pub const N_STATE_ELEMENTS: usize = 8;
-
-pub fn m31_operations_bench(c: &mut Criterion) {
- let mut rng = SmallRng::seed_from_u64(0);
-    let elements: Vec<M31> = (0..N_ELEMENTS).map(|_| rng.gen()).collect();
- let mut state: [M31; N_STATE_ELEMENTS] = rng.gen();
-
- c.bench_function("M31 mul", |b| {
- b.iter(|| {
- for elem in &elements {
- for _ in 0..128 {
- for state_elem in &mut state {
- *state_elem *= *elem;
- }
- }
- }
- })
- });
-
- c.bench_function("M31 add", |b| {
- b.iter(|| {
- for elem in &elements {
- for _ in 0..128 {
- for state_elem in &mut state {
- *state_elem += *elem;
- }
- }
- }
- })
- });
-}
-
-pub fn cm31_operations_bench(c: &mut Criterion) {
- let mut rng = SmallRng::seed_from_u64(0);
-    let elements: Vec<CM31> = (0..N_ELEMENTS).map(|_| rng.gen()).collect();
- let mut state: [CM31; N_STATE_ELEMENTS] = rng.gen();
-
- c.bench_function("CM31 mul", |b| {
- b.iter(|| {
- for elem in &elements {
- for _ in 0..128 {
- for state_elem in &mut state {
- *state_elem *= *elem;
- }
- }
- }
- })
- });
-
- c.bench_function("CM31 add", |b| {
- b.iter(|| {
- for elem in &elements {
- for _ in 0..128 {
- for state_elem in &mut state {
- *state_elem += *elem;
- }
- }
- }
- })
- });
-}
-
-pub fn qm31_operations_bench(c: &mut Criterion) {
- let mut rng = SmallRng::seed_from_u64(0);
-    let elements: Vec<SecureField> = (0..N_ELEMENTS).map(|_| rng.gen()).collect();
- let mut state: [SecureField; N_STATE_ELEMENTS] = rng.gen();
-
- c.bench_function("SecureField mul", |b| {
- b.iter(|| {
- for elem in &elements {
- for _ in 0..128 {
- for state_elem in &mut state {
- *state_elem *= *elem;
- }
- }
- }
- })
- });
-
- c.bench_function("SecureField add", |b| {
- b.iter(|| {
- for elem in &elements {
- for _ in 0..128 {
- for state_elem in &mut state {
- *state_elem += *elem;
- }
- }
- }
- })
- });
-}
-
-pub fn simd_m31_operations_bench(c: &mut Criterion) {
- let mut rng = SmallRng::seed_from_u64(0);
-    let elements: Vec<PackedBaseField> = (0..N_ELEMENTS / N_LANES).map(|_| rng.gen()).collect();
- let mut states = vec![PackedBaseField::broadcast(BaseField::one()); N_STATE_ELEMENTS];
-
- c.bench_function("mul_simd", |b| {
- b.iter(|| {
- for elem in elements.iter() {
- for _ in 0..128 {
- for state in states.iter_mut() {
- *state *= *elem;
- }
- }
- }
- })
- });
-
- c.bench_function("add_simd", |b| {
- b.iter(|| {
- for elem in elements.iter() {
- for _ in 0..128 {
- for state in states.iter_mut() {
- *state += *elem;
- }
- }
- }
- })
- });
-
- c.bench_function("sub_simd", |b| {
- b.iter(|| {
- for elem in elements.iter() {
- for _ in 0..128 {
- for state in states.iter_mut() {
- *state -= *elem;
- }
- }
- }
- })
- });
-}
-
-criterion_group!(
- name = benches;
- config = Criterion::default().sample_size(10);
- targets = m31_operations_bench, cm31_operations_bench, qm31_operations_bench,
- simd_m31_operations_bench);
-criterion_main!(benches);
diff --git a/Stwo_wrapper/crates/prover/benches/fri.rs b/Stwo_wrapper/crates/prover/benches/fri.rs
deleted file mode 100644
index 1c38a0e..0000000
--- a/Stwo_wrapper/crates/prover/benches/fri.rs
+++ /dev/null
@@ -1,35 +0,0 @@
-use criterion::{black_box, criterion_group, criterion_main, Criterion};
-use stwo_prover::core::backend::CpuBackend;
-use stwo_prover::core::fields::m31::BaseField;
-use stwo_prover::core::fields::qm31::SecureField;
-use stwo_prover::core::fields::secure_column::SecureColumnByCoords;
-use stwo_prover::core::fri::FriOps;
-use stwo_prover::core::poly::circle::{CanonicCoset, PolyOps};
-use stwo_prover::core::poly::line::{LineDomain, LineEvaluation};
-
-fn folding_benchmark(c: &mut Criterion) {
- const LOG_SIZE: u32 = 12;
- let domain = LineDomain::new(CanonicCoset::new(LOG_SIZE + 1).half_coset());
- let evals = LineEvaluation::new(
- domain,
- SecureColumnByCoords {
- columns: std::array::from_fn(|i| {
- vec![BaseField::from_u32_unchecked(i as u32); 1 << LOG_SIZE]
- }),
- },
- );
- let alpha = SecureField::from_u32_unchecked(2213980, 2213981, 2213982, 2213983);
- let twiddles = CpuBackend::precompute_twiddles(domain.coset());
- c.bench_function("fold_line", |b| {
- b.iter(|| {
- black_box(CpuBackend::fold_line(
- black_box(&evals),
- black_box(alpha),
- &twiddles,
- ));
- })
- });
-}
-
-criterion_group!(benches, folding_benchmark);
-criterion_main!(benches);
diff --git a/Stwo_wrapper/crates/prover/benches/lookups.rs b/Stwo_wrapper/crates/prover/benches/lookups.rs
deleted file mode 100644
index ac45a95..0000000
--- a/Stwo_wrapper/crates/prover/benches/lookups.rs
+++ /dev/null
@@ -1,104 +0,0 @@
-use criterion::{criterion_group, criterion_main, BatchSize, Criterion};
-use rand::distributions::{Distribution, Standard};
-use rand::rngs::SmallRng;
-use rand::{Rng, SeedableRng};
-use stwo_prover::core::backend::simd::SimdBackend;
-use stwo_prover::core::backend::CpuBackend;
-use stwo_prover::core::channel::Blake2sChannel;
-use stwo_prover::core::fields::Field;
-use stwo_prover::core::lookups::gkr_prover::{prove_batch, GkrOps, Layer};
-use stwo_prover::core::lookups::mle::{Mle, MleOps};
-
-const LOG_N_ROWS: u32 = 16;
-
-fn bench_gkr_grand_product<B: GkrOps>(c: &mut Criterion, id: &str) {
- let mut rng = SmallRng::seed_from_u64(0);
-    let layer = Layer::<B>::GrandProduct(gen_random_mle(&mut rng, LOG_N_ROWS));
- c.bench_function(&format!("{id} grand product lookup 2^{LOG_N_ROWS}"), |b| {
- b.iter_batched(
- || layer.clone(),
- |layer| prove_batch(&mut Blake2sChannel::default(), vec![layer]),
- BatchSize::LargeInput,
- )
- });
- c.bench_function(
- &format!("{id} grand product lookup batch 4x 2^{LOG_N_ROWS}"),
- |b| {
- b.iter_batched(
- || vec![layer.clone(), layer.clone(), layer.clone(), layer.clone()],
- |layers| prove_batch(&mut Blake2sChannel::default(), layers),
- BatchSize::LargeInput,
- )
- },
- );
-}
-
-fn bench_gkr_logup_generic<B: GkrOps>(c: &mut Criterion, id: &str) {
- let mut rng = SmallRng::seed_from_u64(0);
-    let generic_layer = Layer::<B>::LogUpGeneric {
- numerators: gen_random_mle(&mut rng, LOG_N_ROWS),
- denominators: gen_random_mle(&mut rng, LOG_N_ROWS),
- };
- c.bench_function(&format!("{id} generic logup lookup 2^{LOG_N_ROWS}"), |b| {
- b.iter_batched(
- || generic_layer.clone(),
- |layer| prove_batch(&mut Blake2sChannel::default(), vec![layer]),
- BatchSize::LargeInput,
- )
- });
-}
-
-fn bench_gkr_logup_multiplicities<B: GkrOps>(c: &mut Criterion, id: &str) {
- let mut rng = SmallRng::seed_from_u64(0);
-    let multiplicities_layer = Layer::<B>::LogUpMultiplicities {
- numerators: gen_random_mle(&mut rng, LOG_N_ROWS),
- denominators: gen_random_mle(&mut rng, LOG_N_ROWS),
- };
- c.bench_function(
- &format!("{id} multiplicities logup lookup 2^{LOG_N_ROWS}"),
- |b| {
- b.iter_batched(
- || multiplicities_layer.clone(),
- |layer| prove_batch(&mut Blake2sChannel::default(), vec![layer]),
- BatchSize::LargeInput,
- )
- },
- );
-}
-
-fn bench_gkr_logup_singles<B: GkrOps>(c: &mut Criterion, id: &str) {
- let mut rng = SmallRng::seed_from_u64(0);
-    let singles_layer = Layer::<B>::LogUpSingles {
- denominators: gen_random_mle(&mut rng, LOG_N_ROWS),
- };
- c.bench_function(&format!("{id} singles logup lookup 2^{LOG_N_ROWS}"), |b| {
- b.iter_batched(
- || singles_layer.clone(),
- |layer| prove_batch(&mut Blake2sChannel::default(), vec![layer]),
- BatchSize::LargeInput,
- )
- });
-}
-
-/// Generates a random multilinear polynomial.
-fn gen_random_mle<B: MleOps<F>, F: Field>(rng: &mut impl Rng, n_variables: u32) -> Mle<B, F>
-where
-    Standard: Distribution<F>,
-{
- Mle::new((0..1 << n_variables).map(|_| rng.gen()).collect())
-}
-
-fn gkr_lookup_benches(c: &mut Criterion) {
-    bench_gkr_grand_product::<SimdBackend>(c, "simd");
-    bench_gkr_logup_generic::<SimdBackend>(c, "simd");
-    bench_gkr_logup_multiplicities::<SimdBackend>(c, "simd");
-    bench_gkr_logup_singles::<SimdBackend>(c, "simd");
-
-    bench_gkr_grand_product::<CpuBackend>(c, "cpu");
-    bench_gkr_logup_generic::<CpuBackend>(c, "cpu");
-    bench_gkr_logup_multiplicities::<CpuBackend>(c, "cpu");
-    bench_gkr_logup_singles::<CpuBackend>(c, "cpu");
-}
-
-criterion_group!(benches, gkr_lookup_benches);
-criterion_main!(benches);
diff --git a/Stwo_wrapper/crates/prover/benches/matrix.rs b/Stwo_wrapper/crates/prover/benches/matrix.rs
deleted file mode 100644
index 8e44a98..0000000
--- a/Stwo_wrapper/crates/prover/benches/matrix.rs
+++ /dev/null
@@ -1,63 +0,0 @@
-use criterion::{black_box, criterion_group, criterion_main, Criterion};
-use rand::rngs::SmallRng;
-use rand::{Rng, SeedableRng};
-use stwo_prover::core::fields::m31::{M31, P};
-use stwo_prover::core::fields::qm31::QM31;
-use stwo_prover::math::matrix::{RowMajorMatrix, SquareMatrix};
-
-const MATRIX_SIZE: usize = 24;
-const QM31_MATRIX_SIZE: usize = 6;
-
-// TODO(ShaharS): Share code with other benchmarks.
-fn row_major_matrix_multiplication_bench(c: &mut Criterion) {
- let mut rng = SmallRng::seed_from_u64(0);
-
-    let matrix_m31 = RowMajorMatrix::<M31, MATRIX_SIZE>::new(
- (0..MATRIX_SIZE.pow(2))
- .map(|_| rng.gen())
-            .collect::<Vec<M31>>(),
- );
-
-    let matrix_qm31 = RowMajorMatrix::<QM31, QM31_MATRIX_SIZE>::new(
- (0..QM31_MATRIX_SIZE.pow(2))
- .map(|_| rng.gen())
-            .collect::<Vec<QM31>>(),
- );
-
- // Create vector M31.
- let vec: [M31; MATRIX_SIZE] = rng.gen();
-
- // Create vector QM31.
- let vec_qm31: [QM31; QM31_MATRIX_SIZE] = [(); QM31_MATRIX_SIZE].map(|_| {
- QM31::from_u32_unchecked(
-            rng.gen::<u32>() % P,
-            rng.gen::<u32>() % P,
-            rng.gen::<u32>() % P,
-            rng.gen::<u32>() % P,
- )
- });
-
- // bench matrix multiplication.
- c.bench_function(
- &format!("RowMajorMatrix M31 {size}x{size} mul", size = MATRIX_SIZE),
- |b| {
- b.iter(|| {
- black_box(matrix_m31.mul(vec));
- })
- },
- );
- c.bench_function(
- &format!(
- "QM31 RowMajorMatrix {size}x{size} mul",
- size = QM31_MATRIX_SIZE
- ),
- |b| {
- b.iter(|| {
- black_box(matrix_qm31.mul(vec_qm31));
- })
- },
- );
-}
-
-criterion_group!(benches, row_major_matrix_multiplication_bench);
-criterion_main!(benches);
diff --git a/Stwo_wrapper/crates/prover/benches/merkle.rs b/Stwo_wrapper/crates/prover/benches/merkle.rs
deleted file mode 100644
index c039be7..0000000
--- a/Stwo_wrapper/crates/prover/benches/merkle.rs
+++ /dev/null
@@ -1,38 +0,0 @@
-#![feature(iter_array_chunks)]
-
-use criterion::{criterion_group, criterion_main, Criterion, Throughput};
-use itertools::Itertools;
-use num_traits::Zero;
-use stwo_prover::core::backend::simd::SimdBackend;
-use stwo_prover::core::backend::{Col, CpuBackend};
-use stwo_prover::core::fields::m31::{BaseField, N_BYTES_FELT};
-use stwo_prover::core::vcs::blake2_merkle::Blake2sMerkleHasher;
-use stwo_prover::core::vcs::ops::MerkleOps;
-
-const LOG_N_ROWS: u32 = 16;
-
-const LOG_N_COLS: u32 = 8;
-
-fn bench_blake2s_merkle<B: MerkleOps<Blake2sMerkleHasher>>(c: &mut Criterion, id: &str) {
-    let col: Col<B, BaseField> = (0..1 << LOG_N_ROWS).map(|_| BaseField::zero()).collect();
- let cols = (0..1 << LOG_N_COLS).map(|_| col.clone()).collect_vec();
- let col_refs = cols.iter().collect_vec();
- let mut group = c.benchmark_group("merkle throughput");
- let n_elements = 1 << (LOG_N_COLS + LOG_N_ROWS);
- group.throughput(Throughput::Elements(n_elements));
- group.throughput(Throughput::Bytes(N_BYTES_FELT as u64 * n_elements));
- group.bench_function(&format!("{id} merkle"), |b| {
- b.iter_with_large_drop(|| B::commit_on_layer(LOG_N_ROWS, None, &col_refs))
- });
-}
-
-fn blake2s_merkle_benches(c: &mut Criterion) {
-    bench_blake2s_merkle::<SimdBackend>(c, "simd");
-    bench_blake2s_merkle::<CpuBackend>(c, "cpu");
-}
-
-criterion_group!(
- name = benches;
- config = Criterion::default().sample_size(10);
- targets = blake2s_merkle_benches);
-criterion_main!(benches);
diff --git a/Stwo_wrapper/crates/prover/benches/pcs.rs b/Stwo_wrapper/crates/prover/benches/pcs.rs
deleted file mode 100644
index da185d7..0000000
--- a/Stwo_wrapper/crates/prover/benches/pcs.rs
+++ /dev/null
@@ -1,81 +0,0 @@
-use std::iter;
-
-use criterion::{black_box, criterion_group, criterion_main, BatchSize, Criterion};
-use rand::rngs::SmallRng;
-use rand::{Rng, SeedableRng};
-use stwo_prover::core::backend::simd::SimdBackend;
-use stwo_prover::core::backend::{BackendForChannel, CpuBackend};
-use stwo_prover::core::channel::Blake2sChannel;
-use stwo_prover::core::fields::m31::BaseField;
-use stwo_prover::core::pcs::CommitmentTreeProver;
-use stwo_prover::core::poly::circle::{CanonicCoset, CircleEvaluation};
-use stwo_prover::core::poly::twiddles::TwiddleTree;
-use stwo_prover::core::poly::BitReversedOrder;
-use stwo_prover::core::vcs::blake2_merkle::Blake2sMerkleChannel;
-
-const LOG_COSET_SIZE: u32 = 20;
-const LOG_BLOWUP_FACTOR: u32 = 1;
-const N_POLYS: usize = 16;
-
-fn benched_fn<B: BackendForChannel<Blake2sMerkleChannel>>(
-    evals: Vec<CircleEvaluation<B, BaseField, BitReversedOrder>>,
-    channel: &mut Blake2sChannel,
-    twiddles: &TwiddleTree<B>,
-) {
- let polys = evals
- .into_iter()
- .map(|eval| eval.interpolate_with_twiddles(twiddles))
- .collect();
-
-    CommitmentTreeProver::<B, Blake2sMerkleChannel>::new(
- polys,
- LOG_BLOWUP_FACTOR,
- channel,
- twiddles,
- );
-}
-
-fn bench_pcs<B: BackendForChannel<Blake2sMerkleChannel>>(c: &mut Criterion, id: &str) {
- let small_domain = CanonicCoset::new(LOG_COSET_SIZE);
- let big_domain = CanonicCoset::new(LOG_COSET_SIZE + LOG_BLOWUP_FACTOR);
- let twiddles = B::precompute_twiddles(big_domain.half_coset());
- let mut channel = Blake2sChannel::default();
- let mut rng = SmallRng::seed_from_u64(0);
-
-    let evals: Vec<CircleEvaluation<B, BaseField, BitReversedOrder>> = iter::repeat_with(|| {
- CircleEvaluation::new(
- small_domain.circle_domain(),
- (0..1 << LOG_COSET_SIZE).map(|_| rng.gen()).collect(),
- )
- })
- .take(N_POLYS)
- .collect();
-
- c.bench_function(
- &format!("{id} polynomial commitment 2^{LOG_COSET_SIZE}"),
- |b| {
- b.iter_batched(
- || evals.clone(),
- |evals| {
- benched_fn::(
- black_box(evals),
- black_box(&mut channel),
- black_box(&twiddles),
- )
- },
- BatchSize::LargeInput,
- );
- },
- );
-}
-
-fn pcs_benches(c: &mut Criterion) {
-    bench_pcs::<SimdBackend>(c, "simd");
-    bench_pcs::<CpuBackend>(c, "cpu");
-}
-
-criterion_group!(
- name = benches;
- config = Criterion::default().sample_size(10);
- targets = pcs_benches);
-criterion_main!(benches);
diff --git a/Stwo_wrapper/crates/prover/benches/poseidon.rs b/Stwo_wrapper/crates/prover/benches/poseidon.rs
deleted file mode 100644
index bc796c6..0000000
--- a/Stwo_wrapper/crates/prover/benches/poseidon.rs
+++ /dev/null
@@ -1,18 +0,0 @@
-use criterion::{criterion_group, criterion_main, Criterion, Throughput};
-use stwo_prover::core::pcs::PcsConfig;
-use stwo_prover::examples::poseidon::prove_poseidon;
-
-pub fn simd_poseidon(c: &mut Criterion) {
- const LOG_N_INSTANCES: u32 = 18;
- let mut group = c.benchmark_group("poseidon2");
- group.throughput(Throughput::Elements(1u64 << LOG_N_INSTANCES));
- group.bench_function(format!("poseidon2 2^{} instances", LOG_N_INSTANCES), |b| {
- b.iter(|| prove_poseidon(LOG_N_INSTANCES, PcsConfig::default()));
- });
-}
-
-criterion_group!(
- name = bit_rev;
- config = Criterion::default().sample_size(10);
- targets = simd_poseidon);
-criterion_main!(bit_rev);
diff --git a/Stwo_wrapper/crates/prover/benches/prefix_sum.rs b/Stwo_wrapper/crates/prover/benches/prefix_sum.rs
deleted file mode 100644
index 7faf4ac..0000000
--- a/Stwo_wrapper/crates/prover/benches/prefix_sum.rs
+++ /dev/null
@@ -1,19 +0,0 @@
-use criterion::{criterion_group, criterion_main, BatchSize, Criterion};
-use stwo_prover::core::backend::simd::column::BaseColumn;
-use stwo_prover::core::backend::simd::prefix_sum::inclusive_prefix_sum;
-use stwo_prover::core::fields::m31::BaseField;
-
-pub fn simd_prefix_sum_bench(c: &mut Criterion) {
- const LOG_SIZE: u32 = 24;
- let evals: BaseColumn = (0..1 << LOG_SIZE).map(BaseField::from).collect();
- c.bench_function(&format!("simd prefix_sum 2^{LOG_SIZE}"), |b| {
- b.iter_batched(
- || evals.clone(),
- inclusive_prefix_sum,
- BatchSize::LargeInput,
- );
- });
-}
-
-criterion_group!(benches, simd_prefix_sum_bench);
-criterion_main!(benches);
diff --git a/Stwo_wrapper/crates/prover/benches/quotients.rs b/Stwo_wrapper/crates/prover/benches/quotients.rs
deleted file mode 100644
index fc2949a..0000000
--- a/Stwo_wrapper/crates/prover/benches/quotients.rs
+++ /dev/null
@@ -1,55 +0,0 @@
-#![feature(iter_array_chunks)]
-
-use criterion::{black_box, criterion_group, criterion_main, Criterion};
-use itertools::Itertools;
-use stwo_prover::core::backend::cpu::CpuBackend;
-use stwo_prover::core::backend::simd::SimdBackend;
-use stwo_prover::core::circle::SECURE_FIELD_CIRCLE_GEN;
-use stwo_prover::core::fields::m31::BaseField;
-use stwo_prover::core::fields::qm31::SecureField;
-use stwo_prover::core::pcs::quotients::{ColumnSampleBatch, QuotientOps};
-use stwo_prover::core::poly::circle::{CanonicCoset, CircleEvaluation};
-use stwo_prover::core::poly::BitReversedOrder;
-
-// TODO(andrew): Consider removing const generics and making all sizes the same.
-fn bench_quotients<B: QuotientOps, const LOG_N_ROWS: u32, const LOG_N_COLS: u32>(
- c: &mut Criterion,
- id: &str,
-) {
- let domain = CanonicCoset::new(LOG_N_ROWS).circle_domain();
- let values = (0..domain.size()).map(BaseField::from).collect();
-    let col = CircleEvaluation::<B, BaseField, BitReversedOrder>::new(domain, values);
- let cols = (0..1 << LOG_N_COLS).map(|_| col.clone()).collect_vec();
- let col_refs = cols.iter().collect_vec();
- let random_coeff = SecureField::from_u32_unchecked(0, 1, 2, 3);
- let a = SecureField::from_u32_unchecked(5, 6, 7, 8);
- let samples = vec![ColumnSampleBatch {
- point: SECURE_FIELD_CIRCLE_GEN,
- columns_and_values: (0..1 << LOG_N_COLS).map(|i| (i, a)).collect(),
- }];
- c.bench_function(
- &format!("{id} quotients 2^{LOG_N_COLS} x 2^{LOG_N_ROWS}"),
- |b| {
- b.iter_with_large_drop(|| {
- B::accumulate_quotients(
- black_box(domain),
- black_box(&col_refs),
- black_box(random_coeff),
- black_box(&samples),
- 1,
- )
- })
- },
- );
-}
-
-fn quotients_benches(c: &mut Criterion) {
-    bench_quotients::<SimdBackend, 20, 8>(c, "simd");
-    bench_quotients::<CpuBackend, 16, 8>(c, "cpu");
-}
-
-criterion_group!(
- name = benches;
- config = Criterion::default().sample_size(10);
- targets = quotients_benches);
-criterion_main!(benches);
diff --git a/Stwo_wrapper/crates/prover/proof.json b/Stwo_wrapper/crates/prover/proof.json
deleted file mode 100644
index eef777e..0000000
--- a/Stwo_wrapper/crates/prover/proof.json
+++ /dev/null
@@ -1,348 +0,0 @@
-{
- "commitments" :
- ["34328580272026076035687604093297365442785733592720865218001799813393342152908",
- "38388381845372648579572899115609862601821983406101214230086519922780265042634"],
-
- "sampled_values_0" :
- [["1","0","0","0"],
- ["2129160320","1109509513","787887008","1676461964"],
- ["262908602","915488457","1893945291","1774327476"],
- ["894719153","1570509766","1424186619","204092576"],
- ["397490811","836398274","1615765624","2013800563"],
- ["1022303904","276983775","1064742229","165204856"],
- ["1200363525","170838026","524999776","156116441"],
- ["850733526","448725560","1521962209","1318190714"],
- ["1187866075","1705588092","924088348","490002418"],
- ["2033565088","996780784","1820235518","2048788344"],
- ["2061590372","1150986157","711772586","1511398564"],
- ["1066623954","530384603","1890251380","1699008129"],
- ["734047580","1685768538","505142109","787113212"],
- ["2030904700","99932423","695391286","1736941035"],
- ["1580330105","932031717","1705998668","146411959"],
- ["1585732224","1556242253","941668238","1998570239"],
- ["199481433","2123320403","1257464748","1663811899"],
- ["2139019524","1547107722","728449250","1941851166"],
- ["752079023","268472135","1465850435","16510773"],
- ["1279312817","63252415","442230579","1560954631"],
- ["1074859131","137997593","2118329011","652535723"],
- ["297567647","1483381078","1941495981","599737348"],
- ["1735543786","1420676479","1354982762","1114211268"],
- ["1691705401","1143446295","1748115479","1666756627"],
- ["955696743","2077778309","736065989","1319443838"],
- ["1076874307","1001483910","1702287354","819727011"],
- ["1134989244","1823710400","2067694105","1098263343"],
- ["1793642608","961404475","1279773056","1815400043"],
- ["739677274","1827877577","838562378","171296720"],
- ["2036367121","1901888610","289723252","2014426907"],
- ["330020507","436937516","2113056521","1828501207"],
- ["1359068814","583899921","734628376","1223217137"],
- ["1319501520","1242972089","1202216521","1285024997"],
- ["681182370","1569622309","1574376904","1563950435"],
- ["1204519566","483612224","1677731115","1667757584"],
- ["330284364","917877098","57538161","179869993"],
- ["2056561198","119768893","740294154","1454562198"],
- ["79009084","545196641","13388962","1973400144"],
- ["885977898","1973300145","37115619","957100699"],
- ["1937449867","1777683674","1983002799","757662558"],
- ["344927561","357845689","26887161","664585634"],
- ["1462268220","615463524","209500386","44308852"],
- ["570984705","2022111132","1404632615","2119081660"],
- ["13183327","1584451280","1216116653","316345540"],
- ["1497965915","705236857","1892466476","1068567492"],
- ["1758694676","1408790161","1140545981","315723937"],
- ["645308461","1125824784","1786470558","1240927727"],
- ["1213464061","470930291","1718629724","1149088875"],
- ["214577693","1578610321","2133720991","226291629"],
- ["1357706729","2097875841","1767996253","1478111500"],
- ["1154658683","752162439","2018723944","163997560"],
- ["1051993583","703716977","379706674","487262860"],
- ["1017692573","2060296775","2001023083","1064213951"],
- ["1042587725","1701108370","204550428","904590130"],
- ["1115340870","743420370","1927225111","1276396551"],
- ["493638626","1874789377","47342513","209203758"],
- ["1558586505","83459476","247638703","1975504267"],
- ["2097068784","954319448","367516919","1545761518"],
- ["1655645294","352838520","1307263981","1110198118"],
- ["1169856046","1925368371","1362317240","1926032147"],
- ["1940113709","885624001","1395047654","80053995"],
- ["1778932990","25092730","201117282","1724571908"],
- ["2096327738","233411984","1247443120","713989449"],
- ["808532602","136577890","1015579288","38900716"],
- ["1182257782","1186245376","1451332036","2080170103"],
- ["1662610758","1505542080","1038243031","1889715771"],
- ["440146119","942837214","1440484295","1593949278"],
- ["46258268","1884246120","164930024","2050584510"],
- ["1198954868","1079638495","1424072583","1028611344"],
- ["2112984649","1531382496","1873151714","1818301795"],
- ["1554382282","253920307","1641628530","1378998084"],
- ["857898234","686236793","2091871553","184978860"],
- ["2049153599","6111471","1579475775","32492894"],
- ["1371356596","679072793","1547377985","354305233"],
- ["1799882226","1201472049","1592617716","125534957"],
- ["1277144880","253726080","1800145982","1125162267"],
- ["1577717920","440984421","1377891036","846453148"],
- ["1952731919","1710992214","673668053","1871913638"],
- ["1559011028","2060945859","719954448","1356468891"],
- ["1961642242","1693473944","1300152522","412222111"],
- ["861208187","1242659514","977183954","38730935"],
- ["1016984917","1368361439","2106430139","1225979890"],
- ["1427754325","1482206106","1465316380","1096279813"],
- ["566051043","2025874544","234976335","1482256978"],
- ["1750543495","1494541462","374330732","411642241"],
- ["230654343","55625728","136463431","1099606808"],
- ["1172218793","1260458608","1314942990","75527287"],
- ["1824515276","916178746","1300275105","370626746"],
- ["915931367","987018043","56193044","617907884"],
- ["1934695822","1112844637","609268252","1972086910"],
- ["619631651","152029630","1979976905","292597437"],
- ["62258350","1890115432","1373605674","1505619938"],
- ["1770422019","1398189304","1773172351","1576001433"],
- ["650940868","1756047014","1764798953","1146887875"],
- ["1746945043","528205234","778346028","1797468521"],
- ["760802416","1479409742","1556974632","1307498378"],
- ["102511022","1787975482","968854748","1010240763"],
- ["330722054","2046294448","14132125","1822414050"],
- ["943548871","1770900623","1861740461","1290634078"],
- ["1402661415","1361511065","1784889120","837615360"]],
-
- "sampled_values_1" :
- [["712066144","1576368753","626134398","426337436"],
- ["160634493","1096735733","992622982","964509862"],
- ["208900621","1128739590","1423579079","1688318061"],
- ["1029182234","1152361165","571476481","1593867154"]],
-
- "decommitment_0" :
- ["24311567319749512546399129581715033328970605051392227451685196018312506896509",
- "8450134967305372517473027560161707471995673370792264153422077885080332622841",
- "6431507699794114682519586182713221908058047520896405293833270087934517909753",
- "303109001984349840640377328716025252051982378448629744935456455431709129012",
- "47167328465744900593371601186109726758160197572292632388959155138584359581158",
- "50584492046778480438774038937088410409133167768957478525289857065775850658491",
- "30584798499699103841624545814425958941934653399588880797257122471101102880636",
- "33441256878213890325682161124370878299436204406591246133637659120215439522803",
- "3288124068330032280185519028600654292250668929588668389702892483946668251740",
- "29852774919556057664485671676242264613416836486089146650713214180894511265116",
- "12482060975231949385592255321766253365687502822944549845564491620341379321204",
- "46285234163162336949700608657672147469543559995399282843606812790099228411758",
- "20807128972645591294726020136444795908525656782422245307591812614900798799914"],
-
- "decommitment_1" :
- ["18063303111481257844109225560025890393366258018933166919604543575686388632162",
- "1676364734386980395984608216327451243278421019544108756198322792517099196249",
- "5278661052518480850653886996628582549184134231869598116690316714367933376948",
- "21983822689977371558234298346357617674436224016274009820764238516520240403273",
- "605332543427376153930374757063581881998320956602375739165671986207155079359",
- "33771702906041565783498389127165108212044382608172583325407071671862086994048",
- "6930451780154275491146135719028766497977496109537963233244808739657647563071",
- "4564117668410212714684125903928600765456322272915099403587425756488534507713",
- "337808767671877648828499299861821973796749820854854708379479049898835100991",
- "2725840354457305623692571800192492803162041315546256970381708693201407812833",
- "34716495111790106826563330917176360656701717867702196654990744866499300990003",
- "49719870445464463616785535809529171382800153139923763422202182182379572350737",
- "48664746464641275030915461677298150155193593333108431337941029583245720868695",
- "32557886668297237033601675259512842580727821006475208499640136322794706303894",
- "15014835402414421167586357788116276188694467622586221351644991310645286648480",
- "12973705814659120511327850727547995427054971555827754777395732787633567627149",
- "15379912850866472398958956306527914195058439699787840041152620034933267404138",
- "19859070819439084101412868355121176941090844577507824922960697697668791429525",
- "38273559034692361632775489704953448699371080776239846995670381153186834620044"],
-
- "queried_values_0" :
- [["1","1","1","1","1","1"],
- ["730457281","730490049","28918683","28885915","1656126010","1656093242"],
- ["855614122","1238037465","1836504291","355791428","757095818","467806903"],
- ["674179888","1530445315","1720543014","76190330","1475912409","1017215862"],
- ["1142290008","1148671853","1619097781","938511401","904357795","257652679"],
- ["1679234056","1355264641","2139729457","574756654","604307234","1146556949"],
- ["1000500309","2008905806","1442759180","598876729","1786070690","1072293976"],
- ["1119085545","2133345582","135683580","216214405","1049766224","943727969"],
- ["206423262","2047139937","305085364","1422472664","1826554088","1032095092"],
- ["501238882","1656305868","724710382","1949772461","1426787917","585368894"],
- ["1005468045","1775577441","1042182076","415631363","1067013227","1635705270"],
- ["1776076392","216798814","1525036520","1160666510","1212132211","1915058776"],
- ["859923105","1633989410","182110635","2060185314","1084464822","1129902257"],
- ["489437802","313401022","271315129","357612175","2050381179","647577687"],
- ["1495302158","2052264981","1498165299","1164417520","1050104037","450244199"],
- ["1084986392","398966983","808449145","1733554138","2068501028","659474347"],
- ["399458768","1789245133","1698759035","188433436","1794535430","364419824"],
- ["2013965647","722839714","928854328","124488895","1378959529","952886009"],
- ["1334765706","193402268","471076108","640800921","1998121783","961582406"],
- ["1067762968","381831281","560459357","1025929344","181659877","1922040224"],
- ["1993303462","467991218","849673597","744722836","239634354","329631295"],
- ["785794488","1649178388","672964420","1281255462","900602801","271501809"],
- ["857859728","1325395820","985014020","1094321795","259553347","774587048"],
- ["1214640090","1588569866","871717820","1131833706","1625896842","1635087550"],
- ["796549205","931495223","2018253108","1395065060","158209751","1160478135"],
- ["883143962","729115354","190207821","839273168","1668931939","2074584689"],
- ["1490296658","1846956206","1610364850","56422972","160482417","681872093"],
- ["1270585092","1910190167","464113273","613529242","1027101122","1014185686"],
- ["1456043179","1999662961","193940913","678382864","39040067","1236859818"],
- ["1626243617","901735777","1703169024","911300891","1640727682","1121874896"],
- ["492192896","15672698","319327174","1727120334","1965889437","114404366"],
- ["407079019","949462637","255390508","1753162095","501134776","1457122467"],
- ["1478573872","1439193434","1053200675","1001140887","1553935777","1253681552"],
- ["183135520","946237525","1802924023","1831496784","1893117930","1830486286"],
- ["234902670","1169030504","196055115","1323151968","855748623","1328842866"],
- ["1150999776","1338824346","2072101698","774206263","1967350016","1808817867"],
- ["924341552","1430286424","511268814","825025920","1061850574","1954646566"],
- ["302634890","314434153","1692670768","1822915313","1244352075","1953834230"],
- ["1576167467","687837005","2116136752","144109400","1590157548","1634932462"],
- ["396756275","1272134898","1207308240","818219166","1314182589","109494000"],
- ["846425160","897737569","757312164","826009489","1019831588","1977463051"],
- ["2065801114","1918982367","1548689186","2082631803","298112070","383438809"],
- ["1034102289","461735180","2115581275","1343026598","1229979058","1021418523"],
- ["1784173874","166635387","547550115","1094693960","573193735","451367040"],
- ["119818313","659105018","1741377697","8940733","911200334","511474518"],
- ["1511949880","1315119529","1267019200","2134944693","878254810","375758264"],
- ["1203050254","156394547","1348568635","412863443","1068659960","1407913814"],
- ["361779719","130417374","89109096","117994876","1151322919","863143484"],
- ["1007476533","989566160","138644964","1672742874","540141118","1296408100"],
- ["1824241144","2051199719","1863718547","2109877864","36689613","1055926854"],
- ["791693003","1433717239","991140958","1565955371","1839976870","1163947838"],
- ["1267320759","2102593211","1831360854","1691591439","1672201908","61327345"],
- ["301343164","277158258","627925439","577508975","1896464649","907629062"],
- ["1964268932","929590164","1529686876","68630644","1663063136","254082844"],
- ["693529348","1815295486","1660565870","1226857377","156312343","1500907098"],
- ["1723158753","252348225","253985470","52424437","1605949937","576572581"],
- ["1781792048","1497492716","1951824572","1156925855","863650708","156447987"],
- ["876432605","458503399","283092867","247883110","1227074181","966219235"],
- ["1581118191","66527915","1577039825","1227961402","1738412997","1862462297"],
- ["679458448","338624032","34185999","253532412","65409631","563033132"],
- ["1011967612","1898273226","2124401156","105282260","1188226330","123913515"],
- ["1432513005","1083162825","1299704150","1184276814","339749370","1064298821"],
- ["145070927","1250457746","1977306722","1035124433","322154361","1782232869"],
- ["1934256728","1269667423","248999825","267009036","132507662","474021315"],
- ["1694871453","535187899","981724703","1180312550","74370795","277702656"],
- ["1850841912","1037634121","1967377497","1755127193","1566449422","1939039785"],
- ["1315699598","476111459","2058733537","332263289","1592057567","874912616"],
- ["774536537","170060616","2086574090","47894465","778021586","2115296942"],
- ["322468558","24934377","637275739","1596346002","1896623296","1814433409"],
- ["766517428","1263076038","358941187","2070217919","2108397185","1587546402"],
- ["345404490","1065320570","1231275245","1037359122","1286389839","2070140848"],
- ["746521574","835067673","311114030","1586400488","1406022058","1284151326"],
- ["1857315969","431410759","825259098","1717904860","503708539","2097758215"],
- ["1879479734","1863555039","2108235515","1833922769","1562707156","49484002"],
- ["1366768987","1050390036","1491845132","666041968","74368055","1254335623"],
- ["188857287","1161878039","1771805176","1457666227","1157868840","486461459"],
- ["261764705","1577846886","1332322961","10423372","640027252","1086814656"],
- ["111907709","542625019","2021749229","2013008690","523703611","1328833940"],
- ["1270684472","1675989474","394214608","538100201","1984625073","1560563159"],
- ["666555709","852557426","651115051","1878827907","953346499","619017191"],
- ["747972907","149382079","1393306586","1394823957","960994901","536632180"],
- ["80535893","229602380","1817483938","1455260088","484432","1869486290"],
- ["443556931","253108261","1609174393","1245931188","752691602","1668543792"],
- ["745497042","854686466","1834097777","642389535","1284043061","896553209"],
- ["532777064","1491985134","200157005","1378855967","1159213374","1797221037"],
- ["933463176","813761538","1124049829","1988347055","2115297439","1836576920"],
- ["194436043","1437728625","43998833","786326005","1130428925","1424571033"],
- ["2108272636","1410841489","753065553","2020187193","1644376367","670324352"],
- ["1362448669","752702510","1740531646","47989265","588634","1940480814"],
- ["439960422","528245604","179496898","235775013","59000527","1903150726"],
- ["103605138","249162711","1971219628","1958189530","423278905","1318354885"],
- ["321504059","1595801356","596911575","1361967073","459661104","599048233"],
- ["1610552125","73166668","444776743","1820306524","1180674369","1570356756"],
- ["1283703846","1024562975","958477092","1329464736","1758672211","899108631"],
- ["713626137","904634570","1902566483","1938333063","447549083","703262660"],
- ["1417291696","1717451368","354584524","832751684","930128006","1037860604"],
- ["1618745108","79533863","301008038","2091942909","1221962725","1524945081"],
- ["1490224031","349040760","393684137","484089443","1912848485","1790207999"],
- ["1930411520","642009628","1138820074","31855314","1177766391","1913457637"],
- ["618628623","139430131","904498895","925273128","2111653256","1012250155"]],
-
- "queried_values_1" : [["316772341","1526280133","663010112","224983897","510598760","1109503351"],
- ["754832207","435790299","883623752","553207508","154784232","199176676"],
- ["689603315","1763523007","1720552945","1983603154","367841669","319325418"],
- ["1290247052","1120744584","193500372","294491115","951360807","891034447"]],
-
- "proof of work" : "43",
-
- "coeffs" : ["329725079","667313404","2083859876","1645693780"],
-
- "inner_commitment_0" : "45555755014923146766476222823122654194153582923386372098787021809602091298670",
-
- "inner_decommitment_0" :
- ["5462985033728555575703006689913665598917262836577853690370070265073174719979",
- "1439463537898028163672031322449473184361454650351831891678420729829634422052",
- "3457992889375232257419443221173459860069710025424105705342440558952480618733",
- "1560236756242436900530359200127475252176167690738804367783218449678388748008",
- "44704035195642947074853484428073358106137689624692539529545008114744235429869",
- "1823722396825684657353300460130959150013626547749061669644787096209462088081",
- "36173961213090661587166983586606381085726394354099487131775975739295244107044",
- "44982414287882497869227873977178787463294164719732705041425254748922385395330",
- "45309152665928987599895709328729600158686240484613667382186228529382344291065",
- "6727671460449390719545303211873485738734289534911560838410979450236173250575",
- "20261836592905556803606993653450529841562498576647326913889589677099044443740",
- "5092741370842944151567575040255811031129982749889308756570945791769905531735",
- "25669118913473215056777887930215946759071827607816131638248897691395809170466"],
-
- "inner_evals_subset_0" :
- [["1779738283","1440487135","622229563","2027928845"],
- ["3310770","2003547458","1663490902","2105455978"],
- ["824310523","1757518542","231582441","427507918"]],
-
- "inner_commitment_1" : "36809196688918736785151875655523363274066842107451407514854169291514772437712",
-
- "inner_decommitment_1" :
- ["45836684762941279488847688946861562185184567552524644394596887832754938056979",
- "19073399120361742411025793373596546929954434607182120440274945820526786807031",
- "10149408627738268983458395386544516732232569888429637086606165543488301475287",
- "1134561121197915913438467336074096605141057500441242329860673894339623319838",
- "4117331370612733116088255919125911273783755080775248279590234839282346655209",
- "10513576479117646185507147947418059201301955276798193127377481002238952180861",
- "23342730846670478933566004300392843380760787769231395287565422468403932432226",
- "18740447275326464369699774196183138103532792867824812360537906002797944316369",
- "14414140432147963417180883803716587948502362440108737731008978168633980398219",
- "12250577863311462987061070846596950168057013378171600204130942328863618210853"],
-
- "inner_evals_subset_1" :
- [["1993244979","993480712","1202910330","51538179"],
- ["1550507975","1444313548","117070947","1740854590"],
- ["342709844","601149328","1436490544","1384381104"]],
-
- "inner_commitment_2" : "881819876414071785116043072319901261019430342891967010427739931379717752179",
-
- "inner_decommitment_2" :
- ["3578061893060038632121836895066391994380789049478144565795630968916166958170",
- "37159117331259094338569510749131708143302385613991325356583099983565421599794",
- "37757292341028790385725896863222501721662517262004109998718347744172059822092",
- "32039058093629437909283983486827138804118789892152561698628703527746673098335",
- "11826129489683399268430621966121477905203281789801973078750629518404118320344",
- "12209931412475259378582945309327673629248630080862144675946167350377528046777",
- "23652788355891929123107872712785512551735081591713325931570432712643436668697"],
-
- "inner_evals_subset_2" :
- [["1184374976","203117688","803515935","1781630737"],
- ["716686867","138132852","2024080584","392488646"],
- ["606958913","308986056","258114411","2075401741"]],
-
- "inner_commitment_3" : "12027868599227153144742193285247060272784688895537159385948117930165143367635",
-
- "inner_decommitment_3" :
- ["46510393127320994984678433868779353751380750916123221099220042551249644407575",
- "39205406309151711272632862117612882165913578213907083849769292791373274291092",
- "1794620160371318211300608665903463515892826349097148444913826356018179256828",
- "30764929521910551485060681298929856016753563990361023701791813032517030674324"],
-
- "inner_evals_subset_3" :
- [["1682558787","129089003","784689440","491206249"],
- ["2038210709","1600238918","655676259","1542271403"],
- ["1014579052","1384080403","862591487","1941843578"]],
-
- "inner_commitment_4" : "14412699124489796400221638504502701429059474709940645969751865021865037702257",
-
- "inner_decommitment_4" :
- ["19324767949149751902195880760061491860991545124249052461044586503970003688610"],
-
- "inner_evals_subset_4" :
- [["683258805","1002722262","1583421272","1748673499"],
- ["2101847208","689925082","1602280602","1942656531"],
- ["1952987285","1995490213","2082219584","1620868519"]],
-
- "inner_commitment_5" : "7898658461322497542615494384418597990320440781916208640366283709415937814000",
-
- "inner_decommitment_5" :
- [],
-
- "inner_evals_subset_5" :
- [["1862940297","2014478284","1043383827","560191545"]]
-}
\ No newline at end of file
diff --git a/Stwo_wrapper/crates/prover/src/constraint_framework/assert.rs b/Stwo_wrapper/crates/prover/src/constraint_framework/assert.rs
deleted file mode 100644
index 9e5530b..0000000
--- a/Stwo_wrapper/crates/prover/src/constraint_framework/assert.rs
+++ /dev/null
@@ -1,84 +0,0 @@
-use num_traits::{One, Zero};
-
-use super::EvalAtRow;
-use crate::core::backend::{Backend, Column};
-use crate::core::fields::m31::BaseField;
-use crate::core::fields::qm31::SecureField;
-use crate::core::fields::secure_column::SECURE_EXTENSION_DEGREE;
-use crate::core::pcs::TreeVec;
-use crate::core::poly::circle::{CanonicCoset, CirclePoly};
-use crate::core::utils::circle_domain_order_to_coset_order;
-
-/// Evaluates expressions at a trace domain row, and asserts constraints. Mainly used for testing.
-pub struct AssertEvaluator<'a> {
- pub trace: &'a TreeVec<Vec<Vec<BaseField>>>,
- pub col_index: TreeVec<usize>,
- pub row: usize,
-}
-impl<'a> AssertEvaluator<'a> {
- pub fn new(trace: &'a TreeVec<Vec<Vec<BaseField>>>, row: usize) -> Self {
- Self {
- trace,
- col_index: TreeVec::new(vec![0; trace.len()]),
- row,
- }
- }
-}
-impl<'a> EvalAtRow for AssertEvaluator<'a> {
- type F = BaseField;
- type EF = SecureField;
-
- fn next_interaction_mask<const N: usize>(
- &mut self,
- interaction: usize,
- offsets: [isize; N],
- ) -> [Self::F; N] {
- let col_index = self.col_index[interaction];
- self.col_index[interaction] += 1;
- offsets.map(|off| {
- // The mask row might wrap around the column size.
- let col_size = self.trace[interaction][col_index].len() as isize;
- self.trace[interaction][col_index]
- [(self.row as isize + off).rem_euclid(col_size) as usize]
- })
- }
-
- fn add_constraint<G>(&mut self, constraint: G)
- where
- Self::EF: std::ops::Mul<G, Output = Self::EF>,
- {
- // Cast to SecureField.
- let res = SecureField::one() * constraint;
- // The constraint should be zero at the given row, since we are evaluating on the trace
- // domain.
- assert_eq!(res, SecureField::zero(), "row: {}", self.row);
- }
-
- fn combine_ef(values: [Self::F; SECURE_EXTENSION_DEGREE]) -> Self::EF {
- SecureField::from_m31_array(values)
- }
-}
-
-pub fn assert_constraints<B: Backend>(
- trace_polys: &TreeVec<Vec<CirclePoly<B>>>,
- trace_domain: CanonicCoset,
- assert_func: impl Fn(AssertEvaluator<'_>),
-) {
- let traces = trace_polys.as_ref().map(|tree| {
- tree.iter()
- .map(|poly| {
- circle_domain_order_to_coset_order(
- &poly
- .evaluate(trace_domain.circle_domain())
- .bit_reverse()
- .values
- .to_cpu(),
- )
- })
- .collect()
- });
- for row in 0..trace_domain.size() {
- let eval = AssertEvaluator::new(&traces, row);
- assert_func(eval);
- }
-}
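
The only subtle step in the file above is the wrap-around indexing inside `next_interaction_mask`: a mask offset is applied to the current row modulo the column length. A minimal standalone sketch of that index computation (plain `usize`/`isize` arithmetic, not the crate's API):

```rust
/// Illustrative stand-in for the index computation in `next_interaction_mask`:
/// the mask row wraps around the column size, so offset -1 at row 0 reads the last row.
fn mask_index(row: usize, off: isize, col_size: usize) -> usize {
    (row as isize + off).rem_euclid(col_size as isize) as usize
}

fn main() {
    assert_eq!(mask_index(0, -1, 8), 7); // previous row of row 0 wraps to the last row
    assert_eq!(mask_index(7, 1, 8), 0); // next row of the last row wraps back to row 0
}
```
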
diff --git a/Stwo_wrapper/crates/prover/src/constraint_framework/component.rs b/Stwo_wrapper/crates/prover/src/constraint_framework/component.rs
deleted file mode 100644
index c0d8319..0000000
--- a/Stwo_wrapper/crates/prover/src/constraint_framework/component.rs
+++ /dev/null
@@ -1,210 +0,0 @@
-use std::borrow::Cow;
-use std::iter::zip;
-use std::ops::Deref;
-
-use itertools::Itertools;
-use tracing::{span, Level};
-
-use super::{EvalAtRow, InfoEvaluator, PointEvaluator, SimdDomainEvaluator};
-use crate::core::air::accumulation::{DomainEvaluationAccumulator, PointEvaluationAccumulator};
-use crate::core::air::{Component, ComponentProver, Trace};
-use crate::core::backend::simd::column::VeryPackedSecureColumnByCoords;
-use crate::core::backend::simd::m31::LOG_N_LANES;
-use crate::core::backend::simd::very_packed_m31::{VeryPackedBaseField, LOG_N_VERY_PACKED_ELEMS};
-use crate::core::backend::simd::SimdBackend;
-use crate::core::circle::CirclePoint;
-use crate::core::constraints::coset_vanishing;
-use crate::core::fields::m31::BaseField;
-use crate::core::fields::qm31::SecureField;
-use crate::core::fields::FieldExpOps;
-use crate::core::pcs::{TreeSubspan, TreeVec};
-use crate::core::poly::circle::{CanonicCoset, CircleEvaluation, PolyOps};
-use crate::core::poly::BitReversedOrder;
-use crate::core::{utils, ColumnVec};
-
-// TODO(andrew): Docs.
-// TODO(andrew): Consider better location for this.
-#[derive(Debug, Default)]
-pub struct TraceLocationAllocator {
- /// Mapping of tree index to next available column offset.
- next_tree_offsets: TreeVec<usize>,
-}
-
-impl TraceLocationAllocator {
- fn next_for_structure<T>(&mut self, structure: &TreeVec<ColumnVec<T>>) -> TreeVec<TreeSubspan> {
- if structure.len() > self.next_tree_offsets.len() {
- self.next_tree_offsets.resize(structure.len(), 0);
- }
-
- TreeVec::new(
- zip(&mut *self.next_tree_offsets, &**structure)
- .enumerate()
- .map(|(tree_index, (offset, cols))| {
- let col_start = *offset;
- let col_end = col_start + cols.len();
- *offset = col_end;
- TreeSubspan {
- tree_index,
- col_start,
- col_end,
- }
- })
- .collect(),
- )
- }
-}
-
-/// A component defined solely in terms of the constraint framework.
-/// Implementing this trait introduces implementations for [`Component`] and [`ComponentProver`] for
-/// the SIMD backend.
-/// Note that the constraint framework only supports components with columns of the same size.
-pub trait FrameworkEval {
- fn log_size(&self) -> u32;
-
- fn max_constraint_log_degree_bound(&self) -> u32;
-
- fn evaluate<E: EvalAtRow>(&self, eval: E) -> E;
-}
-
-pub struct FrameworkComponent<C: FrameworkEval> {
- eval: C,
- trace_locations: TreeVec<TreeSubspan>,
-}
-
-impl<E: FrameworkEval> FrameworkComponent<E> {
- pub fn new(provider: &mut TraceLocationAllocator, eval: E) -> Self {
- let eval_tree_structure = eval.evaluate(InfoEvaluator::default()).mask_offsets;
- let trace_locations = provider.next_for_structure(&eval_tree_structure);
- Self {
- eval,
- trace_locations,
- }
- }
-}
-
-impl<E: FrameworkEval> Component for FrameworkComponent<E> {
- fn n_constraints(&self) -> usize {
- self.eval.evaluate(InfoEvaluator::default()).n_constraints
- }
-
- fn max_constraint_log_degree_bound(&self) -> u32 {
- self.eval.max_constraint_log_degree_bound()
- }
-
- fn trace_log_degree_bounds(&self) -> TreeVec<ColumnVec<u32>> {
- TreeVec::new(
- self.eval
- .evaluate(InfoEvaluator::default())
- .mask_offsets
- .iter()
- .map(|tree_masks| vec![self.eval.log_size(); tree_masks.len()])
- .collect(),
- )
- }
-
- fn mask_points(
- &self,
- point: CirclePoint<SecureField>,
- ) -> TreeVec<ColumnVec<Vec<CirclePoint<SecureField>>>> {
- let info = self.eval.evaluate(InfoEvaluator::default());
- let trace_step = CanonicCoset::new(self.eval.log_size()).step();
- info.mask_offsets.map_cols(|col_mask| {
- col_mask
- .iter()
- .map(|off| point + trace_step.mul_signed(*off).into_ef())
- .collect()
- })
- }
-
- fn evaluate_constraint_quotients_at_point(
- &self,
- point: CirclePoint<SecureField>,
- mask: &TreeVec<ColumnVec<Vec<SecureField>>>,
- evaluation_accumulator: &mut PointEvaluationAccumulator,
- ) {
- self.eval.evaluate(PointEvaluator::new(
- mask.sub_tree(&self.trace_locations),
- evaluation_accumulator,
- coset_vanishing(CanonicCoset::new(self.eval.log_size()).coset, point).inverse(),
- ));
- }
-}
-
-impl<E: FrameworkEval> ComponentProver<SimdBackend> for FrameworkComponent<E> {
- fn evaluate_constraint_quotients_on_domain(
- &self,
- trace: &Trace<'_, SimdBackend>,
- evaluation_accumulator: &mut DomainEvaluationAccumulator<SimdBackend>,
- ) {
- let eval_domain = CanonicCoset::new(self.max_constraint_log_degree_bound()).circle_domain();
- let trace_domain = CanonicCoset::new(self.eval.log_size());
-
- let component_polys = trace.polys.sub_tree(&self.trace_locations);
- let component_evals = trace.evals.sub_tree(&self.trace_locations);
-
- // Extend trace if necessary.
- // TODO(spapini): Don't extend when eval_size < committed_size. Instead, pick a good
- // subdomain.
- let need_to_extend = component_evals
- .iter()
- .flatten()
- .any(|c| c.domain != eval_domain);
- let trace: TreeVec<
- Vec<Cow<'_, CircleEvaluation<SimdBackend, BaseField, BitReversedOrder>>>,
- > = if need_to_extend {
- let _span = span!(Level::INFO, "Extension").entered();
- let twiddles = SimdBackend::precompute_twiddles(eval_domain.half_coset);
- component_polys
- .as_cols_ref()
- .map_cols(|col| Cow::Owned(col.evaluate_with_twiddles(eval_domain, &twiddles)))
- } else {
- component_evals.clone().map_cols(|c| Cow::Borrowed(*c))
- };
-
- // Denom inverses.
- let log_expand = eval_domain.log_size() - trace_domain.log_size();
- let mut denom_inv = (0..1 << log_expand)
- .map(|i| coset_vanishing(trace_domain.coset(), eval_domain.at(i)).inverse())
- .collect_vec();
- utils::bit_reverse(&mut denom_inv);
-
- // Accumulator.
- let [mut accum] =
- evaluation_accumulator.columns([(eval_domain.log_size(), self.n_constraints())]);
- accum.random_coeff_powers.reverse();
-
- let _span = span!(Level::INFO, "Constraint pointwise eval").entered();
- let col = unsafe { VeryPackedSecureColumnByCoords::transform_under_mut(accum.col) };
-
- for vec_row in 0..(1 << (eval_domain.log_size() - LOG_N_LANES - LOG_N_VERY_PACKED_ELEMS)) {
- let trace_cols = trace.as_cols_ref().map_cols(|c| c.as_ref());
-
- // Evaluate constraints at row.
- let eval = SimdDomainEvaluator::new(
- &trace_cols,
- vec_row,
- &accum.random_coeff_powers,
- trace_domain.log_size(),
- eval_domain.log_size(),
- );
- let row_res = self.eval.evaluate(eval).row_res;
-
- // Finalize row.
- unsafe {
- let denom_inv = VeryPackedBaseField::broadcast(
- denom_inv[vec_row
- >> (trace_domain.log_size() - LOG_N_LANES - LOG_N_VERY_PACKED_ELEMS)],
- );
- col.set_packed(vec_row, col.packed_at(vec_row) + row_res * denom_inv)
- }
- }
- }
-}
-
-impl<E: FrameworkEval> Deref for FrameworkComponent<E> {
- type Target = E;
-
- fn deref(&self) -> &E {
- &self.eval
- }
-}
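
The column bookkeeping done by `TraceLocationAllocator::next_for_structure` above can be read in isolation: each interaction tree keeps a running offset, and every component is handed the half-open column range it owns in each tree. A simplified standalone sketch of that bookkeeping, with `Span` and `Allocator` as hypothetical stand-ins for `TreeSubspan` and the allocator:

```rust
#[derive(Debug, PartialEq)]
struct Span {
    tree_index: usize,
    col_start: usize,
    col_end: usize,
}

#[derive(Default)]
struct Allocator {
    /// Next free column offset per interaction tree.
    next_offsets: Vec<usize>,
}

impl Allocator {
    /// `n_cols_per_tree[i]` is how many columns the component uses in tree `i`.
    fn next_for(&mut self, n_cols_per_tree: &[usize]) -> Vec<Span> {
        if n_cols_per_tree.len() > self.next_offsets.len() {
            self.next_offsets.resize(n_cols_per_tree.len(), 0);
        }
        n_cols_per_tree
            .iter()
            .enumerate()
            .map(|(tree_index, &n)| {
                let col_start = self.next_offsets[tree_index];
                let col_end = col_start + n;
                self.next_offsets[tree_index] = col_end;
                Span { tree_index, col_start, col_end }
            })
            .collect()
    }
}

fn main() {
    let mut alloc = Allocator::default();
    let first = alloc.next_for(&[3, 1]);
    let second = alloc.next_for(&[2, 4]);
    assert_eq!(first[0], Span { tree_index: 0, col_start: 0, col_end: 3 });
    assert_eq!(second[0], Span { tree_index: 0, col_start: 3, col_end: 5 });
    assert_eq!(second[1], Span { tree_index: 1, col_start: 1, col_end: 5 });
}
```
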
diff --git a/Stwo_wrapper/crates/prover/src/constraint_framework/constant_columns.rs b/Stwo_wrapper/crates/prover/src/constraint_framework/constant_columns.rs
deleted file mode 100644
index e57df28..0000000
--- a/Stwo_wrapper/crates/prover/src/constraint_framework/constant_columns.rs
+++ /dev/null
@@ -1,37 +0,0 @@
-use num_traits::One;
-
-use crate::core::backend::{Backend, Col, Column};
-use crate::core::fields::m31::BaseField;
-use crate::core::poly::circle::{CanonicCoset, CircleEvaluation};
-use crate::core::poly::BitReversedOrder;
-use crate::core::utils::{bit_reverse_index, coset_index_to_circle_domain_index};
-
-/// Generates a column with a single one at the first position, and zeros elsewhere.
-pub fn gen_is_first<B: Backend>(log_size: u32) -> CircleEvaluation<B, BaseField, BitReversedOrder> {
- let mut col = Col::<B, BaseField>::zeros(1 << log_size);
- col.set(0, BaseField::one());
- CircleEvaluation::new(CanonicCoset::new(log_size).circle_domain(), col)
-}
-
-/// Generates a column with `1` at every `2^log_step` positions, `0` elsewhere, shifted by offset.
-// TODO(andrew): Consider optimizing. This is a quotient of two coset_vanishing evaluations (use a
-// succinct representation for the verifier).
-pub fn gen_is_step_with_offset<B: Backend>(
- log_size: u32,
- log_step: u32,
- offset: usize,
-) -> CircleEvaluation<B, BaseField, BitReversedOrder> {
- let mut col = Col::<B, BaseField>::zeros(1 << log_size);
-
- let size = 1 << log_size;
- let step = 1 << log_step;
- let step_offset = offset % step;
-
- for i in (step_offset..size).step_by(step) {
- let circle_domain_index = coset_index_to_circle_domain_index(i, log_size);
- let circle_domain_index_bit_rev = bit_reverse_index(circle_domain_index, log_size);
- col.set(circle_domain_index_bit_rev, BaseField::one());
- }
-
- CircleEvaluation::new(CanonicCoset::new(log_size).circle_domain(), col)
-}
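
Before the circle-domain reindexing and bit reversal applied above, `gen_is_step_with_offset` produces a simple periodic pattern: a one every `2^log_step` rows, shifted by `offset`. A standalone sketch of just that pattern, with plain integers standing in for base field elements:

```rust
/// Illustrative pattern generation: 1 at every 2^log_step-th row, shifted by `offset`.
fn is_step_pattern(log_size: u32, log_step: u32, offset: usize) -> Vec<u32> {
    let (size, step) = (1usize << log_size, 1usize << log_step);
    let mut col = vec![0u32; size];
    for i in ((offset % step)..size).step_by(step) {
        col[i] = 1;
    }
    col
}

fn main() {
    // 8 rows, period 4, shifted by 1: ones land at rows 1 and 5.
    assert_eq!(is_step_pattern(3, 2, 1), vec![0, 1, 0, 0, 0, 1, 0, 0]);
}
```
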
diff --git a/Stwo_wrapper/crates/prover/src/constraint_framework/info.rs b/Stwo_wrapper/crates/prover/src/constraint_framework/info.rs
deleted file mode 100644
index 05da93f..0000000
--- a/Stwo_wrapper/crates/prover/src/constraint_framework/info.rs
+++ /dev/null
@@ -1,48 +0,0 @@
-use std::ops::Mul;
-
-use num_traits::One;
-
-use super::EvalAtRow;
-use crate::core::fields::m31::BaseField;
-use crate::core::fields::qm31::SecureField;
-use crate::core::pcs::TreeVec;
-
-/// Collects information about the constraints.
-/// This includes mask offsets and columns at each interaction, and the number of constraints.
-#[derive(Default)]
-pub struct InfoEvaluator {
- pub mask_offsets: TreeVec<Vec<Vec<isize>>>,
- pub n_constraints: usize,
-}
-impl InfoEvaluator {
- pub fn new() -> Self {
- Self::default()
- }
-}
-impl EvalAtRow for InfoEvaluator {
- type F = BaseField;
- type EF = SecureField;
- fn next_interaction_mask<const N: usize>(
- &mut self,
- interaction: usize,
- offsets: [isize; N],
- ) -> [Self::F; N] {
- // Check if requested a mask from a new interaction
- if self.mask_offsets.len() <= interaction {
- // Extend `mask_offsets` so that `interaction` is the last index.
- self.mask_offsets.resize(interaction + 1, vec![]);
- }
- self.mask_offsets[interaction].push(offsets.into_iter().collect());
- [BaseField::one(); N]
- }
- fn add_constraint<G>(&mut self, _constraint: G)
- where
- Self::EF: Mul<G, Output = Self::EF>,
- {
- self.n_constraints += 1;
- }
-
- fn combine_ef(_values: [Self::F; 4]) -> Self::EF {
- SecureField::one()
- }
-}
diff --git a/Stwo_wrapper/crates/prover/src/constraint_framework/logup.rs b/Stwo_wrapper/crates/prover/src/constraint_framework/logup.rs
deleted file mode 100644
index 696a7b9..0000000
--- a/Stwo_wrapper/crates/prover/src/constraint_framework/logup.rs
+++ /dev/null
@@ -1,315 +0,0 @@
-use std::ops::{Mul, Sub};
-
-use itertools::Itertools;
-use num_traits::{One, Zero};
-
-use super::EvalAtRow;
-use crate::core::backend::simd::column::SecureColumn;
-use crate::core::backend::simd::m31::{PackedBaseField, LOG_N_LANES};
-use crate::core::backend::simd::prefix_sum::inclusive_prefix_sum;
-use crate::core::backend::simd::qm31::PackedSecureField;
-use crate::core::backend::simd::SimdBackend;
-use crate::core::backend::Column;
-use crate::core::channel::Channel;
-use crate::core::fields::m31::BaseField;
-use crate::core::fields::qm31::SecureField;
-use crate::core::fields::secure_column::{SecureColumnByCoords, SECURE_EXTENSION_DEGREE};
-use crate::core::fields::FieldExpOps;
-use crate::core::lookups::utils::Fraction;
-use crate::core::poly::circle::{CanonicCoset, CircleEvaluation};
-use crate::core::poly::BitReversedOrder;
-use crate::core::ColumnVec;
-
-/// Evaluates constraints for batched logups.
-/// These constraints enforce that sum_i multiplicity_i / (z + sum_j alpha^j * x_j) = claimed_sum.
-/// BATCH_SIZE is the number of fractions to batch together. The degree of the resulting constraints
-/// will be BATCH_SIZE + 1.
-pub struct LogupAtRow<const BATCH_SIZE: usize, E: EvalAtRow> {
- /// The index of the interaction used for the cumulative sum columns.
- pub interaction: usize,
- /// Queue of fractions waiting to be batched together.
- pub queue: [(E::EF, E::EF); BATCH_SIZE],
- /// Number of fractions in the queue.
- pub queue_size: usize,
- /// A constant to subtract from each row, to make the total sum of the last column zero.
- /// In other words, claimed_sum / 2^log_size.
- /// This is used to make the constraint uniform.
- pub cumsum_shift: SecureField,
- /// The evaluation of the last cumulative sum column.
- pub prev_col_cumsum: E::EF,
- is_finalized: bool,
-}
-impl<const BATCH_SIZE: usize, E: EvalAtRow> LogupAtRow<BATCH_SIZE, E> {
- pub fn new(interaction: usize, claimed_sum: SecureField, log_size: u32) -> Self {
- Self {
- interaction,
- queue: [(E::EF::zero(), E::EF::zero()); BATCH_SIZE],
- queue_size: 0,
- cumsum_shift: claimed_sum / BaseField::from_u32_unchecked(1 << log_size),
- prev_col_cumsum: E::EF::zero(),
- is_finalized: false,
- }
- }
- pub fn push_lookup<const N: usize>(
- &mut self,
- eval: &mut E,
- numerator: E::EF,
- values: &[E::F],
- lookup_elements: &LookupElements<N>,
- ) {
- let shifted_value = lookup_elements.combine(values);
- self.push_frac(eval, numerator, shifted_value);
- }
-
- pub fn push_frac(&mut self, eval: &mut E, numerator: E::EF, denominator: E::EF) {
- if self.queue_size < BATCH_SIZE {
- self.queue[self.queue_size] = (numerator, denominator);
- self.queue_size += 1;
- return;
- }
-
- // Compute sum_i pi/qi over batch, as a fraction, num/denom.
- let (num, denom) = self.fold_queue();
-
- self.queue[0] = (numerator, denominator);
- self.queue_size = 1;
-
- // Add a constraint that num / denom = diff.
- let cur_cumsum = eval.next_extension_interaction_mask(self.interaction, [0])[0];
- let diff = cur_cumsum - self.prev_col_cumsum;
- self.prev_col_cumsum = cur_cumsum;
- eval.add_constraint(diff * denom - num);
- }
-
- pub fn add_frac(&mut self, eval: &mut E, fraction: Fraction<E::EF, E::EF>) {
- // Add a constraint that num / denom = diff.
- let cur_cumsum = eval.next_extension_interaction_mask(self.interaction, [0])[0];
- let diff = cur_cumsum - self.prev_col_cumsum;
- self.prev_col_cumsum = cur_cumsum;
- eval.add_constraint(diff * fraction.denominator - fraction.numerator);
- }
-
- pub fn finalize(mut self, eval: &mut E) {
- assert!(!self.is_finalized, "LogupAtRow was already finalized");
- let (num, denom) = self.fold_queue();
-
- let [cur_cumsum, prev_row_cumsum] =
- eval.next_extension_interaction_mask(self.interaction, [0, -1]);
-
- let diff = cur_cumsum - prev_row_cumsum - self.prev_col_cumsum;
- // Instead of checking diff = num / denom, check diff = num / denom - cumsum_shift.
- // This makes (num / denom - cumsum_shift) have sum zero, which makes the constraint
- // uniform - apply on all rows.
- let fixed_diff = diff + self.cumsum_shift;
-
- eval.add_constraint(fixed_diff * denom - num);
-
- self.is_finalized = true;
- }
-
- fn fold_queue(&self) -> (E::EF, E::EF) {
- self.queue[0..self.queue_size]
- .iter()
- .copied()
- .fold((E::EF::zero(), E::EF::one()), |(p0, q0), (pi, qi)| {
- (p0 * qi + pi * q0, qi * q0)
- })
- }
-}
-
-/// Ensures that the LogupAtRow is finalized.
-/// LogupAtRow should be finalized exactly once.
-impl<const BATCH_SIZE: usize, E: EvalAtRow> Drop for LogupAtRow<BATCH_SIZE, E> {
- fn drop(&mut self) {
- assert!(self.is_finalized, "LogupAtRow was not finalized");
- }
-}
-
-/// Interaction elements for the logup protocol.
-#[derive(Clone, Debug, PartialEq, Eq)]
-pub struct LookupElements<const N: usize> {
- pub z: SecureField,
- pub alpha: SecureField,
- alpha_powers: [SecureField; N],
-}
-impl<const N: usize> LookupElements<N> {
- pub fn draw(channel: &mut impl Channel) -> Self {
- let [z, alpha] = channel.draw_felts(2).try_into().unwrap();
- let mut cur = SecureField::one();
- let alpha_powers = std::array::from_fn(|_| {
- let res = cur;
- cur *= alpha;
- res
- });
- Self {
- z,
- alpha,
- alpha_powers,
- }
- }
- pub fn combine<F: Copy, EF>(&self, values: &[F]) -> EF
- where
- EF: Copy + Zero + From<F> + From<SecureField> + Mul<F, Output = EF> + Sub<EF, Output = EF>,
- {
- EF::from(values[0])
- + values[1..]
- .iter()
- .zip(self.alpha_powers.iter())
- .fold(EF::zero(), |acc, (&value, &power)| {
- acc + EF::from(power) * value
- })
- - EF::from(self.z)
- }
- // TODO(spapini): Try to remove this.
- pub fn dummy() -> Self {
- Self {
- z: SecureField::one(),
- alpha: SecureField::one(),
- alpha_powers: [SecureField::one(); N],
- }
- }
-}
-
-// SIMD backend generator for logup interaction trace.
-pub struct LogupTraceGenerator {
- log_size: u32,
- /// Current allocated interaction columns.
- trace: Vec<SecureColumnByCoords<SimdBackend>>,
- /// Denominator expressions (z + sum_i alpha^i * x_i) being generated for the current lookup.
- denom: SecureColumn,
- /// Preallocated buffer for the Inverses of the denominators.
- denom_inv: SecureColumn,
-}
-impl LogupTraceGenerator {
- pub fn new(log_size: u32) -> Self {
- let trace = vec![];
- let denom = SecureColumn::zeros(1 << log_size);
- let denom_inv = SecureColumn::zeros(1 << log_size);
- Self {
- log_size,
- trace,
- denom,
- denom_inv,
- }
- }
-
- /// Allocate a new lookup column.
- pub fn new_col(&mut self) -> LogupColGenerator<'_> {
- let log_size = self.log_size;
- LogupColGenerator {
- gen: self,
- numerator: SecureColumnByCoords::<SimdBackend>::zeros(1 << log_size),
- }
- }
-
- /// Finalize the trace. Returns the trace and the claimed sum of the last column.
- pub fn finalize(
- mut self,
- ) -> (
- ColumnVec<CircleEvaluation<SimdBackend, BaseField, BitReversedOrder>>,
- SecureField,
- ) {
- // Compute claimed sum.
- let mut last_col_coords = self.trace.pop().unwrap().columns;
- let packed_sums: [PackedBaseField; SECURE_EXTENSION_DEGREE] = last_col_coords
- .each_ref()
- .map(|c| c.data.iter().copied().sum());
- let base_sums = packed_sums.map(|s| s.pointwise_sum());
- let claimed_sum = SecureField::from_m31_array(base_sums);
-
- // Shift the last column to make the sum zero.
- let cumsum_shift = claimed_sum / BaseField::from_u32_unchecked(1 << self.log_size);
- last_col_coords.iter_mut().enumerate().for_each(|(i, c)| {
- c.data
- .iter_mut()
- .for_each(|x| *x -= PackedBaseField::broadcast(cumsum_shift.to_m31_array()[i]))
- });
-
- // Prefix sum the last column.
- let coord_prefix_sum = last_col_coords.map(inclusive_prefix_sum);
- self.trace.push(SecureColumnByCoords {
- columns: coord_prefix_sum,
- });
-
- let trace = self
- .trace
- .into_iter()
- .flat_map(|eval| {
- eval.columns.map(|c| {
- CircleEvaluation::<SimdBackend, BaseField, BitReversedOrder>::new(
- CanonicCoset::new(self.log_size).circle_domain(),
- c,
- )
- })
- })
- .collect_vec();
- (trace, claimed_sum)
- }
-}
-
-/// Trace generator for a single lookup column.
-pub struct LogupColGenerator<'a> {
- gen: &'a mut LogupTraceGenerator,
- /// Numerator expressions (i.e. multiplicities) being generated for the current lookup.
- numerator: SecureColumnByCoords<SimdBackend>,
-}
-impl<'a> LogupColGenerator<'a> {
- /// Write a fraction to the column at a row.
- pub fn write_frac(
- &mut self,
- vec_row: usize,
- numerator: PackedSecureField,
- denom: PackedSecureField,
- ) {
- debug_assert!(
- denom.to_array().iter().all(|x| *x != SecureField::zero()),
- "{:?}",
- ("denom at vec_row {} is zero {}", denom, vec_row)
- );
- unsafe {
- self.numerator.set_packed(vec_row, numerator);
- *self.gen.denom.data.get_unchecked_mut(vec_row) = denom;
- }
- }
-
- /// Finalizes generating the column.
- pub fn finalize_col(mut self) {
- FieldExpOps::batch_inverse(&self.gen.denom.data, &mut self.gen.denom_inv.data);
-
- for vec_row in 0..(1 << (self.gen.log_size - LOG_N_LANES)) {
- unsafe {
- let value = self.numerator.packed_at(vec_row)
- * *self.gen.denom_inv.data.get_unchecked(vec_row);
- let prev_value = self
- .gen
- .trace
- .last()
- .map(|col| col.packed_at(vec_row))
- .unwrap_or_else(PackedSecureField::zero);
- self.numerator.set_packed(vec_row, value + prev_value)
- };
- }
-
- self.gen.trace.push(self.numerator)
- }
-}
-
-#[cfg(test)]
-mod tests {
- use num_traits::One;
-
- use super::LogupAtRow;
- use crate::constraint_framework::InfoEvaluator;
- use crate::core::fields::qm31::SecureField;
-
- #[test]
- #[should_panic]
- fn test_logup_not_finalized_panic() {
- let mut logup = LogupAtRow::<2, InfoEvaluator>::new(1, SecureField::one(), 7);
- logup.push_frac(
- &mut InfoEvaluator::default(),
- SecureField::one(),
- SecureField::one(),
- );
- }
-}
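
The batching in `fold_queue` above is ordinary fraction addition: a queue of fractions `p_i/q_i` is folded into a single fraction `(sum_i p_i * prod_{j != i} q_j) / (prod_i q_i)`, so only one cumulative-sum constraint is paid per batch of `BATCH_SIZE` fractions. A standalone sketch of the same fold, with plain integers standing in for field elements:

```rust
/// Fold a queue of fractions (p_i, q_i) into one (numerator, denominator) pair,
/// mirroring the accumulation step used for batched logup constraints.
fn fold_queue(queue: &[(i64, i64)]) -> (i64, i64) {
    queue
        .iter()
        .copied()
        .fold((0, 1), |(p0, q0), (pi, qi)| (p0 * qi + pi * q0, q0 * qi))
}

fn main() {
    // 1/2 + 1/3 + 1/6 = 1, i.e. the folded fraction is 36/36.
    let (num, denom) = fold_queue(&[(1, 2), (1, 3), (1, 6)]);
    assert_eq!((num, denom), (36, 36));
}
```
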
diff --git a/Stwo_wrapper/crates/prover/src/constraint_framework/mod.rs b/Stwo_wrapper/crates/prover/src/constraint_framework/mod.rs
deleted file mode 100644
index 87069d3..0000000
--- a/Stwo_wrapper/crates/prover/src/constraint_framework/mod.rs
+++ /dev/null
@@ -1,97 +0,0 @@
-//! This module contains helpers to express and use constraints for components.
-mod assert;
-mod component;
-pub mod constant_columns;
-mod info;
-pub mod logup;
-mod point;
-mod simd_domain;
-
-use std::array;
-use std::fmt::Debug;
-use std::ops::{Add, AddAssign, Mul, Neg, Sub};
-
-pub use assert::{assert_constraints, AssertEvaluator};
-pub use component::{FrameworkComponent, FrameworkEval, TraceLocationAllocator};
-pub use info::InfoEvaluator;
-use num_traits::{One, Zero};
-pub use point::PointEvaluator;
-pub use simd_domain::SimdDomainEvaluator;
-
-use crate::core::fields::m31::BaseField;
-use crate::core::fields::qm31::SecureField;
-use crate::core::fields::secure_column::SECURE_EXTENSION_DEGREE;
-use crate::core::fields::FieldExpOps;
-
-/// A trait for evaluating expressions at some point or row.
-pub trait EvalAtRow {
- // TODO(spapini): Use a better trait for these, like 'Algebra' or something.
- /// The field type holding values of columns for the component. These are the inputs to the
- /// constraints. It might be [BaseField] packed types, or even [SecureField], when evaluating
- /// the columns out of domain.
- type F: FieldExpOps
- + Copy
- + Debug
- + Zero
- + Neg<Output = Self::F>
- + AddAssign
- + AddAssign<BaseField>
- + Add<Self::F, Output = Self::F>
- + Sub<Self::F, Output = Self::F>
- + Mul<BaseField, Output = Self::F>
- + Add<SecureField, Output = Self::EF>
- + Mul<SecureField, Output = Self::EF>
- + Neg<Output = Self::F>
- + From<BaseField>;
-
- /// A field type representing the closure of `F` with multiplying by [SecureField]. Constraints
- /// usually get multiplied by [SecureField] values for security.
- type EF: One
- + Copy
- + Debug
- + Zero
- + From<Self::F>
- + Neg<Output = Self::EF>
- + AddAssign
- + Add<SecureField, Output = Self::EF>
- + Sub<SecureField, Output = Self::EF>
- + Mul<SecureField, Output = Self::EF>
- + Add<Self::F, Output = Self::EF>
- + Mul<Self::F, Output = Self::EF>
- + Sub<Self::EF, Output = Self::EF>
- + Mul<Self::EF, Output = Self::EF>
- + From<SecureField>
- + From<BaseField>;
-
- /// Returns the next mask value for the first interaction at offset 0.
- fn next_trace_mask(&mut self) -> Self::F {
- let [mask_item] = self.next_interaction_mask(0, [0]);
- mask_item
- }
-
- /// Returns the mask values of the given offsets for the next column in the interaction.
- fn next_interaction_mask<const N: usize>(
- &mut self,
- interaction: usize,
- offsets: [isize; N],
- ) -> [Self::F; N];
-
- /// Returns the extension mask values of the given offsets for the next extension degree many
- /// columns in the interaction.
- fn next_extension_interaction_mask<const N: usize>(
- &mut self,
- interaction: usize,
- offsets: [isize; N],
- ) -> [Self::EF; N] {
- let res_col_major = array::from_fn(|_| self.next_interaction_mask(interaction, offsets));
- array::from_fn(|i| Self::combine_ef(res_col_major.map(|c| c[i])))
- }
-
- /// Adds a constraint to the component.
- fn add_constraint<G>(&mut self, constraint: G)
- where
- Self::EF: Mul<G, Output = Self::EF>;
-
- /// Combines 4 base field values into a single extension field value.
- fn combine_ef(values: [Self::F; SECURE_EXTENSION_DEGREE]) -> Self::EF;
-}
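
The point of `EvalAtRow` above is that a component writes its constraints once against the trait, and the same code is then driven by an asserting evaluator, a point evaluator, or a SIMD domain evaluator. A much-reduced standalone sketch of that pattern; `ToyEvalAtRow`, `ToyAssert`, and the sum-of-squares constraint are hypothetical and only illustrate the shape of the interface:

```rust
/// Hypothetical, much-reduced counterpart of `EvalAtRow`.
trait ToyEvalAtRow {
    type F: Copy
        + std::ops::Add<Output = Self::F>
        + std::ops::Sub<Output = Self::F>
        + std::ops::Mul<Output = Self::F>;
    /// Returns the next trace cell of the current row.
    fn next_trace_mask(&mut self) -> Self::F;
    /// Registers a constraint that must vanish on the trace domain.
    fn add_constraint(&mut self, constraint: Self::F);
}

/// Toy analogue of an asserting evaluator: reads one concrete row and panics on a nonzero constraint.
struct ToyAssert<'a> {
    row: &'a [i64],
    col: usize,
}

impl<'a> ToyEvalAtRow for ToyAssert<'a> {
    type F = i64;
    fn next_trace_mask(&mut self) -> i64 {
        let v = self.row[self.col];
        self.col += 1;
        v
    }
    fn add_constraint(&mut self, constraint: i64) {
        assert_eq!(constraint, 0);
    }
}

/// The "component" side: written once against the trait, reusable with any evaluator.
fn evaluate<E: ToyEvalAtRow>(mut eval: E) {
    let a = eval.next_trace_mask();
    let b = eval.next_trace_mask();
    let c = eval.next_trace_mask();
    eval.add_constraint(c - (a * a + b * b));
}

fn main() {
    // The row [3, 4, 25] satisfies c = a^2 + b^2, so no constraint fires.
    evaluate(ToyAssert { row: &[3, 4, 25], col: 0 });
}
```
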
diff --git a/Stwo_wrapper/crates/prover/src/constraint_framework/point.rs b/Stwo_wrapper/crates/prover/src/constraint_framework/point.rs
deleted file mode 100644
index 6c6f72f..0000000
--- a/Stwo_wrapper/crates/prover/src/constraint_framework/point.rs
+++ /dev/null
@@ -1,57 +0,0 @@
-use std::ops::Mul;
-
-use super::EvalAtRow;
-use crate::core::air::accumulation::PointEvaluationAccumulator;
-use crate::core::fields::qm31::SecureField;
-use crate::core::fields::secure_column::SECURE_EXTENSION_DEGREE;
-use crate::core::pcs::TreeVec;
-use crate::core::ColumnVec;
-
-/// Evaluates expressions at a point out of domain.
-pub struct PointEvaluator<'a> {
- pub mask: TreeVec<ColumnVec<Vec<SecureField>>>,
- pub evaluation_accumulator: &'a mut PointEvaluationAccumulator,
- pub col_index: Vec<usize>,
- pub denom_inverse: SecureField,
-}
-impl<'a> PointEvaluator<'a> {
- pub fn new(
- mask: TreeVec<ColumnVec<Vec<SecureField>>>,
- evaluation_accumulator: &'a mut PointEvaluationAccumulator,
- denom_inverse: SecureField,
- ) -> Self {
- let col_index = vec![0; mask.len()];
- Self {
- mask,
- evaluation_accumulator,
- col_index,
- denom_inverse,
- }
- }
-}
-impl<'a> EvalAtRow for PointEvaluator<'a> {
- type F = SecureField;
- type EF = SecureField;
-
- fn next_interaction_mask<const N: usize>(
- &mut self,
- interaction: usize,
- _offsets: [isize; N],
- ) -> [Self::F; N] {
- let col_index = self.col_index[interaction];
- self.col_index[interaction] += 1;
- let mask = self.mask[interaction][col_index].clone();
- assert_eq!(mask.len(), N);
- mask.try_into().unwrap()
- }
- fn add_constraint<G>(&mut self, constraint: G)
- where
- Self::EF: Mul<G, Output = Self::EF>,
- {
- self.evaluation_accumulator
- .accumulate(self.denom_inverse * constraint);
- }
- fn combine_ef(values: [Self::F; SECURE_EXTENSION_DEGREE]) -> Self::EF {
- SecureField::from_partial_evals(values)
- }
-}
diff --git a/Stwo_wrapper/crates/prover/src/constraint_framework/simd_domain.rs b/Stwo_wrapper/crates/prover/src/constraint_framework/simd_domain.rs
deleted file mode 100644
index ef3662a..0000000
--- a/Stwo_wrapper/crates/prover/src/constraint_framework/simd_domain.rs
+++ /dev/null
@@ -1,106 +0,0 @@
-use std::ops::Mul;
-
-use num_traits::Zero;
-
-use super::EvalAtRow;
-use crate::core::backend::simd::column::VeryPackedBaseColumn;
-use crate::core::backend::simd::m31::LOG_N_LANES;
-use crate::core::backend::simd::very_packed_m31::{
- VeryPackedBaseField, VeryPackedSecureField, LOG_N_VERY_PACKED_ELEMS,
-};
-use crate::core::backend::simd::SimdBackend;
-use crate::core::backend::Column;
-use crate::core::fields::m31::BaseField;
-use crate::core::fields::qm31::SecureField;
-use crate::core::fields::secure_column::SECURE_EXTENSION_DEGREE;
-use crate::core::pcs::TreeVec;
-use crate::core::poly::circle::CircleEvaluation;
-use crate::core::poly::BitReversedOrder;
-use crate::core::utils::offset_bit_reversed_circle_domain_index;
-
-/// Evaluates constraints at the points of an evaluation domain.
-pub struct SimdDomainEvaluator<'a> {
- pub trace_eval:
- &'a TreeVec<Vec<CircleEvaluation<SimdBackend, BaseField, BitReversedOrder>>>,
- pub column_index_per_interaction: Vec<usize>,
- /// The row index of the simd-vector row to evaluate the constraints at.
- pub vec_row: usize,
- pub random_coeff_powers: &'a [SecureField],
- pub row_res: VeryPackedSecureField,
- pub constraint_index: usize,
- pub domain_log_size: u32,
- pub eval_domain_log_size: u32,
-}
-impl<'a> SimdDomainEvaluator<'a> {
- pub fn new(
- trace_eval: &'a TreeVec<Vec<CircleEvaluation<SimdBackend, BaseField, BitReversedOrder>>>,
- vec_row: usize,
- random_coeff_powers: &'a [SecureField],
- domain_log_size: u32,
- eval_log_size: u32,
- ) -> Self {
- Self {
- trace_eval,
- column_index_per_interaction: vec![0; trace_eval.len()],
- vec_row,
- random_coeff_powers,
- row_res: VeryPackedSecureField::zero(),
- constraint_index: 0,
- domain_log_size,
- eval_domain_log_size: eval_log_size,
- }
- }
-}
-impl<'a> EvalAtRow for SimdDomainEvaluator<'a> {
- type F = VeryPackedBaseField;
- type EF = VeryPackedSecureField;
-
- // TODO(spapini): Remove all boundary checks.
- fn next_interaction_mask<const N: usize>(
- &mut self,
- interaction: usize,
- offsets: [isize; N],
- ) -> [Self::F; N] {
- let col_index = self.column_index_per_interaction[interaction];
- self.column_index_per_interaction[interaction] += 1;
- offsets.map(|off| {
- // If the offset is 0, we can just return the value directly from this row.
- if off == 0 {
- unsafe {
- let col = &self
- .trace_eval
- .get_unchecked(interaction)
- .get_unchecked(col_index)
- .values;
- let very_packed_col = VeryPackedBaseColumn::transform_under_ref(col);
- return *very_packed_col.data.get_unchecked(self.vec_row);
- };
- }
- // Otherwise, we need to look up the value at the offset.
- // Since the domain is bit-reversed circle domain ordered, we need to look up the value
- // at the bit-reversed natural order index at an offset.
- VeryPackedBaseField::from_array(std::array::from_fn(|i| {
- let row_index = offset_bit_reversed_circle_domain_index(
- (self.vec_row << (LOG_N_LANES + LOG_N_VERY_PACKED_ELEMS)) + i,
- self.domain_log_size,
- self.eval_domain_log_size,
- off,
- );
- self.trace_eval[interaction][col_index].at(row_index)
- }))
- })
- }
- fn add_constraint<G>(&mut self, constraint: G)
- where
- Self::EF: Mul<G, Output = Self::EF>,
- {
- self.row_res +=
- VeryPackedSecureField::broadcast(self.random_coeff_powers[self.constraint_index])
- * constraint;
- self.constraint_index += 1;
- }
-
- fn combine_ef(values: [Self::F; SECURE_EXTENSION_DEGREE]) -> Self::EF {
- VeryPackedSecureField::from_very_packed_m31s(values)
- }
-}
diff --git a/Stwo_wrapper/crates/prover/src/core/air/accumulation.rs b/Stwo_wrapper/crates/prover/src/core/air/accumulation.rs
deleted file mode 100644
index 8fcf575..0000000
--- a/Stwo_wrapper/crates/prover/src/core/air/accumulation.rs
+++ /dev/null
@@ -1,297 +0,0 @@
-//! Accumulators for a random linear combination of circle polynomials.
-//! Given N polynomials, u_0(P), ... u_{N-1}(P), and a random alpha, the combined polynomial is
-//! defined as
-//! f(p) = sum_i alpha^{N-1-i} u_i(P).
-
-use itertools::Itertools;
-use tracing::{span, Level};
-
-use crate::core::backend::{Backend, Col, Column, CpuBackend};
-use crate::core::fields::m31::BaseField;
-use crate::core::fields::qm31::SecureField;
-use crate::core::fields::secure_column::SecureColumnByCoords;
-use crate::core::fields::FieldOps;
-use crate::core::poly::circle::{CanonicCoset, CircleEvaluation, CirclePoly, SecureCirclePoly};
-use crate::core::poly::BitReversedOrder;
-use crate::core::utils::generate_secure_powers;
-
-/// Accumulates N evaluations of u_i(P0) at a single point.
-/// Computes f(P0), the combined polynomial at that point.
-/// For N accumulated evaluations, the i'th evaluation is multiplied by alpha^(N-1-i).
-pub struct PointEvaluationAccumulator {
- random_coeff: SecureField,
- accumulation: SecureField,
-}
-
-impl PointEvaluationAccumulator {
- /// Creates a new accumulator.
- /// `random_coeff` should be a secure random field element, drawn from the channel.
- pub fn new(random_coeff: SecureField) -> Self {
- Self {
- random_coeff,
- accumulation: SecureField::default(),
- }
- }
-
- /// Accumulates u_i(P0), a polynomial evaluation at P0, in reverse order.
- pub fn accumulate(&mut self, evaluation: SecureField) {
- self.accumulation = self.accumulation * self.random_coeff + evaluation;
- }
-
- pub fn finalize(self) -> SecureField {
- self.accumulation
- }
-}
-
-// TODO(ShaharS), rename terminology to constraints instead of columns.
-/// Accumulates evaluations of u_i(P), each at an evaluation domain of the size of that polynomial.
-/// Computes the coefficients of f(P).
-pub struct DomainEvaluationAccumulator<B: Backend> {
- random_coeff_powers: Vec<SecureField>,
- /// Accumulated evaluations for each log_size.
- /// Each `sub_accumulation` holds the sum over all columns i of that log_size, of
- /// `evaluation_i * alpha^(N - 1 - i)`
- /// where `N` is the total number of evaluations.
- sub_accumulations: Vec<Option<SecureColumnByCoords<B>>>,
-}
-
-impl<B: Backend> DomainEvaluationAccumulator<B> {
- /// Creates a new accumulator.
- /// `random_coeff` should be a secure random field element, drawn from the channel.
- /// `max_log_size` is the maximum log_size of the accumulated evaluations.
- pub fn new(random_coeff: SecureField, max_log_size: u32, total_columns: usize) -> Self {
- let max_log_size = max_log_size as usize;
- Self {
- random_coeff_powers: generate_secure_powers(random_coeff, total_columns),
- sub_accumulations: (0..(max_log_size + 1)).map(|_| None).collect(),
- }
- }
-
- /// Gets accumulators for some sizes.
- /// `n_cols_per_size` is an array of pairs (log_size, n_cols).
- /// For each entry, a [ColumnAccumulator] is returned, expecting to accumulate `n_cols`
- /// evaluations of size `log_size`.
- /// The array size, `N`, is the number of different sizes.
- pub fn columns<const N: usize>(
- &mut self,
- n_cols_per_size: [(u32, usize); N],
- ) -> [ColumnAccumulator<'_, B>; N] {
- self.sub_accumulations
- .get_many_mut(n_cols_per_size.map(|(log_size, _)| log_size as usize))
- .unwrap_or_else(|e| panic!("invalid log_sizes: {}", e))
- .into_iter()
- .zip(n_cols_per_size)
- .map(|(col, (log_size, n_cols))| {
- let random_coeffs = self
- .random_coeff_powers
- .split_off(self.random_coeff_powers.len() - n_cols);
- ColumnAccumulator {
- random_coeff_powers: random_coeffs,
- col: col.get_or_insert_with(|| SecureColumnByCoords::zeros(1 << log_size)),
- }
- })
- .collect_vec()
- .try_into()
- .unwrap_or_else(|_| unreachable!())
- }
-
- /// Returns the log size of the resulting polynomial.
- pub fn log_size(&self) -> u32 {
- (self.sub_accumulations.len() - 1) as u32
- }
-}
-
-pub trait AccumulationOps: FieldOps<BaseField> + Sized {
- /// Accumulates other into column:
- /// column = column + other.
- fn accumulate(column: &mut SecureColumnByCoords<Self>, other: &SecureColumnByCoords<Self>);
-}
-
-impl<B: Backend> DomainEvaluationAccumulator<B> {
- /// Computes f(P) as coefficients.
- pub fn finalize(self) -> SecureCirclePoly<B> {
- assert_eq!(
- self.random_coeff_powers.len(),
- 0,
- "not all random coefficients were used"
- );
- let log_size = self.log_size();
- let _span = span!(Level::INFO, "Constraints interpolation").entered();
- let mut cur_poly: Option<SecureCirclePoly<B>> = None;
- let twiddles = B::precompute_twiddles(
- CanonicCoset::new(self.log_size())
- .circle_domain()
- .half_coset,
- );
-
- for (log_size, values) in self.sub_accumulations.into_iter().enumerate().skip(1) {
- let Some(mut values) = values else {
- continue;
- };
- if let Some(prev_poly) = cur_poly {
- let eval = SecureColumnByCoords {
- columns: prev_poly.0.map(|c| {
- c.evaluate_with_twiddles(
- CanonicCoset::new(log_size as u32).circle_domain(),
- &twiddles,
- )
- .values
- }),
- };
- B::accumulate(&mut values, &eval);
- }
- cur_poly = Some(SecureCirclePoly(values.columns.map(|c| {
- CircleEvaluation::<B, BaseField, BitReversedOrder>::new(
- CanonicCoset::new(log_size as u32).circle_domain(),
- c,
- )
- .interpolate_with_twiddles(&twiddles)
- })));
- }
- cur_poly.unwrap_or_else(|| {
- SecureCirclePoly(std::array::from_fn(|_| {
- CirclePoly::new(Col::<B, BaseField>::zeros(1 << log_size))
- }))
- })
- }
-}
-
-/// A domain accumulator for polynomials of a single size.
-pub struct ColumnAccumulator<'a, B: Backend> {
- pub random_coeff_powers: Vec<SecureField>,
- pub col: &'a mut SecureColumnByCoords<B>,
-}
-impl<'a> ColumnAccumulator<'a, CpuBackend> {
- pub fn accumulate(&mut self, index: usize, evaluation: SecureField) {
- let val = self.col.at(index) + evaluation;
- self.col.set(index, val);
- }
-}
-
-#[cfg(test)]
-mod tests {
- use std::array;
-
- use num_traits::Zero;
- use rand::rngs::SmallRng;
- use rand::{Rng, SeedableRng};
-
- use super::*;
- use crate::core::backend::cpu::CpuCircleEvaluation;
- use crate::core::circle::CirclePoint;
- use crate::core::fields::m31::{M31, P};
- use crate::qm31;
-
- #[test]
- fn test_point_evaluation_accumulator() {
- // Generate a vector of random sizes with a constant seed.
- let mut rng = SmallRng::seed_from_u64(0);
- const MAX_LOG_SIZE: u32 = 10;
- const MASK: u32 = P;
- let log_sizes = (0..100)
- .map(|_| rng.gen_range(4..MAX_LOG_SIZE))
- .collect::<Vec<_>>();
-
- // Generate random evaluations.
- let evaluations = log_sizes
- .iter()
- .map(|_| M31::from_u32_unchecked(rng.gen::<u32>() & MASK))
- .collect::<Vec<_>>();
- let alpha = qm31!(2, 3, 4, 5);
-
- // Use accumulator.
- let mut accumulator = PointEvaluationAccumulator::new(alpha);
- for (_, evaluation) in log_sizes.iter().zip(evaluations.iter()) {
- accumulator.accumulate((*evaluation).into());
- }
- let accumulator_res = accumulator.finalize();
-
- // Use direct computation.
- let mut res = SecureField::default();
- for evaluation in evaluations.iter() {
- res = res * alpha + *evaluation;
- }
-
- assert_eq!(accumulator_res, res);
- }
-
- #[test]
- fn test_domain_evaluation_accumulator() {
- // Generate a vector of random sizes with a constant seed.
- let mut rng = SmallRng::seed_from_u64(0);
- const LOG_SIZE_MIN: u32 = 4;
- const LOG_SIZE_BOUND: u32 = 10;
- const MASK: u32 = P;
- let mut log_sizes = (0..100)
- .map(|_| rng.gen_range(LOG_SIZE_MIN..LOG_SIZE_BOUND))
- .collect::<Vec<_>>();
- log_sizes.sort();
-
- // Generate random evaluations.
- let evaluations = log_sizes
- .iter()
- .map(|log_size| {
- (0..(1 << *log_size))
- .map(|_| M31::from_u32_unchecked(rng.gen::<u32>() & MASK))
- .collect::<Vec<_>>()
- })
- .collect::<Vec<_>>();
- let alpha = qm31!(2, 3, 4, 5);
-
- // Use accumulator.
- let mut accumulator = DomainEvaluationAccumulator::<CpuBackend>::new(
- alpha,
- LOG_SIZE_BOUND,
- evaluations.len(),
- );
- let n_cols_per_size: [(u32, usize); (LOG_SIZE_BOUND - LOG_SIZE_MIN) as usize] =
- array::from_fn(|i| {
- let current_log_size = LOG_SIZE_MIN + i as u32;
- let n_cols = log_sizes
- .iter()
- .copied()
- .filter(|&log_size| log_size == current_log_size)
- .count();
- (current_log_size, n_cols)
- });
- let mut cols = accumulator.columns(n_cols_per_size);
- let mut eval_chunk_offset = 0;
- for (log_size, n_cols) in n_cols_per_size.iter() {
- for index in 0..(1 << log_size) {
- let mut val = SecureField::zero();
- for (eval_index, (col_log_size, evaluation)) in
- log_sizes.iter().zip(evaluations.iter()).enumerate()
- {
- if *log_size != *col_log_size {
- continue;
- }
-
- // The random coefficient powers chunk is in regular order.
- let random_coeff_chunk =
- &cols[(log_size - LOG_SIZE_MIN) as usize].random_coeff_powers;
- val += random_coeff_chunk
- [random_coeff_chunk.len() - 1 - (eval_index - eval_chunk_offset)]
- * evaluation[index];
- }
- cols[(log_size - LOG_SIZE_MIN) as usize].accumulate(index, val);
- }
- eval_chunk_offset += n_cols;
- }
- let accumulator_poly = accumulator.finalize();
-
- // Pick an arbitrary sample point.
- let point = CirclePoint::<SecureField>::get_point(98989892);
- let accumulator_res = accumulator_poly.eval_at_point(point);
-
- // Use direct computation.
- let mut res = SecureField::default();
- for (log_size, values) in log_sizes.into_iter().zip(evaluations) {
- res = res * alpha
- + CpuCircleEvaluation::new(CanonicCoset::new(log_size).circle_domain(), values)
- .interpolate()
- .eval_at_point(point);
- }
-
- assert_eq!(accumulator_res, res);
- }
-}
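
The Horner-style update in `PointEvaluationAccumulator::accumulate` (`acc = acc * alpha + u_i`) realizes exactly the combination from the module doc, `f(p) = sum_i alpha^(N-1-i) * u_i(p)`. A standalone sketch with plain integers standing in for secure field elements:

```rust
/// Horner-style accumulation over evaluations u_0, ..., u_{N-1}.
fn accumulate(alpha: i64, evals: &[i64]) -> i64 {
    evals.iter().fold(0, |acc, &u| acc * alpha + u)
}

fn main() {
    let (alpha, evals) = (2, [3, 5, 7]);
    // 3 * 2^2 + 5 * 2 + 7 = 29
    assert_eq!(accumulate(alpha, &evals), 29);
    // Direct evaluation of sum_i alpha^(N-1-i) * u_i matches the folded result.
    let direct: i64 = evals
        .iter()
        .enumerate()
        .map(|(i, &u)| alpha.pow((evals.len() - 1 - i) as u32) * u)
        .sum();
    assert_eq!(direct, 29);
}
```
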
diff --git a/Stwo_wrapper/crates/prover/src/core/air/components.rs b/Stwo_wrapper/crates/prover/src/core/air/components.rs
deleted file mode 100644
index a7e0129..0000000
--- a/Stwo_wrapper/crates/prover/src/core/air/components.rs
+++ /dev/null
@@ -1,80 +0,0 @@
-use itertools::Itertools;
-
-use super::accumulation::{DomainEvaluationAccumulator, PointEvaluationAccumulator};
-use super::{Component, ComponentProver, Trace};
-use crate::core::backend::Backend;
-use crate::core::circle::CirclePoint;
-use crate::core::fields::qm31::SecureField;
-use crate::core::pcs::TreeVec;
-use crate::core::poly::circle::SecureCirclePoly;
-use crate::core::ColumnVec;
-
-pub struct Components<'a>(pub Vec<&'a dyn Component>);
-
-impl<'a> Components<'a> {
- pub fn composition_log_degree_bound(&self) -> u32 {
- self.0
- .iter()
- .map(|component| component.max_constraint_log_degree_bound())
- .max()
- .unwrap()
- }
-
- pub fn mask_points(
- &self,
- point: CirclePoint<SecureField>,
- ) -> TreeVec<ColumnVec<Vec<CirclePoint<SecureField>>>> {
- TreeVec::concat_cols(self.0.iter().map(|component| component.mask_points(point)))
- }
-
- pub fn eval_composition_polynomial_at_point(
- &self,
- point: CirclePoint<SecureField>,
- mask_values: &TreeVec<ColumnVec<Vec<SecureField>>>,
- random_coeff: SecureField,
- ) -> SecureField {
- // Accumulator for the random linear combination over powers of random_coeff.
- let mut evaluation_accumulator = PointEvaluationAccumulator::new(random_coeff);
-
- for component in &self.0 {
- component.evaluate_constraint_quotients_at_point(
- point,
- mask_values,
- &mut evaluation_accumulator,
- )
- }
- evaluation_accumulator.finalize()
- }
-
- pub fn column_log_sizes(&self) -> TreeVec<ColumnVec<u32>> {
- TreeVec::concat_cols(
- self.0
- .iter()
- .map(|component| component.trace_log_degree_bounds()),
- )
- }
-}
-
-pub struct ComponentProvers<'a, B: Backend>(pub Vec<&'a dyn ComponentProver<B>>);
-
-impl<'a, B: Backend> ComponentProvers<'a, B> {
- pub fn components(&self) -> Components<'_> {
- Components(self.0.iter().map(|c| *c as &dyn Component).collect_vec())
- }
- pub fn compute_composition_polynomial(
- &self,
- random_coeff: SecureField,
- trace: &Trace<'_, B>,
- ) -> SecureCirclePoly<B> {
- let total_constraints: usize = self.0.iter().map(|c| c.n_constraints()).sum();
- let mut accumulator = DomainEvaluationAccumulator::new(
- random_coeff,
- self.components().composition_log_degree_bound(),
- total_constraints,
- );
- for component in &self.0 {
- component.evaluate_constraint_quotients_on_domain(trace, &mut accumulator)
- }
- accumulator.finalize()
- }
-}
diff --git a/Stwo_wrapper/crates/prover/src/core/air/mask.rs b/Stwo_wrapper/crates/prover/src/core/air/mask.rs
deleted file mode 100644
index e2748a6..0000000
--- a/Stwo_wrapper/crates/prover/src/core/air/mask.rs
+++ /dev/null
@@ -1,91 +0,0 @@
-use std::collections::HashSet;
-use std::vec;
-
-use itertools::Itertools;
-
-use crate::core::circle::CirclePoint;
-use crate::core::fields::qm31::SecureField;
-use crate::core::poly::circle::CanonicCoset;
-use crate::core::ColumnVec;
-
-/// Mask holds a vector with an entry for each column.
-/// Each entry holds a list of mask items, which are the offsets of the mask at that column.
-type Mask = ColumnVec<Vec<usize>>;
-
-/// Returns the same point for each mask item.
-/// Should be used where all the mask items have no shift from the constraint point.
-pub fn fixed_mask_points(
- mask: &Mask,
- point: CirclePoint<SecureField>,
-) -> ColumnVec<Vec<CirclePoint<SecureField>>> {
- assert_eq!(
- mask.iter()
- .flat_map(|mask_entry| mask_entry.iter().collect::<HashSet<_>>())
- .collect::<HashSet<_>>()
- .into_iter()
- .collect_vec(),
- vec![&0]
- );
- mask.iter()
- .map(|mask_entry| mask_entry.iter().map(|_| point).collect())
- .collect()
-}
-
-/// For each mask item returns the point shifted by the domain initial point of the column.
-/// Should be used where the mask items are shifted from the constraint point.
-pub fn shifted_mask_points(
- mask: &Mask,
- domains: &[CanonicCoset],
- point: CirclePoint<SecureField>,
-) -> ColumnVec<Vec<CirclePoint<SecureField>>> {
- mask.iter()
- .zip(domains.iter())
- .map(|(mask_entry, domain)| {
- mask_entry
- .iter()
- .map(|mask_item| point + domain.at(*mask_item).into_ef())
- .collect()
- })
- .collect()
-}
-
-#[cfg(test)]
-mod tests {
- use crate::core::air::mask::{fixed_mask_points, shifted_mask_points};
- use crate::core::circle::CirclePoint;
- use crate::core::poly::circle::CanonicCoset;
-
- #[test]
- fn test_mask_fixed_points() {
- let mask = vec![vec![0], vec![0]];
- let constraint_point = CirclePoint::get_point(1234);
-
- let points = fixed_mask_points(&mask, constraint_point);
-
- assert_eq!(points.len(), 2);
- assert_eq!(points[0].len(), 1);
- assert_eq!(points[1].len(), 1);
- assert_eq!(points[0][0], constraint_point);
- assert_eq!(points[1][0], constraint_point);
- }
-
- #[test]
- fn test_mask_shifted_points() {
- let mask = vec![vec![0, 1], vec![0, 1, 2]];
- let constraint_point = CirclePoint::get_point(1234);
- let domains = (0..mask.len() as u32)
- .map(|i| CanonicCoset::new(7 + i))
- .collect::<Vec<_>>();
-
- let points = shifted_mask_points(&mask, &domains, constraint_point);
-
- assert_eq!(points.len(), 2);
- assert_eq!(points[0].len(), 2);
- assert_eq!(points[1].len(), 3);
- assert_eq!(points[0][0], constraint_point + domains[0].at(0).into_ef());
- assert_eq!(points[0][1], constraint_point + domains[0].at(1).into_ef());
- assert_eq!(points[1][0], constraint_point + domains[1].at(0).into_ef());
- assert_eq!(points[1][1], constraint_point + domains[1].at(1).into_ef());
- assert_eq!(points[1][2], constraint_point + domains[1].at(2).into_ef());
- }
-}
diff --git a/Stwo_wrapper/crates/prover/src/core/air/mod.rs b/Stwo_wrapper/crates/prover/src/core/air/mod.rs
deleted file mode 100644
index fcdd4d5..0000000
--- a/Stwo_wrapper/crates/prover/src/core/air/mod.rs
+++ /dev/null
@@ -1,76 +0,0 @@
-pub use components::{ComponentProvers, Components};
-
-use self::accumulation::{DomainEvaluationAccumulator, PointEvaluationAccumulator};
-use super::backend::Backend;
-use super::circle::CirclePoint;
-use super::fields::m31::BaseField;
-use super::fields::qm31::SecureField;
-use super::pcs::TreeVec;
-use super::poly::circle::{CircleEvaluation, CirclePoly};
-use super::poly::BitReversedOrder;
-use super::ColumnVec;
-
-pub mod accumulation;
-mod components;
-pub mod mask;
-
-/// Arithmetic Intermediate Representation (AIR).
-/// An Air instance is assumed to already contain all the information needed to
-/// evaluate the constraints.
-/// For instance, all interaction elements are assumed to be present in it.
-/// Therefore, an AIR is generated only after the initial trace commitment phase.
-// TODO(spapini): consider renaming this struct.
-pub trait Air {
- fn components(&self) -> Vec<&dyn Component>;
-}
-
-pub trait AirProver<B: Backend>: Air {
- fn component_provers(&self) -> Vec<&dyn ComponentProver<B>>;
-}
-
-/// A component is a set of trace columns of various sizes along with a set of
-/// constraints on them.
-pub trait Component {
- fn n_constraints(&self) -> usize;
-
- fn max_constraint_log_degree_bound(&self) -> u32;
-
- /// Returns the degree bounds of each trace column. The returned TreeVec should be of size
- /// `n_interaction_phases`.
- fn trace_log_degree_bounds(&self) -> TreeVec>;
-
- /// Returns the mask points for each trace column. The returned TreeVec should be of size
- /// `n_interaction_phases`.
- fn mask_points(
- &self,
- point: CirclePoint<SecureField>,
- ) -> TreeVec<ColumnVec<Vec<CirclePoint<SecureField>>>>;
-
- /// Evaluates the constraint quotients combination of the component at a point.
- fn evaluate_constraint_quotients_at_point(
- &self,
- point: CirclePoint<SecureField>,
- mask: &TreeVec<ColumnVec<Vec<SecureField>>>,
- evaluation_accumulator: &mut PointEvaluationAccumulator,
- );
-}
-
-pub trait ComponentProver<B: Backend>: Component {
- /// Evaluates the constraint quotients of the component on the evaluation domain.
- /// Accumulates quotients in `evaluation_accumulator`.
- fn evaluate_constraint_quotients_on_domain(
- &self,
- trace: &Trace<'_, B>,
- evaluation_accumulator: &mut DomainEvaluationAccumulator<B>,
- );
-}
-
-/// The set of polynomials that make up the trace.
-///
-/// Each polynomial is stored both in coefficient form and in evaluation form (for efficiency).
-pub struct Trace<'a, B: Backend> {
- /// Polynomials for each column.
- pub polys: TreeVec<ColumnVec<&'a CirclePoly<B>>>,
- /// Evaluations for each column (evaluated on their commitment domains).
- pub evals: TreeVec<ColumnVec<&'a CircleEvaluation<B, BaseField, BitReversedOrder>>>,
-}
diff --git a/Stwo_wrapper/crates/prover/src/core/backend/cpu/accumulation.rs b/Stwo_wrapper/crates/prover/src/core/backend/cpu/accumulation.rs
deleted file mode 100644
index 63a49bf..0000000
--- a/Stwo_wrapper/crates/prover/src/core/backend/cpu/accumulation.rs
+++ /dev/null
@@ -1,12 +0,0 @@
-use super::CpuBackend;
-use crate::core::air::accumulation::AccumulationOps;
-use crate::core::fields::secure_column::SecureColumnByCoords;
-
-impl AccumulationOps for CpuBackend {
- fn accumulate(column: &mut SecureColumnByCoords<Self>, other: &SecureColumnByCoords<Self>) {
- for i in 0..column.len() {
- let res_coeff = column.at(i) + other.at(i);
- column.set(i, res_coeff);
- }
- }
-}
diff --git a/Stwo_wrapper/crates/prover/src/core/backend/cpu/blake2s.rs b/Stwo_wrapper/crates/prover/src/core/backend/cpu/blake2s.rs
deleted file mode 100644
index a87a5ae..0000000
--- a/Stwo_wrapper/crates/prover/src/core/backend/cpu/blake2s.rs
+++ /dev/null
@@ -1,24 +0,0 @@
-use itertools::Itertools;
-
-use crate::core::backend::CpuBackend;
-use crate::core::fields::m31::BaseField;
-use crate::core::vcs::blake2_hash::Blake2sHash;
-use crate::core::vcs::blake2_merkle::Blake2sMerkleHasher;
-use crate::core::vcs::ops::{MerkleHasher, MerkleOps};
-
-impl MerkleOps<Blake2sMerkleHasher> for CpuBackend {
- fn commit_on_layer(
- log_size: u32,
- prev_layer: Option<&Vec<Blake2sHash>>,
- columns: &[&Vec<BaseField>],
- ) -> Vec<Blake2sHash> {
- (0..(1 << log_size))
- .map(|i| {
- Blake2sMerkleHasher::hash_node(
- prev_layer.map(|prev_layer| (prev_layer[2 * i], prev_layer[2 * i + 1])),
- &columns.iter().map(|column| column[i]).collect_vec(),
- )
- })
- .collect()
- }
-}
diff --git a/Stwo_wrapper/crates/prover/src/core/backend/cpu/circle.rs b/Stwo_wrapper/crates/prover/src/core/backend/cpu/circle.rs
deleted file mode 100644
index c37ffe2..0000000
--- a/Stwo_wrapper/crates/prover/src/core/backend/cpu/circle.rs
+++ /dev/null
@@ -1,376 +0,0 @@
-use num_traits::Zero;
-
-use super::CpuBackend;
-use crate::core::backend::{Col, ColumnOps};
-use crate::core::circle::{CirclePoint, Coset};
-use crate::core::fft::{butterfly, ibutterfly};
-use crate::core::fields::m31::BaseField;
-use crate::core::fields::qm31::SecureField;
-use crate::core::fields::{ExtensionOf, FieldExpOps};
-use crate::core::poly::circle::{
- CanonicCoset, CircleDomain, CircleEvaluation, CirclePoly, PolyOps,
-};
-use crate::core::poly::twiddles::TwiddleTree;
-use crate::core::poly::utils::{domain_line_twiddles_from_tree, fold};
-use crate::core::poly::BitReversedOrder;
-use crate::core::utils::{bit_reverse, coset_order_to_circle_domain_order};
-
-impl PolyOps for CpuBackend {
- type Twiddles = Vec<BaseField>;
-
- fn new_canonical_ordered(
- coset: CanonicCoset,
- values: Col<Self, BaseField>,
- ) -> CircleEvaluation<Self, BaseField, BitReversedOrder> {
- let domain = coset.circle_domain();
- assert_eq!(values.len(), domain.size());
- let mut new_values = coset_order_to_circle_domain_order(&values);
- CpuBackend::bit_reverse_column(&mut new_values);
- CircleEvaluation::new(domain, new_values)
- }
-
- fn interpolate(
- eval: CircleEvaluation<Self, BaseField, BitReversedOrder>,
- twiddles: &TwiddleTree<Self>,
- ) -> CirclePoly<Self> {
- assert!(eval.domain.half_coset.is_doubling_of(twiddles.root_coset));
-
- let mut values = eval.values;
-
- if eval.domain.log_size() == 1 {
- let y = eval.domain.half_coset.initial.y;
- let n = BaseField::from(2);
- let yn_inv = (y * n).inverse();
- let y_inv = yn_inv * n;
- let n_inv = yn_inv * y;
- let (mut v0, mut v1) = (values[0], values[1]);
- ibutterfly(&mut v0, &mut v1, y_inv);
- return CirclePoly::new(vec![v0 * n_inv, v1 * n_inv]);
- }
-
- if eval.domain.log_size() == 2 {
- let CirclePoint { x, y } = eval.domain.half_coset.initial;
- let n = BaseField::from(4);
- let xyn_inv = (x * y * n).inverse();
- let x_inv = xyn_inv * y * n;
- let y_inv = xyn_inv * x * n;
- let n_inv = xyn_inv * x * y;
- let (mut v0, mut v1, mut v2, mut v3) = (values[0], values[1], values[2], values[3]);
- ibutterfly(&mut v0, &mut v1, y_inv);
- ibutterfly(&mut v2, &mut v3, -y_inv);
- ibutterfly(&mut v0, &mut v2, x_inv);
- ibutterfly(&mut v1, &mut v3, x_inv);
- return CirclePoly::new(vec![v0 * n_inv, v1 * n_inv, v2 * n_inv, v3 * n_inv]);
- }
-
- let line_twiddles = domain_line_twiddles_from_tree(eval.domain, &twiddles.itwiddles);
- let circle_twiddles = circle_twiddles_from_line_twiddles(line_twiddles[0]);
-
- for (h, t) in circle_twiddles.enumerate() {
- fft_layer_loop(&mut values, 0, h, t, ibutterfly);
- }
- for (layer, layer_twiddles) in line_twiddles.into_iter().enumerate() {
- for (h, &t) in layer_twiddles.iter().enumerate() {
- fft_layer_loop(&mut values, layer + 1, h, t, ibutterfly);
- }
- }
-
- // Divide all values by 2^log_size.
- let inv = BaseField::from_u32_unchecked(eval.domain.size() as u32).inverse();
- for val in &mut values {
- *val *= inv;
- }
-
- CirclePoly::new(values)
- }
-
- fn eval_at_point(poly: &CirclePoly<Self>, point: CirclePoint<SecureField>) -> SecureField {
- if poly.log_size() == 0 {
- return poly.coeffs[0].into();
- }
-
- let mut mappings = vec![point.y];
- let mut x = point.x;
- for _ in 1..poly.log_size() {
- mappings.push(x);
- x = CirclePoint::double_x(x);
- }
- mappings.reverse();
-
- fold(&poly.coeffs, &mappings)
- }
-
- fn extend(poly: &CirclePoly<Self>, log_size: u32) -> CirclePoly<Self> {
- assert!(log_size >= poly.log_size());
- let mut coeffs = Vec::with_capacity(1 << log_size);
- coeffs.extend_from_slice(&poly.coeffs);
- coeffs.resize(1 << log_size, BaseField::zero());
- CirclePoly::new(coeffs)
- }
-
- fn evaluate(
- poly: &CirclePoly<Self>,
- domain: CircleDomain,
- twiddles: &TwiddleTree<Self>,
- ) -> CircleEvaluation<Self, BaseField, BitReversedOrder> {
- assert!(domain.half_coset.is_doubling_of(twiddles.root_coset));
-
- let mut values = poly.extend(domain.log_size()).coeffs;
-
- if domain.log_size() == 1 {
- let (mut v0, mut v1) = (values[0], values[1]);
- butterfly(&mut v0, &mut v1, domain.half_coset.initial.y);
- return CircleEvaluation::new(domain, vec![v0, v1]);
- }
-
- if domain.log_size() == 2 {
- let (mut v0, mut v1, mut v2, mut v3) = (values[0], values[1], values[2], values[3]);
- let CirclePoint { x, y } = domain.half_coset.initial;
- butterfly(&mut v0, &mut v2, x);
- butterfly(&mut v1, &mut v3, x);
- butterfly(&mut v0, &mut v1, y);
- butterfly(&mut v2, &mut v3, -y);
- return CircleEvaluation::new(domain, vec![v0, v1, v2, v3]);
- }
-
- let line_twiddles = domain_line_twiddles_from_tree(domain, &twiddles.twiddles);
- let circle_twiddles = circle_twiddles_from_line_twiddles(line_twiddles[0]);
-
- for (layer, layer_twiddles) in line_twiddles.iter().enumerate().rev() {
- for (h, &t) in layer_twiddles.iter().enumerate() {
- fft_layer_loop(&mut values, layer + 1, h, t, butterfly);
- }
- }
- for (h, t) in circle_twiddles.enumerate() {
- fft_layer_loop(&mut values, 0, h, t, butterfly);
- }
-
- CircleEvaluation::new(domain, values)
- }
-
- fn precompute_twiddles(mut coset: Coset) -> TwiddleTree<Self> {
- const CHUNK_LOG_SIZE: usize = 12;
- const CHUNK_SIZE: usize = 1 << CHUNK_LOG_SIZE;
-
- let root_coset = coset;
- let mut twiddles = Vec::with_capacity(coset.size());
- for _ in 0..coset.log_size() {
- let i0 = twiddles.len();
- twiddles.extend(
- coset
- .iter()
- .take(coset.size() / 2)
- .map(|p| p.x)
- .collect::<Vec<_>>(),
- );
- bit_reverse(&mut twiddles[i0..]);
- coset = coset.double();
- }
- twiddles.push(1.into());
-
- // Inverse twiddles.
- // Fallback to the non-chunked version if the domain is not big enough.
- if CHUNK_SIZE > coset.size() {
- let itwiddles = twiddles.iter().map(|&t| t.inverse()).collect();
- return TwiddleTree {
- root_coset,
- twiddles,
- itwiddles,
- };
- }
-
- let mut itwiddles = vec![BaseField::zero(); twiddles.len()];
- twiddles
- .array_chunks::<CHUNK_SIZE>()
- .zip(itwiddles.array_chunks_mut::<CHUNK_SIZE>())
- .for_each(|(src, dst)| {
- BaseField::batch_inverse(src, dst);
- });
-
- TwiddleTree {
- root_coset,
- twiddles,
- itwiddles,
- }
- }
-}
-
-fn fft_layer_loop(
- values: &mut [BaseField],
- i: usize,
- h: usize,
- t: BaseField,
- butterfly_fn: impl Fn(&mut BaseField, &mut BaseField, BaseField),
-) {
- for l in 0..(1 << i) {
- let idx0 = (h << (i + 1)) + l;
- let idx1 = idx0 + (1 << i);
- let (mut val0, mut val1) = (values[idx0], values[idx1]);
- butterfly_fn(&mut val0, &mut val1, t);
- (values[idx0], values[idx1]) = (val0, val1);
- }
-}
-
-/// Computes the circle twiddles layer (layer 0) from the first line twiddles layer (layer 1).
-///
-/// Only works for line twiddles generated from a domain with size `>4`.
-fn circle_twiddles_from_line_twiddles(
- first_line_twiddles: &[BaseField],
-) -> impl Iterator<Item = BaseField> + '_ {
- // The twiddles for layer 0 can be computed from the twiddles for layer 1.
- // Since the twiddles are bit reversed, we consider the circle domain in bit reversed order.
- // Each consecutive 4 points in the bit reversed order of a coset form a circle coset of size 4.
- // A circle coset of size 4 in bit reversed order looks like this:
- // [(x, y), (-x, -y), (y, -x), (-y, x)]
- // Note: This relation is derived from the fact that `M31_CIRCLE_GEN`.repeated_double(ORDER / 4)
- // == (-1,0), and not (0,1). (0,1) would yield another relation.
- // The twiddles for layer 0 are the y coordinates:
- // [y, -y, -x, x]
- // The twiddles for layer 1 in bit reversed order are the x coordinates of the even indices
- // points:
- // [x, y]
- // Works also for inverse of the twiddles.
- first_line_twiddles
- .iter()
- .array_chunks()
- .flat_map(|[&x, &y]| [y, -y, -x, x])
-}
-
-impl<F: ExtensionOf<BaseField>, EvalOrder> IntoIterator
- for CircleEvaluation<CpuBackend, F, EvalOrder>
-{
- type Item = F;
- type IntoIter = std::vec::IntoIter<F>;
-
- /// Creates a consuming iterator over the evaluations.
- ///
- /// Evaluations are returned in the same order as elements of the domain.
- fn into_iter(self) -> Self::IntoIter {
- self.values.into_iter()
- }
-}
-
-#[cfg(test)]
-mod tests {
- use std::iter::zip;
-
- use num_traits::One;
-
- use crate::core::backend::cpu::CpuCirclePoly;
- use crate::core::circle::CirclePoint;
- use crate::core::fields::m31::BaseField;
- use crate::core::fields::qm31::SecureField;
- use crate::core::poly::circle::CanonicCoset;
-
- #[test]
- fn test_eval_at_point_with_4_coeffs() {
- // Represents the polynomial `1 + 2y + 3x + 4xy`.
- // Note coefficients are passed in bit reversed order.
- let poly = CpuCirclePoly::new([1, 3, 2, 4].map(BaseField::from).to_vec());
- let x = BaseField::from(5).into();
- let y = BaseField::from(8).into();
-
- let eval = poly.eval_at_point(CirclePoint { x, y });
-
- assert_eq!(
- eval,
- poly.coeffs[0] + poly.coeffs[1] * y + poly.coeffs[2] * x + poly.coeffs[3] * x * y
- );
- }
-
- #[test]
- fn test_eval_at_point_with_2_coeffs() {
- // Represents the polynomial `1 + 2y`.
- let poly = CpuCirclePoly::new(vec![BaseField::from(1), BaseField::from(2)]);
- let x = BaseField::from(5).into();
- let y = BaseField::from(8).into();
-
- let eval = poly.eval_at_point(CirclePoint { x, y });
-
- assert_eq!(eval, poly.coeffs[0] + poly.coeffs[1] * y);
- }
-
- #[test]
- fn test_eval_at_point_with_1_coeff() {
- // Represents the polynomial `1`.
- let poly = CpuCirclePoly::new(vec![BaseField::one()]);
- let x = BaseField::from(5).into();
- let y = BaseField::from(8).into();
-
- let eval = poly.eval_at_point(CirclePoint { x, y });
-
- assert_eq!(eval, SecureField::one());
- }
-
- #[test]
- fn test_evaluate_2_coeffs() {
- let domain = CanonicCoset::new(1).circle_domain();
- let poly = CpuCirclePoly::new((1..=2).map(BaseField::from).collect());
-
- let evaluation = poly.clone().evaluate(domain).bit_reverse();
-
- for (i, (p, eval)) in zip(domain, evaluation).enumerate() {
- let eval: SecureField = eval.into();
- assert_eq!(eval, poly.eval_at_point(p.into_ef()), "mismatch at i={i}");
- }
- }
-
- #[test]
- fn test_evaluate_4_coeffs() {
- let domain = CanonicCoset::new(2).circle_domain();
- let poly = CpuCirclePoly::new((1..=4).map(BaseField::from).collect());
-
- let evaluation = poly.clone().evaluate(domain).bit_reverse();
-
- for (i, (x, eval)) in zip(domain, evaluation).enumerate() {
- let eval: SecureField = eval.into();
- assert_eq!(eval, poly.eval_at_point(x.into_ef()), "mismatch at i={i}");
- }
- }
-
- #[test]
- fn test_evaluate_8_coeffs() {
- let domain = CanonicCoset::new(3).circle_domain();
- let poly = CpuCirclePoly::new((1..=8).map(BaseField::from).collect());
-
- let evaluation = poly.clone().evaluate(domain).bit_reverse();
-
- for (i, (x, eval)) in zip(domain, evaluation).enumerate() {
- let eval: SecureField = eval.into();
- assert_eq!(eval, poly.eval_at_point(x.into_ef()), "mismatch at i={i}");
- }
- }
-
- #[test]
- fn test_interpolate_2_evals() {
- let poly = CpuCirclePoly::new(vec![BaseField::one(), BaseField::from(2)]);
- let domain = CanonicCoset::new(1).circle_domain();
- let evals = poly.clone().evaluate(domain);
-
- let interpolated_poly = evals.interpolate();
-
- assert_eq!(interpolated_poly.coeffs, poly.coeffs);
- }
-
- #[test]
- fn test_interpolate_4_evals() {
- let poly = CpuCirclePoly::new((1..=4).map(BaseField::from).collect());
- let domain = CanonicCoset::new(2).circle_domain();
- let evals = poly.clone().evaluate(domain);
-
- let interpolated_poly = evals.interpolate();
-
- assert_eq!(interpolated_poly.coeffs, poly.coeffs);
- }
-
- #[test]
- fn test_interpolate_8_evals() {
- let poly = CpuCirclePoly::new((1..=8).map(BaseField::from).collect());
- let domain = CanonicCoset::new(3).circle_domain();
- let evals = poly.clone().evaluate(domain);
-
- let interpolated_poly = evals.interpolate();
-
- assert_eq!(interpolated_poly.coeffs, poly.coeffs);
- }
-}
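
`eval_at_point` builds the folding factors `[..., double_x(x), x, y]` and delegates to `fold`. A minimal sketch of that folding step over plain `f64` values (a hypothetical `fold` helper, not the crate's `poly::utils::fold`): with factors ordered from the coarsest split to the finest, folding `[c0, c1, c2, c3]` with `[x, y]` yields `c0 + c1*y + c2*x + c3*x*y`, the same combination checked by `test_eval_at_point_with_4_coeffs` above.

```rust
// Recursively fold a coefficient vector: the first factor weights the upper half,
// the remaining factors handle each half.
fn fold(values: &[f64], factors: &[f64]) -> f64 {
    if factors.is_empty() {
        return values[0];
    }
    let mid = values.len() / 2;
    let (lhs, rhs) = values.split_at(mid);
    fold(lhs, &factors[1..]) + factors[0] * fold(rhs, &factors[1..])
}

fn main() {
    // Coefficients in the same order as the 4-coefficient test above.
    let coeffs = [1.0, 3.0, 2.0, 4.0];
    let (x, y) = (5.0, 8.0);
    // Factors are [x, y]: x splits the vector in half first, then y.
    let eval = fold(&coeffs, &[x, y]);
    assert_eq!(eval, 1.0 + 3.0 * y + 2.0 * x + 4.0 * x * y);
    println!("eval = {eval}");
}
```
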
diff --git a/Stwo_wrapper/crates/prover/src/core/backend/cpu/fri.rs b/Stwo_wrapper/crates/prover/src/core/backend/cpu/fri.rs
deleted file mode 100644
index 693fb99..0000000
--- a/Stwo_wrapper/crates/prover/src/core/backend/cpu/fri.rs
+++ /dev/null
@@ -1,144 +0,0 @@
-use super::CpuBackend;
-use crate::core::fields::m31::BaseField;
-use crate::core::fields::qm31::SecureField;
-use crate::core::fields::secure_column::SecureColumnByCoords;
-use crate::core::fri::{fold_circle_into_line, fold_line, FriOps};
-use crate::core::poly::circle::SecureEvaluation;
-use crate::core::poly::line::LineEvaluation;
-use crate::core::poly::twiddles::TwiddleTree;
-use crate::core::poly::BitReversedOrder;
-
-// TODO(spapini): Optimize these functions as well.
-impl FriOps for CpuBackend {
- fn fold_line(
- eval: &LineEvaluation<Self>,
- alpha: SecureField,
- _twiddles: &TwiddleTree<Self>,
- ) -> LineEvaluation<Self> {
- fold_line(eval, alpha)
- }
- fn fold_circle_into_line(
- dst: &mut LineEvaluation<Self>,
- src: &SecureEvaluation<Self, BitReversedOrder>,
- alpha: SecureField,
- _twiddles: &TwiddleTree<Self>,
- ) {
- fold_circle_into_line(dst, src, alpha)
- }
-
- fn decompose(
- eval: &SecureEvaluation<Self, BitReversedOrder>,
- ) -> (SecureEvaluation<Self, BitReversedOrder>, SecureField) {
- let lambda = Self::decomposition_coefficient(eval);
- let mut g_values = unsafe { SecureColumnByCoords::<Self>::uninitialized(eval.len()) };
-
- let domain_size = eval.len();
- let half_domain_size = domain_size / 2;
-
- for i in 0..half_domain_size {
- let x = eval.values.at(i);
- let val = x - lambda;
- g_values.set(i, val);
- }
- for i in half_domain_size..domain_size {
- let x = eval.values.at(i);
- let val = x + lambda;
- g_values.set(i, val);
- }
-
- let g = SecureEvaluation::new(eval.domain, g_values);
- (g, lambda)
- }
-}
-
-impl CpuBackend {
- /// Decomposes a general polynomial into a polynomial inside the FFT-space and the
- /// remainder term.
- /// A coset-diff on a [`CirclePoly`] that is in the FFT space will return zero.
- ///
- /// Let N be the domain size and let h be a coset of size N/2. Using lemma #7 from the CircleStark
- /// paper, `<f, V_h> = lambda * <V_h, V_h> = lambda * N`, i.e.
- /// `lambda * N = f(0)*V_h(0) + f(1)*V_h(1) + .. + f(N-1)*V_h(N-1)`. The vanishing polynomial of a
- /// canonic coset sized half the circle domain, evaluated on the circle domain, is [(1, -1, -1, 1)]
- /// repeating. This becomes alternating [+-1] in our NaturalOrder, and [(+, +, +, ... , -, -)] in
- /// bit-reversed order. Explicitly, `lambda * N = sum(+f(0..N/2)) + sum(-f(N/2..))`.
- ///
- /// # Warning
- /// This function assumes the blowup factor is 2.
- ///
- /// [`CirclePoly`]: crate::core::poly::circle::CirclePoly
- fn decomposition_coefficient(eval: &SecureEvaluation<Self, BitReversedOrder>) -> SecureField {
- let domain_size = 1 << eval.domain.log_size();
- let half_domain_size = domain_size / 2;
-
- // eval is in bit-reversed order, hence all the positive factors are in the first half
- // and the negative ones in the second half.
- let a_sum = (0..half_domain_size)
- .map(|i| eval.values.at(i))
- .sum::<SecureField>();
- let b_sum = (half_domain_size..domain_size)
- .map(|i| eval.values.at(i))
- .sum::<SecureField>();
-
- // lambda = sum(+-f(p)) / 2N.
- (a_sum - b_sum) / BaseField::from_u32_unchecked(domain_size as u32)
- }
-}
-
-#[cfg(test)]
-mod tests {
- use num_traits::Zero;
-
- use crate::core::backend::cpu::{CpuCircleEvaluation, CpuCirclePoly};
- use crate::core::backend::CpuBackend;
- use crate::core::fields::m31::BaseField;
- use crate::core::fields::qm31::SecureField;
- use crate::core::fields::secure_column::SecureColumnByCoords;
- use crate::core::fri::FriOps;
- use crate::core::poly::circle::{CanonicCoset, SecureEvaluation};
- use crate::core::poly::BitReversedOrder;
- use crate::m31;
-
- #[test]
- fn decompose_coeff_out_fft_space_test() {
- for domain_log_size in 5..12 {
- let domain_log_half_size = domain_log_size - 1;
- let s = CanonicCoset::new(domain_log_size);
- let domain = s.circle_domain();
-
- let mut coeffs = vec![BaseField::zero(); 1 << domain_log_size];
-
- // Polynomial is out of FFT space.
- coeffs[1 << domain_log_half_size] = m31!(1);
- assert!(!CpuCirclePoly::new(coeffs.clone()).is_in_fft_space(domain_log_half_size));
-
- let poly = CpuCirclePoly::new(coeffs);
- let values = poly.evaluate(domain);
- let secure_column = SecureColumnByCoords {
- columns: [
- values.values.clone(),
- values.values.clone(),
- values.values.clone(),
- values.values.clone(),
- ],
- };
- let secure_eval = SecureEvaluation::<CpuBackend, BitReversedOrder>::new(
- domain,
- secure_column.clone(),
- );
-
- let (g, lambda) = CpuBackend::decompose(&secure_eval);
-
- // Sanity check.
- assert_ne!(lambda, SecureField::zero());
-
- // Assert the new polynomial is in the FFT space.
- for i in 0..4 {
- let basefield_column = g.columns[i].clone();
- let eval = CpuCircleEvaluation::new(domain, basefield_column);
- let coeffs = eval.interpolate().coeffs;
- assert!(CpuCirclePoly::new(coeffs).is_in_fft_space(domain_log_half_size));
- }
- }
- }
-}
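
`decompose` computes `lambda` as the signed sum of the evaluations (positive over the first half, negative over the second, in bit-reversed order) divided by the number of evaluation points, then shifts the evaluations so that this signed sum vanishes. A toy numeric check of that identity over plain `f64` values (an assumed simplification, not the crate's `SecureEvaluation` types):

```rust
// Split off the decomposition coefficient and return the shifted evaluations.
fn decompose(eval: &[f64]) -> (Vec<f64>, f64) {
    let n = eval.len();
    let half = n / 2;
    let a_sum: f64 = eval[..half].iter().sum();
    let b_sum: f64 = eval[half..].iter().sum();
    // Signed average against the [+1, ..., +1, -1, ..., -1] pattern.
    let lambda = (a_sum - b_sum) / n as f64;
    let g: Vec<f64> = eval
        .iter()
        .enumerate()
        .map(|(i, &v)| if i < half { v - lambda } else { v + lambda })
        .collect();
    (g, lambda)
}

fn main() {
    let eval = vec![3.0, 1.0, 4.0, 1.0, 5.0, 9.0, 2.0, 6.0];
    let half = eval.len() / 2;
    let (g, lambda) = decompose(&eval);
    // After the shift, the signed sum of g vanishes.
    let signed_sum: f64 = g[..half].iter().sum::<f64>() - g[half..].iter().sum::<f64>();
    assert!(signed_sum.abs() < 1e-12);
    println!("lambda = {lambda}, residual signed sum = {signed_sum}");
}
```
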
diff --git a/Stwo_wrapper/crates/prover/src/core/backend/cpu/grind.rs b/Stwo_wrapper/crates/prover/src/core/backend/cpu/grind.rs
deleted file mode 100644
index c5d27a1..0000000
--- a/Stwo_wrapper/crates/prover/src/core/backend/cpu/grind.rs
+++ /dev/null
@@ -1,18 +0,0 @@
-use super::CpuBackend;
-use crate::core::channel::Channel;
-use crate::core::proof_of_work::GrindOps;
-
-impl<C: Channel> GrindOps<C> for CpuBackend {
- fn grind(channel: &C, pow_bits: u32) -> u64 {
- // TODO(spapini): This is a naive implementation. Optimize it.
- let mut nonce = 0;
- loop {
- let mut channel = channel.clone();
- channel.mix_nonce(nonce);
- if channel.trailing_zeros() >= pow_bits {
- return nonce;
- }
- nonce += 1;
- }
- }
-}
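
The grinding loop clones the channel, mixes in a candidate nonce, and accepts the first nonce whose resulting digest has at least `pow_bits` trailing zero bits. A standalone sketch with `DefaultHasher` standing in for the channel (an assumed stand-in, not the crate's `Channel` trait):

```rust
use std::collections::hash_map::DefaultHasher;
use std::hash::{Hash, Hasher};

// Find the smallest nonce whose mix with the current channel state gives
// a digest with at least `pow_bits` trailing zero bits.
fn grind(channel_state: u64, pow_bits: u32) -> u64 {
    let mut nonce = 0u64;
    loop {
        let mut h = DefaultHasher::new();
        (channel_state, nonce).hash(&mut h);
        if h.finish().trailing_zeros() >= pow_bits {
            return nonce;
        }
        nonce += 1;
    }
}

fn main() {
    let nonce = grind(0xdead_beef, 10); // ~2^10 expected attempts
    println!("found nonce {nonce}");
}
```
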
diff --git a/Stwo_wrapper/crates/prover/src/core/backend/cpu/lookups/gkr.rs b/Stwo_wrapper/crates/prover/src/core/backend/cpu/lookups/gkr.rs
deleted file mode 100644
index ae9ab6b..0000000
--- a/Stwo_wrapper/crates/prover/src/core/backend/cpu/lookups/gkr.rs
+++ /dev/null
@@ -1,448 +0,0 @@
-use std::ops::Index;
-
-use num_traits::{One, Zero};
-
-use crate::core::backend::CpuBackend;
-use crate::core::fields::m31::BaseField;
-use crate::core::fields::qm31::SecureField;
-use crate::core::fields::{ExtensionOf, Field};
-use crate::core::lookups::gkr_prover::{
- correct_sum_as_poly_in_first_variable, EqEvals, GkrMultivariatePolyOracle, GkrOps, Layer,
-};
-use crate::core::lookups::mle::{Mle, MleOps};
-use crate::core::lookups::sumcheck::MultivariatePolyOracle;
-use crate::core::lookups::utils::{Fraction, Reciprocal, UnivariatePoly};
-
-impl GkrOps for CpuBackend {
- fn gen_eq_evals(y: &[SecureField], v: SecureField) -> Mle<Self, SecureField> {
- Mle::new(gen_eq_evals(y, v))
- }
-
- fn next_layer(layer: &Layer<Self>) -> Layer<Self> {
- match layer {
- Layer::GrandProduct(layer) => next_grand_product_layer(layer),
- Layer::LogUpGeneric {
- numerators,
- denominators,
- } => next_logup_layer(MleExpr::Mle(numerators), denominators),
- Layer::LogUpMultiplicities {
- numerators,
- denominators,
- } => next_logup_layer(MleExpr::Mle(numerators), denominators),
- Layer::LogUpSingles { denominators } => {
- next_logup_layer(MleExpr::Constant(BaseField::one()), denominators)
- }
- }
- }
-
- fn sum_as_poly_in_first_variable(
- h: &GkrMultivariatePolyOracle<'_, Self>,
- claim: SecureField,
- ) -> UnivariatePoly<SecureField> {
- let n_variables = h.n_variables();
- assert!(!n_variables.is_zero());
- let n_terms = 1 << (n_variables - 1);
- let eq_evals = h.eq_evals.as_ref();
- // Vector used to generate evaluations of `eq(x, y)` for `x` in the boolean hypercube.
- let y = eq_evals.y();
- let lambda = h.lambda;
-
- let (mut eval_at_0, mut eval_at_2) = match &h.input_layer {
- Layer::GrandProduct(col) => eval_grand_product_sum(eq_evals, col, n_terms),
- Layer::LogUpGeneric {
- numerators,
- denominators,
- } => eval_logup_sum(eq_evals, numerators, denominators, n_terms, lambda),
- Layer::LogUpMultiplicities {
- numerators,
- denominators,
- } => eval_logup_sum(eq_evals, numerators, denominators, n_terms, lambda),
- Layer::LogUpSingles { denominators } => {
- eval_logup_singles_sum(eq_evals, denominators, n_terms, lambda)
- }
- };
-
- eval_at_0 *= h.eq_fixed_var_correction;
- eval_at_2 *= h.eq_fixed_var_correction;
- correct_sum_as_poly_in_first_variable(eval_at_0, eval_at_2, claim, y, n_variables)
- }
-}
-
-/// Evaluates `sum_x eq(({0}^|r|, 0, x), y) * inp(r, t, x, 0) * inp(r, t, x, 1)` at `t=0` and `t=2`.
-///
-/// Output of the form: `(eval_at_0, eval_at_2)`.
-fn eval_grand_product_sum(
- eq_evals: &EqEvals<CpuBackend>,
- input_layer: &Mle<CpuBackend, SecureField>,
- n_terms: usize,
-) -> (SecureField, SecureField) {
- let mut eval_at_0 = SecureField::zero();
- let mut eval_at_2 = SecureField::zero();
-
- for i in 0..n_terms {
- // Input polynomial at points `(r, {0, 1, 2}, bits(i), {0, 1})`.
- let inp_at_r0i0 = input_layer[i * 2];
- let inp_at_r0i1 = input_layer[i * 2 + 1];
- let inp_at_r1i0 = input_layer[(n_terms + i) * 2];
- let inp_at_r1i1 = input_layer[(n_terms + i) * 2 + 1];
- // Note `inp(r, t, x) = eq(t, 0) * inp(r, 0, x) + eq(t, 1) * inp(r, 1, x)`
- // => `inp(r, 2, x) = 2 * inp(r, 1, x) - inp(r, 0, x)`
- // TODO(andrew): Consider evaluation at `1/2` to save an addition operation since
- // `inp(r, 1/2, x) = 1/2 * (inp(r, 1, x) + inp(r, 0, x))`. `1/2 * ...` can be factored
- // outside the loop.
- let inp_at_r2i0 = inp_at_r1i0.double() - inp_at_r0i0;
- let inp_at_r2i1 = inp_at_r1i1.double() - inp_at_r0i1;
-
- // Product polynomial `prod(x) = inp(x, 0) * inp(x, 1)` at points `(r, {0, 2}, bits(i))`.
- let prod_at_r2i = inp_at_r2i0 * inp_at_r2i1;
- let prod_at_r0i = inp_at_r0i0 * inp_at_r0i1;
-
- let eq_eval_at_0i = eq_evals[i];
- eval_at_0 += eq_eval_at_0i * prod_at_r0i;
- eval_at_2 += eq_eval_at_0i * prod_at_r2i;
- }
-
- (eval_at_0, eval_at_2)
-}
-
-/// Evaluates `sum_x eq(({0}^|r|, 0, x), y) * (inp_numer(r, t, x, 0) * inp_denom(r, t, x, 1) +
-/// inp_numer(r, t, x, 1) * inp_denom(r, t, x, 0) + lambda * inp_denom(r, t, x, 0) * inp_denom(r, t,
-/// x, 1))` at `t=0` and `t=2`.
-///
-/// Output of the form: `(eval_at_0, eval_at_2)`.
-fn eval_logup_sum