moved waksman to outside repo

This commit is contained in:
Nicholas Ward 2023-03-03 13:56:06 -08:00
parent 57ea64e303
commit b95bc90bb7
13 changed files with 2 additions and 2209 deletions

View File

@ -1,5 +1,5 @@
[workspace]
members = ["ecdsa", "evm", "field", "insertion", "maybe_rayon", "plonky2", "starky", "u32", "util", "waksman"]
members = ["ecdsa", "evm", "field", "insertion", "maybe_rayon", "plonky2", "starky", "u32", "util"]
[profile.release]
opt-level = 3

View File

@ -59,3 +59,4 @@ Plonky2's default hash function is Poseidon, configured with 8 full rounds, 22 p
## Links
- [System Zero](https://github.com/mir-protocol/system-zero), a zkVM built on top of Starky (no longer maintained)
- [Waksman](https://github.com/mir-protocol/waksman), Plonky2 gadgets for permutation checking using Waksman networks (no longer maintained)

View File

@ -1,15 +0,0 @@
[package]
name = "plonky2_waksman"
description = "A circuit implementation of AS-Waksman networks, useful for checking permutations and sorting"
version = "0.1.0"
edition = "2021"
[dependencies]
anyhow = "1.0.40"
array_tool = "1.0.3"
bimap = "0.6.1"
itertools = "0.10.0"
"plonky2" = { version = "0.1.0" }
"plonky2_field" = { version = "0.1.0" }
"plonky2_util" = { version = "0.1.0" }
rand = "0.8.4"

View File

@ -1,202 +0,0 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

View File

@ -1,21 +0,0 @@
The MIT License (MIT)
Copyright (c) 2022 The Plonky2 Authors
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.

View File

@ -1,13 +0,0 @@
## License
Licensed under either of
* Apache License, Version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or http://www.apache.org/licenses/LICENSE-2.0)
* MIT license ([LICENSE-MIT](LICENSE-MIT) or http://opensource.org/licenses/MIT)
at your option.
### Contribution
Unless you explicitly state otherwise, any contribution intentionally submitted for inclusion in the work by you, as defined in the Apache-2.0 license, shall be dual licensed as above, without any additional terms or conditions.

View File

@ -1,75 +0,0 @@
use std::collections::HashMap;
use std::hash::Hash;
use bimap::BiMap;
/// Builds a `BiMap` pairing each index of `a` with an index of `b` that holds
/// the same value. The two vectors must be permutations of one another.
///
/// When duplicate values are present, several valid pairings exist and an
/// arbitrary one of them is produced.
///
/// # Panics
/// Panics if the lengths differ, or if some value of `a` has no unmatched
/// counterpart left in `b`.
pub fn bimap_from_lists<T: Eq + Hash>(a: Vec<T>, b: Vec<T>) -> BiMap<usize, usize> {
    assert_eq!(a.len(), b.len(), "Vectors differ in length");

    // Bucket the indices of `b` by value so each lookup below is O(1).
    let mut remaining_b_indices: HashMap<&T, Vec<usize>> = HashMap::new();
    for (j, value) in b.iter().enumerate() {
        remaining_b_indices
            .entry(value)
            .or_insert_with(Vec::new)
            .push(j);
    }

    let mut pairing = BiMap::new();
    for (i, value) in a.iter().enumerate() {
        // Consume one matching index of `b` per occurrence in `a`.
        let j = remaining_b_indices
            .get_mut(&value)
            .and_then(Vec::pop)
            .expect("Value in first list not found in second list");
        pairing.insert(i, j);
    }
    pairing
}
#[cfg(test)]
mod tests {
    use crate::bimap::bimap_from_lists;

    /// Two empty lists are trivially permutations; the mapping is empty.
    #[test]
    fn empty_lists() {
        let empty: Vec<char> = Vec::new();
        let bimap = bimap_from_lists(empty.clone(), empty);
        assert!(bimap.is_empty());
    }

    /// With distinct values the index mapping is uniquely determined.
    #[test]
    fn without_duplicates() {
        let bimap = bimap_from_lists(vec!['a', 'b', 'c'], vec!['b', 'c', 'a']);
        assert_eq!(bimap.get_by_left(&0), Some(&2));
        assert_eq!(bimap.get_by_left(&1), Some(&0));
        assert_eq!(bimap.get_by_left(&2), Some(&1));
    }

    /// With duplicates the exact pairing is arbitrary, so only check that
    /// every mapped pair carries equal values.
    #[test]
    fn with_duplicates() {
        let first = vec!['a', 'a', 'b'];
        let second = vec!['a', 'b', 'a'];
        let bimap = bimap_from_lists(first.clone(), second.clone());
        for i in 0..3 {
            let j = *bimap.get_by_left(&i).unwrap();
            assert_eq!(first[i], second[j]);
        }
    }

    /// Length mismatch must panic (checked up front by `assert_eq!`).
    #[test]
    #[should_panic]
    fn lengths_differ() {
        bimap_from_lists(vec!['a', 'a', 'b'], vec!['a', 'b']);
    }

    /// Same length but different multisets must panic when a value of the
    /// first list cannot be matched.
    #[test]
    #[should_panic]
    fn not_a_permutation() {
        bimap_from_lists(vec!['a', 'a', 'b'], vec!['a', 'b', 'b']);
    }
}

View File

@ -1,629 +0,0 @@
use std::marker::PhantomData;
use plonky2::gates::gate::Gate;
use plonky2::gates::packed_util::PackedEvaluableBase;
use plonky2::gates::util::StridedConstraintConsumer;
use plonky2::hash::hash_types::RichField;
use plonky2::iop::ext_target::ExtensionTarget;
use plonky2::iop::generator::{GeneratedValues, SimpleGenerator, WitnessGenerator};
use plonky2::iop::target::Target;
use plonky2::iop::wire::Wire;
use plonky2::iop::witness::{PartitionWitness, Witness, WitnessWrite};
use plonky2::plonk::circuit_builder::CircuitBuilder;
use plonky2::plonk::plonk_common::{reduce_with_powers, reduce_with_powers_ext_circuit};
use plonky2::plonk::vars::{
EvaluationTargets, EvaluationVars, EvaluationVarsBase, EvaluationVarsBaseBatch,
EvaluationVarsBasePacked,
};
use plonky2_field::extension::Extendable;
use plonky2_field::packed::PackedField;
use plonky2_field::types::{Field, Field64};
use plonky2_util::{bits_u64, ceil_div_usize};
// TODO: replace/merge this gate with `ComparisonGate`.
/// A gate for checking that one value is less than or equal to another.
///
/// Each input is decomposed into `num_chunks` limbs of `chunk_bits()` bits,
/// and the ordering is established from the most significant differing limb
/// pair. NOTE(review): the witness generator debug-asserts a *strict* `<`
/// relation, while this doc says "less than or equal" — confirm which
/// relation is intended.
#[derive(Clone, Debug)]
pub struct AssertLessThanGate<F: Field64 + Extendable<D>, const D: usize> {
    /// Total number of bits of each input being compared.
    pub(crate) num_bits: usize,
    /// Number of limbs each input is split into for range checking.
    pub(crate) num_chunks: usize,
    _phantom: PhantomData<F>,
}
impl<F: RichField + Extendable<D>, const D: usize> AssertLessThanGate<F, D> {
    /// Creates a gate comparing two `num_bits`-bit values using `num_chunks` limbs.
    pub fn new(num_bits: usize, num_chunks: usize) -> Self {
        // Inputs must fit strictly inside the field so their canonical u64
        // representations decompose unambiguously.
        debug_assert!(num_bits < bits_u64(F::ORDER));
        Self {
            num_bits,
            num_chunks,
            _phantom: PhantomData,
        }
    }

    /// Bits per limb: `ceil(num_bits / num_chunks)`.
    pub fn chunk_bits(&self) -> usize {
        ceil_div_usize(self.num_bits, self.num_chunks)
    }

    // Wire layout: wires 0..3 hold the two inputs and the most significant
    // limb difference, followed by five consecutive groups of `num_chunks`
    // wires each (limbs of input 1, limbs of input 2, equality dummies,
    // equality flags, accumulator intermediates).

    /// Wire index of the first input.
    pub fn wire_first_input(&self) -> usize {
        0
    }

    /// Wire index of the second input.
    pub fn wire_second_input(&self) -> usize {
        1
    }

    /// Wire holding the difference of the most significant differing limb pair.
    pub fn wire_most_significant_diff(&self) -> usize {
        2
    }

    /// Wire holding limb `chunk` of the first input.
    pub fn wire_first_chunk_val(&self, chunk: usize) -> usize {
        debug_assert!(chunk < self.num_chunks);
        3 + chunk
    }

    /// Wire holding limb `chunk` of the second input.
    pub fn wire_second_chunk_val(&self, chunk: usize) -> usize {
        debug_assert!(chunk < self.num_chunks);
        3 + self.num_chunks + chunk
    }

    /// Auxiliary wire used to prove whether limb pair `chunk` is (un)equal.
    pub fn wire_equality_dummy(&self, chunk: usize) -> usize {
        debug_assert!(chunk < self.num_chunks);
        3 + 2 * self.num_chunks + chunk
    }

    /// Boolean wire: 1 iff the two limbs of pair `chunk` are equal.
    pub fn wire_chunks_equal(&self, chunk: usize) -> usize {
        debug_assert!(chunk < self.num_chunks);
        3 + 3 * self.num_chunks + chunk
    }

    /// Wire carrying the running most-significant-diff accumulator at step `chunk`.
    pub fn wire_intermediate_value(&self, chunk: usize) -> usize {
        debug_assert!(chunk < self.num_chunks);
        3 + 4 * self.num_chunks + chunk
    }
}
impl<F: RichField + Extendable<D>, const D: usize> Gate<F, D> for AssertLessThanGate<F, D> {
    /// Unique gate identifier; derived from the `Debug` representation so it
    /// captures `num_bits`/`num_chunks` and the extension degree `D`.
    fn id(&self) -> String {
        format!("{self:?}<D={D}>")
    }

    /// Evaluates all constraints over the extension field.
    ///
    /// Constraint groups, in push order:
    /// 1. Each input equals the base-`2^chunk_bits` recombination of its limbs.
    /// 2. Per limb pair `i`: both limbs range-checked below `chunk_size` via a
    ///    vanishing product; `chunks_equal` proven consistent with the limb
    ///    difference (using `equality_dummy`, which the generator sets to the
    ///    inverse of the difference when the limbs differ); and the
    ///    accumulator's `intermediate_value` update checked.
    /// 3. The claimed `most_significant_diff` equals the final accumulator and
    ///    is itself range-checked below `chunk_size`.
    fn eval_unfiltered(&self, vars: EvaluationVars<F, D>) -> Vec<F::Extension> {
        let mut constraints = Vec::with_capacity(self.num_constraints());
        let first_input = vars.local_wires[self.wire_first_input()];
        let second_input = vars.local_wires[self.wire_second_input()];
        // Get chunks and assert that they match
        let first_chunks: Vec<F::Extension> = (0..self.num_chunks)
            .map(|i| vars.local_wires[self.wire_first_chunk_val(i)])
            .collect();
        let second_chunks: Vec<F::Extension> = (0..self.num_chunks)
            .map(|i| vars.local_wires[self.wire_second_chunk_val(i)])
            .collect();
        // Recombine the limbs (least significant first) and tie them to the inputs.
        let first_chunks_combined = reduce_with_powers(
            &first_chunks,
            F::Extension::from_canonical_usize(1 << self.chunk_bits()),
        );
        let second_chunks_combined = reduce_with_powers(
            &second_chunks,
            F::Extension::from_canonical_usize(1 << self.chunk_bits()),
        );
        constraints.push(first_chunks_combined - first_input);
        constraints.push(second_chunks_combined - second_input);
        let chunk_size = 1 << self.chunk_bits();
        let mut most_significant_diff_so_far = F::Extension::ZERO;
        for i in 0..self.num_chunks {
            // Range-check the chunks to be less than `chunk_size`.
            // (Product over all values in 0..chunk_size vanishes iff the limb
            // takes one of those values.)
            let first_product = (0..chunk_size)
                .map(|x| first_chunks[i] - F::Extension::from_canonical_usize(x))
                .product();
            let second_product = (0..chunk_size)
                .map(|x| second_chunks[i] - F::Extension::from_canonical_usize(x))
                .product();
            constraints.push(first_product);
            constraints.push(second_product);
            let difference = second_chunks[i] - first_chunks[i];
            let equality_dummy = vars.local_wires[self.wire_equality_dummy(i)];
            let chunks_equal = vars.local_wires[self.wire_chunks_equal(i)];
            // Two constraints to assert that `chunks_equal` is valid.
            // difference * equality_dummy == 1 - chunks_equal, and
            // chunks_equal * difference == 0 — together they force
            // chunks_equal to be 1 exactly when the limbs are equal.
            constraints.push(difference * equality_dummy - (F::Extension::ONE - chunks_equal));
            constraints.push(chunks_equal * difference);
            // Update `most_significant_diff_so_far`.
            // When the limbs differ, the accumulator is replaced by their
            // difference, so the final value comes from the highest differing pair.
            let intermediate_value = vars.local_wires[self.wire_intermediate_value(i)];
            constraints.push(intermediate_value - chunks_equal * most_significant_diff_so_far);
            most_significant_diff_so_far =
                intermediate_value + (F::Extension::ONE - chunks_equal) * difference;
        }
        let most_significant_diff = vars.local_wires[self.wire_most_significant_diff()];
        constraints.push(most_significant_diff - most_significant_diff_so_far);
        // Range check `most_significant_diff` to be less than `chunk_size`.
        let product = (0..chunk_size)
            .map(|x| most_significant_diff - F::Extension::from_canonical_usize(x))
            .product();
        constraints.push(product);
        constraints
    }

    /// Unused: the packed batch path below is the supported base-field route.
    fn eval_unfiltered_base_one(
        &self,
        _vars: EvaluationVarsBase<F>,
        _yield_constr: StridedConstraintConsumer<F>,
    ) {
        panic!("use eval_unfiltered_base_packed instead");
    }

    /// Base-field batch evaluation; delegates to the packed implementation.
    fn eval_unfiltered_base_batch(&self, vars_base: EvaluationVarsBaseBatch<F>) -> Vec<F> {
        self.eval_unfiltered_base_batch_packed(vars_base)
    }

    /// Recursive (in-circuit) version of `eval_unfiltered`; builds the same
    /// constraints, in the same order, as circuit targets.
    fn eval_unfiltered_circuit(
        &self,
        builder: &mut CircuitBuilder<F, D>,
        vars: EvaluationTargets<D>,
    ) -> Vec<ExtensionTarget<D>> {
        let mut constraints = Vec::with_capacity(self.num_constraints());
        let first_input = vars.local_wires[self.wire_first_input()];
        let second_input = vars.local_wires[self.wire_second_input()];
        // Get chunks and assert that they match
        let first_chunks: Vec<ExtensionTarget<D>> = (0..self.num_chunks)
            .map(|i| vars.local_wires[self.wire_first_chunk_val(i)])
            .collect();
        let second_chunks: Vec<ExtensionTarget<D>> = (0..self.num_chunks)
            .map(|i| vars.local_wires[self.wire_second_chunk_val(i)])
            .collect();
        let chunk_base = builder.constant(F::from_canonical_usize(1 << self.chunk_bits()));
        let first_chunks_combined =
            reduce_with_powers_ext_circuit(builder, &first_chunks, chunk_base);
        let second_chunks_combined =
            reduce_with_powers_ext_circuit(builder, &second_chunks, chunk_base);
        constraints.push(builder.sub_extension(first_chunks_combined, first_input));
        constraints.push(builder.sub_extension(second_chunks_combined, second_input));
        let chunk_size = 1 << self.chunk_bits();
        let mut most_significant_diff_so_far = builder.zero_extension();
        let one = builder.one_extension();
        // Find the chosen chunk.
        for i in 0..self.num_chunks {
            // Range-check the chunks to be less than `chunk_size`.
            let mut first_product = one;
            let mut second_product = one;
            for x in 0..chunk_size {
                let x_f = builder.constant_extension(F::Extension::from_canonical_usize(x));
                let first_diff = builder.sub_extension(first_chunks[i], x_f);
                let second_diff = builder.sub_extension(second_chunks[i], x_f);
                first_product = builder.mul_extension(first_product, first_diff);
                second_product = builder.mul_extension(second_product, second_diff);
            }
            constraints.push(first_product);
            constraints.push(second_product);
            let difference = builder.sub_extension(second_chunks[i], first_chunks[i]);
            let equality_dummy = vars.local_wires[self.wire_equality_dummy(i)];
            let chunks_equal = vars.local_wires[self.wire_chunks_equal(i)];
            // Two constraints to assert that `chunks_equal` is valid.
            let diff_times_equal = builder.mul_extension(difference, equality_dummy);
            let not_equal = builder.sub_extension(one, chunks_equal);
            constraints.push(builder.sub_extension(diff_times_equal, not_equal));
            constraints.push(builder.mul_extension(chunks_equal, difference));
            // Update `most_significant_diff_so_far`.
            let intermediate_value = vars.local_wires[self.wire_intermediate_value(i)];
            let old_diff = builder.mul_extension(chunks_equal, most_significant_diff_so_far);
            constraints.push(builder.sub_extension(intermediate_value, old_diff));
            let not_equal = builder.sub_extension(one, chunks_equal);
            let new_diff = builder.mul_extension(not_equal, difference);
            most_significant_diff_so_far = builder.add_extension(intermediate_value, new_diff);
        }
        let most_significant_diff = vars.local_wires[self.wire_most_significant_diff()];
        constraints
            .push(builder.sub_extension(most_significant_diff, most_significant_diff_so_far));
        // Range check `most_significant_diff` to be less than `chunk_size`.
        let mut product = builder.one_extension();
        for x in 0..chunk_size {
            let x_f = builder.constant_extension(F::Extension::from_canonical_usize(x));
            let diff = builder.sub_extension(most_significant_diff, x_f);
            product = builder.mul_extension(product, diff);
        }
        constraints.push(product);
        constraints
    }

    /// One witness generator per gate instance fills in the auxiliary wires.
    fn generators(&self, row: usize, _local_constants: &[F]) -> Vec<Box<dyn WitnessGenerator<F>>> {
        let gen = AssertLessThanGenerator::<F, D> {
            row,
            gate: self.clone(),
        };
        vec![Box::new(gen.adapter())]
    }

    /// Highest wire index used (last intermediate value), plus one.
    fn num_wires(&self) -> usize {
        self.wire_intermediate_value(self.num_chunks - 1) + 1
    }

    fn num_constants(&self) -> usize {
        0
    }

    /// The range-check products have one factor per residue, so degree 2^chunk_bits.
    fn degree(&self) -> usize {
        1 << self.chunk_bits()
    }

    /// 2 decomposition checks + 5 per chunk (2 range, 2 equality, 1 accumulator)
    /// + 1 final-diff check + 1 final range check = 4 + 5 * num_chunks.
    fn num_constraints(&self) -> usize {
        4 + 5 * self.num_chunks
    }
}
impl<F: RichField + Extendable<D>, const D: usize> PackedEvaluableBase<F, D>
    for AssertLessThanGate<F, D>
{
    /// Packed base-field evaluation. Mirrors `eval_unfiltered` constraint for
    /// constraint (see that method for the derivation of each one), but yields
    /// constraints through `yield_constr` instead of collecting a `Vec`.
    fn eval_unfiltered_base_packed<P: PackedField<Scalar = F>>(
        &self,
        vars: EvaluationVarsBasePacked<P>,
        mut yield_constr: StridedConstraintConsumer<P>,
    ) {
        let first_input = vars.local_wires[self.wire_first_input()];
        let second_input = vars.local_wires[self.wire_second_input()];
        // Get chunks and assert that they match
        let first_chunks: Vec<_> = (0..self.num_chunks)
            .map(|i| vars.local_wires[self.wire_first_chunk_val(i)])
            .collect();
        let second_chunks: Vec<_> = (0..self.num_chunks)
            .map(|i| vars.local_wires[self.wire_second_chunk_val(i)])
            .collect();
        // Recombine the limbs and tie them to the inputs.
        let first_chunks_combined = reduce_with_powers(
            &first_chunks,
            F::from_canonical_usize(1 << self.chunk_bits()),
        );
        let second_chunks_combined = reduce_with_powers(
            &second_chunks,
            F::from_canonical_usize(1 << self.chunk_bits()),
        );
        yield_constr.one(first_chunks_combined - first_input);
        yield_constr.one(second_chunks_combined - second_input);
        let chunk_size = 1 << self.chunk_bits();
        let mut most_significant_diff_so_far = P::ZEROS;
        for i in 0..self.num_chunks {
            // Range-check the chunks to be less than `chunk_size`.
            let first_product = (0..chunk_size)
                .map(|x| first_chunks[i] - F::from_canonical_usize(x))
                .product();
            let second_product = (0..chunk_size)
                .map(|x| second_chunks[i] - F::from_canonical_usize(x))
                .product();
            yield_constr.one(first_product);
            yield_constr.one(second_product);
            let difference = second_chunks[i] - first_chunks[i];
            let equality_dummy = vars.local_wires[self.wire_equality_dummy(i)];
            let chunks_equal = vars.local_wires[self.wire_chunks_equal(i)];
            // Two constraints to assert that `chunks_equal` is valid.
            yield_constr.one(difference * equality_dummy - (P::ONES - chunks_equal));
            yield_constr.one(chunks_equal * difference);
            // Update `most_significant_diff_so_far`.
            let intermediate_value = vars.local_wires[self.wire_intermediate_value(i)];
            yield_constr.one(intermediate_value - chunks_equal * most_significant_diff_so_far);
            most_significant_diff_so_far =
                intermediate_value + (P::ONES - chunks_equal) * difference;
        }
        let most_significant_diff = vars.local_wires[self.wire_most_significant_diff()];
        yield_constr.one(most_significant_diff - most_significant_diff_so_far);
        // Range check `most_significant_diff` to be less than `chunk_size`.
        let product = (0..chunk_size)
            .map(|x| most_significant_diff - F::from_canonical_usize(x))
            .product();
        yield_constr.one(product);
    }
}
/// Witness generator that fills in the auxiliary wires (limbs, equality
/// flags/dummies, accumulator intermediates, final diff) of one
/// `AssertLessThanGate` instance.
#[derive(Debug)]
struct AssertLessThanGenerator<F: RichField + Extendable<D>, const D: usize> {
    /// Circuit row where the gate instance lives.
    row: usize,
    /// The gate whose wires this generator populates.
    gate: AssertLessThanGate<F, D>,
}
impl<F: RichField + Extendable<D>, const D: usize> SimpleGenerator<F>
    for AssertLessThanGenerator<F, D>
{
    /// The generator can run once both comparison input wires are known.
    fn dependencies(&self) -> Vec<Target> {
        let local_target = |column| Target::wire(self.row, column);
        vec![
            local_target(self.gate.wire_first_input()),
            local_target(self.gate.wire_second_input()),
        ]
    }

    /// Computes and writes every auxiliary wire from the two input values.
    fn run_once(&self, witness: &PartitionWitness<F>, out_buffer: &mut GeneratedValues<F>) {
        let local_wire = |column| Wire {
            row: self.row,
            column,
        };
        let get_local_wire = |column| witness.get_wire(local_wire(column));
        let first_input = get_local_wire(self.gate.wire_first_input());
        let second_input = get_local_wire(self.gate.wire_second_input());
        let first_input_u64 = first_input.to_canonical_u64();
        let second_input_u64 = second_input.to_canonical_u64();
        // NOTE(review): strict `<` here, while the gate's doc comment says
        // "less than or equal" — confirm which relation is intended.
        debug_assert!(first_input_u64 < second_input_u64);
        let chunk_size = 1 << self.gate.chunk_bits();
        // Decompose each input into base-`chunk_size` limbs, least significant first.
        let first_input_chunks: Vec<F> = (0..self.gate.num_chunks)
            .scan(first_input_u64, |acc, _| {
                let tmp = *acc % chunk_size;
                *acc /= chunk_size;
                Some(F::from_canonical_u64(tmp))
            })
            .collect();
        let second_input_chunks: Vec<F> = (0..self.gate.num_chunks)
            .scan(second_input_u64, |acc, _| {
                let tmp = *acc % chunk_size;
                *acc /= chunk_size;
                Some(F::from_canonical_u64(tmp))
            })
            .collect();
        // Boolean flags: 1 where the limb pair matches.
        let chunks_equal: Vec<F> = (0..self.gate.num_chunks)
            .map(|i| F::from_bool(first_input_chunks[i] == second_input_chunks[i]))
            .collect();
        // Inverse of the limb difference where limbs differ; value is
        // arbitrary (1 here) where they are equal.
        let equality_dummies: Vec<F> = first_input_chunks
            .iter()
            .zip(second_input_chunks.iter())
            .map(|(&f, &s)| if f == s { F::ONE } else { F::ONE / (s - f) })
            .collect();
        // Run the accumulator forward: after the loop it holds the difference
        // at the most significant differing limb pair.
        let mut most_significant_diff_so_far = F::ZERO;
        let mut intermediate_values = Vec::new();
        for i in 0..self.gate.num_chunks {
            if first_input_chunks[i] != second_input_chunks[i] {
                most_significant_diff_so_far = second_input_chunks[i] - first_input_chunks[i];
                intermediate_values.push(F::ZERO);
            } else {
                intermediate_values.push(most_significant_diff_so_far);
            }
        }
        let most_significant_diff = most_significant_diff_so_far;
        out_buffer.set_wire(
            local_wire(self.gate.wire_most_significant_diff()),
            most_significant_diff,
        );
        // Write all per-chunk wires.
        for i in 0..self.gate.num_chunks {
            out_buffer.set_wire(
                local_wire(self.gate.wire_first_chunk_val(i)),
                first_input_chunks[i],
            );
            out_buffer.set_wire(
                local_wire(self.gate.wire_second_chunk_val(i)),
                second_input_chunks[i],
            );
            out_buffer.set_wire(
                local_wire(self.gate.wire_equality_dummy(i)),
                equality_dummies[i],
            );
            out_buffer.set_wire(local_wire(self.gate.wire_chunks_equal(i)), chunks_equal[i]);
            out_buffer.set_wire(
                local_wire(self.gate.wire_intermediate_value(i)),
                intermediate_values[i],
            );
        }
    }
}
#[cfg(test)]
mod tests {
use core::marker::PhantomData;
use anyhow::Result;
use plonky2::gates::gate::Gate;
use plonky2::gates::gate_testing::{test_eval_fns, test_low_degree};
use plonky2::hash::hash_types::HashOut;
use plonky2::plonk::config::{GenericConfig, PoseidonGoldilocksConfig};
use plonky2::plonk::vars::EvaluationVars;
use plonky2_field::extension::quartic::QuarticExtension;
use plonky2_field::goldilocks_field::GoldilocksField;
use plonky2_field::types::{Field, PrimeField64, Sample};
use rand::Rng;
use crate::gates::assert_le::AssertLessThanGate;
#[test]
fn wire_indices() {
type AG = AssertLessThanGate<GoldilocksField, 4>;
let num_bits = 40;
let num_chunks = 5;
let gate = AG {
num_bits,
num_chunks,
_phantom: PhantomData,
};
assert_eq!(gate.wire_first_input(), 0);
assert_eq!(gate.wire_second_input(), 1);
assert_eq!(gate.wire_most_significant_diff(), 2);
assert_eq!(gate.wire_first_chunk_val(0), 3);
assert_eq!(gate.wire_first_chunk_val(4), 7);
assert_eq!(gate.wire_second_chunk_val(0), 8);
assert_eq!(gate.wire_second_chunk_val(4), 12);
assert_eq!(gate.wire_equality_dummy(0), 13);
assert_eq!(gate.wire_equality_dummy(4), 17);
assert_eq!(gate.wire_chunks_equal(0), 18);
assert_eq!(gate.wire_chunks_equal(4), 22);
assert_eq!(gate.wire_intermediate_value(0), 23);
assert_eq!(gate.wire_intermediate_value(4), 27);
}
#[test]
fn low_degree() {
let num_bits = 20;
let num_chunks = 4;
test_low_degree::<GoldilocksField, _, 4>(AssertLessThanGate::<_, 4>::new(
num_bits, num_chunks,
))
}
#[test]
fn eval_fns() -> Result<()> {
const D: usize = 2;
type C = PoseidonGoldilocksConfig;
type F = <C as GenericConfig<D>>::F;
let num_bits = 20;
let num_chunks = 4;
test_eval_fns::<F, C, _, D>(AssertLessThanGate::<_, D>::new(num_bits, num_chunks))
}
#[test]
fn test_gate_constraint() {
    type F = GoldilocksField;
    type FF = QuarticExtension<GoldilocksField>;
    const D: usize = 4;
    let num_bits = 40;
    let num_chunks = 5;
    let chunk_bits = num_bits / num_chunks;
    // Returns the local wires for an AssertLessThanGate given the two inputs.
    // The wire order must mirror the gate's wire_* index methods exactly:
    // inputs, most-significant diff, first/second input chunks, equality
    // dummies, chunks-equal flags, intermediate values.
    let get_wires = |first_input: F, second_input: F| -> Vec<FF> {
        let mut v = Vec::new();
        let first_input_u64 = first_input.to_canonical_u64();
        let second_input_u64 = second_input.to_canonical_u64();
        let chunk_size = 1 << chunk_bits;
        // Decompose each input into `num_chunks` base-2^chunk_bits digits,
        // least-significant chunk first.
        let mut first_input_chunks: Vec<F> = (0..num_chunks)
            .scan(first_input_u64, |acc, _| {
                let tmp = *acc % chunk_size;
                *acc /= chunk_size;
                Some(F::from_canonical_u64(tmp))
            })
            .collect();
        let mut second_input_chunks: Vec<F> = (0..num_chunks)
            .scan(second_input_u64, |acc, _| {
                let tmp = *acc % chunk_size;
                *acc /= chunk_size;
                Some(F::from_canonical_u64(tmp))
            })
            .collect();
        let mut chunks_equal: Vec<F> = (0..num_chunks)
            .map(|i| F::from_bool(first_input_chunks[i] == second_input_chunks[i]))
            .collect();
        // Equality-check witness: the inverse of the chunk difference when the
        // chunks differ, and an arbitrary placeholder (one) when they are equal.
        let mut equality_dummies: Vec<F> = first_input_chunks
            .iter()
            .zip(second_input_chunks.iter())
            .map(|(&f, &s)| if f == s { F::ONE } else { F::ONE / (s - f) })
            .collect();
        // Chunks are stored least-significant first, so a later differing chunk
        // is more significant and overwrites the running difference.
        let mut most_significant_diff_so_far = F::ZERO;
        let mut intermediate_values = Vec::new();
        for i in 0..num_chunks {
            if first_input_chunks[i] != second_input_chunks[i] {
                most_significant_diff_so_far = second_input_chunks[i] - first_input_chunks[i];
                intermediate_values.push(F::ZERO);
            } else {
                intermediate_values.push(most_significant_diff_so_far);
            }
        }
        let most_significant_diff = most_significant_diff_so_far;
        v.push(first_input);
        v.push(second_input);
        v.push(most_significant_diff);
        v.append(&mut first_input_chunks);
        v.append(&mut second_input_chunks);
        v.append(&mut equality_dummies);
        v.append(&mut chunks_equal);
        v.append(&mut intermediate_values);
        v.iter().map(|&x| x.into()).collect()
    };
    // Sample first_input <= second_input so the constraints should be satisfied.
    let mut rng = rand::thread_rng();
    let max: u64 = 1 << (num_bits - 1);
    let first_input_u64 = rng.gen_range(0..max);
    let second_input_u64 = {
        let mut val = rng.gen_range(0..max);
        while val < first_input_u64 {
            val = rng.gen_range(0..max);
        }
        val
    };
    let first_input = F::from_canonical_u64(first_input_u64);
    let second_input = F::from_canonical_u64(second_input_u64);
    let less_than_gate = AssertLessThanGate::<F, D> {
        num_bits,
        num_chunks,
        _phantom: PhantomData,
    };
    let less_than_vars = EvaluationVars {
        local_constants: &[],
        local_wires: &get_wires(first_input, second_input),
        public_inputs_hash: &HashOut::rand(),
    };
    assert!(
        less_than_gate
            .eval_unfiltered(less_than_vars)
            .iter()
            .all(|x| x.is_zero()),
        "Gate constraints are not satisfied."
    );
    // NOTE(review): despite the gate's name, equal inputs are expected to
    // satisfy the constraints too (<= semantics), matching its use for
    // non-decreasing order in sorting.
    let equal_gate = AssertLessThanGate::<F, D> {
        num_bits,
        num_chunks,
        _phantom: PhantomData,
    };
    let equal_vars = EvaluationVars {
        local_constants: &[],
        local_wires: &get_wires(first_input, first_input),
        public_inputs_hash: &HashOut::rand(),
    };
    assert!(
        equal_gate
            .eval_unfiltered(equal_vars)
            .iter()
            .all(|x| x.is_zero()),
        "Gate constraints are not satisfied."
    );
}
}

View File

@ -1,2 +0,0 @@
pub mod assert_le;
pub mod switch;

View File

@ -1,454 +0,0 @@
use std::marker::PhantomData;
use array_tool::vec::Union;
use plonky2::gates::gate::Gate;
use plonky2::gates::packed_util::PackedEvaluableBase;
use plonky2::gates::util::StridedConstraintConsumer;
use plonky2::hash::hash_types::RichField;
use plonky2::iop::ext_target::ExtensionTarget;
use plonky2::iop::generator::{GeneratedValues, WitnessGenerator};
use plonky2::iop::target::Target;
use plonky2::iop::wire::Wire;
use plonky2::iop::witness::{PartitionWitness, Witness, WitnessWrite};
use plonky2::plonk::circuit_builder::CircuitBuilder;
use plonky2::plonk::circuit_data::CircuitConfig;
use plonky2::plonk::vars::{
EvaluationTargets, EvaluationVars, EvaluationVarsBase, EvaluationVarsBaseBatch,
EvaluationVarsBasePacked,
};
use plonky2_field::extension::Extendable;
use plonky2_field::packed::PackedField;
use plonky2_field::types::Field;
/// A gate for conditionally swapping input values based on a boolean.
///
/// Each "copy" is an independent switch with two input chunks, two output
/// chunks, and one switch boolean; several copies share a single gate row.
#[derive(Copy, Clone, Debug)]
pub struct SwitchGate<F: RichField + Extendable<D>, const D: usize> {
    /// Number of field elements in each of the two values being switched.
    pub(crate) chunk_size: usize,
    /// Number of independent switch copies packed into one gate row.
    pub(crate) num_copies: usize,
    _phantom: PhantomData<F>,
}
impl<F: RichField + Extendable<D>, const D: usize> SwitchGate<F, D> {
    /// Creates a gate with `num_copies` independent switches, each operating
    /// on chunks of `chunk_size` field elements.
    pub fn new(num_copies: usize, chunk_size: usize) -> Self {
        Self {
            chunk_size,
            num_copies,
            _phantom: PhantomData,
        }
    }

    /// Creates a gate packing as many copies as the config's routed wires allow.
    pub fn new_from_config(config: &CircuitConfig, chunk_size: usize) -> Self {
        let num_copies = Self::max_num_copies(config.num_routed_wires, chunk_size);
        Self::new(num_copies, chunk_size)
    }

    /// Maximum number of copies that fit: each copy uses `4 * chunk_size`
    /// input/output wires plus one switch-boolean wire.
    pub fn max_num_copies(num_routed_wires: usize, chunk_size: usize) -> usize {
        num_routed_wires / (4 * chunk_size + 1)
    }

    /// Wire index of element `element` of the first input chunk of copy `copy`.
    pub fn wire_first_input(&self, copy: usize, element: usize) -> usize {
        // Bounds-check `copy` as well as `element`, for consistency with
        // `wire_switch_bool` (previously only `element` was checked here).
        debug_assert!(copy < self.num_copies);
        debug_assert!(element < self.chunk_size);
        copy * (4 * self.chunk_size + 1) + element
    }

    /// Wire index of element `element` of the second input chunk of copy `copy`.
    pub fn wire_second_input(&self, copy: usize, element: usize) -> usize {
        debug_assert!(copy < self.num_copies);
        debug_assert!(element < self.chunk_size);
        copy * (4 * self.chunk_size + 1) + self.chunk_size + element
    }

    /// Wire index of element `element` of the first output chunk of copy `copy`.
    pub fn wire_first_output(&self, copy: usize, element: usize) -> usize {
        debug_assert!(copy < self.num_copies);
        debug_assert!(element < self.chunk_size);
        copy * (4 * self.chunk_size + 1) + 2 * self.chunk_size + element
    }

    /// Wire index of element `element` of the second output chunk of copy `copy`.
    pub fn wire_second_output(&self, copy: usize, element: usize) -> usize {
        debug_assert!(copy < self.num_copies);
        debug_assert!(element < self.chunk_size);
        copy * (4 * self.chunk_size + 1) + 3 * self.chunk_size + element
    }

    /// Wire index of the switch boolean for copy `copy`.
    pub fn wire_switch_bool(&self, copy: usize) -> usize {
        debug_assert!(copy < self.num_copies);
        copy * (4 * self.chunk_size + 1) + 4 * self.chunk_size
    }
}
impl<F: RichField + Extendable<D>, const D: usize> Gate<F, D> for SwitchGate<F, D> {
    fn id(&self) -> String {
        format!("{self:?}<D={D}>")
    }

    // For each copy with switch boolean s, inputs (f, g) and outputs (o1, o2),
    // enforce per element:
    //   s * (f - o2) = 0 and s * (g - o1) = 0       (outputs swapped when s = 1)
    //   (1 - s) * (f - o1) = 0 and (1 - s) * (g - o2) = 0  (straight when s = 0)
    // The constraint emission order here must match eval_unfiltered_circuit
    // and eval_unfiltered_base_packed exactly.
    fn eval_unfiltered(&self, vars: EvaluationVars<F, D>) -> Vec<F::Extension> {
        let mut constraints = Vec::with_capacity(self.num_constraints());
        for c in 0..self.num_copies {
            let switch_bool = vars.local_wires[self.wire_switch_bool(c)];
            let not_switch = F::Extension::ONE - switch_bool;
            for e in 0..self.chunk_size {
                let first_input = vars.local_wires[self.wire_first_input(c, e)];
                let second_input = vars.local_wires[self.wire_second_input(c, e)];
                let first_output = vars.local_wires[self.wire_first_output(c, e)];
                let second_output = vars.local_wires[self.wire_second_output(c, e)];
                constraints.push(switch_bool * (first_input - second_output));
                constraints.push(switch_bool * (second_input - first_output));
                constraints.push(not_switch * (first_input - first_output));
                constraints.push(not_switch * (second_input - second_output));
            }
        }
        constraints
    }

    // Single-row base evaluation is unsupported; the packed batch path is used.
    fn eval_unfiltered_base_one(
        &self,
        _vars: EvaluationVarsBase<F>,
        _yield_constr: StridedConstraintConsumer<F>,
    ) {
        panic!("use eval_unfiltered_base_packed instead");
    }

    fn eval_unfiltered_base_batch(&self, vars_base: EvaluationVarsBaseBatch<F>) -> Vec<F> {
        self.eval_unfiltered_base_batch_packed(vars_base)
    }

    // Recursive (in-circuit) version of eval_unfiltered; same constraints,
    // same order.
    fn eval_unfiltered_circuit(
        &self,
        builder: &mut CircuitBuilder<F, D>,
        vars: EvaluationTargets<D>,
    ) -> Vec<ExtensionTarget<D>> {
        let mut constraints = Vec::with_capacity(self.num_constraints());
        let one = builder.one_extension();
        for c in 0..self.num_copies {
            let switch_bool = vars.local_wires[self.wire_switch_bool(c)];
            let not_switch = builder.sub_extension(one, switch_bool);
            for e in 0..self.chunk_size {
                let first_input = vars.local_wires[self.wire_first_input(c, e)];
                let second_input = vars.local_wires[self.wire_second_input(c, e)];
                let first_output = vars.local_wires[self.wire_first_output(c, e)];
                let second_output = vars.local_wires[self.wire_second_output(c, e)];
                let first_switched = builder.sub_extension(first_input, second_output);
                let first_switched_constraint = builder.mul_extension(switch_bool, first_switched);
                constraints.push(first_switched_constraint);
                let second_switched = builder.sub_extension(second_input, first_output);
                let second_switched_constraint =
                    builder.mul_extension(switch_bool, second_switched);
                constraints.push(second_switched_constraint);
                let first_not_switched = builder.sub_extension(first_input, first_output);
                let first_not_switched_constraint =
                    builder.mul_extension(not_switch, first_not_switched);
                constraints.push(first_not_switched_constraint);
                let second_not_switched = builder.sub_extension(second_input, second_output);
                let second_not_switched_constraint =
                    builder.mul_extension(not_switch, second_not_switched);
                constraints.push(second_not_switched_constraint);
            }
        }
        constraints
    }

    // One witness generator per copy; each can fire in either direction
    // (see SwitchGenerator).
    fn generators(&self, row: usize, _local_constants: &[F]) -> Vec<Box<dyn WitnessGenerator<F>>> {
        (0..self.num_copies)
            .map(|c| {
                let g: Box<dyn WitnessGenerator<F>> = Box::new(SwitchGenerator::<F, D> {
                    row,
                    gate: *self,
                    copy: c,
                });
                g
            })
            .collect()
    }

    // Highest used wire index is the last copy's switch boolean.
    fn num_wires(&self) -> usize {
        self.wire_switch_bool(self.num_copies - 1) + 1
    }

    fn num_constants(&self) -> usize {
        0
    }

    // Each constraint is a product of two wire values.
    fn degree(&self) -> usize {
        2
    }

    // Four constraints per element per copy.
    fn num_constraints(&self) -> usize {
        4 * self.num_copies * self.chunk_size
    }
}
impl<F: RichField + Extendable<D>, const D: usize> PackedEvaluableBase<F, D> for SwitchGate<F, D> {
    // Packed base-field evaluation; mirrors eval_unfiltered (same constraints,
    // in the same order) but streams them through the consumer.
    fn eval_unfiltered_base_packed<P: PackedField<Scalar = F>>(
        &self,
        vars: EvaluationVarsBasePacked<P>,
        mut yield_constr: StridedConstraintConsumer<P>,
    ) {
        for c in 0..self.num_copies {
            let switch_bool = vars.local_wires[self.wire_switch_bool(c)];
            let not_switch = P::ONES - switch_bool;
            for e in 0..self.chunk_size {
                let first_input = vars.local_wires[self.wire_first_input(c, e)];
                let second_input = vars.local_wires[self.wire_second_input(c, e)];
                let first_output = vars.local_wires[self.wire_first_output(c, e)];
                let second_output = vars.local_wires[self.wire_second_output(c, e)];
                yield_constr.one(switch_bool * (first_input - second_output));
                yield_constr.one(switch_bool * (second_input - first_output));
                yield_constr.one(not_switch * (first_input - first_output));
                yield_constr.one(not_switch * (second_input - second_output));
            }
        }
    }
}
/// Witness generator for one copy of a SwitchGate. It can run in either
/// direction: deduce the switch boolean from known inputs and outputs, or
/// deduce the outputs from known inputs and the switch boolean.
#[derive(Debug)]
struct SwitchGenerator<F: RichField + Extendable<D>, const D: usize> {
    /// Gate row in the circuit.
    row: usize,
    /// The gate configuration (chunk size, copy count).
    gate: SwitchGate<F, D>,
    /// Which copy within the row this generator handles.
    copy: usize,
}
impl<F: RichField + Extendable<D>, const D: usize> SwitchGenerator<F, D> {
    /// Targets that must be known to deduce the switch boolean:
    /// every input and output element of this copy.
    fn in_out_dependencies(&self) -> Vec<Target> {
        let local_target = |column| Target::wire(self.row, column);
        let mut deps = Vec::with_capacity(4 * self.gate.chunk_size);
        for e in 0..self.gate.chunk_size {
            deps.push(local_target(self.gate.wire_first_input(self.copy, e)));
            deps.push(local_target(self.gate.wire_second_input(self.copy, e)));
            deps.push(local_target(self.gate.wire_first_output(self.copy, e)));
            deps.push(local_target(self.gate.wire_second_output(self.copy, e)));
        }
        deps
    }

    /// Targets that must be known to deduce the outputs:
    /// every input element plus the switch boolean.
    fn in_switch_dependencies(&self) -> Vec<Target> {
        let local_target = |column| Target::wire(self.row, column);
        let mut deps = Vec::with_capacity(2 * self.gate.chunk_size + 1);
        for e in 0..self.gate.chunk_size {
            deps.push(local_target(self.gate.wire_first_input(self.copy, e)));
            deps.push(local_target(self.gate.wire_second_input(self.copy, e)));
        }
        // The switch boolean is a single wire; push it once instead of once per
        // chunk element (the previous code produced a duplicate per element,
        // and omitted the boolean entirely for chunk_size == 0).
        deps.push(local_target(self.gate.wire_switch_bool(self.copy)));
        deps
    }

    /// Given all inputs and outputs, deduce and set the switch boolean.
    /// Panics if the outputs are neither the inputs nor the swapped inputs.
    fn run_in_out(&self, witness: &PartitionWitness<F>, out_buffer: &mut GeneratedValues<F>) {
        let local_wire = |column| Wire {
            row: self.row,
            column,
        };
        let get_local_wire = |column| witness.get_wire(local_wire(column));
        let switch_bool_wire = local_wire(self.gate.wire_switch_bool(self.copy));
        let mut first_inputs = Vec::new();
        let mut second_inputs = Vec::new();
        let mut first_outputs = Vec::new();
        let mut second_outputs = Vec::new();
        for e in 0..self.gate.chunk_size {
            first_inputs.push(get_local_wire(self.gate.wire_first_input(self.copy, e)));
            second_inputs.push(get_local_wire(self.gate.wire_second_input(self.copy, e)));
            first_outputs.push(get_local_wire(self.gate.wire_first_output(self.copy, e)));
            second_outputs.push(get_local_wire(self.gate.wire_second_output(self.copy, e)));
        }
        // If both interpretations match (inputs equal), prefer "no swap".
        if first_outputs == first_inputs && second_outputs == second_inputs {
            out_buffer.set_wire(switch_bool_wire, F::ZERO);
        } else if first_outputs == second_inputs && second_outputs == first_inputs {
            out_buffer.set_wire(switch_bool_wire, F::ONE);
        } else {
            panic!("No permutation from given inputs to given outputs");
        }
    }

    /// Given the inputs and the switch boolean, compute and set the outputs.
    /// Panics if the switch boolean is neither zero nor one.
    fn run_in_switch(&self, witness: &PartitionWitness<F>, out_buffer: &mut GeneratedValues<F>) {
        let local_wire = |column| Wire {
            row: self.row,
            column,
        };
        let get_local_wire = |column| witness.get_wire(local_wire(column));
        let switch_bool = get_local_wire(self.gate.wire_switch_bool(self.copy));
        for e in 0..self.gate.chunk_size {
            let first_output_wire = local_wire(self.gate.wire_first_output(self.copy, e));
            let second_output_wire = local_wire(self.gate.wire_second_output(self.copy, e));
            let first_input = get_local_wire(self.gate.wire_first_input(self.copy, e));
            let second_input = get_local_wire(self.gate.wire_second_input(self.copy, e));
            let (first_output, second_output) = if switch_bool == F::ZERO {
                (first_input, second_input)
            } else if switch_bool == F::ONE {
                (second_input, first_input)
            } else {
                panic!("Invalid switch bool value");
            };
            out_buffer.set_wire(first_output_wire, first_output);
            out_buffer.set_wire(second_output_wire, second_output);
        }
    }
}
impl<F: RichField + Extendable<D>, const D: usize> WitnessGenerator<F> for SwitchGenerator<F, D> {
    fn watch_list(&self) -> Vec<Target> {
        // Watch both dependency sets; whichever becomes complete first drives `run`.
        self.in_out_dependencies()
            .union(self.in_switch_dependencies())
    }

    fn run(&self, witness: &PartitionWitness<F>, out_buffer: &mut GeneratedValues<F>) -> bool {
        // Prefer deducing the switch boolean from a complete input/output set.
        if witness.contains_all(&self.in_out_dependencies()) {
            self.run_in_out(witness, out_buffer);
            return true;
        }
        // Otherwise, if the inputs and the boolean are known, fill in the outputs.
        if witness.contains_all(&self.in_switch_dependencies()) {
            self.run_in_switch(witness, out_buffer);
            return true;
        }
        // Not enough information yet; ask to be re-run later.
        false
    }
}
#[cfg(test)]
mod tests {
    use std::marker::PhantomData;

    use anyhow::Result;
    use plonky2::gates::gate::Gate;
    use plonky2::gates::gate_testing::{test_eval_fns, test_low_degree};
    use plonky2::hash::hash_types::HashOut;
    use plonky2::plonk::circuit_data::CircuitConfig;
    use plonky2::plonk::config::{GenericConfig, PoseidonGoldilocksConfig};
    use plonky2::plonk::vars::EvaluationVars;
    use plonky2_field::goldilocks_field::GoldilocksField;
    use plonky2_field::types::{Field, Sample};

    use crate::gates::switch::SwitchGate;

    // Spot-check the wire layout: each copy occupies a contiguous band of
    // 4 * chunk_size + 1 wires (first inputs, second inputs, first outputs,
    // second outputs, then the switch boolean).
    #[test]
    fn wire_indices() {
        type SG = SwitchGate<GoldilocksField, 4>;
        let num_copies = 3;
        let chunk_size = 3;
        let gate = SG {
            chunk_size,
            num_copies,
            _phantom: PhantomData,
        };
        assert_eq!(gate.wire_first_input(0, 0), 0);
        assert_eq!(gate.wire_first_input(0, 2), 2);
        assert_eq!(gate.wire_second_input(0, 0), 3);
        assert_eq!(gate.wire_second_input(0, 2), 5);
        assert_eq!(gate.wire_first_output(0, 0), 6);
        assert_eq!(gate.wire_second_output(0, 2), 11);
        assert_eq!(gate.wire_switch_bool(0), 12);
        assert_eq!(gate.wire_first_input(1, 0), 13);
        assert_eq!(gate.wire_second_output(1, 2), 24);
        assert_eq!(gate.wire_switch_bool(1), 25);
        assert_eq!(gate.wire_first_input(2, 0), 26);
        assert_eq!(gate.wire_second_output(2, 2), 37);
        assert_eq!(gate.wire_switch_bool(2), 38);
    }

    #[test]
    fn low_degree() {
        // Constraint degree must not exceed what the gate declares.
        test_low_degree::<GoldilocksField, _, 4>(SwitchGate::<_, 4>::new_from_config(
            &CircuitConfig::standard_recursion_config(),
            3,
        ));
    }

    #[test]
    fn eval_fns() -> Result<()> {
        const D: usize = 2;
        type C = PoseidonGoldilocksConfig;
        type F = <C as GenericConfig<D>>::F;
        // Native and in-circuit evaluation must agree.
        test_eval_fns::<F, C, _, D>(SwitchGate::<_, D>::new_from_config(
            &CircuitConfig::standard_recursion_config(),
            3,
        ))
    }

    #[test]
    fn test_gate_constraint() {
        const D: usize = 2;
        type C = PoseidonGoldilocksConfig;
        type F = <C as GenericConfig<D>>::F;
        type FF = <C as GenericConfig<D>>::FE;
        const CHUNK_SIZE: usize = 4;
        let num_copies = 3;

        /// Returns the local wires for a switch gate given the inputs and the switch booleans.
        /// The wire order must match the gate's wire_* index methods: first inputs,
        /// second inputs, first outputs, second outputs, switch bool, per copy.
        fn get_wires(
            first_inputs: Vec<Vec<F>>,
            second_inputs: Vec<Vec<F>>,
            switch_bools: Vec<bool>,
        ) -> Vec<FF> {
            let num_copies = first_inputs.len();
            let mut v = Vec::new();
            for c in 0..num_copies {
                let switch = switch_bools[c];
                let mut first_input_chunk = Vec::with_capacity(CHUNK_SIZE);
                let mut second_input_chunk = Vec::with_capacity(CHUNK_SIZE);
                let mut first_output_chunk = Vec::with_capacity(CHUNK_SIZE);
                let mut second_output_chunk = Vec::with_capacity(CHUNK_SIZE);
                for e in 0..CHUNK_SIZE {
                    let first_input = first_inputs[c][e];
                    let second_input = second_inputs[c][e];
                    // Outputs are the inputs, swapped iff the switch is set.
                    let first_output = if switch { second_input } else { first_input };
                    let second_output = if switch { first_input } else { second_input };
                    first_input_chunk.push(first_input);
                    second_input_chunk.push(second_input);
                    first_output_chunk.push(first_output);
                    second_output_chunk.push(second_output);
                }
                v.append(&mut first_input_chunk);
                v.append(&mut second_input_chunk);
                v.append(&mut first_output_chunk);
                v.append(&mut second_output_chunk);
                v.push(F::from_bool(switch));
            }
            v.iter().map(|&x| x.into()).collect()
        }

        let first_inputs: Vec<Vec<F>> = (0..num_copies).map(|_| F::rand_vec(CHUNK_SIZE)).collect();
        let second_inputs: Vec<Vec<F>> = (0..num_copies).map(|_| F::rand_vec(CHUNK_SIZE)).collect();
        let switch_bools = vec![true, false, true];

        let gate = SwitchGate::<F, D> {
            chunk_size: CHUNK_SIZE,
            num_copies,
            _phantom: PhantomData,
        };
        let vars = EvaluationVars {
            local_constants: &[],
            local_wires: &get_wires(first_inputs, second_inputs, switch_bools),
            public_inputs_hash: &HashOut::rand(),
        };

        // A consistent witness must satisfy every constraint exactly.
        assert!(
            gate.eval_unfiltered(vars).iter().all(|x| x.is_zero()),
            "Gate constraints are not satisfied."
        );
    }
}

View File

@ -1,11 +0,0 @@
#![allow(clippy::new_without_default)]
#![allow(clippy::too_many_arguments)]
#![allow(clippy::type_complexity)]
#![allow(clippy::len_without_is_empty)]
#![allow(clippy::needless_range_loop)]
#![allow(clippy::return_self_not_must_use)]
pub mod bimap;
pub mod gates;
pub mod permutation;
pub mod sorting;

View File

@ -1,509 +0,0 @@
use std::collections::BTreeMap;
use std::marker::PhantomData;
use plonky2::field::extension::Extendable;
use plonky2::field::types::Field;
use plonky2::hash::hash_types::RichField;
use plonky2::iop::generator::{GeneratedValues, SimpleGenerator};
use plonky2::iop::target::Target;
use plonky2::iop::witness::{PartitionWitness, Witness, WitnessWrite};
use plonky2::plonk::circuit_builder::CircuitBuilder;
use crate::bimap::bimap_from_lists;
use crate::gates::switch::SwitchGate;
/// Assert that two lists of expressions evaluate to permutations of one another.
///
/// Each element of `a`/`b` is a chunk of targets that moves through the
/// permutation network as a unit; all chunks must have the same length.
pub fn assert_permutation_circuit<F: RichField + Extendable<D>, const D: usize>(
    builder: &mut CircuitBuilder<F, D>,
    a: Vec<Vec<Target>>,
    b: Vec<Vec<Target>>,
) {
    assert_eq!(
        a.len(),
        b.len(),
        "Permutation must have same number of inputs and outputs"
    );
    // Two empty lists are permutations of one another, trivially.
    // This is checked before touching a[0]/b[0]: the previous code indexed
    // a[0] first, so the documented empty case actually panicked.
    if a.is_empty() {
        return;
    }
    assert_eq!(a[0].len(), b[0].len(), "Chunk size must be the same");
    let chunk_size = a[0].len();
    match a.len() {
        // Two singleton lists are permutations of one another as long as their items are equal.
        1 => {
            for e in 0..chunk_size {
                builder.connect(a[0][e], b[0][e])
            }
        }
        // A pair of chunks on each side needs only a single switch.
        2 => assert_permutation_2x2_circuit(
            builder,
            a[0].clone(),
            a[1].clone(),
            b[0].clone(),
            b[1].clone(),
        ),
        // For larger lists, we recursively use two smaller permutation networks.
        _ => assert_permutation_helper_circuit(builder, a, b),
    }
}
/// Assert that [a1, a2] is a permutation of [b1, b2].
fn assert_permutation_2x2_circuit<F: RichField + Extendable<D>, const D: usize>(
    builder: &mut CircuitBuilder<F, D>,
    a1: Vec<Target>,
    a2: Vec<Target>,
    b1: Vec<Target>,
    b2: Vec<Target>,
) {
    assert!(
        a1.len() == a2.len() && a2.len() == b1.len() && b1.len() == b2.len(),
        "Chunk size must be the same"
    );
    let chunk_size = a1.len();

    // One switch suffices: its outputs are either (a1, a2) straight or
    // (a2, a1) swapped, and we pin them to (b1, b2). The switch boolean is
    // deduced by the switch's witness generator, so it is unused here.
    let (_switch, switched_1, switched_2) = create_switch_circuit(builder, a1, a2);
    for e in 0..chunk_size {
        builder.connect(b1[e], switched_1[e]);
        builder.connect(b2[e], switched_2[e]);
    }
}
/// Given two input wire chunks, add a new switch to the circuit (by adding one copy to a switch
/// gate). Returns the wire for the switch boolean, and the two output wire chunks.
fn create_switch_circuit<F: RichField + Extendable<D>, const D: usize>(
    builder: &mut CircuitBuilder<F, D>,
    a1: Vec<Target>,
    a2: Vec<Target>,
) -> (Target, Vec<Target>, Vec<Target>) {
    assert_eq!(a1.len(), a2.len(), "Chunk size must be the same");
    let chunk_size = a1.len();

    // Claim one copy slot in a (possibly shared) switch gate row.
    let gate = SwitchGate::new_from_config(&builder.config, chunk_size);
    let params = vec![F::from_canonical_usize(chunk_size)];
    let (row, next_copy) = builder.find_slot(gate, &params, &[]);

    let mut first_outputs = Vec::with_capacity(chunk_size);
    let mut second_outputs = Vec::with_capacity(chunk_size);
    for e in 0..chunk_size {
        // Route the caller's wires into this copy's input wires, and collect
        // the corresponding output wires.
        builder.connect(
            a1[e],
            Target::wire(row, gate.wire_first_input(next_copy, e)),
        );
        builder.connect(
            a2[e],
            Target::wire(row, gate.wire_second_input(next_copy, e)),
        );
        first_outputs.push(Target::wire(row, gate.wire_first_output(next_copy, e)));
        second_outputs.push(Target::wire(row, gate.wire_second_output(next_copy, e)));
    }

    let switch = Target::wire(row, gate.wire_switch_bool(next_copy));
    (switch, first_outputs, second_outputs)
}
// Recursive case of the AS-Waksman construction, used for three or more chunks.
fn assert_permutation_helper_circuit<F: RichField + Extendable<D>, const D: usize>(
    builder: &mut CircuitBuilder<F, D>,
    a: Vec<Vec<Target>>,
    b: Vec<Vec<Target>>,
) {
    assert_eq!(
        a.len(),
        b.len(),
        "Permutation must have same number of inputs and outputs"
    );
    assert_eq!(a[0].len(), b[0].len(), "Chunk size must be the same");
    let n = a.len();
    let even = n % 2 == 0;
    // Wires feeding the two recursive subnetworks, on the a-side and b-side.
    let mut child_1_a = Vec::new();
    let mut child_1_b = Vec::new();
    let mut child_2_a = Vec::new();
    let mut child_2_b = Vec::new();
    // See Figure 8 in the AS-Waksman paper.
    // The a-side layer has floor(n/2) switches; the b-side has one fewer when n is even.
    let a_num_switches = n / 2;
    let b_num_switches = if even {
        a_num_switches - 1
    } else {
        a_num_switches
    };
    let mut a_switches = Vec::new();
    let mut b_switches = Vec::new();
    // Each switch sends one output into subnetwork 1 and the other into subnetwork 2.
    for i in 0..a_num_switches {
        let (switch, out_1, out_2) =
            create_switch_circuit(builder, a[i * 2].clone(), a[i * 2 + 1].clone());
        a_switches.push(switch);
        child_1_a.push(out_1);
        child_2_a.push(out_2);
    }
    for i in 0..b_num_switches {
        let (switch, out_1, out_2) =
            create_switch_circuit(builder, b[i * 2].clone(), b[i * 2 + 1].clone());
        b_switches.push(switch);
        child_1_b.push(out_1);
        child_2_b.push(out_2);
    }
    // See Figure 8 in the AS-Waksman paper: the last wires bypass the switch
    // layers and are wired directly into fixed subnetworks.
    if even {
        child_1_b.push(b[n - 2].clone());
        child_2_b.push(b[n - 1].clone());
    } else {
        child_2_a.push(a[n - 1].clone());
        child_2_b.push(b[n - 1].clone());
    }
    assert_permutation_circuit(builder, child_1_a, child_1_b);
    assert_permutation_circuit(builder, child_2_a, child_2_b);
    // The switch booleans are left unconstrained here; this generator routes
    // the network at witness-generation time, once the values are known.
    builder.add_simple_generator(PermutationGenerator::<F> {
        a,
        b,
        a_switches,
        b_switches,
        _phantom: PhantomData,
    });
}
/// Computes the switch settings that route `a_values` to `b_values` through
/// one layer of the AS-Waksman network, writing the booleans for
/// `a_switches`/`b_switches` into `out_buffer`.
fn route<F: Field>(
    a_values: Vec<Vec<F>>,
    b_values: Vec<Vec<F>>,
    a_switches: Vec<Target>,
    b_switches: Vec<Target>,
    witness: &PartitionWitness<F>,
    out_buffer: &mut GeneratedValues<F>,
) {
    assert_eq!(a_values.len(), b_values.len());
    let n = a_values.len();
    let even = n % 2 == 0;
    // We use a bimap to match indices of values in a to indices of the same values in b.
    // This means that given a wire on one side, we can easily find the matching wire on the other side.
    let ab_map = bimap_from_lists(a_values, b_values);
    let switches = [a_switches, b_switches];
    // We keep track of the new wires we've routed (after routing some wires, we need to check
    // `witness` and `newly_set` instead of just `witness`).
    let mut newly_set = [vec![false; n], vec![false; n]];
    // Given a side and an index, returns the index in the other side that corresponds to the same value.
    let ab_map_by_side = |side: usize, index: usize| -> usize {
        *match side {
            0 => ab_map.get_by_left(&index),
            1 => ab_map.get_by_right(&index),
            _ => panic!("Expected side to be 0 or 1"),
        }
        .unwrap()
    };
    // We maintain two maps for wires which have been routed to a particular subnetwork on one side
    // of the network (left or right) but not the other. The keys are wire indices, and the values
    // are subnetwork indices.
    let mut partial_routes = [BTreeMap::new(), BTreeMap::new()];
    // After we route a wire on one side, we find the corresponding wire on the other side and check
    // if it still needs to be routed. If so, we add it to partial_routes.
    let enqueue_other_side = |partial_routes: &mut [BTreeMap<usize, bool>],
                              witness: &PartitionWitness<F>,
                              newly_set: &mut [Vec<bool>],
                              side: usize,
                              this_i: usize,
                              subnet: bool| {
        let other_side = 1 - side;
        let other_i = ab_map_by_side(side, this_i);
        let other_switch_i = other_i / 2;
        if other_switch_i >= switches[other_side].len() {
            // The other wire doesn't go through a switch, so there's no routing to be done.
            // This happens in the case of the very last wire.
            return;
        }
        if witness.contains(switches[other_side][other_switch_i])
            || newly_set[other_side][other_switch_i]
        {
            // The other switch has already been routed.
            return;
        }
        // The sibling is the other wire through the same switch:
        // for other_i = 2s this evaluates to 2s + 1, and for other_i = 2s + 1 to 2s.
        let other_i_sibling = 4 * other_switch_i + 1 - other_i;
        if let Some(&sibling_subnet) = partial_routes[other_side].get(&other_i_sibling) {
            // The other switch's sibling is already pending routing.
            // The two wires of one switch must go to opposite subnetworks.
            assert_ne!(subnet, sibling_subnet);
        } else {
            let opt_old_subnet = partial_routes[other_side].insert(other_i, subnet);
            if let Some(old_subnet) = opt_old_subnet {
                assert_eq!(subnet, old_subnet, "Routing conflict (should never happen)");
            }
        }
    };
    // See Figure 8 in the AS-Waksman paper: the wires that bypass the switch
    // layers have their subnetworks fixed in advance, which seeds the routing.
    if even {
        enqueue_other_side(
            &mut partial_routes,
            witness,
            &mut newly_set,
            1,
            n - 2,
            false,
        );
        enqueue_other_side(&mut partial_routes, witness, &mut newly_set, 1, n - 1, true);
    } else {
        enqueue_other_side(&mut partial_routes, witness, &mut newly_set, 0, n - 1, true);
        enqueue_other_side(&mut partial_routes, witness, &mut newly_set, 1, n - 1, true);
    }
    // Sets one switch's configuration and enqueues the consequences for the
    // opposite side of the network.
    let route_switch = |partial_routes: &mut [BTreeMap<usize, bool>],
                        witness: &PartitionWitness<F>,
                        out_buffer: &mut GeneratedValues<F>,
                        newly_set: &mut [Vec<bool>],
                        side: usize,
                        switch_index: usize,
                        swap: bool| {
        // First, we actually set the switch configuration.
        out_buffer.set_target(switches[side][switch_index], F::from_bool(swap));
        newly_set[side][switch_index] = true;
        // Then, we enqueue the two corresponding wires on the other side of the network, to ensure
        // that they get routed in the next step.
        let this_i_1 = switch_index * 2;
        let this_i_2 = this_i_1 + 1;
        enqueue_other_side(partial_routes, witness, newly_set, side, this_i_1, swap);
        enqueue_other_side(partial_routes, witness, newly_set, side, this_i_2, !swap);
    };
    // If `partial_routes` is empty, then we can route any switch next. For efficiency, we will
    // simply do top-down scans (one on the left side, one on the right side) for switches which
    // have not yet been routed. These variables represent the positions of those two scans.
    let mut scan_index = [0, 0];
    // Until both scans complete, we alternate back and forth between the left and right switch
    // layers. We process any partially routed wires for that side, or if there aren't any, we route
    // the next switch in our scan.
    while scan_index[0] < switches[0].len() || scan_index[1] < switches[1].len() {
        for side in 0..=1 {
            if !partial_routes[side].is_empty() {
                for (this_i, subnet) in partial_routes[side].clone().into_iter() {
                    // The switch must swap iff the required subnetwork differs
                    // from where this wire would land when unswapped.
                    let this_first_switch_input = this_i % 2 == 0;
                    let swap = this_first_switch_input == subnet;
                    let this_switch_i = this_i / 2;
                    route_switch(
                        &mut partial_routes,
                        witness,
                        out_buffer,
                        &mut newly_set,
                        side,
                        this_switch_i,
                        swap,
                    );
                }
                partial_routes[side].clear();
            } else {
                // We can route any switch next. Continue our scan for pending switches.
                while scan_index[side] < switches[side].len()
                    && (witness.contains(switches[side][scan_index[side]])
                        || newly_set[side][scan_index[side]])
                {
                    scan_index[side] += 1;
                }
                if scan_index[side] < switches[side].len() {
                    // Either switch configuration would work; we arbitrarily choose to not swap.
                    route_switch(
                        &mut partial_routes,
                        witness,
                        out_buffer,
                        &mut newly_set,
                        side,
                        scan_index[side],
                        false,
                    );
                    scan_index[side] += 1;
                }
            }
        }
    }
}
/// Generator that computes the switch settings of one permutation-network
/// layer once the values on both sides are known.
#[derive(Debug)]
struct PermutationGenerator<F: Field> {
    /// Input-side chunks.
    a: Vec<Vec<Target>>,
    /// Output-side chunks.
    b: Vec<Vec<Target>>,
    /// Switch booleans of the input-side switch layer.
    a_switches: Vec<Target>,
    /// Switch booleans of the output-side switch layer.
    b_switches: Vec<Target>,
    _phantom: PhantomData<F>,
}
impl<F: Field> SimpleGenerator<F> for PermutationGenerator<F> {
    fn dependencies(&self) -> Vec<Target> {
        // Routing can only begin once every value on both sides is known.
        self.a.iter().chain(&self.b).flatten().cloned().collect()
    }

    fn run_once(&self, witness: &PartitionWitness<F>, out_buffer: &mut GeneratedValues<F>) {
        // Read the concrete field values behind every target chunk.
        let read_chunks = |chunks: &[Vec<Target>]| -> Vec<Vec<F>> {
            chunks
                .iter()
                .map(|chunk| chunk.iter().map(|&t| witness.get_target(t)).collect())
                .collect()
        };
        route(
            read_chunks(&self.a),
            read_chunks(&self.b),
            self.a_switches.clone(),
            self.b_switches.clone(),
            witness,
            out_buffer,
        );
    }
}
#[cfg(test)]
mod tests {
    use anyhow::Result;
    use plonky2::field::types::{Field, Sample};
    use plonky2::iop::witness::PartialWitness;
    use plonky2::plonk::circuit_data::CircuitConfig;
    use plonky2::plonk::config::{GenericConfig, PoseidonGoldilocksConfig};
    use rand::seq::SliceRandom;
    use rand::{thread_rng, Rng};

    use super::*;

    // Proves a permutation claim on `size` chunks of 2 constants each, where
    // the output list is a shuffled copy of the input list; proving must succeed.
    fn test_permutation_good(size: usize) -> Result<()> {
        const D: usize = 2;
        type C = PoseidonGoldilocksConfig;
        type F = <C as GenericConfig<D>>::F;

        let config = CircuitConfig::standard_recursion_config();
        let pw = PartialWitness::new();
        let mut builder = CircuitBuilder::<F, D>::new(config);

        let lst: Vec<F> = (0..size * 2).map(F::from_canonical_usize).collect();
        let a: Vec<Vec<Target>> = lst[..]
            .chunks(2)
            .map(|pair| vec![builder.constant(pair[0]), builder.constant(pair[1])])
            .collect();
        let mut b = a.clone();
        b.shuffle(&mut thread_rng());

        assert_permutation_circuit(&mut builder, a, b);

        let data = builder.build::<C>();
        let proof = data.prove(pw)?;
        data.verify(proof)
    }

    // Same as test_permutation_good, but values are drawn from {0, 1} so the
    // lists contain many duplicate chunks; routing must still find a valid
    // switch assignment.
    fn test_permutation_duplicates(size: usize) -> Result<()> {
        const D: usize = 2;
        type C = PoseidonGoldilocksConfig;
        type F = <C as GenericConfig<D>>::F;

        let config = CircuitConfig::standard_recursion_config();
        let pw = PartialWitness::new();
        let mut builder = CircuitBuilder::<F, D>::new(config);

        let mut rng = thread_rng();
        let lst: Vec<F> = (0..size * 2)
            .map(|_| F::from_canonical_usize(rng.gen_range(0..2usize)))
            .collect();
        let a: Vec<Vec<Target>> = lst[..]
            .chunks(2)
            .map(|pair| vec![builder.constant(pair[0]), builder.constant(pair[1])])
            .collect();
        let mut b = a.clone();
        b.shuffle(&mut thread_rng());

        assert_permutation_circuit(&mut builder, a, b);

        let data = builder.build::<C>();
        let proof = data.prove(pw)?;
        data.verify(proof)
    }

    // Asserts a permutation between two unrelated random lists; with
    // overwhelming probability they are not permutations of each other, so
    // proving is expected to panic (see the #[should_panic] tests below).
    fn test_permutation_bad(size: usize) -> Result<()> {
        const D: usize = 2;
        type C = PoseidonGoldilocksConfig;
        type F = <C as GenericConfig<D>>::F;

        let config = CircuitConfig::standard_recursion_config();
        let pw = PartialWitness::new();
        let mut builder = CircuitBuilder::<F, D>::new(config);

        let lst1: Vec<F> = F::rand_vec(size * 2);
        let lst2: Vec<F> = F::rand_vec(size * 2);
        let a: Vec<Vec<Target>> = lst1[..]
            .chunks(2)
            .map(|pair| vec![builder.constant(pair[0]), builder.constant(pair[1])])
            .collect();
        let b: Vec<Vec<Target>> = lst2[..]
            .chunks(2)
            .map(|pair| vec![builder.constant(pair[0]), builder.constant(pair[1])])
            .collect();

        assert_permutation_circuit(&mut builder, a, b);

        let data = builder.build::<C>();
        data.prove(pw)?;
        Ok(())
    }

    // Exercise a range of sizes to hit the base cases and both parities of
    // the recursive Waksman construction.
    #[test]
    fn test_permutations_duplicates() -> Result<()> {
        for n in 2..9 {
            test_permutation_duplicates(n)?;
        }
        Ok(())
    }

    #[test]
    fn test_permutations_good() -> Result<()> {
        for n in 2..9 {
            test_permutation_good(n)?;
        }
        Ok(())
    }

    #[test]
    #[should_panic]
    fn test_permutation_bad_small() {
        let size = 2;
        test_permutation_bad(size).unwrap()
    }

    #[test]
    #[should_panic]
    fn test_permutation_bad_medium() {
        let size = 6;
        test_permutation_bad(size).unwrap()
    }

    #[test]
    #[should_panic]
    fn test_permutation_bad_large() {
        let size = 10;
        test_permutation_bad(size).unwrap()
    }
}

View File

@ -1,277 +0,0 @@
use std::marker::PhantomData;
use itertools::izip;
use plonky2::field::extension::Extendable;
use plonky2::field::types::Field;
use plonky2::hash::hash_types::RichField;
use plonky2::iop::generator::{GeneratedValues, SimpleGenerator};
use plonky2::iop::target::{BoolTarget, Target};
use plonky2::iop::witness::{PartitionWitness, Witness, WitnessWrite};
use plonky2::plonk::circuit_builder::CircuitBuilder;
use plonky2_util::ceil_div_usize;
use crate::gates::assert_le::AssertLessThanGate;
use crate::permutation::assert_permutation_circuit;
/// A memory operation, with native (out-of-circuit) field values.
/// NOTE(review): no code in this file reads these fields or constructs this
/// type — presumably it is consumed by external callers, mirroring
/// `MemoryOpTarget`; confirm before changing.
pub struct MemoryOp<F: Field> {
    /// Whether this operation is a write (true) or a read (false).
    is_write: bool,
    /// Memory address being accessed.
    address: F,
    /// Time step at which the access occurs.
    timestamp: F,
    /// Value read or written.
    value: F,
}
/// In-circuit counterpart of `MemoryOp`: the same four fields as targets.
#[derive(Clone, Debug)]
pub struct MemoryOpTarget {
    /// Boolean target: write when 1, read when 0.
    is_write: BoolTarget,
    /// Target holding the memory address.
    address: Target,
    /// Target holding the access timestamp.
    timestamp: Target,
    /// Target holding the value read or written.
    value: Target,
}
/// Assert that the memory operations in `b` are a permutation of those in `a`.
pub fn assert_permutation_memory_ops_circuit<F: RichField + Extendable<D>, const D: usize>(
    builder: &mut CircuitBuilder<F, D>,
    a: &[MemoryOpTarget],
    b: &[MemoryOpTarget],
) {
    // Flatten each op into a fixed-order target chunk; both sides must use the
    // same field order so chunks compare element-wise.
    let to_chunks = |ops: &[MemoryOpTarget]| -> Vec<Vec<Target>> {
        ops.iter()
            .map(|op| vec![op.address, op.timestamp, op.is_write.target, op.value])
            .collect()
    };
    assert_permutation_circuit(builder, to_chunks(a), to_chunks(b));
}
/// Add an AssertLessThanGate to assert that `lhs` is less than `rhs`, where their values are at most `bits` bits.
pub fn assert_le_circuit<F: RichField + Extendable<D>, const D: usize>(
    builder: &mut CircuitBuilder<F, D>,
    lhs: Target,
    rhs: Target,
    bits: usize,
    num_chunks: usize,
) {
    let gate = AssertLessThanGate::new(bits, num_chunks);
    let row = builder.add_gate(gate.clone(), vec![]);
    // Route the two operands into the gate's input wires; the comparison
    // itself is enforced by the gate's constraints.
    let first = Target::wire(row, gate.wire_first_input());
    let second = Target::wire(row, gate.wire_second_input());
    builder.connect(first, lhs);
    builder.connect(second, rhs);
}
/// Sort memory operations by address value, then by timestamp value.
/// This is done by combining address and timestamp into one field element (using their given bit lengths).
pub fn sort_memory_ops_circuit<F: RichField + Extendable<D>, const D: usize>(
    builder: &mut CircuitBuilder<F, D>,
    ops: &[MemoryOpTarget],
    address_bits: usize,
    timestamp_bits: usize,
) -> Vec<MemoryOpTarget> {
    let n = ops.len();
    // Combined sort key is address * 2^timestamp_bits + timestamp.
    // NOTE(review): assumes address_bits + timestamp_bits is small enough that
    // the combined key fits in a field element without wrapping — confirm.
    let combined_bits = address_bits + timestamp_bits;
    let chunk_bits = 3;
    let num_chunks = ceil_div_usize(combined_bits, chunk_bits);
    // This is safe because `assert_permutation` will force these targets (in the output list) to match the boolean values from the input list.
    let is_write_targets: Vec<_> = builder
        .add_virtual_targets(n)
        .iter()
        .map(|&t| BoolTarget::new_unsafe(t))
        .collect();
    // Fresh virtual targets for the sorted output; values are supplied by the
    // generator added below.
    let address_targets = builder.add_virtual_targets(n);
    let timestamp_targets = builder.add_virtual_targets(n);
    let value_targets = builder.add_virtual_targets(n);
    let output_targets: Vec<_> = izip!(
        is_write_targets,
        address_targets,
        timestamp_targets,
        value_targets
    )
    .map(|(i, a, t, v)| MemoryOpTarget {
        is_write: i,
        address: a,
        timestamp: t,
        value: v,
    })
    .collect();
    // 2^timestamp_bits: shifts the address above the timestamp in the key.
    let two_n = builder.constant(F::from_canonical_usize(1 << timestamp_bits));
    let address_timestamp_combined: Vec<_> = output_targets
        .iter()
        .map(|op| builder.mul_add(op.address, two_n, op.timestamp))
        .collect();
    // Enforce that consecutive combined keys are non-decreasing.
    for i in 1..n {
        assert_le_circuit(
            builder,
            address_timestamp_combined[i - 1],
            address_timestamp_combined[i],
            combined_bits,
            num_chunks,
        );
    }
    // The sorted outputs must be a permutation of the inputs (same multiset of ops).
    assert_permutation_memory_ops_circuit(builder, ops, &output_targets);
    // Generator computes the actual sorted order at witness-generation time.
    builder.add_simple_generator(MemoryOpSortGenerator::<F, D> {
        input_ops: ops.to_vec(),
        output_ops: output_targets.clone(),
        _phantom: PhantomData,
    });
    output_targets
}
/// Witness generator that reads the concrete input memory ops, sorts them by
/// `(address, timestamp)`, and writes the result into `output_ops`.
#[derive(Debug)]
struct MemoryOpSortGenerator<F: RichField + Extendable<D>, const D: usize> {
    // Targets holding the unsorted ops (read as dependencies).
    input_ops: Vec<MemoryOpTarget>,
    // Targets to be filled with the sorted ops.
    output_ops: Vec<MemoryOpTarget>,
    _phantom: PhantomData<F>,
}
impl<F: RichField + Extendable<D>, const D: usize> SimpleGenerator<F>
for MemoryOpSortGenerator<F, D>
{
fn dependencies(&self) -> Vec<Target> {
self.input_ops
.iter()
.flat_map(|op| vec![op.is_write.target, op.address, op.timestamp, op.value])
.collect()
}
fn run_once(&self, witness: &PartitionWitness<F>, out_buffer: &mut GeneratedValues<F>) {
let n = self.input_ops.len();
debug_assert!(self.output_ops.len() == n);
let mut ops: Vec<_> = self
.input_ops
.iter()
.map(|op| {
let is_write = witness.get_bool_target(op.is_write);
let address = witness.get_target(op.address);
let timestamp = witness.get_target(op.timestamp);
let value = witness.get_target(op.value);
MemoryOp {
is_write,
address,
timestamp,
value,
}
})
.collect();
ops.sort_unstable_by_key(|op| {
(
op.address.to_canonical_u64(),
op.timestamp.to_canonical_u64(),
)
});
for (op, out_op) in ops.iter().zip(&self.output_ops) {
out_buffer.set_target(out_op.is_write.target, F::from_bool(op.is_write));
out_buffer.set_target(out_op.address, op.address);
out_buffer.set_target(out_op.timestamp, op.timestamp);
out_buffer.set_target(out_op.value, op.value);
}
}
}
#[cfg(test)]
mod tests {
    use anyhow::Result;
    use plonky2::field::types::{Field, PrimeField64, Sample};
    use plonky2::iop::witness::PartialWitness;
    use plonky2::plonk::circuit_data::CircuitConfig;
    use plonky2::plonk::config::{GenericConfig, PoseidonGoldilocksConfig};
    use rand::{thread_rng, Rng};

    use super::*;

    /// Builds a circuit that sorts `size` random memory ops, supplies the
    /// externally computed sorted order as the witness for the output
    /// targets, and checks that the proof verifies.
    fn test_sorting(size: usize, address_bits: usize, timestamp_bits: usize) -> Result<()> {
        const D: usize = 2;
        type C = PoseidonGoldilocksConfig;
        type F = <C as GenericConfig<D>>::F;
        let config = CircuitConfig::standard_recursion_config();
        let mut pw = PartialWitness::new();
        let mut builder = CircuitBuilder::<F, D>::new(config);
        let mut rng = thread_rng();
        // Random input ops; each field is bounded by its declared bit width.
        let is_write_vals: Vec<_> = (0..size).map(|_| rng.gen_range(0..2) != 0).collect();
        let address_vals: Vec<_> = (0..size)
            .map(|_| F::from_canonical_u64(rng.gen_range(0..1 << address_bits as u64)))
            .collect();
        let timestamp_vals: Vec<_> = (0..size)
            .map(|_| F::from_canonical_u64(rng.gen_range(0..1 << timestamp_bits as u64)))
            .collect();
        let value_vals: Vec<_> = (0..size).map(|_| F::rand()).collect();
        // Embed the input ops in the circuit as constants.
        let input_ops: Vec<MemoryOpTarget> = izip!(
            is_write_vals.clone(),
            address_vals.clone(),
            timestamp_vals.clone(),
            value_vals.clone()
        )
        .map(|(is_write, address, timestamp, value)| MemoryOpTarget {
            is_write: builder.constant_bool(is_write),
            address: builder.constant(address),
            timestamp: builder.constant(timestamp),
            value: builder.constant(value),
        })
        .collect();
        // Mirror the circuit's sort key: address * 2^timestamp_bits + timestamp.
        let combined_vals_u64: Vec<_> = timestamp_vals
            .iter()
            .zip(&address_vals)
            .map(|(&t, &a)| (a.to_canonical_u64() << timestamp_bits as u64) + t.to_canonical_u64())
            .collect();
        // Compute the expected sorted order outside the circuit.
        // NOTE(review): this uses a stable sort while the in-circuit generator
        // uses sort_unstable; on duplicate keys the two orders could differ —
        // confirm keys are distinct with high probability at these sizes.
        let mut input_ops_and_keys: Vec<_> =
            izip!(is_write_vals, address_vals, timestamp_vals, value_vals)
                .zip(combined_vals_u64)
                .collect::<Vec<_>>();
        input_ops_and_keys.sort_by_key(|(_, val)| *val);
        let input_ops_sorted: Vec<_> = input_ops_and_keys.iter().map(|(x, _)| x).collect();
        let output_ops = sort_memory_ops_circuit(
            &mut builder,
            input_ops.as_slice(),
            address_bits,
            timestamp_bits,
        );
        // Supply the expected sorted values as the witness for the outputs;
        // these must agree with what the generator produces.
        for i in 0..size {
            pw.set_bool_target(output_ops[i].is_write, input_ops_sorted[i].0);
            pw.set_target(output_ops[i].address, input_ops_sorted[i].1);
            pw.set_target(output_ops[i].timestamp, input_ops_sorted[i].2);
            pw.set_target(output_ops[i].value, input_ops_sorted[i].3);
        }
        let data = builder.build::<C>();
        let proof = data.prove(pw).unwrap();
        data.verify(proof)
    }

    #[test]
    fn test_sorting_small() -> Result<()> {
        let size = 5;
        let address_bits = 20;
        let timestamp_bits = 20;
        test_sorting(size, address_bits, timestamp_bits)
    }

    #[test]
    fn test_sorting_large() -> Result<()> {
        let size = 20;
        let address_bits = 20;
        let timestamp_bits = 20;
        test_sorting(size, address_bits, timestamp_bits)
    }
}