Feat: Use pre-calculated ConstraintMatrices (#2)
* feat: add function for calculating the coefficients
* fix tests / debug coeffs
* feat: use groth16 with configurable matrices
* test: add no r1cs test
* test: add a test to check matrices values
* scaffold of the matrix calculation
* feat: correctly load and use matrices in the without_r1cs variant
* chore: cargo fmt
* chore: cargo fmt / lints
* ci: do not double run tests
* fix: calculate correctly points at inf
* test: use correct abicoder v2 types

Co-authored-by: Kobi Gurkan <kobigurk@gmail.com>
This commit is contained in:
parent 4e2c2d39dd
commit 11e6d04f3b
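Taken together, these changes let a prover go straight from a zkey file to a Groth16 proof: the constraint matrices are read out of the zkey itself instead of being rebuilt from an .r1cs file. A condensed sketch of the new flow, adapted from the `verify_proof_with_zkey_without_r1cs` test added below (the crate-root import paths are assumptions; the test itself uses in-crate paths):

use ark_bn254::{Bn254, Fr};
use ark_circom::{read_zkey, CircomReduction, WitnessCalculator};
use ark_groth16::{create_proof_with_qap_and_matrices, prepare_verifying_key, verify_proof};
use ark_std::{rand::thread_rng, UniformRand};
use std::{collections::HashMap, fs::File};

fn main() {
    // Load the proving key *and* the constraint matrices from the zkey alone.
    let mut file = File::open("./test-vectors/test.zkey").unwrap();
    let (params, matrices) = read_zkey(&mut file).unwrap();

    // Calculate the witness directly as field elements.
    let mut wtns = WitnessCalculator::new("./test-vectors/mycircuit.wasm").unwrap();
    let mut inputs: HashMap<String, Vec<num_bigint::BigInt>> = HashMap::new();
    inputs.insert("a".to_string(), vec![3.into()]);
    inputs.insert("b".to_string(), vec![11.into()]);
    let full_assignment = wtns
        .calculate_witness_element::<Bn254, _>(inputs, false)
        .unwrap();

    // Prove against the pre-calculated matrices; no ConstraintSystem is built.
    let mut rng = thread_rng();
    let (r, s) = (Fr::rand(&mut rng), Fr::rand(&mut rng));
    let proof = create_proof_with_qap_and_matrices::<_, CircomReduction>(
        &params,
        r,
        s,
        &matrices,
        matrices.num_instance_variables,
        matrices.num_constraints,
        full_assignment.as_slice(),
    )
    .unwrap();

    // Verify; index 0 of the assignment is the constant wire, so skip it.
    let pvk = prepare_verifying_key(&params.vk);
    let public_inputs = &full_assignment[1..matrices.num_instance_variables];
    assert!(verify_proof(&pvk, &proof, public_inputs).unwrap());
}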
@@ -1,4 +1,8 @@
-on: [pull_request, push]
+on:
+  push:
+    branches:
+      - master
+  pull_request:
 
 name: Tests
 
@@ -15,7 +15,7 @@ ark-ec = { version = "0.3.0", default-features = false, features = ["parallel"] }
 ark-ff = { version = "0.3.0", default-features = false, features = ["asm", "parallel"] }
 ark-std = { version = "0.3.0", default-features = false }
 ark-bn254 = { version = "0.3.0" }
-ark-groth16 = { git = "https://github.com/arkworks-rs/groth16", version = "0.3.0", features = ["parallel"] }
+ark-groth16 = { git = "https://github.com/gakonst/groth16", version = "0.3.0", branch = "calculate-matrices", features = ["parallel"] }
 ark-poly = { version = "^0.3.0", default-features = false, features = ["parallel"] }
 ark-relations = { version = "0.3.0", default-features = false }
 ark-serialize = { version = "0.3.0", default-features = false }
@@ -75,25 +75,7 @@ impl<E: PairingEngine> CircomBuilder<E> {
         let witness = self
             .cfg
             .wtns
-            .calculate_witness(self.inputs, self.cfg.sanity_check)?;
-
-        use ark_ff::{FpParameters, PrimeField};
-        let modulus = <<E::Fr as PrimeField>::Params as FpParameters>::MODULUS;
-
-        // convert it to field elements
-        use num_traits::Signed;
-        let witness = witness
-            .into_iter()
-            .map(|w| {
-                let w = if w.sign() == num_bigint::Sign::Minus {
-                    // Need to negate the witness element if negative
-                    modulus.into() - w.abs().to_biguint().unwrap()
-                } else {
-                    w.to_biguint().unwrap()
-                };
-                E::Fr::from(w)
-            })
-            .collect::<Vec<_>>();
+            .calculate_witness_element::<E, _>(self.inputs, self.cfg.sanity_check)?;
         circom.witness = Some(witness);
 
         // sanity check
@@ -1,9 +1,8 @@
 use ark_ff::PrimeField;
 use ark_groth16::r1cs_to_qap::{evaluate_constraint, LibsnarkReduction, R1CStoQAP};
 use ark_poly::EvaluationDomain;
-use ark_relations::r1cs::{ConstraintSystemRef, SynthesisError};
+use ark_relations::r1cs::{ConstraintMatrices, ConstraintSystemRef, SynthesisError};
 use ark_std::{cfg_into_iter, cfg_iter, cfg_iter_mut, vec};
-use core::ops::Deref;
 
 /// Implements the witness map used by snarkjs. The arkworks witness map calculates the
 /// coefficients of H through computing (AB-C)/Z in the evaluation domain and going back to the
@@ -21,22 +20,13 @@ impl R1CStoQAP for CircomReduction {
         LibsnarkReduction::instance_map_with_evaluation::<F, D>(cs, t)
     }
 
-    fn witness_map<F: PrimeField, D: EvaluationDomain<F>>(
-        prover: ConstraintSystemRef<F>,
+    fn witness_map_from_matrices<F: PrimeField, D: EvaluationDomain<F>>(
+        matrices: &ConstraintMatrices<F>,
+        num_inputs: usize,
+        num_constraints: usize,
+        full_assignment: &[F],
     ) -> Result<Vec<F>, SynthesisError> {
-        let matrices = prover.to_matrices().unwrap();
         let zero = F::zero();
-        let num_inputs = prover.num_instance_variables();
-        let num_constraints = prover.num_constraints();
-        let cs = prover.borrow().unwrap();
-        let prover = cs.deref();
-
-        let full_assignment = [
-            prover.instance_assignment.as_slice(),
-            prover.witness_assignment.as_slice(),
-        ]
-        .concat();
 
         let domain =
             D::new(num_constraints + num_inputs).ok_or(SynthesisError::PolynomialDegreeTooLarge)?;
         let domain_size = domain.size();
@@ -49,8 +39,8 @@ impl R1CStoQAP for CircomReduction {
             .zip(cfg_iter!(&matrices.a))
             .zip(cfg_iter!(&matrices.b))
             .for_each(|(((a, b), at_i), bt_i)| {
-                *a = evaluate_constraint(at_i, &full_assignment);
-                *b = evaluate_constraint(bt_i, &full_assignment);
+                *a = evaluate_constraint(at_i, full_assignment);
+                *b = evaluate_constraint(bt_i, full_assignment);
             });
 
         {
@@ -59,6 +49,14 @@ impl R1CStoQAP for CircomReduction {
             a[start..end].clone_from_slice(&full_assignment[..num_inputs]);
         }
 
+        let mut c = vec![zero; domain_size];
+        cfg_iter_mut!(c[..num_constraints])
+            .zip(&a)
+            .zip(&b)
+            .for_each(|((c_i, &a), &b)| {
+                *c_i = a * b;
+            });
+
         domain.ifft_in_place(&mut a);
         domain.ifft_in_place(&mut b);
 
@@ -78,13 +76,6 @@ impl R1CStoQAP for CircomReduction {
         drop(a);
         drop(b);
 
-        let mut c = vec![zero; domain_size];
-        cfg_iter_mut!(c[..prover.num_constraints])
-            .enumerate()
-            .for_each(|(i, c)| {
-                *c = evaluate_constraint(&matrices.c[i], &full_assignment);
-            });
-
         domain.ifft_in_place(&mut c);
         D::distribute_powers_and_mul_by_const(&mut c, root_of_unity, F::one());
         domain.fft_in_place(&mut c);
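The reworked witness map above never evaluates a C matrix: over the constraint rows it sets c_i = a_i * b_i, which is valid because a satisfied R1CS row obeys <A_i, z> * <B_i, z> = <C_i, z>. This is also why the zkey parser below can leave the C matrix empty. A toy, self-contained illustration of that identity over plain integers standing in for the field:

/// Evaluate a sparse linear combination `lc` of (coefficient, signal index)
/// pairs at the assignment vector `z`.
fn evaluate_lc(lc: &[(u64, usize)], z: &[u64]) -> u64 {
    lc.iter().map(|&(coeff, idx)| coeff * z[idx]).sum()
}

fn main() {
    // One R1CS row for "a * b = c", with z = [one, c, a, b] and c = a * b.
    let z = [1u64, 33, 3, 11];
    let a_row = [(1u64, 2usize)]; // selects a
    let b_row = [(1u64, 3usize)]; // selects b
    let c_row = [(1u64, 1usize)]; // selects c

    let a = evaluate_lc(&a_row, &z);
    let b = evaluate_lc(&b_row, &z);
    let c = evaluate_lc(&c_row, &z);

    // For a satisfied constraint the C evaluation is exactly a * b, so the
    // witness map can recompute it without ever materializing a C matrix.
    assert_eq!(a * b, c);
}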
@@ -2,8 +2,9 @@
 //! Solidity Groth16 Verifier smart contracts
 use ark_ff::{BigInteger, FromBytes, PrimeField};
 use ethers_core::types::U256;
+use num_traits::Zero;
 
-use ark_bn254::{Bn254, Fq2, Fr, G1Affine, G2Affine};
+use ark_bn254::{Bn254, Fq, Fq2, Fr, G1Affine, G2Affine};
 
 pub struct Inputs(pub Vec<U256>);
 
@@ -23,9 +24,10 @@ pub struct G1 {
 
 impl From<G1> for G1Affine {
     fn from(src: G1) -> G1Affine {
-        let x = u256_to_point(src.x);
-        let y = u256_to_point(src.y);
-        G1Affine::new(x, y, false)
+        let x: Fq = u256_to_point(src.x);
+        let y: Fq = u256_to_point(src.y);
+        let inf = x.is_zero() && y.is_zero();
+        G1Affine::new(x, y, inf)
     }
 }
 
@@ -62,7 +64,8 @@ impl From<G2> for G2Affine {
         let c1 = u256_to_point(src.y[1]);
         let y = Fq2::new(c0, c1);
 
-        G2Affine::new(x, y, false)
+        let inf = x.is_zero() && y.is_zero();
+        G2Affine::new(x, y, inf)
     }
 }
 
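Both conversions now map the all-zero encoding to the point at infinity instead of constructing an invalid affine point with `infinity = false`. This matches the EVM convention where (0, 0) in calldata denotes the identity element; (0, 0) is not on the BN254 curve, so the flag must be set explicitly. A minimal sketch of the round-trip, using the same arkworks 0.3 `G1Affine::new(x, y, infinity)` constructor as above:

use ark_bn254::{Fq, G1Affine};
use num_traits::Zero;

fn main() {
    // (0, 0) does not satisfy y^2 = x^3 + 3, so it is reserved for identity.
    let (x, y) = (Fq::zero(), Fq::zero());
    let inf = x.is_zero() && y.is_zero();
    let point = G1Affine::new(x, y, inf);
    assert!(point.is_zero()); // decodes to the group identity, as intended
}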
@@ -92,6 +92,36 @@ impl WitnessCalculator {
         Ok(w)
     }
 
+    pub fn calculate_witness_element<
+        E: ark_ec::PairingEngine,
+        I: IntoIterator<Item = (String, Vec<BigInt>)>,
+    >(
+        &mut self,
+        inputs: I,
+        sanity_check: bool,
+    ) -> Result<Vec<E::Fr>> {
+        use ark_ff::{FpParameters, PrimeField};
+        let witness = self.calculate_witness(inputs, sanity_check)?;
+        let modulus = <<E::Fr as PrimeField>::Params as FpParameters>::MODULUS;
+
+        // convert it to field elements
+        use num_traits::Signed;
+        let witness = witness
+            .into_iter()
+            .map(|w| {
+                let w = if w.sign() == num_bigint::Sign::Minus {
+                    // Need to negate the witness element if negative
+                    modulus.into() - w.abs().to_biguint().unwrap()
+                } else {
+                    w.to_biguint().unwrap()
+                };
+                E::Fr::from(w)
+            })
+            .collect::<Vec<_>>();
+
+        Ok(witness)
+    }
+
     pub fn get_witness_buffer(&self) -> Result<Vec<u8>> {
         let ptr = self.instance.get_ptr_witness_buffer()? as usize;
 
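The wasm witness comes back as signed `num_bigint::BigInt` values, while field elements have no sign, so a negative value w must be folded to `modulus - |w|`. A minimal sketch of just that sign handling, using a toy modulus of 7 in place of the BN254 scalar field:

use num_bigint::{BigInt, Sign};
use num_traits::Signed;

fn main() {
    let p = BigInt::from(7);
    let w = BigInt::from(-3);
    // Mirror the branch above: negative witnesses wrap around the modulus.
    let reduced = if w.sign() == Sign::Minus {
        &p - w.abs()
    } else {
        w
    };
    assert_eq!(reduced, BigInt::from(4)); // -3 is congruent to 4 (mod 7)
}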
Changed file: src/zkey.rs (124 lines)
@@ -25,7 +25,8 @@
 //! PointsC(8)
 //! PointsH(9)
 //! Contributions(10)
-use ark_ff::{BigInteger256, FromBytes};
+use ark_ff::{BigInteger256, FromBytes, PrimeField};
+use ark_relations::r1cs::ConstraintMatrices;
 use ark_serialize::{CanonicalDeserialize, SerializationError};
 use ark_std::log2;
 use byteorder::{LittleEndian, ReadBytesExt};
@@ -35,7 +36,7 @@ use std::{
     io::{Read, Result as IoResult, Seek, SeekFrom},
 };
 
-use ark_bn254::{Bn254, Fq, Fq2, G1Affine, G2Affine};
+use ark_bn254::{Bn254, Fq, Fq2, Fr, G1Affine, G2Affine};
 use ark_groth16::{ProvingKey, VerifyingKey};
 use num_traits::Zero;
 
@@ -46,9 +47,13 @@ struct Section {
 }
 
 /// Reads a SnarkJS ZKey file into an Arkworks ProvingKey.
-pub fn read_zkey<R: Read + Seek>(reader: &mut R) -> IoResult<ProvingKey<Bn254>> {
+pub fn read_zkey<R: Read + Seek>(
+    reader: &mut R,
+) -> IoResult<(ProvingKey<Bn254>, ConstraintMatrices<Fr>)> {
     let mut binfile = BinFile::new(reader)?;
-    binfile.proving_key()
+    let proving_key = binfile.proving_key()?;
+    let matrices = binfile.matrices()?;
+    Ok((proving_key, matrices))
 }
 
 #[derive(Debug)]
@@ -137,7 +142,53 @@ impl<'a, R: Read + Seek> BinFile<'a, R> {
         self.g1_section(n_public + 1, 3)
     }
 
-    // Section 4 is the coefficients, we ignore it
+    /// Returns the [`ConstraintMatrices`] corresponding to the zkey
+    pub fn matrices(&mut self) -> IoResult<ConstraintMatrices<Fr>> {
+        let header = self.groth_header()?;
+
+        let section = self.get_section(4);
+        self.reader.seek(SeekFrom::Start(section.position))?;
+        let num_coeffs: u32 = self.reader.read_u32::<LittleEndian>()?;
+
+        // instantiate AB
+        let mut matrices = vec![vec![vec![]; header.domain_size as usize]; 2];
+        let mut max_constraint_index = 0;
+        for _ in 0..num_coeffs {
+            let matrix: u32 = self.reader.read_u32::<LittleEndian>()?;
+            let constraint: u32 = self.reader.read_u32::<LittleEndian>()?;
+            let signal: u32 = self.reader.read_u32::<LittleEndian>()?;
+
+            let value: Fr = deserialize_field_fr(&mut self.reader)?;
+            max_constraint_index = std::cmp::max(max_constraint_index, constraint);
+            matrices[matrix as usize][constraint as usize].push((value, signal as usize));
+        }
+
+        let num_constraints = max_constraint_index as usize - header.n_public;
+        // Remove the public input constraints, Arkworks adds them later
+        matrices.iter_mut().for_each(|m| {
+            m.truncate(num_constraints);
+        });
+        // This is taken from Arkworks' to_matrices() function
+        let a = matrices[0].clone();
+        let b = matrices[1].clone();
+        let a_num_non_zero: usize = a.iter().map(|lc| lc.len()).sum();
+        let b_num_non_zero: usize = b.iter().map(|lc| lc.len()).sum();
+        let matrices = ConstraintMatrices {
+            num_instance_variables: header.n_public + 1,
+            num_witness_variables: header.n_vars - header.n_public,
+            num_constraints,
+
+            a_num_non_zero,
+            b_num_non_zero,
+            c_num_non_zero: 0,
+
+            a,
+            b,
+            c: vec![],
+        };
+
+        Ok(matrices)
+    }
+
     fn a_query(&mut self, n_vars: usize) -> IoResult<Vec<G1Affine>> {
         self.g1_section(n_vars, 5)
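For reference, each of the `num_coeffs` records the loop above consumes from section 4 has this fixed layout (a descriptive sketch, not a type from the crate). Only A and B coefficients are stored; snarkjs never writes a C matrix because the snarkjs-style witness map recomputes the C evaluations as a * b, which is why `c: vec![]` and `c_num_non_zero: 0` are sound:

/// One coefficient record in zkey section 4; all integers little-endian.
struct ZkeyCoeff {
    matrix: u32,     // which matrix: 0 = A, 1 = B (C is never stored)
    constraint: u32, // row: the constraint this coefficient belongs to
    signal: u32,     // column: the wire index the coefficient multiplies
    value: [u8; 32], // Fr value, written by snarkjs scaled by R^2 (see below)
}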
@@ -257,6 +308,13 @@ impl HeaderGroth {
     }
 }
 
+// need to divide by R, since snarkjs outputs the zkey with coefficients
+// multiplied by R^2
+fn deserialize_field_fr<R: Read>(reader: &mut R) -> IoResult<Fr> {
+    let bigint = BigInteger256::read(reader)?;
+    Ok(Fr::new(Fr::new(bigint).into_repr()))
+}
+
 // skips the multiplication by R because Circom points are already in Montgomery form
 fn deserialize_field<R: Read>(reader: &mut R) -> IoResult<Fq> {
     let bigint = BigInteger256::read(reader)?;
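Why the double `Fr::new` divides by R: a sketch of the arithmetic, assuming arkworks 0.3 semantics where `Fp::new` stores its argument directly as the internal Montgomery representation and `into_repr` multiplies by R^-1:

// Let R = 2^256 mod p, and let raw = c * R^2 be the bytes snarkjs wrote for
// a true coefficient c:
//
//   Fr::new(raw)      // internal repr = raw,   field value = raw * R^-1 = c * R
//   .into_repr()      // standard bigint of the value:               c * R
//   Fr::new(c * R)    // internal repr = c * R, field value =        c
//
// The round-trip strips exactly one factor of R, cancelling the extra factor
// in snarkjs' encoding. `deserialize_field` below skips this because Circom
// curve points are already stored in plain Montgomery form.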
@@ -300,9 +358,11 @@ mod tests {
     use serde_json::Value;
     use std::fs::File;
 
+    use crate::witness::WitnessCalculator;
+    use crate::{circom::CircomReduction, CircomBuilder, CircomConfig};
     use ark_groth16::{
-        create_random_proof_with_reduction as prove, prepare_verifying_key, verify_proof,
+        create_proof_with_qap_and_matrices, create_random_proof_with_reduction as prove,
+        prepare_verifying_key, verify_proof,
     };
     use ark_std::rand::thread_rng;
     use num_traits::{One, Zero};
@@ -469,7 +529,7 @@ mod tests {
     fn deser_key() {
         let path = "./test-vectors/test.zkey";
         let mut file = File::open(path).unwrap();
-        let params = read_zkey(&mut file).unwrap();
+        let (params, _matrices) = read_zkey(&mut file).unwrap();
 
         // Check IC
         let expected = vec![
@@ -689,7 +749,7 @@ mod tests {
     fn deser_vk() {
         let path = "./test-vectors/test.zkey";
         let mut file = File::open(path).unwrap();
-        let params = read_zkey(&mut file).unwrap();
+        let (params, _matrices) = read_zkey(&mut file).unwrap();
 
         let json = std::fs::read_to_string("./test-vectors/verification_key.json").unwrap();
         let json: Value = serde_json::from_str(&json).unwrap();
@@ -767,10 +827,10 @@ mod tests {
     }
 
     #[test]
-    fn verify_proof_with_zkey() {
+    fn verify_proof_with_zkey_with_r1cs() {
         let path = "./test-vectors/test.zkey";
         let mut file = File::open(path).unwrap();
-        let params = read_zkey(&mut file).unwrap(); // binfile.proving_key().unwrap();
+        let (params, _matrices) = read_zkey(&mut file).unwrap(); // binfile.proving_key().unwrap();
 
         let cfg = CircomConfig::<Bn254>::new(
             "./test-vectors/mycircuit.wasm",
@@ -794,4 +854,48 @@ mod tests {
 
         assert!(verified);
     }
+
+    #[test]
+    fn verify_proof_with_zkey_without_r1cs() {
+        let path = "./test-vectors/test.zkey";
+        let mut file = File::open(path).unwrap();
+        let (params, matrices) = read_zkey(&mut file).unwrap();
+
+        let mut wtns = WitnessCalculator::new("./test-vectors/mycircuit.wasm").unwrap();
+        let mut inputs: HashMap<String, Vec<num_bigint::BigInt>> = HashMap::new();
+        let values = inputs.entry("a".to_string()).or_insert_with(Vec::new);
+        values.push(3.into());
+
+        let values = inputs.entry("b".to_string()).or_insert_with(Vec::new);
+        values.push(11.into());
+
+        let mut rng = thread_rng();
+        use ark_std::UniformRand;
+        let num_inputs = matrices.num_instance_variables;
+        let num_constraints = matrices.num_constraints;
+        let rng = &mut rng;
+
+        let r = ark_bn254::Fr::rand(rng);
+        let s = ark_bn254::Fr::rand(rng);
+
+        let full_assignment = wtns
+            .calculate_witness_element::<Bn254, _>(inputs, false)
+            .unwrap();
+        let proof = create_proof_with_qap_and_matrices::<_, CircomReduction>(
+            &params,
+            r,
+            s,
+            &matrices,
+            num_inputs,
+            num_constraints,
+            full_assignment.as_slice(),
+        )
+        .unwrap();
+
+        let pvk = prepare_verifying_key(&params.vk);
+        let inputs = &full_assignment[1..num_inputs];
+        let verified = verify_proof(&pvk, &proof, inputs).unwrap();
+
+        assert!(verified);
+    }
 }
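One detail in the new test worth calling out: `full_assignment[0]` is the constant wire fixed to 1, so the slice handed to `verify_proof` starts at index 1 and runs through the instance variables. A hypothetical helper making that explicit:

use ark_bn254::Fr;

/// Extracts the verifier-facing public inputs from a full Circom assignment.
/// Index 0 is the constant-one wire, so it is skipped.
fn public_inputs(full_assignment: &[Fr], num_instance_variables: usize) -> &[Fr] {
    &full_assignment[1..num_instance_variables]
}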
@@ -1,7 +1,4 @@
-use ark_circom::{
-    ethereum::{Inputs, Proof, VerifyingKey},
-    CircomBuilder, CircomConfig,
-};
+use ark_circom::{ethereum, CircomBuilder, CircomConfig};
 use ark_std::rand::thread_rng;
 use color_eyre::Result;
 
@@ -70,18 +67,59 @@ async fn solidity_verifier() -> Result<()> {
     Ok(())
 }
 
+// We need to implement the conversion from Ark-Circom's internal Ethereum types to
+// the ones expected by the abigen'd types. Could we maybe provide a convenience
+// macro for these, given that there's room for implementation error?
 abigen!(Groth16Verifier, "./tests/verifier_abi.json");
+use groth16verifier_mod::{G1Point, G2Point, Proof, VerifyingKey};
+impl From<ethereum::G1> for G1Point {
+    fn from(src: ethereum::G1) -> Self {
+        Self { x: src.x, y: src.y }
+    }
+}
+impl From<ethereum::G2> for G2Point {
+    fn from(src: ethereum::G2) -> Self {
+        // We should use the `.as_tuple()` method which handles converting
+        // the G2 elements to have the second limb first
+        let src = src.as_tuple();
+        Self { x: src.0, y: src.1 }
+    }
+}
+impl From<ethereum::Proof> for Proof {
+    fn from(src: ethereum::Proof) -> Self {
+        Self {
+            a: src.a.into(),
+            b: src.b.into(),
+            c: src.c.into(),
+        }
+    }
+}
+impl From<ethereum::VerifyingKey> for VerifyingKey {
+    fn from(src: ethereum::VerifyingKey) -> Self {
+        Self {
+            alfa_1: src.alpha1.into(),
+            beta_2: src.beta2.into(),
+            gamma_2: src.gamma2.into(),
+            delta_2: src.delta2.into(),
+            ic: src.ic.into_iter().map(|i| i.into()).collect(),
+        }
+    }
+}
 
 impl<M: Middleware> Groth16Verifier<M> {
-    async fn check_proof<I: Into<Inputs>, P: Into<Proof>, VK: Into<VerifyingKey>>(
+    async fn check_proof<
+        I: Into<ethereum::Inputs>,
+        P: Into<ethereum::Proof>,
+        VK: Into<ethereum::VerifyingKey>,
+    >(
         &self,
         proof: P,
         vk: VK,
         inputs: I,
     ) -> Result<bool, ContractError<M>> {
         // convert into the expected format by the contract
-        let proof = proof.into().as_tuple();
-        let vk = vk.into().as_tuple();
+        let proof = proof.into().into();
+        let vk = vk.into().into();
         let inputs = inputs.into().0;
 
         // query the contract
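The `as_tuple()` comment above is about encoding order: arkworks builds Fq2 values as (c0, c1) with the real limb first, while the EVM pairing precompile (and the snarkjs-generated verifier contract) consumes each G2 coordinate with the imaginary limb first. A minimal sketch of the swap (the helper name is hypothetical):

use ethers_core::types::U256;

/// Reorders one G2 coordinate's limbs from arkworks order (c0, c1) to the
/// [c1, c0] order the EVM verifier contract expects in its ABI.
fn swap_limbs(c0: U256, c1: U256) -> [U256; 2] {
    [c1, c0]
}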