From 2b4fbe980f06faca3e6668431c680b358ddb7b3f Mon Sep 17 00:00:00 2001 From: staheri14 Date: Fri, 12 Feb 2021 20:27:55 +0000 Subject: [PATCH] deploy: d0977a67d531266ba36cf0e0ab27cec614b3bdd0 --- .gitignore | 38 +- .update.timestamp | 2 +- Cargo.toml | 58 ++ Makefile | 5 +- README.md | 77 +-- examples/export_test_keys/main.rs | 38 ++ rln | 1 + src/circuit/bench.rs | 230 +++++++ src/circuit/mod.rs | 6 + src/circuit/polynomial.rs | 46 ++ src/circuit/poseidon.rs | 403 ++++++++++++ src/circuit/rln.rs | 480 ++++++++++++++ src/ffi.rs | 375 +++++++++++ src/lib.rs | 14 + src/merkle.rs | 229 +++++++ src/poseidon.rs | 227 +++++++ src/public.rs | 254 ++++++++ src/utils.rs | 80 +++ src/wasm.rs | 132 ++++ tests/all_tests_v2.nim | 3 +- waku.nims | 88 +++ .../protocol/waku_protocol.nim.generated.nim | 608 ++++++++++++++++++ waku/v2/protocol/waku_rln_relay/rln.nim | 20 +- 23 files changed, 3325 insertions(+), 89 deletions(-) create mode 100644 Cargo.toml create mode 100644 examples/export_test_keys/main.rs create mode 160000 rln create mode 100644 src/circuit/bench.rs create mode 100644 src/circuit/mod.rs create mode 100644 src/circuit/polynomial.rs create mode 100644 src/circuit/poseidon.rs create mode 100644 src/circuit/rln.rs create mode 100644 src/ffi.rs create mode 100644 src/lib.rs create mode 100644 src/merkle.rs create mode 100644 src/poseidon.rs create mode 100644 src/public.rs create mode 100644 src/utils.rs create mode 100644 src/wasm.rs create mode 100644 waku.nims create mode 100644 waku/v1/protocol/waku_protocol.nim.generated.nim diff --git a/.gitignore b/.gitignore index f51063449..d9d206eec 100644 --- a/.gitignore +++ b/.gitignore @@ -1,30 +1,8 @@ -/nimcache - -# Executables shall be put in an ignored build/ directory -/build - -# Nimble packages -/vendor/.nimble - -# Generated Files -*.generated.nim - -# ntags/ctags output -/tags - -# a symlink that can't be added to the repo because of Windows -/waku.nims - -# Ignore dynamic, static libs and libtool archive files -*.so -*.dylib -*.a -*.la -*.exe -*.dll - -.DS_Store - -# Ignore simulation generated metrics files -/metrics/prometheus -/metrics/waku-sim-all-nodes-grafana-dashboard.json +/target +/pkg +/examples/www +node_modules +*.key +Cargo.lock +.cargo +tmp_wasm \ No newline at end of file diff --git a/.update.timestamp b/.update.timestamp index a1052cb1b..68abfda7d 100644 --- a/.update.timestamp +++ b/.update.timestamp @@ -1 +1 @@ -1613120032 \ No newline at end of file +1613160730 \ No newline at end of file diff --git a/Cargo.toml b/Cargo.toml new file mode 100644 index 000000000..01a8185dc --- /dev/null +++ b/Cargo.toml @@ -0,0 +1,58 @@ +[package] +name = "rln" +version = "0.1.0" +authors = ["Onur Kılıç "] +edition = "2018" + +[lib] +crate-type = ["cdylib", "rlib"] + +[features] +multicore = ["sapling-crypto/multicore", "bellman/multicore"] +wasm = ["sapling-crypto/wasm", "bellman/wasm", "bellman/nolog"] +bench = [] + +[dependencies] +rand = "0.4" +blake2 = "0.8.1" +sapling-crypto = { package = "sapling-crypto_ce", version = "0.1.3", default-features = false } +# sapling-crypto = {package = "sapling-crypto_ce", path = "../sapling-crypto", default-features = false } +bellman = { package = "bellman_ce", version = "0.3.4", default-features = false } +# bellman = {package = "bellman_ce", path = "../bellman", default-features = false } + +[target.'cfg(target_arch = "wasm32")'.dependencies] +hex = "0.4" +console_error_panic_hook = { version = "0.1.1" } +wasm-bindgen = "=0.2.60" +# wee_alloc = "0.4.5" +web-sys = {version = "0.3", features = 
["console", "Performance", "Window"]} +js-sys = "0.3.37" + +[target.'cfg(target_arch = "wasm32")'.dev-dependencies] +wasm-bindgen-test = "0.3" + +[profile.release] +opt-level = 3 +lto = "thin" +incremental = true + +# build all our deps in release mode +[profile.dev.package."*"] +opt-level = 3 + +[profile.bench] +opt-level = 3 +debug = false +rpath = false +lto = "thin" +incremental = true +debug-assertions = false + + +[profile.test] +opt-level = 3 +incremental = true +debug-assertions = true +debug = true + + diff --git a/Makefile b/Makefile index bbde55694..1011bc365 100644 --- a/Makefile +++ b/Makefile @@ -59,7 +59,7 @@ else NIM_PARAMS := $(NIM_PARAMS) -d:release endif -deps: | deps-common nat-libs waku.nims +deps: | deps-common nat-libs waku.nims rlnlib ifneq ($(USE_LIBBACKTRACE), 0) deps: | libbacktrace endif @@ -118,6 +118,9 @@ endif installganache: npm install ganache-cli; npx ganache-cli -p 8540 -g 0 -l 3000000000000& +rlnlib: + #cargo clean --manifest-path rln/Cargo.toml #TODO may need to clean the rln directory before cloning the rln repo + git clone --branch full-node https://github.com/kilic/rln; git --git-dir=rln/.git reset --hard a80f5d0; cargo build --manifest-path rln/Cargo.toml; test2: | build deps installganache echo -e $(BUILD_MSG) "build/$@" && \ diff --git a/README.md b/README.md index 48defce5a..6a9e75bd5 100644 --- a/README.md +++ b/README.md @@ -1,56 +1,41 @@ -# nim-waku +# RLN -## Introduction +This is the development repo of rate limit nullifier zkSNARK circuits. -The nim-waku repository implements Waku v1 and v2, and provides tools related to it. +For details, see work in progress document [here](https://hackmd.io/tMTLMYmTR5eynw2lwK9n1w?view) -- A Nim implementation of the [Waku v1 protocol](https://specs.vac.dev/waku/waku.html). -- A Nim implementation of the [Waku v2 protocol](https://specs.vac.dev/specs/waku/v2/waku-v2.html). -- CLI applications `wakunode` and `wakunode2` that allows you to run a Waku v1 or v2 node. -- Examples of Waku v1 and v2 usage. -- Various tests of above. +## Test -For more details on Waku v1 and v2, see their respective home folders: - -- [Waku v1](waku/v1/README.md) -- [Waku v2](waku/v2/README.md) - -## How to Build & Run - -These instructions are generic and apply to both Waku v1 and v2. For more -detailed instructions, see Waku v1 and v2 home above. - -### Prerequisites - -* GNU Make, Bash and the usual POSIX utilities. Git 2.9.4 or newer. -* PCRE - -More information on the installation of these can be found [here](https://github.com/status-im/nimbus#prerequisites). - -### Wakunode - -```bash -# The first `make` invocation will update all Git submodules. -# You'll run `make update` after each `git pull`, in the future, to keep those submodules up to date. -make wakunode1 wakunode2 - -# See available command line options -./build/wakunode --help -./build/wakunode2 --help - -# Connect the client directly with the Status test fleet -./build/wakunode --log-level:debug --discovery:off --fleet:test --log-metrics -# TODO Equivalent for v2 +``` +cargo test --release --features multicore rln_32 -- --nocapture ``` -### Waku Protocol Test Suite +## Generate Test Keys -```bash -# Run all the Waku v1 and v2 tests -make test +``` +cargo run --release --example export_test_keys ``` -### Examples +## Wasm Support -Examples can be found in the examples folder. For Waku v2, there is a fully -featured chat example. 
+### Build + +``` +wasm-pack build --release --target=nodejs --scope=rln --out-name=$PACKAGE --out-dir=$PACKAGE_DIR -- --features wasm +``` + +### Test + +With wasm-pack: + +``` +wasm-pack test --release --node -- --features wasm +``` + +With cargo: + +Follow the steps [here](https://rustwasm.github.io/docs/wasm-bindgen/wasm-bindgen-test/usage.html#appendix-using-wasm-bindgen-test-without-wasm-pack) before running the test, then run: + +``` +cargo test --release --target wasm32-unknown-unknown --features wasm +``` \ No newline at end of file diff --git a/examples/export_test_keys/main.rs b/examples/export_test_keys/main.rs new file mode 100644 index 000000000..14d352d31 --- /dev/null +++ b/examples/export_test_keys/main.rs @@ -0,0 +1,38 @@ +#[cfg(not(target_arch = "wasm32"))] +fn main() { + use sapling_crypto::bellman::pairing::bn256::Bn256; + let merkle_depth = 32usize; + test_keys::export::(merkle_depth); +} + +#[cfg(target_arch = "wasm32")] +fn main() { + panic!("should not be run in wasm"); +} + +#[cfg(not(target_arch = "wasm32"))] +mod test_keys { + use sapling_crypto::bellman::pairing::Engine; + pub fn export(merkle_depth: usize) { + use rand::{SeedableRng, XorShiftRng}; + use rln::circuit::poseidon::PoseidonCircuit; + use rln::circuit::rln::{RLNCircuit, RLNInputs}; + use rln::poseidon::PoseidonParams; + use sapling_crypto::bellman::groth16::generate_random_parameters; + use std::fs::File; + + let poseidon_params = PoseidonParams::::new(8, 55, 3, None, None, None); + let mut rng = XorShiftRng::from_seed([0x3dbe6258, 0x8d313d76, 0x3237db17, 0xe5bc0654]); + let hasher = PoseidonCircuit::new(poseidon_params.clone()); + let circuit = RLNCircuit:: { + inputs: RLNInputs::::empty(merkle_depth), + hasher: hasher.clone(), + }; + let parameters = generate_random_parameters(circuit, &mut rng).unwrap(); + let mut file_vk = File::create("verifier.key").unwrap(); + let vk = parameters.vk.clone(); + vk.write(&mut file_vk).unwrap(); + let mut file_paramaters = File::create("parameters.key").unwrap(); + parameters.write(&mut file_paramaters).unwrap(); + } +} diff --git a/rln b/rln new file mode 160000 index 000000000..a80f5d013 --- /dev/null +++ b/rln @@ -0,0 +1 @@ +Subproject commit a80f5d013eb092ff18bd1d946c57565e2cdc65da diff --git a/src/circuit/bench.rs b/src/circuit/bench.rs new file mode 100644 index 000000000..fd2754385 --- /dev/null +++ b/src/circuit/bench.rs @@ -0,0 +1,230 @@ +use crate::circuit::rln::{RLNCircuit, RLNInputs}; +use crate::merkle::MerkleTree; +use crate::poseidon::{Poseidon as PoseidonHasher, PoseidonParams}; +use crate::{circuit::poseidon::PoseidonCircuit, public::RLNSignal}; +use rand::{Rand, SeedableRng, XorShiftRng}; +use sapling_crypto::bellman::groth16::*; +use sapling_crypto::bellman::pairing::ff::{Field, PrimeField, PrimeFieldRepr}; +use sapling_crypto::bellman::pairing::Engine; +use sapling_crypto::bellman::Circuit; +use sapling_crypto::circuit::test::TestConstraintSystem; +use std::io::{self, ErrorKind, Read, Write}; +use std::thread::sleep; +use std::time::{Duration, Instant}; +use std::{error::Error, hash::Hash}; + +use crate::public::RLN; + +pub struct ProverBenchResult { + pub prover_key_size: usize, + pub prover_time: f64, +} + +impl ProverBenchResult { + pub fn new() -> ProverBenchResult { + ProverBenchResult { + prover_key_size: 0, + prover_time: 0f64, + } + } +} + +pub fn run_rln_prover_bench( + merkle_depth: usize, + poseidon_params: PoseidonParams, +) -> ProverBenchResult { + RLNTest::new(merkle_depth, Some(poseidon_params)).run_prover_bench() +} + +pub 
struct RLNTest +where + E: Engine, +{ + rln: RLN, + merkle_depth: usize, +} + +impl RLNTest +where + E: Engine, +{ + fn rng() -> XorShiftRng { + XorShiftRng::from_seed([0x3dbe6258, 0x8d313d76, 0x3237db17, 0xe5bc0654]) + } + + fn secret_key() -> E::Fr { + E::Fr::from_str("1001").unwrap() + } + + fn insert_public_key(&mut self) { + let hasher = self.hasher(); + let public_key = hasher.hash(vec![Self::secret_key()]); + let mut pubkey_data: Vec = Vec::new(); + public_key.into_repr().write_le(&mut pubkey_data).unwrap(); + self.rln.update_next_member(pubkey_data.as_slice()).unwrap(); + } + + fn id_index() -> usize { + 0 + } + + pub fn new(merkle_depth: usize, poseidon_params: Option>) -> RLNTest { + let mut rln_test = RLNTest { + rln: RLN::new(merkle_depth, poseidon_params), + merkle_depth, + }; + rln_test.insert_public_key(); + rln_test + } + + pub fn hasher(&self) -> PoseidonHasher { + self.rln.hasher() + } + + pub fn valid_inputs(&self) -> RLNInputs { + let mut rng = Self::rng(); + let hasher = self.rln.hasher(); + + // Initialize empty merkle tree + let merkle_depth = self.merkle_depth; + let mut membership_tree = MerkleTree::empty(hasher.clone(), merkle_depth); + + // A. setup an identity + + let secret_key = E::Fr::rand(&mut rng); + let id_comm: E::Fr = hasher.hash(vec![secret_key.clone()]); + + // B. insert to the membership tree + + let id_index = 6; // any number below 2^depth will work + membership_tree.update(id_index, id_comm).unwrap(); + + // C.1 get membership witness + + let auth_path = membership_tree.get_witness(id_index).unwrap(); + assert!(membership_tree + .check_inclusion(auth_path.clone(), id_index) + .unwrap()); + + // C.2 prepare sss + + // get current epoch + let epoch = E::Fr::rand(&mut rng); + + let signal_hash = E::Fr::rand(&mut rng); + // evaluation point is the signal_hash + let share_x = signal_hash.clone(); + + // calculate current line equation + let a_0 = secret_key.clone(); + let a_1: E::Fr = hasher.hash(vec![a_0, epoch]); + + // evaluate line equation + let mut share_y = a_1.clone(); + share_y.mul_assign(&share_x); + share_y.add_assign(&a_0); + + // calculate nullfier + let nullifier = hasher.hash(vec![a_1]); + + // compose the circuit + + let inputs = RLNInputs:: { + share_x: Some(share_x), + share_y: Some(share_y), + epoch: Some(epoch), + nullifier: Some(nullifier), + root: Some(membership_tree.get_root()), + id_key: Some(secret_key), + auth_path: auth_path.into_iter().map(|w| Some(w)).collect(), + }; + + inputs + } + + pub fn signal(&self) -> RLNSignal { + let mut rng = Self::rng(); + let epoch = E::Fr::rand(&mut rng); + let signal_hash = E::Fr::rand(&mut rng); + + RLNSignal { + epoch, + hash: signal_hash, + } + } + + pub fn synthesize(&self) -> usize { + let hasher = PoseidonCircuit::new(self.rln.poseidon_params()); + println!("{}", self.merkle_depth); + let inputs = self.valid_inputs(); + let circuit = RLNCircuit:: { + inputs: inputs.clone(), + hasher: hasher.clone(), + }; + + let mut cs = TestConstraintSystem::::new(); + + let circuit = circuit.clone(); + match circuit.synthesize(&mut cs) { + Ok(_) => (), + Err(e) => { + println!("err\n{}", e); + } + } + let unsatisfied = cs.which_is_unsatisfied(); + if unsatisfied.is_some() { + panic!("unsatisfied\n{}", unsatisfied.unwrap()); + } + let unconstrained = cs.find_unconstrained(); + if !unconstrained.is_empty() { + panic!("unconstrained\n{}", unconstrained); + } + assert!(cs.is_satisfied()); + cs.num_constraints() + } + + pub fn run_prover_bench(&self) -> ProverBenchResult { + let mut signal_data: Vec = 
Vec::new(); + let signal = self.signal(); + signal.write(&mut signal_data).unwrap(); + + let mut proof: Vec = Vec::new(); + let now = Instant::now(); + + let mut secret_key_data: Vec = Vec::new(); + let secret_key = Self::secret_key(); + secret_key + .into_repr() + .write_le(&mut secret_key_data) + .unwrap(); + let id_index = Self::id_index(); + + self.rln + .generate_proof( + signal_data.as_slice(), + secret_key_data.as_slice(), + id_index, + &mut proof, + ) + .unwrap(); + + let prover_time = now.elapsed().as_millis() as f64 / 1000.0; + + assert!(self.rln.verify(proof.as_slice()).unwrap(), true); + + let mut circuit_parameters: Vec = Vec::new(); + self.rln + .export_circuit_parameters(&mut circuit_parameters) + .unwrap(); + let prover_key_size = circuit_parameters.len(); + + ProverBenchResult { + prover_time, + prover_key_size, + } + } + + pub fn export_circuit_parameters(&self, w: W) -> io::Result<()> { + self.rln.export_circuit_parameters(w) + } +} diff --git a/src/circuit/mod.rs b/src/circuit/mod.rs new file mode 100644 index 000000000..77798738b --- /dev/null +++ b/src/circuit/mod.rs @@ -0,0 +1,6 @@ +mod polynomial; +pub mod poseidon; +pub mod rln; + +#[cfg(any(test, feature = "bench"))] +pub mod bench; diff --git a/src/circuit/polynomial.rs b/src/circuit/polynomial.rs new file mode 100644 index 000000000..d49801c85 --- /dev/null +++ b/src/circuit/polynomial.rs @@ -0,0 +1,46 @@ +use sapling_crypto::bellman::pairing::ff::{Field, PrimeField, PrimeFieldRepr}; +use sapling_crypto::bellman::pairing::Engine; +use sapling_crypto::bellman::{Circuit, ConstraintSystem, SynthesisError, Variable}; +use sapling_crypto::circuit::{boolean, ecc, num, Assignment}; + +// helper for horner evaluation methods +// b = a_0 + a_1 * x +pub fn allocate_add_with_coeff( + mut cs: CS, + a1: &num::AllocatedNum, + x: &num::AllocatedNum, + a0: &num::AllocatedNum, +) -> Result, SynthesisError> +where + E: Engine, + CS: ConstraintSystem, +{ + let ax = num::AllocatedNum::alloc(cs.namespace(|| "a1x"), || { + let mut ax_val = *a1.get_value().get()?; + let x_val = *x.get_value().get()?; + ax_val.mul_assign(&x_val); + Ok(ax_val) + })?; + + cs.enforce( + || "a1*x", + |lc| lc + a1.get_variable(), + |lc| lc + x.get_variable(), + |lc| lc + ax.get_variable(), + ); + + let y = num::AllocatedNum::alloc(cs.namespace(|| "y"), || { + let ax_val = *ax.get_value().get()?; + let mut y_val = *a0.get_value().get()?; + y_val.add_assign(&ax_val); + Ok(y_val) + })?; + + cs.enforce( + || "enforce y", + |lc| lc + ax.get_variable() + a0.get_variable(), + |lc| lc + CS::one(), + |lc| lc + y.get_variable(), + ); + Ok(y) +} diff --git a/src/circuit/poseidon.rs b/src/circuit/poseidon.rs new file mode 100644 index 000000000..f3e276fc2 --- /dev/null +++ b/src/circuit/poseidon.rs @@ -0,0 +1,403 @@ +use crate::poseidon::{Poseidon as PoseidonHasher, PoseidonParams}; +use sapling_crypto::bellman::pairing::ff::{Field, PrimeField, PrimeFieldRepr}; +use sapling_crypto::bellman::pairing::Engine; +use sapling_crypto::bellman::{Circuit, ConstraintSystem, LinearCombination, SynthesisError}; +use sapling_crypto::circuit::{boolean, ecc, num, Assignment}; + +#[derive(Clone)] +struct Element +where + E: Engine, +{ + an: Option>, + nu: Option>, +} + +enum RoundType { + Full, + Partial, + Exhausted, +} + +struct RoundCtx<'a, E> +where + E: Engine, +{ + number: usize, + params: &'a PoseidonParams, +} + +struct State +where + E: Engine, +{ + elements: Vec>, +} + +#[derive(Clone)] +pub struct PoseidonCircuit +where + E: Engine, +{ + params: PoseidonParams, +} + +impl 
Element +where + E: Engine, +{ + pub fn new_from_alloc(an: num::AllocatedNum) -> Self { + return Element { + an: Some(an), + nu: None, + }; + } + + pub fn new_from_num(nu: num::Num) -> Self { + return Element { + an: None, + nu: Some(nu), + }; + } + + pub fn is_allocated(&self) -> bool { + return self.an.is_some(); + } + + pub fn is_number(&self) -> bool { + return self.nu.is_some(); + } + + pub fn update_with_allocated(&mut self, an: num::AllocatedNum) { + self.an = Some(an); + self.nu = None; + } + + pub fn update_with_num(&mut self, nu: num::Num) { + self.nu = Some(nu); + self.an = None; + } + + pub fn num(&self) -> num::Num { + if let Some(nu) = self.nu.clone() { + nu + } else { + match self.an.clone() { + Some(an) => num::Num::from(an), + None => panic!("element not exist"), + } + } + } + + pub fn allocate>( + &self, + mut cs: CS, + ) -> Result, SynthesisError> { + match self.nu.clone() { + Some(nu) => { + let v = num::AllocatedNum::alloc(cs.namespace(|| "allocate num"), || { + nu.get_value() + .ok_or_else(|| SynthesisError::AssignmentMissing) + })?; + cs.enforce( + || format!("enforce allocated"), + |_| nu.lc(E::Fr::one()), + |lc| lc + CS::one(), + |lc| lc + v.get_variable(), + ); + Ok(v) + } + None => panic!(""), + } + } + + pub fn allocated(&self) -> Option> { + self.an.clone() + } +} + +impl<'a, E> RoundCtx<'a, E> +where + E: Engine, +{ + pub fn new(params: &'a PoseidonParams) -> Self { + RoundCtx { + params, + number: 0usize, + } + } + + pub fn width(&self) -> usize { + self.params.width() + } + + pub fn round_number(&self) -> usize { + self.number + } + + pub fn is_full_round(&self) -> bool { + match self.round_type() { + RoundType::Full => true, + _ => false, + } + } + + pub fn is_exhausted(&self) -> bool { + match self.round_type() { + RoundType::Exhausted => true, + _ => false, + } + } + + pub fn is_last_round(&self) -> bool { + self.number == self.params.total_rounds() - 1 + } + + pub fn in_transition(&self) -> bool { + let a1 = self.params.full_round_half_len(); + let a2 = a1 + self.params.partial_round_len(); + self.number == a1 - 1 || self.number == a2 - 1 + } + + pub fn round_constant(&self) -> E::Fr { + self.params.round_constant(self.number) + } + + pub fn mds_matrix_row(&self, i: usize) -> Vec { + let w = self.width(); + let matrix = self.params.mds_matrix(); + matrix[i * w..(i + 1) * w].to_vec() + } + + pub fn round_type(&self) -> RoundType { + let a1 = self.params.full_round_half_len(); + let (a2, a3) = ( + a1 + self.params.partial_round_len(), + self.params.total_rounds(), + ); + if self.number < a1 { + RoundType::Full + } else if self.number >= a1 && self.number < a2 { + RoundType::Partial + } else if self.number >= a2 && self.number < a3 { + RoundType::Full + } else { + RoundType::Exhausted + } + } + + pub fn round_end(&mut self) { + self.number += 1; + } +} + +impl State +where + E: Engine, +{ + pub fn new(elements: Vec>) -> Self { + Self { elements } + } + + pub fn first_allocated>( + &mut self, + mut cs: CS, + ) -> Result, SynthesisError> { + let el = match self.elements[0].allocated() { + Some(an) => an, + None => self.elements[0].allocate(cs.namespace(|| format!("alloc first")))?, + }; + Ok(el) + } + + fn sbox>( + &mut self, + mut cs: CS, + ctx: &mut RoundCtx, + ) -> Result<(), SynthesisError> { + assert_eq!(ctx.width(), self.elements.len()); + + for i in 0..if ctx.is_full_round() { ctx.width() } else { 1 } { + let round_constant = ctx.round_constant(); + let si = { + match self.elements[i].allocated() { + Some(an) => an, + None => self.elements[i] + 
.allocate(cs.namespace(|| format!("alloc sbox input {}", i)))?, + } + }; + let si2 = num::AllocatedNum::alloc( + cs.namespace(|| format!("square with round constant {}", i)), + || { + let mut val = *si.get_value().get()?; + val.add_assign(&round_constant); + val.square(); + Ok(val) + }, + )?; + cs.enforce( + || format!("constraint square with round constant {}", i), + |lc| lc + si.get_variable() + (round_constant, CS::one()), + |lc| lc + si.get_variable() + (round_constant, CS::one()), + |lc| lc + si2.get_variable(), + ); + let si4 = si2.square(cs.namespace(|| format!("si^4 {}", i)))?; + let si5 = num::AllocatedNum::alloc(cs.namespace(|| format!("si^5 {}", i)), || { + let mut val = *si4.get_value().get()?; + let mut si_val = *si.get_value().get()?; + si_val.add_assign(&round_constant); + val.mul_assign(&si_val); + Ok(val) + })?; + cs.enforce( + || format!("constraint sbox result {}", i), + |lc| lc + si.get_variable() + (round_constant, CS::one()), + |lc| lc + si4.get_variable(), + |lc| lc + si5.get_variable(), + ); + self.elements[i].update_with_allocated(si5); + } + + Ok(()) + } + + fn mul_mds_matrix>( + &mut self, + ctx: &mut RoundCtx, + ) -> Result<(), SynthesisError> { + assert_eq!(ctx.width(), self.elements.len()); + + if !ctx.is_last_round() { + // skip mds multiplication in last round + + let mut new_state: Vec> = Vec::new(); + let w = ctx.width(); + + for i in 0..w { + let row = ctx.mds_matrix_row(i); + let mut acc = num::Num::::zero(); + for j in 0..w { + let mut r = self.elements[j].num(); + r.scale(row[j]); + acc.add_assign(&r); + } + new_state.push(acc); + } + + // round ends here + let is_full_round = ctx.is_full_round(); + let in_transition = ctx.in_transition(); + ctx.round_end(); + + // add round constants just after mds if + // first full round has just ended + // or in partial rounds expect the last one. 
+ if in_transition == is_full_round { + // add round constants for elements in {1, t} + let round_constant = ctx.round_constant(); + for i in 1..w { + let mut constant_as_num = num::Num::::zero(); + constant_as_num = constant_as_num.add_bool_with_coeff( + CS::one(), + &boolean::Boolean::Constant(true), + round_constant, + ); + new_state[i].add_assign(&constant_as_num); + } + } + + for (s0, s1) in self.elements.iter_mut().zip(new_state) { + s0.update_with_num(s1); + } + } else { + // terminates hades + ctx.round_end(); + } + Ok(()) + } +} + +impl PoseidonCircuit +where + E: Engine, +{ + pub fn new(params: PoseidonParams) -> Self { + Self { params: params } + } + + pub fn width(&self) -> usize { + self.params.width() + } + + pub fn alloc>( + &self, + mut cs: CS, + input: Vec>, + ) -> Result, SynthesisError> { + assert!(input.len() < self.params.width()); + + let mut elements: Vec> = input + .iter() + .map(|el| Element::new_from_alloc(el.clone())) + .collect(); + elements.resize(self.width(), Element::new_from_num(num::Num::zero())); + + let mut state = State::new(elements); + let mut ctx = RoundCtx::new(&self.params); + loop { + match ctx.round_type() { + RoundType::Exhausted => { + break; + } + _ => { + let round_number = ctx.round_number(); + state.sbox(cs.namespace(|| format!("sbox {}", round_number)), &mut ctx)?; + state.mul_mds_matrix::(&mut ctx)?; + } + } + } + state.first_allocated(cs.namespace(|| format!("allocate result"))) + } +} + +#[test] +fn test_poseidon_circuit() { + use sapling_crypto::bellman::pairing::bn256::{Bn256, Fr}; + use sapling_crypto::bellman::pairing::ff::{Field, PrimeField, PrimeFieldRepr}; + use sapling_crypto::circuit::test::TestConstraintSystem; + + let mut cs = TestConstraintSystem::::new(); + let params = PoseidonParams::new(8, 55, 3, None, None, None); + + let inputs: Vec = ["0", "0"] + .iter() + .map(|e| Fr::from_str(e).unwrap()) + .collect(); + let allocated_inputs = inputs + .clone() + .into_iter() + .enumerate() + .map(|(i, e)| { + let a = num::AllocatedNum::alloc(cs.namespace(|| format!("input {}", i)), || Ok(e)); + a.unwrap() + }) + .collect(); + + let circuit = PoseidonCircuit::::new(params.clone()); + let res_allocated = circuit + .alloc(cs.namespace(|| "hash alloc"), allocated_inputs) + .unwrap(); + let result = res_allocated.get_value().unwrap(); + let poseidon = PoseidonHasher::new(params.clone()); + let expected = poseidon.hash(inputs); + + assert_eq!(result, expected); + assert!(cs.is_satisfied()); + println!( + "number of constraints for (t: {}, rf: {}, rp: {}), {}", + params.width(), + params.full_round_half_len() * 2, + params.partial_round_len(), + cs.num_constraints() + ); +} diff --git a/src/circuit/rln.rs b/src/circuit/rln.rs new file mode 100644 index 000000000..0067e056d --- /dev/null +++ b/src/circuit/rln.rs @@ -0,0 +1,480 @@ +use crate::circuit::polynomial::allocate_add_with_coeff; +use crate::circuit::poseidon::PoseidonCircuit; +use crate::poseidon::{Poseidon as PoseidonHasher, PoseidonParams}; +use sapling_crypto::bellman::pairing::ff::{Field, PrimeField, PrimeFieldRepr}; +use sapling_crypto::bellman::pairing::Engine; +use sapling_crypto::bellman::{Circuit, ConstraintSystem, SynthesisError, Variable}; +use sapling_crypto::circuit::{boolean, ecc, num, Assignment}; +use sapling_crypto::jubjub::{JubjubEngine, JubjubParams, PrimeOrder}; + +use std::io::{self, Read, Write}; + +// Rate Limit Nullifier + +#[derive(Clone)] +pub struct RLNInputs +where + E: Engine, +{ + // Public inputs + + // share, (x, y), + // where x should be hash of 
the signal + // and y is the evaluation + pub share_x: Option, + pub share_y: Option, + + // epoch is the external nullifier + // we derive the line equation and the nullifier from epoch + pub epoch: Option, + + // nullifier + pub nullifier: Option, + + // root is the current state of membership set + pub root: Option, + + // Private inputs + + // id_key must be a preimage of a leaf in membership tree. + // id_key also together with epoch will be used to construct + // a secret line equation together with the epoch + pub id_key: Option, + + // authentication path of the member + pub auth_path: Vec>, +} + +impl RLNInputs +where + E: Engine, +{ + pub fn public_inputs(&self) -> Vec { + vec![ + self.root.unwrap(), + self.epoch.unwrap(), + self.share_x.unwrap(), + self.share_y.unwrap(), + self.nullifier.unwrap(), + ] + } + + pub fn merkle_depth(&self) -> usize { + self.auth_path.len() + } + + pub fn empty(merkle_depth: usize) -> RLNInputs { + RLNInputs:: { + share_x: None, + share_y: None, + epoch: None, + nullifier: None, + root: None, + id_key: None, + auth_path: vec![None; merkle_depth], + } + } + + pub fn read(mut reader: R) -> io::Result> { + let mut buf = ::Repr::default(); + + buf.read_le(&mut reader)?; + let share_x = + E::Fr::from_repr(buf).map_err(|e| io::Error::new(io::ErrorKind::InvalidData, e))?; + + + buf.read_le(&mut reader)?; + let share_y = + E::Fr::from_repr(buf).map_err(|e| io::Error::new(io::ErrorKind::InvalidData, e))?; + buf.read_le(&mut reader)?; + let epoch = + E::Fr::from_repr(buf).map_err(|e| io::Error::new(io::ErrorKind::InvalidData, e))?; + buf.read_le(&mut reader)?; + let nullifier = + E::Fr::from_repr(buf).map_err(|e| io::Error::new(io::ErrorKind::InvalidData, e))?; + buf.read_le(&mut reader)?; + let root = + E::Fr::from_repr(buf).map_err(|e| io::Error::new(io::ErrorKind::InvalidData, e))?; + buf.read_le(&mut reader)?; + let id_key = + E::Fr::from_repr(buf).map_err(|e| io::Error::new(io::ErrorKind::InvalidData, e))?; + let auth_path = Self::decode_auth_path(&mut reader)?; + Ok(RLNInputs { + share_x: Some(share_x), + share_y: Some(share_y), + epoch: Some(epoch), + nullifier: Some(nullifier), + root: Some(root), + id_key: Some(id_key), + auth_path, + }) + } + + pub fn write(&self, mut writer: W) -> io::Result<()> { + self.share_x + .unwrap() + .into_repr() + .write_le(&mut writer) + .unwrap(); + self.share_y + .unwrap() + .into_repr() + .write_le(&mut writer) + .unwrap(); + self.epoch + .unwrap() + .into_repr() + .write_le(&mut writer) + .unwrap(); + self.nullifier + .unwrap() + .into_repr() + .write_le(&mut writer) + .unwrap(); + self.root + .unwrap() + .into_repr() + .write_le(&mut writer) + .unwrap(); + self.id_key + .unwrap() + .into_repr() + .write_le(&mut writer) + .unwrap(); + Self::encode_auth_path(&mut writer, self.auth_path.clone()).unwrap(); + Ok(()) + } + + pub fn read_public_inputs(mut reader: R) -> io::Result> { + let mut buf = ::Repr::default(); + buf.read_le(&mut reader)?; + let root = + E::Fr::from_repr(buf).map_err(|e| io::Error::new(io::ErrorKind::InvalidData, e))?; + buf.read_le(&mut reader)?; + let epoch = + E::Fr::from_repr(buf).map_err(|e| io::Error::new(io::ErrorKind::InvalidData, e))?; + buf.read_le(&mut reader)?; + let share_x = + E::Fr::from_repr(buf).map_err(|e| io::Error::new(io::ErrorKind::InvalidData, e))?; + buf.read_le(&mut reader)?; + let share_y = + E::Fr::from_repr(buf).map_err(|e| io::Error::new(io::ErrorKind::InvalidData, e))?; + buf.read_le(&mut reader)?; + let nullifier = + E::Fr::from_repr(buf).map_err(|e| 
io::Error::new(io::ErrorKind::InvalidData, e))?; + Ok(vec![root, epoch, share_x, share_y, nullifier]) + } + + pub fn write_public_inputs(&self, mut writer: W) -> io::Result<()> { + self.root.unwrap().into_repr().write_le(&mut writer)?; + self.epoch.unwrap().into_repr().write_le(&mut writer)?; + self.share_x.unwrap().into_repr().write_le(&mut writer)?; + self.share_y.unwrap().into_repr().write_le(&mut writer)?; + self.nullifier.unwrap().into_repr().write_le(&mut writer)?; + Ok(()) + } + + pub fn encode_auth_path( + mut writer: W, + auth_path: Vec>, + ) -> io::Result<()> { + let path_len = auth_path.len() as u8; + writer.write(&[path_len])?; + for el in auth_path.iter() { + let c = el.unwrap(); + if c.1 { + writer.write(&[1])?; + } else { + writer.write(&[0])?; + } + c.0.into_repr().write_le(&mut writer).unwrap(); + } + Ok(()) + } + + pub fn decode_auth_path(mut reader: R) -> io::Result>> { + let mut byte_buf = vec![0u8; 1]; + let mut el_buf = ::Repr::default(); + let mut auth_path: Vec> = vec![]; + reader.read_exact(&mut byte_buf)?; + let path_len = byte_buf[0]; + if path_len < 2 { + return Err(io::Error::new( + io::ErrorKind::InvalidInput, + "invalid path length", + )); + } + for _ in 0..path_len { + reader.read_exact(&mut byte_buf)?; + let path_dir = match byte_buf[0] { + 0u8 => false, + 1u8 => true, + _ => { + return Err(io::Error::new( + io::ErrorKind::InvalidInput, + "invalid path direction", + )) + } + }; + el_buf.read_le(&mut reader)?; + let node = E::Fr::from_repr(el_buf) + .map_err(|e| io::Error::new(io::ErrorKind::InvalidData, e))?; + auth_path.push(Some((node, path_dir))); + } + Ok(auth_path) + } +} + +#[derive(Clone)] +pub struct RLNCircuit +where + E: Engine, +{ + pub inputs: RLNInputs, + pub hasher: PoseidonCircuit, +} + +impl Circuit for RLNCircuit +where + E: Engine, +{ + fn synthesize>(self, cs: &mut CS) -> Result<(), SynthesisError> { + // 1. Part + // Membership constraints + // root == merkle_proof(auth_path, preimage_of_leaf) + + let root = num::AllocatedNum::alloc(cs.namespace(|| "root"), || { + let value = self.inputs.root.clone(); + Ok(*value.get()?) + })?; + root.inputize(cs.namespace(|| "root is public"))?; + + let preimage = num::AllocatedNum::alloc(cs.namespace(|| "preimage"), || { + let value = self.inputs.id_key; + Ok(*value.get()?) + })?; + + // identity is a leaf of membership tree + + let identity = self + .hasher + .alloc(cs.namespace(|| "identity"), vec![preimage.clone()])?; + + // accumulator up to the root + + let mut acc = identity.clone(); + + // ascend the tree + + let auth_path_witness = self.inputs.auth_path.clone(); + for (i, e) in auth_path_witness.into_iter().enumerate() { + let cs = &mut cs.namespace(|| format!("auth path {}", i)); + let position = boolean::Boolean::from(boolean::AllocatedBit::alloc( + cs.namespace(|| "position bit"), + e.map(|e| e.1), + )?); + let path_element = + num::AllocatedNum::alloc(cs.namespace(|| "path element"), || Ok(e.get()?.0))?; + + let (xr, xl) = num::AllocatedNum::conditionally_reverse( + cs.namespace(|| "conditional reversal of preimage"), + &acc, + &path_element, + &position, + )?; + + acc = self + .hasher + .alloc(cs.namespace(|| "hash couple"), vec![xl, xr])?; + } + + // see if it is a member + + cs.enforce( + || "enforce membership", + |lc| lc + acc.get_variable(), + |lc| lc + CS::one(), + |lc| lc + root.get_variable(), + ); + + // 2. 
Part + // Line Equation Constaints + // a_1 = hash(a_0, epoch) + // share_y == a_0 + a_1 * share_x + + let epoch = num::AllocatedNum::alloc(cs.namespace(|| "epoch"), || { + let value = self.inputs.epoch.clone(); + Ok(*value.get()?) + })?; + epoch.inputize(cs.namespace(|| "epoch is public"))?; + + let a_0 = preimage.clone(); + + // a_1 == h(a_0, epoch) + + let a_1 = self + .hasher + .alloc(cs.namespace(|| "a_1"), vec![a_0.clone(), epoch])?; + + let share_x = num::AllocatedNum::alloc(cs.namespace(|| "share x"), || { + let value = self.inputs.share_x.clone(); + Ok(*value.get()?) + })?; + share_x.inputize(cs.namespace(|| "share x is public"))?; + + // constaint the evaluation the line equation + + let eval = allocate_add_with_coeff(cs.namespace(|| "eval"), &a_1, &share_x, &a_0)?; + + let share_y = num::AllocatedNum::alloc(cs.namespace(|| "share y"), || { + let value = self.inputs.share_y.clone(); + Ok(*value.get()?) + })?; + share_y.inputize(cs.namespace(|| "share y is public"))?; + + // see if share satisfies the line equation + + cs.enforce( + || "enforce lookup", + |lc| lc + share_y.get_variable(), + |lc| lc + CS::one(), + |lc| lc + eval.get_variable(), + ); + + // 3. Part + // Nullifier constraints + + // hashing secret twice with epoch ingredient + // a_1 == hash(a_0, epoch) is already constrained + + // nullifier == hash(a_1) + + let nullifier_calculated = self + .hasher + .alloc(cs.namespace(|| "calculated nullifier"), vec![a_1.clone()])?; + + let nullifier = num::AllocatedNum::alloc(cs.namespace(|| "nullifier"), || { + let value = self.inputs.nullifier.clone(); + Ok(*value.get()?) + })?; + nullifier.inputize(cs.namespace(|| "nullifier is public"))?; + + // check if correct nullifier supplied + + cs.enforce( + || "enforce nullifier", + |lc| lc + nullifier_calculated.get_variable(), + |lc| lc + CS::one(), + |lc| lc + nullifier.get_variable(), + ); + + Ok(()) + } +} + +#[cfg(test)] +mod test { + + use super::RLNInputs; + use crate::circuit::bench; + use crate::poseidon::PoseidonParams; + use sapling_crypto::bellman::pairing::bls12_381::Bls12; + use sapling_crypto::bellman::pairing::bn256::Bn256; + use sapling_crypto::bellman::pairing::Engine; + + struct TestSuite { + merkle_depth: usize, + poseidon_parameters: PoseidonParams, + } + + fn cases() -> Vec> { + vec![ + TestSuite { + merkle_depth: 3, + poseidon_parameters: PoseidonParams::new(8, 55, 3, None, None, None), + }, + TestSuite { + merkle_depth: 24, + poseidon_parameters: PoseidonParams::new(8, 55, 3, None, None, None), + }, + TestSuite { + merkle_depth: 32, + poseidon_parameters: PoseidonParams::new(8, 55, 3, None, None, None), + }, + TestSuite { + merkle_depth: 16, + poseidon_parameters: PoseidonParams::new(8, 33, 3, None, None, None), + }, + TestSuite { + merkle_depth: 24, + poseidon_parameters: PoseidonParams::new(8, 33, 3, None, None, None), + }, + TestSuite { + merkle_depth: 32, + poseidon_parameters: PoseidonParams::new(8, 33, 3, None, None, None), + }, + ] + } + + #[test] + fn test_rln_bn() { + use sapling_crypto::bellman::pairing::bn256::Bn256; + let cases = cases::(); + for case in cases.iter() { + let rln_test = bench::RLNTest::::new( + case.merkle_depth, + Some(case.poseidon_parameters.clone()), + ); + let num_constraints = rln_test.synthesize(); + let result = rln_test.run_prover_bench(); + println!( + "bn256, t: {}, rf: {}, rp: {}, merkle depth: {}", + case.poseidon_parameters.width(), + case.poseidon_parameters.full_round_half_len() * 2, + case.poseidon_parameters.partial_round_len(), + case.merkle_depth, + ); + 
println!("number of constatins:\t{}", num_constraints); + println!("prover key size:\t{}", result.prover_key_size); + println!("prover time:\t{}", result.prover_time); + } + } + + #[test] + fn test_input_serialization() { + use sapling_crypto::bellman::pairing::bn256::{Bn256, Fr}; + use sapling_crypto::bellman::pairing::ff::{Field, PrimeField, PrimeFieldRepr}; + let share_x = Fr::from_str("1").unwrap(); + let share_y = Fr::from_str("2").unwrap(); + let epoch = Fr::from_str("3").unwrap(); + let nullifier = Fr::from_str("4").unwrap(); + let root = Fr::from_str("5").unwrap(); + let id_key = Fr::from_str("6").unwrap(); + let auth_path = vec![ + Some((Fr::from_str("20").unwrap(), false)), + Some((Fr::from_str("21").unwrap(), true)), + Some((Fr::from_str("22").unwrap(), true)), + Some((Fr::from_str("23").unwrap(), false)), + ]; + let input0 = RLNInputs:: { + share_x: Some(share_x), + share_y: Some(share_y), + epoch: Some(epoch), + nullifier: Some(nullifier), + root: Some(root), + id_key: Some(id_key), + auth_path, + }; + let mut raw_inputs: Vec = Vec::new(); + input0.write(&mut raw_inputs).unwrap(); + let mut reader = raw_inputs.as_slice(); + let input1 = RLNInputs::::read(&mut reader).unwrap(); + assert_eq!(input0.share_x, input1.share_x); + assert_eq!(input0.share_y, input1.share_y); + assert_eq!(input0.epoch, input1.epoch); + assert_eq!(input0.nullifier, input1.nullifier); + assert_eq!(input0.root, input1.root); + assert_eq!(input0.id_key, input1.id_key); + assert_eq!(input0.auth_path, input1.auth_path); + } +} diff --git a/src/ffi.rs b/src/ffi.rs new file mode 100644 index 000000000..5b73bce3c --- /dev/null +++ b/src/ffi.rs @@ -0,0 +1,375 @@ +use crate::{circuit::rln, public::RLN}; +use bellman::pairing::bn256::Bn256; +use std::slice; + +/// Buffer struct is taken from +/// https://github.com/celo-org/celo-threshold-bls-rs/blob/master/crates/threshold-bls-ffi/src/ffi.rs + +#[repr(C)] +#[derive(Clone, Debug, PartialEq)] +pub struct Buffer { + pub ptr: *const u8, + pub len: usize, +} + +impl From<&[u8]> for Buffer { + fn from(src: &[u8]) -> Self { + Self { + ptr: &src[0] as *const u8, + len: src.len(), + } + } +} + +impl<'a> From<&Buffer> for &'a [u8] { + fn from(src: &Buffer) -> &'a [u8] { + unsafe { slice::from_raw_parts(src.ptr, src.len) } + } +} +#[repr(C)] +#[derive(Clone, Debug, PartialEq)] +pub struct Auth { + secret_buffer: *const Buffer, + pub index: usize, +} + +impl Auth { + fn get_secret(&self) -> &[u8] { + let secret_data = <&[u8]>::from(unsafe { &*self.secret_buffer }); + secret_data + } +} + +#[no_mangle] +pub extern "C" fn new_circuit_from_params( + merkle_depth: usize, + parameters_buffer: *const Buffer, + ctx: *mut *mut RLN, +) -> bool { + let buffer = <&[u8]>::from(unsafe { &*parameters_buffer }); + let rln = match RLN::::new_with_raw_params(merkle_depth, buffer, None) { + Ok(rln) => rln, + Err(_) => return false, + }; + unsafe { *ctx = Box::into_raw(Box::new(rln)) }; + true +} + +#[no_mangle] +pub extern "C" fn update_next_member(ctx: *mut RLN, input_buffer: *const Buffer) -> bool { + let rln = unsafe { &mut *ctx }; + let input_data = <&[u8]>::from(unsafe { &*input_buffer }); + match rln.update_next_member(input_data) { + Ok(_) => true, + Err(_) => false, + } +} + +#[no_mangle] +pub extern "C" fn delete_member(ctx: *mut RLN, index: usize) -> bool { + let rln = unsafe { &mut *ctx }; + match rln.delete_member(index) { + Ok(_) => true, + Err(_) => false, + } +} + +#[no_mangle] +pub extern "C" fn generate_proof( + ctx: *const RLN, + input_buffer: *const Buffer, + auth: *const 
Auth, + output_buffer: *mut Buffer, +) -> bool { + let rln = unsafe { &*ctx }; + let auth = unsafe { &*auth }; + let input_data = <&[u8]>::from(unsafe { &*input_buffer }); + let mut output_data: Vec = Vec::new(); + + match rln.generate_proof(input_data, auth.get_secret(), auth.index, &mut output_data) { + Ok(proof_data) => proof_data, + Err(_) => return false, + }; + unsafe { *output_buffer = Buffer::from(&output_data[..]) }; + std::mem::forget(output_data); + true +} + +#[no_mangle] +pub extern "C" fn verify( + ctx: *const RLN, + proof_buffer: *mut Buffer, + result_ptr: *mut u32, +) -> bool { + let rln = unsafe { &*ctx }; + let proof_data = <&[u8]>::from(unsafe { &*proof_buffer }); + if match rln.verify(proof_data) { + Ok(verified) => verified, + Err(_) => return false, + } { + unsafe { *result_ptr = 0 }; + } else { + unsafe { *result_ptr = 1 }; + }; + true +} + +#[no_mangle] +pub extern "C" fn hash( + ctx: *const RLN, + inputs_buffer: *const Buffer, + input_len: usize, + output_buffer: *mut Buffer, +) -> bool { + let rln = unsafe { &*ctx }; + let input_data = <&[u8]>::from(unsafe { &*inputs_buffer }); + let mut output_data: Vec = Vec::new(); + match rln.hash(input_data, input_len, &mut output_data) { + Ok(output_data) => output_data, + Err(_) => return false, + }; + unsafe { *output_buffer = Buffer::from(&output_data[..]) }; + std::mem::forget(output_data); + true +} + +#[no_mangle] +pub extern "C" fn key_gen(ctx: *const RLN, keypair_buffer: *mut Buffer) -> bool { + let rln = unsafe { &*ctx }; + let mut output_data: Vec = Vec::new(); + match rln.key_gen(&mut output_data) { + Ok(_) => (), + Err(_) => return false, + } + unsafe { *keypair_buffer = Buffer::from(&output_data[..]) }; + std::mem::forget(output_data); + true +} + +use sapling_crypto::bellman::pairing::ff::{Field, PrimeField, PrimeFieldRepr}; +use sapling_crypto::bellman::pairing::Engine; +use std::io::{self, Read, Write}; + +#[cfg(test)] +mod tests { + use crate::{circuit::bench, public::RLNSignal}; + use crate::{poseidon::PoseidonParams, public}; + use bellman::pairing::bn256::{Bn256, Fr}; + use rand::{Rand, SeedableRng, XorShiftRng}; + + use super::*; + use std::mem::MaybeUninit; + + fn merkle_depth() -> usize { + 3usize + } + + fn index() -> usize { + 2usize + } + + fn rln_test() -> bench::RLNTest { + let merkle_depth = merkle_depth(); + let poseidon_params = PoseidonParams::::new(8, 55, 3, None, None, None); + let rln_test = bench::RLNTest::::new(merkle_depth, Some(poseidon_params)); + rln_test + } + + fn rln_pointer(circuit_parameters: Vec) -> MaybeUninit<*mut RLN> { + // restore this new curcuit with bindings + let merkle_depth = merkle_depth(); + let circuit_parameters_buffer = &Buffer::from(circuit_parameters.as_ref()); + let mut rln_pointer = MaybeUninit::<*mut RLN>::uninit(); + let success = new_circuit_from_params( + merkle_depth, + circuit_parameters_buffer, + rln_pointer.as_mut_ptr(), + ); + assert!(success, "cannot init rln instance"); + + rln_pointer + } + + #[test] + fn test_proof_ffi() { + let mut rng = XorShiftRng::from_seed([0x3dbe6258, 0x8d313d76, 0x3237db17, 0xe5bc0654]); + + // setup new rln instance + let rln_test = rln_test(); + let mut circuit_parameters: Vec = Vec::new(); + rln_test + .export_circuit_parameters(&mut circuit_parameters) + .unwrap(); + let rln_pointer = rln_pointer(circuit_parameters); + let rln_pointer = unsafe { &mut *rln_pointer.assume_init() }; + let index = index(); + + // generate new key pair + let mut keypair_buffer = MaybeUninit::::uninit(); + let success = key_gen(rln_pointer, 
keypair_buffer.as_mut_ptr()); + assert!(success, "key generation failed"); + let keypair_buffer = unsafe { keypair_buffer.assume_init() }; + let mut keypair_data = <&[u8]>::from(&keypair_buffer); + + // read keypair + let mut buf = ::Repr::default(); + buf.read_le(&mut keypair_data).unwrap(); + let id_key = Fr::from_repr(buf).unwrap(); + buf.read_le(&mut keypair_data).unwrap(); + let public_key = Fr::from_repr(buf).unwrap(); + + // insert members + for i in 0..index + 1 { + let new_member: Fr; + if i == index { + new_member = public_key; + } else { + new_member = Fr::rand(&mut rng); + } + let mut input_data: Vec = Vec::new(); + new_member.into_repr().write_le(&mut input_data).unwrap(); + let input_buffer = &Buffer::from(input_data.as_ref()); + + let success = update_next_member(rln_pointer, input_buffer); + assert!(success, "update with new pubkey failed"); + } + + let mut gen_proof_and_verify = |rln_pointer: *const RLN| { + // create signal + let epoch = Fr::rand(&mut rng); + let signal_hash = Fr::rand(&mut rng); + let inputs = RLNSignal:: { + epoch: epoch, + hash: signal_hash, + }; + + // serialize signal + let mut inputs_data: Vec = Vec::new(); + inputs.write(&mut inputs_data).unwrap(); + let inputs_buffer = &Buffer::from(inputs_data.as_ref()); + + // construct auth object + let mut secret_data: Vec = Vec::new(); + id_key.into_repr().write_le(&mut secret_data).unwrap(); + let secret_buffer = &Buffer::from(secret_data.as_ref()); + let auth = &Auth { + secret_buffer, + index, + } as *const Auth; + + // generate proof + let mut proof_buffer = MaybeUninit::::uninit(); + let success = + generate_proof(rln_pointer, inputs_buffer, auth, proof_buffer.as_mut_ptr()); + assert!(success, "proof generation failed"); + let mut proof_buffer = unsafe { proof_buffer.assume_init() }; + + // verify proof + let mut result = 0u32; + let result_ptr = &mut result as *mut u32; + let success = verify(rln_pointer, &mut proof_buffer, result_ptr); + assert!(success, "verification failed"); + assert_eq!(0, result); + }; + + gen_proof_and_verify(rln_pointer); + + // delete 0th member + let success = delete_member(rln_pointer, 0); + assert!(success, "deletion failed"); + + // gen proof & verify once more + gen_proof_and_verify(rln_pointer); + } + + #[test] + fn test_hash_ffi() { + let rln_test = rln_test(); + let mut circuit_parameters: Vec = Vec::new(); + rln_test + .export_circuit_parameters(&mut circuit_parameters) + .unwrap(); + let hasher = rln_test.hasher(); + let rln_pointer = rln_pointer(circuit_parameters); + let rln_pointer = unsafe { &*rln_pointer.assume_init() }; + let mut input_data: Vec = Vec::new(); + + let inputs: Vec = ["1", "2"] + .iter() + .map(|e| Fr::from_str(e).unwrap()) + .collect(); + inputs.iter().for_each(|e| { + e.into_repr().write_le(&mut input_data).unwrap(); + }); + let input_buffer = &Buffer::from(input_data.as_ref()); + + let input_len: usize = 2; + + let expected = hasher.hash(inputs); + let mut expected_data: Vec = Vec::new(); + expected.into_repr().write_le(&mut expected_data).unwrap(); + + let mut result_buffer = MaybeUninit::::uninit(); + + let success = hash( + rln_pointer, + input_buffer, + input_len, + result_buffer.as_mut_ptr(), + ); + + assert!(success, "hash ffi call failed"); + + let result_buffer = unsafe { result_buffer.assume_init() }; + let result_data = <&[u8]>::from(&result_buffer); + assert_eq!(expected_data.as_slice(), result_data); + } + + #[test] + fn test_keygen_ffi() { + let rln_test = rln_test(); + + let mut circuit_parameters: Vec = Vec::new(); + rln_test + 
.export_circuit_parameters(&mut circuit_parameters) + .unwrap(); + let hasher = rln_test.hasher(); + + let rln_pointer = rln_pointer(circuit_parameters); + let rln_pointer = unsafe { &*rln_pointer.assume_init() }; + + let mut keypair_buffer = MaybeUninit::::uninit(); + + let success = key_gen(rln_pointer, keypair_buffer.as_mut_ptr()); + assert!(success, "proof generation failed"); + + let keypair_buffer = unsafe { keypair_buffer.assume_init() }; + let mut keypair_data = <&[u8]>::from(&keypair_buffer); + + let mut buf = ::Repr::default(); + buf.read_le(&mut keypair_data).unwrap(); + let secret = Fr::from_repr(buf).unwrap(); + buf.read_le(&mut keypair_data).unwrap(); + let public = Fr::from_repr(buf).unwrap(); + let expected_public: Fr = hasher.hash(vec![secret]); + + assert_eq!(public, expected_public); + } + + #[test] + #[ignore] + fn test_parameters_from_file() { + use hex; + use std::fs; + let data = fs::read("./parameters.key").expect("Unable to read file"); + let merkle_depth = merkle_depth(); + let circuit_parameters_buffer = &Buffer::from(data.as_ref()); + let mut rln_pointer = MaybeUninit::<*mut RLN>::uninit(); + let success = new_circuit_from_params( + merkle_depth, + circuit_parameters_buffer, + rln_pointer.as_mut_ptr(), + ); + assert!(success, "creating failed"); + } +} diff --git a/src/lib.rs b/src/lib.rs new file mode 100644 index 000000000..266691eca --- /dev/null +++ b/src/lib.rs @@ -0,0 +1,14 @@ +#![allow(dead_code)] +#![allow(unused_imports)] + +pub mod circuit; +pub mod merkle; +pub mod poseidon; +pub mod public; +mod utils; + +#[cfg(not(target_arch = "wasm32"))] +pub mod ffi; + +#[cfg(target_arch = "wasm32")] +mod wasm; diff --git a/src/merkle.rs b/src/merkle.rs new file mode 100644 index 000000000..325508c36 --- /dev/null +++ b/src/merkle.rs @@ -0,0 +1,229 @@ +use crate::poseidon::{Poseidon as Hasher, PoseidonParams}; +use sapling_crypto::bellman::pairing::ff::{Field, PrimeField, PrimeFieldRepr}; +use sapling_crypto::bellman::pairing::Engine; +use std::io::{self, Error, ErrorKind}; +use std::{collections::HashMap, hash::Hash}; + +enum SyncMode { + Bootstarp, + Maintain, +} + +pub struct IncrementalMerkleTree +where + E: Engine, +{ + pub current_index: usize, + merkle_tree: MerkleTree, +} + +impl IncrementalMerkleTree +where + E: Engine, +{ + pub fn empty(hasher: Hasher, depth: usize) -> Self { + let mut zero: Vec = Vec::with_capacity(depth + 1); + zero.push(E::Fr::from_str("0").unwrap()); + for i in 0..depth { + zero.push(hasher.hash([zero[i]; 2].to_vec())); + } + zero.reverse(); + let merkle_tree = MerkleTree { + hasher: hasher, + zero: zero.clone(), + depth: depth, + nodes: HashMap::new(), + }; + let current_index: usize = 0; + IncrementalMerkleTree { + current_index, + merkle_tree, + } + } + + pub fn update_next(&mut self, leaf: E::Fr) -> io::Result<()> { + self.merkle_tree.update(self.current_index, leaf)?; + self.current_index += 1; + Ok(()) + } + + pub fn delete(&mut self, index: usize) -> io::Result<()> { + let zero = E::Fr::from_str("0").unwrap(); + self.merkle_tree.update(index, zero)?; + Ok(()) + } + + pub fn get_witness(&self, index: usize) -> io::Result> { + if index >= self.current_index { + return Err(io::Error::new( + io::ErrorKind::InvalidInput, + "index exceeds incremental index", + )); + } + self.merkle_tree.get_witness(index) + } + + pub fn hash(&self, inputs: Vec) -> E::Fr { + self.merkle_tree.hasher.hash(inputs) + } + + pub fn check_inclusion( + &self, + witness: Vec<(E::Fr, bool)>, + leaf_index: usize, + ) -> io::Result { + if leaf_index >= 
self.current_index { + return Err(io::Error::new( + io::ErrorKind::InvalidInput, + "index exceeds incremental index", + )); + } + self.merkle_tree.check_inclusion(witness, leaf_index) + } + + pub fn get_root(&self) -> E::Fr { + return self.merkle_tree.get_root(); + } +} + +pub struct MerkleTree +where + E: Engine, +{ + pub hasher: Hasher, + pub depth: usize, + zero: Vec, + nodes: HashMap<(usize, usize), E::Fr>, +} + +impl MerkleTree +where + E: Engine, +{ + pub fn empty(hasher: Hasher, depth: usize) -> Self { + let mut zero: Vec = Vec::with_capacity(depth + 1); + zero.push(E::Fr::from_str("0").unwrap()); + for i in 0..depth { + zero.push(hasher.hash([zero[i]; 2].to_vec())); + } + zero.reverse(); + MerkleTree { + hasher: hasher, + zero: zero.clone(), + depth: depth, + nodes: HashMap::new(), + } + } + + pub fn set_size(&self) -> usize { + 1 << self.depth + } + + pub fn update(&mut self, index: usize, leaf: E::Fr) -> io::Result<()> { + if index >= self.set_size() { + return Err(io::Error::new( + io::ErrorKind::InvalidInput, + "index exceeds set size", + )); + } + self.nodes.insert((self.depth, index), leaf); + self.recalculate_from(index); + Ok(()) + } + + pub fn check_inclusion(&self, witness: Vec<(E::Fr, bool)>, index: usize) -> io::Result { + if index >= self.set_size() { + return Err(io::Error::new( + io::ErrorKind::InvalidInput, + "index exceeds set size", + )); + } + let mut acc = self.get_node(self.depth, index); + + for w in witness.into_iter() { + if w.1 { + acc = self.hasher.hash(vec![acc, w.0]); + } else { + acc = self.hasher.hash(vec![w.0, acc]); + } + } + Ok(acc.eq(&self.get_root())) + } + + pub fn get_root(&self) -> E::Fr { + return self.get_node(0, 0); + } + + pub fn get_witness(&self, index: usize) -> io::Result> { + if index >= self.set_size() { + return Err(io::Error::new( + io::ErrorKind::InvalidInput, + "index exceeds set size", + )); + } + let mut witness = Vec::<(E::Fr, bool)>::with_capacity(self.depth); + let mut i = index; + let mut depth = self.depth; + loop { + i ^= 1; + witness.push((self.get_node(depth, i), (i & 1 == 1))); + i >>= 1; + depth -= 1; + if depth == 0 { + break; + } + } + assert_eq!(i, 0); + Ok(witness) + } + + fn get_node(&self, depth: usize, index: usize) -> E::Fr { + let node = *self + .nodes + .get(&(depth, index)) + .unwrap_or_else(|| &self.zero[depth]); + node + } + + fn get_leaf(&self, index: usize) -> E::Fr { + self.get_node(self.depth, index) + } + + fn hash_couple(&mut self, depth: usize, index: usize) -> E::Fr { + let b = index & !1; + self.hasher + .hash([self.get_node(depth, b), self.get_node(depth, b + 1)].to_vec()) + } + + fn recalculate_from(&mut self, index: usize) { + let mut i = index; + let mut depth = self.depth; + loop { + let h = self.hash_couple(depth, i); + i >>= 1; + depth -= 1; + self.nodes.insert((depth, i), h); + if depth == 0 { + break; + } + } + assert_eq!(depth, 0); + assert_eq!(i, 0); + } +} + +#[test] +fn test_merkle_set() { + let data: Vec = (0..8) + .map(|s| Fr::from_str(&format!("{}", s)).unwrap()) + .collect(); + use sapling_crypto::bellman::pairing::bn256::{Bn256, Fr, FrRepr}; + let params = PoseidonParams::::new(8, 55, 3, None, None, None); + let hasher = Hasher::new(params); + let mut set = MerkleTree::empty(hasher.clone(), 3); + let leaf_index = 6; + let leaf = hasher.hash(vec![data[0]]); + set.update(leaf_index, leaf).unwrap(); + let witness = set.get_witness(leaf_index).unwrap(); + assert!(set.check_inclusion(witness, leaf_index).unwrap()); +} diff --git a/src/poseidon.rs b/src/poseidon.rs new file mode 100644 
index 000000000..028fb91ad --- /dev/null +++ b/src/poseidon.rs @@ -0,0 +1,227 @@ +use blake2::{Blake2s, Digest}; + +use sapling_crypto::bellman::pairing::ff::{Field, PrimeField, PrimeFieldRepr}; +use sapling_crypto::bellman::pairing::Engine; + +#[derive(Clone)] +pub struct PoseidonParams { + rf: usize, + rp: usize, + t: usize, + round_constants: Vec, + mds_matrix: Vec, +} + +#[derive(Clone)] +pub struct Poseidon { + params: PoseidonParams, +} + +impl PoseidonParams { + pub fn new( + rf: usize, + rp: usize, + t: usize, + round_constants: Option>, + mds_matrix: Option>, + seed: Option>, + ) -> PoseidonParams { + let seed = match seed { + Some(seed) => seed, + None => b"".to_vec(), + }; + + let _round_constants = match round_constants { + Some(round_constants) => round_constants, + None => PoseidonParams::::generate_constants(b"drlnhdsc", seed.clone(), rf + rp), + }; + assert_eq!(rf + rp, _round_constants.len()); + + let _mds_matrix = match mds_matrix { + Some(mds_matrix) => mds_matrix, + None => PoseidonParams::::generate_mds_matrix(b"drlnhdsm", seed.clone(), t), + }; + PoseidonParams { + rf, + rp, + t, + round_constants: _round_constants, + mds_matrix: _mds_matrix, + } + } + + pub fn width(&self) -> usize { + return self.t; + } + + pub fn partial_round_len(&self) -> usize { + return self.rp; + } + + pub fn full_round_half_len(&self) -> usize { + return self.rf / 2; + } + + pub fn total_rounds(&self) -> usize { + return self.rf + self.rp; + } + + pub fn round_constant(&self, round: usize) -> E::Fr { + return self.round_constants[round]; + } + + pub fn mds_matrix_row(&self, i: usize) -> Vec { + let w = self.width(); + self.mds_matrix[i * w..(i + 1) * w].to_vec() + } + + pub fn mds_matrix(&self) -> Vec { + self.mds_matrix.clone() + } + + pub fn generate_mds_matrix(persona: &[u8; 8], seed: Vec, t: usize) -> Vec { + let v: Vec = PoseidonParams::::generate_constants(persona, seed, t * 2); + let mut matrix: Vec = Vec::with_capacity(t * t); + for i in 0..t { + for j in 0..t { + let mut tmp = v[i]; + tmp.add_assign(&v[t + j]); + let entry = tmp.inverse().unwrap(); + matrix.insert((i * t) + j, entry); + } + } + matrix + } + + pub fn generate_constants(persona: &[u8; 8], seed: Vec, len: usize) -> Vec { + let mut constants: Vec = Vec::new(); + let mut source = seed.clone(); + loop { + let mut hasher = Blake2s::new(); + hasher.input(persona); + hasher.input(source); + source = hasher.result().to_vec(); + let mut candidate_repr = ::Repr::default(); + candidate_repr.read_le(&source[..]).unwrap(); + if let Ok(candidate) = E::Fr::from_repr(candidate_repr) { + constants.push(candidate); + if constants.len() == len { + break; + } + } + } + constants + } +} + +impl Poseidon { + pub fn new(params: PoseidonParams) -> Poseidon { + Poseidon { params } + } + + pub fn hash(&self, inputs: Vec) -> E::Fr { + let mut state = inputs.clone(); + state.resize(self.t(), E::Fr::zero()); + let mut round_counter: usize = 0; + loop { + self.round(&mut state, round_counter); + round_counter += 1; + if round_counter == self.params.total_rounds() { + break; + } + } + state[0] + } + + fn t(&self) -> usize { + self.params.t + } + + fn round(&self, state: &mut Vec, round: usize) { + let a1 = self.params.full_round_half_len(); + let a2 = a1 + self.params.partial_round_len(); + let a3 = self.params.total_rounds(); + if round < a1 { + self.full_round(state, round); + } else if round >= a1 && round < a2 { + self.partial_round(state, round); + } else if round >= a2 && round < a3 { + if round == a3 - 1 { + self.full_round_last(state); + } 
else { + self.full_round(state, round); + } + } else { + panic!("should not be here") + } + } + + fn full_round(&self, state: &mut Vec, round: usize) { + self.add_round_constants(state, round); + self.apply_quintic_sbox(state, true); + self.mul_mds_matrix(state); + } + + fn full_round_last(&self, state: &mut Vec) { + let last_round = self.params.total_rounds() - 1; + self.add_round_constants(state, last_round); + self.apply_quintic_sbox(state, true); + } + + fn partial_round(&self, state: &mut Vec, round: usize) { + self.add_round_constants(state, round); + self.apply_quintic_sbox(state, false); + self.mul_mds_matrix(state); + } + + fn add_round_constants(&self, state: &mut Vec, round: usize) { + for (_, b) in state.iter_mut().enumerate() { + let c = self.params.round_constants[round]; + b.add_assign(&c); + } + } + + fn apply_quintic_sbox(&self, state: &mut Vec, full: bool) { + for s in state.iter_mut() { + let mut b = s.clone(); + b.square(); + b.square(); + s.mul_assign(&b); + if !full { + break; + } + } + } + + fn mul_mds_matrix(&self, state: &mut Vec) { + let w = self.params.t; + let mut new_state = vec![E::Fr::zero(); w]; + for (i, ns) in new_state.iter_mut().enumerate() { + for (j, s) in state.iter().enumerate() { + let mut tmp = s.clone(); + tmp.mul_assign(&self.params.mds_matrix[i * w + j]); + ns.add_assign(&tmp); + } + } + for (i, ns) in new_state.iter_mut().enumerate() { + state[i].clone_from(ns); + } + } +} + +#[test] +fn test_poseidon_hash() { + use sapling_crypto::bellman::pairing::bn256; + use sapling_crypto::bellman::pairing::bn256::{Bn256, Fr}; + let params = PoseidonParams::::new(8, 55, 3, None, None, None); + let hasher = Poseidon::::new(params); + let input1: Vec = ["0"].iter().map(|e| Fr::from_str(e).unwrap()).collect(); + let r1: Fr = hasher.hash(input1.to_vec()); + let input2: Vec = ["0", "0"] + .iter() + .map(|e| Fr::from_str(e).unwrap()) + .collect(); + let r2: Fr = hasher.hash(input2.to_vec()); + // println!("{:?}", r1); + assert_eq!(r1, r2, "just to see if internal state resets"); +} diff --git a/src/public.rs b/src/public.rs new file mode 100644 index 000000000..514cae220 --- /dev/null +++ b/src/public.rs @@ -0,0 +1,254 @@ +use crate::circuit::rln::{RLNCircuit, RLNInputs}; +use crate::merkle::MerkleTree; +use crate::poseidon::{Poseidon as PoseidonHasher, PoseidonParams}; +use crate::utils::{read_fr, read_uncompressed_proof, write_uncompressed_proof}; +use crate::{circuit::poseidon::PoseidonCircuit, merkle::IncrementalMerkleTree}; +use bellman::groth16::generate_random_parameters; +use bellman::groth16::{create_proof, prepare_verifying_key, verify_proof}; +use bellman::groth16::{create_random_proof, Parameters, Proof}; +use bellman::pairing::ff::{Field, PrimeField, PrimeFieldRepr}; +use bellman::pairing::{CurveAffine, EncodedPoint, Engine}; +use bellman::{Circuit, ConstraintSystem, SynthesisError}; +use rand::{Rand, SeedableRng, XorShiftRng}; +use std::{ + io::{self, Error, ErrorKind, Read, Write}, + ptr::null, +}; +// Rate Limit Nullifier + +#[derive(Clone)] +pub struct RLNSignal +where + E: Engine, +{ + pub epoch: E::Fr, + pub hash: E::Fr, +} + +impl RLNSignal +where + E: Engine, +{ + pub fn read(mut reader: R) -> io::Result> { + let mut buf = ::Repr::default(); + + buf.read_le(&mut reader)?; + let hash = + E::Fr::from_repr(buf).map_err(|e| io::Error::new(io::ErrorKind::InvalidData, e))?; + buf.read_le(&mut reader)?; + let epoch = + E::Fr::from_repr(buf).map_err(|e| io::Error::new(io::ErrorKind::InvalidData, e))?; + + Ok(RLNSignal { epoch, hash }) + } + + pub fn 
write(&self, mut writer: W) -> io::Result<()> { + self.epoch.into_repr().write_le(&mut writer).unwrap(); + self.hash.into_repr().write_le(&mut writer).unwrap(); + Ok(()) + } +} + +pub struct RLN +where + E: Engine, +{ + circuit_parameters: Parameters, + poseidon_params: PoseidonParams, + tree: IncrementalMerkleTree, +} + +impl RLN +where + E: Engine, +{ + fn default_poseidon_params() -> PoseidonParams { + PoseidonParams::::new(8, 55, 3, None, None, None) + } + + fn new_circuit(merkle_depth: usize, poseidon_params: PoseidonParams) -> Parameters { + let mut rng = XorShiftRng::from_seed([0x3dbe6258, 0x8d313d76, 0x3237db17, 0xe5bc0654]); + let inputs = RLNInputs::::empty(merkle_depth); + let circuit = RLNCircuit:: { + inputs, + hasher: PoseidonCircuit::new(poseidon_params.clone()), + }; + generate_random_parameters(circuit, &mut rng).unwrap() + } + + fn new_with_params( + merkle_depth: usize, + circuit_parameters: Parameters, + poseidon_params: PoseidonParams, + ) -> RLN { + let hasher = PoseidonHasher::new(poseidon_params.clone()); + let tree = IncrementalMerkleTree::empty(hasher, merkle_depth); + RLN { + circuit_parameters, + poseidon_params, + tree, + } + } + + pub fn new(merkle_depth: usize, poseidon_params: Option>) -> RLN { + let poseidon_params = match poseidon_params { + Some(params) => params, + None => Self::default_poseidon_params(), + }; + let circuit_parameters = Self::new_circuit(merkle_depth, poseidon_params.clone()); + Self::new_with_params(merkle_depth, circuit_parameters, poseidon_params) + } + + pub fn new_with_raw_params( + merkle_depth: usize, + raw_circuit_parameters: R, + poseidon_params: Option>, + ) -> io::Result> { + let circuit_parameters = Parameters::::read(raw_circuit_parameters, true)?; + let poseidon_params = match poseidon_params { + Some(params) => params, + None => Self::default_poseidon_params(), + }; + Ok(Self::new_with_params( + merkle_depth, + circuit_parameters, + poseidon_params, + )) + } + + //// inserts new member with given public key + /// * `public_key_data` is a 32 scalar field element in 32 bytes + pub fn update_next_member(&mut self, public_key_data: R) -> io::Result<()> { + let mut buf = ::Repr::default(); + buf.read_le(public_key_data)?; + let leaf = + E::Fr::from_repr(buf).map_err(|e| io::Error::new(io::ErrorKind::InvalidData, e))?; + self.tree.update_next(leaf)?; + Ok(()) + } + + //// deletes member with given index + pub fn delete_member(&mut self, index: usize) -> io::Result<()> { + self.tree.delete(index)?; + Ok(()) + } + + /// hashes scalar field elements + /// * expect numbers of scalar field element in 32 bytes in `input_data` + /// * expect `result_data` is a scalar field element in 32 bytes + /// * `n` is number of scalar field elemends stored in `input` + pub fn hash( + &self, + input_data: R, + n: usize, + mut result_data: W, + ) -> io::Result<()> { + let hasher = self.hasher(); + let input: Vec = read_fr::(input_data, n)?; + let result = hasher.hash(input); + result.into_repr().write_le(&mut result_data)?; + Ok(()) + } + + /// given public inputs and autharization data generates public inputs and proof + /// * expect `input` serialized as |epoch<32>|signal_hash<32>| + /// * expect `id_key_data` is a scalar field element in 32 bytes + /// * `output_data` is proof data serialized as |proof<416>|root<32>|epoch<32>|share_x<32>|share_y<32>|nullifier<32>| + pub fn generate_proof( + &self, + input_data: R, + id_key_data: R, + member_index: usize, + mut output_data: W, + ) -> io::Result<()> { + use rand::chacha::ChaChaRng; + use 
rand::SeedableRng; + let mut rng = ChaChaRng::new_unseeded(); + let signal = RLNSignal::::read(input_data)?; + // prepare inputs + + let hasher = self.hasher(); + let share_x = signal.hash.clone(); + + let id_key: E::Fr = read_fr::(id_key_data, 1)?[0]; + + // line equation + let a_0 = id_key.clone(); + let a_1: E::Fr = hasher.hash(vec![a_0, signal.epoch]); + // evaluate line equation + let mut share_y = a_1.clone(); + share_y.mul_assign(&share_x); + share_y.add_assign(&a_0); + let nullifier = hasher.hash(vec![a_1]); + + let root = self.tree.get_root(); + // TODO: check id key here + let auth_path = self.tree.get_witness(member_index)?; + + let inputs = RLNInputs:: { + share_x: Some(share_x), + share_y: Some(share_y), + epoch: Some(signal.epoch), + nullifier: Some(nullifier), + root: Some(root), + id_key: Some(id_key), + auth_path: auth_path.into_iter().map(|w| Some(w)).collect(), + }; + + let circuit = RLNCircuit { + inputs: inputs.clone(), + hasher: PoseidonCircuit::new(self.poseidon_params.clone()), + }; + + // TOOD: handle create proof error + let proof = create_random_proof(circuit, &self.circuit_parameters, &mut rng).unwrap(); + write_uncompressed_proof(proof.clone(), &mut output_data)?; + root.into_repr().write_le(&mut output_data)?; + signal.epoch.into_repr().write_le(&mut output_data)?; + share_x.into_repr().write_le(&mut output_data)?; + share_y.into_repr().write_le(&mut output_data)?; + nullifier.into_repr().write_le(&mut output_data)?; + + Ok(()) + } + + /// given proof and public data verifies the signal + /// * expect `proof_data` is serialized as: + /// |proof<416>|root<32>|epoch<32>|share_x<32>|share_y<32>|nullifier<32>| + pub fn verify(&self, mut proof_data: R) -> io::Result { + let proof = read_uncompressed_proof(&mut proof_data)?; + let public_inputs = RLNInputs::::read_public_inputs(&mut proof_data)?; + // TODO: root must be checked here + let verifing_key = prepare_verifying_key(&self.circuit_parameters.vk); + let success = verify_proof(&verifing_key, &proof, &public_inputs).unwrap(); + Ok(success) + } + + /// generates public private key pair + /// * `key_pair_data` is seralized as |secret<32>|public<32>| + pub fn key_gen(&self, mut key_pair_data: W) -> io::Result<()> { + let mut rng = XorShiftRng::from_seed([0x3dbe6258, 0x8d313d76, 0x3237db17, 0xe5bc0654]); + let hasher = self.hasher(); + let secret = E::Fr::rand(&mut rng); + let public: E::Fr = hasher.hash(vec![secret.clone()]); + secret.into_repr().write_le(&mut key_pair_data)?; + public.into_repr().write_le(&mut key_pair_data)?; + Ok(()) + } + + pub fn export_verifier_key(&self, w: W) -> io::Result<()> { + self.circuit_parameters.vk.write(w) + } + + pub fn export_circuit_parameters(&self, w: W) -> io::Result<()> { + self.circuit_parameters.write(w) + } + + pub fn hasher(&self) -> PoseidonHasher { + PoseidonHasher::new(self.poseidon_params.clone()) + } + + pub fn poseidon_params(&self) -> PoseidonParams { + self.poseidon_params.clone() + } +} diff --git a/src/utils.rs b/src/utils.rs new file mode 100644 index 000000000..4fd10999d --- /dev/null +++ b/src/utils.rs @@ -0,0 +1,80 @@ +use bellman::groth16::Proof; +use bellman::pairing::ff::{Field, PrimeField, PrimeFieldRepr}; +use bellman::pairing::{CurveAffine, EncodedPoint, Engine}; + +use rand::{Rand, SeedableRng, XorShiftRng}; +use std::io::{self, Error, ErrorKind, Read, Write}; + +pub fn read_fr(mut reader: R, n: usize) -> io::Result> { + let mut out: Vec = Vec::new(); + let mut buf = ::Repr::default(); + for _ in 0..n { + buf.read_le(&mut reader)?; + let input = + 
E::Fr::from_repr(buf).map_err(|e| io::Error::new(io::ErrorKind::InvalidData, e))?; + out.push(input); + } + Ok(out) +} + +pub fn write_uncompressed_proof( + proof: Proof, + mut writer: W, +) -> io::Result<()> { + writer.write_all(proof.a.into_uncompressed().as_ref())?; + writer.write_all(proof.b.into_uncompressed().as_ref())?; + writer.write_all(proof.c.into_uncompressed().as_ref())?; + Ok(()) +} + +pub fn read_uncompressed_proof(mut reader: R) -> io::Result> { + let mut g1_repr = ::Uncompressed::empty(); + let mut g2_repr = ::Uncompressed::empty(); + + reader.read_exact(g1_repr.as_mut())?; + let a = g1_repr + .into_affine() + .map_err(|e| io::Error::new(io::ErrorKind::InvalidData, e)) + .and_then(|e| { + if e.is_zero() { + Err(io::Error::new( + io::ErrorKind::InvalidData, + "point at infinity", + )) + } else { + Ok(e) + } + })?; + + reader.read_exact(g2_repr.as_mut())?; + let b = g2_repr + .into_affine() + .map_err(|e| io::Error::new(io::ErrorKind::InvalidData, e)) + .and_then(|e| { + if e.is_zero() { + Err(io::Error::new( + io::ErrorKind::InvalidData, + "point at infinity", + )) + } else { + Ok(e) + } + })?; + + reader.read_exact(g1_repr.as_mut())?; + let c = g1_repr + .into_affine() + .map_err(|e| io::Error::new(io::ErrorKind::InvalidData, e)) + .and_then(|e| { + if e.is_zero() { + Err(io::Error::new( + io::ErrorKind::InvalidData, + "point at infinity", + )) + } else { + Ok(e) + } + })?; + + Ok(Proof { a, b, c }) +} diff --git a/src/wasm.rs b/src/wasm.rs new file mode 100644 index 000000000..4123d9742 --- /dev/null +++ b/src/wasm.rs @@ -0,0 +1,132 @@ +use crate::public::RLN; + +use std::io::{self, Error, ErrorKind, Read, Write}; +use wasm_bindgen::prelude::*; + +use js_sys::Array; +use sapling_crypto::bellman::pairing::bn256::{Bn256, Fr}; + +pub fn set_panic_hook() { + // When the `console_error_panic_hook` feature is enabled, we can call the + // `set_panic_hook` function at least once during initialization, and then + // we will get better error messages if our code ever panics. 
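`write_uncompressed_proof` and `read_uncompressed_proof` above fix a simple wire format for Groth16 proofs: the uncompressed `a` (G1), `b` (G2) and `c` (G1) points written back to back, with the point at infinity rejected on decode. A short round-trip sketch, assuming the curve is `Bn256` and that the code sits next to those helpers in `src/utils.rs`; the `proof_roundtrip` function itself is illustrative, not part of this patch.

```rust
// Illustrative sketch only; uses the helpers defined above in src/utils.rs.
use bellman::groth16::Proof;
use bellman::pairing::bn256::Bn256;
use std::io;

fn proof_roundtrip(proof: &Proof<Bn256>) -> io::Result<bool> {
    let mut buf: Vec<u8> = Vec::new();
    // a (G1) | b (G2) | c (G1), all uncompressed, back to back.
    write_uncompressed_proof(proof.clone(), &mut buf)?;
    let decoded: Proof<Bn256> = read_uncompressed_proof(buf.as_slice())?;
    Ok(decoded.a == proof.a && decoded.b == proof.b && decoded.c == proof.c)
}
```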
+ // + // For more details see + // https://github.com/rustwasm/console_error_panic_hook#readme + // #[cfg(feature = "console_error_panic_hook")] + console_error_panic_hook::set_once(); +} + +#[wasm_bindgen] +pub struct RLNWasm { + api: RLN, +} + +#[wasm_bindgen] +impl RLNWasm { + #[wasm_bindgen] + pub fn new(merkle_depth: usize) -> RLNWasm { + set_panic_hook(); + RLNWasm { + api: RLN::::new(merkle_depth, None), + } + } + + #[wasm_bindgen] + pub fn new_with_raw_params( + merkle_depth: usize, + raw_circuit_parameters: &[u8], + ) -> Result { + set_panic_hook(); + let api = match RLN::new_with_raw_params(merkle_depth, raw_circuit_parameters, None) { + Ok(api) => api, + Err(e) => return Err(e.to_string().into()), + }; + Ok(RLNWasm { api }) + } + + #[wasm_bindgen] + pub fn generate_proof(&self, input: &[u8]) -> Result, JsValue> { + let proof = match self.api.generate_proof(input) { + Ok(proof) => proof, + Err(e) => return Err(e.to_string().into()), + }; + Ok(proof) + } + + #[wasm_bindgen] + pub fn verify( + &self, + uncompresed_proof: &[u8], + raw_public_inputs: &[u8], + ) -> Result { + let success = match self.api.verify(uncompresed_proof, raw_public_inputs) { + Ok(success) => success, + Err(e) => return Err(e.to_string().into()), + }; + Ok(success) + } + + #[wasm_bindgen] + pub fn export_verifier_key(&self) -> Result, JsValue> { + let mut output: Vec = Vec::new(); + match self.api.export_verifier_key(&mut output) { + Ok(_) => (), + Err(e) => return Err(e.to_string().into()), + }; + Ok(output) + } + + #[wasm_bindgen] + pub fn export_circuit_parameters(&self) -> Result, JsValue> { + let mut output: Vec = Vec::new(); + match self.api.export_circuit_parameters(&mut output) { + Ok(_) => (), + Err(e) => return Err(e.to_string().into()), + }; + Ok(output) + } +} + +#[cfg(test)] +mod test { + + use crate::circuit::bench; + use wasm_bindgen_test::*; + + use crate::circuit::poseidon::PoseidonCircuit; + use crate::circuit::rln::{RLNCircuit, RLNInputs}; + use crate::merkle::MerkleTree; + use crate::poseidon::{Poseidon as PoseidonHasher, PoseidonParams}; + use bellman::groth16::{generate_random_parameters, Parameters, Proof}; + use bellman::pairing::bn256::{Bn256, Fr}; + use bellman::pairing::ff::{Field, PrimeField, PrimeFieldRepr}; + use rand::{Rand, SeedableRng, XorShiftRng}; + + #[wasm_bindgen_test] + fn test_rln_wasm() { + let merkle_depth = 3usize; + let poseidon_params = PoseidonParams::::new(8, 55, 3, None, None, None); + let rln_test = bench::RLNTest::::new(merkle_depth, Some(poseidon_params)); + + let rln_wasm = super::RLNWasm::new(merkle_depth); + + let mut raw_inputs: Vec = Vec::new(); + let inputs = rln_test.valid_inputs(); + inputs.write(&mut raw_inputs); + + // let now = Instant::now(); + let proof = rln_wasm.generate_proof(raw_inputs.as_slice()).unwrap(); + // let prover_time = now.elapsed().as_millis() as f64 / 1000.0; + + let mut raw_public_inputs: Vec = Vec::new(); + inputs.write_public_inputs(&mut raw_public_inputs); + + assert_eq!( + rln_wasm + .verify(proof.as_slice(), raw_public_inputs.as_slice()) + .unwrap(), + true + ); + } +} diff --git a/tests/all_tests_v2.nim b/tests/all_tests_v2.nim index a4cdaa379..a63892c82 100644 --- a/tests/all_tests_v2.nim +++ b/tests/all_tests_v2.nim @@ -12,7 +12,8 @@ import ./v2/test_jsonrpc_waku, ./v2/test_peer_manager, ./v2/test_web3, # TODO remove it when rln-relay tests get finalized - ./v2/test_waku_rln_relay + ./v2/test_waku_rln_relay, + ./v2/test_rln_relay_wrappers # TODO Only enable this once swap module is integrated more nicely as a dependency, 
i.e. as submodule with CI etc # For PoC execute it manually and run separate module here: https://github.com/vacp2p/swap-contracts-module diff --git a/waku.nims b/waku.nims new file mode 100644 index 000000000..bbe51fd89 --- /dev/null +++ b/waku.nims @@ -0,0 +1,88 @@ +mode = ScriptMode.Verbose + +### Package +version = "0.1.0" +author = "Status Research & Development GmbH" +description = "Waku, Private P2P Messaging for Resource-Restricted Devices" +license = "MIT or Apache License 2.0" +srcDir = "src" +#bin = @["build/waku"] + +### Dependencies +requires "nim >= 1.2.0", + "chronicles", + "confutils", + "chronos", + "eth", + "json_rpc", + "libbacktrace", + "nimcrypto", + "stew", + "stint", + "metrics", + "libp2p", # Only for Waku v2 + "web3" + +### Helper functions +proc buildBinary(name: string, srcDir = "./", params = "", lang = "c") = + if not dirExists "build": + mkDir "build" + # allow something like "nim nimbus --verbosity:0 --hints:off nimbus.nims" + var extra_params = params + for i in 2.. NIM_PARAMS="-d:chronicles_log_level=INFO" make test2` + # I expect compiler flag to be overridden, however it stays with whatever is + # specified here. + buildBinary name, "tests/", "-d:chronicles_log_level=DEBUG" + #buildBinary name, "tests/", "-d:chronicles_log_level=ERROR" + exec "build/" & name + +### Waku v1 tasks +task wakunode1, "Build Waku v1 cli node": + buildBinary "wakunode1", "waku/v1/node/", "-d:chronicles_log_level=TRACE" + +task sim1, "Build Waku v1 simulation tools": + buildBinary "quicksim", "waku/v1/node/", "-d:chronicles_log_level=INFO" + buildBinary "start_network", "waku/v1/node/", "-d:chronicles_log_level=DEBUG" + +task example1, "Build Waku v1 example": + buildBinary "example", "examples/v1/", "-d:chronicles_log_level=DEBUG" + +task test1, "Build & run Waku v1 tests": + test "all_tests_v1" + +### Waku v2 tasks +task wakunode2, "Build Waku v2 (experimental) cli node": + buildBinary "wakunode2", "waku/v2/node/", "-d:chronicles_log_level=TRACE" + +task sim2, "Build Waku v2 simulation tools": + buildBinary "quicksim2", "waku/v2/node/", "-d:chronicles_log_level=DEBUG" + buildBinary "start_network2", "waku/v2/node/", "-d:chronicles_log_level=TRACE" + +task example2, "Build Waku v2 example": + let name = "basic2" + buildBinary name, "examples/v2/", "-d:chronicles_log_level=DEBUG" + +task test2, "Build & run Waku v2 tests": + test "all_tests_v2" + +task scripts2, "Build Waku v2 scripts": + buildBinary "rpc_publish", "waku/v2/node/scripts/", "-d:chronicles_log_level=DEBUG" + buildBinary "rpc_subscribe", "waku/v2/node/scripts/", "-d:chronicles_log_level=DEBUG" + buildBinary "rpc_subscribe_filter", "waku/v2/node/scripts/", "-d:chronicles_log_level=DEBUG" + buildBinary "rpc_query", "waku/v2/node/scripts/", "-d:chronicles_log_level=DEBUG" + buildBinary "rpc_info", "waku/v2/node/scripts/", "-d:chronicles_log_level=DEBUG" + +task chat2, "Build example Waku v2 chat usage": + let name = "chat2" + # NOTE For debugging, set debug level. For chat usage we want minimal log + # output to STDOUT. Can be fixed by redirecting logs to file (e.g.) 
+ #buildBinary name, "examples/v2/", "-d:chronicles_log_level=WARN" + buildBinary name, "examples/v2/", "-d:chronicles_log_level=DEBUG" + +task bridge, "Build Waku v1 - v2 bridge": + buildBinary "wakubridge", "waku/common/", "-d:chronicles_log_level=DEBUG" diff --git a/waku/v1/protocol/waku_protocol.nim.generated.nim b/waku/v1/protocol/waku_protocol.nim.generated.nim new file mode 100644 index 000000000..3a75661d8 --- /dev/null +++ b/waku/v1/protocol/waku_protocol.nim.generated.nim @@ -0,0 +1,608 @@ + +## Generated at line 228 +type + Waku* = object +template State*(PROTO: type Waku): type = + ref[WakuPeer:ObjectType] + +template NetworkState*(PROTO: type Waku): type = + ref[WakuNetwork:ObjectType] + +type + statusObj* = object + options*: StatusOptions + +template status*(PROTO: type Waku): type = + statusObj + +template msgProtocol*(MSG: type statusObj): type = + Waku + +template RecType*(MSG: type statusObj): untyped = + statusObj + +template msgId*(MSG: type statusObj): int = + 0 + +type + messagesObj* = object + envelopes*: seq[Envelope] + +template messages*(PROTO: type Waku): type = + messagesObj + +template msgProtocol*(MSG: type messagesObj): type = + Waku + +template RecType*(MSG: type messagesObj): untyped = + messagesObj + +template msgId*(MSG: type messagesObj): int = + 1 + +type + statusOptionsObj* = object + options*: StatusOptions + +template statusOptions*(PROTO: type Waku): type = + statusOptionsObj + +template msgProtocol*(MSG: type statusOptionsObj): type = + Waku + +template RecType*(MSG: type statusOptionsObj): untyped = + statusOptionsObj + +template msgId*(MSG: type statusOptionsObj): int = + 22 + +type + p2pRequestObj* = object + envelope*: Envelope + +template p2pRequest*(PROTO: type Waku): type = + p2pRequestObj + +template msgProtocol*(MSG: type p2pRequestObj): type = + Waku + +template RecType*(MSG: type p2pRequestObj): untyped = + p2pRequestObj + +template msgId*(MSG: type p2pRequestObj): int = + 126 + +type + p2pMessageObj* = object + envelopes*: seq[Envelope] + +template p2pMessage*(PROTO: type Waku): type = + p2pMessageObj + +template msgProtocol*(MSG: type p2pMessageObj): type = + Waku + +template RecType*(MSG: type p2pMessageObj): untyped = + p2pMessageObj + +template msgId*(MSG: type p2pMessageObj): int = + 127 + +type + batchAcknowledgedObj* = object + +template batchAcknowledged*(PROTO: type Waku): type = + batchAcknowledgedObj + +template msgProtocol*(MSG: type batchAcknowledgedObj): type = + Waku + +template RecType*(MSG: type batchAcknowledgedObj): untyped = + batchAcknowledgedObj + +template msgId*(MSG: type batchAcknowledgedObj): int = + 11 + +type + messageResponseObj* = object + +template messageResponse*(PROTO: type Waku): type = + messageResponseObj + +template msgProtocol*(MSG: type messageResponseObj): type = + Waku + +template RecType*(MSG: type messageResponseObj): untyped = + messageResponseObj + +template msgId*(MSG: type messageResponseObj): int = + 12 + +type + p2pSyncResponseObj* = object + +template p2pSyncResponse*(PROTO: type Waku): type = + p2pSyncResponseObj + +template msgProtocol*(MSG: type p2pSyncResponseObj): type = + Waku + +template RecType*(MSG: type p2pSyncResponseObj): untyped = + p2pSyncResponseObj + +template msgId*(MSG: type p2pSyncResponseObj): int = + 124 + +type + p2pSyncRequestObj* = object + +template p2pSyncRequest*(PROTO: type Waku): type = + p2pSyncRequestObj + +template msgProtocol*(MSG: type p2pSyncRequestObj): type = + Waku + +template RecType*(MSG: type p2pSyncRequestObj): untyped = + p2pSyncRequestObj + 
+template msgId*(MSG: type p2pSyncRequestObj): int = + 123 + +type + p2pRequestCompleteObj* = object + requestId*: Hash + lastEnvelopeHash*: Hash + cursor*: seq[byte] + +template p2pRequestComplete*(PROTO: type Waku): type = + p2pRequestCompleteObj + +template msgProtocol*(MSG: type p2pRequestCompleteObj): type = + Waku + +template RecType*(MSG: type p2pRequestCompleteObj): untyped = + p2pRequestCompleteObj + +template msgId*(MSG: type p2pRequestCompleteObj): int = + 125 + +var WakuProtocolObj = initProtocol("waku", 1, createPeerState[Peer, + ref[WakuPeer:ObjectType]], createNetworkState[EthereumNode, + ref[WakuNetwork:ObjectType]]) +var WakuProtocol = addr WakuProtocolObj +template protocolInfo*(PROTO: type Waku): auto = + WakuProtocol + +proc statusRawSender(peerOrResponder: Peer; options: StatusOptions; + timeout: Duration = milliseconds(10000'i64)): Future[void] {. + gcsafe.} = + let peer = getPeer(peerOrResponder) + var writer = initRlpWriter() + const + perProtocolMsgId = 0 + let perPeerMsgId = perPeerMsgIdImpl(peer, WakuProtocol, 0) + append(writer, perPeerMsgId) + append(writer, options) + let msgBytes = finish(writer) + return sendMsg(peer, msgBytes) + +template status*(peer: Peer; options: StatusOptions; + timeout: Duration = milliseconds(10000'i64)): Future[statusObj] = + let peer_175950056 = peer + let sendingFuture`gensym175950057 = statusRawSender(peer, options) + handshakeImpl(peer_175950056, sendingFuture`gensym175950057, + nextMsg(peer_175950056, statusObj), timeout) + +proc messages*(peerOrResponder: Peer; envelopes: openarray[Envelope]): Future[void] {. + gcsafe.} = + let peer = getPeer(peerOrResponder) + var writer = initRlpWriter() + const + perProtocolMsgId = 1 + let perPeerMsgId = perPeerMsgIdImpl(peer, WakuProtocol, 1) + append(writer, perPeerMsgId) + append(writer, envelopes) + let msgBytes = finish(writer) + return sendMsg(peer, msgBytes) + +proc statusOptions*(peerOrResponder: Peer; options: StatusOptions): Future[void] {. + gcsafe.} = + let peer = getPeer(peerOrResponder) + var writer = initRlpWriter() + const + perProtocolMsgId = 22 + let perPeerMsgId = perPeerMsgIdImpl(peer, WakuProtocol, 22) + append(writer, perPeerMsgId) + append(writer, options) + let msgBytes = finish(writer) + return sendMsg(peer, msgBytes) + +proc p2pRequest*(peerOrResponder: Peer; envelope: Envelope): Future[void] {.gcsafe.} = + let peer = getPeer(peerOrResponder) + var writer = initRlpWriter() + const + perProtocolMsgId = 126 + let perPeerMsgId = perPeerMsgIdImpl(peer, WakuProtocol, 126) + append(writer, perPeerMsgId) + append(writer, envelope) + let msgBytes = finish(writer) + return sendMsg(peer, msgBytes) + +proc p2pMessage*(peerOrResponder: Peer; envelopes: openarray[Envelope]): Future[void] {. 
+ gcsafe.} = + let peer = getPeer(peerOrResponder) + var writer = initRlpWriter() + const + perProtocolMsgId = 127 + let perPeerMsgId = perPeerMsgIdImpl(peer, WakuProtocol, 127) + append(writer, perPeerMsgId) + append(writer, envelopes) + let msgBytes = finish(writer) + return sendMsg(peer, msgBytes) + +proc batchAcknowledged*(peerOrResponder: Peer): Future[void] {.gcsafe.} = + let peer = getPeer(peerOrResponder) + var writer = initRlpWriter() + const + perProtocolMsgId = 11 + let perPeerMsgId = perPeerMsgIdImpl(peer, WakuProtocol, 11) + append(writer, perPeerMsgId) + let msgBytes = finish(writer) + return sendMsg(peer, msgBytes) + +proc messageResponse*(peerOrResponder: Peer): Future[void] {.gcsafe.} = + let peer = getPeer(peerOrResponder) + var writer = initRlpWriter() + const + perProtocolMsgId = 12 + let perPeerMsgId = perPeerMsgIdImpl(peer, WakuProtocol, 12) + append(writer, perPeerMsgId) + let msgBytes = finish(writer) + return sendMsg(peer, msgBytes) + +proc p2pSyncResponse*(peerOrResponder: ResponderWithId[p2pSyncResponseObj]): Future[ + void] {.gcsafe.} = + let peer = getPeer(peerOrResponder) + var writer = initRlpWriter() + const + perProtocolMsgId = 124 + let perPeerMsgId = perPeerMsgIdImpl(peer, WakuProtocol, 124) + append(writer, perPeerMsgId) + append(writer, peerOrResponder.reqId) + let msgBytes = finish(writer) + return sendMsg(peer, msgBytes) + +template send*(r`gensym175950072: ResponderWithId[p2pSyncResponseObj]; + args`gensym175950073: varargs[untyped]): auto = + p2pSyncResponse(r`gensym175950072, args`gensym175950073) + +proc p2pSyncRequest*(peerOrResponder: Peer; + timeout: Duration = milliseconds(10000'i64)): Future[ + Option[p2pSyncResponseObj]] {.gcsafe.} = + let peer = getPeer(peerOrResponder) + var writer = initRlpWriter() + const + perProtocolMsgId = 123 + let perPeerMsgId = perPeerMsgIdImpl(peer, WakuProtocol, 123) + append(writer, perPeerMsgId) + initFuture result + let reqId = registerRequest(peer, timeout, result, perPeerMsgId + 1) + append(writer, reqId) + let msgBytes = finish(writer) + linkSendFailureToReqFuture(sendMsg(peer, msgBytes), result) + +proc p2pRequestComplete*(peerOrResponder: Peer; requestId: Hash; + lastEnvelopeHash: Hash; cursor: seq[byte]): Future[void] {. 
+ gcsafe.} = + let peer = getPeer(peerOrResponder) + var writer = initRlpWriter() + const + perProtocolMsgId = 125 + let perPeerMsgId = perPeerMsgIdImpl(peer, WakuProtocol, 125) + append(writer, perPeerMsgId) + startList(writer, 3) + append(writer, requestId) + append(writer, lastEnvelopeHash) + append(writer, cursor) + let msgBytes = finish(writer) + return sendMsg(peer, msgBytes) + +proc messagesUserHandler(peer: Peer; envelopes: seq[Envelope]) {.gcsafe, async.} = + type + CurrentProtocol = Waku + const + perProtocolMsgId = 1 + template state(peer: Peer): ref[WakuPeer:ObjectType] = + cast[ref[WakuPeer:ObjectType]](getState(peer, WakuProtocol)) + + template networkState(peer: Peer): ref[WakuNetwork:ObjectType] = + cast[ref[WakuNetwork:ObjectType]](getNetworkState(peer.network, WakuProtocol)) + + if not peer.state.initialized: + warn "Handshake not completed yet, discarding messages" + return + for envelope in envelopes: + if not envelope.valid(): + warn "Expired or future timed envelope", peer + continue + peer.state.accounting.received += 1 + let msg = initMessage(envelope) + if not msg.allowed(peer.networkState.config): + continue + if peer.state.received.containsOrIncl(msg.hash): + envelopes_dropped.inc(labelValues = ["duplicate"]) + trace "Peer sending duplicate messages", peer, hash = $msg.hash + continue + if peer.networkState.queue[].add(msg): + peer.networkState.filters.notify(msg) + +proc statusOptionsUserHandler(peer: Peer; options: StatusOptions) {.gcsafe, async.} = + type + CurrentProtocol = Waku + const + perProtocolMsgId = 22 + template state(peer: Peer): ref[WakuPeer:ObjectType] = + cast[ref[WakuPeer:ObjectType]](getState(peer, WakuProtocol)) + + template networkState(peer: Peer): ref[WakuNetwork:ObjectType] = + cast[ref[WakuNetwork:ObjectType]](getNetworkState(peer.network, WakuProtocol)) + + if not peer.state.initialized: + warn "Handshake not completed yet, discarding statusOptions" + return + if options.topicInterest.isSome(): + peer.state.topics = options.topicInterest + elif options.bloomFilter.isSome(): + peer.state.bloom = options.bloomFilter.get() + peer.state.topics = none(seq[Topic]) + if options.powRequirement.isSome(): + peer.state.powRequirement = options.powRequirement.get() + if options.lightNode.isSome(): + peer.state.isLightNode = options.lightNode.get() + +proc p2pRequestUserHandler(peer: Peer; envelope: Envelope) {.gcsafe, async.} = + type + CurrentProtocol = Waku + const + perProtocolMsgId = 126 + template state(peer: Peer): ref[WakuPeer:ObjectType] = + cast[ref[WakuPeer:ObjectType]](getState(peer, WakuProtocol)) + + template networkState(peer: Peer): ref[WakuNetwork:ObjectType] = + cast[ref[WakuNetwork:ObjectType]](getNetworkState(peer.network, WakuProtocol)) + + if not peer.networkState.p2pRequestHandler.isNil(): + peer.networkState.p2pRequestHandler(peer, envelope) + +proc p2pMessageUserHandler(peer: Peer; envelopes: seq[Envelope]) {.gcsafe, async.} = + type + CurrentProtocol = Waku + const + perProtocolMsgId = 127 + template state(peer: Peer): ref[WakuPeer:ObjectType] = + cast[ref[WakuPeer:ObjectType]](getState(peer, WakuProtocol)) + + template networkState(peer: Peer): ref[WakuNetwork:ObjectType] = + cast[ref[WakuNetwork:ObjectType]](getNetworkState(peer.network, WakuProtocol)) + + if peer.state.trusted: + for envelope in envelopes: + let msg = Message(env: envelope, isP2P: true) + peer.networkState.filters.notify(msg) + +proc batchAcknowledgedUserHandler(peer: Peer) {.gcsafe, async.} = + type + CurrentProtocol = Waku + const + perProtocolMsgId = 
11 + template state(peer: Peer): ref[WakuPeer:ObjectType] = + cast[ref[WakuPeer:ObjectType]](getState(peer, WakuProtocol)) + + template networkState(peer: Peer): ref[WakuNetwork:ObjectType] = + cast[ref[WakuNetwork:ObjectType]](getNetworkState(peer.network, WakuProtocol)) + + discard + +proc messageResponseUserHandler(peer: Peer) {.gcsafe, async.} = + type + CurrentProtocol = Waku + const + perProtocolMsgId = 12 + template state(peer: Peer): ref[WakuPeer:ObjectType] = + cast[ref[WakuPeer:ObjectType]](getState(peer, WakuProtocol)) + + template networkState(peer: Peer): ref[WakuNetwork:ObjectType] = + cast[ref[WakuNetwork:ObjectType]](getNetworkState(peer.network, WakuProtocol)) + + discard + +proc p2pSyncResponseUserHandler(peer: Peer; reqId: int) {.gcsafe, async.} = + type + CurrentProtocol = Waku + const + perProtocolMsgId = 124 + template state(peer: Peer): ref[WakuPeer:ObjectType] = + cast[ref[WakuPeer:ObjectType]](getState(peer, WakuProtocol)) + + template networkState(peer: Peer): ref[WakuNetwork:ObjectType] = + cast[ref[WakuNetwork:ObjectType]](getNetworkState(peer.network, WakuProtocol)) + + discard + +proc p2pSyncRequestUserHandler(peer: Peer; reqId: int) {.gcsafe, async.} = + type + CurrentProtocol = Waku + const + perProtocolMsgId = 123 + template state(peer: Peer): ref[WakuPeer:ObjectType] = + cast[ref[WakuPeer:ObjectType]](getState(peer, WakuProtocol)) + + template networkState(peer: Peer): ref[WakuNetwork:ObjectType] = + cast[ref[WakuNetwork:ObjectType]](getNetworkState(peer.network, WakuProtocol)) + + var response = init(ResponderWithId[p2pSyncResponseObj], peer, reqId) + discard + +proc p2pRequestCompleteUserHandler(peer: Peer; requestId: Hash; + lastEnvelopeHash: Hash; cursor: seq[byte]) {. + gcsafe, async.} = + type + CurrentProtocol = Waku + const + perProtocolMsgId = 125 + template state(peer: Peer): ref[WakuPeer:ObjectType] = + cast[ref[WakuPeer:ObjectType]](getState(peer, WakuProtocol)) + + template networkState(peer: Peer): ref[WakuNetwork:ObjectType] = + cast[ref[WakuNetwork:ObjectType]](getNetworkState(peer.network, WakuProtocol)) + + discard + +proc statusThunk(peer: Peer; _`gensym175950033: int; data`gensym175950034: Rlp) {. + async, gcsafe.} = + var rlp = data`gensym175950034 + var msg {.noinit.}: statusObj + msg.options = checkedRlpRead(peer, rlp, StatusOptions) + +proc messagesThunk(peer: Peer; _`gensym175950058: int; data`gensym175950059: Rlp) {. + async, gcsafe.} = + var rlp = data`gensym175950059 + var msg {.noinit.}: messagesObj + msg.envelopes = checkedRlpRead(peer, rlp, openarray[Envelope]) + await(messagesUserHandler(peer, msg.envelopes)) + +proc statusOptionsThunk(peer: Peer; _`gensym175950060: int; data`gensym175950061: Rlp) {. + async, gcsafe.} = + var rlp = data`gensym175950061 + var msg {.noinit.}: statusOptionsObj + msg.options = checkedRlpRead(peer, rlp, StatusOptions) + await(statusOptionsUserHandler(peer, msg.options)) + +proc p2pRequestThunk(peer: Peer; _`gensym175950062: int; data`gensym175950063: Rlp) {. + async, gcsafe.} = + var rlp = data`gensym175950063 + var msg {.noinit.}: p2pRequestObj + msg.envelope = checkedRlpRead(peer, rlp, Envelope) + await(p2pRequestUserHandler(peer, msg.envelope)) + +proc p2pMessageThunk(peer: Peer; _`gensym175950064: int; data`gensym175950065: Rlp) {. 
+ async, gcsafe.} = + var rlp = data`gensym175950065 + var msg {.noinit.}: p2pMessageObj + msg.envelopes = checkedRlpRead(peer, rlp, openarray[Envelope]) + await(p2pMessageUserHandler(peer, msg.envelopes)) + +proc batchAcknowledgedThunk(peer: Peer; _`gensym175950066: int; + data`gensym175950067: Rlp) {.async, gcsafe.} = + var rlp = data`gensym175950067 + var msg {.noinit.}: batchAcknowledgedObj + await(batchAcknowledgedUserHandler(peer)) + +proc messageResponseThunk(peer: Peer; _`gensym175950068: int; + data`gensym175950069: Rlp) {.async, gcsafe.} = + var rlp = data`gensym175950069 + var msg {.noinit.}: messageResponseObj + await(messageResponseUserHandler(peer)) + +proc p2pSyncResponseThunk(peer: Peer; _`gensym175950070: int; + data`gensym175950071: Rlp) {.async, gcsafe.} = + var rlp = data`gensym175950071 + var msg {.noinit.}: p2pSyncResponseObj + let reqId = read(rlp, int) + await(p2pSyncResponseUserHandler(peer, reqId)) + resolveResponseFuture(peer, perPeerMsgId(peer, p2pSyncResponseObj), addr(msg), + reqId) + +proc p2pSyncRequestThunk(peer: Peer; _`gensym175950074: int; + data`gensym175950075: Rlp) {.async, gcsafe.} = + var rlp = data`gensym175950075 + var msg {.noinit.}: p2pSyncRequestObj + let reqId = read(rlp, int) + await(p2pSyncRequestUserHandler(peer, reqId)) + +proc p2pRequestCompleteThunk(peer: Peer; _`gensym175950076: int; + data`gensym175950077: Rlp) {.async, gcsafe.} = + var rlp = data`gensym175950077 + var msg {.noinit.}: p2pRequestCompleteObj + tryEnterList(rlp) + msg.requestId = checkedRlpRead(peer, rlp, Hash) + msg.lastEnvelopeHash = checkedRlpRead(peer, rlp, Hash) + msg.cursor = checkedRlpRead(peer, rlp, seq[byte]) + await(p2pRequestCompleteUserHandler(peer, msg.requestId, msg.lastEnvelopeHash, + msg.cursor)) + +registerMsg(WakuProtocol, 0, "status", statusThunk, messagePrinter[statusObj], + requestResolver[statusObj], nextMsgResolver[statusObj]) +registerMsg(WakuProtocol, 1, "messages", messagesThunk, messagePrinter[messagesObj], + requestResolver[messagesObj], nextMsgResolver[messagesObj]) +registerMsg(WakuProtocol, 22, "statusOptions", statusOptionsThunk, + messagePrinter[statusOptionsObj], requestResolver[statusOptionsObj], + nextMsgResolver[statusOptionsObj]) +registerMsg(WakuProtocol, 126, "p2pRequest", p2pRequestThunk, + messagePrinter[p2pRequestObj], requestResolver[p2pRequestObj], + nextMsgResolver[p2pRequestObj]) +registerMsg(WakuProtocol, 127, "p2pMessage", p2pMessageThunk, + messagePrinter[p2pMessageObj], requestResolver[p2pMessageObj], + nextMsgResolver[p2pMessageObj]) +registerMsg(WakuProtocol, 11, "batchAcknowledged", batchAcknowledgedThunk, + messagePrinter[batchAcknowledgedObj], + requestResolver[batchAcknowledgedObj], + nextMsgResolver[batchAcknowledgedObj]) +registerMsg(WakuProtocol, 12, "messageResponse", messageResponseThunk, + messagePrinter[messageResponseObj], + requestResolver[messageResponseObj], + nextMsgResolver[messageResponseObj]) +registerMsg(WakuProtocol, 124, "p2pSyncResponse", p2pSyncResponseThunk, + messagePrinter[p2pSyncResponseObj], + requestResolver[p2pSyncResponseObj], + nextMsgResolver[p2pSyncResponseObj]) +registerMsg(WakuProtocol, 123, "p2pSyncRequest", p2pSyncRequestThunk, + messagePrinter[p2pSyncRequestObj], + requestResolver[p2pSyncRequestObj], + nextMsgResolver[p2pSyncRequestObj]) +registerMsg(WakuProtocol, 125, "p2pRequestComplete", p2pRequestCompleteThunk, + messagePrinter[p2pRequestCompleteObj], + requestResolver[p2pRequestCompleteObj], + nextMsgResolver[p2pRequestCompleteObj]) +proc WakuPeerConnected(peer: Peer) 
{.gcsafe, async.} = + type + CurrentProtocol = Waku + template state(peer: Peer): ref[WakuPeer:ObjectType] = + cast[ref[WakuPeer:ObjectType]](getState(peer, WakuProtocol)) + + template networkState(peer: Peer): ref[WakuNetwork:ObjectType] = + cast[ref[WakuNetwork:ObjectType]](getNetworkState(peer.network, WakuProtocol)) + + trace "onPeerConnected Waku" + let + wakuNet = peer.networkState + wakuPeer = peer.state + let options = StatusOptions(powRequirement: some(wakuNet.config.powRequirement), + bloomFilter: wakuNet.config.bloom, + lightNode: some(wakuNet.config.isLightNode), confirmationsEnabled: some( + wakuNet.config.confirmationsEnabled), + rateLimits: wakuNet.config.rateLimits, + topicInterest: wakuNet.config.topics) + let m = await peer.status(options, timeout = chronos.milliseconds(5000)) + wakuPeer.powRequirement = m.options.powRequirement.get(defaultMinPow) + wakuPeer.bloom = m.options.bloomFilter.get(fullBloom()) + wakuPeer.isLightNode = m.options.lightNode.get(false) + if wakuPeer.isLightNode and wakuNet.config.isLightNode: + raise newException(UselessPeerError, "Two light nodes connected") + wakuPeer.topics = m.options.topicInterest + if wakuPeer.topics.isSome(): + if wakuPeer.topics.get().len > topicInterestMax: + raise newException(UselessPeerError, "Topic-interest is too large") + if wakuNet.config.topics.isSome(): + raise newException(UselessPeerError, + "Two Waku nodes with topic-interest connected") + wakuPeer.received.init() + wakuPeer.trusted = false + wakuPeer.accounting = Accounting(sent: 0, received: 0) + wakuPeer.initialized = true + if not wakuNet.config.isLightNode: + traceAsyncErrors peer.run() + debug "Waku peer initialized", peer + +setEventHandlers(WakuProtocol, WakuPeerConnected, nil) +registerProtocol(WakuProtocol) \ No newline at end of file diff --git a/waku/v2/protocol/waku_rln_relay/rln.nim b/waku/v2/protocol/waku_rln_relay/rln.nim index 355d15dbe..4a54228a3 100644 --- a/waku/v2/protocol/waku_rln_relay/rln.nim +++ b/waku/v2/protocol/waku_rln_relay/rln.nim @@ -1,17 +1,17 @@ # this module contains the Nim wrappers for the rln library https://github.com/kilic/rln/blob/3bbec368a4adc68cd5f9bfae80b17e1bbb4ef373/src/ffi.rs -import stew/byteutils, os -from strutils import rsplit +import os -template sourceDir: string = currentSourcePath.rsplit(DirSep, 1)[0] -# librln.dylib is the binary executable of rln library (originally implemented in rust with an exposed C API) +# librln.dylib is the rln library taken from https://github.com/kilic/rln (originally implemented in rust with an exposed C API) # contains the key generation and other relevant functions -# to generate librln.dylib, clone this repo https://github.com/kilic/rln -# and run the following command in the root directory of the cloned project -# cargo build -# can find the .dylib file under the target/debug directory -# this file is already generated and copied here -const libName* = sourceDir / "librln.dylib" # TODO may need to load different libs based on OS + +const libPath = "rln/target/debug/" +when defined(Windows): + const libName* = libPath / "rln.dll" +elif defined(Linux): + const libName* = libPath / "librln.so" +elif defined(MacOsX): + const libName* = libPath / "librln.dylib" # Data types -----------------------------