efficient serialization

This commit is contained in:
Giacomo Pasini 2025-01-09 20:25:20 +01:00
parent 69b45ac791
commit b48f3016df
No known key found for this signature in database
GPG Key ID: FC08489D2D895D4B
7 changed files with 201 additions and 137 deletions

View File

@ -1,5 +1,5 @@
use super::{
merkle::{self, leaf, Path},
merkle::{self, leaf, Path, PathNode},
mmr::{Root, MMR},
Nullifier,
};
@ -246,11 +246,12 @@ impl NullifierTree {
self.leaves.push(new_leaf);
}
BatchUpdateProof {
BatchUpdateProofInner {
low_nfs,
low_nf_paths,
mmr,
}
.serialize()
}
}
@ -306,14 +307,80 @@ impl UpdateProof {
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct BatchUpdateProof {
struct BatchUpdateProofInner {
low_nfs: Vec<Leaf>,
low_nf_paths: Vec<Path>,
mmr: MMR,
}
/// Custom zero-copyish deserialization is needed for decent performance
/// in risc0
#[derive(Debug, Clone)]
pub struct BatchUpdateProof {
    // Serialized proof bytes; layout is produced by `BatchUpdateProofInner::serialize`
    // and parsed lazily in `verify` to avoid serde overhead in the zkVM guest.
    data: Vec<u8>,
}
/// Lazily yields `(Leaf, PathIterator)` records from the serialized proof bytes.
struct LowNfIterator<'a> {
    // Remaining unparsed record bytes; shrinks as records are consumed.
    data: &'a [u8],
    // Number of 33-byte nodes in every merkle path (fixed stride per record).
    path_len: usize,
}
#[derive(Debug, Clone)]
/// Lazily decodes serialized 33-byte (tag || sibling-hash) entries into `PathNode`s.
struct PathIterator<'p> {
    // Remaining serialized path bytes; always a multiple of 33 for well-formed input.
    path: &'p [u8],
}
impl<'a> Iterator for PathIterator<'a> {
    type Item = PathNode;

    /// Decode the next path node, or `None` once the byte slice is exhausted.
    fn next(&mut self) -> Option<Self::Item> {
        if self.path.is_empty() {
            return None;
        }
        // Each serialized node is 1 tag byte followed by a 32-byte sibling hash.
        let (entry, remaining) = self.path.split_at(33);
        self.path = remaining;
        let sibling: [u8; 32] = entry[1..].try_into().unwrap();
        match entry[0] {
            0 => Some(PathNode::Left(sibling)),
            1 => Some(PathNode::Right(sibling)),
            _ => panic!("invalid path node"),
        }
    }
}
impl<'a> Iterator for LowNfIterator<'a> {
    type Item = (Leaf, PathIterator<'a>);

    /// Split off the next fixed-size record and decode its leaf eagerly,
    /// leaving the path bytes to be decoded lazily by `PathIterator`.
    fn next(&mut self) -> Option<Self::Item> {
        if self.data.is_empty() {
            return None;
        }
        // A record is a 64-byte leaf (two 32-byte nullifiers) plus its path nodes.
        let record_len = 64 + self.path_len * 33;
        let (record, remaining) = self.data.split_at(record_len);
        self.data = remaining;
        let paths = PathIterator {
            path: &record[64..],
        };
        let leaf = Leaf {
            value: Nullifier(record[..32].try_into().unwrap()),
            next_value: Nullifier(record[32..64].try_into().unwrap()),
        };
        Some((leaf, paths))
    }
}
impl BatchUpdateProof {
pub fn verify(&self, values: &[Nullifier], old_root: [u8; 32]) -> [u8; 32] {
/// Wrap already-serialized proof bytes (as produced by `BatchUpdateProofInner::serialize`).
/// No validation is performed here; `verify` parses and checks the contents.
pub fn from_raw_data(data: Vec<u8>) -> Self {
    Self { data }
}
fn verify_batch_proof(
values: &[Nullifier],
low_nfs: LowNfIterator,
mut mmr: MMR,
old_root: [u8; 32],
) -> [u8; 32] {
if values.is_empty() {
return old_root;
}
@ -328,9 +395,9 @@ impl BatchUpdateProof {
let mut values = values.iter();
for (low_nf, path) in self.low_nfs.iter().zip(&self.low_nf_paths) {
for (low_nf, path) in low_nfs {
let in_gap = values
.peeking_take_while(|v| in_interval(*low_nf, **v))
.peeking_take_while(|v| in_interval(low_nf, **v))
.copied()
.collect::<Vec<_>>();
assert!(in_gap.len() >= 1, "unused low nf");
@ -352,20 +419,82 @@ impl BatchUpdateProof {
next_value: in_gap[0],
};
assert_eq!(cur_root, merkle::path_root(leaf(&low_nf.to_bytes()), path));
assert_eq!(
cur_root,
merkle::path_root(leaf(&low_nf.to_bytes()), path.clone())
);
cur_root = merkle::path_root(leaf(&updated_low_nf.to_bytes()), path);
}
assert!(values.next().is_none(), "unused values");
assert_eq!(cur_root, frontier_root(&self.mmr.roots));
assert_eq!(cur_root, frontier_root(&mmr.roots));
let mut mmr = self.mmr.clone();
for new_leaf in new_leaves {
mmr.push(&new_leaf.to_bytes());
}
frontier_root(&mmr.roots)
}
/// Verify the batch nullifier-insertion proof against `old_root` and return the new root.
/// An empty proof is the no-op case: the root is unchanged.
pub fn verify(&self, nfs: &[Nullifier], old_root: [u8; 32]) -> [u8; 32] {
    if self.data.is_empty() {
        return old_root;
    }
    // Header: record count (u32 LE) then the common path length (u32 LE).
    let len = u32::from_le_bytes(self.data[..4].try_into().unwrap()) as usize;
    let path_len = u32::from_le_bytes(self.data[4..8].try_into().unwrap()) as usize;
    // Each record is a 64-byte leaf plus `path_len` nodes of 33 bytes each.
    let low_nf_iterator_end = 8 + (path_len * 33 + 64) * len;
    let low_nfs = LowNfIterator {
        data: &self.data[8..low_nf_iterator_end],
        path_len,
    };
    // Trailing bytes encode the MMR roots as (height byte || 32-byte root).
    // NOTE(review): chunks_exact(33) silently drops a trailing partial chunk —
    // assumes well-formed input from `serialize`; confirm malformed data is rejected upstream.
    let mut roots = Vec::new();
    for root in self.data[low_nf_iterator_end..].chunks_exact(33) {
        roots.push(Root {
            root: root[1..].try_into().unwrap(),
            height: root[0],
        });
    }
    Self::verify_batch_proof(nfs, low_nfs, MMR { roots }, old_root)
}
/// Borrow the underlying serialized proof bytes (e.g. for writing into the guest env).
pub fn as_slice(&self) -> &[u8] {
    &self.data
}
}
impl BatchUpdateProofInner {
    /// Serialize into the compact byte layout parsed by `BatchUpdateProof::verify`:
    /// header: record count (u32 LE) | path length (u32 LE);
    /// per record: 64-byte leaf | `path_len` nodes of (tag byte || 32-byte sibling);
    /// trailer: MMR roots as (height byte || 32-byte root).
    /// An empty proof serializes to an empty byte vector.
    fn serialize(&self) -> BatchUpdateProof {
        if self.low_nfs.is_empty() {
            return BatchUpdateProof { data: Vec::new() };
        }
        // All paths must share one length so the decoder can use a fixed stride.
        let path_len = self.low_nf_paths[0].len();
        // Exact size is known up front; preallocate to avoid repeated growth.
        let total = 8 + self.low_nfs.len() * (64 + path_len * 33) + self.mmr.roots.len() * 33;
        let mut data = Vec::with_capacity(total);
        data.extend_from_slice(&(self.low_nfs.len() as u32).to_le_bytes());
        data.extend_from_slice(&(path_len as u32).to_le_bytes());
        for (low_nf, path) in self.low_nfs.iter().zip(&self.low_nf_paths) {
            data.extend_from_slice(&low_nf.to_bytes());
            assert_eq!(path.len(), path_len);
            for node in path {
                match node {
                    merkle::PathNode::Left(sibling) => {
                        data.push(0);
                        data.extend_from_slice(sibling);
                    }
                    merkle::PathNode::Right(sibling) => {
                        data.push(1);
                        data.extend_from_slice(sibling);
                    }
                }
            }
        }
        for root in &self.mmr.roots {
            data.push(root.height);
            data.extend_from_slice(&root.root);
        }
        BatchUpdateProof { data }
    }
}
fn in_interval(low_nf: Leaf, value: Nullifier) -> bool {
@ -437,10 +566,10 @@ mod tests {
tree_single.insert(*value).verify(old_root);
}
let proof = tree_batch.insert_batch(values);
let proof = tree_batch.insert_batch(values.clone());
assert_eq!(
proof.verify(NullifierTree::new().root()),
proof.verify(&values, NullifierTree::new().root()),
tree_single.root()
);
}

View File

@ -1,5 +1,6 @@
use risc0_zkvm::sha::rust_crypto::{Digest, Sha256};
use serde::{Deserialize, Serialize};
use std::borrow::Borrow;
pub fn padded_leaves(elements: &[Vec<u8>]) -> Vec<[u8; 32]> {
let mut leaves = std::iter::repeat([0; 32])
@ -52,11 +53,11 @@ pub enum PathNode {
Right([u8; 32]),
}
pub fn path_root(leaf: [u8; 32], path: &[PathNode]) -> [u8; 32] {
pub fn path_root<'a>(leaf: [u8; 32], path: impl IntoIterator<Item: Borrow<PathNode>>) -> [u8; 32] {
let mut computed_hash = leaf;
for path_node in path {
match &path_node {
for path_node in path.into_iter() {
match path_node.borrow() {
PathNode::Left(sibling_hash) => {
computed_hash = node(sibling_hash, computed_hash);
}

View File

@ -82,7 +82,7 @@ pub fn path_key(path: &[merkle::PathNode]) -> [u8; 32] {
assert_eq!(path.len(), 256);
let mut key = [0u8; 32];
for byte_i in (0..4).rev() {
for byte_i in (0..32).rev() {
let mut byte = 0u8;
for bit_i in 0..8 {
byte <<= 1;

View File

@ -12,14 +12,9 @@ pub struct ProvedLedgerTransition {
impl ProvedLedgerTransition {
pub fn prove(mut ledger: LedgerState, zone_id: ZoneId, bundles: Vec<ProvedBundle>) -> Self {
let mut witness = LedgerProofPrivate {
bundles: Vec::new(),
ledger: ledger.to_witness(),
id: zone_id,
};
let mut env = risc0_zkvm::ExecutorEnv::builder();
let mut w_bundles = Vec::new();
let mut nullifiers = Vec::new();
// prepare the sparse merkle tree nullifier proofs
for proved_bundle in &bundles {
env.add_assumption(proved_bundle.risc0_receipt.clone());
@ -40,18 +35,25 @@ impl ProvedLedgerTransition {
(*root, vec![])
}));
let nf_proofs = ledger.add_nullifiers(zone_ledger_update.nullifiers.clone());
nullifiers.extend(zone_ledger_update.nullifiers.clone());
let ledger_bundle = LedgerBundleWitness {
bundle,
cm_root_proofs,
nf_proofs,
};
witness.bundles.push(ledger_bundle)
w_bundles.push(ledger_bundle)
}
let env = env.write(&witness).unwrap().build().unwrap();
let witness = LedgerProofPrivate {
bundles: w_bundles,
ledger: ledger.to_witness(),
id: zone_id,
nf_proofs: ledger.add_nullifiers(nullifiers),
};
witness.write(&mut env);
let env = env.build().unwrap();
// Obtain the default prover.
let prover = risc0_zkvm::default_prover();

View File

@ -3,6 +3,7 @@ use crate::{ledger::ProvedLedgerTransition, stf::StfProof};
use cl::zone_layer::tx::UpdateBundle;
use std::collections::{HashMap, HashSet};
#[derive(Debug, Clone)]
pub struct ProvedUpdateBundle {
pub bundle: UpdateBundle,
pub ledger_proofs: Vec<ProvedLedgerTransition>,

View File

@ -7,6 +7,7 @@ use cl::zone_layer::{
ledger::{Ledger, LedgerWitness},
notes::ZoneId,
};
use risc0_zkvm::guest::env;
use serde::{Deserialize, Serialize};
#[derive(Debug, Clone, Serialize, Deserialize)]
@ -18,18 +19,18 @@ pub struct LedgerProofPublic {
pub outputs: Vec<NoteCommitment>,
}
#[derive(Debug, Clone, Serialize, Deserialize)]
#[derive(Debug, Clone)]
pub struct LedgerProofPrivate {
pub ledger: LedgerWitness,
pub id: ZoneId,
pub bundles: Vec<LedgerBundleWitness>,
pub nf_proofs: BatchUpdateProof,
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct LedgerBundleWitness {
pub bundle: BundlePublic,
pub cm_root_proofs: BTreeMap<[u8; 32], merkle::Path>,
pub nf_proofs: BatchUpdateProof,
}
#[derive(Debug, Clone, Serialize, Deserialize)]
@ -38,103 +39,32 @@ pub struct CrossZoneBundle {
pub zones: Vec<ZoneId>,
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct CompactNullifierProofs {
pub siblings: Vec<u8>,
pub paths: Vec<[u8; 32]>,
}
impl LedgerProofPrivate {
pub fn read() -> Self {
let ledger = env::read();
let id = env::read();
let bundles = env::read();
let nf_proofs_len: usize = env::read();
let mut data = vec![0; nf_proofs_len];
env::read_slice(&mut data);
impl CompactNullifierProofs {
pub fn from_paths(input: Vec<merkle::Path>) -> Self {
let mut siblings = Vec::with_capacity(input.len());
let mut paths = Vec::with_capacity(input.len());
for path in input {
let mut path_bits = [0u8; 32];
assert_eq!(path.len(), 256);
for (i, node) in path.iter().enumerate().rev() {
match node {
merkle::PathNode::Left(sibling) => {
siblings.extend(sibling.into_iter());
}
merkle::PathNode::Right(sibling) => {
siblings.extend(sibling.into_iter());
set_bit(i as u8, &mut path_bits);
}
}
}
paths.push(path_bits);
}
Self { siblings, paths }
}
pub fn len(&self) -> usize {
self.paths.len()
}
}
impl IntoIterator for CompactNullifierProofs {
type Item = merkle::Path;
type IntoIter = CompactNfIterator;
fn into_iter(self) -> CompactNfIterator {
CompactNfIterator {
siblings: self.siblings,
paths: self.paths,
LedgerProofPrivate {
ledger,
id,
bundles,
nf_proofs: BatchUpdateProof::from_raw_data(data),
}
}
}
pub struct CompactNfIterator {
pub siblings: Vec<u8>,
pub paths: Vec<[u8; 32]>,
}
#[cfg(not(target_os = "zkvm"))]
impl LedgerProofPrivate {
pub fn write(&self, env: &mut risc0_zkvm::ExecutorEnvBuilder) {
env.write(&self.ledger).unwrap();
env.write(&self.id).unwrap();
env.write(&self.bundles).unwrap();
impl<'a> Iterator for CompactNfIterator {
type Item = merkle::Path;
fn next(&mut self) -> Option<Self::Item> {
if self.paths.is_empty() {
return None;
}
let path = self.paths.pop().unwrap();
let mut res = Vec::with_capacity(256);
for i in 0..=255 {
if get_bit(i, path) {
res.push(merkle::PathNode::Right(
self.siblings[self.siblings.len() - 32..]
.try_into()
.unwrap(),
))
} else {
res.push(merkle::PathNode::Left(
self.siblings[self.siblings.len() - 32..]
.try_into()
.unwrap(),
))
};
self.siblings.truncate(self.siblings.len() - 32);
}
Some(res)
env.write(&self.nf_proofs.as_slice().len()).unwrap();
env.write_slice(self.nf_proofs.as_slice());
}
}
/// Test bit `idx` of a 256-bit array (LSB-first within each byte).
fn get_bit(idx: u8, elem: [u8; 32]) -> bool {
    let byte_idx = (idx / 8) as usize;
    let bit_idx = idx % 8;
    elem[byte_idx] & (1 << bit_idx) != 0
}
/// Set bit `idx` of a 256-bit array (LSB-first within each byte).
fn set_bit(idx: u8, elem: &mut [u8; 32]) {
    let byte_idx = (idx / 8) as usize;
    let bit_idx = idx % 8;
    elem[byte_idx] |= 1 << bit_idx;
}

View File

@ -9,16 +9,17 @@ fn main() {
mut ledger,
id,
bundles,
} = env::read();
nf_proofs,
} = LedgerProofPrivate::read();
let old_ledger = ledger.clone();
let mut cross_bundles = vec![];
let mut outputs = vec![];
let mut nullifiers = vec![];
for LedgerBundleWitness {
bundle,
mut bundle,
cm_root_proofs,
nf_proofs,
} in bundles
{
env::verify(
@ -27,7 +28,13 @@ fn main() {
)
.unwrap();
if let Some(ledger_update) = bundle.zone_ledger_updates.get(&id) {
// TODO: do not add local updates
cross_bundles.push(CrossZoneBundle {
id: bundle.bundle_id,
zones: bundle.zone_ledger_updates.keys().copied().collect(),
});
if let Some(ledger_update) = bundle.zone_ledger_updates.remove(&id) {
for past_cm_root in &ledger_update.cm_roots {
let past_cm_root_proof = cm_root_proofs
.get(past_cm_root)
@ -36,25 +43,19 @@ fn main() {
assert!(old_ledger.valid_cm_root(expected_current_cm_root))
}
let mut sorted_nullifiers = ledger_update.nullifiers.clone();
// TODO: sort outside and check
sorted_nullifiers.sort();
// TODO: remove nullifier duplication
assert_eq!(sorted_nullifiers, nf_proofs.nullifiers());
ledger.assert_nfs_update(&nf_proofs);
for cm in &ledger_update.commitments {
ledger.add_commitment(cm);
outputs.push(*cm)
}
}
cross_bundles.push(CrossZoneBundle {
id: bundle.bundle_id,
zones: bundle.zone_ledger_updates.into_keys().collect(),
});
nullifiers.extend(ledger_update.nullifiers);
}
}
// TODO: sort outside and check
nullifiers.sort();
ledger.assert_nfs_update(&nullifiers, &nf_proofs);
env::commit(&LedgerProofPublic {
old_ledger: old_ledger.commit(),
ledger: ledger.commit(),