Merge branch 'main' into Pravdyvy/lints-fix

This commit is contained in:
Oleksandr Pravdyvyi 2025-06-18 12:44:09 +03:00
commit 411d8e9034
46 changed files with 1416 additions and 2648 deletions

1271
Cargo.lock generated

File diff suppressed because it is too large Load Diff

View File

@ -36,7 +36,6 @@ lru = "0.7.8"
thiserror = "1.0"
rs_merkle = "1.4"
sha2 = "0.10.8"
monotree = "0.1.5"
hex = "0.4.3"
aes-gcm = "0.10.3"
toml = "0.7.4"

View File

@ -1,14 +1,11 @@
use std::collections::HashMap;
use anyhow::Result;
use common::{merkle_tree_public::TreeHashType, nullifier::UTXONullifier, transaction::Tag};
use common::{merkle_tree_public::TreeHashType, transaction::Tag};
use k256::AffinePoint;
use log::info;
use serde::Serialize;
use utxo::{
utxo_core::{UTXOPayload, UTXO},
utxo_tree::UTXOSparseMerkleTree,
};
use serde::{Deserialize, Serialize};
use utxo::utxo_core::UTXO;
use crate::key_management::{
constants_types::{CipherText, Nonce},
@ -19,11 +16,70 @@ use crate::key_management::{
pub type PublicKey = AffinePoint;
pub type AccountAddress = TreeHashType;
#[derive(Clone)]
pub struct Account {
pub key_holder: AddressKeyHolder,
pub address: AccountAddress,
pub balance: u64,
pub utxo_tree: UTXOSparseMerkleTree,
pub utxos: HashMap<TreeHashType, UTXO>,
}
#[derive(Serialize, Deserialize, Clone)]
pub struct AccountForSerialization {
pub key_holder: AddressKeyHolder,
pub address: AccountAddress,
pub balance: u64,
pub utxos: HashMap<String, UTXO>,
}
impl From<Account> for AccountForSerialization {
    /// Converts an in-memory account into its serializable twin by
    /// hex-encoding every UTXO hash key (raw byte-array keys are not
    /// representable as JSON object keys).
    fn from(value: Account) -> Self {
        let encoded_utxos = value
            .utxos
            .into_iter()
            .map(|(hash, utxo)| (hex::encode(hash), utxo))
            .collect();
        AccountForSerialization {
            key_holder: value.key_holder,
            address: value.address,
            balance: value.balance,
            utxos: encoded_utxos,
        }
    }
}
impl From<AccountForSerialization> for Account {
    /// Rebuilds the in-memory account from its serialized form,
    /// hex-decoding the UTXO hash keys back into fixed-size byte arrays.
    ///
    /// # Panics
    /// Panics if a stored key is not valid hex or decodes to the wrong
    /// length — that indicates a corrupted snapshot, which `From` cannot
    /// surface as an error. The `expect` messages make the failure
    /// diagnosable instead of a bare `unwrap` panic.
    fn from(value: AccountForSerialization) -> Self {
        Account {
            key_holder: value.key_holder,
            address: value.address,
            balance: value.balance,
            utxos: value
                .utxos
                .into_iter()
                .map(|(key, val)| {
                    let bytes = hex::decode(&key)
                        .expect("corrupted account snapshot: UTXO key is not valid hex");
                    let hash = bytes
                        .try_into()
                        .expect("corrupted account snapshot: UTXO key has wrong length");
                    (hash, val)
                })
                .collect(),
        }
    }
}
impl Serialize for Account {
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where
S: serde::Serializer,
{
let account_for_serialization: AccountForSerialization = From::from(self.clone());
account_for_serialization.serialize(serializer)
}
}
impl<'de> Deserialize<'de> for Account {
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where
D: serde::Deserializer<'de>,
{
let account_for_serialization = <AccountForSerialization>::deserialize(deserializer)?;
Ok(account_for_serialization.into())
}
}
///A structure, which represents all the visible (public) information
@ -42,26 +98,26 @@ impl Account {
let key_holder = AddressKeyHolder::new_os_random();
let address = key_holder.address;
let balance = 0;
let utxo_tree = UTXOSparseMerkleTree::new();
let utxos = HashMap::new();
Self {
key_holder,
address,
balance,
utxo_tree,
utxos,
}
}
pub fn new_with_balance(balance: u64) -> Self {
let key_holder = AddressKeyHolder::new_os_random();
let address = key_holder.address;
let utxo_tree = UTXOSparseMerkleTree::new();
let utxos = HashMap::new();
Self {
key_holder,
address,
balance,
utxo_tree,
utxos,
}
}
@ -87,21 +143,14 @@ impl Account {
.decrypt_data(ephemeral_public_key_sender, ciphertext, nonce)
}
/// Marks owned UTXOs as spent by attaching their nullifiers.
///
/// For each `(hash, nullifier)` pair, looks the UTXO up in the local tree
/// store and consumes it; hashes with no matching entry are silently
/// skipped (NOTE(review): confirm silent skip is intended for unknown hashes).
///
/// # Errors
/// Propagates any error raised by `consume_utxo`.
pub fn mark_spent_utxo(
    &mut self,
    utxo_nullifier_map: HashMap<TreeHashType, UTXONullifier>,
) -> Result<()> {
    for (hash, nullifier) in utxo_nullifier_map {
        if let Some(utxo_entry) = self.utxo_tree.store.get_mut(&hash) {
            utxo_entry.consume_utxo(nullifier)?;
        }
    }
    Ok(())
}
pub fn add_new_utxo_outputs(&mut self, utxos: Vec<UTXO>) -> Result<()> {
Ok(self.utxo_tree.insert_items(utxos)?)
for utxo in utxos {
if self.utxos.contains_key(&utxo.hash) {
return Err(anyhow::anyhow!("UTXO already exists"));
}
self.utxos.insert(utxo.hash, utxo);
}
Ok(())
}
pub fn update_public_balance(&mut self, new_balance: u64) {
@ -114,16 +163,14 @@ impl Account {
amount: u128,
privacy_flag: bool,
) -> Result<()> {
let payload_with_asset = UTXOPayload {
owner: self.address,
asset: serde_json::to_vec(&asset)?,
let asset_utxo = UTXO::new(
self.address,
serde_json::to_vec(&asset)?,
amount,
privacy_flag,
};
);
let asset_utxo = UTXO::create_utxo_from_payload(payload_with_asset)?;
self.utxo_tree.insert_item(asset_utxo)?;
self.utxos.insert(asset_utxo.hash, asset_utxo);
Ok(())
}
@ -159,18 +206,8 @@ impl Default for Account {
mod tests {
use super::*;
fn generate_dummy_utxo_nullifier() -> UTXONullifier {
UTXONullifier::default()
}
fn generate_dummy_utxo(address: TreeHashType, amount: u128) -> anyhow::Result<UTXO> {
let payload = UTXOPayload {
owner: address,
asset: vec![],
amount,
privacy_flag: false,
};
UTXO::create_utxo_from_payload(payload)
fn generate_dummy_utxo(address: TreeHashType, amount: u128) -> UTXO {
UTXO::new(address, vec![], amount, false)
}
#[test]
@ -181,31 +218,16 @@ mod tests {
assert!(account.key_holder.address != [0u8; 32]); // Check if the address is not empty
}
#[test]
fn test_mark_spent_utxo() {
let mut account = Account::new();
let utxo = generate_dummy_utxo(account.address, 100).unwrap();
account.add_new_utxo_outputs(vec![utxo]).unwrap();
let mut utxo_nullifier_map = HashMap::new();
utxo_nullifier_map.insert(account.address, generate_dummy_utxo_nullifier());
let result = account.mark_spent_utxo(utxo_nullifier_map);
assert!(result.is_ok());
assert!(account.utxo_tree.store.get(&account.address).is_none());
}
#[test]
fn test_add_new_utxo_outputs() {
let mut account = Account::new();
let utxo1 = generate_dummy_utxo(account.address, 100).unwrap();
let utxo2 = generate_dummy_utxo(account.address, 200).unwrap();
let utxo1 = generate_dummy_utxo(account.address, 100);
let utxo2 = generate_dummy_utxo(account.address, 200);
let result = account.add_new_utxo_outputs(vec![utxo1.clone(), utxo2.clone()]);
assert!(result.is_ok());
assert_eq!(account.utxo_tree.store.len(), 2);
assert_eq!(account.utxos.len(), 2);
}
#[test]
@ -225,6 +247,6 @@ mod tests {
let result = account.add_asset(asset, amount, false);
assert!(result.is_ok());
assert_eq!(account.utxo_tree.store.len(), 1);
assert_eq!(account.utxos.len(), 1);
}
}

View File

@ -1,4 +1,5 @@
use aes_gcm::{aead::Aead, AeadCore, Aes256Gcm, Key, KeyInit};
use aes_gcm::{aead::Aead, AeadCore, Aes256Gcm, KeyInit};
use elliptic_curve::point::AffineCoordinates;
use elliptic_curve::PrimeField;
use k256::{AffinePoint, FieldBytes, Scalar};
use log::info;
@ -39,14 +40,8 @@ impl EphemeralKeyHolder {
viewing_public_key_receiver: AffinePoint,
data: &[u8],
) -> (CipherText, Nonce) {
let key_point = self.calculate_shared_secret_sender(viewing_public_key_receiver);
let binding = serde_json::to_vec(&key_point).unwrap();
let key_raw = &binding.as_slice()[..32];
let key_raw_adjust: [u8; 32] = key_raw.try_into().unwrap();
let key: Key<Aes256Gcm> = key_raw_adjust.into();
let cipher = Aes256Gcm::new(&key);
let shared_secret = self.calculate_shared_secret_sender(viewing_public_key_receiver);
let cipher = Aes256Gcm::new(&shared_secret.x());
let nonce = Aes256Gcm::generate_nonce(&mut OsRng);
(cipher.encrypt(&nonce, data).unwrap(), nonce)

View File

@ -1,10 +1,12 @@
use aes_gcm::{aead::Aead, Aes256Gcm, Key, KeyInit};
use aes_gcm::{aead::Aead, Aes256Gcm, KeyInit};
use common::merkle_tree_public::TreeHashType;
use constants_types::{CipherText, Nonce};
use elliptic_curve::point::AffineCoordinates;
use ephemeral_key_holder::EphemeralKeyHolder;
use k256::AffinePoint;
use log::info;
use secret_holders::{SeedHolder, TopSecretKeyHolder, UTXOSecretKeyHolder};
use serde::{Deserialize, Serialize};
use crate::account_core::PublicKey;
@ -12,7 +14,7 @@ pub mod constants_types;
pub mod ephemeral_key_holder;
pub mod secret_holders;
#[derive(Clone)]
#[derive(Serialize, Deserialize, Clone)]
///Entrypoint to key management
pub struct AddressKeyHolder {
//Will be useful in future
@ -63,14 +65,8 @@ impl AddressKeyHolder {
ciphertext: CipherText,
nonce: Nonce,
) -> Result<Vec<u8>, aes_gcm::Error> {
let key_point = self.calculate_shared_secret_receiver(ephemeral_public_key_sender);
let binding = serde_json::to_vec(&key_point).unwrap();
let key_raw = &binding.as_slice()[..32];
let key_raw_adjust: [u8; 32] = key_raw.try_into().unwrap();
let key: Key<Aes256Gcm> = key_raw_adjust.into();
let cipher = Aes256Gcm::new(&key);
let shared_secret = self.calculate_shared_secret_receiver(ephemeral_public_key_sender);
let cipher = Aes256Gcm::new(&shared_secret.x());
cipher.decrypt(&nonce, ciphertext.as_slice())
}
@ -115,6 +111,7 @@ mod tests {
use constants_types::{NULLIFIER_SECRET_CONST, VIEWING_SECRET_CONST};
use elliptic_curve::ff::Field;
use elliptic_curve::group::prime::PrimeCurveAffine;
use elliptic_curve::point::AffineCoordinates;
use k256::{AffinePoint, ProjectivePoint, Scalar};
use super::*;
@ -154,22 +151,14 @@ mod tests {
let address_key_holder = AddressKeyHolder::new_os_random();
// Generate an ephemeral key and shared secret
let scalar = Scalar::random(OsRng);
let ephemeral_public_key_sender = address_key_holder
.produce_ephemeral_key_holder()
.generate_ephemeral_public_key();
let shared_secret =
address_key_holder.calculate_shared_secret_receiver(ephemeral_public_key_sender);
// Prepare the encryption key from shared secret
let key_raw = serde_json::to_vec(&shared_secret).unwrap();
let key_raw_adjust_pre = &key_raw.as_slice()[..32];
let key_raw_adjust: [u8; 32] = key_raw_adjust_pre.try_into().unwrap();
let key: Key<Aes256Gcm> = key_raw_adjust.into();
let cipher = Aes256Gcm::new(&key);
// Encrypt sample data
let cipher = Aes256Gcm::new(&shared_secret.x());
let nonce = Nonce::from_slice(b"unique nonce");
let plaintext = b"Sensitive data";
let ciphertext = cipher
@ -225,19 +214,12 @@ mod tests {
// Generate ephemeral public key and shared secret
let scalar = Scalar::random(OsRng);
let ephemeral_public_key_sender = (ProjectivePoint::generator() * scalar).to_affine();
let ephemeral_public_key_sender = (ProjectivePoint::GENERATOR * scalar).to_affine();
let shared_secret =
address_key_holder.calculate_shared_secret_receiver(ephemeral_public_key_sender);
// Prepare the encryption key from shared secret
let key_raw = serde_json::to_vec(&shared_secret).unwrap();
let key_raw_adjust_pre = &key_raw.as_slice()[..32];
let key_raw_adjust: [u8; 32] = key_raw_adjust_pre.try_into().unwrap();
let key: Key<Aes256Gcm> = key_raw_adjust.into();
let cipher = Aes256Gcm::new(&key);
// Encrypt sample data with a specific nonce
let cipher = Aes256Gcm::new(&shared_secret.x());
let nonce = Nonce::from_slice(b"unique nonce");
let plaintext = b"Sensitive data";
let ciphertext = cipher
@ -265,19 +247,12 @@ mod tests {
// Generate ephemeral public key and shared secret
let scalar = Scalar::random(OsRng);
let ephemeral_public_key_sender = (ProjectivePoint::generator() * scalar).to_affine();
let ephemeral_public_key_sender = (ProjectivePoint::GENERATOR * scalar).to_affine();
let shared_secret =
address_key_holder.calculate_shared_secret_receiver(ephemeral_public_key_sender);
// Prepare the encryption key from shared secret
let key_raw = serde_json::to_vec(&shared_secret).unwrap();
let key_raw_adjust_pre = &key_raw.as_slice()[..32];
let key_raw_adjust: [u8; 32] = key_raw_adjust_pre.try_into().unwrap();
let key: Key<Aes256Gcm> = key_raw_adjust.into();
let cipher = Aes256Gcm::new(&key);
// Encrypt sample data
let cipher = Aes256Gcm::new(&shared_secret.x());
let nonce = Nonce::from_slice(b"unique nonce");
let plaintext = b"Sensitive data";
let ciphertext = cipher
@ -307,7 +282,7 @@ mod tests {
// Generate ephemeral key and shared secret
let scalar = Scalar::random(OsRng);
let ephemeral_public_key_sender = (ProjectivePoint::generator() * scalar).to_affine();
let ephemeral_public_key_sender = (ProjectivePoint::GENERATOR * scalar).to_affine();
// Encrypt sample data
let plaintext = b"Round-trip test data";
@ -315,12 +290,7 @@ mod tests {
let shared_secret =
address_key_holder.calculate_shared_secret_receiver(ephemeral_public_key_sender);
// Prepare the encryption key from shared secret
let key_raw = serde_json::to_vec(&shared_secret).unwrap();
let key_raw_adjust_pre = &key_raw.as_slice()[..32];
let key_raw_adjust: [u8; 32] = key_raw_adjust_pre.try_into().unwrap();
let key: Key<Aes256Gcm> = key_raw_adjust.into();
let cipher = Aes256Gcm::new(&key);
let cipher = Aes256Gcm::new(&shared_secret.x());
let ciphertext = cipher
.encrypt(nonce, plaintext.as_ref())

View File

@ -2,6 +2,7 @@ use common::merkle_tree_public::TreeHashType;
use elliptic_curve::PrimeField;
use k256::{AffinePoint, FieldBytes, Scalar};
use rand::{rngs::OsRng, RngCore};
use serde::{Deserialize, Serialize};
use sha2::{digest::FixedOutput, Digest};
use super::constants_types::{NULLIFIER_SECRET_CONST, VIEWING_SECRET_CONST};
@ -13,13 +14,13 @@ pub struct SeedHolder {
seed: Scalar,
}
#[derive(Debug, Clone)]
#[derive(Serialize, Deserialize, Debug, Clone)]
///Secret spending key holder. Produces `UTXOSecretKeyHolder` objects.
pub struct TopSecretKeyHolder {
pub secret_spending_key: Scalar,
}
#[derive(Debug, Clone)]
#[derive(Serialize, Deserialize, Debug, Clone)]
///Nullifier secret key and viewing secret key holder. Produces public keys. Can produce address. Can produce shared secret for recipient.
pub struct UTXOSecretKeyHolder {
pub nullifier_secret_key: Scalar,

View File

@ -9,8 +9,7 @@ thiserror.workspace = true
serde_json.workspace = true
serde.workspace = true
reqwest.workspace = true
monotree.workspace = true
risc0-zkvm = { git = "https://github.com/risc0/risc0.git", branch = "release-2.0" }
risc0-zkvm = { git = "https://github.com/risc0/risc0.git", branch = "release-2.1" }
rs_merkle.workspace = true
sha2.workspace = true

View File

@ -1,283 +0,0 @@
use monotree::database::MemoryDB;
use monotree::hasher::Blake3;
use monotree::{Hasher, Monotree, Proof};
use crate::commitment::Commitment;
use crate::merkle_tree_public::CommitmentHashType;
/// Sparse Merkle tree over commitments, backed by an in-memory monotree.
pub struct CommitmentsSparseMerkleTree {
    /// Current tree root; `None` until the first insert.
    pub curr_root: Option<CommitmentHashType>,
    /// Underlying monotree keyed by commitment hashes (in-memory storage).
    pub tree: Monotree<MemoryDB, Blake3>,
    /// Blake3 hasher instance. NOTE(review): not referenced by any method
    /// in this file — possibly kept for future use.
    pub hasher: Blake3,
}
impl CommitmentsSparseMerkleTree {
    /// Creates an empty tree; the root stays `None` until the first insert.
    pub fn new() -> Self {
        CommitmentsSparseMerkleTree {
            curr_root: None,
            tree: Monotree::default(),
            hasher: Blake3::new(),
        }
    }

    /// Inserts a single commitment (keyed AND valued by its hash) and
    /// advances `curr_root` to the new root.
    ///
    /// NOTE(review): the `[0..32].try_into().unwrap()` conversions panic if
    /// a hash holds fewer than 32 bytes — callers must supply 32-byte hashes.
    pub fn insert_item(&mut self, commitment: Commitment) -> Result<(), monotree::Errors> {
        // Convert the stored Vec<u8> root into the fixed-size form monotree expects.
        let root = self
            .curr_root
            .as_ref()
            .map(|val| val[0..32].try_into().unwrap());
        let new_root = self.tree.insert(
            root,
            &commitment.commitment_hash[0..32].try_into().unwrap(),
            &commitment.commitment_hash[0..32].try_into().unwrap(),
        )?;
        self.curr_root = new_root.map(|val| val.to_vec());
        Ok(())
    }

    /// Batch insert: keys and leaves are both the commitment hashes.
    pub fn insert_items(&mut self, commitments: Vec<Commitment>) -> Result<(), monotree::Errors> {
        let root = self
            .curr_root
            .as_ref()
            .map(|val| val[0..32].try_into().unwrap());
        let hashes: Vec<_> = commitments
            .iter()
            .map(|val| val.commitment_hash[0..32].try_into().unwrap())
            .collect::<Vec<_>>();
        let new_root = self.tree.inserts(root, &hashes, &hashes)?;
        self.curr_root = new_root.map(|val| val[0..32].try_into().unwrap());
        Ok(())
    }

    /// Returns `Ok(true)` when `commitment_hash` is present under the
    /// current root.
    pub fn search_item_inclusion(
        &mut self,
        commitment_hash: CommitmentHashType,
    ) -> Result<bool, monotree::Errors> {
        self.tree
            .get(
                self.curr_root
                    .as_ref()
                    .map(|val| val[0..32].try_into().unwrap()),
                &commitment_hash[0..32].try_into().unwrap(),
            )
            .map(|data| data.is_some())
    }

    /// Inclusion check for many hashes; results keep the input order.
    pub fn search_item_inclusions(
        &mut self,
        commitment_hashes: &[CommitmentHashType],
    ) -> Result<Vec<bool>, monotree::Errors> {
        let mut inclusions = vec![];
        // NOTE(review): loop variable is misnamed — these are commitment
        // hashes, not nullifier hashes (code copied from the nullifier tree).
        for nullifier_hash in commitment_hashes {
            let is_included = self
                .tree
                .get(
                    self.curr_root
                        .as_ref()
                        .map(|val| val[0..32].try_into().unwrap()),
                    nullifier_hash[0..32].try_into().unwrap(),
                )
                .map(|data| data.is_some())?;
            inclusions.push(is_included);
        }
        Ok(inclusions)
    }

    /// Builds a non-membership proof for `commitment_hash` against the
    /// current root.
    ///
    /// # Errors
    /// Fails when the hash IS already a member — absence cannot be proven.
    pub fn get_non_membership_proof(
        &mut self,
        commitment_hash: CommitmentHashType,
    ) -> Result<(Option<Proof>, Option<CommitmentHashType>), monotree::Errors> {
        let is_member = self.search_item_inclusion(commitment_hash.clone())?;
        if is_member {
            Err(monotree::Errors::new("Is a member"))
        } else {
            Ok((
                self.tree.get_merkle_proof(
                    self.curr_root
                        .as_ref()
                        .map(|val| val[0..32].try_into().unwrap()),
                    &commitment_hash,
                )?,
                self.curr_root.clone(),
            ))
        }
    }

    /// Batch form of `get_non_membership_proof`; aborts the whole batch on
    /// the first hash that turns out to be a member.
    #[allow(clippy::type_complexity)]
    pub fn get_non_membership_proofs(
        &mut self,
        commitment_hashes: &[CommitmentHashType],
    ) -> Result<Vec<(Option<Proof>, Option<CommitmentHashType>)>, monotree::Errors> {
        let mut non_membership_proofs = vec![];
        for commitment_hash in commitment_hashes {
            let is_member = self.search_item_inclusion(commitment_hash.clone())?;
            if is_member {
                return Err(monotree::Errors::new(
                    format!("{commitment_hash:?} Is a member").as_str(),
                ));
            } else {
                non_membership_proofs.push((
                    self.tree.get_merkle_proof(
                        self.curr_root
                            .as_ref()
                            .map(|val| val[0..32].try_into().unwrap()),
                        commitment_hash,
                    )?,
                    self.curr_root.clone(),
                ))
            };
        }
        Ok(non_membership_proofs)
    }
}
impl Default for CommitmentsSparseMerkleTree {
fn default() -> Self {
Self::new()
}
}
#[cfg(test)]
mod tests {
    // Lint fixes: dropped unused imports (MemoryDB, Blake3, Monotree,
    // UTXONullifier), renamed the helper to match what it builds, replaced
    // `assert_eq!(x, true/false)` with boolean asserts, removed redundant
    // clones and marked unused proof bindings.
    use super::*;

    /// Wraps a raw hash in a `Commitment` for the tests below.
    fn create_commitment(hash: CommitmentHashType) -> Commitment {
        Commitment {
            commitment_hash: hash,
        }
    }

    #[test]
    fn test_new_tree_initialization() {
        let tree = CommitmentsSparseMerkleTree::new();
        assert!(tree.curr_root.is_none());
    }

    #[test]
    fn test_insert_single_item() {
        let mut tree = CommitmentsSparseMerkleTree::new();
        let commitment = create_commitment([1u8; 32].to_vec()); // Sample 32-byte hash
        let result = tree.insert_item(commitment);
        assert!(result.is_ok());
        assert!(tree.curr_root.is_some());
    }

    #[test]
    fn test_insert_multiple_items() {
        let mut tree = CommitmentsSparseMerkleTree::new();
        let commitments = vec![
            create_commitment([1u8; 32].to_vec()),
            create_commitment([2u8; 32].to_vec()),
            create_commitment([3u8; 32].to_vec()),
        ];
        let result = tree.insert_items(commitments);
        assert!(result.is_ok());
        assert!(tree.curr_root.is_some());
    }

    #[test]
    fn test_search_item_inclusion() {
        let mut tree = CommitmentsSparseMerkleTree::new();
        let commitment = create_commitment([1u8; 32].to_vec());
        tree.insert_item(commitment).unwrap();
        let result = tree.search_item_inclusion([1u8; 32].to_vec());
        assert!(result.is_ok());
        assert!(result.unwrap());
        let non_existing = tree.search_item_inclusion([99u8; 32].to_vec());
        assert!(non_existing.is_ok());
        assert!(!non_existing.unwrap());
    }

    #[test]
    fn test_search_multiple_item_inclusions() {
        let mut tree = CommitmentsSparseMerkleTree::new();
        let commitments = vec![
            create_commitment([1u8; 32].to_vec()),
            create_commitment([2u8; 32].to_vec()),
            create_commitment([3u8; 32].to_vec()),
        ];
        tree.insert_items(commitments).unwrap();
        let search_hashes = vec![[1u8; 32].to_vec(), [2u8; 32].to_vec(), [99u8; 32].to_vec()];
        let result = tree.search_item_inclusions(&search_hashes);
        assert!(result.is_ok());
        assert_eq!(result.unwrap(), vec![true, true, false]);
    }

    #[test]
    fn test_non_membership_proof() {
        let mut tree = CommitmentsSparseMerkleTree::new();
        let non_member_hash = [5u8; 32].to_vec();
        let result = tree.get_non_membership_proof(non_member_hash);
        assert!(result.is_ok());
        let (_proof, root) = result.unwrap();
        // The tree is empty, so the proof is rooted in `None`.
        assert!(root.is_none());
    }

    #[test]
    fn test_non_membership_proofs_multiple() {
        let mut tree = CommitmentsSparseMerkleTree::new();
        let non_member_hashes = vec![[5u8; 32].to_vec(), [6u8; 32].to_vec(), [7u8; 32].to_vec()];
        let result = tree.get_non_membership_proofs(&non_member_hashes);
        assert!(result.is_ok());
        for (_proof, root) in result.unwrap() {
            assert!(root.is_none());
        }
    }

    #[test]
    fn test_insert_and_get_proof_of_existing_item() {
        let mut tree = CommitmentsSparseMerkleTree::new();
        let commitment = create_commitment([1u8; 32].to_vec());
        tree.insert_item(commitment).unwrap();
        let proof_result = tree.get_non_membership_proof([1u8; 32].to_vec());
        assert!(proof_result.is_err());
    }

    #[test]
    fn test_insert_and_get_proofs_of_existing_items() {
        let mut tree = CommitmentsSparseMerkleTree::new();
        let commitments = vec![
            create_commitment([1u8; 32].to_vec()),
            create_commitment([2u8; 32].to_vec()),
        ];
        tree.insert_items(commitments).unwrap();
        let proof_result =
            tree.get_non_membership_proofs(&[[1u8; 32].to_vec(), [2u8; 32].to_vec()]);
        assert!(proof_result.is_err());
    }
}

View File

@ -3,10 +3,8 @@ use serde::Deserialize;
pub mod block;
pub mod commitment;
pub mod commitments_sparse_merkle_tree;
pub mod merkle_tree_public;
pub mod nullifier;
pub mod nullifier_sparse_merkle_tree;
pub mod rpc_primitives;
pub mod transaction;
pub mod utxo_commitment;
@ -67,8 +65,6 @@ pub enum ExecutionFailureKind {
AmountMismatchError,
#[error("Sequencer client error: {0:?}")]
SequencerClientError(#[from] SequencerClientError),
#[error("Datebase returned error : {0:?}")]
MonoTreeError(#[from] monotree::Errors),
#[error("Insufficient gas for operation")]
InsufficientGasError,
#[error("Can not pay for operation")]

View File

@ -1,6 +1,11 @@
use std::collections::HashMap;
use std::{collections::HashMap, fmt, marker::PhantomData};
use rs_merkle::{MerkleProof, MerkleTree};
use serde::{
de::{SeqAccess, Visitor},
ser::SerializeSeq,
Deserialize, Deserializer, Serialize,
};
use crate::{transaction::Transaction, utxo_commitment::UTXOCommitment};
@ -12,6 +17,70 @@ pub struct HashStorageMerkleTree<Leav: TreeLeavItem + Clone> {
tree: MerkleTree<OwnHasher>,
}
impl<Leav: TreeLeavItem + Clone + Serialize> Serialize for HashStorageMerkleTree<Leav> {
    /// Serializes the tree as a sequence of leaves ordered by their hash
    /// key, so the output is deterministic regardless of `HashMap`
    /// iteration order.
    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
    where
        S: serde::Serializer,
    {
        let mut entries: Vec<_> = self.leaves.iter().collect();
        entries.sort_by(|lhs, rhs| lhs.0.cmp(rhs.0));
        let mut seq = serializer.serialize_seq(Some(entries.len()))?;
        for (_, leaf) in entries {
            seq.serialize_element(leaf)?;
        }
        seq.end()
    }
}
/// Zero-sized serde visitor used to rebuild a `HashStorageMerkleTree`
/// from a serialized leaf sequence.
struct HashStorageMerkleTreeDeserializer<Leav: TreeLeavItem + Clone> {
    // PhantomData ties the visitor to its output type without storing data;
    // the `fn() -> T` form keeps the visitor Send/Sync regardless of `Leav`.
    marker: PhantomData<fn() -> HashStorageMerkleTree<Leav>>,
}
impl<Leaf: TreeLeavItem + Clone> HashStorageMerkleTreeDeserializer<Leaf> {
    /// Creates the (zero-sized) visitor.
    fn new() -> Self {
        Self {
            marker: PhantomData,
        }
    }
}
impl<'de, Leav: TreeLeavItem + Clone + Deserialize<'de>> Visitor<'de>
    for HashStorageMerkleTreeDeserializer<Leav>
{
    type Value = HashStorageMerkleTree<Leav>;

    fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
        formatter.write_str("HashStorageMerkleTree key value sequence.")
    }

    /// Collects every leaf from the serialized sequence and rebuilds the
    /// Merkle tree from them.
    fn visit_seq<A>(self, mut seq: A) -> Result<Self::Value, A::Error>
    where
        A: SeqAccess<'de>,
    {
        // Preallocate from the serializer's length hint when available;
        // `while let` replaces the previous manual loop/if-let/break.
        let mut leaves = Vec::with_capacity(seq.size_hint().unwrap_or(0));
        while let Some(leaf) = seq.next_element::<Leav>()? {
            leaves.push(leaf);
        }
        Ok(HashStorageMerkleTree::new(leaves))
    }
}
impl<'de, Leav: TreeLeavItem + Clone + Deserialize<'de>> serde::Deserialize<'de>
    for HashStorageMerkleTree<Leav>
{
    /// Deserializes by delegating to the sequence visitor.
    fn deserialize<D: Deserializer<'de>>(deserializer: D) -> Result<Self, D::Error> {
        let visitor = HashStorageMerkleTreeDeserializer::new();
        deserializer.deserialize_seq(visitor)
    }
}
pub type PublicTransactionMerkleTree = HashStorageMerkleTree<Transaction>;
pub type UTXOCommitmentsMerkleTree = HashStorageMerkleTree<UTXOCommitment>;
@ -101,7 +170,7 @@ mod tests {
use super::*;
// Mock implementation of TreeLeavItem trait for testing
#[derive(Debug, Clone, PartialEq, Eq, Hash)]
#[derive(Debug, Clone, PartialEq, Eq, Hash, Serialize, Deserialize)]
struct MockTransaction {
pub hash: TreeHashType,
}
@ -136,6 +205,26 @@ mod tests {
assert!(tree.get_root().is_some());
}
#[test]
fn test_new_merkle_tree_serialize() {
let tx1 = MockTransaction {
hash: get_first_32_bytes("tx1"),
};
let tx2 = MockTransaction {
hash: get_first_32_bytes("tx2"),
};
let tree = HashStorageMerkleTree::new(vec![tx1.clone(), tx2.clone()]);
let binding = serde_json::to_vec(&tree).unwrap();
let obj: HashStorageMerkleTree<MockTransaction> = serde_json::from_slice(&binding).unwrap();
assert_eq!(tree.leaves, obj.leaves);
assert_eq!(tree.hash_to_id_map, obj.hash_to_id_map);
assert_eq!(tree.tree.root(), obj.tree.root());
}
#[test]
fn test_get_tx() {
let tx1 = MockTransaction {

View File

@ -3,7 +3,7 @@ use serde::{Deserialize, Serialize};
use crate::merkle_tree_public::TreeHashType;
//ToDo: Update Nullifier model, when it is clear
#[derive(Debug, Serialize, Deserialize, Clone, Default, PartialEq, Eq)]
#[derive(Debug, Serialize, Deserialize, Clone, Default, PartialEq, Eq, Hash)]
///General nullifier object
pub struct UTXONullifier {
pub utxo_hash: TreeHashType,

View File

@ -1,245 +0,0 @@
use monotree::database::MemoryDB;
use monotree::hasher::Blake3;
use monotree::{Hasher, Monotree, Proof};
use crate::merkle_tree_public::TreeHashType;
use crate::nullifier::UTXONullifier;
/// Sparse Merkle tree over UTXO nullifiers, backed by an in-memory monotree.
pub struct NullifierSparseMerkleTree {
    /// Current tree root; `None` until the first insert.
    pub curr_root: Option<TreeHashType>,
    /// Underlying monotree keyed by nullifier hashes (in-memory storage).
    pub tree: Monotree<MemoryDB, Blake3>,
    /// Blake3 hasher instance. NOTE(review): not referenced by any method
    /// in this file — possibly kept for future use.
    pub hasher: Blake3,
}
impl NullifierSparseMerkleTree {
    /// Creates an empty tree; the root stays `None` until the first insert.
    pub fn new() -> Self {
        NullifierSparseMerkleTree {
            curr_root: None,
            tree: Monotree::default(),
            hasher: Blake3::new(),
        }
    }

    /// Inserts one nullifier (keyed AND valued by its UTXO hash) and
    /// advances `curr_root` to the new root.
    pub fn insert_item(&mut self, nullifier: UTXONullifier) -> Result<(), monotree::Errors> {
        let root = self.curr_root.as_ref();
        let new_root = self
            .tree
            .insert(root, &nullifier.utxo_hash, &nullifier.utxo_hash)?;
        self.curr_root = new_root;
        Ok(())
    }

    /// Batch insert: keys and leaves are both the UTXO hashes.
    pub fn insert_items(&mut self, nullifiers: Vec<UTXONullifier>) -> Result<(), monotree::Errors> {
        let root = self.curr_root.as_ref();
        let hashes: Vec<TreeHashType> = nullifiers.iter().map(|nu| nu.utxo_hash).collect();
        let new_root = self.tree.inserts(root, &hashes, &hashes)?;
        self.curr_root = new_root;
        Ok(())
    }

    /// Returns `Ok(true)` when `nullifier_hash` is present under the
    /// current root.
    pub fn search_item_inclusion(
        &mut self,
        nullifier_hash: TreeHashType,
    ) -> Result<bool, monotree::Errors> {
        self.tree
            .get(self.curr_root.as_ref(), &nullifier_hash)
            .map(|data| data.is_some())
    }

    /// Inclusion check for many hashes; results keep the input order.
    pub fn search_item_inclusions(
        &mut self,
        nullifier_hashes: &[TreeHashType],
    ) -> Result<Vec<bool>, monotree::Errors> {
        // Preallocate: one answer per queried hash.
        let mut inclusions = Vec::with_capacity(nullifier_hashes.len());
        for nullifier_hash in nullifier_hashes {
            let is_included = self
                .tree
                .get(self.curr_root.as_ref(), nullifier_hash)
                .map(|data| data.is_some())?;
            inclusions.push(is_included);
        }
        Ok(inclusions)
    }

    /// Builds a non-membership proof for `nullifier_hash` against the
    /// current root.
    ///
    /// # Errors
    /// Fails when the hash IS already a member — absence cannot be proven.
    pub fn get_non_membership_proof(
        &mut self,
        nullifier_hash: TreeHashType,
    ) -> Result<(Option<Proof>, Option<TreeHashType>), monotree::Errors> {
        let is_member = self.search_item_inclusion(nullifier_hash)?;
        if is_member {
            Err(monotree::Errors::new("Is a member"))
        } else {
            Ok((
                self.tree
                    .get_merkle_proof(self.curr_root.as_ref(), &nullifier_hash)?,
                self.curr_root,
            ))
        }
    }

    /// Batch form of `get_non_membership_proof`; aborts the whole batch on
    /// the first hash that turns out to be a member.
    #[allow(clippy::type_complexity)]
    pub fn get_non_membership_proofs(
        &mut self,
        nullifier_hashes: &[TreeHashType],
    ) -> Result<Vec<(Option<Proof>, Option<TreeHashType>)>, monotree::Errors> {
        let mut non_membership_proofs = Vec::with_capacity(nullifier_hashes.len());
        for nullifier_hash in nullifier_hashes {
            // Clippy redundant-else fix: return early instead of branching.
            if self.search_item_inclusion(*nullifier_hash)? {
                return Err(monotree::Errors::new(
                    format!("{nullifier_hash:?} Is a member").as_str(),
                ));
            }
            non_membership_proofs.push((
                self.tree
                    .get_merkle_proof(self.curr_root.as_ref(), nullifier_hash)?,
                self.curr_root,
            ));
        }
        Ok(non_membership_proofs)
    }
}
impl Default for NullifierSparseMerkleTree {
fn default() -> Self {
Self::new()
}
}
#[cfg(test)]
mod tests {
    // Lint fixes: boolean asserts instead of `assert_eq!(x, true/false)`,
    // `_proof` for intentionally unused bindings, redundant `.clone()`s
    // removed.
    use super::*;
    use crate::nullifier::UTXONullifier;

    /// Wraps a raw hash in a nullifier for the tests below.
    fn create_nullifier(hash: TreeHashType) -> UTXONullifier {
        UTXONullifier { utxo_hash: hash }
    }

    #[test]
    fn test_new_tree_initialization() {
        let tree = NullifierSparseMerkleTree::new();
        assert!(tree.curr_root.is_none());
    }

    #[test]
    fn test_insert_single_item() {
        let mut tree = NullifierSparseMerkleTree::new();
        let nullifier = create_nullifier([1u8; 32]); // Sample 32-byte hash
        let result = tree.insert_item(nullifier);
        assert!(result.is_ok());
        assert!(tree.curr_root.is_some());
    }

    #[test]
    fn test_insert_multiple_items() {
        let mut tree = NullifierSparseMerkleTree::new();
        let nullifiers = vec![
            create_nullifier([1u8; 32]),
            create_nullifier([2u8; 32]),
            create_nullifier([3u8; 32]),
        ];
        let result = tree.insert_items(nullifiers);
        assert!(result.is_ok());
        assert!(tree.curr_root.is_some());
    }

    #[test]
    fn test_search_item_inclusion() {
        let mut tree = NullifierSparseMerkleTree::new();
        let nullifier = create_nullifier([1u8; 32]);
        tree.insert_item(nullifier).unwrap();
        let result = tree.search_item_inclusion([1u8; 32]);
        assert!(result.is_ok());
        assert!(result.unwrap());
        let non_existing = tree.search_item_inclusion([99u8; 32]);
        assert!(non_existing.is_ok());
        assert!(!non_existing.unwrap());
    }

    #[test]
    fn test_search_multiple_item_inclusions() {
        let mut tree = NullifierSparseMerkleTree::new();
        let nullifiers = vec![
            create_nullifier([1u8; 32]),
            create_nullifier([2u8; 32]),
            create_nullifier([3u8; 32]),
        ];
        tree.insert_items(nullifiers).unwrap();
        let search_hashes = vec![[1u8; 32], [2u8; 32], [99u8; 32]];
        let result = tree.search_item_inclusions(&search_hashes);
        assert!(result.is_ok());
        assert_eq!(result.unwrap(), vec![true, true, false]);
    }

    #[test]
    fn test_non_membership_proof() {
        let mut tree = NullifierSparseMerkleTree::new();
        let non_member_hash = [5u8; 32];
        let result = tree.get_non_membership_proof(non_member_hash);
        assert!(result.is_ok());
        let (_proof, root) = result.unwrap();
        // The tree is empty, so the proof is rooted in `None`.
        assert!(root.is_none());
    }

    #[test]
    fn test_non_membership_proofs_multiple() {
        let mut tree = NullifierSparseMerkleTree::new();
        let non_member_hashes = vec![[5u8; 32], [6u8; 32], [7u8; 32]];
        let result = tree.get_non_membership_proofs(&non_member_hashes);
        assert!(result.is_ok());
        for (_proof, root) in result.unwrap() {
            assert!(root.is_none());
        }
    }

    #[test]
    fn test_insert_and_get_proof_of_existing_item() {
        let mut tree = NullifierSparseMerkleTree::new();
        let nullifier = create_nullifier([1u8; 32]);
        tree.insert_item(nullifier).unwrap();
        let proof_result = tree.get_non_membership_proof([1u8; 32]);
        assert!(proof_result.is_err());
    }

    #[test]
    fn test_insert_and_get_proofs_of_existing_items() {
        let mut tree = NullifierSparseMerkleTree::new();
        let nullifiers = vec![create_nullifier([1u8; 32]), create_nullifier([2u8; 32])];
        tree.insert_items(nullifiers).unwrap();
        let proof_result = tree.get_non_membership_proofs(&[[1u8; 32], [2u8; 32]]);
        assert!(proof_result.is_err());
    }
}

View File

@ -21,8 +21,8 @@ pub struct RegisterAccountRequest {
#[derive(Serialize, Deserialize, Debug)]
pub struct SendTxRequest {
pub transaction: Transaction,
///Nullifier Root, UTXO Commitment Root, Pub Tx Root
pub tx_roots: [[u8; 32]; 3],
///UTXO Commitment Root, Pub Tx Root
pub tx_roots: [[u8; 32]; 2],
}
#[derive(Serialize, Deserialize, Debug)]

View File

@ -12,14 +12,13 @@ serde.workspace = true
rand.workspace = true
k256.workspace = true
sha2.workspace = true
monotree.workspace = true
bincode.workspace = true
elliptic-curve.workspace = true
reqwest.workspace = true
thiserror.workspace = true
tokio.workspace = true
tempfile.workspace = true
risc0-zkvm = { git = "https://github.com/risc0/risc0.git", branch = "release-2.0" }
risc0-zkvm = { git = "https://github.com/risc0/risc0.git", branch = "release-2.1" }
hex.workspace = true
actix-rt.workspace = true

View File

@ -1,10 +1,19 @@
use std::collections::{HashMap, HashSet};
use std::path::Path;
use accounts::account_core::Account;
use anyhow::{anyhow, Result};
use common::block::Block;
use common::merkle_tree_public::merkle_tree::HashStorageMerkleTree;
use common::nullifier::UTXONullifier;
use common::transaction::Transaction;
use common::utxo_commitment::UTXOCommitment;
use log::error;
use storage::sc_db_utils::{DataBlob, DataBlobChangeVariant};
use storage::RocksDBIO;
use crate::chain_storage::AccMap;
pub struct NodeBlockStore {
dbio: RocksDBIO,
}
@ -21,9 +30,9 @@ impl NodeBlockStore {
}
///Reopening existing database
pub fn open_db_restart(location: &Path) -> Result<Self> {
pub fn open_db_restart(location: &Path, genesis_block: Block) -> Result<Self> {
NodeBlockStore::db_destroy(location)?;
NodeBlockStore::open_db_with_genesis(location, None)
NodeBlockStore::open_db_with_genesis(location, Some(genesis_block))
}
///Reloading existing database
@ -56,6 +65,65 @@ impl NodeBlockStore {
pub fn get_sc_sc_state(&self, sc_addr: &str) -> Result<Vec<DataBlob>> {
Ok(self.dbio.get_sc_sc_state(sc_addr)?)
}
/// Returns the block id of the most recently persisted state snapshot.
pub fn get_snapshot_block_id(&self) -> Result<u64> {
    let snapshot_id = self.dbio.get_snapshot_block_id()?;
    Ok(snapshot_id)
}
/// Loads the snapshotted account map, converting the hex-keyed
/// serialization wrapper back to the raw-address map used in memory.
pub fn get_snapshot_account(&self) -> Result<HashMap<[u8; 32], Account>> {
    let raw = self.dbio.get_snapshot_account()?;
    let wrapper: AccMap = serde_json::from_slice(&raw)?;
    let accounts: HashMap<[u8; 32], Account> = wrapper.into();
    Ok(accounts)
}
/// Deserializes the snapshotted UTXO-commitment Merkle tree from the DB.
pub fn get_snapshot_commitment(&self) -> Result<HashStorageMerkleTree<UTXOCommitment>> {
    let bytes = self.dbio.get_snapshot_commitment()?;
    let tree = serde_json::from_slice(&bytes)?;
    Ok(tree)
}
/// Deserializes the snapshotted nullifier set from the DB.
pub fn get_snapshot_nullifier(&self) -> Result<HashSet<UTXONullifier>> {
    let bytes = self.dbio.get_snapshot_nullifier()?;
    let nullifiers = serde_json::from_slice(&bytes)?;
    Ok(nullifiers)
}
/// Deserializes the snapshotted public-transaction Merkle tree from the DB.
pub fn get_snapshot_transaction(&self) -> Result<HashStorageMerkleTree<Transaction>> {
    let bytes = self.dbio.get_snapshot_transaction()?;
    let tree = serde_json::from_slice(&bytes)?;
    Ok(tree)
}
/// Persists a full state snapshot taken at block `id`.
///
/// All arguments are pre-serialized blobs (the caller serializes; this
/// method only writes). Writes happen in order: block id, accounts,
/// commitments, transactions, nullifiers. Each failing write is logged
/// at error level and then propagated, aborting the remaining writes.
///
/// NOTE(review): a failure after the block id is stored leaves a partial
/// snapshot on disk (id present, some state blobs stale/missing) — confirm
/// that snapshot readers tolerate this, or write the id last.
pub fn put_snapshot_at_block_id(
    &self,
    id: u64,
    accounts_ser: Vec<u8>,
    comm_ser: Vec<u8>,
    txs_ser: Vec<u8>,
    nullifiers_ser: Vec<u8>,
) -> Result<()> {
    //Error notification for writing into DB error
    self.dbio
        .put_snapshot_block_id_db(id)
        .inspect_err(|err| error!("Failed to store snapshot block id with error {err:#?}"))?;
    self.dbio
        .put_snapshot_account_db(accounts_ser)
        .inspect_err(|err| error!("Failed to store snapshot accounts with error {err:#?}"))?;
    self.dbio
        .put_snapshot_commitement_db(comm_ser)
        .inspect_err(|err| {
            error!("Failed to store snapshot commitments with error {err:#?}")
        })?;
    self.dbio
        .put_snapshot_transaction_db(txs_ser)
        .inspect_err(|err| {
            error!("Failed to store snapshot transactions with error {err:#?}")
        })?;
    self.dbio
        .put_snapshot_nullifier_db(nullifiers_ser)
        .inspect_err(|err| error!("Failed to store snapshot nullifiers with error {err:#?}"))?;
    Ok(())
}
}
#[cfg(test)]
@ -107,13 +175,26 @@ mod tests {
let path = temp_dir.path();
let genesis_block = create_genesis_block();
let _ = NodeBlockStore::open_db_with_genesis(path, Some(genesis_block)).unwrap();
{
let node_store_old =
NodeBlockStore::open_db_with_genesis(path, Some(genesis_block.clone())).unwrap();
let block = create_sample_block(1, 0);
node_store_old.put_block_at_id(block.clone()).unwrap();
}
// Check that the first block is still in the old database
{
let node_store_old = NodeBlockStore::open_db_reload(path).unwrap();
let result = node_store_old.get_block_at_id(1);
assert!(result.is_ok());
}
// Restart the database
let node_store = NodeBlockStore::open_db_restart(path).unwrap();
let node_store = NodeBlockStore::open_db_restart(path, genesis_block).unwrap();
// The block should no longer be available since no genesis block is set on restart
let result = node_store.get_block_at_id(0);
// The block should no longer be available since no first block is set on restart
let result = node_store.get_block_at_id(1);
assert!(result.is_err());
}
@ -150,13 +231,39 @@ mod tests {
}
#[test]
fn test_get_block_not_found() {
fn test_put_snapshot_at_block_id() {
let temp_dir = tempdir().unwrap();
let path = temp_dir.path();
let node_store = NodeBlockStore::open_db_with_genesis(path, None).unwrap();
let genesis_block = create_genesis_block();
let node_store = NodeBlockStore::open_db_with_genesis(path, Some(genesis_block)).unwrap();
let result = node_store.get_block_at_id(42);
assert!(result.is_err());
let id = 3;
let accounts_ser = vec![1, 2, 3, 4];
let comm_ser = vec![5, 6, 7, 8];
let txs_ser = vec![9, 10, 11, 12];
let nullifiers_ser = vec![13, 14, 15, 16];
node_store
.put_snapshot_at_block_id(
id,
accounts_ser.clone(),
comm_ser.clone(),
txs_ser.clone(),
nullifiers_ser.clone(),
)
.unwrap();
assert_eq!(node_store.dbio.get_snapshot_block_id().unwrap(), id);
assert_eq!(
node_store.dbio.get_snapshot_account().unwrap(),
accounts_ser
);
assert_eq!(node_store.dbio.get_snapshot_commitment().unwrap(), comm_ser);
assert_eq!(node_store.dbio.get_snapshot_transaction().unwrap(), txs_ser);
assert_eq!(
node_store.dbio.get_snapshot_nullifier().unwrap(),
nullifiers_ser
);
}
}

View File

@ -1,7 +1,4 @@
use std::{
collections::{BTreeMap, HashMap},
path::Path,
};
use std::collections::{BTreeMap, HashMap, HashSet};
use accounts::account_core::{Account, AccountAddress};
use anyhow::Result;
@ -10,50 +7,125 @@ use common::{
block::Block,
merkle_tree_public::merkle_tree::{PublicTransactionMerkleTree, UTXOCommitmentsMerkleTree},
nullifier::UTXONullifier,
nullifier_sparse_merkle_tree::NullifierSparseMerkleTree,
utxo_commitment::UTXOCommitment,
};
use k256::AffinePoint;
use log::{info, warn};
use public_context::PublicSCContext;
use serde::{Deserialize, Serialize};
use utxo::utxo_core::UTXO;
use crate::ActionData;
use crate::{config::NodeConfig, ActionData};
pub mod accounts_store;
pub mod block_store;
pub mod public_context;
/// Serialization-friendly wrapper for the in-memory account map.
///
/// JSON object keys must be strings, so the raw `[u8; 32]` account
/// addresses are hex-encoded into `String` keys for snapshot persistence;
/// the `From` conversions below perform the round trip.
#[derive(Deserialize, Serialize)]
pub struct AccMap {
    // Hex-encoded account address -> account state.
    pub acc_map: HashMap<String, Account>,
}
impl From<HashMap<[u8; 32], Account>> for AccMap {
    /// Hex-encodes every raw address key so the map can be serialized as JSON.
    fn from(value: HashMap<[u8; 32], Account>) -> Self {
        let mut acc_map = HashMap::with_capacity(value.len());
        for (address, account) in value {
            acc_map.insert(hex::encode(address), account);
        }
        AccMap { acc_map }
    }
}
impl From<AccMap> for HashMap<[u8; 32], Account> {
    /// Decodes the hex-string keys produced by `AccMap::from` back into
    /// raw 32-byte account addresses.
    ///
    /// # Panics
    /// Panics if a key is not valid hex or does not decode to exactly
    /// 32 bytes. Snapshots are written by this same process, so either
    /// case indicates a corrupted snapshot; `expect` replaces the bare
    /// `unwrap` so the panic message identifies which invariant broke.
    fn from(value: AccMap) -> Self {
        value
            .acc_map
            .into_iter()
            .map(|(key, val)| {
                let raw = hex::decode(&key).expect("snapshot account key is not valid hex");
                let addr: [u8; 32] = raw
                    .try_into()
                    .expect("snapshot account key does not decode to 32 bytes");
                (addr, val)
            })
            .collect()
    }
}
pub struct NodeChainStore {
pub acc_map: HashMap<AccountAddress, Account>,
pub block_store: NodeBlockStore,
pub nullifier_store: NullifierSparseMerkleTree,
pub nullifier_store: HashSet<UTXONullifier>,
pub utxo_commitments_store: UTXOCommitmentsMerkleTree,
pub pub_tx_store: PublicTransactionMerkleTree,
pub node_config: NodeConfig,
}
impl NodeChainStore {
pub fn new_with_genesis(home_dir: &Path, genesis_block: Block) -> Self {
let acc_map = HashMap::new();
let nullifier_store = NullifierSparseMerkleTree::default();
let utxo_commitments_store = UTXOCommitmentsMerkleTree::new(vec![]);
let pub_tx_store = PublicTransactionMerkleTree::new(vec![]);
pub fn new(config: NodeConfig, genesis_block: Block) -> Result<(Self, u64)> {
let mut acc_map = HashMap::new();
let mut nullifier_store = HashSet::new();
let mut utxo_commitments_store = UTXOCommitmentsMerkleTree::new(vec![]);
let mut pub_tx_store = PublicTransactionMerkleTree::new(vec![]);
let mut block_id = genesis_block.block_id;
//Sequencer should panic if unable to open db,
//as fixing this issue may require actions non-native to program scope
let block_store =
NodeBlockStore::open_db_with_genesis(&home_dir.join("rocksdb"), Some(genesis_block))
NodeBlockStore::open_db_with_genesis(&config.home.join("rocksdb"), Some(genesis_block))
.unwrap();
Self {
acc_map,
block_store,
nullifier_store,
utxo_commitments_store,
pub_tx_store,
if let Ok(temp_block_id) = block_store.get_snapshot_block_id() {
utxo_commitments_store = block_store.get_snapshot_commitment()?;
nullifier_store = block_store.get_snapshot_nullifier()?;
acc_map = block_store.get_snapshot_account()?;
pub_tx_store = block_store.get_snapshot_transaction()?;
block_id = temp_block_id;
}
Ok((
Self {
acc_map: From::from(acc_map),
block_store,
nullifier_store,
utxo_commitments_store,
pub_tx_store,
node_config: config,
},
block_id,
))
}
/// Rebuilds the chain store from an existing on-disk database.
///
/// When a persisted snapshot exists, all in-memory stores are restored
/// from it and the snapshot's block id is returned as the resume height;
/// otherwise the stores start empty and the genesis block id is returned.
pub fn new_after_restart(config: NodeConfig, genesis_block: Block) -> Result<(Self, u64)> {
    // Opening the database must succeed; recovering from a broken DB is
    // outside the scope of the node process, so panic on failure.
    let block_store = NodeBlockStore::open_db_reload(&config.home.join("rocksdb")).unwrap();

    // Default (no-snapshot) state; loads below keep the original call
    // order: commitments, nullifiers, accounts, transactions.
    let (utxo_commitments_store, nullifier_store, acc_map, pub_tx_store, block_id) =
        match block_store.get_snapshot_block_id() {
            Ok(snapshot_id) => (
                block_store.get_snapshot_commitment()?,
                block_store.get_snapshot_nullifier()?,
                block_store.get_snapshot_account()?,
                block_store.get_snapshot_transaction()?,
                snapshot_id,
            ),
            Err(_) => (
                UTXOCommitmentsMerkleTree::new(vec![]),
                HashSet::new(),
                HashMap::new(),
                PublicTransactionMerkleTree::new(vec![]),
                genesis_block.block_id,
            ),
        };

    Ok((
        Self {
            acc_map,
            block_store,
            nullifier_store,
            utxo_commitments_store,
            pub_tx_store,
            node_config: config,
        },
        block_id,
    ))
}
pub fn dissect_insert_block(&mut self, block: Block) -> Result<()> {
let block_id = block.block_id;
for tx in &block.transactions {
if !tx.execution_input.is_empty() {
let public_action = serde_json::from_slice::<ActionData>(&tx.execution_input);
@ -97,13 +169,11 @@ impl NodeChainStore {
.collect(),
);
self.nullifier_store.insert_items(
tx.nullifier_created_hashes
.clone()
.into_iter()
.map(|hash| UTXONullifier { utxo_hash: hash })
.collect(),
)?;
for nullifier in tx.nullifier_created_hashes.iter() {
self.nullifier_store.insert(UTXONullifier {
utxo_hash: *nullifier,
});
}
if !tx.encoded_data.is_empty() {
let ephemeral_public_key_sender =
@ -114,7 +184,7 @@ impl NodeChainStore {
let nonce =
accounts::key_management::constants_types::Nonce::clone_from_slice(slice);
for (acc_id, acc) in self.acc_map.iter_mut() {
if acc_id[0] == tag {
if hex::decode(acc_id).unwrap()[0] == tag {
let decoded_data_curr_acc = acc.decrypt_data(
ephemeral_public_key_sender,
ciphertext.clone(),
@ -125,7 +195,7 @@ impl NodeChainStore {
serde_json::from_slice::<UTXO>(&decoded_data_curr_acc);
if let Ok(utxo) = decoded_utxo_try {
if &utxo.owner == acc_id {
acc.utxo_tree.insert_item(utxo)?;
acc.utxos.insert(utxo.hash, utxo);
}
}
}
@ -139,6 +209,48 @@ impl NodeChainStore {
self.block_store.put_block_at_id(block)?;
//Snapshot
if block_id % self.node_config.shapshot_frequency_in_blocks == 0 {
//Serializing all important data structures
//If we fail serialization, it is not the reason to stop running
//Logging on warn level in this cases
let acc_map: AccMap = self.acc_map.clone().into();
if let Ok(accounts_ser) = serde_json::to_vec(&acc_map).inspect_err(|err| {
warn!("Failed to serialize accounts data {err:#?}");
}) {
if let Ok(comm_ser) =
serde_json::to_vec(&self.utxo_commitments_store).inspect_err(|err| {
warn!("Failed to serialize commitments {err:#?}");
})
{
if let Ok(txs_ser) = serde_json::to_vec(&self.pub_tx_store).inspect_err(|err| {
warn!("Failed to serialize transactions {err:#?}");
}) {
if let Ok(nullifiers_ser) = serde_json::to_vec(&self.nullifier_store)
.inspect_err(|err| {
warn!("Failed to serialize nullifiers {err:#?}");
})
{
let snapshot_trace = self.block_store.put_snapshot_at_block_id(
block_id,
accounts_ser,
comm_ser,
txs_ser,
nullifiers_ser,
);
info!(
"Snapshot executed at {:?} with results {snapshot_trace:#?}",
block_id
);
}
}
}
}
}
Ok(())
}
@ -153,9 +265,178 @@ impl NodeChainStore {
caller_address: caller,
caller_balance: self.acc_map.get(&caller).unwrap().balance,
account_masks,
nullifier_store_root: self.nullifier_store.curr_root.unwrap_or([0; 32]),
comitment_store_root: self.utxo_commitments_store.get_root().unwrap_or([0; 32]),
pub_tx_store_root: self.pub_tx_store.get_root().unwrap_or([0; 32]),
}
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::config::GasConfig;
use accounts::account_core::Account;
use common::block::{Block, Data};
use common::merkle_tree_public::TreeHashType;
use common::transaction::{Transaction, TxKind};
use secp256k1_zkp::Tweak;
use std::path::PathBuf;
use tempfile::tempdir;
/// Genesis-block fixture: block 0 with fixed placeholder hashes and no
/// transactions.
fn create_genesis_block() -> Block {
    Block {
        data: Data::default(),
        transactions: Vec::new(),
        hash: [1; 32],
        prev_block_hash: [0; 32],
        prev_block_id: 0,
        block_id: 0,
    }
}
/// Private-kind transaction fixture with the given hash and
/// commitment/nullifier hash lists; every other field is a placeholder.
fn create_dummy_transaction(
    hash: TreeHashType,
    nullifier_created_hashes: Vec<[u8; 32]>,
    utxo_commitments_spent_hashes: Vec<[u8; 32]>,
    utxo_commitments_created_hashes: Vec<[u8; 32]>,
) -> Transaction {
    let mut tweak_rng = rand::thread_rng();
    Transaction {
        hash,
        tx_kind: TxKind::Private,
        execution_input: Vec::new(),
        execution_output: Vec::new(),
        utxo_commitments_spent_hashes,
        utxo_commitments_created_hashes,
        nullifier_created_hashes,
        execution_proof_private: String::from("dummy_proof"),
        encoded_data: Vec::new(),
        ephemeral_pub_key: vec![10, 11, 12],
        commitment: Vec::new(),
        // Random tweak only — no cryptographic meaning in tests.
        tweak: Tweak::new(&mut tweak_rng),
        secret_r: [0; 32],
        sc_addr: String::from("sc_addr"),
        state_changes: (serde_json::Value::Null, 0),
    }
}
/// Empty-block fixture with the given ids and fixed placeholder hashes.
fn create_sample_block(block_id: u64, prev_block_id: u64) -> Block {
    Block {
        // Field-init shorthand (clippy::redundant_field_names).
        block_id,
        prev_block_id,
        prev_block_hash: [0; 32],
        hash: [1; 32],
        transactions: vec![],
        data: Data::default(),
    }
}
/// Node-configuration fixture rooted at `home`.
/// Snapshot frequency is 1 so every inserted block exercises the
/// snapshot code path in these tests.
fn create_sample_node_config(home: PathBuf) -> NodeConfig {
    NodeConfig {
        home,
        override_rust_log: None,
        sequencer_addr: "http://127.0.0.1".to_string(),
        seq_poll_timeout_secs: 1,
        port: 8000,
        gas_config: create_sample_gas_config(),
        // Field name typo ("shapshot") comes from NodeConfig's definition.
        shapshot_frequency_in_blocks: 1,
    }
}
/// Gas-configuration fixture with all fees, costs and limits zeroed,
/// so gas accounting never interferes with these storage tests.
fn create_sample_gas_config() -> GasConfig {
    GasConfig {
        gas_fee_per_byte_deploy: 0,
        gas_fee_per_input_buffer_runtime: 0,
        gas_fee_per_byte_runtime: 0,
        gas_cost_runtime: 0,
        gas_cost_deploy: 0,
        gas_limit_deploy: 0,
        gas_limit_runtime: 0,
    }
}
/// UTXO fixture owned by `address` holding `amount`.
/// NOTE(review): the empty vec and `false` flag mirror the other fixtures;
/// their exact meaning is defined by `UTXO::new` — confirm there if needed.
fn generate_dummy_utxo(address: TreeHashType, amount: u128) -> UTXO {
    UTXO::new(address, Vec::new(), amount, false)
}
/// A fresh store over an empty DB starts at genesis height with all
/// in-memory stores empty.
#[test]
fn test_new_initializes_correctly() {
    let temp_dir = tempdir().unwrap();
    let config = create_sample_node_config(temp_dir.path().to_path_buf());
    let genesis = create_genesis_block();

    let (store, height) = NodeChainStore::new(config, genesis).unwrap();

    assert_eq!(height, 0);
    assert!(store.acc_map.is_empty());
    assert!(store.nullifier_store.is_empty());
    let commitment_root = store.utxo_commitments_store.get_root().unwrap_or([0; 32]);
    assert_eq!(commitment_root, [0; 32]);
}
/// End-to-end check that state inserted before a block is persisted via
/// the snapshot path and restored by `new_after_restart`.
#[test]
fn test_new_recovers_from_snapshot() {
    let temp_dir = tempdir().unwrap();
    let path = temp_dir.path().to_path_buf();
    let config = create_sample_node_config(path);
    // Key material required by Account::new(); test-only constants.
    let nullifier_secret_const =
        "261d61d294ac4bdc24f91b6f490efa263757a4a95f65871cd4f16b2ea23c3b5d";
    std::env::set_var("NULLIFIER_SECRET_CONST", nullifier_secret_const);
    let viewing_secret_const =
        "6117af750b30d7a296672ec3b3b25d3489beca3cfe5770fa39f275cec395d5ce";
    std::env::set_var("VIEWING_SECRET_CONST", viewing_secret_const);
    let genesis_block = create_genesis_block();
    // Initialize once to create the DB and write a snapshot.
    {
        let (mut store, _) =
            NodeChainStore::new(config.clone(), genesis_block.clone()).unwrap();
        // Populate every store that participates in snapshots.
        let mut account = Account::new();
        account
            .add_new_utxo_outputs(vec![generate_dummy_utxo(account.address, 100)])
            .unwrap();
        store.acc_map.insert(account.address, account);
        store.nullifier_store.insert(UTXONullifier {
            utxo_hash: [2u8; 32],
        });
        store
            .utxo_commitments_store
            .add_tx_multiple(vec![UTXOCommitment { hash: [3u8; 32] }]);
        store.pub_tx_store.add_tx(create_dummy_transaction(
            [12; 32],
            vec![[9; 32]],
            vec![[7; 32]],
            vec![[8; 32]],
        ));
        // Inserting block 1 triggers a snapshot (frequency is 1 in the fixture).
        let dummy_block = create_sample_block(1, 0);
        store.dissect_insert_block(dummy_block).unwrap();
    }
    // Reload and verify the snapshot is used. The final `config.clone()`
    // was redundant (clippy::redundant_clone) — `config` is moved here.
    let (recovered_store, block_id) =
        NodeChainStore::new_after_restart(config, genesis_block).unwrap();
    assert_eq!(block_id, 1);
    assert_eq!(recovered_store.acc_map.len(), 1);
    // clippy::bool_assert_comparison: assert the predicate directly.
    assert!(recovered_store.utxo_commitments_store.get_root().is_some());
}
}

View File

@ -9,7 +9,6 @@ pub const CALLER_ADDRESS: &str = "caller_address";
pub const CALLER_BALANCE: &str = "caller_balance";
pub const ACCOUNT_MASKS_KEYS_SORTED: &str = "account_masks_keys_sorted";
pub const ACCOUNT_MASKS_VALUES_SORTED: &str = "account_masks_values_sorted";
pub const NULLIFIER_STORE_ROOT: &str = "nullifier_store_root";
pub const COMMITMENT_STORE_ROOT: &str = "commitment_store_root";
pub const PUT_TX_STORE_ROOT: &str = "put_tx_store_root";
@ -18,7 +17,6 @@ pub struct PublicSCContext {
pub caller_address: AccountAddress,
pub caller_balance: u64,
pub account_masks: BTreeMap<AccountAddress, AccountPublicMask>,
pub nullifier_store_root: TreeHashType,
pub comitment_store_root: TreeHashType,
pub pub_tx_store_root: TreeHashType,
}
@ -41,7 +39,6 @@ impl Serialize for PublicSCContext {
s.serialize_field(CALLER_BALANCE, &self.caller_balance)?;
s.serialize_field(ACCOUNT_MASKS_KEYS_SORTED, &account_masks_keys)?;
s.serialize_field(ACCOUNT_MASKS_VALUES_SORTED, &account_mask_values)?;
s.serialize_field(NULLIFIER_STORE_ROOT, &self.nullifier_store_root)?;
s.serialize_field(COMMITMENT_STORE_ROOT, &self.comitment_store_root)?;
s.serialize_field(PUT_TX_STORE_ROOT, &self.pub_tx_store_root)?;
@ -100,7 +97,6 @@ mod tests {
fn create_test_context() -> PublicSCContext {
let caller_address = [1; 32];
let nullifier_store_root = [2; 32];
let comitment_store_root = [3; 32];
let pub_tx_store_root = [4; 32];
@ -118,7 +114,6 @@ mod tests {
caller_address,
caller_balance: 100,
account_masks,
nullifier_store_root,
comitment_store_root,
pub_tx_store_root,
}

View File

@ -49,4 +49,6 @@ pub struct NodeConfig {
pub port: u16,
///Gas config
pub gas_config: GasConfig,
///Frequency of snapshots
pub shapshot_frequency_in_blocks: u64,
}

View File

@ -1,232 +0,0 @@
use bincode;
use common::{
commitment::Commitment, commitments_sparse_merkle_tree::CommitmentsSparseMerkleTree,
nullifier::UTXONullifier, nullifier_sparse_merkle_tree::NullifierSparseMerkleTree,
};
use k256::Scalar;
use monotree::hasher::Blake3;
use monotree::{Hasher, Monotree};
use rand::thread_rng;
use secp256k1_zkp::{CommitmentSecrets, Generator, PedersenCommitment, Tag, Tweak, SECP256K1};
use sha2::{Digest, Sha256};
use utxo::utxo_core::UTXO;
/// Builds commitment secrets for `value` with freshly sampled random
/// blinding factors.
#[allow(unused)]
fn commitment_secrets_random(value: u64) -> CommitmentSecrets {
    let mut rng = thread_rng();
    CommitmentSecrets {
        value,
        value_blinding_factor: Tweak::new(&mut rng),
        generator_blinding_factor: Tweak::new(&mut rng),
    }
}
pub fn tag_random() -> Tag {
use rand::thread_rng;
use rand::RngCore;
let mut bytes = [0u8; 32];
thread_rng().fill_bytes(&mut bytes);
Tag::from(bytes)
}
/// Computes a Pedersen commitment to `comm.value` under a generator
/// blinded with `tag` and the generator blinding factor.
pub fn commit(comm: &CommitmentSecrets, tag: Tag) -> PedersenCommitment {
    let blinded_generator =
        Generator::new_blinded(SECP256K1, tag, comm.generator_blinding_factor);
    PedersenCommitment::new(
        SECP256K1,
        comm.value,
        comm.value_blinding_factor,
        blinded_generator,
    )
}
/// SHA-256 digest of `input`, returned as an owned byte vector.
fn hash(input: &[u8]) -> Vec<u8> {
    let digest = Sha256::digest(input);
    digest.to_vec()
}
/// Derives the nullifier for `input_utxo` under nullifier secret key `nsk`:
/// `nullifier = SHA-256(bincode(input_utxo) || nsk)`.
pub fn generate_nullifiers(input_utxo: &UTXO, nsk: &[u8]) -> Vec<u8> {
    // `bincode::serialize` already returns an owned Vec; the previous
    // `.to_vec()` cloned it needlessly (clippy::redundant_clone).
    let mut input = bincode::serialize(input_utxo).unwrap();
    input.extend_from_slice(nsk);
    hash(&input)
}
/// Hash-commits to each input UTXO:
/// `in_commitments[i] = SHA-256(bincode(input_utxos[i]))`.
pub fn generate_commitments(input_utxos: &[UTXO]) -> Vec<Vec<u8>> {
    let mut commitments = Vec::with_capacity(input_utxos.len());
    for utxo in input_utxos {
        let serialized = bincode::serialize(utxo).unwrap();
        commitments.push(hash(&serialized));
    }
    commitments
}
/// Membership check for `in_commitment` against `root_commitment`.
///
/// # Placeholder
/// This is not a real Merkle inclusion proof yet: it rebuilds a sparse
/// tree from the supplied `in_commitments_proof` hashes and inspects the
/// non-membership result. Replace with proper Merkle proof verification.
pub fn validate_in_commitments_proof(
    in_commitment: &Vec<u8>,
    root_commitment: Vec<u8>,
    in_commitments_proof: &[Vec<u8>],
) -> bool {
    let mut nsmt = CommitmentsSparseMerkleTree {
        curr_root: Option::Some(root_commitment),
        tree: Monotree::default(),
        hasher: Blake3::new(),
    };
    // clippy::into_iter_on_ref: iterate the slice by reference explicitly.
    let commitments: Vec<_> = in_commitments_proof
        .iter()
        .map(|n_p| Commitment {
            commitment_hash: n_p.clone(),
        })
        .collect();
    nsmt.insert_items(commitments).unwrap();
    nsmt.get_non_membership_proof(in_commitment.clone())
        .unwrap()
        .1
        .is_some()
}
/// Non-membership check for `nullifier` against `root_nullifier`.
///
/// # Placeholder
/// Rebuilds a sparse tree from `nullifiers_proof` and returns whether the
/// tree reports `nullifier` as absent; not a real Merkle proof verification.
pub fn validate_nullifiers_proof(
    nullifier: [u8; 32],
    root_nullifier: [u8; 32],
    nullifiers_proof: &[[u8; 32]],
) -> bool {
    let mut nsmt = NullifierSparseMerkleTree {
        curr_root: Option::Some(root_nullifier),
        tree: Monotree::default(),
        hasher: Blake3::new(),
    };
    // clippy::into_iter_on_ref: iterate the slice by reference explicitly.
    let nullifiers: Vec<_> = nullifiers_proof
        .iter()
        .map(|n_p| UTXONullifier { utxo_hash: *n_p })
        .collect();
    nsmt.insert_items(nullifiers).unwrap();
    nsmt.get_non_membership_proof(nullifier)
        .unwrap()
        .1
        .is_none()
}
/// Returns true when `public_info` equals the sum of all output UTXO
/// amounts.
pub fn check_balances(public_info: u128, output_utxos: &[UTXO]) -> bool {
    output_utxos.iter().map(|utxo| utxo.amount).sum::<u128>() == public_info
}
// Verify Pedersen commitment
// takes the public_info, secret_r and pedersen_commitment and
// checks that commitment(public_info,secret_r) is equal pedersen_commitment where the commitment is pedersen commitment.
// NOTE(review): the reconstructed commitment uses a freshly random
// `generator_blinding_factor` and a freshly random tag, so it will not
// match a commitment produced earlier with different randomness —
// confirm the intended generator/tag derivation before relying on this.
pub fn verify_commitment(
    public_info: u64,
    secret_r: &[u8],
    pedersen_commitment: &PedersenCommitment,
) -> bool {
    let commitment_secrets = CommitmentSecrets {
        value: public_info,
        // Panics if `secret_r` is not a valid tweak (wrong length/range).
        value_blinding_factor: Tweak::from_slice(secret_r).unwrap(),
        generator_blinding_factor: Tweak::new(&mut thread_rng()),
    };
    let tag = tag_random();
    let commitment = commit(&commitment_secrets, tag);
    commitment == *pedersen_commitment
}
/// Creates a Pedersen commitment to `public_info` using `secret_r` as the
/// value-blinding tweak and a fresh random generator blinding factor.
/// Returns the generator blinding factor, the secret, and the commitment.
pub fn new_commitment(public_info: u64, secret_r: &[u8]) -> (Tweak, &[u8], PedersenCommitment) {
    let generator_blinding_factor = Tweak::new(&mut thread_rng());
    let secrets = CommitmentSecrets {
        value: public_info,
        value_blinding_factor: Tweak::from_slice(secret_r).unwrap(),
        generator_blinding_factor,
    };
    let pedersen = commit(&secrets, tag_random());
    (generator_blinding_factor, secret_r, pedersen)
}
/// Commits to every value in `public_info_vec` with one shared generator
/// blinding factor and one shared random tag.
/// Returns the blinding factor, the secret, and the commitments in order.
pub fn new_commitment_vec(
    public_info_vec: Vec<u64>,
    secret_r: &[u8],
) -> (Tweak, &[u8], Vec<PedersenCommitment>) {
    let generator_blinding_factor = Tweak::new(&mut thread_rng());
    let tag = tag_random();
    let mut vec_commitments = Vec::with_capacity(public_info_vec.len());
    for value in public_info_vec {
        let secrets = CommitmentSecrets {
            value,
            value_blinding_factor: Tweak::from_slice(secret_r).unwrap(),
            generator_blinding_factor,
        };
        vec_commitments.push(commit(&secrets, tag));
    }
    (generator_blinding_factor, secret_r, vec_commitments)
}
/// Draft "de" kernel: derives nullifiers and commitments for the input
/// UTXOs and runs the (placeholder) proof validations against the given
/// roots. Returns an empty proof placeholder plus the derived nullifiers.
///
/// NOTE(review): the boolean results of `check_balances`,
/// `validate_in_commitments_proof` and `validate_nullifiers_proof` are all
/// discarded, so this kernel currently never rejects anything — confirm
/// whether failures should abort before this is wired into consensus.
#[allow(unused)]
fn de_kernel(
    root_commitment: &[u8],
    root_nullifier: [u8; 32],
    public_info: u64,
    input_utxos: &[UTXO],
    in_commitments_proof: &[Vec<u8>],
    nullifiers_proof: &[[u8; 32]],
    nullifier_secret_key: Scalar,
) -> (Vec<u8>, Vec<Vec<u8>>) {
    // Result ignored (see note above).
    check_balances(public_info as u128, input_utxos);
    // One nullifier per input UTXO, bound to the nullifier secret key.
    let nullifiers: Vec<_> = input_utxos
        .into_iter()
        .map(|utxo| generate_nullifiers(&utxo, &nullifier_secret_key.to_bytes()))
        .collect();
    let in_commitments = generate_commitments(&input_utxos);
    for in_commitment in in_commitments {
        validate_in_commitments_proof(
            &in_commitment,
            root_commitment.to_vec(),
            in_commitments_proof,
        );
    }
    for nullifier in nullifiers.iter() {
        validate_nullifiers_proof(
            nullifier[0..32].try_into().unwrap(),
            root_nullifier,
            nullifiers_proof,
        );
    }
    (vec![], nullifiers)
}

View File

@ -1,3 +0,0 @@
pub mod de;
pub mod private_exec;
pub mod se;

View File

@ -1,133 +0,0 @@
use bincode;
use common::{
commitment::Commitment, commitments_sparse_merkle_tree::CommitmentsSparseMerkleTree,
nullifier::UTXONullifier, nullifier_sparse_merkle_tree::NullifierSparseMerkleTree,
};
use k256::Scalar;
use monotree::hasher::Blake3;
use monotree::{Hasher, Monotree};
use sha2::{Digest, Sha256};
use utxo::utxo_core::UTXO;
/// SHA-256 digest of `input`, returned as an owned byte vector.
fn hash(input: &[u8]) -> Vec<u8> {
    let digest = Sha256::digest(input);
    digest.to_vec()
}
/// Derives the nullifier for `input_utxo` under nullifier secret key `nsk`:
/// `nullifier = SHA-256(bincode(input_utxo) || nsk)`.
pub fn generate_nullifiers(input_utxo: &UTXO, nsk: &[u8]) -> Vec<u8> {
    // `bincode::serialize` already returns an owned Vec; the previous
    // `.to_vec()` cloned it needlessly (clippy::redundant_clone).
    let mut input = bincode::serialize(input_utxo).unwrap();
    input.extend_from_slice(nsk);
    hash(&input)
}
/// Hash-commits to each input UTXO:
/// `in_commitments[i] = SHA-256(bincode(input_utxos[i]))`.
pub fn generate_commitments(input_utxos: &[UTXO]) -> Vec<Vec<u8>> {
    let mut commitments = Vec::with_capacity(input_utxos.len());
    for utxo in input_utxos {
        let serialized = bincode::serialize(utxo).unwrap();
        commitments.push(hash(&serialized));
    }
    commitments
}
/// Membership check for `in_commitment` against `root_commitment`.
///
/// # Placeholder
/// This is not a real Merkle inclusion proof yet: it rebuilds a sparse
/// tree from the supplied `in_commitments_proof` hashes and inspects the
/// non-membership result. Replace with proper Merkle proof verification.
pub fn validate_in_commitments_proof(
    in_commitment: &Vec<u8>,
    root_commitment: Vec<u8>,
    in_commitments_proof: &[Vec<u8>],
) -> bool {
    let mut nsmt = CommitmentsSparseMerkleTree {
        curr_root: Option::Some(root_commitment),
        tree: Monotree::default(),
        hasher: Blake3::new(),
    };
    // clippy::into_iter_on_ref: iterate the slice by reference explicitly.
    let commitments: Vec<_> = in_commitments_proof
        .iter()
        .map(|n_p| Commitment {
            commitment_hash: n_p.clone(),
        })
        .collect();
    nsmt.insert_items(commitments).unwrap();
    nsmt.get_non_membership_proof(in_commitment.clone())
        .unwrap()
        .1
        .is_some()
}
/// Non-membership check for `nullifier` against `root_nullifier`.
///
/// # Placeholder
/// Rebuilds a sparse tree from `nullifiers_proof` and returns whether the
/// tree reports `nullifier` as absent; not a real Merkle proof verification.
pub fn validate_nullifiers_proof(
    nullifier: [u8; 32],
    root_nullifier: [u8; 32],
    nullifiers_proof: &[[u8; 32]],
) -> bool {
    let mut nsmt = NullifierSparseMerkleTree {
        curr_root: Option::Some(root_nullifier),
        tree: Monotree::default(),
        hasher: Blake3::new(),
    };
    // clippy::into_iter_on_ref: iterate the slice by reference explicitly.
    let nullifiers: Vec<_> = nullifiers_proof
        .iter()
        .map(|n_p| UTXONullifier { utxo_hash: *n_p })
        .collect();
    nsmt.insert_items(nullifiers).unwrap();
    nsmt.get_non_membership_proof(nullifier)
        .unwrap()
        .1
        .is_none()
}
/// Draft private-transfer kernel: derives nullifiers and commitments for
/// the input UTXOs and runs the (placeholder) proof validations against
/// the given roots. Returns an empty proof placeholder plus the nullifiers.
///
/// NOTE(review): the boolean results of both validation calls are
/// discarded, so this kernel currently never rejects anything — confirm
/// whether failures should abort before this is wired into consensus.
#[allow(unused)]
fn private_kernel(
    root_commitment: &[u8],
    root_nullifier: [u8; 32],
    input_utxos: &[UTXO],
    in_commitments_proof: &[Vec<u8>],
    nullifiers_proof: &[[u8; 32]],
    nullifier_secret_key: Scalar,
) -> (Vec<u8>, Vec<Vec<u8>>) {
    // One nullifier per input UTXO, bound to the nullifier secret key.
    let nullifiers: Vec<_> = input_utxos
        .into_iter()
        .map(|utxo| generate_nullifiers(&utxo, &nullifier_secret_key.to_bytes()))
        .collect();
    let in_commitments = generate_commitments(&input_utxos);
    for in_commitment in in_commitments {
        validate_in_commitments_proof(
            &in_commitment,
            root_commitment.to_vec(),
            in_commitments_proof,
        );
    }
    for nullifier in nullifiers.iter() {
        validate_nullifiers_proof(
            nullifier[0..32].try_into().unwrap(),
            root_nullifier,
            nullifiers_proof,
        );
    }
    (vec![], nullifiers)
}

View File

@ -1,186 +0,0 @@
use bincode;
use common::{
commitment::Commitment, commitments_sparse_merkle_tree::CommitmentsSparseMerkleTree,
nullifier::UTXONullifier, nullifier_sparse_merkle_tree::NullifierSparseMerkleTree,
};
use k256::Scalar;
use monotree::hasher::Blake3;
use monotree::{Hasher, Monotree};
use rand::thread_rng;
use secp256k1_zkp::{CommitmentSecrets, Generator, PedersenCommitment, Tag, Tweak, SECP256K1};
use sha2::{Digest, Sha256};
use utxo::utxo_core::UTXO;
/// Builds commitment secrets for `value` with freshly sampled random
/// blinding factors.
#[allow(unused)]
fn commitment_secrets_random(value: u64) -> CommitmentSecrets {
    let mut rng = thread_rng();
    CommitmentSecrets {
        value,
        value_blinding_factor: Tweak::new(&mut rng),
        generator_blinding_factor: Tweak::new(&mut rng),
    }
}
pub fn tag_random() -> Tag {
use rand::thread_rng;
use rand::RngCore;
let mut bytes = [0u8; 32];
thread_rng().fill_bytes(&mut bytes);
Tag::from(bytes)
}
/// Computes a Pedersen commitment to `comm.value` under a generator
/// blinded with `tag` and the generator blinding factor.
pub fn commit(comm: &CommitmentSecrets, tag: Tag) -> PedersenCommitment {
    let blinded_generator =
        Generator::new_blinded(SECP256K1, tag, comm.generator_blinding_factor);
    PedersenCommitment::new(
        SECP256K1,
        comm.value,
        comm.value_blinding_factor,
        blinded_generator,
    )
}
/// SHA-256 digest of `input`, returned as an owned byte vector.
fn hash(input: &[u8]) -> Vec<u8> {
    let digest = Sha256::digest(input);
    digest.to_vec()
}
/// Derives the nullifier for a Pedersen commitment:
/// `nullifier = SHA-256(serialize(pedersen_commitment) || nsk)`.
pub fn generate_nullifiers(pedersen_commitment: &PedersenCommitment, nsk: &[u8]) -> Vec<u8> {
    let mut preimage = pedersen_commitment.serialize().to_vec();
    preimage.extend_from_slice(nsk);
    hash(&preimage)
}
/// Hash-commits to each output UTXO:
/// `out_commitments[i] = SHA-256(bincode(output_utxos[i]))`.
pub fn generate_commitments(output_utxos: &[UTXO]) -> Vec<Vec<u8>> {
    let mut commitments = Vec::with_capacity(output_utxos.len());
    for utxo in output_utxos {
        let serialized = bincode::serialize(utxo).unwrap();
        commitments.push(hash(&serialized));
    }
    commitments
}
/// Membership check for `pedersen_commitment` against `root_commitment`.
///
/// # Placeholder
/// This is not a real Merkle inclusion proof yet: it rebuilds a sparse
/// tree from the supplied `in_commitments_proof` hashes and inspects the
/// non-membership result. Replace with proper Merkle proof verification.
pub fn validate_in_commitments_proof(
    pedersen_commitment: &PedersenCommitment,
    root_commitment: Vec<u8>,
    in_commitments_proof: &[Vec<u8>],
) -> bool {
    let mut nsmt = CommitmentsSparseMerkleTree {
        curr_root: Option::Some(root_commitment),
        tree: Monotree::default(),
        hasher: Blake3::new(),
    };
    // clippy::into_iter_on_ref: iterate the slice by reference explicitly.
    let commitments: Vec<_> = in_commitments_proof
        .iter()
        .map(|n_p| Commitment {
            commitment_hash: n_p.clone(),
        })
        .collect();
    nsmt.insert_items(commitments).unwrap();
    nsmt.get_non_membership_proof(pedersen_commitment.serialize().to_vec())
        .unwrap()
        .1
        .is_some()
}
/// Non-membership check for `nullifier` against `root_nullifier`.
///
/// # Placeholder
/// Rebuilds a sparse tree from `nullifiers_proof` and returns whether the
/// tree reports `nullifier` as absent; not a real Merkle proof verification.
pub fn validate_nullifiers_proof(
    nullifier: [u8; 32],
    root_nullifier: [u8; 32],
    nullifiers_proof: &[[u8; 32]],
) -> bool {
    let mut nsmt = NullifierSparseMerkleTree {
        curr_root: Option::Some(root_nullifier),
        tree: Monotree::default(),
        hasher: Blake3::new(),
    };
    // clippy::into_iter_on_ref: iterate the slice by reference explicitly.
    let nullifiers: Vec<_> = nullifiers_proof
        .iter()
        .map(|n_p| UTXONullifier { utxo_hash: *n_p })
        .collect();
    nsmt.insert_items(nullifiers).unwrap();
    nsmt.get_non_membership_proof(nullifier)
        .unwrap()
        .1
        .is_none()
}
/// Returns true when `public_info` equals the sum of all output UTXO
/// amounts.
pub fn check_balances(public_info: u128, output_utxos: &[UTXO]) -> bool {
    output_utxos.iter().map(|utxo| utxo.amount).sum::<u128>() == public_info
}
// Verify Pedersen commitment
// takes the public_info, secret_r and pedersen_commitment and
// checks that commitment(public_info,secret_r) is equal pedersen_commitment where the commitment is pedersen commitment.
// NOTE(review): the reconstructed commitment uses a freshly random
// `generator_blinding_factor` and a freshly random tag, so it will not
// match a commitment produced earlier with different randomness —
// confirm the intended generator/tag derivation before relying on this.
pub fn verify_commitment(
    public_info: u64,
    secret_r: &[u8],
    pedersen_commitment: &PedersenCommitment,
) -> bool {
    let commitment_secrets = CommitmentSecrets {
        value: public_info,
        // Panics if `secret_r` is not a valid tweak (wrong length/range).
        value_blinding_factor: Tweak::from_slice(secret_r).unwrap(),
        generator_blinding_factor: Tweak::new(&mut thread_rng()),
    };
    let tag = tag_random();
    let commitment = commit(&commitment_secrets, tag);
    commitment == *pedersen_commitment
}
/// Draft "se" kernel: derives output commitments and the spend nullifier,
/// then runs the (placeholder) commitment-inclusion and Pedersen checks.
/// Returns an empty proof placeholder, the output commitments, and the
/// nullifier.
///
/// NOTE(review): `root_nullifier` and `nullifiers_proof` are accepted but
/// never used — no nullifier non-membership check runs here — and the
/// boolean results of every validation call are discarded, so this kernel
/// currently never rejects anything. Confirm the intended checks before
/// this is wired into consensus.
#[allow(unused)]
fn se_kernel(
    root_commitment: &[u8],
    root_nullifier: [u8; 32],
    public_info: u64,
    pedersen_commitment: PedersenCommitment,
    secret_r: &[u8],
    output_utxos: &[UTXO],
    in_commitments_proof: &[Vec<u8>],
    nullifiers_proof: &[[u8; 32]],
    nullifier_secret_key: Scalar,
) -> (Vec<u8>, Vec<Vec<u8>>, Vec<u8>) {
    // Result ignored (see note above).
    check_balances(public_info as u128, output_utxos);
    let out_commitments = generate_commitments(output_utxos);
    let nullifier = generate_nullifiers(&pedersen_commitment, &nullifier_secret_key.to_bytes());
    validate_in_commitments_proof(
        &pedersen_commitment,
        root_commitment.to_vec(),
        in_commitments_proof,
    );
    verify_commitment(public_info, secret_r, &pedersen_commitment);
    (vec![], out_commitments, nullifier)
}

View File

@ -10,9 +10,10 @@ use anyhow::Result;
use chain_storage::NodeChainStore;
use common::transaction::{Transaction, TransactionPayload, TxKind};
use config::NodeConfig;
use executions::private_exec::{generate_commitments, generate_nullifiers};
use log::info;
use sc_core::proofs_circuits::pedersen_commitment_vec;
use sc_core::proofs_circuits::{
generate_commitments, generate_nullifiers, generate_nullifiers_se, pedersen_commitment_vec,
};
use sequencer_client::{json::SendTxResponse, SequencerClient};
use serde::{Deserialize, Serialize};
use storage::sc_db_utils::DataBlobChangeVariant;
@ -28,7 +29,6 @@ pub const BLOCK_GEN_DELAY_SECS: u64 = 20;
pub mod chain_storage;
pub mod config;
pub mod executions;
///Module, which includes pre start setup helperfunctions
pub mod pre_start;
pub mod sequencer_client;
@ -94,16 +94,14 @@ impl NodeCore {
let client = Arc::new(SequencerClient::new(config.clone())?);
let genesis_id = client.get_genesis_id().await?;
info!("Gesesis id is {genesis_id:?}");
info!("Genesis id is {genesis_id:?}");
let genesis_block = client.get_block(genesis_id.genesis_id).await?.block;
let mut storage = NodeChainStore::new_with_genesis(&config.home, genesis_block);
let (mut storage, mut chain_height) = NodeChainStore::new(config.clone(), genesis_block)?;
pre_start::setup_empty_sc_states(&storage).await?;
let mut chain_height = genesis_id.genesis_id;
//Chain update loop
loop {
let next_block = chain_height + 1;
@ -164,10 +162,9 @@ impl NodeCore {
})
}
pub async fn get_roots(&self) -> [[u8; 32]; 3] {
pub async fn get_roots(&self) -> [[u8; 32]; 2] {
let storage = self.storage.read().await;
[
storage.nullifier_store.curr_root.unwrap_or([0; 32]),
storage.utxo_commitments_store.get_root().unwrap_or([0; 32]),
storage.pub_tx_store.get_root().unwrap_or([0; 32]),
]
@ -651,7 +648,7 @@ impl NodeCore {
)
.unwrap();
let nullifier = executions::se::generate_nullifiers(
let nullifier = generate_nullifiers_se(
&commitment,
&account
.key_holder
@ -1062,7 +1059,7 @@ impl NodeCore {
let acc = write_guard.acc_map.get_mut(&acc_addr).unwrap();
acc.utxo_tree.get_item(new_utxo_hash)?.unwrap().clone()
acc.utxos.get(&new_utxo_hash).unwrap().clone()
};
new_utxo.log();
@ -1101,12 +1098,7 @@ impl NodeCore {
.map(|new_utxo_hash| {
let acc = write_guard.acc_map.get_mut(&acc_addr).unwrap();
let new_utxo = acc
.utxo_tree
.get_item(new_utxo_hash)
.unwrap()
.unwrap()
.clone();
let new_utxo = acc.utxos.get(&new_utxo_hash).unwrap().clone();
new_utxo.log();
info!(
@ -1238,7 +1230,7 @@ impl NodeCore {
let acc = write_guard.acc_map.get_mut(&acc_addr_rec).unwrap();
acc.log();
acc.utxo_tree.get_item(new_utxo_hash)?.unwrap().clone()
acc.utxos.get(&new_utxo_hash).unwrap().clone()
};
new_utxo.log();
info!(
@ -1278,7 +1270,7 @@ impl NodeCore {
let acc = write_guard.acc_map.get_mut(&acc_addr_rec).unwrap();
acc.log();
acc.utxo_tree.get_item(new_utxo_hash)?.unwrap().clone()
acc.utxos.get(&new_utxo_hash).unwrap().clone()
};
new_utxo.log();
info!(
@ -1323,7 +1315,7 @@ impl NodeCore {
let acc = write_guard.acc_map.get_mut(&acc_addr_rec).unwrap();
acc.log();
let new_utxo = acc.utxo_tree.get_item(new_utxo_hash)?.unwrap().clone();
let new_utxo = acc.utxos.get(&new_utxo_hash).unwrap().clone();
new_utxo.log();
info!(
@ -1343,7 +1335,7 @@ impl NodeCore {
let acc = write_guard.acc_map.get_mut(&acc_addr).unwrap();
acc.log();
let new_utxo = acc.utxo_tree.get_item(new_utxo_hash)?.unwrap().clone();
let new_utxo = acc.utxos.get(&new_utxo_hash).unwrap().clone();
new_utxo.log();
info!(
@ -1556,12 +1548,7 @@ impl NodeCore {
.map(|(acc_addr_rec, new_utxo_hash)| {
let acc = write_guard.acc_map.get_mut(&acc_addr_rec).unwrap();
let new_utxo = acc
.utxo_tree
.get_item(new_utxo_hash)
.unwrap()
.unwrap()
.clone();
let new_utxo = acc.utxos.get(&new_utxo_hash).unwrap().clone();
new_utxo.log();
info!(

View File

@ -6,8 +6,8 @@ use serde::{Deserialize, Serialize};
#[derive(Serialize, Deserialize, Debug)]
pub struct SendTxRequest {
pub transaction: Transaction,
///Nullifier Root, UTXO Commitment Root, Pub Tx Root
pub tx_roots: [[u8; 32]; 3],
///UTXO Commitment Root, Pub Tx Root
pub tx_roots: [[u8; 32]; 2],
}
//Responses

View File

@ -73,7 +73,7 @@ impl SequencerClient {
pub async fn send_tx(
&self,
transaction: Transaction,
tx_roots: [[u8; 32]; 3],
tx_roots: [[u8; 32]; 2],
) -> Result<SendTxResponse, SequencerClientError> {
let tx_req = SendTxRequest {
transaction,

View File

@ -268,11 +268,8 @@ impl JsonHandler {
.ok_or(RpcError::new_internal_error(None, ACCOUNT_NOT_FOUND))?;
let utxo = acc
.utxo_tree
.get_item(utxo_hash)
.map_err(|err| {
RpcError::new_internal_error(None, &format!("DB fetch failure {err:?}"))
})?
.utxos
.get(&utxo_hash)
.ok_or(RpcError::new_internal_error(
None,
"UTXO does not exist in the tree",
@ -512,11 +509,8 @@ impl JsonHandler {
.get_mut(&acc_addr_sender)
.ok_or(RpcError::new_internal_error(None, ACCOUNT_NOT_FOUND))?;
acc.utxo_tree
.get_item(utxo_hash)
.map_err(|err| {
RpcError::new_internal_error(None, &format!("DB fetch failure {err:?}"))
})?
acc.utxos
.get(&utxo_hash)
.ok_or(RpcError::new_internal_error(
None,
"UTXO does not exist in tree",
@ -647,11 +641,8 @@ impl JsonHandler {
.get_mut(&acc_addr_sender)
.ok_or(RpcError::new_internal_error(None, ACCOUNT_NOT_FOUND))?;
acc.utxo_tree
.get_item(utxo_hash)
.map_err(|err| {
RpcError::new_internal_error(None, &format!("DB fetch failure {err:?}"))
})?
acc.utxos
.get(&utxo_hash)
.ok_or(RpcError::new_internal_error(
None,
"UTXO does not exist in tree",
@ -735,11 +726,8 @@ impl JsonHandler {
.get_mut(&acc_addr_sender)
.ok_or(RpcError::new_internal_error(None, ACCOUNT_NOT_FOUND))?;
acc.utxo_tree
.get_item(utxo_hash)
.map_err(|err| {
RpcError::new_internal_error(None, &format!("DB fetch failure {err:?}"))
})?
acc.utxos
.get(&utxo_hash)
.ok_or(RpcError::new_internal_error(
None,
"UTXO does not exist in tree",

View File

@ -81,6 +81,5 @@ pub fn cast_common_execution_error_into_rpc_error(comm_exec_err: ExecutionFailur
ExecutionFailureKind::SequencerClientError(seq_cli_err) => {
cast_seq_client_error_into_rpc_error(seq_cli_err)
}
ExecutionFailureKind::MonoTreeError(_) => RpcError::new_internal_error(None, &error_string),
}
}

View File

@ -12,5 +12,6 @@
"gas_cost_deploy": 1000,
"gas_limit_deploy": 30000000,
"gas_limit_runtime": 30000000
}
},
"shapshot_frequency_in_blocks": 10
}

View File

@ -12,7 +12,6 @@ serde.workspace = true
rand.workspace = true
k256.workspace = true
sha2.workspace = true
monotree.workspace = true
bincode.workspace = true
elliptic-curve.workspace = true
hex.workspace = true
@ -20,7 +19,7 @@ light-poseidon.workspace = true
ark-bn254.workspace = true
ark-ff.workspace = true
risc0-zkvm = { git = "https://github.com/risc0/risc0.git", branch = "release-2.0" }
risc0-zkvm = { git = "https://github.com/risc0/risc0.git", branch = "release-2.1" }
[dependencies.accounts]
path = "../accounts"

View File

@ -1,4 +1,3 @@
pub mod cryptography;
pub mod proofs_circuits;
pub mod transaction_payloads_tools;
pub mod utxo_manipulator;

View File

@ -1,11 +1,5 @@
use bincode;
use common::{
commitment::Commitment, commitments_sparse_merkle_tree::CommitmentsSparseMerkleTree,
nullifier::UTXONullifier, nullifier_sparse_merkle_tree::NullifierSparseMerkleTree,
};
use k256::Scalar;
use monotree::hasher::Blake3;
use monotree::{Hasher, Monotree};
use rand::{thread_rng, RngCore};
use secp256k1_zkp::{CommitmentSecrets, Generator, PedersenCommitment, Tag, Tweak, SECP256K1};
use sha2::{Digest, Sha256};
@ -44,59 +38,21 @@ pub fn generate_commitments(input_utxos: &[UTXO]) -> Vec<Vec<u8>> {
// takes the in_commitments[i] as a leaf, the root hash root_commitment and the path in_commitments_proofs[i][],
// returns True if the in_commitments[i] is in the tree with root hash root_commitment otherwise returns False, as membership proof.
pub fn validate_in_commitments_proof(
in_commitment: &Vec<u8>,
root_commitment: Vec<u8>,
in_commitments_proof: &[Vec<u8>],
_in_commitment: &Vec<u8>,
_root_commitment: Vec<u8>,
_in_commitments_proof: &[Vec<u8>],
) -> bool {
// Placeholder implementation.
// Replace with Merkle proof verification logic.
// hash(&[pedersen_commitment.serialize().to_vec(), in_commitments_proof.concat()].concat()) == root_commitment
// ToDo: Implement correct check
let mut nsmt = CommitmentsSparseMerkleTree {
curr_root: Option::Some(root_commitment),
tree: Monotree::default(),
hasher: Blake3::new(),
};
let commitments: Vec<_> = in_commitments_proof
.into_iter()
.map(|n_p| Commitment {
commitment_hash: n_p.clone(),
})
.collect();
nsmt.insert_items(commitments).unwrap();
nsmt.get_non_membership_proof(in_commitment.clone())
.unwrap()
.1
.is_some()
todo!()
}
// Validate non-membership proof for nullifiers
// takes the nullifiers[i], path nullifiers_proof[i][] and the root hash root_nullifier,
// returns True if the nullifiers[i] is not in the tree with root hash root_nullifier otherwise returns False, as non-membership proof.
pub fn validate_nullifiers_proof(
// Validate that `nullifier` has not been present in set items before
pub fn validate_nullifier_not_present_in_set_items(
nullifier: [u8; 32],
root_nullifier: [u8; 32],
nullifiers_proof: &[[u8; 32]],
nullifiers_items: &[[u8; 32]],
) -> bool {
let mut nsmt = NullifierSparseMerkleTree {
curr_root: Option::Some(root_nullifier),
tree: Monotree::default(),
hasher: Blake3::new(),
};
let nullifiers: Vec<_> = nullifiers_proof
.into_iter()
.map(|n_p| UTXONullifier { utxo_hash: *n_p })
.collect();
nsmt.insert_items(nullifiers).unwrap();
nsmt.get_non_membership_proof(nullifier)
.unwrap()
.1
.is_none()
!nullifiers_items.contains(&nullifier)
}
#[allow(unused)]
@ -124,9 +80,8 @@ fn private_kernel(
}
for nullifier in nullifiers.iter() {
validate_nullifiers_proof(
validate_nullifier_not_present_in_set_items(
nullifier[0..32].try_into().unwrap(),
root_nullifier,
nullifiers_proof,
);
}
@ -243,9 +198,8 @@ fn de_kernel(
}
for nullifier in nullifiers.iter() {
validate_nullifiers_proof(
validate_nullifier_not_present_in_set_items(
nullifier[0..32].try_into().unwrap(),
root_nullifier,
nullifiers_proof,
);
}
@ -260,28 +214,13 @@ fn de_kernel(
// otherwise
// returns False, as membership proof.
pub fn validate_in_commitments_proof_se(
pedersen_commitment: &PedersenCommitment,
root_commitment: Vec<u8>,
in_commitments_proof: &[Vec<u8>],
_pedersen_commitment: &PedersenCommitment,
_root_commitment: Vec<u8>,
_in_commitments_proof: &[Vec<u8>],
) -> bool {
let mut nsmt = CommitmentsSparseMerkleTree {
curr_root: Option::Some(root_commitment),
tree: Monotree::default(),
hasher: Blake3::new(),
};
// ToDo: Implement correct check
let commitments: Vec<_> = in_commitments_proof
.into_iter()
.map(|n_p| Commitment {
commitment_hash: n_p.clone(),
})
.collect();
nsmt.insert_items(commitments).unwrap();
nsmt.get_non_membership_proof(pedersen_commitment.serialize().to_vec())
.unwrap()
.1
.is_some()
todo!()
}
// Generate nullifiers SE

View File

@ -1,110 +0,0 @@
use anyhow::Result;
use common::nullifier::UTXONullifier;
use utxo::utxo_core::{UTXOPayload, UTXO};
/// Transfers the full value of `utxo` to `new_owner`: consumes the input with
/// `nullifier` and mints a fresh UTXO carrying the same asset, amount and
/// privacy flag but the new owner.
///
/// Fails if the UTXO was already consumed or the replacement cannot be built.
pub fn utxo_change_owner(
    utxo: &mut UTXO,
    nullifier: UTXONullifier,
    new_owner: [u8; 32],
) -> Result<UTXO> {
    let reissued = UTXOPayload {
        owner: new_owner,
        asset: utxo.asset.clone(),
        amount: utxo.amount,
        privacy_flag: utxo.privacy_flag,
    };
    utxo.consume_utxo(nullifier)?;
    let minted = UTXO::create_utxo_from_payload(reissued)?;
    Ok(minted)
}
/// Splits `utxo` in two: `amount` goes to `new_owner`, the remainder stays
/// with the current owner. The input is consumed with `nullifier` and two
/// fresh UTXOs `(change, transferred)` are minted.
///
/// Errors when `amount` exceeds the UTXO's value, when the UTXO was already
/// consumed, or when either output cannot be built.
pub fn utxo_substact_part_another_owner(
    utxo: &mut UTXO,
    nullifier: UTXONullifier,
    amount: u128,
    new_owner: [u8; 32],
) -> Result<(UTXO, UTXO)> {
    if amount > utxo.amount {
        anyhow::bail!("Amount too big");
    }
    // Guard above makes this subtraction safe.
    let remainder = utxo.amount - amount;
    let change_payload = UTXOPayload {
        owner: utxo.owner,
        asset: utxo.asset.clone(),
        amount: remainder,
        privacy_flag: utxo.privacy_flag,
    };
    let transfer_payload = UTXOPayload {
        owner: new_owner,
        asset: utxo.asset.clone(),
        amount,
        privacy_flag: utxo.privacy_flag,
    };
    utxo.consume_utxo(nullifier)?;
    let change = UTXO::create_utxo_from_payload(change_payload)?;
    let transferred = UTXO::create_utxo_from_payload(transfer_payload)?;
    Ok((change, transferred))
}
/// Splits off `amount` from `utxo` without changing ownership: both resulting
/// UTXOs belong to the current owner. Thin wrapper over
/// `utxo_substact_part_another_owner` with the owner unchanged.
pub fn utxo_substract_part(
    utxo: &mut UTXO,
    nullifier: UTXONullifier,
    amount: u128,
) -> Result<(UTXO, UTXO)> {
    let owner = utxo.owner;
    utxo_substact_part_another_owner(utxo, nullifier, amount, owner)
}
/// Splits `utxo` among several recipients: for each `(owner, amount)` pair a
/// fresh UTXO is minted, and if the requested amounts do not exhaust the
/// input, a change UTXO for the current owner is appended. The input is
/// consumed with `nullifier`.
///
/// Errors when the requested amounts exceed the UTXO's value, when the UTXO
/// was already consumed, or when any output cannot be built.
pub fn utxo_split_n_users(
    utxo: &mut UTXO,
    nullifier: UTXONullifier,
    users_amounts: Vec<([u8; 32], u128)>,
) -> Result<Vec<UTXO>> {
    let requested_total: u128 = users_amounts.iter().map(|(_, amount)| *amount).sum();
    if requested_total > utxo.amount {
        anyhow::bail!("Amount too big");
    }
    // At most one extra entry for the change output.
    let mut outputs = Vec::with_capacity(users_amounts.len() + 1);
    for (recipient, amount) in users_amounts {
        outputs.push(UTXO::create_utxo_from_payload(UTXOPayload {
            owner: recipient,
            asset: utxo.asset.clone(),
            amount,
            privacy_flag: utxo.privacy_flag,
        })?);
    }
    if requested_total != utxo.amount {
        // Return the unspent remainder to the original owner.
        outputs.push(UTXO::create_utxo_from_payload(UTXOPayload {
            owner: utxo.owner,
            asset: utxo.asset.clone(),
            amount: utxo.amount - requested_total,
            privacy_flag: utxo.privacy_flag,
        })?);
    }
    utxo.consume_utxo(nullifier)?;
    Ok(outputs)
}

View File

@ -59,9 +59,8 @@ impl SequencerCore {
}
}
pub fn get_tree_roots(&self) -> [[u8; 32]; 3] {
pub fn get_tree_roots(&self) -> [[u8; 32]; 2] {
[
self.store.nullifier_store.curr_root.unwrap_or([0; 32]),
self.store
.utxo_commitments_store
.get_root()
@ -73,7 +72,7 @@ impl SequencerCore {
pub fn transaction_pre_check(
&mut self,
tx: &Transaction,
tx_roots: [[u8; 32]; 3],
tx_roots: [[u8; 32]; 2],
) -> Result<(), TransactionMalformationErrorKind> {
let Transaction {
hash,
@ -135,10 +134,9 @@ impl SequencerCore {
let nullifier_tree_check = nullifier_created_hashes
.iter()
.map(|nullifier_hash| {
self.store
.nullifier_store
.search_item_inclusion(*nullifier_hash)
.unwrap_or(false)
self.store.nullifier_store.contains(&UTXONullifier {
utxo_hash: *nullifier_hash,
})
})
.any(|check| check);
let utxo_commitments_check = utxo_commitments_created_hashes
@ -173,7 +171,7 @@ impl SequencerCore {
pub fn push_tx_into_mempool_pre_check(
&mut self,
item: TransactionMempool,
tx_roots: [[u8; 32]; 3],
tx_roots: [[u8; 32]; 2],
) -> Result<(), TransactionMalformationErrorKind> {
self.transaction_pre_check(&item.tx, tx_roots)?;
@ -187,7 +185,8 @@ impl SequencerCore {
tx: TransactionMempool,
) -> Result<(), TransactionMalformationErrorKind> {
let Transaction {
hash,
// ToDo: remove hashing of transactions on node side [Issue #66]
hash: _,
ref utxo_commitments_created_hashes,
ref nullifier_created_hashes,
..
@ -199,16 +198,10 @@ impl SequencerCore {
.add_tx(UTXOCommitment { hash: *utxo_comm });
}
for nullifier in nullifier_created_hashes {
self.store
.nullifier_store
.insert_item(UTXONullifier {
utxo_hash: *nullifier,
})
.map_err(|err| TransactionMalformationErrorKind::FailedToInsert {
tx: hash,
details: format!("{err:?}"),
})?;
for nullifier in nullifier_created_hashes.iter() {
self.store.nullifier_store.insert(UTXONullifier {
utxo_hash: *nullifier,
});
}
self.store.pub_tx_store.add_tx(tx.tx);
@ -225,12 +218,14 @@ impl SequencerCore {
///Produces new block from transactions in mempool
pub fn produce_new_block_with_mempool_transactions(&mut self) -> Result<u64> {
let new_block_height = self.chain_height + 1;
let transactions = self
.mempool
.pop_size(self.sequencer_config.max_num_tx_in_block);
for tx in transactions.clone() {
self.execute_check_transaction_on_state(tx)?;
for tx in &transactions {
self.execute_check_transaction_on_state(tx.clone())?;
}
let prev_block_hash = self
@ -240,7 +235,7 @@ impl SequencerCore {
.hash;
let hashable_data = HashableBlockData {
block_id: self.chain_height + 1,
block_id: new_block_height,
prev_block_id: self.chain_height,
transactions: transactions.into_iter().map(|tx_mem| tx_mem.tx).collect(),
data: vec![],
@ -337,7 +332,7 @@ mod tests {
common_setup(&mut sequencer);
let roots = sequencer.get_tree_roots();
assert_eq!(roots.len(), 3); // Should return three roots
assert_eq!(roots.len(), 2); // Should return two roots
}
#[test]

View File

@ -1,11 +1,11 @@
use std::path::Path;
use std::{collections::HashSet, path::Path};
use accounts_store::SequencerAccountsStore;
use block_store::SequecerBlockStore;
use common::{
block::{Block, HashableBlockData},
merkle_tree_public::merkle_tree::{PublicTransactionMerkleTree, UTXOCommitmentsMerkleTree},
nullifier_sparse_merkle_tree::NullifierSparseMerkleTree,
nullifier::UTXONullifier,
};
use rand::{rngs::OsRng, RngCore};
@ -15,7 +15,7 @@ pub mod block_store;
pub struct SequecerChainStore {
pub acc_store: SequencerAccountsStore,
pub block_store: SequecerBlockStore,
pub nullifier_store: NullifierSparseMerkleTree,
pub nullifier_store: HashSet<UTXONullifier>,
pub utxo_commitments_store: UTXOCommitmentsMerkleTree,
pub pub_tx_store: PublicTransactionMerkleTree,
}
@ -23,7 +23,7 @@ pub struct SequecerChainStore {
impl SequecerChainStore {
pub fn new_with_genesis(home_dir: &Path, genesis_id: u64, is_genesis_random: bool) -> Self {
let acc_store = SequencerAccountsStore::default();
let nullifier_store = NullifierSparseMerkleTree::default();
let nullifier_store = HashSet::new();
let utxo_commitments_store = UTXOCommitmentsMerkleTree::new(vec![]);
let pub_tx_store = PublicTransactionMerkleTree::new(vec![]);

View File

@ -2,7 +2,6 @@ use std::{path::Path, sync::Arc};
use common::block::Block;
use error::DbError;
use log::warn;
use rocksdb::{
BoundColumnFamily, ColumnFamilyDescriptor, DBWithThreadMode, MultiThreaded, Options,
};
@ -35,12 +34,25 @@ pub const DB_META_FIRST_BLOCK_SET_KEY: &str = "first_block_set";
///Key to list of all known smart contract addresses
pub const DB_META_SC_LIST: &str = "sc_list";
///Key base for storing snapshot which describe block id
pub const DB_SNAPSHOT_BLOCK_ID_KEY: &str = "block_id";
///Key base for storing snapshot which describe commitment
pub const DB_SNAPSHOT_COMMITMENT_KEY: &str = "commitment";
///Key base for storing snapshot which describe transaction
pub const DB_SNAPSHOT_TRANSACTION_KEY: &str = "transaction";
///Key base for storing snapshot which describe nullifier
pub const DB_SNAPSHOT_NULLIFIER_KEY: &str = "nullifier";
///Key base for storing snapshot which describe account
pub const DB_SNAPSHOT_ACCOUNT_KEY: &str = "account";
///Name of block column family
pub const CF_BLOCK_NAME: &str = "cf_block";
///Name of meta column family
pub const CF_META_NAME: &str = "cf_meta";
///Name of smart contract column family
pub const CF_SC_NAME: &str = "cf_sc";
///Name of snapshot column family
pub const CF_SNAPSHOT_NAME: &str = "cf_snapshot";
///Suffix, used to mark field, which contain length of smart contract
pub const SC_LEN_SUFFIX: &str = "sc_len";
@ -59,6 +71,7 @@ impl RocksDBIO {
let cfb = ColumnFamilyDescriptor::new(CF_BLOCK_NAME, cf_opts.clone());
let cfmeta = ColumnFamilyDescriptor::new(CF_META_NAME, cf_opts.clone());
let cfsc = ColumnFamilyDescriptor::new(CF_SC_NAME, cf_opts.clone());
let cfsnapshot = ColumnFamilyDescriptor::new(CF_SNAPSHOT_NAME, cf_opts.clone());
let mut db_opts = Options::default();
db_opts.create_missing_column_families(true);
@ -66,7 +79,7 @@ impl RocksDBIO {
let db = DBWithThreadMode::<MultiThreaded>::open_cf_descriptors(
&db_opts,
path,
vec![cfb, cfmeta, cfsc],
vec![cfb, cfmeta, cfsc, cfsnapshot],
);
let dbio = Self {
@ -89,9 +102,8 @@ impl RocksDBIO {
Ok(dbio)
} else {
warn!("Starting db in unset mode, will have to set starting block manually");
Ok(dbio)
// Here we are trying to start a DB without a block, one should not do it.
unreachable!()
}
}
@ -101,6 +113,7 @@ impl RocksDBIO {
//ToDo: Add more column families for different data
let _cfb = ColumnFamilyDescriptor::new(CF_BLOCK_NAME, cf_opts.clone());
let _cfmeta = ColumnFamilyDescriptor::new(CF_META_NAME, cf_opts.clone());
let _cfsnapshot = ColumnFamilyDescriptor::new(CF_SNAPSHOT_NAME, cf_opts.clone());
let mut db_opts = Options::default();
db_opts.create_missing_column_families(true);
@ -109,18 +122,22 @@ impl RocksDBIO {
.map_err(|rerr| DbError::rocksdb_cast_message(rerr, None))
}
pub fn meta_column(&self) -> Arc<BoundColumnFamily> {
pub fn meta_column(&self) -> Arc<BoundColumnFamily<'_>> {
self.db.cf_handle(CF_META_NAME).unwrap()
}
pub fn block_column(&self) -> Arc<BoundColumnFamily> {
pub fn block_column(&self) -> Arc<BoundColumnFamily<'_>> {
self.db.cf_handle(CF_BLOCK_NAME).unwrap()
}
pub fn sc_column(&self) -> Arc<BoundColumnFamily> {
pub fn sc_column(&self) -> Arc<BoundColumnFamily<'_>> {
self.db.cf_handle(CF_SC_NAME).unwrap()
}
pub fn snapshot_column(&self) -> Arc<BoundColumnFamily<'_>> {
self.db.cf_handle(CF_SNAPSHOT_NAME).unwrap()
}
pub fn get_meta_first_block_in_db(&self) -> DbResult<u64> {
let cf_meta = self.meta_column();
let res = self
@ -384,6 +401,142 @@ impl RocksDBIO {
Ok(data_blob_list)
}
/// Reads the snapshot block id from the snapshot column family.
///
/// Returns a `DbError` when the key is missing, when RocksDB fails, or when
/// the stored payload is not exactly 8 bytes (previously this last case
/// panicked via `try_into().unwrap()` on corrupt data).
pub fn get_snapshot_block_id(&self) -> DbResult<u64> {
    let cf_snapshot = self.snapshot_column();
    let res = self
        .db
        .get_cf(&cf_snapshot, DB_SNAPSHOT_BLOCK_ID_KEY)
        .map_err(|rerr| DbError::rocksdb_cast_message(rerr, None))?;
    if let Some(data) = res {
        // Stored as 8 big-endian bytes (see `put_snapshot_block_id_db`);
        // reject odd-sized payloads instead of panicking.
        let bytes: [u8; 8] = data.try_into().map_err(|_| {
            DbError::db_interaction_error("Snapshot block ID has invalid length".to_string())
        })?;
        Ok(u64::from_be_bytes(bytes))
    } else {
        Err(DbError::db_interaction_error(
            "Snapshot block ID not found".to_string(),
        ))
    }
}
/// Reads the raw snapshot commitment bytes from the snapshot column family.
///
/// Returns a `DbError` when RocksDB fails or the key is absent.
pub fn get_snapshot_commitment(&self) -> DbResult<Vec<u8>> {
    let cf_snapshot = self.snapshot_column();
    self.db
        .get_cf(&cf_snapshot, DB_SNAPSHOT_COMMITMENT_KEY)
        .map_err(|rerr| DbError::rocksdb_cast_message(rerr, None))?
        .ok_or_else(|| DbError::db_interaction_error("Snapshot commitment not found".to_string()))
}
/// Reads the raw snapshot transaction bytes from the snapshot column family.
///
/// Returns a `DbError` when RocksDB fails or the key is absent.
pub fn get_snapshot_transaction(&self) -> DbResult<Vec<u8>> {
    let cf_snapshot = self.snapshot_column();
    self.db
        .get_cf(&cf_snapshot, DB_SNAPSHOT_TRANSACTION_KEY)
        .map_err(|rerr| DbError::rocksdb_cast_message(rerr, None))?
        .ok_or_else(|| DbError::db_interaction_error("Snapshot transaction not found".to_string()))
}
/// Reads the raw snapshot nullifier bytes from the snapshot column family.
///
/// Returns a `DbError` when RocksDB fails or the key is absent.
pub fn get_snapshot_nullifier(&self) -> DbResult<Vec<u8>> {
    let cf_snapshot = self.snapshot_column();
    self.db
        .get_cf(&cf_snapshot, DB_SNAPSHOT_NULLIFIER_KEY)
        .map_err(|rerr| DbError::rocksdb_cast_message(rerr, None))?
        .ok_or_else(|| DbError::db_interaction_error("Snapshot nullifier not found".to_string()))
}
/// Reads the raw snapshot account bytes from the snapshot column family.
///
/// Returns a `DbError` when RocksDB fails or the key is absent.
pub fn get_snapshot_account(&self) -> DbResult<Vec<u8>> {
    let cf_snapshot = self.snapshot_column();
    self.db
        .get_cf(&cf_snapshot, DB_SNAPSHOT_ACCOUNT_KEY)
        .map_err(|rerr| DbError::rocksdb_cast_message(rerr, None))?
        .ok_or_else(|| DbError::db_interaction_error("Snapshot account not found".to_string()))
}
/// Persists the snapshot block id as 8 big-endian bytes under
/// `DB_SNAPSHOT_BLOCK_ID_KEY` in the snapshot column family.
pub fn put_snapshot_block_id_db(&self, block_id: u64) -> DbResult<()> {
    let column = self.snapshot_column();
    self.db
        .put_cf(
            &column,
            DB_SNAPSHOT_BLOCK_ID_KEY.as_bytes(),
            block_id.to_be_bytes(),
        )
        .map_err(|err| DbError::rocksdb_cast_message(err, None))
}
/// Persists the raw snapshot commitment bytes under
/// `DB_SNAPSHOT_COMMITMENT_KEY` in the snapshot column family.
/// (Name keeps the historical "commitement" spelling for caller compatibility.)
pub fn put_snapshot_commitement_db(&self, commitment: Vec<u8>) -> DbResult<()> {
    let column = self.snapshot_column();
    self.db
        .put_cf(
            &column,
            DB_SNAPSHOT_COMMITMENT_KEY.as_bytes(),
            commitment,
        )
        .map_err(|err| DbError::rocksdb_cast_message(err, None))
}
/// Persists the raw snapshot transaction bytes under
/// `DB_SNAPSHOT_TRANSACTION_KEY` in the snapshot column family.
pub fn put_snapshot_transaction_db(&self, transaction: Vec<u8>) -> DbResult<()> {
    let column = self.snapshot_column();
    self.db
        .put_cf(
            &column,
            DB_SNAPSHOT_TRANSACTION_KEY.as_bytes(),
            transaction,
        )
        .map_err(|err| DbError::rocksdb_cast_message(err, None))
}
/// Persists the raw snapshot nullifier bytes under
/// `DB_SNAPSHOT_NULLIFIER_KEY` in the snapshot column family.
pub fn put_snapshot_nullifier_db(&self, nullifier: Vec<u8>) -> DbResult<()> {
    let column = self.snapshot_column();
    self.db
        .put_cf(
            &column,
            DB_SNAPSHOT_NULLIFIER_KEY.as_bytes(),
            nullifier,
        )
        .map_err(|err| DbError::rocksdb_cast_message(err, None))
}
/// Persists the raw snapshot account bytes under `DB_SNAPSHOT_ACCOUNT_KEY`
/// in the snapshot column family.
pub fn put_snapshot_account_db(&self, account: Vec<u8>) -> DbResult<()> {
    let column = self.snapshot_column();
    self.db
        .put_cf(&column, DB_SNAPSHOT_ACCOUNT_KEY.as_bytes(), account)
        .map_err(|err| DbError::rocksdb_cast_message(err, None))
}
}
///Creates address for sc data blob at corresponding id

View File

@ -9,9 +9,9 @@ serde_json.workspace = true
env_logger.workspace = true
log.workspace = true
serde.workspace = true
monotree.workspace = true
sha2.workspace = true
hex.workspace = true
rand.workspace = true
[dependencies.common]
path = "../common"

View File

@ -1,2 +1 @@
pub mod utxo_core;
pub mod utxo_tree;

View File

@ -1,22 +1,24 @@
use anyhow::Result;
use common::{merkle_tree_public::TreeHashType, nullifier::UTXONullifier, AccountId};
use common::{merkle_tree_public::TreeHashType, AccountId};
use log::info;
use rand::{rngs::OsRng, RngCore};
use serde::{Deserialize, Serialize};
use sha2::{digest::FixedOutput, Digest};
///Raw asset data
pub type Asset = Vec<u8>;
pub type Randomness = [u8; 32];
#[derive(Debug, PartialEq, Eq, Clone, Serialize, Deserialize)]
///Container for raw utxo payload
pub struct UTXO {
pub hash: TreeHashType,
pub owner: AccountId,
pub nullifier: Option<UTXONullifier>,
pub asset: Asset,
// TODO: change to u256
pub amount: u128,
pub privacy_flag: bool,
pub randomness: Randomness,
}
#[derive(Debug, Clone, Serialize, Deserialize)]
@ -26,36 +28,47 @@ pub struct UTXOPayload {
// TODO: change to u256
pub amount: u128,
pub privacy_flag: bool,
pub randomness: Randomness,
}
impl UTXOPayload {
fn to_bytes(&self) -> Vec<u8> {
let mut result = Vec::new();
result.extend_from_slice(&self.owner);
result.extend_from_slice(&self.asset);
result.extend_from_slice(&self.amount.to_be_bytes());
result.push(self.privacy_flag as u8);
result.extend_from_slice(&self.randomness);
result
}
}
impl UTXO {
pub fn create_utxo_from_payload(payload_with_asset: UTXOPayload) -> anyhow::Result<Self> {
let raw_payload = serde_json::to_vec(&payload_with_asset)?;
pub fn new(owner: AccountId, asset: Asset, amount: u128, privacy_flag: bool) -> Self {
let mut randomness = Randomness::default();
OsRng.fill_bytes(&mut randomness);
let payload = UTXOPayload {
owner,
asset,
amount,
privacy_flag,
randomness,
};
Self::create_utxo_from_payload(payload)
}
pub fn create_utxo_from_payload(payload_with_asset: UTXOPayload) -> Self {
let mut hasher = sha2::Sha256::new();
hasher.update(&raw_payload);
hasher.update(&payload_with_asset.to_bytes());
let hash = <TreeHashType>::from(hasher.finalize_fixed());
Ok(Self {
Self {
hash,
owner: payload_with_asset.owner,
nullifier: None,
asset: payload_with_asset.asset,
amount: payload_with_asset.amount,
privacy_flag: payload_with_asset.privacy_flag,
})
}
pub fn consume_utxo(&mut self, nullifier: UTXONullifier) -> Result<()> {
if self.nullifier.is_some() {
anyhow::bail!("UTXO already consumed");
} else {
self.nullifier = Some(nullifier);
randomness: payload_with_asset.randomness,
}
Ok(())
}
pub fn interpret_asset<'de, ToInterpret: Deserialize<'de>>(&'de self) -> Result<ToInterpret> {
@ -68,16 +81,13 @@ impl UTXO {
asset: self.asset.clone(),
amount: self.amount,
privacy_flag: self.privacy_flag,
randomness: self.randomness,
}
}
pub fn log(&self) {
info!("UTXO hash is {:?}", hex::encode(self.hash));
info!("UTXO owner is {:?}", hex::encode(self.owner));
info!(
"UTXO nullifier is {:?}",
self.nullifier.clone().map(|val| hex::encode(val.utxo_hash))
);
info!("UTXO asset is {:?}", hex::encode(self.asset.clone()));
info!("UTXO amount is {:?}", self.amount);
info!("UTXO privacy_flag is {:?}", self.privacy_flag);
@ -98,14 +108,6 @@ mod tests {
AccountId::default()
}
fn sample_nullifier() -> UTXONullifier {
UTXONullifier::default()
}
fn sample_tree_hash() -> TreeHashType {
TreeHashType::default()
}
fn sample_payload() -> UTXOPayload {
UTXOPayload {
owner: sample_account(),
@ -116,40 +118,24 @@ mod tests {
.unwrap(),
amount: 10,
privacy_flag: false,
randomness: Randomness::default(),
}
}
#[test]
fn test_create_utxo_from_payload() {
let payload = sample_payload();
let utxo = UTXO::create_utxo_from_payload(payload.clone()).unwrap();
let utxo = UTXO::create_utxo_from_payload(payload.clone());
// Ensure hash is created and the UTXO fields are correctly assigned
assert_eq!(utxo.owner, payload.owner);
assert_eq!(utxo.asset, payload.asset);
assert!(utxo.nullifier.is_none());
}
#[test]
fn test_consume_utxo() {
let payload = sample_payload();
let mut utxo = UTXO::create_utxo_from_payload(payload).unwrap();
let nullifier = sample_nullifier();
// First consumption should succeed
assert!(utxo.consume_utxo(nullifier.clone()).is_ok());
assert_eq!(utxo.nullifier, Some(nullifier));
// Second consumption should fail
let result = utxo.consume_utxo(sample_nullifier());
assert!(result.is_err());
}
#[test]
fn test_interpret_asset() {
let payload = sample_payload();
let utxo = UTXO::create_utxo_from_payload(payload).unwrap();
let utxo = UTXO::create_utxo_from_payload(payload);
// Interpret asset as TestAsset
let interpreted: TestAsset = utxo.interpret_asset().unwrap();
@ -167,7 +153,7 @@ mod tests {
fn test_interpret_invalid_asset() {
let mut payload = sample_payload();
payload.asset = vec![0, 1, 2, 3]; // Invalid data for deserialization
let utxo = UTXO::create_utxo_from_payload(payload).unwrap();
let utxo = UTXO::create_utxo_from_payload(payload);
// This should fail because the asset is not valid JSON for TestAsset
let result: Result<TestAsset> = utxo.interpret_asset();

View File

@ -1,192 +0,0 @@
use std::collections::HashMap;
use common::merkle_tree_public::TreeHashType;
use monotree::database::MemoryDB;
use monotree::hasher::Blake3;
use monotree::{Hasher, Monotree, Proof};
use crate::utxo_core::UTXO;
/// In-memory sparse Merkle tree over UTXOs, keyed by UTXO hash, with a side
/// map holding the full UTXO payloads.
pub struct UTXOSparseMerkleTree {
    // Root hash after the latest insertion; `None` for an empty tree.
    pub curr_root: Option<TreeHashType>,
    // Monotree-backed sparse Merkle tree (memory DB, Blake3 hashing).
    pub tree: Monotree<MemoryDB, Blake3>,
    pub hasher: Blake3,
    // UTXO hash -> full UTXO, so tree lookups can be resolved to payloads.
    pub store: HashMap<TreeHashType, UTXO>,
}
impl UTXOSparseMerkleTree {
    /// Creates an empty tree: no root, fresh monotree, empty UTXO store.
    pub fn new() -> Self {
        UTXOSparseMerkleTree {
            curr_root: None,
            tree: Monotree::default(),
            hasher: Blake3::new(),
            store: HashMap::new(),
        }
    }
    /// Inserts one UTXO. The UTXO hash is used as both key and leaf value in
    /// the monotree; the full UTXO goes into `store` and `curr_root` advances
    /// to the new root.
    pub fn insert_item(&mut self, utxo: UTXO) -> Result<(), monotree::Errors> {
        let root = self.curr_root.as_ref();
        let new_root = self.tree.insert(root, &utxo.hash, &utxo.hash)?;
        self.store.insert(utxo.hash, utxo);
        self.curr_root = new_root;
        Ok(())
    }
    /// Batch insert: same as `insert_item` but uses monotree's bulk `inserts`
    /// (hashes serve as both keys and values), then stores every payload.
    pub fn insert_items(&mut self, utxos: Vec<UTXO>) -> Result<(), monotree::Errors> {
        let root = self.curr_root.as_ref();
        let hashes: Vec<TreeHashType> = utxos.iter().map(|item| item.hash).collect();
        let new_root = self.tree.inserts(root, &hashes, &hashes)?;
        for utxo in utxos {
            self.store.insert(utxo.hash, utxo);
        }
        self.curr_root = new_root;
        Ok(())
    }
    /// Looks up `hash` in the tree first, then resolves the returned leaf
    /// value (which is the same hash) through `store`; `None` if absent.
    pub fn get_item(&mut self, hash: TreeHashType) -> Result<Option<&UTXO>, monotree::Errors> {
        let hash = self.tree.get(self.curr_root.as_ref(), &hash)?;
        Ok(hash.and_then(|hash| self.store.get(&hash)))
    }
    /// Builds a Merkle (membership) proof for the given leaf hash against the
    /// current root; `None` when the leaf is not in the tree.
    pub fn get_membership_proof(
        &mut self,
        nullifier_hash: TreeHashType,
    ) -> Result<Option<Proof>, monotree::Errors> {
        self.tree
            .get_merkle_proof(self.curr_root.as_ref(), &nullifier_hash)
    }
}
impl Default for UTXOSparseMerkleTree {
fn default() -> Self {
Self::new()
}
}
#[cfg(test)]
mod tests {
    use common::AccountId;

    use super::*;
    use crate::utxo_core::{UTXOPayload, UTXO};

    // Shared fixture: default owner, 3-byte asset, amount 10, public UTXO.
    fn sample_utxo_payload() -> UTXOPayload {
        UTXOPayload {
            owner: AccountId::default(),
            asset: vec![1, 2, 3],
            amount: 10,
            privacy_flag: false,
        }
    }

    // Builds a UTXO from the sample payload (hash derived from the payload).
    fn sample_utxo() -> anyhow::Result<UTXO> {
        UTXO::create_utxo_from_payload(sample_utxo_payload())
    }

    #[test]
    fn test_utxo_sparse_merkle_tree_new() {
        let smt = UTXOSparseMerkleTree::new();
        // A fresh tree has no root and an empty store.
        assert!(smt.curr_root.is_none());
        assert_eq!(smt.store.len(), 0);
    }

    #[test]
    fn test_insert_item() {
        let mut smt = UTXOSparseMerkleTree::new();
        let utxo = sample_utxo().unwrap();
        let result = smt.insert_item(utxo.clone());
        // Test insertion is successful
        assert!(result.is_ok());
        // Test UTXO is now stored in the tree
        assert_eq!(smt.store.get(&utxo.hash).unwrap().hash, utxo.hash);
        // Test curr_root is updated
        assert!(smt.curr_root.is_some());
    }

    #[test]
    fn test_insert_items() {
        let mut smt = UTXOSparseMerkleTree::new();
        let utxo1 = sample_utxo().unwrap();
        let utxo2 = sample_utxo().unwrap();
        let result = smt.insert_items(vec![utxo1.clone(), utxo2.clone()]);
        // Test insertion of multiple items is successful
        assert!(result.is_ok());
        // Test UTXOs are now stored in the tree
        assert!(smt.store.get(&utxo1.hash).is_some());
        assert!(smt.store.get(&utxo2.hash).is_some());
        // Test curr_root is updated
        assert!(smt.curr_root.is_some());
    }

    #[test]
    fn test_get_item_exists() {
        let mut smt = UTXOSparseMerkleTree::new();
        let utxo = sample_utxo().unwrap();
        smt.insert_item(utxo.clone()).unwrap();
        // Test that the UTXO can be retrieved by hash
        let retrieved_utxo = smt.get_item(utxo.hash).unwrap();
        assert!(retrieved_utxo.is_some());
        assert_eq!(retrieved_utxo.unwrap().hash, utxo.hash);
    }

    #[test]
    fn test_get_item_not_exists() {
        let mut smt = UTXOSparseMerkleTree::new();
        let utxo = sample_utxo().unwrap();
        // Insert one UTXO and try to fetch a different hash
        smt.insert_item(utxo).unwrap();
        let non_existent_hash = TreeHashType::default();
        let result = smt.get_item(non_existent_hash).unwrap();
        // Test that retrieval for a non-existent UTXO returns None
        assert!(result.is_none());
    }

    #[test]
    fn test_get_membership_proof() {
        let mut smt = UTXOSparseMerkleTree::new();
        let utxo = sample_utxo().unwrap();
        smt.insert_item(utxo.clone()).unwrap();
        // Fetch membership proof for the inserted UTXO
        let proof = smt.get_membership_proof(utxo.hash).unwrap();
        // Test proof is generated successfully
        assert!(proof.is_some());
    }

    #[test]
    fn test_get_membership_proof_not_exists() {
        let mut smt = UTXOSparseMerkleTree::new();
        // Try fetching proof for a non-existent UTXO hash
        let non_existent_hash = TreeHashType::default();
        let proof = smt.get_membership_proof(non_existent_hash).unwrap();
        // Test no proof is generated for a non-existent UTXO
        assert!(proof.is_none());
    }
}

View File

@ -10,8 +10,9 @@ env_logger.workspace = true
log.workspace = true
serde.workspace = true
thiserror.workspace = true
rand.workspace = true
risc0-zkvm = { git = "https://github.com/risc0/risc0.git", branch = "release-2.0" }
risc0-zkvm = { git = "https://github.com/risc0/risc0.git", branch = "release-2.1" }
test-methods = { path = "test_methods" }
[dependencies.accounts]

View File

@ -1,8 +1,9 @@
use accounts::account_core::AccountAddress;
use common::ExecutionFailureKind;
use rand::{rngs::OsRng, RngCore};
use risc0_zkvm::{default_executor, default_prover, sha::Digest, ExecutorEnv, Receipt};
use serde::Serialize;
use utxo::utxo_core::{UTXOPayload, UTXO};
use utxo::utxo_core::{Randomness, UTXOPayload, UTXO};
pub mod gas_calculator;
@ -43,6 +44,12 @@ pub fn prove_mint_utxo(
.write(&owner)
.map_err(ExecutionFailureKind::write_error)?;
let mut randomness = Randomness::default();
OsRng.fill_bytes(&mut randomness);
builder
.write(&randomness)
.map_err(ExecutionFailureKind::write_error)?;
let env = builder
.build()
.map_err(ExecutionFailureKind::builder_error)?;
@ -56,10 +63,7 @@ pub fn prove_mint_utxo(
let digest: UTXOPayload = receipt.journal.decode()?;
Ok((
UTXO::create_utxo_from_payload(digest).map_err(ExecutionFailureKind::write_error)?,
receipt,
))
Ok((UTXO::create_utxo_from_payload(digest), receipt))
}
pub fn prove_send_utxo(
@ -78,8 +82,18 @@ pub fn prove_send_utxo(
builder
.write(&utxo_payload)
.map_err(ExecutionFailureKind::write_error)?;
let owners_parts_with_randomness = owners_parts
.into_iter()
.map(|(amount, addr)| {
let mut randomness = Randomness::default();
OsRng.fill_bytes(&mut randomness);
(amount, addr, randomness)
})
.collect::<Vec<_>>();
builder
.write(&owners_parts)
.write(&owners_parts_with_randomness)
.map_err(ExecutionFailureKind::write_error)?;
let env = builder
@ -98,9 +112,8 @@ pub fn prove_send_utxo(
Ok((
digest
.into_iter()
.map(|(payload, addr)| (UTXO::create_utxo_from_payload(payload).map(|sel| (sel, addr))))
.collect::<anyhow::Result<Vec<(UTXO, [u8; 32])>>>()
.map_err(ExecutionFailureKind::write_error)?,
.map(|(payload, addr)| (UTXO::create_utxo_from_payload(payload), addr))
.collect(),
receipt,
))
}
@ -148,14 +161,12 @@ pub fn prove_send_utxo_multiple_assets_one_receiver(
.0
.into_iter()
.map(|payload| UTXO::create_utxo_from_payload(payload))
.collect::<anyhow::Result<Vec<UTXO>>>()
.map_err(ExecutionFailureKind::write_error)?,
.collect(),
digest
.1
.into_iter()
.map(|payload| UTXO::create_utxo_from_payload(payload))
.collect::<anyhow::Result<Vec<UTXO>>>()
.map_err(ExecutionFailureKind::write_error)?,
.collect(),
receipt,
))
}
@ -171,13 +182,7 @@ pub fn prove_send_utxo_shielded(
return Err(ExecutionFailureKind::AmountMismatchError);
}
let temp_utxo_to_spend = UTXO::create_utxo_from_payload(UTXOPayload {
owner,
asset: vec![],
amount,
privacy_flag: true,
})
.map_err(ExecutionFailureKind::write_error)?;
let temp_utxo_to_spend = UTXO::new(owner, vec![], amount, true);
let utxo_payload = temp_utxo_to_spend.into_payload();
let mut builder = ExecutorEnv::builder();
@ -185,8 +190,18 @@ pub fn prove_send_utxo_shielded(
builder
.write(&utxo_payload)
.map_err(ExecutionFailureKind::write_error)?;
let owners_parts_with_randomness = owners_parts
.into_iter()
.map(|(amount, addr)| {
let mut randomness = Randomness::default();
OsRng.fill_bytes(&mut randomness);
(amount, addr, randomness)
})
.collect::<Vec<_>>();
builder
.write(&owners_parts)
.write(&owners_parts_with_randomness)
.map_err(ExecutionFailureKind::write_error)?;
let env = builder
@ -205,9 +220,8 @@ pub fn prove_send_utxo_shielded(
Ok((
digest
.into_iter()
.map(|(payload, addr)| (UTXO::create_utxo_from_payload(payload).map(|sel| (sel, addr))))
.collect::<anyhow::Result<Vec<(UTXO, [u8; 32])>>>()
.map_err(ExecutionFailureKind::write_error)?,
.map(|(payload, addr)| (UTXO::create_utxo_from_payload(payload), addr))
.collect(),
receipt,
))
}
@ -228,8 +242,18 @@ pub fn prove_send_utxo_deshielded(
builder
.write(&utxo_payload)
.map_err(ExecutionFailureKind::write_error)?;
let owners_parts_with_randomness = owners_parts
.into_iter()
.map(|(amount, addr)| {
let mut randomness = Randomness::default();
OsRng.fill_bytes(&mut randomness);
(amount, addr, randomness)
})
.collect::<Vec<_>>();
builder
.write(&owners_parts)
.write(&owners_parts_with_randomness)
.map_err(ExecutionFailureKind::write_error)?;
let env = builder
@ -288,8 +312,7 @@ pub fn prove_mint_utxo_multiple_assets(
digest
.into_iter()
.map(UTXO::create_utxo_from_payload)
.collect::<anyhow::Result<Vec<UTXO>>>()
.map_err(ExecutionFailureKind::write_error)?,
.collect(),
receipt,
))
}
@ -308,7 +331,7 @@ pub fn execute_mint_utxo(amount_to_mint: u128, owner: AccountAddress) -> anyhow:
let digest: UTXOPayload = receipt.journal.decode()?;
UTXO::create_utxo_from_payload(digest)
Ok(UTXO::create_utxo_from_payload(digest))
}
pub fn execute_send_utxo(
@ -320,7 +343,16 @@ pub fn execute_send_utxo(
let utxo_payload = spent_utxo.into_payload();
builder.write(&utxo_payload)?;
builder.write(&owners_parts)?;
let owners_parts_with_randomness = owners_parts
.into_iter()
.map(|(amount, addr)| {
let mut randomness = Randomness::default();
OsRng.fill_bytes(&mut randomness);
(amount, addr, randomness)
})
.collect::<Vec<_>>();
builder.write(&owners_parts_with_randomness)?;
let env = builder.build()?;
@ -331,13 +363,12 @@ pub fn execute_send_utxo(
let digest: (UTXOPayload, Vec<(UTXOPayload, AccountAddress)>) = receipt.journal.decode()?;
Ok((
UTXO::create_utxo_from_payload(digest.0)?,
UTXO::create_utxo_from_payload(digest.0),
digest
.1
.into_iter()
.map(|(payload, addr)| (UTXO::create_utxo_from_payload(payload).map(|sel| (sel, addr))))
.collect::<anyhow::Result<Vec<(UTXO, [u8; 32])>>>()
.map_err(ExecutionFailureKind::write_error)?,
.map(|(payload, addr)| (UTXO::create_utxo_from_payload(payload), addr))
.collect(),
))
}

View File

@ -4,7 +4,7 @@ version = "0.1.0"
edition = "2021"
[build-dependencies]
risc0-build = { git = "https://github.com/risc0/risc0.git", branch = "release-2.0" }
risc0-build = { git = "https://github.com/risc0/risc0.git", branch = "release-2.1" }
[package.metadata.risc0]
methods = ["guest"]

View File

@ -12,17 +12,20 @@ pub struct UTXOPayload {
// TODO: change to u256
pub amount: u128,
pub privacy_flag: bool,
pub randomness: [u8; 32],
}
fn main() {
let amount_to_mint: u128 = env::read();
let owner: AccountAddr = env::read();
let randomness: [u8; 32] = env::read();
let payload = UTXOPayload {
owner,
asset: vec![],
amount: amount_to_mint,
privacy_flag: true,
randomness,
};
env::commit(&(payload));

View File

@ -12,12 +12,14 @@ pub struct UTXOPayload {
// TODO: change to u256
pub amount: u128,
pub privacy_flag: bool,
pub randomness: [u8; 32],
}
fn main() {
let amount_to_mint: u128 = env::read();
let number_of_assets: usize = env::read();
let owner: AccountAddr = env::read();
let randomness: [u8; 32] = env::read();
let mut asseted_utxos = vec![];
@ -27,6 +29,7 @@ fn main() {
asset: vec![i as u8],
amount: amount_to_mint,
privacy_flag: true,
randomness
};
asseted_utxos.push(payload);

View File

@ -12,18 +12,20 @@ pub struct UTXOPayload {
// TODO: change to u256
pub amount: u128,
pub privacy_flag: bool,
pub randomness: [u8; 32],
}
fn main() {
let utxo_spent: UTXOPayload = env::read();
let owners_parts: Vec<(u128, AccountAddr)> = env::read();
let owners_parts: Vec<(u128, AccountAddr, [u8; 32])> = env::read();
let res: Vec<(UTXOPayload, AccountAddr)> = owners_parts.into_iter().map(|(amount, addr)| (
let res: Vec<(UTXOPayload, AccountAddr)> = owners_parts.into_iter().map(|(amount, addr, randomness)| (
UTXOPayload {
owner: addr.clone(),
asset: vec![],
amount,
privacy_flag: true,
randomness,
},
addr
)).collect();

View File

@ -12,6 +12,7 @@ pub struct UTXOPayload {
// TODO: change to u256
pub amount: u128,
pub privacy_flag: bool,
pub randomness: [u8; 32],
}
fn main() {