Merge branch 'main' into schouhy/wallet-minor-improvements

Sergio Chouhy 2025-10-06 16:07:22 -03:00
commit 4bb8bc94ff
18 changed files with 183 additions and 582 deletions

View File

@ -41,9 +41,10 @@ ark-bn254 = "0.5.0"
ark-ff = "0.5.0"
tiny-keccak = { version = "2.0.2", features = ["keccak"] }
base64 = "0.22.1"
chrono = "0.4.41"
bip39 = "2.2.0"
hmac-sha512 = "1.1.7"
chrono = "0.4.41"
borsh = "1.5.7"
rocksdb = { version = "0.21.0", default-features = false, features = [
"snappy",

View File

@ -17,6 +17,7 @@ log.workspace = true
elliptic-curve.workspace = true
hex.workspace = true
nssa-core = { path = "../nssa/core", features = ["host"] }
borsh.workspace = true
[dependencies.nssa]
path = "../nssa"

View File

@ -1,5 +1,5 @@
use borsh::{BorshDeserialize, BorshSerialize};
use rs_merkle::Hasher;
use std::io::{Cursor, Read};
use crate::{OwnHasher, transaction::EncodedTransaction};
@ -27,7 +27,7 @@ pub struct Block {
pub body: BlockBody,
}
#[derive(Debug, PartialEq, Eq)]
#[derive(Debug, PartialEq, Eq, BorshSerialize, BorshDeserialize)]
pub struct HashableBlockData {
pub block_id: BlockId,
pub prev_block_hash: BlockHash,
@ -37,7 +37,7 @@ pub struct HashableBlockData {
impl HashableBlockData {
pub fn into_block(self, signing_key: &nssa::PrivateKey) -> Block {
let data_bytes = self.to_bytes();
let data_bytes = borsh::to_vec(&self).unwrap();
let signature = nssa::Signature::new(signing_key, &data_bytes);
let hash = OwnHasher::hash(&data_bytes);
Block {
@ -66,75 +66,6 @@ impl From<Block> for HashableBlockData {
}
}
impl HashableBlockData {
pub fn to_bytes(&self) -> Vec<u8> {
let mut bytes = Vec::new();
bytes.extend_from_slice(&self.block_id.to_le_bytes());
bytes.extend_from_slice(&self.prev_block_hash);
bytes.extend_from_slice(&self.timestamp.to_le_bytes());
let num_transactions: u32 = self.transactions.len() as u32;
bytes.extend_from_slice(&num_transactions.to_le_bytes());
for tx in &self.transactions {
let transaction_bytes = tx.to_bytes();
let num_transaction_bytes: u32 = transaction_bytes.len() as u32;
bytes.extend_from_slice(&num_transaction_bytes.to_le_bytes());
bytes.extend_from_slice(&tx.to_bytes());
}
bytes
}
// TODO: Improve error handling. Remove unwraps.
pub fn from_bytes(data: &[u8]) -> Self {
let mut cursor = Cursor::new(data);
let block_id = u64_from_cursor(&mut cursor);
let mut prev_block_hash = [0u8; 32];
cursor.read_exact(&mut prev_block_hash).unwrap();
let timestamp = u64_from_cursor(&mut cursor);
let num_transactions = u32_from_cursor(&mut cursor) as usize;
let mut transactions = Vec::with_capacity(num_transactions);
for _ in 0..num_transactions {
let tx_len = u32_from_cursor(&mut cursor) as usize;
let mut tx_bytes = Vec::with_capacity(tx_len);
for _ in 0..tx_len {
let mut buff = [0; 1];
cursor.read_exact(&mut buff).unwrap();
tx_bytes.push(buff[0]);
}
let tx = EncodedTransaction::from_bytes(tx_bytes);
transactions.push(tx);
}
Self {
block_id,
prev_block_hash,
timestamp,
transactions,
}
}
}
// TODO: Improve error handling. Remove unwraps.
pub fn u32_from_cursor(cursor: &mut Cursor<&[u8]>) -> u32 {
let mut word_buf = [0u8; 4];
cursor.read_exact(&mut word_buf).unwrap();
u32::from_le_bytes(word_buf)
}
// TODO: Improve error handling. Remove unwraps.
pub fn u64_from_cursor(cursor: &mut Cursor<&[u8]>) -> u64 {
let mut word_buf = [0u8; 8];
cursor.read_exact(&mut word_buf).unwrap();
u64::from_le_bytes(word_buf)
}
#[cfg(test)]
mod tests {
use crate::{block::HashableBlockData, test_utils};
@ -144,8 +75,8 @@ mod tests {
let transactions = vec![test_utils::produce_dummy_empty_transaction()];
let block = test_utils::produce_dummy_block(1, Some([1; 32]), transactions);
let hashable = HashableBlockData::from(block);
let bytes = hashable.to_bytes();
let block_from_bytes = HashableBlockData::from_bytes(&bytes);
let bytes = borsh::to_vec(&hashable).unwrap();
let block_from_bytes = borsh::from_slice::<HashableBlockData>(&bytes).unwrap();
assert_eq!(hashable, block_from_bytes);
}
}
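
A minimal sketch (not part of this commit) of the round-trip the Borsh derives provide, using a stand-in struct with the same field layout as HashableBlockData; it subsumes the cursor-based to_bytes/from_bytes removed above:

use borsh::{BorshDeserialize, BorshSerialize};

#[derive(Debug, PartialEq, Eq, BorshSerialize, BorshDeserialize)]
struct BlockDataSketch {
    block_id: u64,
    prev_block_hash: [u8; 32],
    timestamp: u64,
    transactions: Vec<Vec<u8>>, // stand-in for Vec<EncodedTransaction>
}

fn round_trip(data: &BlockDataSketch) -> BlockDataSketch {
    // Borsh writes fields in declaration order, with little-endian integers and
    // length-prefixed vectors, so it replaces the manual encoder/decoder above.
    let bytes = borsh::to_vec(data).expect("in-memory encoding should not fail");
    borsh::from_slice(&bytes).expect("bytes were produced by to_vec just above")
}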

View File

@ -151,7 +151,7 @@ impl SequencerClient {
let transaction = EncodedTransaction::from(NSSATransaction::Public(transaction));
let tx_req = SendTxRequest {
transaction: transaction.to_bytes(),
transaction: borsh::to_vec(&transaction).unwrap(),
};
let req = serde_json::to_value(tx_req)?;
@ -171,7 +171,7 @@ impl SequencerClient {
let transaction = EncodedTransaction::from(NSSATransaction::PrivacyPreserving(transaction));
let tx_req = SendTxRequest {
transaction: transaction.to_bytes(),
transaction: borsh::to_vec(&transaction).unwrap(),
};
let req = serde_json::to_value(tx_req)?;

View File

@ -1,3 +1,4 @@
use borsh::{BorshDeserialize, BorshSerialize};
use k256::ecdsa::{Signature, SigningKey, VerifyingKey};
use log::info;
use serde::{Deserialize, Serialize};
@ -34,13 +35,15 @@ pub type CipherText = Vec<u8>;
pub type Nonce = GenericArray<u8, UInt<UInt<UInt<UInt<UTerm, B1>, B1>, B0>, B0>>;
pub type Tag = u8;
#[derive(Debug, Serialize, Deserialize, Clone, Copy, PartialEq, Eq)]
#[derive(
Debug, Serialize, Deserialize, Clone, Copy, PartialEq, Eq, BorshSerialize, BorshDeserialize,
)]
pub enum TxKind {
Public,
PrivacyPreserving,
}
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Eq)]
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Eq, BorshSerialize, BorshDeserialize)]
///General transaction object
pub struct EncodedTransaction {
pub tx_kind: TxKind,
@ -174,23 +177,12 @@ impl ActionData {
impl EncodedTransaction {
/// Computes and returns the SHA-256 hash of the Borsh-serialized representation of `self`.
pub fn hash(&self) -> TreeHashType {
let bytes_to_hash = self.to_bytes();
let bytes_to_hash = borsh::to_vec(&self).unwrap();
let mut hasher = sha2::Sha256::new();
hasher.update(&bytes_to_hash);
TreeHashType::from(hasher.finalize_fixed())
}
pub fn to_bytes(&self) -> Vec<u8> {
// TODO: Remove `unwrap` by implementing a `to_bytes` method
// that deterministically encodes all transaction fields to bytes
// and guarantees serialization will succeed.
serde_json::to_vec(&self).unwrap()
}
pub fn from_bytes(bytes: Vec<u8>) -> Self {
serde_json::from_slice(&bytes).unwrap()
}
pub fn log(&self) {
info!("Transaction hash is {:?}", hex::encode(self.hash()));
info!("Transaction tx_kind is {:?}", self.tx_kind);
@ -221,7 +213,7 @@ mod tests {
fn test_transaction_hash_is_sha256_of_borsh_bytes() {
let body = test_transaction_body();
let expected_hash = {
let data = body.to_bytes();
let data = borsh::to_vec(&body).unwrap();
let mut hasher = sha2::Sha256::new();
hasher.update(&data);
TreeHashType::from(hasher.finalize_fixed())
@ -236,8 +228,8 @@ mod tests {
fn test_to_bytes_from_bytes() {
let body = test_transaction_body();
let body_bytes = body.to_bytes();
let body_new = EncodedTransaction::from_bytes(body_bytes);
let body_bytes = borsh::to_vec(&body).unwrap();
let body_new = borsh::from_slice::<EncodedTransaction>(&body_bytes).unwrap();
assert_eq!(body, body_new);
}
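
Since hash() is now SHA-256 over the Borsh bytes (as the test above checks), an independent verifier can recompute it without going through JSON. A minimal sketch, assuming any Borsh-serializable transaction-like value:

use borsh::BorshSerialize;
use sha2::{Digest, Sha256};

// Mirror EncodedTransaction::hash: Borsh-encode the value, then SHA-256 the bytes.
fn recompute_hash<T: BorshSerialize>(tx: &T) -> [u8; 32] {
    let bytes = borsh::to_vec(tx).expect("in-memory encoding should not fail");
    let mut hasher = Sha256::new();
    hasher.update(&bytes);
    hasher.finalize().into()
}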

View File

@ -13,6 +13,7 @@ base64.workspace = true
tokio.workspace = true
hex.workspace = true
tempfile.workspace = true
borsh.workspace = true
nssa-core = { path = "../nssa/core", features = ["host"] }

View File

@ -949,7 +949,11 @@ async fn fetch_privacy_preserving_tx(
.unwrap();
let tx_base64_decode = BASE64.decode(transaction_encoded).unwrap();
match NSSATransaction::try_from(&EncodedTransaction::from_bytes(tx_base64_decode)).unwrap() {
match NSSATransaction::try_from(
&borsh::from_slice::<EncodedTransaction>(&tx_base64_decode).unwrap(),
)
.unwrap()
{
NSSATransaction::PrivacyPreserving(privacy_preserving_transaction) => {
privacy_preserving_transaction
}

View File

@ -5,7 +5,6 @@ edition = "2024"
[dependencies]
anyhow.workspace = true
serde_json.workspace = true
log.workspace = true
serde.workspace = true
k256.workspace = true

View File

@ -1,4 +1,3 @@
use log::info;
use nssa_core::{
NullifierPublicKey, SharedSecretKey,
encryption::{EphemeralPublicKey, EphemeralSecretKey, IncomingViewingPublicKey},
@ -49,11 +48,4 @@ impl EphemeralKeyHolder {
receiver_incoming_viewing_public_key,
)
}
pub fn log(&self) {
info!(
"Ephemeral private key is {:?}",
hex::encode(serde_json::to_vec(&self.ephemeral_secret_key).unwrap())
);
}
}

View File

@ -1,4 +1,3 @@
use log::info;
use nssa_core::{
NullifierPublicKey, SharedSecretKey,
encryption::{EphemeralPublicKey, IncomingViewingPublicKey},
@ -51,43 +50,13 @@ impl KeyChain {
&ephemeral_public_key_sender,
)
}
pub fn log(&self) {
info!(
"Secret spending key is {:?}",
hex::encode(serde_json::to_vec(&self.secret_spending_key).unwrap()),
);
info!(
"Nulifier secret key is {:?}",
hex::encode(serde_json::to_vec(&self.private_key_holder.nullifier_secret_key).unwrap()),
);
info!(
"Viewing secret key is {:?}",
hex::encode(
serde_json::to_vec(&self.private_key_holder.incoming_viewing_secret_key).unwrap()
),
);
info!(
"Viewing secret key is {:?}",
hex::encode(
serde_json::to_vec(&self.private_key_holder.outgoing_viewing_secret_key).unwrap()
),
);
info!(
"Nullifier public key is {:?}",
hex::encode(serde_json::to_vec(&self.nullifer_public_key).unwrap()),
);
info!(
"Viewing public key is {:?}",
hex::encode(serde_json::to_vec(&self.incoming_viewing_public_key).unwrap()),
);
}
}
#[cfg(test)]
mod tests {
use aes_gcm::aead::OsRng;
use k256::AffinePoint;
use k256::elliptic_curve::group::GroupEncoding;
use rand::RngCore;
use super::*;
@ -136,7 +105,7 @@ mod tests {
println!(
"Group generator {:?}",
hex::encode(serde_json::to_vec(&AffinePoint::GENERATOR).unwrap())
hex::encode(AffinePoint::GENERATOR.to_bytes())
);
println!();
@ -153,11 +122,11 @@ mod tests {
println!("Address{:?}", hex::encode(address.value()));
println!(
"Nulifier public key {:?}",
hex::encode(serde_json::to_vec(&nullifer_public_key).unwrap())
hex::encode(nullifer_public_key.to_byte_array())
);
println!(
"Viewing public key {:?}",
hex::encode(serde_json::to_vec(&viewing_public_key).unwrap())
hex::encode(viewing_public_key.to_bytes())
);
}
}
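
For reference, the to_bytes calls adopted in this test hex-encode raw byte strings rather than JSON byte arrays; for the k256 generator that is its 33-byte SEC1 compressed encoding. A small sketch:

use k256::AffinePoint;
use k256::elliptic_curve::group::GroupEncoding;

// Compressed SEC1 point: one tag byte (0x02/0x03) followed by the 32-byte x-coordinate.
fn generator_hex() -> String {
    let compressed = AffinePoint::GENERATOR.to_bytes();
    debug_assert_eq!(compressed.len(), 33);
    hex::encode(compressed)
}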

View File

@ -16,6 +16,7 @@ base64.workspace = true
actix-web.workspace = true
tokio.workspace = true
borsh.workspace = true
[dependencies.sequencer_core]
path = "../sequencer_core"

View File

@ -77,7 +77,7 @@ impl JsonHandler {
async fn process_send_tx(&self, request: Request) -> Result<Value, RpcErr> {
let send_tx_req = SendTxRequest::parse(Some(request.params))?;
let tx = EncodedTransaction::from_bytes(send_tx_req.transaction);
let tx = borsh::from_slice::<EncodedTransaction>(&send_tx_req.transaction).unwrap();
let tx_hash = hex::encode(tx.hash());
{
@ -107,7 +107,7 @@ impl JsonHandler {
};
let helperstruct = GetBlockDataResponse {
block: HashableBlockData::from(block).to_bytes(),
block: borsh::to_vec(&HashableBlockData::from(block)).unwrap(),
};
respond(helperstruct)
@ -243,7 +243,7 @@ impl JsonHandler {
.store
.block_store
.get_transaction_by_hash(hash)
.map(|tx| tx.to_bytes())
.map(|tx| borsh::to_vec(&tx).unwrap())
};
let base64_encoded = transaction.map(|tx| general_purpose::STANDARD.encode(tx));
let helperstruct = GetTransactionByHashResponse {
@ -644,7 +644,7 @@ mod tests {
async fn test_get_transaction_by_hash_for_existing_transaction() {
let (json_handler, _, tx) = components_for_tests();
let tx_hash_hex = hex::encode(tx.hash());
let expected_base64_encoded = general_purpose::STANDARD.encode(tx.to_bytes());
let expected_base64_encoded = general_purpose::STANDARD.encode(borsh::to_vec(&tx).unwrap());
let request = serde_json::json!({
"jsonrpc": "2.0",

View File

@ -5,9 +5,8 @@ edition = "2024"
[dependencies]
anyhow.workspace = true
serde_json.workspace = true
serde.workspace = true
thiserror.workspace = true
borsh.workspace = true
rocksdb.workspace = true

View File

@ -7,7 +7,7 @@ pub enum DbError {
},
#[error("Serialization error")]
SerializationError {
error: serde_json::Error,
error: borsh::io::Error,
additional_info: Option<String>,
},
#[error("Logic Error")]
@ -22,9 +22,9 @@ impl DbError {
}
}
pub fn serde_cast_message(serr: serde_json::Error, message: Option<String>) -> Self {
pub fn borsh_cast_message(berr: borsh::io::Error, message: Option<String>) -> Self {
Self::SerializationError {
error: serr,
error: berr,
additional_info: message,
}
}
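
A sketch of how a hypothetical caller can get at the borsh::io::Error now carried by the variant (the function below is illustrative, not part of this diff):

// Illustrative only: surface the io::ErrorKind carried by SerializationError.
fn log_db_error(err: &DbError) {
    if let DbError::SerializationError { error, additional_info } = err {
        eprintln!("borsh failure ({:?}): {:?}", error.kind(), additional_info);
    }
}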

View File

@ -5,10 +5,8 @@ use error::DbError;
use rocksdb::{
BoundColumnFamily, ColumnFamilyDescriptor, DBWithThreadMode, MultiThreaded, Options,
};
use sc_db_utils::{DataBlob, DataBlobChangeVariant, produce_blob_from_fit_vec};
pub mod error;
pub mod sc_db_utils;
///Maximal size of stored blocks in base
///
@ -22,9 +20,6 @@ pub const BUFF_SIZE_ROCKSDB: usize = usize::MAX;
///Keeping small to not run out of memory
pub const CACHE_SIZE: usize = 1000;
///Size in bytes of a singular smart contract data blob, stored in db
pub const SC_DATA_BLOB_SIZE: usize = 256;
///Key base for storing metainformation about id of first block in db
pub const DB_META_FIRST_BLOCK_IN_DB_KEY: &str = "first_block_in_db";
///Key base for storing metainformation about id of last current block in db
@ -36,14 +31,6 @@ pub const DB_META_SC_LIST: &str = "sc_list";
///Key base for storing snapshot which describe block id
pub const DB_SNAPSHOT_BLOCK_ID_KEY: &str = "block_id";
///Key base for storing snapshot which describe commitment
pub const DB_SNAPSHOT_COMMITMENT_KEY: &str = "commitment";
///Key base for storing snapshot which describe transaction
pub const DB_SNAPSHOT_TRANSACTION_KEY: &str = "transaction";
///Key base for storing snapshot which describe nullifier
pub const DB_SNAPSHOT_NULLIFIER_KEY: &str = "nullifier";
///Key base for storing snapshot which describe account
pub const DB_SNAPSHOT_ACCOUNT_KEY: &str = "account";
///Name of block column family
pub const CF_BLOCK_NAME: &str = "cf_block";
@ -54,9 +41,6 @@ pub const CF_SC_NAME: &str = "cf_sc";
///Name of snapshot column family
pub const CF_SNAPSHOT_NAME: &str = "cf_snapshot";
///Suffix, used to mark field, which contain length of smart contract
pub const SC_LEN_SUFFIX: &str = "sc_len";
pub type DbResult<T> = Result<T, DbError>;
pub struct RocksDBIO {
@ -142,11 +126,24 @@ impl RocksDBIO {
let cf_meta = self.meta_column();
let res = self
.db
.get_cf(&cf_meta, DB_META_FIRST_BLOCK_IN_DB_KEY)
.get_cf(
&cf_meta,
borsh::to_vec(&DB_META_FIRST_BLOCK_IN_DB_KEY).map_err(|err| {
DbError::borsh_cast_message(
err,
Some("Failed to serialize DB_META_FIRST_BLOCK_IN_DB_KEY".to_string()),
)
})?,
)
.map_err(|rerr| DbError::rocksdb_cast_message(rerr, None))?;
if let Some(data) = res {
Ok(u64::from_be_bytes(data.try_into().unwrap()))
Ok(borsh::from_slice::<u64>(&data).map_err(|err| {
DbError::borsh_cast_message(
err,
Some("Failed to deserialize first block".to_string()),
)
})?)
} else {
Err(DbError::db_interaction_error(
"First block not found".to_string(),
@ -158,11 +155,24 @@ impl RocksDBIO {
let cf_meta = self.meta_column();
let res = self
.db
.get_cf(&cf_meta, DB_META_LAST_BLOCK_IN_DB_KEY)
.get_cf(
&cf_meta,
borsh::to_vec(&DB_META_LAST_BLOCK_IN_DB_KEY).map_err(|err| {
DbError::borsh_cast_message(
err,
Some("Failed to serialize DB_META_LAST_BLOCK_IN_DB_KEY".to_string()),
)
})?,
)
.map_err(|rerr| DbError::rocksdb_cast_message(rerr, None))?;
if let Some(data) = res {
Ok(u64::from_be_bytes(data.try_into().unwrap()))
Ok(borsh::from_slice::<u64>(&data).map_err(|err| {
DbError::borsh_cast_message(
err,
Some("Failed to deserialize last block".to_string()),
)
})?)
} else {
Err(DbError::db_interaction_error(
"Last block not found".to_string(),
@ -174,7 +184,15 @@ impl RocksDBIO {
let cf_meta = self.meta_column();
let res = self
.db
.get_cf(&cf_meta, DB_META_FIRST_BLOCK_SET_KEY)
.get_cf(
&cf_meta,
borsh::to_vec(&DB_META_FIRST_BLOCK_SET_KEY).map_err(|err| {
DbError::borsh_cast_message(
err,
Some("Failed to serialize DB_META_FIRST_BLOCK_SET_KEY".to_string()),
)
})?,
)
.map_err(|rerr| DbError::rocksdb_cast_message(rerr, None))?;
Ok(res.is_some())
@ -185,8 +203,18 @@ impl RocksDBIO {
self.db
.put_cf(
&cf_meta,
DB_META_FIRST_BLOCK_IN_DB_KEY.as_bytes(),
block.header.block_id.to_be_bytes(),
borsh::to_vec(&DB_META_FIRST_BLOCK_IN_DB_KEY).map_err(|err| {
DbError::borsh_cast_message(
err,
Some("Failed to serialize DB_META_FIRST_BLOCK_IN_DB_KEY".to_string()),
)
})?,
borsh::to_vec(&block.header.block_id).map_err(|err| {
DbError::borsh_cast_message(
err,
Some("Failed to serialize first block id".to_string()),
)
})?,
)
.map_err(|rerr| DbError::rocksdb_cast_message(rerr, None))?;
@ -199,8 +227,18 @@ impl RocksDBIO {
self.db
.put_cf(
&cf_meta,
DB_META_LAST_BLOCK_IN_DB_KEY.as_bytes(),
block_id.to_be_bytes(),
borsh::to_vec(&DB_META_LAST_BLOCK_IN_DB_KEY).map_err(|err| {
DbError::borsh_cast_message(
err,
Some("Failed to serialize DB_META_LAST_BLOCK_IN_DB_KEY".to_string()),
)
})?,
borsh::to_vec(&block_id).map_err(|err| {
DbError::borsh_cast_message(
err,
Some("Failed to serialize last block id".to_string()),
)
})?,
)
.map_err(|rerr| DbError::rocksdb_cast_message(rerr, None))?;
Ok(())
@ -212,8 +250,18 @@ impl RocksDBIO {
self.db
.put_cf(
&cf_meta,
DB_META_SC_LIST.as_bytes(),
serde_json::to_vec(&sc_list).unwrap(),
borsh::to_vec(&DB_META_SC_LIST).map_err(|err| {
DbError::borsh_cast_message(
err,
Some("Failed to serialize DB_META_SC_LIST".to_string()),
)
})?,
borsh::to_vec(&sc_list).map_err(|err| {
DbError::borsh_cast_message(
err,
Some("Failed to serialize list of sc".to_string()),
)
})?,
)
.map_err(|rerr| DbError::rocksdb_cast_message(rerr, None))?;
Ok(())
@ -222,7 +270,16 @@ impl RocksDBIO {
pub fn put_meta_is_first_block_set(&self) -> DbResult<()> {
let cf_meta = self.meta_column();
self.db
.put_cf(&cf_meta, DB_META_FIRST_BLOCK_SET_KEY.as_bytes(), [1u8; 1])
.put_cf(
&cf_meta,
borsh::to_vec(&DB_META_FIRST_BLOCK_SET_KEY).map_err(|err| {
DbError::borsh_cast_message(
err,
Some("Failed to serialize DB_META_FIRST_BLOCK_SET_KEY".to_string()),
)
})?,
[1u8; 1],
)
.map_err(|rerr| DbError::rocksdb_cast_message(rerr, None))?;
Ok(())
}
@ -241,8 +298,18 @@ impl RocksDBIO {
self.db
.put_cf(
&cf_block,
block.header.block_id.to_be_bytes(),
HashableBlockData::from(block).to_bytes(),
borsh::to_vec(&block.header.block_id).map_err(|err| {
DbError::borsh_cast_message(
err,
Some("Failed to serialize block id".to_string()),
)
})?,
borsh::to_vec(&HashableBlockData::from(block)).map_err(|err| {
DbError::borsh_cast_message(
err,
Some("Failed to serialize block data".to_string()),
)
})?,
)
.map_err(|rerr| DbError::rocksdb_cast_message(rerr, None))?;
Ok(())
@ -252,11 +319,26 @@ impl RocksDBIO {
let cf_block = self.block_column();
let res = self
.db
.get_cf(&cf_block, block_id.to_be_bytes())
.get_cf(
&cf_block,
borsh::to_vec(&block_id).map_err(|err| {
DbError::borsh_cast_message(
err,
Some("Failed to serialize block id".to_string()),
)
})?,
)
.map_err(|rerr| DbError::rocksdb_cast_message(rerr, None))?;
if let Some(data) = res {
Ok(HashableBlockData::from_bytes(&data))
Ok(
borsh::from_slice::<HashableBlockData>(&data).map_err(|serr| {
DbError::borsh_cast_message(
serr,
Some("Failed to deserialize block data".to_string()),
)
})?,
)
} else {
Err(DbError::db_interaction_error(
"Block on this id not found".to_string(),
@ -269,17 +351,23 @@ impl RocksDBIO {
let cf_meta = self.meta_column();
let sc_list = self
.db
.get_cf(&cf_meta, DB_META_SC_LIST)
.map_err(|rerr| DbError::rocksdb_cast_message(rerr, None))?;
if let Some(data) = sc_list {
Ok(
serde_json::from_slice::<Vec<String>>(&data).map_err(|serr| {
DbError::serde_cast_message(
serr,
Some("List of Sc Deserialization failed".to_string()),
.get_cf(
&cf_meta,
borsh::to_vec(&DB_META_SC_LIST).map_err(|err| {
DbError::borsh_cast_message(
err,
Some("Failed to serialize DB_META_SC_LIST".to_string()),
)
})?,
)
.map_err(|rerr| DbError::rocksdb_cast_message(rerr, None))?;
if let Some(data) = sc_list {
Ok(borsh::from_slice::<Vec<String>>(&data).map_err(|serr| {
DbError::borsh_cast_message(
serr,
Some("List of Sc Deserialization failed".to_string()),
)
})?)
} else {
Err(DbError::db_interaction_error(
"Sc list not found".to_string(),
@ -297,250 +385,32 @@ impl RocksDBIO {
Ok(())
}
///Put/Modify sc state in db
pub fn put_sc_sc_state(
&self,
sc_addr: &str,
length: usize,
modifications: Vec<DataBlobChangeVariant>,
) -> DbResult<()> {
self.put_meta_sc(sc_addr.to_string())?;
let cf_sc = self.sc_column();
let sc_addr_loc = format!("{sc_addr:?}{SC_LEN_SUFFIX}");
let sc_len_addr = sc_addr_loc.as_bytes();
self.db
.put_cf(&cf_sc, sc_len_addr, length.to_be_bytes())
.map_err(|rerr| DbError::rocksdb_cast_message(rerr, None))?;
for data_change in modifications {
match data_change {
DataBlobChangeVariant::Created { id, blob } => {
let blob_addr = produce_address_for_data_blob_at_id(sc_addr, id);
self.db
.put_cf(&cf_sc, blob_addr, blob)
.map_err(|rerr| DbError::rocksdb_cast_message(rerr, None))?;
}
DataBlobChangeVariant::Modified {
id,
blob_old: _,
blob_new,
} => {
let blob_addr = produce_address_for_data_blob_at_id(sc_addr, id);
self.db
.put_cf(&cf_sc, blob_addr, blob_new)
.map_err(|rerr| DbError::rocksdb_cast_message(rerr, None))?;
}
DataBlobChangeVariant::Deleted { id } => {
let blob_addr = produce_address_for_data_blob_at_id(sc_addr, id);
self.db
.delete_cf(&cf_sc, blob_addr)
.map_err(|rerr| DbError::rocksdb_cast_message(rerr, None))?;
}
}
}
Ok(())
}
///Get sc state length in blobs from DB
pub fn get_sc_sc_state_len(&self, sc_addr: &str) -> DbResult<usize> {
let cf_sc = self.sc_column();
let sc_addr_loc = format!("{sc_addr:?}{SC_LEN_SUFFIX}");
let sc_len_addr = sc_addr_loc.as_bytes();
let sc_len = self
.db
.get_cf(&cf_sc, sc_len_addr)
.map_err(|rerr| DbError::rocksdb_cast_message(rerr, None))?;
if let Some(sc_len) = sc_len {
Ok(usize::from_be_bytes(sc_len.as_slice().try_into().unwrap()))
} else {
Err(DbError::db_interaction_error(format!(
"Sc len for {sc_addr:?} not found"
)))
}
}
///Get full sc state from DB
pub fn get_sc_sc_state(&self, sc_addr: &str) -> DbResult<Vec<DataBlob>> {
let cf_sc = self.sc_column();
let sc_len = self.get_sc_sc_state_len(sc_addr)?;
let mut data_blob_list = vec![];
for id in 0..sc_len {
let blob_addr = produce_address_for_data_blob_at_id(sc_addr, id);
let blob = self
.db
.get_cf(&cf_sc, blob_addr)
.map_err(|rerr| DbError::rocksdb_cast_message(rerr, None))?;
if let Some(blob_data) = blob {
data_blob_list.push(produce_blob_from_fit_vec(blob_data));
} else {
return Err(DbError::db_interaction_error(format!(
"Blob for {sc_addr:?} at id {id} not found"
)));
}
}
Ok(data_blob_list)
}
pub fn get_snapshot_block_id(&self) -> DbResult<u64> {
let cf_snapshot = self.snapshot_column();
let res = self
.db
.get_cf(&cf_snapshot, DB_SNAPSHOT_BLOCK_ID_KEY)
.get_cf(
&cf_snapshot,
borsh::to_vec(&DB_SNAPSHOT_BLOCK_ID_KEY).map_err(|err| {
DbError::borsh_cast_message(
err,
Some("Failed to serialize DB_SNAPSHOT_BLOCK_ID_KEY".to_string()),
)
})?,
)
.map_err(|rerr| DbError::rocksdb_cast_message(rerr, None))?;
if let Some(data) = res {
Ok(u64::from_be_bytes(data.try_into().unwrap()))
Ok(borsh::from_slice::<u64>(&data).map_err(|err| {
DbError::borsh_cast_message(
err,
Some("Failed to deserialize last block".to_string()),
)
})?)
} else {
Err(DbError::db_interaction_error(
"Snapshot block ID not found".to_string(),
))
}
}
pub fn get_snapshot_commitment(&self) -> DbResult<Vec<u8>> {
let cf_snapshot = self.snapshot_column();
let res = self
.db
.get_cf(&cf_snapshot, DB_SNAPSHOT_COMMITMENT_KEY)
.map_err(|rerr| DbError::rocksdb_cast_message(rerr, None))?;
if let Some(data) = res {
Ok(data)
} else {
Err(DbError::db_interaction_error(
"Snapshot commitment not found".to_string(),
))
}
}
pub fn get_snapshot_transaction(&self) -> DbResult<Vec<u8>> {
let cf_snapshot = self.snapshot_column();
let res = self
.db
.get_cf(&cf_snapshot, DB_SNAPSHOT_TRANSACTION_KEY)
.map_err(|rerr| DbError::rocksdb_cast_message(rerr, None))?;
if let Some(data) = res {
Ok(data)
} else {
Err(DbError::db_interaction_error(
"Snapshot transaction not found".to_string(),
))
}
}
pub fn get_snapshot_nullifier(&self) -> DbResult<Vec<u8>> {
let cf_snapshot = self.snapshot_column();
let res = self
.db
.get_cf(&cf_snapshot, DB_SNAPSHOT_NULLIFIER_KEY)
.map_err(|rerr| DbError::rocksdb_cast_message(rerr, None))?;
if let Some(data) = res {
Ok(data)
} else {
Err(DbError::db_interaction_error(
"Snapshot nullifier not found".to_string(),
))
}
}
pub fn get_snapshot_account(&self) -> DbResult<Vec<u8>> {
let cf_snapshot = self.snapshot_column();
let res = self
.db
.get_cf(&cf_snapshot, DB_SNAPSHOT_ACCOUNT_KEY)
.map_err(|rerr| DbError::rocksdb_cast_message(rerr, None))?;
if let Some(data) = res {
Ok(data)
} else {
Err(DbError::db_interaction_error(
"Snapshot account not found".to_string(),
))
}
}
pub fn put_snapshot_block_id_db(&self, block_id: u64) -> DbResult<()> {
let cf_snapshot = self.snapshot_column();
self.db
.put_cf(
&cf_snapshot,
DB_SNAPSHOT_BLOCK_ID_KEY.as_bytes(),
block_id.to_be_bytes(),
)
.map_err(|rerr| DbError::rocksdb_cast_message(rerr, None))?;
Ok(())
}
pub fn put_snapshot_commitement_db(&self, commitment: Vec<u8>) -> DbResult<()> {
let cf_snapshot = self.snapshot_column();
self.db
.put_cf(
&cf_snapshot,
DB_SNAPSHOT_COMMITMENT_KEY.as_bytes(),
commitment,
)
.map_err(|rerr| DbError::rocksdb_cast_message(rerr, None))?;
Ok(())
}
pub fn put_snapshot_transaction_db(&self, transaction: Vec<u8>) -> DbResult<()> {
let cf_snapshot = self.snapshot_column();
self.db
.put_cf(
&cf_snapshot,
DB_SNAPSHOT_TRANSACTION_KEY.as_bytes(),
transaction,
)
.map_err(|rerr| DbError::rocksdb_cast_message(rerr, None))?;
Ok(())
}
pub fn put_snapshot_nullifier_db(&self, nullifier: Vec<u8>) -> DbResult<()> {
let cf_snapshot = self.snapshot_column();
self.db
.put_cf(
&cf_snapshot,
DB_SNAPSHOT_NULLIFIER_KEY.as_bytes(),
nullifier,
)
.map_err(|rerr| DbError::rocksdb_cast_message(rerr, None))?;
Ok(())
}
pub fn put_snapshot_account_db(&self, account: Vec<u8>) -> DbResult<()> {
let cf_snapshot = self.snapshot_column();
self.db
.put_cf(&cf_snapshot, DB_SNAPSHOT_ACCOUNT_KEY.as_bytes(), account)
.map_err(|rerr| DbError::rocksdb_cast_message(rerr, None))?;
Ok(())
}
}
///Creates address for sc data blob at corresponding id
fn produce_address_for_data_blob_at_id(sc_addr: &str, id: usize) -> Vec<u8> {
let mut prefix_bytes: Vec<u8> = sc_addr.as_bytes().to_vec();
let id_bytes = id.to_be_bytes();
for byte in id_bytes {
prefix_bytes.push(byte);
}
prefix_bytes
}
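
The meta and block key lookups above all repeat the same encode-then-map_err shape; a hypothetical helper (illustrative, not part of this commit) could factor it out:

use borsh::BorshSerialize;

// Encode any Borsh-serializable key or value, mapping failures into DbError.
fn to_borsh_or_db_err<T: BorshSerialize>(value: &T, what: &str) -> DbResult<Vec<u8>> {
    borsh::to_vec(value).map_err(|err| {
        DbError::borsh_cast_message(err, Some(format!("Failed to serialize {what}")))
    })
}

// Usage sketch:
// self.db.get_cf(&cf_meta, to_borsh_or_db_err(&DB_META_SC_LIST, "DB_META_SC_LIST")?)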

View File

@ -1,160 +0,0 @@
use serde::{Deserialize, Serialize, de::Error};
use crate::SC_DATA_BLOB_SIZE;
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub struct DataBlob(pub [u8; SC_DATA_BLOB_SIZE]);
impl From<[u8; SC_DATA_BLOB_SIZE]> for DataBlob {
fn from(value: [u8; SC_DATA_BLOB_SIZE]) -> Self {
Self(value)
}
}
impl Serialize for DataBlob {
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where
S: serde::Serializer,
{
let data_vec = self.0.to_vec();
data_vec.serialize(serializer)
}
}
impl AsRef<[u8]> for DataBlob {
fn as_ref(&self) -> &[u8] {
self.0.as_ref()
}
}
impl<'de> Deserialize<'de> for DataBlob {
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where
D: serde::Deserializer<'de>,
{
let data_vec = Vec::<u8>::deserialize(deserializer)?;
let chunk: [u8; SC_DATA_BLOB_SIZE] = data_vec
.try_into()
.map_err(|data| {
anyhow::anyhow!("failed to fit vec {data:?} to {:?}", SC_DATA_BLOB_SIZE)
})
.map_err(D::Error::custom)?;
Ok(Self(chunk))
}
}
impl DataBlob {
pub fn to_vec(&self) -> Vec<u8> {
self.0.to_vec()
}
}
#[allow(clippy::large_enum_variant)]
#[derive(Debug, Clone, Copy, Serialize, Deserialize, PartialEq, Eq)]
pub enum DataBlobChangeVariant {
Created {
id: usize,
blob: DataBlob,
},
Modified {
id: usize,
blob_old: DataBlob,
blob_new: DataBlob,
},
Deleted {
id: usize,
},
}
///Produce `DataBlob` from vector of size <= `SC_DATA_BLOB_SIZE`
///
///Extends to `SC_DATA_BLOB_SIZE`, if necessary.
///
///Panics, if size > `SC_DATA_BLOB_SIZE`
pub fn produce_blob_from_fit_vec(data: Vec<u8>) -> DataBlob {
let data_len = data.len();
assert!(data_len <= SC_DATA_BLOB_SIZE);
let mut blob: DataBlob = [0; SC_DATA_BLOB_SIZE].into();
for (idx, item) in data.into_iter().enumerate() {
blob.0[idx] = item
}
blob
}
#[cfg(test)]
mod tests {
use super::*;
use serde_json;
const TEST_BLOB_SIZE: usize = 256; // Define a test blob size for simplicity
static SC_DATA_BLOB_SIZE: usize = TEST_BLOB_SIZE;
fn sample_vec() -> Vec<u8> {
(0..SC_DATA_BLOB_SIZE)
.collect::<Vec<usize>>()
.iter()
.map(|&x| x as u8)
.collect()
}
fn sample_data_blob() -> DataBlob {
let vec: Vec<u8> = sample_vec();
produce_blob_from_fit_vec(vec)
}
#[test]
fn test_serialize_data_blob() {
let blob = sample_data_blob();
let json = serde_json::to_string(&blob).unwrap();
let expected_json = serde_json::to_string(&sample_vec()).unwrap();
assert_eq!(json, expected_json);
}
#[test]
fn test_deserialize_data_blob() {
let data = sample_vec();
let json = serde_json::to_string(&data).unwrap();
let deserialized: DataBlob = serde_json::from_str(&json).unwrap();
assert_eq!(deserialized.to_vec(), data);
}
#[test]
fn test_serialize_deserialize_data_blob_change_variant() {
let blob1 = sample_data_blob();
let blob2 = produce_blob_from_fit_vec((50..50 + SC_DATA_BLOB_SIZE as u8).collect());
let variants = vec![
DataBlobChangeVariant::Created { id: 1, blob: blob1 },
DataBlobChangeVariant::Modified {
id: 2,
blob_old: blob1,
blob_new: blob2,
},
DataBlobChangeVariant::Deleted { id: 3 },
];
for variant in variants {
let json = serde_json::to_string(&variant).unwrap();
let deserialized: DataBlobChangeVariant = serde_json::from_str(&json).unwrap();
assert_eq!(variant, deserialized);
}
}
#[test]
fn test_produce_blob_from_fit_vec() {
let data = (0..255).collect();
let blob = produce_blob_from_fit_vec(data);
assert_eq!(blob.0[..4], [0, 1, 2, 3]);
}
#[test]
#[should_panic]
fn test_produce_blob_from_fit_vec_panic() {
let data = vec![0; SC_DATA_BLOB_SIZE + 1];
let _ = produce_blob_from_fit_vec(data);
}
}

View File

@ -16,6 +16,7 @@ nssa-core = { path = "../nssa/core" }
base64.workspace = true
k256 = { version = "0.13.3" }
bytemuck = "1.23.2"
borsh.workspace = true
hex.workspace = true
rand.workspace = true

View File

@ -203,7 +203,7 @@ impl WalletCore {
pub async fn poll_native_token_transfer(&self, hash: String) -> Result<NSSATransaction> {
let transaction_encoded = self.poller.poll_tx(hash).await?;
let tx_base64_decode = BASE64.decode(transaction_encoded)?;
let pub_tx = EncodedTransaction::from_bytes(tx_base64_decode);
let pub_tx = borsh::from_slice::<EncodedTransaction>(&tx_base64_decode).unwrap();
Ok(NSSATransaction::try_from(&pub_tx)?)
}
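
As a minimal alternative to the unwrap here, assuming poll_native_token_transfer returns an anyhow-style Result (as the crate's other fallible paths suggest), the borsh::io::Error can propagate with `?`:

// Sketch only: relies on the surrounding function returning anyhow::Result.
let pub_tx: EncodedTransaction = borsh::from_slice(&tx_base64_decode)?;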