diff --git a/Cargo.toml b/Cargo.toml
index 9467631..11e3144 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -41,9 +41,10 @@ ark-bn254 = "0.5.0"
 ark-ff = "0.5.0"
 tiny-keccak = { version = "2.0.2", features = ["keccak"] }
 base64 = "0.22.1"
-chrono = "0.4.41"
 bip39 = "2.2.0"
 hmac-sha512 = "1.1.7"
+chrono = "0.4.41"
+borsh = "1.5.7"
 
 rocksdb = { version = "0.21.0", default-features = false, features = [
     "snappy",
diff --git a/common/Cargo.toml b/common/Cargo.toml
index ea01279..d235246 100644
--- a/common/Cargo.toml
+++ b/common/Cargo.toml
@@ -17,6 +17,7 @@ log.workspace = true
 elliptic-curve.workspace = true
 hex.workspace = true
 nssa-core = { path = "../nssa/core", features = ["host"] }
+borsh.workspace = true
 
 [dependencies.nssa]
 path = "../nssa"
diff --git a/common/src/block.rs b/common/src/block.rs
index 64bce30..20f3aa7 100644
--- a/common/src/block.rs
+++ b/common/src/block.rs
@@ -1,5 +1,5 @@
+use borsh::{BorshDeserialize, BorshSerialize};
 use rs_merkle::Hasher;
-use std::io::{Cursor, Read};
 
 use crate::{OwnHasher, transaction::EncodedTransaction};
 
@@ -27,7 +27,7 @@ pub struct Block {
     pub body: BlockBody,
 }
 
-#[derive(Debug, PartialEq, Eq)]
+#[derive(Debug, PartialEq, Eq, BorshSerialize, BorshDeserialize)]
 pub struct HashableBlockData {
     pub block_id: BlockId,
     pub prev_block_hash: BlockHash,
@@ -37,7 +37,7 @@ pub struct HashableBlockData {
 
 impl HashableBlockData {
     pub fn into_block(self, signing_key: &nssa::PrivateKey) -> Block {
-        let data_bytes = self.to_bytes();
+        let data_bytes = borsh::to_vec(&self).unwrap();
         let signature = nssa::Signature::new(signing_key, &data_bytes);
         let hash = OwnHasher::hash(&data_bytes);
         Block {
@@ -66,75 +66,6 @@ impl From<Block> for HashableBlockData {
     }
 }
 
-impl HashableBlockData {
-    pub fn to_bytes(&self) -> Vec<u8> {
-        let mut bytes = Vec::new();
-        bytes.extend_from_slice(&self.block_id.to_le_bytes());
-        bytes.extend_from_slice(&self.prev_block_hash);
-        bytes.extend_from_slice(&self.timestamp.to_le_bytes());
-        let num_transactions: u32 = self.transactions.len() as u32;
-        bytes.extend_from_slice(&num_transactions.to_le_bytes());
-        for tx in &self.transactions {
-            let transaction_bytes = tx.to_bytes();
-            let num_transaction_bytes: u32 = transaction_bytes.len() as u32;
-
-            bytes.extend_from_slice(&num_transaction_bytes.to_le_bytes());
-            bytes.extend_from_slice(&tx.to_bytes());
-        }
-        bytes
-    }
-
-    // TODO: Improve error handling. Remove unwraps.
-    pub fn from_bytes(data: &[u8]) -> Self {
-        let mut cursor = Cursor::new(data);
-
-        let block_id = u64_from_cursor(&mut cursor);
-
-        let mut prev_block_hash = [0u8; 32];
-        cursor.read_exact(&mut prev_block_hash).unwrap();
-
-        let timestamp = u64_from_cursor(&mut cursor);
-
-        let num_transactions = u32_from_cursor(&mut cursor) as usize;
-
-        let mut transactions = Vec::with_capacity(num_transactions);
-        for _ in 0..num_transactions {
-            let tx_len = u32_from_cursor(&mut cursor) as usize;
-            let mut tx_bytes = Vec::with_capacity(tx_len);
-
-            for _ in 0..tx_len {
-                let mut buff = [0; 1];
-                cursor.read_exact(&mut buff).unwrap();
-                tx_bytes.push(buff[0]);
-            }
-
-            let tx = EncodedTransaction::from_bytes(tx_bytes);
-            transactions.push(tx);
-        }
-
-        Self {
-            block_id,
-            prev_block_hash,
-            timestamp,
-            transactions,
-        }
-    }
-}
-
-// TODO: Improve error handling. Remove unwraps.
-pub fn u32_from_cursor(cursor: &mut Cursor<&[u8]>) -> u32 {
-    let mut word_buf = [0u8; 4];
-    cursor.read_exact(&mut word_buf).unwrap();
-    u32::from_le_bytes(word_buf)
-}
-
-// TODO: Improve error handling. Remove unwraps.
-pub fn u64_from_cursor(cursor: &mut Cursor<&[u8]>) -> u64 {
-    let mut word_buf = [0u8; 8];
-    cursor.read_exact(&mut word_buf).unwrap();
-    u64::from_le_bytes(word_buf)
-}
-
 #[cfg(test)]
 mod tests {
     use crate::{block::HashableBlockData, test_utils};
@@ -144,8 +75,8 @@ mod tests {
         let transactions = vec![test_utils::produce_dummy_empty_transaction()];
         let block = test_utils::produce_dummy_block(1, Some([1; 32]), transactions);
         let hashable = HashableBlockData::from(block);
-        let bytes = hashable.to_bytes();
-        let block_from_bytes = HashableBlockData::from_bytes(&bytes);
+        let bytes = borsh::to_vec(&hashable).unwrap();
+        let block_from_bytes = borsh::from_slice::<HashableBlockData>(&bytes).unwrap();
         assert_eq!(hashable, block_from_bytes);
     }
 }
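The hand-rolled length-prefixed codec removed above is replaced by borsh's derived encoding. A minimal, self-contained sketch of the round trip and layout — `BlockData` is a stand-in with the same field shapes, not the real `HashableBlockData`, and it assumes `borsh` is pulled in with the `derive` feature:

```rust
use borsh::{BorshDeserialize, BorshSerialize};

// Borsh encodes u64 little-endian, [u8; 32] as raw bytes, and Vec<T> as a
// u32 LE element count followed by the elements — close to the removed
// to_bytes(), except each transaction is encoded by its own borsh layout
// rather than as a u32-length-prefixed opaque byte blob.
#[derive(BorshSerialize, BorshDeserialize, Debug, PartialEq)]
struct BlockData {
    block_id: u64,
    prev_block_hash: [u8; 32],
    timestamp: u64,
    transactions: Vec<Vec<u8>>,
}

fn main() {
    let data = BlockData {
        block_id: 1,
        prev_block_hash: [1; 32],
        timestamp: 42,
        transactions: vec![vec![0xde, 0xad]],
    };
    let bytes = borsh::to_vec(&data).unwrap();
    assert_eq!(&bytes[..8], &1u64.to_le_bytes()); // block_id, little-endian
    let back = BlockData::try_from_slice(&bytes).unwrap();
    assert_eq!(data, back);
}
```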
diff --git a/common/src/sequencer_client/mod.rs b/common/src/sequencer_client/mod.rs
index dade286..1aec903 100644
--- a/common/src/sequencer_client/mod.rs
+++ b/common/src/sequencer_client/mod.rs
@@ -151,7 +151,7 @@ impl SequencerClient {
         let transaction = EncodedTransaction::from(NSSATransaction::Public(transaction));
 
         let tx_req = SendTxRequest {
-            transaction: transaction.to_bytes(),
+            transaction: borsh::to_vec(&transaction).unwrap(),
         };
 
         let req = serde_json::to_value(tx_req)?;
@@ -171,7 +171,7 @@
         let transaction = EncodedTransaction::from(NSSATransaction::PrivacyPreserving(transaction));
 
         let tx_req = SendTxRequest {
-            transaction: transaction.to_bytes(),
+            transaction: borsh::to_vec(&transaction).unwrap(),
         };
 
         let req = serde_json::to_value(tx_req)?;
diff --git a/common/src/transaction.rs b/common/src/transaction.rs
index d196687..3a2bda1 100644
--- a/common/src/transaction.rs
+++ b/common/src/transaction.rs
@@ -1,3 +1,4 @@
+use borsh::{BorshDeserialize, BorshSerialize};
 use k256::ecdsa::{Signature, SigningKey, VerifyingKey};
 use log::info;
 use serde::{Deserialize, Serialize};
@@ -34,13 +35,15 @@ pub type CipherText = Vec<u8>;
 pub type Nonce = GenericArray<u8, UInt<UInt<UInt<UInt<UTerm, B1>, B1>, B0>, B0>>;
 pub type Tag = u8;
 
-#[derive(Debug, Serialize, Deserialize, Clone, Copy, PartialEq, Eq)]
+#[derive(
+    Debug, Serialize, Deserialize, Clone, Copy, PartialEq, Eq, BorshSerialize, BorshDeserialize,
+)]
 pub enum TxKind {
     Public,
     PrivacyPreserving,
 }
 
-#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Eq)]
+#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Eq, BorshSerialize, BorshDeserialize)]
 ///General transaction object
 pub struct EncodedTransaction {
     pub tx_kind: TxKind,
@@ -174,23 +177,12 @@ impl ActionData {
 impl EncodedTransaction {
     /// Computes and returns the SHA-256 hash of the JSON-serialized representation of `self`.
     pub fn hash(&self) -> TreeHashType {
-        let bytes_to_hash = self.to_bytes();
+        let bytes_to_hash = borsh::to_vec(&self).unwrap();
         let mut hasher = sha2::Sha256::new();
         hasher.update(&bytes_to_hash);
         TreeHashType::from(hasher.finalize_fixed())
     }
 
-    pub fn to_bytes(&self) -> Vec<u8> {
-        // TODO: Remove `unwrap` by implementing a `to_bytes` method
-        // that deterministically encodes all transaction fields to bytes
-        // and guarantees serialization will succeed.
-        serde_json::to_vec(&self).unwrap()
-    }
-
-    pub fn from_bytes(bytes: Vec<u8>) -> Self {
-        serde_json::from_slice(&bytes).unwrap()
-    }
-
     pub fn log(&self) {
         info!("Transaction hash is {:?}", hex::encode(self.hash()));
         info!("Transaction tx_kind is {:?}", self.tx_kind);
@@ -221,7 +213,7 @@ mod tests {
     fn test_transaction_hash_is_sha256_of_json_bytes() {
         let body = test_transaction_body();
         let expected_hash = {
-            let data = body.to_bytes();
+            let data = borsh::to_vec(&body).unwrap();
             let mut hasher = sha2::Sha256::new();
             hasher.update(&data);
             TreeHashType::from(hasher.finalize_fixed())
@@ -236,8 +228,8 @@ mod tests {
     fn test_to_bytes_from_bytes() {
         let body = test_transaction_body();
 
-        let body_bytes = body.to_bytes();
-        let body_new = EncodedTransaction::from_bytes(body_bytes);
+        let body_bytes = borsh::to_vec(&body).unwrap();
+        let body_new = borsh::from_slice::<EncodedTransaction>(&body_bytes).unwrap();
 
         assert_eq!(body, body_new);
     }
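Two notes on the hunks above. The `Nonce` alias spells out typenum's `U12` (`UInt<UInt<UInt<UInt<UTerm, B1>, B1>, B0>, B0>` is binary 1100 = 12, the 96-bit AES-GCM nonce size). And `EncodedTransaction::hash` now digests borsh bytes, so the surviving doc comment and test name that say "JSON" are stale. A sketch of the layout borsh gives a transaction-like shape — hypothetical stand-in types, not the real ones:

```rust
use borsh::{BorshDeserialize, BorshSerialize};
use sha2::{Digest, Sha256};

#[derive(BorshSerialize, BorshDeserialize, Debug, PartialEq)]
enum TxKind {
    Public,
    PrivacyPreserving,
}

#[derive(BorshSerialize, BorshDeserialize, Debug, PartialEq)]
struct Tx {
    tx_kind: TxKind,
    payload: Vec<u8>,
}

fn main() {
    let tx = Tx { tx_kind: TxKind::PrivacyPreserving, payload: vec![1, 2, 3] };
    let bytes = borsh::to_vec(&tx).unwrap();
    // Enum variants are a single u8 tag; Vec<u8> is a u32 LE length + bytes.
    // No field names and no key-ordering ambiguity, unlike JSON.
    assert_eq!(bytes, vec![1, 3, 0, 0, 0, 1, 2, 3]);
    println!("tx hash: {}", hex::encode(Sha256::digest(&bytes)));
    assert_eq!(Tx::try_from_slice(&bytes).unwrap(), tx);
}
```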
diff --git a/integration_tests/Cargo.toml b/integration_tests/Cargo.toml
index 75edf51..db2dfeb 100644
--- a/integration_tests/Cargo.toml
+++ b/integration_tests/Cargo.toml
@@ -13,6 +13,7 @@ base64.workspace = true
 tokio.workspace = true
 hex.workspace = true
 tempfile.workspace = true
+borsh.workspace = true
 
 nssa-core = { path = "../nssa/core", features = ["host"] }
 
diff --git a/integration_tests/src/lib.rs b/integration_tests/src/lib.rs
index f27669c..1690445 100644
--- a/integration_tests/src/lib.rs
+++ b/integration_tests/src/lib.rs
@@ -949,7 +949,11 @@ async fn fetch_privacy_preserving_tx(
         .unwrap();
     let tx_base64_decode = BASE64.decode(transaction_encoded).unwrap();
 
-    match NSSATransaction::try_from(&EncodedTransaction::from_bytes(tx_base64_decode)).unwrap() {
+    match NSSATransaction::try_from(
+        &borsh::from_slice::<EncodedTransaction>(&tx_base64_decode).unwrap(),
+    )
+    .unwrap()
+    {
         NSSATransaction::PrivacyPreserving(privacy_preserving_transaction) => {
             privacy_preserving_transaction
         }
diff --git a/key_protocol/Cargo.toml b/key_protocol/Cargo.toml
index dadc58d..6addf41 100644
--- a/key_protocol/Cargo.toml
+++ b/key_protocol/Cargo.toml
@@ -5,7 +5,6 @@ edition = "2024"
 
 [dependencies]
 anyhow.workspace = true
-serde_json.workspace = true
 log.workspace = true
 serde.workspace = true
 k256.workspace = true
diff --git a/key_protocol/src/key_management/ephemeral_key_holder.rs b/key_protocol/src/key_management/ephemeral_key_holder.rs
index 8d4b3ac..4d4fe9d 100644
--- a/key_protocol/src/key_management/ephemeral_key_holder.rs
+++ b/key_protocol/src/key_management/ephemeral_key_holder.rs
@@ -1,4 +1,3 @@
-use log::info;
 use nssa_core::{
     NullifierPublicKey, SharedSecretKey,
     encryption::{EphemeralPublicKey, EphemeralSecretKey, IncomingViewingPublicKey},
@@ -49,11 +48,4 @@ impl EphemeralKeyHolder {
             receiver_incoming_viewing_public_key,
         )
     }
-
-    pub fn log(&self) {
-        info!(
-            "Ephemeral private key is {:?}",
-            hex::encode(serde_json::to_vec(&self.ephemeral_secret_key).unwrap())
-        );
-    }
 }
diff --git a/key_protocol/src/key_management/mod.rs b/key_protocol/src/key_management/mod.rs
index b74d3e4..5650fd5 100644
--- a/key_protocol/src/key_management/mod.rs
+++ b/key_protocol/src/key_management/mod.rs
@@ -1,4 +1,3 @@
-use log::info;
 use nssa_core::{
     NullifierPublicKey, SharedSecretKey,
     encryption::{EphemeralPublicKey, IncomingViewingPublicKey},
@@ -51,43 +50,13 @@ impl KeyChain {
             &ephemeral_public_key_sender,
         )
     }
-
-    pub fn log(&self) {
-        info!(
-            "Secret spending key is {:?}",
-            hex::encode(serde_json::to_vec(&self.secret_spending_key).unwrap()),
-        );
-        info!(
-            "Nulifier secret key is {:?}",
-            hex::encode(serde_json::to_vec(&self.private_key_holder.nullifier_secret_key).unwrap()),
-        );
-        info!(
-            "Viewing secret key is {:?}",
-            hex::encode(
-                serde_json::to_vec(&self.private_key_holder.incoming_viewing_secret_key).unwrap()
-            ),
-        );
-        info!(
-            "Viewing secret key is {:?}",
-            hex::encode(
-                serde_json::to_vec(&self.private_key_holder.outgoing_viewing_secret_key).unwrap()
-            ),
-        );
-        info!(
-            "Nullifier public key is {:?}",
-            hex::encode(serde_json::to_vec(&self.nullifer_public_key).unwrap()),
-        );
-        info!(
-            "Viewing public key is {:?}",
-            hex::encode(serde_json::to_vec(&self.incoming_viewing_public_key).unwrap()),
-        );
-    }
 }
 
 #[cfg(test)]
 mod tests {
     use aes_gcm::aead::OsRng;
     use k256::AffinePoint;
+    use k256::elliptic_curve::group::GroupEncoding;
     use rand::RngCore;
 
     use super::*;
@@ -136,7 +105,7 @@ mod tests {
 
         println!(
             "Group generator {:?}",
-            hex::encode(serde_json::to_vec(&AffinePoint::GENERATOR).unwrap())
+            hex::encode(AffinePoint::GENERATOR.to_bytes())
        );
         println!();
 
@@ -153,11 +122,11 @@ mod tests {
         println!("Address{:?}", hex::encode(address.value()));
         println!(
             "Nulifier public key {:?}",
-            hex::encode(serde_json::to_vec(&nullifer_public_key).unwrap())
+            hex::encode(nullifer_public_key.to_byte_array())
         );
         println!(
             "Viewing public key {:?}",
-            hex::encode(serde_json::to_vec(&viewing_public_key).unwrap())
+            hex::encode(viewing_public_key.to_bytes())
         );
     }
 }
diff --git a/sequencer_rpc/Cargo.toml b/sequencer_rpc/Cargo.toml
index 7972342..af7e011 100644
--- a/sequencer_rpc/Cargo.toml
+++ b/sequencer_rpc/Cargo.toml
@@ -16,6 +16,7 @@ base64.workspace = true
 actix-web.workspace = true
 tokio.workspace = true
+borsh.workspace = true
 
 [dependencies.sequencer_core]
 path = "../sequencer_core"
diff --git a/sequencer_rpc/src/process.rs b/sequencer_rpc/src/process.rs
index 578c9bd..f376f94 100644
--- a/sequencer_rpc/src/process.rs
+++ b/sequencer_rpc/src/process.rs
@@ -77,7 +77,7 @@ impl JsonHandler {
     async fn process_send_tx(&self, request: Request) -> Result {
         let send_tx_req = SendTxRequest::parse(Some(request.params))?;
 
-        let tx = EncodedTransaction::from_bytes(send_tx_req.transaction);
+        let tx = borsh::from_slice::<EncodedTransaction>(&send_tx_req.transaction).unwrap();
         let tx_hash = hex::encode(tx.hash());
 
         {
@@ -107,7 +107,7 @@
         };
 
         let helperstruct = GetBlockDataResponse {
-            block: HashableBlockData::from(block).to_bytes(),
+            block: borsh::to_vec(&HashableBlockData::from(block)).unwrap(),
         };
 
         respond(helperstruct)
@@ -243,7 +243,7 @@
                 .store
                 .block_store
                 .get_transaction_by_hash(hash)
-                .map(|tx| tx.to_bytes())
+                .map(|tx| borsh::to_vec(&tx).unwrap())
         };
         let base64_encoded = transaction.map(|tx| general_purpose::STANDARD.encode(tx));
         let helperstruct = GetTransactionByHashResponse {
@@ -644,7 +644,7 @@ mod tests {
     async fn test_get_transaction_by_hash_for_existing_transaction() {
         let (json_handler, _, tx) = components_for_tests();
         let tx_hash_hex = hex::encode(tx.hash());
-        let expected_base64_encoded = general_purpose::STANDARD.encode(tx.to_bytes());
+        let expected_base64_encoded = general_purpose::STANDARD.encode(borsh::to_vec(&tx).unwrap());
 
         let request = serde_json::json!({
             "jsonrpc": "2.0",
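Client and server now agree on borsh as the wire encoding inside `SendTxRequest.transaction`; the JSON-RPC envelope itself is unchanged. A hedged sketch of the symmetric pair, with stand-in types rather than the real ones:

```rust
use borsh::{BorshDeserialize, BorshSerialize};

// Stand-ins for the real types; only the Vec<u8> hand-off matters here.
#[derive(BorshSerialize, BorshDeserialize, Debug, PartialEq)]
struct EncodedTransaction {
    tx_kind: u8,
    data: Vec<u8>,
}

struct SendTxRequest {
    transaction: Vec<u8>,
}

fn main() {
    let tx = EncodedTransaction { tx_kind: 0, data: vec![7; 4] };
    // Client side (SequencerClient::send_*): serialize before wrapping.
    let req = SendTxRequest { transaction: borsh::to_vec(&tx).unwrap() };
    // Server side (JsonHandler::process_send_tx): deserialize the payload.
    let decoded = borsh::from_slice::<EncodedTransaction>(&req.transaction).unwrap();
    assert_eq!(decoded, tx);
}
```

Note that `process_send_tx` above unwraps the decode, so a malformed payload from a client would panic the handler rather than surface as an RPC error.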
diff --git a/storage/Cargo.toml b/storage/Cargo.toml
index 604b2fe..1bc9d07 100644
--- a/storage/Cargo.toml
+++ b/storage/Cargo.toml
@@ -5,9 +5,8 @@ edition = "2024"
 
 [dependencies]
 anyhow.workspace = true
-serde_json.workspace = true
-serde.workspace = true
 thiserror.workspace = true
+borsh.workspace = true
 
 rocksdb.workspace = true
 
diff --git a/storage/src/error.rs b/storage/src/error.rs
index feb36f6..a3fd80b 100644
--- a/storage/src/error.rs
+++ b/storage/src/error.rs
@@ -7,7 +7,7 @@ pub enum DbError {
     },
     #[error("Serialization error")]
     SerializationError {
-        error: serde_json::Error,
+        error: borsh::io::Error,
         additional_info: Option<String>,
     },
     #[error("Logic Error")]
@@ -22,9 +22,9 @@ impl DbError {
         }
     }
 
-    pub fn serde_cast_message(serr: serde_json::Error, message: Option<String>) -> Self {
+    pub fn borsh_cast_message(berr: borsh::io::Error, message: Option<String>) -> Self {
         Self::SerializationError {
-            error: serr,
+            error: berr,
             additional_info: message,
         }
     }
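`DbError::borsh_cast_message` mirrors the old `serde_cast_message`, carrying `borsh::io::Error` (a re-export of `std::io::Error`). A minimal sketch of the wrapping pattern used throughout `storage/src/lib.rs` below, with a trimmed-down stand-in for `DbError`:

```rust
// Trimmed-down stand-in for storage's DbError.
#[derive(Debug)]
enum DbError {
    SerializationError {
        error: borsh::io::Error,
        additional_info: Option<String>,
    },
}

impl DbError {
    fn borsh_cast_message(berr: borsh::io::Error, message: Option<String>) -> Self {
        Self::SerializationError { error: berr, additional_info: message }
    }
}

fn encode_key(key: &str) -> Result<Vec<u8>, DbError> {
    borsh::to_vec(&key)
        .map_err(|err| DbError::borsh_cast_message(err, Some(format!("Failed to serialize {key}"))))
}

fn main() {
    // Borsh encodes &str as a u32 LE length prefix plus UTF-8 bytes, so the
    // stored key differs from the old raw key.as_bytes() representation.
    let k = encode_key("first_block_in_db").unwrap();
    assert_eq!(k.len(), 4 + "first_block_in_db".len());
}
```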
diff --git a/storage/src/lib.rs b/storage/src/lib.rs
index d06fc0c..37d5971 100644
--- a/storage/src/lib.rs
+++ b/storage/src/lib.rs
@@ -5,10 +5,8 @@ use error::DbError;
 use rocksdb::{
     BoundColumnFamily, ColumnFamilyDescriptor, DBWithThreadMode, MultiThreaded, Options,
 };
-use sc_db_utils::{DataBlob, DataBlobChangeVariant, produce_blob_from_fit_vec};
 
 pub mod error;
-pub mod sc_db_utils;
 
 ///Maximal size of stored blocks in base
 ///
@@ -22,9 +20,6 @@ pub const BUFF_SIZE_ROCKSDB: usize = usize::MAX;
 ///Keeping small to not run out of memory
 pub const CACHE_SIZE: usize = 1000;
 
-///Size in bytes of a singular smart contract data blob, stored in db
-pub const SC_DATA_BLOB_SIZE: usize = 256;
-
 ///Key base for storing metainformation about id of first block in db
 pub const DB_META_FIRST_BLOCK_IN_DB_KEY: &str = "first_block_in_db";
 ///Key base for storing metainformation about id of last current block in db
@@ -36,14 +31,6 @@ pub const DB_META_SC_LIST: &str = "sc_list";
 ///Key base for storing snapshot which describe block id
 pub const DB_SNAPSHOT_BLOCK_ID_KEY: &str = "block_id";
 
-///Key base for storing snapshot which describe commitment
-pub const DB_SNAPSHOT_COMMITMENT_KEY: &str = "commitment";
-///Key base for storing snapshot which describe transaction
-pub const DB_SNAPSHOT_TRANSACTION_KEY: &str = "transaction";
-///Key base for storing snapshot which describe nullifier
-pub const DB_SNAPSHOT_NULLIFIER_KEY: &str = "nullifier";
-///Key base for storing snapshot which describe account
-pub const DB_SNAPSHOT_ACCOUNT_KEY: &str = "account";
 
 ///Name of block column family
 pub const CF_BLOCK_NAME: &str = "cf_block";
@@ -54,9 +41,6 @@ pub const CF_SC_NAME: &str = "cf_sc";
 ///Name of snapshot column family
 pub const CF_SNAPSHOT_NAME: &str = "cf_snapshot";
 
-///Suffix, used to mark field, which contain length of smart contract
-pub const SC_LEN_SUFFIX: &str = "sc_len";
-
 pub type DbResult<T> = Result<T, DbError>;
 
 pub struct RocksDBIO {
@@ -142,11 +126,24 @@
         let cf_meta = self.meta_column();
         let res = self
             .db
-            .get_cf(&cf_meta, DB_META_FIRST_BLOCK_IN_DB_KEY)
+            .get_cf(
+                &cf_meta,
+                borsh::to_vec(&DB_META_FIRST_BLOCK_IN_DB_KEY).map_err(|err| {
+                    DbError::borsh_cast_message(
+                        err,
+                        Some("Failed to serialize DB_META_FIRST_BLOCK_IN_DB_KEY".to_string()),
+                    )
+                })?,
+            )
             .map_err(|rerr| DbError::rocksdb_cast_message(rerr, None))?;
 
         if let Some(data) = res {
-            Ok(u64::from_be_bytes(data.try_into().unwrap()))
+            Ok(borsh::from_slice::<u64>(&data).map_err(|err| {
+                DbError::borsh_cast_message(
+                    err,
+                    Some("Failed to deserialize first block".to_string()),
+                )
+            })?)
         } else {
             Err(DbError::db_interaction_error(
                 "First block not found".to_string(),
@@ -158,11 +155,24 @@
         let cf_meta = self.meta_column();
         let res = self
             .db
-            .get_cf(&cf_meta, DB_META_LAST_BLOCK_IN_DB_KEY)
+            .get_cf(
+                &cf_meta,
+                borsh::to_vec(&DB_META_LAST_BLOCK_IN_DB_KEY).map_err(|err| {
+                    DbError::borsh_cast_message(
+                        err,
+                        Some("Failed to serialize DB_META_LAST_BLOCK_IN_DB_KEY".to_string()),
+                    )
+                })?,
+            )
             .map_err(|rerr| DbError::rocksdb_cast_message(rerr, None))?;
 
         if let Some(data) = res {
-            Ok(u64::from_be_bytes(data.try_into().unwrap()))
+            Ok(borsh::from_slice::<u64>(&data).map_err(|err| {
+                DbError::borsh_cast_message(
+                    err,
+                    Some("Failed to deserialize last block".to_string()),
+                )
+            })?)
         } else {
             Err(DbError::db_interaction_error(
                 "Last block not found".to_string(),
@@ -174,7 +184,15 @@
         let cf_meta = self.meta_column();
         let res = self
             .db
-            .get_cf(&cf_meta, DB_META_FIRST_BLOCK_SET_KEY)
+            .get_cf(
+                &cf_meta,
+                borsh::to_vec(&DB_META_FIRST_BLOCK_SET_KEY).map_err(|err| {
+                    DbError::borsh_cast_message(
+                        err,
+                        Some("Failed to serialize DB_META_FIRST_BLOCK_SET_KEY".to_string()),
+                    )
+                })?,
+            )
             .map_err(|rerr| DbError::rocksdb_cast_message(rerr, None))?;
 
         Ok(res.is_some())
@@ -185,8 +203,18 @@
         self.db
             .put_cf(
                 &cf_meta,
-                DB_META_FIRST_BLOCK_IN_DB_KEY.as_bytes(),
-                block.header.block_id.to_be_bytes(),
+                borsh::to_vec(&DB_META_FIRST_BLOCK_IN_DB_KEY).map_err(|err| {
+                    DbError::borsh_cast_message(
+                        err,
+                        Some("Failed to serialize DB_META_FIRST_BLOCK_IN_DB_KEY".to_string()),
+                    )
+                })?,
+                borsh::to_vec(&block.header.block_id).map_err(|err| {
+                    DbError::borsh_cast_message(
+                        err,
+                        Some("Failed to serialize first block id".to_string()),
+                    )
+                })?,
             )
             .map_err(|rerr| DbError::rocksdb_cast_message(rerr, None))?;
 
@@ -199,8 +227,18 @@
         self.db
             .put_cf(
                 &cf_meta,
-                DB_META_LAST_BLOCK_IN_DB_KEY.as_bytes(),
-                block_id.to_be_bytes(),
+                borsh::to_vec(&DB_META_LAST_BLOCK_IN_DB_KEY).map_err(|err| {
+                    DbError::borsh_cast_message(
+                        err,
+                        Some("Failed to serialize DB_META_LAST_BLOCK_IN_DB_KEY".to_string()),
+                    )
+                })?,
+                borsh::to_vec(&block_id).map_err(|err| {
+                    DbError::borsh_cast_message(
+                        err,
+                        Some("Failed to serialize last block id".to_string()),
+                    )
+                })?,
             )
             .map_err(|rerr| DbError::rocksdb_cast_message(rerr, None))?;
         Ok(())
@@ -212,8 +250,18 @@
         self.db
             .put_cf(
                 &cf_meta,
-                DB_META_SC_LIST.as_bytes(),
-                serde_json::to_vec(&sc_list).unwrap(),
+                borsh::to_vec(&DB_META_SC_LIST).map_err(|err| {
+                    DbError::borsh_cast_message(
+                        err,
+                        Some("Failed to serialize DB_META_SC_LIST".to_string()),
+                    )
+                })?,
+                borsh::to_vec(&sc_list).map_err(|err| {
+                    DbError::borsh_cast_message(
+                        err,
+                        Some("Failed to serialize list of sc".to_string()),
+                    )
+                })?,
             )
             .map_err(|rerr| DbError::rocksdb_cast_message(rerr, None))?;
         Ok(())
@@ -222,7 +270,16 @@ pub fn put_meta_is_first_block_set(&self) -> DbResult<()> {
         let cf_meta = self.meta_column();
         self.db
-            .put_cf(&cf_meta, DB_META_FIRST_BLOCK_SET_KEY.as_bytes(), [1u8; 1])
+            .put_cf(
+                &cf_meta,
+                borsh::to_vec(&DB_META_FIRST_BLOCK_SET_KEY).map_err(|err| {
+                    DbError::borsh_cast_message(
+                        err,
+                        Some("Failed to serialize DB_META_FIRST_BLOCK_SET_KEY".to_string()),
+                    )
+                })?,
+                [1u8; 1],
+            )
            .map_err(|rerr| DbError::rocksdb_cast_message(rerr, None))?;
         Ok(())
     }
@@ -241,8 +298,18 @@
         self.db
             .put_cf(
                 &cf_block,
-                block.header.block_id.to_be_bytes(),
-                HashableBlockData::from(block).to_bytes(),
+                borsh::to_vec(&block.header.block_id).map_err(|err| {
+                    DbError::borsh_cast_message(
+                        err,
+                        Some("Failed to serialize block id".to_string()),
+                    )
+                })?,
+                borsh::to_vec(&HashableBlockData::from(block)).map_err(|err| {
+                    DbError::borsh_cast_message(
+                        err,
+                        Some("Failed to serialize block data".to_string()),
+                    )
+                })?,
             )
             .map_err(|rerr| DbError::rocksdb_cast_message(rerr, None))?;
         Ok(())
@@ -252,11 +319,26 @@
         let cf_block = self.block_column();
         let res = self
             .db
-            .get_cf(&cf_block, block_id.to_be_bytes())
+            .get_cf(
+                &cf_block,
+                borsh::to_vec(&block_id).map_err(|err| {
+                    DbError::borsh_cast_message(
+                        err,
+                        Some("Failed to serialize block id".to_string()),
+                    )
+                })?,
+            )
             .map_err(|rerr| DbError::rocksdb_cast_message(rerr, None))?;
 
         if let Some(data) = res {
-            Ok(HashableBlockData::from_bytes(&data))
+            Ok(
+                borsh::from_slice::<HashableBlockData>(&data).map_err(|serr| {
+                    DbError::borsh_cast_message(
+                        serr,
+                        Some("Failed to deserialize block data".to_string()),
+                    )
+                })?,
+            )
         } else {
             Err(DbError::db_interaction_error(
                 "Block on this id not found".to_string(),
@@ -269,17 +351,23 @@ impl RocksDBIO {
         let cf_meta = self.meta_column();
         let sc_list = self
             .db
-            .get_cf(&cf_meta, DB_META_SC_LIST)
-            .map_err(|rerr| DbError::rocksdb_cast_message(rerr, None))?;
-        if let Some(data) = sc_list {
-            Ok(
-                serde_json::from_slice::<Vec<String>>(&data).map_err(|serr| {
-                    DbError::serde_cast_message(
-                        serr,
-                        Some("List of Sc Deserialization failed".to_string()),
-                    )
-                })?,
-            )
+            .get_cf(
+                &cf_meta,
+                borsh::to_vec(&DB_META_SC_LIST).map_err(|err| {
+                    DbError::borsh_cast_message(
+                        err,
+                        Some("Failed to serialize DB_META_SC_LIST".to_string()),
+                    )
+                })?,
+            )
+            .map_err(|rerr| DbError::rocksdb_cast_message(rerr, None))?;
+        if let Some(data) = sc_list {
+            Ok(borsh::from_slice::<Vec<String>>(&data).map_err(|serr| {
+                DbError::borsh_cast_message(
+                    serr,
+                    Some("List of Sc Deserialization failed".to_string()),
+                )
+            })?)
         } else {
             Err(DbError::db_interaction_error(
                 "Sc list not found".to_string(),
@@ -297,250 +385,32 @@ impl RocksDBIO {
         Ok(())
     }
 
-    ///Put/Modify sc state in db
-    pub fn put_sc_sc_state(
-        &self,
-        sc_addr: &str,
-        length: usize,
-        modifications: Vec<DataBlobChangeVariant>,
-    ) -> DbResult<()> {
-        self.put_meta_sc(sc_addr.to_string())?;
-
-        let cf_sc = self.sc_column();
-
-        let sc_addr_loc = format!("{sc_addr:?}{SC_LEN_SUFFIX}");
-        let sc_len_addr = sc_addr_loc.as_bytes();
-
-        self.db
-            .put_cf(&cf_sc, sc_len_addr, length.to_be_bytes())
-            .map_err(|rerr| DbError::rocksdb_cast_message(rerr, None))?;
-
-        for data_change in modifications {
-            match data_change {
-                DataBlobChangeVariant::Created { id, blob } => {
-                    let blob_addr = produce_address_for_data_blob_at_id(sc_addr, id);
-
-                    self.db
-                        .put_cf(&cf_sc, blob_addr, blob)
-                        .map_err(|rerr| DbError::rocksdb_cast_message(rerr, None))?;
-                }
-                DataBlobChangeVariant::Modified {
-                    id,
-                    blob_old: _,
-                    blob_new,
-                } => {
-                    let blob_addr = produce_address_for_data_blob_at_id(sc_addr, id);
-
-                    self.db
-                        .put_cf(&cf_sc, blob_addr, blob_new)
-                        .map_err(|rerr| DbError::rocksdb_cast_message(rerr, None))?;
-                }
-                DataBlobChangeVariant::Deleted { id } => {
-                    let blob_addr = produce_address_for_data_blob_at_id(sc_addr, id);
-
-                    self.db
-                        .delete_cf(&cf_sc, blob_addr)
-                        .map_err(|rerr| DbError::rocksdb_cast_message(rerr, None))?;
-                }
-            }
-        }
-
-        Ok(())
-    }
-
-    ///Get sc state length in blobs from DB
-    pub fn get_sc_sc_state_len(&self, sc_addr: &str) -> DbResult<usize> {
-        let cf_sc = self.sc_column();
-        let sc_addr_loc = format!("{sc_addr:?}{SC_LEN_SUFFIX}");
-
-        let sc_len_addr = sc_addr_loc.as_bytes();
-
-        let sc_len = self
-            .db
-            .get_cf(&cf_sc, sc_len_addr)
-            .map_err(|rerr| DbError::rocksdb_cast_message(rerr, None))?;
-
-        if let Some(sc_len) = sc_len {
-            Ok(usize::from_be_bytes(sc_len.as_slice().try_into().unwrap()))
-        } else {
-            Err(DbError::db_interaction_error(format!(
-                "Sc len for {sc_addr:?} not found"
-            )))
-        }
-    }
-
-    ///Get full sc state from DB
-    pub fn get_sc_sc_state(&self, sc_addr: &str) -> DbResult<Vec<DataBlob>> {
-        let cf_sc = self.sc_column();
-        let sc_len = self.get_sc_sc_state_len(sc_addr)?;
-        let mut data_blob_list = vec![];
-
-        for id in 0..sc_len {
-            let blob_addr = produce_address_for_data_blob_at_id(sc_addr, id);
-
-            let blob = self
-                .db
-                .get_cf(&cf_sc, blob_addr)
-                .map_err(|rerr| DbError::rocksdb_cast_message(rerr, None))?;
-
-            if let Some(blob_data) = blob {
-                data_blob_list.push(produce_blob_from_fit_vec(blob_data));
-            } else {
-                return Err(DbError::db_interaction_error(format!(
-                    "Blob for {sc_addr:?} at id {id} not found"
-                )));
-            }
-        }
-
-        Ok(data_blob_list)
-    }
-
     pub fn get_snapshot_block_id(&self) -> DbResult<u64> {
         let cf_snapshot = self.snapshot_column();
         let res = self
             .db
-            .get_cf(&cf_snapshot, DB_SNAPSHOT_BLOCK_ID_KEY)
+            .get_cf(
+                &cf_snapshot,
+                borsh::to_vec(&DB_SNAPSHOT_BLOCK_ID_KEY).map_err(|err| {
+                    DbError::borsh_cast_message(
+                        err,
+                        Some("Failed to serialize DB_SNAPSHOT_BLOCK_ID_KEY".to_string()),
+                    )
+                })?,
+            )
             .map_err(|rerr| DbError::rocksdb_cast_message(rerr, None))?;
 
         if let Some(data) = res {
-            Ok(u64::from_be_bytes(data.try_into().unwrap()))
+            Ok(borsh::from_slice::<u64>(&data).map_err(|err| {
+                DbError::borsh_cast_message(
+                    err,
+                    Some("Failed to deserialize last block".to_string()),
+                )
+            })?)
         } else {
             Err(DbError::db_interaction_error(
                 "Snapshot block ID not found".to_string(),
             ))
         }
     }
-
-    pub fn get_snapshot_commitment(&self) -> DbResult<Vec<u8>> {
-        let cf_snapshot = self.snapshot_column();
-        let res = self
-            .db
-            .get_cf(&cf_snapshot, DB_SNAPSHOT_COMMITMENT_KEY)
-            .map_err(|rerr| DbError::rocksdb_cast_message(rerr, None))?;
-
-        if let Some(data) = res {
-            Ok(data)
-        } else {
-            Err(DbError::db_interaction_error(
-                "Snapshot commitment not found".to_string(),
-            ))
-        }
-    }
-
-    pub fn get_snapshot_transaction(&self) -> DbResult<Vec<u8>> {
-        let cf_snapshot = self.snapshot_column();
-        let res = self
-            .db
-            .get_cf(&cf_snapshot, DB_SNAPSHOT_TRANSACTION_KEY)
-            .map_err(|rerr| DbError::rocksdb_cast_message(rerr, None))?;
-
-        if let Some(data) = res {
-            Ok(data)
-        } else {
-            Err(DbError::db_interaction_error(
-                "Snapshot transaction not found".to_string(),
-            ))
-        }
-    }
-
-    pub fn get_snapshot_nullifier(&self) -> DbResult<Vec<u8>> {
-        let cf_snapshot = self.snapshot_column();
-        let res = self
-            .db
-            .get_cf(&cf_snapshot, DB_SNAPSHOT_NULLIFIER_KEY)
-            .map_err(|rerr| DbError::rocksdb_cast_message(rerr, None))?;
-
-        if let Some(data) = res {
-            Ok(data)
-        } else {
-            Err(DbError::db_interaction_error(
-                "Snapshot nullifier not found".to_string(),
-            ))
-        }
-    }
-
-    pub fn get_snapshot_account(&self) -> DbResult<Vec<u8>> {
-        let cf_snapshot = self.snapshot_column();
-        let res = self
-            .db
-            .get_cf(&cf_snapshot, DB_SNAPSHOT_ACCOUNT_KEY)
-            .map_err(|rerr| DbError::rocksdb_cast_message(rerr, None))?;
-
-        if let Some(data) = res {
-            Ok(data)
-        } else {
-            Err(DbError::db_interaction_error(
-                "Snapshot account not found".to_string(),
-            ))
-        }
-    }
-
-    pub fn put_snapshot_block_id_db(&self, block_id: u64) -> DbResult<()> {
-        let cf_snapshot = self.snapshot_column();
-        self.db
-            .put_cf(
-                &cf_snapshot,
-                DB_SNAPSHOT_BLOCK_ID_KEY.as_bytes(),
-                block_id.to_be_bytes(),
-            )
-            .map_err(|rerr| DbError::rocksdb_cast_message(rerr, None))?;
-        Ok(())
-    }
-
-    pub fn put_snapshot_commitement_db(&self, commitment: Vec<u8>) -> DbResult<()> {
-        let cf_snapshot = self.snapshot_column();
-        self.db
-            .put_cf(
-                &cf_snapshot,
-                DB_SNAPSHOT_COMMITMENT_KEY.as_bytes(),
-                commitment,
-            )
-            .map_err(|rerr| DbError::rocksdb_cast_message(rerr, None))?;
-        Ok(())
-    }
-
-    pub fn put_snapshot_transaction_db(&self, transaction: Vec<u8>) -> DbResult<()> {
-        let cf_snapshot = self.snapshot_column();
-        self.db
-            .put_cf(
-                &cf_snapshot,
-                DB_SNAPSHOT_TRANSACTION_KEY.as_bytes(),
-                transaction,
-            )
-            .map_err(|rerr| DbError::rocksdb_cast_message(rerr, None))?;
-        Ok(())
-    }
-
-    pub fn put_snapshot_nullifier_db(&self, nullifier: Vec<u8>) -> DbResult<()> {
-        let cf_snapshot = self.snapshot_column();
-        self.db
-            .put_cf(
-                &cf_snapshot,
-                DB_SNAPSHOT_NULLIFIER_KEY.as_bytes(),
-                nullifier,
-            )
-            .map_err(|rerr| DbError::rocksdb_cast_message(rerr, None))?;
-        Ok(())
-    }
-
-    pub fn put_snapshot_account_db(&self, account: Vec<u8>) -> DbResult<()> {
-        let cf_snapshot = self.snapshot_column();
-        self.db
-            .put_cf(&cf_snapshot, DB_SNAPSHOT_ACCOUNT_KEY.as_bytes(), account)
-            .map_err(|rerr| DbError::rocksdb_cast_message(rerr, None))?;
-        Ok(())
-    }
-}
-
-///Creates address for sc data blob at corresponding id
-fn produce_address_for_data_blob_at_id(sc_addr: &str, id: usize) -> Vec<u8> {
-    let mut prefix_bytes: Vec<u8> = sc_addr.as_bytes().to_vec();
-
-    let id_bytes = id.to_be_bytes();
-
-    for byte in id_bytes {
-        prefix_bytes.push(byte);
-    }
-
-    prefix_bytes
 }
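One on-disk consequence of the hunks above: block ids and meta values were previously stored as `to_be_bytes()`, but borsh writes integers little-endian. Existing databases are therefore not byte-compatible, and any iteration that relied on big-endian keys sorting in numeric order would lose that property. A tiny demonstration:

```rust
fn main() {
    let block_id: u64 = 7;
    let old = block_id.to_be_bytes().to_vec(); // previous on-disk form
    let new = borsh::to_vec(&block_id).unwrap(); // borsh: little-endian
    assert_eq!(old, vec![0, 0, 0, 0, 0, 0, 0, 7]);
    assert_eq!(new, vec![7, 0, 0, 0, 0, 0, 0, 0]);
    assert_ne!(old, new); // stored values (and key ordering) change
}
```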
diff --git a/storage/src/sc_db_utils.rs b/storage/src/sc_db_utils.rs
deleted file mode 100644
index af982cf..0000000
--- a/storage/src/sc_db_utils.rs
+++ /dev/null
@@ -1,160 +0,0 @@
-use serde::{Deserialize, Serialize, de::Error};
-
-use crate::SC_DATA_BLOB_SIZE;
-
-#[derive(Debug, Clone, Copy, PartialEq, Eq)]
-pub struct DataBlob(pub [u8; SC_DATA_BLOB_SIZE]);
-
-impl From<[u8; SC_DATA_BLOB_SIZE]> for DataBlob {
-    fn from(value: [u8; SC_DATA_BLOB_SIZE]) -> Self {
-        Self(value)
-    }
-}
-
-impl Serialize for DataBlob {
-    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
-    where
-        S: serde::Serializer,
-    {
-        let data_vec = self.0.to_vec();
-        data_vec.serialize(serializer)
-    }
-}
-
-impl AsRef<[u8]> for DataBlob {
-    fn as_ref(&self) -> &[u8] {
-        self.0.as_ref()
-    }
-}
-
-impl<'de> Deserialize<'de> for DataBlob {
-    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
-    where
-        D: serde::Deserializer<'de>,
-    {
-        let data_vec = Vec::<u8>::deserialize(deserializer)?;
-        let chunk: [u8; SC_DATA_BLOB_SIZE] = data_vec
-            .try_into()
-            .map_err(|data| {
-                anyhow::anyhow!("failed to fit vec {data:?} to {:?}", SC_DATA_BLOB_SIZE)
-            })
-            .map_err(D::Error::custom)?;
-        Ok(Self(chunk))
-    }
-}
-
-impl DataBlob {
-    pub fn to_vec(&self) -> Vec<u8> {
-        self.0.to_vec()
-    }
-}
-
-#[allow(clippy::large_enum_variant)]
-#[derive(Debug, Clone, Copy, Serialize, Deserialize, PartialEq, Eq)]
-pub enum DataBlobChangeVariant {
-    Created {
-        id: usize,
-        blob: DataBlob,
-    },
-    Modified {
-        id: usize,
-        blob_old: DataBlob,
-        blob_new: DataBlob,
-    },
-    Deleted {
-        id: usize,
-    },
-}
-
-///Produce `DataBlob` from vector of size <= `SC_DATA_BLOB_SIZE`
-///
-///Extends to `SC_DATA_BLOB_SIZE`, if necessary.
-///
-///Panics, if size > `SC_DATA_BLOB_SIZE`
-pub fn produce_blob_from_fit_vec(data: Vec<u8>) -> DataBlob {
-    let data_len = data.len();
-
-    assert!(data_len <= SC_DATA_BLOB_SIZE);
-    let mut blob: DataBlob = [0; SC_DATA_BLOB_SIZE].into();
-
-    for (idx, item) in data.into_iter().enumerate() {
-        blob.0[idx] = item
-    }
-
-    blob
-}
-
-#[cfg(test)]
-mod tests {
-    use super::*;
-    use serde_json;
-
-    const TEST_BLOB_SIZE: usize = 256; // Define a test blob size for simplicity
-    static SC_DATA_BLOB_SIZE: usize = TEST_BLOB_SIZE;
-
-    fn sample_vec() -> Vec<u8> {
-        (0..SC_DATA_BLOB_SIZE)
-            .collect::<Vec<_>>()
-            .iter()
-            .map(|&x| x as u8)
-            .collect()
-    }
-
-    fn sample_data_blob() -> DataBlob {
-        let vec: Vec<u8> = sample_vec();
-        produce_blob_from_fit_vec(vec)
-    }
-
-    #[test]
-    fn test_serialize_data_blob() {
-        let blob = sample_data_blob();
-        let json = serde_json::to_string(&blob).unwrap();
-
-        let expected_json = serde_json::to_string(&sample_vec()).unwrap();
-        assert_eq!(json, expected_json);
-    }
-
-    #[test]
-    fn test_deserialize_data_blob() {
-        let data = sample_vec();
-        let json = serde_json::to_string(&data).unwrap();
-        let deserialized: DataBlob = serde_json::from_str(&json).unwrap();
-        assert_eq!(deserialized.to_vec(), data);
-    }
-
-    #[test]
-    fn test_serialize_deserialize_data_blob_change_variant() {
-        let blob1 = sample_data_blob();
-        let blob2 = produce_blob_from_fit_vec((50..50 + SC_DATA_BLOB_SIZE as u8).collect());
-
-        let variants = vec![
-            DataBlobChangeVariant::Created { id: 1, blob: blob1 },
-            DataBlobChangeVariant::Modified {
-                id: 2,
-                blob_old: blob1,
-                blob_new: blob2,
-            },
-            DataBlobChangeVariant::Deleted { id: 3 },
-        ];
-
-        for variant in variants {
-            let json = serde_json::to_string(&variant).unwrap();
-            let deserialized: DataBlobChangeVariant = serde_json::from_str(&json).unwrap();
-            assert_eq!(variant, deserialized);
-        }
-    }
-
-    #[test]
-    fn test_produce_blob_from_fit_vec() {
-        let data = (0..255).collect();
-        let blob = produce_blob_from_fit_vec(data);
-        assert_eq!(blob.0[..4], [0, 1, 2, 3]);
-    }
-
-    #[test]
-    #[should_panic]
-    fn test_produce_blob_from_fit_vec_panic() {
-        let data = vec![0; SC_DATA_BLOB_SIZE + 1];
-        let _ = produce_blob_from_fit_vec(data);
-    }
-}
diff --git a/wallet/Cargo.toml b/wallet/Cargo.toml
index 1bbd79e..ebd0dc5 100644
--- a/wallet/Cargo.toml
+++ b/wallet/Cargo.toml
@@ -16,6 +16,7 @@ nssa-core = { path = "../nssa/core" }
 base64.workspace = true
 k256 = { version = "0.13.3" }
 bytemuck = "1.23.2"
+borsh.workspace = true
 
 hex.workspace = true
 rand.workspace = true
diff --git a/wallet/src/lib.rs b/wallet/src/lib.rs
index d3bcc8e..0e8b1cb 100644
--- a/wallet/src/lib.rs
+++ b/wallet/src/lib.rs
@@ -203,7 +203,7 @@ impl WalletCore {
     pub async fn poll_native_token_transfer(&self, hash: String) -> Result<NSSATransaction> {
         let transaction_encoded = self.poller.poll_tx(hash).await?;
         let tx_base64_decode = BASE64.decode(transaction_encoded)?;
-        let pub_tx = EncodedTransaction::from_bytes(tx_base64_decode);
+        let pub_tx = borsh::from_slice::<EncodedTransaction>(&tx_base64_decode).unwrap();
         Ok(NSSATransaction::try_from(&pub_tx)?)
     }
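For completeness, the wallet's poll path end to end: the RPC returns base64 text whose payload is now borsh bytes. A sketch with a hypothetical stand-in for `EncodedTransaction`, using the base64 0.22 `Engine` API as elsewhere in the repo:

```rust
use base64::{Engine as _, engine::general_purpose::STANDARD as BASE64};
use borsh::{BorshDeserialize, BorshSerialize};

// Hypothetical stand-in for the real EncodedTransaction.
#[derive(BorshSerialize, BorshDeserialize, Debug, PartialEq)]
struct EncodedTransaction {
    tx_kind: u8,
    data: Vec<u8>,
}

fn main() {
    // What the sequencer side produces for get-transaction responses.
    let tx = EncodedTransaction { tx_kind: 1, data: vec![9, 9] };
    let encoded = BASE64.encode(borsh::to_vec(&tx).unwrap());

    // What WalletCore::poll_native_token_transfer now does with it.
    let bytes = BASE64.decode(encoded).unwrap();
    let decoded = borsh::from_slice::<EncodedTransaction>(&bytes).unwrap();
    assert_eq!(decoded, tx);
}
```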