diff --git a/storage/src/indexer/mod.rs b/storage/src/indexer/mod.rs index 12faf96d..c5d47c1f 100644 --- a/storage/src/indexer/mod.rs +++ b/storage/src/indexer/mod.rs @@ -8,10 +8,10 @@ use rocksdb::{ use crate::error::DbError; -pub mod read_multi_get; +pub mod read_multiple; pub mod read_once; -pub mod write_batch; -pub mod write_once; +pub mod write_atomic; +pub mod write_non_atomic; /// Maximal size of stored blocks in base. /// @@ -57,13 +57,6 @@ pub const CF_ACC_TO_TX: &str = "cf_acc_to_tx"; pub type DbResult = Result; -fn closest_breakpoint_id(block_id: u64) -> u64 { - block_id - .saturating_sub(1) - .checked_div(u64::from(BREAKPOINT_INTERVAL)) - .expect("Breakpoint interval is not zero") -} - pub struct RocksDBIO { pub db: DBWithThreadMode, } @@ -184,7 +177,9 @@ impl RocksDBIO { self.get_meta_first_block_in_db()? }; - for block in self.get_block_batch_seq((start + 1)..=block_id)? { + for block in self.get_block_batch_seq( + start.checked_add(1).expect("Will be lesser that u64::MAX")..=block_id, + )? 
{ for transaction in block.body.transactions { transaction .transaction_stateless_check() @@ -215,11 +210,17 @@ impl RocksDBIO { } } -#[allow(clippy::shadow_unrelated)] +fn closest_breakpoint_id(block_id: u64) -> u64 { + block_id + .saturating_sub(1) + .checked_div(u64::from(BREAKPOINT_INTERVAL)) + .expect("Breakpoint interval is not zero") +} + +#[expect(clippy::shadow_unrelated, reason = "Fine for tests")] #[cfg(test)] mod tests { - use common::transaction::NSSATransaction; - use nssa::AccountId; + use nssa::{AccountId, PublicKey}; use tempfile::tempdir; use super::*; @@ -228,20 +229,6 @@ mod tests { common::test_utils::produce_dummy_block(1, None, vec![]) } - fn acc1() -> AccountId { - AccountId::new([ - 148, 179, 206, 253, 199, 51, 82, 86, 232, 2, 152, 122, 80, 243, 54, 207, 237, 112, 83, - 153, 44, 59, 204, 49, 128, 84, 160, 227, 216, 149, 97, 102, - ]) - } - - fn acc2() -> AccountId { - AccountId::new([ - 30, 145, 107, 3, 207, 73, 192, 230, 160, 63, 238, 207, 18, 69, 54, 216, 103, 244, 92, - 94, 124, 248, 42, 16, 141, 19, 119, 18, 14, 226, 140, 204, - ]) - } - fn acc1_sign_key() -> nssa::PrivateKey { nssa::PrivateKey::try_new([1; 32]).unwrap() } @@ -250,28 +237,12 @@ mod tests { nssa::PrivateKey::try_new([2; 32]).unwrap() } - fn initial_state() -> V02State { - nssa::V02State::new_with_genesis_accounts(&[(acc1(), 10000), (acc2(), 20000)], &[]) + fn acc1() -> AccountId { + AccountId::from(&PublicKey::new_from_private_key(&acc1_sign_key())) } - fn transfer(amount: u128, nonce: u128, direction: bool) -> NSSATransaction { - let from; - let to; - let sign_key; - - if direction { - from = acc1(); - to = acc2(); - sign_key = acc1_sign_key(); - } else { - from = acc2(); - to = acc1(); - sign_key = acc2_sign_key(); - } - - common::test_utils::create_transaction_native_token_transfer( - from, nonce, to, amount, &sign_key, - ) + fn acc2() -> AccountId { + AccountId::from(&PublicKey::new_from_private_key(&acc2_sign_key())) } #[test] @@ -279,8 +250,12 @@ mod tests { let 
temp_dir = tempdir().unwrap(); let temdir_path = temp_dir.path(); - let dbio = - RocksDBIO::open_or_create(temdir_path, &genesis_block(), &initial_state()).unwrap(); + let dbio = RocksDBIO::open_or_create( + temdir_path, + &genesis_block(), + &nssa::V02State::new_with_genesis_accounts(&[(acc1(), 10000), (acc2(), 20000)], &[]), + ) + .unwrap(); let last_id = dbio.get_meta_last_block_in_db().unwrap(); let first_id = dbio.get_meta_first_block_in_db().unwrap(); @@ -312,11 +287,20 @@ mod tests { let temp_dir = tempdir().unwrap(); let temdir_path = temp_dir.path(); - let dbio = - RocksDBIO::open_or_create(temdir_path, &genesis_block(), &initial_state()).unwrap(); + let dbio = RocksDBIO::open_or_create( + temdir_path, + &genesis_block(), + &nssa::V02State::new_with_genesis_accounts(&[(acc1(), 10000), (acc2(), 20000)], &[]), + ) + .unwrap(); let prev_hash = genesis_block().header.hash; - let transfer_tx = transfer(1, 0, true); + let from = acc1(); + let to = acc2(); + let sign_key = acc1_sign_key(); + + let transfer_tx = + common::test_utils::create_transaction_native_token_transfer(from, 0, to, 1, &sign_key); let block = common::test_utils::produce_dummy_block(2, Some(prev_hash), vec![transfer_tx]); dbio.put_block(&block, [1; 32]).unwrap(); @@ -356,15 +340,30 @@ mod tests { let temp_dir = tempdir().unwrap(); let temdir_path = temp_dir.path(); - let dbio = - RocksDBIO::open_or_create(temdir_path, &genesis_block(), &initial_state()).unwrap(); + let dbio = RocksDBIO::open_or_create( + temdir_path, + &genesis_block(), + &nssa::V02State::new_with_genesis_accounts(&[(acc1(), 10000), (acc2(), 20000)], &[]), + ) + .unwrap(); + + let from = acc1(); + let to = acc2(); + let sign_key = acc1_sign_key(); for i in 1..=BREAKPOINT_INTERVAL { let last_id = dbio.get_meta_last_block_in_db().unwrap(); let last_block = dbio.get_block(last_id).unwrap(); let prev_hash = last_block.header.hash; - let transfer_tx = transfer(1, (i - 1).into(), true); + + let transfer_tx = 
common::test_utils::create_transaction_native_token_transfer( + from, + (i - 1).into(), + to, + 1, + &sign_key, + ); let block = common::test_utils::produce_dummy_block( (i + 1).into(), Some(prev_hash), @@ -414,14 +413,23 @@ mod tests { let temp_dir = tempdir().unwrap(); let temdir_path = temp_dir.path(); - let dbio = - RocksDBIO::open_or_create(temdir_path, &genesis_block(), &initial_state()).unwrap(); + let dbio = RocksDBIO::open_or_create( + temdir_path, + &genesis_block(), + &nssa::V02State::new_with_genesis_accounts(&[(acc1(), 10000), (acc2(), 20000)], &[]), + ) + .unwrap(); + + let from = acc1(); + let to = acc2(); + let sign_key = acc1_sign_key(); let last_id = dbio.get_meta_last_block_in_db().unwrap(); let last_block = dbio.get_block(last_id).unwrap(); let prev_hash = last_block.header.hash; - let transfer_tx = transfer(1, 0, true); + let transfer_tx = + common::test_utils::create_transaction_native_token_transfer(from, 0, to, 1, &sign_key); let block = common::test_utils::produce_dummy_block(2, Some(prev_hash), vec![transfer_tx]); let control_hash1 = block.header.hash; @@ -432,7 +440,8 @@ mod tests { let last_block = dbio.get_block(last_id).unwrap(); let prev_hash = last_block.header.hash; - let transfer_tx = transfer(1, 1, true); + let transfer_tx = + common::test_utils::create_transaction_native_token_transfer(from, 1, to, 1, &sign_key); let block = common::test_utils::produce_dummy_block(3, Some(prev_hash), vec![transfer_tx]); let control_hash2 = block.header.hash; @@ -443,7 +452,8 @@ mod tests { let last_block = dbio.get_block(last_id).unwrap(); let prev_hash = last_block.header.hash; - let transfer_tx = transfer(1, 2, true); + let transfer_tx = + common::test_utils::create_transaction_native_token_transfer(from, 2, to, 1, &sign_key); let control_tx_hash1 = transfer_tx.hash(); @@ -454,7 +464,8 @@ mod tests { let last_block = dbio.get_block(last_id).unwrap(); let prev_hash = last_block.header.hash; - let transfer_tx = transfer(1, 3, true); + let 
transfer_tx = + common::test_utils::create_transaction_native_token_transfer(from, 3, to, 1, &sign_key); let control_tx_hash2 = transfer_tx.hash(); @@ -479,14 +490,23 @@ mod tests { let mut block_res = vec![]; - let dbio = - RocksDBIO::open_or_create(temdir_path, &genesis_block(), &initial_state()).unwrap(); + let dbio = RocksDBIO::open_or_create( + temdir_path, + &genesis_block(), + &nssa::V02State::new_with_genesis_accounts(&[(acc1(), 10000), (acc2(), 20000)], &[]), + ) + .unwrap(); + + let from = acc1(); + let to = acc2(); + let sign_key = acc1_sign_key(); let last_id = dbio.get_meta_last_block_in_db().unwrap(); let last_block = dbio.get_block(last_id).unwrap(); let prev_hash = last_block.header.hash; - let transfer_tx = transfer(1, 0, true); + let transfer_tx = + common::test_utils::create_transaction_native_token_transfer(from, 0, to, 1, &sign_key); let block = common::test_utils::produce_dummy_block(2, Some(prev_hash), vec![transfer_tx]); block_res.push(block.clone()); @@ -496,7 +516,8 @@ mod tests { let last_block = dbio.get_block(last_id).unwrap(); let prev_hash = last_block.header.hash; - let transfer_tx = transfer(1, 1, true); + let transfer_tx = + common::test_utils::create_transaction_native_token_transfer(from, 1, to, 1, &sign_key); let block = common::test_utils::produce_dummy_block(3, Some(prev_hash), vec![transfer_tx]); block_res.push(block.clone()); @@ -506,7 +527,8 @@ mod tests { let last_block = dbio.get_block(last_id).unwrap(); let prev_hash = last_block.header.hash; - let transfer_tx = transfer(1, 2, true); + let transfer_tx = + common::test_utils::create_transaction_native_token_transfer(from, 2, to, 1, &sign_key); let block = common::test_utils::produce_dummy_block(4, Some(prev_hash), vec![transfer_tx]); block_res.push(block.clone()); @@ -516,7 +538,8 @@ mod tests { let last_block = dbio.get_block(last_id).unwrap(); let prev_hash = last_block.header.hash; - let transfer_tx = transfer(1, 3, true); + let transfer_tx = + 
common::test_utils::create_transaction_native_token_transfer(from, 3, to, 1, &sign_key); let block = common::test_utils::produce_dummy_block(5, Some(prev_hash), vec![transfer_tx]); block_res.push(block.clone()); @@ -563,8 +586,16 @@ mod tests { let temp_dir = tempdir().unwrap(); let temdir_path = temp_dir.path(); - let dbio = - RocksDBIO::open_or_create(temdir_path, &genesis_block(), &initial_state()).unwrap(); + let dbio = RocksDBIO::open_or_create( + temdir_path, + &genesis_block(), + &nssa::V02State::new_with_genesis_accounts(&[(acc1(), 10000), (acc2(), 20000)], &[]), + ) + .unwrap(); + + let from = acc1(); + let to = acc2(); + let sign_key = acc1_sign_key(); let mut tx_hash_res = vec![]; @@ -572,8 +603,10 @@ mod tests { let last_block = dbio.get_block(last_id).unwrap(); let prev_hash = last_block.header.hash; - let transfer_tx1 = transfer(1, 0, true); - let transfer_tx2 = transfer(1, 1, true); + let transfer_tx1 = + common::test_utils::create_transaction_native_token_transfer(from, 0, to, 1, &sign_key); + let transfer_tx2 = + common::test_utils::create_transaction_native_token_transfer(from, 1, to, 1, &sign_key); tx_hash_res.push(transfer_tx1.hash().0); tx_hash_res.push(transfer_tx2.hash().0); @@ -589,8 +622,10 @@ mod tests { let last_block = dbio.get_block(last_id).unwrap(); let prev_hash = last_block.header.hash; - let transfer_tx1 = transfer(1, 2, true); - let transfer_tx2 = transfer(1, 3, true); + let transfer_tx1 = + common::test_utils::create_transaction_native_token_transfer(from, 2, to, 1, &sign_key); + let transfer_tx2 = + common::test_utils::create_transaction_native_token_transfer(from, 3, to, 1, &sign_key); tx_hash_res.push(transfer_tx1.hash().0); tx_hash_res.push(transfer_tx2.hash().0); @@ -606,8 +641,10 @@ mod tests { let last_block = dbio.get_block(last_id).unwrap(); let prev_hash = last_block.header.hash; - let transfer_tx1 = transfer(1, 4, true); - let transfer_tx2 = transfer(1, 5, true); + let transfer_tx1 = + 
common::test_utils::create_transaction_native_token_transfer(from, 4, to, 1, &sign_key); + let transfer_tx2 = + common::test_utils::create_transaction_native_token_transfer(from, 5, to, 1, &sign_key); tx_hash_res.push(transfer_tx1.hash().0); tx_hash_res.push(transfer_tx2.hash().0); @@ -623,7 +660,8 @@ mod tests { let last_block = dbio.get_block(last_id).unwrap(); let prev_hash = last_block.header.hash; - let transfer_tx = transfer(1, 6, true); + let transfer_tx = + common::test_utils::create_transaction_native_token_transfer(from, 6, to, 1, &sign_key); tx_hash_res.push(transfer_tx.hash().0); let block = common::test_utils::produce_dummy_block(5, Some(prev_hash), vec![transfer_tx]); diff --git a/storage/src/indexer/read_multi_get.rs b/storage/src/indexer/read_multiple.rs similarity index 81% rename from storage/src/indexer/read_multi_get.rs rename to storage/src/indexer/read_multiple.rs index 66fe25d7..866fc7b0 100644 --- a/storage/src/indexer/read_multi_get.rs +++ b/storage/src/indexer/read_multiple.rs @@ -1,7 +1,8 @@ use common::transaction::NSSATransaction; -use super::*; +use super::{Block, DbError, DbResult, RocksDBIO}; +#[expect(clippy::multiple_inherent_impl, reason = "Readability")] impl RocksDBIO { pub fn get_block_batch(&self, before: Option, limit: u64) -> DbResult> { let mut seq = vec![]; @@ -25,11 +26,11 @@ impl RocksDBIO { self.get_block_batch_seq(seq.into_iter()) } - /// Get block batch from a sequence + /// Get block batch from a sequence. /// - /// Currently assumes non-decreasing sequence + /// Currently assumes non-decreasing sequence. /// - /// ToDo: Add suport of arbitrary sequences + /// `ToDo`: Add support of arbitrary sequences. pub fn get_block_batch_seq(&self, seq: impl Iterator) -> DbResult> { let cf_block = self.block_column(); @@ -72,11 +73,9 @@ impl RocksDBIO { Ok(block_batch) } - /// Get block ids by txs + /// Get block ids by txs. 
/// - /// Transactions must be sorted by time of arrival - /// - /// ToDo: There may be multiple transactions in one block + /// `ToDo`: There may be multiple transactions in one block /// so this method can take redundant reads. /// Need to update signature and implementation. fn get_block_ids_by_tx_vec(&self, tx_vec: &[[u8; 32]]) -> DbResult> { @@ -98,18 +97,21 @@ impl RocksDBIO { // Keys parsing let mut block_id_batch = vec![]; for res in multi_get_res { - let res = res.map_err(|rerr| DbError::rocksdb_cast_message(rerr, None))?; + let res = res + .map_err(|rerr| DbError::rocksdb_cast_message(rerr, None))? + .ok_or_else(|| { + DbError::db_interaction_error( + "Tx to block id mapping does not contain transaction from vec".to_owned(), + ) + })?; - let block_id = if let Some(data) = res { - Ok(borsh::from_slice::(&data).map_err(|serr| { + let block_id = { + Ok(borsh::from_slice::(&res).map_err(|serr| { DbError::borsh_cast_message( serr, Some("Failed to deserialize block id".to_owned()), ) })?) 
- } else { - // Block not found, assuming that previous one was the last - break; }?; block_id_batch.push(block_id); @@ -131,7 +133,11 @@ impl RocksDBIO { // Keys preparation let mut keys = vec![]; - for tx_id in offset..(offset + limit) { + for tx_id in offset + ..offset + .checked_add(limit) + .expect("Transaction limit should be lesser than u64::MAX") + { let mut prefix = borsh::to_vec(&acc_id).map_err(|berr| { DbError::borsh_cast_message(berr, Some("Failed to serialize account id".to_owned())) })?; @@ -188,10 +194,12 @@ impl RocksDBIO { .transactions .iter() .find(|tx| tx.hash().0 == tx_hash) - .ok_or(DbError::db_interaction_error(format!( - "Missing transaction in block {} with hash {:#?}", - block.header.block_id, tx_hash - )))?; + .ok_or_else(|| { + DbError::db_interaction_error(format!( + "Missing transaction in block {} with hash {:#?}", + block.header.block_id, tx_hash + )) + })?; tx_batch.push(transaction.clone()); } diff --git a/storage/src/indexer/read_once.rs b/storage/src/indexer/read_once.rs index 40929fd6..f966f349 100644 --- a/storage/src/indexer/read_once.rs +++ b/storage/src/indexer/read_once.rs @@ -1,5 +1,10 @@ -use super::*; +use super::{ + Block, DB_META_FIRST_BLOCK_IN_DB_KEY, DB_META_FIRST_BLOCK_SET_KEY, + DB_META_LAST_BLOCK_IN_DB_KEY, DB_META_LAST_BREAKPOINT_ID, + DB_META_LAST_OBSERVED_L1_LIB_HEADER_ID_IN_DB_KEY, DbError, DbResult, RocksDBIO, V02State, +}; +#[expect(clippy::multiple_inherent_impl, reason = "Readability")] impl RocksDBIO { // Meta diff --git a/storage/src/indexer/write_batch.rs b/storage/src/indexer/write_atomic.rs similarity index 87% rename from storage/src/indexer/write_batch.rs rename to storage/src/indexer/write_atomic.rs index a2d5b46b..161d763a 100644 --- a/storage/src/indexer/write_batch.rs +++ b/storage/src/indexer/write_atomic.rs @@ -2,8 +2,13 @@ use std::collections::HashMap; use rocksdb::WriteBatch; -use super::*; +use super::{ + Arc, BREAKPOINT_INTERVAL, Block, BoundColumnFamily, DB_META_FIRST_BLOCK_IN_DB_KEY, + 
DB_META_FIRST_BLOCK_SET_KEY, DB_META_LAST_BLOCK_IN_DB_KEY, DB_META_LAST_BREAKPOINT_ID, + DB_META_LAST_OBSERVED_L1_LIB_HEADER_ID_IN_DB_KEY, DbError, DbResult, RocksDBIO, +}; +#[expect(clippy::multiple_inherent_impl, reason = "Readability")] impl RocksDBIO { // Accounts meta @@ -36,14 +41,16 @@ impl RocksDBIO { pub fn put_account_transactions( &self, acc_id: [u8; 32], - tx_hashes: Vec<[u8; 32]>, + tx_hashes: &[[u8; 32]], ) -> DbResult<()> { let acc_num_tx = self.get_acc_meta_num_tx(acc_id)?.unwrap_or(0); let cf_att = self.account_id_to_tx_hash_column(); let mut write_batch = WriteBatch::new(); for (tx_id, tx_hash) in tx_hashes.iter().enumerate() { - let put_id = acc_num_tx + tx_id as u64; + let put_id = acc_num_tx + .checked_add(tx_id.try_into().expect("Must fit into u64")) + .expect("Tx count should be lesser that u64::MAX"); let mut prefix = borsh::to_vec(&acc_id).map_err(|berr| { DbError::borsh_cast_message(berr, Some("Failed to serialize account id".to_owned())) @@ -68,7 +75,9 @@ impl RocksDBIO { self.update_acc_meta_batch( acc_id, - acc_num_tx + (tx_hashes.len() as u64), + acc_num_tx + .checked_add(tx_hashes.len().try_into().expect("Must fit into u64")) + .expect("Tx count should be lesser that u64::MAX"), &mut write_batch, )?; @@ -80,14 +89,16 @@ impl RocksDBIO { pub fn put_account_transactions_dependant( &self, acc_id: [u8; 32], - tx_hashes: Vec<[u8; 32]>, + tx_hashes: &[[u8; 32]], write_batch: &mut WriteBatch, ) -> DbResult<()> { let acc_num_tx = self.get_acc_meta_num_tx(acc_id)?.unwrap_or(0); let cf_att = self.account_id_to_tx_hash_column(); for (tx_id, tx_hash) in tx_hashes.iter().enumerate() { - let put_id = acc_num_tx + tx_id as u64; + let put_id = acc_num_tx + .checked_add(tx_id.try_into().expect("Must fit into u64")) + .expect("Tx count should be lesser that u64::MAX"); let mut prefix = borsh::to_vec(&acc_id).map_err(|berr| { DbError::borsh_cast_message(berr, Some("Failed to serialize account id".to_owned())) @@ -110,7 +121,13 @@ impl RocksDBIO { ); } - 
self.update_acc_meta_batch(acc_id, acc_num_tx + (tx_hashes.len() as u64), write_batch)?; + self.update_acc_meta_batch( + acc_id, + acc_num_tx + .checked_add(tx_hashes.len().try_into().expect("Must fit into u64")) + .expect("Tx count should be lesser that u64::MAX"), + write_batch, + )?; Ok(()) } @@ -226,7 +243,7 @@ impl RocksDBIO { Some("Failed to serialize DB_META_FIRST_BLOCK_SET_KEY".to_owned()), ) })?, - [1u8; 1], + [1_u8; 1], ); Ok(()) } @@ -286,14 +303,14 @@ impl RocksDBIO { let acc_ids = tx .affected_public_account_ids() .into_iter() - .map(|account_id| account_id.into_value()) + .map(nssa::AccountId::into_value) .collect::>(); for acc_id in acc_ids { acc_to_tx_map .entry(acc_id) .and_modify(|tx_hashes| tx_hashes.push(tx_hash.into())) - .or_insert(vec![tx_hash.into()]); + .or_insert_with(|| vec![tx_hash.into()]); } } @@ -302,7 +319,7 @@ impl RocksDBIO { reason = "RocksDB will keep ordering persistent" )] for (acc_id, tx_hashes) in acc_to_tx_map { - self.put_account_transactions_dependant(acc_id, tx_hashes, &mut write_batch)?; + self.put_account_transactions_dependant(acc_id, &tx_hashes, &mut write_batch)?; } self.db.write(write_batch).map_err(|rerr| { diff --git a/storage/src/indexer/write_once.rs b/storage/src/indexer/write_non_atomic.rs similarity index 90% rename from storage/src/indexer/write_once.rs rename to storage/src/indexer/write_non_atomic.rs index 0edd7dfa..84fc7de5 100644 --- a/storage/src/indexer/write_once.rs +++ b/storage/src/indexer/write_non_atomic.rs @@ -1,5 +1,10 @@ -use super::*; +use super::{ + BREAKPOINT_INTERVAL, DB_META_FIRST_BLOCK_SET_KEY, DB_META_LAST_BLOCK_IN_DB_KEY, + DB_META_LAST_BREAKPOINT_ID, DB_META_LAST_OBSERVED_L1_LIB_HEADER_ID_IN_DB_KEY, DbError, + DbResult, RocksDBIO, V02State, +}; +#[expect(clippy::multiple_inherent_impl, reason = "Readability")] impl RocksDBIO { // Meta @@ -88,7 +93,7 @@ impl RocksDBIO { Some("Failed to serialize DB_META_FIRST_BLOCK_SET_KEY".to_owned()), ) })?, - [1u8; 1], + [1_u8; 1], ) .map_err(|rerr| 
DbError::rocksdb_cast_message(rerr, None))?; Ok(()) @@ -120,7 +125,10 @@ impl RocksDBIO { pub fn put_next_breakpoint(&self) -> DbResult<()> { let last_block = self.get_meta_last_block_in_db()?; - let next_breakpoint_id = self.get_meta_last_breakpoint_id()? + 1; + let next_breakpoint_id = self + .get_meta_last_breakpoint_id()? + .checked_add(1) + .expect("Breakpoint Id will be less than u64::MAX"); let block_to_break_id = next_breakpoint_id .checked_mul(u64::from(BREAKPOINT_INTERVAL)) .expect("Reached maximum breakpoint id");