diff --git a/storage/src/indexer/mod.rs b/storage/src/indexer/mod.rs index 7bc7c85c..9fa5ebde 100644 --- a/storage/src/indexer/mod.rs +++ b/storage/src/indexer/mod.rs @@ -1,159 +1,26 @@ use std::{path::Path, sync::Arc}; -use borsh::{BorshDeserialize, BorshSerialize}; use common::block::Block; use nssa::V03State; use rocksdb::{ BoundColumnFamily, ColumnFamilyDescriptor, DBWithThreadMode, MultiThreaded, Options, WriteBatch, }; -use crate::error::DbError; +use crate::{ + BREAKPOINT_INTERVAL, CF_ACC_META, CF_ACC_TO_TX, CF_BLOCK_NAME, CF_BREAKPOINT_NAME, + CF_HASH_TO_ID, CF_META_NAME, CF_TX_TO_ID, DbResult, error::DbError, + storable_cell::SimpleStorableCell, +}; -pub mod meta_cells; pub mod read_multiple; pub mod read_once; pub mod write_atomic; pub mod write_non_atomic; -/// Maximal size of stored blocks in base. -/// -/// Used to control db size. -/// -/// Currently effectively unbounded. -pub const BUFF_SIZE_ROCKSDB: usize = usize::MAX; - -/// Size of stored blocks cache in memory. -/// -/// Keeping small to not run out of memory. -pub const CACHE_SIZE: usize = 1000; - -/// Key base for storing metainformation about id of first block in db. -pub const DB_META_FIRST_BLOCK_IN_DB_KEY: &str = "first_block_in_db"; -/// Key base for storing metainformation about id of last current block in db. -pub const DB_META_LAST_BLOCK_IN_DB_KEY: &str = "last_block_in_db"; -/// Key base for storing metainformation about id of last observed L1 lib header in db. -pub const DB_META_LAST_OBSERVED_L1_LIB_HEADER_ID_IN_DB_KEY: &str = - "last_observed_l1_lib_header_in_db"; -/// Key base for storing metainformation which describe if first block has been set. -pub const DB_META_FIRST_BLOCK_SET_KEY: &str = "first_block_set"; -/// Key base for storing metainformation about the last breakpoint. -pub const DB_META_LAST_BREAKPOINT_ID: &str = "last_breakpoint_id"; - -/// Interval between state breakpoints. -pub const BREAKPOINT_INTERVAL: u8 = 100; - -/// Name of block column family. 
-pub const CF_BLOCK_NAME: &str = "cf_block"; -/// Name of meta column family. -pub const CF_META_NAME: &str = "cf_meta"; -/// Name of breakpoint column family. -pub const CF_BREAKPOINT_NAME: &str = "cf_breakpoint"; -/// Name of hash to id map column family. -pub const CF_HASH_TO_ID: &str = "cf_hash_to_id"; -/// Name of tx hash to id map column family. -pub const CF_TX_TO_ID: &str = "cf_tx_to_id"; -/// Name of account meta column family. -pub const CF_ACC_META: &str = "cf_acc_meta"; -/// Name of account id to tx hash map column family. -pub const CF_ACC_TO_TX: &str = "cf_acc_to_tx"; - -pub type DbResult = Result; - pub struct RocksDBIO { pub db: DBWithThreadMode, } -pub trait SimpleStorableCell: BorshSerialize + BorshDeserialize { - const CF_NAME: &'static str; - const CELL_NAME: &'static str; - - fn key_constructor() -> DbResult>; - fn value_constructor(&self) -> DbResult>; - - fn column_ref(db: &RocksDBIO) -> Arc> { - db.db - .cf_handle(Self::CF_NAME) - .unwrap_or_else(|| panic!("Column family {:?} must be present", Self::CF_NAME)) - } - - fn get(db: &RocksDBIO) -> DbResult { - let cf_ref = Self::column_ref(db); - let res = db - .db - .get_cf(&cf_ref, Self::key_constructor()?) - .map_err(|rerr| { - DbError::rocksdb_cast_message( - rerr, - Some(format!("Failed to read {:?}", Self::CELL_NAME)), - ) - })?; - - if let Some(data) = res { - Ok(borsh::from_slice::(&data).map_err(|err| { - DbError::borsh_cast_message( - err, - Some(format!("Failed to deserialize {:?}", Self::CELL_NAME)), - ) - })?) - } else { - Err(DbError::db_interaction_error(format!( - "{:?} not found", - Self::CELL_NAME - ))) - } - } - - fn get_opt(db: &RocksDBIO) -> DbResult> { - let cf_ref = Self::column_ref(db); - let res = db - .db - .get_cf(&cf_ref, Self::key_constructor()?) 
- .map_err(|rerr| { - DbError::rocksdb_cast_message( - rerr, - Some(format!("Failed to read {:?}", Self::CELL_NAME)), - ) - })?; - - res.map(|data| { - borsh::from_slice::(&data).map_err(|err| { - DbError::borsh_cast_message( - err, - Some(format!("Failed to deserialize {:?}", Self::CELL_NAME)), - ) - }) - }) - .transpose() - } - - fn put(&self, db: &RocksDBIO) -> DbResult<()> { - let cf_meta = db.meta_column(); - db.db - .put_cf( - &cf_meta, - Self::key_constructor()?, - self.value_constructor()?, - ) - .map_err(|rerr| { - DbError::rocksdb_cast_message( - rerr, - Some(format!("Failed to write {:?}", Self::CELL_NAME)), - ) - })?; - Ok(()) - } - - fn put_batch(&self, db: &RocksDBIO, write_batch: &mut WriteBatch) -> DbResult<()> { - let cf_meta = db.meta_column(); - write_batch.put_cf( - &cf_meta, - Self::key_constructor()?, - self.value_constructor()?, - ); - Ok(()) - } -} - impl RocksDBIO { pub fn open_or_create( path: &Path, @@ -254,16 +121,16 @@ impl RocksDBIO { // Generics fn get(&self) -> DbResult { - T::get(self) + T::get(&self.db) } #[expect(unused, reason = "Unused")] fn get_opt(&self) -> DbResult> { - T::get_opt(self) + T::get_opt(&self.db) } fn put(&self, cell: &T) -> DbResult<()> { - cell.put(self) + cell.put(&self.db) } fn put_batch( @@ -271,7 +138,7 @@ impl RocksDBIO { cell: &T, write_batch: &mut WriteBatch, ) -> DbResult<()> { - cell.put_batch(self, write_batch) + cell.put_batch(&self.db, write_batch) } // State diff --git a/storage/src/indexer/read_once.rs b/storage/src/indexer/read_once.rs index eb1499ba..a4ff5877 100644 --- a/storage/src/indexer/read_once.rs +++ b/storage/src/indexer/read_once.rs @@ -1,8 +1,9 @@ -use super::{ - Block, DB_META_FIRST_BLOCK_IN_DB_KEY, DB_META_FIRST_BLOCK_SET_KEY, DB_META_LAST_BREAKPOINT_ID, - DB_META_LAST_OBSERVED_L1_LIB_HEADER_ID_IN_DB_KEY, DbError, DbResult, RocksDBIO, V03State, +use super::{Block, DbError, DbResult, RocksDBIO, V03State}; +use crate::{ + DB_META_FIRST_BLOCK_IN_DB_KEY, DB_META_FIRST_BLOCK_SET_KEY, 
DB_META_LAST_BREAKPOINT_ID, + DB_META_LAST_OBSERVED_L1_LIB_HEADER_ID_IN_DB_KEY, + storable_cell::cells::meta_shared::LastBlockCell, }; -use crate::indexer::meta_cells::LastBlockCell; #[expect(clippy::multiple_inherent_impl, reason = "Readability")] impl RocksDBIO { diff --git a/storage/src/indexer/write_atomic.rs b/storage/src/indexer/write_atomic.rs index 7e81d4b6..3274eb8e 100644 --- a/storage/src/indexer/write_atomic.rs +++ b/storage/src/indexer/write_atomic.rs @@ -2,12 +2,12 @@ use std::collections::HashMap; use rocksdb::WriteBatch; -use super::{ - Arc, BREAKPOINT_INTERVAL, Block, BoundColumnFamily, DB_META_FIRST_BLOCK_IN_DB_KEY, - DB_META_FIRST_BLOCK_SET_KEY, DB_META_LAST_BREAKPOINT_ID, - DB_META_LAST_OBSERVED_L1_LIB_HEADER_ID_IN_DB_KEY, DbError, DbResult, RocksDBIO, +use super::{Arc, BREAKPOINT_INTERVAL, Block, BoundColumnFamily, DbError, DbResult, RocksDBIO}; +use crate::{ + DB_META_FIRST_BLOCK_IN_DB_KEY, DB_META_FIRST_BLOCK_SET_KEY, DB_META_LAST_BREAKPOINT_ID, + DB_META_LAST_OBSERVED_L1_LIB_HEADER_ID_IN_DB_KEY, + storable_cell::cells::meta_shared::LastBlockCell, }; -use crate::indexer::meta_cells::LastBlockCell; #[expect(clippy::multiple_inherent_impl, reason = "Readability")] impl RocksDBIO { diff --git a/storage/src/indexer/write_non_atomic.rs b/storage/src/indexer/write_non_atomic.rs index fc93b68c..f44cda7d 100644 --- a/storage/src/indexer/write_non_atomic.rs +++ b/storage/src/indexer/write_non_atomic.rs @@ -1,8 +1,9 @@ -use super::{ - BREAKPOINT_INTERVAL, DB_META_FIRST_BLOCK_SET_KEY, DB_META_LAST_BREAKPOINT_ID, - DB_META_LAST_OBSERVED_L1_LIB_HEADER_ID_IN_DB_KEY, DbError, DbResult, RocksDBIO, V03State, +use super::{BREAKPOINT_INTERVAL, DbError, DbResult, RocksDBIO, V03State}; +use crate::{ + DB_META_FIRST_BLOCK_SET_KEY, DB_META_LAST_BREAKPOINT_ID, + DB_META_LAST_OBSERVED_L1_LIB_HEADER_ID_IN_DB_KEY, + storable_cell::cells::meta_shared::LastBlockCell, }; -use crate::indexer::meta_cells::LastBlockCell; #[expect(clippy::multiple_inherent_impl, reason = 
"Readability")] impl RocksDBIO { diff --git a/storage/src/lib.rs b/storage/src/lib.rs index 05c4a374..b1126626 100644 --- a/storage/src/lib.rs +++ b/storage/src/lib.rs @@ -1,3 +1,69 @@ +use crate::error::DbError; + pub mod error; pub mod indexer; pub mod sequencer; +pub mod storable_cell; + +pub type DbResult = Result; + +// General + +/// Maximal size of stored blocks in base. +/// +/// Used to control db size. +/// +/// Currently effectively unbounded. +pub const BUFF_SIZE_ROCKSDB: usize = usize::MAX; + +/// Size of stored blocks cache in memory. +/// +/// Keeping small to not run out of memory. +pub const CACHE_SIZE: usize = 1000; + +/// Key base for storing metainformation which describe if first block has been set. +pub const DB_META_FIRST_BLOCK_SET_KEY: &str = "first_block_set"; +/// Key base for storing metainformation about id of first block in db. +pub const DB_META_FIRST_BLOCK_IN_DB_KEY: &str = "first_block_in_db"; +/// Key base for storing metainformation about id of last current block in db. +pub const DB_META_LAST_BLOCK_IN_DB_KEY: &str = "last_block_in_db"; + +/// Interval between state breakpoints. +pub const BREAKPOINT_INTERVAL: u8 = 100; + +/// Name of block column family. +pub const CF_BLOCK_NAME: &str = "cf_block"; +/// Name of meta column family. +pub const CF_META_NAME: &str = "cf_meta"; + +// Indexer-specific + +/// Key base for storing metainformation about id of last observed L1 lib header in db. +pub const DB_META_LAST_OBSERVED_L1_LIB_HEADER_ID_IN_DB_KEY: &str = + "last_observed_l1_lib_header_in_db"; +/// Key base for storing metainformation about the last breakpoint. +pub const DB_META_LAST_BREAKPOINT_ID: &str = "last_breakpoint_id"; + +/// Name of breakpoint column family. +pub const CF_BREAKPOINT_NAME: &str = "cf_breakpoint"; +/// Name of hash to id map column family. +pub const CF_HASH_TO_ID: &str = "cf_hash_to_id"; +/// Name of tx hash to id map column family. 
+pub const CF_TX_TO_ID: &str = "cf_tx_to_id"; +/// Name of account meta column family. +pub const CF_ACC_META: &str = "cf_acc_meta"; +/// Name of account id to tx hash map column family. +pub const CF_ACC_TO_TX: &str = "cf_acc_to_tx"; + +// Sequencer-specific + +/// Key base for storing metainformation about the last finalized block on Bedrock. +pub const DB_META_LAST_FINALIZED_BLOCK_ID: &str = "last_finalized_block_id"; +/// Key base for storing metainformation about the latest block meta. +pub const DB_META_LATEST_BLOCK_META_KEY: &str = "latest_block_meta"; + +/// Key base for storing the NSSA state. +pub const DB_NSSA_STATE_KEY: &str = "nssa_state"; + +/// Name of state column family. +pub const CF_NSSA_STATE_NAME: &str = "cf_nssa_state"; diff --git a/storage/src/sequencer.rs b/storage/src/sequencer.rs index 17d0e73e..f04785e5 100644 --- a/storage/src/sequencer.rs +++ b/storage/src/sequencer.rs @@ -6,40 +6,13 @@ use rocksdb::{ BoundColumnFamily, ColumnFamilyDescriptor, DBWithThreadMode, MultiThreaded, Options, WriteBatch, }; -use crate::error::DbError; - -/// Maximal size of stored blocks in base. -/// -/// Used to control db size. -/// -/// Currently effectively unbounded. -pub const BUFF_SIZE_ROCKSDB: usize = usize::MAX; - -/// Size of stored blocks cache in memory. -/// -/// Keeping small to not run out of memory. -pub const CACHE_SIZE: usize = 1000; - -/// Key base for storing metainformation about id of first block in db. -pub const DB_META_FIRST_BLOCK_IN_DB_KEY: &str = "first_block_in_db"; -/// Key base for storing metainformation about id of last current block in db. -pub const DB_META_LAST_BLOCK_IN_DB_KEY: &str = "last_block_in_db"; -/// Key base for storing metainformation which describe if first block has been set. -pub const DB_META_FIRST_BLOCK_SET_KEY: &str = "first_block_set"; -/// Key base for storing metainformation about the last finalized block on Bedrock. 
-pub const DB_META_LAST_FINALIZED_BLOCK_ID: &str = "last_finalized_block_id"; -/// Key base for storing metainformation about the latest block meta. -pub const DB_META_LATEST_BLOCK_META_KEY: &str = "latest_block_meta"; - -/// Key base for storing the NSSA state. -pub const DB_NSSA_STATE_KEY: &str = "nssa_state"; - -/// Name of block column family. -pub const CF_BLOCK_NAME: &str = "cf_block"; -/// Name of meta column family. -pub const CF_META_NAME: &str = "cf_meta"; -/// Name of state column family. -pub const CF_NSSA_STATE_NAME: &str = "cf_nssa_state"; +use crate::{ + CF_BLOCK_NAME, CF_META_NAME, CF_NSSA_STATE_NAME, DB_META_FIRST_BLOCK_IN_DB_KEY, + DB_META_FIRST_BLOCK_SET_KEY, DB_META_LAST_FINALIZED_BLOCK_ID, DB_META_LATEST_BLOCK_META_KEY, + DB_NSSA_STATE_KEY, + error::DbError, + storable_cell::{SimpleStorableCell, cells::meta_shared::LastBlockCell}, +}; pub type DbResult = Result; @@ -119,6 +92,29 @@ impl RocksDBIO { self.db.cf_handle(CF_NSSA_STATE_NAME).unwrap() } + // Generics + + fn get(&self) -> DbResult { + T::get(&self.db) + } + + #[expect(unused, reason = "Unused")] + fn get_opt(&self) -> DbResult> { + T::get_opt(&self.db) + } + + fn put(&self, cell: &T) -> DbResult<()> { + cell.put(&self.db) + } + + fn put_batch( + &self, + cell: &T, + write_batch: &mut WriteBatch, + ) -> DbResult<()> { + cell.put_batch(&self.db, write_batch) + } + pub fn get_meta_first_block_in_db(&self) -> DbResult { let cf_meta = self.meta_column(); let res = self @@ -149,32 +145,7 @@ impl RocksDBIO { } pub fn get_meta_last_block_in_db(&self) -> DbResult { - let cf_meta = self.meta_column(); - let res = self - .db - .get_cf( - &cf_meta, - borsh::to_vec(&DB_META_LAST_BLOCK_IN_DB_KEY).map_err(|err| { - DbError::borsh_cast_message( - err, - Some("Failed to serialize DB_META_LAST_BLOCK_IN_DB_KEY".to_owned()), - ) - })?, - ) - .map_err(|rerr| DbError::rocksdb_cast_message(rerr, None))?; - - if let Some(data) = res { - Ok(borsh::from_slice::(&data).map_err(|err| { - 
DbError::borsh_cast_message( - err, - Some("Failed to deserialize last block".to_owned()), - ) - })?) - } else { - Err(DbError::db_interaction_error( - "Last block not found".to_owned(), - )) - } + self.get::().map(|cell| cell.0) } pub fn get_meta_is_first_block_set(&self) -> DbResult { @@ -246,25 +217,7 @@ impl RocksDBIO { } pub fn put_meta_last_block_in_db(&self, block_id: u64) -> DbResult<()> { - let cf_meta = self.meta_column(); - self.db - .put_cf( - &cf_meta, - borsh::to_vec(&DB_META_LAST_BLOCK_IN_DB_KEY).map_err(|err| { - DbError::borsh_cast_message( - err, - Some("Failed to serialize DB_META_LAST_BLOCK_IN_DB_KEY".to_owned()), - ) - })?, - borsh::to_vec(&block_id).map_err(|err| { - DbError::borsh_cast_message( - err, - Some("Failed to serialize last block id".to_owned()), - ) - })?, - ) - .map_err(|rerr| DbError::rocksdb_cast_message(rerr, None))?; - Ok(()) + self.put(&LastBlockCell(block_id)) } fn put_meta_last_block_in_db_batch( @@ -272,23 +225,7 @@ impl RocksDBIO { block_id: u64, batch: &mut WriteBatch, ) -> DbResult<()> { - let cf_meta = self.meta_column(); - batch.put_cf( - &cf_meta, - borsh::to_vec(&DB_META_LAST_BLOCK_IN_DB_KEY).map_err(|err| { - DbError::borsh_cast_message( - err, - Some("Failed to serialize DB_META_LAST_BLOCK_IN_DB_KEY".to_owned()), - ) - })?, - borsh::to_vec(&block_id).map_err(|err| { - DbError::borsh_cast_message( - err, - Some("Failed to serialize last block id".to_owned()), - ) - })?, - ); - Ok(()) + self.put_batch(&LastBlockCell(block_id), batch) } pub fn put_meta_last_finalized_block_id(&self, block_id: Option) -> DbResult<()> { diff --git a/storage/src/storable_cell/cells/meta_indexer.rs b/storage/src/storable_cell/cells/meta_indexer.rs new file mode 100644 index 00000000..8b137891 --- /dev/null +++ b/storage/src/storable_cell/cells/meta_indexer.rs @@ -0,0 +1 @@ + diff --git a/storage/src/storable_cell/cells/meta_sequencer.rs b/storage/src/storable_cell/cells/meta_sequencer.rs new file mode 100644 index 00000000..8b137891 --- 
/dev/null
+++ b/storage/src/storable_cell/cells/meta_sequencer.rs
@@ -0,0 +1 @@
+
diff --git a/storage/src/indexer/meta_cells.rs b/storage/src/storable_cell/cells/meta_shared.rs
similarity index 90%
rename from storage/src/indexer/meta_cells.rs
rename to storage/src/storable_cell/cells/meta_shared.rs
index 216866a3..4b88a385 100644
--- a/storage/src/indexer/meta_cells.rs
+++ b/storage/src/storable_cell/cells/meta_shared.rs
@@ -1,8 +1,8 @@
 use borsh::{BorshDeserialize, BorshSerialize};
 
 use crate::{
-    error::DbError,
-    indexer::{CF_META_NAME, DB_META_LAST_BLOCK_IN_DB_KEY, DbResult, SimpleStorableCell},
+    CF_META_NAME, DB_META_LAST_BLOCK_IN_DB_KEY, DbResult, error::DbError,
+    storable_cell::SimpleStorableCell,
 };
 
 #[derive(Debug)]
diff --git a/storage/src/storable_cell/cells/mod.rs b/storage/src/storable_cell/cells/mod.rs
new file mode 100644
index 00000000..914befa1
--- /dev/null
+++ b/storage/src/storable_cell/cells/mod.rs
@@ -0,0 +1,3 @@
+pub mod meta_indexer;
+pub mod meta_sequencer;
+pub mod meta_shared;
diff --git a/storage/src/storable_cell/mod.rs b/storage/src/storable_cell/mod.rs
new file mode 100644
index 00000000..06912829
--- /dev/null
+++ b/storage/src/storable_cell/mod.rs
@@ -0,0 +1,91 @@
+use std::sync::Arc;
+
+use borsh::{BorshDeserialize, BorshSerialize};
+use rocksdb::{BoundColumnFamily, DBWithThreadMode, MultiThreaded, WriteBatch};
+
+use crate::{DbResult, error::DbError};
+
+pub mod cells;
+
+pub trait SimpleStorableCell: BorshSerialize + BorshDeserialize {
+    const CF_NAME: &'static str;
+    const CELL_NAME: &'static str;
+
+    fn key_constructor() -> DbResult<Vec<u8>>;
+    fn value_constructor(&self) -> DbResult<Vec<u8>>;
+
+    fn column_ref(db: &DBWithThreadMode<MultiThreaded>) -> Arc<BoundColumnFamily<'_>> {
+        db.cf_handle(Self::CF_NAME)
+            .unwrap_or_else(|| panic!("Column family {:?} must be present", Self::CF_NAME))
+    }
+
+    fn get(db: &DBWithThreadMode<MultiThreaded>) -> DbResult<Self> {
+        let cf_ref = Self::column_ref(db);
+        let res = db
+            .get_cf(&cf_ref, Self::key_constructor()?)
+            .map_err(|rerr| {
+                DbError::rocksdb_cast_message(
+                    rerr,
+                    Some(format!("Failed to read {:?}", Self::CELL_NAME)),
+                )
+            })?;
+
+        if let Some(data) = res {
+            Ok(borsh::from_slice::<Self>(&data).map_err(|err| {
+                DbError::borsh_cast_message(
+                    err,
+                    Some(format!("Failed to deserialize {:?}", Self::CELL_NAME)),
+                )
+            })?)
+        } else {
+            Err(DbError::db_interaction_error(format!(
+                "{:?} not found",
+                Self::CELL_NAME
+            )))
+        }
+    }
+
+    fn get_opt(db: &DBWithThreadMode<MultiThreaded>) -> DbResult<Option<Self>> {
+        let cf_ref = Self::column_ref(db);
+        let res = db
+            .get_cf(&cf_ref, Self::key_constructor()?)
+            .map_err(|rerr| {
+                DbError::rocksdb_cast_message(
+                    rerr,
+                    Some(format!("Failed to read {:?}", Self::CELL_NAME)),
+                )
+            })?;
+
+        res.map(|data| {
+            borsh::from_slice::<Self>(&data).map_err(|err| {
+                DbError::borsh_cast_message(
+                    err,
+                    Some(format!("Failed to deserialize {:?}", Self::CELL_NAME)),
+                )
+            })
+        })
+        .transpose()
+    }
+
+    fn put(&self, db: &DBWithThreadMode<MultiThreaded>) -> DbResult<()> {
+        let cf_ref = Self::column_ref(db);
+        db.put_cf(&cf_ref, Self::key_constructor()?, self.value_constructor()?)
+            .map_err(|rerr| {
+                DbError::rocksdb_cast_message(
+                    rerr,
+                    Some(format!("Failed to write {:?}", Self::CELL_NAME)),
+                )
+            })?;
+        Ok(())
+    }
+
+    fn put_batch(
+        &self,
+        db: &DBWithThreadMode<MultiThreaded>,
+        write_batch: &mut WriteBatch,
+    ) -> DbResult<()> {
+        let cf_ref = Self::column_ref(db);
+        write_batch.put_cf(&cf_ref, Self::key_constructor()?, self.value_constructor()?);
+        Ok(())
+    }
+}