diff --git a/storage/src/indexer/meta_cells.rs b/storage/src/indexer/meta_cells.rs
new file mode 100644
index 00000000..b9c7eb94
--- /dev/null
+++ b/storage/src/indexer/meta_cells.rs
@@ -0,0 +1,41 @@
+use borsh::{BorshDeserialize, BorshSerialize};
+
+use crate::{
+    error::DbError,
+    indexer::{CF_META_NAME, DB_META_LAST_BLOCK_IN_DB_KEY, DbResult, SimpleStorableCell},
+};
+
+#[derive(Debug)]
+pub struct LastBlockCell(pub u64);
+
+impl BorshSerialize for LastBlockCell {
+    fn serialize<W: std::io::Write>(&self, writer: &mut W) -> std::io::Result<()> {
+        u64::serialize(&self.0, writer)
+    }
+}
+
+impl BorshDeserialize for LastBlockCell {
+    fn deserialize_reader<R: std::io::Read>(reader: &mut R) -> std::io::Result<Self> {
+        u64::deserialize_reader(reader).map(|val| LastBlockCell(val))
+    }
+}
+
+impl SimpleStorableCell for LastBlockCell {
+    const CELL_NAME: &'static str = DB_META_LAST_BLOCK_IN_DB_KEY;
+    const CF_NAME: &'static str = CF_META_NAME;
+
+    fn key_constructor() -> DbResult<Vec<u8>> {
+        borsh::to_vec(&Self::CELL_NAME).map_err(|err| {
+            DbError::borsh_cast_message(
+                err,
+                Some(format!("Failed to serialize {:?}", Self::CELL_NAME)),
+            )
+        })
+    }
+
+    fn value_constructor(&self) -> DbResult<Vec<u8>> {
+        borsh::to_vec(&self.0).map_err(|err| {
+            DbError::borsh_cast_message(err, Some("Failed to serialize last block id".to_owned()))
+        })
+    }
+}
diff --git a/storage/src/indexer/mod.rs b/storage/src/indexer/mod.rs
index c5d47c1f..340fb515 100644
--- a/storage/src/indexer/mod.rs
+++ b/storage/src/indexer/mod.rs
@@ -1,13 +1,15 @@
 use std::{path::Path, sync::Arc};
 
+use borsh::{BorshDeserialize, BorshSerialize};
 use common::block::Block;
 use nssa::V02State;
 use rocksdb::{
-    BoundColumnFamily, ColumnFamilyDescriptor, DBWithThreadMode, MultiThreaded, Options,
+    BoundColumnFamily, ColumnFamilyDescriptor, DBWithThreadMode, MultiThreaded, Options, WriteBatch,
 };
 
 use crate::error::DbError;
 
+pub mod meta_cells;
 pub mod read_multiple;
 pub mod read_once;
 pub mod write_atomic;
@@ -61,6 +63,97 @@ pub struct RocksDBIO {
     pub db: DBWithThreadMode<MultiThreaded>,
 }
 
+pub trait SimpleStorableCell: BorshSerialize + BorshDeserialize {
+    const CF_NAME: &'static str;
+    const CELL_NAME: &'static str;
+
+    fn key_constructor() -> DbResult<Vec<u8>>;
+    fn value_constructor(&self) -> DbResult<Vec<u8>>;
+
+    fn column_ref(db: &RocksDBIO) -> Arc<BoundColumnFamily<'_>> {
+        db.db
+            .cf_handle(Self::CF_NAME)
+            .expect(format!("Column family {:?} must be present", Self::CF_NAME).as_str())
+    }
+
+    fn get(db: &RocksDBIO) -> DbResult<Self> {
+        let cf_ref = Self::column_ref(db);
+        let res = db
+            .db
+            .get_cf(&cf_ref, Self::key_constructor()?)
+            .map_err(|rerr| {
+                DbError::rocksdb_cast_message(
+                    rerr,
+                    Some(format!("Failed to read {:?}", Self::CELL_NAME)),
+                )
+            })?;
+
+        if let Some(data) = res {
+            Ok(borsh::from_slice::<Self>(&data).map_err(|err| {
+                DbError::borsh_cast_message(
+                    err,
+                    Some(format!("Failed to deserialize {:?}", Self::CELL_NAME)),
+                )
+            })?)
+        } else {
+            Err(DbError::db_interaction_error(format!(
+                "{:?} not found",
+                Self::CELL_NAME
+            )))
+        }
+    }
+
+    fn get_opt(db: &RocksDBIO) -> DbResult<Option<Self>> {
+        let cf_ref = Self::column_ref(db);
+        let res = db
+            .db
+            .get_cf(&cf_ref, Self::key_constructor()?)
+            .map_err(|rerr| {
+                DbError::rocksdb_cast_message(
+                    rerr,
+                    Some(format!("Failed to read {:?}", Self::CELL_NAME)),
+                )
+            })?;
+
+        res.map(|data| {
+            borsh::from_slice::<Self>(&data).map_err(|err| {
+                DbError::borsh_cast_message(
+                    err,
+                    Some(format!("Failed to deserialize {:?}", Self::CELL_NAME)),
+                )
+            })
+        })
+        .transpose()
+    }
+
+    fn put(&self, db: &RocksDBIO) -> DbResult<()> {
+        let cf_meta = db.meta_column();
+        db.db
+            .put_cf(
+                &cf_meta,
+                Self::key_constructor()?,
+                self.value_constructor()?,
+            )
+            .map_err(|rerr| {
+                DbError::rocksdb_cast_message(
+                    rerr,
+                    Some(format!("Failed to write {:?}", Self::CELL_NAME)),
+                )
+            })?;
+        Ok(())
+    }
+
+    fn put_batch(&self, db: &RocksDBIO, write_batch: &mut WriteBatch) -> DbResult<()> {
+        let cf_meta = db.meta_column();
+        write_batch.put_cf(
+            &cf_meta,
+            Self::key_constructor()?,
+            self.value_constructor()?,
+        );
+        Ok(())
+    }
+}
+
 impl RocksDBIO {
     pub fn open_or_create(
         path: &Path,
@@ -158,6 +251,32 @@
         .expect("Account meta column should exist")
     }
 
+    // Generics
+
+    #[allow(unused)]
+    fn get<T: SimpleStorableCell>(&self) -> DbResult<T> {
+        T::get(&self)
+    }
+
+    #[allow(unused)]
+    fn get_opt<T: SimpleStorableCell>(&self) -> DbResult<Option<T>> {
+        T::get_opt(&self)
+    }
+
+    #[allow(unused)]
+    fn put<T: SimpleStorableCell>(&self, cell: T) -> DbResult<()> {
+        cell.put(&self)
+    }
+
+    #[allow(unused)]
+    fn put_batch<T: SimpleStorableCell>(
+        &self,
+        cell: T,
+        write_batch: &mut WriteBatch,
+    ) -> DbResult<()> {
+        cell.put_batch(&self, write_batch)
+    }
+
     // State
 
     pub fn calculate_state_for_id(&self, block_id: u64) -> DbResult<V02State> {
diff --git a/storage/src/indexer/read_once.rs b/storage/src/indexer/read_once.rs
index f966f349..eb5fc697 100644
--- a/storage/src/indexer/read_once.rs
+++ b/storage/src/indexer/read_once.rs
@@ -1,8 +1,8 @@
 use super::{
-    Block, DB_META_FIRST_BLOCK_IN_DB_KEY, DB_META_FIRST_BLOCK_SET_KEY,
-    DB_META_LAST_BLOCK_IN_DB_KEY, DB_META_LAST_BREAKPOINT_ID,
+    Block, DB_META_FIRST_BLOCK_IN_DB_KEY, DB_META_FIRST_BLOCK_SET_KEY, DB_META_LAST_BREAKPOINT_ID,
     DB_META_LAST_OBSERVED_L1_LIB_HEADER_ID_IN_DB_KEY, DbError, DbResult, RocksDBIO, V02State,
 };
+use crate::indexer::meta_cells::LastBlockCell;
 
 #[expect(clippy::multiple_inherent_impl, reason = "Readability")]
 impl RocksDBIO {
@@ -38,32 +38,7 @@
     }
 
     pub fn get_meta_last_block_in_db(&self) -> DbResult<u64> {
-        let cf_meta = self.meta_column();
-        let res = self
-            .db
-            .get_cf(
-                &cf_meta,
-                borsh::to_vec(&DB_META_LAST_BLOCK_IN_DB_KEY).map_err(|err| {
-                    DbError::borsh_cast_message(
-                        err,
-                        Some("Failed to serialize DB_META_LAST_BLOCK_IN_DB_KEY".to_owned()),
-                    )
-                })?,
-            )
-            .map_err(|rerr| DbError::rocksdb_cast_message(rerr, None))?;
-
-        if let Some(data) = res {
-            Ok(borsh::from_slice::<u64>(&data).map_err(|err| {
-                DbError::borsh_cast_message(
-                    err,
-                    Some("Failed to deserialize last block".to_owned()),
-                )
-            })?)
-        } else {
-            Err(DbError::db_interaction_error(
-                "Last block not found".to_owned(),
-            ))
-        }
+        self.get::<LastBlockCell>().map(|cell| cell.0)
     }
 
     pub fn get_meta_last_observed_l1_lib_header_in_db(&self) -> DbResult<Option<u64>> {
diff --git a/storage/src/indexer/write_atomic.rs b/storage/src/indexer/write_atomic.rs
index 161d763a..82dfba7c 100644
--- a/storage/src/indexer/write_atomic.rs
+++ b/storage/src/indexer/write_atomic.rs
@@ -4,9 +4,10 @@ use rocksdb::WriteBatch;
 
 use super::{
     Arc, BREAKPOINT_INTERVAL, Block, BoundColumnFamily, DB_META_FIRST_BLOCK_IN_DB_KEY,
-    DB_META_FIRST_BLOCK_SET_KEY, DB_META_LAST_BLOCK_IN_DB_KEY, DB_META_LAST_BREAKPOINT_ID,
+    DB_META_FIRST_BLOCK_SET_KEY, DB_META_LAST_BREAKPOINT_ID,
     DB_META_LAST_OBSERVED_L1_LIB_HEADER_ID_IN_DB_KEY, DbError, DbResult, RocksDBIO,
 };
+use crate::indexer::meta_cells::LastBlockCell;
 
 #[expect(clippy::multiple_inherent_impl, reason = "Readability")]
 impl RocksDBIO {
@@ -163,23 +164,7 @@
         block_id: u64,
         write_batch: &mut WriteBatch,
     ) -> DbResult<()> {
-        let cf_meta = self.meta_column();
-        write_batch.put_cf(
-            &cf_meta,
-            borsh::to_vec(&DB_META_LAST_BLOCK_IN_DB_KEY).map_err(|err| {
-                DbError::borsh_cast_message(
-                    err,
-                    Some("Failed to serialize DB_META_LAST_BLOCK_IN_DB_KEY".to_owned()),
-                )
-            })?,
-            borsh::to_vec(&block_id).map_err(|err| {
-                DbError::borsh_cast_message(
-                    err,
-                    Some("Failed to serialize last block id".to_owned()),
-                )
-            })?,
-        );
-        Ok(())
+        self.put_batch(LastBlockCell(block_id), write_batch)
     }
 
     pub fn put_meta_last_observed_l1_lib_header_in_db_batch(
diff --git a/storage/src/indexer/write_non_atomic.rs b/storage/src/indexer/write_non_atomic.rs
index 84fc7de5..c674095a 100644
--- a/storage/src/indexer/write_non_atomic.rs
+++ b/storage/src/indexer/write_non_atomic.rs
@@ -1,33 +1,15 @@
 use super::{
-    BREAKPOINT_INTERVAL, DB_META_FIRST_BLOCK_SET_KEY, DB_META_LAST_BLOCK_IN_DB_KEY,
-    DB_META_LAST_BREAKPOINT_ID, DB_META_LAST_OBSERVED_L1_LIB_HEADER_ID_IN_DB_KEY, DbError,
-    DbResult, RocksDBIO, V02State,
+    BREAKPOINT_INTERVAL, DB_META_FIRST_BLOCK_SET_KEY, DB_META_LAST_BREAKPOINT_ID,
+    DB_META_LAST_OBSERVED_L1_LIB_HEADER_ID_IN_DB_KEY, DbError, DbResult, RocksDBIO, V02State,
 };
+use crate::indexer::meta_cells::LastBlockCell;
 
 #[expect(clippy::multiple_inherent_impl, reason = "Readability")]
 impl RocksDBIO {
     // Meta
 
     pub fn put_meta_last_block_in_db(&self, block_id: u64) -> DbResult<()> {
-        let cf_meta = self.meta_column();
-        self.db
-            .put_cf(
-                &cf_meta,
-                borsh::to_vec(&DB_META_LAST_BLOCK_IN_DB_KEY).map_err(|err| {
-                    DbError::borsh_cast_message(
-                        err,
-                        Some("Failed to serialize DB_META_LAST_BLOCK_IN_DB_KEY".to_owned()),
-                    )
-                })?,
-                borsh::to_vec(&block_id).map_err(|err| {
-                    DbError::borsh_cast_message(
-                        err,
-                        Some("Failed to serialize last block id".to_owned()),
-                    )
-                })?,
-            )
-            .map_err(|rerr| DbError::rocksdb_cast_message(rerr, None))?;
-        Ok(())
+        self.put(LastBlockCell(block_id))
     }
 
     pub fn put_meta_last_observed_l1_lib_header_in_db(