lssa/storage/src/lib.rs

546 lines
19 KiB
Rust
Raw Normal View History

2024-10-10 14:09:31 +03:00
use std::{path::Path, sync::Arc};
2026-01-27 01:20:17 -03:00
use common::block::Block;
2024-10-10 14:09:31 +03:00
use error::DbError;
2026-01-27 01:20:17 -03:00
use nssa::V02State;
2024-10-10 14:09:31 +03:00
use rocksdb::{
2026-01-27 01:20:17 -03:00
BoundColumnFamily, ColumnFamilyDescriptor, DBWithThreadMode, MultiThreaded, Options, WriteBatch,
2024-10-10 14:09:31 +03:00
};
pub mod error;

/// Upper bound on the total size of stored blocks in the database.
///
/// Used to control db size.
///
/// Currently effectively unbounded.
pub const BUFF_SIZE_ROCKSDB: usize = usize::MAX;
/// Number of stored blocks cached in memory.
///
/// Kept small to avoid running out of memory.
pub const CACHE_SIZE: usize = 1000;
/// Meta key: id of the first block stored in the database.
pub const DB_META_FIRST_BLOCK_IN_DB_KEY: &str = "first_block_in_db";
/// Meta key: id of the last (most recent) block stored in the database.
pub const DB_META_LAST_BLOCK_IN_DB_KEY: &str = "last_block_in_db";
/// Meta key: marker flag recording whether the first block has been set.
pub const DB_META_FIRST_BLOCK_SET_KEY: &str = "first_block_set";
/// Meta key: id of the last finalized block on Bedrock.
pub const DB_META_LAST_FINALIZED_BLOCK_ID: &str = "last_finalized_block_id";
/// Meta key: hash of the latest block.
pub const DB_META_LATEST_BLOCK_HASH_KEY: &str = "latest_block_hash";

/// Key under which the NSSA state is stored in its column family.
pub const DB_NSSA_STATE_KEY: &str = "nssa_state";

/// Name of block column family
pub const CF_BLOCK_NAME: &str = "cf_block";
/// Name of meta column family
pub const CF_META_NAME: &str = "cf_meta";
/// Name of state column family
pub const CF_NSSA_STATE_NAME: &str = "cf_nssa_state";
2024-10-10 14:09:31 +03:00
/// Result alias used throughout this storage layer.
pub type DbResult<T> = Result<T, DbError>;

/// Wrapper around a multi-threaded RocksDB handle holding the block, meta and
/// NSSA-state column families.
pub struct RocksDBIO {
    // Exposed directly; callers elsewhere access the raw handle.
    pub db: DBWithThreadMode<MultiThreaded>,
}
impl RocksDBIO {
pub fn open_or_create(path: &Path, start_block: Option<&Block>) -> DbResult<Self> {
2024-10-10 14:09:31 +03:00
let mut cf_opts = Options::default();
cf_opts.set_max_write_buffer_number(16);
2025-11-26 00:27:20 +03:00
// ToDo: Add more column families for different data
2024-10-10 14:09:31 +03:00
let cfb = ColumnFamilyDescriptor::new(CF_BLOCK_NAME, cf_opts.clone());
let cfmeta = ColumnFamilyDescriptor::new(CF_META_NAME, cf_opts.clone());
2026-01-27 01:20:17 -03:00
let cfstate = ColumnFamilyDescriptor::new(CF_NSSA_STATE_NAME, cf_opts.clone());
2024-10-10 14:09:31 +03:00
let mut db_opts = Options::default();
db_opts.create_missing_column_families(true);
db_opts.create_if_missing(true);
let db = DBWithThreadMode::<MultiThreaded>::open_cf_descriptors(
&db_opts,
path,
2026-01-27 01:20:17 -03:00
vec![cfb, cfmeta, cfstate],
2024-10-10 14:09:31 +03:00
);
let dbio = Self {
2025-11-26 00:27:20 +03:00
// There is no point in handling this from runner code
2024-10-10 14:09:31 +03:00
db: db.unwrap(),
};
let is_start_set = dbio.get_meta_is_first_block_set()?;
if is_start_set {
Ok(dbio)
} else if let Some(block) = start_block {
2025-09-02 11:06:41 +03:00
let block_id = block.header.block_id;
2024-10-10 14:09:31 +03:00
dbio.put_meta_first_block_in_db(block)?;
dbio.put_meta_is_first_block_set()?;
2024-12-09 03:59:53 +01:00
dbio.put_meta_last_block_in_db(block_id)?;
dbio.put_meta_last_finalized_block_id(None)?;
dbio.put_meta_latest_block_hash(block.header.hash)?;
2024-12-09 03:59:53 +01:00
2024-10-10 14:09:31 +03:00
Ok(dbio)
} else {
// Here we are trying to start a DB without a block, one should not do it.
unreachable!()
2024-10-10 14:09:31 +03:00
}
}
2024-12-09 04:00:08 +01:00
pub fn destroy(path: &Path) -> DbResult<()> {
let mut cf_opts = Options::default();
cf_opts.set_max_write_buffer_number(16);
2025-11-26 00:27:20 +03:00
// ToDo: Add more column families for different data
2025-01-31 17:01:39 -05:00
let _cfb = ColumnFamilyDescriptor::new(CF_BLOCK_NAME, cf_opts.clone());
let _cfmeta = ColumnFamilyDescriptor::new(CF_META_NAME, cf_opts.clone());
2026-01-27 01:20:17 -03:00
let _cfstate = ColumnFamilyDescriptor::new(CF_NSSA_STATE_NAME, cf_opts.clone());
2024-12-09 04:00:08 +01:00
let mut db_opts = Options::default();
db_opts.create_missing_column_families(true);
db_opts.create_if_missing(true);
2024-12-09 04:18:27 +01:00
DBWithThreadMode::<MultiThreaded>::destroy(&db_opts, path)
.map_err(|rerr| DbError::rocksdb_cast_message(rerr, None))
2024-12-09 04:00:08 +01:00
}
2025-06-10 01:39:11 -04:00
pub fn meta_column(&self) -> Arc<BoundColumnFamily<'_>> {
2024-10-10 14:09:31 +03:00
self.db.cf_handle(CF_META_NAME).unwrap()
}
2025-06-10 01:39:11 -04:00
pub fn block_column(&self) -> Arc<BoundColumnFamily<'_>> {
2024-10-10 14:09:31 +03:00
self.db.cf_handle(CF_BLOCK_NAME).unwrap()
}
2026-01-27 01:20:17 -03:00
pub fn nssa_state_column(&self) -> Arc<BoundColumnFamily<'_>> {
self.db.cf_handle(CF_NSSA_STATE_NAME).unwrap()
2025-05-23 15:48:09 -04:00
}
2024-10-10 14:09:31 +03:00
pub fn get_meta_first_block_in_db(&self) -> DbResult<u64> {
let cf_meta = self.meta_column();
let res = self
.db
2025-09-25 11:53:42 +03:00
.get_cf(
&cf_meta,
borsh::to_vec(&DB_META_FIRST_BLOCK_IN_DB_KEY).map_err(|err| {
DbError::borsh_cast_message(
err,
Some("Failed to serialize DB_META_FIRST_BLOCK_IN_DB_KEY".to_string()),
)
})?,
)
2024-10-10 14:09:31 +03:00
.map_err(|rerr| DbError::rocksdb_cast_message(rerr, None))?;
if let Some(data) = res {
2025-09-25 11:53:42 +03:00
Ok(borsh::from_slice::<u64>(&data).map_err(|err| {
DbError::borsh_cast_message(
err,
Some("Failed to deserialize first block".to_string()),
)
})?)
2024-10-10 14:09:31 +03:00
} else {
Err(DbError::db_interaction_error(
"First block not found".to_string(),
))
}
}
pub fn get_meta_last_block_in_db(&self) -> DbResult<u64> {
let cf_meta = self.meta_column();
let res = self
.db
2025-09-25 11:53:42 +03:00
.get_cf(
&cf_meta,
borsh::to_vec(&DB_META_LAST_BLOCK_IN_DB_KEY).map_err(|err| {
DbError::borsh_cast_message(
err,
Some("Failed to serialize DB_META_LAST_BLOCK_IN_DB_KEY".to_string()),
)
})?,
)
2024-10-10 14:09:31 +03:00
.map_err(|rerr| DbError::rocksdb_cast_message(rerr, None))?;
if let Some(data) = res {
2025-09-25 11:53:42 +03:00
Ok(borsh::from_slice::<u64>(&data).map_err(|err| {
DbError::borsh_cast_message(
err,
Some("Failed to deserialize last block".to_string()),
)
})?)
2024-10-10 14:09:31 +03:00
} else {
Err(DbError::db_interaction_error(
"Last block not found".to_string(),
))
}
}
pub fn get_meta_is_first_block_set(&self) -> DbResult<bool> {
let cf_meta = self.meta_column();
let res = self
.db
2025-09-25 11:53:42 +03:00
.get_cf(
&cf_meta,
borsh::to_vec(&DB_META_FIRST_BLOCK_SET_KEY).map_err(|err| {
DbError::borsh_cast_message(
err,
Some("Failed to serialize DB_META_FIRST_BLOCK_SET_KEY".to_string()),
)
})?,
)
2024-10-10 14:09:31 +03:00
.map_err(|rerr| DbError::rocksdb_cast_message(rerr, None))?;
Ok(res.is_some())
}
2026-01-27 01:20:17 -03:00
pub fn put_nssa_state_in_db(&self, state: &V02State, batch: &mut WriteBatch) -> DbResult<()> {
2026-01-27 16:03:21 -03:00
let cf_nssa_state = self.nssa_state_column();
2026-01-27 01:20:17 -03:00
batch.put_cf(
2026-01-27 16:03:21 -03:00
&cf_nssa_state,
2026-01-27 01:20:17 -03:00
borsh::to_vec(&DB_NSSA_STATE_KEY).map_err(|err| {
DbError::borsh_cast_message(
err,
Some("Failed to serialize DB_NSSA_STATE_KEY".to_string()),
)
})?,
borsh::to_vec(state).map_err(|err| {
2026-01-29 12:25:48 -03:00
DbError::borsh_cast_message(err, Some("Failed to serialize NSSA state".to_string()))
2026-01-27 01:20:17 -03:00
})?,
);
Ok(())
}
/// Records `block` as the first block: stores its id under the first-block
/// meta key, then writes the block itself through a dedicated write batch.
///
/// Note: the meta put and the block write are two separate RocksDB writes,
/// so they are not atomic with respect to each other.
pub fn put_meta_first_block_in_db(&self, block: &Block) -> DbResult<()> {
    let cf_meta = self.meta_column();
    self.db
        .put_cf(
            &cf_meta,
            borsh::to_vec(&DB_META_FIRST_BLOCK_IN_DB_KEY).map_err(|err| {
                DbError::borsh_cast_message(
                    err,
                    Some("Failed to serialize DB_META_FIRST_BLOCK_IN_DB_KEY".to_string()),
                )
            })?,
            borsh::to_vec(&block.header.block_id).map_err(|err| {
                DbError::borsh_cast_message(
                    err,
                    Some("Failed to serialize first block id".to_string()),
                )
            })?,
        )
        .map_err(|rerr| DbError::rocksdb_cast_message(rerr, None))?;
    // `first = true` skips the last-block bookkeeping inside put_block.
    let mut batch = WriteBatch::default();
    self.put_block(block, true, &mut batch)?;
    self.db.write(batch).map_err(|rerr| {
        DbError::rocksdb_cast_message(
            rerr,
            Some("Failed to write first block in db".to_string()),
        )
    })?;

    Ok(())
}
pub fn put_meta_last_block_in_db(&self, block_id: u64) -> DbResult<()> {
let cf_meta = self.meta_column();
self.db
.put_cf(
&cf_meta,
2025-09-25 11:53:42 +03:00
borsh::to_vec(&DB_META_LAST_BLOCK_IN_DB_KEY).map_err(|err| {
DbError::borsh_cast_message(
err,
Some("Failed to serialize DB_META_LAST_BLOCK_IN_DB_KEY".to_string()),
)
})?,
borsh::to_vec(&block_id).map_err(|err| {
DbError::borsh_cast_message(
err,
Some("Failed to serialize last block id".to_string()),
)
})?,
2024-10-10 14:09:31 +03:00
)
.map_err(|rerr| DbError::rocksdb_cast_message(rerr, None))?;
Ok(())
}
/// Queues an update of the last-block id into `batch`; the caller commits it.
fn put_meta_last_block_in_db_batch(
    &self,
    block_id: u64,
    batch: &mut WriteBatch,
) -> DbResult<()> {
    let key = borsh::to_vec(&DB_META_LAST_BLOCK_IN_DB_KEY).map_err(|err| {
        DbError::borsh_cast_message(
            err,
            Some("Failed to serialize DB_META_LAST_BLOCK_IN_DB_KEY".to_string()),
        )
    })?;
    let value = borsh::to_vec(&block_id).map_err(|err| {
        DbError::borsh_cast_message(
            err,
            Some("Failed to serialize last block id".to_string()),
        )
    })?;
    batch.put_cf(&self.meta_column(), key, value);
    Ok(())
}
/// Persists the id of the last block finalized on Bedrock (`None` until one
/// exists), writing directly (not batched).
///
/// # Errors
///
/// Returns a `DbError` if serialization or the RocksDB write fails.
pub fn put_meta_last_finalized_block_id(&self, block_id: Option<u64>) -> DbResult<()> {
    let cf_meta = self.meta_column();
    self.db
        .put_cf(
            &cf_meta,
            borsh::to_vec(&DB_META_LAST_FINALIZED_BLOCK_ID).map_err(|err| {
                DbError::borsh_cast_message(
                    err,
                    Some("Failed to serialize DB_META_LAST_FINALIZED_BLOCK_ID".to_string()),
                )
            })?,
            borsh::to_vec(&block_id).map_err(|err| {
                // Message previously said "last block id", which pointed log
                // readers at the wrong meta entry.
                DbError::borsh_cast_message(
                    err,
                    Some("Failed to serialize last finalized block id".to_string()),
                )
            })?,
        )
        .map_err(|rerr| DbError::rocksdb_cast_message(rerr, None))?;
    Ok(())
}
2024-10-10 14:09:31 +03:00
pub fn put_meta_is_first_block_set(&self) -> DbResult<()> {
let cf_meta = self.meta_column();
self.db
2025-09-25 11:53:42 +03:00
.put_cf(
&cf_meta,
borsh::to_vec(&DB_META_FIRST_BLOCK_SET_KEY).map_err(|err| {
DbError::borsh_cast_message(
err,
Some("Failed to serialize DB_META_FIRST_BLOCK_SET_KEY".to_string()),
)
})?,
[1u8; 1],
)
2024-10-10 14:09:31 +03:00
.map_err(|rerr| DbError::rocksdb_cast_message(rerr, None))?;
Ok(())
}
/// Persists the latest block hash under its meta key, writing directly
/// (not batched).
fn put_meta_latest_block_hash(&self, block_hash: common::HashType) -> DbResult<()> {
    let key = borsh::to_vec(&DB_META_LATEST_BLOCK_HASH_KEY).map_err(|err| {
        DbError::borsh_cast_message(
            err,
            Some("Failed to serialize DB_META_LATEST_BLOCK_HASH_KEY".to_string()),
        )
    })?;
    let value = borsh::to_vec(&block_hash).map_err(|err| {
        DbError::borsh_cast_message(
            err,
            Some("Failed to serialize latest block hash".to_string()),
        )
    })?;
    self.db
        .put_cf(&self.meta_column(), key, value)
        .map_err(|rerr| DbError::rocksdb_cast_message(rerr, None))?;
    Ok(())
}
/// Queues an update of the latest block hash into `batch`; the caller
/// commits it.
fn put_meta_latest_block_hash_batch(
    &self,
    block_hash: common::HashType,
    batch: &mut WriteBatch,
) -> DbResult<()> {
    let key = borsh::to_vec(&DB_META_LATEST_BLOCK_HASH_KEY).map_err(|err| {
        DbError::borsh_cast_message(
            err,
            Some("Failed to serialize DB_META_LATEST_BLOCK_HASH_KEY".to_string()),
        )
    })?;
    let value = borsh::to_vec(&block_hash).map_err(|err| {
        DbError::borsh_cast_message(
            err,
            Some("Failed to serialize latest block hash".to_string()),
        )
    })?;
    batch.put_cf(&self.meta_column(), key, value);
    Ok(())
}
/// Reads the hash of the latest stored block.
///
/// # Errors
///
/// Returns a `DbError` if the meta entry is missing or cannot be decoded.
pub fn latest_block_hash(&self) -> DbResult<common::HashType> {
    let key = borsh::to_vec(&DB_META_LATEST_BLOCK_HASH_KEY).map_err(|err| {
        DbError::borsh_cast_message(
            err,
            Some("Failed to serialize DB_META_LATEST_BLOCK_HASH_KEY".to_string()),
        )
    })?;
    let raw = self
        .db
        .get_cf(&self.meta_column(), key)
        .map_err(|rerr| DbError::rocksdb_cast_message(rerr, None))?;
    match raw {
        Some(data) => borsh::from_slice::<common::HashType>(&data).map_err(|err| {
            DbError::borsh_cast_message(
                err,
                Some("Failed to deserialize latest block hash".to_string()),
            )
        }),
        None => Err(DbError::db_interaction_error(
            "Latest block hash not found".to_string(),
        )),
    }
}
pub fn put_block(&self, block: &Block, first: bool, batch: &mut WriteBatch) -> DbResult<()> {
2024-10-10 14:09:31 +03:00
let cf_block = self.block_column();
2024-12-09 03:59:23 +01:00
if !first {
2024-12-09 04:01:20 +01:00
let last_curr_block = self.get_meta_last_block_in_db()?;
2024-10-10 14:09:31 +03:00
2025-09-02 11:06:41 +03:00
if block.header.block_id > last_curr_block {
self.put_meta_last_block_in_db_batch(block.header.block_id, batch)?;
self.put_meta_latest_block_hash_batch(block.header.hash, batch)?;
2024-12-09 03:59:23 +01:00
}
2024-10-10 14:09:31 +03:00
}
2026-01-27 01:20:17 -03:00
batch.put_cf(
&cf_block,
borsh::to_vec(&block.header.block_id).map_err(|err| {
DbError::borsh_cast_message(err, Some("Failed to serialize block id".to_string()))
})?,
borsh::to_vec(block).map_err(|err| {
2026-01-27 01:20:17 -03:00
DbError::borsh_cast_message(err, Some("Failed to serialize block data".to_string()))
})?,
);
2024-10-10 14:09:31 +03:00
Ok(())
}
pub fn get_block(&self, block_id: u64) -> DbResult<Block> {
2024-10-10 14:09:31 +03:00
let cf_block = self.block_column();
let res = self
.db
2025-09-25 11:53:42 +03:00
.get_cf(
&cf_block,
borsh::to_vec(&block_id).map_err(|err| {
DbError::borsh_cast_message(
err,
Some("Failed to serialize block id".to_string()),
)
})?,
)
2024-10-10 14:09:31 +03:00
.map_err(|rerr| DbError::rocksdb_cast_message(rerr, None))?;
if let Some(data) = res {
Ok(borsh::from_slice::<Block>(&data).map_err(|serr| {
DbError::borsh_cast_message(
serr,
Some("Failed to deserialize block data".to_string()),
)
})?)
2024-10-10 14:09:31 +03:00
} else {
Err(DbError::db_interaction_error(
"Block on this id not found".to_string(),
2026-01-27 16:03:21 -03:00
))
}
}
/// Loads the persisted NSSA state from its column family.
///
/// # Errors
///
/// Returns a `DbError` if the state has never been stored or cannot be
/// decoded. (Error messages previously copy-pasted from `get_block`
/// incorrectly referred to block ids and block data.)
pub fn get_nssa_state(&self) -> DbResult<V02State> {
    let cf_nssa_state = self.nssa_state_column();
    let res = self
        .db
        .get_cf(
            &cf_nssa_state,
            borsh::to_vec(&DB_NSSA_STATE_KEY).map_err(|err| {
                DbError::borsh_cast_message(
                    err,
                    Some("Failed to serialize DB_NSSA_STATE_KEY".to_string()),
                )
            })?,
        )
        .map_err(|rerr| DbError::rocksdb_cast_message(rerr, None))?;
    if let Some(data) = res {
        Ok(borsh::from_slice::<V02State>(&data).map_err(|serr| {
            DbError::borsh_cast_message(
                serr,
                Some("Failed to deserialize NSSA state".to_string()),
            )
        })?)
    } else {
        Err(DbError::db_interaction_error(
            "NSSA state not found".to_string(),
        ))
    }
}
2025-04-02 12:16:02 +03:00
/// Removes the block stored under `block_id`.
///
/// # Errors
///
/// Returns a `DbError` if no block exists for this id or RocksDB fails.
pub fn delete_block(&self, block_id: u64) -> DbResult<()> {
    let cf_block = self.block_column();
    let key = borsh::to_vec(&block_id).map_err(|err| {
        DbError::borsh_cast_message(err, Some("Failed to serialize block id".to_string()))
    })?;
    // Existence check first, so a missing id surfaces as an explicit error
    // rather than a silent no-op delete.
    let existing = self
        .db
        .get_cf(&cf_block, &key)
        .map_err(|rerr| DbError::rocksdb_cast_message(rerr, None))?;
    match existing {
        None => Err(DbError::db_interaction_error(
            "Block on this id not found".to_string(),
        )),
        Some(_) => {
            self.db
                .delete_cf(&cf_block, key)
                .map_err(|rerr| DbError::rocksdb_cast_message(rerr, None))?;
            Ok(())
        }
    }
}
/// Iterates over every stored block in key order, deserializing each entry.
///
/// Each item is a `DbResult`, so iteration failures and decode failures are
/// reported per element instead of aborting the whole scan.
pub fn get_all_blocks(&self) -> impl Iterator<Item = DbResult<Block>> {
    let cf_block = self.block_column();
    let entries = self.db.iterator_cf(&cf_block, rocksdb::IteratorMode::Start);
    entries.map(|entry| {
        let (_key, value) = entry.map_err(|rerr| {
            DbError::rocksdb_cast_message(
                rerr,
                Some("Failed to get key value pair".to_string()),
            )
        })?;
        borsh::from_slice::<Block>(&value).map_err(|err| {
            DbError::borsh_cast_message(
                err,
                Some("Failed to deserialize block data".to_string()),
            )
        })
    })
}
2026-01-27 01:20:17 -03:00
pub fn atomic_update(&self, block: &Block, state: &V02State) -> DbResult<()> {
2026-01-27 01:20:17 -03:00
let block_id = block.header.block_id;
let mut batch = WriteBatch::default();
self.put_block(block, false, &mut batch)?;
self.put_nssa_state_in_db(state, &mut batch)?;
self.db.write(batch).map_err(|rerr| {
DbError::rocksdb_cast_message(
rerr,
Some(format!("Failed to udpate db with block {block_id}")),
)
})
}
2024-10-10 14:09:31 +03:00
}