Merge c3723776c6e7f74f05d0993ee8eae3eba52a01cf into fb083ce91ec10487fc17137a48c47f4322f9c768

This commit is contained in:
Pravdyvy 2026-03-23 17:21:51 +00:00 committed by GitHub
commit b0d1c7b3a4
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
7 changed files with 1667 additions and 1451 deletions

107
Cargo.lock generated
View File

@ -629,9 +629,9 @@ checksum = "7c02d123df017efcdfbd739ef81735b36c5ba83ec3c59c80a9d7ecc718f92e50"
[[package]]
name = "astral-tokio-tar"
version = "0.5.6"
version = "0.6.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ec179a06c1769b1e42e1e2cbe74c7dcdb3d6383c838454d063eaac5bbb7ebbe5"
checksum = "3c23f3af104b40a3430ccb90ed5f7bd877a8dc5c26fc92fde51a22b40890dcf9"
dependencies = [
"filetime",
"futures-core",
@ -1939,7 +1939,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7ab67060fc6b8ef687992d439ca0fa36e7ed17e9a0b16b25b601e8757df720de"
dependencies = [
"data-encoding",
"syn 2.0.117",
"syn 1.0.109",
]
[[package]]
@ -5930,7 +5930,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8a56d757972c98b346a9b766e3f02746cde6dd1cd1d1d563472929fdd74bec4d"
dependencies = [
"anyhow",
"itertools 0.14.0",
"itertools 0.10.5",
"proc-macro2",
"quote",
"syn 2.0.117",
@ -5943,7 +5943,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "27c6023962132f4b30eb4c172c91ce92d933da334c59c23cddee82358ddafb0b"
dependencies = [
"anyhow",
"itertools 0.14.0",
"itertools 0.10.5",
"proc-macro2",
"quote",
"syn 2.0.117",
@ -6024,7 +6024,7 @@ dependencies = [
"once_cell",
"socket2",
"tracing",
"windows-sys 0.60.2",
"windows-sys 0.52.0",
]
[[package]]
@ -6919,7 +6919,7 @@ dependencies = [
"security-framework",
"security-framework-sys",
"webpki-root-certs 0.26.11",
"windows-sys 0.59.0",
"windows-sys 0.52.0",
]
[[package]]
@ -6930,9 +6930,9 @@ checksum = "f87165f0995f63a9fbeea62b64d10b4d9d8e78ec6d7d51fb2125fda7bb36788f"
[[package]]
name = "rustls-webpki"
version = "0.103.9"
version = "0.103.10"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d7df23109aa6c1567d1c575b9952556388da57401e4ace1d15f79eedad0d8f53"
checksum = "df33b2b81ac578cabaf06b89b0631153a3f416b0a886e8a7a1707fb51abbd1ef"
dependencies = [
"ring",
"rustls-pki-types",
@ -7842,9 +7842,9 @@ dependencies = [
[[package]]
name = "testcontainers"
version = "0.27.1"
version = "0.27.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c1c0624faaa317c56d6d19136580be889677259caf5c897941c6f446b4655068"
checksum = "0bd36b06a2a6c0c3c81a83be1ab05fe86460d054d4d51bf513bc56b3e15bdc22"
dependencies = [
"astral-tokio-tar",
"async-trait",
@ -9038,24 +9038,6 @@ dependencies = [
"windows-targets 0.52.6",
]
[[package]]
name = "windows-sys"
version = "0.59.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1e38bc4d79ed67fd075bcc251a1c39b32a1776bbe92e5bef1f0bf1f8c531853b"
dependencies = [
"windows-targets 0.52.6",
]
[[package]]
name = "windows-sys"
version = "0.60.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f2f500e4d28234f72040990ec9d39e3a6b950f9f22d3dba18416c35882612bcb"
dependencies = [
"windows-targets 0.53.5",
]
[[package]]
name = "windows-sys"
version = "0.61.2"
@ -9089,30 +9071,13 @@ dependencies = [
"windows_aarch64_gnullvm 0.52.6",
"windows_aarch64_msvc 0.52.6",
"windows_i686_gnu 0.52.6",
"windows_i686_gnullvm 0.52.6",
"windows_i686_gnullvm",
"windows_i686_msvc 0.52.6",
"windows_x86_64_gnu 0.52.6",
"windows_x86_64_gnullvm 0.52.6",
"windows_x86_64_msvc 0.52.6",
]
[[package]]
name = "windows-targets"
version = "0.53.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4945f9f551b88e0d65f3db0bc25c33b8acea4d9e41163edf90dcd0b19f9069f3"
dependencies = [
"windows-link",
"windows_aarch64_gnullvm 0.53.1",
"windows_aarch64_msvc 0.53.1",
"windows_i686_gnu 0.53.1",
"windows_i686_gnullvm 0.53.1",
"windows_i686_msvc 0.53.1",
"windows_x86_64_gnu 0.53.1",
"windows_x86_64_gnullvm 0.53.1",
"windows_x86_64_msvc 0.53.1",
]
[[package]]
name = "windows_aarch64_gnullvm"
version = "0.42.2"
@ -9125,12 +9090,6 @@ version = "0.52.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "32a4622180e7a0ec044bb555404c800bc9fd9ec262ec147edd5989ccd0c02cd3"
[[package]]
name = "windows_aarch64_gnullvm"
version = "0.53.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a9d8416fa8b42f5c947f8482c43e7d89e73a173cead56d044f6a56104a6d1b53"
[[package]]
name = "windows_aarch64_msvc"
version = "0.42.2"
@ -9143,12 +9102,6 @@ version = "0.52.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "09ec2a7bb152e2252b53fa7803150007879548bc709c039df7627cabbd05d469"
[[package]]
name = "windows_aarch64_msvc"
version = "0.53.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b9d782e804c2f632e395708e99a94275910eb9100b2114651e04744e9b125006"
[[package]]
name = "windows_i686_gnu"
version = "0.42.2"
@ -9161,24 +9114,12 @@ version = "0.52.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8e9b5ad5ab802e97eb8e295ac6720e509ee4c243f69d781394014ebfe8bbfa0b"
[[package]]
name = "windows_i686_gnu"
version = "0.53.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "960e6da069d81e09becb0ca57a65220ddff016ff2d6af6a223cf372a506593a3"
[[package]]
name = "windows_i686_gnullvm"
version = "0.52.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0eee52d38c090b3caa76c563b86c3a4bd71ef1a819287c19d586d7334ae8ed66"
[[package]]
name = "windows_i686_gnullvm"
version = "0.53.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "fa7359d10048f68ab8b09fa71c3daccfb0e9b559aed648a8f95469c27057180c"
[[package]]
name = "windows_i686_msvc"
version = "0.42.2"
@ -9191,12 +9132,6 @@ version = "0.52.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "240948bc05c5e7c6dabba28bf89d89ffce3e303022809e73deaefe4f6ec56c66"
[[package]]
name = "windows_i686_msvc"
version = "0.53.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1e7ac75179f18232fe9c285163565a57ef8d3c89254a30685b57d83a38d326c2"
[[package]]
name = "windows_x86_64_gnu"
version = "0.42.2"
@ -9209,12 +9144,6 @@ version = "0.52.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "147a5c80aabfbf0c7d901cb5895d1de30ef2907eb21fbbab29ca94c5b08b1a78"
[[package]]
name = "windows_x86_64_gnu"
version = "0.53.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9c3842cdd74a865a8066ab39c8a7a473c0778a3f29370b5fd6b4b9aa7df4a499"
[[package]]
name = "windows_x86_64_gnullvm"
version = "0.42.2"
@ -9227,12 +9156,6 @@ version = "0.52.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "24d5b23dc417412679681396f2b49f3de8c1473deb516bd34410872eff51ed0d"
[[package]]
name = "windows_x86_64_gnullvm"
version = "0.53.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0ffa179e2d07eee8ad8f57493436566c7cc30ac536a3379fdf008f47f6bb7ae1"
[[package]]
name = "windows_x86_64_msvc"
version = "0.42.2"
@ -9245,12 +9168,6 @@ version = "0.52.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "589f6da84c646204747d1270a2a5661ea66ed1cced2631d546fdfb155959f9ec"
[[package]]
name = "windows_x86_64_msvc"
version = "0.53.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d6bbff5f0aada427a1e5a6da5f1f98158182f26556f345ac9e04d36d0ebed650"
[[package]]
name = "winnow"
version = "0.7.15"

File diff suppressed because it is too large Load Diff

688
storage/src/indexer/mod.rs Normal file
View File

@ -0,0 +1,688 @@
use std::{path::Path, sync::Arc};
use common::block::Block;
use nssa::V03State;
use rocksdb::{
BoundColumnFamily, ColumnFamilyDescriptor, DBWithThreadMode, MultiThreaded, Options,
};
use crate::error::DbError;
pub mod read_multiple;
pub mod read_once;
pub mod write_atomic;
pub mod write_non_atomic;
/// Maximal size of stored blocks in base.
///
/// Used to control db size.
///
/// Currently effectively unbounded (`usize::MAX`), i.e. no eviction happens.
pub const BUFF_SIZE_ROCKSDB: usize = usize::MAX;
/// Size of stored blocks cache in memory.
///
/// Keeping small to not run out of memory.
pub const CACHE_SIZE: usize = 1000;
/// Key base for storing metainformation about id of first block in db.
pub const DB_META_FIRST_BLOCK_IN_DB_KEY: &str = "first_block_in_db";
/// Key base for storing metainformation about id of last current block in db.
pub const DB_META_LAST_BLOCK_IN_DB_KEY: &str = "last_block_in_db";
/// Key base for storing metainformation about id of last observed L1 lib header in db.
pub const DB_META_LAST_OBSERVED_L1_LIB_HEADER_ID_IN_DB_KEY: &str =
    "last_observed_l1_lib_header_in_db";
/// Key base for storing metainformation which describe if first block has been set.
pub const DB_META_FIRST_BLOCK_SET_KEY: &str = "first_block_set";
/// Key base for storing metainformation about the last breakpoint.
pub const DB_META_LAST_BREAKPOINT_ID: &str = "last_breakpoint_id";
/// Interval between state breakpoints, in blocks.
///
/// Stored as `u8`; call sites widen it via `u64::from` before doing arithmetic.
pub const BREAKPOINT_INTERVAL: u8 = 100;
/// Name of block column family.
pub const CF_BLOCK_NAME: &str = "cf_block";
/// Name of meta column family.
pub const CF_META_NAME: &str = "cf_meta";
/// Name of breakpoint column family.
pub const CF_BREAKPOINT_NAME: &str = "cf_breakpoint";
/// Name of hash to id map column family.
pub const CF_HASH_TO_ID: &str = "cf_hash_to_id";
/// Name of tx hash to id map column family.
pub const CF_TX_TO_ID: &str = "cf_tx_to_id";
/// Name of account meta column family.
pub const CF_ACC_META: &str = "cf_acc_meta";
/// Name of account id to tx hash map column family.
pub const CF_ACC_TO_TX: &str = "cf_acc_to_tx";
/// Convenience alias: every DB operation in this module returns `DbError` on failure.
pub type DbResult<T> = Result<T, DbError>;
/// Handle to the indexer's RocksDB instance.
///
/// Wraps a multi-threaded `rocksdb` database; all reads/writes in this module
/// go through the column families declared by the `CF_*` constants above.
pub struct RocksDBIO {
    pub db: DBWithThreadMode<MultiThreaded>,
}
impl RocksDBIO {
    /// Opens the database at `path`, creating it and all column families if missing.
    ///
    /// On first creation (first-block flag unset) the database is seeded:
    /// genesis block metadata is written and breakpoint 0 is stored from
    /// `initial_state`. Reopening an already-seeded database leaves it untouched.
    ///
    /// # Errors
    /// Returns `DbError::RocksDbError` if the database cannot be opened, or any
    /// error from the seeding writes.
    pub fn open_or_create(
        path: &Path,
        genesis_block: &Block,
        initial_state: &V03State,
    ) -> DbResult<Self> {
        let mut cf_opts = Options::default();
        cf_opts.set_max_write_buffer_number(16);
        // ToDo: Add more column families for different data
        let cfb = ColumnFamilyDescriptor::new(CF_BLOCK_NAME, cf_opts.clone());
        let cfmeta = ColumnFamilyDescriptor::new(CF_META_NAME, cf_opts.clone());
        let cfbreakpoint = ColumnFamilyDescriptor::new(CF_BREAKPOINT_NAME, cf_opts.clone());
        let cfhti = ColumnFamilyDescriptor::new(CF_HASH_TO_ID, cf_opts.clone());
        let cftti = ColumnFamilyDescriptor::new(CF_TX_TO_ID, cf_opts.clone());
        let cfameta = ColumnFamilyDescriptor::new(CF_ACC_META, cf_opts.clone());
        let cfatt = ColumnFamilyDescriptor::new(CF_ACC_TO_TX, cf_opts.clone());
        let mut db_opts = Options::default();
        db_opts.create_missing_column_families(true);
        db_opts.create_if_missing(true);
        let db = DBWithThreadMode::<MultiThreaded>::open_cf_descriptors(
            &db_opts,
            path,
            vec![cfb, cfmeta, cfbreakpoint, cfhti, cftti, cfameta, cfatt],
        )
        .map_err(|err| DbError::RocksDbError {
            error: err,
            additional_info: Some("Failed to open or create DB".to_owned()),
        })?;
        let dbio = Self { db };
        let is_start_set = dbio.get_meta_is_first_block_set()?;
        if !is_start_set {
            // Fresh database: seed genesis metadata before anything else reads it.
            let block_id = genesis_block.header.block_id;
            dbio.put_meta_last_block_in_db(block_id)?;
            dbio.put_meta_first_block_in_db_batch(genesis_block)?;
            dbio.put_meta_is_first_block_set()?;
            // First breakpoint setup
            dbio.put_breakpoint(0, initial_state)?;
            dbio.put_meta_last_breakpoint_id(0)?;
        }
        Ok(dbio)
    }

    /// Destroys the on-disk database at `path`.
    ///
    /// # Errors
    /// Returns a `DbError` if RocksDB fails to remove the database files.
    pub fn destroy(path: &Path) -> DbResult<()> {
        let db_opts = Options::default();
        DBWithThreadMode::<MultiThreaded>::destroy(&db_opts, path)
            .map_err(|rerr| DbError::rocksdb_cast_message(rerr, None))
    }

    // Columns
    //
    // Each getter panics if its column family is absent; the families are always
    // created in `open_or_create`, so a panic here indicates a broken invariant.

    /// Handle to the meta column family.
    pub fn meta_column(&self) -> Arc<BoundColumnFamily<'_>> {
        self.db
            .cf_handle(CF_META_NAME)
            .expect("Meta column should exist")
    }

    /// Handle to the block column family.
    pub fn block_column(&self) -> Arc<BoundColumnFamily<'_>> {
        self.db
            .cf_handle(CF_BLOCK_NAME)
            .expect("Block column should exist")
    }

    /// Handle to the breakpoint column family.
    pub fn breakpoint_column(&self) -> Arc<BoundColumnFamily<'_>> {
        self.db
            .cf_handle(CF_BREAKPOINT_NAME)
            .expect("Breakpoint column should exist")
    }

    /// Handle to the block-hash → block-id column family.
    pub fn hash_to_id_column(&self) -> Arc<BoundColumnFamily<'_>> {
        self.db
            .cf_handle(CF_HASH_TO_ID)
            .expect("Hash to id map column should exist")
    }

    /// Handle to the tx-hash → block-id column family.
    pub fn tx_hash_to_id_column(&self) -> Arc<BoundColumnFamily<'_>> {
        self.db
            .cf_handle(CF_TX_TO_ID)
            .expect("Tx hash to id map column should exist")
    }

    /// Handle to the account-id → tx-hash column family.
    pub fn account_id_to_tx_hash_column(&self) -> Arc<BoundColumnFamily<'_>> {
        self.db
            .cf_handle(CF_ACC_TO_TX)
            .expect("Account id to tx map column should exist")
    }

    /// Handle to the account meta column family.
    pub fn account_meta_column(&self) -> Arc<BoundColumnFamily<'_>> {
        self.db
            .cf_handle(CF_ACC_META)
            .expect("Account meta column should exist")
    }

    // State

    /// Reconstructs the state as of block `block_id`.
    ///
    /// Loads the closest breakpoint at or before `block_id`, then replays every
    /// block after the breakpoint up to and including `block_id`, running the
    /// stateless check and state-transition check for each transaction.
    ///
    /// # Errors
    /// Fails if `block_id` is beyond the last stored block, or if any replayed
    /// transaction fails its pre-check or execution.
    pub fn calculate_state_for_id(&self, block_id: u64) -> DbResult<V03State> {
        let last_block = self.get_meta_last_block_in_db()?;
        if block_id <= last_block {
            let br_id = closest_breakpoint_id(block_id);
            let mut breakpoint = self.get_breakpoint(br_id)?;
            // ToDo: update it to handle any genesis id
            // right now works correctly only if genesis_id < BREAKPOINT_INTERVAL
            let start = if br_id != 0 {
                // Breakpoint k is the state right after block k * BREAKPOINT_INTERVAL.
                u64::from(BREAKPOINT_INTERVAL)
                    .checked_mul(br_id)
                    .expect("Reached maximum breakpoint id")
            } else {
                // Breakpoint 0 is the genesis state, so replay begins after the
                // first block in the db (see the ToDo above for the assumption).
                self.get_meta_first_block_in_db()?
            };
            for block in self.get_block_batch_seq(
                start.checked_add(1).expect("Will be lesser that u64::MAX")..=block_id,
            )? {
                for transaction in block.body.transactions {
                    transaction
                        .transaction_stateless_check()
                        .map_err(|err| {
                            DbError::db_interaction_error(format!(
                                "transaction pre check failed with err {err:?}"
                            ))
                        })?
                        .execute_check_on_state(&mut breakpoint)
                        .map_err(|err| {
                            DbError::db_interaction_error(format!(
                                "transaction execution failed with err {err:?}"
                            ))
                        })?;
                }
            }
            Ok(breakpoint)
        } else {
            Err(DbError::db_interaction_error(
                "Block on this id not found".to_owned(),
            ))
        }
    }

    /// Reconstructs the state as of the last stored block.
    ///
    /// # Errors
    /// Propagates any error from [`Self::calculate_state_for_id`].
    pub fn final_state(&self) -> DbResult<V03State> {
        self.calculate_state_for_id(self.get_meta_last_block_in_db()?)
    }
}
/// Id of the latest breakpoint at or before `block_id`.
///
/// Block ids `(k * BREAKPOINT_INTERVAL, (k + 1) * BREAKPOINT_INTERVAL]` all map
/// to breakpoint `k`; ids 0 and 1 both map to breakpoint 0 (the saturating
/// subtraction keeps id 0 from underflowing).
fn closest_breakpoint_id(block_id: u64) -> u64 {
    let interval = u64::from(BREAKPOINT_INTERVAL);
    let shifted = block_id.saturating_sub(1);
    shifted
        .checked_div(interval)
        .expect("Breakpoint interval is not zero")
}
#[expect(clippy::shadow_unrelated, reason = "Fine for tests")]
#[cfg(test)]
mod tests {
    use nssa::{AccountId, PublicKey};
    use tempfile::tempdir;

    use super::*;

    /// Dummy genesis block: id 1, no parent hash, no transactions.
    fn genesis_block() -> Block {
        common::test_utils::produce_dummy_block(1, None, vec![])
    }

    /// Deterministic signing key for the first test account.
    fn acc1_sign_key() -> nssa::PrivateKey {
        nssa::PrivateKey::try_new([1; 32]).unwrap()
    }

    /// Deterministic signing key for the second test account.
    fn acc2_sign_key() -> nssa::PrivateKey {
        nssa::PrivateKey::try_new([2; 32]).unwrap()
    }

    fn acc1() -> AccountId {
        AccountId::from(&PublicKey::new_from_private_key(&acc1_sign_key()))
    }

    fn acc2() -> AccountId {
        AccountId::from(&PublicKey::new_from_private_key(&acc2_sign_key()))
    }

    // A freshly created DB is seeded with genesis metadata and breakpoint 0,
    // and final_state equals the breakpoint (no blocks to replay yet).
    #[test]
    fn start_db() {
        let temp_dir = tempdir().unwrap();
        let temdir_path = temp_dir.path();
        let dbio = RocksDBIO::open_or_create(
            temdir_path,
            &genesis_block(),
            &nssa::V03State::new_with_genesis_accounts(&[(acc1(), 10000), (acc2(), 20000)], &[]),
        )
        .unwrap();
        let last_id = dbio.get_meta_last_block_in_db().unwrap();
        let first_id = dbio.get_meta_first_block_in_db().unwrap();
        let is_first_set = dbio.get_meta_is_first_block_set().unwrap();
        let last_observed_l1_header = dbio.get_meta_last_observed_l1_lib_header_in_db().unwrap();
        let last_br_id = dbio.get_meta_last_breakpoint_id().unwrap();
        let last_block = dbio.get_block(1).unwrap().unwrap();
        let breakpoint = dbio.get_breakpoint(0).unwrap();
        let final_state = dbio.final_state().unwrap();
        assert_eq!(last_id, 1);
        assert_eq!(first_id, 1);
        assert_eq!(last_observed_l1_header, None);
        assert!(is_first_set);
        assert_eq!(last_br_id, 0);
        assert_eq!(last_block.header.hash, genesis_block().header.hash);
        assert_eq!(
            breakpoint.get_account_by_id(acc1()),
            final_state.get_account_by_id(acc1())
        );
        assert_eq!(
            breakpoint.get_account_by_id(acc2()),
            final_state.get_account_by_id(acc2())
        );
    }

    // Inserting one block with a 1-token transfer updates last-block metadata,
    // records the L1 header, and shifts exactly 1 token between the accounts.
    #[test]
    fn one_block_insertion() {
        let temp_dir = tempdir().unwrap();
        let temdir_path = temp_dir.path();
        let dbio = RocksDBIO::open_or_create(
            temdir_path,
            &genesis_block(),
            &nssa::V03State::new_with_genesis_accounts(&[(acc1(), 10000), (acc2(), 20000)], &[]),
        )
        .unwrap();
        let prev_hash = genesis_block().header.hash;
        let from = acc1();
        let to = acc2();
        let sign_key = acc1_sign_key();
        let transfer_tx =
            common::test_utils::create_transaction_native_token_transfer(from, 0, to, 1, &sign_key);
        let block = common::test_utils::produce_dummy_block(2, Some(prev_hash), vec![transfer_tx]);
        dbio.put_block(&block, [1; 32]).unwrap();
        let last_id = dbio.get_meta_last_block_in_db().unwrap();
        let first_id = dbio.get_meta_first_block_in_db().unwrap();
        let last_observed_l1_header = dbio
            .get_meta_last_observed_l1_lib_header_in_db()
            .unwrap()
            .unwrap();
        let is_first_set = dbio.get_meta_is_first_block_set().unwrap();
        let last_br_id = dbio.get_meta_last_breakpoint_id().unwrap();
        let last_block = dbio.get_block(last_id).unwrap().unwrap();
        let breakpoint = dbio.get_breakpoint(0).unwrap();
        let final_state = dbio.final_state().unwrap();
        assert_eq!(last_id, 2);
        assert_eq!(first_id, 1);
        assert_eq!(last_observed_l1_header, [1; 32]);
        assert!(is_first_set);
        assert_eq!(last_br_id, 0);
        assert_ne!(last_block.header.hash, genesis_block().header.hash);
        assert_eq!(
            breakpoint.get_account_by_id(acc1()).balance
                - final_state.get_account_by_id(acc1()).balance,
            1
        );
        assert_eq!(
            final_state.get_account_by_id(acc2()).balance
                - breakpoint.get_account_by_id(acc2()).balance,
            1
        );
    }

    // Inserting BREAKPOINT_INTERVAL blocks (ids 2..=101) triggers creation of
    // breakpoint 1; balances differ by 100 vs breakpoint 0 and by 1 vs breakpoint 1.
    #[test]
    fn new_breakpoint() {
        let temp_dir = tempdir().unwrap();
        let temdir_path = temp_dir.path();
        let dbio = RocksDBIO::open_or_create(
            temdir_path,
            &genesis_block(),
            &nssa::V03State::new_with_genesis_accounts(&[(acc1(), 10000), (acc2(), 20000)], &[]),
        )
        .unwrap();
        let from = acc1();
        let to = acc2();
        let sign_key = acc1_sign_key();
        for i in 1..=BREAKPOINT_INTERVAL {
            let last_id = dbio.get_meta_last_block_in_db().unwrap();
            let last_block = dbio.get_block(last_id).unwrap().unwrap();
            let prev_hash = last_block.header.hash;
            let transfer_tx = common::test_utils::create_transaction_native_token_transfer(
                from,
                (i - 1).into(),
                to,
                1,
                &sign_key,
            );
            let block = common::test_utils::produce_dummy_block(
                (i + 1).into(),
                Some(prev_hash),
                vec![transfer_tx],
            );
            dbio.put_block(&block, [i; 32]).unwrap();
        }
        let last_id = dbio.get_meta_last_block_in_db().unwrap();
        let first_id = dbio.get_meta_first_block_in_db().unwrap();
        let is_first_set = dbio.get_meta_is_first_block_set().unwrap();
        let last_br_id = dbio.get_meta_last_breakpoint_id().unwrap();
        let last_block = dbio.get_block(last_id).unwrap().unwrap();
        let prev_breakpoint = dbio.get_breakpoint(0).unwrap();
        let breakpoint = dbio.get_breakpoint(1).unwrap();
        let final_state = dbio.final_state().unwrap();
        assert_eq!(last_id, 101);
        assert_eq!(first_id, 1);
        assert!(is_first_set);
        assert_eq!(last_br_id, 1);
        assert_ne!(last_block.header.hash, genesis_block().header.hash);
        assert_eq!(
            prev_breakpoint.get_account_by_id(acc1()).balance
                - final_state.get_account_by_id(acc1()).balance,
            100
        );
        assert_eq!(
            final_state.get_account_by_id(acc2()).balance
                - prev_breakpoint.get_account_by_id(acc2()).balance,
            100
        );
        assert_eq!(
            breakpoint.get_account_by_id(acc1()).balance
                - final_state.get_account_by_id(acc1()).balance,
            1
        );
        assert_eq!(
            final_state.get_account_by_id(acc2()).balance
                - breakpoint.get_account_by_id(acc2()).balance,
            1
        );
    }

    // Block-hash → id and tx-hash → id lookups resolve to the block that
    // contained the hash.
    #[test]
    fn simple_maps() {
        let temp_dir = tempdir().unwrap();
        let temdir_path = temp_dir.path();
        let dbio = RocksDBIO::open_or_create(
            temdir_path,
            &genesis_block(),
            &nssa::V03State::new_with_genesis_accounts(&[(acc1(), 10000), (acc2(), 20000)], &[]),
        )
        .unwrap();
        let from = acc1();
        let to = acc2();
        let sign_key = acc1_sign_key();
        let last_id = dbio.get_meta_last_block_in_db().unwrap();
        let last_block = dbio.get_block(last_id).unwrap().unwrap();
        let prev_hash = last_block.header.hash;
        let transfer_tx =
            common::test_utils::create_transaction_native_token_transfer(from, 0, to, 1, &sign_key);
        let block = common::test_utils::produce_dummy_block(2, Some(prev_hash), vec![transfer_tx]);
        let control_hash1 = block.header.hash;
        dbio.put_block(&block, [1; 32]).unwrap();
        let last_id = dbio.get_meta_last_block_in_db().unwrap();
        let last_block = dbio.get_block(last_id).unwrap().unwrap();
        let prev_hash = last_block.header.hash;
        let transfer_tx =
            common::test_utils::create_transaction_native_token_transfer(from, 1, to, 1, &sign_key);
        let block = common::test_utils::produce_dummy_block(3, Some(prev_hash), vec![transfer_tx]);
        let control_hash2 = block.header.hash;
        dbio.put_block(&block, [2; 32]).unwrap();
        let last_id = dbio.get_meta_last_block_in_db().unwrap();
        let last_block = dbio.get_block(last_id).unwrap().unwrap();
        let prev_hash = last_block.header.hash;
        let transfer_tx =
            common::test_utils::create_transaction_native_token_transfer(from, 2, to, 1, &sign_key);
        let control_tx_hash1 = transfer_tx.hash();
        let block = common::test_utils::produce_dummy_block(4, Some(prev_hash), vec![transfer_tx]);
        dbio.put_block(&block, [3; 32]).unwrap();
        let last_id = dbio.get_meta_last_block_in_db().unwrap();
        let last_block = dbio.get_block(last_id).unwrap().unwrap();
        let prev_hash = last_block.header.hash;
        let transfer_tx =
            common::test_utils::create_transaction_native_token_transfer(from, 3, to, 1, &sign_key);
        let control_tx_hash2 = transfer_tx.hash();
        let block = common::test_utils::produce_dummy_block(5, Some(prev_hash), vec![transfer_tx]);
        dbio.put_block(&block, [4; 32]).unwrap();
        let control_block_id1 = dbio.get_block_id_by_hash(control_hash1.0).unwrap().unwrap();
        let control_block_id2 = dbio.get_block_id_by_hash(control_hash2.0).unwrap().unwrap();
        let control_block_id3 = dbio
            .get_block_id_by_tx_hash(control_tx_hash1.0)
            .unwrap()
            .unwrap();
        let control_block_id4 = dbio
            .get_block_id_by_tx_hash(control_tx_hash2.0)
            .unwrap()
            .unwrap();
        assert_eq!(control_block_id1, 2);
        assert_eq!(control_block_id2, 3);
        assert_eq!(control_block_id3, 4);
        assert_eq!(control_block_id4, 5);
    }

    // get_block_batch paginates backwards from `before`; get_block_batch_seq
    // returns blocks for an explicit ascending id sequence.
    #[test]
    fn block_batch() {
        let temp_dir = tempdir().unwrap();
        let temdir_path = temp_dir.path();
        let mut block_res = vec![];
        let dbio = RocksDBIO::open_or_create(
            temdir_path,
            &genesis_block(),
            &nssa::V03State::new_with_genesis_accounts(&[(acc1(), 10000), (acc2(), 20000)], &[]),
        )
        .unwrap();
        let from = acc1();
        let to = acc2();
        let sign_key = acc1_sign_key();
        let last_id = dbio.get_meta_last_block_in_db().unwrap();
        let last_block = dbio.get_block(last_id).unwrap().unwrap();
        let prev_hash = last_block.header.hash;
        let transfer_tx =
            common::test_utils::create_transaction_native_token_transfer(from, 0, to, 1, &sign_key);
        let block = common::test_utils::produce_dummy_block(2, Some(prev_hash), vec![transfer_tx]);
        block_res.push(block.clone());
        dbio.put_block(&block, [1; 32]).unwrap();
        let last_id = dbio.get_meta_last_block_in_db().unwrap();
        let last_block = dbio.get_block(last_id).unwrap().unwrap();
        let prev_hash = last_block.header.hash;
        let transfer_tx =
            common::test_utils::create_transaction_native_token_transfer(from, 1, to, 1, &sign_key);
        let block = common::test_utils::produce_dummy_block(3, Some(prev_hash), vec![transfer_tx]);
        block_res.push(block.clone());
        dbio.put_block(&block, [2; 32]).unwrap();
        let last_id = dbio.get_meta_last_block_in_db().unwrap();
        let last_block = dbio.get_block(last_id).unwrap().unwrap();
        let prev_hash = last_block.header.hash;
        let transfer_tx =
            common::test_utils::create_transaction_native_token_transfer(from, 2, to, 1, &sign_key);
        let block = common::test_utils::produce_dummy_block(4, Some(prev_hash), vec![transfer_tx]);
        block_res.push(block.clone());
        dbio.put_block(&block, [3; 32]).unwrap();
        let last_id = dbio.get_meta_last_block_in_db().unwrap();
        let last_block = dbio.get_block(last_id).unwrap().unwrap();
        let prev_hash = last_block.header.hash;
        let transfer_tx =
            common::test_utils::create_transaction_native_token_transfer(from, 3, to, 1, &sign_key);
        let block = common::test_utils::produce_dummy_block(5, Some(prev_hash), vec![transfer_tx]);
        block_res.push(block.clone());
        dbio.put_block(&block, [4; 32]).unwrap();
        let block_hashes_mem: Vec<[u8; 32]> =
            block_res.into_iter().map(|bl| bl.header.hash.0).collect();
        // Get blocks before ID 6 (i.e., starting from 5 going backwards), limit 4
        // This should return blocks 5, 4, 3, 2 in descending order
        let mut batch_res = dbio.get_block_batch(Some(6), 4).unwrap();
        batch_res.reverse(); // Reverse to match ascending order for comparison
        let block_hashes_db: Vec<[u8; 32]> =
            batch_res.into_iter().map(|bl| bl.header.hash.0).collect();
        assert_eq!(block_hashes_mem, block_hashes_db);
        let block_hashes_mem_limited = &block_hashes_mem[1..];
        // Get blocks before ID 6, limit 3
        // This should return blocks 5, 4, 3 in descending order
        let mut batch_res_limited = dbio.get_block_batch(Some(6), 3).unwrap();
        batch_res_limited.reverse(); // Reverse to match ascending order for comparison
        let block_hashes_db_limited: Vec<[u8; 32]> = batch_res_limited
            .into_iter()
            .map(|bl| bl.header.hash.0)
            .collect();
        assert_eq!(block_hashes_mem_limited, block_hashes_db_limited.as_slice());
        let block_batch_seq = dbio.get_block_batch_seq(1..=5).unwrap();
        let block_batch_ids = block_batch_seq
            .into_iter()
            .map(|block| block.header.block_id)
            .collect::<Vec<_>>();
        assert_eq!(block_batch_ids, vec![1, 2, 3, 4, 5]);
    }

    // Account → transaction index returns an account's transactions in order,
    // honoring offset and limit.
    #[test]
    fn account_map() {
        let temp_dir = tempdir().unwrap();
        let temdir_path = temp_dir.path();
        let dbio = RocksDBIO::open_or_create(
            temdir_path,
            &genesis_block(),
            &nssa::V03State::new_with_genesis_accounts(&[(acc1(), 10000), (acc2(), 20000)], &[]),
        )
        .unwrap();
        let from = acc1();
        let to = acc2();
        let sign_key = acc1_sign_key();
        let mut tx_hash_res = vec![];
        let last_id = dbio.get_meta_last_block_in_db().unwrap();
        let last_block = dbio.get_block(last_id).unwrap().unwrap();
        let prev_hash = last_block.header.hash;
        let transfer_tx1 =
            common::test_utils::create_transaction_native_token_transfer(from, 0, to, 1, &sign_key);
        let transfer_tx2 =
            common::test_utils::create_transaction_native_token_transfer(from, 1, to, 1, &sign_key);
        tx_hash_res.push(transfer_tx1.hash().0);
        tx_hash_res.push(transfer_tx2.hash().0);
        let block = common::test_utils::produce_dummy_block(
            2,
            Some(prev_hash),
            vec![transfer_tx1, transfer_tx2],
        );
        dbio.put_block(&block, [1; 32]).unwrap();
        let last_id = dbio.get_meta_last_block_in_db().unwrap();
        let last_block = dbio.get_block(last_id).unwrap().unwrap();
        let prev_hash = last_block.header.hash;
        let transfer_tx1 =
            common::test_utils::create_transaction_native_token_transfer(from, 2, to, 1, &sign_key);
        let transfer_tx2 =
            common::test_utils::create_transaction_native_token_transfer(from, 3, to, 1, &sign_key);
        tx_hash_res.push(transfer_tx1.hash().0);
        tx_hash_res.push(transfer_tx2.hash().0);
        let block = common::test_utils::produce_dummy_block(
            3,
            Some(prev_hash),
            vec![transfer_tx1, transfer_tx2],
        );
        dbio.put_block(&block, [2; 32]).unwrap();
        let last_id = dbio.get_meta_last_block_in_db().unwrap();
        let last_block = dbio.get_block(last_id).unwrap().unwrap();
        let prev_hash = last_block.header.hash;
        let transfer_tx1 =
            common::test_utils::create_transaction_native_token_transfer(from, 4, to, 1, &sign_key);
        let transfer_tx2 =
            common::test_utils::create_transaction_native_token_transfer(from, 5, to, 1, &sign_key);
        tx_hash_res.push(transfer_tx1.hash().0);
        tx_hash_res.push(transfer_tx2.hash().0);
        let block = common::test_utils::produce_dummy_block(
            4,
            Some(prev_hash),
            vec![transfer_tx1, transfer_tx2],
        );
        dbio.put_block(&block, [3; 32]).unwrap();
        let last_id = dbio.get_meta_last_block_in_db().unwrap();
        let last_block = dbio.get_block(last_id).unwrap().unwrap();
        let prev_hash = last_block.header.hash;
        let transfer_tx =
            common::test_utils::create_transaction_native_token_transfer(from, 6, to, 1, &sign_key);
        tx_hash_res.push(transfer_tx.hash().0);
        let block = common::test_utils::produce_dummy_block(5, Some(prev_hash), vec![transfer_tx]);
        dbio.put_block(&block, [4; 32]).unwrap();
        let acc1_tx = dbio.get_acc_transactions(*acc1().value(), 0, 7).unwrap();
        let acc1_tx_hashes: Vec<[u8; 32]> = acc1_tx.into_iter().map(|tx| tx.hash().0).collect();
        assert_eq!(acc1_tx_hashes, tx_hash_res);
        let acc1_tx_limited = dbio.get_acc_transactions(*acc1().value(), 1, 4).unwrap();
        let acc1_tx_limited_hashes: Vec<[u8; 32]> =
            acc1_tx_limited.into_iter().map(|tx| tx.hash().0).collect();
        assert_eq!(acc1_tx_limited_hashes.as_slice(), &tx_hash_res[1..5]);
    }
}

View File

@ -0,0 +1,209 @@
use common::transaction::NSSATransaction;
use super::{Block, DbError, DbResult, RocksDBIO};
#[expect(clippy::multiple_inherent_impl, reason = "Readability")]
impl RocksDBIO {
pub fn get_block_batch(&self, before: Option<u64>, limit: u64) -> DbResult<Vec<Block>> {
let mut seq = vec![];
// Determine the starting block ID
let start_block_id = if let Some(before_id) = before {
before_id.saturating_sub(1)
} else {
// Get the latest block ID
self.get_meta_last_block_in_db()?
};
for i in 0..limit {
let block_id = start_block_id.saturating_sub(i);
if block_id == 0 {
break;
}
seq.push(block_id);
}
self.get_block_batch_seq(seq.into_iter())
}
/// Get block batch from a sequence.
///
/// Currently assumes non-decreasing sequence.
///
/// `ToDo`: Add suport of arbitrary sequences.
pub fn get_block_batch_seq(&self, seq: impl Iterator<Item = u64>) -> DbResult<Vec<Block>> {
let cf_block = self.block_column();
// Keys setup
let mut keys = vec![];
for block_id in seq {
keys.push((
&cf_block,
borsh::to_vec(&block_id).map_err(|err| {
DbError::borsh_cast_message(
err,
Some("Failed to serialize block id".to_owned()),
)
})?,
));
}
let multi_get_res = self.db.multi_get_cf(keys);
// Keys parsing
let mut block_batch = vec![];
for res in multi_get_res {
let res = res.map_err(|rerr| DbError::rocksdb_cast_message(rerr, None))?;
let block = if let Some(data) = res {
Ok(borsh::from_slice::<Block>(&data).map_err(|serr| {
DbError::borsh_cast_message(
serr,
Some("Failed to deserialize block data".to_owned()),
)
})?)
} else {
// Block not found, assuming that previous one was the last
break;
}?;
block_batch.push(block);
}
Ok(block_batch)
}
/// Get block ids by txs.
///
/// `ToDo`: There may be multiple transactions in one block
/// so this method can take redundant reads.
/// Need to update signature and implementation.
fn get_block_ids_by_tx_vec(&self, tx_vec: &[[u8; 32]]) -> DbResult<Vec<u64>> {
let cf_tti = self.tx_hash_to_id_column();
// Keys setup
let mut keys = vec![];
for tx_hash in tx_vec {
keys.push((
&cf_tti,
borsh::to_vec(tx_hash).map_err(|err| {
DbError::borsh_cast_message(err, Some("Failed to serialize tx_hash".to_owned()))
})?,
));
}
let multi_get_res = self.db.multi_get_cf(keys);
// Keys parsing
let mut block_id_batch = vec![];
for res in multi_get_res {
let res = res
.map_err(|rerr| DbError::rocksdb_cast_message(rerr, None))?
.ok_or_else(|| {
DbError::db_interaction_error(
"Tx to block id mapping do not contain transaction from vec".to_owned(),
)
})?;
let block_id = {
Ok(borsh::from_slice::<u64>(&res).map_err(|serr| {
DbError::borsh_cast_message(
serr,
Some("Failed to deserialize block id".to_owned()),
)
})?)
}?;
block_id_batch.push(block_id);
}
Ok(block_id_batch)
}
// Account
/// Fetch up to `limit` transaction hashes for account `acc_id`, starting
/// at per-account transaction index `offset`.
///
/// Keys are `acc_id || tx_index` (borsh-encoded). Iteration stops at the
/// first missing index, assuming the previous entry was the account's
/// last recorded transaction.
///
/// # Panics
/// Panics if `offset + limit` overflows `u64`.
pub(crate) fn get_acc_transaction_hashes(
    &self,
    acc_id: [u8; 32],
    offset: u64,
    limit: u64,
) -> DbResult<Vec<[u8; 32]>> {
    let cf_att = self.account_id_to_tx_hash_column();
    // The account-id prefix is identical for every key; serialize it once
    // instead of re-encoding it on every loop iteration.
    let acc_prefix = borsh::to_vec(&acc_id).map_err(|berr| {
        DbError::borsh_cast_message(berr, Some("Failed to serialize account id".to_owned()))
    })?;
    // Keys preparation: one key per tx index in [offset, offset + limit).
    let mut keys = vec![];
    for tx_id in offset
        ..offset
            .checked_add(limit)
            .expect("Transaction limit should be lesser than u64::MAX")
    {
        let suffix = borsh::to_vec(&tx_id).map_err(|berr| {
            DbError::borsh_cast_message(berr, Some("Failed to serialize tx id".to_owned()))
        })?;
        let mut key = acc_prefix.clone();
        key.extend_from_slice(&suffix);
        keys.push((&cf_att, key));
    }
    let multi_get_res = self.db.multi_get_cf(keys);
    let mut tx_batch = vec![];
    for res in multi_get_res {
        // Tx hash not found: assume the previous one was the last.
        let Some(data) = res.map_err(|rerr| DbError::rocksdb_cast_message(rerr, None))? else {
            break;
        };
        let tx_hash = borsh::from_slice::<[u8; 32]>(&data).map_err(|serr| {
            DbError::borsh_cast_message(
                serr,
                Some("Failed to deserialize tx_hash".to_owned()),
            )
        })?;
        tx_batch.push(tx_hash);
    }
    Ok(tx_batch)
}
/// Load the full transactions of account `acc_id`, paged by `offset` and
/// `limit`, by resolving its tx hashes to blocks and extracting each
/// matching transaction from the block body.
pub fn get_acc_transactions(
    &self,
    acc_id: [u8; 32],
    offset: u64,
    limit: u64,
) -> DbResult<Vec<NSSATransaction>> {
    let tx_hashes = self.get_acc_transaction_hashes(acc_id, offset, limit)?;
    let block_ids = self.get_block_ids_by_tx_vec(&tx_hashes)?;
    let blocks = self.get_block_batch_seq(block_ids.into_iter())?;
    let mut tx_batch = Vec::with_capacity(tx_hashes.len());
    for (block, tx_hash) in blocks.into_iter().zip(tx_hashes) {
        // The block referenced by the mapping must actually contain the tx.
        let found = block
            .body
            .transactions
            .iter()
            .find(|tx| tx.hash().0 == tx_hash);
        match found {
            Some(transaction) => tx_batch.push(transaction.clone()),
            None => {
                return Err(DbError::db_interaction_error(format!(
                    "Missing transaction in block {} with hash {:#?}",
                    block.header.block_id, tx_hash
                )));
            }
        }
    }
    Ok(tx_batch)
}
}

View File

@ -0,0 +1,272 @@
use super::{
Block, DB_META_FIRST_BLOCK_IN_DB_KEY, DB_META_FIRST_BLOCK_SET_KEY,
DB_META_LAST_BLOCK_IN_DB_KEY, DB_META_LAST_BREAKPOINT_ID,
DB_META_LAST_OBSERVED_L1_LIB_HEADER_ID_IN_DB_KEY, DbError, DbResult, RocksDBIO, V03State,
};
#[expect(clippy::multiple_inherent_impl, reason = "Readability")]
impl RocksDBIO {
// Meta
/// Read the id of the first block stored in the database.
///
/// # Errors
/// Fails when the meta key is absent, or on RocksDB / borsh errors.
pub fn get_meta_first_block_in_db(&self) -> DbResult<u64> {
    let cf_meta = self.meta_column();
    let key = borsh::to_vec(&DB_META_FIRST_BLOCK_IN_DB_KEY).map_err(|err| {
        DbError::borsh_cast_message(
            err,
            Some("Failed to serialize DB_META_FIRST_BLOCK_IN_DB_KEY".to_owned()),
        )
    })?;
    let data = self
        .db
        .get_cf(&cf_meta, key)
        .map_err(|rerr| DbError::rocksdb_cast_message(rerr, None))?
        .ok_or_else(|| DbError::db_interaction_error("First block not found".to_owned()))?;
    borsh::from_slice::<u64>(&data).map_err(|err| {
        DbError::borsh_cast_message(
            err,
            Some("Failed to deserialize first block".to_owned()),
        )
    })
}
/// Read the id of the last (tip) block stored in the database.
///
/// # Errors
/// Fails when the meta key is absent, or on RocksDB / borsh errors.
pub fn get_meta_last_block_in_db(&self) -> DbResult<u64> {
    let cf_meta = self.meta_column();
    let key = borsh::to_vec(&DB_META_LAST_BLOCK_IN_DB_KEY).map_err(|err| {
        DbError::borsh_cast_message(
            err,
            Some("Failed to serialize DB_META_LAST_BLOCK_IN_DB_KEY".to_owned()),
        )
    })?;
    let data = self
        .db
        .get_cf(&cf_meta, key)
        .map_err(|rerr| DbError::rocksdb_cast_message(rerr, None))?
        .ok_or_else(|| DbError::db_interaction_error("Last block not found".to_owned()))?;
    borsh::from_slice::<u64>(&data).map_err(|err| {
        DbError::borsh_cast_message(
            err,
            Some("Failed to deserialize last block".to_owned()),
        )
    })
}
/// Read the header id of the last observed L1 LIB block, if one has been
/// recorded; `Ok(None)` when the meta key was never written.
pub fn get_meta_last_observed_l1_lib_header_in_db(&self) -> DbResult<Option<[u8; 32]>> {
    let cf_meta = self.meta_column();
    let key = borsh::to_vec(&DB_META_LAST_OBSERVED_L1_LIB_HEADER_ID_IN_DB_KEY).map_err(
        |err| {
            DbError::borsh_cast_message(
                err,
                Some(
                    "Failed to serialize DB_META_LAST_OBSERVED_L1_LIB_HEADER_ID_IN_DB_KEY"
                        .to_owned(),
                ),
            )
        },
    )?;
    let maybe_data = self
        .db
        .get_cf(&cf_meta, key)
        .map_err(|rerr| DbError::rocksdb_cast_message(rerr, None))?;
    match maybe_data {
        None => Ok(None),
        Some(data) => borsh::from_slice::<[u8; 32]>(&data)
            .map(Some)
            .map_err(|err| {
                DbError::borsh_cast_message(
                    err,
                    Some("Failed to deserialize last l1 lib header".to_owned()),
                )
            }),
    }
}
/// Check whether the "first block set" flag exists in meta; the flag is
/// the mere presence of the key, not its stored value.
pub fn get_meta_is_first_block_set(&self) -> DbResult<bool> {
    let cf_meta = self.meta_column();
    let key = borsh::to_vec(&DB_META_FIRST_BLOCK_SET_KEY).map_err(|err| {
        DbError::borsh_cast_message(
            err,
            Some("Failed to serialize DB_META_FIRST_BLOCK_SET_KEY".to_owned()),
        )
    })?;
    self.db
        .get_cf(&cf_meta, key)
        .map(|res| res.is_some())
        .map_err(|rerr| DbError::rocksdb_cast_message(rerr, None))
}
/// Read the id of the most recently stored breakpoint.
///
/// # Errors
/// Fails when the meta key is absent, or on RocksDB / borsh errors.
pub fn get_meta_last_breakpoint_id(&self) -> DbResult<u64> {
    let cf_meta = self.meta_column();
    let key = borsh::to_vec(&DB_META_LAST_BREAKPOINT_ID).map_err(|err| {
        DbError::borsh_cast_message(
            err,
            Some("Failed to serialize DB_META_LAST_BREAKPOINT_ID".to_owned()),
        )
    })?;
    let data = self
        .db
        .get_cf(&cf_meta, key)
        .map_err(|rerr| DbError::rocksdb_cast_message(rerr, None))?
        .ok_or_else(|| {
            DbError::db_interaction_error("Last breakpoint id not found".to_owned())
        })?;
    borsh::from_slice::<u64>(&data).map_err(|err| {
        DbError::borsh_cast_message(
            err,
            Some("Failed to deserialize last breakpoint id".to_owned()),
        )
    })
}
// Block
/// Load the block stored under `block_id`; `Ok(None)` when no such block
/// exists.
pub fn get_block(&self, block_id: u64) -> DbResult<Option<Block>> {
    let cf_block = self.block_column();
    let key = borsh::to_vec(&block_id).map_err(|err| {
        DbError::borsh_cast_message(
            err,
            Some("Failed to serialize block id".to_owned()),
        )
    })?;
    self.db
        .get_cf(&cf_block, key)
        .map_err(|rerr| DbError::rocksdb_cast_message(rerr, None))?
        .map(|data| {
            borsh::from_slice::<Block>(&data).map_err(|serr| {
                DbError::borsh_cast_message(
                    serr,
                    Some("Failed to deserialize block data".to_owned()),
                )
            })
        })
        .transpose()
}
// State
/// Load the stored state snapshot for breakpoint id `br_id`.
///
/// # Errors
/// Fails when no breakpoint exists for `br_id`, or on RocksDB / borsh
/// errors.
pub fn get_breakpoint(&self, br_id: u64) -> DbResult<V03State> {
    let cf_br = self.breakpoint_column();
    let key = borsh::to_vec(&br_id).map_err(|err| {
        DbError::borsh_cast_message(
            err,
            Some("Failed to serialize breakpoint id".to_owned()),
        )
    })?;
    let data = self
        .db
        .get_cf(&cf_br, key)
        .map_err(|rerr| DbError::rocksdb_cast_message(rerr, None))?
        .ok_or_else(|| {
            DbError::db_interaction_error("Breakpoint on this id not found".to_owned())
        })?;
    borsh::from_slice::<V03State>(&data).map_err(|serr| {
        DbError::borsh_cast_message(
            serr,
            Some("Failed to deserialize breakpoint data".to_owned()),
        )
    })
}
// Mappings
/// Resolve a block hash to its block id via the hash → id mapping;
/// `Ok(None)` when the hash is unknown.
pub fn get_block_id_by_hash(&self, hash: [u8; 32]) -> DbResult<Option<u64>> {
    let cf_hti = self.hash_to_id_column();
    let key = borsh::to_vec(&hash).map_err(|err| {
        DbError::borsh_cast_message(
            err,
            Some("Failed to serialize block hash".to_owned()),
        )
    })?;
    self.db
        .get_cf(&cf_hti, key)
        .map_err(|rerr| DbError::rocksdb_cast_message(rerr, None))?
        .map(|data| {
            borsh::from_slice::<u64>(&data).map_err(|serr| {
                DbError::borsh_cast_message(serr, Some("Failed to deserialize block id".to_owned()))
            })
        })
        .transpose()
}
/// Resolve a transaction hash to the id of the block containing it;
/// `Ok(None)` when the transaction is unknown.
pub fn get_block_id_by_tx_hash(&self, tx_hash: [u8; 32]) -> DbResult<Option<u64>> {
    let cf_tti = self.tx_hash_to_id_column();
    let key = borsh::to_vec(&tx_hash).map_err(|err| {
        DbError::borsh_cast_message(
            err,
            Some("Failed to serialize transaction hash".to_owned()),
        )
    })?;
    self.db
        .get_cf(&cf_tti, key)
        .map_err(|rerr| DbError::rocksdb_cast_message(rerr, None))?
        .map(|data| {
            borsh::from_slice::<u64>(&data).map_err(|serr| {
                DbError::borsh_cast_message(serr, Some("Failed to deserialize block id".to_owned()))
            })
        })
        .transpose()
}
// Accounts meta
/// Read the number of transactions recorded for `acc_id`; `Ok(None)` when
/// the account has no metadata entry yet.
///
/// NOTE(review): the key here is the raw 32 bytes of `acc_id`, while the
/// writer (`update_acc_meta_batch`) uses `borsh::to_vec(&acc_id)`; borsh
/// encodes a fixed-size byte array as the same raw bytes, so the two key
/// forms should coincide — worth confirming if the key type ever changes.
pub(crate) fn get_acc_meta_num_tx(&self, acc_id: [u8; 32]) -> DbResult<Option<u64>> {
    let cf_ameta = self.account_meta_column();
    let maybe_data = self.db.get_cf(&cf_ameta, acc_id).map_err(|rerr| {
        DbError::rocksdb_cast_message(rerr, Some("Failed to read from acc meta cf".to_owned()))
    })?;
    match maybe_data {
        None => Ok(None),
        Some(data) => borsh::from_slice::<u64>(&data).map(Some).map_err(|serr| {
            DbError::borsh_cast_message(serr, Some("Failed to deserialize num tx".to_owned()))
        }),
    }
}
}

View File

@ -0,0 +1,339 @@
use std::collections::HashMap;
use rocksdb::WriteBatch;
use super::{
Arc, BREAKPOINT_INTERVAL, Block, BoundColumnFamily, DB_META_FIRST_BLOCK_IN_DB_KEY,
DB_META_FIRST_BLOCK_SET_KEY, DB_META_LAST_BLOCK_IN_DB_KEY, DB_META_LAST_BREAKPOINT_ID,
DB_META_LAST_OBSERVED_L1_LIB_HEADER_ID_IN_DB_KEY, DbError, DbResult, RocksDBIO,
};
#[expect(clippy::multiple_inherent_impl, reason = "Readability")]
impl RocksDBIO {
// Accounts meta
/// Queue an update of the account's transaction count (`num_tx`) into
/// `write_batch` without committing the batch.
pub(crate) fn update_acc_meta_batch(
    &self,
    acc_id: [u8; 32],
    num_tx: u64,
    write_batch: &mut WriteBatch,
) -> DbResult<()> {
    let cf_ameta = self.account_meta_column();
    let key = borsh::to_vec(&acc_id).map_err(|err| {
        DbError::borsh_cast_message(err, Some("Failed to serialize account id".to_owned()))
    })?;
    let value = borsh::to_vec(&num_tx).map_err(|err| {
        DbError::borsh_cast_message(
            err,
            Some("Failed to serialize acc metadata".to_owned()),
        )
    })?;
    write_batch.put_cf(&cf_ameta, key, value);
    Ok(())
}
// Account
/// Append `tx_hashes` to the account's transaction index and update its
/// transaction count, committing everything in one atomic write batch.
///
/// The body was previously a line-for-line duplicate of
/// `put_account_transactions_dependant`; it now delegates to it so the
/// two entry points share a single implementation.
pub fn put_account_transactions(
    &self,
    acc_id: [u8; 32],
    tx_hashes: &[[u8; 32]],
) -> DbResult<()> {
    let mut write_batch = WriteBatch::new();
    // Build all index + meta writes into the local batch…
    self.put_account_transactions_dependant(acc_id, tx_hashes, &mut write_batch)?;
    // …then commit them atomically.
    self.db.write(write_batch).map_err(|rerr| {
        DbError::rocksdb_cast_message(rerr, Some("Failed to write batch".to_owned()))
    })
}
/// Queue the writes that append `tx_hashes` to the account's transaction
/// index into `write_batch`, without committing the batch.
///
/// Keys are `acc_id || tx_index` (borsh-encoded); indices continue from
/// the account's current transaction count, which is updated in the same
/// batch via `update_acc_meta_batch`.
///
/// # Panics
/// Panics if the resulting transaction count would overflow `u64`.
pub fn put_account_transactions_dependant(
    &self,
    acc_id: [u8; 32],
    tx_hashes: &[[u8; 32]],
    write_batch: &mut WriteBatch,
) -> DbResult<()> {
    let acc_num_tx = self.get_acc_meta_num_tx(acc_id)?.unwrap_or(0);
    let cf_att = self.account_id_to_tx_hash_column();
    // The account-id prefix is loop-invariant; serialize it once instead
    // of re-encoding it for every tx hash.
    let acc_prefix = borsh::to_vec(&acc_id).map_err(|berr| {
        DbError::borsh_cast_message(berr, Some("Failed to serialize account id".to_owned()))
    })?;
    for (tx_id, tx_hash) in tx_hashes.iter().enumerate() {
        let put_id = acc_num_tx
            .checked_add(tx_id.try_into().expect("Must fit into u64"))
            .expect("Tx count should be lesser that u64::MAX");
        let suffix = borsh::to_vec(&put_id).map_err(|berr| {
            DbError::borsh_cast_message(berr, Some("Failed to serialize tx id".to_owned()))
        })?;
        let mut key = acc_prefix.clone();
        key.extend_from_slice(&suffix);
        write_batch.put_cf(
            &cf_att,
            key,
            borsh::to_vec(tx_hash).map_err(|berr| {
                DbError::borsh_cast_message(
                    berr,
                    Some("Failed to serialize tx hash".to_owned()),
                )
            })?,
        );
    }
    // Record the new total tx count in the same batch so index and meta
    // stay consistent under the caller's atomic write.
    self.update_acc_meta_batch(
        acc_id,
        acc_num_tx
            .checked_add(tx_hashes.len().try_into().expect("Must fit into u64"))
            .expect("Tx count should be lesser that u64::MAX"),
        write_batch,
    )?;
    Ok(())
}
// Meta
/// Record `block` as the first block in the database, then store the
/// block itself via `put_block`.
///
/// NOTE(review): despite the `_batch` suffix, this method does NOT go
/// through a `WriteBatch` — the meta key is written directly with
/// `put_cf`, and `put_block` performs its own batched write. Confirm
/// whether the name or the implementation should change.
pub fn put_meta_first_block_in_db_batch(&self, block: &Block) -> DbResult<()> {
    let cf_meta = self.meta_column();
    // Write first-block-id meta key directly (not batched).
    self.db
        .put_cf(
            &cf_meta,
            borsh::to_vec(&DB_META_FIRST_BLOCK_IN_DB_KEY).map_err(|err| {
                DbError::borsh_cast_message(
                    err,
                    Some("Failed to serialize DB_META_FIRST_BLOCK_IN_DB_KEY".to_owned()),
                )
            })?,
            borsh::to_vec(&block.header.block_id).map_err(|err| {
                DbError::borsh_cast_message(
                    err,
                    Some("Failed to serialize first block id".to_owned()),
                )
            })?,
        )
        .map_err(|rerr| DbError::rocksdb_cast_message(rerr, None))?;
    // Persist the block itself; `[0; 32]` is passed as the l1 lib header
    // here — presumably a placeholder for "none observed yet"; verify.
    self.put_block(block, [0; 32])?;
    Ok(())
}
/// Queue an update of the last-block-id meta key into `write_batch`
/// without committing the batch.
pub fn put_meta_last_block_in_db_batch(
    &self,
    block_id: u64,
    write_batch: &mut WriteBatch,
) -> DbResult<()> {
    let cf_meta = self.meta_column();
    let key = borsh::to_vec(&DB_META_LAST_BLOCK_IN_DB_KEY).map_err(|err| {
        DbError::borsh_cast_message(
            err,
            Some("Failed to serialize DB_META_LAST_BLOCK_IN_DB_KEY".to_owned()),
        )
    })?;
    let value = borsh::to_vec(&block_id).map_err(|err| {
        DbError::borsh_cast_message(
            err,
            Some("Failed to serialize last block id".to_owned()),
        )
    })?;
    write_batch.put_cf(&cf_meta, key, value);
    Ok(())
}
/// Queue an update of the last-observed-L1-LIB-header meta key into
/// `write_batch` without committing the batch.
pub fn put_meta_last_observed_l1_lib_header_in_db_batch(
    &self,
    l1_lib_header: [u8; 32],
    write_batch: &mut WriteBatch,
) -> DbResult<()> {
    let cf_meta = self.meta_column();
    let key = borsh::to_vec(&DB_META_LAST_OBSERVED_L1_LIB_HEADER_ID_IN_DB_KEY).map_err(|err| {
        DbError::borsh_cast_message(
            err,
            Some(
                "Failed to serialize DB_META_LAST_OBSERVED_L1_LIB_HEADER_ID_IN_DB_KEY"
                    .to_owned(),
            ),
        )
    })?;
    let value = borsh::to_vec(&l1_lib_header).map_err(|err| {
        DbError::borsh_cast_message(
            err,
            Some("Failed to serialize last l1 block header".to_owned()),
        )
    })?;
    write_batch.put_cf(&cf_meta, key, value);
    Ok(())
}
/// Queue an update of the last-breakpoint-id meta key into `write_batch`
/// without committing the batch.
pub fn put_meta_last_breakpoint_id_batch(
    &self,
    br_id: u64,
    write_batch: &mut WriteBatch,
) -> DbResult<()> {
    let cf_meta = self.meta_column();
    write_batch.put_cf(
        &cf_meta,
        borsh::to_vec(&DB_META_LAST_BREAKPOINT_ID).map_err(|err| {
            DbError::borsh_cast_message(
                err,
                Some("Failed to serialize DB_META_LAST_BREAKPOINT_ID".to_owned()),
            )
        })?,
        borsh::to_vec(&br_id).map_err(|err| {
            DbError::borsh_cast_message(
                err,
                // Fixed copy-pasted message: this serializes the breakpoint id,
                // not the last block id.
                Some("Failed to serialize last breakpoint id".to_owned()),
            )
        })?,
    );
    Ok(())
}
/// Queue the "first block set" flag into `write_batch` without committing
/// the batch; readers test only for the key's presence.
pub fn put_meta_is_first_block_set_batch(&self, write_batch: &mut WriteBatch) -> DbResult<()> {
    let cf_meta = self.meta_column();
    let key = borsh::to_vec(&DB_META_FIRST_BLOCK_SET_KEY).map_err(|err| {
        DbError::borsh_cast_message(
            err,
            Some("Failed to serialize DB_META_FIRST_BLOCK_SET_KEY".to_owned()),
        )
    })?;
    // The stored byte is a sentinel; only key existence matters.
    write_batch.put_cf(&cf_meta, key, [1_u8; 1]);
    Ok(())
}
// Block
/// Persist `block` atomically together with all derived indices:
/// block-id → block, block-hash → block-id, tx-hash → block-id, and the
/// per-account transaction index. When this block advances the tip, the
/// last-block and last-observed-L1-header meta keys are updated in the
/// same batch. After the commit, a state breakpoint is created on
/// `BREAKPOINT_INTERVAL` boundaries.
///
/// NOTE(review): `get_meta_last_block_in_db` returns an error when the
/// last-block meta key is absent, so this method seems to require prior
/// initialization of that key — confirm the intended bootstrap path
/// (`put_meta_first_block_in_db_batch` calls this before setting it).
pub fn put_block(&self, block: &Block, l1_lib_header: [u8; 32]) -> DbResult<()> {
    let cf_block = self.block_column();
    let cf_hti = self.hash_to_id_column();
    let cf_tti: Arc<BoundColumnFamily<'_>> = self.tx_hash_to_id_column();
    // Current tip, used to decide whether this block advances it.
    let last_curr_block = self.get_meta_last_block_in_db()?;
    let mut write_batch = WriteBatch::default();
    // block id -> serialized block
    write_batch.put_cf(
        &cf_block,
        borsh::to_vec(&block.header.block_id).map_err(|err| {
            DbError::borsh_cast_message(err, Some("Failed to serialize block id".to_owned()))
        })?,
        borsh::to_vec(block).map_err(|err| {
            DbError::borsh_cast_message(err, Some("Failed to serialize block data".to_owned()))
        })?,
    );
    // Only advance tip metadata when the incoming block is strictly newer.
    if block.header.block_id > last_curr_block {
        self.put_meta_last_block_in_db_batch(block.header.block_id, &mut write_batch)?;
        self.put_meta_last_observed_l1_lib_header_in_db_batch(l1_lib_header, &mut write_batch)?;
    }
    // block hash -> block id
    write_batch.put_cf(
        &cf_hti,
        borsh::to_vec(&block.header.hash).map_err(|err| {
            DbError::borsh_cast_message(err, Some("Failed to serialize block hash".to_owned()))
        })?,
        borsh::to_vec(&block.header.block_id).map_err(|err| {
            DbError::borsh_cast_message(err, Some("Failed to serialize block id".to_owned()))
        })?,
    );
    // tx hash -> block id for each transaction, while accumulating
    // account -> tx-hashes for the per-account index below.
    let mut acc_to_tx_map: HashMap<[u8; 32], Vec<[u8; 32]>> = HashMap::new();
    for tx in &block.body.transactions {
        let tx_hash = tx.hash();
        write_batch.put_cf(
            &cf_tti,
            borsh::to_vec(&tx_hash).map_err(|err| {
                DbError::borsh_cast_message(err, Some("Failed to serialize tx hash".to_owned()))
            })?,
            borsh::to_vec(&block.header.block_id).map_err(|err| {
                DbError::borsh_cast_message(
                    err,
                    Some("Failed to serialize block id".to_owned()),
                )
            })?,
        );
        let acc_ids = tx
            .affected_public_account_ids()
            .into_iter()
            .map(nssa::AccountId::into_value)
            .collect::<Vec<_>>();
        for acc_id in acc_ids {
            acc_to_tx_map
                .entry(acc_id)
                .and_modify(|tx_hashes| tx_hashes.push(tx_hash.into()))
                .or_insert_with(|| vec![tx_hash.into()]);
        }
    }
    #[expect(
        clippy::iter_over_hash_type,
        reason = "RocksDB will keep ordering persistent"
    )]
    for (acc_id, tx_hashes) in acc_to_tx_map {
        self.put_account_transactions_dependant(acc_id, &tx_hashes, &mut write_batch)?;
    }
    // Commit every write queued above atomically.
    self.db.write(write_batch).map_err(|rerr| {
        DbError::rocksdb_cast_message(rerr, Some("Failed to write batch".to_owned()))
    })?;
    // Breakpoint creation happens after (outside) the atomic commit.
    if block
        .header
        .block_id
        .is_multiple_of(BREAKPOINT_INTERVAL.into())
    {
        self.put_next_breakpoint()?;
    }
    Ok(())
}
}

View File

@ -0,0 +1,147 @@
use super::{
BREAKPOINT_INTERVAL, DB_META_FIRST_BLOCK_SET_KEY, DB_META_LAST_BLOCK_IN_DB_KEY,
DB_META_LAST_BREAKPOINT_ID, DB_META_LAST_OBSERVED_L1_LIB_HEADER_ID_IN_DB_KEY, DbError,
DbResult, RocksDBIO, V03State,
};
#[expect(clippy::multiple_inherent_impl, reason = "Readability")]
impl RocksDBIO {
// Meta
/// Store `block_id` as the last (tip) block id in meta, writing directly
/// (no batch).
pub fn put_meta_last_block_in_db(&self, block_id: u64) -> DbResult<()> {
    let cf_meta = self.meta_column();
    let key = borsh::to_vec(&DB_META_LAST_BLOCK_IN_DB_KEY).map_err(|err| {
        DbError::borsh_cast_message(
            err,
            Some("Failed to serialize DB_META_LAST_BLOCK_IN_DB_KEY".to_owned()),
        )
    })?;
    let value = borsh::to_vec(&block_id).map_err(|err| {
        DbError::borsh_cast_message(
            err,
            Some("Failed to serialize last block id".to_owned()),
        )
    })?;
    self.db
        .put_cf(&cf_meta, key, value)
        .map_err(|rerr| DbError::rocksdb_cast_message(rerr, None))
}
/// Store the header id of the last observed L1 LIB block in meta, writing
/// directly (no batch).
pub fn put_meta_last_observed_l1_lib_header_in_db(
    &self,
    l1_lib_header: [u8; 32],
) -> DbResult<()> {
    let cf_meta = self.meta_column();
    let key = borsh::to_vec(&DB_META_LAST_OBSERVED_L1_LIB_HEADER_ID_IN_DB_KEY).map_err(
        |err| {
            DbError::borsh_cast_message(
                err,
                Some(
                    "Failed to serialize DB_META_LAST_OBSERVED_L1_LIB_HEADER_ID_IN_DB_KEY"
                        .to_owned(),
                ),
            )
        },
    )?;
    let value = borsh::to_vec(&l1_lib_header).map_err(|err| {
        DbError::borsh_cast_message(
            err,
            Some("Failed to serialize last l1 block header".to_owned()),
        )
    })?;
    self.db
        .put_cf(&cf_meta, key, value)
        .map_err(|rerr| DbError::rocksdb_cast_message(rerr, None))
}
/// Store `br_id` as the last breakpoint id in meta, writing directly
/// (no batch).
pub fn put_meta_last_breakpoint_id(&self, br_id: u64) -> DbResult<()> {
    let cf_meta = self.meta_column();
    self.db
        .put_cf(
            &cf_meta,
            borsh::to_vec(&DB_META_LAST_BREAKPOINT_ID).map_err(|err| {
                DbError::borsh_cast_message(
                    err,
                    Some("Failed to serialize DB_META_LAST_BREAKPOINT_ID".to_owned()),
                )
            })?,
            borsh::to_vec(&br_id).map_err(|err| {
                DbError::borsh_cast_message(
                    err,
                    // Fixed copy-pasted message: this serializes the breakpoint
                    // id, not the last block id.
                    Some("Failed to serialize last breakpoint id".to_owned()),
                )
            })?,
        )
        .map_err(|rerr| DbError::rocksdb_cast_message(rerr, None))?;
    Ok(())
}
/// Set the "first block set" flag in meta, writing directly (no batch);
/// readers test only for the key's presence.
pub fn put_meta_is_first_block_set(&self) -> DbResult<()> {
    let cf_meta = self.meta_column();
    let key = borsh::to_vec(&DB_META_FIRST_BLOCK_SET_KEY).map_err(|err| {
        DbError::borsh_cast_message(
            err,
            Some("Failed to serialize DB_META_FIRST_BLOCK_SET_KEY".to_owned()),
        )
    })?;
    // The stored byte is a sentinel; only key existence matters.
    self.db
        .put_cf(&cf_meta, key, [1_u8; 1])
        .map_err(|rerr| DbError::rocksdb_cast_message(rerr, None))
}
// State
/// Store `breakpoint` state under breakpoint id `br_id`.
pub fn put_breakpoint(&self, br_id: u64, breakpoint: &V03State) -> DbResult<()> {
    let cf_br = self.breakpoint_column();
    let key = borsh::to_vec(&br_id).map_err(|err| {
        DbError::borsh_cast_message(
            err,
            Some("Failed to serialize breakpoint id".to_owned()),
        )
    })?;
    let value = borsh::to_vec(breakpoint).map_err(|err| {
        DbError::borsh_cast_message(
            err,
            Some("Failed to serialize breakpoint data".to_owned()),
        )
    })?;
    self.db
        .put_cf(&cf_br, key, value)
        .map_err(|rerr| DbError::rocksdb_cast_message(rerr, None))
}
/// Compute and persist the next state breakpoint, provided its target
/// block (`next_breakpoint_id * BREAKPOINT_INTERVAL`) has already been
/// stored; otherwise returns a "Breakpoint not yet achieved" error.
///
/// # Panics
/// Panics if the breakpoint id or target block id would overflow `u64`.
pub fn put_next_breakpoint(&self) -> DbResult<()> {
    let last_block = self.get_meta_last_block_in_db()?;
    let next_breakpoint_id = self
        .get_meta_last_breakpoint_id()?
        .checked_add(1)
        .expect("Breakpoint Id will be lesser than u64::MAX");
    let block_to_break_id = next_breakpoint_id
        .checked_mul(u64::from(BREAKPOINT_INTERVAL))
        .expect("Reached maximum breakpoint id");
    // Guard clause: the breakpoint's block must already be in the DB.
    if block_to_break_id > last_block {
        return Err(DbError::db_interaction_error(
            "Breakpoint not yet achieved".to_owned(),
        ));
    }
    let next_breakpoint = self.calculate_state_for_id(block_to_break_id)?;
    self.put_breakpoint(next_breakpoint_id, &next_breakpoint)?;
    self.put_meta_last_breakpoint_id(next_breakpoint_id)
}
}