diff --git a/Cargo.lock b/Cargo.lock
index 1e58c0cc..e774e577 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -5911,6 +5911,8 @@ version = "0.1.0"
 dependencies = [
  "borsh",
  "common",
+ "nssa",
+ "nssa_core",
  "rocksdb",
  "thiserror 2.0.17",
 ]
diff --git a/common/src/transaction.rs b/common/src/transaction.rs
index 372bc955..74170912 100644
--- a/common/src/transaction.rs
+++ b/common/src/transaction.rs
@@ -1,5 +1,6 @@
 use borsh::{BorshDeserialize, BorshSerialize};
-use log::info;
+use log::{info, warn};
+use nssa::V02State;
 use serde::{Deserialize, Serialize};
 use sha2::{Digest, digest::FixedOutput};
@@ -100,6 +101,22 @@ impl EncodedTransaction {
     }
 }

+pub fn execute_check_transaction_on_state(
+    state: &mut V02State,
+    tx: NSSATransaction,
+) -> Result {
+    match &tx {
+        NSSATransaction::Public(tx) => state.transition_from_public_transaction(tx),
+        NSSATransaction::PrivacyPreserving(tx) => state
+            .transition_from_privacy_preserving_transaction(tx),
+        NSSATransaction::ProgramDeployment(tx) => state
+            .transition_from_program_deployment_transaction(tx),
+    }
+    .inspect_err(|err| warn!("Error at transition {err:#?}"))?;
+
+    Ok(tx)
+}
+
 #[cfg(test)]
 mod tests {
     use sha2::{Digest, digest::FixedOutput};
diff --git a/nssa/core/src/commitment.rs b/nssa/core/src/commitment.rs
index 52344177..8d461ca2 100644
--- a/nssa/core/src/commitment.rs
+++ b/nssa/core/src/commitment.rs
@@ -4,8 +4,8 @@ use serde::{Deserialize, Serialize};

 use crate::{NullifierPublicKey, account::Account};

-#[derive(Serialize, Deserialize, BorshSerialize, BorshDeserialize)]
-#[cfg_attr(any(feature = "host", test), derive(Debug, Clone, PartialEq, Eq, Hash))]
+#[derive(Serialize, Deserialize, BorshSerialize, BorshDeserialize, PartialEq, Eq, PartialOrd, Ord)]
+#[cfg_attr(any(feature = "host", test), derive(Debug, Clone, Hash))]
 pub struct Commitment(pub(super) [u8; 32]);

 /// A commitment to all zero data.
diff --git a/nssa/core/src/nullifier.rs b/nssa/core/src/nullifier.rs
index 8d9d59fa..ec16605f 100644
--- a/nssa/core/src/nullifier.rs
+++ b/nssa/core/src/nullifier.rs
@@ -41,8 +41,8 @@ impl From<&NullifierSecretKey> for NullifierPublicKey {

 pub type NullifierSecretKey = [u8; 32];

-#[derive(Serialize, Deserialize, BorshSerialize, BorshDeserialize)]
-#[cfg_attr(any(feature = "host", test), derive(Debug, Clone, PartialEq, Eq, Hash))]
+#[derive(Serialize, Deserialize, BorshSerialize, BorshDeserialize, PartialEq, Eq, PartialOrd, Ord)]
+#[cfg_attr(any(feature = "host", test), derive(Debug, Clone, Hash))]
 pub struct Nullifier(pub(super) [u8; 32]);

 impl Nullifier {
diff --git a/nssa/src/merkle_tree/mod.rs b/nssa/src/merkle_tree/mod.rs
index c4501cf8..b3637b13 100644
--- a/nssa/src/merkle_tree/mod.rs
+++ b/nssa/src/merkle_tree/mod.rs
@@ -1,3 +1,4 @@
+use borsh::{BorshDeserialize, BorshSerialize};
 use sha2::{Digest, Sha256};

 mod default_values;
@@ -20,6 +21,7 @@ fn hash_value(value: &Value) -> Node {
 }

 #[cfg_attr(test, derive(Debug, PartialEq, Eq))]
+#[derive(BorshSerialize, BorshDeserialize)]
 pub struct MerkleTree {
     nodes: Vec<Node>,
     capacity: usize,
diff --git a/nssa/src/program.rs b/nssa/src/program.rs
index 943b16ed..06c7ad29 100644
--- a/nssa/src/program.rs
+++ b/nssa/src/program.rs
@@ -1,3 +1,4 @@
+use borsh::{BorshDeserialize, BorshSerialize};
 use nssa_core::{
     account::AccountWithMetadata,
     program::{InstructionData, ProgramId, ProgramOutput},
@@ -14,7 +15,7 @@ use crate::{
 /// TODO: Make this variable when fees are implemented
 const MAX_NUM_CYCLES_PUBLIC_EXECUTION: u64 = 1024 * 1024 * 32; // 32M cycles

-#[derive(Clone, Debug, PartialEq, Eq)]
+#[derive(Clone, Debug, PartialEq, Eq, BorshSerialize, BorshDeserialize)]
 pub struct Program {
     id: ProgramId,
     elf: Vec<u8>,
diff --git a/nssa/src/state.rs b/nssa/src/state.rs
index 1a384b2f..223b7c00 100644
--- a/nssa/src/state.rs
+++ b/nssa/src/state.rs
@@ -1,5 +1,6 @@
 use std::collections::{HashMap, HashSet};

+use borsh::{BorshDeserialize, BorshSerialize};
 use nssa_core::{
     Commitment, CommitmentSetDigest, DUMMY_COMMITMENT, MembershipProof, Nullifier,
     account::{Account, AccountId},
@@ -15,6 +16,7 @@ use crate::{

 pub const MAX_NUMBER_CHAINED_CALLS: usize = 10;

+#[derive(BorshSerialize, BorshDeserialize)]
 pub(crate) struct CommitmentSet {
     merkle_tree: MerkleTree,
     commitments: HashMap,
@@ -62,6 +64,7 @@ impl CommitmentSet {

 type NullifierSet = HashSet<Nullifier>;

+#[derive(BorshSerialize, BorshDeserialize)]
 pub struct V02State {
     public_state: HashMap<AccountId, Account>,
     private_state: (CommitmentSet, NullifierSet),
diff --git a/storage/Cargo.toml b/storage/Cargo.toml
index 4678560e..f3c3c7d2 100644
--- a/storage/Cargo.toml
+++ b/storage/Cargo.toml
@@ -5,6 +5,8 @@ edition = "2024"

 [dependencies]
 common.workspace = true
+nssa_core.workspace = true
+nssa.workspace = true
 thiserror.workspace = true
 borsh.workspace = true
diff --git a/storage/src/indexer.rs b/storage/src/indexer.rs
index 8c461eb1..6c09a7e0 100644
--- a/storage/src/indexer.rs
+++ b/storage/src/indexer.rs
@@ -1,48 +1,63 @@
-use std::{path::Path, sync::Arc};
-
-use common::block::{Block, HashableBlockData};
-use rocksdb::{BoundColumnFamily, ColumnFamilyDescriptor, DBWithThreadMode, MultiThreaded, Options};
+use std::{ops::Div, path::Path, sync::Arc};
+use common::{block::Block, transaction::{NSSATransaction, execute_check_transaction_on_state}};
+use nssa::V02State;
+use rocksdb::{
+    BoundColumnFamily, ColumnFamilyDescriptor, DBWithThreadMode, MultiThreaded, Options,
+};

 use crate::error::DbError;

-/// Maximal size of stored diff in base
+/// Maximal size of stored blocks in the database
 ///
 /// Used to control db size
 ///
 /// Currently effectively unbounded.
 pub const BUFF_SIZE_ROCKSDB: usize = usize::MAX;

-/// Delay in diffs between breakpoints
-pub const BREAKPOINT_DELAY: usize = 100;
+/// Size of stored blocks cache in memory
+///
+/// Kept small to avoid running out of memory
+pub const CACHE_SIZE: usize = 1000;

-/// Key base for storing metainformation about id of first diff in db
-pub const DB_META_FIRST_DIFF_IN_DB_KEY: &str = "first_diff_in_db";
-/// Key base for storing metainformation about id of last current diff in db
-pub const DB_META_LAST_DIFF_IN_DB_KEY: &str = "last_diff_in_db";
-/// Key base for storing metainformation which describe if first diff has been set
-pub const DB_META_FIRST_DIFF_SET_KEY: &str = "first_diff_set";
+/// Key base for storing metainformation about id of first block in db
+pub const DB_META_FIRST_BLOCK_IN_DB_KEY: &str = "first_block_in_db";
+/// Key base for storing metainformation about id of last current block in db
+pub const DB_META_LAST_BLOCK_IN_DB_KEY: &str = "last_block_in_db";
+/// Key base for storing metainformation which describes whether the first block has been set
+pub const DB_META_FIRST_BLOCK_SET_KEY: &str = "first_block_set";
+/// Key base for storing metainformation about the last finalized block on Bedrock
+pub const DB_META_LAST_FINALIZED_BLOCK_ID: &str = "last_finalized_block_id";
+/// Key base for storing metainformation about the last breakpoint
+pub const DB_META_LAST_BREAKPOINT_ID: &str = "last_breakpoint_id";

-/// Name of diff column family
-pub const CF_DIFF_NAME: &str = "cf_diff";
-/// Name of breakpoint coumn family
-pub const CF_BREAKPOINT_NAME: &str = "cf_breakpoint";
+/// Interval between state breakpoints
+pub const BREAKPOINT_INTERVAL: u64 = 100;
+
+/// Name of block column family
+pub const CF_BLOCK_NAME: &str = "cf_block";
 /// Name of meta column family
 pub const CF_META_NAME: &str = "cf_meta";
+/// Name of breakpoint column family
+pub const CF_BREAKPOINT_NAME: &str = "cf_breakpoint";

 pub type DbResult<T> = Result<T, DbError>;

+fn closest_breakpoint_id(block_id: u64) -> u64 {
+    block_id.div(BREAKPOINT_INTERVAL)
+}
+
 pub struct RocksDBIO {
     pub db: DBWithThreadMode<MultiThreaded>,
 }

 impl RocksDBIO {
-    pub fn open_or_create(path: &Path, start_diff: Option<Block>) -> DbResult<Self> {
+    pub fn open_or_create(path: &Path, start_block: Option<Block>, initial_state: V02State) -> DbResult<Self> {
         let mut cf_opts = Options::default();
         cf_opts.set_max_write_buffer_number(16);
         // ToDo: Add more column families for different data
-        let cfdiff = ColumnFamilyDescriptor::new(CF_DIFF_NAME, cf_opts.clone());
+        let cfb = ColumnFamilyDescriptor::new(CF_BLOCK_NAME, cf_opts.clone());
         let cfmeta = ColumnFamilyDescriptor::new(CF_META_NAME, cf_opts.clone());
-        let cfbr = ColumnFamilyDescriptor::new(CF_BREAKPOINT_NAME, cf_opts.clone());
+        let cfbreakpoint = ColumnFamilyDescriptor::new(CF_BREAKPOINT_NAME, cf_opts.clone());

         let mut db_opts = Options::default();
         db_opts.create_missing_column_families(true);
@@ -50,7 +65,7 @@ impl RocksDBIO {
         let db = DBWithThreadMode::<MultiThreaded>::open_cf_descriptors(
             &db_opts,
             path,
-            vec![cfdiff, cfmeta, cfbr],
+            vec![cfb, cfmeta, cfbreakpoint],
         );

         let dbio = Self {
@@ -58,19 +73,24 @@ impl RocksDBIO {
             db: db.unwrap(),
         };

-        let is_start_set = dbio.get_meta_is_first_diff_set()?;
+        let is_start_set = dbio.get_meta_is_first_block_set()?;

         if is_start_set {
             Ok(dbio)
-        } else if let Some(diff) = start_diff {
-            let diff_id = diff.header.diff_id;
-            dbio.put_meta_first_diff_in_db(diff)?;
-            dbio.put_meta_is_first_diff_set()?;
-            dbio.put_meta_last_diff_in_db(diff_id)?;
+        } else if let Some(block) = start_block {
+            let block_id = block.header.block_id;
+            dbio.put_meta_first_block_in_db(block)?;
+            dbio.put_meta_is_first_block_set()?;
+            dbio.put_meta_last_block_in_db(block_id)?;
+            dbio.put_meta_last_finalized_block_id(None)?;
+
+            // First breakpoint setup
+            dbio.put_breakpoint(0, initial_state)?;
+            dbio.put_meta_last_breakpoint_id(0)?;

             Ok(dbio)
         } else {
-            // Here we are trying to start a DB without a diff, one should not do it.
+            // Here we are trying to start a DB without a block, one should not do it.
             unreachable!()
         }
     }

@@ -79,8 +99,9 @@ impl RocksDBIO {
         let mut cf_opts = Options::default();
         cf_opts.set_max_write_buffer_number(16);
         // ToDo: Add more column families for different data
-        let _cfb = ColumnFamilyDescriptor::new(CF_DIFF_NAME, cf_opts.clone());
+        let _cfb = ColumnFamilyDescriptor::new(CF_BLOCK_NAME, cf_opts.clone());
         let _cfmeta = ColumnFamilyDescriptor::new(CF_META_NAME, cf_opts.clone());
+        let _cfsnapshot = ColumnFamilyDescriptor::new(CF_BREAKPOINT_NAME, cf_opts.clone());

         let mut db_opts = Options::default();
         db_opts.create_missing_column_families(true);
@@ -93,20 +114,24 @@ impl RocksDBIO {
         self.db.cf_handle(CF_META_NAME).unwrap()
     }

-    pub fn diff_column(&self) -> Arc<BoundColumnFamily<'_>> {
-        self.db.cf_handle(CF_DIFF_NAME).unwrap()
+    pub fn block_column(&self) -> Arc<BoundColumnFamily<'_>> {
+        self.db.cf_handle(CF_BLOCK_NAME).unwrap()
     }

-    pub fn get_meta_first_diff_in_db(&self) -> DbResult<u64> {
+    pub fn breakpoint_column(&self) -> Arc<BoundColumnFamily<'_>> {
+        self.db.cf_handle(CF_BREAKPOINT_NAME).unwrap()
+    }
+
+    pub fn get_meta_first_block_in_db(&self) -> DbResult<u64> {
         let cf_meta = self.meta_column();
         let res = self
             .db
             .get_cf(
                 &cf_meta,
-                borsh::to_vec(&DB_META_FIRST_DIFF_IN_DB_KEY).map_err(|err| {
+                borsh::to_vec(&DB_META_FIRST_BLOCK_IN_DB_KEY).map_err(|err| {
                     DbError::borsh_cast_message(
                         err,
-                        Some("Failed to serialize DB_META_FIRST_DIFF_IN_DB_KEY".to_string()),
+                        Some("Failed to serialize DB_META_FIRST_BLOCK_IN_DB_KEY".to_string()),
                     )
                 })?,
             )
@@ -116,26 +141,26 @@ impl RocksDBIO {
             Ok(borsh::from_slice::<u64>(&data).map_err(|err| {
                 DbError::borsh_cast_message(
                     err,
-                    Some("Failed to deserialize first diff".to_string()),
+                    Some("Failed to deserialize first block".to_string()),
                 )
             })?)
         } else {
             Err(DbError::db_interaction_error(
-                "First diff not found".to_string(),
+                "First block not found".to_string(),
             ))
         }
     }

-    pub fn get_meta_last_diff_in_db(&self) -> DbResult<u64> {
+    pub fn get_meta_last_block_in_db(&self) -> DbResult<u64> {
         let cf_meta = self.meta_column();
         let res = self
             .db
             .get_cf(
                 &cf_meta,
-                borsh::to_vec(&DB_META_LAST_DIFF_IN_DB_KEY).map_err(|err| {
+                borsh::to_vec(&DB_META_LAST_BLOCK_IN_DB_KEY).map_err(|err| {
                     DbError::borsh_cast_message(
                         err,
-                        Some("Failed to serialize DB_META_LAST_DIFF_IN_DB_KEY".to_string()),
+                        Some("Failed to serialize DB_META_LAST_BLOCK_IN_DB_KEY".to_string()),
                     )
                 })?,
             )
@@ -145,26 +170,26 @@ impl RocksDBIO {
             Ok(borsh::from_slice::<u64>(&data).map_err(|err| {
                 DbError::borsh_cast_message(
                     err,
-                    Some("Failed to deserialize last diff".to_string()),
+                    Some("Failed to deserialize last block".to_string()),
                 )
             })?)
         } else {
             Err(DbError::db_interaction_error(
-                "Last diff not found".to_string(),
+                "Last block not found".to_string(),
             ))
         }
     }

-    pub fn get_meta_is_first_diff_set(&self) -> DbResult<bool> {
+    pub fn get_meta_is_first_block_set(&self) -> DbResult<bool> {
         let cf_meta = self.meta_column();
         let res = self
             .db
             .get_cf(
                 &cf_meta,
-                borsh::to_vec(&DB_META_FIRST_DIFF_SET_KEY).map_err(|err| {
+                borsh::to_vec(&DB_META_FIRST_BLOCK_SET_KEY).map_err(|err| {
                     DbError::borsh_cast_message(
                         err,
-                        Some("Failed to serialize DB_META_FIRST_DIFF_SET_KEY".to_string()),
+                        Some("Failed to serialize DB_META_FIRST_BLOCK_SET_KEY".to_string()),
                     )
                 })?,
             )
@@ -173,45 +198,73 @@ impl RocksDBIO {
         Ok(res.is_some())
     }

-    pub fn put_meta_first_diff_in_db(&self, diff: Block) -> DbResult<()> {
+    pub fn get_meta_last_breakpoint_id(&self) -> DbResult<u64> {
         let cf_meta = self.meta_column();
-        self.db
-            .put_cf(
+        let res = self.db
+            .get_cf(
                 &cf_meta,
-                borsh::to_vec(&DB_META_FIRST_DIFF_IN_DB_KEY).map_err(|err| {
+                borsh::to_vec(&DB_META_LAST_BREAKPOINT_ID).map_err(|err| {
                     DbError::borsh_cast_message(
                         err,
-                        Some("Failed to serialize DB_META_FIRST_DIFF_IN_DB_KEY".to_string()),
-                    )
-                })?,
-                borsh::to_vec(&diff.header.diff_id).map_err(|err| {
-                    DbError::borsh_cast_message(
-                        err,
-                        Some("Failed to serialize first diff id".to_string()),
+                        Some("Failed to serialize DB_META_LAST_BREAKPOINT_ID".to_string()),
                     )
                 })?,
             )
             .map_err(|rerr| DbError::rocksdb_cast_message(rerr, None))?;

-        self.put_diff(diff, true)?;
-        Ok(())
+        if let Some(data) = res {
+            Ok(borsh::from_slice::<u64>(&data).map_err(|err| {
+                DbError::borsh_cast_message(
+                    err,
+                    Some("Failed to deserialize last breakpoint id".to_string()),
+                )
+            })?)
+        } else {
+            Err(DbError::db_interaction_error(
+                "Last breakpoint id not found".to_string(),
+            ))
+        }
     }

-    pub fn put_meta_last_diff_in_db(&self, diff_id: u64) -> DbResult<()> {
+    pub fn put_meta_first_block_in_db(&self, block: Block) -> DbResult<()> {
         let cf_meta = self.meta_column();
         self.db
             .put_cf(
                 &cf_meta,
-                borsh::to_vec(&DB_META_LAST_DIFF_IN_DB_KEY).map_err(|err| {
+                borsh::to_vec(&DB_META_FIRST_BLOCK_IN_DB_KEY).map_err(|err| {
                     DbError::borsh_cast_message(
                         err,
-                        Some("Failed to serialize DB_META_LAST_DIFF_IN_DB_KEY".to_string()),
+                        Some("Failed to serialize DB_META_FIRST_BLOCK_IN_DB_KEY".to_string()),
                     )
                 })?,
-                borsh::to_vec(&diff_id).map_err(|err| {
+                borsh::to_vec(&block.header.block_id).map_err(|err| {
                     DbError::borsh_cast_message(
                         err,
-                        Some("Failed to serialize last diff id".to_string()),
+                        Some("Failed to serialize first block id".to_string()),
+                    )
+                })?,
+            )
+            .map_err(|rerr| DbError::rocksdb_cast_message(rerr, None))?;
+
+        self.put_block(block, true)?;
+        Ok(())
+    }
+
+    pub fn put_meta_last_block_in_db(&self, block_id: u64) -> DbResult<()> {
+        let cf_meta = self.meta_column();
+        self.db
+            .put_cf(
+                &cf_meta,
+                borsh::to_vec(&DB_META_LAST_BLOCK_IN_DB_KEY).map_err(|err| {
+                    DbError::borsh_cast_message(
+                        err,
+                        Some("Failed to serialize DB_META_LAST_BLOCK_IN_DB_KEY".to_string()),
+                    )
+                })?,
+                borsh::to_vec(&block_id).map_err(|err| {
+                    DbError::borsh_cast_message(
+                        err,
+                        Some("Failed to serialize last block id".to_string()),
                     )
                 })?,
             )
@@ -219,15 +272,59 @@ impl RocksDBIO {
         Ok(())
     }

-    pub fn put_meta_is_first_diff_set(&self) -> DbResult<()> {
+    pub fn put_meta_last_finalized_block_id(&self, block_id: Option<u64>) -> DbResult<()> {
         let cf_meta = self.meta_column();
         self.db
             .put_cf(
                 &cf_meta,
-                borsh::to_vec(&DB_META_FIRST_DIFF_SET_KEY).map_err(|err| {
+                borsh::to_vec(&DB_META_LAST_FINALIZED_BLOCK_ID).map_err(|err| {
                     DbError::borsh_cast_message(
                         err,
-                        Some("Failed to serialize DB_META_FIRST_DIFF_SET_KEY".to_string()),
Some("Failed to serialize DB_META_FIRST_DIFF_SET_KEY".to_string()), + Some("Failed to serialize DB_META_LAST_FINALIZED_BLOCK_ID".to_string()), + ) + })?, + borsh::to_vec(&block_id).map_err(|err| { + DbError::borsh_cast_message( + err, + Some("Failed to serialize last block id".to_string()), + ) + })?, + ) + .map_err(|rerr| DbError::rocksdb_cast_message(rerr, None))?; + Ok(()) + } + + pub fn put_meta_last_breakpoint_id(&self, br_id: u64) -> DbResult<()> { + let cf_meta = self.meta_column(); + self.db + .put_cf( + &cf_meta, + borsh::to_vec(&DB_META_LAST_BREAKPOINT_ID).map_err(|err| { + DbError::borsh_cast_message( + err, + Some("Failed to serialize DB_META_LAST_BREAKPOINT_ID".to_string()), + ) + })?, + borsh::to_vec(&br_id).map_err(|err| { + DbError::borsh_cast_message( + err, + Some("Failed to serialize last block id".to_string()), + ) + })?, + ) + .map_err(|rerr| DbError::rocksdb_cast_message(rerr, None))?; + Ok(()) + } + + pub fn put_meta_is_first_block_set(&self) -> DbResult<()> { + let cf_meta = self.meta_column(); + self.db + .put_cf( + &cf_meta, + borsh::to_vec(&DB_META_FIRST_BLOCK_SET_KEY).map_err(|err| { + DbError::borsh_cast_message( + err, + Some("Failed to serialize DB_META_FIRST_BLOCK_SET_KEY".to_string()), ) })?, [1u8; 1], @@ -236,65 +333,161 @@ impl RocksDBIO { Ok(()) } - pub fn put_diff(&self, diff: Block, first: bool) -> DbResult<()> { - let cf_diff = self.diff_column(); + pub fn put_block(&self, block: Block, first: bool) -> DbResult<()> { + let cf_block = self.block_column(); if !first { - let last_curr_diff = self.get_meta_last_diff_in_db()?; + let last_curr_block = self.get_meta_last_block_in_db()?; - if diff.header.diff_id > last_curr_diff { - self.put_meta_last_diff_in_db(diff.header.diff_id)?; + if block.header.block_id > last_curr_block { + self.put_meta_last_block_in_db(block.header.block_id)?; } } self.db .put_cf( - &cf_diff, - borsh::to_vec(&diff.header.diff_id).map_err(|err| { + &cf_block, + borsh::to_vec(&block.header.block_id).map_err(|err| { DbError::borsh_cast_message( err, - Some("Failed to serialize diff id".to_string()), + Some("Failed to serialize block id".to_string()), ) })?, - borsh::to_vec(&HashableBlockData::from(diff)).map_err(|err| { + borsh::to_vec(&block).map_err(|err| { DbError::borsh_cast_message( err, - Some("Failed to serialize diff data".to_string()), + Some("Failed to serialize block data".to_string()), ) })?, ) .map_err(|rerr| DbError::rocksdb_cast_message(rerr, None))?; + + if block.header.block_id.is_multiple_of(BREAKPOINT_INTERVAL) { + self.put_next_breakpoint()?; + } + Ok(()) } - pub fn get_diff(&self, diff_id: u64) -> DbResult { - let cf_diff = self.diff_column(); + pub fn get_block(&self, block_id: u64) -> DbResult { + let cf_block = self.block_column(); let res = self .db .get_cf( - &cf_diff, - borsh::to_vec(&diff_id).map_err(|err| { + &cf_block, + borsh::to_vec(&block_id).map_err(|err| { DbError::borsh_cast_message( err, - Some("Failed to serialize diff id".to_string()), + Some("Failed to serialize block id".to_string()), ) })?, ) .map_err(|rerr| DbError::rocksdb_cast_message(rerr, None))?; if let Some(data) = res { - Ok( - borsh::from_slice::(&data).map_err(|serr| { - DbError::borsh_cast_message( - serr, - Some("Failed to deserialize diff data".to_string()), - ) - })?, - ) + Ok(borsh::from_slice::(&data).map_err(|serr| { + DbError::borsh_cast_message( + serr, + Some("Failed to deserialize block data".to_string()), + ) + })?) 
         } else {
             Err(DbError::db_interaction_error(
                 "Block on this id not found".to_string(),
             ))
         }
     }
-}
\ No newline at end of file
+
+    pub fn put_breakpoint(&self, br_id: u64, breakpoint: V02State) -> DbResult<()> {
+        let cf_br = self.breakpoint_column();
+
+        self.db
+            .put_cf(
+                &cf_br,
+                borsh::to_vec(&br_id).map_err(|err| {
+                    DbError::borsh_cast_message(
+                        err,
+                        Some("Failed to serialize breakpoint id".to_string()),
+                    )
+                })?,
+                borsh::to_vec(&breakpoint).map_err(|err| {
+                    DbError::borsh_cast_message(
+                        err,
+                        Some("Failed to serialize breakpoint data".to_string()),
+                    )
+                })?,
+            )
+            .map_err(|rerr| DbError::rocksdb_cast_message(rerr, None))
+    }
+
+    pub fn get_breakpoint(&self, br_id: u64) -> DbResult<V02State> {
+        let cf_br = self.breakpoint_column();
+        let res = self
+            .db
+            .get_cf(
+                &cf_br,
+                borsh::to_vec(&br_id).map_err(|err| {
+                    DbError::borsh_cast_message(
+                        err,
+                        Some("Failed to serialize breakpoint id".to_string()),
+                    )
+                })?,
+            )
+            .map_err(|rerr| DbError::rocksdb_cast_message(rerr, None))?;
+
+        if let Some(data) = res {
+            Ok(borsh::from_slice::<V02State>(&data).map_err(|serr| {
+                DbError::borsh_cast_message(
+                    serr,
+                    Some("Failed to deserialize breakpoint data".to_string()),
+                )
+            })?)
+        } else {
+            Err(DbError::db_interaction_error(
+                "Breakpoint on this id not found".to_string(),
+            ))
+        }
+    }
+
+    pub fn calculate_state_for_id(&self, block_id: u64) -> DbResult<V02State> {
+        let last_block = self.get_meta_last_block_in_db()?;
+
+        if last_block <= block_id {
+            let br_id = closest_breakpoint_id(block_id);
+            let mut breakpoint = self.get_breakpoint(br_id)?;
+
+            for id in (BREAKPOINT_INTERVAL * br_id)..=block_id {
+                let block = self.get_block(id)?;
+
+                for encoded_transaction in block.body.transactions {
+                    let transaction = NSSATransaction::try_from(&encoded_transaction).unwrap();
+
+                    execute_check_transaction_on_state(&mut breakpoint, transaction).unwrap();
+                }
+            }
+
+            Ok(breakpoint)
+        } else {
+            Err(DbError::db_interaction_error(
+                "Block on this id not found".to_string(),
+            ))
+        }
+    }
+
+    pub fn put_next_breakpoint(&self) -> DbResult<()> {
+        let last_block = self.get_meta_last_block_in_db()?;
+        let breakpoint_id = self.get_meta_last_breakpoint_id()?;
+        let block_to_break_id = breakpoint_id * BREAKPOINT_INTERVAL;
+
+        if last_block <= block_to_break_id {
+            let next_breakpoint = self.calculate_state_for_id(block_to_break_id)?;
+
+            self.put_breakpoint(breakpoint_id, next_breakpoint)?;
+            self.put_meta_last_breakpoint_id(breakpoint_id)
+        } else {
+            Err(DbError::db_interaction_error(
+                "Breakpoint not yet achieved".to_string(),
+            ))
+        }
+    }
+}
diff --git a/storage/src/lib.rs b/storage/src/lib.rs
index 9caf322d..05c4a374 100644
--- a/storage/src/lib.rs
+++ b/storage/src/lib.rs
@@ -1,3 +1,3 @@
 pub mod error;
-pub mod sequencer;
 pub mod indexer;
+pub mod sequencer;
diff --git a/storage/src/sequencer.rs b/storage/src/sequencer.rs
index 613f5120..713772b9 100644
--- a/storage/src/sequencer.rs
+++ b/storage/src/sequencer.rs
@@ -361,4 +361,4 @@ impl RocksDBIO {
         ))
     }
 }
-}
\ No newline at end of file
+}
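A minimal usage sketch of the indexer API introduced by this diff, assuming the crate is named `storage` as the directory layout suggests, that `DbResult` is generic over its success type, and that the genesis `Block` and initial `V02State` are constructed elsewhere; the driver function itself is hypothetical and not part of the change.

use std::path::Path;

use common::block::Block;
use nssa::V02State;
use storage::indexer::{DbResult, RocksDBIO};

/// Hypothetical driver: store freshly produced blocks, then rebuild the state
/// at `target_block_id` from the closest stored breakpoint.
fn index_and_replay(
    db_path: &Path,
    genesis_block: Block,
    genesis_state: V02State,
    new_blocks: Vec<Block>,
    target_block_id: u64,
) -> DbResult<V02State> {
    // On first run this stores the genesis block and breakpoint 0 (the initial state).
    let db = RocksDBIO::open_or_create(db_path, Some(genesis_block), genesis_state)?;

    // `put_block` also triggers a new state breakpoint every BREAKPOINT_INTERVAL blocks.
    for block in new_blocks {
        db.put_block(block, false)?;
    }

    // Replays transactions from the nearest breakpoint up to the requested block.
    db.calculate_state_for_id(target_block_id)
}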