diff --git a/common/src/block.rs b/common/src/block.rs
index a5fc9218..91ad1175 100644
--- a/common/src/block.rs
+++ b/common/src/block.rs
@@ -4,6 +4,15 @@ use sha2::{Digest, Sha256, digest::FixedOutput};
 use crate::{HashType, transaction::NSSATransaction};
 
 pub type MantleMsgId = [u8; 32];
+pub type BlockHash = HashType;
+pub type BlockId = u64;
+pub type TimeStamp = u64;
+
+#[derive(Debug, Clone, BorshSerialize, BorshDeserialize)]
+pub struct BlockMeta {
+    pub hash: BlockHash,
+    pub msg_id: MantleMsgId,
+}
 
 #[derive(Debug, Clone)]
 /// Our own hasher.
@@ -19,10 +28,6 @@ impl OwnHasher {
     }
 }
 
-pub type BlockHash = HashType;
-pub type BlockId = u64;
-pub type TimeStamp = u64;
-
 #[derive(Debug, Clone, BorshSerialize, BorshDeserialize)]
 pub struct BlockHeader {
     pub block_id: BlockId,
diff --git a/sequencer_core/src/block_store.rs b/sequencer_core/src/block_store.rs
index 53518a70..49464dab 100644
--- a/sequencer_core/src/block_store.rs
+++ b/sequencer_core/src/block_store.rs
@@ -1,7 +1,11 @@
 use std::{collections::HashMap, path::Path};
 
 use anyhow::Result;
-use common::{HashType, block::Block, transaction::NSSATransaction};
+use common::{
+    HashType,
+    block::{Block, BlockMeta, MantleMsgId},
+    transaction::NSSATransaction,
+};
 use nssa::V02State;
 use storage::RocksDBIO;
 
@@ -20,10 +24,10 @@ impl SequencerStore {
     /// ATTENTION: Will overwrite genesis block.
     pub fn open_db_with_genesis(
         location: &Path,
-        genesis_block: Option<&Block>,
+        genesis_block: Option<(&Block, MantleMsgId)>,
         signing_key: nssa::PrivateKey,
     ) -> Result<Self> {
-        let tx_hash_to_block_map = if let Some(block) = &genesis_block {
+        let tx_hash_to_block_map = if let Some((block, _msg_id)) = &genesis_block {
             block_to_transactions_map(block)
         } else {
             HashMap::new()
         };
@@ -68,8 +72,8 @@ impl SequencerStore {
         None
     }
 
-    pub fn latest_block_hash(&self) -> Result<HashType> {
-        Ok(self.dbio.latest_block_hash()?)
+    pub fn latest_block_meta(&self) -> Result<BlockMeta> {
+        Ok(self.dbio.latest_block_meta()?)
     }
 
     pub fn genesis_id(&self) -> u64 {
@@ -84,9 +88,14 @@ impl SequencerStore {
         self.dbio.get_all_blocks().map(|res| Ok(res?))
     }
 
-    pub(crate) fn update(&mut self, block: &Block, state: &V02State) -> Result<()> {
+    pub(crate) fn update(
+        &mut self,
+        block: &Block,
+        msg_id: MantleMsgId,
+        state: &V02State,
+    ) -> Result<()> {
         let new_transactions_map = block_to_transactions_map(block);
-        self.dbio.atomic_update(block, state)?;
+        self.dbio.atomic_update(block, msg_id, state)?;
         self.tx_hash_to_block_map.extend(new_transactions_map);
         Ok(())
     }
@@ -128,8 +137,12 @@ mod tests {
         let genesis_block = genesis_block_hashable_data.into_pending_block(&signing_key, [0; 32]);
 
         // Start an empty node store
-        let mut node_store =
-            SequencerStore::open_db_with_genesis(path, Some(&genesis_block), signing_key).unwrap();
+        let mut node_store = SequencerStore::open_db_with_genesis(
+            path,
+            Some((&genesis_block, [0; 32])),
+            signing_key,
+        )
+        .unwrap();
 
         let tx = common::test_utils::produce_dummy_empty_transaction();
         let block = common::test_utils::produce_dummy_block(1, None, vec![tx.clone()]);
@@ -139,14 +152,14 @@
         assert_eq!(None, retrieved_tx);
         // Add the block with the transaction
         let dummy_state = V02State::new_with_genesis_accounts(&[], &[]);
-        node_store.update(&block, &dummy_state).unwrap();
+        node_store.update(&block, [1; 32], &dummy_state).unwrap();
         // Try again
         let retrieved_tx = node_store.get_transaction_by_hash(tx.hash());
         assert_eq!(Some(tx), retrieved_tx);
     }
 
     #[test]
-    fn test_latest_block_hash_returns_genesis_hash_initially() {
+    fn test_latest_block_meta_returns_genesis_meta_initially() {
         let temp_dir = tempdir().unwrap();
         let path = temp_dir.path();
 
@@ -162,16 +175,21 @@
         let genesis_block = genesis_block_hashable_data.into_pending_block(&signing_key, [0; 32]);
         let genesis_hash = genesis_block.header.hash;
 
-        let node_store =
-            SequencerStore::open_db_with_genesis(path, Some(&genesis_block), signing_key).unwrap();
+        let node_store = SequencerStore::open_db_with_genesis(
+            path,
+            Some((&genesis_block, [0; 32])),
+            signing_key,
+        )
+        .unwrap();
 
         // Verify that initially the latest block hash equals genesis hash
-        let latest_hash = node_store.latest_block_hash().unwrap();
-        assert_eq!(latest_hash, genesis_hash);
+        let latest_meta = node_store.latest_block_meta().unwrap();
+        assert_eq!(latest_meta.hash, genesis_hash);
+        assert_eq!(latest_meta.msg_id, [0; 32]);
     }
 
     #[test]
-    fn test_latest_block_hash_updates_after_new_block() {
+    fn test_latest_block_meta_updates_after_new_block() {
         let temp_dir = tempdir().unwrap();
         let path = temp_dir.path();
 
@@ -185,19 +203,27 @@
         };
         let genesis_block = genesis_block_hashable_data.into_pending_block(&signing_key, [0; 32]);
 
-        let mut node_store =
-            SequencerStore::open_db_with_genesis(path, Some(&genesis_block), signing_key).unwrap();
+        let mut node_store = SequencerStore::open_db_with_genesis(
+            path,
+            Some((&genesis_block, [0; 32])),
+            signing_key,
+        )
+        .unwrap();
 
         // Add a new block
         let tx = common::test_utils::produce_dummy_empty_transaction();
         let block = common::test_utils::produce_dummy_block(1, None, vec![tx.clone()]);
         let block_hash = block.header.hash;
+        let block_msg_id = [1; 32];
         let dummy_state = V02State::new_with_genesis_accounts(&[], &[]);
-        node_store.update(&block, &dummy_state).unwrap();
+        node_store
+            .update(&block, block_msg_id, &dummy_state)
+            .unwrap();
 
-        // Verify that the latest block hash now equals the new block's hash
-        let latest_hash = node_store.latest_block_hash().unwrap();
-        assert_eq!(latest_hash, block_hash);
+        // Verify that the latest block meta now equals the new block's hash and msg_id
+        let latest_meta = node_store.latest_block_meta().unwrap();
+        assert_eq!(latest_meta.hash, block_hash);
+        assert_eq!(latest_meta.msg_id, block_msg_id);
     }
 }
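The `BlockMeta` introduced above is just a borsh-encoded pair, so the storage round-trip these tests exercise can be pictured in isolation. A minimal sketch, assuming the `borsh` crate with its `derive` feature; the alias `BlockHash = [u8; 32]` is a stand-in here, since the real `HashType` lives in `common`:

```rust
use borsh::{BorshDeserialize, BorshSerialize};

// Stand-ins mirroring the aliases added in common/src/block.rs.
pub type BlockHash = [u8; 32];
pub type MantleMsgId = [u8; 32];

#[derive(Debug, Clone, PartialEq, BorshSerialize, BorshDeserialize)]
pub struct BlockMeta {
    pub hash: BlockHash,
    pub msg_id: MantleMsgId,
}

fn main() -> Result<(), borsh::io::Error> {
    let meta = BlockMeta {
        hash: [7; 32],
        msg_id: [9; 32],
    };

    // Writing mirrors put_meta_latest_block_meta: struct -> borsh bytes.
    let bytes = borsh::to_vec(&meta)?;

    // Reading mirrors latest_block_meta: borsh bytes -> struct.
    let decoded: BlockMeta = borsh::from_slice(&bytes)?;
    assert_eq!(meta, decoded);
    Ok(())
}
```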
diff --git a/sequencer_core/src/lib.rs b/sequencer_core/src/lib.rs
index f9b8f661..49ea56ee 100644
--- a/sequencer_core/src/lib.rs
+++ b/sequencer_core/src/lib.rs
@@ -1,11 +1,12 @@
 use std::{fmt::Display, path::Path, time::Instant};
 
 use anyhow::{Context as _, Result, anyhow};
+use bedrock_client::SignedMantleTx;
 #[cfg(feature = "testnet")]
 use common::PINATA_BASE58;
 use common::{
     HashType,
-    block::{BedrockStatus, Block, HashableBlockData, MantleMsgId},
+    block::{BedrockStatus, Block, HashableBlockData},
     transaction::NSSATransaction,
 };
@@ -15,7 +16,7 @@ use mempool::{MemPool, MemPoolHandle};
 use serde::{Deserialize, Serialize};
 
 use crate::{
-    block_settlement_client::{BlockSettlementClient, BlockSettlementClientTrait},
+    block_settlement_client::{BlockSettlementClient, BlockSettlementClientTrait, MsgId},
     block_store::SequencerStore,
     indexer_client::{IndexerClient, IndexerClientTrait},
 };
@@ -38,7 +39,6 @@ pub struct SequencerCore<
     chain_height: u64,
     block_settlement_client: BC,
     indexer_client: IC,
-    last_bedrock_msg_id: MantleMsgId,
 }
 
 #[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
@@ -75,11 +75,26 @@ impl SequencerCore
     {
-        let block = self
+        let (tx, msg_id) = self
             .produce_new_block_with_mempool_transactions()
             .context("Failed to produce new block with mempool transactions")?;
-        let (tx, msg_id) = self
-            .block_settlement_client
-            .create_inscribe_tx(&block)
-            .with_context(|| {
-                format!(
-                    "Failed to create inscribe transaction for block with id {}",
-                    block.header.block_id
-                )
-            })?;
-        self.last_bedrock_msg_id = msg_id.into();
 
         match self
             .block_settlement_client
             .submit_inscribe_tx_to_bedrock(tx)
@@ -197,8 +192,10 @@ impl SequencerCore
-    pub fn produce_new_block_with_mempool_transactions(&mut self) -> Result<Block> {
+    /// Produces new block from transactions in mempool and packs it into a SignedMantleTx.
+    pub fn produce_new_block_with_mempool_transactions(
+        &mut self,
+    ) -> Result<(SignedMantleTx, MsgId)> {
         let now = Instant::now();
         let new_block_height = self.chain_height + 1;
@@ -225,25 +222,35 @@ impl SequencerCore
@@ -348,6 +355,10 @@ fn load_or_create_signing_key(path: &Path) -> Result<Ed25519Key> {
     } else {
         let mut key_bytes = [0u8; ED25519_SECRET_KEY_SIZE];
         rand::RngCore::fill_bytes(&mut rand::thread_rng(), &mut key_bytes);
+        // Create parent directory if it doesn't exist
+        if let Some(parent) = path.parent() {
+            std::fs::create_dir_all(parent)?;
+        }
         std::fs::write(path, key_bytes)?;
         Ok(Ed25519Key::from_bytes(&key_bytes))
     }
@@ -652,9 +663,9 @@ mod tests {
         let tx = common::test_utils::produce_dummy_empty_transaction();
         mempool_handle.push(tx).await.unwrap();
 
-        let block = sequencer.produce_new_block_with_mempool_transactions();
-        assert!(block.is_ok());
-        assert_eq!(block.unwrap().header.block_id, genesis_height + 1);
+        let result = sequencer.produce_new_block_with_mempool_transactions();
+        assert!(result.is_ok());
+        assert_eq!(sequencer.chain_height, genesis_height + 1);
     }
 
     #[tokio::test]
@@ -677,12 +688,13 @@
         mempool_handle.push(tx_replay).await.unwrap();
 
         // Create block
-        let current_height = sequencer
+        sequencer
             .produce_new_block_with_mempool_transactions()
-            .unwrap()
-            .header
-            .block_id;
-        let block = sequencer.store.get_block_at_id(current_height).unwrap();
+            .unwrap();
+        let block = sequencer
+            .store
+            .get_block_at_id(sequencer.chain_height)
+            .unwrap();
 
         // Only one should be included in the block
         assert_eq!(block.body.transactions, vec![tx.clone()]);
@@ -703,22 +715,24 @@
         // The transaction should be included the first time
         mempool_handle.push(tx.clone()).await.unwrap();
-        let current_height = sequencer
+        sequencer
             .produce_new_block_with_mempool_transactions()
-            .unwrap()
-            .header
-            .block_id;
-        let block = sequencer.store.get_block_at_id(current_height).unwrap();
+            .unwrap();
+        let block = sequencer
+            .store
+            .get_block_at_id(sequencer.chain_height)
+            .unwrap();
         assert_eq!(block.body.transactions, vec![tx.clone()]);
 
         // Add same transaction should fail
         mempool_handle.push(tx.clone()).await.unwrap();
-        let current_height = sequencer
+        sequencer
             .produce_new_block_with_mempool_transactions()
-            .unwrap()
-            .header
-            .block_id;
-        let block = sequencer.store.get_block_at_id(current_height).unwrap();
+            .unwrap();
+        let block = sequencer
+            .store
+            .get_block_at_id(sequencer.chain_height)
+            .unwrap();
         assert!(block.body.transactions.is_empty());
     }
@@ -746,12 +760,13 @@
         );
         mempool_handle.push(tx.clone()).await.unwrap();
 
-        let current_height = sequencer
+        sequencer
             .produce_new_block_with_mempool_transactions()
-            .unwrap()
-            .header
-            .block_id;
-        let block = sequencer.store.get_block_at_id(current_height).unwrap();
+            .unwrap();
+        let block = sequencer
+            .store
+            .get_block_at_id(sequencer.chain_height)
+            .unwrap();
         assert_eq!(block.body.transactions, vec![tx.clone()]);
     }
@@ -814,33 +829,75 @@ mod tests {
     }
 
     #[tokio::test]
-    async fn test_last_bedrock_msg_id_updated_even_when_posting_fails() {
-        use crate::mock::{MockBlockSettlementClientWithError, MockIndexerClient};
-
+    async fn test_produce_block_with_correct_prev_meta_after_restart() {
         let config = setup_sequencer_config();
-        let (mut sequencer, mempool_handle) = crate::SequencerCore::<
-            MockBlockSettlementClientWithError,
-            MockIndexerClient,
-        >::start_from_config(config)
-        .await;
+        let acc1_account_id = config.initial_accounts[0].account_id;
+        let acc2_account_id = config.initial_accounts[1].account_id;
 
-        // Store the initial last_bedrock_msg_id (should be genesis parent msg id)
-        let initial_msg_id = sequencer.last_bedrock_msg_id;
-        assert_eq!(initial_msg_id, [0; 32]);
+        // Step 1: Create initial database with some block metadata
+        let expected_prev_meta = {
+            let (mut sequencer, mempool_handle) =
+                SequencerCoreWithMockClients::start_from_config(config.clone()).await;
 
-        // Add a transaction to the mempool
-        let tx = common::test_utils::produce_dummy_empty_transaction();
-        mempool_handle.push(tx).await.unwrap();
+            let signing_key = PrivateKey::try_new([1; 32]).unwrap();
 
-        // Produce a block and post to settlement layer (which will fail)
-        let result = sequencer
-            .produce_new_block_and_post_to_settlement_layer()
-            .await;
+            // Add a transaction and produce a block to set up block metadata
+            let tx = common::test_utils::create_transaction_native_token_transfer(
+                acc1_account_id,
+                0,
+                acc2_account_id,
+                100,
+                signing_key,
+            );
 
-        // The method should succeed even though posting to Bedrock failed
-        assert!(result.is_ok());
+            mempool_handle.push(tx).await.unwrap();
+            sequencer
+                .produce_new_block_with_mempool_transactions()
+                .unwrap();
 
-        // Verify that last_bedrock_msg_id was updated despite the posting failure
-        assert_ne!(sequencer.last_bedrock_msg_id, initial_msg_id);
+            // Get the metadata of the last block produced
+            sequencer.store.latest_block_meta().unwrap()
+        };
+
+        // Step 2: Restart sequencer from the same storage
+        let (mut sequencer, mempool_handle) =
+            SequencerCoreWithMockClients::start_from_config(config.clone()).await;
+
+        // Step 3: Submit a new transaction
+        let signing_key = PrivateKey::try_new([1; 32]).unwrap();
+        let tx = common::test_utils::create_transaction_native_token_transfer(
+            acc1_account_id,
+            1, // Next nonce
+            acc2_account_id,
+            50,
+            signing_key,
+        );
+
+        mempool_handle.push(tx.clone()).await.unwrap();
+
+        // Step 4: Produce new block
+        sequencer
+            .produce_new_block_with_mempool_transactions()
+            .unwrap();
+
+        // Step 5: Verify the new block has correct previous block metadata
+        let new_block = sequencer
+            .store
+            .get_block_at_id(sequencer.chain_height)
+            .unwrap();
+
+        assert_eq!(
+            new_block.header.prev_block_hash, expected_prev_meta.hash,
+            "New block's prev_block_hash should match the stored metadata hash"
+        );
+        assert_eq!(
+            new_block.bedrock_parent_id, expected_prev_meta.msg_id,
+            "New block's bedrock_parent_id should match the stored metadata msg_id"
+        );
+        assert_eq!(
+            new_block.body.transactions,
+            vec![tx],
+            "New block should contain the submitted transaction"
+        );
    }
 }
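The restart test above pins down the invariant that motivates this change: a freshly produced block must link to the last *persisted* block's hash and Bedrock message id, not to an in-memory `last_bedrock_msg_id` that is lost on restart. A minimal sketch of that linkage rule, with the real `Block`/`BlockMeta` types trimmed to the fields the test asserts (field names follow the diff; the structs themselves are illustrative):

```rust
/// Illustrative stand-ins: only the two linkage fields the test checks.
struct BlockMeta {
    hash: [u8; 32],
    msg_id: [u8; 32],
}

struct NewBlock {
    prev_block_hash: [u8; 32],
    bedrock_parent_id: [u8; 32],
}

/// The rule the restart test asserts: a new block points at the last
/// persisted block's hash and at its Bedrock message id.
fn links_to(parent: &BlockMeta, block: &NewBlock) -> bool {
    block.prev_block_hash == parent.hash && block.bedrock_parent_id == parent.msg_id
}

fn main() {
    let parent = BlockMeta {
        hash: [7; 32],
        msg_id: [9; 32],
    };
    let block = NewBlock {
        prev_block_hash: parent.hash,
        bedrock_parent_id: parent.msg_id,
    };
    assert!(links_to(&parent, &block));
}
```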
diff --git a/storage/src/lib.rs b/storage/src/lib.rs
index b9bbd7d5..a9ce2d52 100644
--- a/storage/src/lib.rs
+++ b/storage/src/lib.rs
@@ -1,6 +1,6 @@
 use std::{path::Path, sync::Arc};
 
-use common::block::Block;
+use common::block::{Block, BlockMeta, MantleMsgId};
 use error::DbError;
 use nssa::V02State;
 use rocksdb::{
@@ -29,8 +29,8 @@
 pub const DB_META_LAST_BLOCK_IN_DB_KEY: &str = "last_block_in_db";
 pub const DB_META_FIRST_BLOCK_SET_KEY: &str = "first_block_set";
 /// Key base for storing metainformation about the last finalized block on Bedrock
 pub const DB_META_LAST_FINALIZED_BLOCK_ID: &str = "last_finalized_block_id";
-/// Key base for storing metainformation about the latest block hash
-pub const DB_META_LATEST_BLOCK_HASH_KEY: &str = "latest_block_hash";
+/// Key base for storing metainformation about the latest block meta
+pub const DB_META_LATEST_BLOCK_META_KEY: &str = "latest_block_meta";
 /// Key base for storing the NSSA state
 pub const DB_NSSA_STATE_KEY: &str = "nssa_state";
 
@@ -49,7 +49,10 @@ pub struct RocksDBIO {
 }
 
 impl RocksDBIO {
-    pub fn open_or_create(path: &Path, start_block: Option<&Block>) -> DbResult<Self> {
+    pub fn open_or_create(
+        path: &Path,
+        start_block: Option<(&Block, MantleMsgId)>,
+    ) -> DbResult<Self> {
         let mut cf_opts = Options::default();
         cf_opts.set_max_write_buffer_number(16);
         // ToDo: Add more column families for different data
@@ -75,13 +78,16 @@ impl RocksDBIO {
 
         if is_start_set {
             Ok(dbio)
-        } else if let Some(block) = start_block {
+        } else if let Some((block, msg_id)) = start_block {
             let block_id = block.header.block_id;
-            dbio.put_meta_first_block_in_db(block)?;
+            dbio.put_meta_first_block_in_db(block, msg_id)?;
             dbio.put_meta_is_first_block_set()?;
             dbio.put_meta_last_block_in_db(block_id)?;
             dbio.put_meta_last_finalized_block_id(None)?;
-            dbio.put_meta_latest_block_hash(block.header.hash)?;
+            dbio.put_meta_latest_block_meta(&BlockMeta {
+                hash: block.header.hash,
+                msg_id,
+            })?;
 
             Ok(dbio)
         } else {
@@ -211,7 +217,7 @@ impl RocksDBIO {
         Ok(())
     }
 
-    pub fn put_meta_first_block_in_db(&self, block: &Block) -> DbResult<()> {
+    pub fn put_meta_first_block_in_db(&self, block: &Block, msg_id: MantleMsgId) -> DbResult<()> {
         let cf_meta = self.meta_column();
         self.db
             .put_cf(
@@ -232,7 +238,7 @@ impl RocksDBIO {
             .map_err(|rerr| DbError::rocksdb_cast_message(rerr, None))?;
 
         let mut batch = WriteBatch::default();
-        self.put_block(block, true, &mut batch)?;
+        self.put_block(block, msg_id, true, &mut batch)?;
         self.db.write(batch).map_err(|rerr| {
             DbError::rocksdb_cast_message(
                 rerr,
@@ -328,21 +334,21 @@ impl RocksDBIO {
         Ok(())
     }
 
-    fn put_meta_latest_block_hash(&self, block_hash: common::HashType) -> DbResult<()> {
+    fn put_meta_latest_block_meta(&self, block_meta: &BlockMeta) -> DbResult<()> {
         let cf_meta = self.meta_column();
         self.db
             .put_cf(
                 &cf_meta,
-                borsh::to_vec(&DB_META_LATEST_BLOCK_HASH_KEY).map_err(|err| {
+                borsh::to_vec(&DB_META_LATEST_BLOCK_META_KEY).map_err(|err| {
                     DbError::borsh_cast_message(
                         err,
-                        Some("Failed to serialize DB_META_LATEST_BLOCK_HASH_KEY".to_string()),
+                        Some("Failed to serialize DB_META_LATEST_BLOCK_META_KEY".to_string()),
                     )
                 })?,
-                borsh::to_vec(&block_hash).map_err(|err| {
+                borsh::to_vec(&block_meta).map_err(|err| {
                     DbError::borsh_cast_message(
                         err,
-                        Some("Failed to serialize latest block hash".to_string()),
+                        Some("Failed to serialize latest block meta".to_string()),
                     )
                 })?,
             )
@@ -350,60 +356,66 @@ impl RocksDBIO {
         Ok(())
     }
 
-    fn put_meta_latest_block_hash_batch(
+    fn put_meta_latest_block_meta_batch(
         &self,
-        block_hash: common::HashType,
+        block_meta: &BlockMeta,
         batch: &mut WriteBatch,
     ) -> DbResult<()> {
         let cf_meta = self.meta_column();
         batch.put_cf(
             &cf_meta,
-            borsh::to_vec(&DB_META_LATEST_BLOCK_HASH_KEY).map_err(|err| {
+            borsh::to_vec(&DB_META_LATEST_BLOCK_META_KEY).map_err(|err| {
                 DbError::borsh_cast_message(
                     err,
-                    Some("Failed to serialize DB_META_LATEST_BLOCK_HASH_KEY".to_string()),
+                    Some("Failed to serialize DB_META_LATEST_BLOCK_META_KEY".to_string()),
                 )
             })?,
-            borsh::to_vec(&block_hash).map_err(|err| {
+            borsh::to_vec(&block_meta).map_err(|err| {
                 DbError::borsh_cast_message(
                     err,
-                    Some("Failed to serialize latest block hash".to_string()),
+                    Some("Failed to serialize latest block meta".to_string()),
                 )
             })?,
         );
         Ok(())
     }
 
-    pub fn latest_block_hash(&self) -> DbResult<common::HashType> {
+    pub fn latest_block_meta(&self) -> DbResult<BlockMeta> {
         let cf_meta = self.meta_column();
         let res = self
             .db
             .get_cf(
                 &cf_meta,
-                borsh::to_vec(&DB_META_LATEST_BLOCK_HASH_KEY).map_err(|err| {
+                borsh::to_vec(&DB_META_LATEST_BLOCK_META_KEY).map_err(|err| {
                     DbError::borsh_cast_message(
                         err,
-                        Some("Failed to serialize DB_META_LATEST_BLOCK_HASH_KEY".to_string()),
+                        Some("Failed to serialize DB_META_LATEST_BLOCK_META_KEY".to_string()),
                     )
                 })?,
             )
             .map_err(|rerr| DbError::rocksdb_cast_message(rerr, None))?;
 
         if let Some(data) = res {
-            Ok(borsh::from_slice::<common::HashType>(&data).map_err(|err| {
+            Ok(borsh::from_slice::<BlockMeta>(&data).map_err(|err| {
                 DbError::borsh_cast_message(
                     err,
-                    Some("Failed to deserialize latest block hash".to_string()),
+                    Some("Failed to deserialize latest block meta".to_string()),
                 )
             })?)
         } else {
             Err(DbError::db_interaction_error(
-                "Latest block hash not found".to_string(),
+                "Latest block meta not found".to_string(),
             ))
         }
     }
 
-    pub fn put_block(&self, block: &Block, first: bool, batch: &mut WriteBatch) -> DbResult<()> {
+    pub fn put_block(
+        &self,
+        block: &Block,
+        msg_id: MantleMsgId,
+        first: bool,
+        batch: &mut WriteBatch,
+    ) -> DbResult<()> {
         let cf_block = self.block_column();
 
         if !first {
@@ -411,7 +423,13 @@ impl RocksDBIO {
 
             if block.header.block_id > last_curr_block {
                 self.put_meta_last_block_in_db_batch(block.header.block_id, batch)?;
-                self.put_meta_latest_block_hash_batch(block.header.hash, batch)?;
+                self.put_meta_latest_block_meta_batch(
+                    &BlockMeta {
+                        hash: block.header.hash,
+                        msg_id,
+                    },
+                    batch,
+                )?;
             }
         }
@@ -530,10 +548,15 @@ impl RocksDBIO {
         })
     }
 
-    pub fn atomic_update(&self, block: &Block, state: &V02State) -> DbResult<()> {
+    pub fn atomic_update(
+        &self,
+        block: &Block,
+        msg_id: MantleMsgId,
+        state: &V02State,
+    ) -> DbResult<()> {
         let block_id = block.header.block_id;
         let mut batch = WriteBatch::default();
-        self.put_block(block, false, &mut batch)?;
+        self.put_block(block, msg_id, false, &mut batch)?;
         self.put_nssa_state_in_db(state, &mut batch)?;
         self.db.write(batch).map_err(|rerr| {
             DbError::rocksdb_cast_message(
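One detail of `put_block` worth noting: the latest-meta pointer only advances when the incoming block id is strictly greater than the stored last id, so re-writing an older block can never roll `latest_block_meta` back. A sketch of that guard over a plain in-memory map (RocksDB column families, write batches, and error handling omitted; `MetaStore` is illustrative):

```rust
use std::collections::HashMap;

#[derive(Clone, PartialEq, Debug)]
struct BlockMeta {
    hash: [u8; 32],
    msg_id: [u8; 32],
}

struct MetaStore {
    last_block_id: u64,
    latest_meta: BlockMeta,
    blocks: HashMap<u64, BlockMeta>,
}

impl MetaStore {
    /// Mirrors the put_block guard: always store the block, but only move
    /// the "latest" pointer forward when the id is strictly newer.
    fn put_block(&mut self, id: u64, meta: BlockMeta) {
        self.blocks.insert(id, meta.clone());
        if id > self.last_block_id {
            self.last_block_id = id;
            self.latest_meta = meta;
        }
    }
}

fn main() {
    let genesis = BlockMeta { hash: [0; 32], msg_id: [0; 32] };
    let mut store = MetaStore {
        last_block_id: 0,
        latest_meta: genesis.clone(),
        blocks: HashMap::new(),
    };

    let newer = BlockMeta { hash: [1; 32], msg_id: [1; 32] };
    store.put_block(1, newer.clone());
    assert_eq!(store.latest_meta, newer);

    // Re-inserting an older block does not roll the pointer back.
    store.put_block(0, genesis);
    assert_eq!(store.latest_meta, newer);
}
```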