From 0a8d6f1eb43e2072574757d7ced2bb07c5e7e55b Mon Sep 17 00:00:00 2001
From: Daniil Polyakov
Date: Wed, 11 Feb 2026 23:47:36 +0300
Subject: [PATCH 01/16] fix: always store latest block hash in db

---
 sequencer_core/src/block_store.rs |  60 ++++++++++++++++-
 sequencer_core/src/lib.rs         |  17 ++---
 storage/src/lib.rs                | 105 +++++++++++++++++++++++++++++-
 3 files changed, 167 insertions(+), 15 deletions(-)

diff --git a/sequencer_core/src/block_store.rs b/sequencer_core/src/block_store.rs
index 3b97653d..53518a70 100644
--- a/sequencer_core/src/block_store.rs
+++ b/sequencer_core/src/block_store.rs
@@ -68,8 +68,8 @@ impl SequencerStore {
         None
     }

-    pub fn insert(&mut self, tx: &NSSATransaction, block_id: u64) {
-        self.tx_hash_to_block_map.insert(tx.hash(), block_id);
+    pub fn latest_block_hash(&self) -> Result<HashType> {
+        Ok(self.dbio.latest_block_hash()?)
     }

     pub fn genesis_id(&self) -> u64 {
@@ -144,4 +144,60 @@ mod tests {
         let retrieved_tx = node_store.get_transaction_by_hash(tx.hash());
         assert_eq!(Some(tx), retrieved_tx);
     }
+
+    #[test]
+    fn test_latest_block_hash_returns_genesis_hash_initially() {
+        let temp_dir = tempdir().unwrap();
+        let path = temp_dir.path();
+
+        let signing_key = sequencer_sign_key_for_testing();
+
+        let genesis_block_hashable_data = HashableBlockData {
+            block_id: 0,
+            prev_block_hash: HashType([0; 32]),
+            timestamp: 0,
+            transactions: vec![],
+        };
+
+        let genesis_block = genesis_block_hashable_data.into_pending_block(&signing_key, [0; 32]);
+        let genesis_hash = genesis_block.header.hash;
+
+        let node_store =
+            SequencerStore::open_db_with_genesis(path, Some(&genesis_block), signing_key).unwrap();
+
+        // Verify that initially the latest block hash equals genesis hash
+        let latest_hash = node_store.latest_block_hash().unwrap();
+        assert_eq!(latest_hash, genesis_hash);
+    }
+
+    #[test]
+    fn test_latest_block_hash_updates_after_new_block() {
+        let temp_dir = tempdir().unwrap();
+        let path = temp_dir.path();
+
+        let signing_key = sequencer_sign_key_for_testing();
+
+        let genesis_block_hashable_data = HashableBlockData {
+            block_id: 0,
+            prev_block_hash: HashType([0; 32]),
+            timestamp: 0,
+            transactions: vec![],
+        };
+
+        let genesis_block = genesis_block_hashable_data.into_pending_block(&signing_key, [0; 32]);
+        let mut node_store =
+            SequencerStore::open_db_with_genesis(path, Some(&genesis_block), signing_key).unwrap();
+
+        // Add a new block
+        let tx = common::test_utils::produce_dummy_empty_transaction();
+        let block = common::test_utils::produce_dummy_block(1, None, vec![tx.clone()]);
+        let block_hash = block.header.hash;
+
+        let dummy_state = V02State::new_with_genesis_accounts(&[], &[]);
+        node_store.update(&block, &dummy_state).unwrap();
+
+        // Verify that the latest block hash now equals the new block's hash
+        let latest_hash = node_store.latest_block_hash().unwrap();
+        assert_eq!(latest_hash, block_hash);
+    }
 }
diff --git a/sequencer_core/src/lib.rs b/sequencer_core/src/lib.rs
index c662c167..8d719d65 100644
--- a/sequencer_core/src/lib.rs
+++ b/sequencer_core/src/lib.rs
@@ -1,6 +1,6 @@
 use std::{fmt::Display, path::Path, time::Instant};

-use anyhow::{Result, anyhow};
+use anyhow::{Context as _, Result, anyhow};
 #[cfg(feature = "testnet")]
 use common::PINATA_BASE58;
 use common::{
@@ -214,7 +214,10 @@ impl SequencerCore
@@ @@ impl SequencerCore
diff --git a/storage/src/lib.rs b/storage/src/lib.rs
--- a/storage/src/lib.rs
+++ b/storage/src/lib.rs
+    fn put_meta_last_block_in_db_batch(
+        &self,
+        block_id: u64,
+        batch: &mut WriteBatch,
+    ) -> DbResult<()> {
+        let cf_meta = self.meta_column();
+        batch.put_cf(
+            &cf_meta,
+            borsh::to_vec(&DB_META_LAST_BLOCK_IN_DB_KEY).map_err(|err| {
+                DbError::borsh_cast_message(
+                    err,
+                    Some("Failed to serialize DB_META_LAST_BLOCK_IN_DB_KEY".to_string()),
+                )
+            })?,
+            borsh::to_vec(&block_id).map_err(|err| {
+                DbError::borsh_cast_message(
+                    err,
+                    Some("Failed to serialize last block id".to_string()),
+                )
+            })?,
+        );
+        Ok(())
+    }
+
     pub fn put_meta_last_finalized_block_id(&self, block_id: Option<u64>) -> DbResult<()> {
         let cf_meta = self.meta_column();
         self.db
@@ -301,6 +328,81 @@ impl RocksDBIO {
         Ok(())
     }

+    fn put_meta_latest_block_hash(&self, block_hash: common::HashType) -> DbResult<()> {
+        let cf_meta = self.meta_column();
+        self.db
+            .put_cf(
+                &cf_meta,
+                borsh::to_vec(&DB_META_LATEST_BLOCK_HASH_KEY).map_err(|err| {
+                    DbError::borsh_cast_message(
+                        err,
+                        Some("Failed to serialize DB_META_LATEST_BLOCK_HASH_KEY".to_string()),
+                    )
+                })?,
+                borsh::to_vec(&block_hash).map_err(|err| {
+                    DbError::borsh_cast_message(
+                        err,
+                        Some("Failed to serialize latest block hash".to_string()),
+                    )
+                })?,
+            )
+            .map_err(|rerr| DbError::rocksdb_cast_message(rerr, None))?;
+        Ok(())
+    }
+
+    fn put_meta_latest_block_hash_batch(
+        &self,
+        block_hash: common::HashType,
+        batch: &mut WriteBatch,
+    ) -> DbResult<()> {
+        let cf_meta = self.meta_column();
+        batch.put_cf(
+            &cf_meta,
+            borsh::to_vec(&DB_META_LATEST_BLOCK_HASH_KEY).map_err(|err| {
+                DbError::borsh_cast_message(
+                    err,
+                    Some("Failed to serialize DB_META_LATEST_BLOCK_HASH_KEY".to_string()),
+                )
+            })?,
+            borsh::to_vec(&block_hash).map_err(|err| {
+                DbError::borsh_cast_message(
+                    err,
+                    Some("Failed to serialize latest block hash".to_string()),
+                )
+            })?,
+        );
+        Ok(())
+    }
+
+    pub fn latest_block_hash(&self) -> DbResult<common::HashType> {
+        let cf_meta = self.meta_column();
+        let res = self
+            .db
+            .get_cf(
+                &cf_meta,
+                borsh::to_vec(&DB_META_LATEST_BLOCK_HASH_KEY).map_err(|err| {
+                    DbError::borsh_cast_message(
+                        err,
+                        Some("Failed to serialize DB_META_LATEST_BLOCK_HASH_KEY".to_string()),
+                    )
+                })?,
+            )
+            .map_err(|rerr| DbError::rocksdb_cast_message(rerr, None))?;
+
+        if let Some(data) = res {
+            Ok(borsh::from_slice::<common::HashType>(&data).map_err(|err| {
+                DbError::borsh_cast_message(
+                    err,
+                    Some("Failed to deserialize latest block hash".to_string()),
+                )
+            })?)
+        } else {
+            Err(DbError::db_interaction_error(
+                "Latest block hash not found".to_string(),
+            ))
+        }
+    }
+
     pub fn put_block(&self, block: &Block, first: bool, batch: &mut WriteBatch) -> DbResult<()> {
         let cf_block = self.block_column();

@@ -308,7 +410,8 @@ impl RocksDBIO {
             let last_curr_block = self.get_meta_last_block_in_db()?;

             if block.header.block_id > last_curr_block {
-                self.put_meta_last_block_in_db(block.header.block_id)?;
+                self.put_meta_last_block_in_db_batch(block.header.block_id, batch)?;
+                self.put_meta_latest_block_hash_batch(block.header.hash, batch)?;
             }
         }

From d0f26744ebf1738af3c114ef736e9cb516bc5f69 Mon Sep 17 00:00:00 2001
From: Daniil Polyakov
Date: Thu, 12 Feb 2026 00:01:00 +0300
Subject: [PATCH 02/16] fix: sync last_bedrock_msg_id even if posting to Bedrock failed

---
 sequencer_core/src/block_settlement_client.rs |  9 ++--
 sequencer_core/src/lib.rs                     | 50 +++++++++++++++++--
 sequencer_core/src/mock.rs                    | 37 ++++++++++++--
 sequencer_runner/src/lib.rs                   | 15 ++++--
 4 files changed, 93 insertions(+), 18 deletions(-)

diff --git a/sequencer_core/src/block_settlement_client.rs b/sequencer_core/src/block_settlement_client.rs
index e3badf2d..16afa9c1 100644
--- a/sequencer_core/src/block_settlement_client.rs
+++ b/sequencer_core/src/block_settlement_client.rs
@@ -23,7 +23,7 @@ pub trait BlockSettlementClientTrait: Clone {
     fn bedrock_signing_key(&self) -> &Ed25519Key;

     /// Post a transaction to the node.
-    async fn submit_block_to_bedrock(&self, block: &Block) -> Result<MsgId>;
+    async fn submit_inscribe_tx_to_bedrock(&self, tx: SignedMantleTx) -> Result<()>;

     /// Create and sign a transaction for inscribing data.
     fn create_inscribe_tx(&self, block: &Block) -> Result<(SignedMantleTx, MsgId)> {
@@ -89,16 +89,13 @@ impl BlockSettlementClientTrait for BlockSettlementClient {
         })
     }

-    async fn submit_block_to_bedrock(&self, block: &Block) -> Result<MsgId> {
-        let (tx, new_msg_id) = self.create_inscribe_tx(block)?;
-
-        // Post the transaction
+    async fn submit_inscribe_tx_to_bedrock(&self, tx: SignedMantleTx) -> Result<()> {
         self.bedrock_client
             .post_transaction(tx)
             .await
             .context("Failed to post transaction to Bedrock")?;

-        Ok(new_msg_id)
+        Ok(())
     }

     fn bedrock_channel_id(&self) -> ChannelId {
diff --git a/sequencer_core/src/lib.rs b/sequencer_core/src/lib.rs
index 8d719d65..f9b8f661 100644
--- a/sequencer_core/src/lib.rs
+++ b/sequencer_core/src/lib.rs
@@ -167,14 +167,25 @@ impl SequencerCore
     pub async fn produce_new_block_and_post_to_settlement_layer(&mut self) -> Result<u64> {
         {
-            let block = self.produce_new_block_with_mempool_transactions()?;
+            let block = self
+                .produce_new_block_with_mempool_transactions()
+                .context("Failed to produce new block with mempool transactions")?;
+            let (tx, msg_id) = self
+                .block_settlement_client
+                .create_inscribe_tx(&block)
+                .with_context(|| {
+                    format!(
+                        "Failed to create inscribe transaction for block with id {}",
+                        block.header.block_id
+                    )
+                })?;
+            self.last_bedrock_msg_id = msg_id.into();
             match self
                 .block_settlement_client
-                .submit_block_to_bedrock(&block)
+                .submit_inscribe_tx_to_bedrock(tx)
                 .await
             {
-                Ok(msg_id) => {
-                    self.last_bedrock_msg_id = msg_id.into();
+                Ok(()) => {
                     info!("Posted block data to Bedrock, msg_id: {msg_id:?}");
                 }
                 Err(err) => {
@@ -801,4 +812,35 @@ mod tests {

         assert_eq!(sequencer.get_pending_blocks().unwrap().len(), 1);
     }
+
+    #[tokio::test]
+    async fn test_last_bedrock_msg_id_updated_even_when_posting_fails() {
+        use crate::mock::{MockBlockSettlementClientWithError, MockIndexerClient};
+
+        let config = setup_sequencer_config();
+        let (mut sequencer, mempool_handle) = crate::SequencerCore::<
+            MockBlockSettlementClientWithError,
+            MockIndexerClient,
+        >::start_from_config(config)
+        .await;
+
+        // Store the initial last_bedrock_msg_id (should be genesis parent msg id)
+        let initial_msg_id = sequencer.last_bedrock_msg_id;
+        assert_eq!(initial_msg_id, [0; 32]);
+
+        // Add a transaction to the mempool
+        let tx = common::test_utils::produce_dummy_empty_transaction();
+        mempool_handle.push(tx).await.unwrap();
+
+        // Produce a block and post to settlement layer (which will fail)
+        let result = sequencer
+            .produce_new_block_and_post_to_settlement_layer()
+            .await;
+
+        // The method should succeed even though posting to Bedrock failed
+        assert!(result.is_ok());
+
+        // Verify that last_bedrock_msg_id was updated despite the posting failure
+        assert_ne!(sequencer.last_bedrock_msg_id, initial_msg_id);
+    }
 }
diff --git a/sequencer_core/src/mock.rs b/sequencer_core/src/mock.rs
index 16799d11..930ff946 100644
--- a/sequencer_core/src/mock.rs
+++ b/sequencer_core/src/mock.rs
@@ -1,6 +1,6 @@
-use anyhow::Result;
-use common::block::Block;
-use logos_blockchain_core::mantle::ops::channel::{ChannelId, MsgId};
+use anyhow::{Result, anyhow};
+use bedrock_client::SignedMantleTx;
+use logos_blockchain_core::mantle::ops::channel::ChannelId;
 use logos_blockchain_key_management_system_service::keys::Ed25519Key;
 use url::Url;

@@ -34,8 +34,35 @@ impl BlockSettlementClientTrait for MockBlockSettlementClient {
         &self.bedrock_signing_key
     }

-    async fn submit_block_to_bedrock(&self, block: &Block) -> Result<MsgId> {
-        self.create_inscribe_tx(block).map(|(_, msg_id)| msg_id)
+    async fn submit_inscribe_tx_to_bedrock(&self, _tx: SignedMantleTx) -> Result<()> {
+        Ok(())
+    }
+}
+
+#[derive(Clone)]
+pub struct MockBlockSettlementClientWithError {
+    bedrock_channel_id: ChannelId,
+    bedrock_signing_key: Ed25519Key,
+}
+
+impl BlockSettlementClientTrait for MockBlockSettlementClientWithError {
+    fn new(config: &BedrockConfig, bedrock_signing_key: Ed25519Key) -> Result<Self> {
+        Ok(Self {
+            bedrock_channel_id: config.channel_id,
+            bedrock_signing_key,
+        })
+    }
+
+    fn bedrock_channel_id(&self) -> ChannelId {
+        self.bedrock_channel_id
+    }
+
+    fn bedrock_signing_key(&self) -> &Ed25519Key {
+        &self.bedrock_signing_key
+    }
+
+    async fn submit_inscribe_tx_to_bedrock(&self, _tx: SignedMantleTx) -> Result<()> {
+        Err(anyhow!("Mock error"))
+    }
 }
diff --git a/sequencer_runner/src/lib.rs b/sequencer_runner/src/lib.rs
index 1dab37d5..f6391af2 100644
--- a/sequencer_runner/src/lib.rs
+++ b/sequencer_runner/src/lib.rs
@@ -176,10 +176,19 @@ async fn retry_pending_blocks_loop(
         info!("Resubmitting {} pending blocks", pending_blocks.len());

         for block in &pending_blocks {
-            if let Err(e) = block_settlement_client.submit_block_to_bedrock(block).await {
+            // TODO: We could cache the inscribe tx for each pending block to avoid re-creating it
+            // on every retry.
+            let (tx, _msg_id) = block_settlement_client
+                .create_inscribe_tx(block)
+                .context("Failed to create inscribe tx for pending block")?;
+
+            if let Err(e) = block_settlement_client
+                .submit_inscribe_tx_to_bedrock(tx)
+                .await
+            {
                 warn!(
-                    "Failed to resubmit block with id {} with error {}",
-                    block.header.block_id, e
+                    "Failed to resubmit block with id {} with error {e:#}",
+                    block.header.block_id
                 );
             }
         }
From 51ab510e71be8c0da464fedab45f63f7d34586b7 Mon Sep 17 00:00:00 2001
From: Daniil Polyakov
Date: Thu, 12 Feb 2026 02:39:22 +0300
Subject: [PATCH 03/16] fix: retrieve last bedrock message id from db

---
 common/src/block.rs               |  13 +-
 sequencer_core/src/block_store.rs |  70 +++++++---
 sequencer_core/src/lib.rs         | 215 +++++++++++++++++++-----------
 storage/src/lib.rs                |  83 +++++++-----
 4 files changed, 246 insertions(+), 135 deletions(-)

diff --git a/common/src/block.rs b/common/src/block.rs
index a5fc9218..91ad1175 100644
--- a/common/src/block.rs
+++ b/common/src/block.rs
@@ -4,6 +4,15 @@ use sha2::{Digest, Sha256, digest::FixedOutput};
 use crate::{HashType, transaction::NSSATransaction};

 pub type MantleMsgId = [u8; 32];
+pub type BlockHash = HashType;
+pub type BlockId = u64;
+pub type TimeStamp = u64;
+
+#[derive(Debug, Clone, BorshSerialize, BorshDeserialize)]
+pub struct BlockMeta {
+    pub hash: BlockHash,
+    pub msg_id: MantleMsgId,
+}

 #[derive(Debug, Clone)]
 /// Our own hasher.
@@ -19,10 +28,6 @@ impl OwnHasher {
     }
 }

-pub type BlockHash = HashType;
-pub type BlockId = u64;
-pub type TimeStamp = u64;
-
 #[derive(Debug, Clone, BorshSerialize, BorshDeserialize)]
 pub struct BlockHeader {
     pub block_id: BlockId,
diff --git a/sequencer_core/src/block_store.rs b/sequencer_core/src/block_store.rs
index 53518a70..49464dab 100644
--- a/sequencer_core/src/block_store.rs
+++ b/sequencer_core/src/block_store.rs
@@ -1,7 +1,11 @@
 use std::{collections::HashMap, path::Path};

 use anyhow::Result;
-use common::{HashType, block::Block, transaction::NSSATransaction};
+use common::{
+    HashType,
+    block::{Block, BlockMeta, MantleMsgId},
+    transaction::NSSATransaction,
+};
 use nssa::V02State;
 use storage::RocksDBIO;

@@ -20,10 +24,10 @@ impl SequencerStore {
     /// ATTENTION: Will overwrite genesis block.
     pub fn open_db_with_genesis(
         location: &Path,
-        genesis_block: Option<&Block>,
+        genesis_block: Option<(&Block, MantleMsgId)>,
         signing_key: nssa::PrivateKey,
     ) -> Result<Self> {
-        let tx_hash_to_block_map = if let Some(block) = &genesis_block {
+        let tx_hash_to_block_map = if let Some((block, _msg_id)) = &genesis_block {
             block_to_transactions_map(block)
         } else {
             HashMap::new()
@@ -68,8 +72,8 @@ impl SequencerStore {
         None
     }

-    pub fn latest_block_hash(&self) -> Result<HashType> {
-        Ok(self.dbio.latest_block_hash()?)
+    pub fn latest_block_meta(&self) -> Result<BlockMeta> {
+        Ok(self.dbio.latest_block_meta()?)
     }

     pub fn genesis_id(&self) -> u64 {
@@ -84,9 +88,14 @@ impl SequencerStore {
         self.dbio.get_all_blocks().map(|res| Ok(res?))
     }

-    pub(crate) fn update(&mut self, block: &Block, state: &V02State) -> Result<()> {
+    pub(crate) fn update(
+        &mut self,
+        block: &Block,
+        msg_id: MantleMsgId,
+        state: &V02State,
+    ) -> Result<()> {
         let new_transactions_map = block_to_transactions_map(block);
-        self.dbio.atomic_update(block, state)?;
+        self.dbio.atomic_update(block, msg_id, state)?;
         self.tx_hash_to_block_map.extend(new_transactions_map);
         Ok(())
     }
@@ -128,8 +137,12 @@ mod tests {
         let genesis_block = genesis_block_hashable_data.into_pending_block(&signing_key, [0; 32]);

         // Start an empty node store
-        let mut node_store =
-            SequencerStore::open_db_with_genesis(path, Some(&genesis_block), signing_key).unwrap();
+        let mut node_store = SequencerStore::open_db_with_genesis(
+            path,
+            Some((&genesis_block, [0; 32])),
+            signing_key,
+        )
+        .unwrap();

         let tx = common::test_utils::produce_dummy_empty_transaction();
         let block = common::test_utils::produce_dummy_block(1, None, vec![tx.clone()]);
@@ -139,14 +152,14 @@ mod tests {
         assert_eq!(None, retrieved_tx);
         // Add the block with the transaction
         let dummy_state = V02State::new_with_genesis_accounts(&[], &[]);
-        node_store.update(&block, &dummy_state).unwrap();
+        node_store.update(&block, [1; 32], &dummy_state).unwrap();
         // Try again
         let retrieved_tx = node_store.get_transaction_by_hash(tx.hash());
         assert_eq!(Some(tx), retrieved_tx);
     }

     #[test]
-    fn test_latest_block_hash_returns_genesis_hash_initially() {
+    fn test_latest_block_meta_returns_genesis_meta_initially() {
         let temp_dir = tempdir().unwrap();
         let path = temp_dir.path();

@@ -162,16 +175,21 @@ mod tests {
         let genesis_block = genesis_block_hashable_data.into_pending_block(&signing_key, [0; 32]);
         let genesis_hash = genesis_block.header.hash;

-        let node_store =
-            SequencerStore::open_db_with_genesis(path, Some(&genesis_block), signing_key).unwrap();
+        let node_store = SequencerStore::open_db_with_genesis(
+            path,
+            Some((&genesis_block, [0; 32])),
+            signing_key,
+        )
+        .unwrap();

         // Verify that initially the latest block hash equals genesis hash
-        let latest_hash = node_store.latest_block_hash().unwrap();
-        assert_eq!(latest_hash, genesis_hash);
+        let latest_meta = node_store.latest_block_meta().unwrap();
+        assert_eq!(latest_meta.hash, genesis_hash);
+        assert_eq!(latest_meta.msg_id, [0; 32]);
     }

     #[test]
-    fn test_latest_block_hash_updates_after_new_block() {
+    fn test_latest_block_meta_updates_after_new_block() {
         let temp_dir = tempdir().unwrap();
         let path = temp_dir.path();

@@ -185,19 +203,27 @@ mod tests {
         };

         let genesis_block = genesis_block_hashable_data.into_pending_block(&signing_key, [0; 32]);
-        let mut node_store =
-            SequencerStore::open_db_with_genesis(path, Some(&genesis_block), signing_key).unwrap();
+        let mut node_store = SequencerStore::open_db_with_genesis(
+            path,
+            Some((&genesis_block, [0; 32])),
+            signing_key,
+        )
+        .unwrap();

         // Add a new block
         let tx = common::test_utils::produce_dummy_empty_transaction();
         let block = common::test_utils::produce_dummy_block(1, None, vec![tx.clone()]);
         let block_hash = block.header.hash;
+        let block_msg_id = [1; 32];

         let dummy_state = V02State::new_with_genesis_accounts(&[], &[]);
-        node_store.update(&block, &dummy_state).unwrap();
+        node_store
+            .update(&block, block_msg_id, &dummy_state)
+            .unwrap();

-        // Verify that the latest block hash now equals the new block's hash
-        let latest_hash = node_store.latest_block_hash().unwrap();
-        assert_eq!(latest_hash, block_hash);
+        // Verify that the latest block meta now equals the new block's hash and msg_id
+        let latest_meta = node_store.latest_block_meta().unwrap();
+        assert_eq!(latest_meta.hash, block_hash);
+        assert_eq!(latest_meta.msg_id, block_msg_id);
     }
 }
diff --git a/sequencer_core/src/lib.rs b/sequencer_core/src/lib.rs
index f9b8f661..49ea56ee 100644
--- a/sequencer_core/src/lib.rs
+++ b/sequencer_core/src/lib.rs
@@ -1,11 +1,12 @@
 use std::{fmt::Display, path::Path, time::Instant};

 use anyhow::{Context as _, Result, anyhow};
+use bedrock_client::SignedMantleTx;
 #[cfg(feature = "testnet")]
 use common::PINATA_BASE58;
 use common::{
     HashType,
-    block::{BedrockStatus, Block, HashableBlockData, MantleMsgId},
+    block::{BedrockStatus, Block, HashableBlockData},
     transaction::NSSATransaction,
 };
 use config::SequencerConfig;
@@ -15,7 +16,7 @@ use mempool::{MemPool, MemPoolHandle};
 use serde::{Deserialize, Serialize};

 use crate::{
-    block_settlement_client::{BlockSettlementClient, BlockSettlementClientTrait},
+    block_settlement_client::{BlockSettlementClient, BlockSettlementClientTrait, MsgId},
     block_store::SequencerStore,
     indexer_client::{IndexerClient, IndexerClientTrait},
 };
@@ -38,7 +39,6 @@ pub struct SequencerCore<
     chain_height: u64,
     block_settlement_client: BC,
     indexer_client: IC,
-    last_bedrock_msg_id: MantleMsgId,
 }

 #[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
@@ -75,11 +75,26 @@ impl SequencerCore
@@ @@ impl SequencerCore
@@ @@ impl SequencerCore
-    pub fn produce_new_block_with_mempool_transactions(&mut self) -> Result<Block> {
+    /// Produces new block from transactions in mempool and packs it into a SignedMantleTx.
+    pub fn produce_new_block_with_mempool_transactions(
+        &mut self,
+    ) -> Result<(SignedMantleTx, MsgId)> {
         let now = Instant::now();

         let new_block_height = self.chain_height + 1;
@@ -225,25 +222,35 @@ impl SequencerCore
@@ @@ impl SequencerCore -> &nssa::V02State {
@@ -348,6 +355,10 @@ fn load_or_create_signing_key(path: &Path) -> Result<Ed25519Key> {
     } else {
         let mut key_bytes = [0u8; ED25519_SECRET_KEY_SIZE];
         rand::RngCore::fill_bytes(&mut rand::thread_rng(), &mut key_bytes);
+        // Create parent directory if it doesn't exist
+        if let Some(parent) = path.parent() {
+            std::fs::create_dir_all(parent)?;
+        }
         std::fs::write(path, key_bytes)?;
         Ok(Ed25519Key::from_bytes(&key_bytes))
     }
@@ -652,9 +663,9 @@ mod tests {
         let tx = common::test_utils::produce_dummy_empty_transaction();
         mempool_handle.push(tx).await.unwrap();

-        let block = sequencer.produce_new_block_with_mempool_transactions();
-        assert!(block.is_ok());
-        assert_eq!(block.unwrap().header.block_id, genesis_height + 1);
+        let result = sequencer.produce_new_block_with_mempool_transactions();
+        assert!(result.is_ok());
+        assert_eq!(sequencer.chain_height, genesis_height + 1);
     }

     #[tokio::test]
@@ -677,12 +688,13 @@ mod tests {
         mempool_handle.push(tx_replay).await.unwrap();

         // Create block
-        let current_height = sequencer
+        sequencer
             .produce_new_block_with_mempool_transactions()
-            .unwrap()
-            .header
-            .block_id;
-        let block = sequencer.store.get_block_at_id(current_height).unwrap();
+            .unwrap();
+        let block = sequencer
+            .store
+            .get_block_at_id(sequencer.chain_height)
+            .unwrap();

         // Only one should be included in the block
         assert_eq!(block.body.transactions, vec![tx.clone()]);
@@ -703,22 +715,24 @@ mod tests {
         // The transaction should be included the first time
         mempool_handle.push(tx.clone()).await.unwrap();
-        let current_height = sequencer
+        sequencer
             .produce_new_block_with_mempool_transactions()
-            .unwrap()
-            .header
-            .block_id;
-        let block = sequencer.store.get_block_at_id(current_height).unwrap();
+            .unwrap();
+        let block = sequencer
+            .store
+            .get_block_at_id(sequencer.chain_height)
+            .unwrap();
         assert_eq!(block.body.transactions, vec![tx.clone()]);

         // Add same transaction should fail
         mempool_handle.push(tx.clone()).await.unwrap();
-        let current_height = sequencer
+        sequencer
             .produce_new_block_with_mempool_transactions()
-            .unwrap()
-            .header
-            .block_id;
-        let block = sequencer.store.get_block_at_id(current_height).unwrap();
+            .unwrap();
+        let block = sequencer
+            .store
+            .get_block_at_id(sequencer.chain_height)
+            .unwrap();
         assert!(block.body.transactions.is_empty());
     }

@@ -746,12 +760,13 @@ mod tests {
         );

         mempool_handle.push(tx.clone()).await.unwrap();
-        let current_height = sequencer
+        sequencer
             .produce_new_block_with_mempool_transactions()
-            .unwrap()
-            .header
-            .block_id;
-        let block = sequencer.store.get_block_at_id(current_height).unwrap();
+            .unwrap();
+        let block = sequencer
+            .store
+            .get_block_at_id(sequencer.chain_height)
+            .unwrap();

         assert_eq!(block.body.transactions, vec![tx.clone()]);
     }
@@ -814,33 +829,75 @@ mod tests {
     }

     #[tokio::test]
-    async fn test_last_bedrock_msg_id_updated_even_when_posting_fails() {
-        use crate::mock::{MockBlockSettlementClientWithError, MockIndexerClient};
-
+    async fn test_produce_block_with_correct_prev_meta_after_restart() {
         let config = setup_sequencer_config();
-        let (mut sequencer, mempool_handle) = crate::SequencerCore::<
-            MockBlockSettlementClientWithError,
-            MockIndexerClient,
-        >::start_from_config(config)
-        .await;
+        let acc1_account_id = config.initial_accounts[0].account_id;
+        let acc2_account_id = config.initial_accounts[1].account_id;
-        // Store the initial last_bedrock_msg_id (should be genesis parent msg id)
-        let initial_msg_id = sequencer.last_bedrock_msg_id;
-        assert_eq!(initial_msg_id, [0; 32]);
+        // Step 1: Create initial database with some block metadata
+        let expected_prev_meta = {
+            let (mut sequencer, mempool_handle) =
+                SequencerCoreWithMockClients::start_from_config(config.clone()).await;

-        // Add a transaction to the mempool
-        let tx = common::test_utils::produce_dummy_empty_transaction();
-        mempool_handle.push(tx).await.unwrap();
+            let signing_key = PrivateKey::try_new([1; 32]).unwrap();

-        // Produce a block and post to settlement layer (which will fail)
-        let result = sequencer
-            .produce_new_block_and_post_to_settlement_layer()
-            .await;
+            // Add a transaction and produce a block to set up block metadata
+            let tx = common::test_utils::create_transaction_native_token_transfer(
+                acc1_account_id,
+                0,
+                acc2_account_id,
+                100,
+                signing_key,
+            );

-        // The method should succeed even though posting to Bedrock failed
-        assert!(result.is_ok());
+            mempool_handle.push(tx).await.unwrap();
+            sequencer
+                .produce_new_block_with_mempool_transactions()
+                .unwrap();

-        // Verify that last_bedrock_msg_id was updated despite the posting failure
-        assert_ne!(sequencer.last_bedrock_msg_id, initial_msg_id);
+            // Get the metadata of the last block produced
+            sequencer.store.latest_block_meta().unwrap()
+        };
+
+        // Step 2: Restart sequencer from the same storage
+        let (mut sequencer, mempool_handle) =
+            SequencerCoreWithMockClients::start_from_config(config.clone()).await;
+
+        // Step 3: Submit a new transaction
+        let signing_key = PrivateKey::try_new([1; 32]).unwrap();
+        let tx = common::test_utils::create_transaction_native_token_transfer(
+            acc1_account_id,
+            1, // Next nonce
+            acc2_account_id,
+            50,
+            signing_key,
+        );
+
+        mempool_handle.push(tx.clone()).await.unwrap();
+
+        // Step 4: Produce new block
+        sequencer
+            .produce_new_block_with_mempool_transactions()
+            .unwrap();
+
+        // Step 5: Verify the new block has correct previous block metadata
+        let new_block = sequencer
+            .store
+            .get_block_at_id(sequencer.chain_height)
+            .unwrap();
+
+        assert_eq!(
+            new_block.header.prev_block_hash, expected_prev_meta.hash,
+            "New block's prev_block_hash should match the stored metadata hash"
+        );
+        assert_eq!(
+            new_block.bedrock_parent_id, expected_prev_meta.msg_id,
+            "New block's bedrock_parent_id should match the stored metadata msg_id"
+        );
+        assert_eq!(
+            new_block.body.transactions,
+            vec![tx],
+            "New block should contain the submitted transaction"
+        );
     }
 }
diff --git a/storage/src/lib.rs b/storage/src/lib.rs
index b9bbd7d5..a9ce2d52 100644
--- a/storage/src/lib.rs
+++ b/storage/src/lib.rs
@@ -1,6 +1,6 @@
 use std::{path::Path, sync::Arc};

-use common::block::Block;
+use common::block::{Block, BlockMeta, MantleMsgId};
 use error::DbError;
 use nssa::V02State;
 use rocksdb::{
@@ -29,8 +29,8 @@ pub const DB_META_LAST_BLOCK_IN_DB_KEY: &str = "last_block_in_db";
 pub const DB_META_FIRST_BLOCK_SET_KEY: &str = "first_block_set";
 /// Key base for storing metainformation about the last finalized block on Bedrock
 pub const DB_META_LAST_FINALIZED_BLOCK_ID: &str = "last_finalized_block_id";
-/// Key base for storing metainformation about the latest block hash
-pub const DB_META_LATEST_BLOCK_HASH_KEY: &str = "latest_block_hash";
+/// Key base for storing metainformation about the latest block meta
+pub const DB_META_LATEST_BLOCK_META_KEY: &str = "latest_block_meta";
 /// Key base for storing the NSSA state
 pub const DB_NSSA_STATE_KEY: &str = "nssa_state";

@@ -49,7 +49,10 @@ pub struct RocksDBIO {
 }

 impl RocksDBIO {
-    pub fn open_or_create(path: &Path, start_block: Option<&Block>) -> DbResult<Self> {
+    pub fn open_or_create(
+        path: &Path,
+        start_block: Option<(&Block, MantleMsgId)>,
+    ) -> DbResult<Self> {
         let mut cf_opts = Options::default();
         cf_opts.set_max_write_buffer_number(16);
         // ToDo: Add more column families for different data
@@ -75,13 +78,16 @@ impl RocksDBIO {

         if is_start_set {
             Ok(dbio)
-        } else if let Some(block) = start_block {
+        } else if let Some((block, msg_id)) = start_block {
             let block_id = block.header.block_id;
-            dbio.put_meta_first_block_in_db(block)?;
+            dbio.put_meta_first_block_in_db(block, msg_id)?;
             dbio.put_meta_is_first_block_set()?;
             dbio.put_meta_last_block_in_db(block_id)?;
             dbio.put_meta_last_finalized_block_id(None)?;
-            dbio.put_meta_latest_block_hash(block.header.hash)?;
+            dbio.put_meta_latest_block_meta(&BlockMeta {
+                hash: block.header.hash,
+                msg_id,
+            })?;

             Ok(dbio)
         } else {
@@ -211,7 +217,7 @@ impl RocksDBIO {
         Ok(())
     }

-    pub fn put_meta_first_block_in_db(&self, block: &Block) -> DbResult<()> {
+    pub fn put_meta_first_block_in_db(&self, block: &Block, msg_id: MantleMsgId) -> DbResult<()> {
         let cf_meta = self.meta_column();
         self.db
             .put_cf(
@@ -232,7 +238,7 @@ impl RocksDBIO {
             .map_err(|rerr| DbError::rocksdb_cast_message(rerr, None))?;

         let mut batch = WriteBatch::default();
-        self.put_block(block, true, &mut batch)?;
+        self.put_block(block, msg_id, true, &mut batch)?;
         self.db.write(batch).map_err(|rerr| {
             DbError::rocksdb_cast_message(
                 rerr,
@@ -328,21 +334,21 @@ impl RocksDBIO {
         Ok(())
     }

-    fn put_meta_latest_block_hash(&self, block_hash: common::HashType) -> DbResult<()> {
+    fn put_meta_latest_block_meta(&self, block_meta: &BlockMeta) -> DbResult<()> {
         let cf_meta = self.meta_column();
         self.db
             .put_cf(
                 &cf_meta,
-                borsh::to_vec(&DB_META_LATEST_BLOCK_HASH_KEY).map_err(|err| {
+                borsh::to_vec(&DB_META_LATEST_BLOCK_META_KEY).map_err(|err| {
                     DbError::borsh_cast_message(
                         err,
-                        Some("Failed to serialize DB_META_LATEST_BLOCK_HASH_KEY".to_string()),
+                        Some("Failed to serialize DB_META_LATEST_BLOCK_META_KEY".to_string()),
                     )
                 })?,
-                borsh::to_vec(&block_hash).map_err(|err| {
+                borsh::to_vec(&block_meta).map_err(|err| {
                     DbError::borsh_cast_message(
                         err,
-                        Some("Failed to serialize latest block hash".to_string()),
+                        Some("Failed to serialize latest block meta".to_string()),
                     )
                 })?,
             )
@@ -350,60 +356,66 @@ impl RocksDBIO {
         Ok(())
     }

-    fn put_meta_latest_block_hash_batch(
+    fn put_meta_latest_block_meta_batch(
         &self,
-        block_hash: common::HashType,
+        block_meta: &BlockMeta,
         batch: &mut WriteBatch,
     ) -> DbResult<()> {
         let cf_meta = self.meta_column();
         batch.put_cf(
             &cf_meta,
-            borsh::to_vec(&DB_META_LATEST_BLOCK_HASH_KEY).map_err(|err| {
+            borsh::to_vec(&DB_META_LATEST_BLOCK_META_KEY).map_err(|err| {
                 DbError::borsh_cast_message(
                     err,
-                    Some("Failed to serialize DB_META_LATEST_BLOCK_HASH_KEY".to_string()),
+                    Some("Failed to serialize DB_META_LATEST_BLOCK_META_KEY".to_string()),
                 )
             })?,
-            borsh::to_vec(&block_hash).map_err(|err| {
+            borsh::to_vec(&block_meta).map_err(|err| {
                 DbError::borsh_cast_message(
                     err,
-                    Some("Failed to serialize latest block hash".to_string()),
+                    Some("Failed to serialize latest block meta".to_string()),
                 )
             })?,
         );
         Ok(())
     }

-    pub fn latest_block_hash(&self) -> DbResult<common::HashType> {
+    pub fn latest_block_meta(&self) -> DbResult<BlockMeta> {
         let cf_meta = self.meta_column();
         let res = self
             .db
             .get_cf(
                 &cf_meta,
-                borsh::to_vec(&DB_META_LATEST_BLOCK_HASH_KEY).map_err(|err| {
+                borsh::to_vec(&DB_META_LATEST_BLOCK_META_KEY).map_err(|err| {
                     DbError::borsh_cast_message(
                         err,
-                        Some("Failed to serialize DB_META_LATEST_BLOCK_HASH_KEY".to_string()),
+                        Some("Failed to serialize DB_META_LATEST_BLOCK_META_KEY".to_string()),
                     )
                 })?,
             )
             .map_err(|rerr| DbError::rocksdb_cast_message(rerr, None))?;

         if let Some(data) = res {
-            Ok(borsh::from_slice::<common::HashType>(&data).map_err(|err| {
+            Ok(borsh::from_slice::<BlockMeta>(&data).map_err(|err| {
                 DbError::borsh_cast_message(
                     err,
-                    Some("Failed to deserialize latest block hash".to_string()),
+                    Some("Failed to deserialize latest block meta".to_string()),
                 )
             })?)
         } else {
             Err(DbError::db_interaction_error(
-                "Latest block hash not found".to_string(),
+                "Latest block meta not found".to_string(),
             ))
         }
     }

-    pub fn put_block(&self, block: &Block, first: bool, batch: &mut WriteBatch) -> DbResult<()> {
+    pub fn put_block(
+        &self,
+        block: &Block,
+        msg_id: MantleMsgId,
+        first: bool,
+        batch: &mut WriteBatch,
+    ) -> DbResult<()> {
         let cf_block = self.block_column();

         if !first {
@@ -411,7 +423,13 @@ impl RocksDBIO {

             if block.header.block_id > last_curr_block {
                 self.put_meta_last_block_in_db_batch(block.header.block_id, batch)?;
-                self.put_meta_latest_block_hash_batch(block.header.hash, batch)?;
+                self.put_meta_latest_block_meta_batch(
+                    &BlockMeta {
+                        hash: block.header.hash,
+                        msg_id,
+                    },
+                    batch,
+                )?;
             }
         }

@@ -530,10 +548,15 @@ impl RocksDBIO {
         })
     }

-    pub fn atomic_update(&self, block: &Block, state: &V02State) -> DbResult<()> {
+    pub fn atomic_update(
+        &self,
+        block: &Block,
+        msg_id: MantleMsgId,
+        state: &V02State,
+    ) -> DbResult<()> {
         let block_id = block.header.block_id;
         let mut batch = WriteBatch::default();
-        self.put_block(block, false, &mut batch)?;
+        self.put_block(block, msg_id, false, &mut batch)?;
         self.put_nssa_state_in_db(state, &mut batch)?;
         self.db.write(batch).map_err(|rerr| {
             DbError::rocksdb_cast_message(
From 9b51fd1fc2346e61d6709546286bcf20575cd46d Mon Sep 17 00:00:00 2001
From: Daniil Polyakov
Date: Thu, 12 Feb 2026 17:49:18 +0300
Subject: [PATCH 04/16] fix: make InternalError more descriptive

---
 common/src/error.rs | 18 +++---------------
 1 file changed, 3 insertions(+), 15 deletions(-)

diff --git a/common/src/error.rs b/common/src/error.rs
index 5c81a106..3301bc87 100644
--- a/common/src/error.rs
+++ b/common/src/error.rs
@@ -13,25 +13,13 @@ pub struct SequencerRpcError {
 #[derive(thiserror::Error, Debug)]
 pub enum SequencerClientError {
     #[error("HTTP error")]
-    HTTPError(reqwest::Error),
+    HTTPError(#[from] reqwest::Error),
     #[error("Serde error")]
-    SerdeError(serde_json::Error),
-    #[error("Internal error")]
+    SerdeError(#[from] serde_json::Error),
+    #[error("Internal error: {0:?}")]
     InternalError(SequencerRpcError),
 }

-impl From<reqwest::Error> for SequencerClientError {
-    fn from(value: reqwest::Error) -> Self {
-        SequencerClientError::HTTPError(value)
-    }
-}
-
-impl From<serde_json::Error> for SequencerClientError {
-    fn from(value: serde_json::Error) -> Self {
-        SequencerClientError::SerdeError(value)
-    }
-}
-
 impl From<SequencerRpcError> for SequencerClientError {
     fn from(value: SequencerRpcError) -> Self {
         SequencerClientError::InternalError(value)
From 378d53f152abc9f6d949caaf2648f5c6ca63f841 Mon Sep 17 00:00:00 2001
From: Daniil Polyakov
Date: Thu, 12 Feb 2026 18:20:25 +0300
Subject: [PATCH 05/16] fix: don't delete finalized blocks but only mark them

---
 sequencer_core/src/block_store.rs | 52 +++++++++++++++++++++++++++++++
 sequencer_core/src/lib.rs         |  4 ++-
 storage/src/lib.rs                | 33 +++++++++++++++++++-
 3 files changed, 87 insertions(+), 2 deletions(-)
diff --git a/sequencer_core/src/block_store.rs b/sequencer_core/src/block_store.rs
index 49464dab..8c06d992 100644
--- a/sequencer_core/src/block_store.rs
+++ b/sequencer_core/src/block_store.rs
@@ -58,6 +58,10 @@ impl SequencerStore {
         Ok(self.dbio.delete_block(block_id)?)
     }

+    pub fn mark_block_as_finalized(&mut self, block_id: u64) -> Result<()> {
+        Ok(self.dbio.mark_block_as_finalized(block_id)?)
+    }
+
     /// Returns the transaction corresponding to the given hash, if it exists in the blockchain.
     pub fn get_transaction_by_hash(&self, hash: HashType) -> Option<NSSATransaction> {
         let block_id = self.tx_hash_to_block_map.get(&hash);
@@ -226,4 +230,52 @@ mod tests {
         assert_eq!(latest_meta.hash, block_hash);
         assert_eq!(latest_meta.msg_id, block_msg_id);
     }
+
+    #[test]
+    fn test_mark_block_finalized() {
+        let temp_dir = tempdir().unwrap();
+        let path = temp_dir.path();
+
+        let signing_key = sequencer_sign_key_for_testing();
+
+        let genesis_block_hashable_data = HashableBlockData {
+            block_id: 0,
+            prev_block_hash: HashType([0; 32]),
+            timestamp: 0,
+            transactions: vec![],
+        };
+
+        let genesis_block = genesis_block_hashable_data.into_pending_block(&signing_key, [0; 32]);
+        let mut node_store = SequencerStore::open_db_with_genesis(
+            path,
+            Some((&genesis_block, [0; 32])),
+            signing_key,
+        )
+        .unwrap();
+
+        // Add a new block with Pending status
+        let tx = common::test_utils::produce_dummy_empty_transaction();
+        let block = common::test_utils::produce_dummy_block(1, None, vec![tx.clone()]);
+        let block_id = block.header.block_id;
+
+        let dummy_state = V02State::new_with_genesis_accounts(&[], &[]);
+        node_store.update(&block, [1; 32], &dummy_state).unwrap();
+
+        // Verify initial status is Pending
+        let retrieved_block = node_store.get_block_at_id(block_id).unwrap();
+        assert!(matches!(
+            retrieved_block.bedrock_status,
+            common::block::BedrockStatus::Pending
+        ));
+
+        // Mark block as finalized
+        node_store.mark_block_as_finalized(block_id).unwrap();
+
+        // Verify status is now Finalized
+        let finalized_block = node_store.get_block_at_id(block_id).unwrap();
+        assert!(matches!(
+            finalized_block.bedrock_status,
+            common::block::BedrockStatus::Finalized
+        ));
+    }
 }
diff --git a/sequencer_core/src/lib.rs b/sequencer_core/src/lib.rs
index 49ea56ee..31f76a0a 100644
--- a/sequencer_core/src/lib.rs
+++ b/sequencer_core/src/lib.rs
@@ -293,8 +293,10 @@ impl SequencerCore
diff --git a/storage/src/lib.rs b/storage/src/lib.rs
--- a/storage/src/lib.rs
+++ b/storage/src/lib.rs
+    pub fn mark_block_as_finalized(&self, block_id: u64) -> DbResult<()> {
+        let mut block = self.get_block(block_id)?;
+        block.bedrock_status = BedrockStatus::Finalized;
+
+        let cf_block = self.block_column();
+        self.db
+            .put_cf(
+                &cf_block,
+                borsh::to_vec(&block_id).map_err(|err| {
+                    DbError::borsh_cast_message(
+                        err,
+                        Some("Failed to serialize block id".to_string()),
+                    )
+                })?,
+                borsh::to_vec(&block).map_err(|err| {
+                    DbError::borsh_cast_message(
+                        err,
+                        Some("Failed to serialize block data".to_string()),
+                    )
+                })?,
+            )
+            .map_err(|rerr| {
+                DbError::rocksdb_cast_message(
+                    rerr,
+                    Some(format!("Failed to mark block {block_id} as finalized")),
+                )
+            })?;
+
+        Ok(())
+    }
+
     pub fn get_all_blocks(&self) -> impl Iterator<Item = DbResult<Block>> {
         let cf_block = self.block_column();
         self.db
From d2ce0cd51bc114062d655f89a75050467871d7ca Mon Sep 17 00:00:00 2001
From: Daniil Polyakov
Date: Thu, 12 Feb 2026 19:35:07 +0300
Subject: [PATCH 06/16] fix: run async runtime when dropping TestContext in BlockingTestContext

---
 integration_tests/src/lib.rs          | 26 +++++++++++++++++++++++---
 integration_tests/tests/wallet_ffi.rs | 22 +++++++++++-----------
 2 files changed, 34 insertions(+), 14 deletions(-)
diff --git a/integration_tests/src/lib.rs b/integration_tests/src/lib.rs
index c193b2d7..6497698d 100644
--- a/integration_tests/src/lib.rs
+++ b/integration_tests/src/lib.rs
@@ -321,15 +321,22 @@ impl Drop for TestContext {

 /// A test context to be used in normal #[test] tests
 pub struct BlockingTestContext {
-    pub ctx: TestContext,
-    pub runtime: tokio::runtime::Runtime,
+    ctx: Option<TestContext>,
+    runtime: tokio::runtime::Runtime,
 }

 impl BlockingTestContext {
     pub fn new() -> Result<Self> {
         let runtime = tokio::runtime::Runtime::new().unwrap();
         let ctx = runtime.block_on(TestContext::new())?;
-        Ok(Self { ctx, runtime })
+        Ok(Self {
+            ctx: Some(ctx),
+            runtime,
+        })
+    }
+
+    pub fn ctx(&self) -> &TestContext {
+        self.ctx.as_ref().expect("TestContext is set")
     }
 }

@@ -370,6 +377,19 @@ impl TestContextBuilder {
     }
 }

+impl Drop for BlockingTestContext {
+    fn drop(&mut self) {
+        let Self { ctx, runtime } = self;
+
+        // Ensure async cleanup of TestContext by blocking on its drop in the runtime.
+        runtime.block_on(async {
+            if let Some(ctx) = ctx.take() {
+                drop(ctx);
+            }
+        })
+    }
+}
+
 pub fn format_public_account_id(account_id: AccountId) -> String {
     format!("Public/{account_id}")
 }
diff --git a/integration_tests/tests/wallet_ffi.rs b/integration_tests/tests/wallet_ffi.rs
index 9a36cf93..6fbcd818 100644
--- a/integration_tests/tests/wallet_ffi.rs
+++ b/integration_tests/tests/wallet_ffi.rs
@@ -102,8 +102,8 @@ fn new_wallet_ffi_with_test_context_config(ctx: &BlockingTestContext) -> *mut Wa
     let tempdir = tempfile::tempdir().unwrap();
     let config_path = tempdir.path().join("wallet_config.json");
     let storage_path = tempdir.path().join("storage.json");
-    let mut config = ctx.ctx.wallet().config().to_owned();
-    if let Some(config_overrides) = ctx.ctx.wallet().config_overrides().clone() {
+    let mut config = ctx.ctx().wallet().config().to_owned();
+    if let Some(config_overrides) = ctx.ctx().wallet().config_overrides().clone() {
         config.apply_overrides(config_overrides);
     }
     let mut file = std::fs::OpenOptions::new()
@@ -119,7 +119,7 @@ fn new_wallet_ffi_with_test_context_config(ctx: &BlockingTestContext) -> *mut Wa
     let config_path = CString::new(config_path.to_str().unwrap()).unwrap();
     let storage_path = CString::new(storage_path.to_str().unwrap()).unwrap();
-    let password = CString::new(ctx.ctx.wallet_password()).unwrap();
+    let password = CString::new(ctx.ctx().wallet_password()).unwrap();

     unsafe {
         wallet_ffi_create_new(
@@ -325,7 +325,7 @@ fn test_wallet_ffi_list_accounts() {
 #[test]
 fn test_wallet_ffi_get_balance_public() -> Result<()> {
     let ctx = BlockingTestContext::new()?;
-    let account_id: AccountId = ctx.ctx.existing_public_accounts()[0];
+    let account_id: AccountId = ctx.ctx().existing_public_accounts()[0];
     let wallet_ffi_handle = new_wallet_ffi_with_test_context_config(&ctx);

     let balance = unsafe {
@@ -353,7 +353,7 @@ fn test_wallet_ffi_get_balance_public() -> Result<()> {
 #[test]
 fn test_wallet_ffi_get_account_public() -> Result<()> {
     let ctx = BlockingTestContext::new()?;
-    let account_id: AccountId = ctx.ctx.existing_public_accounts()[0];
+    let account_id: AccountId = ctx.ctx().existing_public_accounts()[0];
     let wallet_ffi_handle = new_wallet_ffi_with_test_context_config(&ctx);

     let mut out_account = FfiAccount::default();
@@ -388,7 +388,7 @@ fn test_wallet_ffi_get_account_public() -> Result<()> {
 #[test]
 fn test_wallet_ffi_get_public_account_keys() -> Result<()> {
     let ctx = BlockingTestContext::new()?;
-    let account_id: AccountId = ctx.ctx.existing_public_accounts()[0];
+    let account_id: AccountId = ctx.ctx().existing_public_accounts()[0];
     let wallet_ffi_handle = new_wallet_ffi_with_test_context_config(&ctx);

     let mut out_key = FfiPublicAccountKey::default();
@@ -404,7 +404,7 @@ fn test_wallet_ffi_get_public_account_keys() -> Result<()> {
     let expected_key = {
         let private_key = ctx
-            .ctx
+            .ctx()
             .wallet()
             .get_account_public_signing_key(account_id)
             .unwrap();
@@ -425,7 +425,7 @@ fn test_wallet_ffi_get_private_account_keys() -> Result<()> {
     let ctx = BlockingTestContext::new()?;
-    let account_id: AccountId = ctx.ctx.existing_public_accounts()[0];
+    let account_id: AccountId = ctx.ctx().existing_public_accounts()[0];
     let wallet_ffi_handle = new_wallet_ffi_with_test_context_config(&ctx);

     let mut keys = FfiPrivateAccountKeys::default();
@@ -439,7 +439,7 @@ fn test_wallet_ffi_get_private_account_keys() -> Result<()> {
     };

     let key_chain = &ctx
-        .ctx
+        .ctx()
         .wallet()
         .storage()
         .user_data
@@ -567,8 +567,8 @@ fn test_wallet_ffi_init_public_account_auth_transfer() -> Result<()> {
 fn test_wallet_ffi_transfer_public() -> Result<()> {
     let ctx = BlockingTestContext::new().unwrap();
     let wallet_ffi_handle = new_wallet_ffi_with_test_context_config(&ctx);
-    let from: FfiBytes32 = (&ctx.ctx.existing_public_accounts()[0]).into();
-    let to: FfiBytes32 = (&ctx.ctx.existing_public_accounts()[1]).into();
+    let from: FfiBytes32 = (&ctx.ctx().existing_public_accounts()[0]).into();
+    let to: FfiBytes32 = (&ctx.ctx().existing_public_accounts()[1]).into();
     let amount: [u8; 16] = 100u128.to_le_bytes();

     let mut transfer_result = FfiTransferResult::default();
From d4494c1f21b7ba0eb803b3fda7e20de3457b5b24 Mon Sep 17 00:00:00 2001
From: Daniil Polyakov
Date: Thu, 12 Feb 2026 19:38:04 +0300
Subject: [PATCH 07/16] fix: use private account for private account test in wallet_ffi

---
 integration_tests/tests/wallet_ffi.rs | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/integration_tests/tests/wallet_ffi.rs b/integration_tests/tests/wallet_ffi.rs
index 6fbcd818..2afd162c 100644
--- a/integration_tests/tests/wallet_ffi.rs
+++ b/integration_tests/tests/wallet_ffi.rs
@@ -425,7 +425,7 @@ fn test_wallet_ffi_get_public_account_keys() -> Result<()> {
 #[test]
 fn test_wallet_ffi_get_private_account_keys() -> Result<()> {
     let ctx = BlockingTestContext::new()?;
-    let account_id: AccountId = ctx.ctx().existing_public_accounts()[0];
+    let account_id: AccountId = ctx.ctx().existing_private_accounts()[0];
     let wallet_ffi_handle = new_wallet_ffi_with_test_context_config(&ctx);

     let mut keys = FfiPrivateAccountKeys::default();
From 2acc388ae9c940bb09d1b388322ffeb2b8a5cd02 Mon Sep 17 00:00:00 2001
From: Daniil Polyakov
Date: Fri, 13 Feb 2026 01:49:20 +0300
Subject: [PATCH 08/16] chore: rename `get_schema` endpoint to more common `getSchema`

---
 indexer/service/rpc/src/lib.rs | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/indexer/service/rpc/src/lib.rs b/indexer/service/rpc/src/lib.rs
index 4debe9dc..ee0eab9b 100644
--- a/indexer/service/rpc/src/lib.rs
+++ b/indexer/service/rpc/src/lib.rs
@@ -10,7 +10,7 @@ compile_error!("At least one of `server` or `client` features must be enabled.")
 #[cfg_attr(all(feature = "client", not(feature = "server")), rpc(client))]
 #[cfg_attr(all(feature = "server", feature = "client"), rpc(server, client))]
 pub trait Rpc {
-    #[method(name = "get_schema")]
+    #[method(name = "getSchema")]
     fn get_schema(&self) -> Result {
         // TODO: Canonical solution would be to provide `describe` method returning OpenRPC spec,
         // But for now it's painful to implement, although can be done if really needed.
From c663ad1a56749812f1bdc2c33e63460888c68151 Mon Sep 17 00:00:00 2001
From: Daniil Polyakov
Date: Fri, 13 Feb 2026 01:54:40 +0300
Subject: [PATCH 09/16] chore: better error messages

---
 bedrock_client/src/lib.rs | 1 -
 storage/src/error.rs      | 8 +++++---
 2 files changed, 5 insertions(+), 4 deletions(-)

diff --git a/bedrock_client/src/lib.rs b/bedrock_client/src/lib.rs
index 91687545..7655a31c 100644
--- a/bedrock_client/src/lib.rs
+++ b/bedrock_client/src/lib.rs
@@ -62,7 +62,6 @@ impl BedrockClient {
         Retry::spawn(self.backoff_strategy(), || {
             self.http_client
                 .post_transaction(self.node_url.clone(), tx.clone())
-                .inspect_err(|err| warn!("Transaction posting failed with error: {err:#}"))
         })
         .await
     }
diff --git a/storage/src/error.rs b/storage/src/error.rs
index a3fd80b7..fa8a3265 100644
--- a/storage/src/error.rs
+++ b/storage/src/error.rs
@@ -1,16 +1,18 @@
 #[derive(thiserror::Error, Debug)]
 pub enum DbError {
-    #[error("RocksDb error")]
+    #[error("RocksDb error: {}", additional_info.as_deref().unwrap_or("No additional info"))]
     RocksDbError {
+        #[source]
         error: rocksdb::Error,
         additional_info: Option<String>,
     },
-    #[error("Serialization error")]
+    #[error("Serialization error: {}", additional_info.as_deref().unwrap_or("No additional info"))]
     SerializationError {
+        #[source]
         error: borsh::io::Error,
         additional_info: Option<String>,
     },
-    #[error("Logic Error")]
+    #[error("Logic Error: {additional_info}")]
     DbInteractionError { additional_info: String },
 }
From 9d35cf4ea8be672d84e806068b0a50e8062d831c Mon Sep 17 00:00:00 2001
From: Daniil Polyakov
Date: Fri, 13 Feb 2026 01:55:36 +0300
Subject: [PATCH 10/16] chore: adjust docs and configs for manual run

---
 README.md                                            | 4 ++--
 indexer/service/configs/indexer_config.json          | 2 +-
 sequencer_runner/configs/debug/sequencer_config.json | 2 +-
 3 files changed, 4 insertions(+), 4 deletions(-)

diff --git a/README.md b/README.md
index 07b883a8..364ee2a5 100644
--- a/README.md
+++ b/README.md
@@ -135,10 +135,10 @@
 The sequencer and node can be run locally:
 1. On one terminal go to the `logos-blockchain/logos-blockchain` repo and run a local logos blockchain node:
    - `git checkout master; git pull`
    - `cargo clean`
-   - `rm ~/.logos-blockchain-circuits`
+   - `rm -r ~/.logos-blockchain-circuits`
    - `./scripts/setup-logos-blockchain-circuits.sh`
    - `cargo build --all-features`
-   - `./target/debug/logos-blockchain-node nodes/node/config-one-node.yaml`
+   - `./target/debug/logos-blockchain-node --deployment nodes/node/standalone-deployment-config.yaml nodes/node/standalone-node-config.yaml`
 2. On another terminal go to the `logos-blockchain/lssa` repo and run indexer service:
    - `RUST_LOG=info cargo run --release -p indexer_service indexer/service/configs/indexer_config.json`
diff --git a/indexer/service/configs/indexer_config.json b/indexer/service/configs/indexer_config.json
index 93d424c5..b83c4650 100644
--- a/indexer/service/configs/indexer_config.json
+++ b/indexer/service/configs/indexer_config.json
@@ -1,7 +1,7 @@
 {
     "resubscribe_interval_millis": 1000,
     "bedrock_client_config": {
-        "addr": "http://localhost:18080",
+        "addr": "http://localhost:8080",
         "backoff": {
             "start_delay_millis": 100,
             "max_retries": 5
diff --git a/sequencer_runner/configs/debug/sequencer_config.json b/sequencer_runner/configs/debug/sequencer_config.json
index cd6840e6..2e4d4ed3 100644
--- a/sequencer_runner/configs/debug/sequencer_config.json
+++ b/sequencer_runner/configs/debug/sequencer_config.json
@@ -14,7 +14,7 @@
             "max_retries": 5
         },
         "channel_id": "0101010101010101010101010101010101010101010101010101010101010101",
-        "node_url": "http://localhost:18080"
+        "node_url": "http://localhost:8080"
     },
     "indexer_rpc_url": "ws://localhost:8779",
     "initial_accounts": [
From f64b7ba0ef327dd10826cffe526fa82cb7427f38 Mon Sep 17 00:00:00 2001
From: Daniil Polyakov
Date: Fri, 13 Feb 2026 01:56:10 +0300
Subject: [PATCH 11/16] chore: ignore bedrock_signing_key

---
 .gitignore | 1 +
 1 file changed, 1 insertion(+)

diff --git a/.gitignore b/.gitignore
index 3ae41ab9..40be2daa 100644
--- a/.gitignore
+++ b/.gitignore
@@ -10,3 +10,4 @@ sequencer_runner/data/
 storage.json
 result
 wallet-ffi/wallet_ffi.h
+bedrock_signing_key
From c63ecd6aba69e1dee2f038dac580bee1943bcea6 Mon Sep 17 00:00:00 2001
From: Daniil Polyakov
Date: Fri, 13 Feb 2026 02:05:55 +0300
Subject: [PATCH 12/16] fix: initiate sequencer with chain height from db and not from config genesis id

---
 common/src/block.rs       |  1 +
 sequencer_core/src/lib.rs | 54 ++++++++++++++++++++++++++++++++++++++-
 storage/src/lib.rs        |  2 ++
 3 files changed, 56 insertions(+), 1 deletion(-)

diff --git a/common/src/block.rs b/common/src/block.rs
index 91ad1175..90bbe357 100644
--- a/common/src/block.rs
+++ b/common/src/block.rs
@@ -10,6 +10,7 @@ pub type TimeStamp = u64;

 #[derive(Debug, Clone, BorshSerialize, BorshDeserialize)]
 pub struct BlockMeta {
+    pub id: BlockId,
     pub hash: BlockHash,
     pub msg_id: MantleMsgId,
 }
diff --git a/sequencer_core/src/lib.rs b/sequencer_core/src/lib.rs
index 31f76a0a..596ac747 100644
--- a/sequencer_core/src/lib.rs
+++ b/sequencer_core/src/lib.rs
@@ -98,6 +98,9 @@ impl SequencerCore
@@ @@ impl SequencerCore
From 8da1c9172a5f96d9a0cbeba7101fba4e1ac237c2 Mon Sep 17 00:00:00 2001
From: Daniil Polyakov
Date: Fri, 13 Feb 2026 15:44:16 +0300
Subject: [PATCH 13/16] fix: resubmit just one pending block

---
 sequencer_runner/src/lib.rs | 7 +++++--
 1 file changed, 5 insertions(+), 2 deletions(-)

diff --git a/sequencer_runner/src/lib.rs b/sequencer_runner/src/lib.rs
index f6391af2..aab035fd 100644
--- a/sequencer_runner/src/lib.rs
+++ b/sequencer_runner/src/lib.rs
@@ -174,8 +174,11 @@ async fn retry_pending_blocks_loop(
         (pending_blocks, client)
     };

-        info!("Resubmitting {} pending blocks", pending_blocks.len());
-        for block in &pending_blocks {
+        if let Some(block) = pending_blocks.first() {
+            info!(
+                "Resubmitting pending block with id {}",
+                block.header.block_id
+            );
             // TODO: We could cache the inscribe tx for each pending block to avoid re-creating it
             // on every retry.
             let (tx, _msg_id) = block_settlement_client
From 439392cf26a45e2338124ddaa99de0321f49c961 Mon Sep 17 00:00:00 2001
From: Sergio Chouhy
Date: Fri, 13 Feb 2026 17:57:43 -0300
Subject: [PATCH 14/16] fix: sequencer does not submit newly created blocks & fix to resubmit first pending block logic

---
 sequencer_core/src/lib.rs                        | 22 ++++----------
 .../configs/debug/sequencer_config.json          |  4 ++--
 sequencer_runner/src/lib.rs                      |  9 ++++----
 3 files changed, 11 insertions(+), 24 deletions(-)

diff --git a/sequencer_core/src/lib.rs b/sequencer_core/src/lib.rs
index 596ac747..9a2ca2b1 100644
--- a/sequencer_core/src/lib.rs
+++ b/sequencer_core/src/lib.rs
@@ -173,24 +173,10 @@ impl SequencerCore
-    pub async fn produce_new_block_and_post_to_settlement_layer(&mut self) -> Result<u64> {
-        {
-            let (tx, msg_id) = self
-                .produce_new_block_with_mempool_transactions()
-                .context("Failed to produce new block with mempool transactions")?;
-            match self
-                .block_settlement_client
-                .submit_inscribe_tx_to_bedrock(tx)
-                .await
-            {
-                Ok(()) => {
-                    info!("Posted block data to Bedrock, msg_id: {msg_id:?}");
-                }
-                Err(err) => {
-                    error!("Failed to post block data to Bedrock with error: {err:#}");
-                }
-            }
-        }
+    pub async fn produce_new_block(&mut self) -> Result<u64> {
+        let (_tx, _msg_id) = self
+            .produce_new_block_with_mempool_transactions()
+            .context("Failed to produce new block with mempool transactions")?;

         Ok(self.chain_height)
     }
diff --git a/sequencer_runner/configs/debug/sequencer_config.json b/sequencer_runner/configs/debug/sequencer_config.json
index 2e4d4ed3..4ac8e15b 100644
--- a/sequencer_runner/configs/debug/sequencer_config.json
+++ b/sequencer_runner/configs/debug/sequencer_config.json
@@ -5,8 +5,8 @@
     "is_genesis_random": true,
     "max_num_tx_in_block": 20,
     "mempool_max_size": 1000,
-    "block_create_timeout_millis": 5000,
-    "retry_pending_blocks_timeout_millis": 7000,
+    "block_create_timeout_millis": 12000,
+    "retry_pending_blocks_timeout_millis": 6000,
     "port": 3040,
     "bedrock_config": {
         "backoff": {
diff --git a/sequencer_runner/src/lib.rs b/sequencer_runner/src/lib.rs
index aab035fd..b3020e93 100644
--- a/sequencer_runner/src/lib.rs
+++ b/sequencer_runner/src/lib.rs
@@ -147,9 +147,7 @@ async fn main_loop(seq_core: Arc>, block_timeout: Duration)
         let id = {
             let mut state = seq_core.lock().await;
-            state
-                .produce_new_block_and_post_to_settlement_layer()
-                .await?
+            state.produce_new_block().await?
         };

         info!("Block with id {id} created");
@@ -174,7 +172,10 @@ async fn retry_pending_blocks_loop(
         (pending_blocks, client)
     };

-        if let Some(block) = pending_blocks.first() {
+        if let Some(block) = pending_blocks
+            .iter()
+            .min_by_key(|block| block.header.block_id)
+        {
             info!(
                 "Resubmitting pending block with id {}",
                 block.header.block_id
From 8b16318c38b53ac5bf5048e57e7f3022d708ba5b Mon Sep 17 00:00:00 2001
From: Daniil Polyakov
Date: Fri, 13 Feb 2026 23:54:50 +0300
Subject: [PATCH 15/16] fix: use base58 encoding for account in Explorer & some formatting chores

---
 Cargo.lock                                 |   6 +-
 explorer_service/Cargo.toml                |   3 -
 explorer_service/src/api.rs                |  28 ++---
 .../src/components/account_preview.rs      |   6 +-
 .../src/components/block_preview.rs        |   4 +-
 .../src/components/transaction_preview.rs  |   2 +-
 explorer_service/src/format_utils.rs       |  24 ----
 explorer_service/src/pages/account_page.rs |  19 +--
 explorer_service/src/pages/block_page.rs   |  15 ++-
 .../src/pages/transaction_page.rs          |  28 ++---
 indexer/service/protocol/Cargo.toml        |   4 +
 indexer/service/protocol/src/convert.rs    |  20 ++-
 indexer/service/protocol/src/lib.rs        | 114 +++++++++++++++---
 indexer/service/src/mock_service.rs        |  10 +-
 wallet/Cargo.toml                          |   1 -
 wallet/src/cli/account.rs                  |   4 +-
 16 files changed, 169 insertions(+), 119 deletions(-)

diff --git a/Cargo.lock b/Cargo.lock
index 157d25ab..9ef3c7a2 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -2484,7 +2484,6 @@ dependencies = [
  "console_error_panic_hook",
  "console_log",
  "env_logger",
- "hex",
  "indexer_service_protocol",
  "indexer_service_rpc",
  "jsonrpsee",
@@ -3427,12 +3426,16 @@
 name = "indexer_service_protocol"
 version = "0.1.0"
 dependencies = [
+ "anyhow",
+ "base58",
  "base64 0.22.1",
  "common",
+ "hex",
  "nssa",
  "nssa_core",
  "schemars 1.2.0",
  "serde",
+ "serde_with",
 ]

 [[package]]
@@ -8257,7 +8260,6 @@ dependencies = [
  "amm_core",
  "anyhow",
  "async-stream",
- "base58",
  "base64 0.22.1",
  "borsh",
  "bytemuck",
diff --git a/explorer_service/Cargo.toml b/explorer_service/Cargo.toml
index 49d1ddce..219f2bc0 100644
--- a/explorer_service/Cargo.toml
+++ b/explorer_service/Cargo.toml
@@ -26,9 +26,6 @@ console_log = "1.0"
 # Date/Time
 chrono.workspace = true

-# Hex encoding/decoding
-hex.workspace = true
-
 # URL encoding
 urlencoding = "2.1"

diff --git a/explorer_service/src/api.rs b/explorer_service/src/api.rs
index c3360c01..c489c827 100644
--- a/explorer_service/src/api.rs
+++ b/explorer_service/src/api.rs
@@ -1,3 +1,5 @@
+use std::str::FromStr as _;
+
 use indexer_service_protocol::{Account, AccountId, Block, BlockId, HashType, Transaction};
 use leptos::prelude::*;
 use serde::{Deserialize, Serialize};
@@ -25,13 +27,6 @@ pub async fn get_account(account_id: AccountId) -> Result

-/// Parse a hex string to bytes
-fn parse_hex(s: &str) -> Option<Vec<u8>> {
-    let s = s.trim().trim_start_matches("0x");
-    hex::decode(s).ok()
-}
-
 /// Search for a block, transaction, or account by query string
 #[server]
 pub async fn search(query: String) -> Result {
@@ -42,12 +37,8 @@ pub async fn search(query: String) -> Result {
     let mut transactions = Vec::new();
     let mut accounts = Vec::new();

-    // Try to parse as hash (32 bytes)
-    if let Some(bytes) = parse_hex(&query)
-        && let Ok(hash_array) = <[u8; 32]>::try_from(bytes)
-    {
-        let hash = HashType(hash_array);
-
+    // Try as hash
+    if let Ok(hash) = HashType::from_str(&query) {
         // Try as block hash
         if let Ok(block) = client.get_block_by_hash(hash).await {
             blocks.push(block);
         }

+        // Try as transaction hash
         if let Ok(tx) = client.get_transaction(hash).await {
             transactions.push(tx);
         }
+    }

-        // Try as account ID
-        let account_id = AccountId { value: hash_array };
-        if let Ok(account) = client.get_account(account_id).await {
-            accounts.push((account_id, account));
-        }
+    // Try as account ID
+    if let Ok(account_id) = AccountId::from_str(&query)
+        && let Ok(account) = client.get_account(account_id).await
+    {
+        accounts.push((account_id, account));
     }

     // Try as block ID
diff --git a/explorer_service/src/components/account_preview.rs b/explorer_service/src/components/account_preview.rs
index 3a99eeb8..bbe59c0f 100644
--- a/explorer_service/src/components/account_preview.rs
+++ b/explorer_service/src/components/account_preview.rs
@@ -2,12 +2,10 @@ use indexer_service_protocol::{Account, AccountId};
 use leptos::prelude::*;
 use leptos_router::components::A;

-use crate::format_utils;
-
 /// Account preview component
 #[component]
 pub fn AccountPreview(account_id: AccountId, account: Account) -> impl IntoView {
-    let account_id_str = format_utils::format_account_id(&account_id);
+    let account_id_str = account_id.to_string();
     view! {
@@ @@
         {move || {
             let Account { program_owner, balance, data, nonce } = &account;
-            let program_id = format_utils::format_program_id(program_owner);
+            let program_id = program_owner.to_string();
             view! {