feat: sort blocks from latest to oldest in explorer

This commit is contained in:
Daniil Polyakov 2026-03-04 21:54:23 +03:00
parent db5d1e5cf7
commit ec1018deac
11 changed files with 156 additions and 59 deletions

View File

@@ -37,9 +37,15 @@ run-sequencer:
# Run Indexer
[working-directory: 'indexer/service']
run-indexer:
run-indexer mock="":
@echo "🔍 Running indexer"
RUST_LOG=info RISC0_DEV_MODE=1 cargo run --release -p indexer_service configs/indexer_config.json
@if [ "{{mock}}" = "mock" ]; then \
echo "🧪 Using mock data"; \
RUST_LOG=info RISC0_DEV_MODE=1 cargo run --release --features mock-responses -p indexer_service configs/indexer_config.json; \
else \
echo "🚀 Using real data"; \
RUST_LOG=info RISC0_DEV_MODE=1 cargo run --release -p indexer_service configs/indexer_config.json; \
fi
# Run Explorer
[working-directory: 'explorer_service']
@@ -58,4 +64,5 @@ clean:
@echo "🧹 Cleaning run artifacts"
rm -rf sequencer_runner/bedrock_signing_key
rm -rf sequencer_runner/rocksdb
rm -rf indexer/rocksdb
rm -rf wallet/configs/debug/storage.json

View File

@@ -118,11 +118,11 @@ pub async fn get_transaction(tx_hash: HashType) -> Result<Transaction, ServerFnE
/// Get blocks with pagination
#[server]
pub async fn get_blocks(offset: u32, limit: u32) -> Result<Vec<Block>, ServerFnError> {
pub async fn get_blocks(before: Option<u64>, limit: u32) -> Result<Vec<Block>, ServerFnError> {
use indexer_service_rpc::RpcClient as _;
let client = expect_context::<IndexerRpcClient>();
client
.get_blocks(offset, limit)
.get_blocks(before, limit)
.await
.map_err(|e| ServerFnError::ServerError(format!("RPC error: {}", e)))
}

View File

@@ -7,7 +7,7 @@ use crate::{
components::{AccountPreview, BlockPreview, TransactionPreview},
};
const RECENT_BLOCKS_LIMIT: u64 = 10;
const RECENT_BLOCKS_LIMIT: u32 = 10;
/// Main page component
#[component]
@@ -39,23 +39,57 @@ pub fn MainPage() -> impl IntoView {
}
});
// Pagination state for blocks
let (all_blocks, set_all_blocks) = signal(Vec::new());
let (is_loading_blocks, set_is_loading_blocks) = signal(false);
let (has_more_blocks, set_has_more_blocks) = signal(true);
let (oldest_loaded_block_id, set_oldest_loaded_block_id) = signal(None::<u64>);
// Load recent blocks on mount
let recent_blocks_resource = Resource::new(
|| (),
|_| async {
match api::get_latest_block_id().await {
Ok(last_id) => {
api::get_blocks(
std::cmp::max(last_id.saturating_sub(RECENT_BLOCKS_LIMIT) as u32, 1),
(RECENT_BLOCKS_LIMIT + 1) as u32,
)
.await
}
Err(err) => Err(err),
}
},
|_| async { api::get_blocks(None, RECENT_BLOCKS_LIMIT).await },
);
// Update all_blocks when initial load completes
Effect::new(move || {
if let Some(Ok(blocks)) = recent_blocks_resource.get() {
let oldest_id = blocks.last().map(|b| b.header.block_id);
set_all_blocks.set(blocks.clone());
set_oldest_loaded_block_id.set(oldest_id);
set_has_more_blocks
.set(blocks.len() as u32 == RECENT_BLOCKS_LIMIT && oldest_id.unwrap_or(0) > 1);
}
});
// Load more blocks handler
let load_more_blocks = move |_| {
let before_id = oldest_loaded_block_id.get();
if before_id.is_none() {
return;
}
set_is_loading_blocks.set(true);
leptos::task::spawn_local(async move {
match api::get_blocks(before_id, RECENT_BLOCKS_LIMIT).await {
Ok(new_blocks) => {
let blocks_count = new_blocks.len() as u32;
let new_oldest_id = new_blocks.last().map(|b| b.header.block_id);
set_all_blocks.update(|blocks| blocks.extend(new_blocks));
set_oldest_loaded_block_id.set(new_oldest_id);
set_has_more_blocks
.set(blocks_count == RECENT_BLOCKS_LIMIT && new_oldest_id.unwrap_or(0) > 1);
}
Err(e) => {
log::error!("Failed to load more blocks: {}", e);
}
}
set_is_loading_blocks.set(false);
});
};
// Handle search - update URL parameter
let on_search = move |ev: SubmitEvent| {
ev.prevent_default();
@@ -196,19 +230,48 @@ pub fn MainPage() -> impl IntoView {
recent_blocks_resource
.get()
.map(|result| match result {
Ok(blocks) if !blocks.is_empty() => {
view! {
<div class="blocks-list">
{blocks
.into_iter()
.map(|block| view! { <BlockPreview block=block /> })
.collect::<Vec<_>>()}
</div>
}
.into_any()
}
Ok(_) => {
view! { <div class="no-blocks">"No blocks found"</div> }.into_any()
let blocks = all_blocks.get();
if blocks.is_empty() {
view! { <div class="no-blocks">"No blocks found"</div> }
.into_any()
} else {
view! {
<div>
<div class="blocks-list">
{blocks
.into_iter()
.map(|block| view! { <BlockPreview block=block /> })
.collect::<Vec<_>>()}
</div>
{move || {
if has_more_blocks.get() {
view! {
<button
class="load-more-button"
on:click=load_more_blocks
disabled=move || is_loading_blocks.get()
>
{move || {
if is_loading_blocks.get() {
"Loading..."
} else {
"Load More"
}
}}
</button>
}
.into_any()
} else {
().into_any()
}
}}
</div>
}
.into_any()
}
}
Err(e) => {
view! { <div class="error">{format!("Error: {}", e)}</div> }

View File

@@ -50,8 +50,8 @@ impl IndexerStore {
Ok(self.dbio.get_block(id)?)
}
pub fn get_block_batch(&self, offset: u64, limit: u64) -> Result<Vec<Block>> {
Ok(self.dbio.get_block_batch(offset, limit)?)
pub fn get_block_batch(&self, before: Option<u64>, limit: u64) -> Result<Vec<Block>> {
Ok(self.dbio.get_block_batch(before, limit)?)
}
pub fn get_transaction_by_hash(&self, tx_hash: [u8; 32]) -> Result<NSSATransaction> {

View File

@@ -1,5 +1,5 @@
{
"home": "./indexer/service",
"home": ".",
"consensus_info_polling_interval": "60s",
"bedrock_client_config": {
"addr": "http://localhost:8080",

View File

@@ -42,7 +42,11 @@ pub trait Rpc {
async fn get_transaction(&self, tx_hash: HashType) -> Result<Transaction, ErrorObjectOwned>;
#[method(name = "getBlocks")]
async fn get_blocks(&self, offset: u32, limit: u32) -> Result<Vec<Block>, ErrorObjectOwned>;
async fn get_blocks(
&self,
before: Option<u64>,
limit: u32,
) -> Result<Vec<Block>, ErrorObjectOwned>;
#[method(name = "getTransactionsByAccount")]
async fn get_transactions_by_account(

View File

@@ -43,10 +43,10 @@ impl MockIndexerService {
);
}
// Create 10 blocks with transactions
// Create 100 blocks with transactions
let mut prev_hash = HashType([0u8; 32]);
for block_id in 0..10 {
for block_id in 1..=100 {
let block_hash = {
let mut hash = [0u8; 32];
hash[0] = block_id as u8;
@@ -225,23 +225,20 @@ impl indexer_service_rpc::RpcServer for MockIndexerService {
.ok_or_else(|| ErrorObjectOwned::owned(-32001, "Transaction not found", None::<()>))
}
async fn get_blocks(&self, offset: u32, limit: u32) -> Result<Vec<Block>, ErrorObjectOwned> {
let offset = offset as usize;
let limit = limit as usize;
let total = self.blocks.len();
async fn get_blocks(
&self,
before: Option<u64>,
limit: u32,
) -> Result<Vec<Block>, ErrorObjectOwned> {
let start_id = before.map_or_else(|| self.blocks.len() as u64, |id| id.saturating_sub(1));
// Return blocks in reverse order (newest first), with pagination
let start = offset.min(total);
let end = (offset + limit).min(total);
Ok(self
.blocks
.iter()
let result = (1..=start_id)
.rev()
.skip(start)
.take(end - start)
.cloned()
.collect())
.take(limit as usize)
.map_while(|block_id| self.blocks.get(block_id as usize - 1).cloned())
.collect();
Ok(result)
}
async fn get_transactions_by_account(

View File

@@ -88,11 +88,15 @@ impl indexer_service_rpc::RpcServer for IndexerService {
.into())
}
async fn get_blocks(&self, offset: u32, limit: u32) -> Result<Vec<Block>, ErrorObjectOwned> {
async fn get_blocks(
&self,
before: Option<u64>,
limit: u32,
) -> Result<Vec<Block>, ErrorObjectOwned> {
let blocks = self
.indexer
.store
.get_block_batch(offset as u64, limit as u64)
.get_block_batch(before, limit as u64)
.map_err(db_error)?;
let mut block_res = vec![];

View File

@@ -61,8 +61,11 @@ async fn indexer_block_batching() -> Result<()> {
assert!(last_block_indexer > 1);
// Getting wide batch to fit all blocks
let block_batch = ctx.indexer_client().get_blocks(1, 100).await.unwrap();
// Getting wide batch to fit all blocks (from latest backwards)
let mut block_batch = ctx.indexer_client().get_blocks(None, 100).await.unwrap();
// Reverse to check chain consistency from oldest to newest
block_batch.reverse();
// Checking chain consistency
let mut prev_block_hash = block_batch.first().unwrap().header.hash;

View File

@@ -1,5 +1,5 @@
{
"home": "./sequencer_runner",
"home": ".",
"override_rust_log": null,
"genesis_id": 1,
"is_genesis_random": true,

View File

@@ -539,13 +539,26 @@ impl RocksDBIO {
}
}
pub fn get_block_batch(&self, offset: u64, limit: u64) -> DbResult<Vec<Block>> {
pub fn get_block_batch(&self, before: Option<u64>, limit: u64) -> DbResult<Vec<Block>> {
let cf_block = self.block_column();
let mut block_batch = vec![];
// Determine the starting block ID
let start_block_id = if let Some(before_id) = before {
before_id.saturating_sub(1)
} else {
// Get the latest block ID
self.get_meta_last_block_in_db()?
};
// ToDo: Multi get this
for block_id in offset..(offset + limit) {
for i in 0..limit {
let block_id = start_block_id.saturating_sub(i);
if block_id == 0 {
break;
}
let res = self
.db
.get_cf(
@@ -1215,7 +1228,10 @@ mod tests {
let block_hashes_mem: Vec<[u8; 32]> =
block_res.into_iter().map(|bl| bl.header.hash.0).collect();
let batch_res = dbio.get_block_batch(2, 4).unwrap();
// Get blocks before ID 6 (i.e., starting from 5 going backwards), limit 4
// This should return blocks 5, 4, 3, 2 in descending order
let mut batch_res = dbio.get_block_batch(Some(6), 4).unwrap();
batch_res.reverse(); // Reverse to match ascending order for comparison
let block_hashes_db: Vec<[u8; 32]> =
batch_res.into_iter().map(|bl| bl.header.hash.0).collect();
@@ -1224,7 +1240,10 @@ mod tests {
let block_hashes_mem_limited = &block_hashes_mem[1..];
let batch_res_limited = dbio.get_block_batch(3, 4).unwrap();
// Get blocks before ID 6, limit 3
// This should return blocks 5, 4, 3 in descending order
let mut batch_res_limited = dbio.get_block_batch(Some(6), 3).unwrap();
batch_res_limited.reverse(); // Reverse to match ascending order for comparison
let block_hashes_db_limited: Vec<[u8; 32]> = batch_res_limited
.into_iter()