diff --git a/Cargo.lock b/Cargo.lock index 94880644..33f810e0 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1765,6 +1765,7 @@ dependencies = [ "serde_with", "sha2", "thiserror 2.0.18", + "tokio-retry", "url", ] diff --git a/Justfile b/Justfile index db167d26..c8ffe09d 100644 --- a/Justfile +++ b/Justfile @@ -37,9 +37,15 @@ run-sequencer: # Run Indexer [working-directory: 'indexer/service'] -run-indexer: +run-indexer mock="": @echo "๐Ÿ” Running indexer" - RUST_LOG=info RISC0_DEV_MODE=1 cargo run --release -p indexer_service configs/indexer_config.json + @if [ "{{mock}}" = "mock" ]; then \ + echo "๐Ÿงช Using mock data"; \ + RUST_LOG=info RISC0_DEV_MODE=1 cargo run --release --features mock-responses -p indexer_service configs/indexer_config.json; \ + else \ + echo "๐Ÿš€ Using real data"; \ + RUST_LOG=info RISC0_DEV_MODE=1 cargo run --release -p indexer_service configs/indexer_config.json; \ + fi # Run Explorer [working-directory: 'explorer_service'] @@ -58,4 +64,6 @@ clean: @echo "๐Ÿงน Cleaning run artifacts" rm -rf sequencer_runner/bedrock_signing_key rm -rf sequencer_runner/rocksdb + rm -rf indexer/service/rocksdb rm -rf wallet/configs/debug/storage.json + cd bedrock && docker compose down -v diff --git a/artifacts/program_methods/privacy_preserving_circuit.bin b/artifacts/program_methods/privacy_preserving_circuit.bin index 3a0330e6..5b854dca 100644 Binary files a/artifacts/program_methods/privacy_preserving_circuit.bin and b/artifacts/program_methods/privacy_preserving_circuit.bin differ diff --git a/common/Cargo.toml b/common/Cargo.toml index f7658304..bf4a0032 100644 --- a/common/Cargo.toml +++ b/common/Cargo.toml @@ -22,3 +22,4 @@ bytesize.workspace = true base64.workspace = true url.workspace = true logos-blockchain-common-http-client.workspace = true +tokio-retry.workspace = true diff --git a/common/src/sequencer_client.rs b/common/src/sequencer_client.rs index 7e5819ec..f847b865 100644 --- a/common/src/sequencer_client.rs +++ b/common/src/sequencer_client.rs @@ -67,15 +67,24 @@ impl SequencerClient { "Calling method {method} with payload {request:?} to sequencer at {}", self.sequencer_addr ); - let mut call_builder = self.client.post(self.sequencer_addr.clone()); - if let Some(BasicAuth { username, password }) = &self.basic_auth { - call_builder = call_builder.basic_auth(username, password.as_deref()); - } + let strategy = tokio_retry::strategy::FixedInterval::from_millis(10000).take(60); - let call_res = call_builder.json(&request).send().await?; + let response_vall = tokio_retry::Retry::spawn(strategy, || async { + let mut call_builder = self.client.post(self.sequencer_addr.clone()); - let response_vall = call_res.json::().await?; + if let Some(BasicAuth { username, password }) = &self.basic_auth { + call_builder = call_builder.basic_auth(username, password.as_deref()); + } + + let call_res_res = call_builder.json(&request).send().await; + + match call_res_res { + Err(err) => Err(err), + Ok(call_res) => call_res.json::().await, + } + }) + .await?; #[derive(Debug, Clone, Deserialize)] #[allow(dead_code)] diff --git a/completions/README.md b/completions/README.md index 4695c9e6..9da5a093 100644 --- a/completions/README.md +++ b/completions/README.md @@ -1,6 +1,6 @@ # Wallet CLI Completion -Completion scripts for the LSSA `wallet` command. +Completion scripts for the LSSA `wallet` command. ## ZSH @@ -19,9 +19,9 @@ Preconfigured accounts and accounts only with `/` (no number) are not completed. 
e.g.: ``` -โ–ถ wallet account list -Preconfigured Public/Gj1mJy5W7J5pfmLRujmQaLfLMWidNxQ6uwnhb666ZwHw, -Preconfigured Public/BLgCRDXYdQPMMWVHYRFGQZbgeHx9frkipa8GtpG2Syqy, +โ–ถ wallet account list +Preconfigured Public/7wHg9sbJwc6h3NP1S9bekfAzB8CHifEcxKswCKUt3YQo, +Preconfigured Public/6iArKUXxhUJqS7kCaPNhwMWt3ro71PDyBj7jwAyE2VQV, Preconfigured Private/3oCG8gqdKLMegw4rRfyaMQvuPHpcASt7xwttsmnZLSkw, Preconfigured Private/AKTcXgJ1xoynta1Ec7y6Jso1z1JQtHqd7aPQ1h9er6xX, / Public/8DstRgMQrB2N9a7ymv98RDDbt8nctrP9ZzaNRSpKDZSu, diff --git a/configs/docker-all-in-one/indexer/indexer_config.json b/configs/docker-all-in-one/indexer/indexer_config.json index 4c4fe085..c2b07e3e 100644 --- a/configs/docker-all-in-one/indexer/indexer_config.json +++ b/configs/docker-all-in-one/indexer/indexer_config.json @@ -1,5 +1,6 @@ { - "resubscribe_interval": "1s", + "home": "./indexer/service", + "consensus_info_polling_interval": "1s", "bedrock_client_config": { "addr": "http://logos-blockchain-node-0:18080", "backoff": { @@ -7,5 +8,153 @@ "max_retries": 5 } }, - "channel_id": "0101010101010101010101010101010101010101010101010101010101010101" + "channel_id": "0101010101010101010101010101010101010101010101010101010101010101", + "initial_accounts": [ + { + "account_id": "6iArKUXxhUJqS7kCaPNhwMWt3ro71PDyBj7jwAyE2VQV", + "balance": 10000 + }, + { + "account_id": "7wHg9sbJwc6h3NP1S9bekfAzB8CHifEcxKswCKUt3YQo", + "balance": 20000 + } + ], + "initial_commitments": [ + { + "npk":[ + 177, + 64, + 1, + 11, + 87, + 38, + 254, + 159, + 231, + 165, + 1, + 94, + 64, + 137, + 243, + 76, + 249, + 101, + 251, + 129, + 33, + 101, + 189, + 30, + 42, + 11, + 191, + 34, + 103, + 186, + 227, + 230 + ] , + "account": { + "program_owner": [ + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0 + ], + "balance": 10000, + "data": [], + "nonce": 0 + } + }, + { + "npk": [ + 32, + 67, + 72, + 164, + 106, + 53, + 66, + 239, + 141, + 15, + 52, + 230, + 136, + 177, + 2, + 236, + 207, + 243, + 134, + 135, + 210, + 143, + 87, + 232, + 215, + 128, + 194, + 120, + 113, + 224, + 4, + 165 + ], + "account": { + "program_owner": [ + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0 + ], + "balance": 20000, + "data": [], + "nonce": 0 + } + } + ], + "signing_key": [ + 37, + 37, + 37, + 37, + 37, + 37, + 37, + 37, + 37, + 37, + 37, + 37, + 37, + 37, + 37, + 37, + 37, + 37, + 37, + 37, + 37, + 37, + 37, + 37, + 37, + 37, + 37, + 37, + 37, + 37, + 37, + 37 + ] } diff --git a/configs/docker-all-in-one/sequencer/sequencer_config.json b/configs/docker-all-in-one/sequencer/sequencer_config.json index 7217bf5a..8fc34911 100644 --- a/configs/docker-all-in-one/sequencer/sequencer_config.json +++ b/configs/docker-all-in-one/sequencer/sequencer_config.json @@ -20,17 +20,50 @@ "indexer_rpc_url": "ws://indexer_service:8779", "initial_accounts": [ { - "account_id": "BLgCRDXYdQPMMWVHYRFGQZbgeHx9frkipa8GtpG2Syqy", + "account_id": "6iArKUXxhUJqS7kCaPNhwMWt3ro71PDyBj7jwAyE2VQV", "balance": 10000 }, { - "account_id": "Gj1mJy5W7J5pfmLRujmQaLfLMWidNxQ6uwnhb666ZwHw", + "account_id": "7wHg9sbJwc6h3NP1S9bekfAzB8CHifEcxKswCKUt3YQo", "balance": 20000 } ], "initial_commitments": [ { - "npk": [13, 25, 40, 5, 198, 248, 210, 248, 237, 121, 124, 145, 186, 142, 253, 216, 236, 69, 193, 32, 166, 167, 49, 133, 172, 111, 159, 46, 84, 17, 157, 23], + "npk":[ + 177, + 64, + 1, + 11, + 87, + 38, + 254, + 159, + 231, + 165, + 1, + 94, + 64, + 137, + 243, + 76, + 249, + 101, + 251, + 129, + 33, + 101, + 189, + 30, + 42, + 11, + 191, + 34, + 103, + 186, + 227, + 230 + ] , "account": { "program_owner": [ 0, @@ -48,7 +81,40 @@ } 
}, { - "npk": [32, 67, 72, 164, 106, 53, 66, 239, 141, 15, 52, 230, 136, 177, 2, 236, 207, 243, 134, 135, 210, 143, 87, 232, 215, 128, 194, 120, 113, 224, 4, 165], + "npk": [ + 32, + 67, + 72, + 164, + 106, + 53, + 66, + 239, + 141, + 15, + 52, + 230, + 136, + 177, + 2, + 236, + 207, + 243, + 134, + 135, + 210, + 143, + 87, + 232, + 215, + 128, + 194, + 120, + 113, + 224, + 4, + 165 + ], "account": { "program_owner": [ 0, diff --git a/explorer_service/src/api.rs b/explorer_service/src/api.rs index 4dfd75cd..1e2bdd5b 100644 --- a/explorer_service/src/api.rs +++ b/explorer_service/src/api.rs @@ -118,11 +118,11 @@ pub async fn get_transaction(tx_hash: HashType) -> Result Result, ServerFnError> { +pub async fn get_blocks(before: Option, limit: u32) -> Result, ServerFnError> { use indexer_service_rpc::RpcClient as _; let client = expect_context::(); client - .get_blocks(offset, limit) + .get_blocks(before, limit) .await .map_err(|e| ServerFnError::ServerError(format!("RPC error: {}", e))) } diff --git a/explorer_service/src/pages/main_page.rs b/explorer_service/src/pages/main_page.rs index 3cfb832d..28d1d1d9 100644 --- a/explorer_service/src/pages/main_page.rs +++ b/explorer_service/src/pages/main_page.rs @@ -7,7 +7,7 @@ use crate::{ components::{AccountPreview, BlockPreview, TransactionPreview}, }; -const RECENT_BLOCKS_LIMIT: u64 = 10; +const RECENT_BLOCKS_LIMIT: u32 = 10; /// Main page component #[component] @@ -39,23 +39,57 @@ pub fn MainPage() -> impl IntoView { } }); + // Pagination state for blocks + let (all_blocks, set_all_blocks) = signal(Vec::new()); + let (is_loading_blocks, set_is_loading_blocks) = signal(false); + let (has_more_blocks, set_has_more_blocks) = signal(true); + let (oldest_loaded_block_id, set_oldest_loaded_block_id) = signal(None::); + // Load recent blocks on mount let recent_blocks_resource = Resource::new( || (), - |_| async { - match api::get_latest_block_id().await { - Ok(last_id) => { - api::get_blocks( - std::cmp::max(last_id.saturating_sub(RECENT_BLOCKS_LIMIT) as u32, 1), - (RECENT_BLOCKS_LIMIT + 1) as u32, - ) - .await - } - Err(err) => Err(err), - } - }, + |_| async { api::get_blocks(None, RECENT_BLOCKS_LIMIT).await }, ); + // Update all_blocks when initial load completes + Effect::new(move || { + if let Some(Ok(blocks)) = recent_blocks_resource.get() { + let oldest_id = blocks.last().map(|b| b.header.block_id); + set_all_blocks.set(blocks.clone()); + set_oldest_loaded_block_id.set(oldest_id); + set_has_more_blocks + .set(blocks.len() as u32 == RECENT_BLOCKS_LIMIT && oldest_id.unwrap_or(0) > 1); + } + }); + + // Load more blocks handler + let load_more_blocks = move |_| { + let before_id = oldest_loaded_block_id.get(); + + if before_id.is_none() { + return; + } + + set_is_loading_blocks.set(true); + + leptos::task::spawn_local(async move { + match api::get_blocks(before_id, RECENT_BLOCKS_LIMIT).await { + Ok(new_blocks) => { + let blocks_count = new_blocks.len() as u32; + let new_oldest_id = new_blocks.last().map(|b| b.header.block_id); + set_all_blocks.update(|blocks| blocks.extend(new_blocks)); + set_oldest_loaded_block_id.set(new_oldest_id); + set_has_more_blocks + .set(blocks_count == RECENT_BLOCKS_LIMIT && new_oldest_id.unwrap_or(0) > 1); + } + Err(e) => { + log::error!("Failed to load more blocks: {}", e); + } + } + set_is_loading_blocks.set(false); + }); + }; + // Handle search - update URL parameter let on_search = move |ev: SubmitEvent| { ev.prevent_default(); @@ -196,19 +230,48 @@ pub fn MainPage() -> impl IntoView { recent_blocks_resource .get() 
 .map(|result| match result {
-                        Ok(blocks) if !blocks.is_empty() => {
-                            view! {
-                                <div>
-                                    {blocks
-                                        .into_iter()
-                                        .map(|block| view! { <BlockPreview block=block /> })
-                                        .collect::<Vec<_>>()}
-                                </div>
-                            }
-                            .into_any()
-                        }
                         Ok(_) => {
-                            view! { <div>"No blocks found"</div> }.into_any()
+                            let blocks = all_blocks.get();
+                            if blocks.is_empty() {
+                                view! { <div>"No blocks found"</div> }
+                                    .into_any()
+                            } else {
+                                view! {
+                                    <div>
+                                        <div>
+                                            {blocks
+                                                .into_iter()
+                                                .map(|block| view! { <BlockPreview block=block /> })
+                                                .collect::<Vec<_>>()}
+                                        </div>
+                                        {move || {
+                                            if has_more_blocks.get() {
+                                                view! {
+                                                    <button on:click=load_more_blocks disabled=is_loading_blocks>
+                                                        {move || if is_loading_blocks.get() { "Loading..." } else { "Load More" }}
+                                                    </button>
+                                                }
+                                                .into_any()
+                                            } else {
+                                                ().into_any()
+                                            }
+                                        }}
+                                    </div>
+                                }
+                                .into_any()
+                            }
                         }
                         Err(e) => {
                             view! {
                                 <div>{format!("Error: {}", e)}</div>
} diff --git a/indexer/core/src/block_store.rs b/indexer/core/src/block_store.rs index ce3881bd..681b63c8 100644 --- a/indexer/core/src/block_store.rs +++ b/indexer/core/src/block_store.rs @@ -50,8 +50,8 @@ impl IndexerStore { Ok(self.dbio.get_block(id)?) } - pub fn get_block_batch(&self, offset: u64, limit: u64) -> Result> { - Ok(self.dbio.get_block_batch(offset, limit)?) + pub fn get_block_batch(&self, before: Option, limit: u64) -> Result> { + Ok(self.dbio.get_block_batch(before, limit)?) } pub fn get_transaction_by_hash(&self, tx_hash: [u8; 32]) -> Result { diff --git a/indexer/service/configs/indexer_config.json b/indexer/service/configs/indexer_config.json index 8b1b3c5e..e4dd8f93 100644 --- a/indexer/service/configs/indexer_config.json +++ b/indexer/service/configs/indexer_config.json @@ -1,6 +1,6 @@ { - "home": "./indexer/service", - "consensus_info_polling_interval": "60s", + "home": ".", + "consensus_info_polling_interval": "1s", "bedrock_client_config": { "addr": "http://localhost:8080", "backoff": { diff --git a/indexer/service/docker-entrypoint.sh b/indexer/service/docker-entrypoint.sh index d9408a05..49a5f891 100644 --- a/indexer/service/docker-entrypoint.sh +++ b/indexer/service/docker-entrypoint.sh @@ -5,7 +5,7 @@ set -e -CONFIG="/etc/indexer_service/indexer_service.json" +CONFIG="/etc/indexer_service/indexer_config.json" # Check config file exists if [ ! -f "$CONFIG" ]; then diff --git a/indexer/service/rpc/Cargo.toml b/indexer/service/rpc/Cargo.toml index b2194882..0fa72635 100644 --- a/indexer/service/rpc/Cargo.toml +++ b/indexer/service/rpc/Cargo.toml @@ -5,7 +5,7 @@ edition = "2024" license = { workspace = true } [dependencies] -indexer_service_protocol = { workspace = true, features = ["convert"] } +indexer_service_protocol.workspace = true jsonrpsee = { workspace = true, features = ["macros"] } serde_json.workspace = true diff --git a/indexer/service/rpc/src/lib.rs b/indexer/service/rpc/src/lib.rs index a769a2a7..2a67ac50 100644 --- a/indexer/service/rpc/src/lib.rs +++ b/indexer/service/rpc/src/lib.rs @@ -42,7 +42,11 @@ pub trait Rpc { async fn get_transaction(&self, tx_hash: HashType) -> Result; #[method(name = "getBlocks")] - async fn get_blocks(&self, offset: u32, limit: u32) -> Result, ErrorObjectOwned>; + async fn get_blocks( + &self, + before: Option, + limit: u32, + ) -> Result, ErrorObjectOwned>; #[method(name = "getTransactionsByAccount")] async fn get_transactions_by_account( diff --git a/indexer/service/src/mock_service.rs b/indexer/service/src/mock_service.rs index 3343e9cc..5f0cfbf2 100644 --- a/indexer/service/src/mock_service.rs +++ b/indexer/service/src/mock_service.rs @@ -43,10 +43,10 @@ impl MockIndexerService { ); } - // Create 10 blocks with transactions + // Create 100 blocks with transactions let mut prev_hash = HashType([0u8; 32]); - for block_id in 0..10 { + for block_id in 1..=100 { let block_hash = { let mut hash = [0u8; 32]; hash[0] = block_id as u8; @@ -225,23 +225,20 @@ impl indexer_service_rpc::RpcServer for MockIndexerService { .ok_or_else(|| ErrorObjectOwned::owned(-32001, "Transaction not found", None::<()>)) } - async fn get_blocks(&self, offset: u32, limit: u32) -> Result, ErrorObjectOwned> { - let offset = offset as usize; - let limit = limit as usize; - let total = self.blocks.len(); + async fn get_blocks( + &self, + before: Option, + limit: u32, + ) -> Result, ErrorObjectOwned> { + let start_id = before.map_or_else(|| self.blocks.len() as u64, |id| id.saturating_sub(1)); - // Return blocks in reverse order (newest first), with 
pagination - let start = offset.min(total); - let end = (offset + limit).min(total); - - Ok(self - .blocks - .iter() + let result = (1..=start_id) .rev() - .skip(start) - .take(end - start) - .cloned() - .collect()) + .take(limit as usize) + .map_while(|block_id| self.blocks.get(block_id as usize - 1).cloned()) + .collect(); + + Ok(result) } async fn get_transactions_by_account( diff --git a/indexer/service/src/service.rs b/indexer/service/src/service.rs index 13ec2ea8..da3e7cbd 100644 --- a/indexer/service/src/service.rs +++ b/indexer/service/src/service.rs @@ -88,11 +88,15 @@ impl indexer_service_rpc::RpcServer for IndexerService { .into()) } - async fn get_blocks(&self, offset: u32, limit: u32) -> Result, ErrorObjectOwned> { + async fn get_blocks( + &self, + before: Option, + limit: u32, + ) -> Result, ErrorObjectOwned> { let blocks = self .indexer .store - .get_block_batch(offset as u64, limit as u64) + .get_block_batch(before, limit as u64) .map_err(db_error)?; let mut block_res = vec![]; diff --git a/integration_tests/tests/indexer.rs b/integration_tests/tests/indexer.rs index d5207a41..ad169790 100644 --- a/integration_tests/tests/indexer.rs +++ b/integration_tests/tests/indexer.rs @@ -61,8 +61,11 @@ async fn indexer_block_batching() -> Result<()> { assert!(last_block_indexer > 1); - // Getting wide batch to fit all blocks - let block_batch = ctx.indexer_client().get_blocks(1, 100).await.unwrap(); + // Getting wide batch to fit all blocks (from latest backwards) + let mut block_batch = ctx.indexer_client().get_blocks(None, 100).await.unwrap(); + + // Reverse to check chain consistency from oldest to newest + block_batch.reverse(); // Checking chain consistency let mut prev_block_hash = block_batch.first().unwrap().header.hash; diff --git a/integration_tests/tests/keys_restoration.rs b/integration_tests/tests/keys_restoration.rs index 24299a56..1bd207be 100644 --- a/integration_tests/tests/keys_restoration.rs +++ b/integration_tests/tests/keys_restoration.rs @@ -1,9 +1,9 @@ use std::{str::FromStr, time::Duration}; -use anyhow::Result; +use anyhow::{Context, Result}; use integration_tests::{ - TIME_TO_WAIT_FOR_BLOCK_SECONDS, TestContext, format_private_account_id, - format_public_account_id, verify_commitment_is_in_state, + TIME_TO_WAIT_FOR_BLOCK_SECONDS, TestContext, fetch_privacy_preserving_tx, + format_private_account_id, format_public_account_id, verify_commitment_is_in_state, }; use key_protocol::key_management::key_tree::chain_index::ChainIndex; use log::info; @@ -15,6 +15,93 @@ use wallet::cli::{ programs::native_token_transfer::AuthTransferSubcommand, }; +#[test] +async fn sync_private_account_with_non_zero_chain_index() -> Result<()> { + let mut ctx = TestContext::new().await?; + + let from: AccountId = ctx.existing_private_accounts()[0]; + + // Create a new private account + let command = Command::Account(AccountSubcommand::New(NewSubcommand::Private { + cci: None, + label: None, + })); + + for _ in 0..3 { + // Key Tree shift + // This way we have account with child index > 0. 
+ let result = wallet::cli::execute_subcommand( + ctx.wallet_mut(), + Command::Account(AccountSubcommand::New(NewSubcommand::Private { + cci: None, + label: None, + })), + ) + .await?; + let SubcommandReturnValue::RegisterAccount { account_id: _ } = result else { + anyhow::bail!("Expected RegisterAccount return value"); + }; + } + + let sub_ret = wallet::cli::execute_subcommand(ctx.wallet_mut(), command).await?; + let SubcommandReturnValue::RegisterAccount { + account_id: to_account_id, + } = sub_ret + else { + anyhow::bail!("Expected RegisterAccount return value"); + }; + + // Get the keys for the newly created account + let (to_keys, _) = ctx + .wallet() + .storage() + .user_data + .get_private_account(to_account_id) + .cloned() + .context("Failed to get private account")?; + + // Send to this account using claiming path (using npk and vpk instead of account ID) + let command = Command::AuthTransfer(AuthTransferSubcommand::Send { + from: format_private_account_id(from), + to: None, + to_npk: Some(hex::encode(to_keys.nullifer_public_key.0)), + to_vpk: Some(hex::encode(to_keys.viewing_public_key.0)), + amount: 100, + }); + + let sub_ret = wallet::cli::execute_subcommand(ctx.wallet_mut(), command).await?; + let SubcommandReturnValue::PrivacyPreservingTransfer { tx_hash } = sub_ret else { + anyhow::bail!("Expected PrivacyPreservingTransfer return value"); + }; + + let tx = fetch_privacy_preserving_tx(ctx.sequencer_client(), tx_hash).await; + + // Sync the wallet to claim the new account + let command = Command::Account(AccountSubcommand::SyncPrivate {}); + wallet::cli::execute_subcommand(ctx.wallet_mut(), command).await?; + + let new_commitment1 = ctx + .wallet() + .get_private_account_commitment(from) + .context("Failed to get private account commitment for sender")?; + assert_eq!(tx.message.new_commitments[0], new_commitment1); + + assert_eq!(tx.message.new_commitments.len(), 2); + for commitment in tx.message.new_commitments.into_iter() { + assert!(verify_commitment_is_in_state(commitment, ctx.sequencer_client()).await); + } + + let to_res_acc = ctx + .wallet() + .get_account_private(to_account_id) + .context("Failed to get recipient's private account")?; + assert_eq!(to_res_acc.balance, 100); + + info!("Successfully transferred using claiming path"); + + Ok(()) +} + #[test] async fn restore_keys_from_seed() -> Result<()> { let mut ctx = TestContext::new().await?; diff --git a/key_protocol/src/key_management/key_tree/chain_index.rs b/key_protocol/src/key_management/key_tree/chain_index.rs index 6dbaf9a9..d5fbf401 100644 --- a/key_protocol/src/key_management/key_tree/chain_index.rs +++ b/key_protocol/src/key_management/key_tree/chain_index.rs @@ -68,6 +68,10 @@ impl ChainIndex { &self.0 } + pub fn index(&self) -> Option { + self.chain().last().copied() + } + pub fn next_in_line(&self) -> ChainIndex { let mut chain = self.0.clone(); // ToDo: Add overflow check diff --git a/key_protocol/src/key_management/mod.rs b/key_protocol/src/key_management/mod.rs index d46dcf35..6e2891ce 100644 --- a/key_protocol/src/key_management/mod.rs +++ b/key_protocol/src/key_management/mod.rs @@ -62,9 +62,10 @@ impl KeyChain { pub fn calculate_shared_secret_receiver( &self, ephemeral_public_key_sender: EphemeralPublicKey, + index: Option, ) -> SharedSecretKey { SharedSecretKey::new( - &self.secret_spending_key.generate_viewing_secret_key(None), + &self.secret_spending_key.generate_viewing_secret_key(index), &ephemeral_public_key_sender, ) } @@ -78,6 +79,9 @@ mod tests { use rand::RngCore; use super::*; + use 
crate::key_management::{ + ephemeral_key_holder::EphemeralKeyHolder, key_tree::KeyTreePrivate, + }; #[test] fn test_new_os_random() { @@ -101,8 +105,8 @@ mod tests { let ephemeral_public_key_sender = EphemeralPublicKey::from_scalar(scalar); // Calculate shared secret - let _shared_secret = - account_id_key_holder.calculate_shared_secret_receiver(ephemeral_public_key_sender); + let _shared_secret = account_id_key_holder + .calculate_shared_secret_receiver(ephemeral_public_key_sender, None); } #[test] @@ -150,4 +154,40 @@ mod tests { hex::encode(viewing_public_key.to_bytes()) ); } + + fn account_with_chain_index_2_for_tests() -> KeyChain { + let seed = SeedHolder::new_os_random(); + let mut key_tree_private = KeyTreePrivate::new(&seed); + + // /0 + key_tree_private.generate_new_node_layered().unwrap(); + // /1 + key_tree_private.generate_new_node_layered().unwrap(); + // /0/0 + key_tree_private.generate_new_node_layered().unwrap(); + // /2 + let (second_child_id, _) = key_tree_private.generate_new_node_layered().unwrap(); + + key_tree_private + .get_node(second_child_id) + .unwrap() + .value + .0 + .clone() + } + + #[test] + fn test_non_trivial_chain_index() { + let keys = account_with_chain_index_2_for_tests(); + + let eph_key_holder = EphemeralKeyHolder::new(&keys.nullifer_public_key); + + let key_sender = eph_key_holder.calculate_shared_secret_sender(&keys.viewing_public_key); + let key_receiver = keys.calculate_shared_secret_receiver( + eph_key_holder.generate_ephemeral_public_key(), + Some(2), + ); + + assert_eq!(key_sender.0, key_receiver.0); + } } diff --git a/nssa/core/src/encryption/mod.rs b/nssa/core/src/encryption/mod.rs index 9ccbf2c8..4817d3c8 100644 --- a/nssa/core/src/encryption/mod.rs +++ b/nssa/core/src/encryption/mod.rs @@ -75,6 +75,17 @@ impl EncryptionScheme { Self::symmetric_transform(&mut buffer, shared_secret, commitment, output_index); let mut cursor = Cursor::new(buffer.as_slice()); - Account::from_cursor(&mut cursor).ok() + Account::from_cursor(&mut cursor) + .inspect_err(|err| { + println!( + "Failed to decode {ciphertext:?} \n + with secret {:?} ,\n + commitment {commitment:?} ,\n + and output_index {output_index} ,\n + with error {err:?}", + shared_secret.0 + ) + }) + .ok() } } diff --git a/nssa/src/state.rs b/nssa/src/state.rs index 4c9a79b8..c8599d97 100644 --- a/nssa/src/state.rs +++ b/nssa/src/state.rs @@ -284,7 +284,7 @@ impl V02State { account_id, Account { program_owner: Program::pinata().id(), - balance: 1500, + balance: 1500000, // Difficulty: 3 data: vec![3; 33].try_into().expect("should fit"), nonce: 0, diff --git a/sequencer_runner/configs/debug/sequencer_config.json b/sequencer_runner/configs/debug/sequencer_config.json index a2306dee..8491c2a1 100644 --- a/sequencer_runner/configs/debug/sequencer_config.json +++ b/sequencer_runner/configs/debug/sequencer_config.json @@ -1,5 +1,5 @@ { - "home": "./sequencer_runner", + "home": ".", "override_rust_log": null, "genesis_id": 1, "is_genesis_random": true, diff --git a/sequencer_runner/src/lib.rs b/sequencer_runner/src/lib.rs index d74792c8..944a6402 100644 --- a/sequencer_runner/src/lib.rs +++ b/sequencer_runner/src/lib.rs @@ -173,7 +173,7 @@ async fn retry_pending_blocks(seq_core: &Arc>) -> Result<() use log::debug; - let (pending_blocks, block_settlement_client) = { + let (mut pending_blocks, block_settlement_client) = { let sequencer_core = seq_core.lock().await; let client = sequencer_core.block_settlement_client(); let pending_blocks = sequencer_core @@ -182,6 +182,8 @@ async fn 
retry_pending_blocks(seq_core: &Arc>) -> Result<() (pending_blocks, client) }; + pending_blocks.sort_by(|block1, block2| block1.header.block_id.cmp(&block2.header.block_id)); + if !pending_blocks.is_empty() { info!( "Resubmitting blocks from {} to {}", diff --git a/storage/src/indexer.rs b/storage/src/indexer.rs index 94a1a1a0..2c37ab0f 100644 --- a/storage/src/indexer.rs +++ b/storage/src/indexer.rs @@ -539,13 +539,26 @@ impl RocksDBIO { } } - pub fn get_block_batch(&self, offset: u64, limit: u64) -> DbResult> { + pub fn get_block_batch(&self, before: Option, limit: u64) -> DbResult> { let cf_block = self.block_column(); let mut block_batch = vec![]; + // Determine the starting block ID + let start_block_id = if let Some(before_id) = before { + before_id.saturating_sub(1) + } else { + // Get the latest block ID + self.get_meta_last_block_in_db()? + }; + // ToDo: Multi get this - for block_id in offset..(offset + limit) { + for i in 0..limit { + let block_id = start_block_id.saturating_sub(i); + if block_id == 0 { + break; + } + let res = self .db .get_cf( @@ -1215,7 +1228,10 @@ mod tests { let block_hashes_mem: Vec<[u8; 32]> = block_res.into_iter().map(|bl| bl.header.hash.0).collect(); - let batch_res = dbio.get_block_batch(2, 4).unwrap(); + // Get blocks before ID 6 (i.e., starting from 5 going backwards), limit 4 + // This should return blocks 5, 4, 3, 2 in descending order + let mut batch_res = dbio.get_block_batch(Some(6), 4).unwrap(); + batch_res.reverse(); // Reverse to match ascending order for comparison let block_hashes_db: Vec<[u8; 32]> = batch_res.into_iter().map(|bl| bl.header.hash.0).collect(); @@ -1224,7 +1240,10 @@ mod tests { let block_hashes_mem_limited = &block_hashes_mem[1..]; - let batch_res_limited = dbio.get_block_batch(3, 4).unwrap(); + // Get blocks before ID 6, limit 3 + // This should return blocks 5, 4, 3 in descending order + let mut batch_res_limited = dbio.get_block_batch(Some(6), 3).unwrap(); + batch_res_limited.reverse(); // Reverse to match ascending order for comparison let block_hashes_db_limited: Vec<[u8; 32]> = batch_res_limited .into_iter() diff --git a/wallet/src/lib.rs b/wallet/src/lib.rs index 0162dcb1..8d8924cf 100644 --- a/wallet/src/lib.rs +++ b/wallet/src/lib.rs @@ -363,7 +363,7 @@ impl WalletCore { ); let tx = PrivacyPreservingTransaction::new(message, witness_set); - let shared_secrets = private_account_keys + let shared_secrets: Vec<_> = private_account_keys .into_iter() .map(|keys| keys.ssk) .collect(); @@ -419,18 +419,19 @@ impl WalletCore { .user_data .default_user_private_accounts .iter() - .map(|(acc_account_id, (key_chain, _))| (*acc_account_id, key_chain)) - .chain( - self.storage - .user_data - .private_key_tree - .key_map - .values() - .map(|keys_node| (keys_node.account_id(), &keys_node.value.0)), - ); + .map(|(acc_account_id, (key_chain, _))| (*acc_account_id, key_chain, None)) + .chain(self.storage.user_data.private_key_tree.key_map.iter().map( + |(chain_index, keys_node)| { + ( + keys_node.account_id(), + &keys_node.value.0, + chain_index.index(), + ) + }, + )); let affected_accounts = private_account_key_chains - .flat_map(|(acc_account_id, key_chain)| { + .flat_map(|(acc_account_id, key_chain, index)| { let view_tag = EncryptedAccountData::compute_view_tag( key_chain.nullifer_public_key.clone(), key_chain.viewing_public_key.clone(), @@ -444,8 +445,8 @@ impl WalletCore { .filter_map(|(ciph_id, encrypted_data)| { let ciphertext = &encrypted_data.ciphertext; let commitment = &tx.message.new_commitments[ciph_id]; - let 
shared_secret = - key_chain.calculate_shared_secret_receiver(encrypted_data.epk.clone()); + let shared_secret = key_chain + .calculate_shared_secret_receiver(encrypted_data.epk.clone(), index); nssa_core::EncryptionScheme::decrypt( ciphertext, &shared_secret, &commitment.0, ciph_id as u8, ) }) .map(move |res_acc| (acc_account_id, res_acc)) + .collect::<Vec<_>>() }) .collect::<Vec<_>>();
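
Note on the new block pagination contract: the `getBlocks` RPC now takes an optional `before` cursor instead of an `offset` and returns up to `limit` blocks newest-first, walking backwards from either the cursor or the chain tip. Below is a minimal sketch of the expected paging behaviour, assuming block IDs start at 1 as in the mock service and the storage layer; the `page_ids` helper is illustrative only and not part of this change.

```rust
/// Illustrative helper (not part of the diff): which block IDs a single
/// `getBlocks(before, limit)` call is expected to return, assuming blocks
/// are numbered 1..=latest and pages run newest-first.
fn page_ids(latest: u64, before: Option<u64>, limit: u32) -> Vec<u64> {
    // Start just below the cursor, or at the chain tip when no cursor is given.
    let start = before.map_or(latest, |id| id.saturating_sub(1));
    (1..=start).rev().take(limit as usize).collect()
}

fn main() {
    // First page: the 10 newest blocks, 100 down to 91.
    assert_eq!(page_ids(100, None, 10), (91..=100).rev().collect::<Vec<_>>());
    // Next page: pass the oldest ID seen so far (91) as the cursor -> 90 down to 81.
    assert_eq!(page_ids(100, Some(91), 10), (81..=90).rev().collect::<Vec<_>>());
    // Paging stops cleanly at block 1.
    assert_eq!(page_ids(100, Some(3), 10), vec![2, 1]);
}
```

Callers page by passing the oldest block ID from the previous response as the next `before` value, which is what the explorer's `load_more_blocks` handler does with `oldest_loaded_block_id`.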