Merge branch 'main' into marvin/bip-32-comp

This commit is contained in:
jonesmarvin8 2026-03-09 05:23:24 -04:00
commit 1cfc710024
27 changed files with 570 additions and 100 deletions

1
Cargo.lock generated
View File

@ -1765,6 +1765,7 @@ dependencies = [
"serde_with",
"sha2",
"thiserror 2.0.18",
"tokio-retry",
"url",
]

View File

@ -37,9 +37,15 @@ run-sequencer:
# Run Indexer
[working-directory: 'indexer/service']
run-indexer:
run-indexer mock="":
@echo "🔍 Running indexer"
RUST_LOG=info RISC0_DEV_MODE=1 cargo run --release -p indexer_service configs/indexer_config.json
@if [ "{{mock}}" = "mock" ]; then \
echo "🧪 Using mock data"; \
RUST_LOG=info RISC0_DEV_MODE=1 cargo run --release --features mock-responses -p indexer_service configs/indexer_config.json; \
else \
echo "🚀 Using real data"; \
RUST_LOG=info RISC0_DEV_MODE=1 cargo run --release -p indexer_service configs/indexer_config.json; \
fi
# Run Explorer
[working-directory: 'explorer_service']
@ -58,4 +64,6 @@ clean:
@echo "🧹 Cleaning run artifacts"
rm -rf sequencer_runner/bedrock_signing_key
rm -rf sequencer_runner/rocksdb
rm -rf indexer/service/rocksdb
rm -rf wallet/configs/debug/storage.json
cd bedrock && docker compose down -v

View File

@ -22,3 +22,4 @@ bytesize.workspace = true
base64.workspace = true
url.workspace = true
logos-blockchain-common-http-client.workspace = true
tokio-retry.workspace = true

View File

@ -67,15 +67,24 @@ impl SequencerClient {
"Calling method {method} with payload {request:?} to sequencer at {}",
self.sequencer_addr
);
let mut call_builder = self.client.post(self.sequencer_addr.clone());
if let Some(BasicAuth { username, password }) = &self.basic_auth {
call_builder = call_builder.basic_auth(username, password.as_deref());
}
let strategy = tokio_retry::strategy::FixedInterval::from_millis(10000).take(60);
let call_res = call_builder.json(&request).send().await?;
let response_vall = tokio_retry::Retry::spawn(strategy, || async {
let mut call_builder = self.client.post(self.sequencer_addr.clone());
let response_vall = call_res.json::<Value>().await?;
if let Some(BasicAuth { username, password }) = &self.basic_auth {
call_builder = call_builder.basic_auth(username, password.as_deref());
}
let call_res_res = call_builder.json(&request).send().await;
match call_res_res {
Err(err) => Err(err),
Ok(call_res) => call_res.json::<Value>().await,
}
})
.await?;
#[derive(Debug, Clone, Deserialize)]
#[allow(dead_code)]

View File

@ -1,6 +1,6 @@
# Wallet CLI Completion
Completion scripts for the LSSA `wallet` command.
Completion scripts for the LSSA `wallet` command.
## ZSH
@ -19,9 +19,9 @@ Preconfigured accounts and accounts only with `/` (no number) are not completed.
e.g.:
```
▶ wallet account list
Preconfigured Public/Gj1mJy5W7J5pfmLRujmQaLfLMWidNxQ6uwnhb666ZwHw,
Preconfigured Public/BLgCRDXYdQPMMWVHYRFGQZbgeHx9frkipa8GtpG2Syqy,
▶ wallet account list
Preconfigured Public/7wHg9sbJwc6h3NP1S9bekfAzB8CHifEcxKswCKUt3YQo,
Preconfigured Public/6iArKUXxhUJqS7kCaPNhwMWt3ro71PDyBj7jwAyE2VQV,
Preconfigured Private/3oCG8gqdKLMegw4rRfyaMQvuPHpcASt7xwttsmnZLSkw,
Preconfigured Private/AKTcXgJ1xoynta1Ec7y6Jso1z1JQtHqd7aPQ1h9er6xX,
/ Public/8DstRgMQrB2N9a7ymv98RDDbt8nctrP9ZzaNRSpKDZSu,

View File

@ -1,5 +1,6 @@
{
"resubscribe_interval": "1s",
"home": "./indexer/service",
"consensus_info_polling_interval": "1s",
"bedrock_client_config": {
"addr": "http://logos-blockchain-node-0:18080",
"backoff": {
@ -7,5 +8,153 @@
"max_retries": 5
}
},
"channel_id": "0101010101010101010101010101010101010101010101010101010101010101"
"channel_id": "0101010101010101010101010101010101010101010101010101010101010101",
"initial_accounts": [
{
"account_id": "6iArKUXxhUJqS7kCaPNhwMWt3ro71PDyBj7jwAyE2VQV",
"balance": 10000
},
{
"account_id": "7wHg9sbJwc6h3NP1S9bekfAzB8CHifEcxKswCKUt3YQo",
"balance": 20000
}
],
"initial_commitments": [
{
"npk":[
177,
64,
1,
11,
87,
38,
254,
159,
231,
165,
1,
94,
64,
137,
243,
76,
249,
101,
251,
129,
33,
101,
189,
30,
42,
11,
191,
34,
103,
186,
227,
230
] ,
"account": {
"program_owner": [
0,
0,
0,
0,
0,
0,
0,
0
],
"balance": 10000,
"data": [],
"nonce": 0
}
},
{
"npk": [
32,
67,
72,
164,
106,
53,
66,
239,
141,
15,
52,
230,
136,
177,
2,
236,
207,
243,
134,
135,
210,
143,
87,
232,
215,
128,
194,
120,
113,
224,
4,
165
],
"account": {
"program_owner": [
0,
0,
0,
0,
0,
0,
0,
0
],
"balance": 20000,
"data": [],
"nonce": 0
}
}
],
"signing_key": [
37,
37,
37,
37,
37,
37,
37,
37,
37,
37,
37,
37,
37,
37,
37,
37,
37,
37,
37,
37,
37,
37,
37,
37,
37,
37,
37,
37,
37,
37,
37,
37
]
}

View File

@ -20,17 +20,50 @@
"indexer_rpc_url": "ws://indexer_service:8779",
"initial_accounts": [
{
"account_id": "BLgCRDXYdQPMMWVHYRFGQZbgeHx9frkipa8GtpG2Syqy",
"account_id": "6iArKUXxhUJqS7kCaPNhwMWt3ro71PDyBj7jwAyE2VQV",
"balance": 10000
},
{
"account_id": "Gj1mJy5W7J5pfmLRujmQaLfLMWidNxQ6uwnhb666ZwHw",
"account_id": "7wHg9sbJwc6h3NP1S9bekfAzB8CHifEcxKswCKUt3YQo",
"balance": 20000
}
],
"initial_commitments": [
{
"npk": [13, 25, 40, 5, 198, 248, 210, 248, 237, 121, 124, 145, 186, 142, 253, 216, 236, 69, 193, 32, 166, 167, 49, 133, 172, 111, 159, 46, 84, 17, 157, 23],
"npk":[
177,
64,
1,
11,
87,
38,
254,
159,
231,
165,
1,
94,
64,
137,
243,
76,
249,
101,
251,
129,
33,
101,
189,
30,
42,
11,
191,
34,
103,
186,
227,
230
] ,
"account": {
"program_owner": [
0,
@ -48,7 +81,40 @@
}
},
{
"npk": [32, 67, 72, 164, 106, 53, 66, 239, 141, 15, 52, 230, 136, 177, 2, 236, 207, 243, 134, 135, 210, 143, 87, 232, 215, 128, 194, 120, 113, 224, 4, 165],
"npk": [
32,
67,
72,
164,
106,
53,
66,
239,
141,
15,
52,
230,
136,
177,
2,
236,
207,
243,
134,
135,
210,
143,
87,
232,
215,
128,
194,
120,
113,
224,
4,
165
],
"account": {
"program_owner": [
0,

View File

@ -118,11 +118,11 @@ pub async fn get_transaction(tx_hash: HashType) -> Result<Transaction, ServerFnE
/// Get blocks with pagination
#[server]
pub async fn get_blocks(offset: u32, limit: u32) -> Result<Vec<Block>, ServerFnError> {
pub async fn get_blocks(before: Option<u64>, limit: u32) -> Result<Vec<Block>, ServerFnError> {
use indexer_service_rpc::RpcClient as _;
let client = expect_context::<IndexerRpcClient>();
client
.get_blocks(offset, limit)
.get_blocks(before, limit)
.await
.map_err(|e| ServerFnError::ServerError(format!("RPC error: {}", e)))
}

View File

@ -7,7 +7,7 @@ use crate::{
components::{AccountPreview, BlockPreview, TransactionPreview},
};
const RECENT_BLOCKS_LIMIT: u64 = 10;
const RECENT_BLOCKS_LIMIT: u32 = 10;
/// Main page component
#[component]
@ -39,23 +39,57 @@ pub fn MainPage() -> impl IntoView {
}
});
// Pagination state for blocks
let (all_blocks, set_all_blocks) = signal(Vec::new());
let (is_loading_blocks, set_is_loading_blocks) = signal(false);
let (has_more_blocks, set_has_more_blocks) = signal(true);
let (oldest_loaded_block_id, set_oldest_loaded_block_id) = signal(None::<u64>);
// Load recent blocks on mount
let recent_blocks_resource = Resource::new(
|| (),
|_| async {
match api::get_latest_block_id().await {
Ok(last_id) => {
api::get_blocks(
std::cmp::max(last_id.saturating_sub(RECENT_BLOCKS_LIMIT) as u32, 1),
(RECENT_BLOCKS_LIMIT + 1) as u32,
)
.await
}
Err(err) => Err(err),
}
},
|_| async { api::get_blocks(None, RECENT_BLOCKS_LIMIT).await },
);
// Update all_blocks when initial load completes
Effect::new(move || {
if let Some(Ok(blocks)) = recent_blocks_resource.get() {
let oldest_id = blocks.last().map(|b| b.header.block_id);
set_all_blocks.set(blocks.clone());
set_oldest_loaded_block_id.set(oldest_id);
set_has_more_blocks
.set(blocks.len() as u32 == RECENT_BLOCKS_LIMIT && oldest_id.unwrap_or(0) > 1);
}
});
// Load more blocks handler
let load_more_blocks = move |_| {
let before_id = oldest_loaded_block_id.get();
if before_id.is_none() {
return;
}
set_is_loading_blocks.set(true);
leptos::task::spawn_local(async move {
match api::get_blocks(before_id, RECENT_BLOCKS_LIMIT).await {
Ok(new_blocks) => {
let blocks_count = new_blocks.len() as u32;
let new_oldest_id = new_blocks.last().map(|b| b.header.block_id);
set_all_blocks.update(|blocks| blocks.extend(new_blocks));
set_oldest_loaded_block_id.set(new_oldest_id);
set_has_more_blocks
.set(blocks_count == RECENT_BLOCKS_LIMIT && new_oldest_id.unwrap_or(0) > 1);
}
Err(e) => {
log::error!("Failed to load more blocks: {}", e);
}
}
set_is_loading_blocks.set(false);
});
};
// Handle search - update URL parameter
let on_search = move |ev: SubmitEvent| {
ev.prevent_default();
@ -196,19 +230,48 @@ pub fn MainPage() -> impl IntoView {
recent_blocks_resource
.get()
.map(|result| match result {
Ok(blocks) if !blocks.is_empty() => {
view! {
<div class="blocks-list">
{blocks
.into_iter()
.map(|block| view! { <BlockPreview block=block /> })
.collect::<Vec<_>>()}
</div>
}
.into_any()
}
Ok(_) => {
view! { <div class="no-blocks">"No blocks found"</div> }.into_any()
let blocks = all_blocks.get();
if blocks.is_empty() {
view! { <div class="no-blocks">"No blocks found"</div> }
.into_any()
} else {
view! {
<div>
<div class="blocks-list">
{blocks
.into_iter()
.map(|block| view! { <BlockPreview block=block /> })
.collect::<Vec<_>>()}
</div>
{move || {
if has_more_blocks.get() {
view! {
<button
class="load-more-button"
on:click=load_more_blocks
disabled=move || is_loading_blocks.get()
>
{move || {
if is_loading_blocks.get() {
"Loading..."
} else {
"Load More"
}
}}
</button>
}
.into_any()
} else {
().into_any()
}
}}
</div>
}
.into_any()
}
}
Err(e) => {
view! { <div class="error">{format!("Error: {}", e)}</div> }

View File

@ -50,8 +50,8 @@ impl IndexerStore {
Ok(self.dbio.get_block(id)?)
}
pub fn get_block_batch(&self, offset: u64, limit: u64) -> Result<Vec<Block>> {
Ok(self.dbio.get_block_batch(offset, limit)?)
/// Returns up to `limit` blocks, starting strictly before block id `before`
/// and walking backwards (newest first). `None` means "start from the latest
/// block in the store". Pagination itself is delegated to the DB layer.
pub fn get_block_batch(&self, before: Option<u64>, limit: u64) -> Result<Vec<Block>> {
    Ok(self.dbio.get_block_batch(before, limit)?)
}
pub fn get_transaction_by_hash(&self, tx_hash: [u8; 32]) -> Result<NSSATransaction> {

View File

@ -1,6 +1,6 @@
{
"home": "./indexer/service",
"consensus_info_polling_interval": "60s",
"home": ".",
"consensus_info_polling_interval": "1s",
"bedrock_client_config": {
"addr": "http://localhost:8080",
"backoff": {

View File

@ -5,7 +5,7 @@
set -e
CONFIG="/etc/indexer_service/indexer_service.json"
CONFIG="/etc/indexer_service/indexer_config.json"
# Check config file exists
if [ ! -f "$CONFIG" ]; then

View File

@ -5,7 +5,7 @@ edition = "2024"
license = { workspace = true }
[dependencies]
indexer_service_protocol = { workspace = true, features = ["convert"] }
indexer_service_protocol.workspace = true
jsonrpsee = { workspace = true, features = ["macros"] }
serde_json.workspace = true

View File

@ -42,7 +42,11 @@ pub trait Rpc {
async fn get_transaction(&self, tx_hash: HashType) -> Result<Transaction, ErrorObjectOwned>;
#[method(name = "getBlocks")]
async fn get_blocks(&self, offset: u32, limit: u32) -> Result<Vec<Block>, ErrorObjectOwned>;
async fn get_blocks(
&self,
before: Option<u64>,
limit: u32,
) -> Result<Vec<Block>, ErrorObjectOwned>;
#[method(name = "getTransactionsByAccount")]
async fn get_transactions_by_account(

View File

@ -43,10 +43,10 @@ impl MockIndexerService {
);
}
// Create 10 blocks with transactions
// Create 100 blocks with transactions
let mut prev_hash = HashType([0u8; 32]);
for block_id in 0..10 {
for block_id in 1..=100 {
let block_hash = {
let mut hash = [0u8; 32];
hash[0] = block_id as u8;
@ -225,23 +225,20 @@ impl indexer_service_rpc::RpcServer for MockIndexerService {
.ok_or_else(|| ErrorObjectOwned::owned(-32001, "Transaction not found", None::<()>))
}
async fn get_blocks(&self, offset: u32, limit: u32) -> Result<Vec<Block>, ErrorObjectOwned> {
let offset = offset as usize;
let limit = limit as usize;
let total = self.blocks.len();
async fn get_blocks(
&self,
before: Option<u64>,
limit: u32,
) -> Result<Vec<Block>, ErrorObjectOwned> {
let start_id = before.map_or_else(|| self.blocks.len() as u64, |id| id.saturating_sub(1));
// Return blocks in reverse order (newest first), with pagination
let start = offset.min(total);
let end = (offset + limit).min(total);
Ok(self
.blocks
.iter()
let result = (1..=start_id)
.rev()
.skip(start)
.take(end - start)
.cloned()
.collect())
.take(limit as usize)
.map_while(|block_id| self.blocks.get(block_id as usize - 1).cloned())
.collect();
Ok(result)
}
async fn get_transactions_by_account(

View File

@ -88,11 +88,15 @@ impl indexer_service_rpc::RpcServer for IndexerService {
.into())
}
async fn get_blocks(&self, offset: u32, limit: u32) -> Result<Vec<Block>, ErrorObjectOwned> {
async fn get_blocks(
&self,
before: Option<u64>,
limit: u32,
) -> Result<Vec<Block>, ErrorObjectOwned> {
let blocks = self
.indexer
.store
.get_block_batch(offset as u64, limit as u64)
.get_block_batch(before, limit as u64)
.map_err(db_error)?;
let mut block_res = vec![];

View File

@ -61,8 +61,11 @@ async fn indexer_block_batching() -> Result<()> {
assert!(last_block_indexer > 1);
// Getting wide batch to fit all blocks
let block_batch = ctx.indexer_client().get_blocks(1, 100).await.unwrap();
// Getting wide batch to fit all blocks (from latest backwards)
let mut block_batch = ctx.indexer_client().get_blocks(None, 100).await.unwrap();
// Reverse to check chain consistency from oldest to newest
block_batch.reverse();
// Checking chain consistency
let mut prev_block_hash = block_batch.first().unwrap().header.hash;

View File

@ -1,9 +1,9 @@
use std::{str::FromStr, time::Duration};
use anyhow::Result;
use anyhow::{Context, Result};
use integration_tests::{
TIME_TO_WAIT_FOR_BLOCK_SECONDS, TestContext, format_private_account_id,
format_public_account_id, verify_commitment_is_in_state,
TIME_TO_WAIT_FOR_BLOCK_SECONDS, TestContext, fetch_privacy_preserving_tx,
format_private_account_id, format_public_account_id, verify_commitment_is_in_state,
};
use key_protocol::key_management::key_tree::chain_index::ChainIndex;
use log::info;
@ -15,6 +15,93 @@ use wallet::cli::{
programs::native_token_transfer::AuthTransferSubcommand,
};
/// End-to-end check that a private account created at a non-zero key-tree
/// child index can receive funds via the claiming path (npk/vpk addressing
/// instead of an account id) and that `SyncPrivate` then discovers the
/// transfer and credits the recipient.
#[test]
async fn sync_private_account_with_non_zero_chain_index() -> Result<()> {
    let mut ctx = TestContext::new().await?;
    // Sender: the first preconfigured private account from the test context.
    let from: AccountId = ctx.existing_private_accounts()[0];
    // Create a new private account
    let command = Command::Account(AccountSubcommand::New(NewSubcommand::Private {
        cci: None,
        label: None,
    }));
    for _ in 0..3 {
        // Key Tree shift
        // This way we have account with child index > 0.
        let result = wallet::cli::execute_subcommand(
            ctx.wallet_mut(),
            Command::Account(AccountSubcommand::New(NewSubcommand::Private {
                cci: None,
                label: None,
            })),
        )
        .await?;
        let SubcommandReturnValue::RegisterAccount { account_id: _ } = result else {
            anyhow::bail!("Expected RegisterAccount return value");
        };
    }
    // The account under test: created after three shifts, so its child index
    // is greater than zero.
    let sub_ret = wallet::cli::execute_subcommand(ctx.wallet_mut(), command).await?;
    let SubcommandReturnValue::RegisterAccount {
        account_id: to_account_id,
    } = sub_ret
    else {
        anyhow::bail!("Expected RegisterAccount return value");
    };
    // Get the keys for the newly created account
    let (to_keys, _) = ctx
        .wallet()
        .storage()
        .user_data
        .get_private_account(to_account_id)
        .cloned()
        .context("Failed to get private account")?;
    // Send to this account using claiming path (using npk and vpk instead of account ID)
    let command = Command::AuthTransfer(AuthTransferSubcommand::Send {
        from: format_private_account_id(from),
        to: None,
        to_npk: Some(hex::encode(to_keys.nullifer_public_key.0)),
        to_vpk: Some(hex::encode(to_keys.viewing_public_key.0)),
        amount: 100,
    });
    let sub_ret = wallet::cli::execute_subcommand(ctx.wallet_mut(), command).await?;
    let SubcommandReturnValue::PrivacyPreservingTransfer { tx_hash } = sub_ret else {
        anyhow::bail!("Expected PrivacyPreservingTransfer return value");
    };
    let tx = fetch_privacy_preserving_tx(ctx.sequencer_client(), tx_hash).await;
    // Sync the wallet to claim the new account
    let command = Command::Account(AccountSubcommand::SyncPrivate {});
    wallet::cli::execute_subcommand(ctx.wallet_mut(), command).await?;
    // The transfer should produce exactly two commitments; the sender's
    // refreshed commitment is expected at position 0 (presumably sender
    // change + recipient output — TODO confirm ordering contract).
    let new_commitment1 = ctx
        .wallet()
        .get_private_account_commitment(from)
        .context("Failed to get private account commitment for sender")?;
    assert_eq!(tx.message.new_commitments[0], new_commitment1);
    assert_eq!(tx.message.new_commitments.len(), 2);
    // Both commitments must have landed in the sequencer state.
    for commitment in tx.message.new_commitments.into_iter() {
        assert!(verify_commitment_is_in_state(commitment, ctx.sequencer_client()).await);
    }
    // After syncing, the recipient account must reflect the transferred amount.
    let to_res_acc = ctx
        .wallet()
        .get_account_private(to_account_id)
        .context("Failed to get recipient's private account")?;
    assert_eq!(to_res_acc.balance, 100);
    info!("Successfully transferred using claiming path");
    Ok(())
}
#[test]
async fn restore_keys_from_seed() -> Result<()> {
let mut ctx = TestContext::new().await?;

View File

@ -68,6 +68,10 @@ impl ChainIndex {
&self.0
}
/// Returns the last component of the derivation chain, or `None` when the
/// chain is empty (the root).
pub fn index(&self) -> Option<u32> {
    self.chain().iter().copied().last()
}
pub fn next_in_line(&self) -> ChainIndex {
let mut chain = self.0.clone();
// ToDo: Add overflow check

View File

@ -62,9 +62,10 @@ impl KeyChain {
pub fn calculate_shared_secret_receiver(
&self,
ephemeral_public_key_sender: EphemeralPublicKey,
index: Option<u32>,
) -> SharedSecretKey {
SharedSecretKey::new(
&self.secret_spending_key.generate_viewing_secret_key(None),
&self.secret_spending_key.generate_viewing_secret_key(index),
&ephemeral_public_key_sender,
)
}
@ -78,6 +79,9 @@ mod tests {
use rand::RngCore;
use super::*;
use crate::key_management::{
ephemeral_key_holder::EphemeralKeyHolder, key_tree::KeyTreePrivate,
};
#[test]
fn test_new_os_random() {
@ -101,8 +105,8 @@ mod tests {
let ephemeral_public_key_sender = EphemeralPublicKey::from_scalar(scalar);
// Calculate shared secret
let _shared_secret =
account_id_key_holder.calculate_shared_secret_receiver(ephemeral_public_key_sender);
let _shared_secret = account_id_key_holder
.calculate_shared_secret_receiver(ephemeral_public_key_sender, None);
}
#[test]
@ -150,4 +154,40 @@ mod tests {
hex::encode(viewing_public_key.to_bytes())
);
}
/// Builds a fresh random key tree and returns the key chain of the node
/// generated fourth in layered order — presumably path `/2`, i.e. child
/// index 2 (the first three generations land at /0, /1 and /0/0).
fn account_with_chain_index_2_for_tests() -> KeyChain {
    let seed = SeedHolder::new_os_random();
    let mut tree = KeyTreePrivate::new(&seed);
    // Advance layered generation three times so the next node gets a
    // non-trivial child index.
    for _ in 0..3 {
        tree.generate_new_node_layered().unwrap();
    }
    let (node_id, _) = tree.generate_new_node_layered().unwrap();
    let node = tree.get_node(node_id).unwrap();
    node.value.0.clone()
}
/// Sender-side and receiver-side shared-secret derivation must agree when
/// the receiver's key chain lives at a non-zero chain index (here 2).
#[test]
fn test_non_trivial_chain_index() {
    let key_chain = account_with_chain_index_2_for_tests();
    let holder = EphemeralKeyHolder::new(&key_chain.nullifer_public_key);
    let secret_sender = holder.calculate_shared_secret_sender(&key_chain.viewing_public_key);
    let epk = holder.generate_ephemeral_public_key();
    let secret_receiver = key_chain.calculate_shared_secret_receiver(epk, Some(2));
    assert_eq!(secret_sender.0, secret_receiver.0);
}
}

View File

@ -75,6 +75,17 @@ impl EncryptionScheme {
Self::symmetric_transform(&mut buffer, shared_secret, commitment, output_index);
let mut cursor = Cursor::new(buffer.as_slice());
Account::from_cursor(&mut cursor).ok()
Account::from_cursor(&mut cursor)
.inspect_err(|err| {
println!(
"Failed to decode {ciphertext:?} \n
with secret {:?} ,\n
commitment {commitment:?} ,\n
and output_index {output_index} ,\n
with error {err:?}",
shared_secret.0
)
})
.ok()
}
}

View File

@ -284,7 +284,7 @@ impl V02State {
account_id,
Account {
program_owner: Program::pinata().id(),
balance: 1500,
balance: 1500000,
// Difficulty: 3
data: vec![3; 33].try_into().expect("should fit"),
nonce: 0,

View File

@ -1,5 +1,5 @@
{
"home": "./sequencer_runner",
"home": ".",
"override_rust_log": null,
"genesis_id": 1,
"is_genesis_random": true,

View File

@ -173,7 +173,7 @@ async fn retry_pending_blocks(seq_core: &Arc<Mutex<SequencerCore>>) -> Result<()
use log::debug;
let (pending_blocks, block_settlement_client) = {
let (mut pending_blocks, block_settlement_client) = {
let sequencer_core = seq_core.lock().await;
let client = sequencer_core.block_settlement_client();
let pending_blocks = sequencer_core
@ -182,6 +182,8 @@ async fn retry_pending_blocks(seq_core: &Arc<Mutex<SequencerCore>>) -> Result<()
(pending_blocks, client)
};
pending_blocks.sort_by(|block1, block2| block1.header.block_id.cmp(&block2.header.block_id));
if !pending_blocks.is_empty() {
info!(
"Resubmitting blocks from {} to {}",

View File

@ -539,13 +539,26 @@ impl RocksDBIO {
}
}
pub fn get_block_batch(&self, offset: u64, limit: u64) -> DbResult<Vec<Block>> {
pub fn get_block_batch(&self, before: Option<u64>, limit: u64) -> DbResult<Vec<Block>> {
let cf_block = self.block_column();
let mut block_batch = vec![];
// Determine the starting block ID
let start_block_id = if let Some(before_id) = before {
before_id.saturating_sub(1)
} else {
// Get the latest block ID
self.get_meta_last_block_in_db()?
};
// ToDo: Multi get this
for block_id in offset..(offset + limit) {
for i in 0..limit {
let block_id = start_block_id.saturating_sub(i);
if block_id == 0 {
break;
}
let res = self
.db
.get_cf(
@ -1215,7 +1228,10 @@ mod tests {
let block_hashes_mem: Vec<[u8; 32]> =
block_res.into_iter().map(|bl| bl.header.hash.0).collect();
let batch_res = dbio.get_block_batch(2, 4).unwrap();
// Get blocks before ID 6 (i.e., starting from 5 going backwards), limit 4
// This should return blocks 5, 4, 3, 2 in descending order
let mut batch_res = dbio.get_block_batch(Some(6), 4).unwrap();
batch_res.reverse(); // Reverse to match ascending order for comparison
let block_hashes_db: Vec<[u8; 32]> =
batch_res.into_iter().map(|bl| bl.header.hash.0).collect();
@ -1224,7 +1240,10 @@ mod tests {
let block_hashes_mem_limited = &block_hashes_mem[1..];
let batch_res_limited = dbio.get_block_batch(3, 4).unwrap();
// Get blocks before ID 6, limit 3
// This should return blocks 5, 4, 3 in descending order
let mut batch_res_limited = dbio.get_block_batch(Some(6), 3).unwrap();
batch_res_limited.reverse(); // Reverse to match ascending order for comparison
let block_hashes_db_limited: Vec<[u8; 32]> = batch_res_limited
.into_iter()

View File

@ -363,7 +363,7 @@ impl WalletCore {
);
let tx = PrivacyPreservingTransaction::new(message, witness_set);
let shared_secrets = private_account_keys
let shared_secrets: Vec<_> = private_account_keys
.into_iter()
.map(|keys| keys.ssk)
.collect();
@ -419,18 +419,19 @@ impl WalletCore {
.user_data
.default_user_private_accounts
.iter()
.map(|(acc_account_id, (key_chain, _))| (*acc_account_id, key_chain))
.chain(
self.storage
.user_data
.private_key_tree
.key_map
.values()
.map(|keys_node| (keys_node.account_id(), &keys_node.value.0)),
);
.map(|(acc_account_id, (key_chain, _))| (*acc_account_id, key_chain, None))
.chain(self.storage.user_data.private_key_tree.key_map.iter().map(
|(chain_index, keys_node)| {
(
keys_node.account_id(),
&keys_node.value.0,
chain_index.index(),
)
},
));
let affected_accounts = private_account_key_chains
.flat_map(|(acc_account_id, key_chain)| {
.flat_map(|(acc_account_id, key_chain, index)| {
let view_tag = EncryptedAccountData::compute_view_tag(
key_chain.nullifer_public_key.clone(),
key_chain.viewing_public_key.clone(),
@ -444,8 +445,8 @@ impl WalletCore {
.filter_map(|(ciph_id, encrypted_data)| {
let ciphertext = &encrypted_data.ciphertext;
let commitment = &tx.message.new_commitments[ciph_id];
let shared_secret =
key_chain.calculate_shared_secret_receiver(encrypted_data.epk.clone());
let shared_secret = key_chain
.calculate_shared_secret_receiver(encrypted_data.epk.clone(), index);
nssa_core::EncryptionScheme::decrypt(
ciphertext,
@ -455,6 +456,7 @@ impl WalletCore {
)
})
.map(move |res_acc| (acc_account_id, res_acc))
.collect::<Vec<_>>()
})
.collect::<Vec<_>>();