Merge branch 'main' into schouhy/add-block-context

This commit is contained in:
Sergio Chouhy 2026-03-26 21:12:41 -03:00
commit 99071a4ef9
26 changed files with 2305 additions and 1695 deletions

19
Cargo.lock generated
View File

@ -1522,7 +1522,6 @@ dependencies = [
"log",
"logos-blockchain-common-http-client",
"nssa",
"nssa_core",
"serde",
"serde_with",
"sha2",
@ -3460,6 +3459,7 @@ dependencies = [
"serde_json",
"storage",
"tempfile",
"testnet_initial_state",
"tokio",
"url",
]
@ -3587,6 +3587,7 @@ dependencies = [
"serde_json",
"tempfile",
"testcontainers",
"testnet_initial_state",
"token_core",
"tokio",
"url",
@ -6940,7 +6941,7 @@ dependencies = [
"security-framework",
"security-framework-sys",
"webpki-root-certs 0.26.11",
"windows-sys 0.59.0",
"windows-sys 0.52.0",
]
[[package]]
@ -7158,7 +7159,6 @@ name = "sequencer_core"
version = "0.1.0"
dependencies = [
"anyhow",
"base58",
"bedrock_client",
"borsh",
"bytesize",
@ -7178,6 +7178,7 @@ dependencies = [
"serde_json",
"storage",
"tempfile",
"testnet_initial_state",
"tokio",
"url",
]
@ -7894,6 +7895,17 @@ dependencies = [
"uuid",
]
[[package]]
name = "testnet_initial_state"
version = "0.1.0"
dependencies = [
"common",
"key_protocol",
"nssa",
"nssa_core",
"serde",
]
[[package]]
name = "thiserror"
version = "1.0.69"
@ -8691,6 +8703,7 @@ dependencies = [
"serde",
"serde_json",
"sha2",
"testnet_initial_state",
"thiserror 2.0.18",
"token_core",
"tokio",

View File

@ -36,6 +36,7 @@ members = [
"examples/program_deployment/methods",
"examples/program_deployment/methods/guest",
"bedrock_client",
"testnet_initial_state",
]
[workspace.dependencies]
@ -63,6 +64,7 @@ ata_core = { path = "programs/associated_token_account/core" }
ata_program = { path = "programs/associated_token_account" }
test_program_methods = { path = "test_program_methods" }
bedrock_client = { path = "bedrock_client" }
testnet_initial_state = { path = "testnet_initial_state" }
tokio = { version = "1.50", features = [
"net",

View File

@ -9,7 +9,6 @@ workspace = true
[dependencies]
nssa.workspace = true
nssa_core.workspace = true
anyhow.workspace = true
thiserror.workspace = true

View File

@ -1,5 +1,4 @@
use borsh::{BorshDeserialize, BorshSerialize};
use nssa::AccountId;
use serde::{Deserialize, Serialize};
use sha2::{Digest as _, Sha256, digest::FixedOutput as _};
@ -123,20 +122,6 @@ impl From<Block> for HashableBlockData {
}
}
/// Helper struct for account (de-)serialization.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct AccountInitialData {
pub account_id: AccountId,
pub balance: u128,
}
/// Helper struct to (de-)serialize initial commitments.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct CommitmentsInitialData {
pub npk: nssa_core::NullifierPublicKey,
pub account: nssa_core::account::Account,
}
#[cfg(test)]
mod tests {
use crate::{HashType, block::HashableBlockData, test_utils};

View File

@ -13,6 +13,7 @@ bedrock_client.workspace = true
nssa.workspace = true
nssa_core.workspace = true
storage.workspace = true
testnet_initial_state.workspace = true
anyhow.workspace = true
log.workspace = true

View File

@ -7,13 +7,11 @@ use std::{
use anyhow::{Context as _, Result};
pub use bedrock_client::BackoffConfig;
use common::{
block::{AccountInitialData, CommitmentsInitialData},
config::BasicAuth,
};
use common::config::BasicAuth;
use humantime_serde;
pub use logos_blockchain_core::mantle::ops::channel::ChannelId;
use serde::{Deserialize, Serialize};
use testnet_initial_state::{PrivateAccountPublicInitialData, PublicAccountPublicInitialData};
use url::Url;
#[derive(Debug, Clone, Serialize, Deserialize)]
@ -29,16 +27,16 @@ pub struct ClientConfig {
pub struct IndexerConfig {
/// Home dir of sequencer storage.
pub home: PathBuf,
/// List of initial accounts data.
pub initial_accounts: Vec<AccountInitialData>,
/// List of initial commitments.
pub initial_commitments: Vec<CommitmentsInitialData>,
/// Sequencers signing key.
pub signing_key: [u8; 32],
#[serde(with = "humantime_serde")]
pub consensus_info_polling_interval: Duration,
pub bedrock_client_config: ClientConfig,
pub channel_id: ChannelId,
#[serde(skip_serializing_if = "Option::is_none")]
pub initial_public_accounts: Option<Vec<PublicAccountPublicInitialData>>,
#[serde(skip_serializing_if = "Option::is_none")]
pub initial_private_accounts: Option<Vec<PrivateAccountPublicInitialData>>,
}
impl IndexerConfig {

View File

@ -2,14 +2,17 @@ use std::collections::VecDeque;
use anyhow::Result;
use bedrock_client::{BedrockClient, HeaderId};
use common::block::{Block, HashableBlockData};
// ToDo: Remove after testnet
use common::{HashType, PINATA_BASE58};
use common::{
HashType, PINATA_BASE58,
block::{Block, HashableBlockData},
};
use log::{debug, error, info};
use logos_blockchain_core::mantle::{
Op, SignedMantleTx,
ops::channel::{ChannelId, inscribe::InscriptionOp},
};
use nssa::V03State;
use testnet_initial_state::initial_state_testnet;
use crate::{block_store::IndexerStore, config::IndexerConfig};
@ -54,36 +57,50 @@ impl IndexerCore {
let channel_genesis_msg_id = [0; 32];
let genesis_block = hashable_data.into_pending_block(&signing_key, channel_genesis_msg_id);
// This is a troubling moment, because changes in key protocol can
// affect this. And indexer can not reliably ask this data from sequencer
// because indexer must be independent from it.
// ToDo: move initial state generation into common and use the same method
// for indexer and sequencer. This way both services built at the same version
// could be in sync.
let initial_commitments: Vec<nssa_core::Commitment> = config
.initial_commitments
.iter()
.map(|init_comm_data| {
let npk = &init_comm_data.npk;
let initial_commitments: Option<Vec<nssa_core::Commitment>> = config
.initial_private_accounts
.as_ref()
.map(|initial_commitments| {
initial_commitments
.iter()
.map(|init_comm_data| {
let npk = &init_comm_data.npk;
let mut acc = init_comm_data.account.clone();
let mut acc = init_comm_data.account.clone();
acc.program_owner = nssa::program::Program::authenticated_transfer_program().id();
acc.program_owner =
nssa::program::Program::authenticated_transfer_program().id();
nssa_core::Commitment::new(npk, &acc)
})
.collect();
nssa_core::Commitment::new(npk, &acc)
})
.collect()
});
let init_accs: Vec<(nssa::AccountId, u128)> = config
.initial_accounts
.iter()
.map(|acc_data| (acc_data.account_id, acc_data.balance))
.collect();
let init_accs: Option<Vec<(nssa::AccountId, u128)>> = config
.initial_public_accounts
.as_ref()
.map(|initial_accounts| {
initial_accounts
.iter()
.map(|acc_data| (acc_data.account_id, acc_data.balance))
.collect()
});
let mut state = nssa::V03State::new_with_genesis_accounts(&init_accs, &initial_commitments);
// If initial commitments or accounts are present in config, need to construct state from
// them
let state = if initial_commitments.is_some() || init_accs.is_some() {
let mut state = V03State::new_with_genesis_accounts(
&init_accs.unwrap_or_default(),
&initial_commitments.unwrap_or_default(),
);
// ToDo: Remove after testnet
state.add_pinata_program(PINATA_BASE58.parse().unwrap());
// ToDo: Remove after testnet
state.add_pinata_program(PINATA_BASE58.parse().unwrap());
state
} else {
initial_state_testnet()
};
let home = config.home.join("rocksdb");

View File

@ -22,6 +22,7 @@ ata_core.workspace = true
indexer_service_rpc.workspace = true
sequencer_service_rpc = { workspace = true, features = ["client"] }
wallet-ffi.workspace = true
testnet_initial_state.workspace = true
url.workspace = true

View File

@ -2,16 +2,17 @@ use std::{net::SocketAddr, path::PathBuf, time::Duration};
use anyhow::{Context as _, Result};
use bytesize::ByteSize;
use common::block::{AccountInitialData, CommitmentsInitialData};
use indexer_service::{BackoffConfig, ChannelId, ClientConfig, IndexerConfig};
use key_protocol::key_management::KeyChain;
use nssa::{Account, AccountId, PrivateKey, PublicKey};
use nssa_core::{account::Data, program::DEFAULT_PROGRAM_ID};
use sequencer_core::config::{BedrockConfig, SequencerConfig};
use url::Url;
use wallet::config::{
InitialAccountData, InitialAccountDataPrivate, InitialAccountDataPublic, WalletConfig,
use testnet_initial_state::{
PrivateAccountPrivateInitialData, PrivateAccountPublicInitialData,
PublicAccountPrivateInitialData, PublicAccountPublicInitialData,
};
use url::Url;
use wallet::config::{InitialAccountData, WalletConfig};
/// Sequencer config options available for custom changes in integration tests.
#[derive(Debug, Clone, Copy)]
@ -102,13 +103,13 @@ impl InitialData {
}
}
fn sequencer_initial_accounts(&self) -> Vec<AccountInitialData> {
fn sequencer_initial_public_accounts(&self) -> Vec<PublicAccountPublicInitialData> {
self.public_accounts
.iter()
.map(|(priv_key, balance)| {
let pub_key = PublicKey::new_from_private_key(priv_key);
let account_id = AccountId::from(&pub_key);
AccountInitialData {
PublicAccountPublicInitialData {
account_id,
balance: *balance,
}
@ -116,10 +117,10 @@ impl InitialData {
.collect()
}
fn sequencer_initial_commitments(&self) -> Vec<CommitmentsInitialData> {
fn sequencer_initial_private_accounts(&self) -> Vec<PrivateAccountPublicInitialData> {
self.private_accounts
.iter()
.map(|(key_chain, account)| CommitmentsInitialData {
.map(|(key_chain, account)| PrivateAccountPublicInitialData {
npk: key_chain.nullifier_public_key.clone(),
account: account.clone(),
})
@ -132,14 +133,14 @@ impl InitialData {
.map(|(priv_key, _)| {
let pub_key = PublicKey::new_from_private_key(priv_key);
let account_id = AccountId::from(&pub_key);
InitialAccountData::Public(InitialAccountDataPublic {
InitialAccountData::Public(PublicAccountPrivateInitialData {
account_id,
pub_sign_key: priv_key.clone(),
})
})
.chain(self.private_accounts.iter().map(|(key_chain, account)| {
let account_id = AccountId::from(&key_chain.nullifier_public_key);
InitialAccountData::Private(Box::new(InitialAccountDataPrivate {
InitialAccountData::Private(Box::new(PrivateAccountPrivateInitialData {
account_id,
account: account.clone(),
key_chain: key_chain.clone(),
@ -181,8 +182,8 @@ pub fn indexer_config(
max_retries: 10,
},
},
initial_accounts: initial_data.sequencer_initial_accounts(),
initial_commitments: initial_data.sequencer_initial_commitments(),
initial_public_accounts: Some(initial_data.sequencer_initial_public_accounts()),
initial_private_accounts: Some(initial_data.sequencer_initial_private_accounts()),
signing_key: [37; 32],
channel_id: bedrock_channel_id(),
})
@ -211,8 +212,8 @@ pub fn sequencer_config(
mempool_max_size,
block_create_timeout,
retry_pending_blocks_timeout: Duration::from_mins(2),
initial_accounts: initial_data.sequencer_initial_accounts(),
initial_commitments: initial_data.sequencer_initial_commitments(),
initial_public_accounts: Some(initial_data.sequencer_initial_public_accounts()),
initial_private_accounts: Some(initial_data.sequencer_initial_private_accounts()),
signing_key: [37; 32],
bedrock_config: BedrockConfig {
backoff: BackoffConfig {
@ -240,7 +241,7 @@ pub fn wallet_config(
seq_tx_poll_max_blocks: 15,
seq_poll_max_retries: 10,
seq_block_poll_max_amount: 100,
initial_accounts: initial_data.wallet_initial_accounts(),
initial_accounts: Some(initial_data.wallet_initial_accounts()),
basic_auth: None,
})
}

View File

@ -20,17 +20,16 @@ pub struct SeedHolder {
/// Secret spending key object. Can produce `PrivateKeyHolder` objects.
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Eq)]
pub struct SecretSpendingKey(pub(crate) [u8; 32]);
pub struct SecretSpendingKey(pub [u8; 32]);
pub type ViewingSecretKey = Scalar;
#[derive(Serialize, Deserialize, Debug, Clone)]
/// Private key holder. Produces public keys. Can produce `account_id`. Can produce shared secret
/// for the recipient.
#[expect(clippy::partial_pub_fields, reason = "TODO: fix later")]
pub struct PrivateKeyHolder {
pub nullifier_secret_key: NullifierSecretKey,
pub(crate) viewing_secret_key: ViewingSecretKey,
pub viewing_secret_key: ViewingSecretKey,
}
impl SeedHolder {

View File

@ -14,8 +14,8 @@ common.workspace = true
storage.workspace = true
mempool.workspace = true
bedrock_client.workspace = true
testnet_initial_state.workspace = true
base58.workspace = true
anyhow.workspace = true
serde.workspace = true
serde_json.workspace = true

View File

@ -8,13 +8,11 @@ use std::{
use anyhow::Result;
use bedrock_client::BackoffConfig;
use bytesize::ByteSize;
use common::{
block::{AccountInitialData, CommitmentsInitialData},
config::BasicAuth,
};
use common::config::BasicAuth;
use humantime_serde;
use logos_blockchain_core::mantle::ops::channel::ChannelId;
use serde::{Deserialize, Serialize};
use testnet_initial_state::{PrivateAccountPublicInitialData, PublicAccountPublicInitialData};
use url::Url;
// TODO: Provide default values
@ -39,16 +37,16 @@ pub struct SequencerConfig {
/// Interval in which pending blocks are retried.
#[serde(with = "humantime_serde")]
pub retry_pending_blocks_timeout: Duration,
/// List of initial accounts data.
pub initial_accounts: Vec<AccountInitialData>,
/// List of initial commitments.
pub initial_commitments: Vec<CommitmentsInitialData>,
/// Sequencer own signing key.
pub signing_key: [u8; 32],
/// Bedrock configuration options.
pub bedrock_config: BedrockConfig,
/// Indexer RPC URL.
pub indexer_rpc_url: Url,
#[serde(skip_serializing_if = "Option::is_none")]
pub initial_public_accounts: Option<Vec<PublicAccountPublicInitialData>>,
#[serde(skip_serializing_if = "Option::is_none")]
pub initial_private_accounts: Option<Vec<PrivateAccountPublicInitialData>>,
}
#[derive(Clone, Serialize, Deserialize)]

View File

@ -15,7 +15,9 @@ use logos_blockchain_key_management_system_service::keys::{ED25519_SECRET_KEY_SI
use mempool::{MemPool, MemPoolHandle};
#[cfg(feature = "mock")]
pub use mock::SequencerCoreWithMockClients;
use nssa::V03State;
pub use storage::error::DbError;
use testnet_initial_state::initial_state;
use crate::{
block_settlement_client::{BlockSettlementClient, BlockSettlementClientTrait, MsgId},
@ -98,30 +100,48 @@ impl<BC: BlockSettlementClientTrait, IC: IndexerClientTrait> SequencerCore<BC, I
state
} else {
info!(
"No database found when starting the sequencer. Creating a fresh new with the initial data in config"
"No database found when starting the sequencer. Creating a fresh new with the initial data"
);
let initial_commitments: Vec<nssa_core::Commitment> = config
.initial_commitments
.iter()
.map(|init_comm_data| {
let npk = &init_comm_data.npk;
let mut acc = init_comm_data.account.clone();
let initial_commitments: Option<Vec<nssa_core::Commitment>> = config
.initial_private_accounts
.clone()
.map(|initial_commitments| {
initial_commitments
.iter()
.map(|init_comm_data| {
let npk = &init_comm_data.npk;
acc.program_owner =
nssa::program::Program::authenticated_transfer_program().id();
let mut acc = init_comm_data.account.clone();
nssa_core::Commitment::new(npk, &acc)
})
.collect();
acc.program_owner =
nssa::program::Program::authenticated_transfer_program().id();
let init_accs: Vec<(nssa::AccountId, u128)> = config
.initial_accounts
.iter()
.map(|acc_data| (acc_data.account_id, acc_data.balance))
.collect();
nssa_core::Commitment::new(npk, &acc)
})
.collect()
});
nssa::V03State::new_with_genesis_accounts(&init_accs, &initial_commitments)
let init_accs: Option<Vec<(nssa::AccountId, u128)>> = config
.initial_public_accounts
.clone()
.map(|initial_accounts| {
initial_accounts
.iter()
.map(|acc_data| (acc_data.account_id, acc_data.balance))
.collect()
});
// If initial commitments or accounts are present in config, need to construct state
// from them
if initial_commitments.is_some() || init_accs.is_some() {
V03State::new_with_genesis_accounts(
&init_accs.unwrap_or_default(),
&initial_commitments.unwrap_or_default(),
)
} else {
initial_state()
}
};
#[cfg(feature = "testnet")]
@ -368,26 +388,20 @@ fn load_or_create_signing_key(path: &Path) -> Result<Ed25519Key> {
mod tests {
#![expect(clippy::shadow_unrelated, reason = "We don't care about it in tests")]
use std::{pin::pin, str::FromStr as _, time::Duration};
use std::{pin::pin, time::Duration};
use base58::ToBase58 as _;
use bedrock_client::BackoffConfig;
use common::{
block::AccountInitialData, test_utils::sequencer_sign_key_for_testing,
transaction::NSSATransaction,
};
use common::{test_utils::sequencer_sign_key_for_testing, transaction::NSSATransaction};
use logos_blockchain_core::mantle::ops::channel::ChannelId;
use mempool::MemPoolHandle;
use nssa::{AccountId, PrivateKey};
use testnet_initial_state::{initial_accounts, initial_pub_accounts_private_keys};
use crate::{
config::{BedrockConfig, SequencerConfig},
mock::SequencerCoreWithMockClients,
};
fn setup_sequencer_config_variable_initial_accounts(
initial_accounts: Vec<AccountInitialData>,
) -> SequencerConfig {
fn setup_sequencer_config() -> SequencerConfig {
let tempdir = tempfile::tempdir().unwrap();
let home = tempdir.path().to_path_buf();
@ -399,8 +413,6 @@ mod tests {
max_block_size: bytesize::ByteSize::mib(1),
mempool_max_size: 10000,
block_create_timeout: Duration::from_secs(1),
initial_accounts,
initial_commitments: vec![],
signing_key: *sequencer_sign_key_for_testing().value(),
bedrock_config: BedrockConfig {
backoff: BackoffConfig {
@ -413,41 +425,17 @@ mod tests {
},
retry_pending_blocks_timeout: Duration::from_mins(4),
indexer_rpc_url: "ws://localhost:8779".parse().unwrap(),
initial_public_accounts: None,
initial_private_accounts: None,
}
}
fn setup_sequencer_config() -> SequencerConfig {
let acc1_account_id: Vec<u8> = vec![
148, 179, 206, 253, 199, 51, 82, 86, 232, 2, 152, 122, 80, 243, 54, 207, 237, 112, 83,
153, 44, 59, 204, 49, 128, 84, 160, 227, 216, 149, 97, 102,
];
let acc2_account_id: Vec<u8> = vec![
30, 145, 107, 3, 207, 73, 192, 230, 160, 63, 238, 207, 18, 69, 54, 216, 103, 244, 92,
94, 124, 248, 42, 16, 141, 19, 119, 18, 14, 226, 140, 204,
];
let initial_acc1 = AccountInitialData {
account_id: AccountId::from_str(&acc1_account_id.to_base58()).unwrap(),
balance: 10000,
};
let initial_acc2 = AccountInitialData {
account_id: AccountId::from_str(&acc2_account_id.to_base58()).unwrap(),
balance: 20000,
};
let initial_accounts = vec![initial_acc1, initial_acc2];
setup_sequencer_config_variable_initial_accounts(initial_accounts)
}
fn create_signing_key_for_account1() -> nssa::PrivateKey {
nssa::PrivateKey::try_new([1; 32]).unwrap()
initial_pub_accounts_private_keys()[0].pub_sign_key.clone()
}
fn create_signing_key_for_account2() -> nssa::PrivateKey {
nssa::PrivateKey::try_new([2; 32]).unwrap()
initial_pub_accounts_private_keys()[1].pub_sign_key.clone()
}
async fn common_setup() -> (SequencerCoreWithMockClients, MemPoolHandle<NSSATransaction>) {
@ -480,8 +468,8 @@ mod tests {
assert_eq!(sequencer.chain_height, config.genesis_id);
assert_eq!(sequencer.sequencer_config.max_num_tx_in_block, 10);
let acc1_account_id = config.initial_accounts[0].account_id;
let acc2_account_id = config.initial_accounts[1].account_id;
let acc1_account_id = initial_accounts()[0].account_id;
let acc2_account_id = initial_accounts()[1].account_id;
let balance_acc_1 = sequencer.state.get_account_by_id(acc1_account_id).balance;
let balance_acc_2 = sequencer.state.get_account_by_id(acc2_account_id).balance;
@ -490,47 +478,6 @@ mod tests {
assert_eq!(20000, balance_acc_2);
}
#[tokio::test]
async fn start_different_intial_accounts_balances() {
let acc1_account_id: Vec<u8> = vec![
27, 132, 197, 86, 123, 18, 100, 64, 153, 93, 62, 213, 170, 186, 5, 101, 215, 30, 24,
52, 96, 72, 25, 255, 156, 23, 245, 233, 213, 221, 7, 143,
];
let acc2_account_id: Vec<u8> = vec![
77, 75, 108, 209, 54, 16, 50, 202, 155, 210, 174, 185, 217, 0, 170, 77, 69, 217, 234,
216, 10, 201, 66, 51, 116, 196, 81, 167, 37, 77, 7, 102,
];
let initial_acc1 = AccountInitialData {
account_id: AccountId::from_str(&acc1_account_id.to_base58()).unwrap(),
balance: 10000,
};
let initial_acc2 = AccountInitialData {
account_id: AccountId::from_str(&acc2_account_id.to_base58()).unwrap(),
balance: 20000,
};
let initial_accounts = vec![initial_acc1, initial_acc2];
let config = setup_sequencer_config_variable_initial_accounts(initial_accounts);
let (sequencer, _mempool_handle) =
SequencerCoreWithMockClients::start_from_config(config.clone()).await;
let acc1_account_id = config.initial_accounts[0].account_id;
let acc2_account_id = config.initial_accounts[1].account_id;
assert_eq!(
10000,
sequencer.state.get_account_by_id(acc1_account_id).balance
);
assert_eq!(
20000,
sequencer.state.get_account_by_id(acc2_account_id).balance
);
}
#[test]
fn transaction_pre_check_pass() {
let tx = common::test_utils::produce_dummy_empty_transaction();
@ -541,10 +488,10 @@ mod tests {
#[tokio::test]
async fn transaction_pre_check_native_transfer_valid() {
let (sequencer, _mempool_handle) = common_setup().await;
let (_sequencer, _mempool_handle) = common_setup().await;
let acc1 = sequencer.sequencer_config.initial_accounts[0].account_id;
let acc2 = sequencer.sequencer_config.initial_accounts[1].account_id;
let acc1 = initial_accounts()[0].account_id;
let acc2 = initial_accounts()[1].account_id;
let sign_key1 = create_signing_key_for_account1();
@ -560,8 +507,8 @@ mod tests {
async fn transaction_pre_check_native_transfer_other_signature() {
let (mut sequencer, _mempool_handle) = common_setup().await;
let acc1 = sequencer.sequencer_config.initial_accounts[0].account_id;
let acc2 = sequencer.sequencer_config.initial_accounts[1].account_id;
let acc1 = initial_accounts()[0].account_id;
let acc2 = initial_accounts()[1].account_id;
let sign_key2 = create_signing_key_for_account2();
@ -585,8 +532,8 @@ mod tests {
async fn transaction_pre_check_native_transfer_sent_too_much() {
let (mut sequencer, _mempool_handle) = common_setup().await;
let acc1 = sequencer.sequencer_config.initial_accounts[0].account_id;
let acc2 = sequencer.sequencer_config.initial_accounts[1].account_id;
let acc1 = initial_accounts()[0].account_id;
let acc2 = initial_accounts()[1].account_id;
let sign_key1 = create_signing_key_for_account1();
@ -612,8 +559,8 @@ mod tests {
async fn transaction_execute_native_transfer() {
let (mut sequencer, _mempool_handle) = common_setup().await;
let acc1 = sequencer.sequencer_config.initial_accounts[0].account_id;
let acc2 = sequencer.sequencer_config.initial_accounts[1].account_id;
let acc1 = initial_accounts()[0].account_id;
let acc2 = initial_accounts()[1].account_id;
let sign_key1 = create_signing_key_for_account1();
@ -674,8 +621,8 @@ mod tests {
async fn replay_transactions_are_rejected_in_the_same_block() {
let (mut sequencer, mempool_handle) = common_setup().await;
let acc1 = sequencer.sequencer_config.initial_accounts[0].account_id;
let acc2 = sequencer.sequencer_config.initial_accounts[1].account_id;
let acc1 = initial_accounts()[0].account_id;
let acc2 = initial_accounts()[1].account_id;
let sign_key1 = create_signing_key_for_account1();
@ -707,8 +654,8 @@ mod tests {
async fn replay_transactions_are_rejected_in_different_blocks() {
let (mut sequencer, mempool_handle) = common_setup().await;
let acc1 = sequencer.sequencer_config.initial_accounts[0].account_id;
let acc2 = sequencer.sequencer_config.initial_accounts[1].account_id;
let acc1 = initial_accounts()[0].account_id;
let acc2 = initial_accounts()[1].account_id;
let sign_key1 = create_signing_key_for_account1();
@ -744,8 +691,8 @@ mod tests {
#[tokio::test]
async fn restart_from_storage() {
let config = setup_sequencer_config();
let acc1_account_id = config.initial_accounts[0].account_id;
let acc2_account_id = config.initial_accounts[1].account_id;
let acc1_account_id = initial_accounts()[0].account_id;
let acc2_account_id = initial_accounts()[1].account_id;
let balance_to_move = 13;
// In the following code block a transaction will be processed that moves `balance_to_move`
@ -754,7 +701,7 @@ mod tests {
{
let (mut sequencer, mempool_handle) =
SequencerCoreWithMockClients::start_from_config(config.clone()).await;
let signing_key = PrivateKey::try_new([1; 32]).unwrap();
let signing_key = create_signing_key_for_account1();
let tx = common::test_utils::create_transaction_native_token_transfer(
acc1_account_id,
@ -786,11 +733,11 @@ mod tests {
// Balances should be consistent with the stored block
assert_eq!(
balance_acc_1,
config.initial_accounts[0].balance - balance_to_move
initial_accounts()[0].balance - balance_to_move
);
assert_eq!(
balance_acc_2,
config.initial_accounts[1].balance + balance_to_move
initial_accounts()[1].balance + balance_to_move
);
}
@ -837,15 +784,15 @@ mod tests {
#[tokio::test]
async fn produce_block_with_correct_prev_meta_after_restart() {
let config = setup_sequencer_config();
let acc1_account_id = config.initial_accounts[0].account_id;
let acc2_account_id = config.initial_accounts[1].account_id;
let acc1_account_id = initial_accounts()[0].account_id;
let acc2_account_id = initial_accounts()[1].account_id;
// Step 1: Create initial database with some block metadata
let expected_prev_meta = {
let (mut sequencer, mempool_handle) =
SequencerCoreWithMockClients::start_from_config(config.clone()).await;
let signing_key = PrivateKey::try_new([1; 32]).unwrap();
let signing_key = create_signing_key_for_account1();
// Add a transaction and produce a block to set up block metadata
let tx = common::test_utils::create_transaction_native_token_transfer(
@ -870,7 +817,7 @@ mod tests {
SequencerCoreWithMockClients::start_from_config(config.clone()).await;
// Step 3: Submit a new transaction
let signing_key = PrivateKey::try_new([1; 32]).unwrap();
let signing_key = create_signing_key_for_account1();
let tx = common::test_utils::create_transaction_native_token_transfer(
acc1_account_id,
1, // Next nonce

File diff suppressed because it is too large Load Diff

688
storage/src/indexer/mod.rs Normal file
View File

@ -0,0 +1,688 @@
use std::{path::Path, sync::Arc};
use common::block::Block;
use nssa::V03State;
use rocksdb::{
BoundColumnFamily, ColumnFamilyDescriptor, DBWithThreadMode, MultiThreaded, Options,
};
use crate::error::DbError;
pub mod read_multiple;
pub mod read_once;
pub mod write_atomic;
pub mod write_non_atomic;
/// Maximal size of stored blocks in base.
///
/// Used to control db size.
///
/// Currently effectively unbounded.
pub const BUFF_SIZE_ROCKSDB: usize = usize::MAX;

/// Size of stored blocks cache in memory.
///
/// Keeping small to not run out of memory.
pub const CACHE_SIZE: usize = 1000;

/// Key base for storing metainformation about id of first block in db.
pub const DB_META_FIRST_BLOCK_IN_DB_KEY: &str = "first_block_in_db";

/// Key base for storing metainformation about id of last current block in db.
pub const DB_META_LAST_BLOCK_IN_DB_KEY: &str = "last_block_in_db";

/// Key base for storing metainformation about id of last observed L1 lib header in db.
pub const DB_META_LAST_OBSERVED_L1_LIB_HEADER_ID_IN_DB_KEY: &str =
    "last_observed_l1_lib_header_in_db";

/// Key base for storing metainformation which describe if first block has been set.
pub const DB_META_FIRST_BLOCK_SET_KEY: &str = "first_block_set";

/// Key base for storing metainformation about the last breakpoint.
pub const DB_META_LAST_BREAKPOINT_ID: &str = "last_breakpoint_id";

/// Interval between state breakpoints.
///
/// A full state snapshot is stored every `BREAKPOINT_INTERVAL` blocks;
/// states in between are reconstructed by replaying transactions from the
/// closest preceding breakpoint (see `RocksDBIO::calculate_state_for_id`).
pub const BREAKPOINT_INTERVAL: u8 = 100;

/// Name of block column family.
pub const CF_BLOCK_NAME: &str = "cf_block";

/// Name of meta column family.
pub const CF_META_NAME: &str = "cf_meta";

/// Name of breakpoint column family.
pub const CF_BREAKPOINT_NAME: &str = "cf_breakpoint";

/// Name of hash to id map column family.
pub const CF_HASH_TO_ID: &str = "cf_hash_to_id";

/// Name of tx hash to id map column family.
pub const CF_TX_TO_ID: &str = "cf_tx_to_id";

/// Name of account meta column family.
pub const CF_ACC_META: &str = "cf_acc_meta";

/// Name of account id to tx hash map column family.
pub const CF_ACC_TO_TX: &str = "cf_acc_to_tx";

/// Convenience alias for results of storage-layer operations.
pub type DbResult<T> = Result<T, DbError>;
/// Thin wrapper around a multi-threaded RocksDB handle used by the indexer
/// storage layer. All column-family accessors and block/state persistence
/// helpers live in the `impl` blocks below and the submodules of this module.
pub struct RocksDBIO {
    // Underlying RocksDB instance; `MultiThreaded` allows concurrent
    // column-family handle access from multiple threads.
    pub db: DBWithThreadMode<MultiThreaded>,
}
impl RocksDBIO {
    /// Opens the RocksDB database at `path`, creating it and all required
    /// column families if they are missing.
    ///
    /// On first creation (detected via the "first block set" meta flag) the
    /// database is seeded: `genesis_block` becomes both the first and the
    /// last block, and `initial_state` is stored as breakpoint 0.
    ///
    /// # Errors
    /// Returns a `DbError` if the underlying RocksDB open fails.
    pub fn open_or_create(
        path: &Path,
        genesis_block: &Block,
        initial_state: &V03State,
    ) -> DbResult<Self> {
        let mut cf_opts = Options::default();
        cf_opts.set_max_write_buffer_number(16);

        // ToDo: Add more column families for different data
        let cfb = ColumnFamilyDescriptor::new(CF_BLOCK_NAME, cf_opts.clone());
        let cfmeta = ColumnFamilyDescriptor::new(CF_META_NAME, cf_opts.clone());
        let cfbreakpoint = ColumnFamilyDescriptor::new(CF_BREAKPOINT_NAME, cf_opts.clone());
        let cfhti = ColumnFamilyDescriptor::new(CF_HASH_TO_ID, cf_opts.clone());
        let cftti = ColumnFamilyDescriptor::new(CF_TX_TO_ID, cf_opts.clone());
        let cfameta = ColumnFamilyDescriptor::new(CF_ACC_META, cf_opts.clone());
        let cfatt = ColumnFamilyDescriptor::new(CF_ACC_TO_TX, cf_opts.clone());

        let mut db_opts = Options::default();
        db_opts.create_missing_column_families(true);
        db_opts.create_if_missing(true);

        let db = DBWithThreadMode::<MultiThreaded>::open_cf_descriptors(
            &db_opts,
            path,
            vec![cfb, cfmeta, cfbreakpoint, cfhti, cftti, cfameta, cfatt],
        )
        .map_err(|err| DbError::RocksDbError {
            error: err,
            additional_info: Some("Failed to open or create DB".to_owned()),
        })?;

        let dbio = Self { db };

        // Seed genesis data exactly once: the meta flag records whether the
        // first block has already been written on a previous run.
        let is_start_set = dbio.get_meta_is_first_block_set()?;

        if !is_start_set {
            let block_id = genesis_block.header.block_id;
            dbio.put_meta_last_block_in_db(block_id)?;
            dbio.put_meta_first_block_in_db_batch(genesis_block)?;
            dbio.put_meta_is_first_block_set()?;
            // First breakpoint setup
            dbio.put_breakpoint(0, initial_state)?;
            dbio.put_meta_last_breakpoint_id(0)?;
        }

        Ok(dbio)
    }

    /// Destroys the RocksDB database at `path`, deleting all stored data.
    pub fn destroy(path: &Path) -> DbResult<()> {
        let db_opts = Options::default();
        DBWithThreadMode::<MultiThreaded>::destroy(&db_opts, path)
            .map_err(|rerr| DbError::rocksdb_cast_message(rerr, None))
    }

    // Columns
    //
    // Each getter below panics if its column family is missing. This is an
    // invariant violation: `open_or_create` always creates every family.

    /// Handle to the meta column family.
    pub fn meta_column(&self) -> Arc<BoundColumnFamily<'_>> {
        self.db
            .cf_handle(CF_META_NAME)
            .expect("Meta column should exist")
    }

    /// Handle to the block column family.
    pub fn block_column(&self) -> Arc<BoundColumnFamily<'_>> {
        self.db
            .cf_handle(CF_BLOCK_NAME)
            .expect("Block column should exist")
    }

    /// Handle to the breakpoint column family.
    pub fn breakpoint_column(&self) -> Arc<BoundColumnFamily<'_>> {
        self.db
            .cf_handle(CF_BREAKPOINT_NAME)
            .expect("Breakpoint column should exist")
    }

    /// Handle to the block-hash → block-id column family.
    pub fn hash_to_id_column(&self) -> Arc<BoundColumnFamily<'_>> {
        self.db
            .cf_handle(CF_HASH_TO_ID)
            .expect("Hash to id map column should exist")
    }

    /// Handle to the tx-hash → block-id column family.
    pub fn tx_hash_to_id_column(&self) -> Arc<BoundColumnFamily<'_>> {
        self.db
            .cf_handle(CF_TX_TO_ID)
            .expect("Tx hash to id map column should exist")
    }

    /// Handle to the account-id → tx-hash column family.
    pub fn account_id_to_tx_hash_column(&self) -> Arc<BoundColumnFamily<'_>> {
        self.db
            .cf_handle(CF_ACC_TO_TX)
            .expect("Account id to tx map column should exist")
    }

    /// Handle to the account meta column family.
    pub fn account_meta_column(&self) -> Arc<BoundColumnFamily<'_>> {
        self.db
            .cf_handle(CF_ACC_META)
            .expect("Account meta column should exist")
    }

    // State

    /// Reconstructs the `V03State` as of block `block_id`.
    ///
    /// Loads the closest breakpoint at or before `block_id`, then replays
    /// every transaction of the following blocks (up to and including
    /// `block_id`) on top of it.
    ///
    /// # Errors
    /// Returns a `DbError` if `block_id` is beyond the last stored block, or
    /// if any replayed transaction fails its stateless check or execution.
    pub fn calculate_state_for_id(&self, block_id: u64) -> DbResult<V03State> {
        let last_block = self.get_meta_last_block_in_db()?;
        if block_id <= last_block {
            let br_id = closest_breakpoint_id(block_id);
            let mut breakpoint = self.get_breakpoint(br_id)?;

            // ToDo: update it to handle any genesis id
            // right now works correctly only if genesis_id < BREAKPOINT_INTERVAL
            let start = if br_id != 0 {
                u64::from(BREAKPOINT_INTERVAL)
                    .checked_mul(br_id)
                    .expect("Reached maximum breakpoint id")
            } else {
                // Breakpoint 0 covers blocks from the very first block in db.
                self.get_meta_first_block_in_db()?
            };

            // Replay all transactions after the breakpoint block to rebuild
            // the state at `block_id`.
            for block in self.get_block_batch_seq(
                start.checked_add(1).expect("Will be lesser that u64::MAX")..=block_id,
            )? {
                for transaction in block.body.transactions {
                    transaction
                        .transaction_stateless_check()
                        .map_err(|err| {
                            DbError::db_interaction_error(format!(
                                "transaction pre check failed with err {err:?}"
                            ))
                        })?
                        .execute_check_on_state(&mut breakpoint, block.header.block_id)
                        .map_err(|err| {
                            DbError::db_interaction_error(format!(
                                "transaction execution failed with err {err:?}"
                            ))
                        })?;
                }
            }
            Ok(breakpoint)
        } else {
            Err(DbError::db_interaction_error(
                "Block on this id not found".to_owned(),
            ))
        }
    }

    /// Reconstructs the state at the last block stored in the database.
    pub fn final_state(&self) -> DbResult<V03State> {
        self.calculate_state_for_id(self.get_meta_last_block_in_db()?)
    }
}
/// Id of the latest breakpoint taken at or before `block_id`.
///
/// Block ids are 1-based and a breakpoint is stored every
/// `BREAKPOINT_INTERVAL` blocks, hence the `saturating_sub(1)` before the
/// integer division.
fn closest_breakpoint_id(block_id: u64) -> u64 {
    let interval = u64::from(BREAKPOINT_INTERVAL);
    let zero_based = block_id.saturating_sub(1);
    zero_based
        .checked_div(interval)
        .expect("Breakpoint interval is not zero")
}
#[expect(clippy::shadow_unrelated, reason = "Fine for tests")]
#[cfg(test)]
mod tests {
    use nssa::{AccountId, PublicKey};
    use tempfile::tempdir;

    use super::*;

    // Dummy genesis block (id 1, no parent, no transactions) used to seed
    // every test database.
    fn genesis_block() -> Block {
        common::test_utils::produce_dummy_block(1, None, vec![])
    }

    // Deterministic signing keys so the derived account ids are stable.
    fn acc1_sign_key() -> nssa::PrivateKey {
        nssa::PrivateKey::try_new([1; 32]).unwrap()
    }

    fn acc2_sign_key() -> nssa::PrivateKey {
        nssa::PrivateKey::try_new([2; 32]).unwrap()
    }

    fn acc1() -> AccountId {
        AccountId::from(&PublicKey::new_from_private_key(&acc1_sign_key()))
    }

    fn acc2() -> AccountId {
        AccountId::from(&PublicKey::new_from_private_key(&acc2_sign_key()))
    }

    // A freshly opened DB must persist the genesis block, set the first/last
    // block markers, create breakpoint 0, and report a final state equal to
    // the genesis snapshot.
    #[test]
    fn start_db() {
        let temp_dir = tempdir().unwrap();
        let temdir_path = temp_dir.path();
        let dbio = RocksDBIO::open_or_create(
            temdir_path,
            &genesis_block(),
            &nssa::V03State::new_with_genesis_accounts(&[(acc1(), 10000), (acc2(), 20000)], &[]),
        )
        .unwrap();
        let last_id = dbio.get_meta_last_block_in_db().unwrap();
        let first_id = dbio.get_meta_first_block_in_db().unwrap();
        let is_first_set = dbio.get_meta_is_first_block_set().unwrap();
        let last_observed_l1_header = dbio.get_meta_last_observed_l1_lib_header_in_db().unwrap();
        let last_br_id = dbio.get_meta_last_breakpoint_id().unwrap();
        let last_block = dbio.get_block(1).unwrap().unwrap();
        let breakpoint = dbio.get_breakpoint(0).unwrap();
        let final_state = dbio.final_state().unwrap();
        assert_eq!(last_id, 1);
        assert_eq!(first_id, 1);
        // No L1 header observed yet on a fresh DB.
        assert_eq!(last_observed_l1_header, None);
        assert!(is_first_set);
        assert_eq!(last_br_id, 0);
        assert_eq!(last_block.header.hash, genesis_block().header.hash);
        // With no blocks applied, final state equals the genesis breakpoint.
        assert_eq!(
            breakpoint.get_account_by_id(acc1()),
            final_state.get_account_by_id(acc1())
        );
        assert_eq!(
            breakpoint.get_account_by_id(acc2()),
            final_state.get_account_by_id(acc2())
        );
    }

    // Inserting one block with a single 1-token transfer must advance the
    // last-block marker and move exactly 1 token between the two accounts.
    #[test]
    fn one_block_insertion() {
        let temp_dir = tempdir().unwrap();
        let temdir_path = temp_dir.path();
        let dbio = RocksDBIO::open_or_create(
            temdir_path,
            &genesis_block(),
            &nssa::V03State::new_with_genesis_accounts(&[(acc1(), 10000), (acc2(), 20000)], &[]),
        )
        .unwrap();
        let prev_hash = genesis_block().header.hash;
        let from = acc1();
        let to = acc2();
        let sign_key = acc1_sign_key();
        let transfer_tx =
            common::test_utils::create_transaction_native_token_transfer(from, 0, to, 1, &sign_key);
        let block = common::test_utils::produce_dummy_block(2, Some(prev_hash), vec![transfer_tx]);
        dbio.put_block(&block, [1; 32]).unwrap();
        let last_id = dbio.get_meta_last_block_in_db().unwrap();
        let first_id = dbio.get_meta_first_block_in_db().unwrap();
        let last_observed_l1_header = dbio
            .get_meta_last_observed_l1_lib_header_in_db()
            .unwrap()
            .unwrap();
        let is_first_set = dbio.get_meta_is_first_block_set().unwrap();
        let last_br_id = dbio.get_meta_last_breakpoint_id().unwrap();
        let last_block = dbio.get_block(last_id).unwrap().unwrap();
        let breakpoint = dbio.get_breakpoint(0).unwrap();
        let final_state = dbio.final_state().unwrap();
        assert_eq!(last_id, 2);
        assert_eq!(first_id, 1);
        // put_block records the L1 header passed alongside the block.
        assert_eq!(last_observed_l1_header, [1; 32]);
        assert!(is_first_set);
        assert_eq!(last_br_id, 0);
        assert_ne!(last_block.header.hash, genesis_block().header.hash);
        // Sender lost exactly 1 token relative to the genesis snapshot…
        assert_eq!(
            breakpoint.get_account_by_id(acc1()).balance
                - final_state.get_account_by_id(acc1()).balance,
            1
        );
        // …and the receiver gained exactly 1.
        assert_eq!(
            final_state.get_account_by_id(acc2()).balance
                - breakpoint.get_account_by_id(acc2()).balance,
            1
        );
    }

    // Inserting BREAKPOINT_INTERVAL blocks must trigger creation of
    // breakpoint 1, whose snapshot sits one transfer behind the final state.
    #[test]
    fn new_breakpoint() {
        let temp_dir = tempdir().unwrap();
        let temdir_path = temp_dir.path();
        let dbio = RocksDBIO::open_or_create(
            temdir_path,
            &genesis_block(),
            &nssa::V03State::new_with_genesis_accounts(&[(acc1(), 10000), (acc2(), 20000)], &[]),
        )
        .unwrap();
        let from = acc1();
        let to = acc2();
        let sign_key = acc1_sign_key();
        // One 1-token transfer per block, chained on the previous block hash.
        for i in 1..=BREAKPOINT_INTERVAL {
            let last_id = dbio.get_meta_last_block_in_db().unwrap();
            let last_block = dbio.get_block(last_id).unwrap().unwrap();
            let prev_hash = last_block.header.hash;
            let transfer_tx = common::test_utils::create_transaction_native_token_transfer(
                from,
                (i - 1).into(),
                to,
                1,
                &sign_key,
            );
            let block = common::test_utils::produce_dummy_block(
                (i + 1).into(),
                Some(prev_hash),
                vec![transfer_tx],
            );
            dbio.put_block(&block, [i; 32]).unwrap();
        }
        let last_id = dbio.get_meta_last_block_in_db().unwrap();
        let first_id = dbio.get_meta_first_block_in_db().unwrap();
        let is_first_set = dbio.get_meta_is_first_block_set().unwrap();
        let last_br_id = dbio.get_meta_last_breakpoint_id().unwrap();
        let last_block = dbio.get_block(last_id).unwrap().unwrap();
        let prev_breakpoint = dbio.get_breakpoint(0).unwrap();
        let breakpoint = dbio.get_breakpoint(1).unwrap();
        let final_state = dbio.final_state().unwrap();
        assert_eq!(last_id, 101);
        assert_eq!(first_id, 1);
        assert!(is_first_set);
        assert_eq!(last_br_id, 1);
        assert_ne!(last_block.header.hash, genesis_block().header.hash);
        // 100 transfers separate genesis snapshot from final state.
        assert_eq!(
            prev_breakpoint.get_account_by_id(acc1()).balance
                - final_state.get_account_by_id(acc1()).balance,
            100
        );
        assert_eq!(
            final_state.get_account_by_id(acc2()).balance
                - prev_breakpoint.get_account_by_id(acc2()).balance,
            100
        );
        // Breakpoint 1 was taken after block 100, one transfer before the end.
        assert_eq!(
            breakpoint.get_account_by_id(acc1()).balance
                - final_state.get_account_by_id(acc1()).balance,
            1
        );
        assert_eq!(
            final_state.get_account_by_id(acc2()).balance
                - breakpoint.get_account_by_id(acc2()).balance,
            1
        );
    }

    // The block-hash → id and tx-hash → id mappings must resolve to the
    // blocks the hashes were inserted with.
    #[test]
    fn simple_maps() {
        let temp_dir = tempdir().unwrap();
        let temdir_path = temp_dir.path();
        let dbio = RocksDBIO::open_or_create(
            temdir_path,
            &genesis_block(),
            &nssa::V03State::new_with_genesis_accounts(&[(acc1(), 10000), (acc2(), 20000)], &[]),
        )
        .unwrap();
        let from = acc1();
        let to = acc2();
        let sign_key = acc1_sign_key();
        let last_id = dbio.get_meta_last_block_in_db().unwrap();
        let last_block = dbio.get_block(last_id).unwrap().unwrap();
        let prev_hash = last_block.header.hash;
        let transfer_tx =
            common::test_utils::create_transaction_native_token_transfer(from, 0, to, 1, &sign_key);
        let block = common::test_utils::produce_dummy_block(2, Some(prev_hash), vec![transfer_tx]);
        let control_hash1 = block.header.hash;
        dbio.put_block(&block, [1; 32]).unwrap();
        let last_id = dbio.get_meta_last_block_in_db().unwrap();
        let last_block = dbio.get_block(last_id).unwrap().unwrap();
        let prev_hash = last_block.header.hash;
        let transfer_tx =
            common::test_utils::create_transaction_native_token_transfer(from, 1, to, 1, &sign_key);
        let block = common::test_utils::produce_dummy_block(3, Some(prev_hash), vec![transfer_tx]);
        let control_hash2 = block.header.hash;
        dbio.put_block(&block, [2; 32]).unwrap();
        let last_id = dbio.get_meta_last_block_in_db().unwrap();
        let last_block = dbio.get_block(last_id).unwrap().unwrap();
        let prev_hash = last_block.header.hash;
        let transfer_tx =
            common::test_utils::create_transaction_native_token_transfer(from, 2, to, 1, &sign_key);
        let control_tx_hash1 = transfer_tx.hash();
        let block = common::test_utils::produce_dummy_block(4, Some(prev_hash), vec![transfer_tx]);
        dbio.put_block(&block, [3; 32]).unwrap();
        let last_id = dbio.get_meta_last_block_in_db().unwrap();
        let last_block = dbio.get_block(last_id).unwrap().unwrap();
        let prev_hash = last_block.header.hash;
        let transfer_tx =
            common::test_utils::create_transaction_native_token_transfer(from, 3, to, 1, &sign_key);
        let control_tx_hash2 = transfer_tx.hash();
        let block = common::test_utils::produce_dummy_block(5, Some(prev_hash), vec![transfer_tx]);
        dbio.put_block(&block, [4; 32]).unwrap();
        let control_block_id1 = dbio.get_block_id_by_hash(control_hash1.0).unwrap().unwrap();
        let control_block_id2 = dbio.get_block_id_by_hash(control_hash2.0).unwrap().unwrap();
        let control_block_id3 = dbio
            .get_block_id_by_tx_hash(control_tx_hash1.0)
            .unwrap()
            .unwrap();
        let control_block_id4 = dbio
            .get_block_id_by_tx_hash(control_tx_hash2.0)
            .unwrap()
            .unwrap();
        assert_eq!(control_block_id1, 2);
        assert_eq!(control_block_id2, 3);
        assert_eq!(control_block_id3, 4);
        assert_eq!(control_block_id4, 5);
    }

    // get_block_batch must return blocks in descending order before a given
    // id (respecting the limit), and get_block_batch_seq must return the
    // requested inclusive range in order.
    #[test]
    fn block_batch() {
        let temp_dir = tempdir().unwrap();
        let temdir_path = temp_dir.path();
        let mut block_res = vec![];
        let dbio = RocksDBIO::open_or_create(
            temdir_path,
            &genesis_block(),
            &nssa::V03State::new_with_genesis_accounts(&[(acc1(), 10000), (acc2(), 20000)], &[]),
        )
        .unwrap();
        let from = acc1();
        let to = acc2();
        let sign_key = acc1_sign_key();
        let last_id = dbio.get_meta_last_block_in_db().unwrap();
        let last_block = dbio.get_block(last_id).unwrap().unwrap();
        let prev_hash = last_block.header.hash;
        let transfer_tx =
            common::test_utils::create_transaction_native_token_transfer(from, 0, to, 1, &sign_key);
        let block = common::test_utils::produce_dummy_block(2, Some(prev_hash), vec![transfer_tx]);
        block_res.push(block.clone());
        dbio.put_block(&block, [1; 32]).unwrap();
        let last_id = dbio.get_meta_last_block_in_db().unwrap();
        let last_block = dbio.get_block(last_id).unwrap().unwrap();
        let prev_hash = last_block.header.hash;
        let transfer_tx =
            common::test_utils::create_transaction_native_token_transfer(from, 1, to, 1, &sign_key);
        let block = common::test_utils::produce_dummy_block(3, Some(prev_hash), vec![transfer_tx]);
        block_res.push(block.clone());
        dbio.put_block(&block, [2; 32]).unwrap();
        let last_id = dbio.get_meta_last_block_in_db().unwrap();
        let last_block = dbio.get_block(last_id).unwrap().unwrap();
        let prev_hash = last_block.header.hash;
        let transfer_tx =
            common::test_utils::create_transaction_native_token_transfer(from, 2, to, 1, &sign_key);
        let block = common::test_utils::produce_dummy_block(4, Some(prev_hash), vec![transfer_tx]);
        block_res.push(block.clone());
        dbio.put_block(&block, [3; 32]).unwrap();
        let last_id = dbio.get_meta_last_block_in_db().unwrap();
        let last_block = dbio.get_block(last_id).unwrap().unwrap();
        let prev_hash = last_block.header.hash;
        let transfer_tx =
            common::test_utils::create_transaction_native_token_transfer(from, 3, to, 1, &sign_key);
        let block = common::test_utils::produce_dummy_block(5, Some(prev_hash), vec![transfer_tx]);
        block_res.push(block.clone());
        dbio.put_block(&block, [4; 32]).unwrap();
        let block_hashes_mem: Vec<[u8; 32]> =
            block_res.into_iter().map(|bl| bl.header.hash.0).collect();
        // Get blocks before ID 6 (i.e., starting from 5 going backwards), limit 4
        // This should return blocks 5, 4, 3, 2 in descending order
        let mut batch_res = dbio.get_block_batch(Some(6), 4).unwrap();
        batch_res.reverse(); // Reverse to match ascending order for comparison
        let block_hashes_db: Vec<[u8; 32]> =
            batch_res.into_iter().map(|bl| bl.header.hash.0).collect();
        assert_eq!(block_hashes_mem, block_hashes_db);
        let block_hashes_mem_limited = &block_hashes_mem[1..];
        // Get blocks before ID 6, limit 3
        // This should return blocks 5, 4, 3 in descending order
        let mut batch_res_limited = dbio.get_block_batch(Some(6), 3).unwrap();
        batch_res_limited.reverse(); // Reverse to match ascending order for comparison
        let block_hashes_db_limited: Vec<[u8; 32]> = batch_res_limited
            .into_iter()
            .map(|bl| bl.header.hash.0)
            .collect();
        assert_eq!(block_hashes_mem_limited, block_hashes_db_limited.as_slice());
        let block_batch_seq = dbio.get_block_batch_seq(1..=5).unwrap();
        let block_batch_ids = block_batch_seq
            .into_iter()
            .map(|block| block.header.block_id)
            .collect::<Vec<_>>();
        assert_eq!(block_batch_ids, vec![1, 2, 3, 4, 5]);
    }

    // The account → transaction index must return an account's transactions
    // in insertion order and honor offset/limit paging.
    #[test]
    fn account_map() {
        let temp_dir = tempdir().unwrap();
        let temdir_path = temp_dir.path();
        let dbio = RocksDBIO::open_or_create(
            temdir_path,
            &genesis_block(),
            &nssa::V03State::new_with_genesis_accounts(&[(acc1(), 10000), (acc2(), 20000)], &[]),
        )
        .unwrap();
        let from = acc1();
        let to = acc2();
        let sign_key = acc1_sign_key();
        let mut tx_hash_res = vec![];
        let last_id = dbio.get_meta_last_block_in_db().unwrap();
        let last_block = dbio.get_block(last_id).unwrap().unwrap();
        let prev_hash = last_block.header.hash;
        let transfer_tx1 =
            common::test_utils::create_transaction_native_token_transfer(from, 0, to, 1, &sign_key);
        let transfer_tx2 =
            common::test_utils::create_transaction_native_token_transfer(from, 1, to, 1, &sign_key);
        tx_hash_res.push(transfer_tx1.hash().0);
        tx_hash_res.push(transfer_tx2.hash().0);
        let block = common::test_utils::produce_dummy_block(
            2,
            Some(prev_hash),
            vec![transfer_tx1, transfer_tx2],
        );
        dbio.put_block(&block, [1; 32]).unwrap();
        let last_id = dbio.get_meta_last_block_in_db().unwrap();
        let last_block = dbio.get_block(last_id).unwrap().unwrap();
        let prev_hash = last_block.header.hash;
        let transfer_tx1 =
            common::test_utils::create_transaction_native_token_transfer(from, 2, to, 1, &sign_key);
        let transfer_tx2 =
            common::test_utils::create_transaction_native_token_transfer(from, 3, to, 1, &sign_key);
        tx_hash_res.push(transfer_tx1.hash().0);
        tx_hash_res.push(transfer_tx2.hash().0);
        let block = common::test_utils::produce_dummy_block(
            3,
            Some(prev_hash),
            vec![transfer_tx1, transfer_tx2],
        );
        dbio.put_block(&block, [2; 32]).unwrap();
        let last_id = dbio.get_meta_last_block_in_db().unwrap();
        let last_block = dbio.get_block(last_id).unwrap().unwrap();
        let prev_hash = last_block.header.hash;
        let transfer_tx1 =
            common::test_utils::create_transaction_native_token_transfer(from, 4, to, 1, &sign_key);
        let transfer_tx2 =
            common::test_utils::create_transaction_native_token_transfer(from, 5, to, 1, &sign_key);
        tx_hash_res.push(transfer_tx1.hash().0);
        tx_hash_res.push(transfer_tx2.hash().0);
        let block = common::test_utils::produce_dummy_block(
            4,
            Some(prev_hash),
            vec![transfer_tx1, transfer_tx2],
        );
        dbio.put_block(&block, [3; 32]).unwrap();
        let last_id = dbio.get_meta_last_block_in_db().unwrap();
        let last_block = dbio.get_block(last_id).unwrap().unwrap();
        let prev_hash = last_block.header.hash;
        let transfer_tx =
            common::test_utils::create_transaction_native_token_transfer(from, 6, to, 1, &sign_key);
        tx_hash_res.push(transfer_tx.hash().0);
        let block = common::test_utils::produce_dummy_block(5, Some(prev_hash), vec![transfer_tx]);
        dbio.put_block(&block, [4; 32]).unwrap();
        let acc1_tx = dbio.get_acc_transactions(*acc1().value(), 0, 7).unwrap();
        let acc1_tx_hashes: Vec<[u8; 32]> = acc1_tx.into_iter().map(|tx| tx.hash().0).collect();
        assert_eq!(acc1_tx_hashes, tx_hash_res);
        // Paging: offset 1, limit 4 → transactions 1..5 of the full list.
        let acc1_tx_limited = dbio.get_acc_transactions(*acc1().value(), 1, 4).unwrap();
        let acc1_tx_limited_hashes: Vec<[u8; 32]> =
            acc1_tx_limited.into_iter().map(|tx| tx.hash().0).collect();
        assert_eq!(acc1_tx_limited_hashes.as_slice(), &tx_hash_res[1..5]);
    }
}

// ---- file boundary (extraction artifact from diff view: "@ -0,0 +1,209 @@") ----
use common::transaction::NSSATransaction;
use super::{Block, DbError, DbResult, RocksDBIO};
#[expect(clippy::multiple_inherent_impl, reason = "Readability")]
impl RocksDBIO {
    /// Returns up to `limit` blocks strictly before `before` (exclusive), in
    /// descending id order. When `before` is `None`, starts from the last
    /// block stored in the database.
    ///
    /// # Errors
    /// Fails when the last-block marker cannot be read or a block read fails.
    pub fn get_block_batch(&self, before: Option<u64>, limit: u64) -> DbResult<Vec<Block>> {
        let mut seq = vec![];
        // Determine the starting block ID
        let start_block_id = if let Some(before_id) = before {
            before_id.saturating_sub(1)
        } else {
            // Get the latest block ID
            self.get_meta_last_block_in_db()?
        };
        for i in 0..limit {
            let block_id = start_block_id.saturating_sub(i);
            if block_id == 0 {
                // Block ids are 1-based; stop once we run off the front of the chain.
                break;
            }
            seq.push(block_id);
        }
        self.get_block_batch_seq(seq.into_iter())
    }

    /// Get block batch from a sequence.
    ///
    /// Currently assumes non-decreasing sequence. Reads all ids with a single
    /// `multi_get_cf` and stops at the first id with no stored block.
    ///
    /// `ToDo`: Add suport of arbitrary sequences.
    ///
    /// # Errors
    /// Fails on a RocksDB read error or a block that cannot be deserialized.
    pub fn get_block_batch_seq(&self, seq: impl Iterator<Item = u64>) -> DbResult<Vec<Block>> {
        let cf_block = self.block_column();
        // Keys setup
        let mut keys = vec![];
        for block_id in seq {
            keys.push((
                &cf_block,
                borsh::to_vec(&block_id).map_err(|err| {
                    DbError::borsh_cast_message(
                        err,
                        Some("Failed to serialize block id".to_owned()),
                    )
                })?,
            ));
        }
        let multi_get_res = self.db.multi_get_cf(keys);
        // Keys parsing
        let mut block_batch = vec![];
        for res in multi_get_res {
            let res = res.map_err(|rerr| DbError::rocksdb_cast_message(rerr, None))?;
            // Block not found, assuming that previous one was the last
            let Some(data) = res else { break };
            let block = borsh::from_slice::<Block>(&data).map_err(|serr| {
                DbError::borsh_cast_message(
                    serr,
                    Some("Failed to deserialize block data".to_owned()),
                )
            })?;
            block_batch.push(block);
        }
        Ok(block_batch)
    }

    /// Get block ids by txs.
    ///
    /// `ToDo`: There may be multiple transactions in one block
    /// so this method can take redundant reads.
    /// Need to update signature and implementation.
    ///
    /// # Errors
    /// Fails when any hash in `tx_vec` has no block-id mapping, on a RocksDB
    /// read error, or on a deserialization error.
    fn get_block_ids_by_tx_vec(&self, tx_vec: &[[u8; 32]]) -> DbResult<Vec<u64>> {
        let cf_tti = self.tx_hash_to_id_column();
        // Keys setup
        let mut keys = vec![];
        for tx_hash in tx_vec {
            keys.push((
                &cf_tti,
                borsh::to_vec(tx_hash).map_err(|err| {
                    DbError::borsh_cast_message(err, Some("Failed to serialize tx_hash".to_owned()))
                })?,
            ));
        }
        let multi_get_res = self.db.multi_get_cf(keys);
        // Keys parsing
        let mut block_id_batch = vec![];
        for res in multi_get_res {
            let res = res
                .map_err(|rerr| DbError::rocksdb_cast_message(rerr, None))?
                .ok_or_else(|| {
                    DbError::db_interaction_error(
                        "Tx to block id mapping do not contain transaction from vec".to_owned(),
                    )
                })?;
            let block_id = borsh::from_slice::<u64>(&res).map_err(|serr| {
                DbError::borsh_cast_message(
                    serr,
                    Some("Failed to deserialize block id".to_owned()),
                )
            })?;
            block_id_batch.push(block_id);
        }
        Ok(block_id_batch)
    }

    // Account
    /// Returns up to `limit` transaction hashes of account `acc_id`, starting
    /// at position `offset` in the account's insertion-ordered index.
    ///
    /// Keys are `acc_id || tx_index` (borsh-encoded, concatenated); the scan
    /// stops early at the first missing index entry.
    ///
    /// # Errors
    /// Fails on serialization, RocksDB read, or deserialization errors.
    pub(crate) fn get_acc_transaction_hashes(
        &self,
        acc_id: [u8; 32],
        offset: u64,
        limit: u64,
    ) -> DbResult<Vec<[u8; 32]>> {
        let cf_att = self.account_id_to_tx_hash_column();
        let mut tx_batch = vec![];
        // Keys preparation
        let mut keys = vec![];
        for tx_id in offset
            ..offset
                .checked_add(limit)
                .expect("Transaction limit should be lesser than u64::MAX")
        {
            let mut prefix = borsh::to_vec(&acc_id).map_err(|berr| {
                DbError::borsh_cast_message(berr, Some("Failed to serialize account id".to_owned()))
            })?;
            let suffix = borsh::to_vec(&tx_id).map_err(|berr| {
                DbError::borsh_cast_message(berr, Some("Failed to serialize tx id".to_owned()))
            })?;
            prefix.extend_from_slice(&suffix);
            keys.push((&cf_att, prefix));
        }
        let multi_get_res = self.db.multi_get_cf(keys);
        for res in multi_get_res {
            let res = res.map_err(|rerr| DbError::rocksdb_cast_message(rerr, None))?;
            // Tx hash not found, assuming that previous one was the last
            let Some(data) = res else { break };
            let tx_hash = borsh::from_slice::<[u8; 32]>(&data).map_err(|serr| {
                DbError::borsh_cast_message(
                    serr,
                    Some("Failed to deserialize tx_hash".to_owned()),
                )
            })?;
            tx_batch.push(tx_hash);
        }
        Ok(tx_batch)
    }

    /// Returns up to `limit` full transactions of account `acc_id`, starting
    /// at `offset`, by resolving each indexed hash to its block and picking
    /// the transaction out of the block body.
    ///
    /// # Errors
    /// Fails when an indexed hash has no block mapping, or when the mapped
    /// block does not actually contain the transaction.
    pub fn get_acc_transactions(
        &self,
        acc_id: [u8; 32],
        offset: u64,
        limit: u64,
    ) -> DbResult<Vec<NSSATransaction>> {
        let mut tx_batch = vec![];
        let tx_hashes = self.get_acc_transaction_hashes(acc_id, offset, limit)?;
        // NOTE(review): `zip` silently truncates if fewer blocks than hashes
        // are returned (block_batch_seq stops at the first missing block) —
        // confirm that this cannot happen for hashes from the account index.
        let associated_blocks_multi_get = self
            .get_block_batch_seq(self.get_block_ids_by_tx_vec(&tx_hashes)?.into_iter())?
            .into_iter()
            .zip(tx_hashes);
        for (block, tx_hash) in associated_blocks_multi_get {
            let transaction = block
                .body
                .transactions
                .iter()
                .find(|tx| tx.hash().0 == tx_hash)
                .ok_or_else(|| {
                    DbError::db_interaction_error(format!(
                        "Missing transaction in block {} with hash {:#?}",
                        block.header.block_id, tx_hash
                    ))
                })?;
            tx_batch.push(transaction.clone());
        }
        Ok(tx_batch)
    }
}

// ---- file boundary (extraction artifact from diff view: "@ -0,0 +1,272 @@") ----
use super::{
Block, DB_META_FIRST_BLOCK_IN_DB_KEY, DB_META_FIRST_BLOCK_SET_KEY,
DB_META_LAST_BLOCK_IN_DB_KEY, DB_META_LAST_BREAKPOINT_ID,
DB_META_LAST_OBSERVED_L1_LIB_HEADER_ID_IN_DB_KEY, DbError, DbResult, RocksDBIO, V03State,
};
#[expect(clippy::multiple_inherent_impl, reason = "Readability")]
impl RocksDBIO {
    // Meta
    /// Returns the id of the first block stored in the database.
    ///
    /// # Errors
    /// Fails when the key is missing or on a read/deserialization error.
    pub fn get_meta_first_block_in_db(&self) -> DbResult<u64> {
        let cf_meta = self.meta_column();
        let res = self
            .db
            .get_cf(
                &cf_meta,
                borsh::to_vec(&DB_META_FIRST_BLOCK_IN_DB_KEY).map_err(|err| {
                    DbError::borsh_cast_message(
                        err,
                        Some("Failed to serialize DB_META_FIRST_BLOCK_IN_DB_KEY".to_owned()),
                    )
                })?,
            )
            .map_err(|rerr| DbError::rocksdb_cast_message(rerr, None))?;
        if let Some(data) = res {
            borsh::from_slice::<u64>(&data).map_err(|err| {
                DbError::borsh_cast_message(
                    err,
                    Some("Failed to deserialize first block".to_owned()),
                )
            })
        } else {
            Err(DbError::db_interaction_error(
                "First block not found".to_owned(),
            ))
        }
    }

    /// Returns the id of the last block stored in the database.
    ///
    /// # Errors
    /// Fails when the key is missing or on a read/deserialization error.
    pub fn get_meta_last_block_in_db(&self) -> DbResult<u64> {
        let cf_meta = self.meta_column();
        let res = self
            .db
            .get_cf(
                &cf_meta,
                borsh::to_vec(&DB_META_LAST_BLOCK_IN_DB_KEY).map_err(|err| {
                    DbError::borsh_cast_message(
                        err,
                        Some("Failed to serialize DB_META_LAST_BLOCK_IN_DB_KEY".to_owned()),
                    )
                })?,
            )
            .map_err(|rerr| DbError::rocksdb_cast_message(rerr, None))?;
        if let Some(data) = res {
            borsh::from_slice::<u64>(&data).map_err(|err| {
                DbError::borsh_cast_message(
                    err,
                    Some("Failed to deserialize last block".to_owned()),
                )
            })
        } else {
            Err(DbError::db_interaction_error(
                "Last block not found".to_owned(),
            ))
        }
    }

    /// Returns the last observed L1 LIB header, or `None` when none has been
    /// recorded yet (fresh database).
    ///
    /// # Errors
    /// Fails on a read or deserialization error.
    pub fn get_meta_last_observed_l1_lib_header_in_db(&self) -> DbResult<Option<[u8; 32]>> {
        let cf_meta = self.meta_column();
        let res = self
            .db
            .get_cf(
                &cf_meta,
                borsh::to_vec(&DB_META_LAST_OBSERVED_L1_LIB_HEADER_ID_IN_DB_KEY).map_err(
                    |err| {
                        DbError::borsh_cast_message(
                            err,
                            Some(
                                "Failed to serialize DB_META_LAST_OBSERVED_L1_LIB_HEADER_ID_IN_DB_KEY"
                                    .to_owned(),
                            ),
                        )
                    },
                )?,
            )
            .map_err(|rerr| DbError::rocksdb_cast_message(rerr, None))?;
        res.map(|data| {
            borsh::from_slice::<[u8; 32]>(&data).map_err(|err| {
                DbError::borsh_cast_message(
                    err,
                    Some("Failed to deserialize last l1 lib header".to_owned()),
                )
            })
        })
        .transpose()
    }

    /// Returns whether the first-block marker has been written.
    ///
    /// Presence of the key is the flag; the stored value is not inspected.
    ///
    /// # Errors
    /// Fails on a serialization or read error.
    pub fn get_meta_is_first_block_set(&self) -> DbResult<bool> {
        let cf_meta = self.meta_column();
        let res = self
            .db
            .get_cf(
                &cf_meta,
                borsh::to_vec(&DB_META_FIRST_BLOCK_SET_KEY).map_err(|err| {
                    DbError::borsh_cast_message(
                        err,
                        Some("Failed to serialize DB_META_FIRST_BLOCK_SET_KEY".to_owned()),
                    )
                })?,
            )
            .map_err(|rerr| DbError::rocksdb_cast_message(rerr, None))?;
        Ok(res.is_some())
    }

    /// Returns the id of the last stored breakpoint snapshot.
    ///
    /// # Errors
    /// Fails when the key is missing or on a read/deserialization error.
    pub fn get_meta_last_breakpoint_id(&self) -> DbResult<u64> {
        let cf_meta = self.meta_column();
        let res = self
            .db
            .get_cf(
                &cf_meta,
                borsh::to_vec(&DB_META_LAST_BREAKPOINT_ID).map_err(|err| {
                    DbError::borsh_cast_message(
                        err,
                        Some("Failed to serialize DB_META_LAST_BREAKPOINT_ID".to_owned()),
                    )
                })?,
            )
            .map_err(|rerr| DbError::rocksdb_cast_message(rerr, None))?;
        if let Some(data) = res {
            borsh::from_slice::<u64>(&data).map_err(|err| {
                DbError::borsh_cast_message(
                    err,
                    Some("Failed to deserialize last breakpoint id".to_owned()),
                )
            })
        } else {
            Err(DbError::db_interaction_error(
                "Last breakpoint id not found".to_owned(),
            ))
        }
    }

    // Block
    /// Returns the block stored under `block_id`, or `None` if absent.
    ///
    /// # Errors
    /// Fails on a read or deserialization error.
    pub fn get_block(&self, block_id: u64) -> DbResult<Option<Block>> {
        let cf_block = self.block_column();
        let res = self
            .db
            .get_cf(
                &cf_block,
                borsh::to_vec(&block_id).map_err(|err| {
                    DbError::borsh_cast_message(
                        err,
                        Some("Failed to serialize block id".to_owned()),
                    )
                })?,
            )
            .map_err(|rerr| DbError::rocksdb_cast_message(rerr, None))?;
        if let Some(data) = res {
            borsh::from_slice::<Block>(&data)
                .map_err(|serr| {
                    DbError::borsh_cast_message(
                        serr,
                        Some("Failed to deserialize block data".to_owned()),
                    )
                })
                .map(Some)
        } else {
            Ok(None)
        }
    }

    // State
    /// Returns the breakpoint snapshot stored under `br_id`.
    ///
    /// # Errors
    /// Fails when no breakpoint exists for `br_id` or on a
    /// read/deserialization error.
    pub fn get_breakpoint(&self, br_id: u64) -> DbResult<V03State> {
        let cf_br = self.breakpoint_column();
        let res = self
            .db
            .get_cf(
                &cf_br,
                borsh::to_vec(&br_id).map_err(|err| {
                    DbError::borsh_cast_message(
                        err,
                        Some("Failed to serialize breakpoint id".to_owned()),
                    )
                })?,
            )
            .map_err(|rerr| DbError::rocksdb_cast_message(rerr, None))?;
        if let Some(data) = res {
            borsh::from_slice::<V03State>(&data).map_err(|serr| {
                DbError::borsh_cast_message(
                    serr,
                    Some("Failed to deserialize breakpoint data".to_owned()),
                )
            })
        } else {
            Err(DbError::db_interaction_error(
                "Breakpoint on this id not found".to_owned(),
            ))
        }
    }

    // Mappings
    /// Resolves a block hash to its block id, or `None` if unknown.
    ///
    /// # Errors
    /// Fails on a read or deserialization error.
    pub fn get_block_id_by_hash(&self, hash: [u8; 32]) -> DbResult<Option<u64>> {
        let cf_hti = self.hash_to_id_column();
        let res = self
            .db
            .get_cf(
                &cf_hti,
                borsh::to_vec(&hash).map_err(|err| {
                    DbError::borsh_cast_message(
                        err,
                        Some("Failed to serialize block hash".to_owned()),
                    )
                })?,
            )
            .map_err(|rerr| DbError::rocksdb_cast_message(rerr, None))?;
        if let Some(data) = res {
            borsh::from_slice::<u64>(&data)
                .map_err(|serr| {
                    DbError::borsh_cast_message(serr, Some("Failed to deserialize block id".to_owned()))
                })
                .map(Some)
        } else {
            Ok(None)
        }
    }

    /// Resolves a transaction hash to the id of the block containing it, or
    /// `None` if unknown.
    ///
    /// # Errors
    /// Fails on a read or deserialization error.
    pub fn get_block_id_by_tx_hash(&self, tx_hash: [u8; 32]) -> DbResult<Option<u64>> {
        let cf_tti = self.tx_hash_to_id_column();
        let res = self
            .db
            .get_cf(
                &cf_tti,
                borsh::to_vec(&tx_hash).map_err(|err| {
                    DbError::borsh_cast_message(
                        err,
                        Some("Failed to serialize transaction hash".to_owned()),
                    )
                })?,
            )
            .map_err(|rerr| DbError::rocksdb_cast_message(rerr, None))?;
        if let Some(data) = res {
            borsh::from_slice::<u64>(&data)
                .map_err(|serr| {
                    DbError::borsh_cast_message(serr, Some("Failed to deserialize block id".to_owned()))
                })
                .map(Some)
        } else {
            Ok(None)
        }
    }

    // Accounts meta
    /// Returns the number of indexed transactions for `acc_id`, or `None` if
    /// the account has no metadata entry yet.
    ///
    /// Note: the key is the raw 32-byte id, not a borsh encoding.
    ///
    /// # Errors
    /// Fails on a read or deserialization error.
    pub(crate) fn get_acc_meta_num_tx(&self, acc_id: [u8; 32]) -> DbResult<Option<u64>> {
        let cf_ameta = self.account_meta_column();
        let res = self.db.get_cf(&cf_ameta, acc_id).map_err(|rerr| {
            DbError::rocksdb_cast_message(rerr, Some("Failed to read from acc meta cf".to_owned()))
        })?;
        res.map(|data| {
            borsh::from_slice::<u64>(&data).map_err(|serr| {
                DbError::borsh_cast_message(serr, Some("Failed to deserialize num tx".to_owned()))
            })
        })
        .transpose()
    }
}

// ---- file boundary (extraction artifact from diff view: "@ -0,0 +1,339 @@") ----
use std::collections::HashMap;
use rocksdb::WriteBatch;
use super::{
Arc, BREAKPOINT_INTERVAL, Block, BoundColumnFamily, DB_META_FIRST_BLOCK_IN_DB_KEY,
DB_META_FIRST_BLOCK_SET_KEY, DB_META_LAST_BLOCK_IN_DB_KEY, DB_META_LAST_BREAKPOINT_ID,
DB_META_LAST_OBSERVED_L1_LIB_HEADER_ID_IN_DB_KEY, DbError, DbResult, RocksDBIO,
};
#[expect(clippy::multiple_inherent_impl, reason = "Readability")]
impl RocksDBIO {
// Accounts meta
pub(crate) fn update_acc_meta_batch(
&self,
acc_id: [u8; 32],
num_tx: u64,
write_batch: &mut WriteBatch,
) -> DbResult<()> {
let cf_ameta = self.account_meta_column();
write_batch.put_cf(
&cf_ameta,
borsh::to_vec(&acc_id).map_err(|err| {
DbError::borsh_cast_message(err, Some("Failed to serialize account id".to_owned()))
})?,
borsh::to_vec(&num_tx).map_err(|err| {
DbError::borsh_cast_message(
err,
Some("Failed to serialize acc metadata".to_owned()),
)
})?,
);
Ok(())
}
// Account
pub fn put_account_transactions(
&self,
acc_id: [u8; 32],
tx_hashes: &[[u8; 32]],
) -> DbResult<()> {
let acc_num_tx = self.get_acc_meta_num_tx(acc_id)?.unwrap_or(0);
let cf_att = self.account_id_to_tx_hash_column();
let mut write_batch = WriteBatch::new();
for (tx_id, tx_hash) in tx_hashes.iter().enumerate() {
let put_id = acc_num_tx
.checked_add(tx_id.try_into().expect("Must fit into u64"))
.expect("Tx count should be lesser that u64::MAX");
let mut prefix = borsh::to_vec(&acc_id).map_err(|berr| {
DbError::borsh_cast_message(berr, Some("Failed to serialize account id".to_owned()))
})?;
let suffix = borsh::to_vec(&put_id).map_err(|berr| {
DbError::borsh_cast_message(berr, Some("Failed to serialize tx id".to_owned()))
})?;
prefix.extend_from_slice(&suffix);
write_batch.put_cf(
&cf_att,
prefix,
borsh::to_vec(tx_hash).map_err(|berr| {
DbError::borsh_cast_message(
berr,
Some("Failed to serialize tx hash".to_owned()),
)
})?,
);
}
self.update_acc_meta_batch(
acc_id,
acc_num_tx
.checked_add(tx_hashes.len().try_into().expect("Must fit into u64"))
.expect("Tx count should be lesser that u64::MAX"),
&mut write_batch,
)?;
self.db.write(write_batch).map_err(|rerr| {
DbError::rocksdb_cast_message(rerr, Some("Failed to write batch".to_owned()))
})
}
pub fn put_account_transactions_dependant(
&self,
acc_id: [u8; 32],
tx_hashes: &[[u8; 32]],
write_batch: &mut WriteBatch,
) -> DbResult<()> {
let acc_num_tx = self.get_acc_meta_num_tx(acc_id)?.unwrap_or(0);
let cf_att = self.account_id_to_tx_hash_column();
for (tx_id, tx_hash) in tx_hashes.iter().enumerate() {
let put_id = acc_num_tx
.checked_add(tx_id.try_into().expect("Must fit into u64"))
.expect("Tx count should be lesser that u64::MAX");
let mut prefix = borsh::to_vec(&acc_id).map_err(|berr| {
DbError::borsh_cast_message(berr, Some("Failed to serialize account id".to_owned()))
})?;
let suffix = borsh::to_vec(&put_id).map_err(|berr| {
DbError::borsh_cast_message(berr, Some("Failed to serialize tx id".to_owned()))
})?;
prefix.extend_from_slice(&suffix);
write_batch.put_cf(
&cf_att,
prefix,
borsh::to_vec(tx_hash).map_err(|berr| {
DbError::borsh_cast_message(
berr,
Some("Failed to serialize tx hash".to_owned()),
)
})?,
);
}
self.update_acc_meta_batch(
acc_id,
acc_num_tx
.checked_add(tx_hashes.len().try_into().expect("Must fit into u64"))
.expect("Tx count should be lesser that u64::MAX"),
write_batch,
)?;
Ok(())
}
// Meta
pub fn put_meta_first_block_in_db_batch(&self, block: &Block) -> DbResult<()> {
let cf_meta = self.meta_column();
self.db
.put_cf(
&cf_meta,
borsh::to_vec(&DB_META_FIRST_BLOCK_IN_DB_KEY).map_err(|err| {
DbError::borsh_cast_message(
err,
Some("Failed to serialize DB_META_FIRST_BLOCK_IN_DB_KEY".to_owned()),
)
})?,
borsh::to_vec(&block.header.block_id).map_err(|err| {
DbError::borsh_cast_message(
err,
Some("Failed to serialize first block id".to_owned()),
)
})?,
)
.map_err(|rerr| DbError::rocksdb_cast_message(rerr, None))?;
self.put_block(block, [0; 32])?;
Ok(())
}
pub fn put_meta_last_block_in_db_batch(
&self,
block_id: u64,
write_batch: &mut WriteBatch,
) -> DbResult<()> {
let cf_meta = self.meta_column();
write_batch.put_cf(
&cf_meta,
borsh::to_vec(&DB_META_LAST_BLOCK_IN_DB_KEY).map_err(|err| {
DbError::borsh_cast_message(
err,
Some("Failed to serialize DB_META_LAST_BLOCK_IN_DB_KEY".to_owned()),
)
})?,
borsh::to_vec(&block_id).map_err(|err| {
DbError::borsh_cast_message(
err,
Some("Failed to serialize last block id".to_owned()),
)
})?,
);
Ok(())
}
pub fn put_meta_last_observed_l1_lib_header_in_db_batch(
&self,
l1_lib_header: [u8; 32],
write_batch: &mut WriteBatch,
) -> DbResult<()> {
let cf_meta = self.meta_column();
write_batch.put_cf(
&cf_meta,
borsh::to_vec(&DB_META_LAST_OBSERVED_L1_LIB_HEADER_ID_IN_DB_KEY).map_err(|err| {
DbError::borsh_cast_message(
err,
Some(
"Failed to serialize DB_META_LAST_OBSERVED_L1_LIB_HEADER_ID_IN_DB_KEY"
.to_owned(),
),
)
})?,
borsh::to_vec(&l1_lib_header).map_err(|err| {
DbError::borsh_cast_message(
err,
Some("Failed to serialize last l1 block header".to_owned()),
)
})?,
);
Ok(())
}
pub fn put_meta_last_breakpoint_id_batch(
&self,
br_id: u64,
write_batch: &mut WriteBatch,
) -> DbResult<()> {
let cf_meta = self.meta_column();
write_batch.put_cf(
&cf_meta,
borsh::to_vec(&DB_META_LAST_BREAKPOINT_ID).map_err(|err| {
DbError::borsh_cast_message(
err,
Some("Failed to serialize DB_META_LAST_BREAKPOINT_ID".to_owned()),
)
})?,
borsh::to_vec(&br_id).map_err(|err| {
DbError::borsh_cast_message(
err,
Some("Failed to serialize last block id".to_owned()),
)
})?,
);
Ok(())
}
pub fn put_meta_is_first_block_set_batch(&self, write_batch: &mut WriteBatch) -> DbResult<()> {
let cf_meta = self.meta_column();
write_batch.put_cf(
&cf_meta,
borsh::to_vec(&DB_META_FIRST_BLOCK_SET_KEY).map_err(|err| {
DbError::borsh_cast_message(
err,
Some("Failed to serialize DB_META_FIRST_BLOCK_SET_KEY".to_owned()),
)
})?,
[1_u8; 1],
);
Ok(())
}
// Block
/// Persists `block` plus all of its secondary indexes in one atomic RocksDB
/// write batch, then cuts a state breakpoint if the block id lands on a
/// `BREAKPOINT_INTERVAL` boundary.
///
/// Entries written in the batch:
/// - block id -> serialized block (block column)
/// - block hash -> block id (hash-to-id column)
/// - tx hash -> block id (tx-hash-to-id column), one per transaction
/// - account id -> tx hashes, one per affected public account
/// - meta "last block id" and "last observed L1 LIB header", only when this
///   block is newer than the one currently recorded
pub fn put_block(&self, block: &Block, l1_lib_header: [u8; 32]) -> DbResult<()> {
    let cf_block = self.block_column();
    let cf_hti = self.hash_to_id_column();
    let cf_tti: Arc<BoundColumnFamily<'_>> = self.tx_hash_to_id_column();
    let last_curr_block = self.get_meta_last_block_in_db()?;
    let mut write_batch = WriteBatch::default();
    // block id -> full serialized block.
    write_batch.put_cf(
        &cf_block,
        borsh::to_vec(&block.header.block_id).map_err(|err| {
            DbError::borsh_cast_message(err, Some("Failed to serialize block id".to_owned()))
        })?,
        borsh::to_vec(block).map_err(|err| {
            DbError::borsh_cast_message(err, Some("Failed to serialize block data".to_owned()))
        })?,
    );
    // Only advance the "last block"/L1 header meta entries when this block is
    // strictly newer than what is already recorded, so inserting an older
    // block cannot rewind them.
    if block.header.block_id > last_curr_block {
        self.put_meta_last_block_in_db_batch(block.header.block_id, &mut write_batch)?;
        self.put_meta_last_observed_l1_lib_header_in_db_batch(l1_lib_header, &mut write_batch)?;
    }
    // block hash -> block id reverse lookup.
    write_batch.put_cf(
        &cf_hti,
        borsh::to_vec(&block.header.hash).map_err(|err| {
            DbError::borsh_cast_message(err, Some("Failed to serialize block hash".to_owned()))
        })?,
        borsh::to_vec(&block.header.block_id).map_err(|err| {
            DbError::borsh_cast_message(err, Some("Failed to serialize block id".to_owned()))
        })?,
    );
    // Per-transaction: tx hash -> block id, and accumulate the per-account
    // transaction index in memory before flushing it to the batch.
    let mut acc_to_tx_map: HashMap<[u8; 32], Vec<[u8; 32]>> = HashMap::new();
    for tx in &block.body.transactions {
        let tx_hash = tx.hash();
        write_batch.put_cf(
            &cf_tti,
            borsh::to_vec(&tx_hash).map_err(|err| {
                DbError::borsh_cast_message(err, Some("Failed to serialize tx hash".to_owned()))
            })?,
            borsh::to_vec(&block.header.block_id).map_err(|err| {
                DbError::borsh_cast_message(
                    err,
                    Some("Failed to serialize block id".to_owned()),
                )
            })?,
        );
        let acc_ids = tx
            .affected_public_account_ids()
            .into_iter()
            .map(nssa::AccountId::into_value)
            .collect::<Vec<_>>();
        for acc_id in acc_ids {
            acc_to_tx_map
                .entry(acc_id)
                .and_modify(|tx_hashes| tx_hashes.push(tx_hash.into()))
                .or_insert_with(|| vec![tx_hash.into()]);
        }
    }
    #[expect(
        clippy::iter_over_hash_type,
        reason = "RocksDB will keep ordering persistent"
    )]
    for (acc_id, tx_hashes) in acc_to_tx_map {
        self.put_account_transactions_dependant(acc_id, &tx_hashes, &mut write_batch)?;
    }
    // Commit: every index above lands atomically or not at all.
    self.db.write(write_batch).map_err(|rerr| {
        DbError::rocksdb_cast_message(rerr, Some("Failed to write batch".to_owned()))
    })?;
    // On interval boundaries, materialize the next state breakpoint; any
    // failure there is propagated to the caller.
    if block
        .header
        .block_id
        .is_multiple_of(BREAKPOINT_INTERVAL.into())
    {
        self.put_next_breakpoint()?;
    }
    Ok(())
}
}

View File

@ -0,0 +1,147 @@
use super::{
BREAKPOINT_INTERVAL, DB_META_FIRST_BLOCK_SET_KEY, DB_META_LAST_BLOCK_IN_DB_KEY,
DB_META_LAST_BREAKPOINT_ID, DB_META_LAST_OBSERVED_L1_LIB_HEADER_ID_IN_DB_KEY, DbError,
DbResult, RocksDBIO, V03State,
};
#[expect(clippy::multiple_inherent_impl, reason = "Readability")]
impl RocksDBIO {
    // Meta

    /// Stores `block_id` under `DB_META_LAST_BLOCK_IN_DB_KEY` in the meta
    /// column family, recording it as the last block persisted in the DB.
    ///
    /// # Errors
    /// Returns a [`DbError`] if borsh serialization or the RocksDB write
    /// fails.
    pub fn put_meta_last_block_in_db(&self, block_id: u64) -> DbResult<()> {
        let cf_meta = self.meta_column();
        self.db
            .put_cf(
                &cf_meta,
                borsh::to_vec(&DB_META_LAST_BLOCK_IN_DB_KEY).map_err(|err| {
                    DbError::borsh_cast_message(
                        err,
                        Some("Failed to serialize DB_META_LAST_BLOCK_IN_DB_KEY".to_owned()),
                    )
                })?,
                borsh::to_vec(&block_id).map_err(|err| {
                    DbError::borsh_cast_message(
                        err,
                        Some("Failed to serialize last block id".to_owned()),
                    )
                })?,
            )
            .map_err(|rerr| DbError::rocksdb_cast_message(rerr, None))?;
        Ok(())
    }

    /// Stores the last observed L1 LIB header hash under
    /// `DB_META_LAST_OBSERVED_L1_LIB_HEADER_ID_IN_DB_KEY` in the meta column
    /// family.
    ///
    /// # Errors
    /// Returns a [`DbError`] if borsh serialization or the RocksDB write
    /// fails.
    pub fn put_meta_last_observed_l1_lib_header_in_db(
        &self,
        l1_lib_header: [u8; 32],
    ) -> DbResult<()> {
        let cf_meta = self.meta_column();
        self.db
            .put_cf(
                &cf_meta,
                borsh::to_vec(&DB_META_LAST_OBSERVED_L1_LIB_HEADER_ID_IN_DB_KEY).map_err(
                    |err| {
                        DbError::borsh_cast_message(
                            err,
                            Some(
                                "Failed to serialize DB_META_LAST_OBSERVED_L1_LIB_HEADER_ID_IN_DB_KEY"
                                    .to_owned(),
                            ),
                        )
                    },
                )?,
                borsh::to_vec(&l1_lib_header).map_err(|err| {
                    DbError::borsh_cast_message(
                        err,
                        Some("Failed to serialize last l1 block header".to_owned()),
                    )
                })?,
            )
            .map_err(|rerr| DbError::rocksdb_cast_message(rerr, None))?;
        Ok(())
    }

    /// Stores `br_id` under `DB_META_LAST_BREAKPOINT_ID` in the meta column
    /// family, recording it as the id of the last materialized breakpoint.
    ///
    /// # Errors
    /// Returns a [`DbError`] if borsh serialization or the RocksDB write
    /// fails.
    pub fn put_meta_last_breakpoint_id(&self, br_id: u64) -> DbResult<()> {
        let cf_meta = self.meta_column();
        self.db
            .put_cf(
                &cf_meta,
                borsh::to_vec(&DB_META_LAST_BREAKPOINT_ID).map_err(|err| {
                    DbError::borsh_cast_message(
                        err,
                        Some("Failed to serialize DB_META_LAST_BREAKPOINT_ID".to_owned()),
                    )
                })?,
                borsh::to_vec(&br_id).map_err(|err| {
                    // Fixed: message previously said "last block id" although
                    // a breakpoint id is being serialized here.
                    DbError::borsh_cast_message(
                        err,
                        Some("Failed to serialize last breakpoint id".to_owned()),
                    )
                })?,
            )
            .map_err(|rerr| DbError::rocksdb_cast_message(rerr, None))?;
        Ok(())
    }

    /// Writes the "first block set" marker (a single sentinel byte) into the
    /// meta column family.
    ///
    /// # Errors
    /// Returns a [`DbError`] if borsh serialization or the RocksDB write
    /// fails.
    pub fn put_meta_is_first_block_set(&self) -> DbResult<()> {
        let cf_meta = self.meta_column();
        self.db
            .put_cf(
                &cf_meta,
                borsh::to_vec(&DB_META_FIRST_BLOCK_SET_KEY).map_err(|err| {
                    DbError::borsh_cast_message(
                        err,
                        Some("Failed to serialize DB_META_FIRST_BLOCK_SET_KEY".to_owned()),
                    )
                })?,
                [1_u8; 1],
            )
            .map_err(|rerr| DbError::rocksdb_cast_message(rerr, None))?;
        Ok(())
    }

    // State

    /// Stores `breakpoint` (a full [`V03State`] snapshot) under `br_id` in the
    /// breakpoint column family.
    ///
    /// # Errors
    /// Returns a [`DbError`] if borsh serialization or the RocksDB write
    /// fails.
    pub fn put_breakpoint(&self, br_id: u64, breakpoint: &V03State) -> DbResult<()> {
        let cf_br = self.breakpoint_column();
        self.db
            .put_cf(
                &cf_br,
                borsh::to_vec(&br_id).map_err(|err| {
                    DbError::borsh_cast_message(
                        err,
                        Some("Failed to serialize breakpoint id".to_owned()),
                    )
                })?,
                borsh::to_vec(breakpoint).map_err(|err| {
                    DbError::borsh_cast_message(
                        err,
                        Some("Failed to serialize breakpoint data".to_owned()),
                    )
                })?,
            )
            .map_err(|rerr| DbError::rocksdb_cast_message(rerr, None))
    }

    /// Materializes the next state breakpoint if its target block is already
    /// in the DB: recomputes the state at block
    /// `(last_breakpoint_id + 1) * BREAKPOINT_INTERVAL`, persists the
    /// snapshot, and advances the last-breakpoint meta entry.
    ///
    /// # Errors
    /// Returns a [`DbError`] if the target block has not been stored yet
    /// ("Breakpoint not yet achieved") or if any DB/serialization step fails.
    ///
    /// # Panics
    /// Panics if the breakpoint id or the target block id would overflow
    /// `u64`.
    pub fn put_next_breakpoint(&self) -> DbResult<()> {
        let last_block = self.get_meta_last_block_in_db()?;
        let next_breakpoint_id = self
            .get_meta_last_breakpoint_id()?
            .checked_add(1)
            .expect("Breakpoint Id will be lesser than u64::MAX");
        let block_to_break_id = next_breakpoint_id
            .checked_mul(u64::from(BREAKPOINT_INTERVAL))
            .expect("Reached maximum breakpoint id");
        if block_to_break_id <= last_block {
            let next_breakpoint = self.calculate_state_for_id(block_to_break_id)?;
            self.put_breakpoint(next_breakpoint_id, &next_breakpoint)?;
            self.put_meta_last_breakpoint_id(next_breakpoint_id)
        } else {
            Err(DbError::db_interaction_error(
                "Breakpoint not yet achieved".to_owned(),
            ))
        }
    }
}

View File

@ -0,0 +1,16 @@
[package]
name = "testnet_initial_state"
version = "0.1.0"
edition = "2024"
license.workspace = true
[dependencies]
key_protocol.workspace = true
nssa.workspace = true
nssa_core.workspace = true
common.workspace = true
serde.workspace = true
[lints]
workspace = true

View File

@ -0,0 +1,396 @@
use common::PINATA_BASE58;
use key_protocol::key_management::{
KeyChain,
secret_holders::{PrivateKeyHolder, SecretSpendingKey},
};
use nssa::{Account, AccountId, Data, PrivateKey, PublicKey, V03State};
use nssa_core::{NullifierPublicKey, encryption::shared_key_derivation::Secp256k1Point};
use serde::{Deserialize, Serialize};
// NOTE(review): everything below is fixed, publicly-committed fixture key
// material and balances for the testnet/genesis accounts. These keys are in
// the open-source tree and must never protect real funds.

// Signing keys for the two genesis public accounts (fed to
// `PrivateKey::try_new` in `initial_pub_accounts_private_keys`).
const PRIVATE_KEY_PUB_ACC_A: [u8; 32] = [
    16, 162, 106, 154, 236, 125, 52, 184, 35, 100, 238, 174, 69, 197, 41, 77, 187, 10, 118, 75, 0,
    11, 148, 238, 185, 181, 133, 17, 220, 72, 124, 77,
];
const PRIVATE_KEY_PUB_ACC_B: [u8; 32] = [
    113, 121, 64, 177, 204, 85, 229, 214, 178, 6, 109, 191, 29, 154, 63, 38, 242, 18, 244, 219, 8,
    208, 35, 136, 23, 127, 207, 237, 216, 169, 190, 27,
];
// Secret spending keys for the two genesis private accounts (wrapped in
// `SecretSpendingKey`).
const SSK_PRIV_ACC_A: [u8; 32] = [
    93, 13, 190, 240, 250, 33, 108, 195, 176, 40, 144, 61, 4, 28, 58, 112, 53, 161, 42, 238, 155,
    27, 23, 176, 208, 121, 15, 229, 165, 180, 99, 143,
];
const SSK_PRIV_ACC_B: [u8; 32] = [
    48, 175, 124, 10, 230, 240, 166, 14, 249, 254, 157, 226, 208, 124, 122, 177, 203, 139, 192,
    180, 43, 120, 55, 151, 50, 21, 113, 22, 254, 83, 148, 56,
];
// Nullifier secret keys (part of each key chain's `PrivateKeyHolder`); the
// tests below check they match what the spending key derives.
const NSK_PRIV_ACC_A: [u8; 32] = [
    25, 21, 186, 59, 180, 224, 101, 64, 163, 208, 228, 43, 13, 185, 100, 123, 156, 47, 80, 179, 72,
    51, 115, 11, 180, 99, 21, 201, 48, 194, 118, 144,
];
const NSK_PRIV_ACC_B: [u8; 32] = [
    99, 82, 190, 140, 234, 10, 61, 163, 15, 211, 179, 54, 70, 166, 87, 5, 182, 68, 117, 244, 217,
    23, 99, 9, 4, 177, 230, 125, 109, 91, 160, 30,
];
// Viewing secret keys (part of each key chain's `PrivateKeyHolder`).
const VSK_PRIV_ACC_A: [u8; 32] = [
    5, 85, 114, 119, 141, 187, 202, 170, 122, 253, 198, 81, 150, 8, 155, 21, 192, 65, 24, 124, 116,
    98, 110, 106, 137, 90, 165, 239, 80, 13, 222, 30,
];
const VSK_PRIV_ACC_B: [u8; 32] = [
    205, 32, 76, 251, 255, 236, 96, 119, 61, 111, 65, 100, 75, 218, 12, 22, 17, 170, 55, 226, 21,
    154, 161, 34, 208, 74, 27, 1, 119, 13, 88, 128,
];
// Viewing public keys, wrapped in `Secp256k1Point` below; 33 bytes starting
// with 0x02 — presumably SEC1-compressed secp256k1 points (TODO confirm
// encoding against `Secp256k1Point`'s contract).
const VPK_PRIV_ACC_A: [u8; 33] = [
    2, 210, 206, 38, 213, 4, 182, 198, 220, 47, 93, 148, 61, 84, 148, 250, 158, 45, 8, 81, 48, 80,
    46, 230, 87, 210, 47, 204, 76, 58, 214, 167, 81,
];
const VPK_PRIV_ACC_B: [u8; 33] = [
    2, 79, 110, 46, 203, 29, 206, 205, 18, 86, 27, 189, 104, 103, 113, 181, 110, 53, 78, 172, 11,
    171, 190, 18, 126, 214, 81, 77, 192, 154, 58, 195, 238,
];
// Nullifier public keys (wrapped in `NullifierPublicKey` below); the account
// id of each private account is derived from these.
const NPK_PRIV_ACC_A: [u8; 32] = [
    167, 108, 50, 153, 74, 47, 151, 188, 140, 79, 195, 31, 181, 9, 40, 167, 201, 32, 175, 129, 45,
    245, 223, 193, 210, 170, 247, 128, 167, 140, 155, 129,
];
const NPK_PRIV_ACC_B: [u8; 32] = [
    32, 67, 72, 164, 106, 53, 66, 239, 141, 15, 52, 230, 136, 177, 2, 236, 207, 243, 134, 135, 210,
    143, 87, 232, 215, 128, 194, 120, 113, 224, 4, 165,
];
// Placeholder program owner for the genesis private accounts; `initial_state`
// overwrites it with the real authenticated-transfer program id.
const DEFAULT_PROGRAM_OWNER: [u32; 8] = [0, 0, 0, 0, 0, 0, 0, 0];
// Genesis balances for the public (PUB_*) and private (PRIV_*) accounts.
const PUB_ACC_A_INITIAL_BALANCE: u128 = 10000;
const PUB_ACC_B_INITIAL_BALANCE: u128 = 20000;
const PRIV_ACC_A_INITIAL_BALANCE: u128 = 10000;
const PRIV_ACC_B_INITIAL_BALANCE: u128 = 20000;
/// Publicly shareable genesis data for a public account: its id and starting
/// balance. Contains no secret material.
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)]
pub struct PublicAccountPublicInitialData {
    /// Account id derived from the account's signing public key.
    pub account_id: AccountId,
    /// Genesis balance credited to the account.
    pub balance: u128,
}
/// Publicly shareable genesis data for a private account: the nullifier
/// public key and the account contents used to build its genesis commitment.
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)]
pub struct PrivateAccountPublicInitialData {
    /// Nullifier public key the commitment is bound to.
    pub npk: nssa_core::NullifierPublicKey,
    /// Account contents (owner, balance, data, nonce) committed at genesis.
    pub account: nssa_core::account::Account,
}
/// Secret-bearing genesis data for a public account: the signing key a wallet
/// needs to spend from it. Testnet fixture only — not for real funds.
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)]
pub struct PublicAccountPrivateInitialData {
    /// Account id derived from the corresponding public key.
    pub account_id: nssa::AccountId,
    /// Signing private key controlling the account.
    pub pub_sign_key: nssa::PrivateKey,
}
/// Secret-bearing genesis data for a private account: the full key chain plus
/// the committed account contents. Testnet fixture only — not for real funds.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct PrivateAccountPrivateInitialData {
    /// Account id derived from the key chain's nullifier public key.
    pub account_id: nssa::AccountId,
    /// Account contents committed at genesis.
    pub account: nssa_core::account::Account,
    /// Full key material (spending/nullifier/viewing keys) for the account.
    pub key_chain: KeyChain,
}
/// Builds the secret genesis data for the two fixture public accounts: each
/// hard-coded signing key is turned into a `PrivateKey`, and the account id
/// is derived from its public key.
///
/// # Panics
/// Panics if a hard-coded key is rejected by [`PrivateKey::try_new`].
#[must_use]
pub fn initial_pub_accounts_private_keys() -> Vec<PublicAccountPrivateInitialData> {
    [PRIVATE_KEY_PUB_ACC_A, PRIVATE_KEY_PUB_ACC_B]
        .into_iter()
        .map(|key_bytes| {
            let pub_sign_key = PrivateKey::try_new(key_bytes).unwrap();
            let account_id = AccountId::from(&PublicKey::new_from_private_key(&pub_sign_key));
            PublicAccountPrivateInitialData {
                account_id,
                pub_sign_key,
            }
        })
        .collect()
}
/// Builds the secret genesis data for the two fixture private accounts from
/// the hard-coded key material: a full [`KeyChain`] per account, the account
/// id derived from its nullifier public key, and the genesis account contents
/// owned by `DEFAULT_PROGRAM_OWNER`.
#[must_use]
pub fn initial_priv_accounts_private_keys() -> Vec<PrivateAccountPrivateInitialData> {
    // One fixture row per account: (ssk, nsk, vsk, npk, vpk, balance).
    let fixtures = [
        (
            SSK_PRIV_ACC_A,
            NSK_PRIV_ACC_A,
            VSK_PRIV_ACC_A,
            NPK_PRIV_ACC_A,
            VPK_PRIV_ACC_A,
            PRIV_ACC_A_INITIAL_BALANCE,
        ),
        (
            SSK_PRIV_ACC_B,
            NSK_PRIV_ACC_B,
            VSK_PRIV_ACC_B,
            NPK_PRIV_ACC_B,
            VPK_PRIV_ACC_B,
            PRIV_ACC_B_INITIAL_BALANCE,
        ),
    ];
    fixtures
        .into_iter()
        .map(|(ssk, nsk, vsk, npk, vpk, balance)| {
            let key_chain = KeyChain {
                secret_spending_key: SecretSpendingKey(ssk),
                private_key_holder: PrivateKeyHolder {
                    nullifier_secret_key: nsk,
                    viewing_secret_key: vsk,
                },
                nullifier_public_key: NullifierPublicKey(npk),
                viewing_public_key: Secp256k1Point(vpk.to_vec()),
            };
            PrivateAccountPrivateInitialData {
                account_id: AccountId::from(&key_chain.nullifier_public_key),
                account: Account {
                    program_owner: DEFAULT_PROGRAM_OWNER,
                    balance,
                    data: Data::default(),
                    nonce: 0.into(),
                },
                key_chain,
            }
        })
        .collect()
}
/// Projects the private genesis fixtures down to their publicly shareable
/// part: the nullifier public key plus account contents for each account, in
/// the same order as [`initial_priv_accounts_private_keys`].
#[must_use]
pub fn initial_commitments() -> Vec<PrivateAccountPublicInitialData> {
    let mut commitments = Vec::new();
    for data in initial_priv_accounts_private_keys() {
        commitments.push(PrivateAccountPublicInitialData {
            npk: data.key_chain.nullifier_public_key.clone(),
            account: data.account,
        });
    }
    commitments
}
/// Builds the public genesis data for the two fixture public accounts by
/// pairing each derived account id with its genesis balance, in fixture
/// order (A then B).
#[must_use]
pub fn initial_accounts() -> Vec<PublicAccountPublicInitialData> {
    let balances = [PUB_ACC_A_INITIAL_BALANCE, PUB_ACC_B_INITIAL_BALANCE];
    initial_pub_accounts_private_keys()
        .into_iter()
        .zip(balances)
        .map(|(key_data, balance)| PublicAccountPublicInitialData {
            account_id: key_data.account_id,
            balance,
        })
        .collect()
}
/// Assembles the genesis [`V03State`]: commitments for the private fixture
/// accounts (re-owned by the authenticated transfer program) plus the public
/// fixture accounts with their balances.
#[must_use]
pub fn initial_state() -> V03State {
    // The real program owner is only known at build time, so it is patched in
    // here instead of living in the fixture constants.
    let transfer_program_id = nssa::program::Program::authenticated_transfer_program().id();
    let mut commitments: Vec<nssa_core::Commitment> = Vec::new();
    for init_comm_data in initial_commitments() {
        let mut acc = init_comm_data.account;
        acc.program_owner = transfer_program_id;
        commitments.push(nssa_core::Commitment::new(&init_comm_data.npk, &acc));
    }
    let mut genesis_accounts: Vec<(nssa::AccountId, u128)> = Vec::new();
    for acc_data in initial_accounts() {
        genesis_accounts.push((acc_data.account_id, acc_data.balance));
    }
    nssa::V03State::new_with_genesis_accounts(&genesis_accounts, &commitments)
}
/// Genesis state for the testnet: the base [`initial_state`] plus the pinata
/// program registered under its well-known id.
///
/// # Panics
/// Panics if `PINATA_BASE58` fails to parse — that would be a bug in the
/// constant, not a runtime condition.
#[must_use]
pub fn initial_state_testnet() -> V03State {
    let mut state = initial_state();
    // `expect` instead of bare `unwrap`: the constant is compile-time fixed,
    // so a parse failure should name the broken invariant.
    let pinata_id = PINATA_BASE58
        .parse()
        .expect("PINATA_BASE58 must be a valid base58 program id");
    state.add_pinata_program(pinata_id);
    state
}
#[cfg(test)]
mod tests {
    use std::str::FromStr as _;

    use super::*;

    // Expected base58 addresses for the fixture accounts; any change to the
    // hard-coded key material above will be caught here.
    const PUB_ACC_A_TEXT_ADDR: &str = "6iArKUXxhUJqS7kCaPNhwMWt3ro71PDyBj7jwAyE2VQV";
    const PUB_ACC_B_TEXT_ADDR: &str = "7wHg9sbJwc6h3NP1S9bekfAzB8CHifEcxKswCKUt3YQo";
    const PRIV_ACC_A_TEXT_ADDR: &str = "5ya25h4Xc9GAmrGB2WrTEnEWtQKJwRwQx3Xfo2tucNcE";
    const PRIV_ACC_B_TEXT_ADDR: &str = "E8HwiTyQe4H9HK7icTvn95HQMnzx49mP9A2ddtMLpNaN";

    /// The private (key-bearing) and public views of the genesis public
    /// accounts must agree on ids, and match the pinned address/balance pairs.
    #[test]
    fn pub_state_consistency() {
        let init_accs_private_data = initial_pub_accounts_private_keys();
        let init_accs_pub_data = initial_accounts();
        let expected = [
            (PUB_ACC_A_TEXT_ADDR, PUB_ACC_A_INITIAL_BALANCE),
            (PUB_ACC_B_TEXT_ADDR, PUB_ACC_B_INITIAL_BALANCE),
        ];
        for (i, (addr, balance)) in expected.into_iter().enumerate() {
            assert_eq!(
                init_accs_private_data[i].account_id,
                init_accs_pub_data[i].account_id
            );
            assert_eq!(
                init_accs_pub_data[i],
                PublicAccountPublicInitialData {
                    account_id: AccountId::from_str(addr).unwrap(),
                    balance,
                }
            );
        }
    }

    /// Each private-account key chain must be internally consistent (secret
    /// keys re-derivable from the spending key, public keys re-derivable from
    /// the secret keys), and the commitments must match the pinned fixtures.
    #[test]
    fn private_state_consistency() {
        let init_private_accs_keys = initial_priv_accounts_private_keys();
        let init_comms = initial_commitments();
        let expected = [
            (PRIV_ACC_A_TEXT_ADDR, NPK_PRIV_ACC_A, PRIV_ACC_A_INITIAL_BALANCE),
            (PRIV_ACC_B_TEXT_ADDR, NPK_PRIV_ACC_B, PRIV_ACC_B_INITIAL_BALANCE),
        ];
        for (i, (addr, npk_bytes, balance)) in expected.into_iter().enumerate() {
            let key_chain = &init_private_accs_keys[i].key_chain;
            // Secret keys must match what the spending key derives.
            let derived_holder = key_chain.secret_spending_key.produce_private_key_holder(None);
            assert_eq!(
                derived_holder.nullifier_secret_key,
                key_chain.private_key_holder.nullifier_secret_key
            );
            assert_eq!(
                derived_holder.viewing_secret_key,
                key_chain.private_key_holder.viewing_secret_key
            );
            // Public keys must match what the secret keys derive.
            assert_eq!(
                key_chain.private_key_holder.generate_nullifier_public_key(),
                key_chain.nullifier_public_key
            );
            assert_eq!(
                key_chain.private_key_holder.generate_viewing_public_key(),
                key_chain.viewing_public_key
            );
            // Address and commitment fixtures must be pinned.
            assert_eq!(init_private_accs_keys[i].account_id.to_string(), addr);
            assert_eq!(key_chain.nullifier_public_key, init_comms[i].npk);
            assert_eq!(
                init_comms[i],
                PrivateAccountPublicInitialData {
                    npk: NullifierPublicKey(npk_bytes),
                    account: Account {
                        program_owner: DEFAULT_PROGRAM_OWNER,
                        balance,
                        data: Data::default(),
                        nonce: 0.into(),
                    },
                }
            );
        }
    }
}

View File

@ -15,6 +15,7 @@ key_protocol.workspace = true
sequencer_service_rpc = { workspace = true, features = ["client"] }
token_core.workspace = true
amm_core.workspace = true
testnet_initial_state.workspace = true
ata_core.workspace = true
anyhow.workspace = true

View File

@ -99,16 +99,22 @@ impl WalletChainStore {
let mut public_init_acc_map = BTreeMap::new();
let mut private_init_acc_map = BTreeMap::new();
for init_acc_data in config.initial_accounts.clone() {
let initial_accounts = config
.initial_accounts
.clone()
.unwrap_or_else(InitialAccountData::create_initial_accounts_data);
for init_acc_data in initial_accounts {
match init_acc_data {
InitialAccountData::Public(data) => {
public_init_acc_map.insert(data.account_id, data.pub_sign_key);
}
InitialAccountData::Private(data) => {
let mut account = data.account;
// TODO: Program owner is only known after code is compiled and can't be set in
// the config. Therefore we overwrite it here on startup. Fix this when program
// id can be fetched from the node and queried from the wallet.
// TODO: Program owner is only known after code is compiled and can't be set
// in the config. Therefore we overwrite it here on
// startup. Fix this when program id can be fetched
// from the node and queried from the wallet.
account.program_owner = Program::authenticated_transfer_program().id();
private_init_acc_map.insert(data.account_id, (data.key_chain, account));
}
@ -161,45 +167,12 @@ impl WalletChainStore {
#[cfg(test)]
mod tests {
use std::str::FromStr as _;
use key_protocol::key_management::key_tree::{
keys_private::ChildKeysPrivate, keys_public::ChildKeysPublic, traits::KeyNode as _,
};
use nssa::PrivateKey;
use super::*;
use crate::config::{
InitialAccountData, InitialAccountDataPublic, PersistentAccountDataPrivate,
PersistentAccountDataPublic,
};
fn create_initial_accounts() -> Vec<InitialAccountData> {
vec![
InitialAccountData::Public(InitialAccountDataPublic {
account_id: nssa::AccountId::from_str(
"CbgR6tj5kWx5oziiFptM7jMvrQeYY3Mzaao6ciuhSr2r",
)
.unwrap(),
pub_sign_key: PrivateKey::try_new([
127, 39, 48, 152, 242, 91, 113, 230, 192, 5, 169, 81, 159, 38, 120, 218, 141,
28, 127, 1, 246, 162, 119, 120, 226, 217, 148, 138, 189, 249, 1, 251,
])
.unwrap(),
}),
InitialAccountData::Public(InitialAccountDataPublic {
account_id: nssa::AccountId::from_str(
"2RHZhw9h534Zr3eq2RGhQete2Hh667foECzXPmSkGni2",
)
.unwrap(),
pub_sign_key: PrivateKey::try_new([
244, 52, 248, 116, 23, 32, 1, 69, 134, 174, 67, 53, 109, 42, 236, 98, 87, 218,
8, 98, 34, 246, 4, 221, 183, 93, 105, 115, 59, 134, 252, 76,
])
.unwrap(),
}),
]
}
use crate::config::{PersistentAccountDataPrivate, PersistentAccountDataPublic};
fn create_sample_wallet_config() -> WalletConfig {
WalletConfig {
@ -208,8 +181,8 @@ mod tests {
seq_tx_poll_max_blocks: 5,
seq_poll_max_retries: 10,
seq_block_poll_max_amount: 100,
initial_accounts: create_initial_accounts(),
basic_auth: None,
initial_accounts: None,
}
}

View File

@ -4,6 +4,7 @@ use clap::Subcommand;
use crate::{
WalletCore,
cli::{SubcommandReturnValue, WalletSubcommand},
config::InitialAccountData,
};
/// Represents generic config CLI subcommand.
@ -59,7 +60,17 @@ impl WalletSubcommand for ConfigSubcommand {
);
}
"initial_accounts" => {
println!("{:#?}", wallet_core.storage.wallet_config.initial_accounts);
println!(
"{:#?}",
wallet_core
.storage
.wallet_config
.initial_accounts
.clone()
.unwrap_or_else(
InitialAccountData::create_initial_accounts_data
)
);
}
"basic_auth" => {
if let Some(basic_auth) = &wallet_core.storage.wallet_config.basic_auth

View File

@ -8,22 +8,17 @@ use std::{
use anyhow::{Context as _, Result};
use common::config::BasicAuth;
use humantime_serde;
use key_protocol::key_management::{
KeyChain,
key_tree::{
chain_index::ChainIndex, keys_private::ChildKeysPrivate, keys_public::ChildKeysPublic,
},
use key_protocol::key_management::key_tree::{
chain_index::ChainIndex, keys_private::ChildKeysPrivate, keys_public::ChildKeysPublic,
};
use log::warn;
use serde::{Deserialize, Serialize};
use testnet_initial_state::{
PrivateAccountPrivateInitialData, PublicAccountPrivateInitialData,
initial_priv_accounts_private_keys, initial_pub_accounts_private_keys,
};
use url::Url;
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct InitialAccountDataPublic {
pub account_id: nssa::AccountId,
pub pub_sign_key: nssa::PrivateKey,
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct PersistentAccountDataPublic {
pub account_id: nssa::AccountId,
@ -31,13 +26,6 @@ pub struct PersistentAccountDataPublic {
pub data: ChildKeysPublic,
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct InitialAccountDataPrivate {
pub account_id: nssa::AccountId,
pub account: nssa_core::account::Account,
pub key_chain: KeyChain,
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct PersistentAccountDataPrivate {
pub account_id: nssa::AccountId,
@ -50,8 +38,29 @@ pub struct PersistentAccountDataPrivate {
// memory
#[derive(Debug, Clone, Serialize, Deserialize)]
pub enum InitialAccountData {
Public(InitialAccountDataPublic),
Private(Box<InitialAccountDataPrivate>),
Public(PublicAccountPrivateInitialData),
Private(Box<PrivateAccountPrivateInitialData>),
}
impl InitialAccountData {
#[must_use]
pub const fn account_id(&self) -> nssa::AccountId {
match &self {
Self::Public(acc) => acc.account_id,
Self::Private(acc) => acc.account_id,
}
}
pub(crate) fn create_initial_accounts_data() -> Vec<Self> {
let pub_data = initial_pub_accounts_private_keys();
let priv_data = initial_priv_accounts_private_keys();
pub_data
.into_iter()
.map(Into::into)
.chain(priv_data.into_iter().map(Into::into))
.collect()
}
}
// Big difference in enum variants sizes
@ -114,16 +123,6 @@ impl PersistentStorage {
}
}
impl InitialAccountData {
#[must_use]
pub fn account_id(&self) -> nssa::AccountId {
match &self {
Self::Public(acc) => acc.account_id,
Self::Private(acc) => acc.account_id,
}
}
}
impl PersistentAccountData {
#[must_use]
pub fn account_id(&self) -> nssa::AccountId {
@ -135,14 +134,14 @@ impl PersistentAccountData {
}
}
impl From<InitialAccountDataPublic> for InitialAccountData {
fn from(value: InitialAccountDataPublic) -> Self {
impl From<PublicAccountPrivateInitialData> for InitialAccountData {
fn from(value: PublicAccountPrivateInitialData) -> Self {
Self::Public(value)
}
}
impl From<InitialAccountDataPrivate> for InitialAccountData {
fn from(value: InitialAccountDataPrivate) -> Self {
impl From<PrivateAccountPrivateInitialData> for InitialAccountData {
fn from(value: PrivateAccountPrivateInitialData) -> Self {
Self::Private(Box::new(value))
}
}
@ -197,37 +196,15 @@ pub struct WalletConfig {
pub seq_poll_max_retries: u64,
/// Max amount of blocks to poll in one request.
pub seq_block_poll_max_amount: u64,
/// Initial accounts for wallet.
pub initial_accounts: Vec<InitialAccountData>,
/// Basic authentication credentials.
/// Basic authentication credentials
#[serde(skip_serializing_if = "Option::is_none")]
pub basic_auth: Option<BasicAuth>,
#[serde(skip_serializing_if = "Option::is_none")]
pub initial_accounts: Option<Vec<InitialAccountData>>,
}
impl Default for WalletConfig {
fn default() -> Self {
let pub_sign_key1 = nssa::PrivateKey::try_new([
127, 39, 48, 152, 242, 91, 113, 230, 192, 5, 169, 81, 159, 38, 120, 218, 141, 28, 127,
1, 246, 162, 119, 120, 226, 217, 148, 138, 189, 249, 1, 251,
])
.unwrap();
let public_key1 = nssa::PublicKey::new_from_private_key(&pub_sign_key1);
let public_account_id1 = nssa::AccountId::from(&public_key1);
let pub_sign_key2 = nssa::PrivateKey::try_new([
244, 52, 248, 116, 23, 32, 1, 69, 134, 174, 67, 53, 109, 42, 236, 98, 87, 218, 8, 98,
34, 246, 4, 221, 183, 93, 105, 115, 59, 134, 252, 76,
])
.unwrap();
let public_key2 = nssa::PublicKey::new_from_private_key(&pub_sign_key2);
let public_account_id2 = nssa::AccountId::from(&public_key2);
let key_chain1 = KeyChain::new_mnemonic("default_private_account_1".to_owned());
let private_account_id1 = nssa::AccountId::from(&key_chain1.nullifier_public_key);
let key_chain2 = KeyChain::new_mnemonic("default_private_account_2".to_owned());
let private_account_id2 = nssa::AccountId::from(&key_chain2.nullifier_public_key);
Self {
sequencer_addr: "http://127.0.0.1:3040".parse().unwrap(),
seq_poll_timeout: Duration::from_secs(12),
@ -235,32 +212,7 @@ impl Default for WalletConfig {
seq_poll_max_retries: 5,
seq_block_poll_max_amount: 100,
basic_auth: None,
initial_accounts: vec![
InitialAccountData::Public(InitialAccountDataPublic {
account_id: public_account_id1,
pub_sign_key: pub_sign_key1,
}),
InitialAccountData::Public(InitialAccountDataPublic {
account_id: public_account_id2,
pub_sign_key: pub_sign_key2,
}),
InitialAccountData::Private(Box::new(InitialAccountDataPrivate {
account_id: private_account_id1,
account: nssa::Account {
balance: 10_000,
..Default::default()
},
key_chain: key_chain1,
})),
InitialAccountData::Private(Box::new(InitialAccountDataPrivate {
account_id: private_account_id2,
account: nssa::Account {
balance: 20_000,
..Default::default()
},
key_chain: key_chain2,
})),
],
initial_accounts: None,
}
}
}
@ -310,8 +262,8 @@ impl WalletConfig {
seq_tx_poll_max_blocks,
seq_poll_max_retries,
seq_block_poll_max_amount,
initial_accounts,
basic_auth,
initial_accounts,
} = self;
let WalletConfigOverrides {
@ -320,8 +272,8 @@ impl WalletConfig {
seq_tx_poll_max_blocks: o_seq_tx_poll_max_blocks,
seq_poll_max_retries: o_seq_poll_max_retries,
seq_block_poll_max_amount: o_seq_block_poll_max_amount,
initial_accounts: o_initial_accounts,
basic_auth: o_basic_auth,
initial_accounts: o_initial_accounts,
} = overrides;
if let Some(v) = o_sequencer_addr {
@ -344,13 +296,13 @@ impl WalletConfig {
warn!("Overriding wallet config 'seq_block_poll_max_amount' to {v}");
*seq_block_poll_max_amount = v;
}
if let Some(v) = o_initial_accounts {
warn!("Overriding wallet config 'initial_accounts' to {v:#?}");
*initial_accounts = v;
}
if let Some(v) = o_basic_auth {
warn!("Overriding wallet config 'basic_auth' to {v:#?}");
*basic_auth = v;
}
if let Some(v) = o_initial_accounts {
warn!("Overriding wallet config 'initial_accounts' to {v:#?}");
*initial_accounts = v;
}
}
}

View File

@ -7,12 +7,13 @@ use nssa::Account;
use nssa_core::account::Nonce;
use rand::{RngCore as _, rngs::OsRng};
use serde::Serialize;
use testnet_initial_state::{PrivateAccountPrivateInitialData, PublicAccountPrivateInitialData};
use crate::{
HOME_DIR_ENV_VAR,
config::{
InitialAccountData, InitialAccountDataPrivate, InitialAccountDataPublic, Label,
PersistentAccountDataPrivate, PersistentAccountDataPublic, PersistentStorage,
InitialAccountData, Label, PersistentAccountDataPrivate, PersistentAccountDataPublic,
PersistentStorage,
},
};
@ -119,7 +120,7 @@ pub fn produce_data_for_storage(
for (account_id, key) in &user_data.default_pub_account_signing_keys {
vec_for_storage.push(
InitialAccountData::Public(InitialAccountDataPublic {
InitialAccountData::Public(PublicAccountPrivateInitialData {
account_id: *account_id,
pub_sign_key: key.clone(),
})
@ -129,7 +130,7 @@ pub fn produce_data_for_storage(
for (account_id, (key_chain, account)) in &user_data.default_user_private_accounts {
vec_for_storage.push(
InitialAccountData::Private(Box::new(InitialAccountDataPrivate {
InitialAccountData::Private(Box::new(PrivateAccountPrivateInitialData {
account_id: *account_id,
account: account.clone(),
key_chain: key_chain.clone(),