487 lines
16 KiB
Rust
Raw Normal View History

//! This library contains common code for integration tests.
use std::{net::SocketAddr, path::PathBuf, sync::LazyLock};
2025-07-28 16:31:43 +03:00
2026-03-04 18:42:33 +03:00
use anyhow::{Context as _, Result, bail};
use common::{HashType, transaction::NSSATransaction};
use futures::FutureExt as _;
use indexer_service::IndexerHandle;
use log::{debug, error, warn};
use nssa::{AccountId, PrivacyPreservingTransaction};
2025-10-22 13:55:35 +03:00
use nssa_core::Commitment;
2026-03-04 18:42:33 +03:00
use sequencer_core::indexer_client::{IndexerClient, IndexerClientTrait as _};
use sequencer_service::SequencerHandle;
use sequencer_service_rpc::{RpcClient as _, SequencerClient, SequencerClientBuilder};
2025-07-31 14:40:23 +03:00
use tempfile::TempDir;
use testcontainers::compose::DockerCompose;
use wallet::{WalletCore, config::WalletConfigOverrides};
2025-10-22 13:55:35 +03:00
pub mod config;

// TODO: Remove this and control time from tests
pub const TIME_TO_WAIT_FOR_BLOCK_SECONDS: u64 = 12;

/// Filename of the `data_changer` test program binary.
pub const NSSA_PROGRAM_FOR_TEST_DATA_CHANGER: &str = "data_changer.bin";

/// Filename of the `noop` test program binary.
pub const NSSA_PROGRAM_FOR_TEST_NOOP: &str = "noop.bin";

/// Docker-compose service name of the Bedrock node whose port is exposed to the host.
const BEDROCK_SERVICE_WITH_OPEN_PORT: &str = "logos-blockchain-node-0";
/// Port the Bedrock service listens on inside its container.
const BEDROCK_SERVICE_PORT: u16 = 18080;

// Dereferenced once per test-context setup so `env_logger::init` runs exactly once
// even when many contexts are created across parallel tests.
static LOGGER: LazyLock<()> = LazyLock::new(env_logger::init);
/// Test context which sets up a sequencer and a wallet for integration tests.
///
/// It's memory and logically safe to create multiple instances of this struct in parallel tests,
/// as each instance uses its own temporary directories for sequencer and wallet data.
// NOTE: Order of fields is important for proper drop order.
pub struct TestContext {
    // Clients are declared first so they are dropped before the services they talk to.
    sequencer_client: SequencerClient,
    indexer_client: IndexerClient,
    wallet: WalletCore,
    // Password used to initialize the wallet storage; exposed via `wallet_password()`.
    wallet_password: String,
    /// Optional to move out value in Drop.
    sequencer_handle: Option<SequencerHandle>,
    indexer_handle: IndexerHandle,
    bedrock_compose: DockerCompose,
    // Temp dirs are declared last: they must outlive every service/handle above
    // that stores data inside them, and are removed only on drop.
    _temp_indexer_dir: TempDir,
    _temp_sequencer_dir: TempDir,
    _temp_wallet_dir: TempDir,
}
impl TestContext {
    /// Create new test context.
    pub async fn new() -> Result<Self> {
        Self::builder().build().await
    }

    /// Start building a [`TestContext`] with non-default configuration.
    #[must_use]
    pub const fn builder() -> TestContextBuilder {
        TestContextBuilder::new()
    }

    /// Bring up the full test stack in dependency order — Bedrock node (docker
    /// compose), indexer, sequencer, wallet — and connect RPC clients to the
    /// running services.
    async fn new_configured(
        sequencer_partial_config: config::SequencerPartialConfig,
        initial_data: config::InitialData,
    ) -> Result<Self> {
        // Ensure logger is initialized only once
        *LOGGER;
        debug!("Test context setup");
        let (bedrock_compose, bedrock_addr) = Self::setup_bedrock_node().await?;
        let (indexer_handle, temp_indexer_dir) = Self::setup_indexer(bedrock_addr, &initial_data)
            .await
            .context("Failed to setup Indexer")?;
        let (sequencer_handle, temp_sequencer_dir) = Self::setup_sequencer(
            sequencer_partial_config,
            bedrock_addr,
            indexer_handle.addr(),
            &initial_data,
        )
        .await
        .context("Failed to setup Sequencer")?;
        let (wallet, temp_wallet_dir, wallet_password) =
            Self::setup_wallet(sequencer_handle.addr(), &initial_data)
                .await
                .context("Failed to setup wallet")?;
        // Sequencer is reached over HTTP; the indexer client connects over WebSocket.
        let sequencer_url = config::addr_to_url(config::UrlProtocol::Http, sequencer_handle.addr())
            .context("Failed to convert sequencer addr to URL")?;
        let indexer_url = config::addr_to_url(config::UrlProtocol::Ws, indexer_handle.addr())
            .context("Failed to convert indexer addr to URL")?;
        let sequencer_client = SequencerClientBuilder::default()
            .build(sequencer_url)
            .context("Failed to create sequencer client")?;
        let indexer_client = IndexerClient::new(&indexer_url)
            .await
            .context("Failed to create indexer client")?;
        Ok(Self {
            sequencer_client,
            indexer_client,
            wallet,
            wallet_password,
            bedrock_compose,
            sequencer_handle: Some(sequencer_handle),
            indexer_handle,
            _temp_indexer_dir: temp_indexer_dir,
            _temp_sequencer_dir: temp_sequencer_dir,
            _temp_wallet_dir: temp_wallet_dir,
        })
    }

    /// Start the Bedrock node via docker compose and return the compose handle
    /// together with the host address mapped to the node's container port.
    async fn setup_bedrock_node() -> Result<(DockerCompose, SocketAddr)> {
        let manifest_dir = env!("CARGO_MANIFEST_DIR");
        let bedrock_compose_path =
            PathBuf::from(manifest_dir).join("../bedrock/docker-compose.yml");
        let mut compose = DockerCompose::with_auto_client(&[bedrock_compose_path])
            .await
            .context("Failed to setup docker compose for Bedrock")?
            // Setting port to 0 to avoid conflicts between parallel tests, actual port will be retrieved after container is up
            .with_env("PORT", "0");

        // Bring the compose project up and resolve which host port docker bound
        // to the Bedrock container port.
        #[expect(
            clippy::items_after_statements,
            reason = "This is more readable is this function used just after its definition"
        )]
        async fn up_and_retrieve_port(compose: &mut DockerCompose) -> Result<u16> {
            compose
                .up()
                .await
                .context("Failed to bring up Bedrock services")?;
            let container = compose
                .service(BEDROCK_SERVICE_WITH_OPEN_PORT)
                .with_context(|| {
                    format!(
                        "Failed to get Bedrock service container `{BEDROCK_SERVICE_WITH_OPEN_PORT}`"
                    )
                })?;
            let ports = container.ports().await.with_context(|| {
                format!(
                    "Failed to get ports for Bedrock service container `{}`",
                    container.id()
                )
            })?;
            ports
                .map_to_host_port_ipv4(BEDROCK_SERVICE_PORT)
                .with_context(|| {
                    format!(
                        "Failed to retrieve host port of {BEDROCK_SERVICE_PORT} container \
                        port for container `{}`, existing ports: {ports:?}",
                        container.id()
                    )
                })
        }

        // Startup can fail transiently (e.g. docker races between parallel tests),
        // so retry a bounded number of times before giving up.
        let mut port = None;
        let mut attempt = 0_u32;
        let max_attempts = 5_u32;
        while port.is_none() && attempt < max_attempts {
            attempt = attempt
                .checked_add(1)
                .expect("We check that attempt < max_attempts, so this won't overflow");
            match up_and_retrieve_port(&mut compose).await {
                Ok(p) => {
                    port = Some(p);
                }
                Err(err) => {
                    warn!(
                        "Failed to bring up Bedrock services: {err:?}, attempt {attempt}/{max_attempts}"
                    );
                }
            }
        }
        let Some(port) = port else {
            bail!("Failed to bring up Bedrock services after {max_attempts} attempts");
        };
        let addr = SocketAddr::from(([127, 0, 0, 1], port));
        Ok((compose, addr))
    }

    /// Start the indexer service pointed at the Bedrock node, using a fresh
    /// temporary directory as its home.
    async fn setup_indexer(
        bedrock_addr: SocketAddr,
        initial_data: &config::InitialData,
    ) -> Result<(IndexerHandle, TempDir)> {
        let temp_indexer_dir =
            tempfile::tempdir().context("Failed to create temp dir for indexer home")?;
        debug!(
            "Using temp indexer home at {}",
            temp_indexer_dir.path().display()
        );
        let indexer_config = config::indexer_config(
            bedrock_addr,
            temp_indexer_dir.path().to_owned(),
            initial_data,
        )
        .context("Failed to create Indexer config")?;
        // NOTE(review): second argument appears to be the listen port
        // (0 = OS-assigned, avoiding clashes between parallel tests) — confirm.
        indexer_service::run_server(indexer_config, 0)
            .await
            .context("Failed to run Indexer Service")
            .map(|handle| (handle, temp_indexer_dir))
    }

    /// Start the sequencer service wired to the Bedrock node and indexer,
    /// using a fresh temporary directory as its home.
    async fn setup_sequencer(
        partial: config::SequencerPartialConfig,
        bedrock_addr: SocketAddr,
        indexer_addr: SocketAddr,
        initial_data: &config::InitialData,
    ) -> Result<(SequencerHandle, TempDir)> {
        let temp_sequencer_dir =
            tempfile::tempdir().context("Failed to create temp dir for sequencer home")?;
        debug!(
            "Using temp sequencer home at {}",
            temp_sequencer_dir.path().display()
        );
        let config = config::sequencer_config(
            partial,
            temp_sequencer_dir.path().to_owned(),
            bedrock_addr,
            indexer_addr,
            initial_data,
        )
        .context("Failed to create Sequencer config")?;
        // NOTE(review): second argument appears to be the listen port
        // (0 = OS-assigned) — confirm against `sequencer_service::run`.
        let sequencer_handle = sequencer_service::run(config, 0).await?;
        Ok((sequencer_handle, temp_sequencer_dir))
    }

    /// Create a wallet configured against the sequencer; its config and storage
    /// files are written into a fresh temporary directory.
    ///
    /// Returns the wallet, the temp dir keeping its files alive, and the
    /// password used for storage initialization.
    async fn setup_wallet(
        sequencer_addr: SocketAddr,
        initial_data: &config::InitialData,
    ) -> Result<(WalletCore, TempDir, String)> {
        let config = config::wallet_config(sequencer_addr, initial_data)
            .context("Failed to create Wallet config")?;
        let config_serialized =
            serde_json::to_string_pretty(&config).context("Failed to serialize Wallet config")?;
        let temp_wallet_dir =
            tempfile::tempdir().context("Failed to create temp dir for wallet home")?;
        let config_path = temp_wallet_dir.path().join("wallet_config.json");
        std::fs::write(&config_path, config_serialized)
            .context("Failed to write wallet config in temp dir")?;
        let storage_path = temp_wallet_dir.path().join("storage.json");
        let config_overrides = WalletConfigOverrides::default();
        let wallet_password = "test_pass".to_owned();
        // The recovery mnemonic is intentionally discarded: this is a throwaway
        // test wallet that never needs to be restored.
        let (wallet, _mnemonic) = WalletCore::new_init_storage(
            config_path,
            storage_path,
            Some(config_overrides),
            &wallet_password,
        )
        .context("Failed to init wallet")?;
        wallet
            .store_persistent_data()
            .await
            .context("Failed to store wallet persistent data")?;
        Ok((wallet, temp_wallet_dir, wallet_password))
    }

    /// Get reference to the wallet.
    #[must_use]
    pub const fn wallet(&self) -> &WalletCore {
        &self.wallet
    }

    /// Get the password the test wallet storage was initialized with.
    #[must_use]
    pub fn wallet_password(&self) -> &str {
        &self.wallet_password
    }

    /// Get mutable reference to the wallet.
    pub const fn wallet_mut(&mut self) -> &mut WalletCore {
        &mut self.wallet
    }

    /// Get reference to the sequencer client.
    #[must_use]
    pub const fn sequencer_client(&self) -> &SequencerClient {
        &self.sequencer_client
    }

    /// Get reference to the indexer client.
    #[must_use]
    pub const fn indexer_client(&self) -> &IndexerClient {
        &self.indexer_client
    }

    /// Get existing public account IDs in the wallet.
    #[must_use]
    pub fn existing_public_accounts(&self) -> Vec<AccountId> {
        self.wallet
            .storage()
            .user_data
            .public_account_ids()
            .collect()
    }

    /// Get existing private account IDs in the wallet.
    #[must_use]
    pub fn existing_private_accounts(&self) -> Vec<AccountId> {
        self.wallet
            .storage()
            .user_data
            .private_account_ids()
            .collect()
    }
}
impl Drop for TestContext {
    // Health-checks all services on teardown so a test that "passed" while a
    // service silently died still surfaces an error in the logs.
    fn drop(&mut self) {
        // Exhaustive destructuring: adding a field to TestContext without
        // considering it here becomes a compile error.
        let Self {
            sequencer_handle,
            indexer_handle,
            bedrock_compose,
            _temp_indexer_dir: _,
            _temp_sequencer_dir: _,
            _temp_wallet_dir: _,
            sequencer_client: _,
            indexer_client: _,
            wallet: _,
            wallet_password: _,
        } = self;
        let sequencer_handle = sequencer_handle
            .take()
            .expect("Sequencer handle should be present in TestContext drop");
        if !sequencer_handle.is_healthy() {
            // `now_or_never` polls the failure future exactly once without awaiting;
            // if the sequencer stopped, its error must already be available.
            let Err(err) = sequencer_handle
                .failed()
                .now_or_never()
                .expect("Sequencer handle should not be running");
            error!(
                "Sequencer handle has unexpectedly stopped before TestContext drop with error: {err:#}"
            );
        }
        if !indexer_handle.is_healthy() {
            error!("Indexer handle has unexpectedly stopped before TestContext drop");
        }
        // Best-effort liveness check of the Bedrock container via `docker inspect`
        // before the compose handle (and the container) is torn down.
        let container = bedrock_compose
            .service(BEDROCK_SERVICE_WITH_OPEN_PORT)
            .unwrap_or_else(|| {
                panic!("Failed to get Bedrock service container `{BEDROCK_SERVICE_WITH_OPEN_PORT}`")
            });
        let output = std::process::Command::new("docker")
            .args(["inspect", "-f", "{{.State.Running}}", container.id()])
            .output()
            .expect("Failed to execute docker inspect command to check if Bedrock container is still running");
        let stdout = String::from_utf8(output.stdout)
            .expect("Failed to parse docker inspect output as String");
        if stdout.trim() != "true" {
            error!(
                "Bedrock container `{}` is not running during TestContext drop, docker inspect output: {stdout}",
                container.id()
            );
        }
    }
}
2026-03-10 00:17:43 +03:00
/// A test context to be used in normal #[test] tests.
///
/// Wraps [`TestContext`] together with a dedicated Tokio runtime so that
/// synchronous tests can drive the async setup and teardown.
pub struct BlockingTestContext {
    // Optional so Drop can move the context out and drop it inside the runtime.
    ctx: Option<TestContext>,
    // Runtime used to block on async setup and on the context's teardown.
    runtime: tokio::runtime::Runtime,
}
impl BlockingTestContext {
pub fn new() -> Result<Self> {
let runtime = tokio::runtime::Runtime::new().unwrap();
let ctx = runtime.block_on(TestContext::new())?;
Ok(Self {
ctx: Some(ctx),
runtime,
})
}
2026-03-09 18:27:56 +03:00
pub const fn ctx(&self) -> &TestContext {
self.ctx.as_ref().expect("TestContext is set")
}
}
/// Builder for [`TestContext`] allowing parts of the default setup to be overridden.
pub struct TestContextBuilder {
    // When `None`, `build` falls back to two public and two private initialized accounts.
    initial_data: Option<config::InitialData>,
    // When `None`, `build` falls back to `SequencerPartialConfig::default()`.
    sequencer_partial_config: Option<config::SequencerPartialConfig>,
}
impl TestContextBuilder {
2026-03-09 18:27:56 +03:00
const fn new() -> Self {
Self {
initial_data: None,
sequencer_partial_config: None,
}
}
2026-03-03 23:21:08 +03:00
#[must_use]
pub fn with_initial_data(mut self, initial_data: config::InitialData) -> Self {
self.initial_data = Some(initial_data);
self
}
2026-03-03 23:21:08 +03:00
#[must_use]
2026-03-09 18:27:56 +03:00
pub const fn with_sequencer_partial_config(
mut self,
sequencer_partial_config: config::SequencerPartialConfig,
) -> Self {
self.sequencer_partial_config = Some(sequencer_partial_config);
self
}
pub async fn build(self) -> Result<TestContext> {
TestContext::new_configured(
self.sequencer_partial_config.unwrap_or_default(),
self.initial_data.unwrap_or_else(|| {
config::InitialData::with_two_public_and_two_private_initialized_accounts()
}),
)
.await
}
}
impl Drop for BlockingTestContext {
fn drop(&mut self) {
let Self { ctx, runtime } = self;
// Ensure async cleanup of TestContext by blocking on its drop in the runtime.
runtime.block_on(async {
if let Some(ctx) = ctx.take() {
drop(ctx);
}
2026-03-03 23:21:08 +03:00
});
}
}
2026-03-03 23:21:08 +03:00
/// Render a public account ID as `Public/<id>` for display purposes in tests.
#[must_use]
pub fn format_public_account_id(account_id: AccountId) -> String {
    let mut formatted = String::from("Public/");
    formatted.push_str(&account_id.to_string());
    formatted
}
2025-07-30 14:01:40 +03:00
2026-03-03 23:21:08 +03:00
#[must_use]
pub fn format_private_account_id(account_id: AccountId) -> String {
format!("Private/{account_id}")
2025-07-25 19:42:29 +03:00
}
2025-10-03 15:59:27 -03:00
2026-03-04 18:42:33 +03:00
#[expect(
clippy::wildcard_enum_match_arm,
reason = "We want the code to panic if the transaction type is not PrivacyPreserving"
)]
pub async fn fetch_privacy_preserving_tx(
2025-10-03 15:59:27 -03:00
seq_client: &SequencerClient,
tx_hash: HashType,
2025-10-03 15:59:27 -03:00
) -> PrivacyPreservingTransaction {
let tx = seq_client.get_transaction(tx_hash).await.unwrap().unwrap();
2025-10-03 15:59:27 -03:00
match tx {
2025-10-03 15:59:27 -03:00
NSSATransaction::PrivacyPreserving(privacy_preserving_transaction) => {
privacy_preserving_transaction
}
_ => panic!("Invalid tx type"),
}
}
pub async fn verify_commitment_is_in_state(
2025-10-03 15:59:27 -03:00
commitment: Commitment,
seq_client: &SequencerClient,
) -> bool {
seq_client
.get_proof_for_commitment(commitment)
.await
2026-03-14 03:20:37 +03:00
.ok()
.flatten()
.is_some()
2025-10-03 15:59:27 -03:00
}