Merge branch 'main' into schouhy/generalize-npk-to-multiple-accounts

Sergio Chouhy 2026-04-29 12:27:43 -03:00
commit 72756e8622
28 changed files with 1355 additions and 273 deletions

Cargo.lock generated

@ -1111,7 +1111,7 @@ dependencies = [
"log",
"num",
"pin-project-lite",
"rand 0.9.2",
"rand 0.9.3",
"rustls",
"rustls-native-certs",
"rustls-pki-types",
@ -2506,7 +2506,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "bb330bbd4cb7a5b9f559427f06f98a4f853a137c8298f3bd3f8ca57663e21986"
dependencies = [
"portable-atomic",
"rand 0.9.2",
"rand 0.9.3",
"web-time",
]
@ -3478,6 +3478,16 @@ dependencies = [
"url",
]
[[package]]
name = "indexer_ffi"
version = "0.1.0"
dependencies = [
"cbindgen",
"indexer_service",
"log",
"tokio",
]
[[package]]
name = "indexer_service"
version = "0.1.0"
@ -3589,6 +3599,7 @@ dependencies = [
"env_logger",
"futures",
"hex",
"indexer_ffi",
"indexer_service",
"indexer_service_rpc",
"key_protocol",
@ -3838,7 +3849,7 @@ dependencies = [
"jsonrpsee-types",
"parking_lot",
"pin-project",
"rand 0.9.2",
"rand 0.9.3",
"rustc-hash",
"serde",
"serde_json",
@ -4055,7 +4066,7 @@ dependencies = [
"oco_ref",
"or_poisoned",
"paste",
"rand 0.9.2",
"rand 0.9.3",
"reactive_graph",
"rustc-hash",
"rustc_version",
@ -5717,7 +5728,7 @@ dependencies = [
"futures-util",
"opentelemetry",
"percent-encoding",
"rand 0.9.2",
"rand 0.9.3",
"thiserror 2.0.18",
"tokio",
"tokio-stream",
@ -6101,7 +6112,7 @@ checksum = "37566cb3fdacef14c0737f9546df7cfeadbfbc9fef10991038bf5015d0c80532"
dependencies = [
"bitflags 2.11.0",
"num-traits",
"rand 0.9.2",
"rand 0.9.3",
"rand_chacha 0.9.0",
"rand_xorshift",
"unarray",
@ -6214,7 +6225,7 @@ dependencies = [
"bytes",
"getrandom 0.3.4",
"lru-slab",
"rand 0.9.2",
"rand 0.9.3",
"ring",
"rustc-hash",
"rustls",
@ -6302,9 +6313,9 @@ dependencies = [
[[package]]
name = "rand"
version = "0.9.2"
version = "0.9.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6db2770f06117d490610c7488547d543617b21bfa07796d7a12f6f1bd53850d1"
checksum = "7ec095654a25171c2124e9e3393a930bddbffdc939556c914957a4c3e0a87166"
dependencies = [
"rand_chacha 0.9.0",
"rand_core 0.9.5",
@ -6606,7 +6617,7 @@ dependencies = [
"elf",
"lazy_static",
"postcard",
"rand 0.9.2",
"rand 0.9.3",
"risc0-zkp",
"risc0-zkvm-platform",
"ruint",
@ -6702,7 +6713,7 @@ dependencies = [
"hex",
"lazy-regex",
"metal",
"rand 0.9.2",
"rand 0.9.3",
"rayon",
"risc0-circuit-recursion-sys",
"risc0-core",
@ -6746,7 +6757,7 @@ dependencies = [
"num-traits",
"paste",
"postcard",
"rand 0.9.2",
"rand 0.9.3",
"rayon",
"ringbuffer",
"risc0-binfmt",
@ -6853,7 +6864,7 @@ dependencies = [
"ndarray",
"parking_lot",
"paste",
"rand 0.9.2",
"rand 0.9.3",
"rand_core 0.9.5",
"rayon",
"risc0-core",
@ -6891,7 +6902,7 @@ dependencies = [
"num-traits",
"object",
"prost 0.13.5",
"rand 0.9.2",
"rand 0.9.3",
"rayon",
"risc0-binfmt",
"risc0-build",
@ -6979,7 +6990,7 @@ dependencies = [
"futures",
"light-poseidon",
"quote",
"rand 0.9.2",
"rand 0.9.3",
"syn 1.0.109",
"thiserror 2.0.18",
"tiny-keccak",
@ -7030,7 +7041,7 @@ dependencies = [
"borsh",
"proptest",
"rand 0.8.5",
"rand 0.9.2",
"rand 0.9.3",
"ruint-macro",
"serde_core",
"valuable",
@ -8722,7 +8733,7 @@ dependencies = [
"http",
"httparse",
"log",
"rand 0.9.2",
"rand 0.9.3",
"sha1",
"thiserror 2.0.18",
"utf-8",


@ -38,6 +38,7 @@ members = [
"examples/program_deployment/methods/guest",
"bedrock_client",
"testnet_initial_state",
"indexer_ffi",
]
[workspace.dependencies]
@ -57,6 +58,7 @@ indexer_service_protocol = { path = "indexer/service/protocol" }
indexer_service_rpc = { path = "indexer/service/rpc" }
wallet = { path = "wallet" }
wallet-ffi = { path = "wallet-ffi", default-features = false }
indexer_ffi = { path = "indexer_ffi" }
clock_core = { path = "programs/clock/core" }
token_core = { path = "programs/token/core" }
token_program = { path = "programs/token" }


@ -3,7 +3,7 @@ use nssa::AccountId;
use crate::{
HashType,
block::{Block, HashableBlockData},
transaction::NSSATransaction,
transaction::{NSSATransaction, clock_invocation},
};
// Helpers
@ -15,7 +15,7 @@ pub fn sequencer_sign_key_for_testing() -> nssa::PrivateKey {
// Dummy producers
/// Produce dummy block with.
/// Produce dummy block with the provided transactions plus a clock transaction at the end.
///
/// `id` - block id, provide zero for genesis.
///
@ -26,8 +26,12 @@ pub fn sequencer_sign_key_for_testing() -> nssa::PrivateKey {
pub fn produce_dummy_block(
id: u64,
prev_hash: Option<HashType>,
transactions: Vec<NSSATransaction>,
mut transactions: Vec<NSSATransaction>,
) -> Block {
transactions.push(NSSATransaction::Public(clock_invocation(
id.saturating_mul(100),
)));
let block_data = HashableBlockData {
block_id: id,
prev_block_hash: prev_hash.unwrap_or_default(),
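The helper now appends the clock transaction itself, using the `timestamp = block_id * 100` convention, so test call sites only pass their own transactions. A minimal sketch of a call site under that assumption (`block_id`, `prev_hash`, and `tx` come from the existing test helpers):

// Before this change, call sites had to append the clock transaction themselves:
// let clock_tx = NSSATransaction::Public(clock_invocation(block_id.saturating_mul(100)));
// let block = common::test_utils::produce_dummy_block(block_id, Some(prev_hash), vec![tx, clock_tx]);
// Now produce_dummy_block pushes clock_invocation(block_id * 100) onto the end on its own:
let block = common::test_utils::produce_dummy_block(block_id, Some(prev_hash), vec![tx]);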

flake.lock generated

@ -2,11 +2,11 @@
"nodes": {
"crane": {
"locked": {
"lastModified": 1769737823,
"narHash": "sha256-DrBaNpZ+sJ4stXm+0nBX7zqZT9t9P22zbk6m5YhQxS4=",
"lastModified": 1776396856,
"narHash": "sha256-aRJpIJUlZLaf06ekPvqjuU46zvO9K90IxJGpbqodkPs=",
"owner": "ipetkov",
"repo": "crane",
"rev": "b2f45c3830aa96b7456a4c4bc327d04d7a43e1ba",
"rev": "28462d6d55c33206ffa5a56c7907ca3125ed788f",
"type": "github"
},
"original": {
@ -20,11 +20,11 @@
"nixpkgs": "nixpkgs"
},
"locked": {
"lastModified": 1770979891,
"narHash": "sha256-cvkVnE7btuFLzv70ORAZve9K1Huiplq0iECgXSXb0ZY=",
"lastModified": 1775835011,
"narHash": "sha256-SQDLyyRUa5J9QHjNiHbeZw4rQOZnTEo61TcaUpjtLBs=",
"owner": "logos-blockchain",
"repo": "logos-blockchain-circuits",
"rev": "ec7d298e5a3a0507bb8570df86cdf78dc452d024",
"rev": "d6cf41f66500d4afc157b4f43de0f0d5bfa01443",
"type": "github"
},
"original": {
@ -51,11 +51,11 @@
},
"nixpkgs_2": {
"locked": {
"lastModified": 1770019141,
"narHash": "sha256-VKS4ZLNx4PNrABoB0L8KUpc1fE7CLpQXQs985tGfaCU=",
"lastModified": 1776169885,
"narHash": "sha256-l/iNYDZ4bGOAFQY2q8y5OAfBBtrDAaPuRQqWaFHVRXM=",
"owner": "NixOS",
"repo": "nixpkgs",
"rev": "cb369ef2efd432b3cdf8622b0ffc0a97a02f3137",
"rev": "4bd9165a9165d7b5e33ae57f3eecbcb28fb231c9",
"type": "github"
},
"original": {
@ -80,11 +80,11 @@
]
},
"locked": {
"lastModified": 1770088046,
"narHash": "sha256-4hfYDnUTvL1qSSZEA4CEThxfz+KlwSFQ30Z9jgDguO0=",
"lastModified": 1776395632,
"narHash": "sha256-Mi1uF5f2FsdBIvy+v7MtsqxD3Xjhd0ARJdwoqqqPtJo=",
"owner": "oxalica",
"repo": "rust-overlay",
"rev": "71f9daa4e05e49c434d08627e755495ae222bc34",
"rev": "8087ff1f47fff983a1fba70fa88b759f2fd8ae97",
"type": "github"
},
"original": {


@ -130,9 +130,26 @@
'';
}
);
indexerFfiPackage = craneLib.buildPackage (
commonArgs
// {
pname = "logos-execution-zone-indexer-ffi";
version = "0.1.0";
cargoExtraArgs = "-p indexer_ffi";
postInstall = ''
mkdir -p $out/include
cp indexer_ffi/indexer_ffi.h $out/include/
''
+ pkgs.lib.optionalString pkgs.stdenv.isDarwin ''
install_name_tool -id @rpath/libindexer_ffi.dylib $out/lib/libindexer_ffi.dylib
'';
}
);
in
{
wallet = walletFfiPackage;
indexer = indexerFfiPackage;
default = walletFfiPackage;
}
);
@ -144,9 +161,14 @@
walletFfiShell = pkgs.mkShell {
inputsFrom = [ walletFfiPackage ];
};
indexerFfiPackage = self.packages.${system}.indexer;
indexerFfiShell = pkgs.mkShell {
inputsFrom = [ indexerFfiPackage ];
};
in
{
wallet = walletFfiShell;
indexer = indexerFfiShell;
default = walletFfiShell;
}
);


@ -243,14 +243,9 @@ mod tests {
&sign_key,
);
let block_id = u64::try_from(i).unwrap();
let block_timestamp = block_id.saturating_mul(100);
let clock_tx = NSSATransaction::Public(clock_invocation(block_timestamp));
let next_block = common::test_utils::produce_dummy_block(
block_id,
Some(prev_hash),
vec![tx, clock_tx],
);
let next_block =
common::test_utils::produce_dummy_block(block_id, Some(prev_hash), vec![tx]);
prev_hash = next_block.header.hash;
storage


@ -144,23 +144,27 @@ impl IndexerCore {
l2_blocks_parsed_ids.sort_unstable();
info!("Parsed {} L2 blocks with ids {:?}", l2_block_vec.len(), l2_blocks_parsed_ids);
for l2_block in l2_block_vec {
// TODO: proper fix is to make the sequencer's genesis include a
// trailing `clock_invocation(0)` (and have the indexer's
// `open_db_with_genesis` not pre-apply state transitions) so the
// inscribed genesis can flow through `put_block` like any other
// block. For now we skip re-applying it.
//
// The channel-start (block_id == 1) is the sequencer's genesis
// inscription that we re-discover during initial search. The
// indexer already has its own locally-constructed genesis in
// the store from `open_db_with_genesis`, so re-applying the
// inscribed copy is both redundant and would fail the strict
// block validation in `put_block` (the inscribed genesis lacks
// the trailing clock invocation).
if l2_block.header.block_id != 1 {
self.store.put_block(l2_block.clone(), l1_header).await?;
}
for l2_block in l2_block_vec {
// TODO: proper fix is to make the sequencer's genesis include a
// trailing `clock_invocation(0)` (and have the indexer's
// `open_db_with_genesis` not pre-apply state transitions) so the
// inscribed genesis can flow through `put_block` like any other
// block. For now we skip re-applying it.
//
// The channel-start (block_id == 1) is the sequencer's genesis
// inscription that we re-discover during initial search. The
// indexer already has its own locally-constructed genesis in
// the store from `open_db_with_genesis`, so re-applying the
// inscribed copy is both redundant and would fail the strict
// block validation in `put_block` (the inscribed genesis lacks
// the trailing clock invocation).
if l2_block.header.block_id != 1 {
self
.store
.put_block(l2_block.clone(), l1_header)
.await
.inspect_err(|err| error!("Failed to put block with err {err:?}"))?;
}
yield Ok(l2_block);
}

indexer_ffi/Cargo.toml Normal file

@ -0,0 +1,25 @@
[package]
edition = "2024"
license = { workspace = true }
name = "indexer_ffi"
version = "0.1.0"
[dependencies]
indexer_service.workspace = true
log = { workspace = true }
tokio = { features = ["rt-multi-thread"], workspace = true }
[build-dependencies]
cbindgen = "0.29"
[lib]
crate-type = ["rlib", "cdylib", "staticlib"]
name = "indexer_ffi"
[lints]
workspace = true
[package.metadata.cargo-machete]
ignored = [
"cbindgen",
] # machete does not recognize this as a build dependency and complains.

indexer_ffi/build.rs Normal file

@ -0,0 +1,12 @@
use std::env;
fn main() {
let crate_dir = env::var("CARGO_MANIFEST_DIR").unwrap();
println!("cargo:rerun-if-changed=src/");
cbindgen::Builder::new()
.with_crate(crate_dir)
.with_language(cbindgen::Language::C)
.generate()
.expect("Unable to generate bindings")
.write_to_file("indexer_ffi.h");
}


@ -0,0 +1,2 @@
language = "C" # For increased compatibility
no_includes = true

indexer_ffi/indexer_ffi.h Normal file

@ -0,0 +1,76 @@
#include <stdarg.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdlib.h>
typedef enum OperationStatus {
Ok = 0,
NullPointer = 1,
InitializationError = 2,
} OperationStatus;
typedef struct IndexerServiceFFI {
void *indexer_handle;
void *runtime;
} IndexerServiceFFI;
/**
* Simple wrapper around a pointer to a value or an error.
*
* The pointer is not guaranteed to be valid. Check the error field before
* dereferencing it.
*/
typedef struct PointerResult_IndexerServiceFFI__OperationStatus {
struct IndexerServiceFFI *value;
enum OperationStatus error;
} PointerResult_IndexerServiceFFI__OperationStatus;
typedef struct PointerResult_IndexerServiceFFI__OperationStatus InitializedIndexerServiceFFIResult;
/**
* Creates and starts an indexer based on the provided
* configuration file path.
*
* # Arguments
*
* - `config_path`: A pointer to a string representing the path to the configuration file.
* - `port`: The port on which the indexer's RPC server will listen.
*
* # Returns
*
* An `InitializedIndexerServiceFFIResult` containing either a pointer to the
* initialized `IndexerServiceFFI` or an error code.
*/
InitializedIndexerServiceFFIResult start_indexer(const char *config_path, uint16_t port);
/**
* Stops and frees the resources associated with the given indexer service.
*
* # Arguments
*
* - `indexer`: A pointer to the `IndexerServiceFFI` instance to be stopped.
*
* # Returns
*
* An `OperationStatus` indicating success or failure.
*
* # Safety
*
* The caller must ensure that:
* - `indexer` is a valid pointer to an `IndexerServiceFFI` instance
* - The `IndexerServiceFFI` instance was created by this library
* - The pointer will not be used after this function returns
*/
enum OperationStatus stop_indexer(struct IndexerServiceFFI *indexer);
/**
* # Safety
* It is up to the caller to pass a valid pointer. If this is called from the
* C/C++ side with a pointer that did not come from a returned `CString`, it
* will cause a segfault.
*/
void free_cstring(char *block);
bool is_ok(const enum OperationStatus *self);
bool is_error(const enum OperationStatus *self);


@ -0,0 +1,100 @@
use std::{ffi::c_char, path::PathBuf};
use tokio::runtime::Runtime;
use crate::{IndexerServiceFFI, api::PointerResult, errors::OperationStatus};
pub type InitializedIndexerServiceFFIResult = PointerResult<IndexerServiceFFI, OperationStatus>;
/// Creates and starts an indexer based on the provided
/// configuration file path.
///
/// # Arguments
///
/// - `config_path`: A pointer to a string representing the path to the configuration file.
/// - `port`: The port on which the indexer's RPC server will listen.
///
/// # Returns
///
/// An `InitializedIndexerServiceFFIResult` containing either a pointer to the
/// initialized `IndexerServiceFFI` or an error code.
#[unsafe(no_mangle)]
pub extern "C" fn start_indexer(
config_path: *const c_char,
port: u16,
) -> InitializedIndexerServiceFFIResult {
setup_indexer(config_path, port).map_or_else(
InitializedIndexerServiceFFIResult::from_error,
InitializedIndexerServiceFFIResult::from_value,
)
}
/// Initializes and starts an indexer based on the provided
/// configuration file path.
///
/// # Arguments
///
/// - `config_path`: A pointer to a string representing the path to the configuration file.
/// - `port`: The port on which the indexer's RPC server will listen.
///
/// # Returns
///
/// A `Result` containing either the initialized `IndexerServiceFFI` or an
/// error code.
fn setup_indexer(
config_path: *const c_char,
port: u16,
) -> Result<IndexerServiceFFI, OperationStatus> {
let user_config_path = PathBuf::from(
unsafe { std::ffi::CStr::from_ptr(config_path) }
.to_str()
.map_err(|e| {
log::error!("Could not convert the config path to string: {e}");
OperationStatus::InitializationError
})?,
);
let config = indexer_service::IndexerConfig::from_path(&user_config_path).map_err(|e| {
log::error!("Failed to read config: {e}");
OperationStatus::InitializationError
})?;
let rt = Runtime::new().unwrap();
let indexer_handle = rt
.block_on(indexer_service::run_server(config, port))
.map_err(|e| {
log::error!("Could not start indexer service: {e}");
OperationStatus::InitializationError
})?;
Ok(IndexerServiceFFI::new(indexer_handle, rt))
}
/// Stops and frees the resources associated with the given indexer service.
///
/// # Arguments
///
/// - `indexer`: A pointer to the `IndexerServiceFFI` instance to be stopped.
///
/// # Returns
///
/// An `OperationStatus` indicating success or failure.
///
/// # Safety
///
/// The caller must ensure that:
/// - `indexer` is a valid pointer to an `IndexerServiceFFI` instance
/// - The `IndexerServiceFFI` instance was created by this library
/// - The pointer will not be used after this function returns
#[unsafe(no_mangle)]
pub unsafe extern "C" fn stop_indexer(indexer: *mut IndexerServiceFFI) -> OperationStatus {
if indexer.is_null() {
log::error!("Attempted to stop a null indexer pointer. This is a bug. Aborting.");
return OperationStatus::NullPointer;
}
let indexer = unsafe { Box::from_raw(indexer) };
drop(indexer);
OperationStatus::Ok
}
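Because the exported functions are ordinary `pub extern "C"` items, Rust callers can exercise the same lifecycle directly, while C callers go through the generated header. A minimal hedged sketch (the config path is a placeholder; any valid indexer config file works):

use std::ffi::CString;
use indexer_ffi::api::lifecycle;

fn run_indexer_once() {
    // Hypothetical path to an indexer config file.
    let path = CString::new("/tmp/indexer_config.json").unwrap();
    // Port 0 asks the service to pick a free port, as the integration tests do.
    let res = lifecycle::start_indexer(path.as_ptr(), 0);
    // The pointer is only meaningful when the error field reports success.
    assert!(res.error.is_ok());
    // ... drive the indexer through its RPC while it runs ...
    // Hand the pointer back exactly once; stop_indexer re-boxes and drops it.
    let status = unsafe { lifecycle::stop_indexer(res.value) };
    assert!(status.is_ok());
}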


@ -0,0 +1,14 @@
use std::ffi::{CString, c_char};
/// # Safety
/// It is up to the caller to pass a valid pointer. If this is called from the
/// C/C++ side with a pointer that did not come from a returned `CString`, it
/// will cause a segfault.
#[unsafe(no_mangle)]
pub unsafe extern "C" fn free_cstring(block: *mut c_char) {
if block.is_null() {
log::error!("Trying to free a null pointer. Exiting");
return;
}
drop(unsafe { CString::from_raw(block) });
}


@ -0,0 +1,5 @@
pub use result::PointerResult;
pub mod lifecycle;
pub mod memory;
pub mod result;


@ -0,0 +1,29 @@
/// Simple wrapper around a pointer to a value or an error.
///
/// The pointer is not guaranteed to be valid. Check the error field before
/// dereferencing it.
#[repr(C)]
pub struct PointerResult<Type, Error> {
pub value: *mut Type,
pub error: Error,
}
impl<Type, Error: Default> PointerResult<Type, Error> {
pub fn from_pointer(pointer: *mut Type) -> Self {
Self {
value: pointer,
error: Error::default(),
}
}
pub fn from_value(value: Type) -> Self {
Self::from_pointer(Box::into_raw(Box::new(value)))
}
pub const fn from_error(error: Error) -> Self {
Self {
value: std::ptr::null_mut(),
error,
}
}
}
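The two constructors encode the convention described in the doc comment: a success carries a boxed value behind a raw pointer plus the `Default` (success) error value, and a failure carries a null pointer plus the error code. A short sketch under that assumption:

use indexer_ffi::{OperationStatus, api::PointerResult};

fn pointer_result_sketch() {
    // Success: the value is boxed and ownership crosses the boundary as a raw pointer.
    let ok: PointerResult<u32, OperationStatus> = PointerResult::from_value(7);
    assert!(!ok.value.is_null() && ok.error.is_ok());
    // Reclaim and free the boxed value exactly once on the Rust side.
    drop(unsafe { Box::from_raw(ok.value) });
    // Failure: no allocation is made; only the error code travels across the boundary.
    let err: PointerResult<u32, OperationStatus> = PointerResult::from_error(OperationStatus::NullPointer);
    assert!(err.value.is_null() && err.error.is_error());
}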

indexer_ffi/src/errors.rs Normal file

@ -0,0 +1,22 @@
#[derive(Debug, Default, PartialEq, Eq)]
#[repr(C)]
pub enum OperationStatus {
#[default]
Ok = 0x0,
NullPointer = 0x1,
InitializationError = 0x2,
}
impl OperationStatus {
#[must_use]
#[unsafe(no_mangle)]
pub extern "C" fn is_ok(&self) -> bool {
*self == Self::Ok
}
#[must_use]
#[unsafe(no_mangle)]
pub extern "C" fn is_error(&self) -> bool {
!self.is_ok()
}
}
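`Ok` is the `Default` variant, which is what `PointerResult::from_value` relies on when it fills in the error field of a successful result; every other variant reports as an error. A tiny sketch:

use indexer_ffi::OperationStatus;

fn operation_status_sketch() {
    assert!(OperationStatus::default().is_ok());
    assert!(OperationStatus::InitializationError.is_error());
}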


@ -0,0 +1,86 @@
use std::{ffi::c_void, net::SocketAddr};
use indexer_service::IndexerHandle;
use tokio::runtime::Runtime;
#[repr(C)]
pub struct IndexerServiceFFI {
indexer_handle: *mut c_void,
runtime: *mut c_void,
}
impl IndexerServiceFFI {
pub fn new(indexer_handle: indexer_service::IndexerHandle, runtime: Runtime) -> Self {
Self {
// Box the complex types and convert to opaque pointers
indexer_handle: Box::into_raw(Box::new(indexer_handle)).cast::<c_void>(),
runtime: Box::into_raw(Box::new(runtime)).cast::<c_void>(),
}
}
/// Helper to take ownership back.
///
/// # Safety
///
/// The caller must ensure that:
/// - `self` is a valid object (all of its fields contain valid pointers)
#[must_use]
pub unsafe fn into_parts(self) -> (Box<IndexerHandle>, Box<Runtime>) {
let indexer_handle = unsafe { Box::from_raw(self.indexer_handle.cast::<IndexerHandle>()) };
let runtime = unsafe { Box::from_raw(self.runtime.cast::<Runtime>()) };
// Forget `self` so its `Drop` impl does not free the same pointers a second time.
std::mem::forget(self);
(indexer_handle, runtime)
}
/// Helper to get indexer handle addr.
///
/// # Safety
///
/// The caller must ensure that:
/// - `self` is a valid object (all of its fields contain valid pointers)
#[must_use]
pub const unsafe fn addr(&self) -> SocketAddr {
let indexer_handle = unsafe {
self.indexer_handle
.cast::<IndexerHandle>()
.as_ref()
.expect("Indexer Handle must be non-null pointer")
};
indexer_handle.addr()
}
/// Helper to get a reference to the indexer handle.
///
/// # Safety
///
/// The caller must ensure that:
/// - `self` is a valid object (all of its fields contain valid pointers)
#[must_use]
pub const unsafe fn handle(&self) -> &IndexerHandle {
unsafe {
self.indexer_handle
.cast::<IndexerHandle>()
.as_ref()
.expect("Indexer Handle must be non-null pointer")
}
}
}
// Implement Drop to prevent memory leaks
impl Drop for IndexerServiceFFI {
fn drop(&mut self) {
let Self {
indexer_handle,
runtime,
} = self;
if indexer_handle.is_null() {
log::error!("Attempted to drop a null indexer pointer. This is a bug");
}
if runtime.is_null() {
log::error!("Attempted to drop a null tokio runtime pointer. This is a bug");
}
drop(unsafe { Box::from_raw(indexer_handle.cast::<IndexerHandle>()) });
drop(unsafe { Box::from_raw(runtime.cast::<Runtime>()) });
}
}
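The wrapper owns its handle and runtime only through opaque pointers, so pairing `new` with either `into_parts` or `Drop` is what keeps allocations balanced. A hedged sketch of the intended round trip, assuming a handle and runtime obtained the same way `setup_indexer` obtains them:

use indexer_ffi::IndexerServiceFFI;
use indexer_service::IndexerHandle;
use tokio::runtime::Runtime;

fn ownership_round_trip(indexer_handle: IndexerHandle, runtime: Runtime) {
    // `new` boxes both values and erases them to opaque *mut c_void pointers.
    let ffi = IndexerServiceFFI::new(indexer_handle, runtime);
    // SAFETY: `ffi` was produced by `new`, so both pointers are valid exactly once.
    let (handle, runtime) = unsafe { ffi.into_parts() };
    // The caller owns the boxes again; dropping them tears the service down once.
    drop(handle);
    drop(runtime);
}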

indexer_ffi/src/lib.rs Normal file

@ -0,0 +1,8 @@
#![allow(clippy::undocumented_unsafe_blocks, reason = "It is an FFI")]
pub use errors::OperationStatus;
pub use indexer::IndexerServiceFFI;
pub mod api;
mod errors;
mod indexer;


@ -22,6 +22,7 @@ ata_core.workspace = true
indexer_service_rpc.workspace = true
sequencer_service_rpc = { workspace = true, features = ["client"] }
wallet-ffi.workspace = true
indexer_ffi.workspace = true
testnet_initial_state.workspace = true
url.workspace = true


@ -1,12 +1,12 @@
//! This library contains common code for integration tests.
use std::{net::SocketAddr, path::PathBuf, sync::LazyLock};
use std::sync::LazyLock;
use anyhow::{Context as _, Result, bail};
use anyhow::{Context as _, Result};
use common::{HashType, transaction::NSSATransaction};
use futures::FutureExt as _;
use indexer_service::IndexerHandle;
use log::{debug, error, warn};
use log::{debug, error};
use nssa::{AccountId, PrivacyPreservingTransaction};
use nssa_core::Commitment;
use sequencer_core::indexer_client::{IndexerClient, IndexerClientTrait as _};
@ -14,9 +14,13 @@ use sequencer_service::SequencerHandle;
use sequencer_service_rpc::{RpcClient as _, SequencerClient, SequencerClientBuilder};
use tempfile::TempDir;
use testcontainers::compose::DockerCompose;
use wallet::{WalletCore, config::WalletConfigOverrides};
use wallet::WalletCore;
use crate::setup::{setup_bedrock_node, setup_indexer, setup_sequencer, setup_wallet};
pub mod config;
pub mod setup;
pub mod test_context_ffi;
// TODO: Remove this and control time from tests
pub const TIME_TO_WAIT_FOR_BLOCK_SECONDS: u64 = 12;
@ -67,13 +71,13 @@ impl TestContext {
debug!("Test context setup");
let (bedrock_compose, bedrock_addr) = Self::setup_bedrock_node().await?;
let (bedrock_compose, bedrock_addr) = setup_bedrock_node().await?;
let (indexer_handle, temp_indexer_dir) = Self::setup_indexer(bedrock_addr, &initial_data)
let (indexer_handle, temp_indexer_dir) = setup_indexer(bedrock_addr, &initial_data)
.await
.context("Failed to setup Indexer")?;
let (sequencer_handle, temp_sequencer_dir) = Self::setup_sequencer(
let (sequencer_handle, temp_sequencer_dir) = setup_sequencer(
sequencer_partial_config,
bedrock_addr,
indexer_handle.addr(),
@ -83,7 +87,7 @@ impl TestContext {
.context("Failed to setup Sequencer")?;
let (wallet, temp_wallet_dir, wallet_password) =
Self::setup_wallet(sequencer_handle.addr(), &initial_data)
setup_wallet(sequencer_handle.addr(), &initial_data)
.await
.context("Failed to setup wallet")?;
@ -112,165 +116,6 @@ impl TestContext {
})
}
async fn setup_bedrock_node() -> Result<(DockerCompose, SocketAddr)> {
let manifest_dir = env!("CARGO_MANIFEST_DIR");
let bedrock_compose_path =
PathBuf::from(manifest_dir).join("../bedrock/docker-compose.yml");
let mut compose = DockerCompose::with_auto_client(&[bedrock_compose_path])
.await
.context("Failed to setup docker compose for Bedrock")?
// Setting port to 0 to avoid conflicts between parallel tests, actual port will be retrieved after container is up
.with_env("PORT", "0");
#[expect(
clippy::items_after_statements,
reason = "This is more readable is this function used just after its definition"
)]
async fn up_and_retrieve_port(compose: &mut DockerCompose) -> Result<u16> {
compose
.up()
.await
.context("Failed to bring up Bedrock services")?;
let container = compose
.service(BEDROCK_SERVICE_WITH_OPEN_PORT)
.with_context(|| {
format!(
"Failed to get Bedrock service container `{BEDROCK_SERVICE_WITH_OPEN_PORT}`"
)
})?;
let ports = container.ports().await.with_context(|| {
format!(
"Failed to get ports for Bedrock service container `{}`",
container.id()
)
})?;
ports
.map_to_host_port_ipv4(BEDROCK_SERVICE_PORT)
.with_context(|| {
format!(
"Failed to retrieve host port of {BEDROCK_SERVICE_PORT} container \
port for container `{}`, existing ports: {ports:?}",
container.id()
)
})
}
let mut port = None;
let mut attempt = 0_u32;
let max_attempts = 5_u32;
while port.is_none() && attempt < max_attempts {
attempt = attempt
.checked_add(1)
.expect("We check that attempt < max_attempts, so this won't overflow");
match up_and_retrieve_port(&mut compose).await {
Ok(p) => {
port = Some(p);
}
Err(err) => {
warn!(
"Failed to bring up Bedrock services: {err:?}, attempt {attempt}/{max_attempts}"
);
}
}
}
let Some(port) = port else {
bail!("Failed to bring up Bedrock services after {max_attempts} attempts");
};
let addr = SocketAddr::from(([127, 0, 0, 1], port));
Ok((compose, addr))
}
async fn setup_indexer(
bedrock_addr: SocketAddr,
initial_data: &config::InitialData,
) -> Result<(IndexerHandle, TempDir)> {
let temp_indexer_dir =
tempfile::tempdir().context("Failed to create temp dir for indexer home")?;
debug!(
"Using temp indexer home at {}",
temp_indexer_dir.path().display()
);
let indexer_config = config::indexer_config(
bedrock_addr,
temp_indexer_dir.path().to_owned(),
initial_data,
)
.context("Failed to create Indexer config")?;
indexer_service::run_server(indexer_config, 0)
.await
.context("Failed to run Indexer Service")
.map(|handle| (handle, temp_indexer_dir))
}
async fn setup_sequencer(
partial: config::SequencerPartialConfig,
bedrock_addr: SocketAddr,
indexer_addr: SocketAddr,
initial_data: &config::InitialData,
) -> Result<(SequencerHandle, TempDir)> {
let temp_sequencer_dir =
tempfile::tempdir().context("Failed to create temp dir for sequencer home")?;
debug!(
"Using temp sequencer home at {}",
temp_sequencer_dir.path().display()
);
let config = config::sequencer_config(
partial,
temp_sequencer_dir.path().to_owned(),
bedrock_addr,
indexer_addr,
initial_data,
)
.context("Failed to create Sequencer config")?;
let sequencer_handle = sequencer_service::run(config, 0).await?;
Ok((sequencer_handle, temp_sequencer_dir))
}
async fn setup_wallet(
sequencer_addr: SocketAddr,
initial_data: &config::InitialData,
) -> Result<(WalletCore, TempDir, String)> {
let config = config::wallet_config(sequencer_addr, initial_data)
.context("Failed to create Wallet config")?;
let config_serialized =
serde_json::to_string_pretty(&config).context("Failed to serialize Wallet config")?;
let temp_wallet_dir =
tempfile::tempdir().context("Failed to create temp dir for wallet home")?;
let config_path = temp_wallet_dir.path().join("wallet_config.json");
std::fs::write(&config_path, config_serialized)
.context("Failed to write wallet config in temp dir")?;
let storage_path = temp_wallet_dir.path().join("storage.json");
let config_overrides = WalletConfigOverrides::default();
let wallet_password = "test_pass".to_owned();
let (wallet, _mnemonic) = WalletCore::new_init_storage(
config_path,
storage_path,
Some(config_overrides),
&wallet_password,
)
.context("Failed to init wallet")?;
wallet
.store_persistent_data()
.await
.context("Failed to store wallet persistent data")?;
Ok((wallet, temp_wallet_dir, wallet_password))
}
/// Get reference to the wallet.
#[must_use]
pub const fn wallet(&self) -> &WalletCore {


@ -0,0 +1,220 @@
use std::{
ffi::{CString, c_char},
fs::File,
io::Write as _,
net::SocketAddr,
path::PathBuf,
};
use anyhow::{Context as _, Result, bail};
use indexer_ffi::{IndexerServiceFFI, api::lifecycle::InitializedIndexerServiceFFIResult};
use indexer_service::IndexerHandle;
use log::{debug, warn};
use sequencer_service::SequencerHandle;
use tempfile::TempDir;
use testcontainers::compose::DockerCompose;
use wallet::{WalletCore, config::WalletConfigOverrides};
use crate::{BEDROCK_SERVICE_PORT, BEDROCK_SERVICE_WITH_OPEN_PORT, config};
unsafe extern "C" {
fn start_indexer(config_path: *const c_char, port: u16) -> InitializedIndexerServiceFFIResult;
}
pub(crate) async fn setup_bedrock_node() -> Result<(DockerCompose, SocketAddr)> {
let manifest_dir = env!("CARGO_MANIFEST_DIR");
let bedrock_compose_path = PathBuf::from(manifest_dir).join("../bedrock/docker-compose.yml");
let mut compose = DockerCompose::with_auto_client(&[bedrock_compose_path])
.await
.context("Failed to setup docker compose for Bedrock")?
// Setting port to 0 to avoid conflicts between parallel tests, actual port will be retrieved after container is up
.with_env("PORT", "0");
#[expect(
clippy::items_after_statements,
reason = "This is more readable is this function used just after its definition"
)]
async fn up_and_retrieve_port(compose: &mut DockerCompose) -> Result<u16> {
compose
.up()
.await
.context("Failed to bring up Bedrock services")?;
let container = compose
.service(BEDROCK_SERVICE_WITH_OPEN_PORT)
.with_context(|| {
format!(
"Failed to get Bedrock service container `{BEDROCK_SERVICE_WITH_OPEN_PORT}`"
)
})?;
let ports = container.ports().await.with_context(|| {
format!(
"Failed to get ports for Bedrock service container `{}`",
container.id()
)
})?;
ports
.map_to_host_port_ipv4(BEDROCK_SERVICE_PORT)
.with_context(|| {
format!(
"Failed to retrieve host port of {BEDROCK_SERVICE_PORT} container \
port for container `{}`, existing ports: {ports:?}",
container.id()
)
})
}
let mut port = None;
let mut attempt = 0_u32;
let max_attempts = 5_u32;
while port.is_none() && attempt < max_attempts {
attempt = attempt
.checked_add(1)
.expect("We check that attempt < max_attempts, so this won't overflow");
match up_and_retrieve_port(&mut compose).await {
Ok(p) => {
port = Some(p);
}
Err(err) => {
warn!(
"Failed to bring up Bedrock services: {err:?}, attempt {attempt}/{max_attempts}"
);
}
}
}
let Some(port) = port else {
bail!("Failed to bring up Bedrock services after {max_attempts} attempts");
};
let addr = SocketAddr::from(([127, 0, 0, 1], port));
Ok((compose, addr))
}
pub(crate) async fn setup_indexer(
bedrock_addr: SocketAddr,
initial_data: &config::InitialData,
) -> Result<(IndexerHandle, TempDir)> {
let temp_indexer_dir =
tempfile::tempdir().context("Failed to create temp dir for indexer home")?;
debug!(
"Using temp indexer home at {}",
temp_indexer_dir.path().display()
);
let indexer_config = config::indexer_config(
bedrock_addr,
temp_indexer_dir.path().to_owned(),
initial_data,
)
.context("Failed to create Indexer config")?;
indexer_service::run_server(indexer_config, 0)
.await
.context("Failed to run Indexer Service")
.map(|handle| (handle, temp_indexer_dir))
}
pub(crate) async fn setup_sequencer(
partial: config::SequencerPartialConfig,
bedrock_addr: SocketAddr,
indexer_addr: SocketAddr,
initial_data: &config::InitialData,
) -> Result<(SequencerHandle, TempDir)> {
let temp_sequencer_dir =
tempfile::tempdir().context("Failed to create temp dir for sequencer home")?;
debug!(
"Using temp sequencer home at {}",
temp_sequencer_dir.path().display()
);
let config = config::sequencer_config(
partial,
temp_sequencer_dir.path().to_owned(),
bedrock_addr,
indexer_addr,
initial_data,
)
.context("Failed to create Sequencer config")?;
let sequencer_handle = sequencer_service::run(config, 0).await?;
Ok((sequencer_handle, temp_sequencer_dir))
}
pub(crate) async fn setup_wallet(
sequencer_addr: SocketAddr,
initial_data: &config::InitialData,
) -> Result<(WalletCore, TempDir, String)> {
let config = config::wallet_config(sequencer_addr, initial_data)
.context("Failed to create Wallet config")?;
let config_serialized =
serde_json::to_string_pretty(&config).context("Failed to serialize Wallet config")?;
let temp_wallet_dir =
tempfile::tempdir().context("Failed to create temp dir for wallet home")?;
let config_path = temp_wallet_dir.path().join("wallet_config.json");
std::fs::write(&config_path, config_serialized)
.context("Failed to write wallet config in temp dir")?;
let storage_path = temp_wallet_dir.path().join("storage.json");
let config_overrides = WalletConfigOverrides::default();
let wallet_password = "test_pass".to_owned();
let (wallet, _mnemonic) = WalletCore::new_init_storage(
config_path,
storage_path,
Some(config_overrides),
&wallet_password,
)
.context("Failed to init wallet")?;
wallet
.store_persistent_data()
.await
.context("Failed to store wallet persistent data")?;
Ok((wallet, temp_wallet_dir, wallet_password))
}
pub(crate) fn setup_indexer_ffi(
bedrock_addr: SocketAddr,
initial_data: &config::InitialData,
) -> Result<(IndexerServiceFFI, TempDir)> {
let temp_indexer_dir =
tempfile::tempdir().context("Failed to create temp dir for indexer home")?;
debug!(
"Using temp indexer home at {}",
temp_indexer_dir.path().display()
);
let indexer_config = config::indexer_config(
bedrock_addr,
temp_indexer_dir.path().to_owned(),
initial_data,
)
.context("Failed to create Indexer config")?;
let config_json = serde_json::to_vec(&indexer_config)?;
let config_path = temp_indexer_dir.path().join("indexer_config.json");
let mut file = File::create(config_path.as_path())?;
file.write_all(&config_json)?;
file.flush()?;
let res =
// SAFETY: the CString temporary outlives this statement, so start_indexer receives a valid NUL-terminated path.
unsafe { start_indexer(CString::new(config_path.to_str().unwrap())?.as_ptr(), 0) };
if res.error.is_error() {
anyhow::bail!("Indexer FFI error {:?}", res.error);
}
Ok((
// SAFETY: lib function ensures validity of value.
unsafe { std::ptr::read(res.value) },
temp_indexer_dir,
))
}


@ -0,0 +1,296 @@
use std::sync::Arc;
use anyhow::{Context as _, Result};
use futures::FutureExt as _;
use indexer_ffi::IndexerServiceFFI;
use indexer_service_rpc::RpcClient as _;
use log::{debug, error};
use nssa::AccountId;
use sequencer_core::indexer_client::{IndexerClient, IndexerClientTrait as _};
use sequencer_service::SequencerHandle;
use sequencer_service_rpc::{RpcClient as _, SequencerClient, SequencerClientBuilder};
use tempfile::TempDir;
use testcontainers::compose::DockerCompose;
use wallet::WalletCore;
use crate::{
BEDROCK_SERVICE_WITH_OPEN_PORT, LOGGER, TestContextBuilder, config,
setup::{setup_bedrock_node, setup_indexer_ffi, setup_sequencer, setup_wallet},
};
/// Test context which sets up a sequencer, an indexer (through FFI), and a wallet for integration tests.
///
/// It is memory-safe and logically safe to create multiple instances of this struct in parallel tests,
/// as each instance uses its own temporary directories for sequencer and wallet data.
// NOTE: Order of fields is important for proper drop order.
pub struct TestContextFFI {
sequencer_client: SequencerClient,
indexer_client: IndexerClient,
wallet: WalletCore,
wallet_password: String,
/// Wrapped in `Option` so the value can be moved out in `Drop`.
sequencer_handle: Option<SequencerHandle>,
bedrock_compose: DockerCompose,
_temp_indexer_dir: TempDir,
_temp_sequencer_dir: TempDir,
_temp_wallet_dir: TempDir,
}
#[expect(
clippy::multiple_inherent_impl,
reason = "It is more natural to have this implementation here"
)]
impl TestContextBuilder {
pub fn build_ffi(
self,
runtime: &Arc<tokio::runtime::Runtime>,
) -> Result<(TestContextFFI, IndexerServiceFFI)> {
TestContextFFI::new_configured(
self.sequencer_partial_config.unwrap_or_default(),
&self.initial_data.unwrap_or_else(|| {
config::InitialData::with_two_public_and_two_private_initialized_accounts()
}),
runtime,
)
}
}
impl TestContextFFI {
/// Create new test context.
pub fn new(runtime: &Arc<tokio::runtime::Runtime>) -> Result<(Self, IndexerServiceFFI)> {
Self::builder().build_ffi(runtime)
}
#[must_use]
pub const fn builder() -> TestContextBuilder {
TestContextBuilder::new()
}
fn new_configured(
sequencer_partial_config: config::SequencerPartialConfig,
initial_data: &config::InitialData,
runtime: &Arc<tokio::runtime::Runtime>,
) -> Result<(Self, IndexerServiceFFI)> {
// Ensure logger is initialized only once
*LOGGER;
debug!("Test context setup");
let (bedrock_compose, bedrock_addr) = runtime.block_on(setup_bedrock_node())?;
let (indexer_ffi, temp_indexer_dir) =
setup_indexer_ffi(bedrock_addr, initial_data).context("Failed to setup Indexer")?;
let (sequencer_handle, temp_sequencer_dir) = runtime
.block_on(setup_sequencer(
sequencer_partial_config,
bedrock_addr,
// SAFETY: addr is valid if indexer_ffi is valid.
unsafe { indexer_ffi.addr() },
initial_data,
))
.context("Failed to setup Sequencer")?;
let (wallet, temp_wallet_dir, wallet_password) = runtime
.block_on(setup_wallet(sequencer_handle.addr(), initial_data))
.context("Failed to setup wallet")?;
let sequencer_url = config::addr_to_url(config::UrlProtocol::Http, sequencer_handle.addr())
.context("Failed to convert sequencer addr to URL")?;
let indexer_url = config::addr_to_url(
config::UrlProtocol::Ws,
// SAFETY: addr is valid if indexer_ffi is valid.
unsafe { indexer_ffi.addr() },
)
.context("Failed to convert indexer addr to URL")?;
let sequencer_client = SequencerClientBuilder::default()
.build(sequencer_url)
.context("Failed to create sequencer client")?;
let indexer_client = runtime
.block_on(IndexerClient::new(&indexer_url))
.context("Failed to create indexer client")?;
Ok((
Self {
sequencer_client,
indexer_client,
wallet,
wallet_password,
bedrock_compose,
sequencer_handle: Some(sequencer_handle),
_temp_indexer_dir: temp_indexer_dir,
_temp_sequencer_dir: temp_sequencer_dir,
_temp_wallet_dir: temp_wallet_dir,
},
indexer_ffi,
))
}
/// Get reference to the wallet.
#[must_use]
pub const fn wallet(&self) -> &WalletCore {
&self.wallet
}
#[must_use]
pub fn wallet_password(&self) -> &str {
&self.wallet_password
}
/// Get mutable reference to the wallet.
pub const fn wallet_mut(&mut self) -> &mut WalletCore {
&mut self.wallet
}
/// Get reference to the sequencer client.
#[must_use]
pub const fn sequencer_client(&self) -> &SequencerClient {
&self.sequencer_client
}
/// Get reference to the indexer client.
#[must_use]
pub const fn indexer_client(&self) -> &IndexerClient {
&self.indexer_client
}
/// Get existing public account IDs in the wallet.
#[must_use]
pub fn existing_public_accounts(&self) -> Vec<AccountId> {
self.wallet
.storage()
.user_data
.public_account_ids()
.collect()
}
/// Get existing private account IDs in the wallet.
#[must_use]
pub fn existing_private_accounts(&self) -> Vec<AccountId> {
self.wallet
.storage()
.user_data
.private_account_ids()
.collect()
}
pub fn get_last_block_sequencer(&self, runtime: &Arc<tokio::runtime::Runtime>) -> Result<u64> {
Ok(runtime.block_on(self.sequencer_client.get_last_block_id())?)
}
pub fn get_last_block_indexer(&self, runtime: &Arc<tokio::runtime::Runtime>) -> Result<u64> {
Ok(runtime.block_on(self.indexer_client.get_last_finalized_block_id())?)
}
}
impl Drop for TestContextFFI {
fn drop(&mut self) {
let Self {
sequencer_handle,
bedrock_compose,
_temp_indexer_dir: _,
_temp_sequencer_dir: _,
_temp_wallet_dir: _,
sequencer_client: _,
indexer_client: _,
wallet: _,
wallet_password: _,
} = self;
let sequencer_handle = sequencer_handle
.take()
.expect("Sequencer handle should be present in TestContext drop");
if !sequencer_handle.is_healthy() {
let Err(err) = sequencer_handle
.failed()
.now_or_never()
.expect("Sequencer handle should not be running");
error!(
"Sequencer handle has unexpectedly stopped before TestContext drop with error: {err:#}"
);
}
let container = bedrock_compose
.service(BEDROCK_SERVICE_WITH_OPEN_PORT)
.unwrap_or_else(|| {
panic!("Failed to get Bedrock service container `{BEDROCK_SERVICE_WITH_OPEN_PORT}`")
});
let output = std::process::Command::new("docker")
.args(["inspect", "-f", "{{.State.Running}}", container.id()])
.output()
.expect("Failed to execute docker inspect command to check if Bedrock container is still running");
let stdout = String::from_utf8(output.stdout)
.expect("Failed to parse docker inspect output as String");
if stdout.trim() != "true" {
error!(
"Bedrock container `{}` is not running during TestContext drop, docker inspect output: {stdout}",
container.id()
);
}
}
}
/// A test context using the FFI indexer, to be used in normal #[test] tests.
pub struct BlockingTestContextFFI {
ctx: Option<TestContextFFI>,
runtime: Arc<tokio::runtime::Runtime>,
indexer_ffi: IndexerServiceFFI,
}
impl BlockingTestContextFFI {
pub fn new() -> Result<Self> {
let runtime = tokio::runtime::Runtime::new().unwrap();
let runtime_wrapped = Arc::new(runtime);
let (ctx, indexer_ffi) = TestContextFFI::new(&runtime_wrapped)?;
Ok(Self {
ctx: Some(ctx),
runtime: runtime_wrapped,
indexer_ffi,
})
}
#[must_use]
pub const fn ctx(&self) -> &TestContextFFI {
self.ctx.as_ref().expect("TestContext is set")
}
#[must_use]
pub const fn ctx_mut(&mut self) -> &mut TestContextFFI {
self.ctx.as_mut().expect("TestContext is set")
}
#[must_use]
pub const fn runtime(&self) -> &Arc<tokio::runtime::Runtime> {
&self.runtime
}
#[must_use]
pub fn runtime_clone(&self) -> Arc<tokio::runtime::Runtime> {
Arc::<tokio::runtime::Runtime>::clone(&self.runtime)
}
}
impl Drop for BlockingTestContextFFI {
fn drop(&mut self) {
let Self {
ctx,
runtime,
indexer_ffi,
} = self;
// Ensure async cleanup of TestContext by blocking on its drop in the runtime.
runtime.block_on(async {
if let Some(ctx) = ctx.take() {
drop(ctx);
}
});
let indexer_handle =
// SAFETY: lib function ensures validity of value.
unsafe { indexer_ffi.handle() };
if !indexer_handle.is_healthy() {
error!("Indexer handle has unexpectedly stopped before TestContext drop");
}
}
}


@ -14,7 +14,6 @@ use integration_tests::{
};
use log::info;
use nssa::AccountId;
use tokio::test;
use wallet::cli::{Command, programs::native_token_transfer::AuthTransferSubcommand};
/// Maximum time to wait for the indexer to catch up to the sequencer.
@ -53,7 +52,7 @@ async fn wait_for_indexer_to_catch_up(ctx: &TestContext) -> u64 {
})
}
#[test]
#[tokio::test]
async fn indexer_test_run() -> Result<()> {
let ctx = TestContext::new().await?;
@ -70,7 +69,7 @@ async fn indexer_test_run() -> Result<()> {
Ok(())
}
#[test]
#[tokio::test]
async fn indexer_block_batching() -> Result<()> {
let ctx = TestContext::new().await?;
@ -101,7 +100,7 @@ async fn indexer_block_batching() -> Result<()> {
Ok(())
}
#[test]
#[tokio::test]
async fn indexer_state_consistency() -> Result<()> {
let mut ctx = TestContext::new().await?;
@ -206,7 +205,7 @@ async fn indexer_state_consistency() -> Result<()> {
Ok(())
}
#[test]
#[tokio::test]
async fn indexer_state_consistency_with_labels() -> Result<()> {
let mut ctx = TestContext::new().await?;


@ -0,0 +1,284 @@
#![expect(
clippy::shadow_unrelated,
clippy::tests_outside_test_module,
reason = "We don't care about these in tests"
)]
use anyhow::{Context as _, Result};
use indexer_service_rpc::RpcClient as _;
use integration_tests::{
TIME_TO_WAIT_FOR_BLOCK_SECONDS, format_private_account_id, format_public_account_id,
test_context_ffi::BlockingTestContextFFI, verify_commitment_is_in_state,
};
use log::info;
use nssa::AccountId;
use wallet::cli::{Command, programs::native_token_transfer::AuthTransferSubcommand};
/// Maximum time to wait for the indexer to catch up to the sequencer.
const L2_TO_L1_TIMEOUT_MILLIS: u64 = 180_000;
#[test]
fn indexer_test_run_ffi() -> Result<()> {
let blocking_ctx = BlockingTestContextFFI::new()?;
let runtime_wrapped = blocking_ctx.runtime();
// RUN OBSERVATION
runtime_wrapped.block_on(async {
tokio::time::sleep(std::time::Duration::from_millis(L2_TO_L1_TIMEOUT_MILLIS)).await;
});
let last_block_indexer = blocking_ctx.ctx().get_last_block_indexer(runtime_wrapped)?;
info!("Last block on ind now is {last_block_indexer}");
assert!(last_block_indexer > 1);
Ok(())
}
#[test]
fn indexer_ffi_block_batching() -> Result<()> {
let blocking_ctx = BlockingTestContextFFI::new()?;
let runtime_wrapped = blocking_ctx.runtime();
let ctx = blocking_ctx.ctx();
// WAIT
info!("Waiting for indexer to parse blocks");
runtime_wrapped.block_on(async {
tokio::time::sleep(std::time::Duration::from_millis(L2_TO_L1_TIMEOUT_MILLIS)).await;
});
let last_block_indexer = runtime_wrapped
.block_on(ctx.indexer_client().get_last_finalized_block_id())
.unwrap();
info!("Last block on ind now is {last_block_indexer}");
assert!(last_block_indexer > 1);
// Getting wide batch to fit all blocks (from latest backwards)
let mut block_batch = runtime_wrapped
.block_on(ctx.indexer_client().get_blocks(None, 100))
.unwrap();
// Reverse to check chain consistency from oldest to newest
block_batch.reverse();
// Checking chain consistency
let mut prev_block_hash = block_batch.first().unwrap().header.hash;
for block in &block_batch[1..] {
assert_eq!(block.header.prev_block_hash, prev_block_hash);
info!("Block {} chain-consistent", block.header.block_id);
prev_block_hash = block.header.hash;
}
Ok(())
}
#[test]
fn indexer_ffi_state_consistency() -> Result<()> {
let mut blocking_ctx = BlockingTestContextFFI::new()?;
let runtime_wrapped = blocking_ctx.runtime_clone();
let ctx = blocking_ctx.ctx_mut();
let command = Command::AuthTransfer(AuthTransferSubcommand::Send {
from: Some(format_public_account_id(ctx.existing_public_accounts()[0])),
from_label: None,
to: Some(format_public_account_id(ctx.existing_public_accounts()[1])),
to_label: None,
to_npk: None,
to_vpk: None,
amount: 100,
});
runtime_wrapped.block_on(wallet::cli::execute_subcommand(ctx.wallet_mut(), command))?;
info!("Waiting for next block creation");
runtime_wrapped.block_on(async {
tokio::time::sleep(std::time::Duration::from_secs(
TIME_TO_WAIT_FOR_BLOCK_SECONDS,
))
.await;
});
info!("Checking correct balance move");
let acc_1_balance =
runtime_wrapped.block_on(sequencer_service_rpc::RpcClient::get_account_balance(
ctx.sequencer_client(),
ctx.existing_public_accounts()[0],
))?;
let acc_2_balance =
runtime_wrapped.block_on(sequencer_service_rpc::RpcClient::get_account_balance(
ctx.sequencer_client(),
ctx.existing_public_accounts()[1],
))?;
info!("Balance of sender: {acc_1_balance:#?}");
info!("Balance of receiver: {acc_2_balance:#?}");
assert_eq!(acc_1_balance, 9900);
assert_eq!(acc_2_balance, 20100);
let from: AccountId = ctx.existing_private_accounts()[0];
let to: AccountId = ctx.existing_private_accounts()[1];
let command = Command::AuthTransfer(AuthTransferSubcommand::Send {
from: Some(format_private_account_id(from)),
from_label: None,
to: Some(format_private_account_id(to)),
to_label: None,
to_npk: None,
to_vpk: None,
amount: 100,
});
runtime_wrapped.block_on(wallet::cli::execute_subcommand(ctx.wallet_mut(), command))?;
info!("Waiting for next block creation");
runtime_wrapped.block_on(async {
tokio::time::sleep(std::time::Duration::from_secs(
TIME_TO_WAIT_FOR_BLOCK_SECONDS,
))
.await;
});
let new_commitment1 = ctx
.wallet()
.get_private_account_commitment(from)
.context("Failed to get private account commitment for sender")?;
let commitment_check1 = runtime_wrapped.block_on(verify_commitment_is_in_state(
new_commitment1,
ctx.sequencer_client(),
));
assert!(commitment_check1);
let new_commitment2 = ctx
.wallet()
.get_private_account_commitment(to)
.context("Failed to get private account commitment for receiver")?;
let commitment_check2 = runtime_wrapped.block_on(verify_commitment_is_in_state(
new_commitment2,
ctx.sequencer_client(),
));
assert!(commitment_check2);
info!("Successfully transferred privately to owned account");
// WAIT
info!("Waiting for indexer to parse blocks");
runtime_wrapped.block_on(async {
tokio::time::sleep(std::time::Duration::from_millis(L2_TO_L1_TIMEOUT_MILLIS)).await;
});
let acc1_ind_state = runtime_wrapped.block_on(
ctx.indexer_client()
.get_account(ctx.existing_public_accounts()[0].into()),
)?;
let acc2_ind_state = runtime_wrapped.block_on(
ctx.indexer_client()
.get_account(ctx.existing_public_accounts()[1].into()),
)?;
info!("Checking correct state transition");
let acc1_seq_state =
runtime_wrapped.block_on(sequencer_service_rpc::RpcClient::get_account(
ctx.sequencer_client(),
ctx.existing_public_accounts()[0],
))?;
let acc2_seq_state =
runtime_wrapped.block_on(sequencer_service_rpc::RpcClient::get_account(
ctx.sequencer_client(),
ctx.existing_public_accounts()[1],
))?;
assert_eq!(acc1_ind_state, acc1_seq_state.into());
assert_eq!(acc2_ind_state, acc2_seq_state.into());
// ToDo: Check private state transition
Ok(())
}
#[test]
fn indexer_ffi_state_consistency_with_labels() -> Result<()> {
let mut blocking_ctx = BlockingTestContextFFI::new()?;
let runtime_wrapped = blocking_ctx.runtime_clone();
let ctx = blocking_ctx.ctx_mut();
// Assign labels to both accounts
let from_label = "idx-sender-label".to_owned();
let to_label_str = "idx-receiver-label".to_owned();
let label_cmd = Command::Account(wallet::cli::account::AccountSubcommand::Label {
account_id: Some(format_public_account_id(ctx.existing_public_accounts()[0])),
account_label: None,
label: from_label.clone(),
});
runtime_wrapped.block_on(wallet::cli::execute_subcommand(ctx.wallet_mut(), label_cmd))?;
let label_cmd = Command::Account(wallet::cli::account::AccountSubcommand::Label {
account_id: Some(format_public_account_id(ctx.existing_public_accounts()[1])),
account_label: None,
label: to_label_str.clone(),
});
runtime_wrapped.block_on(wallet::cli::execute_subcommand(ctx.wallet_mut(), label_cmd))?;
// Send using labels instead of account IDs
let command = Command::AuthTransfer(AuthTransferSubcommand::Send {
from: None,
from_label: Some(from_label),
to: None,
to_label: Some(to_label_str),
to_npk: None,
to_vpk: None,
amount: 100,
});
runtime_wrapped.block_on(wallet::cli::execute_subcommand(ctx.wallet_mut(), command))?;
info!("Waiting for next block creation");
runtime_wrapped.block_on(async {
tokio::time::sleep(std::time::Duration::from_secs(
TIME_TO_WAIT_FOR_BLOCK_SECONDS,
))
.await;
});
let acc_1_balance =
runtime_wrapped.block_on(sequencer_service_rpc::RpcClient::get_account_balance(
ctx.sequencer_client(),
ctx.existing_public_accounts()[0],
))?;
let acc_2_balance =
runtime_wrapped.block_on(sequencer_service_rpc::RpcClient::get_account_balance(
ctx.sequencer_client(),
ctx.existing_public_accounts()[1],
))?;
assert_eq!(acc_1_balance, 9900);
assert_eq!(acc_2_balance, 20100);
info!("Waiting for indexer to parse blocks");
runtime_wrapped.block_on(async {
tokio::time::sleep(std::time::Duration::from_millis(L2_TO_L1_TIMEOUT_MILLIS)).await;
});
let acc1_ind_state = runtime_wrapped.block_on(
ctx.indexer_client()
.get_account(ctx.existing_public_accounts()[0].into()),
)?;
let acc1_seq_state =
runtime_wrapped.block_on(sequencer_service_rpc::RpcClient::get_account(
ctx.sequencer_client(),
ctx.existing_public_accounts()[0],
))?;
assert_eq!(acc1_ind_state, acc1_seq_state.into());
info!("Indexer state is consistent after label-based transfer");
Ok(())
}


@ -1,6 +1,9 @@
use std::{path::Path, sync::Arc};
use common::block::Block;
use common::{
block::Block,
transaction::{NSSATransaction, clock_invocation},
};
use nssa::V03State;
use rocksdb::{
BoundColumnFamily, ColumnFamilyDescriptor, DBWithThreadMode, MultiThreaded, Options,
@ -169,22 +172,52 @@ impl RocksDBIO {
for block in self.get_block_batch_seq(
start.checked_add(1).expect("Will be lesser that u64::MAX")..=block_id,
)? {
for transaction in block.body.transactions {
transaction
.transaction_stateless_check()
.map_err(|err| {
DbError::db_interaction_error(format!(
"transaction pre check failed with err {err:?}"
))
})?
.execute_check_on_state(
&mut breakpoint,
let expected_clock =
NSSATransaction::Public(clock_invocation(block.header.timestamp));
if let Some((clock_tx, user_txs)) = block.body.transactions.split_last() {
if *clock_tx != expected_clock {
return Err(DbError::db_interaction_error(
"Last transaction in block must be the clock invocation for the block timestamp"
.to_owned(),
));
}
for transaction in user_txs {
transaction
.clone()
.transaction_stateless_check()
.map_err(|err| {
DbError::db_interaction_error(format!(
"transaction pre check failed with err {err:?}"
))
})?
.execute_check_on_state(
&mut breakpoint,
block.header.block_id,
block.header.timestamp,
)
.map_err(|err| {
DbError::db_interaction_error(format!(
"transaction execution failed with err {err:?}"
))
})?;
}
let NSSATransaction::Public(clock_public_tx) = clock_tx else {
return Err(DbError::db_interaction_error(
"Clock invocation must be a public transaction".to_owned(),
));
};
breakpoint
.transition_from_public_transaction(
clock_public_tx,
block.header.block_id,
block.header.timestamp,
)
.map_err(|err| {
DbError::db_interaction_error(format!(
"transaction execution failed with err {err:?}"
"clock transaction execution failed with err {err:?}"
))
})?;
}
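In other words, replay now only accepts blocks whose transaction list ends with the clock invocation for the block's own timestamp. A small sketch of that layout, assuming the dummy-block convention of `timestamp = block_id * 100` used by the tests below:

use common::transaction::{NSSATransaction, clock_invocation};

// Build the transaction list shape that the breakpoint replay accepts:
// user transactions first, the matching clock invocation last.
fn expected_block_transactions(block_id: u64, mut user_txs: Vec<NSSATransaction>) -> Vec<NSSATransaction> {
    let timestamp = block_id.saturating_mul(100);
    // split_last() in the replay peels this off and compares it against
    // NSSATransaction::Public(clock_invocation(block.header.timestamp)).
    user_txs.push(NSSATransaction::Public(clock_invocation(timestamp)));
    user_txs
}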
@ -213,6 +246,7 @@ fn closest_breakpoint_id(block_id: u64) -> u64 {
#[expect(clippy::shadow_unrelated, reason = "Fine for tests")]
#[cfg(test)]
mod tests {
use common::test_utils::produce_dummy_block;
use nssa::{AccountId, PublicKey};
use tempfile::tempdir;
@ -302,7 +336,7 @@ mod tests {
let transfer_tx =
common::test_utils::create_transaction_native_token_transfer(from, 0, to, 1, &sign_key);
let block = common::test_utils::produce_dummy_block(2, Some(prev_hash), vec![transfer_tx]);
let block = produce_dummy_block(2, Some(prev_hash), vec![transfer_tx]);
dbio.put_block(&block, [1; 32]).unwrap();
@ -369,11 +403,7 @@ mod tests {
1,
&sign_key,
);
let block = common::test_utils::produce_dummy_block(
(i + 1).into(),
Some(prev_hash),
vec![transfer_tx],
);
let block = produce_dummy_block((i + 1).into(), Some(prev_hash), vec![transfer_tx]);
dbio.put_block(&block, [i; 32]).unwrap();
}
@ -439,7 +469,7 @@ mod tests {
let prev_hash = last_block.header.hash;
let transfer_tx =
common::test_utils::create_transaction_native_token_transfer(from, 0, to, 1, &sign_key);
let block = common::test_utils::produce_dummy_block(2, Some(prev_hash), vec![transfer_tx]);
let block = produce_dummy_block(2, Some(prev_hash), vec![transfer_tx]);
let control_hash1 = block.header.hash;
@ -451,7 +481,7 @@ mod tests {
let prev_hash = last_block.header.hash;
let transfer_tx =
common::test_utils::create_transaction_native_token_transfer(from, 1, to, 1, &sign_key);
let block = common::test_utils::produce_dummy_block(3, Some(prev_hash), vec![transfer_tx]);
let block = produce_dummy_block(3, Some(prev_hash), vec![transfer_tx]);
let control_hash2 = block.header.hash;
@ -466,7 +496,7 @@ mod tests {
let control_tx_hash1 = transfer_tx.hash();
let block = common::test_utils::produce_dummy_block(4, Some(prev_hash), vec![transfer_tx]);
let block = produce_dummy_block(4, Some(prev_hash), vec![transfer_tx]);
dbio.put_block(&block, [3; 32]).unwrap();
let last_id = dbio.get_meta_last_block_in_db().unwrap();
@ -478,7 +508,7 @@ mod tests {
let control_tx_hash2 = transfer_tx.hash();
let block = common::test_utils::produce_dummy_block(5, Some(prev_hash), vec![transfer_tx]);
let block = produce_dummy_block(5, Some(prev_hash), vec![transfer_tx]);
dbio.put_block(&block, [4; 32]).unwrap();
let control_block_id1 = dbio.get_block_id_by_hash(control_hash1.0).unwrap().unwrap();
@ -526,7 +556,7 @@ mod tests {
let prev_hash = last_block.header.hash;
let transfer_tx =
common::test_utils::create_transaction_native_token_transfer(from, 0, to, 1, &sign_key);
let block = common::test_utils::produce_dummy_block(2, Some(prev_hash), vec![transfer_tx]);
let block = produce_dummy_block(2, Some(prev_hash), vec![transfer_tx]);
block_res.push(block.clone());
dbio.put_block(&block, [1; 32]).unwrap();
@ -537,7 +567,7 @@ mod tests {
let prev_hash = last_block.header.hash;
let transfer_tx =
common::test_utils::create_transaction_native_token_transfer(from, 1, to, 1, &sign_key);
let block = common::test_utils::produce_dummy_block(3, Some(prev_hash), vec![transfer_tx]);
let block = produce_dummy_block(3, Some(prev_hash), vec![transfer_tx]);
block_res.push(block.clone());
dbio.put_block(&block, [2; 32]).unwrap();
@ -549,7 +579,7 @@ mod tests {
let transfer_tx =
common::test_utils::create_transaction_native_token_transfer(from, 2, to, 1, &sign_key);
let block = common::test_utils::produce_dummy_block(4, Some(prev_hash), vec![transfer_tx]);
let block = produce_dummy_block(4, Some(prev_hash), vec![transfer_tx]);
block_res.push(block.clone());
dbio.put_block(&block, [3; 32]).unwrap();
@ -560,7 +590,7 @@ mod tests {
let transfer_tx =
common::test_utils::create_transaction_native_token_transfer(from, 3, to, 1, &sign_key);
let block = common::test_utils::produce_dummy_block(5, Some(prev_hash), vec![transfer_tx]);
let block = produce_dummy_block(5, Some(prev_hash), vec![transfer_tx]);
block_res.push(block.clone());
dbio.put_block(&block, [4; 32]).unwrap();
@ -633,11 +663,7 @@ mod tests {
tx_hash_res.push(transfer_tx1.hash().0);
tx_hash_res.push(transfer_tx2.hash().0);
let block = common::test_utils::produce_dummy_block(
2,
Some(prev_hash),
vec![transfer_tx1, transfer_tx2],
);
let block = produce_dummy_block(2, Some(prev_hash), vec![transfer_tx1, transfer_tx2]);
dbio.put_block(&block, [1; 32]).unwrap();
@ -652,11 +678,7 @@ mod tests {
tx_hash_res.push(transfer_tx1.hash().0);
tx_hash_res.push(transfer_tx2.hash().0);
let block = common::test_utils::produce_dummy_block(
3,
Some(prev_hash),
vec![transfer_tx1, transfer_tx2],
);
let block = produce_dummy_block(3, Some(prev_hash), vec![transfer_tx1, transfer_tx2]);
dbio.put_block(&block, [2; 32]).unwrap();
@ -671,11 +693,7 @@ mod tests {
tx_hash_res.push(transfer_tx1.hash().0);
tx_hash_res.push(transfer_tx2.hash().0);
let block = common::test_utils::produce_dummy_block(
4,
Some(prev_hash),
vec![transfer_tx1, transfer_tx2],
);
let block = produce_dummy_block(4, Some(prev_hash), vec![transfer_tx1, transfer_tx2]);
dbio.put_block(&block, [3; 32]).unwrap();
@ -687,7 +705,7 @@ mod tests {
common::test_utils::create_transaction_native_token_transfer(from, 6, to, 1, &sign_key);
tx_hash_res.push(transfer_tx.hash().0);
let block = common::test_utils::produce_dummy_block(5, Some(prev_hash), vec![transfer_tx]);
let block = produce_dummy_block(5, Some(prev_hash), vec![transfer_tx]);
dbio.put_block(&block, [4; 32]).unwrap();


@ -74,7 +74,9 @@ impl WalletChainStore {
let chain_index = data.chain_index;
for identifier in &data.identifiers {
let account_id = nssa::AccountId::from((&npk, *identifier));
private_tree.account_id_map.insert(account_id, chain_index.clone());
private_tree
.account_id_map
.insert(account_id, chain_index.clone());
}
private_tree.key_map.insert(chain_index, data.data);
}


@ -82,7 +82,8 @@ pub enum NewSubcommand {
/// Label to assign to the new account.
label: Option<String>,
},
/// Single-account convenience: creates a key node and auto-registers one account with a random identifier.
/// Single-account convenience: creates a key node and auto-registers one account with a random
/// identifier.
Private {
#[arg(long)]
/// Chain index of a parent node.


@ -123,7 +123,6 @@ impl PersistentStorage {
}
}
impl From<PublicAccountPrivateInitialData> for InitialAccountData {
fn from(value: PublicAccountPrivateInitialData) -> Self {
Self::Public(value)