Merge branch 'main' into marvin/keycard-commands

This commit is contained in:
jonesmarvin8 2026-05-14 10:29:40 -04:00
commit 3fce53f663
146 changed files with 8633 additions and 3382 deletions

View File

@ -14,6 +14,8 @@ ignore = [
{ id = "RUSTSEC-2025-0141", reason = "`bincode` is unmaintained but continuing to use it." },
{ id = "RUSTSEC-2023-0089", reason = "atomic-polyfill is pulled transitively via risc0-zkvm; waiting on upstream fix (see https://github.com/risc0/risc0/issues/3453)" },
{ id = "RUSTSEC-2026-0097", reason = "`rand` v0.8.5 is present transitively from logos crates, modification may break integration" },
{ id = "RUSTSEC-2026-0118", reason = "`hickory-proto` v0.25.0-alpha.5 is present transitively from logos crates, modification may break integration"},
{ id = "RUSTSEC-2026-0119", reason = "`hickory-proto` v0.25.0-alpha.5 is present transitively from logos crates, modification may break integration"},
]
yanked = "deny"
unused-ignored-advisory = "deny"

View File

@ -225,7 +225,7 @@ jobs:
- uses: ./.github/actions/install-risc0
- name: Install just
run: cargo install just
run: cargo install --locked just
- name: Build artifacts
run: just build-artifacts

1671
Cargo.lock generated

File diff suppressed because it is too large Load Diff

View File

@ -36,7 +36,6 @@ members = [
"examples/program_deployment",
"examples/program_deployment/methods",
"examples/program_deployment/methods/guest",
"bedrock_client",
"testnet_initial_state",
"keycard_wallet",
"indexer_ffi",
@ -60,6 +59,7 @@ indexer_service_rpc = { path = "indexer/service/rpc" }
indexer_ffi = { path = "indexer_ffi" }
wallet = { path = "wallet" }
wallet-ffi = { path = "wallet-ffi", default-features = false }
indexer_ffi = { path = "indexer/ffi" }
clock_core = { path = "programs/clock/core" }
token_core = { path = "programs/token/core" }
token_program = { path = "programs/token" }
@ -68,7 +68,6 @@ amm_program = { path = "programs/amm" }
ata_core = { path = "programs/associated_token_account/core" }
ata_program = { path = "programs/associated_token_account" }
test_program_methods = { path = "test_program_methods" }
bedrock_client = { path = "bedrock_client" }
testnet_initial_state = { path = "testnet_initial_state" }
keycard_wallet = { path = "keycard_wallet" }
@ -125,11 +124,12 @@ schemars = "1.2"
async-stream = "0.3.6"
pyo3 = { version = "0.24", features = ["auto-initialize"] }
logos-blockchain-common-http-client = { git = "https://github.com/logos-blockchain/logos-blockchain.git", rev = "1da154c74b911318fb853d37261f8a05ffe513b4" }
logos-blockchain-key-management-system-service = { git = "https://github.com/logos-blockchain/logos-blockchain.git", rev = "1da154c74b911318fb853d37261f8a05ffe513b4" }
logos-blockchain-core = { git = "https://github.com/logos-blockchain/logos-blockchain.git", rev = "1da154c74b911318fb853d37261f8a05ffe513b4" }
logos-blockchain-chain-broadcast-service = { git = "https://github.com/logos-blockchain/logos-blockchain.git", rev = "1da154c74b911318fb853d37261f8a05ffe513b4" }
logos-blockchain-chain-service = { git = "https://github.com/logos-blockchain/logos-blockchain.git", rev = "1da154c74b911318fb853d37261f8a05ffe513b4" }
logos-blockchain-common-http-client = { git = "https://github.com/logos-blockchain/logos-blockchain.git", rev = "ee281a447d95a951752461ee0a6e88eb4a0f17cf" }
logos-blockchain-key-management-system-service = { git = "https://github.com/logos-blockchain/logos-blockchain.git", rev = "ee281a447d95a951752461ee0a6e88eb4a0f17cf" }
logos-blockchain-core = { git = "https://github.com/logos-blockchain/logos-blockchain.git", rev = "ee281a447d95a951752461ee0a6e88eb4a0f17cf" }
logos-blockchain-chain-broadcast-service = { git = "https://github.com/logos-blockchain/logos-blockchain.git", rev = "ee281a447d95a951752461ee0a6e88eb4a0f17cf" }
logos-blockchain-chain-service = { git = "https://github.com/logos-blockchain/logos-blockchain.git", rev = "ee281a447d95a951752461ee0a6e88eb4a0f17cf" }
logos-blockchain-zone-sdk = { git = "https://github.com/logos-blockchain/logos-blockchain.git", rev = "ee281a447d95a951752461ee0a6e88eb4a0f17cf" }
rocksdb = { version = "0.24.0", default-features = false, features = [
"snappy",

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

View File

@ -39,42 +39,42 @@ cryptarchia:
threshold: 1
timestamp: 0
gossipsub_protocol: /integration/logos-blockchain/cryptarchia/proto/1.0.0
genesis_state:
mantle_tx:
ops:
genesis_block:
header:
version: Bedrock
parent_block: '0000000000000000000000000000000000000000000000000000000000000000'
slot: 0
block_root: b5f8787ac23674822414c70eea15d842da38f2e806ede1a73cf7b5cf0277da07
proof_of_leadership:
proof: '0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000'
entropy_contribution: '0000000000000000000000000000000000000000000000000000000000000000'
leader_key: '0000000000000000000000000000000000000000000000000000000000000000'
voucher_cm: '0000000000000000000000000000000000000000000000000000000000000000'
signature: '00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000'
transactions:
- mantle_tx:
ops:
- opcode: 0
payload:
inputs: [ ]
inputs: []
outputs:
- value: 1
pk: d204000000000000000000000000000000000000000000000000000000000000
- value: 100
pk: 2e03b2eff5a45478e7e79668d2a146cf2c5c7925bce927f2b1c67f2ab4fc0d26
- value: 1
pk: d204000000000000000000000000000000000000000000000000000000000000
- value: 100
pk: '2e03b2eff5a45478e7e79668d2a146cf2c5c7925bce927f2b1c67f2ab4fc0d26'
- value: 1
pk: ed266e6e887b9b97059dc1aa1b7b2e19b934291753c6336a163fe4ebaa28e717
- opcode: 17
payload:
channel_id: "0000000000000000000000000000000000000000000000000000000000000000"
inscription: [ 103, 101, 110, 101, 115, 105, 115 ] # "genesis" in bytes
parent: "0000000000000000000000000000000000000000000000000000000000000000"
signer: "0000000000000000000000000000000000000000000000000000000000000000"
execution_gas_price: 0
storage_gas_price: 0
ops_proofs:
- !ZkSig
pi_a: [
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
]
pi_b: [
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
]
pi_c: [
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
]
- NoProof
channel_id: '0000000000000000000000000000000000000000000000000000000000000000'
inscription: '67656e65736973'
parent: '0000000000000000000000000000000000000000000000000000000000000000'
signer: '0000000000000000000000000000000000000000000000000000000000000000'
execution_gas_price: 0
storage_gas_price: 0
ops_proofs:
- !Ed25519Sig '00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000'
- !Ed25519Sig '00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000'
time:
slot_duration: '1.0'
chain_start_time: PLACEHOLDER_CHAIN_START_TIME

View File

@ -1,7 +1,7 @@
services:
logos-blockchain-node-0:
image: ghcr.io/logos-blockchain/logos-blockchain@sha256:c5243681b353278cabb562a176f0a5cfbefc2056f18cebc47fe0e3720c29fb12
image: ghcr.io/logos-blockchain/logos-blockchain@sha256:9f1829dea335c56f6ff68ae37ea872ed5313b96b69e8ffe143c02b7217de85fc
ports:
- "${PORT:-8080}:18080/tcp"
volumes:

View File

@ -1,23 +0,0 @@
[package]
name = "bedrock_client"
version = "0.1.0"
edition = "2024"
license = { workspace = true }
[lints]
workspace = true
[dependencies]
common.workspace = true
reqwest.workspace = true
anyhow.workspace = true
tokio-retry.workspace = true
futures.workspace = true
log.workspace = true
serde.workspace = true
humantime-serde.workspace = true
logos-blockchain-common-http-client.workspace = true
logos-blockchain-core.workspace = true
logos-blockchain-chain-broadcast-service.workspace = true
logos-blockchain-chain-service.workspace = true

View File

@ -1,121 +0,0 @@
use std::time::Duration;
use anyhow::{Context as _, Result};
use common::config::BasicAuth;
use futures::{Stream, TryFutureExt as _};
#[expect(clippy::single_component_path_imports, reason = "Satisfy machete")]
use humantime_serde;
use log::{info, warn};
pub use logos_blockchain_chain_broadcast_service::BlockInfo;
use logos_blockchain_chain_service::CryptarchiaInfo;
pub use logos_blockchain_common_http_client::{CommonHttpClient, Error};
pub use logos_blockchain_core::{block::Block, header::HeaderId, mantle::SignedMantleTx};
use reqwest::{Client, Url};
use serde::{Deserialize, Serialize};
use tokio_retry::Retry;
/// Fibonacci backoff retry strategy configuration.
#[derive(Debug, Copy, Clone, Serialize, Deserialize)]
pub struct BackoffConfig {
#[serde(with = "humantime_serde")]
pub start_delay: Duration,
pub max_retries: usize,
}
impl Default for BackoffConfig {
fn default() -> Self {
Self {
start_delay: Duration::from_millis(100),
max_retries: 5,
}
}
}
/// Simple wrapper
/// maybe extend in the future for our purposes
/// `Clone` is cheap because `CommonHttpClient` is internally reference counted (`Arc`).
#[derive(Clone)]
pub struct BedrockClient {
http_client: CommonHttpClient,
node_url: Url,
backoff: BackoffConfig,
}
impl BedrockClient {
pub fn new(backoff: BackoffConfig, node_url: Url, auth: Option<BasicAuth>) -> Result<Self> {
info!("Creating Bedrock client with node URL {node_url}");
let client = Client::builder()
//Add more fields if needed
.timeout(std::time::Duration::from_mins(1))
.build()
.context("Failed to build HTTP client")?;
let auth = auth.map(|a| {
logos_blockchain_common_http_client::BasicAuthCredentials::new(a.username, a.password)
});
let http_client = CommonHttpClient::new_with_client(client, auth);
Ok(Self {
http_client,
node_url,
backoff,
})
}
pub async fn post_transaction(&self, tx: SignedMantleTx) -> Result<Result<(), Error>, Error> {
Retry::spawn(self.backoff_strategy(), || async {
match self
.http_client
.post_transaction(self.node_url.clone(), tx.clone())
.await
{
Ok(()) => Ok(Ok(())),
Err(err) => match err {
// Retry arm.
// Retrying only reqwest errors: mainly connected to http.
Error::Request(_) => Err(err),
// Returning non-retryable error
Error::Server(_) | Error::Client(_) | Error::Url(_) => Ok(Err(err)),
},
}
})
.await
}
pub async fn get_lib_stream(&self) -> Result<impl Stream<Item = BlockInfo>, Error> {
self.http_client.get_lib_stream(self.node_url.clone()).await
}
pub async fn get_block_by_id(
&self,
header_id: HeaderId,
) -> Result<Option<Block<SignedMantleTx>>, Error> {
Retry::spawn(self.backoff_strategy(), || {
self.http_client
.get_block_by_id(self.node_url.clone(), header_id)
.inspect_err(|err| warn!("Block fetching failed with error: {err:#}"))
})
.await
}
pub async fn get_consensus_info(&self) -> Result<CryptarchiaInfo, Error> {
Retry::spawn(self.backoff_strategy(), || {
self.http_client
.consensus_info(self.node_url.clone())
.inspect_err(|err| warn!("Block fetching failed with error: {err:#}"))
})
.await
}
fn backoff_strategy(&self) -> impl Iterator<Item = Duration> {
let start_delay_millis = self
.backoff
.start_delay
.as_millis()
.try_into()
.expect("Start delay must be less than u64::MAX milliseconds");
tokio_retry::strategy::FibonacciBackoff::from_millis(start_delay_millis)
.take(self.backoff.max_retries)
}
}

View File

@ -114,11 +114,6 @@ impl HashableBlockData {
bedrock_parent_id,
}
}
#[must_use]
pub fn block_hash(&self) -> BlockHash {
OwnHasher::hash(&borsh::to_vec(&self).unwrap())
}
}
impl From<Block> for HashableBlockData {

View File

@ -1,12 +1,8 @@
{
"home": "./indexer/service",
"consensus_info_polling_interval": "1s",
"bedrock_client_config": {
"addr": "http://logos-blockchain-node-0:18080",
"backoff": {
"start_delay": "100ms",
"max_retries": 5
}
"bedrock_config": {
"addr": "http://logos-blockchain-node-0:18080"
},
"channel_id": "0101010101010101010101010101010101010101010101010101010101010101",
"initial_accounts": [

View File

@ -9,7 +9,7 @@ workspace = true
[dependencies]
common.workspace = true
bedrock_client.workspace = true
logos-blockchain-zone-sdk.workspace = true
nssa.workspace = true
nssa_core.workspace = true
storage.workspace = true
@ -19,13 +19,13 @@ anyhow.workspace = true
log.workspace = true
serde.workspace = true
humantime-serde.workspace = true
tokio.workspace = true
borsh.workspace = true
futures.workspace = true
url.workspace = true
logos-blockchain-core.workspace = true
serde_json.workspace = true
async-stream.workspace = true
tokio.workspace = true
[dev-dependencies]
tempfile.workspace = true

View File

@ -1,11 +1,12 @@
use std::{path::Path, sync::Arc};
use anyhow::Result;
use bedrock_client::HeaderId;
use anyhow::{Context as _, Result};
use common::{
block::{BedrockStatus, Block},
transaction::{NSSATransaction, clock_invocation},
};
use logos_blockchain_core::{header::HeaderId, mantle::ops::channel::MsgId};
use logos_blockchain_zone_sdk::Slot;
use nssa::{Account, AccountId, V03State};
use nssa_core::BlockId;
use storage::indexer::RocksDBIO;
@ -103,6 +104,22 @@ impl IndexerStore {
Ok(self.dbio.calculate_state_for_id(block_id)?)
}
pub fn get_zone_cursor(&self) -> Result<Option<(MsgId, Slot)>> {
let Some(bytes) = self.dbio.get_zone_sdk_indexer_cursor_bytes()? else {
return Ok(None);
};
let cursor: (MsgId, Slot) = serde_json::from_slice(&bytes)
.context("Failed to deserialize stored zone-sdk indexer cursor")?;
Ok(Some(cursor))
}
pub fn set_zone_cursor(&self, cursor: &(MsgId, Slot)) -> Result<()> {
let bytes =
serde_json::to_vec(cursor).context("Failed to serialize zone-sdk indexer cursor")?;
self.dbio.put_zone_sdk_indexer_cursor_bytes(&bytes)?;
Ok(())
}
/// Recalculation of final state directly from DB.
///
/// Used for indexer healthcheck.
@ -118,6 +135,12 @@ impl IndexerStore {
.get_account_by_id(*account_id))
}
pub fn account_state_at_block(&self, account_id: &AccountId, block_id: u64) -> Result<Account> {
Ok(self
.get_state_at_block(block_id)?
.get_account_by_id(*account_id))
}
pub async fn put_block(&self, mut block: Block, l1_header: HeaderId) -> Result<()> {
{
let mut state_guard = self.current_state.write().await;
@ -260,4 +283,64 @@ mod tests {
assert_eq!(acc1_val.balance, 9920);
assert_eq!(acc2_val.balance, 20080);
}
#[tokio::test]
async fn account_state_at_block() {
let home = tempdir().unwrap();
let storage = IndexerStore::open_db_with_genesis(
home.as_ref(),
&genesis_block(),
&nssa::V03State::new_with_genesis_accounts(
&[(acc1(), 10000), (acc2(), 20000)],
vec![],
0,
),
)
.unwrap();
let mut prev_hash = genesis_block().header.hash;
let from = acc1();
let to = acc2();
let sign_key = acc1_sign_key();
for i in 2..10 {
let tx = common::test_utils::create_transaction_native_token_transfer(
from,
i - 2,
to,
10,
&sign_key,
);
let block_id = u64::try_from(i).unwrap();
let next_block =
common::test_utils::produce_dummy_block(block_id, Some(prev_hash), vec![tx]);
prev_hash = next_block.header.hash;
storage
.put_block(next_block, HeaderId::from([u8::try_from(i).unwrap(); 32]))
.await
.unwrap();
}
// Genesis block: no transfers applied yet.
let acc1_at_1 = storage.account_state_at_block(&acc1(), 1).unwrap();
let acc2_at_1 = storage.account_state_at_block(&acc2(), 1).unwrap();
assert_eq!(acc1_at_1.balance, 10000);
assert_eq!(acc2_at_1.balance, 20000);
// After block 5: 4 transfers of 10 applied (one each in blocks 2..=5).
let acc1_at_5 = storage.account_state_at_block(&acc1(), 5).unwrap();
let acc2_at_5 = storage.account_state_at_block(&acc2(), 5).unwrap();
assert_eq!(acc1_at_5.balance, 9960);
assert_eq!(acc2_at_5.balance, 20040);
// After final block 9: 8 transfers applied; should match current state.
let acc1_at_9 = storage.account_state_at_block(&acc1(), 9).unwrap();
let acc2_at_9 = storage.account_state_at_block(&acc2(), 9).unwrap();
assert_eq!(acc1_at_9.balance, 9920);
assert_eq!(acc2_at_9.balance, 20080);
}
}

View File

@ -6,7 +6,6 @@ use std::{
};
use anyhow::{Context as _, Result};
pub use bedrock_client::BackoffConfig;
use common::config::BasicAuth;
use humantime_serde;
pub use logos_blockchain_core::mantle::ops::channel::ChannelId;
@ -16,8 +15,6 @@ use url::Url;
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ClientConfig {
/// For individual RPC requests we use Fibonacci backoff retry strategy.
pub backoff: BackoffConfig,
pub addr: Url,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub auth: Option<BasicAuth>,
@ -31,7 +28,7 @@ pub struct IndexerConfig {
pub signing_key: [u8; 32],
#[serde(with = "humantime_serde")]
pub consensus_info_polling_interval: Duration,
pub bedrock_client_config: ClientConfig,
pub bedrock_config: ClientConfig,
pub channel_id: ChannelId,
#[serde(skip_serializing_if = "Option::is_none")]
pub initial_public_accounts: Option<Vec<PublicAccountPublicInitialData>>,

View File

@ -1,15 +1,14 @@
use std::collections::VecDeque;
use std::sync::Arc;
use anyhow::Result;
use bedrock_client::{BedrockClient, HeaderId};
use common::{
HashType, PINATA_BASE58,
block::{Block, HashableBlockData},
};
use log::{debug, error, info};
use logos_blockchain_core::mantle::{
Op, SignedMantleTx,
ops::channel::{ChannelId, inscribe::InscriptionOp},
use common::block::{Block, HashableBlockData};
// ToDo: Remove after testnet
use common::{HashType, PINATA_BASE58};
use futures::StreamExt as _;
use log::{error, info, warn};
use logos_blockchain_core::header::HeaderId;
use logos_blockchain_zone_sdk::{
CommonHttpClient, ZoneMessage, adapter::NodeHttpClient, indexer::ZoneIndexer,
};
use nssa::V03State;
use testnet_initial_state::initial_state_testnet;
@ -21,25 +20,11 @@ pub mod config;
#[derive(Clone)]
pub struct IndexerCore {
pub bedrock_client: BedrockClient,
pub zone_indexer: Arc<ZoneIndexer<NodeHttpClient>>,
pub config: IndexerConfig,
pub store: IndexerStore,
}
#[derive(Clone)]
/// This struct represents one L1 block data fetched from backfilling.
pub struct BackfillBlockData {
l2_blocks: Vec<Block>,
l1_header: HeaderId,
}
#[derive(Clone)]
/// This struct represents data fetched fom backfilling in one iteration.
pub struct BackfillData {
block_data: VecDeque<BackfillBlockData>,
curr_fin_l1_lib_header: HeaderId,
}
impl IndexerCore {
pub fn new(config: IndexerConfig) -> Result<Self> {
let hashable_data = HashableBlockData {
@ -63,7 +48,7 @@ impl IndexerCore {
.iter()
.map(|init_comm_data| {
let npk = &init_comm_data.npk;
let account_id = nssa::AccountId::from((npk, 0));
let account_id = nssa::AccountId::for_regular_private_account(npk, 0);
let mut acc = init_comm_data.account.clone();
@ -107,279 +92,88 @@ impl IndexerCore {
let home = config.home.join("rocksdb");
let basic_auth = config.bedrock_config.auth.clone().map(Into::into);
let node = NodeHttpClient::new(
CommonHttpClient::new(basic_auth),
config.bedrock_config.addr.clone(),
);
let zone_indexer = ZoneIndexer::new(config.channel_id, node);
Ok(Self {
bedrock_client: BedrockClient::new(
config.bedrock_client_config.backoff,
config.bedrock_client_config.addr.clone(),
config.bedrock_client_config.auth.clone(),
)?,
zone_indexer: Arc::new(zone_indexer),
config,
store: IndexerStore::open_db_with_genesis(&home, &genesis_block, &state)?,
})
}
pub fn subscribe_parse_block_stream(&self) -> impl futures::Stream<Item = Result<Block>> {
pub fn subscribe_parse_block_stream(&self) -> impl futures::Stream<Item = Result<Block>> + '_ {
let poll_interval = self.config.consensus_info_polling_interval;
let initial_cursor = self
.store
.get_zone_cursor()
.expect("Failed to load zone-sdk indexer cursor");
async_stream::stream! {
info!("Searching for initial header");
let mut cursor = initial_cursor;
let last_stored_l1_lib_header = self.store.last_observed_l1_lib_header()?;
let mut prev_last_l1_lib_header = if let Some(last_l1_lib_header) = last_stored_l1_lib_header {
info!("Last l1 lib header found: {last_l1_lib_header}");
last_l1_lib_header
if cursor.is_some() {
info!("Resuming indexer from cursor {cursor:?}");
} else {
info!("Last l1 lib header not found in DB");
info!("Searching for the start of a channel");
let BackfillData {
block_data: start_buff,
curr_fin_l1_lib_header: last_l1_lib_header,
} = self.search_for_channel_start().await?;
for BackfillBlockData {
l2_blocks: l2_block_vec,
l1_header,
} in start_buff {
let mut l2_blocks_parsed_ids: Vec<_> = l2_block_vec.iter().map(|block| block.header.block_id).collect();
l2_blocks_parsed_ids.sort_unstable();
info!("Parsed {} L2 blocks with ids {:?}", l2_block_vec.len(), l2_blocks_parsed_ids);
for l2_block in l2_block_vec {
// TODO: proper fix is to make the sequencer's genesis include a
// trailing `clock_invocation(0)` (and have the indexer's
// `open_db_with_genesis` not pre-apply state transitions) so the
// inscribed genesis can flow through `put_block` like any other
// block. For now we skip re-applying it.
//
// The channel-start (block_id == 1) is the sequencer's genesis
// inscription that we re-discover during initial search. The
// indexer already has its own locally-constructed genesis in
// the store from `open_db_with_genesis`, so re-applying the
// inscribed copy is both redundant and would fail the strict
// block validation in `put_block` (the inscribed genesis lacks
// the trailing clock invocation).
if l2_block.header.block_id != 1 {
self
.store
.put_block(l2_block.clone(), l1_header)
.await
.inspect_err(|err| error!("Failed to put block with err {err:?}"))?;
}
yield Ok(l2_block);
}
}
last_l1_lib_header
};
info!("Searching for initial header finished");
info!("Starting backfilling from {prev_last_l1_lib_header}");
info!("Starting indexer from beginning of channel");
}
loop {
let BackfillData {
block_data: buff,
curr_fin_l1_lib_header,
} = self
.backfill_to_last_l1_lib_header_id(prev_last_l1_lib_header, &self.config.channel_id)
.await
.inspect_err(|err| error!("Failed to backfill to last l1 lib header id with err {err:#?}"))?;
prev_last_l1_lib_header = curr_fin_l1_lib_header;
for BackfillBlockData {
l2_blocks: l2_block_vec,
l1_header: header,
} in buff {
let mut l2_blocks_parsed_ids: Vec<_> = l2_block_vec.iter().map(|block| block.header.block_id).collect();
l2_blocks_parsed_ids.sort_unstable();
info!("Parsed {} L2 blocks with ids {:?}", l2_block_vec.len(), l2_blocks_parsed_ids);
for l2_block in l2_block_vec {
self.store.put_block(l2_block.clone(), header).await?;
yield Ok(l2_block);
let stream = match self.zone_indexer.next_messages(cursor).await {
Ok(s) => s,
Err(err) => {
error!("Failed to start zone-sdk next_messages stream: {err}");
tokio::time::sleep(poll_interval).await;
continue;
}
}
}
}
}
async fn get_lib(&self) -> Result<HeaderId> {
Ok(self.bedrock_client.get_consensus_info().await?.lib)
}
async fn get_next_lib(&self, prev_lib: HeaderId) -> Result<HeaderId> {
loop {
let next_lib = self.get_lib().await?;
if next_lib == prev_lib {
info!(
"Wait {:?} to not spam the node",
self.config.consensus_info_polling_interval
);
tokio::time::sleep(self.config.consensus_info_polling_interval).await;
} else {
break Ok(next_lib);
}
}
}
/// WARNING: depending on channel state,
/// may take indefinite amount of time.
pub async fn search_for_channel_start(&self) -> Result<BackfillData> {
let mut curr_last_l1_lib_header = self.get_lib().await?;
let mut backfill_start = curr_last_l1_lib_header;
// ToDo: How to get root?
let mut backfill_limit = HeaderId::from([0; 32]);
// ToDo: Not scalable, initial buffer should be stored in DB to not run out of memory
// Don't want to complicate DB even more right now.
let mut block_buffer = VecDeque::new();
'outer: loop {
let mut cycle_header = curr_last_l1_lib_header;
loop {
let Some(cycle_block) = self.bedrock_client.get_block_by_id(cycle_header).await?
else {
// First run can reach root easily
// so here we are optimistic about L1
// failing to get parent.
break;
};
let mut stream = std::pin::pin!(stream);
// It would be better to have id, but block does not have it, so slot will do.
info!(
"INITIAL SEARCH: Observed L1 block at slot {}",
cycle_block.header().slot().into_inner()
);
debug!(
"INITIAL SEARCH: This block header is {}",
cycle_block.header().id()
);
debug!(
"INITIAL SEARCH: This block parent is {}",
cycle_block.header().parent()
);
while let Some((msg, slot)) = stream.next().await {
let zone_block = match msg {
ZoneMessage::Block(b) => b,
// Non-block messages don't carry a cursor position; the
// next ZoneBlock advances past them implicitly.
ZoneMessage::Deposit(_) | ZoneMessage::Withdraw(_) => continue,
};
let (l2_block_vec, l1_header) =
parse_block_owned(&cycle_block, &self.config.channel_id);
let block: Block = match borsh::from_slice(&zone_block.data) {
Ok(b) => b,
Err(e) => {
error!("Failed to deserialize L2 block from zone-sdk: {e}");
// Advance past the broken inscription so we don't
// re-process it on restart.
cursor = Some((zone_block.id, slot));
if let Err(err) = self.store.set_zone_cursor(&(zone_block.id, slot)) {
warn!("Failed to persist indexer cursor: {err:#}");
}
continue;
}
};
info!("Parsed {} L2 blocks", l2_block_vec.len());
info!("Indexed L2 block {}", block.header.block_id);
if !l2_block_vec.is_empty() {
block_buffer.push_front(BackfillBlockData {
l2_blocks: l2_block_vec.clone(),
l1_header,
});
}
if let Some(first_l2_block) = l2_block_vec.first()
&& first_l2_block.header.block_id == 1
{
info!("INITIAL_SEARCH: Found channel start");
break 'outer;
}
// Step back to parent
let parent = cycle_block.header().parent();
if parent == backfill_limit {
break;
}
cycle_header = parent;
}
info!("INITIAL_SEARCH: Reached backfill limit, refetching last l1 lib header");
block_buffer.clear();
backfill_limit = backfill_start;
curr_last_l1_lib_header = self.get_next_lib(curr_last_l1_lib_header).await?;
backfill_start = curr_last_l1_lib_header;
}
Ok(BackfillData {
block_data: block_buffer,
curr_fin_l1_lib_header: curr_last_l1_lib_header,
})
}
pub async fn backfill_to_last_l1_lib_header_id(
&self,
last_fin_l1_lib_header: HeaderId,
channel_id: &ChannelId,
) -> Result<BackfillData> {
let curr_fin_l1_lib_header = self.get_next_lib(last_fin_l1_lib_header).await?;
// ToDo: Not scalable, buffer should be stored in DB to not run out of memory
// Don't want to complicate DB even more right now.
let mut block_buffer = VecDeque::new();
let mut cycle_header = curr_fin_l1_lib_header;
loop {
let Some(cycle_block) = self.bedrock_client.get_block_by_id(cycle_header).await? else {
return Err(anyhow::anyhow!("Parent not found"));
};
if cycle_block.header().id() == last_fin_l1_lib_header {
break;
}
// Step back to parent
cycle_header = cycle_block.header().parent();
// It would be better to have id, but block does not have it, so slot will do.
info!(
"Observed L1 block at slot {}",
cycle_block.header().slot().into_inner()
);
let (l2_block_vec, l1_header) = parse_block_owned(&cycle_block, channel_id);
info!("Parsed {} L2 blocks", l2_block_vec.len());
if !l2_block_vec.is_empty() {
block_buffer.push_front(BackfillBlockData {
l2_blocks: l2_block_vec,
l1_header,
});
}
}
Ok(BackfillData {
block_data: block_buffer,
curr_fin_l1_lib_header,
})
}
}
fn parse_block_owned(
l1_block: &bedrock_client::Block<SignedMantleTx>,
decoded_channel_id: &ChannelId,
) -> (Vec<Block>, HeaderId) {
(
#[expect(
clippy::wildcard_enum_match_arm,
reason = "We are only interested in channel inscription ops, so it's fine to ignore the rest"
)]
l1_block
.transactions()
.flat_map(|tx| {
tx.mantle_tx.ops.iter().filter_map(|op| match op {
Op::ChannelInscribe(InscriptionOp {
channel_id,
inscription,
..
}) if channel_id == decoded_channel_id => {
borsh::from_slice::<Block>(inscription)
.inspect_err(|err| {
error!("Failed to deserialize our inscription with err: {err:#?}");
})
.ok()
// TODO: Remove l1_header placeholder once storage layer
// no longer requires it. Zone-sdk handles L1 tracking internally.
let placeholder_l1_header = HeaderId::from([0_u8; 32]);
if let Err(err) = self.store.put_block(block.clone(), placeholder_l1_header).await {
error!("Failed to store block {}: {err:#}", block.header.block_id);
}
_ => None,
})
})
.collect(),
l1_block.header().id(),
)
cursor = Some((zone_block.id, slot));
if let Err(err) = self.store.set_zone_cursor(&(zone_block.id, slot)) {
warn!("Failed to persist indexer cursor: {err:#}");
}
yield Ok(block);
}
// Stream ended (caught up to LIB). Sleep then poll again.
tokio::time::sleep(poll_interval).await;
}
}
}
}

View File

@ -5,9 +5,16 @@ name = "indexer_ffi"
version = "0.1.0"
[dependencies]
nssa.workspace = true
indexer_service.workspace = true
indexer_service_rpc = { workspace = true, features = ["client"] }
indexer_service_protocol.workspace = true
url.workspace = true
log = { workspace = true }
tokio = { features = ["rt-multi-thread"], workspace = true }
jsonrpsee.workspace = true
anyhow.workspace = true
[build-dependencies]
cbindgen = "0.29"

685
indexer/ffi/indexer_ffi.h Normal file
View File

@ -0,0 +1,685 @@
#include <stdarg.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdlib.h>
typedef enum OperationStatus {
Ok = 0,
NullPointer = 1,
InitializationError = 2,
ClientError = 3,
} OperationStatus;
typedef enum FfiTransactionKind {
Public = 0,
Private,
ProgramDeploy,
} FfiTransactionKind;
typedef enum FfiBedrockStatus {
Pending = 0,
Safe,
Finalized,
} FfiBedrockStatus;
typedef struct IndexerServiceFFI {
void *indexer_handle;
void *runtime;
void *indexer_client;
} IndexerServiceFFI;
/**
* Simple wrapper around a pointer to a value or an error.
*
* Pointer is not guaranteed. You should check the error field before
* dereferencing the pointer.
*/
typedef struct PointerResult_IndexerServiceFFI__OperationStatus {
struct IndexerServiceFFI *value;
enum OperationStatus error;
} PointerResult_IndexerServiceFFI__OperationStatus;
/* Result of `start_indexer`: check `error` before dereferencing the pointer. */
typedef struct PointerResult_IndexerServiceFFI__OperationStatus InitializedIndexerServiceFFIResult;

/**
 * Simple wrapper around a pointer to a value or an error.
 *
 * Pointer is not guaranteed. You should check the error field before
 * dereferencing the pointer.
 */
typedef struct PointerResult_u64__OperationStatus {
  uint64_t *value;
  enum OperationStatus error;
} PointerResult_u64__OperationStatus;

/* Block identifier. */
typedef uint64_t FfiBlockId;

/**
 * 32-byte array type for `AccountId`, keys, hashes, etc.
 */
typedef struct FfiBytes32 {
  uint8_t data[32];
} FfiBytes32;

/* 32-byte hash. */
typedef struct FfiBytes32 FfiHashType;

/* Block timestamp. */
typedef uint64_t FfiTimestamp;

/**
 * 64-byte array type for signatures, etc.
 */
typedef struct FfiBytes64 {
  uint8_t data[64];
} FfiBytes64;

typedef struct FfiBytes64 FfiSignature;

/* Block header: id, previous/own hash, timestamp and signature. */
typedef struct FfiBlockHeader {
  FfiBlockId block_id;
  FfiHashType prev_block_hash;
  FfiHashType hash;
  FfiTimestamp timestamp;
  FfiSignature signature;
} FfiBlockHeader;

/**
 * Program ID - 8 u32 values (32 bytes total).
 */
typedef struct FfiProgramId {
  uint32_t data[8];
} FfiProgramId;

typedef struct FfiBytes32 FfiAccountId;

/* Vector of account ids: raw parts (pointer, length, capacity) of a Rust Vec. */
typedef struct FfiVec_FfiAccountId {
  FfiAccountId *entries;
  uintptr_t len;
  uintptr_t capacity;
} FfiVec_FfiAccountId;

typedef struct FfiVec_FfiAccountId FfiAccountIdList;

/**
 * U128 - 16 bytes little endian.
 */
typedef struct FfiU128 {
  uint8_t data[16];
} FfiU128;

typedef struct FfiU128 FfiNonce;

typedef struct FfiVec_FfiNonce {
  FfiNonce *entries;
  uintptr_t len;
  uintptr_t capacity;
} FfiVec_FfiNonce;

typedef struct FfiVec_FfiNonce FfiNonceList;

typedef struct FfiVec_u32 {
  uint32_t *entries;
  uintptr_t len;
  uintptr_t capacity;
} FfiVec_u32;

typedef struct FfiVec_u32 FfiInstructionDataList;

typedef struct FfiPublicMessage {
  struct FfiProgramId program_id;
  FfiAccountIdList account_ids;
  FfiNonceList nonces;
  FfiInstructionDataList instruction_data;
} FfiPublicMessage;

typedef struct FfiBytes32 FfiPublicKey;

/* A signature paired with a public key. */
typedef struct FfiSignaturePubKeyEntry {
  FfiSignature signature;
  FfiPublicKey public_key;
} FfiSignaturePubKeyEntry;

typedef struct FfiVec_FfiSignaturePubKeyEntry {
  struct FfiSignaturePubKeyEntry *entries;
  uintptr_t len;
  uintptr_t capacity;
} FfiVec_FfiSignaturePubKeyEntry;

typedef struct FfiVec_FfiSignaturePubKeyEntry FfiSignaturePubKeyList;
typedef struct FfiPublicTransactionBody {
  FfiHashType hash;
  struct FfiPublicMessage message;
  FfiSignaturePubKeyList witness_set;
} FfiPublicTransactionBody;

/**
 * Account data structure - C-compatible version of nssa Account.
 *
 * Note: `balance` and `nonce` are u128 values represented as little-endian
 * byte arrays since C doesn't have native u128 support.
 */
typedef struct FfiAccount {
  struct FfiProgramId program_owner;
  /**
   * Balance as little-endian [u8; 16].
   */
  struct FfiU128 balance;
  /**
   * Pointer to account data bytes.
   */
  uint8_t *data;
  /**
   * Length of account data.
   */
  uintptr_t data_len;
  /**
   * Capacity of account data.
   */
  uintptr_t data_cap;
  /**
   * Nonce as little-endian [u8; 16].
   */
  struct FfiU128 nonce;
} FfiAccount;

typedef struct FfiVec_FfiAccount {
  struct FfiAccount *entries;
  uintptr_t len;
  uintptr_t capacity;
} FfiVec_FfiAccount;

typedef struct FfiVec_FfiAccount FfiAccountList;

typedef struct FfiVec_u8 {
  uint8_t *entries;
  uintptr_t len;
  uintptr_t capacity;
} FfiVec_u8;

typedef struct FfiVec_u8 FfiVecU8;

typedef struct FfiEncryptedAccountData {
  FfiVecU8 ciphertext;
  FfiVecU8 epk;
  uint8_t view_tag;
} FfiEncryptedAccountData;

typedef struct FfiVec_FfiEncryptedAccountData {
  struct FfiEncryptedAccountData *entries;
  uintptr_t len;
  uintptr_t capacity;
} FfiVec_FfiEncryptedAccountData;

typedef struct FfiVec_FfiEncryptedAccountData FfiEncryptedAccountDataList;

typedef struct FfiVec_FfiBytes32 {
  struct FfiBytes32 *entries;
  uintptr_t len;
  uintptr_t capacity;
} FfiVec_FfiBytes32;

typedef struct FfiVec_FfiBytes32 FfiVecBytes32;

typedef struct FfiNullifierCommitmentSet {
  struct FfiBytes32 nullifier;
  struct FfiBytes32 commitment_set_digest;
} FfiNullifierCommitmentSet;

typedef struct FfiVec_FfiNullifierCommitmentSet {
  struct FfiNullifierCommitmentSet *entries;
  uintptr_t len;
  uintptr_t capacity;
} FfiVec_FfiNullifierCommitmentSet;

typedef struct FfiVec_FfiNullifierCommitmentSet FfiNullifierCommitmentSetList;

typedef struct FfiPrivacyPreservingMessage {
  FfiAccountIdList public_account_ids;
  FfiNonceList nonces;
  FfiAccountList public_post_states;
  FfiEncryptedAccountDataList encrypted_private_post_states;
  FfiVecBytes32 new_commitments;
  FfiNullifierCommitmentSetList new_nullifiers;
  uint64_t block_validity_window[2];
  uint64_t timestamp_validity_window[2];
} FfiPrivacyPreservingMessage;

typedef FfiVecU8 FfiProof;

typedef struct FfiPrivateTransactionBody {
  FfiHashType hash;
  struct FfiPrivacyPreservingMessage message;
  FfiSignaturePubKeyList witness_set;
  FfiProof proof;
} FfiPrivateTransactionBody;

typedef FfiVecU8 FfiProgramDeploymentMessage;

typedef struct FfiProgramDeploymentTransactionBody {
  FfiHashType hash;
  FfiProgramDeploymentMessage message;
} FfiProgramDeploymentTransactionBody;

/*
 * Union-like body; presumably only the member matching `FfiTransaction.kind`
 * is populated — confirm against the Rust side before relying on it.
 */
typedef struct FfiTransactionBody {
  struct FfiPublicTransactionBody *public_body;
  struct FfiPrivateTransactionBody *private_body;
  struct FfiProgramDeploymentTransactionBody *program_deployment_body;
} FfiTransactionBody;

typedef struct FfiTransaction {
  struct FfiTransactionBody body;
  enum FfiTransactionKind kind;
} FfiTransaction;

typedef struct FfiVec_FfiTransaction {
  struct FfiTransaction *entries;
  uintptr_t len;
  uintptr_t capacity;
} FfiVec_FfiTransaction;

typedef struct FfiVec_FfiTransaction FfiBlockBody;

typedef struct FfiBytes32 FfiMsgId;

typedef struct FfiBlock {
  struct FfiBlockHeader header;
  FfiBlockBody body;
  enum FfiBedrockStatus bedrock_status;
  FfiMsgId bedrock_parent_id;
} FfiBlock;

/* Optional block: `value` is valid only when `is_some` is true. */
typedef struct FfiOption_FfiBlock {
  struct FfiBlock *value;
  bool is_some;
} FfiOption_FfiBlock;

typedef struct FfiOption_FfiBlock FfiBlockOpt;
/**
 * Simple wrapper around a pointer to a value or an error.
 *
 * Pointer is not guaranteed. You should check the error field before
 * dereferencing the pointer.
 */
typedef struct PointerResult_FfiBlockOpt__OperationStatus {
  FfiBlockOpt *value;
  enum OperationStatus error;
} PointerResult_FfiBlockOpt__OperationStatus;

/**
 * Simple wrapper around a pointer to a value or an error.
 *
 * Pointer is not guaranteed. You should check the error field before
 * dereferencing the pointer.
 */
typedef struct PointerResult_FfiAccount__OperationStatus {
  struct FfiAccount *value;
  enum OperationStatus error;
} PointerResult_FfiAccount__OperationStatus;

/* Optional transaction: `value` is valid only when `is_some` is true. */
typedef struct FfiOption_FfiTransaction {
  struct FfiTransaction *value;
  bool is_some;
} FfiOption_FfiTransaction;

/**
 * Simple wrapper around a pointer to a value or an error.
 *
 * Pointer is not guaranteed. You should check the error field before
 * dereferencing the pointer.
 */
typedef struct PointerResult_FfiOption_FfiTransaction_____OperationStatus {
  struct FfiOption_FfiTransaction *value;
  enum OperationStatus error;
} PointerResult_FfiOption_FfiTransaction_____OperationStatus;

typedef struct FfiVec_FfiBlock {
  struct FfiBlock *entries;
  uintptr_t len;
  uintptr_t capacity;
} FfiVec_FfiBlock;

/**
 * Simple wrapper around a pointer to a value or an error.
 *
 * Pointer is not guaranteed. You should check the error field before
 * dereferencing the pointer.
 */
typedef struct PointerResult_FfiVec_FfiBlock_____OperationStatus {
  struct FfiVec_FfiBlock *value;
  enum OperationStatus error;
} PointerResult_FfiVec_FfiBlock_____OperationStatus;

/* Optional u64: `value` is read only when `is_some` is true. */
typedef struct FfiOption_u64 {
  uint64_t *value;
  bool is_some;
} FfiOption_u64;

/**
 * Simple wrapper around a pointer to a value or an error.
 *
 * Pointer is not guaranteed. You should check the error field before
 * dereferencing the pointer.
 */
typedef struct PointerResult_FfiVec_FfiTransaction_____OperationStatus {
  struct FfiVec_FfiTransaction *value;
  enum OperationStatus error;
} PointerResult_FfiVec_FfiTransaction_____OperationStatus;
/**
 * Creates and starts an indexer based on the provided
 * configuration file path.
 *
 * # Arguments
 *
 * - `config_path`: A pointer to a string representing the path to the configuration file.
 * - `port`: Number representing a port, on which the indexer's RPC will start.
 *
 * # Returns
 *
 * An `InitializedIndexerServiceFFIResult` containing either a pointer to the
 * initialized `IndexerServiceFFI` or an error code.
 */
InitializedIndexerServiceFFIResult start_indexer(const char *config_path, uint16_t port);

/**
 * Stops and frees the resources associated with the given indexer service.
 *
 * # Arguments
 *
 * - `indexer`: A pointer to the `IndexerServiceFFI` instance to be stopped.
 *
 * # Returns
 *
 * An `OperationStatus` indicating success or failure.
 *
 * # Safety
 *
 * The caller must ensure that:
 * - `indexer` is a valid pointer to a `IndexerServiceFFI` instance
 * - The `IndexerServiceFFI` instance was created by this library
 * - The pointer will not be used after this function returns
 */
enum OperationStatus stop_indexer(struct IndexerServiceFFI *indexer);

/**
 * # Safety
 * It's up to the caller to pass a proper pointer, if somehow from c/c++ side
 * this is called with a type which doesn't come from a returned `CString` it
 * will cause a segfault.
 */
void free_cstring(char *block);

/**
 * Query the last block id from indexer.
 *
 * # Arguments
 *
 * - `indexer`: A pointer to the `IndexerServiceFFI` instance to be queried.
 *
 * # Returns
 *
 * A `PointerResult<u64, OperationStatus>` indicating success or failure.
 *
 * # Safety
 *
 * The caller must ensure that:
 * - `indexer` is a valid pointer to a `IndexerServiceFFI` instance
 */
struct PointerResult_u64__OperationStatus query_last_block(const struct IndexerServiceFFI *indexer);
/**
 * Query the block by id from indexer.
 *
 * # Arguments
 *
 * - `indexer`: A pointer to the `IndexerServiceFFI` instance to be queried.
 * - `block_id`: `u64` number of block id
 *
 * # Returns
 *
 * A `PointerResult<FfiBlockOpt, OperationStatus>` indicating success or failure.
 *
 * # Safety
 *
 * The caller must ensure that:
 * - `indexer` is a valid pointer to a `IndexerServiceFFI` instance
 */
struct PointerResult_FfiBlockOpt__OperationStatus query_block(const struct IndexerServiceFFI *indexer,
                                                              FfiBlockId block_id);

/**
 * Query the block by hash from indexer.
 *
 * # Arguments
 *
 * - `indexer`: A pointer to the `IndexerServiceFFI` instance to be queried.
 * - `hash`: `FfiHashType` - hash of block
 *
 * # Returns
 *
 * A `PointerResult<FfiBlockOpt, OperationStatus>` indicating success or failure.
 *
 * # Safety
 *
 * The caller must ensure that:
 * - `indexer` is a valid pointer to a `IndexerServiceFFI` instance
 */
struct PointerResult_FfiBlockOpt__OperationStatus query_block_by_hash(const struct IndexerServiceFFI *indexer,
                                                                      FfiHashType hash);

/**
 * Query the account by id from indexer.
 *
 * # Arguments
 *
 * - `indexer`: A pointer to the `IndexerServiceFFI` instance to be queried.
 * - `account_id`: `FfiAccountId` - id of queried account
 *
 * # Returns
 *
 * A `PointerResult<FfiAccount, OperationStatus>` indicating success or failure.
 *
 * # Safety
 *
 * The caller must ensure that:
 * - `indexer` is a valid pointer to a `IndexerServiceFFI` instance
 */
struct PointerResult_FfiAccount__OperationStatus query_account(const struct IndexerServiceFFI *indexer,
                                                               FfiAccountId account_id);

/**
 * Query the transaction by hash from indexer.
 *
 * # Arguments
 *
 * - `indexer`: A pointer to the `IndexerServiceFFI` instance to be queried.
 * - `hash`: `FfiHashType` - hash of transaction
 *
 * # Returns
 *
 * A `PointerResult<FfiOption<FfiTransaction>, OperationStatus>` indicating success or failure.
 *
 * # Safety
 *
 * The caller must ensure that:
 * - `indexer` is a valid pointer to a `IndexerServiceFFI` instance
 */
struct PointerResult_FfiOption_FfiTransaction_____OperationStatus query_transaction(const struct IndexerServiceFFI *indexer,
                                                                                    FfiHashType hash);

/**
 * Query the blocks by block range from indexer.
 *
 * # Arguments
 *
 * - `indexer`: A pointer to the `IndexerServiceFFI` instance to be queried.
 * - `before`: `FfiOption<u64>` - end block of query
 * - `limit`: `u64` - number of blocks to query before `before`
 *
 * # Returns
 *
 * A `PointerResult<FfiVec<FfiBlock>, OperationStatus>` indicating success or failure.
 *
 * # Safety
 *
 * The caller must ensure that:
 * - `indexer` is a valid pointer to a `IndexerServiceFFI` instance
 */
struct PointerResult_FfiVec_FfiBlock_____OperationStatus query_block_vec(const struct IndexerServiceFFI *indexer,
                                                                         struct FfiOption_u64 before,
                                                                         uint64_t limit);

/**
 * Query the transactions range by account id from indexer.
 *
 * # Arguments
 *
 * - `indexer`: A pointer to the `IndexerServiceFFI` instance to be queried.
 * - `account_id`: `FfiAccountId` - id of queried account
 * - `offset`: `u64` - first tx id of query
 * - `limit`: `u64` - number of tx ids to query after `offset`
 *
 * # Returns
 *
 * A `PointerResult<FfiVec<FfiTransaction>, OperationStatus>` indicating success or failure.
 *
 * # Safety
 *
 * The caller must ensure that:
 * - `indexer` is a valid pointer to a `IndexerServiceFFI` instance
 */
struct PointerResult_FfiVec_FfiTransaction_____OperationStatus query_transactions_by_account(const struct IndexerServiceFFI *indexer,
                                                                                             FfiAccountId account_id,
                                                                                             uint64_t offset,
                                                                                             uint64_t limit);
/**
 * Frees the resources associated with the given ffi account.
 *
 * # Arguments
 *
 * - `val`: An instance of `FfiAccount`.
 *
 * # Returns
 *
 * void.
 *
 * # Safety
 *
 * The caller must ensure that:
 * - `val` is a valid instance of `FfiAccount`.
 */
void free_ffi_account(struct FfiAccount val);

/**
 * Frees the resources associated with the given ffi block.
 *
 * # Arguments
 *
 * - `val`: An instance of `FfiBlock`.
 *
 * # Returns
 *
 * void.
 *
 * # Safety
 *
 * The caller must ensure that:
 * - `val` is a valid instance of `FfiBlock`.
 */
void free_ffi_block(struct FfiBlock val);

/**
 * Frees the resources associated with the given ffi block option.
 *
 * # Arguments
 *
 * - `val`: An instance of `FfiBlockOpt`.
 *
 * # Returns
 *
 * void.
 *
 * # Safety
 *
 * The caller must ensure that:
 * - `val` is a valid instance of `FfiBlockOpt`.
 */
void free_ffi_block_opt(FfiBlockOpt val);

/**
 * Frees the resources associated with the given ffi block vector.
 *
 * # Arguments
 *
 * - `val`: An instance of `FfiVec<FfiBlock>`.
 *
 * # Returns
 *
 * void.
 *
 * # Safety
 *
 * The caller must ensure that:
 * - `val` is a valid instance of `FfiVec<FfiBlock>`.
 */
void free_ffi_block_vec(struct FfiVec_FfiBlock val);

/**
 * Frees the resources associated with the given ffi transaction.
 *
 * # Arguments
 *
 * - `val`: An instance of `FfiTransaction`.
 *
 * # Returns
 *
 * void.
 *
 * # Safety
 *
 * The caller must ensure that:
 * - `val` is a valid instance of `FfiTransaction`.
 */
void free_ffi_transaction(struct FfiTransaction val);

/**
 * Frees the resources associated with the given ffi transaction option.
 *
 * # Arguments
 *
 * - `val`: An instance of `FfiOption<FfiTransaction>`.
 *
 * # Returns
 *
 * void.
 *
 * # Safety
 *
 * The caller must ensure that:
 * - `val` is a valid instance of `FfiOption<FfiTransaction>`.
 */
void free_ffi_transaction_opt(struct FfiOption_FfiTransaction val);

/**
 * Frees the resources associated with the given vector of ffi transactions.
 *
 * # Arguments
 *
 * - `val`: An instance of `FfiVec<FfiTransaction>`.
 *
 * # Returns
 *
 * void.
 *
 * # Safety
 *
 * The caller must ensure that:
 * - `val` is a valid instance of `FfiVec<FfiTransaction>`.
 */
void free_ffi_transaction_vec(struct FfiVec_FfiTransaction val);

/* Returns true when the status represents success. */
bool is_ok(const enum OperationStatus *self);

/* Returns true when the status represents a failure. */
bool is_error(const enum OperationStatus *self);

View File

@ -0,0 +1,36 @@
use std::net::SocketAddr;
use url::Url;
use crate::OperationStatus;
/// URL scheme used when building a client-facing URL for a bound socket.
#[derive(Debug, Clone, Copy)]
pub enum UrlProtocol {
    /// Plain HTTP (`http://`).
    Http,
    /// WebSocket (`ws://`).
    Ws,
}
impl std::fmt::Display for UrlProtocol {
    /// Renders the protocol as its URL scheme (`http` / `ws`).
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        let scheme = match self {
            Self::Http => "http",
            Self::Ws => "ws",
        };
        f.write_str(scheme)
    }
}
/// Builds a client-connectable URL for `addr` with `protocol` as the scheme.
///
/// When binding to port 0 the server ends up on `0.0.0.0:<random_port>`,
/// which clients cannot connect to; unspecified addresses are therefore
/// rewritten to `127.0.0.1` so the produced URL is reachable. Concrete
/// addresses are used verbatim.
///
/// # Errors
///
/// Returns `OperationStatus::InitializationError` (after logging) if the
/// assembled string fails to parse as a URL.
pub(crate) fn addr_to_url(protocol: UrlProtocol, addr: SocketAddr) -> Result<Url, OperationStatus> {
    let host_port = if addr.ip().is_unspecified() {
        format!("127.0.0.1:{}", addr.port())
    } else {
        addr.to_string()
    };
    format!("{protocol}://{host_port}").parse().map_err(|e| {
        log::error!("Could not parse indexer url: {e}");
        OperationStatus::InitializationError
    })
}

View File

@ -2,7 +2,15 @@ use std::{ffi::c_char, path::PathBuf};
use tokio::runtime::Runtime;
use crate::{IndexerServiceFFI, api::PointerResult, errors::OperationStatus};
use crate::{
IndexerServiceFFI,
api::{
PointerResult,
client::{UrlProtocol, addr_to_url},
},
client::{IndexerClient, IndexerClientTrait as _},
errors::OperationStatus,
};
pub type InitializedIndexerServiceFFIResult = PointerResult<IndexerServiceFFI, OperationStatus>;
@ -67,7 +75,13 @@ fn setup_indexer(
OperationStatus::InitializationError
})?;
Ok(IndexerServiceFFI::new(indexer_handle, rt))
let indexer_url = addr_to_url(UrlProtocol::Ws, indexer_handle.addr())?;
let indexer_client = rt.block_on(IndexerClient::new(&indexer_url)).map_err(|e| {
log::error!("Could not start indexer client: {e}");
OperationStatus::InitializationError
})?;
Ok(IndexerServiceFFI::new(indexer_handle, rt, indexer_client))
}
/// Stops and frees the resources associated with the given indexer service.

View File

@ -1,5 +1,8 @@
pub use result::PointerResult;
pub mod client;
pub mod lifecycle;
pub mod memory;
pub mod query;
pub mod result;
pub mod types;

View File

@ -0,0 +1,334 @@
use indexer_service_protocol::{AccountId, HashType};
use indexer_service_rpc::RpcClient as _;
use crate::{
IndexerServiceFFI,
api::{
PointerResult,
types::{
FfiAccountId, FfiBlockId, FfiHashType, FfiOption, FfiVec,
account::FfiAccount,
block::{FfiBlock, FfiBlockOpt},
transaction::FfiTransaction,
},
},
errors::OperationStatus,
};
/// Query the last block id from indexer.
///
/// # Arguments
///
/// - `indexer`: A pointer to the `IndexerServiceFFI` instance to be queried.
///
/// # Returns
///
/// A `PointerResult<u64, OperationStatus>` indicating success or failure.
///
/// # Safety
///
/// The caller must ensure that:
/// - `indexer` is a valid pointer to a `IndexerServiceFFI` instance
#[unsafe(no_mangle)]
pub unsafe extern "C" fn query_last_block(
    indexer: *const IndexerServiceFFI,
) -> PointerResult<u64, OperationStatus> {
    if indexer.is_null() {
        log::error!("Attempted to query a null indexer pointer. This is a bug. Aborting.");
        return PointerResult::from_error(OperationStatus::NullPointer);
    }
    // SAFETY: checked non-null above; caller guarantees it points to a live
    // `IndexerServiceFFI`.
    let indexer = unsafe { &*indexer };
    let client = unsafe { indexer.client() };
    let runtime = unsafe { indexer.runtime() };
    match runtime.block_on(client.get_last_finalized_block_id()) {
        Ok(block_id) => PointerResult::from_value(block_id),
        Err(_) => PointerResult::from_error(OperationStatus::ClientError),
    }
}
/// Query the block by id from indexer.
///
/// # Arguments
///
/// - `indexer`: A pointer to the `IndexerServiceFFI` instance to be queried.
/// - `block_id`: `u64` number of block id
///
/// # Returns
///
/// A `PointerResult<FfiBlockOpt, OperationStatus>` indicating success or failure.
///
/// # Safety
///
/// The caller must ensure that:
/// - `indexer` is a valid pointer to a `IndexerServiceFFI` instance
#[unsafe(no_mangle)]
pub unsafe extern "C" fn query_block(
    indexer: *const IndexerServiceFFI,
    block_id: FfiBlockId,
) -> PointerResult<FfiBlockOpt, OperationStatus> {
    if indexer.is_null() {
        log::error!("Attempted to query a null indexer pointer. This is a bug. Aborting.");
        return PointerResult::from_error(OperationStatus::NullPointer);
    }
    // SAFETY: checked non-null above; caller guarantees it points to a live
    // `IndexerServiceFFI`.
    let indexer = unsafe { &*indexer };
    let client = unsafe { indexer.client() };
    let runtime = unsafe { indexer.runtime() };
    match runtime.block_on(client.get_block_by_id(block_id)) {
        Ok(block_opt) => {
            // A successful query with no matching block is a some/none
            // distinction, not an error.
            let ffi = match block_opt {
                Some(block) => FfiBlockOpt::from_value(block.into()),
                None => FfiBlockOpt::from_none(),
            };
            PointerResult::from_value(ffi)
        }
        Err(_) => PointerResult::from_error(OperationStatus::ClientError),
    }
}
/// Query the block by hash from indexer.
///
/// # Arguments
///
/// - `indexer`: A pointer to the `IndexerServiceFFI` instance to be queried.
/// - `hash`: `FfiHashType` - hash of block
///
/// # Returns
///
/// A `PointerResult<FfiBlockOpt, OperationStatus>` indicating success or failure.
///
/// # Safety
///
/// The caller must ensure that:
/// - `indexer` is a valid pointer to a `IndexerServiceFFI` instance
#[unsafe(no_mangle)]
pub unsafe extern "C" fn query_block_by_hash(
    indexer: *const IndexerServiceFFI,
    hash: FfiHashType,
) -> PointerResult<FfiBlockOpt, OperationStatus> {
    if indexer.is_null() {
        log::error!("Attempted to query a null indexer pointer. This is a bug. Aborting.");
        return PointerResult::from_error(OperationStatus::NullPointer);
    }
    // SAFETY: checked non-null above; caller guarantees it points to a live
    // `IndexerServiceFFI`.
    let indexer = unsafe { &*indexer };
    let client = unsafe { indexer.client() };
    let runtime = unsafe { indexer.runtime() };
    match runtime.block_on(client.get_block_by_hash(HashType(hash.data))) {
        Ok(block_opt) => {
            // No matching block is a some/none distinction, not an error.
            let ffi = match block_opt {
                Some(block) => FfiBlockOpt::from_value(block.into()),
                None => FfiBlockOpt::from_none(),
            };
            PointerResult::from_value(ffi)
        }
        Err(_) => PointerResult::from_error(OperationStatus::ClientError),
    }
}
/// Query the account by id from indexer.
///
/// # Arguments
///
/// - `indexer`: A pointer to the `IndexerServiceFFI` instance to be queried.
/// - `account_id`: `FfiAccountId` - id of queried account
///
/// # Returns
///
/// A `PointerResult<FfiAccount, OperationStatus>` indicating success or failure.
///
/// # Safety
///
/// The caller must ensure that:
/// - `indexer` is a valid pointer to a `IndexerServiceFFI` instance
#[unsafe(no_mangle)]
pub unsafe extern "C" fn query_account(
indexer: *const IndexerServiceFFI,
account_id: FfiAccountId,
) -> PointerResult<FfiAccount, OperationStatus> {
if indexer.is_null() {
log::error!("Attempted to query a null indexer pointer. This is a bug. Aborting.");
return PointerResult::from_error(OperationStatus::NullPointer);
}
let indexer = unsafe { &*indexer };
let client = unsafe { indexer.client() };
let runtime = unsafe { indexer.runtime() };
runtime
.block_on(client.get_account(AccountId {
value: account_id.data,
}))
.map_or_else(
|_| PointerResult::from_error(OperationStatus::ClientError),
|acc| {
let acc_nssa: nssa::Account =
acc.try_into().expect("Source is in blocks, must fit");
PointerResult::from_value(acc_nssa.into())
},
)
}
/// Query the transaction by hash from indexer.
///
/// # Arguments
///
/// - `indexer`: A pointer to the `IndexerServiceFFI` instance to be queried.
/// - `hash`: `FfiHashType` - hash of transaction
///
/// # Returns
///
/// A `PointerResult<FfiOption<FfiTransaction>, OperationStatus>` indicating success or failure.
///
/// # Safety
///
/// The caller must ensure that:
/// - `indexer` is a valid pointer to a `IndexerServiceFFI` instance
#[unsafe(no_mangle)]
pub unsafe extern "C" fn query_transaction(
    indexer: *const IndexerServiceFFI,
    hash: FfiHashType,
) -> PointerResult<FfiOption<FfiTransaction>, OperationStatus> {
    if indexer.is_null() {
        log::error!("Attempted to query a null indexer pointer. This is a bug. Aborting.");
        return PointerResult::from_error(OperationStatus::NullPointer);
    }
    // SAFETY: checked non-null above; caller guarantees it points to a live
    // `IndexerServiceFFI`.
    let indexer = unsafe { &*indexer };
    let client = unsafe { indexer.client() };
    let runtime = unsafe { indexer.runtime() };
    match runtime.block_on(client.get_transaction(HashType(hash.data))) {
        Ok(tx_opt) => {
            // No matching transaction is a some/none distinction, not an error.
            let ffi = match tx_opt {
                Some(tx) => FfiOption::<FfiTransaction>::from_value(tx.into()),
                None => FfiOption::<FfiTransaction>::from_none(),
            };
            PointerResult::from_value(ffi)
        }
        Err(_) => PointerResult::from_error(OperationStatus::ClientError),
    }
}
/// Query the blocks by block range from indexer.
///
/// # Arguments
///
/// - `indexer`: A pointer to the `IndexerServiceFFI` instance to be queried.
/// - `before`: `FfiOption<u64>` - end block of query
/// - `limit`: `u64` - number of blocks to query before `before`
///
/// # Returns
///
/// A `PointerResult<FfiVec<FfiBlock>, OperationStatus>` indicating success or failure.
///
/// # Safety
///
/// The caller must ensure that:
/// - `indexer` is a valid pointer to a `IndexerServiceFFI` instance
/// - if `before.is_some` is true, `before.value` points to a valid `u64`
#[unsafe(no_mangle)]
pub unsafe extern "C" fn query_block_vec(
    indexer: *const IndexerServiceFFI,
    before: FfiOption<u64>,
    limit: u64,
) -> PointerResult<FfiVec<FfiBlock>, OperationStatus> {
    if indexer.is_null() {
        log::error!("Attempted to query a null indexer pointer. This is a bug. Aborting.");
        return PointerResult::from_error(OperationStatus::NullPointer);
    }
    // SAFETY: checked non-null above; caller guarantees it points to a live
    // `IndexerServiceFFI`.
    let indexer = unsafe { &*indexer };
    let client = unsafe { indexer.client() };
    let runtime = unsafe { indexer.runtime() };
    // Guard against a malformed option from the C side that claims
    // `is_some` but carries a null pointer: treat it as `None` rather than
    // dereferencing null (undefined behavior).
    let before_std =
        (before.is_some && !before.value.is_null()).then(|| unsafe { *before.value });
    runtime
        .block_on(client.get_blocks(before_std, limit))
        .map_or_else(
            |_| PointerResult::from_error(OperationStatus::ClientError),
            |block_vec| {
                PointerResult::from_value(
                    block_vec
                        .into_iter()
                        .map(Into::into)
                        .collect::<Vec<_>>()
                        .into(),
                )
            },
        )
}
/// Query the transactions range by account id from indexer.
///
/// # Arguments
///
/// - `indexer`: A pointer to the `IndexerServiceFFI` instance to be queried.
/// - `account_id`: `FfiAccountId` - id of queried account
/// - `offset`: `u64` - first tx id of query
/// - `limit`: `u64` - number of tx ids to query after `offset`
///
/// # Returns
///
/// A `PointerResult<FfiVec<FfiTransaction>, OperationStatus>` indicating success or failure.
///
/// # Safety
///
/// The caller must ensure that:
/// - `indexer` is a valid pointer to a `IndexerServiceFFI` instance
#[unsafe(no_mangle)]
pub unsafe extern "C" fn query_transactions_by_account(
    indexer: *const IndexerServiceFFI,
    account_id: FfiAccountId,
    offset: u64,
    limit: u64,
) -> PointerResult<FfiVec<FfiTransaction>, OperationStatus> {
    if indexer.is_null() {
        log::error!("Attempted to query a null indexer pointer. This is a bug. Aborting.");
        return PointerResult::from_error(OperationStatus::NullPointer);
    }
    // SAFETY: checked non-null above; caller guarantees it points to a live
    // `IndexerServiceFFI`.
    let indexer = unsafe { &*indexer };
    let client = unsafe { indexer.client() };
    let runtime = unsafe { indexer.runtime() };
    let request = client.get_transactions_by_account(
        AccountId {
            value: account_id.data,
        },
        offset,
        limit,
    );
    match runtime.block_on(request) {
        Ok(tx_vec) => {
            let entries: Vec<FfiTransaction> = tx_vec.into_iter().map(Into::into).collect();
            PointerResult::from_value(entries.into())
        }
        Err(_) => PointerResult::from_error(OperationStatus::ClientError),
    }
}

View File

@ -0,0 +1,119 @@
use indexer_service_protocol::ProgramId;
use crate::api::types::{FfiBytes32, FfiProgramId, FfiU128};
/// Account data structure - C-compatible version of nssa Account.
///
/// Note: `balance` and `nonce` are u128 values represented as little-endian
/// byte arrays since C doesn't have native u128 support.
#[repr(C)]
pub struct FfiAccount {
    pub program_owner: FfiProgramId,
    /// Balance as little-endian [u8; 16].
    pub balance: FfiU128,
    /// Pointer to account data bytes.
    ///
    /// This is the raw parts of a Rust `Vec<u8>` (see `From<nssa::Account>`):
    /// the struct owns the allocation and it must be released exactly once,
    /// via `free_ffi_account` or a consuming conversion.
    pub data: *mut u8,
    /// Length of account data.
    pub data_len: usize,
    /// Capacity of account data.
    pub data_cap: usize,
    /// Nonce as little-endian [u8; 16].
    pub nonce: FfiU128,
}

// Helper functions to convert between Rust and FFI types
impl From<&nssa::AccountId> for FfiBytes32 {
    fn from(id: &nssa::AccountId) -> Self {
        // Delegates to the inherent constructor, copying the 32-byte id.
        Self::from_account_id(id)
    }
}
impl From<nssa::Account> for FfiAccount {
    /// Converts an owned account into its C representation, transferring
    /// ownership of the data buffer to the returned value.
    fn from(value: nssa::Account) -> Self {
        let nssa::Account {
            program_owner,
            balance,
            data,
            nonce,
        } = value;
        // Hand the backing buffer over as raw parts; `free_ffi_account`
        // (or a consuming conversion) reassembles and drops it later.
        let (ptr, len, cap) = data.into_inner().into_raw_parts();
        Self {
            program_owner: FfiProgramId {
                data: program_owner,
            },
            balance: balance.into(),
            data: ptr,
            data_len: len,
            data_cap: cap,
            nonce: nonce.0.into(),
        }
    }
}
// Consumes the FFI value and takes back ownership of its data buffer.
impl From<FfiAccount> for indexer_service_protocol::Account {
    fn from(value: FfiAccount) -> Self {
        let FfiAccount {
            program_owner,
            balance,
            data,
            data_cap,
            data_len,
            nonce,
        } = value;
        Self {
            program_owner: ProgramId(program_owner.data),
            balance: balance.into(),
            // SAFETY: `value` is consumed, so ownership of the buffer moves
            // here exactly once. Assumes (data, data_len, data_cap) originate
            // from `Vec::into_raw_parts` (as `From<nssa::Account>` produces)
            // — TODO confirm no other construction path exists on the C side.
            data: indexer_service_protocol::Data(unsafe {
                Vec::from_raw_parts(data, data_len, data_cap)
            }),
            nonce: nonce.into(),
        }
    }
}
impl From<&FfiAccount> for indexer_service_protocol::Account {
    /// Converts a *borrowed* FFI account into an owned protocol account.
    ///
    /// Bug fix: the previous implementation rebuilt a `Vec` with
    /// `Vec::from_raw_parts` from a value it did not own, so both the
    /// borrowed `FfiAccount` and the returned `Account` ended up owning the
    /// same allocation — a double free once both were dropped. The bytes are
    /// copied instead, leaving ownership with the borrowed value.
    fn from(value: &FfiAccount) -> Self {
        // SAFETY: caller guarantees `value` is a valid `FfiAccount`, i.e.
        // `data` points to at least `data_len` initialized bytes. A buffer
        // produced by `Vec::into_raw_parts` is non-null even when empty.
        let data_copy =
            unsafe { std::slice::from_raw_parts(value.data, value.data_len) }.to_vec();
        Self {
            program_owner: ProgramId(value.program_owner.data),
            balance: value.balance.into(),
            data: indexer_service_protocol::Data(data_copy),
            nonce: value.nonce.into(),
        }
    }
}
/// Frees the resources associated with the given ffi account.
///
/// # Arguments
///
/// - `val`: An instance of `FfiAccount`.
///
/// # Returns
///
/// void.
///
/// # Safety
///
/// The caller must ensure that:
/// - `val` is a valid instance of `FfiAccount`.
#[unsafe(no_mangle)]
pub unsafe extern "C" fn free_ffi_account(val: FfiAccount) {
    // Reconstructing the owned protocol account reclaims the data buffer;
    // dropping it releases the allocation.
    drop(indexer_service_protocol::Account::from(val));
}

View File

@ -0,0 +1,199 @@
use indexer_service_protocol::{
BedrockStatus, Block, BlockHeader, HashType, MantleMsgId, Signature,
};
use crate::api::types::{
FfiBlockId, FfiHashType, FfiMsgId, FfiOption, FfiSignature, FfiTimestamp, FfiVec,
transaction::free_ffi_transaction_vec, vectors::FfiBlockBody,
};
/// C-compatible block: header, transaction list, and bedrock metadata.
#[repr(C)]
pub struct FfiBlock {
    pub header: FfiBlockHeader,
    /// Owned transaction vector; release via `free_ffi_block`.
    pub body: FfiBlockBody,
    pub bedrock_status: FfiBedrockStatus,
    pub bedrock_parent_id: FfiMsgId,
}
impl From<Block> for FfiBlock {
    /// Converts an owned protocol `Block` into its C representation,
    /// converting each transaction along the way.
    fn from(value: Block) -> Self {
        let Block {
            header,
            body,
            bedrock_status,
            bedrock_parent_id,
        } = value;
        let transactions: Vec<_> = body.transactions.into_iter().map(Into::into).collect();
        Self {
            header: header.into(),
            body: transactions.into(),
            bedrock_status: bedrock_status.into(),
            bedrock_parent_id: bedrock_parent_id.into(),
        }
    }
}
/// Optional block, used where a query may find no matching block.
pub type FfiBlockOpt = FfiOption<FfiBlock>;

/// C-compatible block header. All fields are plain values (no heap data).
#[repr(C)]
pub struct FfiBlockHeader {
    pub block_id: FfiBlockId,
    pub prev_block_hash: FfiHashType,
    pub hash: FfiHashType,
    pub timestamp: FfiTimestamp,
    pub signature: FfiSignature,
}
impl From<BlockHeader> for FfiBlockHeader {
    /// Converts a protocol header field-by-field; the integer fields pass
    /// through unchanged, the hashes and signature go through their FFI
    /// wrapper conversions.
    fn from(value: BlockHeader) -> Self {
        let BlockHeader {
            block_id,
            prev_block_hash,
            hash,
            timestamp,
            signature,
        } = value;
        let prev_block_hash = prev_block_hash.into();
        let hash = hash.into();
        let signature = signature.into();
        Self {
            block_id,
            prev_block_hash,
            hash,
            timestamp,
            signature,
        }
    }
}
/// C-compatible mirror of `BedrockStatus`; discriminants start at 0.
#[repr(C)]
pub enum FfiBedrockStatus {
    Pending = 0x0,
    Safe,
    Finalized,
}
impl From<BedrockStatus> for FfiBedrockStatus {
    /// One-to-one mapping from the protocol status to its FFI mirror.
    fn from(value: BedrockStatus) -> Self {
        match value {
            BedrockStatus::Pending => Self::Pending,
            BedrockStatus::Safe => Self::Safe,
            BedrockStatus::Finalized => Self::Finalized,
        }
    }
}

impl From<FfiBedrockStatus> for BedrockStatus {
    /// Inverse of the mapping above; the two enums have identical variants.
    fn from(value: FfiBedrockStatus) -> Self {
        match value {
            FfiBedrockStatus::Pending => Self::Pending,
            FfiBedrockStatus::Safe => Self::Safe,
            FfiBedrockStatus::Finalized => Self::Finalized,
        }
    }
}
/// Frees the resources associated with the given ffi block.
///
/// # Arguments
///
/// - `val`: An instance of `FfiBlock`.
///
/// # Returns
///
/// void.
///
/// # Safety
///
/// The caller must ensure that:
/// - `val` is a valid instance of `FfiBlock`.
#[unsafe(no_mangle)]
pub unsafe extern "C" fn free_ffi_block(val: FfiBlock) {
    // We don't really need all the casts, but just in case
    // All except `ffi_tx_ffi_vec` is Copy types, so no need for Drop
    let _ = BlockHeader {
        block_id: val.header.block_id,
        prev_block_hash: HashType(val.header.prev_block_hash.data),
        hash: HashType(val.header.hash.data),
        timestamp: val.header.timestamp,
        signature: Signature(val.header.signature.data),
    };
    let ffi_tx_ffi_vec = val.body;
    #[expect(clippy::let_underscore_must_use, reason = "No use for this Copy type")]
    let _: BedrockStatus = val.bedrock_status.into();
    let _ = MantleMsgId(val.bedrock_parent_id.data);
    // The transaction vector is the only field owning heap memory; freeing
    // it releases everything this block allocated.
    unsafe {
        free_ffi_transaction_vec(ffi_tx_ffi_vec);
    };
}
/// Frees the resources associated with the given ffi block option.
///
/// # Arguments
///
/// - `val`: An instance of `FfiBlockOpt`.
///
/// # Returns
///
/// void.
///
/// # Safety
///
/// The caller must ensure that:
/// - `val` is a valid instance of `FfiBlockOpt`.
#[unsafe(no_mangle)]
pub unsafe extern "C" fn free_ffi_block_opt(val: FfiBlockOpt) {
if val.is_some {
let value = unsafe { Box::from_raw(val.value) };
// We don't really need all the casts, but just in case
// All except `ffi_tx_ffi_vec` is Copy types, so no need for Drop
let _ = BlockHeader {
block_id: value.header.block_id,
prev_block_hash: HashType(value.header.prev_block_hash.data),
hash: HashType(value.header.hash.data),
timestamp: value.header.timestamp,
signature: Signature(value.header.signature.data),
};
let ffi_tx_ffi_vec = value.body;
#[expect(clippy::let_underscore_must_use, reason = "No use for this Copy type")]
let _: BedrockStatus = value.bedrock_status.into();
let _ = MantleMsgId(value.bedrock_parent_id.data);
unsafe {
free_ffi_transaction_vec(ffi_tx_ffi_vec);
};
}
}
/// Frees the resources associated with the given ffi block vector.
///
/// # Arguments
///
/// - `val`: An instance of `FfiVec<FfiBlock>`.
///
/// # Returns
///
/// void.
///
/// # Safety
///
/// The caller must ensure that:
/// - `val` is a valid instance of `FfiVec<FfiBlock>`.
#[unsafe(no_mangle)]
pub unsafe extern "C" fn free_ffi_block_vec(val: FfiVec<FfiBlock>) {
    // Converting back to a `Vec` reclaims the outer allocation; each block
    // then frees the buffers it owns.
    for block in Vec::from(val) {
        unsafe { free_ffi_block(block) };
    }
}

View File

@ -0,0 +1,165 @@
use indexer_service_protocol::{AccountId, HashType, MantleMsgId, ProgramId, PublicKey, Signature};
pub mod account;
pub mod block;
pub mod transaction;
pub mod vectors;
/// 32-byte array type for `AccountId`, keys, hashes, etc.
#[repr(C)]
#[derive(Clone, Copy, Default)]
pub struct FfiBytes32 {
    pub data: [u8; 32],
}

/// 64-byte array type for signatures, etc.
#[repr(C)]
#[derive(Clone, Copy)]
pub struct FfiBytes64 {
    pub data: [u8; 64],
}

/// Program ID - 8 u32 values (32 bytes total).
#[repr(C)]
#[derive(Clone, Copy, Default)]
pub struct FfiProgramId {
    pub data: [u32; 8],
}

impl From<ProgramId> for FfiProgramId {
    // Copies the raw `[u32; 8]` out of the protocol newtype.
    fn from(value: ProgramId) -> Self {
        Self { data: value.0 }
    }
}

/// U128 - 16 bytes little endian.
#[repr(C)]
#[derive(Clone, Copy, Default)]
pub struct FfiU128 {
    pub data: [u8; 16],
}

impl FfiBytes32 {
    /// Create from a 32-byte array.
    #[must_use]
    pub const fn from_bytes(bytes: [u8; 32]) -> Self {
        Self { data: bytes }
    }

    /// Create from an `AccountId`.
    #[must_use]
    pub const fn from_account_id(id: &nssa::AccountId) -> Self {
        Self { data: *id.value() }
    }
}
impl From<u128> for FfiU128 {
    /// Encode the value as 16 little-endian bytes, the FFI wire layout.
    fn from(value: u128) -> Self {
        let data = value.to_le_bytes();
        Self { data }
    }
}
impl From<FfiU128> for u128 {
    /// Decode the 16 little-endian bytes back into a native `u128`.
    fn from(value: FfiU128) -> Self {
        u128::from_le_bytes(value.data)
    }
}
// Semantic aliases over the raw wrapper types, so FFI signatures read in
// domain terms while sharing a single C-layout representation.
pub type FfiHashType = FfiBytes32;
pub type FfiMsgId = FfiBytes32;
pub type FfiBlockId = u64;
pub type FfiTimestamp = u64;
pub type FfiSignature = FfiBytes64;
pub type FfiAccountId = FfiBytes32;
pub type FfiNonce = FfiU128;
pub type FfiPublicKey = FfiBytes32;
// Conversions from the protocol newtypes into their FFI byte mirrors.
// Each is a plain copy of the inner array; no allocation occurs.
impl From<HashType> for FfiHashType {
    fn from(value: HashType) -> Self {
        Self { data: value.0 }
    }
}
impl From<MantleMsgId> for FfiMsgId {
    fn from(value: MantleMsgId) -> Self {
        Self { data: value.0 }
    }
}
impl From<Signature> for FfiSignature {
    fn from(value: Signature) -> Self {
        Self { data: value.0 }
    }
}
impl From<AccountId> for FfiAccountId {
    // `AccountId` exposes a named `value` field (unlike the tuple newtypes above).
    fn from(value: AccountId) -> Self {
        Self { data: value.value }
    }
}
impl From<PublicKey> for FfiPublicKey {
    fn from(value: PublicKey) -> Self {
        Self { data: value.0 }
    }
}
/// C-layout view of a Rust `Vec<T>`: raw buffer pointer plus length and
/// capacity. Converting a `Vec` into this type leaks ownership to the caller;
/// it must eventually be converted back (or passed to a `free_*` function)
/// to avoid a leak.
#[repr(C)]
pub struct FfiVec<T> {
    pub entries: *mut T,
    pub len: usize,
    pub capacity: usize,
}
impl<T> From<Vec<T>> for FfiVec<T> {
    fn from(value: Vec<T>) -> Self {
        // NOTE(review): `Vec::into_raw_parts` is an unstable API — presumably
        // this crate builds on nightly; confirm against the toolchain config.
        let (entries, len, capacity) = value.into_raw_parts();
        Self {
            entries,
            len,
            capacity,
        }
    }
}
impl<T> From<FfiVec<T>> for Vec<T> {
    fn from(value: FfiVec<T>) -> Self {
        // SAFETY: callers must only pass values originally produced by
        // `FfiVec::from(Vec<T>)`, so the pointer/len/capacity triple is the
        // one handed out by the Rust allocator.
        unsafe { Self::from_raw_parts(value.entries, value.len, value.capacity) }
    }
}
impl<T> FfiVec<T> {
    /// # Safety
    ///
    /// `index` must be less than `self.len`.
    #[must_use]
    pub unsafe fn get(&self, index: usize) -> &T {
        // SAFETY: in-bounds offset per the caller contract above.
        let ptr = unsafe { self.entries.add(index) };
        // SAFETY: `ptr` points at an initialized element of the buffer.
        unsafe { &*ptr }
    }
}
/// C-layout optional value: a possibly-null heap pointer plus an explicit
/// `is_some` flag. A `Some` created by [`FfiOption::from_value`] owns a boxed
/// allocation that the consumer must eventually release.
#[repr(C)]
pub struct FfiOption<T> {
    pub value: *mut T,
    pub is_some: bool,
}
impl<T> FfiOption<T> {
    /// Box `val` on the heap and hand its raw pointer to the caller.
    pub fn from_value(val: T) -> Self {
        let boxed = Box::new(val);
        Self {
            value: Box::into_raw(boxed),
            is_some: true,
        }
    }
    /// The `None` representation: null pointer, `is_some == false`.
    #[must_use]
    pub const fn from_none() -> Self {
        Self {
            value: std::ptr::null_mut(),
            is_some: false,
        }
    }
}

View File

@ -0,0 +1,548 @@
use indexer_service_protocol::{
AccountId, Ciphertext, Commitment, CommitmentSetDigest, EncryptedAccountData,
EphemeralPublicKey, HashType, Nullifier, PrivacyPreservingMessage,
PrivacyPreservingTransaction, ProgramDeploymentMessage, ProgramDeploymentTransaction,
ProgramId, Proof, PublicKey, PublicMessage, PublicTransaction, Signature, Transaction,
ValidityWindow, WitnessSet,
};
use crate::api::types::{
FfiBytes32, FfiHashType, FfiOption, FfiProgramId, FfiPublicKey, FfiSignature, FfiVec,
vectors::{
FfiAccountIdList, FfiAccountList, FfiEncryptedAccountDataList, FfiInstructionDataList,
FfiNonceList, FfiNullifierCommitmentSetList, FfiProgramDeploymentMessage, FfiProof,
FfiSignaturePubKeyList, FfiVecBytes32, FfiVecU8,
},
};
#[repr(C)]
pub struct FfiPublicTransactionBody {
pub hash: FfiHashType,
pub message: FfiPublicMessage,
pub witness_set: FfiSignaturePubKeyList,
}
impl From<PublicTransaction> for FfiPublicTransactionBody {
fn from(value: PublicTransaction) -> Self {
let PublicTransaction {
hash,
message,
witness_set,
} = value;
Self {
hash: hash.into(),
message: message.into(),
witness_set: witness_set
.signatures_and_public_keys
.into_iter()
.map(Into::into)
.collect::<Vec<_>>()
.into(),
}
}
}
/// Reconstruct a native `PublicTransaction` from its FFI mirror.
///
/// Each `FfiVec` field is converted back into a `Vec`, which reclaims
/// ownership of the buffer originally leaked by `FfiVec::from(Vec<T>)` —
/// this conversion therefore also frees the FFI-side allocations.
impl From<Box<FfiPublicTransactionBody>> for PublicTransaction {
    fn from(value: Box<FfiPublicTransactionBody>) -> Self {
        Self {
            hash: HashType(value.hash.data),
            message: PublicMessage {
                program_id: ProgramId(value.message.program_id.data),
                account_ids: {
                    let std_vec: Vec<_> = value.message.account_ids.into();
                    std_vec
                        .into_iter()
                        .map(|ffi_val| AccountId {
                            value: ffi_val.data,
                        })
                        .collect()
                },
                nonces: {
                    let std_vec: Vec<_> = value.message.nonces.into();
                    std_vec.into_iter().map(Into::into).collect()
                },
                instruction_data: value.message.instruction_data.into(),
            },
            witness_set: WitnessSet {
                signatures_and_public_keys: {
                    let std_vec: Vec<_> = value.witness_set.into();
                    std_vec
                        .into_iter()
                        .map(|ffi_val| {
                            (
                                Signature(ffi_val.signature.data),
                                PublicKey(ffi_val.public_key.data),
                            )
                        })
                        .collect()
                },
                // Public transactions never carry a proof; only the private
                // path (see `FfiPrivateTransactionBody`) populates this.
                proof: None,
            },
        }
    }
}
#[repr(C)]
pub struct FfiPublicMessage {
pub program_id: FfiProgramId,
pub account_ids: FfiAccountIdList,
pub nonces: FfiNonceList,
pub instruction_data: FfiInstructionDataList,
}
impl From<PublicMessage> for FfiPublicMessage {
fn from(value: PublicMessage) -> Self {
let PublicMessage {
program_id,
account_ids,
nonces,
instruction_data,
} = value;
Self {
program_id: program_id.into(),
account_ids: account_ids
.into_iter()
.map(Into::into)
.collect::<Vec<_>>()
.into(),
nonces: nonces
.into_iter()
.map(Into::into)
.collect::<Vec<_>>()
.into(),
instruction_data: instruction_data.into(),
}
}
}
/// C-layout mirror of a privacy-preserving transaction. Unlike the public
/// variant, the proof is mandatory and stored as a separate byte vector.
#[repr(C)]
pub struct FfiPrivateTransactionBody {
    pub hash: FfiHashType,
    pub message: FfiPrivacyPreservingMessage,
    pub witness_set: FfiSignaturePubKeyList,
    pub proof: FfiProof,
}
impl From<PrivacyPreservingTransaction> for FfiPrivateTransactionBody {
    /// # Panics
    ///
    /// Panics if `witness_set.proof` is `None`: private transactions are
    /// required to carry a proof.
    fn from(value: PrivacyPreservingTransaction) -> Self {
        let PrivacyPreservingTransaction {
            hash,
            message,
            witness_set,
        } = value;
        Self {
            hash: hash.into(),
            message: message.into(),
            // Partial move: `signatures_and_public_keys` is moved out here,
            // leaving the disjoint `proof` field still usable below.
            witness_set: witness_set
                .signatures_and_public_keys
                .into_iter()
                .map(Into::into)
                .collect::<Vec<_>>()
                .into(),
            proof: witness_set
                .proof
                .expect("Private execution: proof must be present")
                .0
                .into(),
        }
    }
}
/// Reconstruct a native `PrivacyPreservingTransaction` from its FFI mirror.
///
/// Every `FfiVec` conversion back into a `Vec` reclaims the buffer leaked by
/// the forward conversion, so this also releases the FFI-side allocations.
impl From<Box<FfiPrivateTransactionBody>> for PrivacyPreservingTransaction {
    fn from(value: Box<FfiPrivateTransactionBody>) -> Self {
        Self {
            hash: HashType(value.hash.data),
            message: PrivacyPreservingMessage {
                public_account_ids: {
                    let std_vec: Vec<_> = value.message.public_account_ids.into();
                    std_vec
                        .into_iter()
                        .map(|ffi_val| AccountId {
                            value: ffi_val.data,
                        })
                        .collect()
                },
                nonces: {
                    let std_vec: Vec<_> = value.message.nonces.into();
                    std_vec.into_iter().map(Into::into).collect()
                },
                public_post_states: {
                    let std_vec: Vec<_> = value.message.public_post_states.into();
                    std_vec.into_iter().map(Into::into).collect()
                },
                encrypted_private_post_states: {
                    let std_vec: Vec<_> = value.message.encrypted_private_post_states.into();
                    std_vec
                        .into_iter()
                        .map(|ffi_val| EncryptedAccountData {
                            ciphertext: Ciphertext(ffi_val.ciphertext.into()),
                            epk: EphemeralPublicKey(ffi_val.epk.into()),
                            view_tag: ffi_val.view_tag,
                        })
                        .collect()
                },
                new_commitments: {
                    let std_vec: Vec<_> = value.message.new_commitments.into();
                    std_vec
                        .into_iter()
                        .map(|ffi_val| Commitment(ffi_val.data))
                        .collect()
                },
                new_nullifiers: {
                    // Each entry re-pairs a nullifier with the digest of the
                    // commitment set it was derived against.
                    let std_vec: Vec<_> = value.message.new_nullifiers.into();
                    std_vec
                        .into_iter()
                        .map(|ffi_val| {
                            (
                                Nullifier(ffi_val.nullifier.data),
                                CommitmentSetDigest(ffi_val.commitment_set_digest.data),
                            )
                        })
                        .collect()
                },
                // Sentinel decoding: 0 / u64::MAX map back to "no bound".
                block_validity_window: cast_ffi_validity_window(
                    value.message.block_validity_window,
                ),
                timestamp_validity_window: cast_ffi_validity_window(
                    value.message.timestamp_validity_window,
                ),
            },
            witness_set: WitnessSet {
                signatures_and_public_keys: {
                    let std_vec: Vec<_> = value.witness_set.into();
                    std_vec
                        .into_iter()
                        .map(|ffi_val| {
                            (
                                Signature(ffi_val.signature.data),
                                PublicKey(ffi_val.public_key.data),
                            )
                        })
                        .collect()
                },
                // The forward conversion guarantees the proof was present.
                proof: Some(Proof(value.proof.into())),
            },
        }
    }
}
/// C-layout mirror of a `PrivacyPreservingMessage`.
///
/// Validity windows are encoded as `[lower, upper]` with sentinel values
/// (0 for "no lower bound", `u64::MAX` for "no upper bound") — see
/// `cast_validity_window` / `cast_ffi_validity_window`.
#[repr(C)]
pub struct FfiPrivacyPreservingMessage {
    pub public_account_ids: FfiAccountIdList,
    pub nonces: FfiNonceList,
    pub public_post_states: FfiAccountList,
    pub encrypted_private_post_states: FfiEncryptedAccountDataList,
    pub new_commitments: FfiVecBytes32,
    pub new_nullifiers: FfiNullifierCommitmentSetList,
    pub block_validity_window: [u64; 2],
    pub timestamp_validity_window: [u64; 2],
}
impl From<PrivacyPreservingMessage> for FfiPrivacyPreservingMessage {
    fn from(value: PrivacyPreservingMessage) -> Self {
        let PrivacyPreservingMessage {
            public_account_ids,
            nonces,
            public_post_states,
            encrypted_private_post_states,
            new_commitments,
            new_nullifiers,
            block_validity_window,
            timestamp_validity_window,
        } = value;
        Self {
            public_account_ids: public_account_ids
                .into_iter()
                .map(Into::into)
                .collect::<Vec<_>>()
                .into(),
            nonces: nonces
                .into_iter()
                .map(Into::into)
                .collect::<Vec<_>>()
                .into(),
            public_post_states: public_post_states
                .into_iter()
                // Bridge through `nssa::Account`; the expect is justified
                // because these states were read from committed blocks.
                .map(|acc_ind| -> nssa::Account {
                    acc_ind.try_into().expect("Source is in blocks, must fit")
                })
                .map(Into::into)
                .collect::<Vec<_>>()
                .into(),
            encrypted_private_post_states: encrypted_private_post_states
                .into_iter()
                .map(Into::into)
                .collect::<Vec<_>>()
                .into(),
            new_commitments: new_commitments
                .into_iter()
                .map(|comm| FfiBytes32 { data: comm.0 })
                .collect::<Vec<_>>()
                .into(),
            new_nullifiers: new_nullifiers
                .into_iter()
                .map(Into::into)
                .collect::<Vec<_>>()
                .into(),
            block_validity_window: cast_validity_window(block_validity_window),
            timestamp_validity_window: cast_validity_window(timestamp_validity_window),
        }
    }
}
/// C-layout pairing of a nullifier with the digest of the commitment set it
/// was derived against.
#[repr(C)]
pub struct FfiNullifierCommitmentSet {
    pub nullifier: FfiBytes32,
    pub commitment_set_digest: FfiBytes32,
}
impl From<(Nullifier, CommitmentSetDigest)> for FfiNullifierCommitmentSet {
    fn from((nullifier, digest): (Nullifier, CommitmentSetDigest)) -> Self {
        Self {
            nullifier: FfiBytes32 { data: nullifier.0 },
            commitment_set_digest: FfiBytes32 { data: digest.0 },
        }
    }
}
#[repr(C)]
pub struct FfiEncryptedAccountData {
pub ciphertext: FfiVecU8,
pub epk: FfiVecU8,
pub view_tag: u8,
}
impl From<EncryptedAccountData> for FfiEncryptedAccountData {
fn from(value: EncryptedAccountData) -> Self {
let EncryptedAccountData {
ciphertext,
epk,
view_tag,
} = value;
Self {
ciphertext: ciphertext.0.into(),
epk: epk.0.into(),
view_tag,
}
}
}
/// C-layout (signature, public key) witness pair.
#[repr(C)]
pub struct FfiSignaturePubKeyEntry {
    pub signature: FfiSignature,
    pub public_key: FfiPublicKey,
}
impl From<(Signature, PublicKey)> for FfiSignaturePubKeyEntry {
    fn from((signature, public_key): (Signature, PublicKey)) -> Self {
        Self {
            signature: signature.into(),
            public_key: public_key.into(),
        }
    }
}
/// C-layout mirror of a program-deployment transaction: hash plus the raw
/// bytecode payload.
#[repr(C)]
pub struct FfiProgramDeploymentTransactionBody {
    pub hash: FfiHashType,
    pub message: FfiProgramDeploymentMessage,
}
impl From<Box<FfiProgramDeploymentTransactionBody>> for ProgramDeploymentTransaction {
    /// Reconstruct the native transaction, reclaiming the bytecode buffer.
    fn from(value: Box<FfiProgramDeploymentTransactionBody>) -> Self {
        let FfiProgramDeploymentTransactionBody { hash, message } = *value;
        Self {
            hash: HashType(hash.data),
            message: ProgramDeploymentMessage {
                bytecode: message.into(),
            },
        }
    }
}
impl From<ProgramDeploymentTransaction> for FfiProgramDeploymentTransactionBody {
    /// Convert to the FFI mirror, leaking the bytecode buffer across the boundary.
    fn from(value: ProgramDeploymentTransaction) -> Self {
        Self {
            hash: value.hash.into(),
            message: value.message.bytecode.into(),
        }
    }
}
/// Manual tagged union over the three transaction bodies.
///
/// Exactly one of the three pointers is non-null, selected by the sibling
/// `FfiTransaction::kind` discriminant; the other two are null. The non-null
/// pointer owns a `Box` allocation and must be released via
/// `free_ffi_transaction`.
#[repr(C)]
pub struct FfiTransactionBody {
    pub public_body: *mut FfiPublicTransactionBody,
    pub private_body: *mut FfiPrivateTransactionBody,
    pub program_deployment_body: *mut FfiProgramDeploymentTransactionBody,
}
/// C-layout transaction: body pointers plus the discriminant saying which
/// pointer is live.
#[repr(C)]
pub struct FfiTransaction {
    pub body: FfiTransactionBody,
    pub kind: FfiTransactionKind,
}
impl From<Transaction> for FfiTransaction {
    fn from(value: Transaction) -> Self {
        match value {
            Transaction::Public(pub_tx) => Self {
                body: FfiTransactionBody {
                    public_body: Box::into_raw(Box::new(pub_tx.into())),
                    private_body: std::ptr::null_mut(),
                    program_deployment_body: std::ptr::null_mut(),
                },
                kind: FfiTransactionKind::Public,
            },
            Transaction::PrivacyPreserving(priv_tx) => Self {
                body: FfiTransactionBody {
                    public_body: std::ptr::null_mut(),
                    private_body: Box::into_raw(Box::new(priv_tx.into())),
                    program_deployment_body: std::ptr::null_mut(),
                },
                kind: FfiTransactionKind::Private,
            },
            Transaction::ProgramDeployment(pr_dep_tx) => Self {
                body: FfiTransactionBody {
                    public_body: std::ptr::null_mut(),
                    private_body: std::ptr::null_mut(),
                    program_deployment_body: Box::into_raw(Box::new(pr_dep_tx.into())),
                },
                kind: FfiTransactionKind::ProgramDeploy,
            },
        }
    }
}
/// Discriminant for `FfiTransactionBody`; values are stable for C callers.
#[repr(C)]
pub enum FfiTransactionKind {
    Public = 0x0,
    Private,
    ProgramDeploy,
}
/// Frees the resources associated with the given ffi transaction.
///
/// # Arguments
///
/// - `val`: An instance of `FfiTransaction`.
///
/// # Returns
///
/// void.
///
/// # Safety
///
/// The caller must ensure that:
/// - `val` is a valid instance of `FfiTransaction`.
#[unsafe(no_mangle)]
pub unsafe extern "C" fn free_ffi_transaction(val: FfiTransaction) {
    // Only the pointer selected by `kind` is live; converting the reclaimed
    // box back into the native type also frees the nested FfiVec buffers.
    match val.kind {
        FfiTransactionKind::Public => {
            // SAFETY: per `kind`, `public_body` was produced by `Box::into_raw`.
            let body = unsafe { Box::from_raw(val.body.public_body) };
            let std_body: PublicTransaction = body.into();
            drop(std_body);
        }
        FfiTransactionKind::Private => {
            // SAFETY: per `kind`, `private_body` was produced by `Box::into_raw`.
            let body = unsafe { Box::from_raw(val.body.private_body) };
            let std_body: PrivacyPreservingTransaction = body.into();
            drop(std_body);
        }
        FfiTransactionKind::ProgramDeploy => {
            // SAFETY: per `kind`, `program_deployment_body` was produced by
            // `Box::into_raw`.
            let body = unsafe { Box::from_raw(val.body.program_deployment_body) };
            let std_body: ProgramDeploymentTransaction = body.into();
            drop(std_body);
        }
    }
}
/// Frees the resources associated with the given ffi transaction option.
///
/// # Arguments
///
/// - `val`: An instance of `FfiOption<FfiTransaction>`.
///
/// # Returns
///
/// void.
///
/// # Safety
///
/// The caller must ensure that:
/// - `val` is a valid instance of `FfiOption<FfiTransaction>`.
#[unsafe(no_mangle)]
pub unsafe extern "C" fn free_ffi_transaction_opt(val: FfiOption<FfiTransaction>) {
if val.is_some {
let value = unsafe { Box::from_raw(val.value) };
match value.kind {
FfiTransactionKind::Public => {
let body = unsafe { Box::from_raw(value.body.public_body) };
let std_body: PublicTransaction = body.into();
drop(std_body);
}
FfiTransactionKind::Private => {
let body = unsafe { Box::from_raw(value.body.private_body) };
let std_body: PrivacyPreservingTransaction = body.into();
drop(std_body);
}
FfiTransactionKind::ProgramDeploy => {
let body = unsafe { Box::from_raw(value.body.program_deployment_body) };
let std_body: ProgramDeploymentTransaction = body.into();
drop(std_body);
}
}
}
}
/// Frees the resources associated with the given vector of ffi transactions.
///
/// # Arguments
///
/// - `val`: An instance of `FfiVec<FfiTransaction>`.
///
/// # Returns
///
/// void.
///
/// # Safety
///
/// The caller must ensure that:
/// - `val` is a valid instance of `FfiVec<FfiTransaction>`.
#[unsafe(no_mangle)]
pub unsafe extern "C" fn free_ffi_transaction_vec(val: FfiVec<FfiTransaction>) {
    // Reclaim the vector buffer; each element still owns its body pointer and
    // must be freed individually.
    let ffi_tx_std_vec: Vec<_> = val.into();
    for tx in ffi_tx_std_vec {
        // SAFETY: each element is a valid `FfiTransaction` per the caller
        // contract.
        unsafe {
            free_ffi_transaction(tx);
        }
    }
}
fn cast_validity_window(window: ValidityWindow) -> [u64; 2] {
[
window.0.0.unwrap_or_default(),
window.0.1.unwrap_or(u64::MAX),
]
}
const fn cast_ffi_validity_window(ffi_window: [u64; 2]) -> ValidityWindow {
let left = if ffi_window[0] == 0 {
None
} else {
Some(ffi_window[0])
};
let right = if ffi_window[1] == u64::MAX {
None
} else {
Some(ffi_window[1])
};
ValidityWindow((left, right))
}

View File

@ -0,0 +1,31 @@
use crate::api::types::{
FfiAccountId, FfiBytes32, FfiNonce, FfiVec,
account::FfiAccount,
transaction::{
FfiEncryptedAccountData, FfiNullifierCommitmentSet, FfiSignaturePubKeyEntry, FfiTransaction,
},
};
pub type FfiVecU8 = FfiVec<u8>;
pub type FfiAccountList = FfiVec<FfiAccount>;
pub type FfiAccountIdList = FfiVec<FfiAccountId>;
pub type FfiVecBytes32 = FfiVec<FfiBytes32>;
pub type FfiBlockBody = FfiVec<FfiTransaction>;
pub type FfiNonceList = FfiVec<FfiNonce>;
pub type FfiInstructionDataList = FfiVec<u32>;
pub type FfiSignaturePubKeyList = FfiVec<FfiSignaturePubKeyEntry>;
pub type FfiProof = FfiVecU8;
pub type FfiProgramDeploymentMessage = FfiVecU8;
pub type FfiEncryptedAccountDataList = FfiVec<FfiEncryptedAccountData>;
pub type FfiNullifierCommitmentSetList = FfiVec<FfiNullifierCommitmentSet>;

View File

@ -4,7 +4,6 @@ use anyhow::{Context as _, Result};
use log::info;
pub use url::Url;
#[expect(async_fn_in_trait, reason = "We don't care about Send/Sync here")]
pub trait IndexerClientTrait: Clone {
async fn new(indexer_url: &Url) -> Result<Self>;
}

View File

@ -5,6 +5,7 @@ pub enum OperationStatus {
Ok = 0x0,
NullPointer = 0x1,
InitializationError = 0x2,
ClientError = 0x3,
}
impl OperationStatus {

View File

@ -3,18 +3,26 @@ use std::{ffi::c_void, net::SocketAddr};
use indexer_service::IndexerHandle;
use tokio::runtime::Runtime;
use crate::client::IndexerClient;
#[repr(C)]
pub struct IndexerServiceFFI {
indexer_handle: *mut c_void,
runtime: *mut c_void,
indexer_client: *mut c_void,
}
impl IndexerServiceFFI {
pub fn new(indexer_handle: indexer_service::IndexerHandle, runtime: Runtime) -> Self {
pub fn new(
indexer_handle: indexer_service::IndexerHandle,
runtime: Runtime,
indexer_client: IndexerClient,
) -> Self {
Self {
// Box the complex types and convert to opaque pointers
indexer_handle: Box::into_raw(Box::new(indexer_handle)).cast::<c_void>(),
runtime: Box::into_raw(Box::new(runtime)).cast::<c_void>(),
indexer_client: Box::into_raw(Box::new(indexer_client)).cast::<c_void>(),
}
}
@ -25,10 +33,11 @@ impl IndexerServiceFFI {
/// The caller must ensure that:
/// - `self` is a valid object(contains valid pointers in all fields)
#[must_use]
pub unsafe fn into_parts(self) -> (Box<IndexerHandle>, Box<Runtime>) {
pub unsafe fn into_parts(self) -> (Box<IndexerHandle>, Box<Runtime>, Box<IndexerClient>) {
let indexer_handle = unsafe { Box::from_raw(self.indexer_handle.cast::<IndexerHandle>()) };
let runtime = unsafe { Box::from_raw(self.runtime.cast::<Runtime>()) };
(indexer_handle, runtime)
let indexer_client = unsafe { Box::from_raw(self.indexer_client.cast::<IndexerClient>()) };
(indexer_handle, runtime, indexer_client)
}
/// Helper to get indexer handle addr.
@ -49,7 +58,7 @@ impl IndexerServiceFFI {
indexer_handle.addr()
}
/// Helper to get indexer handle addr.
/// Helper to get indexer handle ref.
///
/// # Safety
///
@ -64,6 +73,38 @@ impl IndexerServiceFFI {
.expect("Indexer Handle must be non-null pointer")
}
}
/// Helper to get indexer client ref.
///
/// # Safety
///
/// The caller must ensure that:
/// - `self` is a valid object(contains valid pointers in all fields)
#[must_use]
pub const unsafe fn client(&self) -> &IndexerClient {
unsafe {
self.indexer_client
.cast::<IndexerClient>()
.as_ref()
.expect("Indexer Client must be non-null pointer")
}
}
/// Helper to get indexer runtime ref.
///
/// # Safety
///
/// The caller must ensure that:
/// - `self` is a valid object(contains valid pointers in all fields)
#[must_use]
pub const unsafe fn runtime(&self) -> &Runtime {
unsafe {
self.runtime
.cast::<Runtime>()
.as_ref()
.expect("Indexer Runtime must be non-null pointer")
}
}
}
// Implement Drop to prevent memory leaks
@ -72,6 +113,7 @@ impl Drop for IndexerServiceFFI {
let Self {
indexer_handle,
runtime,
indexer_client,
} = self;
if indexer_handle.is_null() {
@ -80,7 +122,11 @@ impl Drop for IndexerServiceFFI {
if runtime.is_null() {
log::error!("Attempted to drop a null tokio runtime pointer. This is a bug");
}
if indexer_client.is_null() {
log::error!("Attempted to drop a null client pointer. This is a bug");
}
drop(unsafe { Box::from_raw(indexer_handle.cast::<IndexerHandle>()) });
drop(unsafe { Box::from_raw(runtime.cast::<Runtime>()) });
drop(unsafe { Box::from_raw(indexer_client.cast::<IndexerClient>()) });
}
}

View File

@ -4,5 +4,6 @@ pub use errors::OperationStatus;
pub use indexer::IndexerServiceFFI;
pub mod api;
mod client;
mod errors;
mod indexer;

View File

@ -1,12 +1,8 @@
{
"home": ".",
"consensus_info_polling_interval": "1s",
"bedrock_client_config": {
"addr": "http://localhost:8080",
"backoff": {
"start_delay": "100ms",
"max_retries": 5
}
"bedrock_config": {
"addr": "http://localhost:8080"
},
"channel_id": "0101010101010101010101010101010101010101010101010101010101010101",
"initial_accounts": [

View File

@ -41,6 +41,13 @@ pub trait Rpc {
#[method(name = "getAccount")]
async fn get_account(&self, account_id: AccountId) -> Result<Account, ErrorObjectOwned>;
#[method(name = "getAccountAtBlock")]
async fn get_account_at_block(
&self,
account_id: AccountId,
block_id: BlockId,
) -> Result<Account, ErrorObjectOwned>;
#[method(name = "getTransaction")]
async fn get_transaction(
&self,

View File

@ -239,6 +239,22 @@ impl indexer_service_rpc::RpcServer for MockIndexerService {
.ok_or_else(|| ErrorObjectOwned::owned(-32001, "Account not found", None::<()>))
}
async fn get_account_at_block(
&self,
account_id: AccountId,
_block_id: BlockId,
) -> Result<Account, ErrorObjectOwned> {
// Mock service does not track historical state; returns current state regardless of
// block_id.
self.state
.read()
.await
.accounts
.get(&account_id)
.cloned()
.ok_or_else(|| ErrorObjectOwned::owned(-32001, "Account not found", None::<()>))
}
async fn get_transaction(
&self,
tx_hash: HashType,

View File

@ -83,6 +83,19 @@ impl indexer_service_rpc::RpcServer for IndexerService {
.into())
}
async fn get_account_at_block(
&self,
account_id: AccountId,
block_id: BlockId,
) -> Result<Account, ErrorObjectOwned> {
Ok(self
.indexer
.store
.account_state_at_block(&account_id.into(), block_id)
.map_err(db_error)?
.into())
}
async fn get_transaction(
&self,
tx_hash: HashType,

View File

@ -1,76 +0,0 @@
#include <stdarg.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdlib.h>
typedef enum OperationStatus {
Ok = 0,
NullPointer = 1,
InitializationError = 2,
} OperationStatus;
typedef struct IndexerServiceFFI {
void *indexer_handle;
void *runtime;
} IndexerServiceFFI;
/**
* Simple wrapper around a pointer to a value or an error.
*
* Pointer is not guaranteed. You should check the error field before
* dereferencing the pointer.
*/
typedef struct PointerResult_IndexerServiceFFI__OperationStatus {
struct IndexerServiceFFI *value;
enum OperationStatus error;
} PointerResult_IndexerServiceFFI__OperationStatus;
typedef struct PointerResult_IndexerServiceFFI__OperationStatus InitializedIndexerServiceFFIResult;
/**
* Creates and starts an indexer based on the provided
* configuration file path.
*
* # Arguments
*
* - `config_path`: A pointer to a string representing the path to the configuration file.
* - `port`: Number representing a port, on which indexers RPC will start.
*
* # Returns
*
* An `InitializedIndexerServiceFFIResult` containing either a pointer to the
* initialized `IndexerServiceFFI` or an error code.
*/
InitializedIndexerServiceFFIResult start_indexer(const char *config_path, uint16_t port);
/**
* Stops and frees the resources associated with the given indexer service.
*
* # Arguments
*
* - `indexer`: A pointer to the `IndexerServiceFFI` instance to be stopped.
*
* # Returns
*
* An `OperationStatus` indicating success or failure.
*
* # Safety
*
* The caller must ensure that:
* - `indexer` is a valid pointer to a `IndexerServiceFFI` instance
* - The `IndexerServiceFFI` instance was created by this library
* - The pointer will not be used after this function returns
*/
enum OperationStatus stop_indexer(struct IndexerServiceFFI *indexer);
/**
* # Safety
* It's up to the caller to pass a proper pointer, if somehow from c/c++ side
* this is called with a type which doesn't come from a returned `CString` it
* will cause a segfault.
*/
void free_cstring(char *block);
bool is_ok(const enum OperationStatus *self);
bool is_error(const enum OperationStatus *self);

View File

@ -19,11 +19,13 @@ indexer_service.workspace = true
serde_json.workspace = true
token_core.workspace = true
ata_core.workspace = true
indexer_service_rpc.workspace = true
indexer_service_rpc = { workspace = true, features = ["client"] }
sequencer_service_rpc = { workspace = true, features = ["client"] }
jsonrpsee = { workspace = true, features = ["ws-client"] }
wallet-ffi.workspace = true
indexer_ffi.workspace = true
testnet_initial_state.workspace = true
indexer_service_protocol.workspace = true
url.workspace = true
@ -35,4 +37,4 @@ hex.workspace = true
tempfile.workspace = true
bytesize.workspace = true
futures.workspace = true
testcontainers = { version = "0.27.0", features = ["docker-compose"] }
testcontainers = { version = "0.27.3", features = ["docker-compose"] }

View File

@ -2,7 +2,7 @@ use std::{net::SocketAddr, path::PathBuf, time::Duration};
use anyhow::{Context as _, Result};
use bytesize::ByteSize;
use indexer_service::{BackoffConfig, ChannelId, ClientConfig, IndexerConfig};
use indexer_service::{ChannelId, ClientConfig, IndexerConfig};
use key_protocol::key_management::KeyChain;
use nssa::{Account, AccountId, PrivateKey, PublicKey};
use nssa_core::{account::Data, program::DEFAULT_PROGRAM_ID};
@ -59,12 +59,16 @@ impl InitialData {
}
let mut private_charlie_key_chain = KeyChain::new_os_random();
let mut private_charlie_account_id =
AccountId::from((&private_charlie_key_chain.nullifier_public_key, 0));
let mut private_charlie_account_id = AccountId::for_regular_private_account(
&private_charlie_key_chain.nullifier_public_key,
0,
);
let mut private_david_key_chain = KeyChain::new_os_random();
let mut private_david_account_id =
AccountId::from((&private_david_key_chain.nullifier_public_key, 0));
let mut private_david_account_id = AccountId::for_regular_private_account(
&private_david_key_chain.nullifier_public_key,
0,
);
// Ensure consistent ordering
if private_charlie_account_id > private_david_account_id {
@ -164,35 +168,10 @@ impl std::fmt::Display for UrlProtocol {
}
}
pub fn indexer_config(
bedrock_addr: SocketAddr,
home: PathBuf,
initial_data: &InitialData,
) -> Result<IndexerConfig> {
Ok(IndexerConfig {
home,
consensus_info_polling_interval: Duration::from_secs(1),
bedrock_client_config: ClientConfig {
addr: addr_to_url(UrlProtocol::Http, bedrock_addr)
.context("Failed to convert bedrock addr to URL")?,
auth: None,
backoff: BackoffConfig {
start_delay: Duration::from_millis(100),
max_retries: 10,
},
},
initial_public_accounts: Some(initial_data.sequencer_initial_public_accounts()),
initial_private_accounts: Some(initial_data.sequencer_initial_private_accounts()),
signing_key: [37; 32],
channel_id: bedrock_channel_id(),
})
}
pub fn sequencer_config(
partial: SequencerPartialConfig,
home: PathBuf,
bedrock_addr: SocketAddr,
indexer_addr: SocketAddr,
initial_data: &InitialData,
) -> Result<SequencerConfig> {
let SequencerPartialConfig {
@ -215,17 +194,11 @@ pub fn sequencer_config(
initial_private_accounts: Some(initial_data.sequencer_initial_private_accounts()),
signing_key: [37; 32],
bedrock_config: BedrockConfig {
backoff: BackoffConfig {
start_delay: Duration::from_millis(100),
max_retries: 5,
},
channel_id: bedrock_channel_id(),
node_url: addr_to_url(UrlProtocol::Http, bedrock_addr)
.context("Failed to convert bedrock addr to URL")?,
auth: None,
},
indexer_rpc_url: addr_to_url(UrlProtocol::Ws, indexer_addr)
.context("Failed to convert indexer addr to URL")?,
})
}
@ -245,6 +218,26 @@ pub fn wallet_config(
})
}
pub fn indexer_config(
bedrock_addr: SocketAddr,
home: PathBuf,
initial_data: &InitialData,
) -> Result<IndexerConfig> {
Ok(IndexerConfig {
home,
consensus_info_polling_interval: Duration::from_secs(1),
bedrock_config: ClientConfig {
addr: addr_to_url(UrlProtocol::Http, bedrock_addr)
.context("Failed to convert bedrock addr to URL")?,
auth: None,
},
initial_public_accounts: Some(initial_data.sequencer_initial_public_accounts()),
initial_private_accounts: Some(initial_data.sequencer_initial_private_accounts()),
signing_key: [37; 32],
channel_id: bedrock_channel_id(),
})
}
pub fn addr_to_url(protocol: UrlProtocol, addr: SocketAddr) -> Result<Url> {
// Convert 0.0.0.0 to 127.0.0.1 for client connections
// When binding to port 0, the server binds to 0.0.0.0:<random_port>

View File

@ -0,0 +1,34 @@
//! Thin client wrapper for querying the indexer's JSON-RPC API in tests.
//!
//! The sequencer doesn't depend on the indexer at runtime — finalization comes
//! from zone-sdk events. This wrapper exists purely for test ergonomics so
//! integration tests can construct a single connection and call
//! `indexer_service_rpc::RpcClient` methods directly via `Deref`.
use std::ops::Deref;
use anyhow::{Context as _, Result};
use jsonrpsee::ws_client::{WsClient, WsClientBuilder};
use log::info;
use url::Url;
/// Owned websocket connection to the indexer's JSON-RPC endpoint.
///
/// `Deref`s to the underlying [`WsClient`] so `indexer_service_rpc::RpcClient`
/// extension methods can be called on it directly.
pub struct IndexerClient(WsClient);
impl IndexerClient {
    /// Connect to the indexer's websocket RPC at `indexer_url`.
    ///
    /// # Errors
    ///
    /// Returns an error if the websocket connection cannot be established.
    pub async fn new(indexer_url: &Url) -> Result<Self> {
        info!("Connecting to Indexer at {indexer_url}");
        let client = WsClientBuilder::default()
            .build(indexer_url)
            .await
            .context("Failed to create websocket client")?;
        Ok(Self(client))
    }
}
impl Deref for IndexerClient {
    type Target = WsClient;
    fn deref(&self) -> &Self::Target {
        &self.0
    }
}

View File

@ -9,16 +9,19 @@ use indexer_service::IndexerHandle;
use log::{debug, error};
use nssa::{AccountId, PrivacyPreservingTransaction};
use nssa_core::Commitment;
use sequencer_core::indexer_client::{IndexerClient, IndexerClientTrait as _};
use sequencer_service::SequencerHandle;
use sequencer_service_rpc::{RpcClient as _, SequencerClient, SequencerClientBuilder};
use tempfile::TempDir;
use testcontainers::compose::DockerCompose;
use wallet::WalletCore;
use crate::setup::{setup_bedrock_node, setup_indexer, setup_sequencer, setup_wallet};
use crate::{
indexer_client::IndexerClient,
setup::{setup_bedrock_node, setup_indexer, setup_sequencer, setup_wallet},
};
pub mod config;
pub mod indexer_client;
pub mod setup;
pub mod test_context_ffi;
@ -26,6 +29,7 @@ pub mod test_context_ffi;
pub const TIME_TO_WAIT_FOR_BLOCK_SECONDS: u64 = 12;
pub const NSSA_PROGRAM_FOR_TEST_DATA_CHANGER: &str = "data_changer.bin";
pub const NSSA_PROGRAM_FOR_TEST_NOOP: &str = "noop.bin";
pub const NSSA_PROGRAM_FOR_TEST_PDA_FUND_SPEND_PROXY: &str = "pda_fund_spend_proxy.bin";
const BEDROCK_SERVICE_WITH_OPEN_PORT: &str = "logos-blockchain-node-0";
const BEDROCK_SERVICE_PORT: u16 = 18080;
@ -77,14 +81,10 @@ impl TestContext {
.await
.context("Failed to setup Indexer")?;
let (sequencer_handle, temp_sequencer_dir) = setup_sequencer(
sequencer_partial_config,
bedrock_addr,
indexer_handle.addr(),
&initial_data,
)
.await
.context("Failed to setup Sequencer")?;
let (sequencer_handle, temp_sequencer_dir) =
setup_sequencer(sequencer_partial_config, bedrock_addr, &initial_data)
.await
.context("Failed to setup Sequencer")?;
let (wallet, temp_wallet_dir, wallet_password) =
setup_wallet(sequencer_handle.addr(), &initial_data)

View File

@ -119,7 +119,6 @@ pub(crate) async fn setup_indexer(
pub(crate) async fn setup_sequencer(
partial: config::SequencerPartialConfig,
bedrock_addr: SocketAddr,
indexer_addr: SocketAddr,
initial_data: &config::InitialData,
) -> Result<(SequencerHandle, TempDir)> {
let temp_sequencer_dir =
@ -134,7 +133,6 @@ pub(crate) async fn setup_sequencer(
partial,
temp_sequencer_dir.path().to_owned(),
bedrock_addr,
indexer_addr,
initial_data,
)
.context("Failed to create Sequencer config")?;

View File

@ -6,7 +6,6 @@ use indexer_ffi::IndexerServiceFFI;
use indexer_service_rpc::RpcClient as _;
use log::{debug, error};
use nssa::AccountId;
use sequencer_core::indexer_client::{IndexerClient, IndexerClientTrait as _};
use sequencer_service::SequencerHandle;
use sequencer_service_rpc::{RpcClient as _, SequencerClient, SequencerClientBuilder};
use tempfile::TempDir;
@ -15,6 +14,7 @@ use wallet::WalletCore;
use crate::{
BEDROCK_SERVICE_WITH_OPEN_PORT, LOGGER, TestContextBuilder, config,
indexer_client::IndexerClient,
setup::{setup_bedrock_node, setup_indexer_ffi, setup_sequencer, setup_wallet},
};
@ -85,8 +85,6 @@ impl TestContextFFI {
.block_on(setup_sequencer(
sequencer_partial_config,
bedrock_addr,
// SAFETY: addr is valid if indexer_ffi is valid.
unsafe { indexer_ffi.addr() },
initial_data,
))
.context("Failed to setup Sequencer")?;
@ -268,6 +266,11 @@ impl BlockingTestContextFFI {
pub fn runtime_clone(&self) -> Arc<tokio::runtime::Runtime> {
Arc::<tokio::runtime::Runtime>::clone(&self.runtime)
}
#[must_use]
pub const fn indexer_ffi(&self) -> *const IndexerServiceFFI {
&raw const (self.indexer_ffi)
}
}
impl Drop for BlockingTestContextFFI {

View File

@ -627,14 +627,14 @@ async fn shielded_transfers_to_two_identifiers_same_npk() -> Result<()> {
.await?;
// Both accounts must be discovered with the correct balances.
let account_id_1 = AccountId::from((&npk, identifier_1));
let account_id_1 = AccountId::for_regular_private_account(&npk, identifier_1);
let acc_1 = ctx
.wallet()
.get_account_private(account_id_1)
.context("account for identifier 1 not found after sync")?;
assert_eq!(acc_1.balance, 100);
let account_id_2 = AccountId::from((&npk, identifier_2));
let account_id_2 = AccountId::for_regular_private_account(&npk, identifier_2);
let acc_2 = ctx
.wallet()
.get_account_private(account_id_2)

View File

@ -1,11 +1,18 @@
#![expect(
clippy::shadow_unrelated,
clippy::tests_outside_test_module,
clippy::undocumented_unsafe_blocks,
reason = "We don't care about these in tests"
)]
use anyhow::{Context as _, Result};
use indexer_service_rpc::RpcClient as _;
use indexer_ffi::{
IndexerServiceFFI, OperationStatus,
api::{
PointerResult,
types::{FfiAccountId, FfiOption, FfiVec, account::FfiAccount, block::FfiBlock},
},
};
use integration_tests::{
TIME_TO_WAIT_FOR_BLOCK_SECONDS, format_private_account_id, format_public_account_id,
test_context_ffi::BlockingTestContextFFI, verify_commitment_is_in_state,
@ -17,6 +24,23 @@ use wallet::cli::{Command, programs::native_token_transfer::AuthTransferSubcomma
/// Maximum time to wait for the indexer to catch up to the sequencer.
const L2_TO_L1_TIMEOUT_MILLIS: u64 = 180_000;
// FFI entry points exported by the indexer FFI library. Each takes a pointer
// obtained from `BlockingTestContextFFI::indexer_ffi()` and returns a
// `PointerResult`; call sites check `error.is_ok()` before dereferencing
// `value`. NOTE(review): ownership of the returned `value` allocation is not
// visible here — confirm whether the caller must free it.
unsafe extern "C" {
    /// Queries the indexer's latest known block id.
    unsafe fn query_last_block(
        indexer: *const IndexerServiceFFI,
    ) -> PointerResult<u64, OperationStatus>;
    /// Queries up to `limit` blocks, optionally bounded by `before`.
    /// NOTE(review): call sites iterate newest-first — confirm ordering
    /// against the FFI implementation.
    unsafe fn query_block_vec(
        indexer: *const IndexerServiceFFI,
        before: FfiOption<u64>,
        limit: u64,
    ) -> PointerResult<FfiVec<FfiBlock>, OperationStatus>;
    /// Queries a single account's indexed state by account id.
    unsafe fn query_account(
        indexer: *const IndexerServiceFFI,
        account_id: FfiAccountId,
    ) -> PointerResult<FfiAccount, OperationStatus>;
}
#[test]
fn indexer_test_run_ffi() -> Result<()> {
let blocking_ctx = BlockingTestContextFFI::new()?;
@ -28,10 +52,19 @@ fn indexer_test_run_ffi() -> Result<()> {
});
let last_block_indexer = blocking_ctx.ctx().get_last_block_indexer(runtime_wrapped)?;
let last_block_indexer_ffi_res = unsafe { query_last_block(blocking_ctx.indexer_ffi()) };
assert!(last_block_indexer_ffi_res.error.is_ok());
let last_block_indexer_ffi = unsafe { *last_block_indexer_ffi_res.value };
info!("Last block on ind now is {last_block_indexer}");
info!("Last block on ind ffi now is {last_block_indexer_ffi}");
assert!(last_block_indexer > 1);
assert!(last_block_indexer_ffi > 1);
assert_eq!(last_block_indexer, last_block_indexer_ffi);
Ok(())
}
@ -40,7 +73,6 @@ fn indexer_test_run_ffi() -> Result<()> {
fn indexer_ffi_block_batching() -> Result<()> {
let blocking_ctx = BlockingTestContextFFI::new()?;
let runtime_wrapped = blocking_ctx.runtime();
let ctx = blocking_ctx.ctx();
// WAIT
info!("Waiting for indexer to parse blocks");
@ -48,31 +80,36 @@ fn indexer_ffi_block_batching() -> Result<()> {
tokio::time::sleep(std::time::Duration::from_millis(L2_TO_L1_TIMEOUT_MILLIS)).await;
});
let last_block_indexer = runtime_wrapped
.block_on(ctx.indexer_client().get_last_finalized_block_id())
.unwrap();
let last_block_indexer_ffi_res = unsafe { query_last_block(blocking_ctx.indexer_ffi()) };
assert!(last_block_indexer_ffi_res.error.is_ok());
let last_block_indexer = unsafe { *last_block_indexer_ffi_res.value };
info!("Last block on ind now is {last_block_indexer}");
assert!(last_block_indexer > 1);
// Getting wide batch to fit all blocks (from latest backwards)
let mut block_batch = runtime_wrapped
.block_on(ctx.indexer_client().get_blocks(None, 100))
.unwrap();
let before_ffi = FfiOption::<u64>::from_none();
let limit = 100;
// Reverse to check chain consistency from oldest to newest
block_batch.reverse();
let block_batch_ffi_res =
unsafe { query_block_vec(blocking_ctx.indexer_ffi(), before_ffi, limit) };
// Checking chain consistency
let mut prev_block_hash = block_batch.first().unwrap().header.hash;
assert!(block_batch_ffi_res.error.is_ok());
for block in &block_batch[1..] {
assert_eq!(block.header.prev_block_hash, prev_block_hash);
let block_batch = unsafe { &*block_batch_ffi_res.value };
let mut last_block_prev_hash = unsafe { block_batch.get(0) }.header.prev_block_hash.data;
for i in 1..block_batch.len {
let block = unsafe { block_batch.get(i) };
assert_eq!(last_block_prev_hash, block.header.hash.data);
info!("Block {} chain-consistent", block.header.block_id);
prev_block_hash = block.header.hash;
last_block_prev_hash = block.header.prev_block_hash.data;
}
Ok(())
@ -82,6 +119,7 @@ fn indexer_ffi_block_batching() -> Result<()> {
fn indexer_ffi_state_consistency() -> Result<()> {
let mut blocking_ctx = BlockingTestContextFFI::new()?;
let runtime_wrapped = blocking_ctx.runtime_clone();
let indexer_ffi = blocking_ctx.indexer_ffi();
let ctx = blocking_ctx.ctx_mut();
let command = Command::AuthTransfer(AuthTransferSubcommand::Send {
@ -179,14 +217,21 @@ fn indexer_ffi_state_consistency() -> Result<()> {
tokio::time::sleep(std::time::Duration::from_millis(L2_TO_L1_TIMEOUT_MILLIS)).await;
});
let acc1_ind_state = runtime_wrapped.block_on(
ctx.indexer_client()
.get_account(ctx.existing_public_accounts()[0].into()),
)?;
let acc2_ind_state = runtime_wrapped.block_on(
ctx.indexer_client()
.get_account(ctx.existing_public_accounts()[1].into()),
)?;
let acc1_ind_state_ffi =
unsafe { query_account(indexer_ffi, (&ctx.existing_public_accounts()[0]).into()) };
assert!(acc1_ind_state_ffi.error.is_ok());
let acc1_ind_state_pre = unsafe { &*acc1_ind_state_ffi.value };
let acc1_ind_state: indexer_service_protocol::Account = acc1_ind_state_pre.into();
let acc2_ind_state_ffi =
unsafe { query_account(indexer_ffi, (&ctx.existing_public_accounts()[1]).into()) };
assert!(acc2_ind_state_ffi.error.is_ok());
let acc2_ind_state_pre = unsafe { &*acc2_ind_state_ffi.value };
let acc2_ind_state: indexer_service_protocol::Account = acc2_ind_state_pre.into();
info!("Checking correct state transition");
let acc1_seq_state =
@ -212,6 +257,7 @@ fn indexer_ffi_state_consistency() -> Result<()> {
fn indexer_ffi_state_consistency_with_labels() -> Result<()> {
let mut blocking_ctx = BlockingTestContextFFI::new()?;
let runtime_wrapped = blocking_ctx.runtime_clone();
let indexer_ffi = blocking_ctx.indexer_ffi();
let ctx = blocking_ctx.ctx_mut();
// Assign labels to both accounts
@ -275,10 +321,14 @@ fn indexer_ffi_state_consistency_with_labels() -> Result<()> {
tokio::time::sleep(std::time::Duration::from_millis(L2_TO_L1_TIMEOUT_MILLIS)).await;
});
let acc1_ind_state = runtime_wrapped.block_on(
ctx.indexer_client()
.get_account(ctx.existing_public_accounts()[0].into()),
)?;
let acc1_ind_state_ffi =
unsafe { query_account(indexer_ffi, (&ctx.existing_public_accounts()[0]).into()) };
assert!(acc1_ind_state_ffi.error.is_ok());
let acc1_ind_state_pre = unsafe { &*acc1_ind_state_ffi.value };
let acc1_ind_state: indexer_service_protocol::Account = acc1_ind_state_pre.into();
let acc1_seq_state =
runtime_wrapped.block_on(sequencer_service_rpc::RpcClient::get_account(
ctx.sequencer_client(),

View File

@ -0,0 +1,309 @@
#![expect(
clippy::tests_outside_test_module,
reason = "We don't care about these in tests"
)]
use std::{path::PathBuf, time::Duration};
use anyhow::{Context as _, Result};
use integration_tests::{
NSSA_PROGRAM_FOR_TEST_PDA_FUND_SPEND_PROXY, TIME_TO_WAIT_FOR_BLOCK_SECONDS, TestContext,
verify_commitment_is_in_state,
};
use log::info;
use nssa::{
AccountId, ProgramId, privacy_preserving_transaction::circuit::ProgramWithDependencies,
program::Program,
};
use nssa_core::{NullifierPublicKey, encryption::ViewingPublicKey, program::PdaSeed};
use tokio::test;
use wallet::{
PrivacyPreservingAccount, WalletCore,
cli::{Command, account::AccountSubcommand},
};
/// Funds a private PDA via the proxy program with a chained call to `auth_transfer`.
///
/// A direct call to `auth_transfer` cannot establish the PDA-to-npk binding because it uses
/// `Claim::Authorized` rather than `Claim::Pda`. Routing through the proxy provides the binding
/// via `pda_seeds` in the chained call to `auth_transfer`.
#[expect(
    clippy::too_many_arguments,
    reason = "test helper — grouping args would obscure intent"
)]
async fn fund_private_pda(
    wallet: &WalletCore,
    sender: AccountId,
    pda_account_id: AccountId,
    npk: NullifierPublicKey,
    vpk: ViewingPublicKey,
    identifier: u128,
    seed: PdaSeed,
    amount: u128,
    proxy_program: &ProgramWithDependencies,
    auth_transfer_id: ProgramId,
) -> Result<()> {
    // Public sender plus the (foreign) private PDA being funded.
    let accounts = vec![
        PrivacyPreservingAccount::Public(sender),
        PrivacyPreservingAccount::PrivatePdaForeign {
            account_id: pda_account_id,
            npk,
            vpk,
            identifier,
        },
    ];
    // The trailing `true` selects the proxy's "fund" path.
    let instruction = Program::serialize_instruction((seed, amount, auth_transfer_id, true))
        .context("failed to serialize pda_fund_spend_proxy fund instruction")?;
    wallet
        .send_privacy_preserving_tx(accounts, instruction, proxy_program)
        .await
        .map_err(|e| anyhow::anyhow!("{e}"))?;
    Ok(())
}
/// Spends from an owned private PDA to a fresh private-foreign recipient.
///
/// Alice must own the PDA in the wallet (i.e. it must have been synced after a receive).
#[expect(
    clippy::too_many_arguments,
    reason = "test helper — grouping args would obscure intent"
)]
async fn spend_private_pda(
    wallet: &WalletCore,
    pda_account_id: AccountId,
    recipient_npk: NullifierPublicKey,
    recipient_vpk: ViewingPublicKey,
    seed: PdaSeed,
    amount: u128,
    spend_program: &ProgramWithDependencies,
    auth_transfer_id: nssa::ProgramId,
) -> Result<()> {
    // Owned PDA as the source, a brand-new private-foreign account as the sink.
    let accounts = vec![
        PrivacyPreservingAccount::PrivatePdaOwned(pda_account_id),
        PrivacyPreservingAccount::PrivateForeign {
            npk: recipient_npk,
            vpk: recipient_vpk,
            identifier: 0,
        },
    ];
    // The trailing `false` selects the proxy's "spend" path.
    let instruction = Program::serialize_instruction((seed, amount, auth_transfer_id, false))
        .context("failed to serialize pda_fund_spend_proxy instruction")?;
    wallet
        .send_privacy_preserving_tx(accounts, instruction, spend_program)
        .await
        .map_err(|e| anyhow::anyhow!("{e}"))?;
    Ok(())
}
/// Two private transfers go to distinct members of the same PDA family (same seed and npk,
/// but identifier=0 and identifier=1). Alice then spends from both PDAs.
///
/// This exercises the full identifier-diversified private PDA lifecycle:
/// receive(id=0), receive(id=1) → sync → spend(id=0), spend(id=1) → sync → assert.
#[test]
async fn private_pda_family_members_receive_and_spend() -> Result<()> {
    let mut ctx = TestContext::new().await?;
    // ── Build alice's key chain ──────────────────────────────────────────────────────────────────
    let alice_chain_index = ctx.wallet_mut().create_private_accounts_key(None);
    let (alice_npk, alice_vpk) = {
        // Read the freshly created key node back out of wallet storage.
        let node = ctx
            .wallet()
            .storage()
            .user_data
            .private_key_tree
            .key_map
            .get(&alice_chain_index)
            .context("key node was just inserted")?;
        let kc = &node.value.0;
        (kc.nullifier_public_key, kc.viewing_public_key.clone())
    };
    // Load the proxy program binary built into the test artifacts directory.
    let proxy = {
        let path = PathBuf::from(env!("CARGO_MANIFEST_DIR"))
            .join("../artifacts/test_program_methods")
            .join(NSSA_PROGRAM_FOR_TEST_PDA_FUND_SPEND_PROXY);
        Program::new(std::fs::read(&path).with_context(|| format!("reading {path:?}"))?)
            .context("invalid pda_fund_spend_proxy binary")?
    };
    let auth_transfer = Program::authenticated_transfer_program();
    let proxy_id = proxy.id();
    let auth_transfer_id = auth_transfer.id();
    let seed = PdaSeed::new([42; 32]);
    let amount: u128 = 100;
    // The proxy chains into auth_transfer, so it is declared as a dependency.
    let spend_program =
        ProgramWithDependencies::new(proxy, [(auth_transfer_id, auth_transfer)].into());
    // Same (program, seed, npk) family — only the identifier differs.
    let alice_pda_0_id = AccountId::for_private_pda(&proxy_id, &seed, &alice_npk, 0);
    let alice_pda_1_id = AccountId::for_private_pda(&proxy_id, &seed, &alice_npk, 1);
    // Use two different public senders to avoid nonce conflicts between the back-to-back txs.
    let senders = ctx.existing_public_accounts();
    let sender_0 = senders[0];
    let sender_1 = senders[1];
    // ── Receive ──────────────────────────────────────────────────────────────────────────────────
    info!("Sending to alice_pda_0 (identifier=0)");
    fund_private_pda(
        ctx.wallet(),
        sender_0,
        alice_pda_0_id,
        alice_npk,
        alice_vpk.clone(),
        0,
        seed,
        amount,
        &spend_program,
        auth_transfer_id,
    )
    .await?;
    info!("Sending to alice_pda_1 (identifier=1)");
    fund_private_pda(
        ctx.wallet(),
        sender_1,
        alice_pda_1_id,
        alice_npk,
        alice_vpk.clone(),
        1,
        seed,
        amount,
        &spend_program,
        auth_transfer_id,
    )
    .await?;
    info!("Waiting for block");
    tokio::time::sleep(Duration::from_secs(TIME_TO_WAIT_FOR_BLOCK_SECONDS)).await;
    // Sync so alice's wallet discovers and stores both PDAs.
    wallet::cli::execute_subcommand(
        ctx.wallet_mut(),
        Command::Account(AccountSubcommand::SyncPrivate {}),
    )
    .await?;
    // Both PDAs must be discoverable and have the correct balance.
    let pda_0_account = ctx
        .wallet()
        .get_account_private(alice_pda_0_id)
        .context("alice_pda_0 not found after sync")?;
    assert_eq!(pda_0_account.balance, amount);
    let pda_1_account = ctx
        .wallet()
        .get_account_private(alice_pda_1_id)
        .context("alice_pda_1 not found after sync")?;
    assert_eq!(pda_1_account.balance, amount);
    // Commitments for both PDAs must be in the sequencer's state.
    let commitment_0 = ctx
        .wallet()
        .get_private_account_commitment(alice_pda_0_id)
        .context("commitment for alice_pda_0 missing")?;
    assert!(
        verify_commitment_is_in_state(commitment_0.clone(), ctx.sequencer_client()).await,
        "alice_pda_0 commitment not in state after receive"
    );
    let commitment_1 = ctx
        .wallet()
        .get_private_account_commitment(alice_pda_1_id)
        .context("commitment for alice_pda_1 missing")?;
    assert!(
        verify_commitment_is_in_state(commitment_1.clone(), ctx.sequencer_client()).await,
        "alice_pda_1 commitment not in state after receive"
    );
    // The identifier is part of the commitment preimage, so the two must differ.
    assert_ne!(
        commitment_0, commitment_1,
        "distinct identifiers must yield distinct commitments"
    );
    // ── Spend ─────────────────────────────────────────────────────────────────────────────────────
    // Fresh recipients — hardcoded npks not in any wallet.
    let recipient_npk_0 = NullifierPublicKey([0xAA; 32]);
    let recipient_vpk_0 = ViewingPublicKey::from_scalar(recipient_npk_0.0);
    let recipient_npk_1 = NullifierPublicKey([0xBB; 32]);
    let recipient_vpk_1 = ViewingPublicKey::from_scalar(recipient_npk_1.0);
    // Distinct spend amounts so a crossed wire would be caught by the balance asserts.
    let amount_spend_0: u128 = 13;
    let amount_spend_1: u128 = 37;
    info!("Alice spending from alice_pda_0");
    spend_private_pda(
        ctx.wallet(),
        alice_pda_0_id,
        recipient_npk_0,
        recipient_vpk_0,
        seed,
        amount_spend_0,
        &spend_program,
        auth_transfer_id,
    )
    .await?;
    info!("Alice spending from alice_pda_1");
    spend_private_pda(
        ctx.wallet(),
        alice_pda_1_id,
        recipient_npk_1,
        recipient_vpk_1,
        seed,
        amount_spend_1,
        &spend_program,
        auth_transfer_id,
    )
    .await?;
    info!("Waiting for block");
    tokio::time::sleep(Duration::from_secs(TIME_TO_WAIT_FOR_BLOCK_SECONDS)).await;
    wallet::cli::execute_subcommand(
        ctx.wallet_mut(),
        Command::Account(AccountSubcommand::SyncPrivate {}),
    )
    .await?;
    // After spending, PDAs should have the remaining balance.
    let pda_0_spent = ctx
        .wallet()
        .get_account_private(alice_pda_0_id)
        .context("alice_pda_0 not found after spend sync")?;
    assert_eq!(pda_0_spent.balance, amount - amount_spend_0);
    let pda_1_spent = ctx
        .wallet()
        .get_account_private(alice_pda_1_id)
        .context("alice_pda_1 not found after spend sync")?;
    assert_eq!(pda_1_spent.balance, amount - amount_spend_1);
    // Post-spend commitments must be in state.
    let post_spend_commitment_0 = ctx
        .wallet()
        .get_private_account_commitment(alice_pda_0_id)
        .context("post-spend commitment for alice_pda_0 missing")?;
    assert!(
        verify_commitment_is_in_state(post_spend_commitment_0, ctx.sequencer_client()).await,
        "alice_pda_0 post-spend commitment not in state"
    );
    let post_spend_commitment_1 = ctx
        .wallet()
        .get_private_account_commitment(alice_pda_1_id)
        .context("post-spend commitment for alice_pda_1 missing")?;
    assert!(
        verify_commitment_is_in_state(post_spend_commitment_1, ctx.sequencer_client()).await,
        "alice_pda_1 post-spend commitment not in state"
    );
    info!("Private PDA family member receive-and-spend test passed");
    Ok(())
}

View File

@ -0,0 +1,234 @@
#![expect(
clippy::tests_outside_test_module,
reason = "Integration test file, not inside a #[cfg(test)] module"
)]
#![expect(
clippy::shadow_unrelated,
reason = "Sequential wallet commands naturally reuse the `command` binding"
)]
//! Shared account integration tests.
//!
//! Demonstrates:
//! 1. Group creation and GMS distribution via seal/unseal.
//! 2. Shared regular private account creation via `--for-gms`.
//! 3. Funding a shared account from a public account.
//! 4. Syncing discovers the funded shared account state.
use std::time::Duration;
use anyhow::{Context as _, Result};
use integration_tests::{TIME_TO_WAIT_FOR_BLOCK_SECONDS, TestContext, format_public_account_id};
use log::info;
use tokio::test;
use wallet::cli::{
Command, SubcommandReturnValue,
account::{AccountSubcommand, NewSubcommand},
group::GroupSubcommand,
programs::native_token_transfer::AuthTransferSubcommand,
};
/// Create a group, create a shared account from it, and verify registration.
#[test]
async fn group_create_and_shared_account_registration() -> Result<()> {
    let mut ctx = TestContext::new().await?;
    // Create the group whose GMS backs the shared account.
    let new_group = Command::Group(GroupSubcommand::New {
        name: "test-group".into(),
    });
    wallet::cli::execute_subcommand(ctx.wallet_mut(), new_group).await?;
    // The group key holder must now exist in wallet storage.
    assert!(
        ctx.wallet()
            .storage()
            .user_data
            .group_key_holder("test-group")
            .is_some()
    );
    // Derive a shared regular private account from the group.
    let new_account = Command::Account(AccountSubcommand::New(NewSubcommand::PrivateGms {
        group: "test-group".into(),
        label: Some("shared-acc".into()),
        pda: false,
        seed: None,
        program_id: None,
        identifier: None,
    }));
    let shared_account_id =
        match wallet::cli::execute_subcommand(ctx.wallet_mut(), new_account).await? {
            SubcommandReturnValue::RegisterAccount { account_id } => account_id,
            _ => anyhow::bail!("Expected RegisterAccount return value"),
        };
    // The shared account must be registered in storage under the group label.
    let entry = ctx
        .wallet()
        .storage()
        .user_data
        .shared_private_account(&shared_account_id)
        .context("Shared account not found in storage")?;
    assert_eq!(entry.group_label, "test-group");
    assert!(entry.pda_seed.is_none());
    info!("Shared account registered: {shared_account_id}");
    Ok(())
}
/// GMS seal/unseal round-trip via invite/join, verify key agreement.
#[test]
async fn group_invite_join_key_agreement() -> Result<()> {
    let mut ctx = TestContext::new().await?;
    // Generate a sealing key for this wallet.
    let gen_sealing_key = Command::Group(GroupSubcommand::NewSealingKey);
    wallet::cli::execute_subcommand(ctx.wallet_mut(), gen_sealing_key).await?;
    // Create the group whose GMS will be distributed.
    let create_group = Command::Group(GroupSubcommand::New {
        name: "alice-group".into(),
    });
    wallet::cli::execute_subcommand(ctx.wallet_mut(), create_group).await?;
    // Seal GMS for ourselves (simulating invite to another wallet).
    let sealing_sk = ctx
        .wallet()
        .storage()
        .user_data
        .sealing_secret_key
        .context("Sealing key not found")?;
    let sealing_pk =
        key_protocol::key_management::group_key_holder::SealingPublicKey::from_scalar(sealing_sk);
    let sealed_hex = {
        let holder = ctx
            .wallet()
            .storage()
            .user_data
            .group_key_holder("alice-group")
            .context("Group not found")?;
        hex::encode(holder.seal_for(&sealing_pk))
    };
    // Join under a different name (simulating Bob receiving the sealed GMS).
    let join_group = Command::Group(GroupSubcommand::Join {
        name: "bob-copy".into(),
        sealed: sealed_hex,
    });
    wallet::cli::execute_subcommand(ctx.wallet_mut(), join_group).await?;
    // Both holders must derive identical keys for the same derivation seed.
    let derivation_seed = [42_u8; 32];
    let npk_for = |group_label: &str| {
        ctx.wallet()
            .storage()
            .user_data
            .group_key_holder(group_label)
            .unwrap()
            .derive_keys_for_shared_account(&derivation_seed)
            .generate_nullifier_public_key()
    };
    assert_eq!(
        npk_for("alice-group"),
        npk_for("bob-copy"),
        "Key agreement: same GMS produces same keys"
    );
    info!("Key agreement verified via invite/join");
    Ok(())
}
/// Fund a shared account from a public account via auth-transfer, then sync.
/// TODO: Requires auth-transfer init to work with shared accounts (authorization flow).
#[test]
async fn fund_shared_account_from_public() -> Result<()> {
    let mut ctx = TestContext::new().await?;
    // Create the group backing the shared account.
    let create_group = Command::Group(GroupSubcommand::New {
        name: "fund-group".into(),
    });
    wallet::cli::execute_subcommand(ctx.wallet_mut(), create_group).await?;
    let create_account = Command::Account(AccountSubcommand::New(NewSubcommand::PrivateGms {
        group: "fund-group".into(),
        label: None,
        pda: false,
        seed: None,
        program_id: None,
        identifier: None,
    }));
    let shared_id =
        match wallet::cli::execute_subcommand(ctx.wallet_mut(), create_account).await? {
            SubcommandReturnValue::RegisterAccount { account_id } => account_id,
            _ => anyhow::bail!("Expected RegisterAccount return value"),
        };
    // Initialize the shared account under auth-transfer.
    let init_account = Command::AuthTransfer(AuthTransferSubcommand::Init {
        account_id: Some(format!("Private/{shared_id}")),
        account_label: None,
    });
    wallet::cli::execute_subcommand(ctx.wallet_mut(), init_account).await?;
    tokio::time::sleep(Duration::from_secs(TIME_TO_WAIT_FOR_BLOCK_SECONDS)).await;
    // Sync so the wallet discovers the initialized account.
    wallet::cli::execute_subcommand(
        ctx.wallet_mut(),
        Command::Account(AccountSubcommand::SyncPrivate),
    )
    .await?;
    // Fund the shared account from a public account.
    let from_public = ctx.existing_public_accounts()[0];
    let send_funds = Command::AuthTransfer(AuthTransferSubcommand::Send {
        from: Some(format_public_account_id(from_public)),
        from_label: None,
        to: Some(format!("Private/{shared_id}")),
        to_label: None,
        to_npk: None,
        to_vpk: None,
        to_identifier: None,
        amount: 100,
    });
    wallet::cli::execute_subcommand(ctx.wallet_mut(), send_funds).await?;
    tokio::time::sleep(Duration::from_secs(TIME_TO_WAIT_FOR_BLOCK_SECONDS)).await;
    // Sync so the funded state is discovered.
    wallet::cli::execute_subcommand(
        ctx.wallet_mut(),
        Command::Account(AccountSubcommand::SyncPrivate),
    )
    .await?;
    // The shared account must now reflect the transfer.
    let entry = ctx
        .wallet()
        .storage()
        .user_data
        .shared_private_account(&shared_id)
        .context("Shared account not found after sync")?;
    info!(
        "Shared account balance after funding: {}",
        entry.account.balance
    );
    assert_eq!(
        entry.account.balance, 100,
        "Shared account should have received 100"
    );
    Ok(())
}

View File

@ -27,7 +27,7 @@ use nssa::{
public_transaction as putx,
};
use nssa_core::{
MembershipProof, NullifierPublicKey,
InputAccountIdentity, MembershipProof, NullifierPublicKey,
account::{AccountWithMetadata, Nonce, data::Data},
encryption::ViewingPublicKey,
};
@ -220,7 +220,7 @@ fn build_privacy_transaction() -> PrivacyPreservingTransaction {
data: Data::default(),
},
true,
AccountId::from((&sender_npk, 0)),
AccountId::for_regular_private_account(&sender_npk, 0),
);
let recipient_nsk = [2; 32];
let recipient_vsk = [99; 32];
@ -229,7 +229,7 @@ fn build_privacy_transaction() -> PrivacyPreservingTransaction {
let recipient_pre = AccountWithMetadata::new(
Account::default(),
false,
AccountId::from((&recipient_npk, 0)),
AccountId::for_regular_private_account(&recipient_npk, 0),
);
let eph_holder_from = EphemeralKeyHolder::new(&sender_npk);
@ -251,10 +251,19 @@ fn build_privacy_transaction() -> PrivacyPreservingTransaction {
let (output, proof) = circuit::execute_and_prove(
vec![sender_pre, recipient_pre],
Program::serialize_instruction(balance_to_move).unwrap(),
vec![1, 2],
vec![(sender_npk, 0, sender_ss), (recipient_npk, 0, recipient_ss)],
vec![sender_nsk],
vec![Some(proof)],
vec![
InputAccountIdentity::PrivateAuthorizedUpdate {
ssk: sender_ss,
nsk: sender_nsk,
membership_proof: proof,
identifier: 0,
},
InputAccountIdentity::PrivateUnauthorized {
npk: recipient_npk,
ssk: recipient_ss,
identifier: 0,
},
],
&program.into(),
)
.unwrap();

View File

@ -801,7 +801,7 @@ fn test_wallet_ffi_transfer_shielded() -> Result<()> {
let (to, to_keys) = unsafe {
let mut out_keys = FfiPrivateAccountKeys::default();
wallet_ffi_create_private_accounts_key(wallet_ffi_handle, &raw mut out_keys);
let account_id = nssa::AccountId::from((&out_keys.npk(), 0_u128));
let account_id = nssa::AccountId::for_regular_private_account(&out_keys.npk(), 0_u128);
let to: FfiBytes32 = (&account_id).into();
(to, out_keys)
};
@ -935,7 +935,7 @@ fn test_wallet_ffi_transfer_private() -> Result<()> {
let (to, to_keys) = unsafe {
let mut out_keys = FfiPrivateAccountKeys::default();
wallet_ffi_create_private_accounts_key(wallet_ffi_handle, &raw mut out_keys);
let account_id = nssa::AccountId::from((&out_keys.npk(), 0_u128));
let account_id = nssa::AccountId::for_regular_private_account(&out_keys.npk(), 0_u128);
let to: FfiBytes32 = (&account_id).into();
(to, out_keys)
};

View File

@ -26,3 +26,4 @@ itertools.workspace = true
[dev-dependencies]
base58.workspace = true
bincode.workspace = true

View File

@ -0,0 +1,601 @@
use aes_gcm::{Aes256Gcm, KeyInit as _, aead::Aead as _};
use nssa_core::{
SharedSecretKey,
encryption::{Scalar, shared_key_derivation::Secp256k1Point},
program::{PdaSeed, ProgramId},
};
use rand::{RngCore as _, rngs::OsRng};
use serde::{Deserialize, Serialize};
use sha2::{Digest as _, digest::FixedOutput as _};
use super::secret_holders::{PrivateKeyHolder, SecretSpendingKey};
/// Public key used to seal a `GroupKeyHolder` for distribution to a recipient.
///
/// Wraps a secp256k1 point but is a distinct type from `ViewingPublicKey` to enforce
/// key separation: viewing keys encrypt account state, sealing keys encrypt the GMS
/// for off-chain distribution.
pub struct SealingPublicKey(Secp256k1Point);
impl SealingPublicKey {
    /// Derive the sealing public key from a secret scalar.
    #[must_use]
    pub fn from_scalar(scalar: Scalar) -> Self {
        Self(Secp256k1Point::from_scalar(scalar))
    }
    /// Construct from raw serialized bytes (e.g. received from another wallet).
    ///
    /// NOTE(review): no point validation is performed here — presumably
    /// `SharedSecretKey::new` rejects invalid points downstream; confirm.
    #[must_use]
    pub const fn from_bytes(bytes: Vec<u8>) -> Self {
        Self(Secp256k1Point(bytes))
    }
    /// Returns the raw bytes for display or transmission.
    #[must_use]
    pub fn to_bytes(&self) -> &[u8] {
        &self.0.0
    }
}
/// Secret key used to unseal a `GroupKeyHolder` received from another member.
pub type SealingSecretKey = Scalar;
/// Manages shared viewing keys for a group of controllers owning private PDAs.
///
/// The Group Master Secret (GMS) is a 32-byte random value shared among controllers.
/// Each private PDA owned by the group gets a unique [`SecretSpendingKey`] derived from
/// the GMS by mixing the PDA seed into the SHA-256 input (see `secret_spending_key_for_pda`).
///
/// # Distribution
///
/// The GMS is a long-term secret and must never cross a trust boundary in raw form.
/// Controllers share it off-chain by sealing it under each recipient's [`SealingPublicKey`]
/// (see `seal_for` / `unseal`). Wallets persisting a `GroupKeyHolder` must encrypt it at
/// rest; the raw bytes are exposed only via [`GroupKeyHolder::dangerous_raw_gms`], which
/// is intended for the sealing path exclusively.
///
/// # Logging safety
///
/// `Debug` is implemented manually to redact the GMS; formatting this value with `{:?}`
/// will not leak the secret. Code that formats through `{:#?}` on containing types is
/// safe for the same reason.
#[derive(Serialize, Deserialize, Clone)]
pub struct GroupKeyHolder {
    // The Group Master Secret: 32 random bytes. Serialized by serde (so it can
    // be persisted), but redacted by the manual Debug impl.
    gms: [u8; 32],
}
// Manual Debug impl: prints "<redacted>" in place of the GMS bytes so that
// `{:?}` / `{:#?}` on this type (or anything containing it) never leaks the secret.
impl std::fmt::Debug for GroupKeyHolder {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        f.debug_struct("GroupKeyHolder")
            .field("gms", &"<redacted>")
            .finish()
    }
}
impl Default for GroupKeyHolder {
fn default() -> Self {
Self::new()
}
}
impl GroupKeyHolder {
    /// Create a new group with a fresh random GMS.
    #[must_use]
    pub fn new() -> Self {
        let mut gms = [0_u8; 32];
        // OS-backed CSPRNG; the GMS is the group's long-term root secret.
        OsRng.fill_bytes(&mut gms);
        Self { gms }
    }
    /// Restore from an existing GMS (received via `unseal`).
    #[must_use]
    pub const fn from_gms(gms: [u8; 32]) -> Self {
        Self { gms }
    }
    /// Returns the raw 32-byte GMS. The name reflects intent: only the sealed-distribution
    /// path (`seal_for`) and sealed-at-rest persistence should ever need the raw bytes. Do
    /// not log the result, do not pass it across an untrusted channel.
    #[must_use]
    pub const fn dangerous_raw_gms(&self) -> &[u8; 32] {
        &self.gms
    }
    /// Derive a per-PDA [`SecretSpendingKey`] by mixing the seed into the SHA-256 input.
    ///
    /// Each distinct `(program_id, pda_seed)` pair produces a distinct SSK in the full 256-bit
    /// space, so adversarial seed-grinding cannot collide two PDAs' derived keys under the same
    /// group. Uses the codebase's 32-byte protocol-versioned domain-separation convention.
    fn secret_spending_key_for_pda(
        &self,
        program_id: &ProgramId,
        pda_seed: &PdaSeed,
    ) -> SecretSpendingKey {
        // 32-byte domain separator: hash input is PREFIX || gms || program_id || seed.
        const PREFIX: &[u8; 32] = b"/LEE/v0.3/GroupKeyDerivation/SSK";
        let mut hasher = sha2::Sha256::new();
        hasher.update(PREFIX);
        hasher.update(self.gms);
        // Program id words are hashed in little-endian byte order so the
        // derivation is stable across platforms.
        for word in program_id {
            hasher.update(word.to_le_bytes());
        }
        hasher.update(pda_seed.as_ref());
        SecretSpendingKey(hasher.finalize_fixed().into())
    }
    /// Derive keys for a specific PDA under a given program.
    ///
    /// All controllers holding the same GMS independently derive the same keys for the
    /// same `(program_id, seed)` because the derivation is deterministic.
    #[must_use]
    pub fn derive_keys_for_pda(
        &self,
        program_id: &ProgramId,
        pda_seed: &PdaSeed,
    ) -> PrivateKeyHolder {
        self.secret_spending_key_for_pda(program_id, pda_seed)
            .produce_private_key_holder(None)
    }
    /// Derive keys for a shared regular (non-PDA) private account.
    ///
    /// Uses a distinct domain separator from `derive_keys_for_pda` to prevent cross-domain
    /// key collisions. The `derivation_seed` should be a stable, unique 32-byte value
    /// (e.g. derived deterministically from the account's identifier).
    #[must_use]
    pub fn derive_keys_for_shared_account(&self, derivation_seed: &[u8; 32]) -> PrivateKeyHolder {
        // Distinct 32-byte tag ("SHA" vs "SSK") keeps this derivation disjoint
        // from the PDA path even for identical gms/seed inputs.
        const PREFIX: &[u8; 32] = b"/LEE/v0.3/GroupKeyDerivation/SHA";
        let mut hasher = sha2::Sha256::new();
        hasher.update(PREFIX);
        hasher.update(self.gms);
        hasher.update(derivation_seed);
        SecretSpendingKey(hasher.finalize_fixed().into()).produce_private_key_holder(None)
    }
    /// Encrypts this holder's GMS under the recipient's [`SealingPublicKey`].
    ///
    /// Uses an ephemeral ECDH key exchange to derive a shared secret, then AES-256-GCM
    /// to encrypt the payload. The returned bytes are
    /// `ephemeral_pubkey (33) || nonce (12) || ciphertext+tag (48)` = 93 bytes.
    ///
    /// Each call generates a fresh ephemeral key, so two seals of the same holder produce
    /// different ciphertexts.
    #[must_use]
    pub fn seal_for(&self, recipient_key: &SealingPublicKey) -> Vec<u8> {
        // Fresh ephemeral scalar per seal. NOTE(review): assumes
        // `from_scalar`/`SharedSecretKey::new` handle arbitrary 32 random
        // bytes (scalar-range reduction) — confirm in nssa_core.
        let mut ephemeral_scalar: Scalar = [0_u8; 32];
        OsRng.fill_bytes(&mut ephemeral_scalar);
        let ephemeral_pubkey = Secp256k1Point::from_scalar(ephemeral_scalar);
        // ECDH: shared = ephemeral_sk * recipient_pk.
        let shared = SharedSecretKey::new(&ephemeral_scalar, &recipient_key.0);
        let aes_key = Self::seal_kdf(&shared);
        let cipher = Aes256Gcm::new(&aes_key.into());
        // Random 96-bit nonce; safe because the key is unique per seal (fresh ECDH).
        let mut nonce_bytes = [0_u8; 12];
        OsRng.fill_bytes(&mut nonce_bytes);
        let nonce = aes_gcm::Nonce::from(nonce_bytes);
        let ciphertext = cipher
            .encrypt(&nonce, self.gms.as_ref())
            .expect("AES-GCM encryption should not fail with valid key/nonce");
        // Checked arithmetic to satisfy the crate's overflow-lint conventions.
        let capacity = 33_usize
            .checked_add(12)
            .and_then(|n| n.checked_add(ciphertext.len()))
            .expect("seal capacity overflow");
        let mut out = Vec::with_capacity(capacity);
        // Wire layout: ephemeral_pubkey || nonce || ciphertext+tag.
        out.extend_from_slice(&ephemeral_pubkey.0);
        out.extend_from_slice(&nonce_bytes);
        out.extend_from_slice(&ciphertext);
        out
    }
    /// Decrypts a sealed `GroupKeyHolder` using the recipient's [`SealingSecretKey`].
    ///
    /// Returns `Err` if the ciphertext is too short, the ECDH point is invalid, or the
    /// AES-GCM authentication tag doesn't verify (wrong key or tampered data).
    pub fn unseal(sealed: &[u8], own_key: &SealingSecretKey) -> Result<Self, SealError> {
        // Header is ephemeral_pubkey (33) || nonce (12); the GCM tag adds 16 bytes minimum.
        const HEADER_LEN: usize = 33 + 12;
        const MIN_LEN: usize = HEADER_LEN + 16;
        if sealed.len() < MIN_LEN {
            return Err(SealError::TooShort);
        }
        // MIN_LEN (61) > HEADER_LEN (45), so all slicing below is in bounds.
        let ephemeral_pubkey = Secp256k1Point(sealed[..33].to_vec());
        let nonce = aes_gcm::Nonce::from_slice(&sealed[33..HEADER_LEN]);
        let ciphertext = &sealed[HEADER_LEN..];
        // Recompute the same ECDH secret the sealer derived: own_sk * ephemeral_pk.
        let shared = SharedSecretKey::new(own_key, &ephemeral_pubkey);
        let aes_key = Self::seal_kdf(&shared);
        let cipher = Aes256Gcm::new(&aes_key.into());
        let plaintext = cipher
            .decrypt(nonce, ciphertext)
            .map_err(|_err| SealError::DecryptionFailed)?;
        // A valid seal always carries exactly the 32-byte GMS.
        if plaintext.len() != 32 {
            return Err(SealError::DecryptionFailed);
        }
        let mut gms = [0_u8; 32];
        gms.copy_from_slice(&plaintext);
        Ok(Self::from_gms(gms))
    }
    /// Derives an AES-256 key from the ECDH shared secret via SHA-256 with a domain prefix.
    fn seal_kdf(shared: &SharedSecretKey) -> [u8; 32] {
        // 26 ASCII chars + 6 NUL padding = 32-byte domain separator.
        const PREFIX: &[u8; 32] = b"/LEE/v0.3/GroupKeySeal/AES\x00\x00\x00\x00\x00\x00";
        let mut hasher = sha2::Sha256::new();
        hasher.update(PREFIX);
        hasher.update(shared.0);
        hasher.finalize_fixed().into()
    }
}
/// Failure modes of unsealing a sealed group-key payload.
///
/// Implements [`std::error::Error`] so callers can propagate it with `?` into
/// `Box<dyn Error>` / `anyhow`-style error types, and derives equality/copy so tests
/// and match-free comparisons work directly on values.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum SealError {
    /// The sealed payload is shorter than the fixed header plus an AES-GCM tag.
    TooShort,
    /// AES-GCM authentication failed (wrong key or tampered data), or the recovered
    /// plaintext did not have the expected length.
    DecryptionFailed,
}

impl std::fmt::Display for SealError {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        match self {
            Self::TooShort => write!(f, "sealed payload is too short"),
            Self::DecryptionFailed => write!(f, "failed to decrypt sealed payload"),
        }
    }
}

impl std::error::Error for SealError {}
#[cfg(test)]
mod tests {
    //! Unit tests for `GroupKeyHolder`: GMS-based key derivation (PDA and shared-account
    //! paths), seal/unseal transport of the GMS, serde persistence, and domain-separation
    //! guarantees against the personal-key derivation path.
    use nssa_core::NullifierPublicKey;

    use super::*;

    // Fixed program id shared by most tests below.
    const TEST_PROGRAM_ID: ProgramId = [9; 8];

    /// Two holders from the same GMS derive identical keys for the same PDA seed.
    #[test]
    fn same_gms_same_seed_produces_same_keys() {
        let gms = [42_u8; 32];
        let holder_a = GroupKeyHolder::from_gms(gms);
        let holder_b = GroupKeyHolder::from_gms(gms);
        let seed = PdaSeed::new([1; 32]);
        let keys_a = holder_a.derive_keys_for_pda(&TEST_PROGRAM_ID, &seed);
        let keys_b = holder_b.derive_keys_for_pda(&TEST_PROGRAM_ID, &seed);
        assert_eq!(
            keys_a.generate_nullifier_public_key().to_byte_array(),
            keys_b.generate_nullifier_public_key().to_byte_array(),
        );
    }

    /// Different PDA seeds produce different keys from the same GMS.
    #[test]
    fn same_gms_different_seed_produces_different_keys() {
        let holder = GroupKeyHolder::from_gms([42_u8; 32]);
        let seed_a = PdaSeed::new([1; 32]);
        let seed_b = PdaSeed::new([2; 32]);
        let npk_a = holder
            .derive_keys_for_pda(&TEST_PROGRAM_ID, &seed_a)
            .generate_nullifier_public_key();
        let npk_b = holder
            .derive_keys_for_pda(&TEST_PROGRAM_ID, &seed_b)
            .generate_nullifier_public_key();
        assert_ne!(npk_a.to_byte_array(), npk_b.to_byte_array());
    }

    /// Different GMS produce different keys for the same PDA seed.
    #[test]
    fn different_gms_same_seed_produces_different_keys() {
        let holder_a = GroupKeyHolder::from_gms([42_u8; 32]);
        let holder_b = GroupKeyHolder::from_gms([99_u8; 32]);
        let seed = PdaSeed::new([1; 32]);
        let npk_a = holder_a
            .derive_keys_for_pda(&TEST_PROGRAM_ID, &seed)
            .generate_nullifier_public_key();
        let npk_b = holder_b
            .derive_keys_for_pda(&TEST_PROGRAM_ID, &seed)
            .generate_nullifier_public_key();
        assert_ne!(npk_a.to_byte_array(), npk_b.to_byte_array());
    }

    /// GMS round-trip: export and restore produces the same keys.
    #[test]
    fn gms_round_trip() {
        let original = GroupKeyHolder::from_gms([7_u8; 32]);
        let restored = GroupKeyHolder::from_gms(*original.dangerous_raw_gms());
        let seed = PdaSeed::new([1; 32]);
        let npk_original = original
            .derive_keys_for_pda(&TEST_PROGRAM_ID, &seed)
            .generate_nullifier_public_key();
        let npk_restored = restored
            .derive_keys_for_pda(&TEST_PROGRAM_ID, &seed)
            .generate_nullifier_public_key();
        assert_eq!(npk_original.to_byte_array(), npk_restored.to_byte_array());
    }

    /// The derived `NullifierPublicKey` is non-zero (sanity check).
    #[test]
    fn derived_npk_is_non_zero() {
        let holder = GroupKeyHolder::from_gms([42_u8; 32]);
        let seed = PdaSeed::new([1; 32]);
        let npk = holder
            .derive_keys_for_pda(&TEST_PROGRAM_ID, &seed)
            .generate_nullifier_public_key();
        assert_ne!(npk, NullifierPublicKey([0; 32]));
    }

    /// Pins the end-to-end derivation for a fixed (GMS, `ProgramId`, `PdaSeed`). Any change
    /// to `secret_spending_key_for_pda`, the `PrivateKeyHolder` nsk/npk chain, or the
    /// `AccountId::for_private_pda` formula breaks this test. Mirrors the pinned-value
    /// pattern from `for_private_pda_matches_pinned_value` in `nssa_core`.
    #[test]
    fn pinned_end_to_end_derivation_for_private_pda() {
        use nssa_core::{account::AccountId, program::ProgramId};

        // Local re-declarations match TEST_PROGRAM_ID; kept so the whole pinned input
        // tuple is visible in one place.
        let gms = [42_u8; 32];
        let seed = PdaSeed::new([1; 32]);
        let program_id: ProgramId = [9; 8];
        let holder = GroupKeyHolder::from_gms(gms);
        let npk = holder
            .derive_keys_for_pda(&TEST_PROGRAM_ID, &seed)
            .generate_nullifier_public_key();
        let account_id = AccountId::for_private_pda(&program_id, &seed, &npk, u128::MAX);
        // Pinned bytes: do not edit unless the derivation scheme intentionally changes.
        let expected_npk = NullifierPublicKey([
            136, 176, 234, 71, 208, 8, 143, 142, 126, 155, 132, 18, 71, 27, 88, 56, 100, 90, 79,
            215, 76, 92, 60, 166, 104, 35, 51, 91, 16, 114, 188, 112,
        ]);
        // AccountId is derived from (program_id, seed, npk), so it changes when npk changes.
        // We verify npk is pinned, and AccountId is deterministically derived from it.
        let expected_account_id =
            AccountId::for_private_pda(&program_id, &seed, &expected_npk, u128::MAX);
        assert_eq!(npk, expected_npk);
        assert_eq!(account_id, expected_account_id);
    }

    /// Wallets persist `GroupKeyHolder` to disk and reload it on startup. This test pins
    /// the serde round-trip: serialize, deserialize, and assert the derived keys for a
    /// sample seed match on both sides. A silent encoding drift would corrupt every
    /// group-owned account.
    #[test]
    fn gms_serde_round_trip_preserves_derivation() {
        let original = GroupKeyHolder::from_gms([7_u8; 32]);
        let encoded = bincode::serialize(&original).expect("serialize");
        let restored: GroupKeyHolder = bincode::deserialize(&encoded).expect("deserialize");
        let seed = PdaSeed::new([1; 32]);
        let npk_original = original
            .derive_keys_for_pda(&TEST_PROGRAM_ID, &seed)
            .generate_nullifier_public_key();
        let npk_restored = restored
            .derive_keys_for_pda(&TEST_PROGRAM_ID, &seed)
            .generate_nullifier_public_key();
        assert_eq!(npk_original, npk_restored);
        assert_eq!(original.dangerous_raw_gms(), restored.dangerous_raw_gms());
    }

    /// A `GroupKeyHolder` constructed from the same 32 bytes as a personal
    /// `SecretSpendingKey` must not derive the same `NullifierPublicKey` as the personal
    /// path, so a private PDA cannot be spent by a personal nullifier even under
    /// adversarial key-material reuse. The safety rests on the group path's distinct
    /// domain-separation prefix plus the seed mix-in (see `secret_spending_key_for_pda`).
    #[test]
    fn group_derivation_does_not_collide_with_personal_path_at_shared_bytes() {
        let shared_bytes = [13_u8; 32];
        let seed = PdaSeed::new([5; 32]);
        let group_npk = GroupKeyHolder::from_gms(shared_bytes)
            .derive_keys_for_pda(&TEST_PROGRAM_ID, &seed)
            .generate_nullifier_public_key();
        let personal_npk = SecretSpendingKey(shared_bytes)
            .produce_private_key_holder(None)
            .generate_nullifier_public_key();
        assert_ne!(group_npk, personal_npk);
    }

    /// Seal then unseal recovers the same GMS and derived keys.
    #[test]
    fn seal_unseal_round_trip() {
        let holder = GroupKeyHolder::from_gms([42_u8; 32]);
        let recipient_ssk = SecretSpendingKey([7_u8; 32]);
        let recipient_keys = recipient_ssk.produce_private_key_holder(None);
        let recipient_vpk = recipient_keys.generate_viewing_public_key();
        let recipient_vsk = recipient_keys.viewing_secret_key;
        let sealed = holder.seal_for(&SealingPublicKey::from_bytes(recipient_vpk.0));
        let restored = GroupKeyHolder::unseal(&sealed, &recipient_vsk).expect("unseal");
        assert_eq!(restored.dangerous_raw_gms(), holder.dangerous_raw_gms());
        let seed = PdaSeed::new([1; 32]);
        assert_eq!(
            holder
                .derive_keys_for_pda(&TEST_PROGRAM_ID, &seed)
                .generate_nullifier_public_key(),
            restored
                .derive_keys_for_pda(&TEST_PROGRAM_ID, &seed)
                .generate_nullifier_public_key(),
        );
    }

    /// Unsealing with a different VSK fails with `DecryptionFailed`.
    #[test]
    fn unseal_wrong_vsk_fails() {
        let holder = GroupKeyHolder::from_gms([42_u8; 32]);
        let recipient_ssk = SecretSpendingKey([7_u8; 32]);
        let recipient_vpk = recipient_ssk
            .produce_private_key_holder(None)
            .generate_viewing_public_key();
        let wrong_ssk = SecretSpendingKey([99_u8; 32]);
        let wrong_vsk = wrong_ssk
            .produce_private_key_holder(None)
            .viewing_secret_key;
        let sealed = holder.seal_for(&SealingPublicKey::from_bytes(recipient_vpk.0));
        let result = GroupKeyHolder::unseal(&sealed, &wrong_vsk);
        assert!(matches!(result, Err(super::SealError::DecryptionFailed)));
    }

    /// Tampered ciphertext fails authentication.
    #[test]
    fn unseal_tampered_ciphertext_fails() {
        let holder = GroupKeyHolder::from_gms([42_u8; 32]);
        let recipient_ssk = SecretSpendingKey([7_u8; 32]);
        let recipient_keys = recipient_ssk.produce_private_key_holder(None);
        let recipient_vpk = recipient_keys.generate_viewing_public_key();
        let recipient_vsk = recipient_keys.viewing_secret_key;
        let mut sealed = holder.seal_for(&SealingPublicKey::from_bytes(recipient_vpk.0));
        // Flip a byte in the ciphertext portion (after ephemeral_pubkey + nonce)
        let last = sealed.len() - 1;
        sealed[last] ^= 0xFF;
        let result = GroupKeyHolder::unseal(&sealed, &recipient_vsk);
        assert!(matches!(result, Err(super::SealError::DecryptionFailed)));
    }

    /// Two seals of the same holder produce different ciphertexts (ephemeral randomness).
    #[test]
    fn two_seals_produce_different_ciphertexts() {
        let holder = GroupKeyHolder::from_gms([42_u8; 32]);
        let recipient_ssk = SecretSpendingKey([7_u8; 32]);
        let recipient_vpk = recipient_ssk
            .produce_private_key_holder(None)
            .generate_viewing_public_key();
        let sealing_key = SealingPublicKey::from_bytes(recipient_vpk.0);
        let sealed_a = holder.seal_for(&sealing_key);
        let sealed_b = holder.seal_for(&sealing_key);
        assert_ne!(sealed_a, sealed_b);
    }

    /// Sealed payload is too short.
    #[test]
    fn unseal_too_short_fails() {
        // 10 bytes is below the minimum header + tag length, so TooShort is expected.
        let vsk: SealingSecretKey = [7_u8; 32];
        let result = GroupKeyHolder::unseal(&[0_u8; 10], &vsk);
        assert!(matches!(result, Err(super::SealError::TooShort)));
    }

    /// Degenerate GMS values (all-zeros, all-ones, single-bit) must still produce valid,
    /// non-zero, pairwise-distinct npks. Rules out accidental "if gms == default { return
    /// default }" style shortcuts in the derivation.
    #[test]
    fn degenerate_gms_produces_distinct_non_zero_keys() {
        let seed = PdaSeed::new([1; 32]);
        let degenerate = [[0_u8; 32], [0xFF_u8; 32], {
            let mut v = [0_u8; 32];
            v[0] = 1;
            v
        }];
        let npks: Vec<NullifierPublicKey> = degenerate
            .iter()
            .map(|gms| {
                GroupKeyHolder::from_gms(*gms)
                    .derive_keys_for_pda(&TEST_PROGRAM_ID, &seed)
                    .generate_nullifier_public_key()
            })
            .collect();
        for npk in &npks {
            assert_ne!(*npk, NullifierPublicKey([0; 32]));
        }
        // Pairwise distinctness over all (i, j) with i < j.
        for (i, a) in npks.iter().enumerate() {
            for b in &npks[i + 1..] {
                assert_ne!(a, b);
            }
        }
    }

    /// Full lifecycle: create group, distribute GMS via seal/unseal, verify key agreement.
    #[test]
    fn group_pda_lifecycle() {
        use nssa_core::account::AccountId;

        let alice_holder = GroupKeyHolder::new();
        let pda_seed = PdaSeed::new([42_u8; 32]);
        // NOTE(review): keys below are derived under TEST_PROGRAM_ID ([9; 8]) while the
        // AccountId uses program_id ([1; 8]). Both parties use the same pair so the
        // agreement assertions still hold — confirm the mismatch is intentional.
        let program_id: nssa_core::program::ProgramId = [1; 8];
        // Derive Alice's keys
        let alice_keys = alice_holder.derive_keys_for_pda(&TEST_PROGRAM_ID, &pda_seed);
        let alice_npk = alice_keys.generate_nullifier_public_key();
        // Seal GMS for Bob using Bob's viewing key, Bob unseals
        let bob_ssk = SecretSpendingKey([77_u8; 32]);
        let bob_keys = bob_ssk.produce_private_key_holder(None);
        let bob_vpk = bob_keys.generate_viewing_public_key();
        let bob_vsk = bob_keys.viewing_secret_key;
        let sealed = alice_holder.seal_for(&SealingPublicKey::from_bytes(bob_vpk.0));
        let bob_holder =
            GroupKeyHolder::unseal(&sealed, &bob_vsk).expect("Bob should unseal the GMS");
        // Key agreement: both derive identical NPK and AccountId
        let bob_npk = bob_holder
            .derive_keys_for_pda(&TEST_PROGRAM_ID, &pda_seed)
            .generate_nullifier_public_key();
        assert_eq!(alice_npk, bob_npk);
        let alice_account_id = AccountId::for_private_pda(&program_id, &pda_seed, &alice_npk, 0);
        let bob_account_id = AccountId::for_private_pda(&program_id, &pda_seed, &bob_npk, 0);
        assert_eq!(alice_account_id, bob_account_id);
    }

    /// Same GMS + same derivation seed produces same keys for shared accounts.
    #[test]
    fn shared_account_same_gms_same_seed_produces_same_keys() {
        let gms = [42_u8; 32];
        let derivation_seed = [1_u8; 32];
        let holder_a = GroupKeyHolder::from_gms(gms);
        let holder_b = GroupKeyHolder::from_gms(gms);
        let npk_a = holder_a
            .derive_keys_for_shared_account(&derivation_seed)
            .generate_nullifier_public_key();
        let npk_b = holder_b
            .derive_keys_for_shared_account(&derivation_seed)
            .generate_nullifier_public_key();
        assert_eq!(npk_a, npk_b);
    }

    /// Different derivation seeds produce different keys for shared accounts.
    #[test]
    fn shared_account_different_seeds_produce_different_keys() {
        let holder = GroupKeyHolder::from_gms([42_u8; 32]);
        let npk_a = holder
            .derive_keys_for_shared_account(&[1_u8; 32])
            .generate_nullifier_public_key();
        let npk_b = holder
            .derive_keys_for_shared_account(&[2_u8; 32])
            .generate_nullifier_public_key();
        assert_ne!(npk_a, npk_b);
    }

    /// PDA and shared account derivations from the same GMS + same bytes never collide.
    #[test]
    fn pda_and_shared_derivations_do_not_collide() {
        let holder = GroupKeyHolder::from_gms([42_u8; 32]);
        let bytes = [1_u8; 32];
        let pda_npk = holder
            .derive_keys_for_pda(&TEST_PROGRAM_ID, &PdaSeed::new(bytes))
            .generate_nullifier_public_key();
        let shared_npk = holder
            .derive_keys_for_shared_account(&bytes)
            .generate_nullifier_public_key();
        assert_ne!(pda_npk, shared_npk);
    }
}

View File

@ -1,5 +1,5 @@
use k256::{Scalar, elliptic_curve::PrimeField as _};
use nssa_core::{Identifier, NullifierPublicKey, encryption::ViewingPublicKey};
use nssa_core::{NullifierPublicKey, PrivateAccountKind, encryption::ViewingPublicKey};
use serde::{Deserialize, Serialize};
use crate::key_management::{
@ -10,7 +10,7 @@ use crate::key_management::{
#[derive(Debug, Serialize, Deserialize, Clone)]
pub struct ChildKeysPrivate {
pub value: (KeyChain, Vec<(Identifier, nssa::Account)>),
pub value: (KeyChain, Vec<(PrivateAccountKind, nssa::Account)>),
pub ccc: [u8; 32],
/// Can be [`None`] if root.
pub cci: Option<u32>,
@ -115,9 +115,11 @@ impl KeyTreeNode for ChildKeysPrivate {
}
fn account_ids(&self) -> impl Iterator<Item = nssa::AccountId> {
self.value.1.iter().map(|(identifier, _)| {
nssa::AccountId::from((&self.value.0.nullifier_public_key, *identifier))
})
let npk = self.value.0.nullifier_public_key;
self.value
.1
.iter()
.map(move |(kind, _)| nssa::AccountId::for_private_account(&npk, kind))
}
}

View File

@ -274,7 +274,10 @@ impl KeyTree<ChildKeysPrivate> {
identifier: Identifier,
) -> Option<nssa::AccountId> {
let node = self.key_map.get(cci)?;
let account_id = nssa::AccountId::from((&node.value.0.nullifier_public_key, identifier));
let account_id = nssa::AccountId::for_regular_private_account(
&node.value.0.nullifier_public_key,
identifier,
);
if self.account_id_map.contains_key(&account_id) {
return None;
}
@ -319,6 +322,7 @@ mod tests {
use std::{collections::HashSet, str::FromStr as _};
use nssa::AccountId;
use nssa_core::PrivateAccountKind;
use super::*;
@ -532,7 +536,7 @@ mod tests {
.get_mut(&ChainIndex::from_str("/1").unwrap())
.unwrap();
acc.value.1.push((
0,
PrivateAccountKind::Regular(0),
nssa::Account {
balance: 2,
..nssa::Account::default()
@ -544,7 +548,7 @@ mod tests {
.get_mut(&ChainIndex::from_str("/2").unwrap())
.unwrap();
acc.value.1.push((
0,
PrivateAccountKind::Regular(0),
nssa::Account {
balance: 3,
..nssa::Account::default()
@ -556,7 +560,7 @@ mod tests {
.get_mut(&ChainIndex::from_str("/0/1").unwrap())
.unwrap();
acc.value.1.push((
0,
PrivateAccountKind::Regular(0),
nssa::Account {
balance: 5,
..nssa::Account::default()
@ -568,7 +572,7 @@ mod tests {
.get_mut(&ChainIndex::from_str("/1/0").unwrap())
.unwrap();
acc.value.1.push((
0,
PrivateAccountKind::Regular(0),
nssa::Account {
balance: 6,
..nssa::Account::default()

View File

@ -6,6 +6,7 @@ use secret_holders::{PrivateKeyHolder, SecretSpendingKey, SeedHolder};
use serde::{Deserialize, Serialize};
pub mod ephemeral_key_holder;
pub mod group_key_holder;
pub mod key_tree;
pub mod secret_holders;

View File

@ -3,11 +3,12 @@ use std::collections::BTreeMap;
use anyhow::Result;
use k256::AffinePoint;
use nssa::{Account, AccountId};
use nssa_core::Identifier;
use nssa_core::{Identifier, PrivateAccountKind};
use serde::{Deserialize, Serialize};
use crate::key_management::{
KeyChain,
group_key_holder::GroupKeyHolder,
key_tree::{KeyTreePrivate, KeyTreePublic, chain_index::ChainIndex},
secret_holders::SeedHolder,
};
@ -17,10 +18,25 @@ pub type PublicKey = AffinePoint;
#[derive(Clone, Debug, Serialize, Deserialize)]
pub struct UserPrivateAccountData {
pub key_chain: KeyChain,
pub accounts: Vec<(Identifier, Account)>,
pub accounts: Vec<(PrivateAccountKind, Account)>,
}
/// Metadata for a shared account (GMS-derived), stored alongside the cached plaintext state.
/// The group label and identifier (or PDA seed) are needed to re-derive keys during sync.
#[derive(Clone, Debug, Serialize, Deserialize)]
pub struct SharedAccountEntry {
    /// Label of the owning group; keys the `group_key_holders` map in `NSSAUserData`.
    pub group_label: String,
    /// Identifier used during key re-derivation for this account.
    pub identifier: Identifier,
    /// For PDA accounts, the seed and program ID used to derive keys via `derive_keys_for_pda`.
    /// `None` for regular shared accounts (keys derived from identifier via derivation seed).
    #[serde(default)]
    pub pda_seed: Option<nssa_core::program::PdaSeed>,
    /// Program ID paired with `pda_seed`; `#[serde(default)]` keeps older serialized
    /// entries (which lacked this field) deserializable as `None`.
    #[serde(default)]
    pub pda_program_id: Option<nssa_core::program::ProgramId>,
    /// Cached plaintext state of the shared account.
    pub account: Account,
}
#[derive(Clone, Debug)]
pub struct NSSAUserData {
/// Default public accounts.
pub default_pub_account_signing_keys: BTreeMap<nssa::AccountId, nssa::PrivateKey>,
@ -30,6 +46,16 @@ pub struct NSSAUserData {
pub public_key_tree: KeyTreePublic,
/// Tree of private keys.
pub private_key_tree: KeyTreePrivate,
/// Group key holders for shared account management, keyed by a human-readable label.
pub group_key_holders: BTreeMap<String, GroupKeyHolder>,
/// Cached plaintext state of shared private accounts (PDAs and regular shared accounts),
/// keyed by `AccountId`. Each entry stores the group label and identifier needed
/// to re-derive keys during sync.
pub shared_private_accounts: BTreeMap<nssa::AccountId, SharedAccountEntry>,
/// Dedicated sealing secret key for GMS distribution. Generated once via
/// `wallet group new-sealing-key`. The corresponding public key is shared with
/// group members so they can seal GMS for this wallet.
pub sealing_secret_key: Option<nssa_core::encryption::Scalar>,
}
impl NSSAUserData {
@ -53,10 +79,11 @@ impl NSSAUserData {
) -> bool {
let mut check_res = true;
for (account_id, entry) in accounts_keys_map {
let any_match = entry.accounts.iter().any(|(identifier, _)| {
nssa::AccountId::from((&entry.key_chain.nullifier_public_key, *identifier))
== *account_id
});
let npk = &entry.key_chain.nullifier_public_key;
let any_match = entry
.accounts
.iter()
.any(|(kind, _)| nssa::AccountId::for_private_account(npk, kind) == *account_id);
if !any_match {
println!("No matching entry found for account_id {account_id}");
check_res = false;
@ -88,6 +115,9 @@ impl NSSAUserData {
default_user_private_accounts: default_accounts_key_chains,
public_key_tree,
private_key_tree,
group_key_holders: BTreeMap::new(),
shared_private_accounts: BTreeMap::new(),
sealing_secret_key: None,
})
}
@ -148,6 +178,7 @@ impl NSSAUserData {
}
/// Returns the key chain and account data for the given private account ID.
/// Does not cover shared private accounts — use `shared_private_account` for those.
#[must_use]
pub fn get_private_account(
&self,
@ -155,24 +186,27 @@ impl NSSAUserData {
) -> Option<(KeyChain, nssa_core::account::Account, Identifier)> {
// Check default accounts
if let Some(entry) = self.default_user_private_accounts.get(&account_id) {
for (identifier, account) in &entry.accounts {
let expected_id =
nssa::AccountId::from((&entry.key_chain.nullifier_public_key, *identifier));
if expected_id == account_id {
return Some((entry.key_chain.clone(), account.clone(), *identifier));
}
let npk = &entry.key_chain.nullifier_public_key;
if let Some((kind, account)) = entry
.accounts
.iter()
.find(|(kind, _)| nssa::AccountId::for_private_account(npk, kind) == account_id)
{
return Some((entry.key_chain.clone(), account.clone(), kind.identifier()));
}
return None;
}
// Check tree
if let Some(node) = self.private_key_tree.get_node(account_id) {
let key_chain = &node.value.0;
for (identifier, account) in &node.value.1 {
let expected_id =
nssa::AccountId::from((&key_chain.nullifier_public_key, *identifier));
if expected_id == account_id {
return Some((key_chain.clone(), account.clone(), *identifier));
}
let npk = &key_chain.nullifier_public_key;
if let Some((kind, account)) = node
.value
.1
.iter()
.find(|(kind, _)| nssa::AccountId::for_private_account(npk, kind) == account_id)
{
return Some((key_chain.clone(), account.clone(), kind.identifier()));
}
}
None
@ -195,6 +229,56 @@ impl NSSAUserData {
.copied()
.chain(self.private_key_tree.account_id_map.keys().copied())
}
/// Returns the `GroupKeyHolder` for the given label, if it exists.
#[must_use]
pub fn group_key_holder(&self, label: &str) -> Option<&GroupKeyHolder> {
self.group_key_holders.get(label)
}
/// Inserts or replaces a `GroupKeyHolder` under the given label.
///
/// If a holder already exists under this label, it is silently replaced and the old
/// GMS is lost. Callers must ensure label uniqueness across groups.
pub fn insert_group_key_holder(&mut self, label: String, holder: GroupKeyHolder) {
self.group_key_holders.insert(label, holder);
}
/// Returns the cached account for a shared private account, if it exists.
#[must_use]
pub fn shared_private_account(
&self,
account_id: &nssa::AccountId,
) -> Option<&SharedAccountEntry> {
self.shared_private_accounts.get(account_id)
}
/// Inserts or replaces a shared private account entry.
pub fn insert_shared_private_account(
&mut self,
account_id: nssa::AccountId,
entry: SharedAccountEntry,
) {
self.shared_private_accounts.insert(account_id, entry);
}
/// Updates the cached account state for a shared private account.
pub fn update_shared_private_account_state(
&mut self,
account_id: &nssa::AccountId,
account: nssa_core::account::Account,
) {
if let Some(entry) = self.shared_private_accounts.get_mut(account_id) {
entry.account = account;
}
}
/// Iterates over all shared private accounts.
pub fn shared_private_accounts_iter(
&self,
) -> impl Iterator<Item = (&nssa::AccountId, &SharedAccountEntry)> {
self.shared_private_accounts.iter()
}
}
impl Default for NSSAUserData {
@ -214,6 +298,112 @@ impl Default for NSSAUserData {
mod tests {
use super::*;
#[test]
fn group_key_holder_storage_round_trip() {
let mut user_data = NSSAUserData::default();
assert!(user_data.group_key_holder("test-group").is_none());
let holder = GroupKeyHolder::from_gms([42_u8; 32]);
user_data.insert_group_key_holder(String::from("test-group"), holder.clone());
let retrieved = user_data
.group_key_holder("test-group")
.expect("should exist");
assert_eq!(retrieved.dangerous_raw_gms(), holder.dangerous_raw_gms());
}
#[test]
fn group_key_holders_default_empty() {
let user_data = NSSAUserData::default();
assert!(user_data.group_key_holders.is_empty());
assert!(user_data.shared_private_accounts.is_empty());
}
#[test]
fn shared_account_entry_serde_round_trip() {
use nssa_core::program::PdaSeed;
let entry = SharedAccountEntry {
group_label: String::from("test-group"),
identifier: 42,
pda_seed: None,
pda_program_id: None,
account: nssa_core::account::Account::default(),
};
let encoded = bincode::serialize(&entry).expect("serialize");
let decoded: SharedAccountEntry = bincode::deserialize(&encoded).expect("deserialize");
assert_eq!(decoded.group_label, "test-group");
assert_eq!(decoded.identifier, 42);
assert!(decoded.pda_seed.is_none());
let pda_entry = SharedAccountEntry {
group_label: String::from("pda-group"),
identifier: u128::MAX,
pda_seed: Some(PdaSeed::new([7_u8; 32])),
pda_program_id: Some([9; 8]),
account: nssa_core::account::Account::default(),
};
let pda_encoded = bincode::serialize(&pda_entry).expect("serialize pda");
let pda_decoded: SharedAccountEntry =
bincode::deserialize(&pda_encoded).expect("deserialize pda");
assert_eq!(pda_decoded.group_label, "pda-group");
assert_eq!(pda_decoded.identifier, u128::MAX);
assert_eq!(pda_decoded.pda_seed.unwrap(), PdaSeed::new([7_u8; 32]));
}
#[test]
fn shared_account_entry_none_pda_seed_round_trips() {
// Verify that an entry with pda_seed=None serializes and deserializes correctly,
// confirming the #[serde(default)] attribute works for backward compatibility.
let entry = SharedAccountEntry {
group_label: String::from("old"),
identifier: 1,
pda_seed: None,
pda_program_id: None,
account: nssa_core::account::Account::default(),
};
let encoded = bincode::serialize(&entry).expect("serialize");
let decoded: SharedAccountEntry = bincode::deserialize(&encoded).expect("deserialize");
assert_eq!(decoded.group_label, "old");
assert_eq!(decoded.identifier, 1);
assert!(decoded.pda_seed.is_none());
}
#[test]
fn shared_account_derives_consistent_keys_from_group() {
use nssa_core::program::PdaSeed;
let mut user_data = NSSAUserData::default();
let gms_holder = GroupKeyHolder::from_gms([42_u8; 32]);
user_data.insert_group_key_holder(String::from("my-group"), gms_holder);
let holder = user_data.group_key_holder("my-group").unwrap();
// Regular shared account: derive via tag
let tag = [1_u8; 32];
let keys_a = holder.derive_keys_for_shared_account(&tag);
let keys_b = holder.derive_keys_for_shared_account(&tag);
assert_eq!(
keys_a.generate_nullifier_public_key(),
keys_b.generate_nullifier_public_key(),
);
// PDA shared account: derive via seed
let seed = PdaSeed::new([2_u8; 32]);
let pda_keys_a = holder.derive_keys_for_pda(&[9; 8], &seed);
let pda_keys_b = holder.derive_keys_for_pda(&[9; 8], &seed);
assert_eq!(
pda_keys_a.generate_nullifier_public_key(),
pda_keys_b.generate_nullifier_public_key(),
);
// PDA and shared derivations don't collide
assert_ne!(
keys_a.generate_nullifier_public_key(),
pda_keys_a.generate_nullifier_public_key(),
);
}
#[test]
fn new_account() {
let mut user_data = NSSAUserData::default();

View File

@ -12,23 +12,99 @@ use crate::{
pub struct PrivacyPreservingCircuitInput {
/// Outputs of the program execution.
pub program_outputs: Vec<ProgramOutput>,
/// Visibility mask for accounts.
///
/// - `0` - public account
/// - `1` - private account with authentication
/// - `2` - private account without authentication
/// - `3` - private PDA account
pub visibility_mask: Vec<u8>,
/// Public keys and identifiers of private accounts.
pub private_account_keys: Vec<(NullifierPublicKey, Identifier, SharedSecretKey)>,
/// Nullifier secret keys for authorized private accounts.
pub private_account_nsks: Vec<NullifierSecretKey>,
/// Membership proofs for private accounts. Can be [`None`] for uninitialized accounts.
pub private_account_membership_proofs: Vec<Option<MembershipProof>>,
/// One entry per `pre_state`, in the same order as the program's `pre_states`.
/// Length must equal the number of `pre_states` derived from `program_outputs`.
/// The guest's `private_pda_npk_by_position` and `private_pda_bound_positions`
/// rely on this position alignment.
pub account_identities: Vec<InputAccountIdentity>,
/// Program ID.
pub program_id: ProgramId,
}
/// Per-account input to the privacy-preserving circuit. Each variant carries exactly the fields
/// the guest needs for that account's code path.
#[derive(Serialize, Deserialize, Clone)]
pub enum InputAccountIdentity {
    /// Public account. The guest reads pre/post state from `program_outputs` and emits no
    /// commitment, ciphertext, or nullifier.
    Public,
    /// Init of an authorized standalone private account: no membership proof. The `pre_state`
    /// must be `Account::default()`. The `account_id` is derived as
    /// `AccountId::for_regular_private_account(&NullifierPublicKey::from(nsk), identifier)` and
    /// matched against `pre_state.account_id`.
    PrivateAuthorizedInit {
        /// Shared secret used to encrypt the account's post state.
        ssk: SharedSecretKey,
        /// Nullifier secret key proving authorization over the account.
        nsk: NullifierSecretKey,
        identifier: Identifier,
    },
    /// Update of an authorized standalone private account: existing on-chain commitment, with
    /// membership proof.
    PrivateAuthorizedUpdate {
        ssk: SharedSecretKey,
        nsk: NullifierSecretKey,
        /// Proof that the account's commitment is in the on-chain commitment set.
        membership_proof: MembershipProof,
        identifier: Identifier,
    },
    /// Init of a standalone private account the caller does not own (e.g. a recipient who
    /// doesn't yet exist on chain). No `nsk`, no membership proof.
    PrivateUnauthorized {
        /// Recipient's public key; the caller never holds the matching secret.
        npk: NullifierPublicKey,
        ssk: SharedSecretKey,
        identifier: Identifier,
    },
    /// Init of a private PDA, unauthorized. The npk-to-account_id binding is proven upstream
    /// via `Claim::Pda(seed)` or a caller's `pda_seeds` match. The identifier diversifies the
    /// PDA within the `(program_id, seed, npk)` family: `AccountId::for_private_pda` uses it
    /// as the 4th input.
    PrivatePdaInit {
        npk: NullifierPublicKey,
        ssk: SharedSecretKey,
        identifier: Identifier,
    },
    /// Update of an existing private PDA, authorized, with membership proof. `npk` is derived
    /// from `nsk`. Authorization is established upstream by a caller `pda_seeds` match or a
    /// previously-seen authorization in a chained call.
    PrivatePdaUpdate {
        ssk: SharedSecretKey,
        nsk: NullifierSecretKey,
        membership_proof: MembershipProof,
        identifier: Identifier,
    },
}
impl InputAccountIdentity {
    /// `true` only for the [`Self::Public`] variant.
    #[must_use]
    pub const fn is_public(&self) -> bool {
        matches!(self, Self::Public)
    }

    /// `true` for both private-PDA variants (init and update).
    #[must_use]
    pub const fn is_private_pda(&self) -> bool {
        matches!(
            self,
            Self::PrivatePdaInit { .. } | Self::PrivatePdaUpdate { .. }
        )
    }

    /// For private PDA variants, return the `(npk, identifier)` pair. `Init` carries both
    /// directly; `Update` derives `npk` from `nsk`. For non-PDA variants returns `None`.
    #[must_use]
    pub fn npk_if_private_pda(&self) -> Option<(NullifierPublicKey, Identifier)> {
        // Exhaustive match on purpose: adding a variant must force a decision here
        // rather than silently falling into a catch-all `None`.
        match self {
            Self::PrivatePdaInit {
                npk, identifier, ..
            } => Some((*npk, *identifier)),
            Self::PrivatePdaUpdate {
                nsk, identifier, ..
            } => Some((NullifierPublicKey::from(nsk), *identifier)),
            Self::Public
            | Self::PrivateAuthorizedInit { .. }
            | Self::PrivateAuthorizedUpdate { .. }
            | Self::PrivateUnauthorized { .. } => None,
        }
    }
}
#[derive(Serialize, Deserialize)]
#[cfg_attr(any(feature = "host", test), derive(Debug, PartialEq, Eq))]
pub struct PrivacyPreservingCircuitOutput {

View File

@ -8,7 +8,7 @@ use serde::{Deserialize, Serialize};
#[cfg(feature = "host")]
pub use shared_key_derivation::{EphemeralPublicKey, EphemeralSecretKey, ViewingPublicKey};
use crate::{Commitment, Identifier, account::Account};
use crate::{Commitment, account::Account, program::PrivateAccountKind};
#[cfg(feature = "host")]
pub mod shared_key_derivation;
@ -40,13 +40,14 @@ impl EncryptionScheme {
#[must_use]
pub fn encrypt(
account: &Account,
identifier: Identifier,
kind: &PrivateAccountKind,
shared_secret: &SharedSecretKey,
commitment: &Commitment,
output_index: u32,
) -> Ciphertext {
// Plaintext: identifier (16 bytes, little-endian) || account bytes
let mut buffer = identifier.to_le_bytes().to_vec();
// Plaintext: PrivateAccountKind::HEADER_LEN bytes header || account bytes.
// Both variants produce the same header length — see PrivateAccountKind::to_header_bytes.
let mut buffer = kind.to_header_bytes().to_vec();
buffer.extend_from_slice(&account.to_bytes());
Self::symmetric_transform(&mut buffer, shared_secret, commitment, output_index);
Ciphertext(buffer)
@ -89,17 +90,19 @@ impl EncryptionScheme {
shared_secret: &SharedSecretKey,
commitment: &Commitment,
output_index: u32,
) -> Option<(Identifier, Account)> {
) -> Option<(PrivateAccountKind, Account)> {
use std::io::Cursor;
let mut buffer = ciphertext.0.clone();
Self::symmetric_transform(&mut buffer, shared_secret, commitment, output_index);
if buffer.len() < 16 {
if buffer.len() < PrivateAccountKind::HEADER_LEN {
return None;
}
let identifier = Identifier::from_le_bytes(buffer[..16].try_into().unwrap());
let header: &[u8; PrivateAccountKind::HEADER_LEN] =
buffer[..PrivateAccountKind::HEADER_LEN].try_into().unwrap();
let kind = PrivateAccountKind::from_header_bytes(header)?;
let mut cursor = Cursor::new(&buffer[16..]);
let mut cursor = Cursor::new(&buffer[PrivateAccountKind::HEADER_LEN..]);
Account::from_cursor(&mut cursor)
.inspect_err(|err| {
println!(
@ -112,6 +115,43 @@ impl EncryptionScheme {
);
})
.ok()
.map(|account| (identifier, account))
.map(|account| (kind, account))
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::{
account::{Account, AccountId},
program::PdaSeed,
};
#[test]
fn encrypt_same_length_for_account_and_pda() {
let account = Account::default();
let secret = SharedSecretKey([0_u8; 32]);
let commitment = crate::Commitment::new(&AccountId::new([0_u8; 32]), &Account::default());
let account_ct = EncryptionScheme::encrypt(
&account,
&PrivateAccountKind::Regular(42),
&secret,
&commitment,
0,
);
let pda_ct = EncryptionScheme::encrypt(
&account,
&PrivateAccountKind::Pda {
program_id: [1_u32; 8],
seed: PdaSeed::new([2_u8; 32]),
identifier: 42,
},
&secret,
&commitment,
0,
);
assert_eq!(account_ct.0.len(), pda_ct.0.len());
}
}

View File

@ -3,13 +3,16 @@
reason = "We prefer to group methods by functionality rather than by type for encoding"
)]
pub use circuit_io::{PrivacyPreservingCircuitInput, PrivacyPreservingCircuitOutput};
pub use circuit_io::{
InputAccountIdentity, PrivacyPreservingCircuitInput, PrivacyPreservingCircuitOutput,
};
pub use commitment::{
Commitment, CommitmentSetDigest, DUMMY_COMMITMENT, DUMMY_COMMITMENT_HASH, MembershipProof,
compute_digest_for_path,
};
pub use encryption::{EncryptionScheme, SharedSecretKey};
pub use nullifier::{Identifier, Nullifier, NullifierPublicKey, NullifierSecretKey};
pub use program::PrivateAccountKind;
pub mod account;
mod circuit_io;

View File

@ -12,10 +12,11 @@ pub type Identifier = u128;
#[cfg_attr(any(feature = "host", test), derive(Hash))]
pub struct NullifierPublicKey(pub [u8; 32]);
impl From<(&NullifierPublicKey, Identifier)> for AccountId {
fn from(value: (&NullifierPublicKey, Identifier)) -> Self {
let (npk, identifier) = value;
impl AccountId {
/// Derives an [`AccountId`] for a regular (non-PDA) private account from the nullifier public
/// key and identifier.
#[must_use]
pub fn for_regular_private_account(npk: &NullifierPublicKey, identifier: Identifier) -> Self {
// 32 bytes prefix || 32 bytes npk || 16 bytes identifier
let mut bytes = [0; 80];
bytes[0..32].copy_from_slice(PRIVATE_ACCOUNT_ID_PREFIX);
@ -31,6 +32,12 @@ impl From<(&NullifierPublicKey, Identifier)> for AccountId {
}
}
/// Convenience conversion: a `(nullifier public key, identifier)` pair maps to
/// the id of the corresponding regular (non-PDA) private account. Delegates to
/// [`AccountId::for_regular_private_account`].
impl From<(&NullifierPublicKey, Identifier)> for AccountId {
    fn from(value: (&NullifierPublicKey, Identifier)) -> Self {
        let (npk, identifier) = value;
        Self::for_regular_private_account(npk, identifier)
    }
}
impl AsRef<[u8]> for NullifierPublicKey {
fn as_ref(&self) -> &[u8] {
self.0.as_slice()
@ -155,7 +162,7 @@ mod tests {
253, 105, 164, 89, 84, 40, 191, 182, 119, 64, 255, 67, 142,
]);
let account_id = AccountId::from((&npk, 0));
let account_id = AccountId::for_regular_private_account(&npk, 0);
assert_eq!(account_id, expected_account_id);
}
@ -172,7 +179,7 @@ mod tests {
56, 247, 99, 121, 165, 182, 234, 255, 19, 127, 191, 72,
]);
let account_id = AccountId::from((&npk, 1));
let account_id = AccountId::for_regular_private_account(&npk, 1);
assert_eq!(account_id, expected_account_id);
}
@ -190,7 +197,7 @@ mod tests {
19, 245, 25, 214, 162, 209, 135, 252, 82, 27, 2, 174, 196,
]);
let account_id = AccountId::from((&npk, identifier));
let account_id = AccountId::for_regular_private_account(&npk, identifier);
assert_eq!(account_id, expected_account_id);
}

Some files were not shown because too many files have changed in this diff Show More