Merge branch 'main' into schouhy/diversify-private-pdas-by-identifier

This commit is contained in:
Sergio Chouhy 2026-05-06 00:22:58 -03:00
commit fb4ddb055a
90 changed files with 3979 additions and 2326 deletions

View File

@ -225,7 +225,7 @@ jobs:
- uses: ./.github/actions/install-risc0 - uses: ./.github/actions/install-risc0
- name: Install just - name: Install just
run: cargo install just run: cargo install --locked just
- name: Build artifacts - name: Build artifacts
run: just build-artifacts run: just build-artifacts

1674
Cargo.lock generated

File diff suppressed because it is too large Load Diff

View File

@ -36,7 +36,6 @@ members = [
"examples/program_deployment", "examples/program_deployment",
"examples/program_deployment/methods", "examples/program_deployment/methods",
"examples/program_deployment/methods/guest", "examples/program_deployment/methods/guest",
"bedrock_client",
"testnet_initial_state", "testnet_initial_state",
"indexer_ffi", "indexer_ffi",
] ]
@ -67,7 +66,6 @@ amm_program = { path = "programs/amm" }
ata_core = { path = "programs/associated_token_account/core" } ata_core = { path = "programs/associated_token_account/core" }
ata_program = { path = "programs/associated_token_account" } ata_program = { path = "programs/associated_token_account" }
test_program_methods = { path = "test_program_methods" } test_program_methods = { path = "test_program_methods" }
bedrock_client = { path = "bedrock_client" }
testnet_initial_state = { path = "testnet_initial_state" } testnet_initial_state = { path = "testnet_initial_state" }
tokio = { version = "1.50", features = [ tokio = { version = "1.50", features = [
@ -122,11 +120,12 @@ tokio-retry = "0.3.0"
schemars = "1.2" schemars = "1.2"
async-stream = "0.3.6" async-stream = "0.3.6"
logos-blockchain-common-http-client = { git = "https://github.com/logos-blockchain/logos-blockchain.git", rev = "1da154c74b911318fb853d37261f8a05ffe513b4" } logos-blockchain-common-http-client = { git = "https://github.com/logos-blockchain/logos-blockchain.git", rev = "ee281a447d95a951752461ee0a6e88eb4a0f17cf" }
logos-blockchain-key-management-system-service = { git = "https://github.com/logos-blockchain/logos-blockchain.git", rev = "1da154c74b911318fb853d37261f8a05ffe513b4" } logos-blockchain-key-management-system-service = { git = "https://github.com/logos-blockchain/logos-blockchain.git", rev = "ee281a447d95a951752461ee0a6e88eb4a0f17cf" }
logos-blockchain-core = { git = "https://github.com/logos-blockchain/logos-blockchain.git", rev = "1da154c74b911318fb853d37261f8a05ffe513b4" } logos-blockchain-core = { git = "https://github.com/logos-blockchain/logos-blockchain.git", rev = "ee281a447d95a951752461ee0a6e88eb4a0f17cf" }
logos-blockchain-chain-broadcast-service = { git = "https://github.com/logos-blockchain/logos-blockchain.git", rev = "1da154c74b911318fb853d37261f8a05ffe513b4" } logos-blockchain-chain-broadcast-service = { git = "https://github.com/logos-blockchain/logos-blockchain.git", rev = "ee281a447d95a951752461ee0a6e88eb4a0f17cf" }
logos-blockchain-chain-service = { git = "https://github.com/logos-blockchain/logos-blockchain.git", rev = "1da154c74b911318fb853d37261f8a05ffe513b4" } logos-blockchain-chain-service = { git = "https://github.com/logos-blockchain/logos-blockchain.git", rev = "ee281a447d95a951752461ee0a6e88eb4a0f17cf" }
logos-blockchain-zone-sdk = { git = "https://github.com/logos-blockchain/logos-blockchain.git", rev = "ee281a447d95a951752461ee0a6e88eb4a0f17cf" }
rocksdb = { version = "0.24.0", default-features = false, features = [ rocksdb = { version = "0.24.0", default-features = false, features = [
"snappy", "snappy",

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

View File

@ -39,42 +39,42 @@ cryptarchia:
threshold: 1 threshold: 1
timestamp: 0 timestamp: 0
gossipsub_protocol: /integration/logos-blockchain/cryptarchia/proto/1.0.0 gossipsub_protocol: /integration/logos-blockchain/cryptarchia/proto/1.0.0
genesis_state: genesis_block:
mantle_tx: header:
ops: version: Bedrock
parent_block: '0000000000000000000000000000000000000000000000000000000000000000'
slot: 0
block_root: b5f8787ac23674822414c70eea15d842da38f2e806ede1a73cf7b5cf0277da07
proof_of_leadership:
proof: '0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000'
entropy_contribution: '0000000000000000000000000000000000000000000000000000000000000000'
leader_key: '0000000000000000000000000000000000000000000000000000000000000000'
voucher_cm: '0000000000000000000000000000000000000000000000000000000000000000'
signature: '00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000'
transactions:
- mantle_tx:
ops:
- opcode: 0 - opcode: 0
payload: payload:
inputs: [ ] inputs: []
outputs: outputs:
- value: 1 - value: 1
pk: d204000000000000000000000000000000000000000000000000000000000000 pk: d204000000000000000000000000000000000000000000000000000000000000
- value: 100 - value: 100
pk: 2e03b2eff5a45478e7e79668d2a146cf2c5c7925bce927f2b1c67f2ab4fc0d26 pk: '2e03b2eff5a45478e7e79668d2a146cf2c5c7925bce927f2b1c67f2ab4fc0d26'
- value: 1
pk: ed266e6e887b9b97059dc1aa1b7b2e19b934291753c6336a163fe4ebaa28e717
- opcode: 17 - opcode: 17
payload: payload:
channel_id: "0000000000000000000000000000000000000000000000000000000000000000" channel_id: '0000000000000000000000000000000000000000000000000000000000000000'
inscription: [ 103, 101, 110, 101, 115, 105, 115 ] # "genesis" in bytes inscription: '67656e65736973'
parent: "0000000000000000000000000000000000000000000000000000000000000000" parent: '0000000000000000000000000000000000000000000000000000000000000000'
signer: "0000000000000000000000000000000000000000000000000000000000000000" signer: '0000000000000000000000000000000000000000000000000000000000000000'
execution_gas_price: 0 execution_gas_price: 0
storage_gas_price: 0 storage_gas_price: 0
ops_proofs: ops_proofs:
- !ZkSig - !Ed25519Sig '00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000'
pi_a: [ - !Ed25519Sig '00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000'
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
]
pi_b: [
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
]
pi_c: [
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
]
- NoProof
time: time:
slot_duration: '1.0' slot_duration: '1.0'
chain_start_time: PLACEHOLDER_CHAIN_START_TIME chain_start_time: PLACEHOLDER_CHAIN_START_TIME

View File

@ -1,7 +1,7 @@
services: services:
logos-blockchain-node-0: logos-blockchain-node-0:
image: ghcr.io/logos-blockchain/logos-blockchain@sha256:c5243681b353278cabb562a176f0a5cfbefc2056f18cebc47fe0e3720c29fb12 image: ghcr.io/logos-blockchain/logos-blockchain@sha256:9f1829dea335c56f6ff68ae37ea872ed5313b96b69e8ffe143c02b7217de85fc
ports: ports:
- "${PORT:-8080}:18080/tcp" - "${PORT:-8080}:18080/tcp"
volumes: volumes:

View File

@ -1,23 +0,0 @@
[package]
name = "bedrock_client"
version = "0.1.0"
edition = "2024"
license = { workspace = true }
[lints]
workspace = true
[dependencies]
common.workspace = true
reqwest.workspace = true
anyhow.workspace = true
tokio-retry.workspace = true
futures.workspace = true
log.workspace = true
serde.workspace = true
humantime-serde.workspace = true
logos-blockchain-common-http-client.workspace = true
logos-blockchain-core.workspace = true
logos-blockchain-chain-broadcast-service.workspace = true
logos-blockchain-chain-service.workspace = true

View File

@ -1,121 +0,0 @@
use std::time::Duration;
use anyhow::{Context as _, Result};
use common::config::BasicAuth;
use futures::{Stream, TryFutureExt as _};
#[expect(clippy::single_component_path_imports, reason = "Satisfy machete")]
use humantime_serde;
use log::{info, warn};
pub use logos_blockchain_chain_broadcast_service::BlockInfo;
use logos_blockchain_chain_service::CryptarchiaInfo;
pub use logos_blockchain_common_http_client::{CommonHttpClient, Error};
pub use logos_blockchain_core::{block::Block, header::HeaderId, mantle::SignedMantleTx};
use reqwest::{Client, Url};
use serde::{Deserialize, Serialize};
use tokio_retry::Retry;
/// Fibonacci backoff retry strategy configuration.
#[derive(Debug, Copy, Clone, Serialize, Deserialize)]
pub struct BackoffConfig {
#[serde(with = "humantime_serde")]
pub start_delay: Duration,
pub max_retries: usize,
}
impl Default for BackoffConfig {
fn default() -> Self {
Self {
start_delay: Duration::from_millis(100),
max_retries: 5,
}
}
}
/// Simple wrapper
/// maybe extend in the future for our purposes
/// `Clone` is cheap because `CommonHttpClient` is internally reference counted (`Arc`).
#[derive(Clone)]
pub struct BedrockClient {
http_client: CommonHttpClient,
node_url: Url,
backoff: BackoffConfig,
}
impl BedrockClient {
pub fn new(backoff: BackoffConfig, node_url: Url, auth: Option<BasicAuth>) -> Result<Self> {
info!("Creating Bedrock client with node URL {node_url}");
let client = Client::builder()
//Add more fields if needed
.timeout(std::time::Duration::from_mins(1))
.build()
.context("Failed to build HTTP client")?;
let auth = auth.map(|a| {
logos_blockchain_common_http_client::BasicAuthCredentials::new(a.username, a.password)
});
let http_client = CommonHttpClient::new_with_client(client, auth);
Ok(Self {
http_client,
node_url,
backoff,
})
}
pub async fn post_transaction(&self, tx: SignedMantleTx) -> Result<Result<(), Error>, Error> {
Retry::spawn(self.backoff_strategy(), || async {
match self
.http_client
.post_transaction(self.node_url.clone(), tx.clone())
.await
{
Ok(()) => Ok(Ok(())),
Err(err) => match err {
// Retry arm.
// Retrying only reqwest errors: mainly connected to http.
Error::Request(_) => Err(err),
// Returning non-retryable error
Error::Server(_) | Error::Client(_) | Error::Url(_) => Ok(Err(err)),
},
}
})
.await
}
pub async fn get_lib_stream(&self) -> Result<impl Stream<Item = BlockInfo>, Error> {
self.http_client.get_lib_stream(self.node_url.clone()).await
}
pub async fn get_block_by_id(
&self,
header_id: HeaderId,
) -> Result<Option<Block<SignedMantleTx>>, Error> {
Retry::spawn(self.backoff_strategy(), || {
self.http_client
.get_block_by_id(self.node_url.clone(), header_id)
.inspect_err(|err| warn!("Block fetching failed with error: {err:#}"))
})
.await
}
pub async fn get_consensus_info(&self) -> Result<CryptarchiaInfo, Error> {
Retry::spawn(self.backoff_strategy(), || {
self.http_client
.consensus_info(self.node_url.clone())
.inspect_err(|err| warn!("Block fetching failed with error: {err:#}"))
})
.await
}
fn backoff_strategy(&self) -> impl Iterator<Item = Duration> {
let start_delay_millis = self
.backoff
.start_delay
.as_millis()
.try_into()
.expect("Start delay must be less than u64::MAX milliseconds");
tokio_retry::strategy::FibonacciBackoff::from_millis(start_delay_millis)
.take(self.backoff.max_retries)
}
}

View File

@ -1,12 +1,8 @@
{ {
"home": "./indexer/service", "home": "./indexer/service",
"consensus_info_polling_interval": "1s", "consensus_info_polling_interval": "1s",
"bedrock_client_config": { "bedrock_config": {
"addr": "http://logos-blockchain-node-0:18080", "addr": "http://logos-blockchain-node-0:18080"
"backoff": {
"start_delay": "100ms",
"max_retries": 5
}
}, },
"channel_id": "0101010101010101010101010101010101010101010101010101010101010101", "channel_id": "0101010101010101010101010101010101010101010101010101010101010101",
"initial_accounts": [ "initial_accounts": [

View File

@ -9,7 +9,7 @@ workspace = true
[dependencies] [dependencies]
common.workspace = true common.workspace = true
bedrock_client.workspace = true logos-blockchain-zone-sdk.workspace = true
nssa.workspace = true nssa.workspace = true
nssa_core.workspace = true nssa_core.workspace = true
storage.workspace = true storage.workspace = true
@ -19,13 +19,13 @@ anyhow.workspace = true
log.workspace = true log.workspace = true
serde.workspace = true serde.workspace = true
humantime-serde.workspace = true humantime-serde.workspace = true
tokio.workspace = true
borsh.workspace = true borsh.workspace = true
futures.workspace = true futures.workspace = true
url.workspace = true url.workspace = true
logos-blockchain-core.workspace = true logos-blockchain-core.workspace = true
serde_json.workspace = true serde_json.workspace = true
async-stream.workspace = true async-stream.workspace = true
tokio.workspace = true
[dev-dependencies] [dev-dependencies]
tempfile.workspace = true tempfile.workspace = true

View File

@ -1,11 +1,12 @@
use std::{path::Path, sync::Arc}; use std::{path::Path, sync::Arc};
use anyhow::Result; use anyhow::{Context as _, Result};
use bedrock_client::HeaderId;
use common::{ use common::{
block::{BedrockStatus, Block}, block::{BedrockStatus, Block},
transaction::{NSSATransaction, clock_invocation}, transaction::{NSSATransaction, clock_invocation},
}; };
use logos_blockchain_core::{header::HeaderId, mantle::ops::channel::MsgId};
use logos_blockchain_zone_sdk::Slot;
use nssa::{Account, AccountId, V03State}; use nssa::{Account, AccountId, V03State};
use nssa_core::BlockId; use nssa_core::BlockId;
use storage::indexer::RocksDBIO; use storage::indexer::RocksDBIO;
@ -103,6 +104,22 @@ impl IndexerStore {
Ok(self.dbio.calculate_state_for_id(block_id)?) Ok(self.dbio.calculate_state_for_id(block_id)?)
} }
pub fn get_zone_cursor(&self) -> Result<Option<(MsgId, Slot)>> {
let Some(bytes) = self.dbio.get_zone_sdk_indexer_cursor_bytes()? else {
return Ok(None);
};
let cursor: (MsgId, Slot) = serde_json::from_slice(&bytes)
.context("Failed to deserialize stored zone-sdk indexer cursor")?;
Ok(Some(cursor))
}
pub fn set_zone_cursor(&self, cursor: &(MsgId, Slot)) -> Result<()> {
let bytes =
serde_json::to_vec(cursor).context("Failed to serialize zone-sdk indexer cursor")?;
self.dbio.put_zone_sdk_indexer_cursor_bytes(&bytes)?;
Ok(())
}
/// Recalculation of final state directly from DB. /// Recalculation of final state directly from DB.
/// ///
/// Used for indexer healthcheck. /// Used for indexer healthcheck.

View File

@ -6,7 +6,6 @@ use std::{
}; };
use anyhow::{Context as _, Result}; use anyhow::{Context as _, Result};
pub use bedrock_client::BackoffConfig;
use common::config::BasicAuth; use common::config::BasicAuth;
use humantime_serde; use humantime_serde;
pub use logos_blockchain_core::mantle::ops::channel::ChannelId; pub use logos_blockchain_core::mantle::ops::channel::ChannelId;
@ -16,8 +15,6 @@ use url::Url;
#[derive(Debug, Clone, Serialize, Deserialize)] #[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ClientConfig { pub struct ClientConfig {
/// For individual RPC requests we use Fibonacci backoff retry strategy.
pub backoff: BackoffConfig,
pub addr: Url, pub addr: Url,
#[serde(default, skip_serializing_if = "Option::is_none")] #[serde(default, skip_serializing_if = "Option::is_none")]
pub auth: Option<BasicAuth>, pub auth: Option<BasicAuth>,
@ -31,7 +28,7 @@ pub struct IndexerConfig {
pub signing_key: [u8; 32], pub signing_key: [u8; 32],
#[serde(with = "humantime_serde")] #[serde(with = "humantime_serde")]
pub consensus_info_polling_interval: Duration, pub consensus_info_polling_interval: Duration,
pub bedrock_client_config: ClientConfig, pub bedrock_config: ClientConfig,
pub channel_id: ChannelId, pub channel_id: ChannelId,
#[serde(skip_serializing_if = "Option::is_none")] #[serde(skip_serializing_if = "Option::is_none")]
pub initial_public_accounts: Option<Vec<PublicAccountPublicInitialData>>, pub initial_public_accounts: Option<Vec<PublicAccountPublicInitialData>>,

View File

@ -1,15 +1,14 @@
use std::collections::VecDeque; use std::sync::Arc;
use anyhow::Result; use anyhow::Result;
use bedrock_client::{BedrockClient, HeaderId}; use common::block::{Block, HashableBlockData};
use common::{ // ToDo: Remove after testnet
HashType, PINATA_BASE58, use common::{HashType, PINATA_BASE58};
block::{Block, HashableBlockData}, use futures::StreamExt as _;
}; use log::{error, info, warn};
use log::{debug, error, info}; use logos_blockchain_core::header::HeaderId;
use logos_blockchain_core::mantle::{ use logos_blockchain_zone_sdk::{
Op, SignedMantleTx, CommonHttpClient, ZoneMessage, adapter::NodeHttpClient, indexer::ZoneIndexer,
ops::channel::{ChannelId, inscribe::InscriptionOp},
}; };
use nssa::V03State; use nssa::V03State;
use testnet_initial_state::initial_state_testnet; use testnet_initial_state::initial_state_testnet;
@ -21,25 +20,11 @@ pub mod config;
#[derive(Clone)] #[derive(Clone)]
pub struct IndexerCore { pub struct IndexerCore {
pub bedrock_client: BedrockClient, pub zone_indexer: Arc<ZoneIndexer<NodeHttpClient>>,
pub config: IndexerConfig, pub config: IndexerConfig,
pub store: IndexerStore, pub store: IndexerStore,
} }
#[derive(Clone)]
/// This struct represents one L1 block data fetched from backfilling.
pub struct BackfillBlockData {
l2_blocks: Vec<Block>,
l1_header: HeaderId,
}
#[derive(Clone)]
/// This struct represents data fetched fom backfilling in one iteration.
pub struct BackfillData {
block_data: VecDeque<BackfillBlockData>,
curr_fin_l1_lib_header: HeaderId,
}
impl IndexerCore { impl IndexerCore {
pub fn new(config: IndexerConfig) -> Result<Self> { pub fn new(config: IndexerConfig) -> Result<Self> {
let hashable_data = HashableBlockData { let hashable_data = HashableBlockData {
@ -107,279 +92,88 @@ impl IndexerCore {
let home = config.home.join("rocksdb"); let home = config.home.join("rocksdb");
let basic_auth = config.bedrock_config.auth.clone().map(Into::into);
let node = NodeHttpClient::new(
CommonHttpClient::new(basic_auth),
config.bedrock_config.addr.clone(),
);
let zone_indexer = ZoneIndexer::new(config.channel_id, node);
Ok(Self { Ok(Self {
bedrock_client: BedrockClient::new( zone_indexer: Arc::new(zone_indexer),
config.bedrock_client_config.backoff,
config.bedrock_client_config.addr.clone(),
config.bedrock_client_config.auth.clone(),
)?,
config, config,
store: IndexerStore::open_db_with_genesis(&home, &genesis_block, &state)?, store: IndexerStore::open_db_with_genesis(&home, &genesis_block, &state)?,
}) })
} }
pub fn subscribe_parse_block_stream(&self) -> impl futures::Stream<Item = Result<Block>> { pub fn subscribe_parse_block_stream(&self) -> impl futures::Stream<Item = Result<Block>> + '_ {
let poll_interval = self.config.consensus_info_polling_interval;
let initial_cursor = self
.store
.get_zone_cursor()
.expect("Failed to load zone-sdk indexer cursor");
async_stream::stream! { async_stream::stream! {
info!("Searching for initial header"); let mut cursor = initial_cursor;
let last_stored_l1_lib_header = self.store.last_observed_l1_lib_header()?; if cursor.is_some() {
info!("Resuming indexer from cursor {cursor:?}");
let mut prev_last_l1_lib_header = if let Some(last_l1_lib_header) = last_stored_l1_lib_header {
info!("Last l1 lib header found: {last_l1_lib_header}");
last_l1_lib_header
} else { } else {
info!("Last l1 lib header not found in DB"); info!("Starting indexer from beginning of channel");
info!("Searching for the start of a channel"); }
let BackfillData {
block_data: start_buff,
curr_fin_l1_lib_header: last_l1_lib_header,
} = self.search_for_channel_start().await?;
for BackfillBlockData {
l2_blocks: l2_block_vec,
l1_header,
} in start_buff {
let mut l2_blocks_parsed_ids: Vec<_> = l2_block_vec.iter().map(|block| block.header.block_id).collect();
l2_blocks_parsed_ids.sort_unstable();
info!("Parsed {} L2 blocks with ids {:?}", l2_block_vec.len(), l2_blocks_parsed_ids);
for l2_block in l2_block_vec {
// TODO: proper fix is to make the sequencer's genesis include a
// trailing `clock_invocation(0)` (and have the indexer's
// `open_db_with_genesis` not pre-apply state transitions) so the
// inscribed genesis can flow through `put_block` like any other
// block. For now we skip re-applying it.
//
// The channel-start (block_id == 1) is the sequencer's genesis
// inscription that we re-discover during initial search. The
// indexer already has its own locally-constructed genesis in
// the store from `open_db_with_genesis`, so re-applying the
// inscribed copy is both redundant and would fail the strict
// block validation in `put_block` (the inscribed genesis lacks
// the trailing clock invocation).
if l2_block.header.block_id != 1 {
self
.store
.put_block(l2_block.clone(), l1_header)
.await
.inspect_err(|err| error!("Failed to put block with err {err:?}"))?;
}
yield Ok(l2_block);
}
}
last_l1_lib_header
};
info!("Searching for initial header finished");
info!("Starting backfilling from {prev_last_l1_lib_header}");
loop { loop {
let BackfillData { let stream = match self.zone_indexer.next_messages(cursor).await {
block_data: buff, Ok(s) => s,
curr_fin_l1_lib_header, Err(err) => {
} = self error!("Failed to start zone-sdk next_messages stream: {err}");
.backfill_to_last_l1_lib_header_id(prev_last_l1_lib_header, &self.config.channel_id) tokio::time::sleep(poll_interval).await;
.await continue;
.inspect_err(|err| error!("Failed to backfill to last l1 lib header id with err {err:#?}"))?;
prev_last_l1_lib_header = curr_fin_l1_lib_header;
for BackfillBlockData {
l2_blocks: l2_block_vec,
l1_header: header,
} in buff {
let mut l2_blocks_parsed_ids: Vec<_> = l2_block_vec.iter().map(|block| block.header.block_id).collect();
l2_blocks_parsed_ids.sort_unstable();
info!("Parsed {} L2 blocks with ids {:?}", l2_block_vec.len(), l2_blocks_parsed_ids);
for l2_block in l2_block_vec {
self.store.put_block(l2_block.clone(), header).await?;
yield Ok(l2_block);
} }
}
}
}
}
async fn get_lib(&self) -> Result<HeaderId> {
Ok(self.bedrock_client.get_consensus_info().await?.lib)
}
async fn get_next_lib(&self, prev_lib: HeaderId) -> Result<HeaderId> {
loop {
let next_lib = self.get_lib().await?;
if next_lib == prev_lib {
info!(
"Wait {:?} to not spam the node",
self.config.consensus_info_polling_interval
);
tokio::time::sleep(self.config.consensus_info_polling_interval).await;
} else {
break Ok(next_lib);
}
}
}
/// WARNING: depending on channel state,
/// may take indefinite amount of time.
pub async fn search_for_channel_start(&self) -> Result<BackfillData> {
let mut curr_last_l1_lib_header = self.get_lib().await?;
let mut backfill_start = curr_last_l1_lib_header;
// ToDo: How to get root?
let mut backfill_limit = HeaderId::from([0; 32]);
// ToDo: Not scalable, initial buffer should be stored in DB to not run out of memory
// Don't want to complicate DB even more right now.
let mut block_buffer = VecDeque::new();
'outer: loop {
let mut cycle_header = curr_last_l1_lib_header;
loop {
let Some(cycle_block) = self.bedrock_client.get_block_by_id(cycle_header).await?
else {
// First run can reach root easily
// so here we are optimistic about L1
// failing to get parent.
break;
}; };
let mut stream = std::pin::pin!(stream);
// It would be better to have id, but block does not have it, so slot will do. while let Some((msg, slot)) = stream.next().await {
info!( let zone_block = match msg {
"INITIAL SEARCH: Observed L1 block at slot {}", ZoneMessage::Block(b) => b,
cycle_block.header().slot().into_inner() // Non-block messages don't carry a cursor position; the
); // next ZoneBlock advances past them implicitly.
debug!( ZoneMessage::Deposit(_) | ZoneMessage::Withdraw(_) => continue,
"INITIAL SEARCH: This block header is {}", };
cycle_block.header().id()
);
debug!(
"INITIAL SEARCH: This block parent is {}",
cycle_block.header().parent()
);
let (l2_block_vec, l1_header) = let block: Block = match borsh::from_slice(&zone_block.data) {
parse_block_owned(&cycle_block, &self.config.channel_id); Ok(b) => b,
Err(e) => {
error!("Failed to deserialize L2 block from zone-sdk: {e}");
// Advance past the broken inscription so we don't
// re-process it on restart.
cursor = Some((zone_block.id, slot));
if let Err(err) = self.store.set_zone_cursor(&(zone_block.id, slot)) {
warn!("Failed to persist indexer cursor: {err:#}");
}
continue;
}
};
info!("Parsed {} L2 blocks", l2_block_vec.len()); info!("Indexed L2 block {}", block.header.block_id);
if !l2_block_vec.is_empty() { // TODO: Remove l1_header placeholder once storage layer
block_buffer.push_front(BackfillBlockData { // no longer requires it. Zone-sdk handles L1 tracking internally.
l2_blocks: l2_block_vec.clone(), let placeholder_l1_header = HeaderId::from([0_u8; 32]);
l1_header, if let Err(err) = self.store.put_block(block.clone(), placeholder_l1_header).await {
}); error!("Failed to store block {}: {err:#}", block.header.block_id);
}
if let Some(first_l2_block) = l2_block_vec.first()
&& first_l2_block.header.block_id == 1
{
info!("INITIAL_SEARCH: Found channel start");
break 'outer;
}
// Step back to parent
let parent = cycle_block.header().parent();
if parent == backfill_limit {
break;
}
cycle_header = parent;
}
info!("INITIAL_SEARCH: Reached backfill limit, refetching last l1 lib header");
block_buffer.clear();
backfill_limit = backfill_start;
curr_last_l1_lib_header = self.get_next_lib(curr_last_l1_lib_header).await?;
backfill_start = curr_last_l1_lib_header;
}
Ok(BackfillData {
block_data: block_buffer,
curr_fin_l1_lib_header: curr_last_l1_lib_header,
})
}
pub async fn backfill_to_last_l1_lib_header_id(
&self,
last_fin_l1_lib_header: HeaderId,
channel_id: &ChannelId,
) -> Result<BackfillData> {
let curr_fin_l1_lib_header = self.get_next_lib(last_fin_l1_lib_header).await?;
// ToDo: Not scalable, buffer should be stored in DB to not run out of memory
// Don't want to complicate DB even more right now.
let mut block_buffer = VecDeque::new();
let mut cycle_header = curr_fin_l1_lib_header;
loop {
let Some(cycle_block) = self.bedrock_client.get_block_by_id(cycle_header).await? else {
return Err(anyhow::anyhow!("Parent not found"));
};
if cycle_block.header().id() == last_fin_l1_lib_header {
break;
}
// Step back to parent
cycle_header = cycle_block.header().parent();
// It would be better to have id, but block does not have it, so slot will do.
info!(
"Observed L1 block at slot {}",
cycle_block.header().slot().into_inner()
);
let (l2_block_vec, l1_header) = parse_block_owned(&cycle_block, channel_id);
info!("Parsed {} L2 blocks", l2_block_vec.len());
if !l2_block_vec.is_empty() {
block_buffer.push_front(BackfillBlockData {
l2_blocks: l2_block_vec,
l1_header,
});
}
}
Ok(BackfillData {
block_data: block_buffer,
curr_fin_l1_lib_header,
})
}
}
fn parse_block_owned(
l1_block: &bedrock_client::Block<SignedMantleTx>,
decoded_channel_id: &ChannelId,
) -> (Vec<Block>, HeaderId) {
(
#[expect(
clippy::wildcard_enum_match_arm,
reason = "We are only interested in channel inscription ops, so it's fine to ignore the rest"
)]
l1_block
.transactions()
.flat_map(|tx| {
tx.mantle_tx.ops.iter().filter_map(|op| match op {
Op::ChannelInscribe(InscriptionOp {
channel_id,
inscription,
..
}) if channel_id == decoded_channel_id => {
borsh::from_slice::<Block>(inscription)
.inspect_err(|err| {
error!("Failed to deserialize our inscription with err: {err:#?}");
})
.ok()
} }
_ => None,
}) cursor = Some((zone_block.id, slot));
}) if let Err(err) = self.store.set_zone_cursor(&(zone_block.id, slot)) {
.collect(), warn!("Failed to persist indexer cursor: {err:#}");
l1_block.header().id(), }
) yield Ok(block);
}
// Stream ended (caught up to LIB). Sleep then poll again.
tokio::time::sleep(poll_interval).await;
}
}
}
} }

View File

@ -1,12 +1,8 @@
{ {
"home": ".", "home": ".",
"consensus_info_polling_interval": "1s", "consensus_info_polling_interval": "1s",
"bedrock_client_config": { "bedrock_config": {
"addr": "http://localhost:8080", "addr": "http://localhost:8080"
"backoff": {
"start_delay": "100ms",
"max_retries": 5
}
}, },
"channel_id": "0101010101010101010101010101010101010101010101010101010101010101", "channel_id": "0101010101010101010101010101010101010101010101010101010101010101",
"initial_accounts": [ "initial_accounts": [

View File

@ -19,8 +19,9 @@ indexer_service.workspace = true
serde_json.workspace = true serde_json.workspace = true
token_core.workspace = true token_core.workspace = true
ata_core.workspace = true ata_core.workspace = true
indexer_service_rpc.workspace = true indexer_service_rpc = { workspace = true, features = ["client"] }
sequencer_service_rpc = { workspace = true, features = ["client"] } sequencer_service_rpc = { workspace = true, features = ["client"] }
jsonrpsee = { workspace = true, features = ["ws-client"] }
wallet-ffi.workspace = true wallet-ffi.workspace = true
indexer_ffi.workspace = true indexer_ffi.workspace = true
testnet_initial_state.workspace = true testnet_initial_state.workspace = true
@ -35,4 +36,4 @@ hex.workspace = true
tempfile.workspace = true tempfile.workspace = true
bytesize.workspace = true bytesize.workspace = true
futures.workspace = true futures.workspace = true
testcontainers = { version = "0.27.0", features = ["docker-compose"] } testcontainers = { version = "0.27.3", features = ["docker-compose"] }

View File

@ -2,7 +2,7 @@ use std::{net::SocketAddr, path::PathBuf, time::Duration};
use anyhow::{Context as _, Result}; use anyhow::{Context as _, Result};
use bytesize::ByteSize; use bytesize::ByteSize;
use indexer_service::{BackoffConfig, ChannelId, ClientConfig, IndexerConfig}; use indexer_service::{ChannelId, ClientConfig, IndexerConfig};
use key_protocol::key_management::KeyChain; use key_protocol::key_management::KeyChain;
use nssa::{Account, AccountId, PrivateKey, PublicKey}; use nssa::{Account, AccountId, PrivateKey, PublicKey};
use nssa_core::{account::Data, program::DEFAULT_PROGRAM_ID}; use nssa_core::{account::Data, program::DEFAULT_PROGRAM_ID};
@ -164,35 +164,10 @@ impl std::fmt::Display for UrlProtocol {
} }
} }
pub fn indexer_config(
bedrock_addr: SocketAddr,
home: PathBuf,
initial_data: &InitialData,
) -> Result<IndexerConfig> {
Ok(IndexerConfig {
home,
consensus_info_polling_interval: Duration::from_secs(1),
bedrock_client_config: ClientConfig {
addr: addr_to_url(UrlProtocol::Http, bedrock_addr)
.context("Failed to convert bedrock addr to URL")?,
auth: None,
backoff: BackoffConfig {
start_delay: Duration::from_millis(100),
max_retries: 10,
},
},
initial_public_accounts: Some(initial_data.sequencer_initial_public_accounts()),
initial_private_accounts: Some(initial_data.sequencer_initial_private_accounts()),
signing_key: [37; 32],
channel_id: bedrock_channel_id(),
})
}
pub fn sequencer_config( pub fn sequencer_config(
partial: SequencerPartialConfig, partial: SequencerPartialConfig,
home: PathBuf, home: PathBuf,
bedrock_addr: SocketAddr, bedrock_addr: SocketAddr,
indexer_addr: SocketAddr,
initial_data: &InitialData, initial_data: &InitialData,
) -> Result<SequencerConfig> { ) -> Result<SequencerConfig> {
let SequencerPartialConfig { let SequencerPartialConfig {
@ -215,17 +190,11 @@ pub fn sequencer_config(
initial_private_accounts: Some(initial_data.sequencer_initial_private_accounts()), initial_private_accounts: Some(initial_data.sequencer_initial_private_accounts()),
signing_key: [37; 32], signing_key: [37; 32],
bedrock_config: BedrockConfig { bedrock_config: BedrockConfig {
backoff: BackoffConfig {
start_delay: Duration::from_millis(100),
max_retries: 5,
},
channel_id: bedrock_channel_id(), channel_id: bedrock_channel_id(),
node_url: addr_to_url(UrlProtocol::Http, bedrock_addr) node_url: addr_to_url(UrlProtocol::Http, bedrock_addr)
.context("Failed to convert bedrock addr to URL")?, .context("Failed to convert bedrock addr to URL")?,
auth: None, auth: None,
}, },
indexer_rpc_url: addr_to_url(UrlProtocol::Ws, indexer_addr)
.context("Failed to convert indexer addr to URL")?,
}) })
} }
@ -245,6 +214,26 @@ pub fn wallet_config(
}) })
} }
pub fn indexer_config(
bedrock_addr: SocketAddr,
home: PathBuf,
initial_data: &InitialData,
) -> Result<IndexerConfig> {
Ok(IndexerConfig {
home,
consensus_info_polling_interval: Duration::from_secs(1),
bedrock_config: ClientConfig {
addr: addr_to_url(UrlProtocol::Http, bedrock_addr)
.context("Failed to convert bedrock addr to URL")?,
auth: None,
},
initial_public_accounts: Some(initial_data.sequencer_initial_public_accounts()),
initial_private_accounts: Some(initial_data.sequencer_initial_private_accounts()),
signing_key: [37; 32],
channel_id: bedrock_channel_id(),
})
}
pub fn addr_to_url(protocol: UrlProtocol, addr: SocketAddr) -> Result<Url> { pub fn addr_to_url(protocol: UrlProtocol, addr: SocketAddr) -> Result<Url> {
// Convert 0.0.0.0 to 127.0.0.1 for client connections // Convert 0.0.0.0 to 127.0.0.1 for client connections
// When binding to port 0, the server binds to 0.0.0.0:<random_port> // When binding to port 0, the server binds to 0.0.0.0:<random_port>

View File

@ -0,0 +1,34 @@
//! Thin client wrapper for querying the indexer's JSON-RPC API in tests.
//!
//! The sequencer doesn't depend on the indexer at runtime — finalization comes
//! from zone-sdk events. This wrapper exists purely for test ergonomics so
//! integration tests can construct a single connection and call
//! `indexer_service_rpc::RpcClient` methods directly via `Deref`.
use std::ops::Deref;
use anyhow::{Context as _, Result};
use jsonrpsee::ws_client::{WsClient, WsClientBuilder};
use log::info;
use url::Url;
/// WebSocket connection to the indexer's JSON-RPC endpoint.
///
/// Dereferences to [`WsClient`], so `indexer_service_rpc::RpcClient` trait
/// methods can be invoked on it directly.
pub struct IndexerClient(WsClient);

impl IndexerClient {
    /// Opens a websocket connection to the indexer at `indexer_url`.
    ///
    /// # Errors
    ///
    /// Returns an error when the websocket handshake fails.
    pub async fn new(indexer_url: &Url) -> Result<Self> {
        info!("Connecting to Indexer at {indexer_url}");
        let inner = WsClientBuilder::default()
            .build(indexer_url)
            .await
            .context("Failed to create websocket client")?;
        Ok(Self(inner))
    }
}

impl Deref for IndexerClient {
    type Target = WsClient;

    fn deref(&self) -> &Self::Target {
        &self.0
    }
}

View File

@ -9,16 +9,19 @@ use indexer_service::IndexerHandle;
use log::{debug, error}; use log::{debug, error};
use nssa::{AccountId, PrivacyPreservingTransaction}; use nssa::{AccountId, PrivacyPreservingTransaction};
use nssa_core::Commitment; use nssa_core::Commitment;
use sequencer_core::indexer_client::{IndexerClient, IndexerClientTrait as _};
use sequencer_service::SequencerHandle; use sequencer_service::SequencerHandle;
use sequencer_service_rpc::{RpcClient as _, SequencerClient, SequencerClientBuilder}; use sequencer_service_rpc::{RpcClient as _, SequencerClient, SequencerClientBuilder};
use tempfile::TempDir; use tempfile::TempDir;
use testcontainers::compose::DockerCompose; use testcontainers::compose::DockerCompose;
use wallet::WalletCore; use wallet::WalletCore;
use crate::setup::{setup_bedrock_node, setup_indexer, setup_sequencer, setup_wallet}; use crate::{
indexer_client::IndexerClient,
setup::{setup_bedrock_node, setup_indexer, setup_sequencer, setup_wallet},
};
pub mod config; pub mod config;
pub mod indexer_client;
pub mod setup; pub mod setup;
pub mod test_context_ffi; pub mod test_context_ffi;
@ -78,14 +81,10 @@ impl TestContext {
.await .await
.context("Failed to setup Indexer")?; .context("Failed to setup Indexer")?;
let (sequencer_handle, temp_sequencer_dir) = setup_sequencer( let (sequencer_handle, temp_sequencer_dir) =
sequencer_partial_config, setup_sequencer(sequencer_partial_config, bedrock_addr, &initial_data)
bedrock_addr, .await
indexer_handle.addr(), .context("Failed to setup Sequencer")?;
&initial_data,
)
.await
.context("Failed to setup Sequencer")?;
let (wallet, temp_wallet_dir, wallet_password) = let (wallet, temp_wallet_dir, wallet_password) =
setup_wallet(sequencer_handle.addr(), &initial_data) setup_wallet(sequencer_handle.addr(), &initial_data)

View File

@ -119,7 +119,6 @@ pub(crate) async fn setup_indexer(
pub(crate) async fn setup_sequencer( pub(crate) async fn setup_sequencer(
partial: config::SequencerPartialConfig, partial: config::SequencerPartialConfig,
bedrock_addr: SocketAddr, bedrock_addr: SocketAddr,
indexer_addr: SocketAddr,
initial_data: &config::InitialData, initial_data: &config::InitialData,
) -> Result<(SequencerHandle, TempDir)> { ) -> Result<(SequencerHandle, TempDir)> {
let temp_sequencer_dir = let temp_sequencer_dir =
@ -134,7 +133,6 @@ pub(crate) async fn setup_sequencer(
partial, partial,
temp_sequencer_dir.path().to_owned(), temp_sequencer_dir.path().to_owned(),
bedrock_addr, bedrock_addr,
indexer_addr,
initial_data, initial_data,
) )
.context("Failed to create Sequencer config")?; .context("Failed to create Sequencer config")?;

View File

@ -6,7 +6,6 @@ use indexer_ffi::IndexerServiceFFI;
use indexer_service_rpc::RpcClient as _; use indexer_service_rpc::RpcClient as _;
use log::{debug, error}; use log::{debug, error};
use nssa::AccountId; use nssa::AccountId;
use sequencer_core::indexer_client::{IndexerClient, IndexerClientTrait as _};
use sequencer_service::SequencerHandle; use sequencer_service::SequencerHandle;
use sequencer_service_rpc::{RpcClient as _, SequencerClient, SequencerClientBuilder}; use sequencer_service_rpc::{RpcClient as _, SequencerClient, SequencerClientBuilder};
use tempfile::TempDir; use tempfile::TempDir;
@ -15,6 +14,7 @@ use wallet::WalletCore;
use crate::{ use crate::{
BEDROCK_SERVICE_WITH_OPEN_PORT, LOGGER, TestContextBuilder, config, BEDROCK_SERVICE_WITH_OPEN_PORT, LOGGER, TestContextBuilder, config,
indexer_client::IndexerClient,
setup::{setup_bedrock_node, setup_indexer_ffi, setup_sequencer, setup_wallet}, setup::{setup_bedrock_node, setup_indexer_ffi, setup_sequencer, setup_wallet},
}; };
@ -85,8 +85,6 @@ impl TestContextFFI {
.block_on(setup_sequencer( .block_on(setup_sequencer(
sequencer_partial_config, sequencer_partial_config,
bedrock_addr, bedrock_addr,
// SAFETY: addr is valid if indexer_ffi is valid.
unsafe { indexer_ffi.addr() },
initial_data, initial_data,
)) ))
.context("Failed to setup Sequencer")?; .context("Failed to setup Sequencer")?;

View File

@ -27,7 +27,7 @@ use nssa::{
public_transaction as putx, public_transaction as putx,
}; };
use nssa_core::{ use nssa_core::{
MembershipProof, NullifierPublicKey, InputAccountIdentity, MembershipProof, NullifierPublicKey,
account::{AccountWithMetadata, Nonce, data::Data}, account::{AccountWithMetadata, Nonce, data::Data},
encryption::ViewingPublicKey, encryption::ViewingPublicKey,
}; };
@ -251,10 +251,19 @@ fn build_privacy_transaction() -> PrivacyPreservingTransaction {
let (output, proof) = circuit::execute_and_prove( let (output, proof) = circuit::execute_and_prove(
vec![sender_pre, recipient_pre], vec![sender_pre, recipient_pre],
Program::serialize_instruction(balance_to_move).unwrap(), Program::serialize_instruction(balance_to_move).unwrap(),
vec![1, 2], vec![
vec![(sender_npk, 0, sender_ss), (recipient_npk, 0, recipient_ss)], InputAccountIdentity::PrivateAuthorizedUpdate {
vec![sender_nsk], ssk: sender_ss,
vec![Some(proof)], nsk: sender_nsk,
membership_proof: proof,
identifier: 0,
},
InputAccountIdentity::PrivateUnauthorized {
npk: recipient_npk,
ssk: recipient_ss,
identifier: 0,
},
],
&program.into(), &program.into(),
) )
.unwrap(); .unwrap();

View File

@ -26,3 +26,4 @@ itertools.workspace = true
[dev-dependencies] [dev-dependencies]
base58.workspace = true base58.workspace = true
bincode.workspace = true

View File

@ -0,0 +1,505 @@
use aes_gcm::{Aes256Gcm, KeyInit as _, aead::Aead as _};
use nssa_core::{
SharedSecretKey,
encryption::{Scalar, shared_key_derivation::Secp256k1Point},
program::PdaSeed,
};
use rand::{RngCore as _, rngs::OsRng};
use serde::{Deserialize, Serialize};
use sha2::{Digest as _, digest::FixedOutput as _};
use super::secret_holders::{PrivateKeyHolder, SecretSpendingKey};
/// Public key used to seal a `GroupKeyHolder` for distribution to a recipient.
///
/// Structurally identical to `ViewingPublicKey` (both are secp256k1 points), but given
/// a distinct alias to clarify intent: viewing keys encrypt account state, sealing keys
/// encrypt the GMS for off-chain distribution.
pub type SealingPublicKey = Secp256k1Point;

/// Secret key used to unseal a `GroupKeyHolder` received from another member.
pub type SealingSecretKey = Scalar;

/// Manages shared viewing keys for a group of controllers owning private PDAs.
///
/// The Group Master Secret (GMS) is a 32-byte random value shared among controllers.
/// Each private PDA owned by the group gets a unique [`SecretSpendingKey`] derived from
/// the GMS by mixing the PDA seed into the SHA-256 input (see `secret_spending_key_for_pda`).
///
/// # Distribution
///
/// The GMS is a long-term secret and must never cross a trust boundary in raw form.
/// Controllers share it off-chain by sealing it under each recipient's [`SealingPublicKey`]
/// (see `seal_for` / `unseal`). Wallets persisting a `GroupKeyHolder` must encrypt it at
/// rest; the raw bytes are exposed only via [`GroupKeyHolder::dangerous_raw_gms`], which
/// is intended for the sealing path exclusively.
///
/// # Logging safety
///
/// `Debug` is implemented manually to redact the GMS; formatting this value with `{:?}`
/// will not leak the secret. Code that formats through `{:#?}` on containing types is
/// safe for the same reason.
#[derive(Serialize, Deserialize, Clone)]
pub struct GroupKeyHolder {
    /// The 32-byte Group Master Secret. Random when created via `new`; arbitrary bytes
    /// are accepted through `from_gms` (e.g. when restored from an unsealed payload).
    gms: [u8; 32],
}
impl std::fmt::Debug for GroupKeyHolder {
    /// Manual impl so neither `{:?}` nor `{:#?}` can ever print the GMS bytes.
    fn fmt(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        // `debug_struct` keeps the standard struct layout (including the
        // multi-line alternate form) while substituting a placeholder value.
        let redacted: &str = "<redacted>";
        formatter
            .debug_struct("GroupKeyHolder")
            .field("gms", &redacted)
            .finish()
    }
}
impl Default for GroupKeyHolder {
fn default() -> Self {
Self::new()
}
}
impl GroupKeyHolder {
    /// Create a new group with a fresh random GMS.
    #[must_use]
    pub fn new() -> Self {
        let mut gms = [0_u8; 32];
        OsRng.fill_bytes(&mut gms);
        Self { gms }
    }

    /// Restore from an existing GMS (received via `unseal`).
    #[must_use]
    pub const fn from_gms(gms: [u8; 32]) -> Self {
        Self { gms }
    }

    /// Returns the raw 32-byte GMS. The name reflects intent: only the sealed-distribution
    /// path (`seal_for`) and sealed-at-rest persistence should ever need the raw bytes. Do
    /// not log the result, do not pass it across an untrusted channel.
    #[must_use]
    pub const fn dangerous_raw_gms(&self) -> &[u8; 32] {
        &self.gms
    }

    /// Derive a per-PDA [`SecretSpendingKey`] by mixing the seed into the SHA-256 input.
    ///
    /// Each distinct `pda_seed` produces a distinct SSK in the full 256-bit space, so
    /// adversarial seed-grinding cannot collide two PDAs' derived keys under the same
    /// group. Uses the codebase's 32-byte protocol-versioned domain-separation convention.
    fn secret_spending_key_for_pda(&self, pda_seed: &PdaSeed) -> SecretSpendingKey {
        // Exactly 32 bytes, so the prefix occupies one full SHA-256 input block slot.
        const PREFIX: &[u8; 32] = b"/LEE/v0.3/GroupKeyDerivation/SSK";

        // SSK = SHA-256(PREFIX || gms || seed); deterministic in (GMS, seed).
        let mut hasher = sha2::Sha256::new();
        hasher.update(PREFIX);
        hasher.update(self.gms);
        hasher.update(pda_seed.as_ref());
        SecretSpendingKey(hasher.finalize_fixed().into())
    }

    /// Derive keys for a specific PDA.
    ///
    /// All controllers holding the same GMS independently derive the same keys for the
    /// same PDA because the derivation is deterministic in (GMS, seed).
    #[must_use]
    pub fn derive_keys_for_pda(&self, pda_seed: &PdaSeed) -> PrivateKeyHolder {
        self.secret_spending_key_for_pda(pda_seed)
            .produce_private_key_holder(None)
    }

    /// Encrypts this holder's GMS under the recipient's [`SealingPublicKey`].
    ///
    /// Uses an ephemeral ECDH key exchange to derive a shared secret, then AES-256-GCM
    /// to encrypt the payload. The returned bytes are
    /// `ephemeral_pubkey (33) || nonce (12) || ciphertext+tag (48)` = 93 bytes.
    ///
    /// Each call generates a fresh ephemeral key, so two seals of the same holder produce
    /// different ciphertexts.
    #[must_use]
    pub fn seal_for(&self, recipient_key: &SealingPublicKey) -> Vec<u8> {
        // NOTE(review): ephemeral_scalar is 32 uniform random bytes, not reduced mod the
        // secp256k1 group order — assumes `Secp256k1Point::from_scalar` / `SharedSecretKey`
        // tolerate out-of-range scalars; confirm against their implementations.
        let mut ephemeral_scalar: Scalar = [0_u8; 32];
        OsRng.fill_bytes(&mut ephemeral_scalar);
        let ephemeral_pubkey = Secp256k1Point::from_scalar(ephemeral_scalar);

        // ECDH with the recipient's sealing key, then KDF down to an AES-256 key.
        let shared = SharedSecretKey::new(&ephemeral_scalar, recipient_key);
        let aes_key = Self::seal_kdf(&shared);
        let cipher = Aes256Gcm::new(&aes_key.into());

        // Fresh random 96-bit nonce per seal; safe because the AES key is also fresh.
        let mut nonce_bytes = [0_u8; 12];
        OsRng.fill_bytes(&mut nonce_bytes);
        let nonce = aes_gcm::Nonce::from(nonce_bytes);

        let ciphertext = cipher
            .encrypt(&nonce, self.gms.as_ref())
            .expect("AES-GCM encryption should not fail with valid key/nonce");

        // 33 (compressed point) + 12 (nonce) + ciphertext; checked arithmetic is
        // belt-and-braces — these sizes cannot overflow usize in practice.
        let capacity = 33_usize
            .checked_add(12)
            .and_then(|n| n.checked_add(ciphertext.len()))
            .expect("seal capacity overflow");
        let mut out = Vec::with_capacity(capacity);
        out.extend_from_slice(&ephemeral_pubkey.0);
        out.extend_from_slice(&nonce_bytes);
        out.extend_from_slice(&ciphertext);
        out
    }

    /// Decrypts a sealed `GroupKeyHolder` using the recipient's [`SealingSecretKey`].
    ///
    /// Returns `Err` if the ciphertext is too short, the ECDH point is invalid, or the
    /// AES-GCM authentication tag doesn't verify (wrong key or tampered data).
    pub fn unseal(sealed: &[u8], own_key: &SealingSecretKey) -> Result<Self, SealError> {
        // Layout mirrors `seal_for`: pubkey (33) || nonce (12) || ciphertext+tag (>= 16).
        const HEADER_LEN: usize = 33 + 12;
        const MIN_LEN: usize = HEADER_LEN + 16;
        if sealed.len() < MIN_LEN {
            return Err(SealError::TooShort);
        }
        // MIN_LEN (61) > HEADER_LEN (45), so all slicing below is in bounds.
        // NOTE(review): the point bytes are taken as-is; the "invalid ECDH point" error
        // path in the doc above relies on `SharedSecretKey::new` rejecting (not panicking
        // on) malformed points — confirm.
        let ephemeral_pubkey = Secp256k1Point(sealed[..33].to_vec());
        let nonce = aes_gcm::Nonce::from_slice(&sealed[33..HEADER_LEN]);
        let ciphertext = &sealed[HEADER_LEN..];

        let shared = SharedSecretKey::new(own_key, &ephemeral_pubkey);
        let aes_key = Self::seal_kdf(&shared);
        let cipher = Aes256Gcm::new(&aes_key.into());

        let plaintext = cipher
            .decrypt(nonce, ciphertext)
            .map_err(|_err| SealError::DecryptionFailed)?;

        // A valid payload is exactly the 32-byte GMS; any other length means the sealed
        // blob was produced by something other than `seal_for`.
        if plaintext.len() != 32 {
            return Err(SealError::DecryptionFailed);
        }
        let mut gms = [0_u8; 32];
        gms.copy_from_slice(&plaintext);
        Ok(Self::from_gms(gms))
    }

    /// Derives an AES-256 key from the ECDH shared secret via SHA-256 with a domain prefix.
    fn seal_kdf(shared: &SharedSecretKey) -> [u8; 32] {
        // 26 printable bytes NUL-padded to the 32-byte domain-separation convention.
        const PREFIX: &[u8; 32] = b"/LEE/v0.3/GroupKeySeal/AES\x00\x00\x00\x00\x00\x00";

        let mut hasher = sha2::Sha256::new();
        hasher.update(PREFIX);
        hasher.update(shared.0);
        hasher.finalize_fixed().into()
    }
}
/// Errors returned by `GroupKeyHolder::unseal`.
///
/// Implements [`std::fmt::Display`] and [`std::error::Error`] so callers can propagate it
/// with `?` into `anyhow::Error` / `Box<dyn Error>` instead of matching on it manually.
#[derive(Debug)]
pub enum SealError {
    /// The sealed payload is shorter than the minimum
    /// `ephemeral_pubkey (33) || nonce (12) || tag (16)` envelope.
    TooShort,
    /// AES-GCM authentication failed (wrong key or tampered data), or the recovered
    /// plaintext was not exactly 32 bytes.
    DecryptionFailed,
}

impl std::fmt::Display for SealError {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        match self {
            Self::TooShort => write!(f, "sealed payload is too short"),
            Self::DecryptionFailed => write!(f, "failed to decrypt sealed group key"),
        }
    }
}

impl std::error::Error for SealError {}
#[cfg(test)]
mod tests {
    // Unit tests for GroupKeyHolder: derivation determinism, domain separation,
    // serde/GMS round-trips, seal/unseal envelope behavior, and pinned values.
    use nssa_core::NullifierPublicKey;

    use super::*;

    /// Two holders from the same GMS derive identical keys for the same PDA seed.
    #[test]
    fn same_gms_same_seed_produces_same_keys() {
        let gms = [42_u8; 32];
        let holder_a = GroupKeyHolder::from_gms(gms);
        let holder_b = GroupKeyHolder::from_gms(gms);
        let seed = PdaSeed::new([1; 32]);

        let keys_a = holder_a.derive_keys_for_pda(&seed);
        let keys_b = holder_b.derive_keys_for_pda(&seed);

        assert_eq!(
            keys_a.generate_nullifier_public_key().to_byte_array(),
            keys_b.generate_nullifier_public_key().to_byte_array(),
        );
    }

    /// Different PDA seeds produce different keys from the same GMS.
    #[test]
    fn same_gms_different_seed_produces_different_keys() {
        let holder = GroupKeyHolder::from_gms([42_u8; 32]);
        let seed_a = PdaSeed::new([1; 32]);
        let seed_b = PdaSeed::new([2; 32]);

        let npk_a = holder
            .derive_keys_for_pda(&seed_a)
            .generate_nullifier_public_key();
        let npk_b = holder
            .derive_keys_for_pda(&seed_b)
            .generate_nullifier_public_key();

        assert_ne!(npk_a.to_byte_array(), npk_b.to_byte_array());
    }

    /// Different GMS produce different keys for the same PDA seed.
    #[test]
    fn different_gms_same_seed_produces_different_keys() {
        let holder_a = GroupKeyHolder::from_gms([42_u8; 32]);
        let holder_b = GroupKeyHolder::from_gms([99_u8; 32]);
        let seed = PdaSeed::new([1; 32]);

        let npk_a = holder_a
            .derive_keys_for_pda(&seed)
            .generate_nullifier_public_key();
        let npk_b = holder_b
            .derive_keys_for_pda(&seed)
            .generate_nullifier_public_key();

        assert_ne!(npk_a.to_byte_array(), npk_b.to_byte_array());
    }

    /// GMS round-trip: export and restore produces the same keys.
    #[test]
    fn gms_round_trip() {
        let original = GroupKeyHolder::from_gms([7_u8; 32]);
        let restored = GroupKeyHolder::from_gms(*original.dangerous_raw_gms());
        let seed = PdaSeed::new([1; 32]);

        let npk_original = original
            .derive_keys_for_pda(&seed)
            .generate_nullifier_public_key();
        let npk_restored = restored
            .derive_keys_for_pda(&seed)
            .generate_nullifier_public_key();

        assert_eq!(npk_original.to_byte_array(), npk_restored.to_byte_array());
    }

    /// The derived `NullifierPublicKey` is non-zero (sanity check).
    #[test]
    fn derived_npk_is_non_zero() {
        let holder = GroupKeyHolder::from_gms([42_u8; 32]);
        let seed = PdaSeed::new([1; 32]);

        let npk = holder
            .derive_keys_for_pda(&seed)
            .generate_nullifier_public_key();

        assert_ne!(npk, NullifierPublicKey([0; 32]));
    }

    /// Pins the end-to-end derivation for a fixed (GMS, `ProgramId`, `PdaSeed`). Any change
    /// to `secret_spending_key_for_pda`, the `PrivateKeyHolder` nsk/npk chain, or the
    /// `AccountId::for_private_pda` formula breaks this test. Mirrors the pinned-value
    /// pattern from `for_private_pda_matches_pinned_value` in `nssa_core`.
    #[test]
    fn pinned_end_to_end_derivation_for_private_pda() {
        use nssa_core::{account::AccountId, program::ProgramId};

        let gms = [42_u8; 32];
        let seed = PdaSeed::new([1; 32]);
        let program_id: ProgramId = [9; 8];

        let holder = GroupKeyHolder::from_gms(gms);
        let npk = holder
            .derive_keys_for_pda(&seed)
            .generate_nullifier_public_key();
        // u128::MAX exercises the extreme of the identifier diversifier.
        let account_id = AccountId::for_private_pda(&program_id, &seed, &npk, u128::MAX);

        let expected_npk = NullifierPublicKey([
            185, 161, 225, 224, 20, 156, 173, 0, 6, 173, 74, 136, 16, 88, 71, 154, 101, 160, 224,
            162, 247, 98, 183, 210, 118, 130, 143, 237, 20, 112, 111, 114,
        ]);
        let expected_account_id = AccountId::new([
            251, 228, 245, 3, 160, 134, 97, 69, 187, 157, 170, 192, 165, 216, 166, 79, 179, 187,
            125, 146, 36, 192, 232, 110, 198, 47, 24, 10, 223, 25, 108, 5,
        ]);
        assert_eq!(npk, expected_npk);
        assert_eq!(account_id, expected_account_id);
    }

    /// Wallets persist `GroupKeyHolder` to disk and reload it on startup. This test pins
    /// the serde round-trip: serialize, deserialize, and assert the derived keys for a
    /// sample seed match on both sides. A silent encoding drift would corrupt every
    /// group-owned account.
    #[test]
    fn gms_serde_round_trip_preserves_derivation() {
        let original = GroupKeyHolder::from_gms([7_u8; 32]);
        let encoded = bincode::serialize(&original).expect("serialize");
        let restored: GroupKeyHolder = bincode::deserialize(&encoded).expect("deserialize");

        let seed = PdaSeed::new([1; 32]);
        let npk_original = original
            .derive_keys_for_pda(&seed)
            .generate_nullifier_public_key();
        let npk_restored = restored
            .derive_keys_for_pda(&seed)
            .generate_nullifier_public_key();

        assert_eq!(npk_original, npk_restored);
        assert_eq!(original.dangerous_raw_gms(), restored.dangerous_raw_gms());
    }

    /// A `GroupKeyHolder` constructed from the same 32 bytes as a personal
    /// `SecretSpendingKey` must not derive the same `NullifierPublicKey` as the personal
    /// path, so a private PDA cannot be spent by a personal nullifier even under
    /// adversarial key-material reuse. The safety rests on the group path's distinct
    /// domain-separation prefix plus the seed mix-in (see `secret_spending_key_for_pda`).
    #[test]
    fn group_derivation_does_not_collide_with_personal_path_at_shared_bytes() {
        let shared_bytes = [13_u8; 32];
        let seed = PdaSeed::new([5; 32]);

        let group_npk = GroupKeyHolder::from_gms(shared_bytes)
            .derive_keys_for_pda(&seed)
            .generate_nullifier_public_key();
        let personal_npk = SecretSpendingKey(shared_bytes)
            .produce_private_key_holder(None)
            .generate_nullifier_public_key();

        assert_ne!(group_npk, personal_npk);
    }

    /// Seal then unseal recovers the same GMS and derived keys.
    #[test]
    fn seal_unseal_round_trip() {
        let holder = GroupKeyHolder::from_gms([42_u8; 32]);
        let recipient_ssk = SecretSpendingKey([7_u8; 32]);
        let recipient_keys = recipient_ssk.produce_private_key_holder(None);
        let recipient_vpk = recipient_keys.generate_viewing_public_key();
        let recipient_vsk = recipient_keys.viewing_secret_key;

        let sealed = holder.seal_for(&recipient_vpk);
        let restored = GroupKeyHolder::unseal(&sealed, &recipient_vsk).expect("unseal");

        assert_eq!(restored.dangerous_raw_gms(), holder.dangerous_raw_gms());
        let seed = PdaSeed::new([1; 32]);
        assert_eq!(
            holder
                .derive_keys_for_pda(&seed)
                .generate_nullifier_public_key(),
            restored
                .derive_keys_for_pda(&seed)
                .generate_nullifier_public_key(),
        );
    }

    /// Unsealing with a different VSK fails with `DecryptionFailed`.
    #[test]
    fn unseal_wrong_vsk_fails() {
        let holder = GroupKeyHolder::from_gms([42_u8; 32]);
        let recipient_ssk = SecretSpendingKey([7_u8; 32]);
        let recipient_vpk = recipient_ssk
            .produce_private_key_holder(None)
            .generate_viewing_public_key();
        let wrong_ssk = SecretSpendingKey([99_u8; 32]);
        let wrong_vsk = wrong_ssk
            .produce_private_key_holder(None)
            .viewing_secret_key;

        let sealed = holder.seal_for(&recipient_vpk);
        let result = GroupKeyHolder::unseal(&sealed, &wrong_vsk);

        assert!(matches!(result, Err(super::SealError::DecryptionFailed)));
    }

    /// Tampered ciphertext fails authentication.
    #[test]
    fn unseal_tampered_ciphertext_fails() {
        let holder = GroupKeyHolder::from_gms([42_u8; 32]);
        let recipient_ssk = SecretSpendingKey([7_u8; 32]);
        let recipient_keys = recipient_ssk.produce_private_key_holder(None);
        let recipient_vpk = recipient_keys.generate_viewing_public_key();
        let recipient_vsk = recipient_keys.viewing_secret_key;

        let mut sealed = holder.seal_for(&recipient_vpk);
        // Flip a byte in the ciphertext portion (after ephemeral_pubkey + nonce)
        let last = sealed.len() - 1;
        sealed[last] ^= 0xFF;
        let result = GroupKeyHolder::unseal(&sealed, &recipient_vsk);

        assert!(matches!(result, Err(super::SealError::DecryptionFailed)));
    }

    /// Two seals of the same holder produce different ciphertexts (ephemeral randomness).
    #[test]
    fn two_seals_produce_different_ciphertexts() {
        let holder = GroupKeyHolder::from_gms([42_u8; 32]);
        let recipient_ssk = SecretSpendingKey([7_u8; 32]);
        let recipient_vpk = recipient_ssk
            .produce_private_key_holder(None)
            .generate_viewing_public_key();

        let sealed_a = holder.seal_for(&recipient_vpk);
        let sealed_b = holder.seal_for(&recipient_vpk);

        assert_ne!(sealed_a, sealed_b);
    }

    /// Sealed payload is too short.
    #[test]
    fn unseal_too_short_fails() {
        let vsk: SealingSecretKey = [7_u8; 32];
        let result = GroupKeyHolder::unseal(&[0_u8; 10], &vsk);
        assert!(matches!(result, Err(super::SealError::TooShort)));
    }

    /// Degenerate GMS values (all-zeros, all-ones, single-bit) must still produce valid,
    /// non-zero, pairwise-distinct npks. Rules out accidental "if gms == default { return
    /// default }" style shortcuts in the derivation.
    #[test]
    fn degenerate_gms_produces_distinct_non_zero_keys() {
        let seed = PdaSeed::new([1; 32]);
        let degenerate = [[0_u8; 32], [0xFF_u8; 32], {
            let mut v = [0_u8; 32];
            v[0] = 1;
            v
        }];

        let npks: Vec<NullifierPublicKey> = degenerate
            .iter()
            .map(|gms| {
                GroupKeyHolder::from_gms(*gms)
                    .derive_keys_for_pda(&seed)
                    .generate_nullifier_public_key()
            })
            .collect();

        for npk in &npks {
            assert_ne!(*npk, NullifierPublicKey([0; 32]));
        }
        // Pairwise distinctness over all three derived keys.
        for (i, a) in npks.iter().enumerate() {
            for b in &npks[i + 1..] {
                assert_ne!(a, b);
            }
        }
    }

    /// Full lifecycle: create group, distribute GMS via seal/unseal, verify key agreement.
    #[test]
    fn group_pda_lifecycle() {
        use nssa_core::account::AccountId;

        let alice_holder = GroupKeyHolder::new();
        let pda_seed = PdaSeed::new([42_u8; 32]);
        let program_id: nssa_core::program::ProgramId = [1; 8];

        // Derive Alice's keys
        let alice_keys = alice_holder.derive_keys_for_pda(&pda_seed);
        let alice_npk = alice_keys.generate_nullifier_public_key();

        // Seal GMS for Bob using Bob's viewing key, Bob unseals
        let bob_ssk = SecretSpendingKey([77_u8; 32]);
        let bob_keys = bob_ssk.produce_private_key_holder(None);
        let bob_vpk = bob_keys.generate_viewing_public_key();
        let bob_vsk = bob_keys.viewing_secret_key;
        let sealed = alice_holder.seal_for(&bob_vpk);
        let bob_holder =
            GroupKeyHolder::unseal(&sealed, &bob_vsk).expect("Bob should unseal the GMS");

        // Key agreement: both derive identical NPK and AccountId
        let bob_npk = bob_holder
            .derive_keys_for_pda(&pda_seed)
            .generate_nullifier_public_key();
        assert_eq!(alice_npk, bob_npk);

        let alice_account_id = AccountId::for_private_pda(&program_id, &pda_seed, &alice_npk, 0);
        let bob_account_id = AccountId::for_private_pda(&program_id, &pda_seed, &bob_npk, 0);
        assert_eq!(alice_account_id, bob_account_id);
    }
}

View File

@ -6,6 +6,7 @@ use secret_holders::{PrivateKeyHolder, SecretSpendingKey, SeedHolder};
use serde::{Deserialize, Serialize}; use serde::{Deserialize, Serialize};
pub mod ephemeral_key_holder; pub mod ephemeral_key_holder;
pub mod group_key_holder;
pub mod key_tree; pub mod key_tree;
pub mod secret_holders; pub mod secret_holders;

View File

@ -8,6 +8,7 @@ use serde::{Deserialize, Serialize};
use crate::key_management::{ use crate::key_management::{
KeyChain, KeyChain,
group_key_holder::GroupKeyHolder,
key_tree::{KeyTreePrivate, KeyTreePublic, chain_index::ChainIndex}, key_tree::{KeyTreePrivate, KeyTreePublic, chain_index::ChainIndex},
secret_holders::SeedHolder, secret_holders::SeedHolder,
}; };
@ -30,6 +31,17 @@ pub struct NSSAUserData {
pub public_key_tree: KeyTreePublic, pub public_key_tree: KeyTreePublic,
/// Tree of private keys. /// Tree of private keys.
pub private_key_tree: KeyTreePrivate, pub private_key_tree: KeyTreePrivate,
/// Group key holders for private PDA groups, keyed by a human-readable label.
/// Defaults to empty for backward compatibility with wallets that predate group PDAs.
/// An older wallet binary that re-serializes this struct will drop the field.
#[serde(default)]
pub group_key_holders: BTreeMap<String, GroupKeyHolder>,
/// Cached plaintext state of private PDA accounts, keyed by `AccountId`.
/// Updated after each private PDA transaction by decrypting the circuit output.
/// The sequencer only stores encrypted commitments, so this local cache is the
/// only source of plaintext state for private PDAs.
#[serde(default, alias = "group_pda_accounts")]
pub pda_accounts: BTreeMap<nssa::AccountId, nssa_core::account::Account>,
} }
impl NSSAUserData { impl NSSAUserData {
@ -88,6 +100,8 @@ impl NSSAUserData {
default_user_private_accounts: default_accounts_key_chains, default_user_private_accounts: default_accounts_key_chains,
public_key_tree, public_key_tree,
private_key_tree, private_key_tree,
group_key_holders: BTreeMap::new(),
pda_accounts: BTreeMap::new(),
}) })
} }
@ -193,6 +207,20 @@ impl NSSAUserData {
.copied() .copied()
.chain(self.private_key_tree.account_id_map.keys().copied()) .chain(self.private_key_tree.account_id_map.keys().copied())
} }
/// Returns the `GroupKeyHolder` for the given label, if it exists.
#[must_use]
pub fn group_key_holder(&self, label: &str) -> Option<&GroupKeyHolder> {
self.group_key_holders.get(label)
}
/// Inserts or replaces a `GroupKeyHolder` under the given label.
///
/// If a holder already exists under this label, it is silently replaced and the old
/// GMS is lost. Callers must ensure label uniqueness across groups.
pub fn insert_group_key_holder(&mut self, label: String, holder: GroupKeyHolder) {
self.group_key_holders.insert(label, holder);
}
} }
impl Default for NSSAUserData { impl Default for NSSAUserData {
@ -212,6 +240,26 @@ impl Default for NSSAUserData {
mod tests { mod tests {
use super::*; use super::*;
#[test]
fn group_key_holder_storage_round_trip() {
let mut user_data = NSSAUserData::default();
assert!(user_data.group_key_holder("test-group").is_none());
let holder = GroupKeyHolder::from_gms([42_u8; 32]);
user_data.insert_group_key_holder(String::from("test-group"), holder.clone());
let retrieved = user_data
.group_key_holder("test-group")
.expect("should exist");
assert_eq!(retrieved.dangerous_raw_gms(), holder.dangerous_raw_gms());
}
#[test]
fn group_key_holders_default_empty() {
let user_data = NSSAUserData::default();
assert!(user_data.group_key_holders.is_empty());
}
#[test] #[test]
fn new_account() { fn new_account() {
let mut user_data = NSSAUserData::default(); let mut user_data = NSSAUserData::default();

View File

@ -12,23 +12,97 @@ use crate::{
pub struct PrivacyPreservingCircuitInput { pub struct PrivacyPreservingCircuitInput {
/// Outputs of the program execution. /// Outputs of the program execution.
pub program_outputs: Vec<ProgramOutput>, pub program_outputs: Vec<ProgramOutput>,
/// Visibility mask for accounts. /// One entry per `pre_state`, in the same order as the program's `pre_states`.
/// /// Length must equal the number of `pre_states` derived from `program_outputs`.
/// - `0` - public account /// The guest's `private_pda_npk_by_position` and `private_pda_bound_positions`
/// - `1` - private account with authentication /// rely on this position alignment.
/// - `2` - private account without authentication pub account_identities: Vec<InputAccountIdentity>,
/// - `3` - private PDA account
pub visibility_mask: Vec<u8>,
/// Public keys and identifiers of private accounts.
pub private_account_keys: Vec<(NullifierPublicKey, Identifier, SharedSecretKey)>,
/// Nullifier secret keys for authorized private accounts.
pub private_account_nsks: Vec<NullifierSecretKey>,
/// Membership proofs for private accounts. Can be [`None`] for uninitialized accounts.
pub private_account_membership_proofs: Vec<Option<MembershipProof>>,
/// Program ID. /// Program ID.
pub program_id: ProgramId, pub program_id: ProgramId,
} }
/// Per-account input to the privacy-preserving circuit. Each variant carries exactly the fields
/// the guest needs for that account's code path.
// NOTE(review): this type derives Serialize/Deserialize and crosses the host/guest boundary;
// under positional encodings (e.g. bincode) variant order is part of the wire format —
// presumably reordering variants breaks host/guest compatibility. Confirm before reordering.
#[derive(Serialize, Deserialize, Clone)]
pub enum InputAccountIdentity {
    /// Public account. The guest reads pre/post state from `program_outputs` and emits no
    /// commitment, ciphertext, or nullifier.
    Public,
    /// Init of an authorized standalone private account: no membership proof. The `pre_state`
    /// must be `Account::default()`. The `account_id` is derived as
    /// `AccountId::from((&NullifierPublicKey::from(nsk), identifier))` and matched against
    /// `pre_state.account_id`.
    PrivateAuthorizedInit {
        ssk: SharedSecretKey,
        nsk: NullifierSecretKey,
        identifier: Identifier,
    },
    /// Update of an authorized standalone private account: existing on-chain commitment, with
    /// membership proof.
    PrivateAuthorizedUpdate {
        ssk: SharedSecretKey,
        nsk: NullifierSecretKey,
        membership_proof: MembershipProof,
        identifier: Identifier,
    },
    /// Init of a standalone private account the caller does not own (e.g. a recipient who
    /// doesn't yet exist on chain). No `nsk`, no membership proof.
    PrivateUnauthorized {
        npk: NullifierPublicKey,
        ssk: SharedSecretKey,
        identifier: Identifier,
    },
    /// Init of a private PDA, unauthorized. The npk-to-account_id binding is proven upstream
    /// via `Claim::Pda(seed)` or a caller's `pda_seeds` match. The identifier diversifies the
    /// PDA within the `(program_id, seed, npk)` family: `AccountId::for_private_pda` uses it
    /// as the 4th input.
    PrivatePdaInit {
        npk: NullifierPublicKey,
        ssk: SharedSecretKey,
        identifier: Identifier,
    },
    /// Update of an existing private PDA, authorized, with membership proof. `npk` is derived
    /// from `nsk`. Authorization is established upstream by a caller `pda_seeds` match or a
    /// previously-seen authorization in a chained call.
    PrivatePdaUpdate {
        ssk: SharedSecretKey,
        nsk: NullifierSecretKey,
        membership_proof: MembershipProof,
        identifier: Identifier,
    },
}
impl InputAccountIdentity {
    /// Whether this entry describes a public account.
    #[must_use]
    pub const fn is_public(&self) -> bool {
        matches!(self, Self::Public)
    }

    /// Whether this entry describes a private PDA, covering both init and update.
    #[must_use]
    pub const fn is_private_pda(&self) -> bool {
        matches!(self, Self::PrivatePdaInit { .. }) || matches!(self, Self::PrivatePdaUpdate { .. })
    }

    /// For private PDA variants, return the `(npk, identifier)` pair. `Init` carries both
    /// directly; `Update` derives `npk` from `nsk`. For non-PDA variants returns `None`.
    #[must_use]
    pub fn npk_if_private_pda(&self) -> Option<(NullifierPublicKey, Identifier)> {
        // Exhaustive match (no `_` arm) so adding a variant forces a decision here.
        match self {
            Self::PrivatePdaUpdate { nsk, identifier, .. } => {
                Some((NullifierPublicKey::from(nsk), *identifier))
            }
            Self::PrivatePdaInit { npk, identifier, .. } => Some((*npk, *identifier)),
            Self::Public
            | Self::PrivateAuthorizedInit { .. }
            | Self::PrivateAuthorizedUpdate { .. }
            | Self::PrivateUnauthorized { .. } => None,
        }
    }
}
#[derive(Serialize, Deserialize)] #[derive(Serialize, Deserialize)]
#[cfg_attr(any(feature = "host", test), derive(Debug, PartialEq, Eq))] #[cfg_attr(any(feature = "host", test), derive(Debug, PartialEq, Eq))]
pub struct PrivacyPreservingCircuitOutput { pub struct PrivacyPreservingCircuitOutput {

View File

@ -3,7 +3,9 @@
reason = "We prefer to group methods by functionality rather than by type for encoding" reason = "We prefer to group methods by functionality rather than by type for encoding"
)] )]
pub use circuit_io::{PrivacyPreservingCircuitInput, PrivacyPreservingCircuitOutput}; pub use circuit_io::{
InputAccountIdentity, PrivacyPreservingCircuitInput, PrivacyPreservingCircuitOutput,
};
pub use commitment::{ pub use commitment::{
Commitment, CommitmentSetDigest, DUMMY_COMMITMENT, DUMMY_COMMITMENT_HASH, MembershipProof, Commitment, CommitmentSetDigest, DUMMY_COMMITMENT, DUMMY_COMMITMENT_HASH, MembershipProof,
compute_digest_for_path, compute_digest_for_path,

View File

@ -43,6 +43,12 @@ impl PdaSeed {
} }
} }
/// Borrow the raw seed bytes, so generic `impl AsRef<[u8]>` APIs (hashing,
/// serialization helpers) can accept a `PdaSeed` without an explicit copy.
impl AsRef<[u8]> for PdaSeed {
    fn as_ref(&self) -> &[u8] {
        &self.0
    }
}
impl AccountId { impl AccountId {
/// Derives an [`AccountId`] for a public PDA from the program ID and seed. /// Derives an [`AccountId`] for a public PDA from the program ID and seed.
#[must_use] #[must_use]

View File

@ -2,8 +2,7 @@ use std::collections::{HashMap, VecDeque};
use borsh::{BorshDeserialize, BorshSerialize}; use borsh::{BorshDeserialize, BorshSerialize};
use nssa_core::{ use nssa_core::{
Identifier, MembershipProof, NullifierPublicKey, NullifierSecretKey, InputAccountIdentity, PrivacyPreservingCircuitInput, PrivacyPreservingCircuitOutput,
PrivacyPreservingCircuitInput, PrivacyPreservingCircuitOutput, SharedSecretKey,
account::AccountWithMetadata, account::AccountWithMetadata,
program::{ChainedCall, InstructionData, ProgramId, ProgramOutput}, program::{ChainedCall, InstructionData, ProgramId, ProgramOutput},
}; };
@ -63,14 +62,10 @@ impl From<Program> for ProgramWithDependencies {
/// Generates a proof of the execution of a NSSA program inside the privacy preserving execution /// Generates a proof of the execution of a NSSA program inside the privacy preserving execution
/// circuit. /// circuit.
/// TODO: too many parameters.
pub fn execute_and_prove( pub fn execute_and_prove(
pre_states: Vec<AccountWithMetadata>, pre_states: Vec<AccountWithMetadata>,
instruction_data: InstructionData, instruction_data: InstructionData,
visibility_mask: Vec<u8>, account_identities: Vec<InputAccountIdentity>,
private_account_keys: Vec<(NullifierPublicKey, Identifier, SharedSecretKey)>,
private_account_nsks: Vec<NullifierSecretKey>,
private_account_membership_proofs: Vec<Option<MembershipProof>>,
program_with_dependencies: &ProgramWithDependencies, program_with_dependencies: &ProgramWithDependencies,
) -> Result<(PrivacyPreservingCircuitOutput, Proof), NssaError> { ) -> Result<(PrivacyPreservingCircuitOutput, Proof), NssaError> {
let ProgramWithDependencies { let ProgramWithDependencies {
@ -128,10 +123,7 @@ pub fn execute_and_prove(
let circuit_input = PrivacyPreservingCircuitInput { let circuit_input = PrivacyPreservingCircuitInput {
program_outputs, program_outputs,
visibility_mask, account_identities,
private_account_keys,
private_account_nsks,
private_account_membership_proofs,
program_id: program_with_dependencies.program.id(), program_id: program_with_dependencies.program.id(),
}; };
@ -184,7 +176,8 @@ mod tests {
#![expect(clippy::shadow_unrelated, reason = "We don't care about it in tests")] #![expect(clippy::shadow_unrelated, reason = "We don't care about it in tests")]
use nssa_core::{ use nssa_core::{
Commitment, DUMMY_COMMITMENT_HASH, EncryptionScheme, Nullifier, SharedSecretKey, Commitment, DUMMY_COMMITMENT_HASH, EncryptionScheme, Nullifier,
SharedSecretKey,
account::{Account, AccountId, AccountWithMetadata, Nonce, data::Data}, account::{Account, AccountId, AccountWithMetadata, Nonce, data::Data},
encryption::PrivateAccountKind, encryption::PrivateAccountKind,
program::PdaSeed, program::PdaSeed,
@ -242,10 +235,14 @@ mod tests {
let (output, proof) = execute_and_prove( let (output, proof) = execute_and_prove(
vec![sender, recipient], vec![sender, recipient],
Program::serialize_instruction(balance_to_move).unwrap(), Program::serialize_instruction(balance_to_move).unwrap(),
vec![0, 2], vec![
vec![(recipient_keys.npk(), 0, shared_secret)], InputAccountIdentity::Public,
vec![], InputAccountIdentity::PrivateUnauthorized {
vec![None], npk: recipient_keys.npk(),
ssk: shared_secret,
identifier: 0,
},
],
&Program::authenticated_transfer_program().into(), &Program::authenticated_transfer_program().into(),
) )
.unwrap(); .unwrap();
@ -335,13 +332,21 @@ mod tests {
let (output, proof) = execute_and_prove( let (output, proof) = execute_and_prove(
vec![sender_pre, recipient], vec![sender_pre, recipient],
Program::serialize_instruction(balance_to_move).unwrap(), Program::serialize_instruction(balance_to_move).unwrap(),
vec![1, 2],
vec![ vec![
(sender_keys.npk(), 0, shared_secret_1), InputAccountIdentity::PrivateAuthorizedUpdate {
(recipient_keys.npk(), 0, shared_secret_2), ssk: shared_secret_1,
nsk: sender_keys.nsk,
membership_proof: commitment_set
.get_proof_for(&commitment_sender)
.expect("sender's commitment must be in the set"),
identifier: 0,
},
InputAccountIdentity::PrivateUnauthorized {
npk: recipient_keys.npk(),
ssk: shared_secret_2,
identifier: 0,
},
], ],
vec![sender_keys.nsk],
vec![commitment_set.get_proof_for(&commitment_sender), None],
&program.into(), &program.into(),
) )
.unwrap(); .unwrap();
@ -404,10 +409,11 @@ mod tests {
let result = execute_and_prove( let result = execute_and_prove(
vec![pre], vec![pre],
instruction, instruction,
vec![2], vec![InputAccountIdentity::PrivateUnauthorized {
vec![(account_keys.npk(), 0, shared_secret)], npk: account_keys.npk(),
vec![], ssk: shared_secret,
vec![None], identifier: 0,
}],
&program_with_deps, &program_with_deps,
); );
@ -431,10 +437,11 @@ mod tests {
let (output, _proof) = execute_and_prove( let (output, _proof) = execute_and_prove(
vec![pre_state], vec![pre_state],
Program::serialize_instruction(seed).unwrap(), Program::serialize_instruction(seed).unwrap(),
vec![3], vec![InputAccountIdentity::PrivatePdaInit {
vec![(npk, identifier, shared_secret.clone())], npk,
vec![], ssk: shared_secret.clone(),
vec![None], identifier,
}],
&program.clone().into(), &program.clone().into(),
) )
.unwrap(); .unwrap();
@ -491,10 +498,14 @@ mod tests {
funder.clone(), funder.clone(),
], ],
Program::serialize_instruction((seed, amount, auth_transfer_id)).unwrap(), Program::serialize_instruction((seed, amount, auth_transfer_id)).unwrap(),
vec![3, 0], vec![
vec![(alice_npk, 0, alice_shared_0.clone())], InputAccountIdentity::PrivatePdaInit {
vec![], npk: alice_npk,
vec![None], ssk: alice_shared_0.clone(),
identifier: 0,
},
InputAccountIdentity::Public,
],
&program_with_deps, &program_with_deps,
) )
.unwrap(); .unwrap();
@ -522,10 +533,14 @@ mod tests {
funder.clone(), funder.clone(),
], ],
Program::serialize_instruction((seed, amount, auth_transfer_id)).unwrap(), Program::serialize_instruction((seed, amount, auth_transfer_id)).unwrap(),
vec![3, 0], vec![
vec![(alice_npk, 1, alice_shared_1.clone())], InputAccountIdentity::PrivatePdaInit {
vec![], npk: alice_npk,
vec![None], ssk: alice_shared_1.clone(),
identifier: 1,
},
InputAccountIdentity::Public,
],
&program_with_deps, &program_with_deps,
) )
.unwrap(); .unwrap();
@ -561,13 +576,19 @@ mod tests {
AccountWithMetadata::new(Account::default(), false, recipient_0_id), AccountWithMetadata::new(Account::default(), false, recipient_0_id),
], ],
Program::serialize_instruction((seed, amount, auth_transfer_id)).unwrap(), Program::serialize_instruction((seed, amount, auth_transfer_id)).unwrap(),
vec![3, 2],
vec![ vec![
(alice_npk, 0, alice_shared_0.clone()), InputAccountIdentity::PrivatePdaUpdate {
(recipient_keys.npk(), 0, SharedSecretKey::new(&[20; 32], &recipient_keys.vpk())), ssk: alice_shared_0.clone(),
nsk: alice_keys.nsk,
membership_proof: proof_pda_0.expect("pda_0 commitment must be in the set"),
identifier: 0,
},
InputAccountIdentity::PrivateUnauthorized {
npk: recipient_keys.npk(),
ssk: SharedSecretKey::new(&[20; 32], &recipient_keys.vpk()),
identifier: 0,
},
], ],
vec![alice_keys.nsk],
vec![proof_pda_0, None],
&program_with_deps, &program_with_deps,
) )
.unwrap(); .unwrap();
@ -587,13 +608,19 @@ mod tests {
AccountWithMetadata::new(Account::default(), false, recipient_1_id), AccountWithMetadata::new(Account::default(), false, recipient_1_id),
], ],
Program::serialize_instruction((seed, amount, auth_transfer_id)).unwrap(), Program::serialize_instruction((seed, amount, auth_transfer_id)).unwrap(),
vec![3, 2],
vec![ vec![
(alice_npk, 1, alice_shared_1.clone()), InputAccountIdentity::PrivatePdaUpdate {
(recipient_keys.npk(), 1, SharedSecretKey::new(&[21; 32], &recipient_keys.vpk())), ssk: alice_shared_1.clone(),
nsk: alice_keys.nsk,
membership_proof: proof_pda_1.expect("pda_1 commitment must be in the set"),
identifier: 1,
},
InputAccountIdentity::PrivateUnauthorized {
npk: recipient_keys.npk(),
ssk: SharedSecretKey::new(&[21; 32], &recipient_keys.vpk()),
identifier: 1,
},
], ],
vec![alice_keys.nsk],
vec![proof_pda_1, None],
&program_with_deps, &program_with_deps,
) )
.unwrap(); .unwrap();
@ -601,4 +628,108 @@ mod tests {
assert_eq!(output_spend_1.new_commitments.len(), 2); assert_eq!(output_spend_1.new_commitments.len(), 2);
assert_eq!(output_spend_1.new_nullifiers.len(), 2); assert_eq!(output_spend_1.new_nullifiers.len(), 2);
} }
/// Group PDA deposit: creates a new private PDA and transfers balance from
/// the counterparty. Both accounts are owned by `private_pda_spender`.
#[test]
fn group_pda_deposit() {
    let program = Program::private_pda_spender();
    let noop = Program::noop();
    let keys = test_private_account_keys_1();
    let npk = keys.npk();
    let seed = PdaSeed::new([42; 32]);
    let shared_secret_pda = SharedSecretKey::new(&[55; 32], &keys.vpk());
    // PDA pre-state: brand new, so it is passed as `PrivatePdaInit` below.
    let pda_id = AccountId::for_private_pda(&program.id(), &seed, &npk, 0);
    let pda_pre = AccountWithMetadata::new(Account::default(), false, pda_id);
    // Sender pre-state: public, owned by this program, pre-funded.
    let sender_id = AccountId::new([99; 32]);
    let sender_pre = AccountWithMetadata::new(
        Account {
            program_owner: program.id(),
            balance: 10000,
            ..Account::default()
        },
        true,
        sender_id,
    );
    let noop_id = noop.id();
    let program_with_deps = ProgramWithDependencies::new(program, [(noop_id, noop)].into());
    let instruction = Program::serialize_instruction((seed, noop_id, 500_u128, true)).unwrap();
    // The PDA is declared as `PrivatePdaInit`, the sender as `Public`.
    // The noop chained call is required to establish the private-PDA
    // (seed, npk) binding that the circuit enforces. Without a caller
    // providing pda_seeds, the circuit's binding check rejects the account.
    let result = execute_and_prove(
        vec![pda_pre, sender_pre],
        instruction,
        vec![
            InputAccountIdentity::PrivatePdaInit {
                npk,
                ssk: shared_secret_pda,
                identifier: 0,
            },
            InputAccountIdentity::Public,
        ],
        &program_with_deps,
    );
    let (output, _proof) = result.expect("group PDA deposit should succeed");
    // Only the private PDA produces a commitment; the public sender does not.
    assert_eq!(output.new_commitments.len(), 1);
}
/// Group PDA spend binding: the noop chained call with `pda_seeds` establishes
/// the private-PDA (seed, npk) binding for an existing-but-default PDA. Uses
/// amount=0 because testing with a pre-funded PDA requires a two-tx sequence
/// with membership proofs.
#[test]
fn group_pda_spend_binding() {
    let program = Program::private_pda_spender();
    let noop = Program::noop();
    let keys = test_private_account_keys_1();
    let npk = keys.npk();
    let seed = PdaSeed::new([42; 32]);
    let shared_secret_pda = SharedSecretKey::new(&[55; 32], &keys.vpk());
    // Default (never-initialized) PDA, so it enters as `PrivatePdaInit`.
    let pda_id = AccountId::for_private_pda(&program.id(), &seed, &npk, 0);
    let pda_pre = AccountWithMetadata::new(Account::default(), false, pda_id);
    // Public counterparty owned by this program, pre-funded.
    let bob_id = AccountId::new([88; 32]);
    let bob_pre = AccountWithMetadata::new(
        Account {
            program_owner: program.id(),
            balance: 10000,
            ..Account::default()
        },
        true,
        bob_id,
    );
    let noop_id = noop.id();
    let program_with_deps = ProgramWithDependencies::new(program, [(noop_id, noop)].into());
    // amount = 0 and `false` for the deposit flag: only the binding is tested.
    let instruction = Program::serialize_instruction((seed, noop_id, 0_u128, false)).unwrap();
    let result = execute_and_prove(
        vec![pda_pre, bob_pre],
        instruction,
        vec![
            InputAccountIdentity::PrivatePdaInit {
                npk,
                ssk: shared_secret_pda,
                identifier: 0,
            },
            InputAccountIdentity::Public,
        ],
        &program_with_deps,
    );
    let (output, _proof) = result.expect("group PDA spend binding should succeed");
    // The private PDA yields exactly one new commitment.
    assert_eq!(output.new_commitments.len(), 1);
}
} }

View File

@ -313,6 +313,16 @@ mod tests {
} }
} }
/// Test program used by the group-PDA tests: its ELF/ID come from
/// `test_program_methods`. See the `group_pda_*` tests for how it is driven.
#[must_use]
pub fn private_pda_spender() -> Self {
    use test_program_methods::{PRIVATE_PDA_SPENDER_ELF, PRIVATE_PDA_SPENDER_ID};
    Self {
        id: PRIVATE_PDA_SPENDER_ID,
        elf: PRIVATE_PDA_SPENDER_ELF.to_vec(),
    }
}
#[must_use] #[must_use]
pub fn two_pda_claimer() -> Self { pub fn two_pda_claimer() -> Self {
use test_program_methods::{TWO_PDA_CLAIMER_ELF, TWO_PDA_CLAIMER_ID}; use test_program_methods::{TWO_PDA_CLAIMER_ELF, TWO_PDA_CLAIMER_ID};

File diff suppressed because it is too large Load Diff

View File

@ -5,9 +5,9 @@ use std::{
use nssa_core::{ use nssa_core::{
Commitment, CommitmentSetDigest, DUMMY_COMMITMENT_HASH, EncryptionScheme, Identifier, Commitment, CommitmentSetDigest, DUMMY_COMMITMENT_HASH, EncryptionScheme, Identifier,
PrivateAccountKind, InputAccountIdentity, MembershipProof, Nullifier, NullifierPublicKey, NullifierSecretKey,
MembershipProof, Nullifier, NullifierPublicKey, NullifierSecretKey, PrivateAccountKind, PrivacyPreservingCircuitInput, PrivacyPreservingCircuitOutput,
PrivacyPreservingCircuitInput, PrivacyPreservingCircuitOutput, SharedSecretKey, SharedSecretKey,
account::{Account, AccountId, AccountWithMetadata, Nonce}, account::{Account, AccountId, AccountWithMetadata, Nonce},
compute_digest_for_path, compute_digest_for_path,
program::{ program::{
@ -18,23 +18,24 @@ use nssa_core::{
}; };
use risc0_zkvm::{guest::env, serde::to_vec}; use risc0_zkvm::{guest::env, serde::to_vec};
/// State of the involved accounts before and after program execution. /// State of the involved accounts before and after program execution.
struct ExecutionState { struct ExecutionState {
pre_states: Vec<AccountWithMetadata>, pre_states: Vec<AccountWithMetadata>,
post_states: HashMap<AccountId, Account>, post_states: HashMap<AccountId, Account>,
block_validity_window: BlockValidityWindow, block_validity_window: BlockValidityWindow,
timestamp_validity_window: TimestampValidityWindow, timestamp_validity_window: TimestampValidityWindow,
/// Positions (in `pre_states`) of mask-3 accounts whose supplied npk has been bound to /// Positions (in `pre_states`) of private-PDA accounts whose supplied npk has been bound to
/// their `AccountId` via a proven `AccountId::for_private_pda(program_id, seed, npk, identifier)` /// their `AccountId` via a proven `AccountId::for_private_pda(program_id, seed, npk, identifier)`
/// check. /// check.
/// Two proof paths populate this set: a `Claim::Pda(seed)` in a program's `post_state` on /// Two proof paths populate this set: a `Claim::Pda(seed)` in a program's `post_state` on
/// that `pre_state`, or a caller's `ChainedCall.pda_seeds` entry matching that `pre_state` /// that `pre_state`, or a caller's `ChainedCall.pda_seeds` entry matching that `pre_state`
/// under the private derivation. Binding is an idempotent property, not an event: the same /// under the private derivation. Binding is an idempotent property, not an event: the same
/// position can legitimately be bound through both paths in the same tx (e.g. a program /// position can legitimately be bound through both paths in the same tx (e.g. a program
/// claims a private PDA and then delegates it to a callee), and the set uses `contains`, /// claims a private PDA and then delegates it to a callee), and the map uses `contains_key`,
/// not `assert!(insert)`. After the main loop, every mask-3 position must appear in this /// not `assert!(insert)`. After the main loop, every private-PDA position must appear in this
/// set; otherwise the npk is unbound and the circuit rejects. /// map; otherwise the npk is unbound and the circuit rejects.
/// The stored `(ProgramId, PdaSeed)` is the owner program and seed, used in
/// `compute_circuit_output` to construct `PrivateAccountKind::Pda { program_id, seed, identifier }`.
private_pda_bound_positions: HashMap<usize, (ProgramId, PdaSeed)>, private_pda_bound_positions: HashMap<usize, (ProgramId, PdaSeed)>,
/// Across the whole transaction, each `(program_id, seed)` pair may resolve to at most one /// Across the whole transaction, each `(program_id, seed)` pair may resolve to at most one
/// `AccountId`. A seed under a program can derive a family of accounts, one public PDA and /// `AccountId`. A seed under a program can derive a family of accounts, one public PDA and
@ -45,39 +46,30 @@ struct ExecutionState {
/// `AccountId` entry or as an equality check against the existing one, making the rule: one /// `AccountId` entry or as an equality check against the existing one, making the rule: one
/// `(program, seed)` → one account per tx. /// `(program, seed)` → one account per tx.
pda_family_binding: HashMap<(ProgramId, PdaSeed), AccountId>, pda_family_binding: HashMap<(ProgramId, PdaSeed), AccountId>,
/// Map from a mask-3 `pre_state`'s position in `visibility_mask` to the (npk, identifier) /// Map from a private-PDA `pre_state`'s position in `account_identities` to the (npk,
/// supplied for that position in `private_account_keys`. Built once in `derive_from_outputs` /// identifier) supplied for that position. Built once in `derive_from_outputs` by walking
/// by walking `visibility_mask` in lock-step with `private_account_keys`, used later by the /// `account_identities` and consulting `npk_if_private_pda`. Used later by the claim and
/// claim and caller-seeds authorization paths. /// caller-seeds authorization paths to verify
/// `AccountId::for_private_pda(program_id, seed, npk, identifier) == pre_state.account_id`.
private_pda_npk_by_position: HashMap<usize, (NullifierPublicKey, Identifier)>, private_pda_npk_by_position: HashMap<usize, (NullifierPublicKey, Identifier)>,
} }
impl ExecutionState { impl ExecutionState {
/// Validate program outputs and derive the overall execution state. /// Validate program outputs and derive the overall execution state.
pub fn derive_from_outputs( pub fn derive_from_outputs(
visibility_mask: &[u8], account_identities: &[InputAccountIdentity],
private_account_keys: &[(NullifierPublicKey, Identifier, SharedSecretKey)],
program_id: ProgramId, program_id: ProgramId,
program_outputs: Vec<ProgramOutput>, program_outputs: Vec<ProgramOutput>,
) -> Self { ) -> Self {
// Build position → npk map for mask-3 pre_states. `private_account_keys` is consumed in // Build position → (npk, identifier) map for private-PDA pre_states, indexed by position
// pre_state order across all masks 1/2/3, so walk `visibility_mask` in lock-step. The // in `account_identities`. The vec is documented as 1:1 with the program's pre_state
// downstream `compute_circuit_output` also consumes the same iterator and its trailing // order, so position here matches `pre_state_position` used downstream in
// assertions catch an over-supply of keys; under-supply surfaces here. // `validate_and_sync_states`.
let mut private_pda_npk_by_position: HashMap<usize, (NullifierPublicKey, Identifier)> = HashMap::new(); let mut private_pda_npk_by_position: HashMap<usize, (NullifierPublicKey, Identifier)> =
{ HashMap::new();
let mut keys_iter = private_account_keys.iter(); for (pos, account_identity) in account_identities.iter().enumerate() {
for (pos, &mask) in visibility_mask.iter().enumerate() { if let Some((npk, identifier)) = account_identity.npk_if_private_pda() {
if matches!(mask, 1..=3) { private_pda_npk_by_position.insert(pos, (npk, identifier));
let (npk, identifier, _) = keys_iter.next().unwrap_or_else(|| {
panic!(
"private_account_keys shorter than visibility_mask demands: no key for masked position {pos} (mask {mask})"
)
});
if mask == 3 {
private_pda_npk_by_position.insert(pos, (*npk, *identifier));
}
}
} }
} }
@ -194,7 +186,7 @@ impl ExecutionState {
} }
execution_state.validate_and_sync_states( execution_state.validate_and_sync_states(
visibility_mask, account_identities,
chained_call.program_id, chained_call.program_id,
caller_program_id, caller_program_id,
&chained_call.pda_seeds, &chained_call.pda_seeds,
@ -211,12 +203,12 @@ impl ExecutionState {
"Inner call without a chained call found", "Inner call without a chained call found",
); );
// Every mask-3 pre_state must have had its npk bound to its account_id, either via a // Every private-PDA pre_state must have had its npk bound to its account_id, either via
// `Claim::Pda(seed)` in some program's post_state or via a caller's `pda_seeds` matching // a `Claim::Pda(seed)` in some program's post_state or via a caller's `pda_seeds`
// the private derivation. An unbound mask-3 pre_state has no cryptographic link between // matching the private derivation. An unbound private-PDA pre_state has no
// the supplied npk and the account_id, and must be rejected. // cryptographic link between the supplied npk and the account_id, and must be rejected.
for (pos, &mask) in visibility_mask.iter().enumerate() { for (pos, account_identity) in account_identities.iter().enumerate() {
if mask == 3 { if account_identity.is_private_pda() {
assert!( assert!(
execution_state.private_pda_bound_positions.contains_key(&pos), execution_state.private_pda_bound_positions.contains_key(&pos),
"private PDA pre_state at position {pos} has no proven (seed, npk) binding via Claim::Pda or caller pda_seeds" "private PDA pre_state at position {pos} has no proven (seed, npk) binding via Claim::Pda or caller pda_seeds"
@ -251,7 +243,7 @@ impl ExecutionState {
/// Validate program pre and post states and populate the execution state. /// Validate program pre and post states and populate the execution state.
fn validate_and_sync_states( fn validate_and_sync_states(
&mut self, &mut self,
visibility_mask: &[u8], account_identities: &[InputAccountIdentity],
program_id: ProgramId, program_id: ProgramId,
caller_program_id: Option<ProgramId>, caller_program_id: Option<ProgramId>,
caller_pda_seeds: &[PdaSeed], caller_pda_seeds: &[PdaSeed],
@ -329,9 +321,9 @@ impl ExecutionState {
.position(|acc| acc.account_id == pre_account_id) .position(|acc| acc.account_id == pre_account_id)
.expect("Pre state must exist at this point"); .expect("Pre state must exist at this point");
let mask = visibility_mask[pre_state_position]; let account_identity = &account_identities[pre_state_position];
match mask { if account_identity.is_public() {
0 => match claim { match claim {
Claim::Authorized => { Claim::Authorized => {
// Note: no need to check authorized pdas because we have already // Note: no need to check authorized pdas because we have already
// checked consistency of authorization above. // checked consistency of authorization above.
@ -353,35 +345,40 @@ impl ExecutionState {
pre_account_id, pre_account_id,
); );
} }
}, }
3 => { } else if account_identity.is_private_pda() {
match claim { match claim {
Claim::Authorized => {} Claim::Authorized => {
Claim::Pda(seed) => { assert!(
let (npk, identifier) = self pre_is_authorized,
"Cannot claim unauthorized private PDA {pre_account_id}"
);
}
Claim::Pda(seed) => {
let (npk, identifier) = self
.private_pda_npk_by_position .private_pda_npk_by_position
.get(&pre_state_position) .get(&pre_state_position)
.expect("private PDA pre_state must have an npk in the position map"); .expect(
let pda = AccountId::for_private_pda(&program_id, &seed, npk, *identifier); "private PDA pre_state must have an npk in the position map",
assert_eq!(
pre_account_id, pda,
"Invalid private PDA claim for account {pre_account_id}"
); );
self.private_pda_bound_positions.insert(pre_state_position, (program_id, seed)); let pda = AccountId::for_private_pda(&program_id, &seed, npk, *identifier);
assert_family_binding( assert_eq!(
&mut self.pda_family_binding, pre_account_id, pda,
program_id, "Invalid private PDA claim for account {pre_account_id}"
seed, );
pre_account_id, self.private_pda_bound_positions.insert(pre_state_position, (program_id, seed));
); assert_family_binding(
} &mut self.pda_family_binding,
program_id,
seed,
pre_account_id,
);
} }
} }
_ => { } else {
// Mask 1/2: standard private accounts don't enforce the claim semantics. // Standalone private accounts: don't enforce the claim semantics.
// Unauthorized private claiming is intentionally allowed since operating // Unauthorized private claiming is intentionally allowed since operating
// these accounts requires the npk/nsk keypair anyway. // these accounts requires the npk/nsk keypair anyway.
}
} }
post.account_mut().program_owner = program_id; post.account_mut().program_owner = program_id;
@ -483,10 +480,7 @@ fn resolve_authorization_and_record_bindings(
fn compute_circuit_output( fn compute_circuit_output(
mut execution_state: ExecutionState, mut execution_state: ExecutionState,
visibility_mask: &[u8], account_identities: &[InputAccountIdentity],
private_account_keys: &[(NullifierPublicKey, Identifier, SharedSecretKey)],
private_account_nsks: &[NullifierSecretKey],
private_account_membership_proofs: &[Option<MembershipProof>],
) -> PrivacyPreservingCircuitOutput { ) -> PrivacyPreservingCircuitOutput {
let mut output = PrivacyPreservingCircuitOutput { let mut output = PrivacyPreservingCircuitOutput {
public_pre_states: Vec::new(), public_pre_states: Vec::new(),
@ -501,287 +495,273 @@ fn compute_circuit_output(
let pda_seed_by_position = std::mem::take(&mut execution_state.private_pda_bound_positions); let pda_seed_by_position = std::mem::take(&mut execution_state.private_pda_bound_positions);
let states_iter = execution_state.into_states_iter(); let states_iter = execution_state.into_states_iter();
assert_eq!( assert_eq!(
visibility_mask.len(), account_identities.len(),
states_iter.len(), states_iter.len(),
"Invalid visibility mask length" "Invalid account_identities length"
); );
let mut private_keys_iter = private_account_keys.iter();
let mut private_nsks_iter = private_account_nsks.iter();
let mut private_membership_proofs_iter = private_account_membership_proofs.iter();
let mut output_index = 0; let mut output_index = 0;
for (pos, (account_visibility_mask, (pre_state, post_state))) in for (pos, (account_identity, (pre_state, post_state))) in
visibility_mask.iter().copied().zip(states_iter).enumerate() account_identities.iter().zip(states_iter).enumerate()
{ {
match account_visibility_mask { match account_identity {
0 => { InputAccountIdentity::Public => {
// Public account
output.public_pre_states.push(pre_state); output.public_pre_states.push(pre_state);
output.public_post_states.push(post_state); output.public_post_states.push(post_state);
} }
1 | 2 => { InputAccountIdentity::PrivateAuthorizedInit {
let Some((npk, identifier, shared_secret)) = private_keys_iter.next() else { ssk,
panic!("Missing private account key"); nsk,
}; identifier,
} => {
let npk = NullifierPublicKey::from(nsk);
let account_id = AccountId::from((&npk, *identifier));
assert_eq!(account_id, pre_state.account_id, "AccountId mismatch");
assert!(
pre_state.is_authorized,
"Pre-state not authorized for authenticated private account"
);
assert_eq!(
pre_state.account,
Account::default(),
"Found new private account with non default values"
);
let new_nullifier = (
Nullifier::for_account_initialization(&account_id),
DUMMY_COMMITMENT_HASH,
);
let new_nonce = pre_state.account.nonce.private_account_nonce_increment(nsk);
emit_private_output(
&mut output,
&mut output_index,
post_state,
&account_id,
&PrivateAccountKind::Regular(*identifier),
ssk,
new_nullifier,
new_nonce,
);
}
InputAccountIdentity::PrivateAuthorizedUpdate {
ssk,
nsk,
membership_proof,
identifier,
} => {
let npk = NullifierPublicKey::from(nsk);
let account_id = AccountId::from((&npk, *identifier));
assert_eq!(account_id, pre_state.account_id, "AccountId mismatch");
assert!(
pre_state.is_authorized,
"Pre-state not authorized for authenticated private account"
);
let new_nullifier = compute_update_nullifier_and_set_digest(
membership_proof,
&pre_state.account,
&account_id,
nsk,
);
let new_nonce = pre_state.account.nonce.private_account_nonce_increment(nsk);
emit_private_output(
&mut output,
&mut output_index,
post_state,
&account_id,
&PrivateAccountKind::Regular(*identifier),
ssk,
new_nullifier,
new_nonce,
);
}
InputAccountIdentity::PrivateUnauthorized {
npk,
ssk,
identifier,
} => {
let account_id = AccountId::from((npk, *identifier)); let account_id = AccountId::from((npk, *identifier));
assert_eq!(account_id, pre_state.account_id, "AccountId mismatch"); assert_eq!(account_id, pre_state.account_id, "AccountId mismatch");
assert_eq!(
let (new_nullifier, new_nonce) = if account_visibility_mask == 1 { pre_state.account,
// Private account with authentication Account::default(),
"Found new private account with non default values",
let Some(nsk) = private_nsks_iter.next() else { );
panic!("Missing private account nullifier secret key"); assert!(
}; !pre_state.is_authorized,
"Found new private account marked as authorized."
// Verify the nullifier public key
assert_eq!(
npk,
&NullifierPublicKey::from(nsk),
"Nullifier public key mismatch"
);
// Check pre_state authorization
assert!(
pre_state.is_authorized,
"Pre-state not authorized for authenticated private account"
);
let Some(membership_proof_opt) = private_membership_proofs_iter.next() else {
panic!("Missing membership proof");
};
let new_nullifier = compute_nullifier_and_set_digest(
membership_proof_opt.as_ref(),
&pre_state.account,
&account_id,
nsk,
);
let new_nonce = pre_state.account.nonce.private_account_nonce_increment(nsk);
(new_nullifier, new_nonce)
} else {
// Private account without authentication
assert_eq!(
pre_state.account,
Account::default(),
"Found new private account with non default values",
);
assert!(
!pre_state.is_authorized,
"Found new private account marked as authorized."
);
let Some(membership_proof_opt) = private_membership_proofs_iter.next() else {
panic!("Missing membership proof");
};
assert!(
membership_proof_opt.is_none(),
"Membership proof must be None for unauthorized accounts"
);
let nullifier = Nullifier::for_account_initialization(&account_id);
let new_nonce = Nonce::private_account_nonce_init(&account_id);
((nullifier, DUMMY_COMMITMENT_HASH), new_nonce)
};
output.new_nullifiers.push(new_nullifier);
// Update post-state with new nonce
let mut post_with_updated_nonce = post_state;
post_with_updated_nonce.nonce = new_nonce;
// Compute commitment
let commitment_post = Commitment::new(&account_id, &post_with_updated_nonce);
// Encrypt and push post state
let encrypted_account = EncryptionScheme::encrypt(
&post_with_updated_nonce,
&PrivateAccountKind::Regular(*identifier),
shared_secret,
&commitment_post,
output_index,
); );
output.new_commitments.push(commitment_post); let new_nullifier = (
output.ciphertexts.push(encrypted_account); Nullifier::for_account_initialization(&account_id),
output_index = output_index DUMMY_COMMITMENT_HASH,
.checked_add(1) );
.unwrap_or_else(|| panic!("Too many private accounts, output index overflow")); let new_nonce = Nonce::private_account_nonce_init(&account_id);
emit_private_output(
&mut output,
&mut output_index,
post_state,
&account_id,
&PrivateAccountKind::Regular(*identifier),
ssk,
new_nullifier,
new_nonce,
);
} }
3 => { InputAccountIdentity::PrivatePdaInit { npk: _, ssk, identifier } => {
// Private PDA account. The supplied npk has already been bound to // The npk-to-account_id binding is established upstream in
// `pre_state.account_id` upstream in `validate_and_sync_states`, either via a // `validate_and_sync_states` via `Claim::Pda(seed)` or a caller `pda_seeds`
// `Claim::Pda(seed)` match or via a caller `pda_seeds` match, both of which // match. Here we only enforce the init pre-conditions. The supplied npk on
// assert `AccountId::for_private_pda(owner, seed, npk, identifier) == account_id`. The // the variant has been recorded into `private_pda_npk_by_position` and used
// post-loop assertion in `derive_from_outputs` (see the // for the binding check; we use `pre_state.account_id` directly for nullifier
// `private_pda_bound_positions` check) guarantees that every mask-3 // and commitment derivation.
// position has been through at least one such binding, so this assert!(
// branch can safely use the wallet npk without re-verifying. !pre_state.is_authorized,
let Some((npk, identifier, shared_secret)) = private_keys_iter.next() else { "PrivatePdaInit requires unauthorized pre_state"
panic!("Missing private account key"); );
}; assert_eq!(
pre_state.account,
Account::default(),
"New private PDA must be default"
);
let (new_nullifier, new_nonce) = if pre_state.is_authorized { let new_nullifier = (
// Existing private PDA with authentication (like mask 1) Nullifier::for_account_initialization(&pre_state.account_id),
let Some(nsk) = private_nsks_iter.next() else { DUMMY_COMMITMENT_HASH,
panic!("Missing private account nullifier secret key"); );
}; let new_nonce = Nonce::private_account_nonce_init(&pre_state.account_id);
assert_eq!(
npk,
&NullifierPublicKey::from(nsk),
"Nullifier public key mismatch"
);
let Some(membership_proof_opt) = private_membership_proofs_iter.next() else {
panic!("Missing membership proof");
};
let new_nullifier = compute_nullifier_and_set_digest(
membership_proof_opt.as_ref(),
&pre_state.account,
&pre_state.account_id,
nsk,
);
let new_nonce = pre_state.account.nonce.private_account_nonce_increment(nsk);
(new_nullifier, new_nonce)
} else {
// New private PDA (like mask 2). The default + unauthorized requirement
// here rules out use cases like a fully-private multisig, which would need
// a non-default, non-authorized private PDA input account.
// TODO(private-pdas-pr-2/3): relax this once the wallet can supply a
// `(seed, owner)` side input so the npk-to-account_id binding can be
// re-verified for an existing private PDA without a `Claim::Pda` or caller
// `pda_seeds` match.
assert_eq!(
pre_state.account,
Account::default(),
"New private PDA must be default"
);
let Some(membership_proof_opt) = private_membership_proofs_iter.next() else {
panic!("Missing membership proof");
};
assert!(
membership_proof_opt.is_none(),
"Membership proof must be None for new accounts"
);
let nullifier = Nullifier::for_account_initialization(&pre_state.account_id);
let new_nonce = Nonce::private_account_nonce_init(&pre_state.account_id);
((nullifier, DUMMY_COMMITMENT_HASH), new_nonce)
};
output.new_nullifiers.push(new_nullifier);
let mut post_with_updated_nonce = post_state;
post_with_updated_nonce.nonce = new_nonce;
let commitment_post =
Commitment::new(&pre_state.account_id, &post_with_updated_nonce);
let account_id = pre_state.account_id;
let (pda_program_id, seed) = pda_seed_by_position let (pda_program_id, seed) = pda_seed_by_position
.get(&pos) .get(&pos)
.expect("mask-3 position must be in pda_seed_by_position"); .expect("PrivatePdaInit position must be in pda_seed_by_position");
let encrypted_account = EncryptionScheme::encrypt( emit_private_output(
&post_with_updated_nonce, &mut output,
&mut output_index,
post_state,
&account_id,
&PrivateAccountKind::Pda { &PrivateAccountKind::Pda {
program_id: *pda_program_id, program_id: *pda_program_id,
seed: *seed, seed: *seed,
identifier: *identifier, identifier: *identifier,
}, },
shared_secret, ssk,
&commitment_post, new_nullifier,
output_index, new_nonce,
);
}
InputAccountIdentity::PrivatePdaUpdate {
ssk,
nsk,
membership_proof,
identifier,
} => {
// The npk binding is established upstream. Authorization must already be set;
// an unauthorized PrivatePdaUpdate would mean the prover supplied an nsk for an
// unbound PDA, which the upstream binding check would have rejected anyway,
// but we assert here to fail fast and document the precondition.
assert!(
pre_state.is_authorized,
"PrivatePdaUpdate requires authorized pre_state"
); );
output.new_commitments.push(commitment_post); let new_nullifier = compute_update_nullifier_and_set_digest(
output.ciphertexts.push(encrypted_account); membership_proof,
output_index = output_index &pre_state.account,
.checked_add(1) &pre_state.account_id,
.unwrap_or_else(|| panic!("Too many private accounts, output index overflow")); nsk,
);
let new_nonce = pre_state.account.nonce.private_account_nonce_increment(nsk);
let account_id = pre_state.account_id;
let (pda_program_id, seed) = pda_seed_by_position
.get(&pos)
.expect("PrivatePdaUpdate position must be in pda_seed_by_position");
emit_private_output(
&mut output,
&mut output_index,
post_state,
&account_id,
&PrivateAccountKind::Pda {
program_id: *pda_program_id,
seed: *seed,
identifier: *identifier,
},
ssk,
new_nullifier,
new_nonce,
);
} }
_ => panic!("Invalid visibility mask value"),
} }
} }
assert!(
private_keys_iter.next().is_none(),
"Too many private account keys"
);
assert!(
private_nsks_iter.next().is_none(),
"Too many private account nullifier secret keys"
);
assert!(
private_membership_proofs_iter.next().is_none(),
"Too many private account membership proofs"
);
output output
} }
fn compute_nullifier_and_set_digest( #[expect(
membership_proof_opt: Option<&MembershipProof>, clippy::too_many_arguments,
reason = "All seven inputs are distinct concerns from the variant arms; bundling would be artificial"
)]
fn emit_private_output(
output: &mut PrivacyPreservingCircuitOutput,
output_index: &mut u32,
post_state: Account,
account_id: &AccountId,
kind: &PrivateAccountKind,
shared_secret: &SharedSecretKey,
new_nullifier: (Nullifier, CommitmentSetDigest),
new_nonce: Nonce,
) {
output.new_nullifiers.push(new_nullifier);
let mut post_with_updated_nonce = post_state;
post_with_updated_nonce.nonce = new_nonce;
let commitment_post = Commitment::new(account_id, &post_with_updated_nonce);
let encrypted_account = EncryptionScheme::encrypt(
&post_with_updated_nonce,
kind,
shared_secret,
&commitment_post,
*output_index,
);
output.new_commitments.push(commitment_post);
output.ciphertexts.push(encrypted_account);
*output_index = output_index
.checked_add(1)
.unwrap_or_else(|| panic!("Too many private accounts, output index overflow"));
}
fn compute_update_nullifier_and_set_digest(
membership_proof: &MembershipProof,
pre_account: &Account, pre_account: &Account,
account_id: &AccountId, account_id: &AccountId,
nsk: &NullifierSecretKey, nsk: &NullifierSecretKey,
) -> (Nullifier, CommitmentSetDigest) { ) -> (Nullifier, CommitmentSetDigest) {
membership_proof_opt.as_ref().map_or_else( let commitment_pre = Commitment::new(account_id, pre_account);
|| { let set_digest = compute_digest_for_path(&commitment_pre, membership_proof);
assert_eq!( let nullifier = Nullifier::for_account_update(&commitment_pre, nsk);
*pre_account, (nullifier, set_digest)
Account::default(),
"Found new private account with non default values"
);
// Compute initialization nullifier
let nullifier = Nullifier::for_account_initialization(account_id);
(nullifier, DUMMY_COMMITMENT_HASH)
},
|membership_proof| {
// Compute commitment set digest associated with provided auth path
let commitment_pre = Commitment::new(account_id, pre_account);
let set_digest = compute_digest_for_path(&commitment_pre, membership_proof);
// Compute update nullifier
let nullifier = Nullifier::for_account_update(&commitment_pre, nsk);
(nullifier, set_digest)
},
)
} }
fn main() { fn main() {
let PrivacyPreservingCircuitInput { let PrivacyPreservingCircuitInput {
program_outputs, program_outputs,
visibility_mask, account_identities,
private_account_keys,
private_account_nsks,
private_account_membership_proofs,
program_id, program_id,
} = env::read(); } = env::read();
let execution_state = ExecutionState::derive_from_outputs( let execution_state =
&visibility_mask, ExecutionState::derive_from_outputs(&account_identities, program_id, program_outputs);
&private_account_keys,
program_id,
program_outputs,
);
let output = compute_circuit_output( let output = compute_circuit_output(execution_state, &account_identities);
execution_state,
&visibility_mask,
&private_account_keys,
&private_account_nsks,
&private_account_membership_proofs,
);
env::commit(&output); env::commit(&output);
} }

View File

@ -13,7 +13,7 @@ nssa_core.workspace = true
common.workspace = true common.workspace = true
storage.workspace = true storage.workspace = true
mempool.workspace = true mempool.workspace = true
bedrock_client.workspace = true logos-blockchain-zone-sdk.workspace = true
testnet_initial_state.workspace = true testnet_initial_state.workspace = true
anyhow.workspace = true anyhow.workspace = true
@ -30,7 +30,6 @@ rand.workspace = true
borsh.workspace = true borsh.workspace = true
bytesize.workspace = true bytesize.workspace = true
url.workspace = true url.workspace = true
jsonrpsee = { workspace = true, features = ["ws-client"] }
[features] [features]
default = [] default = []

View File

@ -0,0 +1,136 @@
use std::{sync::Arc, time::Duration};
use anyhow::{Context as _, Result, anyhow};
use common::block::Block;
use log::warn;
pub use logos_blockchain_core::mantle::ops::channel::MsgId;
pub use logos_blockchain_key_management_system_service::keys::Ed25519Key;
pub use logos_blockchain_zone_sdk::sequencer::SequencerCheckpoint;
use logos_blockchain_zone_sdk::{
CommonHttpClient,
adapter::NodeHttpClient,
sequencer::{Event, SequencerConfig as ZoneSdkSequencerConfig, SequencerHandle, ZoneSequencer},
state::InscriptionInfo,
};
use tokio::task::JoinHandle;
use crate::config::BedrockConfig;
/// Sink for `Event::Published` checkpoints emitted by the drive task.
/// Caller is responsible for persistence (e.g. writing to rocksdb).
/// Invoked on the spawned drive task, so the callback should not block.
pub type CheckpointSink = Box<dyn Fn(SequencerCheckpoint) + Send + Sync + 'static>;

/// Sink for finalized L2 block ids derived from `Event::TxsFinalized` and
/// `Event::FinalizedInscriptions`. Caller is responsible for cleanup
/// (e.g. marking pending blocks as finalized in storage).
/// Invoked on the spawned drive task, so the callback should not block.
pub type FinalizedBlockSink = Box<dyn Fn(u64) + Send + Sync + 'static>;
/// Abstraction over block publication to Bedrock; implemented by
/// [`ZoneSdkPublisher`].
#[expect(async_fn_in_trait, reason = "We don't care about Send/Sync here")]
pub trait BlockPublisherTrait: Clone {
    /// Construct the publisher.
    ///
    /// * `initial_checkpoint` — previously persisted sequencer checkpoint to
    ///   resume from; `None` indicates a fresh start.
    /// * `on_checkpoint` / `on_finalized_block` — sinks fired as publication
    ///   progresses; see the sink type aliases for who owns persistence.
    async fn new(
        config: &BedrockConfig,
        bedrock_signing_key: Ed25519Key,
        resubmit_interval: Duration,
        initial_checkpoint: Option<SequencerCheckpoint>,
        on_checkpoint: CheckpointSink,
        on_finalized_block: FinalizedBlockSink,
    ) -> Result<Self>;

    /// Fire-and-forget publish. Zone-sdk drives the actual submission and
    /// retries internally; this just hands the payload off.
    async fn publish_block(&self, block: &Block) -> Result<()>;
}
/// Real block publisher backed by zone-sdk's `ZoneSequencer`.
#[derive(Clone)]
pub struct ZoneSdkPublisher {
    // Handle used to hand payloads to the zone-sdk sequencer.
    handle: SequencerHandle<NodeHttpClient>,
    // Aborts the drive task when the last clone is dropped.
    _drive_task: Arc<DriveTaskGuard>,
}
// RAII guard around the event-pump task: dropping the last clone of the
// owning `Arc` aborts the task, so the drive loop cannot outlive the last
// `ZoneSdkPublisher` clone.
struct DriveTaskGuard(JoinHandle<()>);

impl Drop for DriveTaskGuard {
    fn drop(&mut self) {
        self.0.abort();
    }
}
impl BlockPublisherTrait for ZoneSdkPublisher {
    /// Build the zone-sdk sequencer, spawn its event drive loop, and wait
    /// until the handle reports ready before returning the publisher.
    async fn new(
        config: &BedrockConfig,
        bedrock_signing_key: Ed25519Key,
        resubmit_interval: Duration,
        initial_checkpoint: Option<SequencerCheckpoint>,
        on_checkpoint: CheckpointSink,
        on_finalized_block: FinalizedBlockSink,
    ) -> Result<Self> {
        // Optional basic-auth credentials from config, converted into the
        // form the HTTP client expects.
        let basic_auth = config.auth.clone().map(Into::into);
        let node = NodeHttpClient::new(CommonHttpClient::new(basic_auth), config.node_url.clone());
        // Only the resubmit interval is overridden; every other zone-sdk
        // sequencer setting keeps its default.
        let zone_sdk_config = ZoneSdkSequencerConfig {
            resubmit_interval,
            ..ZoneSdkSequencerConfig::default()
        };
        let (mut sequencer, mut handle) = ZoneSequencer::init_with_config(
            config.channel_id,
            bedrock_signing_key,
            node,
            zone_sdk_config,
            initial_checkpoint,
        );
        // Drive task: pumps sequencer events and forwards them to the
        // caller's sinks. Both sinks run on this task, so they must not block.
        let drive_task = tokio::spawn(async move {
            loop {
                // NOTE(review): a `None` event re-polls immediately; if
                // `next_event` ever returns `None` permanently (stream
                // ended), this would busy-loop — confirm zone-sdk's contract.
                let Some(event) = sequencer.next_event().await else {
                    continue;
                };
                match event {
                    Event::Published { checkpoint, .. } => on_checkpoint(checkpoint),
                    // Both finalization events carry inscriptions; only the
                    // highest decodable block id is reported downstream.
                    Event::TxsFinalized { inscriptions, .. }
                    | Event::FinalizedInscriptions { inscriptions } => {
                        if let Some(max_block_id) = max_block_id_from_inscriptions(&inscriptions) {
                            on_finalized_block(max_block_id);
                        }
                    }
                    // Informational events with no local side effects.
                    Event::ChannelUpdate { .. } | Event::Ready => {}
                }
            }
        });
        handle.wait_ready().await;
        Ok(Self {
            handle,
            // Guard aborts the drive task once the last publisher clone drops.
            _drive_task: Arc::new(DriveTaskGuard(drive_task)),
        })
    }

    /// Serialize the block with borsh and hand it off to the zone-sdk handle.
    /// Submission and retries are driven internally by zone-sdk; `Ok` here
    /// only means the payload was accepted for publishing, not settled.
    async fn publish_block(&self, block: &Block) -> Result<()> {
        let data = borsh::to_vec(block).context("Failed to serialize block")?;
        self.handle
            .publish_message(data)
            .await
            .map_err(|e| anyhow!("zone-sdk publish failed: {e}"))?;
        Ok(())
    }
}
/// Decode every inscription payload as a `Block` and report the largest
/// `block_id` observed. Payloads that fail to decode are warned about and
/// ignored; `None` means no payload decoded successfully.
fn max_block_id_from_inscriptions(inscriptions: &[InscriptionInfo]) -> Option<u64> {
    let mut highest: Option<u64> = None;
    for inscription in inscriptions {
        // Skip (with a warning) any payload that is not a valid borsh Block.
        let block = match borsh::from_slice::<Block>(&inscription.payload) {
            Ok(block) => block,
            Err(err) => {
                warn!("Failed to deserialize finalized inscription as Block: {err:#}");
                continue;
            }
        };
        let id = block.header.block_id;
        // Track the running maximum across all successfully decoded blocks.
        highest = Some(highest.map_or(id, |current| current.max(id)));
    }
    highest
}

View File

@ -1,116 +0,0 @@
use anyhow::{Context as _, Result};
use bedrock_client::BedrockClient;
pub use common::block::Block;
pub use logos_blockchain_core::mantle::{MantleTx, SignedMantleTx, ops::channel::MsgId};
use logos_blockchain_core::mantle::{
Op, OpProof, Transaction as _,
ops::channel::{ChannelId, inscribe::InscriptionOp},
};
pub use logos_blockchain_key_management_system_service::keys::Ed25519Key;
use logos_blockchain_key_management_system_service::keys::Ed25519PublicKey;
use crate::config::BedrockConfig;
#[expect(async_fn_in_trait, reason = "We don't care about Send/Sync here")]
pub trait BlockSettlementClientTrait: Clone {
    /// Create a new client.
    fn new(config: &BedrockConfig, signing_key: Ed25519Key) -> Result<Self>;

    /// Get the bedrock channel ID used by this client.
    fn bedrock_channel_id(&self) -> ChannelId;

    /// Get the bedrock signing key used by this client.
    fn bedrock_signing_key(&self) -> &Ed25519Key;

    /// Post a transaction to the node.
    async fn submit_inscribe_tx_to_bedrock(&self, tx: SignedMantleTx) -> Result<()>;

    /// Create and sign a transaction for inscribing data.
    ///
    /// Serializes `block` with borsh, wraps it in a `ChannelInscribe` op bound
    /// to this client's channel and signing key, signs the resulting
    /// `MantleTx`, and returns the signed tx together with the inscribe op id.
    fn create_inscribe_tx(&self, block: &Block) -> Result<(SignedMantleTx, MsgId)> {
        let inscription_data = borsh::to_vec(block)?;
        log::debug!(
            "The size of the block {} is {} bytes",
            block.header.block_id,
            inscription_data.len()
        );
        // Round-trip the public key through raw bytes to convert between the
        // signing-key type and the key-management-system public-key type.
        let verifying_key_bytes = self.bedrock_signing_key().public_key().to_bytes();
        let verifying_key =
            Ed25519PublicKey::from_bytes(&verifying_key_bytes).expect("valid ed25519 public key");
        let inscribe_op = InscriptionOp {
            channel_id: self.bedrock_channel_id(),
            inscription: inscription_data,
            // Links this inscription to its parent in the channel's chain.
            parent: block.bedrock_parent_id.into(),
            signer: verifying_key,
        };
        // Capture the op id before the op is moved into the transaction.
        let inscribe_op_id = inscribe_op.id();
        let inscribe_tx = MantleTx {
            ops: vec![Op::ChannelInscribe(inscribe_op)],
            // Altruistic test config
            storage_gas_price: 0.into(),
            execution_gas_price: 0.into(),
        };
        // Sign the transaction hash with the bedrock key; the resulting
        // signature becomes the op proof below.
        let tx_hash = inscribe_tx.hash();
        let signature_bytes = self
            .bedrock_signing_key()
            .sign_payload(tx_hash.as_signing_bytes().as_ref())
            .to_bytes();
        let signature =
            logos_blockchain_key_management_system_service::keys::Ed25519Signature::from_bytes(
                &signature_bytes,
            );
        let signed_mantle_tx = SignedMantleTx {
            ops_proofs: vec![OpProof::Ed25519Sig(signature)],
            mantle_tx: inscribe_tx,
        };
        Ok((signed_mantle_tx, inscribe_op_id))
    }
}
/// A component that posts block data to logos blockchain.
#[derive(Clone)]
pub struct BlockSettlementClient {
    // Underlying HTTP client with retry/backoff for the Bedrock node.
    client: BedrockClient,
    // Key used to sign inscribe transactions.
    signing_key: Ed25519Key,
    // Channel all inscriptions from this client are posted to.
    channel_id: ChannelId,
}
impl BlockSettlementClientTrait for BlockSettlementClient {
    /// Build the client from config: backoff policy, node URL, and optional
    /// auth are all taken from `config`.
    fn new(config: &BedrockConfig, signing_key: Ed25519Key) -> Result<Self> {
        let client =
            BedrockClient::new(config.backoff, config.node_url.clone(), config.auth.clone())
                .context("Failed to initialize bedrock client")?;
        Ok(Self {
            client,
            signing_key,
            channel_id: config.channel_id,
        })
    }

    /// Submit a signed inscribe transaction to the Bedrock node.
    ///
    /// Panics if the first op is not `ChannelInscribe` — callers are expected
    /// to pass transactions produced by `create_inscribe_tx`.
    async fn submit_inscribe_tx_to_bedrock(&self, tx: SignedMantleTx) -> Result<()> {
        // Extract identifiers for logging before `tx` is consumed by posting.
        let (parent_id, msg_id) = match tx.mantle_tx.ops.first() {
            Some(Op::ChannelInscribe(inscribe)) => (inscribe.parent, inscribe.id()),
            _ => panic!("Expected ChannelInscribe op"),
        };
        // Double `?`: the outer Result covers retry exhaustion, the inner one
        // a non-retryable rejection from the node.
        self.client
            .post_transaction(tx)
            .await
            .context("Failed to post transaction to Bedrock after retries")?
            .context("Failed to post transaction to Bedrock with non-retryable error")?;
        log::debug!("Posted block to Bedrock with parent id {parent_id:?} and msg id: {msg_id:?}");
        Ok(())
    }

    fn bedrock_channel_id(&self) -> ChannelId {
        self.channel_id
    }

    fn bedrock_signing_key(&self) -> &Ed25519Key {
        &self.signing_key
    }
}

View File

@ -1,16 +1,17 @@
use std::{collections::HashMap, path::Path}; use std::{collections::HashMap, path::Path, sync::Arc};
use anyhow::Result; use anyhow::{Context as _, Result};
use common::{ use common::{
HashType, HashType,
block::{Block, BlockMeta, MantleMsgId}, block::{Block, BlockMeta, MantleMsgId},
transaction::NSSATransaction, transaction::NSSATransaction,
}; };
use logos_blockchain_zone_sdk::sequencer::SequencerCheckpoint;
use nssa::V03State; use nssa::V03State;
use storage::{error::DbError, sequencer::RocksDBIO}; use storage::{error::DbError, sequencer::RocksDBIO};
pub struct SequencerStore { pub struct SequencerStore {
dbio: RocksDBIO, dbio: Arc<RocksDBIO>,
// TODO: Consider adding the hashmap to the database for faster recovery. // TODO: Consider adding the hashmap to the database for faster recovery.
tx_hash_to_block_map: HashMap<HashType, u64>, tx_hash_to_block_map: HashMap<HashType, u64>,
genesis_id: u64, genesis_id: u64,
@ -30,7 +31,11 @@ impl SequencerStore {
) -> Result<Self> { ) -> Result<Self> {
let tx_hash_to_block_map = block_to_transactions_map(genesis_block); let tx_hash_to_block_map = block_to_transactions_map(genesis_block);
let dbio = RocksDBIO::open_or_create(location, genesis_block, genesis_msg_id)?; let dbio = Arc::new(RocksDBIO::open_or_create(
location,
genesis_block,
genesis_msg_id,
)?);
let genesis_id = dbio.get_meta_first_block_in_db()?; let genesis_id = dbio.get_meta_first_block_in_db()?;
@ -42,6 +47,14 @@ impl SequencerStore {
}) })
} }
/// Shared handle to the underlying rocksdb. Used to persist the zone-sdk
/// checkpoint from the sequencer's drive task without needing &mut to the
/// store.
#[must_use]
pub fn dbio(&self) -> Arc<RocksDBIO> {
Arc::clone(&self.dbio)
}
pub fn get_block_at_id(&self, id: u64) -> Result<Option<Block>, DbError> { pub fn get_block_at_id(&self, id: u64) -> Result<Option<Block>, DbError> {
self.dbio.get_block(id) self.dbio.get_block(id)
} }
@ -55,6 +68,7 @@ impl SequencerStore {
} }
/// Returns the transaction corresponding to the given hash, if it exists in the blockchain. /// Returns the transaction corresponding to the given hash, if it exists in the blockchain.
#[must_use]
pub fn get_transaction_by_hash(&self, hash: HashType) -> Option<NSSATransaction> { pub fn get_transaction_by_hash(&self, hash: HashType) -> Option<NSSATransaction> {
let block_id = *self.tx_hash_to_block_map.get(&hash)?; let block_id = *self.tx_hash_to_block_map.get(&hash)?;
let block = self let block = self
@ -76,10 +90,12 @@ impl SequencerStore {
Ok(self.dbio.latest_block_meta()?) Ok(self.dbio.latest_block_meta()?)
} }
#[must_use]
pub const fn genesis_id(&self) -> u64 { pub const fn genesis_id(&self) -> u64 {
self.genesis_id self.genesis_id
} }
#[must_use]
pub const fn signing_key(&self) -> &nssa::PrivateKey { pub const fn signing_key(&self) -> &nssa::PrivateKey {
&self.signing_key &self.signing_key
} }
@ -100,9 +116,26 @@ impl SequencerStore {
Ok(()) Ok(())
} }
#[must_use]
pub fn get_nssa_state(&self) -> Option<V03State> { pub fn get_nssa_state(&self) -> Option<V03State> {
self.dbio.get_nssa_state().ok() self.dbio.get_nssa_state().ok()
} }
pub fn get_zone_checkpoint(&self) -> Result<Option<SequencerCheckpoint>> {
let Some(bytes) = self.dbio.get_zone_sdk_checkpoint_bytes()? else {
return Ok(None);
};
let checkpoint: SequencerCheckpoint = serde_json::from_slice(&bytes)
.context("Failed to deserialize stored zone-sdk checkpoint")?;
Ok(Some(checkpoint))
}
pub fn set_zone_checkpoint(&self, checkpoint: &SequencerCheckpoint) -> Result<()> {
let bytes =
serde_json::to_vec(checkpoint).context("Failed to serialize zone-sdk checkpoint")?;
self.dbio.put_zone_sdk_checkpoint_bytes(&bytes)?;
Ok(())
}
} }
pub(crate) fn block_to_transactions_map(block: &Block) -> HashMap<HashType, u64> { pub(crate) fn block_to_transactions_map(block: &Block) -> HashMap<HashType, u64> {

View File

@ -6,7 +6,6 @@ use std::{
}; };
use anyhow::Result; use anyhow::Result;
use bedrock_client::BackoffConfig;
use bytesize::ByteSize; use bytesize::ByteSize;
use common::config::BasicAuth; use common::config::BasicAuth;
use humantime_serde; use humantime_serde;
@ -42,8 +41,6 @@ pub struct SequencerConfig {
pub signing_key: [u8; 32], pub signing_key: [u8; 32],
/// Bedrock configuration options. /// Bedrock configuration options.
pub bedrock_config: BedrockConfig, pub bedrock_config: BedrockConfig,
/// Indexer RPC URL.
pub indexer_rpc_url: Url,
#[serde(skip_serializing_if = "Option::is_none")] #[serde(skip_serializing_if = "Option::is_none")]
pub initial_public_accounts: Option<Vec<PublicAccountPublicInitialData>>, pub initial_public_accounts: Option<Vec<PublicAccountPublicInitialData>>,
#[serde(skip_serializing_if = "Option::is_none")] #[serde(skip_serializing_if = "Option::is_none")]
@ -52,9 +49,6 @@ pub struct SequencerConfig {
#[derive(Clone, Serialize, Deserialize)] #[derive(Clone, Serialize, Deserialize)]
pub struct BedrockConfig { pub struct BedrockConfig {
/// Fibonacci backoff retry strategy configuration.
#[serde(default)]
pub backoff: BackoffConfig,
/// Bedrock channel ID. /// Bedrock channel ID.
pub channel_id: ChannelId, pub channel_id: ChannelId,
/// Bedrock Url. /// Bedrock Url.

View File

@ -1,34 +0,0 @@
use std::{ops::Deref, sync::Arc};
use anyhow::{Context as _, Result};
use log::info;
pub use url::Url;
/// Abstraction over the indexer connection; implemented by `IndexerClient`.
#[expect(async_fn_in_trait, reason = "We don't care about Send/Sync here")]
pub trait IndexerClientTrait: Clone {
    /// Connect to the indexer at the given URL and return a ready client.
    async fn new(indexer_url: &Url) -> Result<Self>;
}
// Websocket JSON-RPC client for the indexer; cloning shares the underlying
// connection via `Arc`.
#[derive(Clone)]
pub struct IndexerClient(Arc<jsonrpsee::ws_client::WsClient>);
impl IndexerClientTrait for IndexerClient {
    /// Open a websocket connection to the indexer and wrap it for shared use.
    async fn new(indexer_url: &Url) -> Result<Self> {
        info!("Connecting to Indexer at {indexer_url}");
        let client = jsonrpsee::ws_client::WsClientBuilder::default()
            .build(indexer_url)
            .await
            .context("Failed to create websocket client")?;
        Ok(Self(Arc::new(client)))
    }
}
// Deref lets callers invoke `WsClient` RPC methods directly on `IndexerClient`.
impl Deref for IndexerClient {
    type Target = jsonrpsee::ws_client::WsClient;
    fn deref(&self) -> &Self::Target {
        &self.0
    }
}

View File

@ -1,7 +1,6 @@
use std::{path::Path, time::Instant}; use std::{path::Path, time::Instant};
use anyhow::{Context as _, Result, anyhow}; use anyhow::{Context as _, Result, anyhow};
use bedrock_client::SignedMantleTx;
#[cfg(feature = "testnet")] #[cfg(feature = "testnet")]
use common::PINATA_BASE58; use common::PINATA_BASE58;
use common::{ use common::{
@ -20,33 +19,27 @@ pub use storage::error::DbError;
use testnet_initial_state::initial_state; use testnet_initial_state::initial_state;
use crate::{ use crate::{
block_settlement_client::{BlockSettlementClient, BlockSettlementClientTrait, MsgId}, block_publisher::{BlockPublisherTrait, ZoneSdkPublisher},
block_store::SequencerStore, block_store::SequencerStore,
indexer_client::{IndexerClient, IndexerClientTrait},
}; };
pub mod block_settlement_client; pub mod block_publisher;
pub mod block_store; pub mod block_store;
pub mod config; pub mod config;
pub mod indexer_client;
#[cfg(feature = "mock")] #[cfg(feature = "mock")]
pub mod mock; pub mod mock;
pub struct SequencerCore< pub struct SequencerCore<BP: BlockPublisherTrait = ZoneSdkPublisher> {
BC: BlockSettlementClientTrait = BlockSettlementClient,
IC: IndexerClientTrait = IndexerClient,
> {
state: nssa::V03State, state: nssa::V03State,
store: SequencerStore, store: SequencerStore,
mempool: MemPool<NSSATransaction>, mempool: MemPool<NSSATransaction>,
sequencer_config: SequencerConfig, sequencer_config: SequencerConfig,
chain_height: u64, chain_height: u64,
block_settlement_client: BC, block_publisher: BP,
indexer_client: IC,
} }
impl<BC: BlockSettlementClientTrait, IC: IndexerClientTrait> SequencerCore<BC, IC> { impl<BP: BlockPublisherTrait> SequencerCore<BP> {
/// Starts the sequencer using the provided configuration. /// Starts the sequencer using the provided configuration.
/// If an existing database is found, the sequencer state is loaded from it and /// If an existing database is found, the sequencer state is loaded from it and
/// assumed to represent the correct latest state consistent with Bedrock-finalized data. /// assumed to represent the correct latest state consistent with Bedrock-finalized data.
@ -70,23 +63,16 @@ impl<BC: BlockSettlementClientTrait, IC: IndexerClientTrait> SequencerCore<BC, I
load_or_create_signing_key(&config.home.join("bedrock_signing_key")) load_or_create_signing_key(&config.home.join("bedrock_signing_key"))
.expect("Failed to load or create bedrock signing key"); .expect("Failed to load or create bedrock signing key");
let block_settlement_client = BC::new(&config.bedrock_config, bedrock_signing_key) // TODO: Remove msg_id from BlockMeta — it is no longer needed now that
.expect("Failed to initialize Block Settlement Client"); // zone-sdk manages L1 settlement state via its own checkpoint.
let genesis_msg_id = [0_u8; 32];
let indexer_client = IC::new(&config.indexer_rpc_url)
.await
.expect("Failed to create Indexer Client");
let (_tx, genesis_msg_id) = block_settlement_client
.create_inscribe_tx(&genesis_block)
.expect("Failed to create inscribe tx for genesis block");
// Sequencer should panic if unable to open db, // Sequencer should panic if unable to open db,
// as fixing this issue may require actions non-native to program scope // as fixing this issue may require actions non-native to program scope
let store = SequencerStore::open_db_with_genesis( let store = SequencerStore::open_db_with_genesis(
&config.home.join("rocksdb"), &config.home.join("rocksdb"),
&genesis_block, &genesis_block,
genesis_msg_id.into(), genesis_msg_id,
signing_key, signing_key,
) )
.unwrap(); .unwrap();
@ -94,6 +80,51 @@ impl<BC: BlockSettlementClientTrait, IC: IndexerClientTrait> SequencerCore<BC, I
.latest_block_meta() .latest_block_meta()
.expect("Failed to read latest block meta from store"); .expect("Failed to read latest block meta from store");
let initial_checkpoint = store
.get_zone_checkpoint()
.expect("Failed to load zone-sdk checkpoint");
let is_fresh_start = initial_checkpoint.is_none();
let dbio_for_checkpoint = store.dbio();
let on_checkpoint: block_publisher::CheckpointSink = Box::new(move |cp| {
let bytes = match serde_json::to_vec(&cp) {
Ok(b) => b,
Err(err) => {
error!("Failed to serialize zone-sdk checkpoint: {err:#}");
return;
}
};
if let Err(err) = dbio_for_checkpoint.put_zone_sdk_checkpoint_bytes(&bytes) {
error!("Failed to persist zone-sdk checkpoint: {err:#}");
}
});
let dbio_for_finalized = store.dbio();
let on_finalized_block: block_publisher::FinalizedBlockSink = Box::new(move |block_id| {
if let Err(err) = dbio_for_finalized.clean_pending_blocks_up_to(block_id) {
error!("Failed to mark pending blocks finalized up to {block_id}: {err:#}");
}
});
let block_publisher = BP::new(
&config.bedrock_config,
bedrock_signing_key,
config.retry_pending_blocks_timeout,
initial_checkpoint,
on_checkpoint,
on_finalized_block,
)
.await
.expect("Failed to initialize Block Publisher");
// On a truly fresh start (no checkpoint persisted yet), publish the
// genesis block so the indexer can find the channel start. After the
// first publish, zone-sdk's checkpoint persistence covers further
// restarts.
if is_fresh_start && let Err(err) = block_publisher.publish_block(&genesis_block).await {
error!("Failed to publish genesis block: {err:#}");
}
#[cfg_attr(not(feature = "testnet"), allow(unused_mut))] #[cfg_attr(not(feature = "testnet"), allow(unused_mut))]
let mut state = if let Some(state) = store.get_nssa_state() { let mut state = if let Some(state) = store.get_nssa_state() {
info!("Found local database. Loading state and pending blocks from it."); info!("Found local database. Loading state and pending blocks from it.");
@ -159,35 +190,33 @@ impl<BC: BlockSettlementClientTrait, IC: IndexerClientTrait> SequencerCore<BC, I
mempool, mempool,
chain_height: latest_block_meta.id, chain_height: latest_block_meta.id,
sequencer_config: config, sequencer_config: config,
block_settlement_client, block_publisher,
indexer_client,
}; };
(sequencer_core, mempool_handle) (sequencer_core, mempool_handle)
} }
/// Produces a new block from mempool transactions and publishes it via zone-sdk.
pub async fn produce_new_block(&mut self) -> Result<u64> { pub async fn produce_new_block(&mut self) -> Result<u64> {
let (tx, _msg_id) = self let block = self
.produce_new_block_with_mempool_transactions() .build_block_from_mempool()
.context("Failed to produce new block with mempool transactions")?; .context("Failed to build block from mempool transactions")?;
match self
.block_settlement_client // TODO: Remove msg_id from store.update — it is no longer needed now that
.submit_inscribe_tx_to_bedrock(tx) // zone-sdk manages L1 settlement state via its own checkpoint.
.await let placeholder_msg_id = [0_u8; 32];
{
Ok(()) => {} if let Err(err) = self.block_publisher.publish_block(&block).await {
Err(err) => { error!("Failed to publish block to Bedrock with error: {err:#}");
error!("Failed to post block data to Bedrock with error: {err:#}");
}
} }
self.store.update(&block, placeholder_msg_id, &self.state)?;
Ok(self.chain_height) Ok(self.chain_height)
} }
/// Produces new block from transactions in mempool and packs it into a `SignedMantleTx`. /// Builds a new block from transactions in the mempool.
pub fn produce_new_block_with_mempool_transactions( /// Does NOT publish or store the block — the caller is responsible for that.
&mut self, pub fn build_block_from_mempool(&mut self) -> Result<Block> {
) -> Result<(SignedMantleTx, MsgId)> {
let now = Instant::now(); let now = Instant::now();
let new_block_height = self.next_block_id(); let new_block_height = self.next_block_id();
@ -277,21 +306,12 @@ impl<BC: BlockSettlementClientTrait, IC: IndexerClientTrait> SequencerCore<BC, I
timestamp: new_block_timestamp, timestamp: new_block_timestamp,
}; };
// TODO: Remove bedrock_parent_id from Block — it is no longer needed now
// that zone-sdk manages the inscription parent chain internally.
let placeholder_parent_id = [0_u8; 32];
let block = hashable_data let block = hashable_data
.clone() .clone()
.into_pending_block(self.store.signing_key(), latest_block_meta.msg_id); .into_pending_block(self.store.signing_key(), placeholder_parent_id);
let (tx, msg_id) = self
.block_settlement_client
.create_inscribe_tx(&block)
.with_context(|| {
format!(
"Failed to create inscribe transaction for block with id {}",
block.header.block_id
)
})?;
self.store.update(&block, msg_id.into(), &self.state)?;
self.chain_height = new_block_height; self.chain_height = new_block_height;
@ -300,7 +320,7 @@ impl<BC: BlockSettlementClientTrait, IC: IndexerClientTrait> SequencerCore<BC, I
hashable_data.transactions.len(), hashable_data.transactions.len(),
now.elapsed().as_secs() now.elapsed().as_secs()
); );
Ok((tx, msg_id)) Ok(block)
} }
pub const fn state(&self) -> &nssa::V03State { pub const fn state(&self) -> &nssa::V03State {
@ -319,22 +339,19 @@ impl<BC: BlockSettlementClientTrait, IC: IndexerClientTrait> SequencerCore<BC, I
&self.sequencer_config &self.sequencer_config
} }
/// Deletes finalized blocks from the sequencer's pending block list. /// Marks all pending blocks with `block_id <= last_finalized_block_id` as
/// This method must be called when new blocks are finalized on Bedrock. /// finalized. Idempotent. Production callers don't invoke this directly —
/// All pending blocks with an ID less than or equal to `last_finalized_block_id` /// it's wired up in `start_from_config` to the publisher's
/// are removed from the database. /// `on_finalized_block` sink, which fires on `Event::TxsFinalized` /
pub fn clean_finalized_blocks_from_db(&mut self, last_finalized_block_id: u64) -> Result<()> { /// `Event::FinalizedInscriptions`. Kept on the type for tests.
self.get_pending_blocks()? // TODO: Delete blocks instead of marking them as finalized. Current
.iter() // approach is used because we still have `GetBlockDataRequest`.
.map(|block| block.header.block_id) pub fn clean_finalized_blocks_from_db(&self, last_finalized_block_id: u64) -> Result<()> {
.min() info!("Clearing pending blocks up to id: {last_finalized_block_id}");
.map_or(Ok(()), |first_pending_block_id| { self.store
info!("Clearing pending blocks up to id: {last_finalized_block_id}"); .dbio()
// TODO: Delete blocks instead of marking them as finalized. .clean_pending_blocks_up_to(last_finalized_block_id)?;
// Current approach is used because we still have `GetBlockDataRequest`. Ok(())
(first_pending_block_id..=last_finalized_block_id)
.try_for_each(|id| self.store.mark_block_as_finalized(id))
})
} }
/// Returns the list of stored pending blocks. /// Returns the list of stored pending blocks.
@ -348,12 +365,8 @@ impl<BC: BlockSettlementClientTrait, IC: IndexerClientTrait> SequencerCore<BC, I
.collect()) .collect())
} }
pub fn block_settlement_client(&self) -> BC { pub fn block_publisher(&self) -> BP {
self.block_settlement_client.clone() self.block_publisher.clone()
}
pub fn indexer_client(&self) -> IC {
self.indexer_client.clone()
} }
fn next_block_id(&self) -> u64 { fn next_block_id(&self) -> u64 {
@ -392,7 +405,6 @@ mod tests {
use std::{pin::pin, time::Duration}; use std::{pin::pin, time::Duration};
use bedrock_client::BackoffConfig;
use common::{ use common::{
test_utils::sequencer_sign_key_for_testing, test_utils::sequencer_sign_key_for_testing,
transaction::{NSSATransaction, clock_invocation}, transaction::{NSSATransaction, clock_invocation},
@ -420,16 +432,11 @@ mod tests {
block_create_timeout: Duration::from_secs(1), block_create_timeout: Duration::from_secs(1),
signing_key: *sequencer_sign_key_for_testing().value(), signing_key: *sequencer_sign_key_for_testing().value(),
bedrock_config: BedrockConfig { bedrock_config: BedrockConfig {
backoff: BackoffConfig {
start_delay: Duration::from_millis(100),
max_retries: 5,
},
channel_id: ChannelId::from([0; 32]), channel_id: ChannelId::from([0; 32]),
node_url: "http://not-used-in-unit-tests".parse().unwrap(), node_url: "http://not-used-in-unit-tests".parse().unwrap(),
auth: None, auth: None,
}, },
retry_pending_blocks_timeout: Duration::from_mins(4), retry_pending_blocks_timeout: Duration::from_mins(4),
indexer_rpc_url: "ws://localhost:8779".parse().unwrap(),
initial_public_accounts: None, initial_public_accounts: None,
initial_private_accounts: None, initial_private_accounts: None,
} }
@ -457,9 +464,7 @@ mod tests {
let tx = common::test_utils::produce_dummy_empty_transaction(); let tx = common::test_utils::produce_dummy_empty_transaction();
mempool_handle.push(tx).await.unwrap(); mempool_handle.push(tx).await.unwrap();
sequencer sequencer.produce_new_block().await.unwrap();
.produce_new_block_with_mempool_transactions()
.unwrap();
(sequencer, mempool_handle) (sequencer, mempool_handle)
} }
@ -604,23 +609,21 @@ mod tests {
assert!(poll.is_pending()); assert!(poll.is_pending());
// Empty the mempool by producing a block // Empty the mempool by producing a block
sequencer sequencer.produce_new_block().await.unwrap();
.produce_new_block_with_mempool_transactions()
.unwrap();
// Resolve the pending push // Resolve the pending push
assert!(push_fut.await.is_ok()); assert!(push_fut.await.is_ok());
} }
#[tokio::test] #[tokio::test]
async fn produce_new_block_with_mempool_transactions() { async fn build_block_from_mempool() {
let (mut sequencer, mempool_handle) = common_setup().await; let (mut sequencer, mempool_handle) = common_setup().await;
let genesis_height = sequencer.chain_height; let genesis_height = sequencer.chain_height;
let tx = common::test_utils::produce_dummy_empty_transaction(); let tx = common::test_utils::produce_dummy_empty_transaction();
mempool_handle.push(tx).await.unwrap(); mempool_handle.push(tx).await.unwrap();
let result = sequencer.produce_new_block_with_mempool_transactions(); let result = sequencer.build_block_from_mempool();
assert!(result.is_ok()); assert!(result.is_ok());
assert_eq!(sequencer.chain_height, genesis_height + 1); assert_eq!(sequencer.chain_height, genesis_height + 1);
} }
@ -645,9 +648,7 @@ mod tests {
mempool_handle.push(tx_replay).await.unwrap(); mempool_handle.push(tx_replay).await.unwrap();
// Create block // Create block
sequencer sequencer.produce_new_block().await.unwrap();
.produce_new_block_with_mempool_transactions()
.unwrap();
let block = sequencer let block = sequencer
.store .store
.get_block_at_id(sequencer.chain_height) .get_block_at_id(sequencer.chain_height)
@ -679,9 +680,7 @@ mod tests {
// The transaction should be included the first time // The transaction should be included the first time
mempool_handle.push(tx.clone()).await.unwrap(); mempool_handle.push(tx.clone()).await.unwrap();
sequencer sequencer.produce_new_block().await.unwrap();
.produce_new_block_with_mempool_transactions()
.unwrap();
let block = sequencer let block = sequencer
.store .store
.get_block_at_id(sequencer.chain_height) .get_block_at_id(sequencer.chain_height)
@ -697,9 +696,7 @@ mod tests {
// Add same transaction should fail // Add same transaction should fail
mempool_handle.push(tx.clone()).await.unwrap(); mempool_handle.push(tx.clone()).await.unwrap();
sequencer sequencer.produce_new_block().await.unwrap();
.produce_new_block_with_mempool_transactions()
.unwrap();
let block = sequencer let block = sequencer
.store .store
.get_block_at_id(sequencer.chain_height) .get_block_at_id(sequencer.chain_height)
@ -738,9 +735,7 @@ mod tests {
); );
mempool_handle.push(tx.clone()).await.unwrap(); mempool_handle.push(tx.clone()).await.unwrap();
sequencer sequencer.produce_new_block().await.unwrap();
.produce_new_block_with_mempool_transactions()
.unwrap();
let block = sequencer let block = sequencer
.store .store
.get_block_at_id(sequencer.chain_height) .get_block_at_id(sequencer.chain_height)
@ -778,15 +773,9 @@ mod tests {
let config = setup_sequencer_config(); let config = setup_sequencer_config();
let (mut sequencer, _mempool_handle) = let (mut sequencer, _mempool_handle) =
SequencerCoreWithMockClients::start_from_config(config).await; SequencerCoreWithMockClients::start_from_config(config).await;
sequencer sequencer.produce_new_block().await.unwrap();
.produce_new_block_with_mempool_transactions() sequencer.produce_new_block().await.unwrap();
.unwrap(); sequencer.produce_new_block().await.unwrap();
sequencer
.produce_new_block_with_mempool_transactions()
.unwrap();
sequencer
.produce_new_block_with_mempool_transactions()
.unwrap();
assert_eq!(sequencer.get_pending_blocks().unwrap().len(), 4); assert_eq!(sequencer.get_pending_blocks().unwrap().len(), 4);
} }
@ -795,15 +784,9 @@ mod tests {
let config = setup_sequencer_config(); let config = setup_sequencer_config();
let (mut sequencer, _mempool_handle) = let (mut sequencer, _mempool_handle) =
SequencerCoreWithMockClients::start_from_config(config).await; SequencerCoreWithMockClients::start_from_config(config).await;
sequencer sequencer.produce_new_block().await.unwrap();
.produce_new_block_with_mempool_transactions() sequencer.produce_new_block().await.unwrap();
.unwrap(); sequencer.produce_new_block().await.unwrap();
sequencer
.produce_new_block_with_mempool_transactions()
.unwrap();
sequencer
.produce_new_block_with_mempool_transactions()
.unwrap();
let last_finalized_block = 3; let last_finalized_block = 3;
sequencer sequencer
@ -836,9 +819,7 @@ mod tests {
); );
mempool_handle.push(tx).await.unwrap(); mempool_handle.push(tx).await.unwrap();
sequencer sequencer.produce_new_block().await.unwrap();
.produce_new_block_with_mempool_transactions()
.unwrap();
// Get the metadata of the last block produced // Get the metadata of the last block produced
sequencer.store.latest_block_meta().unwrap() sequencer.store.latest_block_meta().unwrap()
@ -861,9 +842,7 @@ mod tests {
mempool_handle.push(tx.clone()).await.unwrap(); mempool_handle.push(tx.clone()).await.unwrap();
// Step 4: Produce new block // Step 4: Produce new block
sequencer sequencer.produce_new_block().await.unwrap();
.produce_new_block_with_mempool_transactions()
.unwrap();
// Step 5: Verify the new block has correct previous block metadata // Step 5: Verify the new block has correct previous block metadata
let new_block = sequencer let new_block = sequencer
@ -876,10 +855,6 @@ mod tests {
new_block.header.prev_block_hash, expected_prev_meta.hash, new_block.header.prev_block_hash, expected_prev_meta.hash,
"New block's prev_block_hash should match the stored metadata hash" "New block's prev_block_hash should match the stored metadata hash"
); );
assert_eq!(
new_block.bedrock_parent_id, expected_prev_meta.msg_id,
"New block's bedrock_parent_id should match the stored metadata msg_id"
);
assert_eq!( assert_eq!(
new_block.body.transactions, new_block.body.transactions,
vec![ vec![
@ -914,9 +889,7 @@ mod tests {
.await .await
.unwrap(); .unwrap();
mempool_handle.push(crafted_clock_tx).await.unwrap(); mempool_handle.push(crafted_clock_tx).await.unwrap();
sequencer sequencer.produce_new_block().await.unwrap();
.produce_new_block_with_mempool_transactions()
.unwrap();
let block = sequencer let block = sequencer
.store .store
@ -949,15 +922,11 @@ mod tests {
// Produce multiple blocks to advance chain height // Produce multiple blocks to advance chain height
let tx = common::test_utils::produce_dummy_empty_transaction(); let tx = common::test_utils::produce_dummy_empty_transaction();
mempool_handle.push(tx).await.unwrap(); mempool_handle.push(tx).await.unwrap();
sequencer sequencer.produce_new_block().await.unwrap();
.produce_new_block_with_mempool_transactions()
.unwrap();
let tx = common::test_utils::produce_dummy_empty_transaction(); let tx = common::test_utils::produce_dummy_empty_transaction();
mempool_handle.push(tx).await.unwrap(); mempool_handle.push(tx).await.unwrap();
sequencer sequencer.produce_new_block().await.unwrap();
.produce_new_block_with_mempool_transactions()
.unwrap();
// Return the current chain height (should be genesis_id + 2) // Return the current chain height (should be genesis_id + 2)
sequencer.chain_height sequencer.chain_height
@ -994,9 +963,7 @@ mod tests {
), ),
)); ));
mempool_handle.push(deploy_tx).await.unwrap(); mempool_handle.push(deploy_tx).await.unwrap();
sequencer sequencer.produce_new_block().await.unwrap();
.produce_new_block_with_mempool_transactions()
.unwrap();
// Build a user transaction that invokes clock_chain_caller, which in turn chain-calls the // Build a user transaction that invokes clock_chain_caller, which in turn chain-calls the
// clock program with the clock accounts. The sequencer should detect that the resulting // clock program with the clock accounts. The sequencer should detect that the resulting
@ -1021,9 +988,7 @@ mod tests {
)); ));
mempool_handle.push(user_tx).await.unwrap(); mempool_handle.push(user_tx).await.unwrap();
sequencer sequencer.produce_new_block().await.unwrap();
.produce_new_block_with_mempool_transactions()
.unwrap();
let block = sequencer let block = sequencer
.store .store
@ -1057,7 +1022,7 @@ mod tests {
mempool_handle.push(tx).await.unwrap(); mempool_handle.push(tx).await.unwrap();
// Block production must fail because the appended clock tx cannot execute. // Block production must fail because the appended clock tx cannot execute.
let result = sequencer.produce_new_block_with_mempool_transactions(); let result = sequencer.produce_new_block().await;
assert!( assert!(
result.is_err(), result.is_err(),
"Block production should abort when clock account data is corrupted" "Block production should abort when clock account data is corrupted"
@ -1076,7 +1041,7 @@ mod tests {
program::Program, program::Program,
}; };
use nssa_core::{ use nssa_core::{
SharedSecretKey, InputAccountIdentity, SharedSecretKey,
account::AccountWithMetadata, account::AccountWithMetadata,
encryption::{EphemeralPublicKey, EphemeralSecretKey, ViewingPublicKey}, encryption::{EphemeralPublicKey, EphemeralSecretKey, ViewingPublicKey},
}; };
@ -1114,10 +1079,11 @@ mod tests {
(&npk, 0), (&npk, 0),
)], )],
Program::serialize_instruction(0_u128).unwrap(), Program::serialize_instruction(0_u128).unwrap(),
vec![1], vec![InputAccountIdentity::PrivateAuthorizedInit {
vec![(npk, 0, shared_secret)], ssk: shared_secret,
vec![nsk], nsk,
vec![None], identifier: 0,
}],
&Program::authenticated_transfer_program().into(), &Program::authenticated_transfer_program().into(),
) )
.unwrap(); .unwrap();

View File

@ -1,76 +1,34 @@
use anyhow::{Result, anyhow}; use std::time::Duration;
use bedrock_client::SignedMantleTx;
use logos_blockchain_core::mantle::ops::channel::ChannelId; use anyhow::Result;
use common::block::Block;
use logos_blockchain_key_management_system_service::keys::Ed25519Key; use logos_blockchain_key_management_system_service::keys::Ed25519Key;
use url::Url;
use crate::{ use crate::{
block_settlement_client::BlockSettlementClientTrait, config::BedrockConfig, block_publisher::{
indexer_client::IndexerClientTrait, BlockPublisherTrait, CheckpointSink, FinalizedBlockSink, SequencerCheckpoint,
},
config::BedrockConfig,
}; };
pub type SequencerCoreWithMockClients = pub type SequencerCoreWithMockClients = crate::SequencerCore<MockBlockPublisher>;
crate::SequencerCore<MockBlockSettlementClient, MockIndexerClient>;
#[derive(Clone)] #[derive(Clone)]
pub struct MockBlockSettlementClient { pub struct MockBlockPublisher;
bedrock_channel_id: ChannelId,
bedrock_signing_key: Ed25519Key,
}
impl BlockSettlementClientTrait for MockBlockSettlementClient { impl BlockPublisherTrait for MockBlockPublisher {
fn new(config: &BedrockConfig, signing_key: Ed25519Key) -> Result<Self> { async fn new(
Ok(Self { _config: &BedrockConfig,
bedrock_channel_id: config.channel_id, _bedrock_signing_key: Ed25519Key,
bedrock_signing_key: signing_key, _resubmit_interval: Duration,
}) _initial_checkpoint: Option<SequencerCheckpoint>,
_on_checkpoint: CheckpointSink,
_on_finalized_block: FinalizedBlockSink,
) -> Result<Self> {
Ok(Self)
} }
fn bedrock_channel_id(&self) -> ChannelId { async fn publish_block(&self, _block: &Block) -> Result<()> {
self.bedrock_channel_id
}
fn bedrock_signing_key(&self) -> &Ed25519Key {
&self.bedrock_signing_key
}
async fn submit_inscribe_tx_to_bedrock(&self, _tx: SignedMantleTx) -> Result<()> {
Ok(()) Ok(())
} }
} }
#[derive(Clone)]
pub struct MockBlockSettlementClientWithError {
bedrock_channel_id: ChannelId,
bedrock_signing_key: Ed25519Key,
}
impl BlockSettlementClientTrait for MockBlockSettlementClientWithError {
fn new(config: &BedrockConfig, signing_key: Ed25519Key) -> Result<Self> {
Ok(Self {
bedrock_channel_id: config.channel_id,
bedrock_signing_key: signing_key,
})
}
fn bedrock_channel_id(&self) -> ChannelId {
self.bedrock_channel_id
}
fn bedrock_signing_key(&self) -> &Ed25519Key {
&self.bedrock_signing_key
}
async fn submit_inscribe_tx_to_bedrock(&self, _tx: SignedMantleTx) -> Result<()> {
Err(anyhow!("Mock error"))
}
}
#[derive(Copy, Clone)]
pub struct MockIndexerClient;
impl IndexerClientTrait for MockIndexerClient {
async fn new(_indexer_url: &Url) -> Result<Self> {
Ok(Self)
}
}

View File

@ -14,7 +14,6 @@ mempool.workspace = true
sequencer_core = { workspace = true, features = ["testnet"] } sequencer_core = { workspace = true, features = ["testnet"] }
sequencer_service_protocol.workspace = true sequencer_service_protocol.workspace = true
sequencer_service_rpc = { workspace = true, features = ["server"] } sequencer_service_rpc = { workspace = true, features = ["server"] }
indexer_service_rpc = { workspace = true, features = ["client"] }
clap = { workspace = true, features = ["derive", "env"] } clap = { workspace = true, features = ["derive", "env"] }
anyhow.workspace = true anyhow.workspace = true

View File

@ -5,15 +5,13 @@ use bytesize::ByteSize;
use common::transaction::NSSATransaction; use common::transaction::NSSATransaction;
use futures::never::Never; use futures::never::Never;
use jsonrpsee::server::ServerHandle; use jsonrpsee::server::ServerHandle;
#[cfg(not(feature = "standalone"))]
use log::warn;
use log::{error, info}; use log::{error, info};
use mempool::MemPoolHandle; use mempool::MemPoolHandle;
#[cfg(not(feature = "standalone"))]
use sequencer_core::SequencerCore;
#[cfg(feature = "standalone")] #[cfg(feature = "standalone")]
use sequencer_core::SequencerCoreWithMockClients as SequencerCore; use sequencer_core::SequencerCoreWithMockClients as SequencerCore;
pub use sequencer_core::config::*; pub use sequencer_core::config::*;
#[cfg(not(feature = "standalone"))]
use sequencer_core::{SequencerCore, block_settlement_client::BlockSettlementClientTrait as _};
use sequencer_service_rpc::RpcServer as _; use sequencer_service_rpc::RpcServer as _;
use tokio::{sync::Mutex, task::JoinHandle}; use tokio::{sync::Mutex, task::JoinHandle};
@ -29,8 +27,6 @@ pub struct SequencerHandle {
/// Option because of `Drop` which forbids to simply move out of `self` in `stopped()`. /// Option because of `Drop` which forbids to simply move out of `self` in `stopped()`.
server_handle: Option<ServerHandle>, server_handle: Option<ServerHandle>,
main_loop_handle: JoinHandle<Result<Never>>, main_loop_handle: JoinHandle<Result<Never>>,
retry_pending_blocks_loop_handle: JoinHandle<Result<Never>>,
listen_for_bedrock_blocks_loop_handle: JoinHandle<Result<Never>>,
} }
impl SequencerHandle { impl SequencerHandle {
@ -38,15 +34,11 @@ impl SequencerHandle {
addr: SocketAddr, addr: SocketAddr,
server_handle: ServerHandle, server_handle: ServerHandle,
main_loop_handle: JoinHandle<Result<Never>>, main_loop_handle: JoinHandle<Result<Never>>,
retry_pending_blocks_loop_handle: JoinHandle<Result<Never>>,
listen_for_bedrock_blocks_loop_handle: JoinHandle<Result<Never>>,
) -> Self { ) -> Self {
Self { Self {
addr, addr,
server_handle: Some(server_handle), server_handle: Some(server_handle),
main_loop_handle, main_loop_handle,
retry_pending_blocks_loop_handle,
listen_for_bedrock_blocks_loop_handle,
} }
} }
@ -60,8 +52,6 @@ impl SequencerHandle {
addr: _, addr: _,
server_handle, server_handle,
main_loop_handle, main_loop_handle,
retry_pending_blocks_loop_handle,
listen_for_bedrock_blocks_loop_handle,
} = &mut self; } = &mut self;
let server_handle = server_handle.take().expect("Server handle is set"); let server_handle = server_handle.take().expect("Server handle is set");
@ -75,16 +65,6 @@ impl SequencerHandle {
.context("Main loop task panicked")? .context("Main loop task panicked")?
.context("Main loop exited unexpectedly") .context("Main loop exited unexpectedly")
} }
res = retry_pending_blocks_loop_handle => {
res
.context("Retry pending blocks loop task panicked")?
.context("Retry pending blocks loop exited unexpectedly")
}
res = listen_for_bedrock_blocks_loop_handle => {
res
.context("Listen for bedrock blocks loop task panicked")?
.context("Listen for bedrock blocks loop exited unexpectedly")
}
} }
} }
@ -98,14 +78,10 @@ impl SequencerHandle {
addr: _, addr: _,
server_handle, server_handle,
main_loop_handle, main_loop_handle,
retry_pending_blocks_loop_handle,
listen_for_bedrock_blocks_loop_handle,
} = self; } = self;
let stopped = server_handle.as_ref().is_none_or(ServerHandle::is_stopped) let stopped = server_handle.as_ref().is_none_or(ServerHandle::is_stopped)
|| main_loop_handle.is_finished() || main_loop_handle.is_finished();
|| retry_pending_blocks_loop_handle.is_finished()
|| listen_for_bedrock_blocks_loop_handle.is_finished();
!stopped !stopped
} }
@ -121,13 +97,9 @@ impl Drop for SequencerHandle {
addr: _, addr: _,
server_handle, server_handle,
main_loop_handle, main_loop_handle,
retry_pending_blocks_loop_handle,
listen_for_bedrock_blocks_loop_handle,
} = self; } = self;
main_loop_handle.abort(); main_loop_handle.abort();
retry_pending_blocks_loop_handle.abort();
listen_for_bedrock_blocks_loop_handle.abort();
let Some(handle) = server_handle else { let Some(handle) = server_handle else {
return; return;
@ -141,7 +113,6 @@ impl Drop for SequencerHandle {
pub async fn run(config: SequencerConfig, port: u16) -> Result<SequencerHandle> { pub async fn run(config: SequencerConfig, port: u16) -> Result<SequencerHandle> {
let block_timeout = config.block_create_timeout; let block_timeout = config.block_create_timeout;
let retry_pending_blocks_timeout = config.retry_pending_blocks_timeout;
let max_block_size = config.max_block_size; let max_block_size = config.max_block_size;
let (sequencer_core, mempool_handle) = SequencerCore::start_from_config(config).await; let (sequencer_core, mempool_handle) = SequencerCore::start_from_config(config).await;
@ -159,34 +130,10 @@ pub async fn run(config: SequencerConfig, port: u16) -> Result<SequencerHandle>
.await?; .await?;
info!("RPC server started"); info!("RPC server started");
#[cfg(not(feature = "standalone"))]
{
info!("Submitting stored pending blocks");
retry_pending_blocks(&seq_core_wrapped)
.await
.expect("Failed to submit pending blocks on startup");
}
info!("Starting main sequencer loop"); info!("Starting main sequencer loop");
let main_loop_handle = tokio::spawn(main_loop(Arc::clone(&seq_core_wrapped), block_timeout)); let main_loop_handle = tokio::spawn(main_loop(seq_core_wrapped, block_timeout));
info!("Starting pending block retry loop"); Ok(SequencerHandle::new(addr, server_handle, main_loop_handle))
let retry_pending_blocks_loop_handle = tokio::spawn(retry_pending_blocks_loop(
Arc::clone(&seq_core_wrapped),
retry_pending_blocks_timeout,
));
info!("Starting bedrock block listening loop");
let listen_for_bedrock_blocks_loop_handle =
tokio::spawn(listen_for_bedrock_blocks_loop(seq_core_wrapped));
Ok(SequencerHandle::new(
addr,
server_handle,
main_loop_handle,
retry_pending_blocks_loop_handle,
listen_for_bedrock_blocks_loop_handle,
))
} }
async fn run_server( async fn run_server(
@ -235,118 +182,3 @@ async fn main_loop(seq_core: Arc<Mutex<SequencerCore>>, block_timeout: Duration)
info!("Waiting for new transactions"); info!("Waiting for new transactions");
} }
} }
#[cfg(not(feature = "standalone"))]
async fn retry_pending_blocks(seq_core: &Arc<Mutex<SequencerCore>>) -> Result<()> {
use std::time::Instant;
use log::debug;
let (mut pending_blocks, block_settlement_client) = {
let sequencer_core = seq_core.lock().await;
let client = sequencer_core.block_settlement_client();
let pending_blocks = sequencer_core
.get_pending_blocks()
.expect("Sequencer should be able to retrieve pending blocks");
(pending_blocks, client)
};
pending_blocks.sort_by(|block1, block2| block1.header.block_id.cmp(&block2.header.block_id));
if !pending_blocks.is_empty() {
info!(
"Resubmitting blocks from {} to {}",
pending_blocks.first().unwrap().header.block_id,
pending_blocks.last().unwrap().header.block_id
);
}
for block in &pending_blocks {
debug!(
"Resubmitting pending block with id {}",
block.header.block_id
);
// TODO: We could cache the inscribe tx for each pending block to avoid re-creating it
// on every retry.
let now = Instant::now();
let (tx, _msg_id) = block_settlement_client
.create_inscribe_tx(block)
.context("Failed to create inscribe tx for pending block")?;
debug!("Create inscribe: {:?}", now.elapsed());
let now = Instant::now();
if let Err(e) = block_settlement_client
.submit_inscribe_tx_to_bedrock(tx)
.await
{
warn!(
"Failed to resubmit block with id {} with error {e:#}",
block.header.block_id
);
}
debug!("Post: {:?}", now.elapsed());
}
Ok(())
}
#[cfg(not(feature = "standalone"))]
async fn retry_pending_blocks_loop(
seq_core: Arc<Mutex<SequencerCore>>,
retry_pending_blocks_timeout: Duration,
) -> Result<Never> {
loop {
tokio::time::sleep(retry_pending_blocks_timeout).await;
retry_pending_blocks(&seq_core).await?;
}
}
#[cfg(not(feature = "standalone"))]
async fn listen_for_bedrock_blocks_loop(seq_core: Arc<Mutex<SequencerCore>>) -> Result<Never> {
use indexer_service_rpc::RpcClient as _;
let indexer_client = seq_core.lock().await.indexer_client();
let retry_delay = Duration::from_secs(5);
loop {
// TODO: Subscribe from the first pending block ID?
let mut subscription = indexer_client
.subscribe_to_finalized_blocks()
.await
.context("Failed to subscribe to finalized blocks")?;
while let Some(block_id) = subscription.next().await {
let block_id = block_id.context("Failed to get next block from subscription")?;
info!("Received new L2 block with ID {block_id}");
seq_core
.lock()
.await
.clean_finalized_blocks_from_db(block_id)
.with_context(|| {
format!("Failed to clean finalized blocks from DB for block ID {block_id}")
})?;
}
warn!(
"Block subscription closed unexpectedly, reason: {:?}, retrying after {retry_delay:?}",
subscription.close_reason()
);
tokio::time::sleep(retry_delay).await;
}
}
#[cfg(feature = "standalone")]
async fn listen_for_bedrock_blocks_loop(_seq_core: Arc<Mutex<SequencerCore>>) -> Result<Never> {
std::future::pending::<Result<Never>>().await
}
#[cfg(feature = "standalone")]
async fn retry_pending_blocks_loop(
_seq_core: Arc<Mutex<SequencerCore>>,
_retry_pending_blocks_timeout: Duration,
) -> Result<Never> {
std::future::pending::<Result<Never>>().await
}

View File

@ -8,10 +8,7 @@ use jsonrpsee::{
use log::warn; use log::warn;
use mempool::MemPoolHandle; use mempool::MemPoolHandle;
use nssa::{self, program::Program}; use nssa::{self, program::Program};
use sequencer_core::{ use sequencer_core::{DbError, SequencerCore, block_publisher::BlockPublisherTrait};
DbError, SequencerCore, block_settlement_client::BlockSettlementClientTrait,
indexer_client::IndexerClientTrait,
};
use sequencer_service_protocol::{ use sequencer_service_protocol::{
Account, AccountId, Block, BlockId, Commitment, HashType, MembershipProof, Nonce, ProgramId, Account, AccountId, Block, BlockId, Commitment, HashType, MembershipProof, Nonce, ProgramId,
}; };
@ -19,15 +16,15 @@ use tokio::sync::Mutex;
const NOT_FOUND_ERROR_CODE: i32 = -31999; const NOT_FOUND_ERROR_CODE: i32 = -31999;
pub struct SequencerService<BC: BlockSettlementClientTrait, IC: IndexerClientTrait> { pub struct SequencerService<BC: BlockPublisherTrait> {
sequencer: Arc<Mutex<SequencerCore<BC, IC>>>, sequencer: Arc<Mutex<SequencerCore<BC>>>,
mempool_handle: MemPoolHandle<NSSATransaction>, mempool_handle: MemPoolHandle<NSSATransaction>,
max_block_size: u64, max_block_size: u64,
} }
impl<BC: BlockSettlementClientTrait, IC: IndexerClientTrait> SequencerService<BC, IC> { impl<BC: BlockPublisherTrait> SequencerService<BC> {
pub const fn new( pub const fn new(
sequencer: Arc<Mutex<SequencerCore<BC, IC>>>, sequencer: Arc<Mutex<SequencerCore<BC>>>,
mempool_handle: MemPoolHandle<NSSATransaction>, mempool_handle: MemPoolHandle<NSSATransaction>,
max_block_size: u64, max_block_size: u64,
) -> Self { ) -> Self {
@ -40,8 +37,8 @@ impl<BC: BlockSettlementClientTrait, IC: IndexerClientTrait> SequencerService<BC
} }
#[async_trait] #[async_trait]
impl<BC: BlockSettlementClientTrait + Send + 'static, IC: IndexerClientTrait + Send + 'static> impl<BC: BlockPublisherTrait + Send + 'static> sequencer_service_rpc::RpcServer
sequencer_service_rpc::RpcServer for SequencerService<BC, IC> for SequencerService<BC>
{ {
async fn send_transaction(&self, tx: NSSATransaction) -> Result<HashType, ErrorObjectOwned> { async fn send_transaction(&self, tx: NSSATransaction) -> Result<HashType, ErrorObjectOwned> {
// Reserve ~200 bytes for block header overhead // Reserve ~200 bytes for block header overhead

View File

@ -8,7 +8,8 @@ use crate::{
indexer::{ indexer::{
ACC_NUM_CELL_NAME, BLOCK_HASH_CELL_NAME, BREAKPOINT_CELL_NAME, CF_ACC_META, ACC_NUM_CELL_NAME, BLOCK_HASH_CELL_NAME, BREAKPOINT_CELL_NAME, CF_ACC_META,
CF_BREAKPOINT_NAME, CF_HASH_TO_ID, CF_TX_TO_ID, DB_META_LAST_BREAKPOINT_ID, CF_BREAKPOINT_NAME, CF_HASH_TO_ID, CF_TX_TO_ID, DB_META_LAST_BREAKPOINT_ID,
DB_META_LAST_OBSERVED_L1_LIB_HEADER_ID_IN_DB_KEY, TX_HASH_CELL_NAME, DB_META_LAST_OBSERVED_L1_LIB_HEADER_ID_IN_DB_KEY, DB_META_ZONE_SDK_INDEXER_CURSOR_KEY,
TX_HASH_CELL_NAME,
}, },
}; };
@ -211,6 +212,41 @@ impl SimpleWritableCell for AccNumTxCell {
} }
} }
/// Opaque bytes for the zone-sdk indexer cursor `Option<(MsgId, Slot)>`.
/// The caller serializes via `serde_json` (neither type derives borsh).
#[derive(BorshDeserialize)]
pub struct ZoneSdkIndexerCursorCellOwned(pub Vec<u8>);
impl SimpleStorableCell for ZoneSdkIndexerCursorCellOwned {
type KeyParams = ();
const CELL_NAME: &'static str = DB_META_ZONE_SDK_INDEXER_CURSOR_KEY;
const CF_NAME: &'static str = CF_META_NAME;
}
impl SimpleReadableCell for ZoneSdkIndexerCursorCellOwned {}
#[derive(BorshSerialize)]
pub struct ZoneSdkIndexerCursorCellRef<'bytes>(pub &'bytes [u8]);
impl SimpleStorableCell for ZoneSdkIndexerCursorCellRef<'_> {
type KeyParams = ();
const CELL_NAME: &'static str = DB_META_ZONE_SDK_INDEXER_CURSOR_KEY;
const CF_NAME: &'static str = CF_META_NAME;
}
impl SimpleWritableCell for ZoneSdkIndexerCursorCellRef<'_> {
fn value_constructor(&self) -> DbResult<Vec<u8>> {
borsh::to_vec(&self).map_err(|err| {
DbError::borsh_cast_message(
err,
Some("Failed to serialize zone-sdk indexer cursor cell".to_owned()),
)
})
}
}
#[cfg(test)] #[cfg(test)]
mod uniform_tests { mod uniform_tests {
use crate::{ use crate::{

View File

@ -22,6 +22,8 @@ pub const DB_META_LAST_OBSERVED_L1_LIB_HEADER_ID_IN_DB_KEY: &str =
"last_observed_l1_lib_header_in_db"; "last_observed_l1_lib_header_in_db";
/// Key base for storing metainformation about the last breakpoint. /// Key base for storing metainformation about the last breakpoint.
pub const DB_META_LAST_BREAKPOINT_ID: &str = "last_breakpoint_id"; pub const DB_META_LAST_BREAKPOINT_ID: &str = "last_breakpoint_id";
/// Key base for storing the zone-sdk indexer cursor (opaque bytes).
pub const DB_META_ZONE_SDK_INDEXER_CURSOR_KEY: &str = "zone_sdk_indexer_cursor";
/// Cell name for a breakpoint. /// Cell name for a breakpoint.
pub const BREAKPOINT_CELL_NAME: &str = "breakpoint"; pub const BREAKPOINT_CELL_NAME: &str = "breakpoint";

View File

@ -4,7 +4,7 @@ use crate::{
cells::shared_cells::{BlockCell, FirstBlockCell, FirstBlockSetCell, LastBlockCell}, cells::shared_cells::{BlockCell, FirstBlockCell, FirstBlockSetCell, LastBlockCell},
indexer::indexer_cells::{ indexer::indexer_cells::{
AccNumTxCell, BlockHashToBlockIdMapCell, BreakpointCellOwned, LastBreakpointIdCell, AccNumTxCell, BlockHashToBlockIdMapCell, BreakpointCellOwned, LastBreakpointIdCell,
LastObservedL1LibHeaderCell, TxHashToBlockIdMapCell, LastObservedL1LibHeaderCell, TxHashToBlockIdMapCell, ZoneSdkIndexerCursorCellOwned,
}, },
}; };
@ -64,4 +64,10 @@ impl RocksDBIO {
self.get_opt::<AccNumTxCell>(acc_id) self.get_opt::<AccNumTxCell>(acc_id)
.map(|opt| opt.map(|cell| cell.0)) .map(|opt| opt.map(|cell| cell.0))
} }
pub fn get_zone_sdk_indexer_cursor_bytes(&self) -> DbResult<Option<Vec<u8>>> {
Ok(self
.get_opt::<ZoneSdkIndexerCursorCellOwned>(())?
.map(|cell| cell.0))
}
} }

View File

@ -4,6 +4,7 @@ use crate::{
cells::shared_cells::{FirstBlockSetCell, LastBlockCell}, cells::shared_cells::{FirstBlockSetCell, LastBlockCell},
indexer::indexer_cells::{ indexer::indexer_cells::{
BreakpointCellRef, LastBreakpointIdCell, LastObservedL1LibHeaderCell, BreakpointCellRef, LastBreakpointIdCell, LastObservedL1LibHeaderCell,
ZoneSdkIndexerCursorCellRef,
}, },
}; };
@ -30,6 +31,10 @@ impl RocksDBIO {
self.put(&FirstBlockSetCell(true), ()) self.put(&FirstBlockSetCell(true), ())
} }
pub fn put_zone_sdk_indexer_cursor_bytes(&self, bytes: &[u8]) -> DbResult<()> {
self.put(&ZoneSdkIndexerCursorCellRef(bytes), ())
}
// State // State
pub fn put_breakpoint(&self, br_id: u64, breakpoint: &V03State) -> DbResult<()> { pub fn put_breakpoint(&self, br_id: u64, breakpoint: &V03State) -> DbResult<()> {

View File

@ -12,7 +12,7 @@ use crate::{
error::DbError, error::DbError,
sequencer::sequencer_cells::{ sequencer::sequencer_cells::{
LastFinalizedBlockIdCell, LatestBlockMetaCellOwned, LatestBlockMetaCellRef, LastFinalizedBlockIdCell, LatestBlockMetaCellOwned, LatestBlockMetaCellRef,
NSSAStateCellOwned, NSSAStateCellRef, NSSAStateCellOwned, NSSAStateCellRef, ZoneSdkCheckpointCellOwned, ZoneSdkCheckpointCellRef,
}, },
}; };
@ -22,6 +22,8 @@ pub mod sequencer_cells;
pub const DB_META_LAST_FINALIZED_BLOCK_ID: &str = "last_finalized_block_id"; pub const DB_META_LAST_FINALIZED_BLOCK_ID: &str = "last_finalized_block_id";
/// Key base for storing metainformation about the latest block meta. /// Key base for storing metainformation about the latest block meta.
pub const DB_META_LATEST_BLOCK_META_KEY: &str = "latest_block_meta"; pub const DB_META_LATEST_BLOCK_META_KEY: &str = "latest_block_meta";
/// Key base for storing the zone-sdk sequencer checkpoint (opaque bytes).
pub const DB_META_ZONE_SDK_CHECKPOINT_KEY: &str = "zone_sdk_checkpoint";
/// Key base for storing the NSSA state. /// Key base for storing the NSSA state.
pub const DB_NSSA_STATE_KEY: &str = "nssa_state"; pub const DB_NSSA_STATE_KEY: &str = "nssa_state";
@ -205,6 +207,16 @@ impl RocksDBIO {
self.get::<LatestBlockMetaCellOwned>(()).map(|val| val.0) self.get::<LatestBlockMetaCellOwned>(()).map(|val| val.0)
} }
/// Fetch the opaque zone-sdk sequencer checkpoint, if one has been stored.
///
/// Returns `Ok(None)` when no checkpoint exists yet; the byte encoding is
/// owned by the caller (this layer treats it as a blob).
pub fn get_zone_sdk_checkpoint_bytes(&self) -> DbResult<Option<Vec<u8>>> {
    let stored = self.get_opt::<ZoneSdkCheckpointCellOwned>(())?;
    Ok(stored.map(|cell| cell.0))
}

/// Persist the opaque zone-sdk sequencer checkpoint bytes, replacing any
/// previously stored checkpoint.
pub fn put_zone_sdk_checkpoint_bytes(&self, bytes: &[u8]) -> DbResult<()> {
    let cell = ZoneSdkCheckpointCellRef(bytes);
    self.put(&cell, ())
}
pub fn put_block( pub fn put_block(
&self, &self,
block: &Block, block: &Block,
@ -275,6 +287,22 @@ impl RocksDBIO {
Ok(()) Ok(())
} }
/// Finalize every still-pending block whose `block_id` does not exceed
/// `last_finalized`. Safe to call repeatedly — blocks that are already
/// finalized are filtered out and left untouched.
pub fn clean_pending_blocks_up_to(&self, last_finalized: u64) -> DbResult<()> {
    // Materialize the target ids first so the read iterator is fully
    // consumed before any finalization writes are issued.
    let mut to_finalize = Vec::new();
    for block in self.get_all_blocks().filter_map(Result::ok) {
        let is_pending = matches!(block.bedrock_status, BedrockStatus::Pending);
        if is_pending && block.header.block_id <= last_finalized {
            to_finalize.push(block.header.block_id);
        }
    }
    to_finalize
        .into_iter()
        .try_for_each(|id| self.mark_block_as_finalized(id))
}
pub fn mark_block_as_finalized(&self, block_id: u64) -> DbResult<()> { pub fn mark_block_as_finalized(&self, block_id: u64) -> DbResult<()> {
let mut block = self.get_block(block_id)?.ok_or_else(|| { let mut block = self.get_block(block_id)?.ok_or_else(|| {
DbError::db_interaction_error(format!("Block with id {block_id} not found")) DbError::db_interaction_error(format!("Block with id {block_id} not found"))

View File

@ -8,7 +8,7 @@ use crate::{
error::DbError, error::DbError,
sequencer::{ sequencer::{
CF_NSSA_STATE_NAME, DB_META_LAST_FINALIZED_BLOCK_ID, DB_META_LATEST_BLOCK_META_KEY, CF_NSSA_STATE_NAME, DB_META_LAST_FINALIZED_BLOCK_ID, DB_META_LATEST_BLOCK_META_KEY,
DB_NSSA_STATE_KEY, DB_META_ZONE_SDK_CHECKPOINT_KEY, DB_NSSA_STATE_KEY,
}, },
}; };
@ -95,6 +95,42 @@ impl SimpleWritableCell for LatestBlockMetaCellRef<'_> {
} }
} }
/// Opaque bytes for the zone-sdk sequencer checkpoint. The caller is
/// responsible for the actual encoding (we use `serde_json` since
/// `SequencerCheckpoint` only derives serde, not borsh).
///
/// Read side of the owned/borrowed cell pair: lookups deserialize into this
/// owned type, while writes go through `ZoneSdkCheckpointCellRef` below.
#[derive(BorshDeserialize)]
pub struct ZoneSdkCheckpointCellOwned(pub Vec<u8>);

// Both cells declare the same key (unit `KeyParams` plus `CELL_NAME`) and the
// same column family, so the ref cell writes exactly where the owned cell reads.
impl SimpleStorableCell for ZoneSdkCheckpointCellOwned {
    type KeyParams = ();

    const CELL_NAME: &'static str = DB_META_ZONE_SDK_CHECKPOINT_KEY;
    const CF_NAME: &'static str = CF_META_NAME;
}

impl SimpleReadableCell for ZoneSdkCheckpointCellOwned {}

/// Write side of the checkpoint cell: borrows the caller's bytes so storing a
/// checkpoint does not force an extra copy of the payload.
#[derive(BorshSerialize)]
pub struct ZoneSdkCheckpointCellRef<'bytes>(pub &'bytes [u8]);

impl SimpleStorableCell for ZoneSdkCheckpointCellRef<'_> {
    type KeyParams = ();

    const CELL_NAME: &'static str = DB_META_ZONE_SDK_CHECKPOINT_KEY;
    const CF_NAME: &'static str = CF_META_NAME;
}

impl SimpleWritableCell for ZoneSdkCheckpointCellRef<'_> {
    // Borsh-encodes the wrapped slice (length-prefixed bytes). The inner
    // payload is already encoded by the caller — see the type-level doc.
    fn value_constructor(&self) -> DbResult<Vec<u8>> {
        borsh::to_vec(&self).map_err(|err| {
            DbError::borsh_cast_message(
                err,
                Some("Failed to serialize zone-sdk checkpoint cell".to_owned()),
            )
        })
    }
}
#[cfg(test)] #[cfg(test)]
mod uniform_tests { mod uniform_tests {
use crate::{ use crate::{

View File

@ -0,0 +1,118 @@
use nssa_core::program::{
AccountPostState, ChainedCall, Claim, PdaSeed, ProgramId, ProgramInput, ProgramOutput,
read_nssa_inputs,
};
/// Single program for group PDA operations. Owns and operates the PDA directly.
///
/// Instruction: `(pda_seed, noop_program_id, amount, is_deposit)`.
/// Pre-states: `[group_pda, counterparty]`.
///
/// **Deposit** (`is_deposit = true`, new PDA):
/// Claims PDA via `Claim::Pda(seed)`, increases PDA balance, decreases counterparty.
/// Counterparty must be authorized and owned by this program (or uninitialized).
///
/// **Spend** (`is_deposit = false`, existing PDA):
/// Decreases PDA balance (this program owns it), increases counterparty.
/// Chains to a noop callee with `pda_seeds` to establish the mask-3 binding
/// that the circuit requires for existing private PDAs.
type Instruction = (PdaSeed, ProgramId, u128, bool);

#[expect(
    clippy::allow_attributes,
    reason = "allow is needed because the clones are only redundant in test compilation"
)]
#[allow(
    clippy::redundant_clone,
    reason = "clones needed in non-test compilation"
)]
fn main() {
    // Destructure the host-provided input. `instruction_words` is the raw
    // word-level encoding of the instruction, echoed back in ProgramOutput.
    let (
        ProgramInput {
            self_program_id,
            caller_program_id,
            pre_states,
            instruction: (pda_seed, noop_id, amount, is_deposit),
        },
        instruction_words,
    ) = read_nssa_inputs::<Instruction>();

    // Enforce the documented shape: exactly [group_pda, counterparty].
    // `pre_states` is cloned because the original Vec is still needed for
    // ProgramOutput::new below.
    let Ok([pda_pre, counterparty_pre]) = <[_; 2]>::try_from(pre_states.clone()) else {
        panic!("expected exactly 2 pre_states: [group_pda, counterparty]");
    };

    if is_deposit {
        // Deposit: claim PDA, transfer balance from counterparty to PDA.
        // Both accounts must be owned by this program (or uninitialized) for
        // validate_execution to allow balance changes.
        assert!(
            counterparty_pre.is_authorized,
            "Counterparty must be authorized to deposit"
        );

        let mut pda_account = pda_pre.account;
        let mut counterparty_account = counterparty_pre.account;

        // checked_* arithmetic: any overflow/underflow aborts the proof.
        pda_account.balance = pda_account
            .balance
            .checked_add(amount)
            .expect("PDA balance overflow");
        counterparty_account.balance = counterparty_account
            .balance
            .checked_sub(amount)
            .expect("Counterparty has insufficient balance");

        // `new_claimed_if_default` only attaches the PDA claim when the
        // account is still in its default (unclaimed) state.
        let pda_post = AccountPostState::new_claimed_if_default(pda_account, Claim::Pda(pda_seed));
        let counterparty_post = AccountPostState::new(counterparty_account);

        ProgramOutput::new(
            self_program_id,
            caller_program_id,
            instruction_words,
            pre_states,
            vec![pda_post, counterparty_post],
        )
        .write();
    } else {
        // Spend: decrease PDA balance (owned by this program), increase counterparty.
        // Chain to noop with pda_seeds to establish the mask-3 binding for the
        // existing PDA. The noop's pre_states must match our post_states.
        // Authorization is enforced by the circuit's binding check, not here.
        let mut pda_account = pda_pre.account.clone();
        let mut counterparty_account = counterparty_pre.account.clone();

        pda_account.balance = pda_account
            .balance
            .checked_sub(amount)
            .expect("PDA has insufficient balance");
        counterparty_account.balance = counterparty_account
            .balance
            .checked_add(amount)
            .expect("Counterparty balance overflow");

        let pda_post = AccountPostState::new(pda_account.clone());
        let counterparty_post = AccountPostState::new(counterparty_account.clone());

        // Chain to noop solely to establish the mask-3 binding via pda_seeds.
        // The noop's pre-states are built from our post-state values so the
        // two executions line up account-for-account.
        let mut noop_pda_pre = pda_pre;
        noop_pda_pre.account = pda_account;
        noop_pda_pre.is_authorized = true;

        let mut noop_counterparty_pre = counterparty_pre;
        noop_counterparty_pre.account = counterparty_account;

        let noop_call = ChainedCall::new(noop_id, vec![noop_pda_pre, noop_counterparty_pre], &())
            .with_pda_seeds(vec![pda_seed]);

        ProgramOutput::new(
            self_program_id,
            caller_program_id,
            instruction_words,
            pre_states,
            vec![pda_post, counterparty_post],
        )
        .with_chained_calls(vec![noop_call])
        .write();
    }
}

295
wallet/src/cli/group.rs Normal file
View File

@ -0,0 +1,295 @@
use anyhow::{Context as _, Result};
use clap::Subcommand;
use key_protocol::key_management::group_key_holder::GroupKeyHolder;
use nssa::AccountId;
use nssa_core::program::PdaSeed;
use crate::{
WalletCore,
cli::{SubcommandReturnValue, WalletSubcommand},
};
/// Group PDA management commands.
//
// NOTE: the `///` doc comments on the variants and fields below double as the
// clap-generated CLI help text, so keep them phrased for end users and treat
// any change to them as a user-visible change.
#[derive(Subcommand, Debug, Clone)]
pub enum GroupSubcommand {
    /// Create a new group with a fresh random GMS.
    New {
        /// Human-readable name for the group.
        name: String,
    },
    /// Import a group from raw GMS bytes.
    Import {
        /// Human-readable name for the group.
        name: String,
        /// Raw GMS as 64-character hex string.
        #[arg(long)]
        gms: String,
        /// Epoch (defaults to 0).
        #[arg(long, default_value = "0")]
        epoch: u32,
    },
    /// Export the raw GMS hex for backup or manual distribution.
    Export {
        /// Group name.
        name: String,
    },
    /// List all groups with their epochs.
    #[command(visible_alias = "ls")]
    List,
    /// Derive keys for a PDA seed and show the resulting AccountId.
    Derive {
        /// Group name.
        name: String,
        /// PDA seed as 64-character hex string.
        #[arg(long)]
        seed: String,
        /// Program ID as hex string (u32x8 little-endian).
        #[arg(long)]
        program_id: String,
    },
    /// Remove a group from the wallet.
    Remove {
        /// Group name.
        name: String,
    },
    // Invite/Join are the two halves of GMS distribution: `invite` seals the
    // group's GMS to a member's viewing public key; `join` unseals it with the
    // member's viewing secret key and stores the group locally.
    /// Seal the group's GMS for a recipient (invite).
    Invite {
        /// Group name.
        name: String,
        /// Recipient's viewing public key as hex string.
        #[arg(long)]
        vpk: String,
    },
    /// Unseal a received GMS and store it (join a group).
    Join {
        /// Human-readable name to store the group under.
        name: String,
        /// Sealed GMS as hex string (from the inviter).
        #[arg(long)]
        sealed: String,
        /// Account label or Private/<id> whose VSK to use for decryption.
        #[arg(long)]
        account: String,
    },
    /// Ratchet the GMS to exclude removed members.
    Ratchet {
        /// Group name.
        name: String,
    },
}
impl WalletSubcommand for GroupSubcommand {
    /// Dispatch a single `group` subcommand against the wallet state.
    ///
    /// Mutating commands (`New`, `Import`, `Remove`, `Join`, `Ratchet`)
    /// persist the updated wallet via `store_persistent_data` before
    /// returning; the remaining commands only read state and print to stdout.
    /// All arms return `SubcommandReturnValue::Empty`.
    async fn handle_subcommand(
        self,
        wallet_core: &mut WalletCore,
    ) -> Result<SubcommandReturnValue> {
        match self {
            Self::New { name } => {
                // Refuse to silently overwrite an existing group's key material.
                if wallet_core
                    .storage()
                    .user_data
                    .get_group_key_holder(&name)
                    .is_some()
                {
                    anyhow::bail!("Group '{name}' already exists");
                }
                let holder = GroupKeyHolder::new();
                wallet_core
                    .storage_mut()
                    .user_data
                    .insert_group_key_holder(name.clone(), holder);
                wallet_core.store_persistent_data().await?;
                println!("Created group '{name}' at epoch 0");
                Ok(SubcommandReturnValue::Empty)
            }
            Self::Import { name, gms, epoch } => {
                // Same duplicate-name guard as `New`.
                if wallet_core
                    .storage()
                    .user_data
                    .get_group_key_holder(&name)
                    .is_some()
                {
                    anyhow::bail!("Group '{name}' already exists");
                }
                // Decode and length-check in one step: try_into fails unless
                // the decoded payload is exactly 32 bytes.
                let gms_bytes: [u8; 32] = hex::decode(&gms)
                    .context("Invalid GMS hex")?
                    .try_into()
                    .map_err(|_| anyhow::anyhow!("GMS must be exactly 32 bytes"))?;
                let holder = GroupKeyHolder::from_gms_and_epoch(gms_bytes, epoch);
                wallet_core
                    .storage_mut()
                    .user_data
                    .insert_group_key_holder(name.clone(), holder);
                wallet_core.store_persistent_data().await?;
                println!("Imported group '{name}' at epoch {epoch}");
                Ok(SubcommandReturnValue::Empty)
            }
            Self::Export { name } => {
                let holder = wallet_core
                    .storage()
                    .user_data
                    .get_group_key_holder(&name)
                    .context(format!("Group '{name}' not found"))?;
                // Prints raw secret key material (hence `dangerous_` naming) —
                // intended for backup or manual distribution only.
                let gms_hex = hex::encode(holder.dangerous_raw_gms());
                let epoch = holder.epoch();
                println!("Group: {name}");
                println!("Epoch: {epoch}");
                println!("GMS: {gms_hex}");
                Ok(SubcommandReturnValue::Empty)
            }
            Self::List => {
                let holders = &wallet_core.storage().user_data.group_key_holders;
                if holders.is_empty() {
                    println!("No groups found");
                } else {
                    for (name, holder) in holders {
                        println!("{name} (epoch {})", holder.epoch());
                    }
                }
                Ok(SubcommandReturnValue::Empty)
            }
            Self::Derive {
                name,
                seed,
                program_id,
            } => {
                let holder = wallet_core
                    .storage()
                    .user_data
                    .get_group_key_holder(&name)
                    .context(format!("Group '{name}' not found"))?;
                let seed_bytes: [u8; 32] = hex::decode(&seed)
                    .context("Invalid seed hex")?
                    .try_into()
                    .map_err(|_| anyhow::anyhow!("Seed must be exactly 32 bytes"))?;
                let pda_seed = PdaSeed::new(seed_bytes);
                // The program ID is entered as 32 hex-encoded bytes and
                // repacked into eight little-endian u32 limbs (ProgramId's
                // in-memory layout).
                let pid_bytes =
                    hex::decode(&program_id).context("Invalid program ID hex")?;
                if pid_bytes.len() != 32 {
                    anyhow::bail!("Program ID must be exactly 32 bytes");
                }
                let mut pid: nssa_core::program::ProgramId = [0; 8];
                for (i, chunk) in pid_bytes.chunks_exact(4).enumerate() {
                    // chunks_exact(4) guarantees 4-byte chunks, so this
                    // try_into cannot fail.
                    pid[i] = u32::from_le_bytes(chunk.try_into().unwrap());
                }
                let keys = holder.derive_keys_for_pda(&pda_seed);
                let npk = keys.generate_nullifier_public_key();
                let vpk = keys.generate_viewing_public_key();
                let account_id = AccountId::for_private_pda(&pid, &pda_seed, &npk);
                println!("Group: {name}");
                println!("NPK: {}", hex::encode(npk.0));
                println!("VPK: {}", hex::encode(&vpk.0));
                println!("AccountId: {account_id}");
                Ok(SubcommandReturnValue::Empty)
            }
            Self::Remove { name } => {
                if wallet_core
                    .storage_mut()
                    .user_data
                    .group_key_holders
                    .remove(&name)
                    .is_none()
                {
                    anyhow::bail!("Group '{name}' not found");
                }
                wallet_core.store_persistent_data().await?;
                println!("Removed group '{name}'");
                Ok(SubcommandReturnValue::Empty)
            }
            Self::Invite { name, vpk } => {
                let holder = wallet_core
                    .storage()
                    .user_data
                    .get_group_key_holder(&name)
                    .context(format!("Group '{name}' not found"))?;
                // NOTE(review): the decoded VPK length is not validated here;
                // presumably `Secp256k1Point` / `seal_for` rejects malformed
                // points downstream — confirm.
                let vpk_bytes = hex::decode(&vpk).context("Invalid VPK hex")?;
                let recipient_vpk =
                    nssa_core::encryption::shared_key_derivation::Secp256k1Point(vpk_bytes);
                let sealed = holder.seal_for(&recipient_vpk);
                println!("{}", hex::encode(&sealed));
                Ok(SubcommandReturnValue::Empty)
            }
            Self::Join {
                name,
                sealed,
                account,
            } => {
                if wallet_core
                    .storage()
                    .user_data
                    .get_group_key_holder(&name)
                    .is_some()
                {
                    anyhow::bail!("Group '{name}' already exists");
                }
                let sealed_bytes = hex::decode(&sealed).context("Invalid sealed hex")?;
                // Resolve the account to get the VSK
                // NOTE(review): the help text advertises "Account label or
                // Private/<id>", but this only parses an AccountId — confirm
                // label resolution is intentionally unsupported here.
                let account_id: nssa::AccountId = account
                    .parse()
                    .context("Invalid account ID (use Private/<base58>)")?;
                let (keychain, _) = wallet_core
                    .storage()
                    .user_data
                    .get_private_account(account_id)
                    .context("Private account not found")?;
                let vsk = keychain.private_key_holder.viewing_secret_key;
                let holder = GroupKeyHolder::unseal(&sealed_bytes, &vsk)
                    .map_err(|e| anyhow::anyhow!("Failed to unseal: {e:?}"))?;
                let epoch = holder.epoch();
                wallet_core
                    .storage_mut()
                    .user_data
                    .insert_group_key_holder(name.clone(), holder);
                wallet_core.store_persistent_data().await?;
                println!("Joined group '{name}' at epoch {epoch}");
                Ok(SubcommandReturnValue::Empty)
            }
            Self::Ratchet { name } => {
                let holder = wallet_core
                    .storage_mut()
                    .user_data
                    .group_key_holders
                    .get_mut(&name)
                    .context(format!("Group '{name}' not found"))?;
                // Fresh OS-provided randomness for the ratchet salt; the old
                // GMS becomes unrecoverable once ratcheted.
                let mut salt = [0_u8; 32];
                rand::RngCore::fill_bytes(&mut rand::rngs::OsRng, &mut salt);
                holder.ratchet(salt);
                let epoch = holder.epoch();
                wallet_core.store_persistent_data().await?;
                println!("Ratcheted group '{name}' to epoch {epoch}");
                println!("Re-invite remaining members with 'group invite'");
                Ok(SubcommandReturnValue::Empty)
            }
        }
    }
}

View File

@ -414,13 +414,7 @@ impl WalletCore {
let (output, proof) = nssa::privacy_preserving_transaction::circuit::execute_and_prove( let (output, proof) = nssa::privacy_preserving_transaction::circuit::execute_and_prove(
pre_states, pre_states,
instruction_data, instruction_data,
acc_manager.visibility_mask().to_vec(), acc_manager.account_identities(),
private_account_keys
.iter()
.map(|keys| (keys.npk, keys.identifier, keys.ssk))
.collect::<Vec<_>>(),
acc_manager.private_account_auth(),
acc_manager.private_account_membership_proofs(),
&program.to_owned(), &program.to_owned(),
) )
.unwrap(); .unwrap();

View File

@ -2,7 +2,8 @@ use anyhow::Result;
use key_protocol::key_management::ephemeral_key_holder::EphemeralKeyHolder; use key_protocol::key_management::ephemeral_key_holder::EphemeralKeyHolder;
use nssa::{AccountId, PrivateKey}; use nssa::{AccountId, PrivateKey};
use nssa_core::{ use nssa_core::{
Identifier, MembershipProof, NullifierPublicKey, NullifierSecretKey, SharedSecretKey, Identifier, InputAccountIdentity, MembershipProof, NullifierPublicKey, NullifierSecretKey,
SharedSecretKey,
account::{AccountWithMetadata, Nonce}, account::{AccountWithMetadata, Nonce},
encryption::{EphemeralPublicKey, ViewingPublicKey}, encryption::{EphemeralPublicKey, ViewingPublicKey},
}; };
@ -51,7 +52,6 @@ impl PrivacyPreservingAccount {
pub struct PrivateAccountKeys { pub struct PrivateAccountKeys {
pub npk: NullifierPublicKey, pub npk: NullifierPublicKey,
pub identifier: Identifier,
pub ssk: SharedSecretKey, pub ssk: SharedSecretKey,
pub vpk: ViewingPublicKey, pub vpk: ViewingPublicKey,
pub epk: EphemeralPublicKey, pub epk: EphemeralPublicKey,
@ -67,7 +67,6 @@ enum State {
pub struct AccountManager { pub struct AccountManager {
states: Vec<State>, states: Vec<State>,
visibility_mask: Vec<u8>,
} }
impl AccountManager { impl AccountManager {
@ -75,11 +74,10 @@ impl AccountManager {
wallet: &WalletCore, wallet: &WalletCore,
accounts: Vec<PrivacyPreservingAccount>, accounts: Vec<PrivacyPreservingAccount>,
) -> Result<Self, ExecutionFailureKind> { ) -> Result<Self, ExecutionFailureKind> {
let mut pre_states = Vec::with_capacity(accounts.len()); let mut states = Vec::with_capacity(accounts.len());
let mut visibility_mask = Vec::with_capacity(accounts.len());
for account in accounts { for account in accounts {
let (state, mask) = match account { let state = match account {
PrivacyPreservingAccount::Public(account_id) => { PrivacyPreservingAccount::Public(account_id) => {
let acc = wallet let acc = wallet
.get_account_public(account_id) .get_account_public(account_id)
@ -89,13 +87,12 @@ impl AccountManager {
let sk = wallet.get_account_public_signing_key(account_id).cloned(); let sk = wallet.get_account_public_signing_key(account_id).cloned();
let account = AccountWithMetadata::new(acc.clone(), sk.is_some(), account_id); let account = AccountWithMetadata::new(acc.clone(), sk.is_some(), account_id);
(State::Public { account, sk }, 0) State::Public { account, sk }
} }
PrivacyPreservingAccount::PrivateOwned(account_id) => { PrivacyPreservingAccount::PrivateOwned(account_id) => {
let pre = private_acc_preparation(wallet, account_id).await?; let pre = private_acc_preparation(wallet, account_id, false).await?;
let mask = if pre.pre_state.is_authorized { 1 } else { 2 };
(State::Private(pre), mask) State::Private(pre)
} }
PrivacyPreservingAccount::PrivateForeign { PrivacyPreservingAccount::PrivateForeign {
npk, npk,
@ -104,6 +101,9 @@ impl AccountManager {
} => { } => {
let acc = nssa_core::account::Account::default(); let acc = nssa_core::account::Account::default();
let auth_acc = AccountWithMetadata::new(acc, false, (&npk, identifier)); let auth_acc = AccountWithMetadata::new(acc, false, (&npk, identifier));
let eph_holder = EphemeralKeyHolder::new(&npk);
let ssk = eph_holder.calculate_shared_secret_sender(&vpk);
let epk = eph_holder.generate_ephemeral_public_key();
let pre = AccountPreparedData { let pre = AccountPreparedData {
nsk: None, nsk: None,
npk, npk,
@ -111,13 +111,16 @@ impl AccountManager {
vpk, vpk,
pre_state: auth_acc, pre_state: auth_acc,
proof: None, proof: None,
ssk,
epk,
is_pda: false,
}; };
(State::Private(pre), 2) State::Private(pre)
} }
PrivacyPreservingAccount::PrivatePdaOwned(account_id) => { PrivacyPreservingAccount::PrivatePdaOwned(account_id) => {
let pre = private_acc_preparation(wallet, account_id).await?; let pre = private_acc_preparation(wallet, account_id, true).await?;
(State::Private(pre), 3) State::Private(pre)
} }
PrivacyPreservingAccount::PrivatePdaForeign { PrivacyPreservingAccount::PrivatePdaForeign {
account_id, account_id,
@ -127,6 +130,9 @@ impl AccountManager {
} => { } => {
let acc = nssa_core::account::Account::default(); let acc = nssa_core::account::Account::default();
let auth_acc = AccountWithMetadata::new(acc, false, account_id); let auth_acc = AccountWithMetadata::new(acc, false, account_id);
let eph_holder = EphemeralKeyHolder::new(&npk);
let ssk = eph_holder.calculate_shared_secret_sender(&vpk);
let epk = eph_holder.generate_ephemeral_public_key();
let pre = AccountPreparedData { let pre = AccountPreparedData {
nsk: None, nsk: None,
npk, npk,
@ -134,19 +140,18 @@ impl AccountManager {
vpk, vpk,
pre_state: auth_acc, pre_state: auth_acc,
proof: None, proof: None,
ssk,
epk,
is_pda: true,
}; };
(State::Private(pre), 3) State::Private(pre)
} }
}; };
pre_states.push(state); states.push(state);
visibility_mask.push(mask);
} }
Ok(Self { Ok(Self { states })
states: pre_states,
visibility_mask,
})
} }
pub fn pre_states(&self) -> Vec<AccountWithMetadata> { pub fn pre_states(&self) -> Vec<AccountWithMetadata> {
@ -159,10 +164,6 @@ impl AccountManager {
.collect() .collect()
} }
pub fn visibility_mask(&self) -> &[u8] {
&self.visibility_mask
}
pub fn public_account_nonces(&self) -> Vec<Nonce> { pub fn public_account_nonces(&self) -> Vec<Nonce> {
self.states self.states
.iter() .iter()
@ -177,38 +178,63 @@ impl AccountManager {
self.states self.states
.iter() .iter()
.filter_map(|state| match state { .filter_map(|state| match state {
State::Private(pre) => { State::Private(pre) => Some(PrivateAccountKeys {
let eph_holder = EphemeralKeyHolder::new(&pre.npk); npk: pre.npk,
ssk: pre.ssk,
vpk: pre.vpk.clone(),
epk: pre.epk.clone(),
}),
State::Public { .. } => None,
})
.collect()
}
Some(PrivateAccountKeys { /// Build the per-account input vec for the privacy-preserving circuit. Each variant carries
npk: pre.npk, /// exactly the fields the circuit's code path for that account needs, with the ephemeral
identifier: pre.identifier, /// keys (`ssk`) drawn from the cached values that `private_account_keys` and the message
ssk: eph_holder.calculate_shared_secret_sender(&pre.vpk), /// construction also use, so all three views agree on the same ephemeral key.
vpk: pre.vpk.clone(), pub fn account_identities(&self) -> Vec<InputAccountIdentity> {
epk: eph_holder.generate_ephemeral_public_key(), self.states
}) .iter()
.map(|state| match state {
State::Public { .. } => InputAccountIdentity::Public,
State::Private(pre) if pre.is_pda => {
match (pre.nsk, pre.proof.clone()) {
(Some(nsk), Some(membership_proof)) => {
InputAccountIdentity::PrivatePdaUpdate {
ssk: pre.ssk,
nsk,
membership_proof,
identifier: pre.identifier,
}
}
_ => InputAccountIdentity::PrivatePdaInit {
npk: pre.npk,
ssk: pre.ssk,
identifier: pre.identifier,
},
}
} }
State::Public { .. } => None, State::Private(pre) => match (pre.nsk, pre.proof.clone()) {
}) (Some(nsk), Some(membership_proof)) => {
.collect() InputAccountIdentity::PrivateAuthorizedUpdate {
} ssk: pre.ssk,
nsk,
pub fn private_account_auth(&self) -> Vec<NullifierSecretKey> { membership_proof,
self.states identifier: pre.identifier,
.iter() }
.filter_map(|state| match state { }
State::Private(pre) => pre.nsk, (Some(nsk), None) => InputAccountIdentity::PrivateAuthorizedInit {
State::Public { .. } => None, ssk: pre.ssk,
}) nsk,
.collect() identifier: pre.identifier,
} },
(None, _) => InputAccountIdentity::PrivateUnauthorized {
pub fn private_account_membership_proofs(&self) -> Vec<Option<MembershipProof>> { npk: pre.npk,
self.states ssk: pre.ssk,
.iter() identifier: pre.identifier,
.filter_map(|state| match state { },
State::Private(pre) => Some(pre.proof.clone()), },
State::Public { .. } => None,
}) })
.collect() .collect()
} }
@ -241,11 +267,22 @@ struct AccountPreparedData {
vpk: ViewingPublicKey, vpk: ViewingPublicKey,
pre_state: AccountWithMetadata, pre_state: AccountWithMetadata,
proof: Option<MembershipProof>, proof: Option<MembershipProof>,
/// Cached shared-secret key derived once at `AccountManager::new`. Reused for both the
/// circuit input variant (`account_identities()`) and the message ephemeral-key tuples
/// (`private_account_keys()`), so all consumers see the same key. The corresponding
/// `EphemeralKeyHolder` uses `OsRng` and would produce a different value on a second call.
ssk: SharedSecretKey,
/// Cached ephemeral public key, paired with `ssk`.
epk: EphemeralPublicKey,
/// True when this account is a private PDA (owned or foreign). Used by `account_identities()`
/// to select `PrivatePdaInit`/`PrivatePdaUpdate` rather than the standalone private variants.
is_pda: bool,
} }
async fn private_acc_preparation( async fn private_acc_preparation(
wallet: &WalletCore, wallet: &WalletCore,
account_id: AccountId, account_id: AccountId,
is_pda: bool,
) -> Result<AccountPreparedData, ExecutionFailureKind> { ) -> Result<AccountPreparedData, ExecutionFailureKind> {
let Some((from_keys, from_acc, from_identifier)) = let Some((from_keys, from_acc, from_identifier)) =
wallet.storage.user_data.get_private_account(account_id) wallet.storage.user_data.get_private_account(account_id)
@ -268,6 +305,10 @@ async fn private_acc_preparation(
// support from that in the wallet. // support from that in the wallet.
let sender_pre = AccountWithMetadata::new(from_acc.clone(), true, account_id); let sender_pre = AccountWithMetadata::new(from_acc.clone(), true, account_id);
let eph_holder = EphemeralKeyHolder::new(&from_npk);
let ssk = eph_holder.calculate_shared_secret_sender(&from_vpk);
let epk = eph_holder.generate_ephemeral_public_key();
Ok(AccountPreparedData { Ok(AccountPreparedData {
nsk: Some(nsk), nsk: Some(nsk),
npk: from_npk, npk: from_npk,
@ -275,5 +316,9 @@ async fn private_acc_preparation(
vpk: from_vpk, vpk: from_vpk,
pre_state: sender_pre, pre_state: sender_pre,
proof, proof,
ssk,
epk,
is_pda,
}) })
} }