Merge branch 'main' into Pravdyvy/indexer-query-api

This commit is contained in:
Pravdyvy 2026-05-05 14:39:14 +03:00 committed by GitHub
commit f3472ce87a
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
134 changed files with 6182 additions and 3341 deletions

View File

@ -225,7 +225,7 @@ jobs:
- uses: ./.github/actions/install-risc0
- name: Install just
run: cargo install just
run: cargo install --locked just
- name: Build artifacts
run: just build-artifacts

1674
Cargo.lock generated

File diff suppressed because it is too large Load Diff

View File

@ -36,7 +36,6 @@ members = [
"examples/program_deployment",
"examples/program_deployment/methods",
"examples/program_deployment/methods/guest",
"bedrock_client",
"testnet_initial_state",
"indexer_ffi",
]
@ -67,7 +66,6 @@ amm_program = { path = "programs/amm" }
ata_core = { path = "programs/associated_token_account/core" }
ata_program = { path = "programs/associated_token_account" }
test_program_methods = { path = "test_program_methods" }
bedrock_client = { path = "bedrock_client" }
testnet_initial_state = { path = "testnet_initial_state" }
tokio = { version = "1.50", features = [
@ -122,11 +120,12 @@ tokio-retry = "0.3.0"
schemars = "1.2"
async-stream = "0.3.6"
logos-blockchain-common-http-client = { git = "https://github.com/logos-blockchain/logos-blockchain.git", rev = "1da154c74b911318fb853d37261f8a05ffe513b4" }
logos-blockchain-key-management-system-service = { git = "https://github.com/logos-blockchain/logos-blockchain.git", rev = "1da154c74b911318fb853d37261f8a05ffe513b4" }
logos-blockchain-core = { git = "https://github.com/logos-blockchain/logos-blockchain.git", rev = "1da154c74b911318fb853d37261f8a05ffe513b4" }
logos-blockchain-chain-broadcast-service = { git = "https://github.com/logos-blockchain/logos-blockchain.git", rev = "1da154c74b911318fb853d37261f8a05ffe513b4" }
logos-blockchain-chain-service = { git = "https://github.com/logos-blockchain/logos-blockchain.git", rev = "1da154c74b911318fb853d37261f8a05ffe513b4" }
logos-blockchain-common-http-client = { git = "https://github.com/logos-blockchain/logos-blockchain.git", rev = "ee281a447d95a951752461ee0a6e88eb4a0f17cf" }
logos-blockchain-key-management-system-service = { git = "https://github.com/logos-blockchain/logos-blockchain.git", rev = "ee281a447d95a951752461ee0a6e88eb4a0f17cf" }
logos-blockchain-core = { git = "https://github.com/logos-blockchain/logos-blockchain.git", rev = "ee281a447d95a951752461ee0a6e88eb4a0f17cf" }
logos-blockchain-chain-broadcast-service = { git = "https://github.com/logos-blockchain/logos-blockchain.git", rev = "ee281a447d95a951752461ee0a6e88eb4a0f17cf" }
logos-blockchain-chain-service = { git = "https://github.com/logos-blockchain/logos-blockchain.git", rev = "ee281a447d95a951752461ee0a6e88eb4a0f17cf" }
logos-blockchain-zone-sdk = { git = "https://github.com/logos-blockchain/logos-blockchain.git", rev = "ee281a447d95a951752461ee0a6e88eb4a0f17cf" }
rocksdb = { version = "0.24.0", default-features = false, features = [
"snappy",

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

View File

@ -39,42 +39,42 @@ cryptarchia:
threshold: 1
timestamp: 0
gossipsub_protocol: /integration/logos-blockchain/cryptarchia/proto/1.0.0
genesis_state:
mantle_tx:
ops:
genesis_block:
header:
version: Bedrock
parent_block: '0000000000000000000000000000000000000000000000000000000000000000'
slot: 0
block_root: b5f8787ac23674822414c70eea15d842da38f2e806ede1a73cf7b5cf0277da07
proof_of_leadership:
proof: '0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000'
entropy_contribution: '0000000000000000000000000000000000000000000000000000000000000000'
leader_key: '0000000000000000000000000000000000000000000000000000000000000000'
voucher_cm: '0000000000000000000000000000000000000000000000000000000000000000'
signature: '00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000'
transactions:
- mantle_tx:
ops:
- opcode: 0
payload:
inputs: [ ]
inputs: []
outputs:
- value: 1
pk: d204000000000000000000000000000000000000000000000000000000000000
- value: 100
pk: 2e03b2eff5a45478e7e79668d2a146cf2c5c7925bce927f2b1c67f2ab4fc0d26
- value: 1
pk: d204000000000000000000000000000000000000000000000000000000000000
- value: 100
pk: '2e03b2eff5a45478e7e79668d2a146cf2c5c7925bce927f2b1c67f2ab4fc0d26'
- value: 1
pk: ed266e6e887b9b97059dc1aa1b7b2e19b934291753c6336a163fe4ebaa28e717
- opcode: 17
payload:
channel_id: "0000000000000000000000000000000000000000000000000000000000000000"
inscription: [ 103, 101, 110, 101, 115, 105, 115 ] # "genesis" in bytes
parent: "0000000000000000000000000000000000000000000000000000000000000000"
signer: "0000000000000000000000000000000000000000000000000000000000000000"
execution_gas_price: 0
storage_gas_price: 0
ops_proofs:
- !ZkSig
pi_a: [
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
]
pi_b: [
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
]
pi_c: [
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
]
- NoProof
channel_id: '0000000000000000000000000000000000000000000000000000000000000000'
inscription: '67656e65736973'
parent: '0000000000000000000000000000000000000000000000000000000000000000'
signer: '0000000000000000000000000000000000000000000000000000000000000000'
execution_gas_price: 0
storage_gas_price: 0
ops_proofs:
- !Ed25519Sig '00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000'
- !Ed25519Sig '00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000'
time:
slot_duration: '1.0'
chain_start_time: PLACEHOLDER_CHAIN_START_TIME

View File

@ -1,7 +1,7 @@
services:
logos-blockchain-node-0:
image: ghcr.io/logos-blockchain/logos-blockchain@sha256:c5243681b353278cabb562a176f0a5cfbefc2056f18cebc47fe0e3720c29fb12
image: ghcr.io/logos-blockchain/logos-blockchain@sha256:9f1829dea335c56f6ff68ae37ea872ed5313b96b69e8ffe143c02b7217de85fc
ports:
- "${PORT:-8080}:18080/tcp"
volumes:

View File

@ -1,23 +0,0 @@
[package]
name = "bedrock_client"
version = "0.1.0"
edition = "2024"
license = { workspace = true }
[lints]
workspace = true
[dependencies]
common.workspace = true
reqwest.workspace = true
anyhow.workspace = true
tokio-retry.workspace = true
futures.workspace = true
log.workspace = true
serde.workspace = true
humantime-serde.workspace = true
logos-blockchain-common-http-client.workspace = true
logos-blockchain-core.workspace = true
logos-blockchain-chain-broadcast-service.workspace = true
logos-blockchain-chain-service.workspace = true

View File

@ -1,121 +0,0 @@
use std::time::Duration;
use anyhow::{Context as _, Result};
use common::config::BasicAuth;
use futures::{Stream, TryFutureExt as _};
#[expect(clippy::single_component_path_imports, reason = "Satisfy machete")]
use humantime_serde;
use log::{info, warn};
pub use logos_blockchain_chain_broadcast_service::BlockInfo;
use logos_blockchain_chain_service::CryptarchiaInfo;
pub use logos_blockchain_common_http_client::{CommonHttpClient, Error};
pub use logos_blockchain_core::{block::Block, header::HeaderId, mantle::SignedMantleTx};
use reqwest::{Client, Url};
use serde::{Deserialize, Serialize};
use tokio_retry::Retry;
/// Fibonacci backoff retry strategy configuration.
///
/// `start_delay` seeds the Fibonacci delay sequence and `max_retries` caps
/// how many retry attempts are made (see `BedrockClient::backoff_strategy`,
/// which builds the delay iterator from these two fields).
#[derive(Debug, Copy, Clone, Serialize, Deserialize)]
pub struct BackoffConfig {
    /// Delay before the first retry; (de)serialized in human-readable form
    /// (e.g. "100ms") via `humantime_serde`.
    #[serde(with = "humantime_serde")]
    pub start_delay: Duration,
    /// Maximum number of retries performed after the initial attempt.
    pub max_retries: usize,
}
/// Defaults: 100 ms initial delay, at most 5 retries.
impl Default for BackoffConfig {
    fn default() -> Self {
        let start_delay = Duration::from_millis(100);
        let max_retries = 5;
        Self {
            start_delay,
            max_retries,
        }
    }
}
/// Simple wrapper around the node's `CommonHttpClient`, bundling the target
/// node URL with a retry/backoff policy; may be extended in the future for
/// our purposes.
///
/// `Clone` is cheap because `CommonHttpClient` is internally reference counted (`Arc`).
#[derive(Clone)]
pub struct BedrockClient {
    // Shared HTTP client used for all requests to the node.
    http_client: CommonHttpClient,
    // Base URL of the Bedrock node every request is sent to.
    node_url: Url,
    // Retry policy consumed by `backoff_strategy`.
    backoff: BackoffConfig,
}
impl BedrockClient {
pub fn new(backoff: BackoffConfig, node_url: Url, auth: Option<BasicAuth>) -> Result<Self> {
info!("Creating Bedrock client with node URL {node_url}");
let client = Client::builder()
//Add more fields if needed
.timeout(std::time::Duration::from_mins(1))
.build()
.context("Failed to build HTTP client")?;
let auth = auth.map(|a| {
logos_blockchain_common_http_client::BasicAuthCredentials::new(a.username, a.password)
});
let http_client = CommonHttpClient::new_with_client(client, auth);
Ok(Self {
http_client,
node_url,
backoff,
})
}
pub async fn post_transaction(&self, tx: SignedMantleTx) -> Result<Result<(), Error>, Error> {
Retry::spawn(self.backoff_strategy(), || async {
match self
.http_client
.post_transaction(self.node_url.clone(), tx.clone())
.await
{
Ok(()) => Ok(Ok(())),
Err(err) => match err {
// Retry arm.
// Retrying only reqwest errors: mainly connected to http.
Error::Request(_) => Err(err),
// Returning non-retryable error
Error::Server(_) | Error::Client(_) | Error::Url(_) => Ok(Err(err)),
},
}
})
.await
}
pub async fn get_lib_stream(&self) -> Result<impl Stream<Item = BlockInfo>, Error> {
self.http_client.get_lib_stream(self.node_url.clone()).await
}
pub async fn get_block_by_id(
&self,
header_id: HeaderId,
) -> Result<Option<Block<SignedMantleTx>>, Error> {
Retry::spawn(self.backoff_strategy(), || {
self.http_client
.get_block_by_id(self.node_url.clone(), header_id)
.inspect_err(|err| warn!("Block fetching failed with error: {err:#}"))
})
.await
}
pub async fn get_consensus_info(&self) -> Result<CryptarchiaInfo, Error> {
Retry::spawn(self.backoff_strategy(), || {
self.http_client
.consensus_info(self.node_url.clone())
.inspect_err(|err| warn!("Block fetching failed with error: {err:#}"))
})
.await
}
fn backoff_strategy(&self) -> impl Iterator<Item = Duration> {
let start_delay_millis = self
.backoff
.start_delay
.as_millis()
.try_into()
.expect("Start delay must be less than u64::MAX milliseconds");
tokio_retry::strategy::FibonacciBackoff::from_millis(start_delay_millis)
.take(self.backoff.max_retries)
}
}

View File

@ -85,9 +85,20 @@ impl HashableBlockData {
signing_key: &nssa::PrivateKey,
bedrock_parent_id: MantleMsgId,
) -> Block {
const PREFIX: &[u8; 32] = b"/LEE/v0.3/Message/Block/\x00\x00\x00\x00\x00\x00\x00\x00";
let data_bytes = borsh::to_vec(&self).unwrap();
let signature = nssa::Signature::new(signing_key, &data_bytes);
let hash = OwnHasher::hash(&data_bytes);
let mut bytes = Vec::with_capacity(
PREFIX
.len()
.checked_add(data_bytes.len())
.expect("length overflow"),
);
bytes.extend_from_slice(PREFIX);
bytes.extend_from_slice(&data_bytes);
let hash = OwnHasher::hash(&bytes);
let signature = nssa::Signature::new(signing_key, &hash.0);
Block {
header: BlockHeader {
block_id: self.block_id,
@ -103,11 +114,6 @@ impl HashableBlockData {
bedrock_parent_id,
}
}
#[must_use]
pub fn block_hash(&self) -> BlockHash {
OwnHasher::hash(&borsh::to_vec(&self).unwrap())
}
}
impl From<Block> for HashableBlockData {

View File

@ -93,6 +93,12 @@ Only `Public/2gJJjtG9UivBGEhA1Jz6waZQx1cwfYupC5yvKEweHaeH` is used for completio
exec zsh
```
> **Note:** After updating the completion script, re-run step 1 to copy the new file, then rebuild the cache:
> ```sh
> cp _wallet ~/.oh-my-zsh/custom/plugins/wallet/
> rm -rf ~/.zcompdump* && exec zsh
> ```
### Requirements
The completion script calls `wallet account list` to dynamically fetch account IDs. Ensure the `wallet` command is in your `$PATH`.
@ -197,8 +203,7 @@ wallet account get --account-id <TAB>
2. Rebuild the completion cache:
```sh
rm -f ~/.zcompdump*
exec zsh
rm -rf ~/.zcompdump* && exec zsh
```
### Account IDs not completing

View File

@ -46,7 +46,7 @@ _wallet() {
cword=$COMP_CWORD
}
local commands="auth-transfer chain-info account pinata token amm check-health config restore-keys deploy-program help"
local commands="auth-transfer chain-info account pinata token amm ata check-health config restore-keys deploy-program help"
# Find the main command and subcommand by scanning words before the cursor.
# Global options that take a value are skipped along with their argument.
@ -127,10 +127,10 @@ _wallet() {
--to-label)
_wallet_complete_account_label "$cur"
;;
--to-npk | --to-vpk | --amount)
--to-npk | --to-vpk | --to-identifier | --amount)
;; # no specific completion
*)
COMPREPLY=($(compgen -W "--from --from-label --to --to-label --to-npk --to-vpk --amount" -- "$cur"))
COMPREPLY=($(compgen -W "--from --from-label --to --to-label --to-npk --to-vpk --to-identifier --amount" -- "$cur"))
;;
esac
;;
@ -187,11 +187,11 @@ _wallet() {
sync-private)
;; # no options
new)
# `account new` is itself a subcommand: public | private
# `account new` is itself a subcommand: public | private-accounts-key
local new_subcmd=""
for ((i = subcmd_idx + 1; i < cword; i++)); do
case "${words[$i]}" in
public | private)
public | private-accounts-key)
new_subcmd="${words[$i]}"
break
;;
@ -199,13 +199,26 @@ _wallet() {
done
if [[ -z "$new_subcmd" ]]; then
COMPREPLY=($(compgen -W "public private" -- "$cur"))
COMPREPLY=($(compgen -W "public private-accounts-key" -- "$cur"))
else
case "$prev" in
--cci | -l | --label)
;; # no specific completion
*)
COMPREPLY=($(compgen -W "--cci -l --label" -- "$cur"))
case "$new_subcmd" in
public)
case "$prev" in
--cci | -l | --label)
;; # no specific completion
*)
COMPREPLY=($(compgen -W "--cci -l --label" -- "$cur"))
;;
esac
;;
private-accounts-key)
case "$prev" in
--cci)
;; # no specific completion
*)
COMPREPLY=($(compgen -W "--cci" -- "$cur"))
;;
esac
;;
esac
fi
@ -289,10 +302,10 @@ _wallet() {
--to-label)
_wallet_complete_account_label "$cur"
;;
--to-npk | --to-vpk | --amount)
--to-npk | --to-vpk | --to-identifier | --amount)
;; # no specific completion
*)
COMPREPLY=($(compgen -W "--from --from-label --to --to-label --to-npk --to-vpk --amount" -- "$cur"))
COMPREPLY=($(compgen -W "--from --from-label --to --to-label --to-npk --to-vpk --to-identifier --amount" -- "$cur"))
;;
esac
;;
@ -331,10 +344,10 @@ _wallet() {
--holder-label)
_wallet_complete_account_label "$cur"
;;
--holder-npk | --holder-vpk | --amount)
--holder-npk | --holder-vpk | --holder-identifier | --amount)
;; # no specific completion
*)
COMPREPLY=($(compgen -W "--definition --definition-label --holder --holder-label --holder-npk --holder-vpk --amount" -- "$cur"))
COMPREPLY=($(compgen -W "--definition --definition-label --holder --holder-label --holder-npk --holder-vpk --holder-identifier --amount" -- "$cur"))
;;
esac
;;
@ -344,7 +357,7 @@ _wallet() {
amm)
case "$subcmd" in
"")
COMPREPLY=($(compgen -W "new swap add-liquidity remove-liquidity help" -- "$cur"))
COMPREPLY=($(compgen -W "new swap-exact-input swap-exact-output add-liquidity remove-liquidity help" -- "$cur"))
;;
new)
case "$prev" in
@ -373,7 +386,7 @@ _wallet() {
;;
esac
;;
swap)
swap-exact-input)
case "$prev" in
--user-holding-a)
_wallet_complete_account_id "$cur"
@ -394,6 +407,15 @@ _wallet() {
;;
esac
;;
swap-exact-output)
case "$prev" in
--user-holding-a | --user-holding-b | --exact-amount-out | --max-amount-in | --token-definition)
;; # no specific completion
*)
COMPREPLY=($(compgen -W "--user-holding-a --user-holding-b --exact-amount-out --max-amount-in --token-definition" -- "$cur"))
;;
esac
;;
add-liquidity)
case "$prev" in
--user-holding-a)
@ -451,6 +473,68 @@ _wallet() {
esac
;;
ata)
case "$subcmd" in
"")
COMPREPLY=($(compgen -W "address create send burn list help" -- "$cur"))
;;
address)
case "$prev" in
--owner | --token-definition)
;; # no specific completion
*)
COMPREPLY=($(compgen -W "--owner --token-definition" -- "$cur"))
;;
esac
;;
create)
case "$prev" in
--owner)
_wallet_complete_account_id "$cur"
;;
--token-definition)
;; # no specific completion
*)
COMPREPLY=($(compgen -W "--owner --token-definition" -- "$cur"))
;;
esac
;;
send)
case "$prev" in
--from)
_wallet_complete_account_id "$cur"
;;
--to | --token-definition | --amount)
;; # no specific completion
*)
COMPREPLY=($(compgen -W "--from --token-definition --to --amount" -- "$cur"))
;;
esac
;;
burn)
case "$prev" in
--holder)
_wallet_complete_account_id "$cur"
;;
--token-definition | --amount)
;; # no specific completion
*)
COMPREPLY=($(compgen -W "--holder --token-definition --amount" -- "$cur"))
;;
esac
;;
list)
case "$prev" in
--owner | --token-definition)
;; # no specific completion
*)
COMPREPLY=($(compgen -W "--owner --token-definition" -- "$cur"))
;;
esac
;;
esac
;;
config)
case "$subcmd" in
"")

View File

@ -24,6 +24,7 @@ _wallet() {
'pinata:Pinata program interaction subcommand'
'token:Token program interaction subcommand'
'amm:AMM program interaction subcommand'
'ata:Associated Token Account program interaction subcommand'
'check-health:Check the wallet can connect to the node and builtin local programs match the remote versions'
'config:Command to setup config, get and set config fields'
'restore-keys:Restoring keys from given password at given depth'
@ -52,6 +53,9 @@ _wallet() {
amm)
_wallet_amm
;;
ata)
_wallet_ata
;;
config)
_wallet_config
;;
@ -72,7 +76,7 @@ _wallet() {
# auth-transfer subcommand
_wallet_auth_transfer() {
local -a subcommands
_arguments -C \
'1: :->subcommand' \
'*:: :->args'
@ -91,16 +95,17 @@ _wallet_auth_transfer() {
init)
_arguments \
'--account-id[Account ID to initialize]:account_id:_wallet_account_ids' \
'--account-label[Account label (alternative to --account-id)]:label:_wallet_account_labels'
'--account-label[Account label (alternative to --account-id)]:label:'
;;
send)
_arguments \
'--from[Source account ID]:from_account:_wallet_account_ids' \
'--from-label[Source account label (alternative to --from)]:label:_wallet_account_labels' \
'--from-label[From account label (alternative to --from)]:label:' \
'--to[Destination account ID (for owned accounts)]:to_account:_wallet_account_ids' \
'--to-label[Destination account label (alternative to --to)]:label:_wallet_account_labels' \
'--to-label[To account label (alternative to --to)]:label:' \
'--to-npk[Destination nullifier public key (for foreign private accounts)]:npk:' \
'--to-vpk[Destination viewing public key (for foreign private accounts)]:vpk:' \
'--to-identifier[Identifier for the recipient private account]:identifier:' \
'--amount[Amount of native tokens to send]:amount:'
;;
esac
@ -111,7 +116,7 @@ _wallet_auth_transfer() {
# chain-info subcommand
_wallet_chain_info() {
local -a subcommands
_arguments -C \
'1: :->subcommand' \
'*:: :->args'
@ -144,7 +149,7 @@ _wallet_chain_info() {
# account subcommand
_wallet_account() {
local -a subcommands
_arguments -C \
'1: :->subcommand' \
'*:: :->args'
@ -169,7 +174,7 @@ _wallet_account() {
'(-r --raw)'{-r,--raw}'[Get raw account data]' \
'(-k --keys)'{-k,--keys}'[Display keys (pk for public accounts, npk/vpk for private accounts)]' \
'(-a --account-id)'{-a,--account-id}'[Account ID to query]:account_id:_wallet_account_ids' \
'--account-label[Account label (alternative to --account-id)]:label:_wallet_account_labels'
'--account-label[Account label (alternative to --account-id)]:label:'
;;
list|ls)
_arguments \
@ -181,19 +186,27 @@ _wallet_account() {
'*:: :->new_args'
case $state in
account_type)
compadd public private
compadd public private-accounts-key
;;
new_args)
_arguments \
'--cci[Chain index of a parent node]:chain_index:' \
'(-l --label)'{-l,--label}'[Label to assign to the new account]:label:'
case $line[1] in
public)
_arguments \
'--cci[Chain index of a parent node]:chain_index:' \
'(-l --label)'{-l,--label}'[Label to assign to the new account]:label:'
;;
private-accounts-key)
_arguments \
'--cci[Chain index of a parent node]:chain_index:'
;;
esac
;;
esac
;;
label)
_arguments \
'(-a --account-id)'{-a,--account-id}'[Account ID to label]:account_id:_wallet_account_ids' \
'--account-label[Account label (alternative to --account-id)]:label:_wallet_account_labels' \
'--account-label[Account label (alternative to --account-id)]:label:' \
'(-l --label)'{-l,--label}'[The label to assign to the account]:label:'
;;
esac
@ -204,7 +217,7 @@ _wallet_account() {
# pinata subcommand
_wallet_pinata() {
local -a subcommands
_arguments -C \
'1: :->subcommand' \
'*:: :->args'
@ -222,7 +235,7 @@ _wallet_pinata() {
claim)
_arguments \
'--to[Destination account ID to receive claimed tokens]:to_account:_wallet_account_ids' \
'--to-label[Destination account label (alternative to --to)]:label:_wallet_account_labels'
'--to-label[To account label (alternative to --to)]:label:'
;;
esac
;;
@ -255,36 +268,38 @@ _wallet_token() {
'--name[Token name]:name:' \
'--total-supply[Total supply of tokens to mint]:total_supply:' \
'--definition-account-id[Account ID for token definition]:definition_account:_wallet_account_ids' \
'--definition-account-label[Definition account label (alternative to --definition-account-id)]:label:_wallet_account_labels' \
'--definition-account-label[Definition account label (alternative to --definition-account-id)]:label:' \
'--supply-account-id[Account ID to receive initial supply]:supply_account:_wallet_account_ids' \
'--supply-account-label[Supply account label (alternative to --supply-account-id)]:label:_wallet_account_labels'
'--supply-account-label[Supply account label (alternative to --supply-account-id)]:label:'
;;
send)
_arguments \
'--from[Source holding account ID]:from_account:_wallet_account_ids' \
'--from-label[Source account label (alternative to --from)]:label:_wallet_account_labels' \
'--from-label[From account label (alternative to --from)]:label:' \
'--to[Destination holding account ID (for owned accounts)]:to_account:_wallet_account_ids' \
'--to-label[Destination account label (alternative to --to)]:label:_wallet_account_labels' \
'--to-label[To account label (alternative to --to)]:label:' \
'--to-npk[Destination nullifier public key (for foreign private accounts)]:npk:' \
'--to-vpk[Destination viewing public key (for foreign private accounts)]:vpk:' \
'--to-identifier[Identifier for the recipient private account]:identifier:' \
'--amount[Amount of tokens to send]:amount:'
;;
burn)
_arguments \
'--definition[Definition account ID]:definition_account:_wallet_account_ids' \
'--definition-label[Definition account label (alternative to --definition)]:label:_wallet_account_labels' \
'--definition-label[Definition account label (alternative to --definition)]:label:' \
'--holder[Holder account ID]:holder_account:_wallet_account_ids' \
'--holder-label[Holder account label (alternative to --holder)]:label:_wallet_account_labels' \
'--holder-label[Holder account label (alternative to --holder)]:label:' \
'--amount[Amount of tokens to burn]:amount:'
;;
mint)
_arguments \
'--definition[Definition account ID]:definition_account:_wallet_account_ids' \
'--definition-label[Definition account label (alternative to --definition)]:label:_wallet_account_labels' \
'--definition-label[Definition account label (alternative to --definition)]:label:' \
'--holder[Holder account ID (for owned accounts)]:holder_account:_wallet_account_ids' \
'--holder-label[Holder account label (alternative to --holder)]:label:_wallet_account_labels' \
'--holder-label[Holder account label (alternative to --holder)]:label:' \
'--holder-npk[Holder nullifier public key (for foreign private accounts)]:npk:' \
'--holder-vpk[Holder viewing public key (for foreign private accounts)]:vpk:' \
'--holder-identifier[Identifier for the holder private account]:identifier:' \
'--amount[Amount of tokens to mint]:amount:'
;;
esac
@ -295,7 +310,7 @@ _wallet_token() {
# amm subcommand
_wallet_amm() {
local -a subcommands
_arguments -C \
'1: :->subcommand' \
'*:: :->args'
@ -304,7 +319,8 @@ _wallet_amm() {
subcommand)
subcommands=(
'new:Create a new liquidity pool'
'swap:Swap tokens using the AMM'
'swap-exact-input:Swap specifying exact input amount'
'swap-exact-output:Swap specifying exact output amount'
'add-liquidity:Add liquidity to an existing pool'
'remove-liquidity:Remove liquidity from a pool'
'help:Print this message or the help of the given subcommand(s)'
@ -316,32 +332,40 @@ _wallet_amm() {
new)
_arguments \
'--user-holding-a[User token A holding account ID]:holding_a:_wallet_account_ids' \
'--user-holding-a-label[User holding A account label (alternative to --user-holding-a)]:label:_wallet_account_labels' \
'--user-holding-a-label[User holding A label (alternative to --user-holding-a)]:label:' \
'--user-holding-b[User token B holding account ID]:holding_b:_wallet_account_ids' \
'--user-holding-b-label[User holding B account label (alternative to --user-holding-b)]:label:_wallet_account_labels' \
'--user-holding-b-label[User holding B label (alternative to --user-holding-b)]:label:' \
'--user-holding-lp[User LP token holding account ID]:holding_lp:_wallet_account_ids' \
'--user-holding-lp-label[User holding LP account label (alternative to --user-holding-lp)]:label:_wallet_account_labels' \
'--user-holding-lp-label[User holding LP label (alternative to --user-holding-lp)]:label:' \
'--balance-a[Amount of token A to deposit]:balance_a:' \
'--balance-b[Amount of token B to deposit]:balance_b:'
;;
swap)
swap-exact-input)
_arguments \
'--user-holding-a[User token A holding account ID]:holding_a:_wallet_account_ids' \
'--user-holding-a-label[User holding A account label (alternative to --user-holding-a)]:label:_wallet_account_labels' \
'--user-holding-a-label[User holding A label (alternative to --user-holding-a)]:label:' \
'--user-holding-b[User token B holding account ID]:holding_b:_wallet_account_ids' \
'--user-holding-b-label[User holding B account label (alternative to --user-holding-b)]:label:_wallet_account_labels' \
'--user-holding-b-label[User holding B label (alternative to --user-holding-b)]:label:' \
'--amount-in[Amount of tokens to swap]:amount_in:' \
'--min-amount-out[Minimum tokens expected in return]:min_amount_out:' \
'--token-definition[Definition ID of the token being provided]:token_def:'
;;
swap-exact-output)
_arguments \
'--user-holding-a[User token A holding account ID]:holding_a:' \
'--user-holding-b[User token B holding account ID]:holding_b:' \
'--exact-amount-out[Exact amount of tokens expected out]:exact_amount_out:' \
'--max-amount-in[Maximum tokens to spend]:max_amount_in:' \
'--token-definition[Definition ID of the token being provided]:token_def:'
;;
add-liquidity)
_arguments \
'--user-holding-a[User token A holding account ID]:holding_a:_wallet_account_ids' \
'--user-holding-a-label[User holding A account label (alternative to --user-holding-a)]:label:_wallet_account_labels' \
'--user-holding-a-label[User holding A label (alternative to --user-holding-a)]:label:' \
'--user-holding-b[User token B holding account ID]:holding_b:_wallet_account_ids' \
'--user-holding-b-label[User holding B account label (alternative to --user-holding-b)]:label:_wallet_account_labels' \
'--user-holding-b-label[User holding B label (alternative to --user-holding-b)]:label:' \
'--user-holding-lp[User LP token holding account ID]:holding_lp:_wallet_account_ids' \
'--user-holding-lp-label[User holding LP account label (alternative to --user-holding-lp)]:label:_wallet_account_labels' \
'--user-holding-lp-label[User holding LP label (alternative to --user-holding-lp)]:label:' \
'--max-amount-a[Maximum amount of token A to deposit]:max_amount_a:' \
'--max-amount-b[Maximum amount of token B to deposit]:max_amount_b:' \
'--min-amount-lp[Minimum LP tokens to receive]:min_amount_lp:'
@ -349,11 +373,11 @@ _wallet_amm() {
remove-liquidity)
_arguments \
'--user-holding-a[User token A holding account ID]:holding_a:_wallet_account_ids' \
'--user-holding-a-label[User holding A account label (alternative to --user-holding-a)]:label:_wallet_account_labels' \
'--user-holding-a-label[User holding A label (alternative to --user-holding-a)]:label:' \
'--user-holding-b[User token B holding account ID]:holding_b:_wallet_account_ids' \
'--user-holding-b-label[User holding B account label (alternative to --user-holding-b)]:label:_wallet_account_labels' \
'--user-holding-b-label[User holding B label (alternative to --user-holding-b)]:label:' \
'--user-holding-lp[User LP token holding account ID]:holding_lp:_wallet_account_ids' \
'--user-holding-lp-label[User holding LP account label (alternative to --user-holding-lp)]:label:_wallet_account_labels' \
'--user-holding-lp-label[User holding LP label (alternative to --user-holding-lp)]:label:' \
'--balance-lp[Amount of LP tokens to burn]:balance_lp:' \
'--min-amount-a[Minimum token A to receive]:min_amount_a:' \
'--min-amount-b[Minimum token B to receive]:min_amount_b:'
@ -363,6 +387,61 @@ _wallet_amm() {
esac
}
# ata subcommand
# Completion for the Associated Token Account (ata) command group.
# The first positional word selects the subcommand; subsequent words are
# completed per-subcommand from the option specs below.
_wallet_ata() {
    local -a subcommands
    _arguments -C \
        '1: :->subcommand' \
        '*:: :->args'
    case $state in
        subcommand)
            # Offer the ata subcommands with their one-line descriptions.
            subcommands=(
                'address:Derive and print the Associated Token Account address (local only)'
                'create:Create (or idempotently no-op) the Associated Token Account'
                'send:Send tokens from owner ATA to a recipient token holding account'
                'burn:Burn tokens from holder ATA'
                'list:List all ATAs for a given owner across multiple token definitions'
                'help:Print this message or the help of the given subcommand(s)'
            )
            _describe -t subcommands 'ata subcommands' subcommands
            ;;
        args)
            # $line[1] holds the already-typed ata subcommand.
            case $line[1] in
                address)
                    _arguments \
                        '--owner[Owner account (no privacy prefix)]:owner:' \
                        '--token-definition[Token definition account (no privacy prefix)]:token_def:'
                    ;;
                create)
                    # Only --owner gets dynamic account-ID completion.
                    _arguments \
                        '--owner[Owner account with privacy prefix]:owner:_wallet_account_ids' \
                        '--token-definition[Token definition account (no privacy prefix)]:token_def:'
                    ;;
                send)
                    _arguments \
                        '--from[Sender account with privacy prefix]:from:_wallet_account_ids' \
                        '--token-definition[Token definition account (no privacy prefix)]:token_def:' \
                        '--to[Recipient account (no privacy prefix)]:to:' \
                        '--amount[Amount of tokens to send]:amount:'
                    ;;
                burn)
                    _arguments \
                        '--holder[Holder account with privacy prefix]:holder:_wallet_account_ids' \
                        '--token-definition[Token definition account (no privacy prefix)]:token_def:' \
                        '--amount[Amount of tokens to burn]:amount:'
                    ;;
                list)
                    _arguments \
                        '--owner[Owner account (no privacy prefix)]:owner:' \
                        '--token-definition[Token definition accounts (no privacy prefix)]:token_def:'
                    ;;
            esac
            ;;
    esac
}
# config subcommand
_wallet_config() {
local -a subcommands
@ -435,6 +514,7 @@ _wallet_help() {
'pinata:Pinata program interaction subcommand'
'token:Token program interaction subcommand'
'amm:AMM program interaction subcommand'
'ata:Associated Token Account program interaction subcommand'
'check-health:Check the wallet can connect to the node'
'config:Command to setup config, get and set config fields'
'restore-keys:Restoring keys from given password at given depth'
@ -468,25 +548,4 @@ _wallet_account_ids() {
_multi_parts / accounts
}
# Complete account labels by parsing `wallet account list` output.
# A label, when present, appears in square brackets at the end of a line.
_wallet_account_labels() {
    local -a found_labels
    local entry
    if command -v wallet &>/dev/null; then
        while IFS= read -r entry; do
            # Take everything after the last '[' and strip a trailing ']'.
            local candidate="${entry##*\[}"
            candidate="${candidate%\]}"
            # Keep it only if non-empty and the line actually contained a '['.
            if [[ -n "$candidate" && "$candidate" != "$entry" ]]; then
                found_labels+=("$candidate")
            fi
        done < <(wallet account list 2>/dev/null)
    fi
    if (( ${#found_labels} > 0 )); then
        compadd -a found_labels
    fi
}
_wallet "$@"

View File

@ -1,12 +1,8 @@
{
"home": "./indexer/service",
"consensus_info_polling_interval": "1s",
"bedrock_client_config": {
"addr": "http://logos-blockchain-node-0:18080",
"backoff": {
"start_delay": "100ms",
"max_retries": 5
}
"bedrock_config": {
"addr": "http://logos-blockchain-node-0:18080"
},
"channel_id": "0101010101010101010101010101010101010101010101010101010101010101",
"initial_accounts": [

View File

@ -5,6 +5,7 @@ This tutorial walks through native token transfers between public and private ac
4. Private account creation.
5. Native token transfer from a public account to a private account.
6. Native token transfer from a public account to a private account owned by someone else.
7. Sending to a private accounts key from multiple independent senders.
---
@ -142,7 +143,7 @@ Account owned by authenticated-transfer program
> Private accounts are structurally identical to public accounts, but their values are stored off-chain. On-chain, only a 32-byte commitment is recorded.
> Transactions include encrypted private values so the owner can recover them, and the decryption keys are never shared.
> Private accounts use two keypairs: nullifier keys for privacy-preserving executions and viewing keys for encrypting and decrypting values.
> The private account ID is derived from the nullifier public key.
> The private account ID is derived from the nullifier public key and a numeric identifier: `SHA256(prefix || npk || identifier)`. The same `npk` paired with different identifiers yields different, independent account IDs.
> Private accounts can be initialized by anyone, but once initialized they can only be modified by the owners keys.
> Updates include a new commitment and a nullifier for the old state, which prevents linkage between versions.
@ -158,7 +159,9 @@ With vpk 02ddc96d0eb56e00ce14994cfdaec5ae1f76244180a919545983156e3519940a17
```
> [!Tip]
> Focus on the account ID for now. The `npk` and `vpk` values are stored locally and used to build privacy-preserving transactions. The private account ID is derived from `npk`.
> Save this account ID. You will use it in later commands.
### b. Check the account status
Just like public accounts, new private accounts start out uninitialized:
@ -218,21 +221,23 @@ Account owned by authenticated-transfer program
## 6. Native token transfer from a public account to a private account owned by someone else
> [!Important]
> Well simulate transferring to someone else by creating a new private account we own and treating it as if it belonged to another user.
> Well simulate transferring to someone else by creating a new private accounts key and treating it as if it belonged to another user. When the recipient is someone else, you only have their `npk` and `vpk` — not an account ID.
### a. Create a new uninitialized private account
### a. Create a new private accounts key to simulate a foreign recipient
```bash
wallet account new private
wallet account new private-accounts-key
# Output:
Generated new account with account_id Private/AukXPRBmrYVqoqEW2HTs7N3hvTn3qdNFDcxDHVr5hMm5
Generated new private accounts key at path /1
With npk 0c95ebc4b3830f53da77bb0b80a276a776cdcf6410932acc718dcdb3f788a00e
With vpk 039fd12a3674a880d3e917804129141e4170d419d1f9e28a3dcf979c1f2369cb72
```
> [!Tip]
> Ignore the private account ID here and use the `npk` and `vpk` values to send to a foreign private account.
> Ignore the account ID here and use the `npk` and `vpk` values to send to a foreign private account.
### b. Send 3 tokens using the recipients npk and vpk
```bash
wallet auth-transfer send \
@ -242,9 +247,74 @@ wallet auth-transfer send \
--amount 3
```
> [!Note]
> `--to-identifier` is omitted here. When omitted, the wallet picks a random identifier, which is usually fine. Use the flag explicitly when a specific identifier is required.
> [!Warning]
> This command creates a privacy-preserving transaction, which may take a few minutes. The updated values are encrypted and included in the transaction.
> Once accepted, the recipient must run `wallet account sync-private` to scan the chain for their encrypted updates and refresh local state.
> [!Note]
> You have seen transfers between two public accounts and from a public sender to a private recipient. Transfers from a private sender, whether to a public account or to another private account, follow the same pattern.
## 7. Sending to a private accounts key from multiple independent senders
> [!Important]
> A private accounts key (`npk` + `vpk`) can be shared with multiple senders. Each sender independently chooses an identifier; the recipient's account ID is derived from `(npk, identifier)`. Two senders using different identifiers produce two separate private accounts under the same key.
### a. Alice creates a private accounts key
```bash
wallet account new private-accounts-key
# Output:
Generated new private accounts key at path /2
With npk a3f7c21b8e905d4f6a1bc783d0e2f94c1d5a6b7e8f9012345678abcdef012345
With vpk 03b1d2e3f4a5b6c7d8e9f0a1b2c3d4e5f6071819202122232425262728292a2b2c
```
Alice shares the `npk` and `vpk` values with Bob and Charlie out of band.
### b. Bob sends 10 tokens to Alice using identifier 1
```bash
wallet auth-transfer send \
--from Public/BobXqJprP9BmhbFVQyBcbznU8bAXcwrzwRoPTetXdQPA \
--to-npk a3f7c21b8e905d4f6a1bc783d0e2f94c1d5a6b7e8f9012345678abcdef012345 \
--to-vpk 03b1d2e3f4a5b6c7d8e9f0a1b2c3d4e5f6071819202122232425262728292a2b2c \
--to-identifier 1 \
--amount 10
```
### c. Charlie sends 5 tokens to Alice using identifier 2
```bash
wallet auth-transfer send \
--from Public/CharlieYrP9BmhbFVQyBcbznU8bAXcwrzwRoPTetXdQPB \
--to-npk a3f7c21b8e905d4f6a1bc783d0e2f94c1d5a6b7e8f9012345678abcdef012345 \
--to-vpk 03b1d2e3f4a5b6c7d8e9f0a1b2c3d4e5f6071819202122232425262728292a2b2c \
--to-identifier 2 \
--amount 5
```
> [!Note]
> Bob and Charlie each chose a different identifier. They do not need to coordinate — any two distinct values work.
### d. Alice syncs to discover the new accounts
```bash
wallet account sync-private
```
```bash
wallet account list
# Output (private account entries under key /2):
/2 Private/AliceBobAcctXxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
/2 Private/AliceCharlieAcctXxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
```
Alice now has two separate private accounts, one funded by Bob and one by Charlie, both controlled by the same key at path `/2`.
> [!Tip]
> Alice can check each account balance with `wallet account get --account-id Private/...`. Neither balance is visible on-chain.

View File

@ -9,7 +9,7 @@ workspace = true
[dependencies]
common.workspace = true
bedrock_client.workspace = true
logos-blockchain-zone-sdk.workspace = true
nssa.workspace = true
nssa_core.workspace = true
storage.workspace = true
@ -19,13 +19,13 @@ anyhow.workspace = true
log.workspace = true
serde.workspace = true
humantime-serde.workspace = true
tokio.workspace = true
borsh.workspace = true
futures.workspace = true
url.workspace = true
logos-blockchain-core.workspace = true
serde_json.workspace = true
async-stream.workspace = true
tokio.workspace = true
[dev-dependencies]
tempfile.workspace = true

View File

@ -1,11 +1,12 @@
use std::{path::Path, sync::Arc};
use anyhow::Result;
use bedrock_client::HeaderId;
use anyhow::{Context as _, Result};
use common::{
block::{BedrockStatus, Block},
transaction::{NSSATransaction, clock_invocation},
};
use logos_blockchain_core::{header::HeaderId, mantle::ops::channel::MsgId};
use logos_blockchain_zone_sdk::Slot;
use nssa::{Account, AccountId, V03State};
use nssa_core::BlockId;
use storage::indexer::RocksDBIO;
@ -103,6 +104,22 @@ impl IndexerStore {
Ok(self.dbio.calculate_state_for_id(block_id)?)
}
/// Loads the persisted zone-sdk indexer cursor, if one has been stored.
///
/// Returns `Ok(None)` when no cursor was ever written; otherwise the
/// stored bytes are decoded as a JSON `(MsgId, Slot)` pair.
pub fn get_zone_cursor(&self) -> Result<Option<(MsgId, Slot)>> {
    self.dbio
        .get_zone_sdk_indexer_cursor_bytes()?
        .map(|raw| {
            serde_json::from_slice::<(MsgId, Slot)>(&raw)
                .context("Failed to deserialize stored zone-sdk indexer cursor")
        })
        .transpose()
}
/// Persists the zone-sdk indexer cursor (JSON-encoded) so indexing can
/// resume from the same position after a restart.
pub fn set_zone_cursor(&self, cursor: &(MsgId, Slot)) -> Result<()> {
    let encoded =
        serde_json::to_vec(cursor).context("Failed to serialize zone-sdk indexer cursor")?;
    self.dbio.put_zone_sdk_indexer_cursor_bytes(&encoded)?;
    Ok(())
}
/// Recalculation of final state directly from DB.
///
/// Used for indexer healthcheck.

View File

@ -6,7 +6,6 @@ use std::{
};
use anyhow::{Context as _, Result};
pub use bedrock_client::BackoffConfig;
use common::config::BasicAuth;
use humantime_serde;
pub use logos_blockchain_core::mantle::ops::channel::ChannelId;
@ -16,8 +15,6 @@ use url::Url;
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ClientConfig {
/// For individual RPC requests we use Fibonacci backoff retry strategy.
pub backoff: BackoffConfig,
pub addr: Url,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub auth: Option<BasicAuth>,
@ -31,7 +28,7 @@ pub struct IndexerConfig {
pub signing_key: [u8; 32],
#[serde(with = "humantime_serde")]
pub consensus_info_polling_interval: Duration,
pub bedrock_client_config: ClientConfig,
pub bedrock_config: ClientConfig,
pub channel_id: ChannelId,
#[serde(skip_serializing_if = "Option::is_none")]
pub initial_public_accounts: Option<Vec<PublicAccountPublicInitialData>>,

View File

@ -1,15 +1,14 @@
use std::collections::VecDeque;
use std::sync::Arc;
use anyhow::Result;
use bedrock_client::{BedrockClient, HeaderId};
use common::{
HashType, PINATA_BASE58,
block::{Block, HashableBlockData},
};
use log::{debug, error, info};
use logos_blockchain_core::mantle::{
Op, SignedMantleTx,
ops::channel::{ChannelId, inscribe::InscriptionOp},
use common::block::{Block, HashableBlockData};
// ToDo: Remove after testnet
use common::{HashType, PINATA_BASE58};
use futures::StreamExt as _;
use log::{error, info, warn};
use logos_blockchain_core::header::HeaderId;
use logos_blockchain_zone_sdk::{
CommonHttpClient, ZoneMessage, adapter::NodeHttpClient, indexer::ZoneIndexer,
};
use nssa::V03State;
use testnet_initial_state::initial_state_testnet;
@ -21,25 +20,11 @@ pub mod config;
#[derive(Clone)]
pub struct IndexerCore {
pub bedrock_client: BedrockClient,
pub zone_indexer: Arc<ZoneIndexer<NodeHttpClient>>,
pub config: IndexerConfig,
pub store: IndexerStore,
}
#[derive(Clone)]
/// This struct represents one L1 block data fetched from backfilling.
pub struct BackfillBlockData {
l2_blocks: Vec<Block>,
l1_header: HeaderId,
}
#[derive(Clone)]
/// This struct represents data fetched from backfilling in one iteration.
pub struct BackfillData {
block_data: VecDeque<BackfillBlockData>,
curr_fin_l1_lib_header: HeaderId,
}
impl IndexerCore {
pub fn new(config: IndexerConfig) -> Result<Self> {
let hashable_data = HashableBlockData {
@ -63,6 +48,7 @@ impl IndexerCore {
.iter()
.map(|init_comm_data| {
let npk = &init_comm_data.npk;
let account_id = nssa::AccountId::from((npk, 0));
let mut acc = init_comm_data.account.clone();
@ -70,8 +56,8 @@ impl IndexerCore {
nssa::program::Program::authenticated_transfer_program().id();
(
nssa_core::Commitment::new(npk, &acc),
nssa_core::Nullifier::for_account_initialization(npk),
nssa_core::Commitment::new(&account_id, &acc),
nssa_core::Nullifier::for_account_initialization(&account_id),
)
})
.collect()
@ -106,279 +92,88 @@ impl IndexerCore {
let home = config.home.join("rocksdb");
let basic_auth = config.bedrock_config.auth.clone().map(Into::into);
let node = NodeHttpClient::new(
CommonHttpClient::new(basic_auth),
config.bedrock_config.addr.clone(),
);
let zone_indexer = ZoneIndexer::new(config.channel_id, node);
Ok(Self {
bedrock_client: BedrockClient::new(
config.bedrock_client_config.backoff,
config.bedrock_client_config.addr.clone(),
config.bedrock_client_config.auth.clone(),
)?,
zone_indexer: Arc::new(zone_indexer),
config,
store: IndexerStore::open_db_with_genesis(&home, &genesis_block, &state)?,
})
}
pub fn subscribe_parse_block_stream(&self) -> impl futures::Stream<Item = Result<Block>> {
pub fn subscribe_parse_block_stream(&self) -> impl futures::Stream<Item = Result<Block>> + '_ {
let poll_interval = self.config.consensus_info_polling_interval;
let initial_cursor = self
.store
.get_zone_cursor()
.expect("Failed to load zone-sdk indexer cursor");
async_stream::stream! {
info!("Searching for initial header");
let mut cursor = initial_cursor;
let last_stored_l1_lib_header = self.store.last_observed_l1_lib_header()?;
let mut prev_last_l1_lib_header = if let Some(last_l1_lib_header) = last_stored_l1_lib_header {
info!("Last l1 lib header found: {last_l1_lib_header}");
last_l1_lib_header
if cursor.is_some() {
info!("Resuming indexer from cursor {cursor:?}");
} else {
info!("Last l1 lib header not found in DB");
info!("Searching for the start of a channel");
let BackfillData {
block_data: start_buff,
curr_fin_l1_lib_header: last_l1_lib_header,
} = self.search_for_channel_start().await?;
for BackfillBlockData {
l2_blocks: l2_block_vec,
l1_header,
} in start_buff {
let mut l2_blocks_parsed_ids: Vec<_> = l2_block_vec.iter().map(|block| block.header.block_id).collect();
l2_blocks_parsed_ids.sort_unstable();
info!("Parsed {} L2 blocks with ids {:?}", l2_block_vec.len(), l2_blocks_parsed_ids);
for l2_block in l2_block_vec {
// TODO: proper fix is to make the sequencer's genesis include a
// trailing `clock_invocation(0)` (and have the indexer's
// `open_db_with_genesis` not pre-apply state transitions) so the
// inscribed genesis can flow through `put_block` like any other
// block. For now we skip re-applying it.
//
// The channel-start (block_id == 1) is the sequencer's genesis
// inscription that we re-discover during initial search. The
// indexer already has its own locally-constructed genesis in
// the store from `open_db_with_genesis`, so re-applying the
// inscribed copy is both redundant and would fail the strict
// block validation in `put_block` (the inscribed genesis lacks
// the trailing clock invocation).
if l2_block.header.block_id != 1 {
self
.store
.put_block(l2_block.clone(), l1_header)
.await
.inspect_err(|err| error!("Failed to put block with err {err:?}"))?;
}
yield Ok(l2_block);
}
}
last_l1_lib_header
};
info!("Searching for initial header finished");
info!("Starting backfilling from {prev_last_l1_lib_header}");
info!("Starting indexer from beginning of channel");
}
loop {
let BackfillData {
block_data: buff,
curr_fin_l1_lib_header,
} = self
.backfill_to_last_l1_lib_header_id(prev_last_l1_lib_header, &self.config.channel_id)
.await
.inspect_err(|err| error!("Failed to backfill to last l1 lib header id with err {err:#?}"))?;
prev_last_l1_lib_header = curr_fin_l1_lib_header;
for BackfillBlockData {
l2_blocks: l2_block_vec,
l1_header: header,
} in buff {
let mut l2_blocks_parsed_ids: Vec<_> = l2_block_vec.iter().map(|block| block.header.block_id).collect();
l2_blocks_parsed_ids.sort_unstable();
info!("Parsed {} L2 blocks with ids {:?}", l2_block_vec.len(), l2_blocks_parsed_ids);
for l2_block in l2_block_vec {
self.store.put_block(l2_block.clone(), header).await?;
yield Ok(l2_block);
let stream = match self.zone_indexer.next_messages(cursor).await {
Ok(s) => s,
Err(err) => {
error!("Failed to start zone-sdk next_messages stream: {err}");
tokio::time::sleep(poll_interval).await;
continue;
}
}
}
}
}
async fn get_lib(&self) -> Result<HeaderId> {
Ok(self.bedrock_client.get_consensus_info().await?.lib)
}
async fn get_next_lib(&self, prev_lib: HeaderId) -> Result<HeaderId> {
loop {
let next_lib = self.get_lib().await?;
if next_lib == prev_lib {
info!(
"Wait {:?} to not spam the node",
self.config.consensus_info_polling_interval
);
tokio::time::sleep(self.config.consensus_info_polling_interval).await;
} else {
break Ok(next_lib);
}
}
}
/// WARNING: depending on channel state,
/// may take indefinite amount of time.
pub async fn search_for_channel_start(&self) -> Result<BackfillData> {
let mut curr_last_l1_lib_header = self.get_lib().await?;
let mut backfill_start = curr_last_l1_lib_header;
// ToDo: How to get root?
let mut backfill_limit = HeaderId::from([0; 32]);
// ToDo: Not scalable, initial buffer should be stored in DB to not run out of memory
// Don't want to complicate DB even more right now.
let mut block_buffer = VecDeque::new();
'outer: loop {
let mut cycle_header = curr_last_l1_lib_header;
loop {
let Some(cycle_block) = self.bedrock_client.get_block_by_id(cycle_header).await?
else {
// First run can reach root easily
// so here we are optimistic about L1
// failing to get parent.
break;
};
let mut stream = std::pin::pin!(stream);
// It would be better to have id, but block does not have it, so slot will do.
info!(
"INITIAL SEARCH: Observed L1 block at slot {}",
cycle_block.header().slot().into_inner()
);
debug!(
"INITIAL SEARCH: This block header is {}",
cycle_block.header().id()
);
debug!(
"INITIAL SEARCH: This block parent is {}",
cycle_block.header().parent()
);
while let Some((msg, slot)) = stream.next().await {
let zone_block = match msg {
ZoneMessage::Block(b) => b,
// Non-block messages don't carry a cursor position; the
// next ZoneBlock advances past them implicitly.
ZoneMessage::Deposit(_) | ZoneMessage::Withdraw(_) => continue,
};
let (l2_block_vec, l1_header) =
parse_block_owned(&cycle_block, &self.config.channel_id);
let block: Block = match borsh::from_slice(&zone_block.data) {
Ok(b) => b,
Err(e) => {
error!("Failed to deserialize L2 block from zone-sdk: {e}");
// Advance past the broken inscription so we don't
// re-process it on restart.
cursor = Some((zone_block.id, slot));
if let Err(err) = self.store.set_zone_cursor(&(zone_block.id, slot)) {
warn!("Failed to persist indexer cursor: {err:#}");
}
continue;
}
};
info!("Parsed {} L2 blocks", l2_block_vec.len());
info!("Indexed L2 block {}", block.header.block_id);
if !l2_block_vec.is_empty() {
block_buffer.push_front(BackfillBlockData {
l2_blocks: l2_block_vec.clone(),
l1_header,
});
}
if let Some(first_l2_block) = l2_block_vec.first()
&& first_l2_block.header.block_id == 1
{
info!("INITIAL_SEARCH: Found channel start");
break 'outer;
}
// Step back to parent
let parent = cycle_block.header().parent();
if parent == backfill_limit {
break;
}
cycle_header = parent;
}
info!("INITIAL_SEARCH: Reached backfill limit, refetching last l1 lib header");
block_buffer.clear();
backfill_limit = backfill_start;
curr_last_l1_lib_header = self.get_next_lib(curr_last_l1_lib_header).await?;
backfill_start = curr_last_l1_lib_header;
}
Ok(BackfillData {
block_data: block_buffer,
curr_fin_l1_lib_header: curr_last_l1_lib_header,
})
}
pub async fn backfill_to_last_l1_lib_header_id(
&self,
last_fin_l1_lib_header: HeaderId,
channel_id: &ChannelId,
) -> Result<BackfillData> {
let curr_fin_l1_lib_header = self.get_next_lib(last_fin_l1_lib_header).await?;
// ToDo: Not scalable, buffer should be stored in DB to not run out of memory
// Don't want to complicate DB even more right now.
let mut block_buffer = VecDeque::new();
let mut cycle_header = curr_fin_l1_lib_header;
loop {
let Some(cycle_block) = self.bedrock_client.get_block_by_id(cycle_header).await? else {
return Err(anyhow::anyhow!("Parent not found"));
};
if cycle_block.header().id() == last_fin_l1_lib_header {
break;
}
// Step back to parent
cycle_header = cycle_block.header().parent();
// It would be better to have id, but block does not have it, so slot will do.
info!(
"Observed L1 block at slot {}",
cycle_block.header().slot().into_inner()
);
let (l2_block_vec, l1_header) = parse_block_owned(&cycle_block, channel_id);
info!("Parsed {} L2 blocks", l2_block_vec.len());
if !l2_block_vec.is_empty() {
block_buffer.push_front(BackfillBlockData {
l2_blocks: l2_block_vec,
l1_header,
});
}
}
Ok(BackfillData {
block_data: block_buffer,
curr_fin_l1_lib_header,
})
}
}
fn parse_block_owned(
l1_block: &bedrock_client::Block<SignedMantleTx>,
decoded_channel_id: &ChannelId,
) -> (Vec<Block>, HeaderId) {
(
#[expect(
clippy::wildcard_enum_match_arm,
reason = "We are only interested in channel inscription ops, so it's fine to ignore the rest"
)]
l1_block
.transactions()
.flat_map(|tx| {
tx.mantle_tx.ops.iter().filter_map(|op| match op {
Op::ChannelInscribe(InscriptionOp {
channel_id,
inscription,
..
}) if channel_id == decoded_channel_id => {
borsh::from_slice::<Block>(inscription)
.inspect_err(|err| {
error!("Failed to deserialize our inscription with err: {err:#?}");
})
.ok()
// TODO: Remove l1_header placeholder once storage layer
// no longer requires it. Zone-sdk handles L1 tracking internally.
let placeholder_l1_header = HeaderId::from([0_u8; 32]);
if let Err(err) = self.store.put_block(block.clone(), placeholder_l1_header).await {
error!("Failed to store block {}: {err:#}", block.header.block_id);
}
_ => None,
})
})
.collect(),
l1_block.header().id(),
)
cursor = Some((zone_block.id, slot));
if let Err(err) = self.store.set_zone_cursor(&(zone_block.id, slot)) {
warn!("Failed to persist indexer cursor: {err:#}");
}
yield Ok(block);
}
// Stream ended (caught up to LIB). Sleep then poll again.
tokio::time::sleep(poll_interval).await;
}
}
}
}

View File

@ -1,12 +1,8 @@
{
"home": ".",
"consensus_info_polling_interval": "1s",
"bedrock_client_config": {
"addr": "http://localhost:8080",
"backoff": {
"start_delay": "100ms",
"max_retries": 5
}
"bedrock_config": {
"addr": "http://localhost:8080"
},
"channel_id": "0101010101010101010101010101010101010101010101010101010101010101",
"initial_accounts": [

View File

@ -6,7 +6,7 @@
clippy::integer_division_remainder_used,
reason = "Mock service uses intentional casts and format patterns for test data generation"
)]
use std::collections::HashMap;
use std::{collections::HashMap, sync::Arc, time::Duration};
use indexer_service_protocol::{
Account, AccountId, BedrockStatus, Block, BlockBody, BlockHeader, BlockId, Commitment,
@ -19,15 +19,73 @@ use jsonrpsee::{
core::{SubscriptionResult, async_trait},
types::ErrorObjectOwned,
};
use tokio::sync::{RwLock, broadcast};
/// A mock implementation of the `IndexerService` RPC for testing purposes.
pub struct MockIndexerService {
const MOCK_GENESIS_TIMESTAMP_MS: u64 = 1_704_067_200_000;
const MOCK_BLOCK_INTERVAL_MS: u64 = 30_000;
/// Shared mutable state behind the mock indexer service.
struct MockState {
    // Blocks produced so far, in ascending block-id order.
    blocks: Vec<Block>,
    // Account state keyed by account id.
    accounts: HashMap<AccountId, Account>,
    // Known account ids; mock blocks pick transaction participants from here.
    account_ids: Vec<AccountId>,
    // Transaction lookup: tx hash -> (transaction, id of the block containing it).
    transactions: HashMap<HashType, (Transaction, BlockId)>,
}
/// A mock implementation of the `IndexerService` RPC for testing purposes.
pub struct MockIndexerService {
state: Arc<RwLock<MockState>>,
finalized_blocks_tx: broadcast::Sender<Block>,
}
impl MockIndexerService {
/// Spawns a background task that appends one new finalized mock block
/// every 30 seconds and broadcasts it to subscribers.
///
/// The new block is built and recorded while holding the write lock on
/// `state`; the broadcast send happens after the lock is released.
fn spawn_block_generation_task(
    state: Arc<RwLock<MockState>>,
    finalized_blocks_tx: broadcast::Sender<Block>,
) {
    tokio::spawn(async move {
        loop {
            tokio::time::sleep(Duration::from_secs(30)).await;

            let new_block = {
                let mut state = state.write().await;

                // Continue the chain from the last block; an empty chain starts at id 1.
                let next_block_id = state
                    .blocks
                    .last()
                    .map_or(1, |block| block.header.block_id.saturating_add(1));
                // Link to the previous block's hash (all-zero when there is none).
                let prev_hash = state
                    .blocks
                    .last()
                    .map_or(HashType([0_u8; 32]), |block| block.header.hash);
                // Advance the timestamp by one mock block interval past the last block.
                let timestamp = state.blocks.last().map_or(
                    MOCK_GENESIS_TIMESTAMP_MS + MOCK_BLOCK_INTERVAL_MS,
                    |block| {
                        block
                            .header
                            .timestamp
                            .saturating_add(MOCK_BLOCK_INTERVAL_MS)
                    },
                );

                let block = build_mock_block(
                    next_block_id,
                    prev_hash,
                    timestamp,
                    &state.account_ids,
                    BedrockStatus::Finalized,
                );
                // Index the block's transactions, then append the block itself.
                index_block_transactions(&mut state.transactions, &block);
                state.blocks.push(block.clone());
                block
            };

            // Send errors (no active subscribers) are intentionally ignored.
            let _res = finalized_blocks_tx.send(new_block);
        }
    });
}
#[must_use]
pub fn new_with_mock_blocks() -> Self {
let mut blocks = Vec::new();
@ -59,119 +117,38 @@ impl MockIndexerService {
let mut prev_hash = HashType([0_u8; 32]);
for block_id in 1..=100 {
let block_hash = {
let mut hash = [0_u8; 32];
hash[0] = block_id as u8;
hash[1] = 0xff;
HashType(hash)
};
// Create 2-4 transactions per block (mix of Public, PrivacyPreserving, and
// ProgramDeployment)
let num_txs = 2 + (block_id % 3);
let mut block_transactions = Vec::new();
for tx_idx in 0..num_txs {
let tx_hash = {
let mut hash = [0_u8; 32];
hash[0] = block_id as u8;
hash[1] = tx_idx as u8;
HashType(hash)
};
// Vary transaction types: Public, PrivacyPreserving, or ProgramDeployment
let tx = match (block_id + tx_idx) % 5 {
// Public transactions (most common)
0 | 1 => Transaction::Public(PublicTransaction {
hash: tx_hash,
message: PublicMessage {
program_id: ProgramId([1_u32; 8]),
account_ids: vec![
account_ids[tx_idx as usize % account_ids.len()],
account_ids[(tx_idx as usize + 1) % account_ids.len()],
],
nonces: vec![block_id as u128, (block_id + 1) as u128],
instruction_data: vec![1, 2, 3, 4],
},
witness_set: WitnessSet {
signatures_and_public_keys: vec![],
proof: None,
},
}),
// PrivacyPreserving transactions
2 | 3 => Transaction::PrivacyPreserving(PrivacyPreservingTransaction {
hash: tx_hash,
message: PrivacyPreservingMessage {
public_account_ids: vec![
account_ids[tx_idx as usize % account_ids.len()],
],
nonces: vec![block_id as u128],
public_post_states: vec![Account {
program_owner: ProgramId([1_u32; 8]),
balance: 500,
data: Data(vec![0xdd, 0xee]),
nonce: block_id as u128,
}],
encrypted_private_post_states: vec![EncryptedAccountData {
ciphertext: indexer_service_protocol::Ciphertext(vec![
0x01, 0x02, 0x03, 0x04,
]),
epk: indexer_service_protocol::EphemeralPublicKey(vec![0xaa; 32]),
view_tag: 42,
}],
new_commitments: vec![Commitment([block_id as u8; 32])],
new_nullifiers: vec![(
indexer_service_protocol::Nullifier([tx_idx as u8; 32]),
CommitmentSetDigest([0xff; 32]),
)],
block_validity_window: ValidityWindow((None, None)),
timestamp_validity_window: ValidityWindow((None, None)),
},
witness_set: WitnessSet {
signatures_and_public_keys: vec![],
proof: Some(indexer_service_protocol::Proof(vec![0; 32])),
},
}),
// ProgramDeployment transactions (rare)
_ => Transaction::ProgramDeployment(ProgramDeploymentTransaction {
hash: tx_hash,
message: ProgramDeploymentMessage {
bytecode: vec![0x00, 0x61, 0x73, 0x6d, 0x01, 0x00, 0x00, 0x00], /* WASM magic number */
},
}),
};
transactions.insert(tx_hash, (tx.clone(), block_id));
block_transactions.push(tx);
}
let block = Block {
header: BlockHeader {
block_id,
prev_block_hash: prev_hash,
hash: block_hash,
timestamp: 1_704_067_200_000 + (block_id * 12_000), // ~12 seconds per block
signature: Signature([0_u8; 64]),
},
body: BlockBody {
transactions: block_transactions,
},
bedrock_status: match block_id {
let block = build_mock_block(
block_id,
prev_hash,
MOCK_GENESIS_TIMESTAMP_MS + (block_id * MOCK_BLOCK_INTERVAL_MS),
&account_ids,
match block_id {
0..=5 => BedrockStatus::Finalized,
6..=8 => BedrockStatus::Safe,
_ => BedrockStatus::Pending,
},
bedrock_parent_id: MantleMsgId([0; 32]),
};
);
prev_hash = block_hash;
index_block_transactions(&mut transactions, &block);
prev_hash = block.header.hash;
blocks.push(block);
}
Self {
let state = Arc::new(RwLock::new(MockState {
blocks,
accounts,
account_ids,
transactions,
}));
let (finalized_blocks_tx, _) = broadcast::channel(32);
Self::spawn_block_generation_task(Arc::clone(&state), finalized_blocks_tx.clone());
Self {
state,
finalized_blocks_tx,
}
}
}
@ -183,21 +160,45 @@ impl indexer_service_rpc::RpcServer for MockIndexerService {
subscription_sink: jsonrpsee::PendingSubscriptionSink,
) -> SubscriptionResult {
let sink = subscription_sink.accept().await?;
for block in self
.blocks
.iter()
.filter(|b| b.bedrock_status == BedrockStatus::Finalized)
{
let initial_finalized_blocks: Vec<Block> = {
let state = self.state.read().await;
state
.blocks
.iter()
.filter(|b| b.bedrock_status == BedrockStatus::Finalized)
.cloned()
.collect()
};
for block in &initial_finalized_blocks {
let json = serde_json::value::to_raw_value(block).unwrap();
sink.send(json).await?;
}
let mut receiver = self.finalized_blocks_tx.subscribe();
loop {
match receiver.recv().await {
Ok(block) => {
let json = serde_json::value::to_raw_value(&block).unwrap();
sink.send(json).await?;
}
Err(broadcast::error::RecvError::Lagged(_)) => {}
Err(broadcast::error::RecvError::Closed) => break,
}
}
Ok(())
}
async fn get_last_finalized_block_id(&self) -> Result<BlockId, ErrorObjectOwned> {
self.blocks
.last()
.map(|bl| bl.header.block_id)
self.state
.read()
.await
.blocks
.iter()
.rev()
.find(|block| block.bedrock_status == BedrockStatus::Finalized)
.map(|block| block.header.block_id)
.ok_or_else(|| {
ErrorObjectOwned::owned(-32001, "Last block not found".to_owned(), None::<()>)
})
@ -205,6 +206,9 @@ impl indexer_service_rpc::RpcServer for MockIndexerService {
async fn get_block_by_id(&self, block_id: BlockId) -> Result<Option<Block>, ErrorObjectOwned> {
Ok(self
.state
.read()
.await
.blocks
.iter()
.find(|b| b.header.block_id == block_id)
@ -216,6 +220,9 @@ impl indexer_service_rpc::RpcServer for MockIndexerService {
block_hash: HashType,
) -> Result<Option<Block>, ErrorObjectOwned> {
Ok(self
.state
.read()
.await
.blocks
.iter()
.find(|b| b.header.hash == block_hash)
@ -223,7 +230,10 @@ impl indexer_service_rpc::RpcServer for MockIndexerService {
}
async fn get_account(&self, account_id: AccountId) -> Result<Account, ErrorObjectOwned> {
self.accounts
self.state
.read()
.await
.accounts
.get(&account_id)
.cloned()
.ok_or_else(|| ErrorObjectOwned::owned(-32001, "Account not found", None::<()>))
@ -233,7 +243,13 @@ impl indexer_service_rpc::RpcServer for MockIndexerService {
&self,
tx_hash: HashType,
) -> Result<Option<Transaction>, ErrorObjectOwned> {
Ok(self.transactions.get(&tx_hash).map(|(tx, _)| tx.clone()))
Ok(self
.state
.read()
.await
.transactions
.get(&tx_hash)
.map(|(tx, _)| tx.clone()))
}
async fn get_blocks(
@ -241,15 +257,17 @@ impl indexer_service_rpc::RpcServer for MockIndexerService {
before: Option<BlockId>,
limit: u64,
) -> Result<Vec<Block>, ErrorObjectOwned> {
let state = self.state.read().await;
let start_id = before.map_or_else(
|| self.blocks.len(),
|| state.blocks.len(),
|id| usize::try_from(id.saturating_sub(1)).expect("u64 should fit in usize"),
);
let result = (1..=start_id)
.rev()
.take(limit as usize)
.map_while(|block_id| self.blocks.get(block_id - 1).cloned())
.map_while(|block_id| state.blocks.get(block_id - 1).cloned())
.collect();
Ok(result)
@ -261,20 +279,24 @@ impl indexer_service_rpc::RpcServer for MockIndexerService {
offset: u64,
limit: u64,
) -> Result<Vec<Transaction>, ErrorObjectOwned> {
let mut account_txs: Vec<_> = self
.transactions
.values()
.filter(|(tx, _)| match tx {
Transaction::Public(pub_tx) => pub_tx.message.account_ids.contains(&account_id),
Transaction::PrivacyPreserving(priv_tx) => {
priv_tx.message.public_account_ids.contains(&account_id)
}
Transaction::ProgramDeployment(_) => false,
})
.collect();
let mut account_txs: Vec<(Transaction, BlockId)> = {
let state = self.state.read().await;
state
.transactions
.values()
.filter(|(tx, _)| match tx {
Transaction::Public(pub_tx) => pub_tx.message.account_ids.contains(&account_id),
Transaction::PrivacyPreserving(priv_tx) => {
priv_tx.message.public_account_ids.contains(&account_id)
}
Transaction::ProgramDeployment(_) => false,
})
.cloned()
.collect()
};
// Sort by block ID descending (most recent first)
account_txs.sort_by_key(|b| std::cmp::Reverse(b.1));
account_txs.sort_by_key(|(_, block_id)| std::cmp::Reverse(*block_id));
let start = offset as usize;
if start >= account_txs.len() {
@ -293,3 +315,123 @@ impl indexer_service_rpc::RpcServer for MockIndexerService {
Ok(())
}
}
/// Builds a deterministic mock `Block` for indexer tests.
///
/// The block hash is derived from `block_id`, so repeated calls with the same
/// id produce identical blocks. Each block carries 2-4 transactions whose
/// variants (Public / PrivacyPreserving / ProgramDeployment) follow a fixed
/// `(block_id + tx_idx) % 5` schedule, so test data covers every transaction
/// kind without randomness.
///
/// # Panics
///
/// Panics if `account_ids` is empty: transaction construction indexes into it
/// modulo its length, and a zero length would otherwise fail with an opaque
/// "remainder with a divisor of zero" panic.
fn build_mock_block(
    block_id: BlockId,
    prev_hash: HashType,
    timestamp: u64,
    account_ids: &[AccountId],
    bedrock_status: BedrockStatus,
) -> Block {
    assert!(
        !account_ids.is_empty(),
        "build_mock_block requires at least one account id"
    );

    // Deterministic block hash: byte 0 encodes the (truncated) block id and
    // byte 1 is a fixed 0xff marker that distinguishes block hashes from the
    // per-transaction hashes built below.
    let block_hash = {
        let mut hash = [0_u8; 32];
        hash[0] = block_id as u8;
        hash[1] = 0xff;
        HashType(hash)
    };

    // Create 2-4 transactions per block (mix of Public, PrivacyPreserving, and ProgramDeployment)
    let num_txs = 2 + (block_id % 3);
    let mut block_transactions = Vec::new();

    for tx_idx in 0..num_txs {
        // Deterministic per-transaction hash keyed by (block id, tx index).
        let tx_hash = {
            let mut hash = [0_u8; 32];
            hash[0] = block_id as u8;
            hash[1] = tx_idx as u8;
            HashType(hash)
        };

        // Vary transaction types: Public, PrivacyPreserving, or ProgramDeployment
        let tx = match (block_id + tx_idx) % 5 {
            // Public transactions (most common)
            0 | 1 => Transaction::Public(PublicTransaction {
                hash: tx_hash,
                message: PublicMessage {
                    program_id: ProgramId([1_u32; 8]),
                    // Two consecutive (wrapping) accounts per public tx so
                    // multi-account filtering paths get exercised.
                    account_ids: vec![
                        account_ids[tx_idx as usize % account_ids.len()],
                        account_ids[(tx_idx as usize + 1) % account_ids.len()],
                    ],
                    nonces: vec![block_id as u128, (block_id + 1) as u128],
                    instruction_data: vec![1, 2, 3, 4],
                },
                witness_set: WitnessSet {
                    signatures_and_public_keys: vec![],
                    proof: None,
                },
            }),
            // PrivacyPreserving transactions
            2 | 3 => Transaction::PrivacyPreserving(PrivacyPreservingTransaction {
                hash: tx_hash,
                message: PrivacyPreservingMessage {
                    public_account_ids: vec![account_ids[tx_idx as usize % account_ids.len()]],
                    nonces: vec![block_id as u128],
                    public_post_states: vec![Account {
                        program_owner: ProgramId([1_u32; 8]),
                        balance: 500,
                        data: Data(vec![0xdd, 0xee]),
                        nonce: block_id as u128,
                    }],
                    encrypted_private_post_states: vec![EncryptedAccountData {
                        ciphertext: indexer_service_protocol::Ciphertext(vec![
                            0x01, 0x02, 0x03, 0x04,
                        ]),
                        epk: indexer_service_protocol::EphemeralPublicKey(vec![0xaa; 32]),
                        view_tag: 42,
                    }],
                    new_commitments: vec![Commitment([block_id as u8; 32])],
                    new_nullifiers: vec![(
                        indexer_service_protocol::Nullifier([tx_idx as u8; 32]),
                        CommitmentSetDigest([0xff; 32]),
                    )],
                    block_validity_window: ValidityWindow((None, None)),
                    timestamp_validity_window: ValidityWindow((None, None)),
                },
                witness_set: WitnessSet {
                    signatures_and_public_keys: vec![],
                    proof: Some(indexer_service_protocol::Proof(vec![0; 32])),
                },
            }),
            // ProgramDeployment transactions (rare)
            _ => Transaction::ProgramDeployment(ProgramDeploymentTransaction {
                hash: tx_hash,
                message: ProgramDeploymentMessage {
                    // Starts with the WASM magic number so the payload looks
                    // like a plausible module header.
                    bytecode: vec![0x00, 0x61, 0x73, 0x6d, 0x01, 0x00, 0x00, 0x00],
                },
            }),
        };
        block_transactions.push(tx);
    }

    Block {
        header: BlockHeader {
            block_id,
            prev_block_hash: prev_hash,
            hash: block_hash,
            timestamp,
            // Mock blocks are unsigned; a zeroed signature suffices for tests.
            signature: Signature([0_u8; 64]),
        },
        body: BlockBody {
            transactions: block_transactions,
        },
        bedrock_status,
        bedrock_parent_id: MantleMsgId([0; 32]),
    }
}
/// Records every transaction of `block` in the hash-indexed `transactions`
/// map, keyed by transaction hash and tagged with the containing block id.
fn index_block_transactions(
    transactions: &mut HashMap<HashType, (Transaction, BlockId)>,
    block: &Block,
) {
    let block_id = block.header.block_id;
    for tx in &block.body.transactions {
        // Every transaction variant carries its own hash field.
        let hash = match tx {
            Transaction::Public(t) => t.hash,
            Transaction::PrivacyPreserving(t) => t.hash,
            Transaction::ProgramDeployment(t) => t.hash,
        };
        transactions.insert(hash, (tx.clone(), block_id));
    }
}

View File

@ -19,8 +19,9 @@ indexer_service.workspace = true
serde_json.workspace = true
token_core.workspace = true
ata_core.workspace = true
indexer_service_rpc.workspace = true
indexer_service_rpc = { workspace = true, features = ["client"] }
sequencer_service_rpc = { workspace = true, features = ["client"] }
jsonrpsee = { workspace = true, features = ["ws-client"] }
wallet-ffi.workspace = true
indexer_ffi.workspace = true
testnet_initial_state.workspace = true
@ -36,4 +37,4 @@ hex.workspace = true
tempfile.workspace = true
bytesize.workspace = true
futures.workspace = true
testcontainers = { version = "0.27.0", features = ["docker-compose"] }
testcontainers = { version = "0.27.3", features = ["docker-compose"] }

View File

@ -2,7 +2,7 @@ use std::{net::SocketAddr, path::PathBuf, time::Duration};
use anyhow::{Context as _, Result};
use bytesize::ByteSize;
use indexer_service::{BackoffConfig, ChannelId, ClientConfig, IndexerConfig};
use indexer_service::{ChannelId, ClientConfig, IndexerConfig};
use key_protocol::key_management::KeyChain;
use nssa::{Account, AccountId, PrivateKey, PublicKey};
use nssa_core::{account::Data, program::DEFAULT_PROGRAM_ID};
@ -60,11 +60,11 @@ impl InitialData {
let mut private_charlie_key_chain = KeyChain::new_os_random();
let mut private_charlie_account_id =
AccountId::from(&private_charlie_key_chain.nullifier_public_key);
AccountId::from((&private_charlie_key_chain.nullifier_public_key, 0));
let mut private_david_key_chain = KeyChain::new_os_random();
let mut private_david_account_id =
AccountId::from(&private_david_key_chain.nullifier_public_key);
AccountId::from((&private_david_key_chain.nullifier_public_key, 0));
// Ensure consistent ordering
if private_charlie_account_id > private_david_account_id {
@ -139,11 +139,10 @@ impl InitialData {
})
})
.chain(self.private_accounts.iter().map(|(key_chain, account)| {
let account_id = AccountId::from(&key_chain.nullifier_public_key);
InitialAccountData::Private(Box::new(PrivateAccountPrivateInitialData {
account_id,
account: account.clone(),
key_chain: key_chain.clone(),
identifier: 0,
}))
}))
.collect()
@ -165,35 +164,10 @@ impl std::fmt::Display for UrlProtocol {
}
}
pub fn indexer_config(
bedrock_addr: SocketAddr,
home: PathBuf,
initial_data: &InitialData,
) -> Result<IndexerConfig> {
Ok(IndexerConfig {
home,
consensus_info_polling_interval: Duration::from_secs(1),
bedrock_client_config: ClientConfig {
addr: addr_to_url(UrlProtocol::Http, bedrock_addr)
.context("Failed to convert bedrock addr to URL")?,
auth: None,
backoff: BackoffConfig {
start_delay: Duration::from_millis(100),
max_retries: 10,
},
},
initial_public_accounts: Some(initial_data.sequencer_initial_public_accounts()),
initial_private_accounts: Some(initial_data.sequencer_initial_private_accounts()),
signing_key: [37; 32],
channel_id: bedrock_channel_id(),
})
}
pub fn sequencer_config(
partial: SequencerPartialConfig,
home: PathBuf,
bedrock_addr: SocketAddr,
indexer_addr: SocketAddr,
initial_data: &InitialData,
) -> Result<SequencerConfig> {
let SequencerPartialConfig {
@ -216,17 +190,11 @@ pub fn sequencer_config(
initial_private_accounts: Some(initial_data.sequencer_initial_private_accounts()),
signing_key: [37; 32],
bedrock_config: BedrockConfig {
backoff: BackoffConfig {
start_delay: Duration::from_millis(100),
max_retries: 5,
},
channel_id: bedrock_channel_id(),
node_url: addr_to_url(UrlProtocol::Http, bedrock_addr)
.context("Failed to convert bedrock addr to URL")?,
auth: None,
},
indexer_rpc_url: addr_to_url(UrlProtocol::Ws, indexer_addr)
.context("Failed to convert indexer addr to URL")?,
})
}
@ -246,6 +214,26 @@ pub fn wallet_config(
})
}
/// Builds an `IndexerConfig` for integration tests: the indexer polls
/// consensus once per second, talks to the bedrock node at `bedrock_addr`
/// over HTTP, and is seeded with the public/private accounts from
/// `initial_data`.
///
/// # Errors
///
/// Returns an error if `bedrock_addr` cannot be converted to an HTTP URL.
pub fn indexer_config(
    bedrock_addr: SocketAddr,
    home: PathBuf,
    initial_data: &InitialData,
) -> Result<IndexerConfig> {
    let bedrock_url = addr_to_url(UrlProtocol::Http, bedrock_addr)
        .context("Failed to convert bedrock addr to URL")?;
    // Unauthenticated client: tests run against a local bedrock node.
    let bedrock_config = ClientConfig {
        addr: bedrock_url,
        auth: None,
    };
    Ok(IndexerConfig {
        home,
        consensus_info_polling_interval: Duration::from_secs(1),
        bedrock_config,
        initial_public_accounts: Some(initial_data.sequencer_initial_public_accounts()),
        initial_private_accounts: Some(initial_data.sequencer_initial_private_accounts()),
        signing_key: [37; 32],
        channel_id: bedrock_channel_id(),
    })
}
pub fn addr_to_url(protocol: UrlProtocol, addr: SocketAddr) -> Result<Url> {
// Convert 0.0.0.0 to 127.0.0.1 for client connections
// When binding to port 0, the server binds to 0.0.0.0:<random_port>

View File

@ -0,0 +1,34 @@
//! Thin client wrapper for querying the indexer's JSON-RPC API in tests.
//!
//! The sequencer doesn't depend on the indexer at runtime — finalization comes
//! from zone-sdk events. This wrapper exists purely for test ergonomics so
//! integration tests can construct a single connection and call
//! `indexer_service_rpc::RpcClient` methods directly via `Deref`.
use std::ops::Deref;
use anyhow::{Context as _, Result};
use jsonrpsee::ws_client::{WsClient, WsClientBuilder};
use log::info;
use url::Url;
/// Websocket handle to the indexer's JSON-RPC endpoint.
///
/// Dereferences to the underlying [`WsClient`], so methods from
/// `indexer_service_rpc::RpcClient` can be called on it directly.
pub struct IndexerClient(WsClient);

impl IndexerClient {
    /// Opens a websocket connection to the indexer at `indexer_url`.
    ///
    /// # Errors
    ///
    /// Returns an error if the websocket connection cannot be established.
    pub async fn new(indexer_url: &Url) -> Result<Self> {
        info!("Connecting to Indexer at {indexer_url}");
        let inner = WsClientBuilder::default()
            .build(indexer_url)
            .await
            .context("Failed to create websocket client")?;
        Ok(Self(inner))
    }
}

impl Deref for IndexerClient {
    type Target = WsClient;

    fn deref(&self) -> &Self::Target {
        let Self(inner) = self;
        inner
    }
}

View File

@ -9,16 +9,19 @@ use indexer_service::IndexerHandle;
use log::{debug, error};
use nssa::{AccountId, PrivacyPreservingTransaction};
use nssa_core::Commitment;
use sequencer_core::indexer_client::{IndexerClient, IndexerClientTrait as _};
use sequencer_service::SequencerHandle;
use sequencer_service_rpc::{RpcClient as _, SequencerClient, SequencerClientBuilder};
use tempfile::TempDir;
use testcontainers::compose::DockerCompose;
use wallet::WalletCore;
use crate::setup::{setup_bedrock_node, setup_indexer, setup_sequencer, setup_wallet};
use crate::{
indexer_client::IndexerClient,
setup::{setup_bedrock_node, setup_indexer, setup_sequencer, setup_wallet},
};
pub mod config;
pub mod indexer_client;
pub mod setup;
pub mod test_context_ffi;
@ -77,14 +80,10 @@ impl TestContext {
.await
.context("Failed to setup Indexer")?;
let (sequencer_handle, temp_sequencer_dir) = setup_sequencer(
sequencer_partial_config,
bedrock_addr,
indexer_handle.addr(),
&initial_data,
)
.await
.context("Failed to setup Sequencer")?;
let (sequencer_handle, temp_sequencer_dir) =
setup_sequencer(sequencer_partial_config, bedrock_addr, &initial_data)
.await
.context("Failed to setup Sequencer")?;
let (wallet, temp_wallet_dir, wallet_password) =
setup_wallet(sequencer_handle.addr(), &initial_data)

View File

@ -119,7 +119,6 @@ pub(crate) async fn setup_indexer(
pub(crate) async fn setup_sequencer(
partial: config::SequencerPartialConfig,
bedrock_addr: SocketAddr,
indexer_addr: SocketAddr,
initial_data: &config::InitialData,
) -> Result<(SequencerHandle, TempDir)> {
let temp_sequencer_dir =
@ -134,7 +133,6 @@ pub(crate) async fn setup_sequencer(
partial,
temp_sequencer_dir.path().to_owned(),
bedrock_addr,
indexer_addr,
initial_data,
)
.context("Failed to create Sequencer config")?;

View File

@ -6,7 +6,6 @@ use indexer_ffi::IndexerServiceFFI;
use indexer_service_rpc::RpcClient as _;
use log::{debug, error};
use nssa::AccountId;
use sequencer_core::indexer_client::{IndexerClient, IndexerClientTrait as _};
use sequencer_service::SequencerHandle;
use sequencer_service_rpc::{RpcClient as _, SequencerClient, SequencerClientBuilder};
use tempfile::TempDir;
@ -15,6 +14,7 @@ use wallet::WalletCore;
use crate::{
BEDROCK_SERVICE_WITH_OPEN_PORT, LOGGER, TestContextBuilder, config,
indexer_client::IndexerClient,
setup::{setup_bedrock_node, setup_indexer_ffi, setup_sequencer, setup_wallet},
};
@ -85,8 +85,6 @@ impl TestContextFFI {
.block_on(setup_sequencer(
sequencer_partial_config,
bedrock_addr,
// SAFETY: addr is valid if indexer_ffi is valid.
unsafe { indexer_ffi.addr() },
initial_data,
))
.context("Failed to setup Sequencer")?;

View File

@ -4,7 +4,7 @@
)]
use anyhow::Result;
use integration_tests::TestContext;
use integration_tests::{TestContext, format_private_account_id};
use log::info;
use nssa::program::Program;
use sequencer_service_rpc::RpcClient as _;
@ -70,34 +70,29 @@ async fn new_public_account_with_label() -> Result<()> {
}
#[test]
async fn new_private_account_with_label() -> Result<()> {
async fn add_label_to_existing_account() -> Result<()> {
let mut ctx = TestContext::new().await?;
let account_id = ctx.existing_private_accounts()[0];
let label = "my-test-private-account".to_owned();
let command = Command::Account(AccountSubcommand::New(NewSubcommand::Private {
cci: None,
label: Some(label.clone()),
}));
let command = Command::Account(AccountSubcommand::Label {
account_id: Some(format_private_account_id(account_id)),
account_label: None,
label: label.clone(),
});
let result = execute_subcommand(ctx.wallet_mut(), command).await?;
execute_subcommand(ctx.wallet_mut(), command).await?;
// Extract the account_id from the result
let wallet::cli::SubcommandReturnValue::RegisterAccount { account_id } = result else {
panic!("Expected RegisterAccount return value")
};
// Verify the label was stored
let stored_label = ctx
.wallet()
.storage()
.labels
.get(&account_id.to_string())
.expect("Label should be stored for the new account");
.expect("Label should be stored for the account");
assert_eq!(stored_label.to_string(), label);
info!("Successfully created private account with label");
info!("Successfully set label on existing private account");
Ok(())
}

View File

@ -133,6 +133,7 @@ async fn amm_public() -> Result<()> {
to_label: None,
to_npk: None,
to_vpk: None,
to_identifier: Some(0),
amount: 7,
};
@ -162,6 +163,7 @@ async fn amm_public() -> Result<()> {
to_label: None,
to_npk: None,
to_vpk: None,
to_identifier: Some(0),
amount: 7,
};
@ -550,6 +552,7 @@ async fn amm_new_pool_using_labels() -> Result<()> {
to_label: None,
to_npk: None,
to_vpk: None,
to_identifier: Some(0),
amount: 5,
};
wallet::cli::execute_subcommand(ctx.wallet_mut(), Command::Token(subcommand)).await?;
@ -574,6 +577,7 @@ async fn amm_new_pool_using_labels() -> Result<()> {
to_label: None,
to_npk: None,
to_vpk: None,
to_identifier: Some(0),
amount: 5,
};
wallet::cli::execute_subcommand(ctx.wallet_mut(), Command::Token(subcommand)).await?;

View File

@ -268,6 +268,7 @@ async fn transfer_and_burn_via_ata() -> Result<()> {
to_label: None,
to_npk: None,
to_vpk: None,
to_identifier: Some(0),
amount: fund_amount,
}),
)
@ -500,6 +501,7 @@ async fn transfer_via_ata_private_owner() -> Result<()> {
to_label: None,
to_npk: None,
to_vpk: None,
to_identifier: Some(0),
amount: fund_amount,
}),
)
@ -614,6 +616,7 @@ async fn burn_via_ata_private_owner() -> Result<()> {
to_label: None,
to_npk: None,
to_vpk: None,
to_identifier: Some(0),
amount: fund_amount,
}),
)

View File

@ -30,6 +30,7 @@ async fn private_transfer_to_owned_account() -> Result<()> {
to_label: None,
to_npk: None,
to_vpk: None,
to_identifier: Some(0),
amount: 100,
});
@ -71,6 +72,7 @@ async fn private_transfer_to_foreign_account() -> Result<()> {
to_label: None,
to_npk: Some(to_npk_string),
to_vpk: Some(hex::encode(to_vpk.0)),
to_identifier: Some(0),
amount: 100,
});
@ -121,6 +123,7 @@ async fn deshielded_transfer_to_public_account() -> Result<()> {
to_label: None,
to_npk: None,
to_vpk: None,
to_identifier: Some(0),
amount: 100,
});
@ -170,12 +173,11 @@ async fn private_transfer_to_owned_account_using_claiming_path() -> Result<()> {
};
// Get the keys for the newly created account
let (to_keys, _) = ctx
let (to_keys, _, to_identifier) = ctx
.wallet()
.storage()
.user_data
.get_private_account(to_account_id)
.cloned()
.context("Failed to get private account")?;
// Send to this account using claiming path (using npk and vpk instead of account ID)
@ -186,6 +188,7 @@ async fn private_transfer_to_owned_account_using_claiming_path() -> Result<()> {
to_label: None,
to_npk: Some(hex::encode(to_keys.nullifier_public_key.0)),
to_vpk: Some(hex::encode(to_keys.viewing_public_key.0)),
to_identifier: Some(to_identifier),
amount: 100,
});
@ -236,6 +239,7 @@ async fn shielded_transfer_to_owned_private_account() -> Result<()> {
to_label: None,
to_npk: None,
to_vpk: None,
to_identifier: Some(0),
amount: 100,
});
@ -280,6 +284,7 @@ async fn shielded_transfer_to_foreign_account() -> Result<()> {
to_label: None,
to_npk: Some(to_npk_string),
to_vpk: Some(hex::encode(to_vpk.0)),
to_identifier: Some(0),
amount: 100,
});
@ -336,12 +341,11 @@ async fn private_transfer_to_owned_account_continuous_run_path() -> Result<()> {
};
// Get the newly created account's keys
let (to_keys, _) = ctx
let (to_keys, _, to_identifier) = ctx
.wallet()
.storage()
.user_data
.get_private_account(to_account_id)
.cloned()
.context("Failed to get private account")?;
// Send transfer using nullifier and viewing public keys
@ -352,6 +356,7 @@ async fn private_transfer_to_owned_account_continuous_run_path() -> Result<()> {
to_label: None,
to_npk: Some(hex::encode(to_keys.nullifier_public_key.0)),
to_vpk: Some(hex::encode(to_keys.viewing_public_key.0)),
to_identifier: Some(to_identifier),
amount: 100,
});
@ -455,6 +460,7 @@ async fn private_transfer_using_from_label() -> Result<()> {
to_label: None,
to_npk: None,
to_vpk: None,
to_identifier: Some(0),
amount: 100,
});
@ -527,3 +533,112 @@ async fn initialize_private_account_using_label() -> Result<()> {
Ok(())
}
// Two shielded transfers to the SAME nullifier public key but DIFFERENT
// identifiers must produce two distinct private accounts that both resolve
// to the single key-tree node created up front.
#[test]
async fn shielded_transfers_to_two_identifiers_same_npk() -> Result<()> {
    let mut ctx = TestContext::new().await?;

    // Both transfers below will target this same node with distinct identifiers.
    let chain_index = ctx.wallet_mut().create_private_accounts_key(None);
    // Read the NPK/VPK pair back out of the key-tree node just created.
    let (npk, vpk) = {
        let node = ctx
            .wallet()
            .storage()
            .user_data
            .private_key_tree
            .key_map
            .get(&chain_index)
            .expect("node was just inserted");
        let key_chain = &node.value.0;
        (
            key_chain.nullifier_public_key,
            key_chain.viewing_public_key.clone(),
        )
    };
    let npk_hex = hex::encode(npk.0);
    let vpk_hex = hex::encode(vpk.0);

    // Distinct identifiers under the same NPK should yield distinct accounts.
    let identifier_1 = 1_u128;
    let identifier_2 = 2_u128;
    let sender_0: AccountId = ctx.existing_public_accounts()[0];
    let sender_1: AccountId = ctx.existing_public_accounts()[1];

    // First shielded transfer: 100 to identifier 1.
    wallet::cli::execute_subcommand(
        ctx.wallet_mut(),
        Command::AuthTransfer(AuthTransferSubcommand::Send {
            from: Some(format_public_account_id(sender_0)),
            from_label: None,
            to: None,
            to_label: None,
            to_npk: Some(npk_hex.clone()),
            to_vpk: Some(vpk_hex.clone()),
            to_identifier: Some(identifier_1),
            amount: 100,
        }),
    )
    .await?;
    // Second shielded transfer: 200 to identifier 2, same NPK/VPK.
    wallet::cli::execute_subcommand(
        ctx.wallet_mut(),
        Command::AuthTransfer(AuthTransferSubcommand::Send {
            from: Some(format_public_account_id(sender_1)),
            from_label: None,
            to: None,
            to_label: None,
            to_npk: Some(npk_hex),
            to_vpk: Some(vpk_hex),
            to_identifier: Some(identifier_2),
            amount: 200,
        }),
    )
    .await?;

    info!("Waiting for next block creation");
    tokio::time::sleep(Duration::from_secs(TIME_TO_WAIT_FOR_BLOCK_SECONDS)).await;

    // Private sync should discover both newly funded accounts.
    wallet::cli::execute_subcommand(
        ctx.wallet_mut(),
        Command::Account(AccountSubcommand::SyncPrivate {}),
    )
    .await?;

    // Both accounts must be discovered with the correct balances.
    let account_id_1 = AccountId::from((&npk, identifier_1));
    let acc_1 = ctx
        .wallet()
        .get_account_private(account_id_1)
        .context("account for identifier 1 not found after sync")?;
    assert_eq!(acc_1.balance, 100);

    let account_id_2 = AccountId::from((&npk, identifier_2));
    let acc_2 = ctx
        .wallet()
        .get_account_private(account_id_2)
        .context("account for identifier 2 not found after sync")?;
    assert_eq!(acc_2.balance, 200);

    // Both account ids must resolve to the same key node.
    let tree = &ctx.wallet().storage().user_data.private_key_tree;
    let ci_1 = tree
        .account_id_map
        .get(&account_id_1)
        .context("account_id_1 missing from private_key_tree.account_id_map")?;
    let ci_2 = tree
        .account_id_map
        .get(&account_id_2)
        .context("account_id_2 missing from private_key_tree.account_id_map")?;
    assert_eq!(
        ci_1, ci_2,
        "identifiers 1 and 2 under the same NPK must share a single chain_index"
    );
    assert_eq!(
        ci_1, &chain_index,
        "both accounts must resolve to the key node created at the start of the test"
    );

    info!("Successfully transferred to two distinct identifiers under the same NPK");
    Ok(())
}

View File

@ -23,6 +23,7 @@ async fn successful_transfer_to_existing_account() -> Result<()> {
to_label: None,
to_npk: None,
to_vpk: None,
to_identifier: Some(0),
amount: 100,
});
@ -81,6 +82,7 @@ pub async fn successful_transfer_to_new_account() -> Result<()> {
to_label: None,
to_npk: None,
to_vpk: None,
to_identifier: Some(0),
amount: 100,
});
@ -119,6 +121,7 @@ async fn failed_transfer_with_insufficient_balance() -> Result<()> {
to_label: None,
to_npk: None,
to_vpk: None,
to_identifier: Some(0),
amount: 1_000_000,
});
@ -159,6 +162,7 @@ async fn two_consecutive_successful_transfers() -> Result<()> {
to_label: None,
to_npk: None,
to_vpk: None,
to_identifier: Some(0),
amount: 100,
});
@ -193,6 +197,7 @@ async fn two_consecutive_successful_transfers() -> Result<()> {
to_label: None,
to_npk: None,
to_vpk: None,
to_identifier: Some(0),
amount: 100,
});
@ -278,6 +283,7 @@ async fn successful_transfer_using_from_label() -> Result<()> {
to_label: None,
to_npk: None,
to_vpk: None,
to_identifier: Some(0),
amount: 100,
});
@ -325,6 +331,7 @@ async fn successful_transfer_using_to_label() -> Result<()> {
to_label: Some(label),
to_npk: None,
to_vpk: None,
to_identifier: Some(0),
amount: 100,
});

View File

@ -111,6 +111,7 @@ async fn indexer_state_consistency() -> Result<()> {
to_label: None,
to_npk: None,
to_vpk: None,
to_identifier: Some(0),
amount: 100,
});
@ -147,6 +148,7 @@ async fn indexer_state_consistency() -> Result<()> {
to_label: None,
to_npk: None,
to_vpk: None,
to_identifier: Some(0),
amount: 100,
});
@ -233,6 +235,7 @@ async fn indexer_state_consistency_with_labels() -> Result<()> {
to_label: Some(to_label_str),
to_npk: None,
to_vpk: None,
to_identifier: Some(0),
amount: 100,
});

View File

@ -130,6 +130,7 @@ fn indexer_ffi_state_consistency() -> Result<()> {
to_npk: None,
to_vpk: None,
amount: 100,
to_identifier: Some(0),
});
runtime_wrapped.block_on(wallet::cli::execute_subcommand(ctx.wallet_mut(), command))?;
@ -171,6 +172,7 @@ fn indexer_ffi_state_consistency() -> Result<()> {
to_npk: None,
to_vpk: None,
amount: 100,
to_identifier: Some(0),
});
runtime_wrapped.block_on(wallet::cli::execute_subcommand(ctx.wallet_mut(), command))?;
@ -281,6 +283,7 @@ fn indexer_ffi_state_consistency_with_labels() -> Result<()> {
to_npk: None,
to_vpk: None,
amount: 100,
to_identifier: Some(0),
});
runtime_wrapped.block_on(wallet::cli::execute_subcommand(ctx.wallet_mut(), command))?;

View File

@ -59,12 +59,11 @@ async fn sync_private_account_with_non_zero_chain_index() -> Result<()> {
};
// Get the keys for the newly created account
let (to_keys, _) = ctx
let (to_keys, _, to_identifier) = ctx
.wallet()
.storage()
.user_data
.get_private_account(to_account_id)
.cloned()
.context("Failed to get private account")?;
// Send to this account using claiming path (using npk and vpk instead of account ID)
@ -75,6 +74,7 @@ async fn sync_private_account_with_non_zero_chain_index() -> Result<()> {
to_label: None,
to_npk: Some(hex::encode(to_keys.nullifier_public_key.0)),
to_vpk: Some(hex::encode(to_keys.viewing_public_key.0)),
to_identifier: Some(to_identifier),
amount: 100,
});
@ -151,6 +151,7 @@ async fn restore_keys_from_seed() -> Result<()> {
to_label: None,
to_npk: None,
to_vpk: None,
to_identifier: Some(0),
amount: 100,
});
wallet::cli::execute_subcommand(ctx.wallet_mut(), command).await?;
@ -163,6 +164,7 @@ async fn restore_keys_from_seed() -> Result<()> {
to_label: None,
to_npk: None,
to_vpk: None,
to_identifier: Some(0),
amount: 101,
});
wallet::cli::execute_subcommand(ctx.wallet_mut(), command).await?;
@ -203,6 +205,7 @@ async fn restore_keys_from_seed() -> Result<()> {
to_label: None,
to_npk: None,
to_vpk: None,
to_identifier: Some(0),
amount: 102,
});
wallet::cli::execute_subcommand(ctx.wallet_mut(), command).await?;
@ -215,6 +218,7 @@ async fn restore_keys_from_seed() -> Result<()> {
to_label: None,
to_npk: None,
to_vpk: None,
to_identifier: Some(0),
amount: 103,
});
wallet::cli::execute_subcommand(ctx.wallet_mut(), command).await?;
@ -259,16 +263,16 @@ async fn restore_keys_from_seed() -> Result<()> {
.expect("Acc 4 should be restored");
assert_eq!(
acc1.value.1.program_owner,
acc1.value.1[0].1.program_owner,
Program::authenticated_transfer_program().id()
);
assert_eq!(
acc2.value.1.program_owner,
acc2.value.1[0].1.program_owner,
Program::authenticated_transfer_program().id()
);
assert_eq!(acc1.value.1.balance, 100);
assert_eq!(acc2.value.1.balance, 101);
assert_eq!(acc1.value.1[0].1.balance, 100);
assert_eq!(acc2.value.1[0].1.balance, 101);
info!("Tree checks passed, testing restored accounts can transact");
@ -280,6 +284,7 @@ async fn restore_keys_from_seed() -> Result<()> {
to_label: None,
to_npk: None,
to_vpk: None,
to_identifier: Some(0),
amount: 10,
});
wallet::cli::execute_subcommand(ctx.wallet_mut(), command).await?;
@ -291,6 +296,7 @@ async fn restore_keys_from_seed() -> Result<()> {
to_label: None,
to_npk: None,
to_vpk: None,
to_identifier: Some(0),
amount: 11,
});
wallet::cli::execute_subcommand(ctx.wallet_mut(), command).await?;

View File

@ -134,6 +134,7 @@ async fn create_and_transfer_public_token() -> Result<()> {
to_label: None,
to_npk: None,
to_vpk: None,
to_identifier: Some(0),
amount: transfer_amount,
};
@ -227,6 +228,7 @@ async fn create_and_transfer_public_token() -> Result<()> {
holder_label: None,
holder_npk: None,
holder_vpk: None,
holder_identifier: None,
amount: mint_amount,
};
@ -372,6 +374,7 @@ async fn create_and_transfer_token_with_private_supply() -> Result<()> {
to_label: None,
to_npk: None,
to_vpk: None,
to_identifier: Some(0),
amount: transfer_amount,
};
@ -566,6 +569,7 @@ async fn create_token_with_private_definition() -> Result<()> {
holder_label: None,
holder_npk: None,
holder_vpk: None,
holder_identifier: None,
amount: mint_amount_public,
};
@ -614,6 +618,7 @@ async fn create_token_with_private_definition() -> Result<()> {
holder_label: None,
holder_npk: None,
holder_vpk: None,
holder_identifier: None,
amount: mint_amount_private,
};
@ -756,6 +761,7 @@ async fn create_token_with_private_definition_and_supply() -> Result<()> {
to_label: None,
to_npk: None,
to_vpk: None,
to_identifier: Some(0),
amount: transfer_amount,
};
@ -887,6 +893,7 @@ async fn shielded_token_transfer() -> Result<()> {
to_label: None,
to_npk: None,
to_vpk: None,
to_identifier: Some(0),
amount: transfer_amount,
};
@ -1013,6 +1020,7 @@ async fn deshielded_token_transfer() -> Result<()> {
to_label: None,
to_npk: None,
to_vpk: None,
to_identifier: Some(0),
amount: transfer_amount,
};
@ -1131,12 +1139,11 @@ async fn token_claiming_path_with_private_accounts() -> Result<()> {
};
// Get keys for foreign mint (claiming path)
let (holder_keys, _) = ctx
let (holder_keys, _, holder_identifier) = ctx
.wallet()
.storage()
.user_data
.get_private_account(recipient_account_id)
.cloned()
.context("Failed to get private account keys")?;
// Mint using claiming path (foreign account)
@ -1148,6 +1155,7 @@ async fn token_claiming_path_with_private_accounts() -> Result<()> {
holder_label: None,
holder_npk: Some(hex::encode(holder_keys.nullifier_public_key.0)),
holder_vpk: Some(hex::encode(holder_keys.viewing_public_key.0)),
holder_identifier: Some(holder_identifier),
amount: mint_amount,
};
@ -1351,6 +1359,7 @@ async fn transfer_token_using_from_label() -> Result<()> {
to_label: None,
to_npk: None,
to_vpk: None,
to_identifier: Some(0),
amount: transfer_amount,
};
wallet::cli::execute_subcommand(ctx.wallet_mut(), Command::Token(subcommand)).await?;

View File

@ -27,7 +27,7 @@ use nssa::{
public_transaction as putx,
};
use nssa_core::{
MembershipProof, NullifierPublicKey,
InputAccountIdentity, MembershipProof, NullifierPublicKey,
account::{AccountWithMetadata, Nonce, data::Data},
encryption::ViewingPublicKey,
};
@ -220,14 +220,17 @@ fn build_privacy_transaction() -> PrivacyPreservingTransaction {
data: Data::default(),
},
true,
AccountId::from(&sender_npk),
AccountId::from((&sender_npk, 0)),
);
let recipient_nsk = [2; 32];
let recipient_vsk = [99; 32];
let recipient_vpk = ViewingPublicKey::from_scalar(recipient_vsk);
let recipient_npk = NullifierPublicKey::from(&recipient_nsk);
let recipient_pre =
AccountWithMetadata::new(Account::default(), false, AccountId::from(&recipient_npk));
let recipient_pre = AccountWithMetadata::new(
Account::default(),
false,
AccountId::from((&recipient_npk, 0)),
);
let eph_holder_from = EphemeralKeyHolder::new(&sender_npk);
let sender_ss = eph_holder_from.calculate_shared_secret_sender(&sender_vpk);
@ -248,10 +251,19 @@ fn build_privacy_transaction() -> PrivacyPreservingTransaction {
let (output, proof) = circuit::execute_and_prove(
vec![sender_pre, recipient_pre],
Program::serialize_instruction(balance_to_move).unwrap(),
vec![1, 2],
vec![(sender_npk, sender_ss), (recipient_npk, recipient_ss)],
vec![sender_nsk],
vec![Some(proof)],
vec![
InputAccountIdentity::PrivateAuthorizedUpdate {
ssk: sender_ss,
nsk: sender_nsk,
membership_proof: proof,
identifier: 0,
},
InputAccountIdentity::PrivateUnauthorized {
npk: recipient_npk,
ssk: recipient_ss,
identifier: 0,
},
],
&program.into(),
)
.unwrap();

View File

@ -26,7 +26,7 @@ use nssa_core::program::DEFAULT_PROGRAM_ID;
use tempfile::tempdir;
use wallet_ffi::{
FfiAccount, FfiAccountList, FfiBytes32, FfiPrivateAccountKeys, FfiPublicAccountKey,
FfiTransferResult, WalletHandle, error,
FfiTransferResult, FfiU128, WalletHandle, error,
};
unsafe extern "C" {
@ -53,6 +53,11 @@ unsafe extern "C" {
out_account_id: *mut FfiBytes32,
) -> error::WalletFfiError;
fn wallet_ffi_create_private_accounts_key(
handle: *mut WalletHandle,
out_keys: *mut FfiPrivateAccountKeys,
) -> error::WalletFfiError;
fn wallet_ffi_list_accounts(
handle: *mut WalletHandle,
out_list: *mut FfiAccountList,
@ -116,6 +121,7 @@ unsafe extern "C" {
handle: *mut WalletHandle,
from: *const FfiBytes32,
to_keys: *const FfiPrivateAccountKeys,
to_identifier: *const FfiU128,
amount: *const [u8; 16],
out_result: *mut FfiTransferResult,
) -> error::WalletFfiError;
@ -132,6 +138,7 @@ unsafe extern "C" {
handle: *mut WalletHandle,
from: *const FfiBytes32,
to_keys: *const FfiPrivateAccountKeys,
to_identifier: *const FfiU128,
amount: *const [u8; 16],
out_result: *mut FfiTransferResult,
) -> error::WalletFfiError;
@ -260,33 +267,28 @@ fn wallet_ffi_create_public_accounts() -> Result<()> {
fn wallet_ffi_create_private_accounts() -> Result<()> {
let password = "password_for_tests";
let n_accounts = 10;
// Create `n_accounts` private accounts with wallet FFI
let new_private_account_ids_ffi = unsafe {
let mut account_ids = Vec::new();
// Create `n_accounts` receiving keys with wallet FFI
let new_npks_ffi = unsafe {
let mut npks = Vec::new();
let wallet_ffi_handle = new_wallet_ffi_with_default_config(password)?;
for _ in 0..n_accounts {
let mut out_account_id = FfiBytes32::from_bytes([0; 32]);
wallet_ffi_create_account_private(wallet_ffi_handle, &raw mut out_account_id);
account_ids.push(out_account_id.data);
let mut out_keys = FfiPrivateAccountKeys::default();
wallet_ffi_create_private_accounts_key(wallet_ffi_handle, &raw mut out_keys);
npks.push(out_keys.nullifier_public_key.data);
wallet_ffi_free_private_account_keys(&raw mut out_keys);
}
wallet_ffi_destroy(wallet_ffi_handle);
account_ids
npks
};
// All returned IDs must be unique and non-zero
assert_eq!(new_private_account_ids_ffi.len(), n_accounts);
let unique: HashSet<_> = new_private_account_ids_ffi.iter().collect();
assert_eq!(
unique.len(),
n_accounts,
"Duplicate private account IDs returned"
);
// All returned NPKs must be unique and non-zero
assert_eq!(new_npks_ffi.len(), n_accounts);
let unique: HashSet<_> = new_npks_ffi.iter().collect();
assert_eq!(unique.len(), n_accounts, "Duplicate NPKs returned");
assert!(
new_private_account_ids_ffi
.iter()
.all(|id| *id != [0_u8; 32]),
"Zero account ID returned"
new_npks_ffi.iter().all(|id| *id != [0_u8; 32]),
"Zero NPK returned"
);
Ok(())
@ -294,46 +296,35 @@ fn wallet_ffi_create_private_accounts() -> Result<()> {
#[test]
fn wallet_ffi_save_and_load_persistent_storage() -> Result<()> {
let ctx = BlockingTestContext::new()?;
let mut out_private_account_id = FfiBytes32::from_bytes([0; 32]);
let home = tempfile::tempdir()?;
// Create a private account with the wallet FFI and save it
unsafe {
// Create a receiving key and save
let first_npk = unsafe {
let wallet_ffi_handle = new_wallet_ffi_with_test_context_config(&ctx, home.path())?;
wallet_ffi_create_account_private(wallet_ffi_handle, &raw mut out_private_account_id);
let mut out_keys = FfiPrivateAccountKeys::default();
wallet_ffi_create_private_accounts_key(wallet_ffi_handle, &raw mut out_keys);
let npk = out_keys.nullifier_public_key.data;
wallet_ffi_free_private_account_keys(&raw mut out_keys);
wallet_ffi_save(wallet_ffi_handle);
wallet_ffi_destroy(wallet_ffi_handle);
}
let private_account_keys = unsafe {
let wallet_ffi_handle = load_existing_ffi_wallet(home.path())?;
let mut private_account = FfiAccount::default();
let result = wallet_ffi_get_account_private(
wallet_ffi_handle,
&raw const out_private_account_id,
&raw mut private_account,
);
assert_eq!(result, error::WalletFfiError::Success);
let mut out_keys = FfiPrivateAccountKeys::default();
let result = wallet_ffi_get_private_account_keys(
wallet_ffi_handle,
&raw const out_private_account_id,
&raw mut out_keys,
);
assert_eq!(result, error::WalletFfiError::Success);
wallet_ffi_destroy(wallet_ffi_handle);
out_keys
npk
};
assert_eq!(
nssa::AccountId::from(&private_account_keys.npk()),
out_private_account_id.into()
// After loading, creating a new key should yield a different NPK (state was persisted)
let second_npk = unsafe {
let wallet_ffi_handle = load_existing_ffi_wallet(home.path())?;
let mut out_keys = FfiPrivateAccountKeys::default();
wallet_ffi_create_private_accounts_key(wallet_ffi_handle, &raw mut out_keys);
let npk = out_keys.nullifier_public_key.data;
wallet_ffi_free_private_account_keys(&raw mut out_keys);
wallet_ffi_destroy(wallet_ffi_handle);
npk
};
assert_ne!(first_npk, [0_u8; 32], "First NPK should be non-zero");
assert_ne!(second_npk, [0_u8; 32], "Second NPK should be non-zero");
assert_ne!(
first_npk, second_npk,
"Keys should differ after state was persisted"
);
Ok(())
@ -344,22 +335,22 @@ fn test_wallet_ffi_list_accounts() -> Result<()> {
let password = "password_for_tests";
// Create the wallet FFI and track which account IDs were created as public/private
let (wallet_ffi_handle, created_public_ids, created_private_ids) = unsafe {
let (wallet_ffi_handle, created_public_ids) = unsafe {
let handle = new_wallet_ffi_with_default_config(password)?;
let mut public_ids: Vec<[u8; 32]> = Vec::new();
let mut private_ids: Vec<[u8; 32]> = Vec::new();
// Create 5 public accounts and 5 private accounts, recording their IDs
// Create 5 public accounts and 5 receiving keys
for _ in 0..5 {
let mut out_account_id = FfiBytes32::from_bytes([0; 32]);
wallet_ffi_create_account_public(handle, &raw mut out_account_id);
public_ids.push(out_account_id.data);
wallet_ffi_create_account_private(handle, &raw mut out_account_id);
private_ids.push(out_account_id.data);
let mut out_keys = FfiPrivateAccountKeys::default();
wallet_ffi_create_private_accounts_key(handle, &raw mut out_keys);
wallet_ffi_free_private_account_keys(&raw mut out_keys);
}
(handle, public_ids, private_ids)
(handle, public_ids)
};
// Get the account list with FFI method
@ -382,31 +373,19 @@ fn test_wallet_ffi_list_accounts() -> Result<()> {
.filter(|e| e.is_public)
.map(|e| e.account_id.data)
.collect();
let listed_private_ids: HashSet<[u8; 32]> = wallet_ffi_account_list_slice
.iter()
.filter(|e| !e.is_public)
.map(|e| e.account_id.data)
.collect();
for id in &created_public_ids {
assert!(
listed_public_ids.contains(id),
"Created public account not found in list with is_public=true"
);
}
for id in &created_private_ids {
assert!(
listed_private_ids.contains(id),
"Created private account not found in list with is_public=false"
);
}
// Total listed accounts must be at least the number we created
// Total listed accounts must be at least the number of public accounts created
// (receiving keys without synced accounts don't appear in the list)
assert!(
wallet_ffi_account_list.count >= created_public_ids.len() + created_private_ids.len(),
"Listed account count ({}) is less than the number of created accounts ({})",
wallet_ffi_account_list.count >= created_public_ids.len(),
"Listed account count ({}) is less than the number of created public accounts ({})",
wallet_ffi_account_list.count,
created_public_ids.len() + created_private_ids.len()
created_public_ids.len()
);
unsafe {
@ -710,25 +689,13 @@ fn wallet_ffi_init_private_account_auth_transfer() -> Result<()> {
let home = tempfile::tempdir()?;
let wallet_ffi_handle = new_wallet_ffi_with_test_context_config(&ctx, home.path())?;
// Create a new uninitialized public account
let mut out_account_id = FfiBytes32::from_bytes([0; 32]);
// Create a new private account
let mut out_account_id = FfiBytes32::default();
unsafe {
wallet_ffi_create_account_private(wallet_ffi_handle, &raw mut out_account_id);
}
// Check its program owner is the default program id
let account: Account = unsafe {
let mut out_account = FfiAccount::default();
wallet_ffi_get_account_private(
wallet_ffi_handle,
&raw const out_account_id,
&raw mut out_account,
);
(&out_account).try_into().unwrap()
};
assert_eq!(account.program_owner, DEFAULT_PROGRAM_ID);
// Call the init funciton
// Call the init function
let mut transfer_result = FfiTransferResult::default();
unsafe {
wallet_ffi_register_private_account(
@ -832,24 +799,24 @@ fn test_wallet_ffi_transfer_shielded() -> Result<()> {
let wallet_ffi_handle = new_wallet_ffi_with_test_context_config(&ctx, home.path())?;
let from: FfiBytes32 = (&ctx.ctx().existing_public_accounts()[0]).into();
let (to, to_keys) = unsafe {
let mut out_account_id = FfiBytes32::default();
let mut out_keys = FfiPrivateAccountKeys::default();
wallet_ffi_create_account_private(wallet_ffi_handle, &raw mut out_account_id);
wallet_ffi_get_private_account_keys(
wallet_ffi_handle,
&raw const out_account_id,
&raw mut out_keys,
);
(out_account_id, out_keys)
wallet_ffi_create_private_accounts_key(wallet_ffi_handle, &raw mut out_keys);
let account_id = nssa::AccountId::from((&out_keys.npk(), 0_u128));
let to: FfiBytes32 = (&account_id).into();
(to, out_keys)
};
let amount: [u8; 16] = 100_u128.to_le_bytes();
let mut transfer_result = FfiTransferResult::default();
unsafe {
let to_identifier = FfiU128 {
data: 0_u128.to_le_bytes(),
};
wallet_ffi_transfer_shielded(
wallet_ffi_handle,
&raw const from,
&raw const to_keys,
&raw const to_identifier,
&raw const amount,
&raw mut transfer_result,
);
@ -966,25 +933,25 @@ fn test_wallet_ffi_transfer_private() -> Result<()> {
let from: FfiBytes32 = (&ctx.ctx().existing_private_accounts()[0]).into();
let (to, to_keys) = unsafe {
let mut out_account_id = FfiBytes32::default();
let mut out_keys = FfiPrivateAccountKeys::default();
wallet_ffi_create_account_private(wallet_ffi_handle, &raw mut out_account_id);
wallet_ffi_get_private_account_keys(
wallet_ffi_handle,
&raw const out_account_id,
&raw mut out_keys,
);
(out_account_id, out_keys)
wallet_ffi_create_private_accounts_key(wallet_ffi_handle, &raw mut out_keys);
let account_id = nssa::AccountId::from((&out_keys.npk(), 0_u128));
let to: FfiBytes32 = (&account_id).into();
(to, out_keys)
};
let amount: [u8; 16] = 100_u128.to_le_bytes();
let mut transfer_result = FfiTransferResult::default();
unsafe {
let to_identifier = FfiU128 {
data: 0_u128.to_le_bytes(),
};
wallet_ffi_transfer_private(
wallet_ffi_handle,
&raw const from,
&raw const to_keys,
&raw const to_identifier,
&raw const amount,
&raw mut transfer_result,
);

View File

@ -26,3 +26,4 @@ itertools.workspace = true
[dev-dependencies]
base58.workspace = true
bincode.workspace = true

View File

@ -0,0 +1,504 @@
use aes_gcm::{Aes256Gcm, KeyInit as _, aead::Aead as _};
use nssa_core::{
SharedSecretKey,
encryption::{Scalar, shared_key_derivation::Secp256k1Point},
program::PdaSeed,
};
use rand::{RngCore as _, rngs::OsRng};
use serde::{Deserialize, Serialize};
use sha2::{Digest as _, digest::FixedOutput as _};
use super::secret_holders::{PrivateKeyHolder, SecretSpendingKey};
/// Public key used to seal a `GroupKeyHolder` for distribution to a recipient.
///
/// Structurally identical to `ViewingPublicKey` (both are secp256k1 points), but given
/// a distinct alias to clarify intent: viewing keys encrypt account state, sealing keys
/// encrypt the GMS for off-chain distribution.
pub type SealingPublicKey = Secp256k1Point;
/// Secret key used to unseal a `GroupKeyHolder` received from another member.
/// Structurally a plain secp256k1 scalar; the alias mirrors [`SealingPublicKey`].
pub type SealingSecretKey = Scalar;
/// Manages shared viewing keys for a group of controllers owning private PDAs.
///
/// The Group Master Secret (GMS) is a 32-byte random value shared among controllers.
/// Each private PDA owned by the group gets a unique [`SecretSpendingKey`] derived from
/// the GMS by mixing the PDA seed into the SHA-256 input (see `secret_spending_key_for_pda`).
///
/// # Distribution
///
/// The GMS is a long-term secret and must never cross a trust boundary in raw form.
/// Controllers share it off-chain by sealing it under each recipient's [`SealingPublicKey`]
/// (see `seal_for` / `unseal`). Wallets persisting a `GroupKeyHolder` must encrypt it at
/// rest; the raw bytes are exposed only via [`GroupKeyHolder::dangerous_raw_gms`], which
/// is intended for the sealing path exclusively.
///
/// # Logging safety
///
/// `Debug` is implemented manually to redact the GMS; formatting this value with `{:?}`
/// will not leak the secret. Code that formats through `{:#?}` on containing types is
/// safe for the same reason.
#[derive(Serialize, Deserialize, Clone)]
pub struct GroupKeyHolder {
    // Raw 32-byte Group Master Secret. Private by design: read only through
    // `dangerous_raw_gms` and the derivation/sealing methods.
    gms: [u8; 32],
}
impl std::fmt::Debug for GroupKeyHolder {
    /// Redacting `Debug`: prints a placeholder instead of the GMS bytes so the
    /// secret never reaches logs via `{:?}` or `{:#?}`.
    fn fmt(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        let mut builder = formatter.debug_struct("GroupKeyHolder");
        builder.field("gms", &"<redacted>");
        builder.finish()
    }
}
impl Default for GroupKeyHolder {
fn default() -> Self {
Self::new()
}
}
impl GroupKeyHolder {
    /// Create a new group with a fresh random GMS.
    #[must_use]
    pub fn new() -> Self {
        let mut gms = [0_u8; 32];
        // OS-provided CSPRNG; the GMS is long-term key material.
        OsRng.fill_bytes(&mut gms);
        Self { gms }
    }
    /// Restore from an existing GMS (received via `unseal`).
    #[must_use]
    pub const fn from_gms(gms: [u8; 32]) -> Self {
        Self { gms }
    }
    /// Returns the raw 32-byte GMS. The name reflects intent: only the sealed-distribution
    /// path (`seal_for`) and sealed-at-rest persistence should ever need the raw bytes. Do
    /// not log the result, do not pass it across an untrusted channel.
    #[must_use]
    pub const fn dangerous_raw_gms(&self) -> &[u8; 32] {
        &self.gms
    }
    /// Derive a per-PDA [`SecretSpendingKey`] by mixing the seed into the SHA-256 input.
    ///
    /// Each distinct `pda_seed` produces a distinct SSK in the full 256-bit space, so
    /// adversarial seed-grinding cannot collide two PDAs' derived keys under the same
    /// group. Uses the codebase's 32-byte protocol-versioned domain-separation convention.
    fn secret_spending_key_for_pda(&self, pda_seed: &PdaSeed) -> SecretSpendingKey {
        // 32-byte prefix: "/LEE/v0.3/GroupKeyDerivation/SSK" is exactly 32 ASCII bytes.
        const PREFIX: &[u8; 32] = b"/LEE/v0.3/GroupKeyDerivation/SSK";
        // SSK = SHA-256(prefix || gms || seed) — deterministic in (GMS, seed).
        let mut hasher = sha2::Sha256::new();
        hasher.update(PREFIX);
        hasher.update(self.gms);
        hasher.update(pda_seed.as_ref());
        SecretSpendingKey(hasher.finalize_fixed().into())
    }
    /// Derive keys for a specific PDA.
    ///
    /// All controllers holding the same GMS independently derive the same keys for the
    /// same PDA because the derivation is deterministic in (GMS, seed).
    #[must_use]
    pub fn derive_keys_for_pda(&self, pda_seed: &PdaSeed) -> PrivateKeyHolder {
        self.secret_spending_key_for_pda(pda_seed)
            .produce_private_key_holder(None)
    }
    /// Encrypts this holder's GMS under the recipient's [`SealingPublicKey`].
    ///
    /// Uses an ephemeral ECDH key exchange to derive a shared secret, then AES-256-GCM
    /// to encrypt the payload. The returned bytes are
    /// `ephemeral_pubkey (33) || nonce (12) || ciphertext+tag (48)` = 93 bytes.
    ///
    /// Each call generates a fresh ephemeral key, so two seals of the same holder produce
    /// different ciphertexts.
    #[must_use]
    pub fn seal_for(&self, recipient_key: &SealingPublicKey) -> Vec<u8> {
        // Fresh ephemeral scalar per call — gives seal_for its non-determinism.
        let mut ephemeral_scalar: Scalar = [0_u8; 32];
        OsRng.fill_bytes(&mut ephemeral_scalar);
        let ephemeral_pubkey = Secp256k1Point::from_scalar(ephemeral_scalar);
        // ECDH: ephemeral secret x recipient public point, then KDF to an AES key.
        let shared = SharedSecretKey::new(&ephemeral_scalar, recipient_key);
        let aes_key = Self::seal_kdf(&shared);
        let cipher = Aes256Gcm::new(&aes_key.into());
        // Random 96-bit nonce, the standard AES-GCM nonce size.
        let mut nonce_bytes = [0_u8; 12];
        OsRng.fill_bytes(&mut nonce_bytes);
        let nonce = aes_gcm::Nonce::from(nonce_bytes);
        let ciphertext = cipher
            .encrypt(&nonce, self.gms.as_ref())
            .expect("AES-GCM encryption should not fail with valid key/nonce");
        // 33 (compressed point) + 12 (nonce) + ciphertext+tag; checked_add keeps
        // the crate's no-silent-overflow convention.
        let capacity = 33_usize
            .checked_add(12)
            .and_then(|n| n.checked_add(ciphertext.len()))
            .expect("seal capacity overflow");
        let mut out = Vec::with_capacity(capacity);
        out.extend_from_slice(&ephemeral_pubkey.0);
        out.extend_from_slice(&nonce_bytes);
        out.extend_from_slice(&ciphertext);
        out
    }
    /// Decrypts a sealed `GroupKeyHolder` using the recipient's [`SealingSecretKey`].
    ///
    /// Returns `Err` if the ciphertext is too short, the ECDH point is invalid, or the
    /// AES-GCM authentication tag doesn't verify (wrong key or tampered data).
    pub fn unseal(sealed: &[u8], own_key: &SealingSecretKey) -> Result<Self, SealError> {
        // Header = 33-byte compressed ephemeral point + 12-byte nonce; the minimum
        // payload additionally carries the 16-byte GCM authentication tag.
        const HEADER_LEN: usize = 33 + 12;
        const MIN_LEN: usize = HEADER_LEN + 16;
        if sealed.len() < MIN_LEN {
            return Err(SealError::TooShort);
        }
        // MIN_LEN (61) > HEADER_LEN (45), so all slicing below is in bounds.
        let ephemeral_pubkey = Secp256k1Point(sealed[..33].to_vec());
        let nonce = aes_gcm::Nonce::from_slice(&sealed[33..HEADER_LEN]);
        let ciphertext = &sealed[HEADER_LEN..];
        // Mirror of seal_for: ECDH with own secret x sender's ephemeral point.
        let shared = SharedSecretKey::new(own_key, &ephemeral_pubkey);
        let aes_key = Self::seal_kdf(&shared);
        let cipher = Aes256Gcm::new(&aes_key.into());
        let plaintext = cipher
            .decrypt(nonce, ciphertext)
            .map_err(|_err| SealError::DecryptionFailed)?;
        // A valid payload is exactly the 32-byte GMS; anything else is rejected.
        if plaintext.len() != 32 {
            return Err(SealError::DecryptionFailed);
        }
        let mut gms = [0_u8; 32];
        gms.copy_from_slice(&plaintext);
        Ok(Self::from_gms(gms))
    }
    /// Derives an AES-256 key from the ECDH shared secret via SHA-256 with a domain prefix.
    fn seal_kdf(shared: &SharedSecretKey) -> [u8; 32] {
        // 26 ASCII bytes padded with six NULs to the 32-byte prefix convention.
        const PREFIX: &[u8; 32] = b"/LEE/v0.3/GroupKeySeal/AES\x00\x00\x00\x00\x00\x00";
        let mut hasher = sha2::Sha256::new();
        hasher.update(PREFIX);
        hasher.update(shared.0);
        hasher.finalize_fixed().into()
    }
}
/// Errors returned by [`GroupKeyHolder::unseal`].
///
/// Implements [`std::error::Error`] so callers can propagate it with `?` into
/// `anyhow`/`Box<dyn Error>` contexts, and [`std::fmt::Display`] for user-facing
/// messages. The comparison derives keep existing `matches!`-based tests working
/// while also allowing direct `assert_eq!` on results.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum SealError {
    /// The sealed payload is shorter than header (33 + 12) plus the 16-byte GCM tag.
    TooShort,
    /// AES-GCM authentication failed: wrong key, invalid ECDH point, tampered
    /// ciphertext, or a payload that is not exactly 32 bytes after decryption.
    DecryptionFailed,
}

impl std::fmt::Display for SealError {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        match self {
            Self::TooShort => write!(f, "sealed payload is too short"),
            Self::DecryptionFailed => write!(f, "failed to decrypt sealed group key"),
        }
    }
}

impl std::error::Error for SealError {}
#[cfg(test)]
mod tests {
    // Covers: deterministic per-(GMS, seed) derivation, domain separation from the
    // personal-key path, seal/unseal round-trips and failure modes, serde stability,
    // and pinned end-to-end values guarding against silent derivation drift.
    use nssa_core::NullifierPublicKey;
    use super::*;
    /// Two holders from the same GMS derive identical keys for the same PDA seed.
    #[test]
    fn same_gms_same_seed_produces_same_keys() {
        let gms = [42_u8; 32];
        let holder_a = GroupKeyHolder::from_gms(gms);
        let holder_b = GroupKeyHolder::from_gms(gms);
        let seed = PdaSeed::new([1; 32]);
        let keys_a = holder_a.derive_keys_for_pda(&seed);
        let keys_b = holder_b.derive_keys_for_pda(&seed);
        assert_eq!(
            keys_a.generate_nullifier_public_key().to_byte_array(),
            keys_b.generate_nullifier_public_key().to_byte_array(),
        );
    }
    /// Different PDA seeds produce different keys from the same GMS.
    #[test]
    fn same_gms_different_seed_produces_different_keys() {
        let holder = GroupKeyHolder::from_gms([42_u8; 32]);
        let seed_a = PdaSeed::new([1; 32]);
        let seed_b = PdaSeed::new([2; 32]);
        let npk_a = holder
            .derive_keys_for_pda(&seed_a)
            .generate_nullifier_public_key();
        let npk_b = holder
            .derive_keys_for_pda(&seed_b)
            .generate_nullifier_public_key();
        assert_ne!(npk_a.to_byte_array(), npk_b.to_byte_array());
    }
    /// Different GMS produce different keys for the same PDA seed.
    #[test]
    fn different_gms_same_seed_produces_different_keys() {
        let holder_a = GroupKeyHolder::from_gms([42_u8; 32]);
        let holder_b = GroupKeyHolder::from_gms([99_u8; 32]);
        let seed = PdaSeed::new([1; 32]);
        let npk_a = holder_a
            .derive_keys_for_pda(&seed)
            .generate_nullifier_public_key();
        let npk_b = holder_b
            .derive_keys_for_pda(&seed)
            .generate_nullifier_public_key();
        assert_ne!(npk_a.to_byte_array(), npk_b.to_byte_array());
    }
    /// GMS round-trip: export and restore produces the same keys.
    #[test]
    fn gms_round_trip() {
        let original = GroupKeyHolder::from_gms([7_u8; 32]);
        let restored = GroupKeyHolder::from_gms(*original.dangerous_raw_gms());
        let seed = PdaSeed::new([1; 32]);
        let npk_original = original
            .derive_keys_for_pda(&seed)
            .generate_nullifier_public_key();
        let npk_restored = restored
            .derive_keys_for_pda(&seed)
            .generate_nullifier_public_key();
        assert_eq!(npk_original.to_byte_array(), npk_restored.to_byte_array());
    }
    /// The derived `NullifierPublicKey` is non-zero (sanity check).
    #[test]
    fn derived_npk_is_non_zero() {
        let holder = GroupKeyHolder::from_gms([42_u8; 32]);
        let seed = PdaSeed::new([1; 32]);
        let npk = holder
            .derive_keys_for_pda(&seed)
            .generate_nullifier_public_key();
        assert_ne!(npk, NullifierPublicKey([0; 32]));
    }
    /// Pins the end-to-end derivation for a fixed (GMS, `ProgramId`, `PdaSeed`). Any change
    /// to `secret_spending_key_for_pda`, the `PrivateKeyHolder` nsk/npk chain, or the
    /// `AccountId::for_private_pda` formula breaks this test. Mirrors the pinned-value
    /// pattern from `for_private_pda_matches_pinned_value` in `nssa_core`.
    #[test]
    fn pinned_end_to_end_derivation_for_private_pda() {
        use nssa_core::{account::AccountId, program::ProgramId};
        let gms = [42_u8; 32];
        let seed = PdaSeed::new([1; 32]);
        let program_id: ProgramId = [9; 8];
        let holder = GroupKeyHolder::from_gms(gms);
        let npk = holder
            .derive_keys_for_pda(&seed)
            .generate_nullifier_public_key();
        let account_id = AccountId::for_private_pda(&program_id, &seed, &npk);
        // Pinned constants: regenerate only on a deliberate derivation change.
        let expected_npk = NullifierPublicKey([
            185, 161, 225, 224, 20, 156, 173, 0, 6, 173, 74, 136, 16, 88, 71, 154, 101, 160, 224,
            162, 247, 98, 183, 210, 118, 130, 143, 237, 20, 112, 111, 114,
        ]);
        let expected_account_id = AccountId::new([
            236, 138, 175, 184, 194, 233, 144, 109, 157, 51, 193, 120, 83, 110, 147, 90, 154, 57,
            148, 236, 12, 92, 135, 38, 253, 79, 88, 143, 161, 175, 46, 144,
        ]);
        assert_eq!(npk, expected_npk);
        assert_eq!(account_id, expected_account_id);
    }
    /// Wallets persist `GroupKeyHolder` to disk and reload it on startup. This test pins
    /// the serde round-trip: serialize, deserialize, and assert the derived keys for a
    /// sample seed match on both sides. A silent encoding drift would corrupt every
    /// group-owned account.
    #[test]
    fn gms_serde_round_trip_preserves_derivation() {
        let original = GroupKeyHolder::from_gms([7_u8; 32]);
        let encoded = bincode::serialize(&original).expect("serialize");
        let restored: GroupKeyHolder = bincode::deserialize(&encoded).expect("deserialize");
        let seed = PdaSeed::new([1; 32]);
        let npk_original = original
            .derive_keys_for_pda(&seed)
            .generate_nullifier_public_key();
        let npk_restored = restored
            .derive_keys_for_pda(&seed)
            .generate_nullifier_public_key();
        assert_eq!(npk_original, npk_restored);
        assert_eq!(original.dangerous_raw_gms(), restored.dangerous_raw_gms());
    }
    /// A `GroupKeyHolder` constructed from the same 32 bytes as a personal
    /// `SecretSpendingKey` must not derive the same `NullifierPublicKey` as the personal
    /// path, so a private PDA cannot be spent by a personal nullifier even under
    /// adversarial key-material reuse. The safety rests on the group path's distinct
    /// domain-separation prefix plus the seed mix-in (see `secret_spending_key_for_pda`).
    #[test]
    fn group_derivation_does_not_collide_with_personal_path_at_shared_bytes() {
        let shared_bytes = [13_u8; 32];
        let seed = PdaSeed::new([5; 32]);
        let group_npk = GroupKeyHolder::from_gms(shared_bytes)
            .derive_keys_for_pda(&seed)
            .generate_nullifier_public_key();
        let personal_npk = SecretSpendingKey(shared_bytes)
            .produce_private_key_holder(None)
            .generate_nullifier_public_key();
        assert_ne!(group_npk, personal_npk);
    }
    /// Seal then unseal recovers the same GMS and derived keys.
    #[test]
    fn seal_unseal_round_trip() {
        let holder = GroupKeyHolder::from_gms([42_u8; 32]);
        // Recipient's viewing key pair doubles as the sealing key pair here.
        let recipient_ssk = SecretSpendingKey([7_u8; 32]);
        let recipient_keys = recipient_ssk.produce_private_key_holder(None);
        let recipient_vpk = recipient_keys.generate_viewing_public_key();
        let recipient_vsk = recipient_keys.viewing_secret_key;
        let sealed = holder.seal_for(&recipient_vpk);
        let restored = GroupKeyHolder::unseal(&sealed, &recipient_vsk).expect("unseal");
        assert_eq!(restored.dangerous_raw_gms(), holder.dangerous_raw_gms());
        let seed = PdaSeed::new([1; 32]);
        assert_eq!(
            holder
                .derive_keys_for_pda(&seed)
                .generate_nullifier_public_key(),
            restored
                .derive_keys_for_pda(&seed)
                .generate_nullifier_public_key(),
        );
    }
    /// Unsealing with a different VSK fails with `DecryptionFailed`.
    #[test]
    fn unseal_wrong_vsk_fails() {
        let holder = GroupKeyHolder::from_gms([42_u8; 32]);
        let recipient_ssk = SecretSpendingKey([7_u8; 32]);
        let recipient_vpk = recipient_ssk
            .produce_private_key_holder(None)
            .generate_viewing_public_key();
        let wrong_ssk = SecretSpendingKey([99_u8; 32]);
        let wrong_vsk = wrong_ssk
            .produce_private_key_holder(None)
            .viewing_secret_key;
        let sealed = holder.seal_for(&recipient_vpk);
        let result = GroupKeyHolder::unseal(&sealed, &wrong_vsk);
        assert!(matches!(result, Err(super::SealError::DecryptionFailed)));
    }
    /// Tampered ciphertext fails authentication.
    #[test]
    fn unseal_tampered_ciphertext_fails() {
        let holder = GroupKeyHolder::from_gms([42_u8; 32]);
        let recipient_ssk = SecretSpendingKey([7_u8; 32]);
        let recipient_keys = recipient_ssk.produce_private_key_holder(None);
        let recipient_vpk = recipient_keys.generate_viewing_public_key();
        let recipient_vsk = recipient_keys.viewing_secret_key;
        let mut sealed = holder.seal_for(&recipient_vpk);
        // Flip a byte in the ciphertext portion (after ephemeral_pubkey + nonce)
        let last = sealed.len() - 1;
        sealed[last] ^= 0xFF;
        let result = GroupKeyHolder::unseal(&sealed, &recipient_vsk);
        assert!(matches!(result, Err(super::SealError::DecryptionFailed)));
    }
    /// Two seals of the same holder produce different ciphertexts (ephemeral randomness).
    #[test]
    fn two_seals_produce_different_ciphertexts() {
        let holder = GroupKeyHolder::from_gms([42_u8; 32]);
        let recipient_ssk = SecretSpendingKey([7_u8; 32]);
        let recipient_vpk = recipient_ssk
            .produce_private_key_holder(None)
            .generate_viewing_public_key();
        let sealed_a = holder.seal_for(&recipient_vpk);
        let sealed_b = holder.seal_for(&recipient_vpk);
        assert_ne!(sealed_a, sealed_b);
    }
    /// Sealed payload is too short.
    #[test]
    fn unseal_too_short_fails() {
        let vsk: SealingSecretKey = [7_u8; 32];
        let result = GroupKeyHolder::unseal(&[0_u8; 10], &vsk);
        assert!(matches!(result, Err(super::SealError::TooShort)));
    }
    /// Degenerate GMS values (all-zeros, all-ones, single-bit) must still produce valid,
    /// non-zero, pairwise-distinct npks. Rules out accidental "if gms == default { return
    /// default }" style shortcuts in the derivation.
    #[test]
    fn degenerate_gms_produces_distinct_non_zero_keys() {
        let seed = PdaSeed::new([1; 32]);
        let degenerate = [[0_u8; 32], [0xFF_u8; 32], {
            let mut v = [0_u8; 32];
            v[0] = 1;
            v
        }];
        let npks: Vec<NullifierPublicKey> = degenerate
            .iter()
            .map(|gms| {
                GroupKeyHolder::from_gms(*gms)
                    .derive_keys_for_pda(&seed)
                    .generate_nullifier_public_key()
            })
            .collect();
        for npk in &npks {
            assert_ne!(*npk, NullifierPublicKey([0; 32]));
        }
        // Pairwise distinctness over the small fixed set (O(n^2) over 3 items).
        for (i, a) in npks.iter().enumerate() {
            for b in &npks[i + 1..] {
                assert_ne!(a, b);
            }
        }
    }
    /// Full lifecycle: create group, distribute GMS via seal/unseal, verify key agreement.
    #[test]
    fn group_pda_lifecycle() {
        use nssa_core::account::AccountId;
        let alice_holder = GroupKeyHolder::new();
        let pda_seed = PdaSeed::new([42_u8; 32]);
        let program_id: nssa_core::program::ProgramId = [1; 8];
        // Derive Alice's keys
        let alice_keys = alice_holder.derive_keys_for_pda(&pda_seed);
        let alice_npk = alice_keys.generate_nullifier_public_key();
        // Seal GMS for Bob using Bob's viewing key, Bob unseals
        let bob_ssk = SecretSpendingKey([77_u8; 32]);
        let bob_keys = bob_ssk.produce_private_key_holder(None);
        let bob_vpk = bob_keys.generate_viewing_public_key();
        let bob_vsk = bob_keys.viewing_secret_key;
        let sealed = alice_holder.seal_for(&bob_vpk);
        let bob_holder =
            GroupKeyHolder::unseal(&sealed, &bob_vsk).expect("Bob should unseal the GMS");
        // Key agreement: both derive identical NPK and AccountId
        let bob_npk = bob_holder
            .derive_keys_for_pda(&pda_seed)
            .generate_nullifier_public_key();
        assert_eq!(alice_npk, bob_npk);
        let alice_account_id = AccountId::for_private_pda(&program_id, &pda_seed, &alice_npk);
        let bob_account_id = AccountId::for_private_pda(&program_id, &pda_seed, &bob_npk);
        assert_eq!(alice_account_id, bob_account_id);
    }
}

View File

@ -1,23 +1,24 @@
use k256::{Scalar, elliptic_curve::PrimeField as _};
use nssa_core::{NullifierPublicKey, encryption::ViewingPublicKey};
use nssa_core::{Identifier, NullifierPublicKey, encryption::ViewingPublicKey};
use serde::{Deserialize, Serialize};
use crate::key_management::{
KeyChain,
key_tree::traits::KeyNode,
key_tree::traits::KeyTreeNode,
secret_holders::{PrivateKeyHolder, SecretSpendingKey},
};
#[derive(Debug, Serialize, Deserialize, Clone)]
pub struct ChildKeysPrivate {
pub value: (KeyChain, nssa::Account),
pub value: (KeyChain, Vec<(Identifier, nssa::Account)>),
pub ccc: [u8; 32],
/// Can be [`None`] if root.
pub cci: Option<u32>,
}
impl KeyNode for ChildKeysPrivate {
fn root(seed: [u8; 64]) -> Self {
impl ChildKeysPrivate {
#[must_use]
pub fn root(seed: [u8; 64]) -> Self {
let hash_value = hmac_sha512::HMAC::mac(seed, b"LEE_master_priv");
let ssk = SecretSpendingKey(
@ -46,14 +47,15 @@ impl KeyNode for ChildKeysPrivate {
viewing_secret_key: vsk,
},
},
nssa::Account::default(),
vec![],
),
ccc,
cci: None,
}
}
fn nth_child(&self, cci: u32) -> Self {
#[must_use]
pub fn nth_child(&self, cci: u32) -> Self {
#[expect(clippy::arithmetic_side_effects, reason = "TODO: fix later")]
let parent_pt =
Scalar::from_repr(self.value.0.private_key_holder.nullifier_secret_key.into())
@ -95,43 +97,27 @@ impl KeyNode for ChildKeysPrivate {
viewing_secret_key: vsk,
},
},
nssa::Account::default(),
vec![],
),
ccc,
cci: Some(cci),
}
}
fn chain_code(&self) -> &[u8; 32] {
&self.ccc
}
fn child_index(&self) -> Option<u32> {
self.cci
}
fn account_id(&self) -> nssa::AccountId {
nssa::AccountId::from(&self.value.0.nullifier_public_key)
}
}
#[expect(
clippy::single_char_lifetime_names,
reason = "TODO add meaningful name"
)]
impl<'a> From<&'a ChildKeysPrivate> for &'a (KeyChain, nssa::Account) {
fn from(value: &'a ChildKeysPrivate) -> Self {
&value.value
impl KeyTreeNode for ChildKeysPrivate {
fn from_seed(seed: [u8; 64]) -> Self {
Self::root(seed)
}
}
#[expect(
clippy::single_char_lifetime_names,
reason = "TODO add meaningful name"
)]
impl<'a> From<&'a mut ChildKeysPrivate> for &'a mut (KeyChain, nssa::Account) {
fn from(value: &'a mut ChildKeysPrivate) -> Self {
&mut value.value
fn derive_child(&self, cci: u32) -> Self {
self.nth_child(cci)
}
fn account_ids(&self) -> impl Iterator<Item = nssa::AccountId> {
self.value.1.iter().map(|(identifier, _)| {
nssa::AccountId::from((&self.value.0.nullifier_public_key, *identifier))
})
}
}

View File

@ -1,7 +1,7 @@
use k256::elliptic_curve::{PrimeField as _, sec1::ToEncodedPoint as _};
use serde::{Deserialize, Serialize};
use crate::key_management::key_tree::traits::KeyNode;
use crate::key_management::key_tree::traits::KeyTreeNode;
#[derive(Debug, Serialize, Deserialize, Clone)]
pub struct ChildKeysPublic {
@ -13,32 +13,8 @@ pub struct ChildKeysPublic {
}
impl ChildKeysPublic {
fn compute_hash_value(&self, cci: u32) -> [u8; 64] {
let mut hash_input = vec![];
if ((2_u32).pow(31)).cmp(&cci) == std::cmp::Ordering::Greater {
// Non-harden.
// BIP-032 compatibility requires 1-byte header from the public_key;
// Not stored in `self.cpk.value()`.
let sk = k256::SecretKey::from_bytes(self.csk.value().into())
.expect("32 bytes, within curve order");
let pk = sk.public_key();
hash_input.extend_from_slice(pk.to_encoded_point(true).as_bytes());
} else {
// Harden.
hash_input.extend_from_slice(&[0_u8]);
hash_input.extend_from_slice(self.csk.value());
}
#[expect(clippy::big_endian_bytes, reason = "BIP-032 uses big endian")]
hash_input.extend_from_slice(&cci.to_be_bytes());
hmac_sha512::HMAC::mac(hash_input, self.ccc)
}
}
impl KeyNode for ChildKeysPublic {
fn root(seed: [u8; 64]) -> Self {
#[must_use]
pub fn root(seed: [u8; 64]) -> Self {
let hash_value = hmac_sha512::HMAC::mac(seed, "LEE_master_pub");
let csk = nssa::PrivateKey::try_new(
@ -58,7 +34,8 @@ impl KeyNode for ChildKeysPublic {
}
}
fn nth_child(&self, cci: u32) -> Self {
#[must_use]
pub fn nth_child(&self, cci: u32) -> Self {
let hash_value = self.compute_hash_value(cci);
let csk = nssa::PrivateKey::try_new({
@ -90,17 +67,33 @@ impl KeyNode for ChildKeysPublic {
}
}
fn chain_code(&self) -> &[u8; 32] {
&self.ccc
}
fn child_index(&self) -> Option<u32> {
self.cci
}
fn account_id(&self) -> nssa::AccountId {
#[must_use]
pub fn account_id(&self) -> nssa::AccountId {
nssa::AccountId::from(&self.cpk)
}
fn compute_hash_value(&self, cci: u32) -> [u8; 64] {
let mut hash_input = vec![];
if ((2_u32).pow(31)).cmp(&cci) == std::cmp::Ordering::Greater {
// Non-harden.
// BIP-032 compatibility requires 1-byte header from the public_key;
// Not stored in `self.cpk.value()`.
let sk = k256::SecretKey::from_bytes(self.csk.value().into())
.expect("32 bytes, within curve order");
let pk = sk.public_key();
hash_input.extend_from_slice(pk.to_encoded_point(true).as_bytes());
} else {
// Harden.
hash_input.extend_from_slice(&[0_u8]);
hash_input.extend_from_slice(self.csk.value());
}
#[expect(clippy::big_endian_bytes, reason = "BIP-032 uses big endian")]
hash_input.extend_from_slice(&cci.to_be_bytes());
hmac_sha512::HMAC::mac(hash_input, self.ccc)
}
}
#[expect(
@ -113,6 +106,20 @@ impl<'a> From<&'a ChildKeysPublic> for &'a nssa::PrivateKey {
}
}
impl KeyTreeNode for ChildKeysPublic {
fn from_seed(seed: [u8; 64]) -> Self {
Self::root(seed)
}
fn derive_child(&self, cci: u32) -> Self {
self.nth_child(cci)
}
fn account_ids(&self) -> impl Iterator<Item = nssa::AccountId> {
std::iter::once(self.account_id())
}
}
#[cfg(test)]
mod tests {
use nssa::{PrivateKey, PublicKey};

View File

@ -2,12 +2,13 @@ use std::collections::BTreeMap;
use anyhow::Result;
use nssa::{Account, AccountId};
use nssa_core::Identifier;
use serde::{Deserialize, Serialize};
use crate::key_management::{
key_tree::{
chain_index::ChainIndex, keys_private::ChildKeysPrivate, keys_public::ChildKeysPublic,
traits::KeyNode,
traits::KeyTreeNode,
},
secret_holders::SeedHolder,
};
@ -20,7 +21,7 @@ pub mod traits;
pub const DEPTH_SOFT_CAP: u32 = 20;
#[derive(Debug, Serialize, Deserialize, Clone)]
pub struct KeyTree<N: KeyNode> {
pub struct KeyTree<N: KeyTreeNode> {
pub key_map: BTreeMap<ChainIndex, N>,
pub account_id_map: BTreeMap<nssa::AccountId, ChainIndex>,
}
@ -28,7 +29,7 @@ pub struct KeyTree<N: KeyNode> {
pub type KeyTreePublic = KeyTree<ChildKeysPublic>;
pub type KeyTreePrivate = KeyTree<ChildKeysPrivate>;
impl<N: KeyNode> KeyTree<N> {
impl<N: KeyTreeNode> KeyTree<N> {
#[must_use]
pub fn new(seed: &SeedHolder) -> Self {
let seed_fit: [u8; 64] = seed
@ -37,29 +38,62 @@ impl<N: KeyNode> KeyTree<N> {
.try_into()
.expect("SeedHolder seed is 64 bytes long");
let root_keys = N::root(seed_fit);
let account_id = root_keys.account_id();
let key_map = BTreeMap::from_iter([(ChainIndex::root(), root_keys)]);
let account_id_map = BTreeMap::from_iter([(account_id, ChainIndex::root())]);
let root_keys = N::from_seed(seed_fit);
let account_id_map = root_keys
.account_ids()
.map(|id| (id, ChainIndex::root()))
.collect();
Self {
key_map,
key_map: BTreeMap::from_iter([(ChainIndex::root(), root_keys)]),
account_id_map,
}
}
pub fn new_from_root(root: N) -> Self {
let account_id_map = BTreeMap::from_iter([(root.account_id(), ChainIndex::root())]);
let key_map = BTreeMap::from_iter([(ChainIndex::root(), root)]);
let account_id_map = root
.account_ids()
.map(|id| (id, ChainIndex::root()))
.collect();
Self {
key_map,
key_map: BTreeMap::from_iter([(ChainIndex::root(), root)]),
account_id_map,
}
}
// ToDo: Add function to create a tree from list of nodes with consistency check.
pub fn generate_new_node(&mut self, parent_cci: &ChainIndex) -> Option<ChainIndex> {
let parent_keys = self.key_map.get(parent_cci)?;
let next_child_id = self
.find_next_last_child_of_id(parent_cci)
.expect("Can be None only if parent is not present");
let next_cci = parent_cci.nth_child(next_child_id);
let child_keys = parent_keys.derive_child(next_child_id);
let account_ids = child_keys.account_ids();
for account_id in account_ids {
self.account_id_map.insert(account_id, next_cci.clone());
}
self.key_map.insert(next_cci.clone(), child_keys);
Some(next_cci)
}
pub fn fill_node(&mut self, chain_index: &ChainIndex) -> Option<ChainIndex> {
let parent_keys = self.key_map.get(&chain_index.parent()?)?;
let child_id = *chain_index.chain().last()?;
let child_keys = parent_keys.derive_child(child_id);
let account_ids = child_keys.account_ids();
for account_id in account_ids {
self.account_id_map.insert(account_id, chain_index.clone());
}
self.key_map.insert(chain_index.clone(), child_keys);
Some(chain_index.clone())
}
#[must_use]
pub fn find_next_last_child_of_id(&self, parent_id: &ChainIndex) -> Option<u32> {
@ -102,25 +136,6 @@ impl<N: KeyNode> KeyTree<N> {
}
}
pub fn generate_new_node(
&mut self,
parent_cci: &ChainIndex,
) -> Option<(nssa::AccountId, ChainIndex)> {
let parent_keys = self.key_map.get(parent_cci)?;
let next_child_id = self
.find_next_last_child_of_id(parent_cci)
.expect("Can be None only if parent is not present");
let next_cci = parent_cci.nth_child(next_child_id);
let child_keys = parent_keys.nth_child(next_child_id);
let account_id = child_keys.account_id();
self.key_map.insert(next_cci.clone(), child_keys);
self.account_id_map.insert(account_id, next_cci.clone());
Some((account_id, next_cci))
}
fn find_next_slot_layered(&self) -> ChainIndex {
let mut depth = 1;
@ -134,44 +149,10 @@ impl<N: KeyNode> KeyTree<N> {
}
}
pub fn fill_node(&mut self, chain_index: &ChainIndex) -> Option<(nssa::AccountId, ChainIndex)> {
let parent_keys = self.key_map.get(&chain_index.parent()?)?;
let child_id = *chain_index.chain().last()?;
let child_keys = parent_keys.nth_child(child_id);
let account_id = child_keys.account_id();
self.key_map.insert(chain_index.clone(), child_keys);
self.account_id_map.insert(account_id, chain_index.clone());
Some((account_id, chain_index.clone()))
}
pub fn generate_new_node_layered(&mut self) -> Option<(nssa::AccountId, ChainIndex)> {
/// Creates a node in the first free layered slot and returns its index.
pub fn generate_new_node_layered(&mut self) -> Option<ChainIndex> {
    // Find the slot first, then materialize the node there.
    let slot = self.find_next_slot_layered();
    self.fill_node(&slot)
}
/// Resolves `account_id` to its node, if the id is registered.
#[must_use]
pub fn get_node(&self, account_id: nssa::AccountId) -> Option<&N> {
    self.account_id_map
        .get(&account_id)
        .and_then(|cci| self.key_map.get(cci))
}
/// Resolves `account_id` to a mutable reference to its node, if registered.
pub fn get_node_mut(&mut self, account_id: nssa::AccountId) -> Option<&mut N> {
    match self.account_id_map.get(&account_id) {
        Some(cci) => self.key_map.get_mut(cci),
        None => None,
    }
}
/// Stores `node` at `chain_index` and binds `account_id` to it, replacing
/// any previous entries under either key.
pub fn insert(&mut self, account_id: nssa::AccountId, chain_index: ChainIndex, node: N) {
    self.key_map.insert(chain_index.clone(), node);
    self.account_id_map.insert(account_id, chain_index);
}
/// Unbinds `addr` and drops its node, returning the node when both
/// mappings were present.
pub fn remove(&mut self, addr: nssa::AccountId) -> Option<N> {
    match self.account_id_map.remove(&addr) {
        Some(cci) => self.key_map.remove(&cci),
        None => None,
    }
}
/// Populates tree with children.
///
/// For given `depth` adds children to a tree such that their `ChainIndex::depth(&self) <
@ -194,37 +175,50 @@ impl<N: KeyNode> KeyTree<N> {
}
}
}
}
impl KeyTree<ChildKeysPrivate> {
/// Cleanup of non-initialized accounts in a private tree.
///
/// If account is default, removes them, stops at first non-default account.
///
/// Walks through tree in lairs of same depth using `ChainIndex::chain_ids_at_depth()`.
///
/// Chain must be parsed for accounts beforehand.
///
/// Slow, maintains tree consistency.
pub fn cleanup_tree_remove_uninit_layered(&mut self, depth: u32) {
let depth = usize::try_from(depth).expect("Depth is expected to fit in usize");
'outer: for i in (1..depth).rev() {
println!("Cleanup of tree at depth {i}");
for id in ChainIndex::chain_ids_at_depth(i) {
if let Some(node) = self.key_map.get(&id) {
if node.value.1 == nssa::Account::default() {
let addr = node.account_id();
self.remove(addr);
} else {
break 'outer;
}
}
}
}
/// Resolves `account_id` to its node via `account_id_map`, if registered.
#[must_use]
pub fn get_node(&self, account_id: nssa::AccountId) -> Option<&N> {
    let chain_id = self.account_id_map.get(&account_id)?;
    self.key_map.get(chain_id)
}
/// Resolves `account_id` to a mutable reference to its node, if registered.
pub fn get_node_mut(&mut self, account_id: nssa::AccountId) -> Option<&mut N> {
    let chain_id = self.account_id_map.get(&account_id)?;
    self.key_map.get_mut(chain_id)
}
/// Stores `node` at `chain_index` and binds `account_id` to it,
/// replacing any previous entries under either key.
pub fn insert(&mut self, account_id: nssa::AccountId, chain_index: ChainIndex, node: N) {
    self.account_id_map.insert(account_id, chain_index.clone());
    self.key_map.insert(chain_index, node);
}
/// Unbinds `addr` and drops its node, returning the node when both
/// mappings were present.
pub fn remove(&mut self, addr: nssa::AccountId) -> Option<N> {
    let chain_index = self.account_id_map.remove(&addr)?;
    self.key_map.remove(&chain_index)
}
}
impl KeyTree<ChildKeysPublic> {
/// Creates a new public key node under `parent_cci`, returning the derived
/// account ID together with the new node's chain index.
///
/// Returns `None` when the parent is missing or the new node exposes no
/// account ids.
pub fn generate_new_public_node(
    &mut self,
    parent_cci: &ChainIndex,
) -> Option<(nssa::AccountId, ChainIndex)> {
    let cci = self.generate_new_node(parent_cci)?;
    let account_id = self.key_map.get(&cci)?.account_ids().next()?;
    Some((account_id, cci))
}
/// Layered-placement counterpart of `generate_new_public_node`: picks the
/// slot automatically and returns the account ID and chain index.
pub fn generate_new_public_node_layered(&mut self) -> Option<(nssa::AccountId, ChainIndex)> {
    let cci = self.generate_new_node_layered()?;
    let account_id = self.key_map.get(&cci)?.account_ids().next()?;
    Some((account_id, cci))
}
/// Cleanup of non-initialized accounts in a public tree.
///
/// If account is default, removes them, stops at first non-default account.
@ -259,6 +253,65 @@ impl KeyTree<ChildKeysPublic> {
}
}
impl KeyTree<ChildKeysPrivate> {
/// Creates a fresh private-accounts key node under `parent_cci`.
///
/// Thin wrapper over `generate_new_node`; returns `None` when the parent
/// is not present in the tree.
pub fn create_private_accounts_key_node(
    &mut self,
    parent_cci: &ChainIndex,
) -> Option<ChainIndex> {
    self.generate_new_node(parent_cci)
}
/// Creates a fresh private-accounts key node in the first free layered slot.
pub fn create_private_accounts_key_node_layered(&mut self) -> Option<ChainIndex> {
    self.generate_new_node_layered()
}
/// Register an additional identifier on an existing private key node, inserting the derived
/// `AccountId` into `account_id_map`. Returns `None` if the node does not exist or the
/// `AccountId` is already registered.
pub fn register_identifier_on_node(
    &mut self,
    cci: &ChainIndex,
    identifier: Identifier,
) -> Option<nssa::AccountId> {
    use std::collections::btree_map::Entry;

    let node = self.key_map.get(cci)?;
    // Private account ids are derived from (nullifier public key, identifier).
    let account_id = nssa::AccountId::from((&node.value.0.nullifier_public_key, identifier));
    // Entry API: a single tree walk instead of `contains_key` followed by `insert`.
    match self.account_id_map.entry(account_id) {
        Entry::Occupied(_) => None,
        Entry::Vacant(slot) => {
            slot.insert(cci.clone());
            Some(account_id)
        }
    }
}
/// Cleanup of non-initialized accounts in a private tree.
///
/// If account has no synced entries, removes it, stops at first initialized account.
///
/// Walks through tree in layers of same depth using `ChainIndex::chain_ids_at_depth()`.
///
/// Chain must be parsed for accounts beforehand.
///
/// Slow, maintains tree consistency.
pub fn cleanup_tree_remove_uninit_layered(&mut self, depth: u32) {
    let depth = usize::try_from(depth).expect("Depth is expected to fit in usize");
    // Deepest layers first, so children are pruned before their parents.
    'outer: for i in (1..depth).rev() {
        println!("Cleanup of tree at depth {i}");
        for id in ChainIndex::chain_ids_at_depth(i) {
            // Peek first; absent slots are simply skipped.
            let is_uninit = match self.key_map.get(&id) {
                Some(node) => node.value.1.is_empty(),
                None => continue,
            };
            if !is_uninit {
                // First initialized node terminates the whole cleanup.
                break 'outer;
            }
            // Remove the node and take ownership of it; iterating the owned
            // node's account ids avoids the full-node clone the previous
            // version made only to satisfy the borrow checker.
            if let Some(node) = self.key_map.remove(&id) {
                for addr in node.account_ids() {
                    self.account_id_map.remove(&addr);
                }
            }
        }
    }
}
}
#[cfg(test)]
mod tests {
#![expect(clippy::shadow_unrelated, reason = "We don't care about this in tests")]
@ -478,25 +531,59 @@ mod tests {
.key_map
.get_mut(&ChainIndex::from_str("/1").unwrap())
.unwrap();
acc.value.1.balance = 2;
acc.value.1.push((
0,
nssa::Account {
balance: 2,
..nssa::Account::default()
},
));
let acc = tree
.key_map
.get_mut(&ChainIndex::from_str("/2").unwrap())
.unwrap();
acc.value.1.balance = 3;
acc.value.1.push((
0,
nssa::Account {
balance: 3,
..nssa::Account::default()
},
));
let acc = tree
.key_map
.get_mut(&ChainIndex::from_str("/0/1").unwrap())
.unwrap();
acc.value.1.balance = 5;
acc.value.1.push((
0,
nssa::Account {
balance: 5,
..nssa::Account::default()
},
));
let acc = tree
.key_map
.get_mut(&ChainIndex::from_str("/1/0").unwrap())
.unwrap();
acc.value.1.balance = 6;
acc.value.1.push((
0,
nssa::Account {
balance: 6,
..nssa::Account::default()
},
));
// Update account_id_map for nodes that now have entries
for chain_index_str in ["/1", "/2", "/0/1", "/1/0"] {
let id = ChainIndex::from_str(chain_index_str).unwrap();
if let Some(node) = tree.key_map.get(&id) {
for account_id in node.account_ids() {
tree.account_id_map.insert(account_id, id.clone());
}
}
}
tree.cleanup_tree_remove_uninit_layered(10);
@ -518,15 +605,15 @@ mod tests {
assert_eq!(key_set, key_set_res);
let acc = &tree.key_map[&ChainIndex::from_str("/1").unwrap()];
assert_eq!(acc.value.1.balance, 2);
assert_eq!(acc.value.1[0].1.balance, 2);
let acc = &tree.key_map[&ChainIndex::from_str("/2").unwrap()];
assert_eq!(acc.value.1.balance, 3);
assert_eq!(acc.value.1[0].1.balance, 3);
let acc = &tree.key_map[&ChainIndex::from_str("/0/1").unwrap()];
assert_eq!(acc.value.1.balance, 5);
assert_eq!(acc.value.1[0].1.balance, 5);
let acc = &tree.key_map[&ChainIndex::from_str("/1/0").unwrap()];
assert_eq!(acc.value.1.balance, 6);
assert_eq!(acc.value.1[0].1.balance, 6);
}
}

View File

@ -1,15 +1,8 @@
/// Trait, that reperesents a Node in hierarchical key tree.
pub trait KeyNode {
/// Tree root node.
fn root(seed: [u8; 64]) -> Self;
/// `cci`'s child of node.
pub trait KeyTreeNode: Sized {
#[must_use]
fn nth_child(&self, cci: u32) -> Self;
fn chain_code(&self) -> &[u8; 32];
fn child_index(&self) -> Option<u32>;
fn account_id(&self) -> nssa::AccountId;
fn from_seed(seed: [u8; 64]) -> Self;
#[must_use]
fn derive_child(&self, cci: u32) -> Self;
#[must_use]
fn account_ids(&self) -> impl Iterator<Item = nssa::AccountId>;
}

View File

@ -6,6 +6,7 @@ use secret_holders::{PrivateKeyHolder, SecretSpendingKey, SeedHolder};
use serde::{Deserialize, Serialize};
pub mod ephemeral_key_holder;
pub mod group_key_holder;
pub mod key_tree;
pub mod secret_holders;
@ -172,11 +173,12 @@ mod tests {
// /0/0
key_tree_private.generate_new_node_layered().unwrap();
// /2
let (second_child_id, _) = key_tree_private.generate_new_node_layered().unwrap();
let second_chain_index = key_tree_private.generate_new_node_layered().unwrap();
key_tree_private
.get_node(second_child_id)
.unwrap()
.key_map
.get(&second_chain_index)
.expect("Node was just inserted")
.value
.0
.clone()

View File

@ -2,27 +2,46 @@ use std::collections::BTreeMap;
use anyhow::Result;
use k256::AffinePoint;
use nssa::{Account, AccountId};
use nssa_core::Identifier;
use serde::{Deserialize, Serialize};
use crate::key_management::{
KeyChain,
group_key_holder::GroupKeyHolder,
key_tree::{KeyTreePrivate, KeyTreePublic, chain_index::ChainIndex},
secret_holders::SeedHolder,
};
pub type PublicKey = AffinePoint;
/// Key material and account entries for one default private account group.
#[derive(Clone, Debug, Serialize, Deserialize)]
pub struct UserPrivateAccountData {
    /// Key chain whose nullifier public key, combined with each entry's
    /// identifier, derives the corresponding `AccountId`.
    pub key_chain: KeyChain,
    /// `(identifier, account)` pairs belonging to this key chain.
    pub accounts: Vec<(Identifier, Account)>,
}
#[derive(Clone, Debug, Serialize, Deserialize)]
pub struct NSSAUserData {
/// Default public accounts.
pub default_pub_account_signing_keys: BTreeMap<nssa::AccountId, nssa::PrivateKey>,
/// Default private accounts.
pub default_user_private_accounts:
BTreeMap<nssa::AccountId, (KeyChain, nssa_core::account::Account)>,
pub default_user_private_accounts: BTreeMap<AccountId, UserPrivateAccountData>,
/// Tree of public keys.
pub public_key_tree: KeyTreePublic,
/// Tree of private keys.
pub private_key_tree: KeyTreePrivate,
/// Group key holders for private PDA groups, keyed by a human-readable label.
/// Defaults to empty for backward compatibility with wallets that predate group PDAs.
/// An older wallet binary that re-serializes this struct will drop the field.
#[serde(default)]
pub group_key_holders: BTreeMap<String, GroupKeyHolder>,
/// Cached plaintext state of private PDA accounts, keyed by `AccountId`.
/// Updated after each private PDA transaction by decrypting the circuit output.
/// The sequencer only stores encrypted commitments, so this local cache is the
/// only source of plaintext state for private PDAs.
#[serde(default, alias = "group_pda_accounts")]
pub pda_accounts: BTreeMap<nssa::AccountId, nssa_core::account::Account>,
}
impl NSSAUserData {
@ -42,13 +61,16 @@ impl NSSAUserData {
}
fn valid_private_key_transaction_pairing_check(
accounts_keys_map: &BTreeMap<nssa::AccountId, (KeyChain, nssa_core::account::Account)>,
accounts_keys_map: &BTreeMap<AccountId, UserPrivateAccountData>,
) -> bool {
let mut check_res = true;
for (account_id, (key, _)) in accounts_keys_map {
let expected_account_id = nssa::AccountId::from(&key.nullifier_public_key);
if expected_account_id != *account_id {
println!("{expected_account_id}, {account_id}");
for (account_id, entry) in accounts_keys_map {
let any_match = entry.accounts.iter().any(|(identifier, _)| {
nssa::AccountId::from((&entry.key_chain.nullifier_public_key, *identifier))
== *account_id
});
if !any_match {
println!("No matching entry found for account_id {account_id}");
check_res = false;
}
}
@ -57,10 +79,7 @@ impl NSSAUserData {
pub fn new_with_accounts(
default_accounts_keys: BTreeMap<nssa::AccountId, nssa::PrivateKey>,
default_accounts_key_chains: BTreeMap<
nssa::AccountId,
(KeyChain, nssa_core::account::Account),
>,
default_accounts_key_chains: BTreeMap<AccountId, UserPrivateAccountData>,
public_key_tree: KeyTreePublic,
private_key_tree: KeyTreePrivate,
) -> Result<Self> {
@ -81,6 +100,8 @@ impl NSSAUserData {
default_user_private_accounts: default_accounts_key_chains,
public_key_tree,
private_key_tree,
group_key_holders: BTreeMap::new(),
pda_accounts: BTreeMap::new(),
})
}
@ -94,11 +115,11 @@ impl NSSAUserData {
match parent_cci {
Some(parent_cci) => self
.public_key_tree
.generate_new_node(&parent_cci)
.generate_new_public_node(&parent_cci)
.expect("Parent must be present in a tree"),
None => self
.public_key_tree
.generate_new_node_layered()
.generate_new_public_node_layered()
.expect("Search for new node slot failed"),
}
}
@ -114,50 +135,61 @@ impl NSSAUserData {
.or_else(|| self.public_key_tree.get_node(account_id).map(Into::into))
}
/// Generated new private key for privacy preserving transactions.
///
/// Returns the `account_id` of new account.
pub fn generate_new_privacy_preserving_transaction_key_chain(
&mut self,
parent_cci: Option<ChainIndex>,
) -> (nssa::AccountId, ChainIndex) {
/// Creates a new receiving key node and returns its `ChainIndex`.
pub fn create_private_accounts_key(&mut self, parent_cci: Option<ChainIndex>) -> ChainIndex {
match parent_cci {
Some(parent_cci) => self
.private_key_tree
.generate_new_node(&parent_cci)
.create_private_accounts_key_node(&parent_cci)
.expect("Parent must be present in a tree"),
None => self
.private_key_tree
.generate_new_node_layered()
.create_private_accounts_key_node_layered()
.expect("Search for new node slot failed"),
}
}
/// Returns the signing key for public transaction signatures.
/// Registers an additional identifier on an existing private key node, deriving and recording
/// the corresponding `AccountId`. Returns `None` if the node does not exist or the identifier
/// is already registered.
pub fn register_identifier_on_private_key_chain(
&mut self,
cci: &ChainIndex,
identifier: Identifier,
) -> Option<nssa::AccountId> {
self.private_key_tree
.register_identifier_on_node(cci, identifier)
}
/// Returns the key chain and account data for the given private account ID.
#[must_use]
pub fn get_private_account(
&self,
account_id: nssa::AccountId,
) -> Option<&(KeyChain, nssa_core::account::Account)> {
self.default_user_private_accounts
.get(&account_id)
.or_else(|| self.private_key_tree.get_node(account_id).map(Into::into))
}
/// Returns the signing key for public transaction signatures.
pub fn get_private_account_mut(
&mut self,
account_id: &nssa::AccountId,
) -> Option<&mut (KeyChain, nssa_core::account::Account)> {
// First seek in defaults
if let Some(key) = self.default_user_private_accounts.get_mut(account_id) {
Some(key)
// Then seek in tree
} else {
self.private_key_tree
.get_node_mut(*account_id)
.map(Into::into)
) -> Option<(KeyChain, nssa_core::account::Account, Identifier)> {
// Check default accounts
if let Some(entry) = self.default_user_private_accounts.get(&account_id) {
for (identifier, account) in &entry.accounts {
let expected_id =
nssa::AccountId::from((&entry.key_chain.nullifier_public_key, *identifier));
if expected_id == account_id {
return Some((entry.key_chain.clone(), account.clone(), *identifier));
}
}
return None;
}
// Check tree
if let Some(node) = self.private_key_tree.get_node(account_id) {
let key_chain = &node.value.0;
for (identifier, account) in &node.value.1 {
let expected_id =
nssa::AccountId::from((&key_chain.nullifier_public_key, *identifier));
if expected_id == account_id {
return Some((key_chain.clone(), account.clone(), *identifier));
}
}
}
None
}
pub fn account_ids(&self) -> impl Iterator<Item = nssa::AccountId> {
@ -177,6 +209,20 @@ impl NSSAUserData {
.copied()
.chain(self.private_key_tree.account_id_map.keys().copied())
}
/// Returns the `GroupKeyHolder` for the given label, if it exists.
#[must_use]
pub fn group_key_holder(&self, label: &str) -> Option<&GroupKeyHolder> {
self.group_key_holders.get(label)
}
/// Inserts or replaces a `GroupKeyHolder` under the given label.
///
/// If a holder already exists under this label, it is silently replaced and the old
/// GMS is lost. Callers must ensure label uniqueness across groups.
pub fn insert_group_key_holder(&mut self, label: String, holder: GroupKeyHolder) {
self.group_key_holders.insert(label, holder);
}
}
impl Default for NSSAUserData {
@ -196,20 +242,39 @@ impl Default for NSSAUserData {
mod tests {
use super::*;
#[test]
fn group_key_holder_storage_round_trip() {
let mut user_data = NSSAUserData::default();
assert!(user_data.group_key_holder("test-group").is_none());
let holder = GroupKeyHolder::from_gms([42_u8; 32]);
user_data.insert_group_key_holder(String::from("test-group"), holder.clone());
let retrieved = user_data
.group_key_holder("test-group")
.expect("should exist");
assert_eq!(retrieved.dangerous_raw_gms(), holder.dangerous_raw_gms());
}
#[test]
fn group_key_holders_default_empty() {
let user_data = NSSAUserData::default();
assert!(user_data.group_key_holders.is_empty());
}
#[test]
fn new_account() {
let mut user_data = NSSAUserData::default();
let (account_id_private, _) = user_data
.generate_new_privacy_preserving_transaction_key_chain(Some(ChainIndex::root()));
let is_key_chain_generated = user_data.get_private_account(account_id_private).is_some();
let chain_index = user_data.create_private_accounts_key(Some(ChainIndex::root()));
let is_key_chain_generated = user_data
.private_key_tree
.key_map
.contains_key(&chain_index);
assert!(is_key_chain_generated);
let account_id_private_str = account_id_private.to_string();
println!("{account_id_private_str:#?}");
let key_chain = &user_data.get_private_account(account_id_private).unwrap().0;
let key_chain = &user_data.private_key_tree.key_map[&chain_index].value.0;
println!("{key_chain:#?}");
}
}

View File

@ -10,7 +10,7 @@ use risc0_zkvm::sha::{Impl, Sha256 as _};
use serde::{Deserialize, Serialize};
use serde_with::{DeserializeFromStr, SerializeDisplay};
use crate::{NullifierPublicKey, NullifierSecretKey, program::ProgramId};
use crate::{NullifierSecretKey, program::ProgramId};
pub mod data;
@ -26,9 +26,9 @@ impl Nonce {
}
#[must_use]
pub fn private_account_nonce_init(npk: &NullifierPublicKey) -> Self {
pub fn private_account_nonce_init(account_id: &AccountId) -> Self {
let mut bytes: [u8; 64] = [0_u8; 64];
bytes[..32].copy_from_slice(&npk.0);
bytes[..32].copy_from_slice(account_id.value());
let result: [u8; 32] = Impl::hash_bytes(&bytes).as_bytes().try_into().unwrap();
let result = result.first_chunk::<16>().unwrap();
@ -306,8 +306,8 @@ mod tests {
#[test]
fn initialize_private_nonce() {
let npk = NullifierPublicKey([42; 32]);
let nonce = Nonce::private_account_nonce_init(&npk);
let account_id = AccountId::new([42; 32]);
let nonce = Nonce::private_account_nonce_init(&account_id);
let expected_nonce = Nonce(37_937_661_125_547_691_021_612_781_941_709_513_486);
assert_eq!(nonce, expected_nonce);
}

View File

@ -1,7 +1,7 @@
use serde::{Deserialize, Serialize};
use crate::{
Commitment, CommitmentSetDigest, MembershipProof, Nullifier, NullifierPublicKey,
Commitment, CommitmentSetDigest, Identifier, MembershipProof, Nullifier, NullifierPublicKey,
NullifierSecretKey, SharedSecretKey,
account::{Account, AccountWithMetadata},
encryption::Ciphertext,
@ -12,23 +12,92 @@ use crate::{
pub struct PrivacyPreservingCircuitInput {
/// Outputs of the program execution.
pub program_outputs: Vec<ProgramOutput>,
/// Visibility mask for accounts.
///
/// - `0` - public account
/// - `1` - private account with authentication
/// - `2` - private account without authentication
/// - `3` - private PDA account
pub visibility_mask: Vec<u8>,
/// Public keys of private accounts.
pub private_account_keys: Vec<(NullifierPublicKey, SharedSecretKey)>,
/// Nullifier secret keys for authorized private accounts.
pub private_account_nsks: Vec<NullifierSecretKey>,
/// Membership proofs for private accounts. Can be [`None`] for uninitialized accounts.
pub private_account_membership_proofs: Vec<Option<MembershipProof>>,
/// One entry per `pre_state`, in the same order as the program's `pre_states`.
/// Length must equal the number of `pre_states` derived from `program_outputs`.
/// The guest's `private_pda_npk_by_position` and `private_pda_bound_positions`
/// rely on this position alignment.
pub account_identities: Vec<InputAccountIdentity>,
/// Program ID.
pub program_id: ProgramId,
}
/// Per-account input to the privacy-preserving circuit. Each variant carries exactly the fields
/// the guest needs for that account's code path.
#[derive(Serialize, Deserialize, Clone)]
pub enum InputAccountIdentity {
    /// Public account. The guest reads pre/post state from `program_outputs` and emits no
    /// commitment, ciphertext, or nullifier.
    Public,
    /// Init of an authorized standalone private account: no membership proof. The `pre_state`
    /// must be `Account::default()`. The `account_id` is derived as
    /// `AccountId::from((&NullifierPublicKey::from(nsk), identifier))` and matched against
    /// `pre_state.account_id`.
    PrivateAuthorizedInit {
        /// Shared secret key for this account's ciphertext.
        ssk: SharedSecretKey,
        /// Nullifier secret key proving the caller's authorization.
        nsk: NullifierSecretKey,
        /// Identifier combined with the npk to derive the account id.
        identifier: Identifier,
    },
    /// Update of an authorized standalone private account: existing on-chain commitment, with
    /// membership proof.
    PrivateAuthorizedUpdate {
        /// Shared secret key for this account's ciphertext.
        ssk: SharedSecretKey,
        /// Nullifier secret key proving the caller's authorization.
        nsk: NullifierSecretKey,
        /// Proof that the account's commitment is in the commitment set.
        membership_proof: MembershipProof,
        /// Identifier combined with the npk to derive the account id.
        identifier: Identifier,
    },
    /// Init of a standalone private account the caller does not own (e.g. a recipient who
    /// doesn't yet exist on chain). No `nsk`, no membership proof.
    PrivateUnauthorized {
        /// Recipient's nullifier public key (no secret key is available).
        npk: NullifierPublicKey,
        /// Shared secret key for this account's ciphertext.
        ssk: SharedSecretKey,
        /// Identifier combined with the npk to derive the account id.
        identifier: Identifier,
    },
    /// Init of a private PDA, unauthorized. The npk-to-account_id binding is proven upstream
    /// via `Claim::Pda(seed)` or a caller's `pda_seeds` match. Identifier is fixed by
    /// convention to `PRIVATE_PDA_FIXED_IDENTIFIER` and not carried per-input.
    PrivatePdaInit {
        /// PDA's nullifier public key, carried directly (no nsk on init).
        npk: NullifierPublicKey,
        /// Shared secret key for this account's ciphertext.
        ssk: SharedSecretKey,
    },
    /// Update of an existing private PDA, authorized, with membership proof. `npk` is derived
    /// from `nsk`. Authorization is established upstream by a caller `pda_seeds` match or a
    /// previously-seen authorization in a chained call. Identifier is fixed.
    PrivatePdaUpdate {
        /// Shared secret key for this account's ciphertext.
        ssk: SharedSecretKey,
        /// Nullifier secret key; the npk is derived from it.
        nsk: NullifierSecretKey,
        /// Proof that the account's commitment is in the commitment set.
        membership_proof: MembershipProof,
    },
}
impl InputAccountIdentity {
    /// `true` only for the [`InputAccountIdentity::Public`] variant.
    #[must_use]
    pub const fn is_public(&self) -> bool {
        matches!(self, Self::Public)
    }
    /// `true` for both private-PDA variants (init and update).
    #[must_use]
    pub const fn is_private_pda(&self) -> bool {
        matches!(
            self,
            Self::PrivatePdaInit { .. } | Self::PrivatePdaUpdate { .. }
        )
    }
    /// For private PDA variants, return the nullifier public key. `Init` carries it directly;
    /// `Update` derives it from `nsk`. For non-PDA variants returns `None`.
    #[must_use]
    pub fn npk_if_private_pda(&self) -> Option<NullifierPublicKey> {
        match self {
            Self::PrivatePdaInit { npk, .. } => Some(*npk),
            Self::PrivatePdaUpdate { nsk, .. } => Some(NullifierPublicKey::from(nsk)),
            // Exhaustive on purpose: adding a variant forces this match to
            // be revisited rather than silently returning None.
            Self::Public
            | Self::PrivateAuthorizedInit { .. }
            | Self::PrivateAuthorizedUpdate { .. }
            | Self::PrivateUnauthorized { .. } => None,
        }
    }
}
#[derive(Serialize, Deserialize)]
#[cfg_attr(any(feature = "host", test), derive(Debug, PartialEq, Eq))]
pub struct PrivacyPreservingCircuitOutput {
@ -57,7 +126,7 @@ mod tests {
use super::*;
use crate::{
Commitment, Nullifier, NullifierPublicKey,
Commitment, Nullifier,
account::{Account, AccountId, AccountWithMetadata, Nonce},
};
@ -94,12 +163,12 @@ mod tests {
}],
ciphertexts: vec![Ciphertext(vec![255, 255, 1, 1, 2, 2])],
new_commitments: vec![Commitment::new(
&NullifierPublicKey::from(&[1; 32]),
&AccountId::new([1; 32]),
&Account::default(),
)],
new_nullifiers: vec![(
Nullifier::for_account_update(
&Commitment::new(&NullifierPublicKey::from(&[2; 32]), &Account::default()),
&Commitment::new(&AccountId::new([2; 32]), &Account::default()),
&[1; 32],
),
[0xab; 32],

View File

@ -2,7 +2,7 @@ use borsh::{BorshDeserialize, BorshSerialize};
use risc0_zkvm::sha::{Impl, Sha256 as _};
use serde::{Deserialize, Serialize};
use crate::{NullifierPublicKey, account::Account};
use crate::account::{Account, AccountId};
/// A commitment to all zero data.
/// ```python
@ -49,16 +49,16 @@ impl std::fmt::Debug for Commitment {
}
impl Commitment {
/// Generates the commitment to a private account owned by user for npk:
/// SHA256( `Comm_DS` || npk || `program_owner` || balance || nonce || SHA256(data)).
/// Generates the commitment to a private account owned by user for `account_id`:
/// SHA256( `Comm_DS` || `account_id` || `program_owner` || balance || nonce || SHA256(data)).
#[must_use]
pub fn new(npk: &NullifierPublicKey, account: &Account) -> Self {
pub fn new(account_id: &AccountId, account: &Account) -> Self {
const COMMITMENT_PREFIX: &[u8; 32] =
b"/LEE/v0.3/Commitment/\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00";
let mut bytes = Vec::new();
bytes.extend_from_slice(COMMITMENT_PREFIX);
bytes.extend_from_slice(&npk.to_byte_array());
bytes.extend_from_slice(account_id.value());
let account_bytes_with_hashed_data = {
let mut this = Vec::new();
for word in &account.program_owner {
@ -115,14 +115,15 @@ mod tests {
use risc0_zkvm::sha::{Impl, Sha256 as _};
use crate::{
Commitment, DUMMY_COMMITMENT, DUMMY_COMMITMENT_HASH, NullifierPublicKey, account::Account,
Commitment, DUMMY_COMMITMENT, DUMMY_COMMITMENT_HASH,
account::{Account, AccountId},
};
#[test]
fn nothing_up_my_sleeve_dummy_commitment() {
let default_account = Account::default();
let npk_null = NullifierPublicKey([0; 32]);
let expected_dummy_commitment = Commitment::new(&npk_null, &default_account);
let account_id_null = AccountId::new([0; 32]);
let expected_dummy_commitment = Commitment::new(&account_id_null, &default_account);
assert_eq!(DUMMY_COMMITMENT, expected_dummy_commitment);
}

View File

@ -8,7 +8,7 @@ use serde::{Deserialize, Serialize};
#[cfg(feature = "host")]
pub use shared_key_derivation::{EphemeralPublicKey, EphemeralSecretKey, ViewingPublicKey};
use crate::{Commitment, account::Account};
use crate::{Commitment, Identifier, account::Account};
#[cfg(feature = "host")]
pub mod shared_key_derivation;
@ -40,11 +40,14 @@ impl EncryptionScheme {
/// Encrypts `account`, prefixed by its `identifier`, under the shared
/// secret, keyed to `commitment` and the output position.
#[must_use]
pub fn encrypt(
    account: &Account,
    identifier: Identifier,
    shared_secret: &SharedSecretKey,
    commitment: &Commitment,
    output_index: u32,
) -> Ciphertext {
    // Plaintext layout: identifier (16 bytes, little-endian) || account bytes.
    let mut plaintext: Vec<u8> = identifier
        .to_le_bytes()
        .into_iter()
        .chain(account.to_bytes())
        .collect();
    Self::symmetric_transform(&mut plaintext, shared_secret, commitment, output_index);
    Ciphertext(plaintext)
}
@ -86,12 +89,17 @@ impl EncryptionScheme {
shared_secret: &SharedSecretKey,
commitment: &Commitment,
output_index: u32,
) -> Option<Account> {
) -> Option<(Identifier, Account)> {
use std::io::Cursor;
let mut buffer = ciphertext.0.clone();
Self::symmetric_transform(&mut buffer, shared_secret, commitment, output_index);
let mut cursor = Cursor::new(buffer.as_slice());
if buffer.len() < 16 {
return None;
}
let identifier = Identifier::from_le_bytes(buffer[..16].try_into().unwrap());
let mut cursor = Cursor::new(&buffer[16..]);
Account::from_cursor(&mut cursor)
.inspect_err(|err| {
println!(
@ -104,5 +112,6 @@ impl EncryptionScheme {
);
})
.ok()
.map(|account| (identifier, account))
}
}

View File

@ -3,13 +3,15 @@
reason = "We prefer to group methods by functionality rather than by type for encoding"
)]
pub use circuit_io::{PrivacyPreservingCircuitInput, PrivacyPreservingCircuitOutput};
pub use circuit_io::{
InputAccountIdentity, PrivacyPreservingCircuitInput, PrivacyPreservingCircuitOutput,
};
pub use commitment::{
Commitment, CommitmentSetDigest, DUMMY_COMMITMENT, DUMMY_COMMITMENT_HASH, MembershipProof,
compute_digest_for_path,
};
pub use encryption::{EncryptionScheme, SharedSecretKey};
pub use nullifier::{Nullifier, NullifierPublicKey, NullifierSecretKey};
pub use nullifier::{Identifier, Nullifier, NullifierPublicKey, NullifierSecretKey};
pub mod account;
mod circuit_io;

View File

@ -4,18 +4,24 @@ use serde::{Deserialize, Serialize};
use crate::{Commitment, account::AccountId};
const PRIVATE_ACCOUNT_ID_PREFIX: &[u8; 32] = b"/LEE/v0.3/AccountId/Private/\x00\x00\x00\x00";
pub type Identifier = u128;
#[derive(Debug, Clone, Copy, Serialize, Deserialize, PartialEq, Eq)]
#[cfg_attr(any(feature = "host", test), derive(Hash))]
pub struct NullifierPublicKey(pub [u8; 32]);
impl From<&NullifierPublicKey> for AccountId {
fn from(value: &NullifierPublicKey) -> Self {
const PRIVATE_ACCOUNT_ID_PREFIX: &[u8; 32] =
b"/LEE/v0.3/AccountId/Private/\x00\x00\x00\x00";
impl From<(&NullifierPublicKey, Identifier)> for AccountId {
fn from(value: (&NullifierPublicKey, Identifier)) -> Self {
let (npk, identifier) = value;
let mut bytes = [0; 64];
// 32 bytes prefix || 32 bytes npk || 16 bytes identifier
let mut bytes = [0; 80];
bytes[0..32].copy_from_slice(PRIVATE_ACCOUNT_ID_PREFIX);
bytes[32..].copy_from_slice(&value.0);
bytes[32..64].copy_from_slice(&npk.0);
bytes[64..80].copy_from_slice(&identifier.to_le_bytes());
Self::new(
Impl::hash_bytes(&bytes)
.as_bytes()
@ -85,10 +91,10 @@ impl Nullifier {
/// Computes a nullifier for an account initialization.
#[must_use]
pub fn for_account_initialization(npk: &NullifierPublicKey) -> Self {
pub fn for_account_initialization(account_id: &AccountId) -> Self {
    const INIT_PREFIX: &[u8; 32] = b"/LEE/v0.3/Nullifier/Initialize/\x00";
    // Domain-separated hash preimage: 32-byte prefix || 32-byte account id.
    let mut preimage = Vec::with_capacity(64);
    preimage.extend_from_slice(INIT_PREFIX);
    preimage.extend_from_slice(account_id.value());
    Self(Impl::hash_bytes(&preimage).as_bytes().try_into().unwrap())
}
}
@ -111,7 +117,7 @@ mod tests {
#[test]
fn constructor_for_account_initialization() {
let npk = NullifierPublicKey([
let account_id = AccountId::new([
112, 188, 193, 129, 150, 55, 228, 67, 88, 168, 29, 151, 5, 92, 23, 190, 17, 162, 164,
255, 29, 105, 42, 186, 43, 11, 157, 168, 132, 225, 17, 163,
]);
@ -119,7 +125,7 @@ mod tests {
149, 59, 95, 181, 2, 194, 20, 143, 72, 233, 104, 243, 59, 70, 67, 243, 110, 77, 109,
132, 139, 111, 51, 125, 128, 92, 107, 46, 252, 4, 20, 149,
]);
let nullifier = Nullifier::for_account_initialization(&npk);
let nullifier = Nullifier::for_account_initialization(&account_id);
assert_eq!(nullifier, expected_nullifier);
}
@ -145,11 +151,46 @@ mod tests {
];
let npk = NullifierPublicKey::from(&nsk);
let expected_account_id = AccountId::new([
139, 72, 194, 222, 215, 187, 147, 56, 55, 35, 222, 205, 156, 12, 204, 227, 166, 44, 30,
81, 186, 14, 167, 234, 28, 236, 32, 213, 125, 251, 193, 233,
165, 52, 40, 32, 231, 171, 113, 10, 65, 241, 156, 72, 154, 207, 122, 192, 15, 46, 50,
253, 105, 164, 89, 84, 40, 191, 182, 119, 64, 255, 67, 142,
]);
let account_id = AccountId::from(&npk);
let account_id = AccountId::from((&npk, 0));
assert_eq!(account_id, expected_account_id);
}
#[test]
fn account_id_from_nullifier_public_key_identifier_1() {
let nsk = [
57, 5, 64, 115, 153, 56, 184, 51, 207, 238, 99, 165, 147, 214, 213, 151, 30, 251, 30,
196, 134, 22, 224, 211, 237, 120, 136, 225, 188, 220, 249, 28,
];
let npk = NullifierPublicKey::from(&nsk);
let expected_account_id = AccountId::new([
203, 201, 109, 245, 40, 54, 195, 12, 55, 33, 0, 86, 245, 65, 70, 156, 24, 249, 26, 95,
56, 247, 99, 121, 165, 182, 234, 255, 19, 127, 191, 72,
]);
let account_id = AccountId::from((&npk, 1));
assert_eq!(account_id, expected_account_id);
}
#[test]
fn account_id_from_nullifier_public_key_byte_asymmetric_identifier() {
let identifier: u128 = 0x0123_4567_89AB_CDEF_FEDC_BA98_7654_3210;
let nsk = [
57, 5, 64, 115, 153, 56, 184, 51, 207, 238, 99, 165, 147, 214, 213, 151, 30, 251, 30,
196, 134, 22, 224, 211, 237, 120, 136, 225, 188, 220, 249, 28,
];
let npk = NullifierPublicKey::from(&nsk);
let expected_account_id = AccountId::new([
178, 16, 226, 206, 217, 38, 38, 45, 155, 240, 226, 253, 168, 87, 146, 70, 72, 32, 174,
19, 245, 25, 214, 162, 209, 135, 252, 82, 27, 2, 174, 196,
]);
let account_id = AccountId::from((&npk, identifier));
assert_eq!(account_id, expected_account_id);
}

View File

@ -37,6 +37,12 @@ impl PdaSeed {
}
}
// Exposes the raw seed bytes so a `PdaSeed` can be passed anywhere a byte
// slice is accepted.
impl AsRef<[u8]> for PdaSeed {
    fn as_ref(&self) -> &[u8] {
        &self.0
    }
}
impl AccountId {
/// Derives an [`AccountId`] for a public PDA from the program ID and seed.
#[must_use]
@ -913,18 +919,6 @@ mod tests {
assert_ne!(private_id, public_id);
}
/// A private PDA address differs from a standard private account address at the same `npk`,
/// because the private PDA formula includes `program_id` and `seed`.
#[test]
fn for_private_pda_differs_from_standard_private() {
let program_id: ProgramId = [1; 8];
let seed = PdaSeed::new([2; 32]);
let npk = NullifierPublicKey([3; 32]);
let private_pda_id = AccountId::for_private_pda(&program_id, &seed, &npk);
let standard_private_id = AccountId::from(&npk);
assert_ne!(private_pda_id, standard_private_id);
}
// ---- compute_public_authorized_pdas tests ----
/// `compute_public_authorized_pdas` returns the public PDA addresses for the caller's seeds.

View File

@ -2,8 +2,7 @@ use std::collections::{HashMap, VecDeque};
use borsh::{BorshDeserialize, BorshSerialize};
use nssa_core::{
MembershipProof, NullifierPublicKey, NullifierSecretKey, PrivacyPreservingCircuitInput,
PrivacyPreservingCircuitOutput, SharedSecretKey,
InputAccountIdentity, PrivacyPreservingCircuitInput, PrivacyPreservingCircuitOutput,
account::AccountWithMetadata,
program::{ChainedCall, InstructionData, ProgramId, ProgramOutput},
};
@ -63,14 +62,10 @@ impl From<Program> for ProgramWithDependencies {
/// Generates a proof of the execution of a NSSA program inside the privacy preserving execution
/// circuit.
/// TODO: too many parameters.
pub fn execute_and_prove(
pre_states: Vec<AccountWithMetadata>,
instruction_data: InstructionData,
visibility_mask: Vec<u8>,
private_account_keys: Vec<(NullifierPublicKey, SharedSecretKey)>,
private_account_nsks: Vec<NullifierSecretKey>,
private_account_membership_proofs: Vec<Option<MembershipProof>>,
account_identities: Vec<InputAccountIdentity>,
program_with_dependencies: &ProgramWithDependencies,
) -> Result<(PrivacyPreservingCircuitOutput, Proof), NssaError> {
let ProgramWithDependencies {
@ -128,10 +123,7 @@ pub fn execute_and_prove(
let circuit_input = PrivacyPreservingCircuitInput {
program_outputs,
visibility_mask,
private_account_keys,
private_account_nsks,
private_account_membership_proofs,
account_identities,
program_id: program_with_dependencies.program.id(),
};
@ -186,6 +178,7 @@ mod tests {
use nssa_core::{
Commitment, DUMMY_COMMITMENT_HASH, EncryptionScheme, Nullifier, SharedSecretKey,
account::{Account, AccountId, AccountWithMetadata, Nonce, data::Data},
program::PdaSeed,
};
use super::*;
@ -213,11 +206,8 @@ mod tests {
AccountId::new([0; 32]),
);
let recipient = AccountWithMetadata::new(
Account::default(),
false,
AccountId::from(&recipient_keys.npk()),
);
let recipient_account_id = AccountId::from((&recipient_keys.npk(), 0));
let recipient = AccountWithMetadata::new(Account::default(), false, recipient_account_id);
let balance_to_move: u128 = 37;
@ -231,7 +221,7 @@ mod tests {
let expected_recipient_post = Account {
program_owner: program.id(),
balance: balance_to_move,
nonce: Nonce::private_account_nonce_init(&recipient_keys.npk()),
nonce: Nonce::private_account_nonce_init(&recipient_account_id),
data: Data::default(),
};
@ -243,10 +233,14 @@ mod tests {
let (output, proof) = execute_and_prove(
vec![sender, recipient],
Program::serialize_instruction(balance_to_move).unwrap(),
vec![0, 2],
vec![(recipient_keys.npk(), shared_secret)],
vec![],
vec![None],
vec![
InputAccountIdentity::Public,
InputAccountIdentity::PrivateUnauthorized {
npk: recipient_keys.npk(),
ssk: shared_secret,
identifier: 0,
},
],
&Program::authenticated_transfer_program().into(),
)
.unwrap();
@ -261,7 +255,7 @@ mod tests {
assert_eq!(output.new_nullifiers.len(), 1);
assert_eq!(output.ciphertexts.len(), 1);
let recipient_post = EncryptionScheme::decrypt(
let (_identifier, recipient_post) = EncryptionScheme::decrypt(
&output.ciphertexts[0],
&shared_secret,
&output.new_commitments[0],
@ -286,27 +280,24 @@ mod tests {
data: Data::default(),
},
true,
AccountId::from(&sender_keys.npk()),
AccountId::from((&sender_keys.npk(), 0)),
);
let commitment_sender = Commitment::new(&sender_keys.npk(), &sender_pre.account);
let sender_account_id = AccountId::from((&sender_keys.npk(), 0));
let commitment_sender = Commitment::new(&sender_account_id, &sender_pre.account);
let recipient = AccountWithMetadata::new(
Account::default(),
false,
AccountId::from(&recipient_keys.npk()),
);
let recipient_account_id = AccountId::from((&recipient_keys.npk(), 0));
let recipient = AccountWithMetadata::new(Account::default(), false, recipient_account_id);
let balance_to_move: u128 = 37;
let mut commitment_set = CommitmentSet::with_capacity(2);
commitment_set.extend(std::slice::from_ref(&commitment_sender));
let expected_new_nullifiers = vec![
(
Nullifier::for_account_update(&commitment_sender, &sender_keys.nsk),
commitment_set.digest(),
),
(
Nullifier::for_account_initialization(&recipient_keys.npk()),
Nullifier::for_account_initialization(&recipient_account_id),
DUMMY_COMMITMENT_HASH,
),
];
@ -322,12 +313,12 @@ mod tests {
let expected_private_account_2 = Account {
program_owner: program.id(),
balance: balance_to_move,
nonce: Nonce::private_account_nonce_init(&recipient_keys.npk()),
nonce: Nonce::private_account_nonce_init(&recipient_account_id),
..Default::default()
};
let expected_new_commitments = vec![
Commitment::new(&sender_keys.npk(), &expected_private_account_1),
Commitment::new(&recipient_keys.npk(), &expected_private_account_2),
Commitment::new(&sender_account_id, &expected_private_account_1),
Commitment::new(&recipient_account_id, &expected_private_account_2),
];
let esk_1 = [3; 32];
@ -339,13 +330,21 @@ mod tests {
let (output, proof) = execute_and_prove(
vec![sender_pre, recipient],
Program::serialize_instruction(balance_to_move).unwrap(),
vec![1, 2],
vec![
(sender_keys.npk(), shared_secret_1),
(recipient_keys.npk(), shared_secret_2),
InputAccountIdentity::PrivateAuthorizedUpdate {
ssk: shared_secret_1,
nsk: sender_keys.nsk,
membership_proof: commitment_set
.get_proof_for(&commitment_sender)
.expect("sender's commitment must be in the set"),
identifier: 0,
},
InputAccountIdentity::PrivateUnauthorized {
npk: recipient_keys.npk(),
ssk: shared_secret_2,
identifier: 0,
},
],
vec![sender_keys.nsk],
vec![commitment_set.get_proof_for(&commitment_sender), None],
&program.into(),
)
.unwrap();
@ -357,7 +356,7 @@ mod tests {
assert_eq!(output.new_nullifiers, expected_new_nullifiers);
assert_eq!(output.ciphertexts.len(), 2);
let sender_post = EncryptionScheme::decrypt(
let (_identifier, sender_post) = EncryptionScheme::decrypt(
&output.ciphertexts[0],
&shared_secret_1,
&expected_new_commitments[0],
@ -366,7 +365,7 @@ mod tests {
.unwrap();
assert_eq!(sender_post, expected_private_account_1);
let recipient_post = EncryptionScheme::decrypt(
let (_identifier, recipient_post) = EncryptionScheme::decrypt(
&output.ciphertexts[1],
&shared_secret_2,
&expected_new_commitments[1],
@ -382,7 +381,7 @@ mod tests {
let pre = AccountWithMetadata::new(
Account::default(),
false,
AccountId::from(&account_keys.npk()),
AccountId::from((&account_keys.npk(), 0)),
);
let validity_window_chain_caller = Program::validity_window_chain_caller();
@ -408,13 +407,116 @@ mod tests {
let result = execute_and_prove(
vec![pre],
instruction,
vec![2],
vec![(account_keys.npk(), shared_secret)],
vec![],
vec![None],
vec![InputAccountIdentity::PrivateUnauthorized {
npk: account_keys.npk(),
ssk: shared_secret,
identifier: 0,
}],
&program_with_deps,
);
assert!(matches!(result, Err(NssaError::CircuitProvingError(_))));
}
/// Group PDA deposit: creates a new PDA and transfers balance from the
/// counterparty. Both accounts owned by `private_pda_spender`.
#[test]
fn group_pda_deposit() {
let program = Program::private_pda_spender();
let noop = Program::noop();
let keys = test_private_account_keys_1();
let npk = keys.npk();
let seed = PdaSeed::new([42; 32]);
let shared_secret_pda = SharedSecretKey::new(&[55; 32], &keys.vpk());
// PDA (new, mask 3)
let pda_id = AccountId::for_private_pda(&program.id(), &seed, &npk);
let pda_pre = AccountWithMetadata::new(Account::default(), false, pda_id);
// Sender (mask 0, public, owned by this program, has balance)
let sender_id = AccountId::new([99; 32]);
let sender_pre = AccountWithMetadata::new(
Account {
program_owner: program.id(),
balance: 10000,
..Account::default()
},
true,
sender_id,
);
let noop_id = noop.id();
let program_with_deps = ProgramWithDependencies::new(program, [(noop_id, noop)].into());
let instruction = Program::serialize_instruction((seed, noop_id, 500_u128, true)).unwrap();
// PDA is mask 3 (private PDA), sender is mask 0 (public).
// The noop chained call is required to establish the mask-3 (seed, npk) binding
// that the circuit enforces for private PDAs. Without a caller providing pda_seeds,
// the circuit's binding check rejects the account.
let result = execute_and_prove(
vec![pda_pre, sender_pre],
instruction,
vec![
InputAccountIdentity::PrivatePdaInit {
npk,
ssk: shared_secret_pda,
},
InputAccountIdentity::Public,
],
&program_with_deps,
);
let (output, _proof) = result.expect("group PDA deposit should succeed");
// Only PDA (mask 3) produces a commitment; sender (mask 0) is public.
assert_eq!(output.new_commitments.len(), 1);
}
/// Group PDA spend binding: the noop chained call with `pda_seeds` establishes
/// the mask-3 binding for an existing-but-default PDA. Uses amount=0 because
/// testing with a pre-funded PDA requires a two-tx sequence with membership proofs.
#[test]
fn group_pda_spend_binding() {
let program = Program::private_pda_spender();
let noop = Program::noop();
let keys = test_private_account_keys_1();
let npk = keys.npk();
let seed = PdaSeed::new([42; 32]);
let shared_secret_pda = SharedSecretKey::new(&[55; 32], &keys.vpk());
let pda_id = AccountId::for_private_pda(&program.id(), &seed, &npk);
let pda_pre = AccountWithMetadata::new(Account::default(), false, pda_id);
let bob_id = AccountId::new([88; 32]);
let bob_pre = AccountWithMetadata::new(
Account {
program_owner: program.id(),
balance: 10000,
..Account::default()
},
true,
bob_id,
);
let noop_id = noop.id();
let program_with_deps = ProgramWithDependencies::new(program, [(noop_id, noop)].into());
let instruction = Program::serialize_instruction((seed, noop_id, 0_u128, false)).unwrap();
let result = execute_and_prove(
vec![pda_pre, bob_pre],
instruction,
vec![
InputAccountIdentity::PrivatePdaInit {
npk,
ssk: shared_secret_pda,
},
InputAccountIdentity::Public,
],
&program_with_deps,
);
let (output, _proof) = result.expect("group PDA spend binding should succeed");
assert_eq!(output.new_commitments.len(), 1);
}
}

View File

@ -9,6 +9,8 @@ use sha2::{Digest as _, Sha256};
use crate::{AccountId, error::NssaError};
const PREFIX: &[u8; 32] = b"/LEE/v0.3/Message/Privacy/\x00\x00\x00\x00\x00\x00";
pub type ViewTag = u8;
#[derive(Debug, Clone, PartialEq, Eq, BorshSerialize, BorshDeserialize)]
@ -118,22 +120,34 @@ impl Message {
timestamp_validity_window: output.timestamp_validity_window,
})
}
#[must_use]
pub fn hash(&self) -> [u8; 32] {
let msg = self.to_bytes();
let mut bytes = Vec::with_capacity(
PREFIX
.len()
.checked_add(msg.len())
.expect("length overflow"),
);
bytes.extend_from_slice(PREFIX);
bytes.extend_from_slice(&msg);
Sha256::digest(bytes).into()
}
}
#[cfg(test)]
pub mod tests {
use nssa_core::{
Commitment, EncryptionScheme, Nullifier, NullifierPublicKey, SharedSecretKey,
account::Account,
account::{Account, AccountId, Nonce},
encryption::{EphemeralPublicKey, ViewingPublicKey},
program::{BlockValidityWindow, TimestampValidityWindow},
};
use sha2::{Digest as _, Sha256};
use crate::{
AccountId,
privacy_preserving_transaction::message::{EncryptedAccountData, Message},
};
use super::{EncryptedAccountData, Message, PREFIX};
#[must_use]
pub fn message_for_tests() -> Message {
@ -154,9 +168,11 @@ pub mod tests {
let encrypted_private_post_states = Vec::new();
let new_commitments = vec![Commitment::new(&npk2, &account2)];
let account_id2 = nssa_core::account::AccountId::from((&npk2, 0));
let new_commitments = vec![Commitment::new(&account_id2, &account2)];
let old_commitment = Commitment::new(&npk1, &account1);
let account_id1 = nssa_core::account::AccountId::from((&npk1, 0));
let old_commitment = Commitment::new(&account_id1, &account1);
let new_nullifiers = vec![(
Nullifier::for_account_update(&old_commitment, &nsk1),
[0; 32],
@ -174,16 +190,69 @@ pub mod tests {
}
}
#[test]
fn hash_privacy_pinned() {
let msg = Message {
public_account_ids: vec![AccountId::new([42_u8; 32])],
nonces: vec![Nonce(5)],
public_post_states: vec![],
encrypted_private_post_states: vec![],
new_commitments: vec![],
new_nullifiers: vec![],
block_validity_window: BlockValidityWindow::new_unbounded(),
timestamp_validity_window: TimestampValidityWindow::new_unbounded(),
};
let public_account_ids_bytes: &[u8] = &[42_u8; 32];
let nonces_bytes: &[u8] = &[1, 0, 0, 0, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0];
// all remaining vec fields are empty: u32 len=0
let empty_vec_bytes: &[u8] = &[0_u8; 4];
// validity windows: unbounded = {from: None (0u8), to: None (0u8)}
let unbounded_window_bytes: &[u8] = &[0_u8; 2];
let expected_borsh_vec: Vec<u8> = [
&[1_u8, 0, 0, 0], // public_account_ids
public_account_ids_bytes,
nonces_bytes,
empty_vec_bytes, // public_post_state
empty_vec_bytes, // encrypted_private_post_states
empty_vec_bytes, // new_commitments
empty_vec_bytes, // new_nullifiers
unbounded_window_bytes, // block_validity_window
unbounded_window_bytes, // timestamp_validity_window
]
.concat();
let expected_borsh: &[u8] = &expected_borsh_vec;
assert_eq!(
borsh::to_vec(&msg).unwrap(),
expected_borsh,
"`privacy_preserving_transaction::hash()`: expected borsh order has changed"
);
let mut preimage = Vec::with_capacity(PREFIX.len() + expected_borsh.len());
preimage.extend_from_slice(PREFIX);
preimage.extend_from_slice(expected_borsh);
let expected_hash: [u8; 32] = Sha256::digest(&preimage).into();
assert_eq!(
msg.hash(),
expected_hash,
"`privacy_preserving_transaction::hash()`: serialization has changed"
);
}
#[test]
fn encrypted_account_data_constructor() {
let npk = NullifierPublicKey::from(&[1; 32]);
let vpk = ViewingPublicKey::from_scalar([2; 32]);
let account = Account::default();
let commitment = Commitment::new(&npk, &account);
let account_id = nssa_core::account::AccountId::from((&npk, 0));
let commitment = Commitment::new(&account_id, &account);
let esk = [3; 32];
let shared_secret = SharedSecretKey::new(&esk, &vpk);
let epk = EphemeralPublicKey::from_scalar(esk);
let ciphertext = EncryptionScheme::encrypt(&account, &shared_secret, &commitment, 2);
let ciphertext = EncryptionScheme::encrypt(&account, 0, &shared_secret, &commitment, 2);
let encrypted_account_data =
EncryptedAccountData::new(ciphertext.clone(), &npk, &vpk, epk.clone());

View File

@ -14,12 +14,12 @@ pub struct WitnessSet {
impl WitnessSet {
#[must_use]
pub fn for_message(message: &Message, proof: Proof, private_keys: &[&PrivateKey]) -> Self {
let message_bytes = message.to_bytes();
let message_hash = message.hash();
let signatures_and_public_keys = private_keys
.iter()
.map(|&key| {
(
Signature::new(key, &message_bytes),
Signature::new(key, &message_hash),
PublicKey::new_from_private_key(key),
)
})
@ -32,9 +32,9 @@ impl WitnessSet {
#[must_use]
pub fn signatures_are_valid_for(&self, message: &Message) -> bool {
let message_bytes = message.to_bytes();
let message_hash = message.hash();
for (signature, public_key) in self.signatures_and_public_keys() {
if !signature.is_valid_for(&message_bytes, public_key) {
if !signature.is_valid_for(&message_hash, public_key) {
return false;
}
}

View File

@ -312,6 +312,16 @@ mod tests {
}
}
#[must_use]
pub fn private_pda_spender() -> Self {
use test_program_methods::{PRIVATE_PDA_SPENDER_ELF, PRIVATE_PDA_SPENDER_ID};
Self {
id: PRIVATE_PDA_SPENDER_ID,
elf: PRIVATE_PDA_SPENDER_ELF.to_vec(),
}
}
#[must_use]
pub fn two_pda_claimer() -> Self {
use test_program_methods::{TWO_PDA_CLAIMER_ELF, TWO_PDA_CLAIMER_ID};

View File

@ -4,9 +4,12 @@ use nssa_core::{
program::{InstructionData, ProgramId},
};
use serde::Serialize;
use sha2::{Digest as _, Sha256};
use crate::{AccountId, error::NssaError, program::Program};
const PREFIX: &[u8; 32] = b"/LEE/v0.3/Message/Public/\x00\x00\x00\x00\x00\x00\x00";
#[derive(Clone, PartialEq, Eq, BorshSerialize, BorshDeserialize)]
pub struct Message {
pub program_id: ProgramId,
@ -63,4 +66,74 @@ impl Message {
instruction_data,
}
}
#[must_use]
pub fn hash(&self) -> [u8; 32] {
let mut bytes = Vec::with_capacity(
PREFIX
.len()
.checked_add(self.to_bytes().len())
.expect("length overflow"),
);
bytes.extend_from_slice(PREFIX);
bytes.extend_from_slice(&self.to_bytes());
Sha256::digest(bytes).into()
}
}
#[cfg(test)]
mod tests {
use nssa_core::account::{AccountId, Nonce};
use sha2::{Digest as _, Sha256};
use super::{Message, PREFIX};
#[test]
fn hash_public_pinned() {
let msg = Message::new_preserialized(
[1_u32; 8],
vec![AccountId::new([42_u8; 32])],
vec![Nonce(5)],
vec![],
);
// program_id: [1_u32; 8], each word as LE u32
let program_id_bytes: &[u8] = &[
1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1,
0, 0, 0,
];
// account_ids: AccountId([42_u8; 32])
let account_ids_bytes: &[u8] = &[42_u8; 32];
// nonces: u32 len=1, then Nonce(5) as LE u128
let nonces_bytes: &[u8] = &[1, 0, 0, 0, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0];
let instruction_data_bytes: &[u8] = &[0_u8; 4];
let expected_borsh_vec: Vec<u8> = [
program_id_bytes,
&[1_u8, 0, 0, 0], // account_ids len=1
account_ids_bytes,
nonces_bytes,
instruction_data_bytes,
]
.concat();
let expected_borsh: &[u8] = &expected_borsh_vec;
assert_eq!(
borsh::to_vec(&msg).unwrap(),
expected_borsh,
"`public_transaction::hash()`: expected borsh order has changed"
);
let mut preimage = Vec::with_capacity(PREFIX.len() + expected_borsh.len());
preimage.extend_from_slice(PREFIX);
preimage.extend_from_slice(expected_borsh);
let expected_hash: [u8; 32] = Sha256::digest(&preimage).into();
assert_eq!(
msg.hash(),
expected_hash,
"`public_transaction::hash()`: serialization has changed"
);
}
}

View File

@ -10,12 +10,12 @@ pub struct WitnessSet {
impl WitnessSet {
#[must_use]
pub fn for_message(message: &Message, private_keys: &[&PrivateKey]) -> Self {
let message_bytes = message.to_bytes();
let message_hash = message.hash();
let signatures_and_public_keys = private_keys
.iter()
.map(|&key| {
(
Signature::new(key, &message_bytes),
Signature::new(key, &message_hash),
PublicKey::new_from_private_key(key),
)
})
@ -27,9 +27,9 @@ impl WitnessSet {
#[must_use]
pub fn is_valid_for(&self, message: &Message) -> bool {
let message_bytes = message.to_bytes();
let message_hash = message.hash();
for (signature, public_key) in self.signatures_and_public_keys() {
if !signature.is_valid_for(&message_bytes, public_key) {
if !signature.is_valid_for(&message_hash, public_key) {
return false;
}
}
@ -75,7 +75,7 @@ mod tests {
assert_eq!(witness_set.signatures_and_public_keys.len(), 2);
let message_bytes = message.to_bytes();
let message_bytes = message.hash();
for ((signature, public_key), expected_public_key) in witness_set
.signatures_and_public_keys
.into_iter()

View File

@ -4,7 +4,7 @@ pub struct TestVector {
pub seckey: Option<PrivateKey>,
pub pubkey: PublicKey,
pub aux_rand: Option<[u8; 32]>,
pub message: Option<Vec<u8>>,
pub message: [u8; 32],
pub signature: Signature,
pub verification_result: bool,
}
@ -15,18 +15,21 @@ pub struct TestVector {
pub fn test_vectors() -> Vec<TestVector> {
vec![
TestVector {
seckey: Some(PrivateKey::try_new(hex_to_bytes(
"0000000000000000000000000000000000000000000000000000000000000003",
)).unwrap()),
seckey: Some(
PrivateKey::try_new(hex_to_bytes(
"0000000000000000000000000000000000000000000000000000000000000003",
))
.unwrap(),
),
pubkey: PublicKey::try_new(hex_to_bytes(
"F9308A019258C31049344F85F89D5229B531C845836F99B08601F113BCE036F9",
)).unwrap(),
))
.unwrap(),
aux_rand: Some(hex_to_bytes::<32>(
"0000000000000000000000000000000000000000000000000000000000000000",
)),
message: Some(
hex::decode("0000000000000000000000000000000000000000000000000000000000000000")
.unwrap(),
message: hex_to_bytes::<32>(
"0000000000000000000000000000000000000000000000000000000000000000",
),
signature: Signature {
value: hex_to_bytes(
@ -36,18 +39,21 @@ pub fn test_vectors() -> Vec<TestVector> {
verification_result: true,
},
TestVector {
seckey: Some(PrivateKey::try_new(hex_to_bytes(
"B7E151628AED2A6ABF7158809CF4F3C762E7160F38B4DA56A784D9045190CFEF",
)).unwrap()),
seckey: Some(
PrivateKey::try_new(hex_to_bytes(
"B7E151628AED2A6ABF7158809CF4F3C762E7160F38B4DA56A784D9045190CFEF",
))
.unwrap(),
),
pubkey: PublicKey::try_new(hex_to_bytes(
"DFF1D77F2A671C5F36183726DB2341BE58FEAE1DA2DECED843240F7B502BA659",
)).unwrap(),
))
.unwrap(),
aux_rand: Some(hex_to_bytes::<32>(
"0000000000000000000000000000000000000000000000000000000000000001",
)),
message: Some(
hex::decode("243F6A8885A308D313198A2E03707344A4093822299F31D0082EFA98EC4E6C89")
.unwrap(),
message: hex_to_bytes::<32>(
"243F6A8885A308D313198A2E03707344A4093822299F31D0082EFA98EC4E6C89",
),
signature: Signature {
value: hex_to_bytes(
@ -57,18 +63,21 @@ pub fn test_vectors() -> Vec<TestVector> {
verification_result: true,
},
TestVector {
seckey: Some(PrivateKey::try_new(hex_to_bytes(
"C90FDAA22168C234C4C6628B80DC1CD129024E088A67CC74020BBEA63B14E5C9",
)).unwrap()),
seckey: Some(
PrivateKey::try_new(hex_to_bytes(
"C90FDAA22168C234C4C6628B80DC1CD129024E088A67CC74020BBEA63B14E5C9",
))
.unwrap(),
),
pubkey: PublicKey::try_new(hex_to_bytes(
"DD308AFEC5777E13121FA72B9CC1B7CC0139715309B086C960E18FD969774EB8",
)).unwrap(),
))
.unwrap(),
aux_rand: Some(hex_to_bytes::<32>(
"C87AA53824B4D7AE2EB035A2B5BBBCCC080E76CDC6D1692C4B0B62D798E6D906",
)),
message: Some(
hex::decode("7E2D58D8B3BCDF1ABADEC7829054F90DDA9805AAB56C77333024B9D0A508B75C")
.unwrap(),
message: hex_to_bytes::<32>(
"7E2D58D8B3BCDF1ABADEC7829054F90DDA9805AAB56C77333024B9D0A508B75C",
),
signature: Signature {
value: hex_to_bytes(
@ -78,18 +87,21 @@ pub fn test_vectors() -> Vec<TestVector> {
verification_result: true,
},
TestVector {
seckey: Some(PrivateKey::try_new(hex_to_bytes(
"0B432B2677937381AEF05BB02A66ECD012773062CF3FA2549E44F58ED2401710",
)).unwrap()),
seckey: Some(
PrivateKey::try_new(hex_to_bytes(
"0B432B2677937381AEF05BB02A66ECD012773062CF3FA2549E44F58ED2401710",
))
.unwrap(),
),
pubkey: PublicKey::try_new(hex_to_bytes(
"25D1DFF95105F5253C4022F628A996AD3A0D95FBF21D468A1B33F8C160D8F517",
)).unwrap(),
))
.unwrap(),
aux_rand: Some(hex_to_bytes::<32>(
"FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF",
)),
message: Some(
hex::decode("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF")
.unwrap(),
message: hex_to_bytes::<32>(
"FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF",
),
signature: Signature {
value: hex_to_bytes(
@ -102,11 +114,11 @@ pub fn test_vectors() -> Vec<TestVector> {
seckey: None,
pubkey: PublicKey::try_new(hex_to_bytes(
"D69C3509BB99E412E68B0FE8544E72837DFA30746D8BE2AA65975F29D22DC7B9",
)).unwrap(),
))
.unwrap(),
aux_rand: None,
message: Some(
hex::decode("4DF3C3F68FCC83B27E9D42C90431A72499F17875C81A599B566C9889B9696703")
.unwrap(),
message: hex_to_bytes::<32>(
"4DF3C3F68FCC83B27E9D42C90431A72499F17875C81A599B566C9889B9696703",
),
signature: Signature {
value: hex_to_bytes(
@ -122,13 +134,15 @@ pub fn test_vectors() -> Vec<TestVector> {
// "EEFDEA4CDB677750A420FEE807EACF21EB9898AE79B9768766E4FAA04A2D4A34",
// )).unwrap(),
// aux_rand: None,
// message: Some(
// hex::decode("243F6A8885A308D313198A2E03707344A4093822299F31D0082EFA98EC4E6C89").unwrap(),
// ),
// message:
//
// hex::decode("243F6A8885A308D313198A2E03707344A4093822299F31D0082EFA98EC4E6C89").
// unwrap(), ),
// signature: Signature {
// value: hex_to_bytes(
// "6CFF5C3BA86C69EA4B7376F31A9BCB4F74C1976089B2D9963DA2E5543E17776969E89B4C5564D00349106B8497785DD7D1D713A8AE82B32FA79D5F7FC407D39B",
// ),
//
// "6CFF5C3BA86C69EA4B7376F31A9BCB4F74C1976089B2D9963DA2E5543E17776969E89B4C5564D00349106B8497785DD7D1D713A8AE82B32FA79D5F7FC407D39B"
// , ),
// },
// verification_result: false,
// },
@ -136,11 +150,11 @@ pub fn test_vectors() -> Vec<TestVector> {
seckey: None,
pubkey: PublicKey::try_new(hex_to_bytes(
"DFF1D77F2A671C5F36183726DB2341BE58FEAE1DA2DECED843240F7B502BA659",
)).unwrap(),
))
.unwrap(),
aux_rand: None,
message: Some(
hex::decode("243F6A8885A308D313198A2E03707344A4093822299F31D0082EFA98EC4E6C89")
.unwrap(),
message: hex_to_bytes::<32>(
"243F6A8885A308D313198A2E03707344A4093822299F31D0082EFA98EC4E6C89",
),
signature: Signature {
value: hex_to_bytes(
@ -153,11 +167,11 @@ pub fn test_vectors() -> Vec<TestVector> {
seckey: None,
pubkey: PublicKey::try_new(hex_to_bytes(
"DFF1D77F2A671C5F36183726DB2341BE58FEAE1DA2DECED843240F7B502BA659",
)).unwrap(),
))
.unwrap(),
aux_rand: None,
message: Some(
hex::decode("243F6A8885A308D313198A2E03707344A4093822299F31D0082EFA98EC4E6C89")
.unwrap(),
message: hex_to_bytes::<32>(
"243F6A8885A308D313198A2E03707344A4093822299F31D0082EFA98EC4E6C89",
),
signature: Signature {
value: hex_to_bytes(
@ -170,11 +184,11 @@ pub fn test_vectors() -> Vec<TestVector> {
seckey: None,
pubkey: PublicKey::try_new(hex_to_bytes(
"DFF1D77F2A671C5F36183726DB2341BE58FEAE1DA2DECED843240F7B502BA659",
)).unwrap(),
))
.unwrap(),
aux_rand: None,
message: Some(
hex::decode("243F6A8885A308D313198A2E03707344A4093822299F31D0082EFA98EC4E6C89")
.unwrap(),
message: hex_to_bytes::<32>(
"243F6A8885A308D313198A2E03707344A4093822299F31D0082EFA98EC4E6C89",
),
signature: Signature {
value: hex_to_bytes(
@ -187,11 +201,11 @@ pub fn test_vectors() -> Vec<TestVector> {
seckey: None,
pubkey: PublicKey::try_new(hex_to_bytes(
"DFF1D77F2A671C5F36183726DB2341BE58FEAE1DA2DECED843240F7B502BA659",
)).unwrap(),
))
.unwrap(),
aux_rand: None,
message: Some(
hex::decode("243F6A8885A308D313198A2E03707344A4093822299F31D0082EFA98EC4E6C89")
.unwrap(),
message: hex_to_bytes::<32>(
"243F6A8885A308D313198A2E03707344A4093822299F31D0082EFA98EC4E6C89",
),
signature: Signature {
value: hex_to_bytes(
@ -204,11 +218,11 @@ pub fn test_vectors() -> Vec<TestVector> {
seckey: None,
pubkey: PublicKey::try_new(hex_to_bytes(
"DFF1D77F2A671C5F36183726DB2341BE58FEAE1DA2DECED843240F7B502BA659",
)).unwrap(),
))
.unwrap(),
aux_rand: None,
message: Some(
hex::decode("243F6A8885A308D313198A2E03707344A4093822299F31D0082EFA98EC4E6C89")
.unwrap(),
message: hex_to_bytes::<32>(
"243F6A8885A308D313198A2E03707344A4093822299F31D0082EFA98EC4E6C89",
),
signature: Signature {
value: hex_to_bytes(
@ -221,11 +235,11 @@ pub fn test_vectors() -> Vec<TestVector> {
seckey: None,
pubkey: PublicKey::try_new(hex_to_bytes(
"DFF1D77F2A671C5F36183726DB2341BE58FEAE1DA2DECED843240F7B502BA659",
)).unwrap(),
))
.unwrap(),
aux_rand: None,
message: Some(
hex::decode("243F6A8885A308D313198A2E03707344A4093822299F31D0082EFA98EC4E6C89")
.unwrap(),
message: hex_to_bytes::<32>(
"243F6A8885A308D313198A2E03707344A4093822299F31D0082EFA98EC4E6C89",
),
signature: Signature {
value: hex_to_bytes(
@ -238,11 +252,11 @@ pub fn test_vectors() -> Vec<TestVector> {
seckey: None,
pubkey: PublicKey::try_new(hex_to_bytes(
"DFF1D77F2A671C5F36183726DB2341BE58FEAE1DA2DECED843240F7B502BA659",
)).unwrap(),
))
.unwrap(),
aux_rand: None,
message: Some(
hex::decode("243F6A8885A308D313198A2E03707344A4093822299F31D0082EFA98EC4E6C89")
.unwrap(),
message: hex_to_bytes::<32>(
"243F6A8885A308D313198A2E03707344A4093822299F31D0082EFA98EC4E6C89",
),
signature: Signature {
value: hex_to_bytes(
@ -255,11 +269,11 @@ pub fn test_vectors() -> Vec<TestVector> {
seckey: None,
pubkey: PublicKey::try_new(hex_to_bytes(
"DFF1D77F2A671C5F36183726DB2341BE58FEAE1DA2DECED843240F7B502BA659",
)).unwrap(),
))
.unwrap(),
aux_rand: None,
message: Some(
hex::decode("243F6A8885A308D313198A2E03707344A4093822299F31D0082EFA98EC4E6C89")
.unwrap(),
message: hex_to_bytes::<32>(
"243F6A8885A308D313198A2E03707344A4093822299F31D0082EFA98EC4E6C89",
),
signature: Signature {
value: hex_to_bytes(
@ -275,90 +289,96 @@ pub fn test_vectors() -> Vec<TestVector> {
// "FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEFFFFFC30",
// )).unwrap(),
// aux_rand: None,
// message: Some(
// hex::decode("243F6A8885A308D313198A2E03707344A4093822299F31D0082EFA98EC4E6C89").unwrap(),
// ),
// message:
//
// hex::decode("243F6A8885A308D313198A2E03707344A4093822299F31D0082EFA98EC4E6C89").
// unwrap(), ),
// signature: Signature {
// value: hex_to_bytes(
// "6CFF5C3BA86C69EA4B7376F31A9BCB4F74C1976089B2D9963DA2E5543E17776969E89B4C5564D00349106B8497785DD7D1D713A8AE82B32FA79D5F7FC407D39B",
// ),
//
// "6CFF5C3BA86C69EA4B7376F31A9BCB4F74C1976089B2D9963DA2E5543E17776969E89B4C5564D00349106B8497785DD7D1D713A8AE82B32FA79D5F7FC407D39B"
// , ),
// },
// verification_result: false,
// },
TestVector {
seckey: Some(PrivateKey::try_new(hex_to_bytes(
"0340034003400340034003400340034003400340034003400340034003400340",
)).unwrap()),
pubkey: PublicKey::try_new(hex_to_bytes(
"778CAA53B4393AC467774D09497A87224BF9FAB6F6E68B23086497324D6FD117",
)).unwrap(),
aux_rand: Some(hex_to_bytes::<32>(
"0000000000000000000000000000000000000000000000000000000000000000",
)),
message: None,
signature: Signature {
value: hex_to_bytes(
"71535DB165ECD9FBBC046E5FFAEA61186BB6AD436732FCCC25291A55895464CF6069CE26BF03466228F19A3A62DB8A649F2D560FAC652827D1AF0574E427AB63",
),
},
verification_result: true,
},
TestVector {
seckey: Some(PrivateKey::try_new(hex_to_bytes(
"0340034003400340034003400340034003400340034003400340034003400340",
)).unwrap()),
pubkey: PublicKey::try_new(hex_to_bytes(
"778CAA53B4393AC467774D09497A87224BF9FAB6F6E68B23086497324D6FD117",
)).unwrap(),
aux_rand: Some(hex_to_bytes::<32>(
"0000000000000000000000000000000000000000000000000000000000000000",
)),
message: Some(hex::decode("11").unwrap()),
signature: Signature {
value: hex_to_bytes(
"08A20A0AFEF64124649232E0693C583AB1B9934AE63B4C3511F3AE1134C6A303EA3173BFEA6683BD101FA5AA5DBC1996FE7CACFC5A577D33EC14564CEC2BACBF",
),
},
verification_result: true,
},
TestVector {
seckey: Some(PrivateKey::try_new(hex_to_bytes(
"0340034003400340034003400340034003400340034003400340034003400340",
)).unwrap()),
pubkey: PublicKey::try_new(hex_to_bytes(
"778CAA53B4393AC467774D09497A87224BF9FAB6F6E68B23086497324D6FD117",
)).unwrap(),
aux_rand: Some(hex_to_bytes::<32>(
"0000000000000000000000000000000000000000000000000000000000000000",
)),
message: Some(hex::decode("0102030405060708090A0B0C0D0E0F1011").unwrap()),
signature: Signature {
value: hex_to_bytes(
"5130F39A4059B43BC7CAC09A19ECE52B5D8699D1A71E3C52DA9AFDB6B50AC370C4A482B77BF960F8681540E25B6771ECE1E5A37FD80E5A51897C5566A97EA5A5",
),
},
verification_result: true,
},
TestVector {
seckey: Some(PrivateKey::try_new(hex_to_bytes(
"0340034003400340034003400340034003400340034003400340034003400340",
)).unwrap()),
pubkey: PublicKey::try_new(hex_to_bytes(
"778CAA53B4393AC467774D09497A87224BF9FAB6F6E68B23086497324D6FD117",
)).unwrap(),
aux_rand: Some(hex_to_bytes::<32>(
"0000000000000000000000000000000000000000000000000000000000000000",
)),
message: Some(
hex::decode("99999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999").unwrap(),
),
signature: Signature {
value: hex_to_bytes(
"403B12B0D8555A344175EA7EC746566303321E5DBFA8BE6F091635163ECA79A8585ED3E3170807E7C03B720FC54C7B23897FCBA0E9D0B4A06894CFD249F22367",
),
},
verification_result: true,
},
// Test with invalid message length (0); valid test for BIP-340 post 2022.
// TestVector {
// seckey: PrivateKey::try_new(hex_to_bytes(
// "0340034003400340034003400340034003400340034003400340034003400340",
// )).unwrap()),
// pubkey: PublicKey::try_new(hex_to_bytes(
// "778CAA53B4393AC467774D09497A87224BF9FAB6F6E68B23086497324D6FD117",
// )).unwrap(),
// aux_rand: hex_to_bytes::<32>(
// "0000000000000000000000000000000000000000000000000000000000000000",
// )),
// message: None,
// signature: Signature {
// value: hex_to_bytes(
// "71535DB165ECD9FBBC046E5FFAEA61186BB6AD436732FCCC25291A55895464CF6069CE26BF03466228F19A3A62DB8A649F2D560FAC652827D1AF0574E427AB63",
// ),
// },
// verification_result: true,
// },
// Test with invalid message length (1); valid test for BIP-340 post 2022.
// TestVector {
// seckey: PrivateKey::try_new(hex_to_bytes(
// "0340034003400340034003400340034003400340034003400340034003400340",
// )).unwrap()),
// pubkey: PublicKey::try_new(hex_to_bytes(
// "778CAA53B4393AC467774D09497A87224BF9FAB6F6E68B23086497324D6FD117",
// )).unwrap(),
// aux_rand: hex_to_bytes::<32>(
// "0000000000000000000000000000000000000000000000000000000000000000",
// )),
// message: hex::decode("11").unwrap()),
// signature: Signature {
// value: hex_to_bytes(
// "08A20A0AFEF64124649232E0693C583AB1B9934AE63B4C3511F3AE1134C6A303EA3173BFEA6683BD101FA5AA5DBC1996FE7CACFC5A577D33EC14564CEC2BACBF",
// ),
// },
// verification_result: true,
// },
// Test with invalid message length (17); valid test for BIP-340 post 2022.
// TestVector {
// seckey: PrivateKey::try_new(hex_to_bytes(
// "0340034003400340034003400340034003400340034003400340034003400340",
// )).unwrap()),
// pubkey: PublicKey::try_new(hex_to_bytes(
// "778CAA53B4393AC467774D09497A87224BF9FAB6F6E68B23086497324D6FD117",
// )).unwrap(),
// aux_rand: hex_to_bytes::<32>(
// "0000000000000000000000000000000000000000000000000000000000000000",
// )),
// message: hex::decode("0102030405060708090A0B0C0D0E0F1011").unwrap()),
// signature: Signature {
// value: hex_to_bytes(
// "5130F39A4059B43BC7CAC09A19ECE52B5D8699D1A71E3C52DA9AFDB6B50AC370C4A482B77BF960F8681540E25B6771ECE1E5A37FD80E5A51897C5566A97EA5A5",
// ),
// },
//         verification_result: true,
// },
// Test with invalid message length (100); valid test for BIP-340 post 2022.
// TestVector {
// seckey: PrivateKey::try_new(hex_to_bytes(
// "0340034003400340034003400340034003400340034003400340034003400340",
// )).unwrap()),
// pubkey: PublicKey::try_new(hex_to_bytes(
// "778CAA53B4393AC467774D09497A87224BF9FAB6F6E68B23086497324D6FD117",
// )).unwrap(),
// aux_rand: hex_to_bytes::<32>(
// "0000000000000000000000000000000000000000000000000000000000000000",
// )),
// message:
// hex::decode("99999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999").unwrap(),
// ),
// signature: Signature {
// value: hex_to_bytes(
// "403B12B0D8555A344175EA7EC746566303321E5DBFA8BE6F091635163ECA79A8585ED3E3170807E7C03B720FC54C7B23897FCBA0E9D0B4A06894CFD249F22367",
// ),
// },
// verification_result: true,
// },
]
}

View File

@ -36,8 +36,10 @@ impl FromStr for Signature {
}
impl Signature {
/// This function expects the incoming message to be prehashed to be pre-2022 BIP-340/Keycard
/// compatible.
#[must_use]
pub fn new(key: &PrivateKey, message: &[u8]) -> Self {
pub fn new(key: &PrivateKey, message: &[u8; 32]) -> Self {
let mut aux_random = [0_u8; 32];
OsRng.fill_bytes(&mut aux_random);
Self::new_with_aux_random(key, message, aux_random)
@ -45,14 +47,14 @@ impl Signature {
pub(crate) fn new_with_aux_random(
key: &PrivateKey,
message: &[u8],
message: &[u8; 32],
aux_random: [u8; 32],
) -> Self {
let value = {
let signing_key = k256::schnorr::SigningKey::from_bytes(key.value())
.expect("Expect valid signing key");
signing_key
.sign_raw(message, &aux_random)
.sign_prehash_with_aux_rand(message, &aux_random)
.expect("Expect to produce a valid signature")
.to_bytes()
};
@ -61,7 +63,7 @@ impl Signature {
}
#[must_use]
pub fn is_valid_for(&self, bytes: &[u8], public_key: &PublicKey) -> bool {
pub fn is_valid_for(&self, bytes: &[u8; 32], public_key: &PublicKey) -> bool {
let Ok(pk) = k256::schnorr::VerifyingKey::from_bytes(public_key.value()) else {
return false;
};
@ -97,9 +99,8 @@ mod tests {
let Some(aux_random) = test_vector.aux_rand else {
continue;
};
let Some(message) = test_vector.message else {
continue;
};
let message = test_vector.message;
if !test_vector.verification_result {
continue;
}
@ -114,7 +115,7 @@ mod tests {
#[test]
fn signature_verification_from_bip340_test_vectors() {
for (i, test_vector) in bip340_test_vectors::test_vectors().into_iter().enumerate() {
let message = test_vector.message.unwrap_or(vec![]);
let message = test_vector.message;
let expected_result = test_vector.verification_result;
let result = test_vector

File diff suppressed because it is too large Load Diff

View File

@ -4,9 +4,9 @@ use std::{
};
use nssa_core::{
Commitment, CommitmentSetDigest, DUMMY_COMMITMENT_HASH, EncryptionScheme, MembershipProof,
Nullifier, NullifierPublicKey, NullifierSecretKey, PrivacyPreservingCircuitInput,
PrivacyPreservingCircuitOutput, SharedSecretKey,
Commitment, CommitmentSetDigest, DUMMY_COMMITMENT_HASH, EncryptionScheme, Identifier,
InputAccountIdentity, MembershipProof, Nullifier, NullifierPublicKey, NullifierSecretKey,
PrivacyPreservingCircuitInput, PrivacyPreservingCircuitOutput, SharedSecretKey,
account::{Account, AccountId, AccountWithMetadata, Nonce},
compute_digest_for_path,
program::{
@ -17,22 +17,24 @@ use nssa_core::{
};
use risc0_zkvm::{guest::env, serde::to_vec};
const PRIVATE_PDA_FIXED_IDENTIFIER: Identifier = u128::MAX;
/// State of the involved accounts before and after program execution.
struct ExecutionState {
pre_states: Vec<AccountWithMetadata>,
post_states: HashMap<AccountId, Account>,
block_validity_window: BlockValidityWindow,
timestamp_validity_window: TimestampValidityWindow,
/// Positions (in `pre_states`) of mask-3 accounts whose supplied npk has been bound to
/// their `AccountId` via a proven `AccountId::for_private_pda(program_id, seed, npk)`
/// Positions (in `pre_states`) of private-PDA accounts whose supplied npk has been bound
/// to their `AccountId` via a proven `AccountId::for_private_pda(program_id, seed, npk)`
/// check.
/// Two proof paths populate this set: a `Claim::Pda(seed)` in a program's `post_state` on
/// that `pre_state`, or a caller's `ChainedCall.pda_seeds` entry matching that `pre_state`
/// under the private derivation. Binding is an idempotent property, not an event: the same
/// position can legitimately be bound through both paths in the same tx (e.g. a program
/// claims a private PDA and then delegates it to a callee), and the set uses `contains`,
/// not `assert!(insert)`. After the main loop, every mask-3 position must appear in this
/// set; otherwise the npk is unbound and the circuit rejects.
/// not `assert!(insert)`. After the main loop, every private-PDA position must appear in
/// this set; otherwise the npk is unbound and the circuit rejects.
private_pda_bound_positions: HashSet<usize>,
/// Across the whole transaction, each `(program_id, seed)` pair may resolve to at most one
/// `AccountId`. A seed under a program can derive a family of accounts, one public PDA and
@ -43,39 +45,29 @@ struct ExecutionState {
/// `AccountId` entry or as an equality check against the existing one, making the rule: one
/// `(program, seed)` → one account per tx.
pda_family_binding: HashMap<(ProgramId, PdaSeed), AccountId>,
/// Map from a mask-3 `pre_state`'s position in `visibility_mask` to the npk supplied for
/// that position in `private_account_keys`. Built once in `derive_from_outputs` by walking
/// `visibility_mask` in lock-step with `private_account_keys`, used later by the claim and
/// caller-seeds authorization paths.
/// Map from a private-PDA `pre_state`'s position in `account_identities` to the npk that
/// variant supplies for that position. Populated once in `derive_from_outputs` by walking
/// `account_identities` and consulting `npk_if_private_pda`. Used later by the claim and
/// caller-seeds authorization paths to verify
/// `AccountId::for_private_pda(program_id, seed, npk) == pre_state.account_id`.
private_pda_npk_by_position: HashMap<usize, NullifierPublicKey>,
}
impl ExecutionState {
/// Validate program outputs and derive the overall execution state.
pub fn derive_from_outputs(
visibility_mask: &[u8],
private_account_keys: &[(NullifierPublicKey, SharedSecretKey)],
account_identities: &[InputAccountIdentity],
program_id: ProgramId,
program_outputs: Vec<ProgramOutput>,
) -> Self {
// Build position → npk map for mask-3 pre_states. `private_account_keys` is consumed in
// pre_state order across all masks 1/2/3, so walk `visibility_mask` in lock-step. The
// downstream `compute_circuit_output` also consumes the same iterator and its trailing
// assertions catch an over-supply of keys; under-supply surfaces here.
// Build position → npk map for private-PDA pre_states, indexed by position in
// `account_identities`. The vec is documented as 1:1 with the program's pre_state order,
// so position here matches `pre_state_position` used downstream in
// `validate_and_sync_states`.
let mut private_pda_npk_by_position: HashMap<usize, NullifierPublicKey> = HashMap::new();
{
let mut keys_iter = private_account_keys.iter();
for (pos, &mask) in visibility_mask.iter().enumerate() {
if matches!(mask, 1..=3) {
let (npk, _) = keys_iter.next().unwrap_or_else(|| {
panic!(
"private_account_keys shorter than visibility_mask demands: no key for masked position {pos} (mask {mask})"
)
});
if mask == 3 {
private_pda_npk_by_position.insert(pos, *npk);
}
}
for (pos, account_identity) in account_identities.iter().enumerate() {
if let Some(npk) = account_identity.npk_if_private_pda() {
private_pda_npk_by_position.insert(pos, npk);
}
}
@ -192,7 +184,7 @@ impl ExecutionState {
}
execution_state.validate_and_sync_states(
visibility_mask,
account_identities,
chained_call.program_id,
caller_program_id,
&chained_call.pda_seeds,
@ -209,12 +201,12 @@ impl ExecutionState {
"Inner call without a chained call found",
);
// Every mask-3 pre_state must have had its npk bound to its account_id, either via a
// `Claim::Pda(seed)` in some program's post_state or via a caller's `pda_seeds` matching
// the private derivation. An unbound mask-3 pre_state has no cryptographic link between
// the supplied npk and the account_id, and must be rejected.
for (pos, &mask) in visibility_mask.iter().enumerate() {
if mask == 3 {
// Every private-PDA pre_state must have had its npk bound to its account_id, either via
// a `Claim::Pda(seed)` in some program's post_state or via a caller's `pda_seeds`
// matching the private derivation. An unbound private-PDA pre_state has no
// cryptographic link between the supplied npk and the account_id, and must be rejected.
for (pos, account_identity) in account_identities.iter().enumerate() {
if account_identity.is_private_pda() {
assert!(
execution_state.private_pda_bound_positions.contains(&pos),
"private PDA pre_state at position {pos} has no proven (seed, npk) binding via Claim::Pda or caller pda_seeds"
@ -249,7 +241,7 @@ impl ExecutionState {
/// Validate program pre and post states and populate the execution state.
fn validate_and_sync_states(
&mut self,
visibility_mask: &[u8],
account_identities: &[InputAccountIdentity],
program_id: ProgramId,
caller_program_id: Option<ProgramId>,
caller_pda_seeds: &[PdaSeed],
@ -327,9 +319,9 @@ impl ExecutionState {
.position(|acc| acc.account_id == pre_account_id)
.expect("Pre state must exist at this point");
let mask = visibility_mask[pre_state_position];
match mask {
0 => match claim {
let account_identity = &account_identities[pre_state_position];
if account_identity.is_public() {
match claim {
Claim::Authorized => {
// Note: no need to check authorized pdas because we have already
// checked consistency of authorization above.
@ -351,40 +343,40 @@ impl ExecutionState {
pre_account_id,
);
}
},
3 => {
match claim {
Claim::Authorized => {
assert!(
pre_is_authorized,
"Cannot claim unauthorized private PDA {pre_account_id}"
);
}
Claim::Pda(seed) => {
let npk = self
}
} else if account_identity.is_private_pda() {
match claim {
Claim::Authorized => {
assert!(
pre_is_authorized,
"Cannot claim unauthorized private PDA {pre_account_id}"
);
}
Claim::Pda(seed) => {
let npk = self
.private_pda_npk_by_position
.get(&pre_state_position)
.expect("private PDA pre_state must have an npk in the position map");
let pda = AccountId::for_private_pda(&program_id, &seed, npk);
assert_eq!(
pre_account_id, pda,
"Invalid private PDA claim for account {pre_account_id}"
.expect(
"private PDA pre_state must have an npk in the position map",
);
self.private_pda_bound_positions.insert(pre_state_position);
assert_family_binding(
&mut self.pda_family_binding,
program_id,
seed,
pre_account_id,
);
}
let pda = AccountId::for_private_pda(&program_id, &seed, npk);
assert_eq!(
pre_account_id, pda,
"Invalid private PDA claim for account {pre_account_id}"
);
self.private_pda_bound_positions.insert(pre_state_position);
assert_family_binding(
&mut self.pda_family_binding,
program_id,
seed,
pre_account_id,
);
}
}
_ => {
// Mask 1/2: standard private accounts don't enforce the claim semantics.
// Unauthorized private claiming is intentionally allowed since operating
// these accounts requires the npk/nsk keypair anyway.
}
} else {
// Standalone private accounts: don't enforce the claim semantics.
// Unauthorized private claiming is intentionally allowed since operating
// these accounts requires the npk/nsk keypair anyway.
}
post.account_mut().program_owner = program_id;
@ -486,10 +478,7 @@ fn resolve_authorization_and_record_bindings(
fn compute_circuit_output(
execution_state: ExecutionState,
visibility_mask: &[u8],
private_account_keys: &[(NullifierPublicKey, SharedSecretKey)],
private_account_nsks: &[NullifierSecretKey],
private_account_membership_proofs: &[Option<MembershipProof>],
account_identities: &[InputAccountIdentity],
) -> PrivacyPreservingCircuitOutput {
let mut output = PrivacyPreservingCircuitOutput {
public_pre_states: Vec::new(),
@ -503,280 +492,268 @@ fn compute_circuit_output(
let states_iter = execution_state.into_states_iter();
assert_eq!(
visibility_mask.len(),
account_identities.len(),
states_iter.len(),
"Invalid visibility mask length"
"Invalid account_identities length"
);
let mut private_keys_iter = private_account_keys.iter();
let mut private_nsks_iter = private_account_nsks.iter();
let mut private_membership_proofs_iter = private_account_membership_proofs.iter();
let mut output_index = 0;
for (account_visibility_mask, (pre_state, post_state)) in
visibility_mask.iter().copied().zip(states_iter)
{
match account_visibility_mask {
0 => {
// Public account
for (account_identity, (pre_state, post_state)) in account_identities.iter().zip(states_iter) {
match account_identity {
InputAccountIdentity::Public => {
output.public_pre_states.push(pre_state);
output.public_post_states.push(post_state);
}
1 | 2 => {
let Some((npk, shared_secret)) = private_keys_iter.next() else {
panic!("Missing private account key");
};
InputAccountIdentity::PrivateAuthorizedInit {
ssk,
nsk,
identifier,
} => {
assert_ne!(
*identifier, PRIVATE_PDA_FIXED_IDENTIFIER,
"Identifier must be different from {PRIVATE_PDA_FIXED_IDENTIFIER}. This is reserved for private PDA."
);
let npk = NullifierPublicKey::from(nsk);
let account_id = AccountId::from((&npk, *identifier));
assert_eq!(account_id, pre_state.account_id, "AccountId mismatch");
assert!(
pre_state.is_authorized,
"Pre-state not authorized for authenticated private account"
);
assert_eq!(
AccountId::from(npk),
pre_state.account_id,
"AccountId mismatch"
pre_state.account,
Account::default(),
"Found new private account with non default values"
);
let (new_nullifier, new_nonce) = if account_visibility_mask == 1 {
// Private account with authentication
let Some(nsk) = private_nsks_iter.next() else {
panic!("Missing private account nullifier secret key");
};
// Verify the nullifier public key
assert_eq!(
npk,
&NullifierPublicKey::from(nsk),
"Nullifier public key mismatch"
);
// Check pre_state authorization
assert!(
pre_state.is_authorized,
"Pre-state not authorized for authenticated private account"
);
let Some(membership_proof_opt) = private_membership_proofs_iter.next() else {
panic!("Missing membership proof");
};
let new_nullifier = compute_nullifier_and_set_digest(
membership_proof_opt.as_ref(),
&pre_state.account,
npk,
nsk,
);
let new_nonce = pre_state.account.nonce.private_account_nonce_increment(nsk);
(new_nullifier, new_nonce)
} else {
// Private account without authentication
assert_eq!(
pre_state.account,
Account::default(),
"Found new private account with non default values",
);
assert!(
!pre_state.is_authorized,
"Found new private account marked as authorized."
);
let Some(membership_proof_opt) = private_membership_proofs_iter.next() else {
panic!("Missing membership proof");
};
assert!(
membership_proof_opt.is_none(),
"Membership proof must be None for unauthorized accounts"
);
let nullifier = Nullifier::for_account_initialization(npk);
let new_nonce = Nonce::private_account_nonce_init(npk);
((nullifier, DUMMY_COMMITMENT_HASH), new_nonce)
};
output.new_nullifiers.push(new_nullifier);
// Update post-state with new nonce
let mut post_with_updated_nonce = post_state;
post_with_updated_nonce.nonce = new_nonce;
// Compute commitment
let commitment_post = Commitment::new(npk, &post_with_updated_nonce);
// Encrypt and push post state
let encrypted_account = EncryptionScheme::encrypt(
&post_with_updated_nonce,
shared_secret,
&commitment_post,
output_index,
let new_nullifier = (
Nullifier::for_account_initialization(&account_id),
DUMMY_COMMITMENT_HASH,
);
let new_nonce = pre_state.account.nonce.private_account_nonce_increment(nsk);
output.new_commitments.push(commitment_post);
output.ciphertexts.push(encrypted_account);
output_index = output_index
.checked_add(1)
.unwrap_or_else(|| panic!("Too many private accounts, output index overflow"));
emit_private_output(
&mut output,
&mut output_index,
post_state,
&account_id,
*identifier,
ssk,
new_nullifier,
new_nonce,
);
}
3 => {
// Private PDA account. The supplied npk has already been bound to
// `pre_state.account_id` upstream in `validate_and_sync_states`, either via a
// `Claim::Pda(seed)` match or via a caller `pda_seeds` match, both of which
// assert `AccountId::for_private_pda(owner, seed, npk) == account_id`. The
// post-loop assertion in `derive_from_outputs` (see the
// `private_pda_bound_positions` check) guarantees that every mask-3
// position has been through at least one such binding, so this
// branch can safely use the wallet npk without re-verifying.
let Some((npk, shared_secret)) = private_keys_iter.next() else {
panic!("Missing private account key");
};
InputAccountIdentity::PrivateAuthorizedUpdate {
ssk,
nsk,
membership_proof,
identifier,
} => {
assert_ne!(
*identifier, PRIVATE_PDA_FIXED_IDENTIFIER,
"Identifier must be different from {PRIVATE_PDA_FIXED_IDENTIFIER}. This is reserved for private PDA."
);
let npk = NullifierPublicKey::from(nsk);
let account_id = AccountId::from((&npk, *identifier));
let (new_nullifier, new_nonce) = if pre_state.is_authorized {
// Existing private PDA with authentication (like mask 1)
let Some(nsk) = private_nsks_iter.next() else {
panic!("Missing private account nullifier secret key");
};
assert_eq!(
npk,
&NullifierPublicKey::from(nsk),
"Nullifier public key mismatch"
);
let Some(membership_proof_opt) = private_membership_proofs_iter.next() else {
panic!("Missing membership proof");
};
let new_nullifier = compute_nullifier_and_set_digest(
membership_proof_opt.as_ref(),
&pre_state.account,
npk,
nsk,
);
let new_nonce = pre_state.account.nonce.private_account_nonce_increment(nsk);
(new_nullifier, new_nonce)
} else {
// New private PDA (like mask 2). The default + unauthorized requirement
// here rules out use cases like a fully-private multisig, which would need
// a non-default, non-authorized private PDA input account.
// TODO(private-pdas-pr-2/3): relax this once the wallet can supply a
// `(seed, owner)` side input so the npk-to-account_id binding can be
// re-verified for an existing private PDA without a `Claim::Pda` or caller
// `pda_seeds` match.
assert_eq!(
pre_state.account,
Account::default(),
"New private PDA must be default"
);
let Some(membership_proof_opt) = private_membership_proofs_iter.next() else {
panic!("Missing membership proof");
};
assert!(
membership_proof_opt.is_none(),
"Membership proof must be None for new accounts"
);
let nullifier = Nullifier::for_account_initialization(npk);
let new_nonce = Nonce::private_account_nonce_init(npk);
((nullifier, DUMMY_COMMITMENT_HASH), new_nonce)
};
output.new_nullifiers.push(new_nullifier);
let mut post_with_updated_nonce = post_state;
post_with_updated_nonce.nonce = new_nonce;
let commitment_post = Commitment::new(npk, &post_with_updated_nonce);
let encrypted_account = EncryptionScheme::encrypt(
&post_with_updated_nonce,
shared_secret,
&commitment_post,
output_index,
assert_eq!(account_id, pre_state.account_id, "AccountId mismatch");
assert!(
pre_state.is_authorized,
"Pre-state not authorized for authenticated private account"
);
output.new_commitments.push(commitment_post);
output.ciphertexts.push(encrypted_account);
output_index = output_index
.checked_add(1)
.unwrap_or_else(|| panic!("Too many private accounts, output index overflow"));
let new_nullifier = compute_update_nullifier_and_set_digest(
membership_proof,
&pre_state.account,
&account_id,
nsk,
);
let new_nonce = pre_state.account.nonce.private_account_nonce_increment(nsk);
emit_private_output(
&mut output,
&mut output_index,
post_state,
&account_id,
*identifier,
ssk,
new_nullifier,
new_nonce,
);
}
InputAccountIdentity::PrivateUnauthorized {
npk,
ssk,
identifier,
} => {
assert_ne!(
*identifier, PRIVATE_PDA_FIXED_IDENTIFIER,
"Identifier must be different from {PRIVATE_PDA_FIXED_IDENTIFIER}. This is reserved for private PDA."
);
let account_id = AccountId::from((npk, *identifier));
assert_eq!(account_id, pre_state.account_id, "AccountId mismatch");
assert_eq!(
pre_state.account,
Account::default(),
"Found new private account with non default values",
);
assert!(
!pre_state.is_authorized,
"Found new private account marked as authorized."
);
let new_nullifier = (
Nullifier::for_account_initialization(&account_id),
DUMMY_COMMITMENT_HASH,
);
let new_nonce = Nonce::private_account_nonce_init(&account_id);
emit_private_output(
&mut output,
&mut output_index,
post_state,
&account_id,
*identifier,
ssk,
new_nullifier,
new_nonce,
);
}
InputAccountIdentity::PrivatePdaInit { npk: _, ssk } => {
// The npk-to-account_id binding is established upstream in
// `validate_and_sync_states` via `Claim::Pda(seed)` or a caller `pda_seeds`
// match. Here we only enforce the init pre-conditions. The supplied npk on
// the variant has been recorded into `private_pda_npk_by_position` and used
// for the binding check; we use `pre_state.account_id` directly for nullifier
// and commitment derivation.
assert!(
!pre_state.is_authorized,
"PrivatePdaInit requires unauthorized pre_state"
);
assert_eq!(
pre_state.account,
Account::default(),
"New private PDA must be default"
);
let new_nullifier = (
Nullifier::for_account_initialization(&pre_state.account_id),
DUMMY_COMMITMENT_HASH,
);
let new_nonce = Nonce::private_account_nonce_init(&pre_state.account_id);
let account_id = pre_state.account_id;
emit_private_output(
&mut output,
&mut output_index,
post_state,
&account_id,
PRIVATE_PDA_FIXED_IDENTIFIER,
ssk,
new_nullifier,
new_nonce,
);
}
InputAccountIdentity::PrivatePdaUpdate {
ssk,
nsk,
membership_proof,
} => {
// The npk binding is established upstream. Authorization must already be set;
// an unauthorized PrivatePdaUpdate would mean the prover supplied an nsk for an
// unbound PDA, which the upstream binding check would have rejected anyway,
// but we assert here to fail fast and document the precondition.
assert!(
pre_state.is_authorized,
"PrivatePdaUpdate requires authorized pre_state"
);
let new_nullifier = compute_update_nullifier_and_set_digest(
membership_proof,
&pre_state.account,
&pre_state.account_id,
nsk,
);
let new_nonce = pre_state.account.nonce.private_account_nonce_increment(nsk);
let account_id = pre_state.account_id;
emit_private_output(
&mut output,
&mut output_index,
post_state,
&account_id,
PRIVATE_PDA_FIXED_IDENTIFIER,
ssk,
new_nullifier,
new_nonce,
);
}
_ => panic!("Invalid visibility mask value"),
}
}
assert!(
private_keys_iter.next().is_none(),
"Too many private account keys"
);
assert!(
private_nsks_iter.next().is_none(),
"Too many private account nullifier secret keys"
);
assert!(
private_membership_proofs_iter.next().is_none(),
"Too many private account membership proofs"
);
output
}
fn compute_nullifier_and_set_digest(
membership_proof_opt: Option<&MembershipProof>,
#[expect(
clippy::too_many_arguments,
reason = "All seven inputs are distinct concerns from the variant arms; bundling would be artificial"
)]
fn emit_private_output(
output: &mut PrivacyPreservingCircuitOutput,
output_index: &mut u32,
post_state: Account,
account_id: &AccountId,
identifier: Identifier,
shared_secret: &SharedSecretKey,
new_nullifier: (Nullifier, CommitmentSetDigest),
new_nonce: Nonce,
) {
output.new_nullifiers.push(new_nullifier);
let mut post_with_updated_nonce = post_state;
post_with_updated_nonce.nonce = new_nonce;
let commitment_post = Commitment::new(account_id, &post_with_updated_nonce);
let encrypted_account = EncryptionScheme::encrypt(
&post_with_updated_nonce,
identifier,
shared_secret,
&commitment_post,
*output_index,
);
output.new_commitments.push(commitment_post);
output.ciphertexts.push(encrypted_account);
*output_index = output_index
.checked_add(1)
.unwrap_or_else(|| panic!("Too many private accounts, output index overflow"));
}
fn compute_update_nullifier_and_set_digest(
membership_proof: &MembershipProof,
pre_account: &Account,
npk: &NullifierPublicKey,
account_id: &AccountId,
nsk: &NullifierSecretKey,
) -> (Nullifier, CommitmentSetDigest) {
membership_proof_opt.as_ref().map_or_else(
|| {
assert_eq!(
*pre_account,
Account::default(),
"Found new private account with non default values"
);
// Compute initialization nullifier
let nullifier = Nullifier::for_account_initialization(npk);
(nullifier, DUMMY_COMMITMENT_HASH)
},
|membership_proof| {
// Compute commitment set digest associated with provided auth path
let commitment_pre = Commitment::new(npk, pre_account);
let set_digest = compute_digest_for_path(&commitment_pre, membership_proof);
// Compute update nullifier
let nullifier = Nullifier::for_account_update(&commitment_pre, nsk);
(nullifier, set_digest)
},
)
let commitment_pre = Commitment::new(account_id, pre_account);
let set_digest = compute_digest_for_path(&commitment_pre, membership_proof);
let nullifier = Nullifier::for_account_update(&commitment_pre, nsk);
(nullifier, set_digest)
}
fn main() {
let PrivacyPreservingCircuitInput {
program_outputs,
visibility_mask,
private_account_keys,
private_account_nsks,
private_account_membership_proofs,
account_identities,
program_id,
} = env::read();
let execution_state = ExecutionState::derive_from_outputs(
&visibility_mask,
&private_account_keys,
program_id,
program_outputs,
);
let execution_state =
ExecutionState::derive_from_outputs(&account_identities, program_id, program_outputs);
let output = compute_circuit_output(
execution_state,
&visibility_mask,
&private_account_keys,
&private_account_nsks,
&private_account_membership_proofs,
);
let output = compute_circuit_output(execution_state, &account_identities);
env::commit(&output);
}

View File

@ -13,7 +13,7 @@ nssa_core.workspace = true
common.workspace = true
storage.workspace = true
mempool.workspace = true
bedrock_client.workspace = true
logos-blockchain-zone-sdk.workspace = true
testnet_initial_state.workspace = true
anyhow.workspace = true
@ -30,7 +30,6 @@ rand.workspace = true
borsh.workspace = true
bytesize.workspace = true
url.workspace = true
jsonrpsee = { workspace = true, features = ["ws-client"] }
[features]
default = []

View File

@ -0,0 +1,136 @@
use std::{sync::Arc, time::Duration};
use anyhow::{Context as _, Result, anyhow};
use common::block::Block;
use log::warn;
pub use logos_blockchain_core::mantle::ops::channel::MsgId;
pub use logos_blockchain_key_management_system_service::keys::Ed25519Key;
pub use logos_blockchain_zone_sdk::sequencer::SequencerCheckpoint;
use logos_blockchain_zone_sdk::{
CommonHttpClient,
adapter::NodeHttpClient,
sequencer::{Event, SequencerConfig as ZoneSdkSequencerConfig, SequencerHandle, ZoneSequencer},
state::InscriptionInfo,
};
use tokio::task::JoinHandle;
use crate::config::BedrockConfig;
/// Sink for `Event::Published` checkpoints emitted by the drive task.
/// Caller is responsible for persistence (e.g. writing to rocksdb).
/// Boxed `Fn` (not `FnMut`) so the drive task can call it repeatedly
/// without exclusive access; `Send + Sync + 'static` because it is moved
/// into a spawned task.
pub type CheckpointSink = Box<dyn Fn(SequencerCheckpoint) + Send + Sync + 'static>;
/// Sink for finalized L2 block ids derived from `Event::TxsFinalized` and
/// `Event::FinalizedInscriptions`. Caller is responsible for cleanup
/// (e.g. marking pending blocks as finalized in storage).
/// The `u64` is the highest `block_id` decoded from a batch of finalized
/// inscriptions (see `max_block_id_from_inscriptions`).
pub type FinalizedBlockSink = Box<dyn Fn(u64) + Send + Sync + 'static>;
/// Publisher of L2 [`Block`]s to the bedrock layer.
#[expect(async_fn_in_trait, reason = "We don't care about Send/Sync here")]
pub trait BlockPublisherTrait: Clone {
    /// Construct a publisher from the bedrock configuration.
    ///
    /// `on_checkpoint` receives sequencer checkpoints as they are published;
    /// `on_finalized_block` receives finalized L2 block ids. Both callbacks
    /// are invoked from the implementation's background machinery.
    /// `initial_checkpoint` is the resume point for the sequencer, if any.
    async fn new(
        config: &BedrockConfig,
        bedrock_signing_key: Ed25519Key,
        resubmit_interval: Duration,
        initial_checkpoint: Option<SequencerCheckpoint>,
        on_checkpoint: CheckpointSink,
        on_finalized_block: FinalizedBlockSink,
    ) -> Result<Self>;
    /// Fire-and-forget publish. Zone-sdk drives the actual submission and
    /// retries internally; this just hands the payload off.
    async fn publish_block(&self, block: &Block) -> Result<()>;
}
/// Real block publisher backed by zone-sdk's `ZoneSequencer`.
///
/// Clones share the drive task through an `Arc`; the task is aborted only
/// when the last clone is dropped (see `DriveTaskGuard`).
#[derive(Clone)]
pub struct ZoneSdkPublisher {
    // Handle used to submit messages to the running sequencer.
    handle: SequencerHandle<NodeHttpClient>,
    // Aborts the drive task when the last clone is dropped.
    _drive_task: Arc<DriveTaskGuard>,
}
/// RAII guard around the spawned drive task: aborting on drop ensures the
/// background event loop does not outlive the last `ZoneSdkPublisher` clone.
struct DriveTaskGuard(JoinHandle<()>);
impl Drop for DriveTaskGuard {
    fn drop(&mut self) {
        // Abort rather than join so teardown never blocks on the event loop.
        self.0.abort();
    }
}
impl BlockPublisherTrait for ZoneSdkPublisher {
async fn new(
config: &BedrockConfig,
bedrock_signing_key: Ed25519Key,
resubmit_interval: Duration,
initial_checkpoint: Option<SequencerCheckpoint>,
on_checkpoint: CheckpointSink,
on_finalized_block: FinalizedBlockSink,
) -> Result<Self> {
let basic_auth = config.auth.clone().map(Into::into);
let node = NodeHttpClient::new(CommonHttpClient::new(basic_auth), config.node_url.clone());
let zone_sdk_config = ZoneSdkSequencerConfig {
resubmit_interval,
..ZoneSdkSequencerConfig::default()
};
let (mut sequencer, mut handle) = ZoneSequencer::init_with_config(
config.channel_id,
bedrock_signing_key,
node,
zone_sdk_config,
initial_checkpoint,
);
let drive_task = tokio::spawn(async move {
loop {
let Some(event) = sequencer.next_event().await else {
continue;
};
match event {
Event::Published { checkpoint, .. } => on_checkpoint(checkpoint),
Event::TxsFinalized { inscriptions, .. }
| Event::FinalizedInscriptions { inscriptions } => {
if let Some(max_block_id) = max_block_id_from_inscriptions(&inscriptions) {
on_finalized_block(max_block_id);
}
}
Event::ChannelUpdate { .. } | Event::Ready => {}
}
}
});
handle.wait_ready().await;
Ok(Self {
handle,
_drive_task: Arc::new(DriveTaskGuard(drive_task)),
})
}
async fn publish_block(&self, block: &Block) -> Result<()> {
let data = borsh::to_vec(block).context("Failed to serialize block")?;
self.handle
.publish_message(data)
.await
.map_err(|e| anyhow!("zone-sdk publish failed: {e}"))?;
Ok(())
}
}
/// Scan finalized inscriptions for borsh-serialized `Block`s and return the
/// largest `block_id` seen, or `None` if no payload decodes.
/// Payloads that fail to deserialize are logged and ignored.
fn max_block_id_from_inscriptions(inscriptions: &[InscriptionInfo]) -> Option<u64> {
    let mut highest: Option<u64> = None;
    for inscription in inscriptions {
        match borsh::from_slice::<Block>(&inscription.payload) {
            Ok(block) => {
                let id = block.header.block_id;
                // Keep the running maximum across all decodable payloads.
                highest = Some(highest.map_or(id, |current| current.max(id)));
            }
            Err(err) => {
                warn!("Failed to deserialize finalized inscription as Block: {err:#}");
            }
        }
    }
    highest
}

Some files were not shown because too many files have changed in this diff Show More