Merge branch 'main' into dsq/wallet-ffi

# Conflicts:
#	Cargo.lock
#	Cargo.toml
This commit is contained in:
danielSanchezQ 2026-01-30 10:39:25 +00:00
commit 45a4063aea
103 changed files with 5792 additions and 1083 deletions

View File

@ -99,7 +99,7 @@ jobs:
run: rustup install
- name: Install nextest
run: cargo install cargo-nextest
run: cargo install --locked cargo-nextest
- name: Run tests
env:

2354
Cargo.lock generated

File diff suppressed because it is too large Load Diff

View File

@ -1,25 +1,30 @@
[workspace]
resolver = "3"
members = [
"integration_tests",
"sequencer_runner",
"storage",
"key_protocol",
"sequencer_rpc",
"mempool",
"wallet",
"wallet-ffi",
"sequencer_core",
"common",
"nssa",
"nssa/core",
"program_methods",
"program_methods/guest",
"test_program_methods",
"test_program_methods/guest",
"examples/program_deployment",
"examples/program_deployment/methods",
"examples/program_deployment/methods/guest",
"integration_tests",
"storage",
"key_protocol",
"mempool",
"wallet",
"wallet-ffi",
"common",
"nssa",
"nssa/core",
"sequencer_core",
"sequencer_rpc",
"sequencer_runner",
"indexer_service",
"indexer_service/protocol",
"indexer_service/rpc",
"program_methods",
"program_methods/guest",
"test_program_methods",
"test_program_methods/guest",
"examples/program_deployment",
"examples/program_deployment/methods",
"examples/program_deployment/methods/guest",
"bedrock_client",
"indexer_core",
]
[workspace.dependencies]
@ -32,15 +37,22 @@ key_protocol = { path = "key_protocol" }
sequencer_core = { path = "sequencer_core" }
sequencer_rpc = { path = "sequencer_rpc" }
sequencer_runner = { path = "sequencer_runner" }
indexer_service = { path = "indexer_service" }
indexer_service_protocol = { path = "indexer_service/protocol" }
indexer_service_rpc = { path = "indexer_service/rpc" }
wallet = { path = "wallet" }
wallet-ffi = { path = "wallet-ffi" }
test_program_methods = { path = "test_program_methods" }
bedrock_client = { path = "bedrock_client" }
indexer_core = { path = "indexer_core" }
tokio = { version = "1.28.2", features = [
"net",
"rt-multi-thread",
"sync",
"fs",
"net",
"rt-multi-thread",
"sync",
"fs",
] }
tokio-util = "0.7.18"
risc0-zkvm = { version = "3.0.3", features = ['std'] }
risc0-build = "3.0.3"
anyhow = "1.0.98"
@ -51,6 +63,7 @@ serde = { version = "1.0.60", default-features = false, features = ["derive"] }
serde_json = "1.0.81"
actix = "0.13.0"
actix-cors = "0.6.1"
jsonrpsee = "0.26.0"
futures = "0.3"
actix-rt = "*"
lazy_static = "1.5.0"
@ -76,22 +89,30 @@ chrono = "0.4.41"
borsh = "1.5.7"
base58 = "0.2.0"
itertools = "0.14.0"
url = { version = "2.5.4", features = ["serde"] }
tokio-retry = "0.3.0"
schemars = "1.2.0"
logos-blockchain-common-http-client = { git = "https://github.com/logos-blockchain/logos-blockchain.git" }
logos-blockchain-key-management-system-service = { git = "https://github.com/logos-blockchain/logos-blockchain.git" }
logos-blockchain-core = { git = "https://github.com/logos-blockchain/logos-blockchain.git" }
logos-blockchain-chain-broadcast-service = { git = "https://github.com/logos-blockchain/logos-blockchain.git" }
rocksdb = { version = "0.24.0", default-features = false, features = [
"snappy",
"bindgen-runtime",
"snappy",
"bindgen-runtime",
] }
rand = { version = "0.8.5", features = ["std", "std_rng", "getrandom"] }
k256 = { version = "0.13.3", features = [
"ecdsa-core",
"arithmetic",
"expose-field",
"serde",
"pem",
"ecdsa-core",
"arithmetic",
"expose-field",
"serde",
"pem",
] }
elliptic-curve = { version = "0.13.8", features = ["arithmetic"] }
actix-web = { version = "=4.1.0", default-features = false, features = [
"macros",
"macros",
] }
clap = { version = "4.5.42", features = ["derive", "env"] }
reqwest = { version = "0.11.16", features = ["json"] }
reqwest = { version = "0.12", features = ["json", "rustls-tls", "stream"] }

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

15
bedrock_client/Cargo.toml Normal file
View File

@ -0,0 +1,15 @@
[package]
name = "bedrock_client"
version = "0.1.0"
edition = "2024"
[dependencies]
reqwest.workspace = true
anyhow.workspace = true
tokio-retry.workspace = true
futures.workspace = true
log.workspace = true
serde.workspace = true
logos-blockchain-common-http-client.workspace = true
logos-blockchain-core.workspace = true
logos-blockchain-chain-broadcast-service.workspace = true

67
bedrock_client/src/lib.rs Normal file
View File

@ -0,0 +1,67 @@
use anyhow::Result;
use futures::{Stream, TryFutureExt};
use log::warn;
pub use logos_blockchain_chain_broadcast_service::BlockInfo;
pub use logos_blockchain_common_http_client::{BasicAuthCredentials, CommonHttpClient, Error};
pub use logos_blockchain_core::{block::Block, header::HeaderId, mantle::SignedMantleTx};
use reqwest::{Client, Url};
use serde::{Deserialize, Serialize};
use tokio_retry::Retry;
/// Fibonacci backoff retry strategy configuration.
///
/// Consumed by `BedrockClient::get_block_by_id`: the first retry waits
/// `start_delay_millis` and subsequent delays grow along the Fibonacci
/// sequence, truncated to at most `max_retries` retries.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct BackoffConfig {
    // Initial retry delay, in milliseconds.
    pub start_delay_millis: u64,
    // Maximum number of retries (the strategy iterator is `take`n to this).
    pub max_retries: usize,
}
// Simple wrapper
// maybe extend in the future for our purposes
// `Clone` is cheap because `CommonHttpClient` is internally reference counted (`Arc`).
/// Thin client for a Bedrock (L1) node: pairs a shared `CommonHttpClient`
/// with the node's base URL.
#[derive(Clone)]
pub struct BedrockClient {
    // Shared HTTP client (internally `Arc`-backed, so cloning is cheap).
    http_client: CommonHttpClient,
    // Base URL of the Bedrock node all requests are sent to.
    node_url: Url,
}
impl BedrockClient {
    /// Builds a client for the Bedrock node at `node_url` with optional HTTP
    /// basic-auth credentials and a fixed 60-second request timeout.
    pub fn new(auth: Option<BasicAuthCredentials>, node_url: Url) -> Result<Self> {
        let client = Client::builder()
            //Add more fields if needed
            .timeout(std::time::Duration::from_secs(60))
            .build()?;
        let http_client = CommonHttpClient::new_with_client(client, auth);
        Ok(Self {
            http_client,
            node_url,
        })
    }

    /// Submits a signed Mantle transaction to the node (no retries).
    pub async fn post_transaction(&self, tx: SignedMantleTx) -> Result<(), Error> {
        self.http_client
            .post_transaction(self.node_url.clone(), tx)
            .await
    }

    /// Opens the node's block-info stream.
    /// NOTE(review): "lib" presumably means last-irreversible-block —
    /// confirm against the logos-blockchain HTTP client docs.
    pub async fn get_lib_stream(&self) -> Result<impl Stream<Item = BlockInfo>, Error> {
        self.http_client.get_lib_stream(self.node_url.clone()).await
    }

    /// Fetches a block by header id, retrying per `backoff` (Fibonacci
    /// delays starting at `start_delay_millis`, at most `max_retries`
    /// retries). Each failed attempt is logged at `warn` level.
    pub async fn get_block_by_id(
        &self,
        header_id: HeaderId,
        backoff: &BackoffConfig,
    ) -> Result<Option<Block<SignedMantleTx>>, Error> {
        let strategy =
            tokio_retry::strategy::FibonacciBackoff::from_millis(backoff.start_delay_millis)
                .take(backoff.max_retries);
        Retry::spawn(strategy, || {
            self.http_client
                .get_block_by_id(self.node_url.clone(), header_id)
                .inspect_err(|err| warn!("Block fetching failed with err: {err:#?}"))
        })
        .await
    }
}

View File

@ -52,7 +52,7 @@ if [ -d ".git" ]; then
git reset --hard origin/main
else
echo "Cloning repository..."
git clone https://github.com/vacp2p/nescience-testnet.git .
git clone https://github.com/logos-blockchain/lssa.git .
git checkout main
fi

View File

@ -17,3 +17,5 @@ log.workspace = true
hex.workspace = true
borsh.workspace = true
base64.workspace = true
url.workspace = true
logos-blockchain-common-http-client.workspace = true

View File

@ -4,6 +4,7 @@ use sha2::{Digest, Sha256, digest::FixedOutput};
use crate::transaction::EncodedTransaction;
pub type HashType = [u8; 32];
pub type MantleMsgId = [u8; 32];
#[derive(Debug, Clone)]
/// Our own hasher.
@ -23,7 +24,7 @@ pub type BlockHash = [u8; 32];
pub type BlockId = u64;
pub type TimeStamp = u64;
#[derive(Debug, Clone)]
#[derive(Debug, Clone, BorshSerialize, BorshDeserialize)]
pub struct BlockHeader {
pub block_id: BlockId,
pub prev_block_hash: BlockHash,
@ -32,18 +33,27 @@ pub struct BlockHeader {
pub signature: nssa::Signature,
}
#[derive(Debug, Clone)]
#[derive(Debug, Clone, BorshSerialize, BorshDeserialize)]
pub struct BlockBody {
pub transactions: Vec<EncodedTransaction>,
}
#[derive(Debug, Clone)]
#[derive(Debug, Clone, BorshSerialize, BorshDeserialize)]
/// Bedrock (L1) settlement status of an L2 block.
/// NOTE(review): the precise confirmation semantics distinguishing `Safe`
/// from `Finalized` are not visible here — confirm with the indexer flow.
pub enum BedrockStatus {
    Pending,
    Safe,
    Finalized,
}
#[derive(Debug, BorshSerialize, BorshDeserialize)]
/// An L2 block: signed header, transaction body, and its Bedrock (L1)
/// anchoring metadata.
pub struct Block {
    pub header: BlockHeader,
    pub body: BlockBody,
    // Current L1 settlement status; newly produced blocks start `Pending`.
    pub bedrock_status: BedrockStatus,
    // Mantle message id linking this block to its L1 parent —
    // TODO(review): confirm exact meaning against the indexer flow.
    pub bedrock_parent_id: MantleMsgId,
}
#[derive(Debug, PartialEq, Eq, BorshSerialize, BorshDeserialize)]
#[derive(Debug, Clone, PartialEq, Eq, BorshSerialize, BorshDeserialize)]
pub struct HashableBlockData {
pub block_id: BlockId,
pub prev_block_hash: BlockHash,
@ -52,7 +62,11 @@ pub struct HashableBlockData {
}
impl HashableBlockData {
pub fn into_block(self, signing_key: &nssa::PrivateKey) -> Block {
pub fn into_pending_block(
self,
signing_key: &nssa::PrivateKey,
bedrock_parent_id: MantleMsgId,
) -> Block {
let data_bytes = borsh::to_vec(&self).unwrap();
let signature = nssa::Signature::new(signing_key, &data_bytes);
let hash = OwnHasher::hash(&data_bytes);
@ -67,8 +81,14 @@ impl HashableBlockData {
body: BlockBody {
transactions: self.transactions,
},
bedrock_status: BedrockStatus::Pending,
bedrock_parent_id,
}
}
    /// Hash (via `OwnHasher`) of this block data's borsh encoding.
    pub fn block_hash(&self) -> BlockHash {
        // Borsh serialization of an in-memory value cannot fail, hence unwrap.
        OwnHasher::hash(&borsh::to_vec(&self).unwrap())
    }
}
impl From<Block> for HashableBlockData {

View File

@ -0,0 +1,6 @@
use serde::{Deserialize, Serialize};
#[derive(Debug, Clone, Serialize, Deserialize)]
/// Messages the indexer pushes to the sequencer
/// (see `SequencerClient::post_indexer_message`).
pub enum Message {
    // An L2 block at this height was observed finalized on L1.
    L2BlockFinalized { l2_block_height: u64 },
}

View File

@ -0,0 +1 @@
pub mod indexer;

View File

@ -1,4 +1,5 @@
pub mod block;
pub mod communication;
pub mod error;
pub mod rpc_primitives;
pub mod sequencer_client;

View File

@ -73,6 +73,11 @@ pub struct GetProofForCommitmentRequest {
#[derive(Serialize, Deserialize, Debug)]
pub struct GetProgramIdsRequest {}
#[derive(Serialize, Deserialize, Debug)]
/// RPC request body for the sequencer's `post_indexer_message` method.
pub struct PostIndexerMessageRequest {
    // The indexer event being forwarded to the sequencer.
    pub message: crate::communication::indexer::Message,
}
parse_request!(HelloRequest);
parse_request!(RegisterAccountRequest);
parse_request!(SendTxRequest);
@ -87,6 +92,7 @@ parse_request!(GetAccountsNoncesRequest);
parse_request!(GetProofForCommitmentRequest);
parse_request!(GetAccountRequest);
parse_request!(GetProgramIdsRequest);
parse_request!(PostIndexerMessageRequest);
#[derive(Serialize, Deserialize, Debug)]
pub struct HelloResponse {
@ -216,3 +222,8 @@ pub struct GetInitialTestnetAccountsResponse {
pub account_id: String,
pub balance: u64,
}
#[derive(Serialize, Deserialize, Debug)]
/// RPC response for `post_indexer_message`.
pub struct PostIndexerMessageResponse {
    // Acknowledgement status string returned by the sequencer.
    pub status: String,
}

View File

@ -1,10 +1,12 @@
use std::{collections::HashMap, ops::RangeInclusive};
use std::{collections::HashMap, ops::RangeInclusive, str::FromStr};
use anyhow::Result;
use logos_blockchain_common_http_client::BasicAuthCredentials;
use nssa_core::program::ProgramId;
use reqwest::Client;
use serde::Deserialize;
use serde::{Deserialize, Serialize};
use serde_json::Value;
use url::Url;
use super::rpc_primitives::requests::{
GetAccountBalanceRequest, GetAccountBalanceResponse, GetBlockDataRequest, GetBlockDataResponse,
@ -20,28 +22,75 @@ use crate::{
GetInitialTestnetAccountsResponse, GetLastBlockRequest, GetLastBlockResponse,
GetProgramIdsRequest, GetProgramIdsResponse, GetProofForCommitmentRequest,
GetProofForCommitmentResponse, GetTransactionByHashRequest,
GetTransactionByHashResponse, SendTxRequest, SendTxResponse,
GetTransactionByHashResponse, PostIndexerMessageRequest, PostIndexerMessageResponse,
SendTxRequest, SendTxResponse,
},
},
transaction::{EncodedTransaction, NSSATransaction},
};
#[derive(Debug, Clone, Serialize, Deserialize)]
/// HTTP basic-auth credentials; textual form is `user` or `user:password`
/// (see the `Display`/`FromStr` impls below).
pub struct BasicAuth {
    pub username: String,
    // `None` means authenticate with the username only.
    pub password: Option<String>,
}
impl std::fmt::Display for BasicAuth {
    /// Renders the credentials as `user` or `user:password`.
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        match &self.password {
            Some(secret) => write!(f, "{}:{secret}", self.username),
            None => write!(f, "{}", self.username),
        }
    }
}
impl FromStr for BasicAuth {
    type Err = anyhow::Error;

    /// Parses `"user"` or `"user:password"` into credentials.
    ///
    /// An empty password segment (`"user:"`) is treated as no password.
    /// Everything after the *first* colon is the password, so passwords may
    /// themselves contain `':'`.
    ///
    /// # Errors
    /// Returns an error when the username part is empty. (The previous
    /// `splitn(2, ':')` implementation could never hit its error path — a
    /// third `next()` on a two-way `splitn` is always `None` and the first
    /// `next()` is always `Some` — so empty usernames slipped through.)
    fn from_str(s: &str) -> Result<Self, Self::Err> {
        // Split at the first colon; no colon means username-only.
        let (username, password) = match s.split_once(':') {
            Some((user, pass)) => (user, (!pass.is_empty()).then_some(pass)),
            None => (s, None),
        };
        if username.is_empty() {
            return Err(anyhow::anyhow!(
                "Invalid auth format. Expected 'user' or 'user:password'"
            ));
        }
        Ok(Self {
            username: username.to_string(),
            password: password.map(|p| p.to_string()),
        })
    }
}
// Bridge into the logos-blockchain HTTP client's credential type.
impl From<BasicAuth> for BasicAuthCredentials {
    fn from(value: BasicAuth) -> Self {
        BasicAuthCredentials::new(value.username, value.password)
    }
}
#[derive(Clone)]
pub struct SequencerClient {
pub client: reqwest::Client,
pub sequencer_addr: String,
pub basic_auth: Option<(String, Option<String>)>,
pub sequencer_addr: Url,
pub basic_auth: Option<BasicAuth>,
}
impl SequencerClient {
pub fn new(sequencer_addr: String) -> Result<Self> {
pub fn new(sequencer_addr: Url) -> Result<Self> {
Self::new_with_auth(sequencer_addr, None)
}
pub fn new_with_auth(
sequencer_addr: String,
basic_auth: Option<(String, Option<String>)>,
) -> Result<Self> {
pub fn new_with_auth(sequencer_addr: Url, basic_auth: Option<BasicAuth>) -> Result<Self> {
Ok(Self {
client: Client::builder()
// Add more fields if needed
@ -66,9 +115,9 @@ impl SequencerClient {
"Calling method {method} with payload {request:?} to sequencer at {}",
self.sequencer_addr
);
let mut call_builder = self.client.post(&self.sequencer_addr);
let mut call_builder = self.client.post(self.sequencer_addr.clone());
if let Some((username, password)) = &self.basic_auth {
if let Some(BasicAuth { username, password }) = &self.basic_auth {
call_builder = call_builder.basic_auth(username, password.as_deref());
}
@ -347,4 +396,23 @@ impl SequencerClient {
Ok(resp_deser)
}
/// Posts an indexer-originated message (e.g. an L2 finalization notice) to
/// the sequencer's `post_indexer_message` RPC method.
///
/// NOTE(review): the two `unwrap()`s on the RPC call and on response
/// deserialization panic on failure instead of surfacing a
/// `SequencerClientError` — consider propagating with `?`/`map_err`.
pub async fn post_indexer_message(
    &self,
    message: crate::communication::indexer::Message,
) -> Result<PostIndexerMessageResponse, SequencerClientError> {
    let last_req = PostIndexerMessageRequest { message };
    // Serializing our own request type cannot fail.
    let req = serde_json::to_value(last_req).unwrap();
    let resp = self
        .call_method_with_payload("post_indexer_message", req)
        .await
        .unwrap();
    let resp_deser = serde_json::from_value(resp).unwrap();
    Ok(resp_deser)
}
}

View File

@ -30,7 +30,7 @@ pub fn produce_dummy_block(
transactions,
};
block_data.into_block(&sequencer_sign_key_for_testing())
block_data.into_pending_block(&sequencer_sign_key_for_testing(), [0; 32])
}
pub fn produce_dummy_empty_transaction() -> EncodedTransaction {

View File

@ -50,7 +50,7 @@ async fn main() {
wallet_core
.send_privacy_preserving_tx(
accounts,
&Program::serialize_instruction(greeting).unwrap(),
Program::serialize_instruction(greeting).unwrap(),
&program.into(),
)
.await

View File

@ -58,7 +58,7 @@ async fn main() {
wallet_core
.send_privacy_preserving_tx(
accounts,
&Program::serialize_instruction(instruction).unwrap(),
Program::serialize_instruction(instruction).unwrap(),
&program_with_dependencies,
)
.await

View File

@ -101,7 +101,7 @@ async fn main() {
wallet_core
.send_privacy_preserving_tx(
accounts,
&Program::serialize_instruction(instruction).unwrap(),
Program::serialize_instruction(instruction).unwrap(),
&program.into(),
)
.await
@ -142,7 +142,7 @@ async fn main() {
wallet_core
.send_privacy_preserving_tx(
accounts,
&Program::serialize_instruction(instruction).unwrap(),
Program::serialize_instruction(instruction).unwrap(),
&program.into(),
)
.await

18
indexer_core/Cargo.toml Normal file
View File

@ -0,0 +1,18 @@
[package]
name = "indexer_core"
version = "0.1.0"
edition = "2024"
[dependencies]
common.workspace = true
bedrock_client.workspace = true
anyhow.workspace = true
log.workspace = true
serde.workspace = true
tokio.workspace = true
borsh.workspace = true
futures.workspace = true
url.workspace = true
logos-blockchain-core.workspace = true
serde_json.workspace = true

View File

@ -0,0 +1,36 @@
use std::{fs::File, io::BufReader, path::Path};
use anyhow::{Context, Result};
use bedrock_client::BackoffConfig;
use common::sequencer_client::BasicAuth;
use logos_blockchain_core::mantle::ops::channel::ChannelId;
use serde::{Deserialize, Serialize};
use url::Url;
#[derive(Debug, Clone, Serialize, Deserialize)]
/// ToDo: Expand if necessary
/// Connection settings for a single RPC endpoint.
pub struct ClientConfig {
    // Base URL of the endpoint.
    pub addr: Url,
    // Optional HTTP basic-auth credentials.
    pub auth: Option<BasicAuth>,
}
#[derive(Debug, Clone, Serialize, Deserialize)]
/// Note: For individual RPC requests we use Fibonacci backoff retry strategy
/// Top-level indexer configuration, loaded from JSON by
/// `IndexerConfig::from_path`.
pub struct IndexerConfig {
    // Delay before re-subscribing when the L1 block stream ends.
    pub resubscribe_interval_millis: u64,
    // Retry/backoff settings for individual block fetches.
    pub backoff: BackoffConfig,
    // Bedrock (L1) node endpoint.
    pub bedrock_client_config: ClientConfig,
    // Sequencer RPC endpoint.
    pub sequencer_client_config: ClientConfig,
    // L1 channel carrying the inscribed L2 blocks to index.
    pub channel_id: ChannelId,
}
impl IndexerConfig {
    /// Loads an [`IndexerConfig`] from the JSON file at `config_home`,
    /// attaching the path to any open/parse error.
    pub fn from_path(config_home: &Path) -> Result<IndexerConfig> {
        let handle = File::open(config_home)
            .with_context(|| format!("Failed to open indexer config at {config_home:?}"))?;
        let buffered = BufReader::new(handle);
        let parsed = serde_json::from_reader(buffered)
            .with_context(|| format!("Failed to parse indexer config at {config_home:?}"))?;
        Ok(parsed)
    }
}

124
indexer_core/src/lib.rs Normal file
View File

@ -0,0 +1,124 @@
use std::sync::Arc;
use anyhow::Result;
use bedrock_client::BedrockClient;
use common::{
block::HashableBlockData, communication::indexer::Message,
rpc_primitives::requests::PostIndexerMessageResponse, sequencer_client::SequencerClient,
};
use futures::StreamExt;
use log::info;
use logos_blockchain_core::mantle::{
Op, SignedMantleTx,
ops::channel::{ChannelId, inscribe::InscriptionOp},
};
use tokio::sync::RwLock;
use crate::{config::IndexerConfig, state::IndexerState};
pub mod config;
pub mod state;
/// Core indexer: follows the L1 block stream, extracts L2 blocks inscribed
/// on the configured channel, and notifies the sequencer.
pub struct IndexerCore {
    pub bedrock_client: BedrockClient,
    pub sequencer_client: SequencerClient,
    pub config: IndexerConfig,
    // Shared mutable state (currently only the latest seen L2 block id).
    pub state: IndexerState,
}
impl IndexerCore {
    /// Builds the indexer from `config`: a Bedrock (L1) client, a sequencer
    /// client, and zero-initialized in-memory state.
    pub fn new(config: IndexerConfig) -> Result<Self> {
        Ok(Self {
            bedrock_client: BedrockClient::new(
                config.bedrock_client_config.auth.clone().map(Into::into),
                config.bedrock_client_config.addr.clone(),
            )?,
            sequencer_client: SequencerClient::new_with_auth(
                config.sequencer_client_config.addr.clone(),
                config.sequencer_client_config.auth.clone(),
            )?,
            config,
            // No state setup for now, future task.
            state: IndexerState {
                latest_seen_block: Arc::new(RwLock::new(0)),
            },
        })
    }

    /// Endless loop: subscribe to the L1 block stream, fetch each announced
    /// block, extract L2 blocks inscribed on the configured channel, record
    /// the highest L2 block id seen, and notify the sequencer of each one.
    ///
    /// When the stream ends it re-subscribes after
    /// `resubscribe_interval_millis`; only client errors make it return.
    pub async fn subscribe_parse_block_stream(&self) -> Result<()> {
        loop {
            let mut stream_pinned = Box::pin(self.bedrock_client.get_lib_stream().await?);
            info!("Block stream joined");
            while let Some(block_info) = stream_pinned.next().await {
                let header_id = block_info.header_id;
                info!("Observed L1 block at height {}", block_info.height);
                // Fetch the full block (with backoff retries); `None` means
                // the node does not have it and it is skipped.
                if let Some(l1_block) = self
                    .bedrock_client
                    .get_block_by_id(header_id, &self.config.backoff)
                    .await?
                {
                    info!("Extracted L1 block at height {}", block_info.height);
                    let l2_blocks_parsed = parse_blocks(
                        l1_block.into_transactions().into_iter(),
                        &self.config.channel_id,
                    );
                    for l2_block in l2_blocks_parsed {
                        // State modification, will be updated in future
                        // (keep the max block id; scope the write guard tightly).
                        {
                            let mut guard = self.state.latest_seen_block.write().await;
                            if l2_block.block_id > *guard {
                                *guard = l2_block.block_id;
                            }
                        }
                        // Sending data into sequencer, may need to be expanded.
                        let message = Message::L2BlockFinalized {
                            l2_block_height: l2_block.block_id,
                        };
                        let status = self.send_message_to_sequencer(message.clone()).await?;
                        info!("Sent message {message:#?} to sequencer; status {status:#?}");
                    }
                }
            }
            // Refetch stream after delay
            tokio::time::sleep(std::time::Duration::from_millis(
                self.config.resubscribe_interval_millis,
            ))
            .await;
        }
    }

    /// Forwards one indexer message to the sequencer's RPC endpoint.
    pub async fn send_message_to_sequencer(
        &self,
        message: Message,
    ) -> Result<PostIndexerMessageResponse> {
        Ok(self.sequencer_client.post_indexer_message(message).await?)
    }
}
/// Extracts every L2 block inscribed on `decoded_channel_id` from the given
/// L1 transactions.
///
/// Each transaction's ops are scanned; `ChannelInscribe` ops on the matching
/// channel have their inscription borsh-decoded into [`HashableBlockData`].
/// Other op kinds, other channels, and undecodable inscriptions are
/// silently skipped.
fn parse_blocks(
    block_txs: impl Iterator<Item = SignedMantleTx>,
    decoded_channel_id: &ChannelId,
) -> impl Iterator<Item = HashableBlockData> {
    block_txs
        .flat_map(|tx| tx.mantle_tx.ops.into_iter())
        .filter_map(|op| {
            let Op::ChannelInscribe(InscriptionOp {
                channel_id,
                inscription,
                ..
            }) = op
            else {
                return None;
            };
            if channel_id != *decoded_channel_id {
                return None;
            }
            borsh::from_slice::<HashableBlockData>(&inscription).ok()
        })
}

View File

@ -0,0 +1,9 @@
use std::sync::Arc;
use tokio::sync::RwLock;
#[derive(Debug, Clone)]
/// Shared indexer state; clones share the same underlying data via `Arc`.
pub struct IndexerState {
    // Only one field for now, for testing.
    // Highest L2 block id observed in finalized L1 blocks.
    pub latest_seen_block: Arc<RwLock<u64>>,
}

View File

@ -0,0 +1,17 @@
[package]
name = "indexer_service"
version = "0.1.0"
edition = "2024"
[dependencies]
indexer_service_protocol.workspace = true
indexer_service_rpc = { workspace = true, features = ["server"] }
clap = { workspace = true, features = ["derive"] }
anyhow.workspace = true
tokio.workspace = true
tokio-util.workspace = true
env_logger.workspace = true
log.workspace = true
jsonrpsee.workspace = true
async-trait = "0.1.89"

View File

@ -0,0 +1,64 @@
# Chef stage - uses pre-built cargo-chef image
FROM lukemathwalker/cargo-chef:latest-rust-1.91.1-slim-trixie AS chef
# Install build dependencies
RUN apt-get update && apt-get install -y \
pkg-config \
libssl-dev \
libclang-dev \
clang \
curl \
&& rm -rf /var/lib/apt/lists/*
WORKDIR /indexer_service
# Planner stage - generates dependency recipe
FROM chef AS planner
COPY . .
RUN cargo chef prepare --bin indexer_service --recipe-path recipe.json
# Builder stage - builds dependencies and application
FROM chef AS builder
COPY --from=planner /indexer_service/recipe.json recipe.json
# Build dependencies only (this layer will be cached)
RUN cargo chef cook --bin indexer_service --release --recipe-path recipe.json
# Copy source code
COPY . .
# Build the actual application
RUN cargo build --release --bin indexer_service
# Strip debug symbols to reduce binary size
RUN strip /indexer_service/target/release/indexer_service
# Runtime stage - minimal image
FROM debian:trixie-slim
# Create non-root user for security
RUN useradd -m -u 1000 -s /bin/bash indexer_service_user
# Copy binary from builder
COPY --from=builder --chown=indexer_service_user:indexer_service_user /indexer_service/target/release/indexer_service /usr/local/bin/indexer_service
# Expose default port
EXPOSE 8779
# Health check
HEALTHCHECK --interval=30s --timeout=3s --start-period=5s --retries=3 \
CMD curl http://localhost:8779 \
-H "Content-Type: application/json" \
-d "{ \
\"jsonrpc\": \"2.0\", \
\"method\": \"get_schema\", \
\"params\": {}, \
\"id\": 1 \
}" || exit 1
# Run the application
ENV RUST_LOG=info
USER indexer_service_user
WORKDIR /indexer_service
CMD ["indexer_service"]

View File

@ -0,0 +1,9 @@
services:
indexer_service:
image: lssa/indexer_service
build:
context: ..
dockerfile: indexer_service/Dockerfile
container_name: indexer_service
ports:
- "8779:8779"

View File

@ -0,0 +1,18 @@
[package]
name = "indexer_service_protocol"
version = "0.1.0"
edition = "2024"
[dependencies]
nssa_core = { workspace = true, optional = true, features = ["host"] }
nssa = { workspace = true, optional = true }
common = { workspace = true, optional = true }
serde = { workspace = true, features = ["derive"] }
schemars.workspace = true
base64.workspace = true
borsh = { workspace = true, optional = true }
[features]
# Enable conversion to/from NSSA core types
convert = ["dep:nssa_core", "dep:nssa", "dep:common", "dep:borsh"]

View File

@ -0,0 +1,652 @@
//! Conversions between indexer_service_protocol types and nssa/nssa_core types
use crate::*;
// ============================================================================
// Account-related conversions
// ============================================================================
// Wire AccountId <-> core AccountId: both wrap the same inner value, so the
// conversion is lossless in both directions.
impl From<nssa_core::account::AccountId> for AccountId {
    fn from(value: nssa_core::account::AccountId) -> Self {
        Self {
            value: value.into_value(),
        }
    }
}

impl From<AccountId> for nssa_core::account::AccountId {
    fn from(value: AccountId) -> Self {
        let AccountId { value } = value;
        nssa_core::account::AccountId::new(value)
    }
}

// core Account -> wire Account is infallible (field-for-field copy).
impl From<nssa_core::account::Account> for Account {
    fn from(value: nssa_core::account::Account) -> Self {
        let nssa_core::account::Account {
            program_owner,
            balance,
            data,
            nonce,
        } = value;
        Self {
            program_owner,
            balance,
            data: data.into(),
            nonce,
        }
    }
}

// wire Account -> core Account is fallible: core `Data` enforces a size
// limit (`DataTooBigError`) that the wire type does not.
impl TryFrom<Account> for nssa_core::account::Account {
    type Error = nssa_core::account::data::DataTooBigError;

    fn try_from(value: Account) -> Result<Self, Self::Error> {
        let Account {
            program_owner,
            balance,
            data,
            nonce,
        } = value;
        Ok(nssa_core::account::Account {
            program_owner,
            balance,
            data: data.try_into()?,
            nonce,
        })
    }
}

// Account data payload: unbounded on the wire, size-checked in core.
impl From<nssa_core::account::Data> for Data {
    fn from(value: nssa_core::account::Data) -> Self {
        Self(value.into_inner())
    }
}

impl TryFrom<Data> for nssa_core::account::Data {
    type Error = nssa_core::account::data::DataTooBigError;

    fn try_from(value: Data) -> Result<Self, Self::Error> {
        nssa_core::account::Data::try_from(value.0)
    }
}
// ============================================================================
// Commitment and Nullifier conversions
// ============================================================================
// Commitments and nullifiers round-trip through their raw byte-array form;
// the wire types are newtype wrappers over those bytes.
impl From<nssa_core::Commitment> for Commitment {
    fn from(value: nssa_core::Commitment) -> Self {
        Self(value.to_byte_array())
    }
}

impl From<Commitment> for nssa_core::Commitment {
    fn from(value: Commitment) -> Self {
        nssa_core::Commitment::from_byte_array(value.0)
    }
}

impl From<nssa_core::Nullifier> for Nullifier {
    fn from(value: nssa_core::Nullifier) -> Self {
        Self(value.to_byte_array())
    }
}

impl From<Nullifier> for nssa_core::Nullifier {
    fn from(value: Nullifier) -> Self {
        nssa_core::Nullifier::from_byte_array(value.0)
    }
}

// The wire digest type wraps the core digest value directly.
impl From<nssa_core::CommitmentSetDigest> for CommitmentSetDigest {
    fn from(value: nssa_core::CommitmentSetDigest) -> Self {
        Self(value)
    }
}

impl From<CommitmentSetDigest> for nssa_core::CommitmentSetDigest {
    fn from(value: CommitmentSetDigest) -> Self {
        value.0
    }
}
// ============================================================================
// Encryption-related conversions
// ============================================================================
// Ciphertexts round-trip through their inner byte payload.
impl From<nssa_core::encryption::Ciphertext> for Ciphertext {
    fn from(value: nssa_core::encryption::Ciphertext) -> Self {
        Self(value.into_inner())
    }
}

impl From<Ciphertext> for nssa_core::encryption::Ciphertext {
    fn from(value: Ciphertext) -> Self {
        nssa_core::encryption::Ciphertext::from_inner(value.0)
    }
}

impl From<nssa_core::encryption::EphemeralPublicKey> for EphemeralPublicKey {
    fn from(value: nssa_core::encryption::EphemeralPublicKey) -> Self {
        Self(value.0)
    }
}

// NOTE(review): constructs a `Secp256k1Point` where the trait target is
// `EphemeralPublicKey` — presumably the latter is a type alias of the
// former; confirm in nssa_core.
impl From<EphemeralPublicKey> for nssa_core::encryption::EphemeralPublicKey {
    fn from(value: EphemeralPublicKey) -> Self {
        nssa_core::encryption::shared_key_derivation::Secp256k1Point(value.0)
    }
}
// ============================================================================
// Signature and PublicKey conversions
// ============================================================================
// Signatures convert losslessly in both directions (same inner value).
impl From<nssa::Signature> for Signature {
    fn from(value: nssa::Signature) -> Self {
        let nssa::Signature { value } = value;
        Self(value)
    }
}

impl From<Signature> for nssa::Signature {
    fn from(value: Signature) -> Self {
        let Signature(sig_value) = value;
        nssa::Signature { value: sig_value }
    }
}

impl From<nssa::PublicKey> for PublicKey {
    fn from(value: nssa::PublicKey) -> Self {
        Self(*value.value())
    }
}

// wire -> nssa public key is fallible: the raw bytes must form a valid key
// (`PublicKey::try_new`).
impl TryFrom<PublicKey> for nssa::PublicKey {
    type Error = nssa::error::NssaError;

    fn try_from(value: PublicKey) -> Result<Self, Self::Error> {
        nssa::PublicKey::try_new(value.0)
    }
}

// ============================================================================
// Proof conversions
// ============================================================================

// Proofs are treated as opaque byte blobs on both sides.
impl From<nssa::privacy_preserving_transaction::circuit::Proof> for Proof {
    fn from(value: nssa::privacy_preserving_transaction::circuit::Proof) -> Self {
        Self(value.into_inner())
    }
}

impl From<Proof> for nssa::privacy_preserving_transaction::circuit::Proof {
    fn from(value: Proof) -> Self {
        nssa::privacy_preserving_transaction::circuit::Proof::from_inner(value.0)
    }
}
// ============================================================================
// EncryptedAccountData conversions
// ============================================================================
// Encrypted account payloads have identical field sets on both sides, so
// each direction is a per-field `into()`.
impl From<nssa::privacy_preserving_transaction::message::EncryptedAccountData>
    for EncryptedAccountData
{
    fn from(value: nssa::privacy_preserving_transaction::message::EncryptedAccountData) -> Self {
        Self {
            ciphertext: value.ciphertext.into(),
            epk: value.epk.into(),
            view_tag: value.view_tag,
        }
    }
}

impl From<EncryptedAccountData>
    for nssa::privacy_preserving_transaction::message::EncryptedAccountData
{
    fn from(value: EncryptedAccountData) -> Self {
        Self {
            ciphertext: value.ciphertext.into(),
            epk: value.epk.into(),
            view_tag: value.view_tag,
        }
    }
}
// ============================================================================
// Transaction Message conversions
// ============================================================================
// Public transaction message <-> wire form: field-for-field, with account
// ids mapped through their own conversions.
impl From<nssa::public_transaction::Message> for PublicMessage {
    fn from(value: nssa::public_transaction::Message) -> Self {
        let nssa::public_transaction::Message {
            program_id,
            account_ids,
            nonces,
            instruction_data,
        } = value;
        Self {
            program_id,
            account_ids: account_ids.into_iter().map(Into::into).collect(),
            nonces,
            instruction_data,
        }
    }
}

impl From<PublicMessage> for nssa::public_transaction::Message {
    fn from(value: PublicMessage) -> Self {
        let PublicMessage {
            program_id,
            account_ids,
            nonces,
            instruction_data,
        } = value;
        // `new_preserialized` presumably keeps the instruction bytes as-is
        // (no re-encoding) — TODO confirm against nssa.
        Self::new_preserialized(
            program_id,
            account_ids.into_iter().map(Into::into).collect(),
            nonces,
            instruction_data,
        )
    }
}
// Privacy-preserving message -> wire form: every collection is mapped
// element-wise through the corresponding wire conversions.
impl From<nssa::privacy_preserving_transaction::message::Message> for PrivacyPreservingMessage {
    fn from(value: nssa::privacy_preserving_transaction::message::Message) -> Self {
        let nssa::privacy_preserving_transaction::message::Message {
            public_account_ids,
            nonces,
            public_post_states,
            encrypted_private_post_states,
            new_commitments,
            new_nullifiers,
        } = value;
        Self {
            public_account_ids: public_account_ids.into_iter().map(Into::into).collect(),
            nonces,
            public_post_states: public_post_states.into_iter().map(Into::into).collect(),
            encrypted_private_post_states: encrypted_private_post_states
                .into_iter()
                .map(Into::into)
                .collect(),
            new_commitments: new_commitments.into_iter().map(Into::into).collect(),
            // Nullifier entries are (nullifier, digest) pairs; convert each half.
            new_nullifiers: new_nullifiers
                .into_iter()
                .map(|(n, d)| (n.into(), d.into()))
                .collect(),
        }
    }
}

// Wire -> nssa message is fallible only through the post-state account data
// size check; the first oversized entry aborts the whole conversion.
impl TryFrom<PrivacyPreservingMessage> for nssa::privacy_preserving_transaction::message::Message {
    type Error = nssa_core::account::data::DataTooBigError;

    fn try_from(value: PrivacyPreservingMessage) -> Result<Self, Self::Error> {
        let PrivacyPreservingMessage {
            public_account_ids,
            nonces,
            public_post_states,
            encrypted_private_post_states,
            new_commitments,
            new_nullifiers,
        } = value;
        Ok(Self {
            public_account_ids: public_account_ids.into_iter().map(Into::into).collect(),
            nonces,
            public_post_states: public_post_states
                .into_iter()
                .map(TryInto::try_into)
                .collect::<Result<Vec<_>, _>>()?,
            encrypted_private_post_states: encrypted_private_post_states
                .into_iter()
                .map(Into::into)
                .collect(),
            new_commitments: new_commitments.into_iter().map(Into::into).collect(),
            new_nullifiers: new_nullifiers
                .into_iter()
                .map(|(n, d)| (n.into(), d.into()))
                .collect(),
        })
    }
}
// Program deployment messages carry only the program bytecode.
impl From<nssa::program_deployment_transaction::Message> for ProgramDeploymentMessage {
    fn from(value: nssa::program_deployment_transaction::Message) -> Self {
        Self {
            bytecode: value.into_bytecode(),
        }
    }
}

impl From<ProgramDeploymentMessage> for nssa::program_deployment_transaction::Message {
    fn from(value: ProgramDeploymentMessage) -> Self {
        let ProgramDeploymentMessage { bytecode } = value;
        Self::new(bytecode)
    }
}
// ============================================================================
// WitnessSet conversions
// ============================================================================
// Public-transaction witness sets carry no proof, so there is no faithful
// wire representation; this conversion fails unconditionally by design.
impl TryFrom<nssa::public_transaction::WitnessSet> for WitnessSet {
    type Error = ();

    fn try_from(_value: nssa::public_transaction::WitnessSet) -> Result<Self, Self::Error> {
        // Public transaction witness sets don't have proofs, so we can't convert them directly
        Err(())
    }
}

// Privacy-preserving witness set -> wire form: split into raw parts and map
// each (signature, public key) pair plus the proof.
impl From<nssa::privacy_preserving_transaction::witness_set::WitnessSet> for WitnessSet {
    fn from(value: nssa::privacy_preserving_transaction::witness_set::WitnessSet) -> Self {
        let (sigs_and_pks, proof) = value.into_raw_parts();
        Self {
            signatures_and_public_keys: sigs_and_pks
                .into_iter()
                .map(|(sig, pk)| (sig.into(), pk.into()))
                .collect(),
            proof: proof.into(),
        }
    }
}

// Wire -> nssa witness set is fallible: each public key must validate
// (`pk.try_into()`); the first invalid key aborts the conversion.
impl TryFrom<WitnessSet> for nssa::privacy_preserving_transaction::witness_set::WitnessSet {
    type Error = nssa::error::NssaError;

    fn try_from(value: WitnessSet) -> Result<Self, Self::Error> {
        let WitnessSet {
            signatures_and_public_keys,
            proof,
        } = value;
        let signatures_and_public_keys = signatures_and_public_keys
            .into_iter()
            .map(|(sig, pk)| Ok((sig.into(), pk.try_into()?)))
            .collect::<Result<Vec<_>, Self::Error>>()?;
        Ok(Self::from_raw_parts(
            signatures_and_public_keys,
            proof.into(),
        ))
    }
}
// ============================================================================
// Transaction conversions
// ============================================================================
/// Converts a core public transaction into its protocol representation.
///
/// Core public transactions carry no proof, so the protocol witness set is
/// populated with an empty one.
impl From<nssa::PublicTransaction> for PublicTransaction {
    fn from(value: nssa::PublicTransaction) -> Self {
        let signatures_and_public_keys = value
            .witness_set()
            .signatures_and_public_keys()
            .iter()
            .cloned()
            .map(|(signature, public_key)| (signature.into(), public_key.into()))
            .collect();
        let witness_set = WitnessSet {
            signatures_and_public_keys,
            // Public transactions don't have proofs
            proof: Proof(vec![]),
        };
        Self {
            message: value.message().clone().into(),
            witness_set,
        }
    }
}
/// Converts a protocol public transaction back into the core type.
///
/// The protocol-side proof is discarded (core public transactions have none);
/// conversion fails only when a public key is malformed.
impl TryFrom<PublicTransaction> for nssa::PublicTransaction {
    type Error = nssa::error::NssaError;
    fn try_from(value: PublicTransaction) -> Result<Self, Self::Error> {
        let pairs = value.witness_set.signatures_and_public_keys;
        let mut signatures_and_public_keys = Vec::with_capacity(pairs.len());
        for (signature, public_key) in pairs {
            signatures_and_public_keys.push((signature.into(), public_key.try_into()?));
        }
        let witness_set =
            nssa::public_transaction::WitnessSet::from_raw_parts(signatures_and_public_keys);
        Ok(Self::new(value.message.into(), witness_set))
    }
}
/// Converts a core privacy-preserving transaction into its protocol representation.
impl From<nssa::PrivacyPreservingTransaction> for PrivacyPreservingTransaction {
    fn from(value: nssa::PrivacyPreservingTransaction) -> Self {
        let message = value.message().clone().into();
        let witness_set = value.witness_set().clone().into();
        Self {
            message,
            witness_set,
        }
    }
}
/// Converts a protocol privacy-preserving transaction back into the core type.
///
/// Fails when the message is rejected by the core conversion (oversized data)
/// or the witness set contains a malformed public key.
impl TryFrom<PrivacyPreservingTransaction> for nssa::PrivacyPreservingTransaction {
    type Error = nssa::error::NssaError;
    fn try_from(value: PrivacyPreservingTransaction) -> Result<Self, Self::Error> {
        let message = value.message.try_into().map_err(|_| {
            nssa::error::NssaError::InvalidInput("Data too big error".to_string())
        })?;
        let witness_set = value.witness_set.try_into()?;
        Ok(Self::new(message, witness_set))
    }
}
/// Converts a core program deployment transaction into its protocol representation.
impl From<nssa::ProgramDeploymentTransaction> for ProgramDeploymentTransaction {
    fn from(value: nssa::ProgramDeploymentTransaction) -> Self {
        let message = value.into_message().into();
        Self { message }
    }
}
/// Converts a protocol program deployment transaction back into the core type.
impl From<ProgramDeploymentTransaction> for nssa::ProgramDeploymentTransaction {
    fn from(value: ProgramDeploymentTransaction) -> Self {
        Self::new(value.message.into())
    }
}
/// Converts a core transaction of any kind into the matching protocol variant.
impl From<common::transaction::NSSATransaction> for Transaction {
    fn from(value: common::transaction::NSSATransaction) -> Self {
        use common::transaction::NSSATransaction;
        match value {
            NSSATransaction::Public(tx) => Self::Public(tx.into()),
            NSSATransaction::PrivacyPreserving(tx) => Self::PrivacyPreserving(tx.into()),
            NSSATransaction::ProgramDeployment(tx) => Self::ProgramDeployment(tx.into()),
        }
    }
}
/// Converts a protocol transaction back into the core representation.
///
/// Only the public and privacy-preserving variants can fail; program
/// deployment conversion is infallible.
impl TryFrom<Transaction> for common::transaction::NSSATransaction {
    type Error = nssa::error::NssaError;
    fn try_from(value: Transaction) -> Result<Self, Self::Error> {
        let converted = match value {
            Transaction::Public(tx) => Self::Public(tx.try_into()?),
            Transaction::PrivacyPreserving(tx) => Self::PrivacyPreserving(tx.try_into()?),
            Transaction::ProgramDeployment(tx) => Self::ProgramDeployment(tx.into()),
        };
        Ok(converted)
    }
}
// ============================================================================
// Block conversions
// ============================================================================
/// Converts a core block header into its protocol representation, wrapping the
/// raw hash bytes in the protocol `Hash` newtype.
impl From<common::block::BlockHeader> for BlockHeader {
    fn from(value: common::block::BlockHeader) -> Self {
        Self {
            block_id: value.block_id,
            prev_block_hash: Hash(value.prev_block_hash),
            hash: Hash(value.hash),
            timestamp: value.timestamp,
            signature: value.signature.into(),
        }
    }
}
/// Converts a protocol block header back into the core type.
///
/// This conversion never actually produces an error; it is `TryFrom` only to
/// mirror the fallible conversions of the other block components.
impl TryFrom<BlockHeader> for common::block::BlockHeader {
    type Error = nssa::error::NssaError;
    fn try_from(value: BlockHeader) -> Result<Self, Self::Error> {
        Ok(Self {
            block_id: value.block_id,
            prev_block_hash: value.prev_block_hash.0,
            hash: value.hash.0,
            timestamp: value.timestamp,
            signature: value.signature.into(),
        })
    }
}
/// Converts a core block body into its protocol representation by Borsh-decoding
/// each stored transaction according to its declared kind.
///
/// Fails with the underlying `std::io::Error` if any transaction's encoded bytes
/// cannot be decoded into the type implied by its `tx_kind`.
impl TryFrom<common::block::BlockBody> for BlockBody {
    type Error = std::io::Error;
    fn try_from(value: common::block::BlockBody) -> Result<Self, Self::Error> {
        // Note: EncodedTransaction doesn't have a direct conversion to NSSATransaction
        // This conversion will decode and re-encode the transactions
        use borsh::BorshDeserialize as _;
        let common::block::BlockBody { transactions } = value;
        let transactions = transactions
            .into_iter()
            .map(|encoded_tx| match encoded_tx.tx_kind {
                // Each arm decodes with the concrete core type matching the tag,
                // then wraps the result in the corresponding protocol variant.
                common::transaction::TxKind::Public => {
                    nssa::PublicTransaction::try_from_slice(&encoded_tx.encoded_transaction_data)
                        .map(|tx| Transaction::Public(tx.into()))
                }
                common::transaction::TxKind::PrivacyPreserving => {
                    nssa::PrivacyPreservingTransaction::try_from_slice(
                        &encoded_tx.encoded_transaction_data,
                    )
                    .map(|tx| Transaction::PrivacyPreserving(tx.into()))
                }
                common::transaction::TxKind::ProgramDeployment => {
                    nssa::ProgramDeploymentTransaction::try_from_slice(
                        &encoded_tx.encoded_transaction_data,
                    )
                    .map(|tx| Transaction::ProgramDeployment(tx.into()))
                }
            })
            // Short-circuits on the first decoding failure.
            .collect::<Result<Vec<_>, _>>()?;
        Ok(Self { transactions })
    }
}
/// Converts a protocol block body back into the core representation,
/// re-encoding each transaction on the way.
impl TryFrom<BlockBody> for common::block::BlockBody {
    type Error = nssa::error::NssaError;
    fn try_from(value: BlockBody) -> Result<Self, Self::Error> {
        let mut transactions = Vec::with_capacity(value.transactions.len());
        for tx in value.transactions {
            let nssa_tx: common::transaction::NSSATransaction = tx.try_into()?;
            transactions.push(nssa_tx.into());
        }
        Ok(Self { transactions })
    }
}
/// Converts a core block into its protocol representation.
///
/// Fails if any transaction in the body cannot be decoded from its stored bytes.
impl TryFrom<common::block::Block> for Block {
    type Error = std::io::Error;
    fn try_from(value: common::block::Block) -> Result<Self, Self::Error> {
        let body = value.body.try_into()?;
        Ok(Self {
            header: value.header.into(),
            body,
            bedrock_status: value.bedrock_status.into(),
            bedrock_parent_id: MantleMsgId(value.bedrock_parent_id),
        })
    }
}
/// Converts a protocol block back into the core type, delegating to the
/// fallible header and body conversions.
impl TryFrom<Block> for common::block::Block {
    type Error = nssa::error::NssaError;
    fn try_from(value: Block) -> Result<Self, Self::Error> {
        let header = value.header.try_into()?;
        let body = value.body.try_into()?;
        Ok(Self {
            header,
            body,
            bedrock_status: value.bedrock_status.into(),
            bedrock_parent_id: value.bedrock_parent_id.0,
        })
    }
}
/// Maps a core bedrock status onto the identically-shaped protocol enum.
impl From<common::block::BedrockStatus> for BedrockStatus {
    fn from(value: common::block::BedrockStatus) -> Self {
        use common::block::BedrockStatus as Core;
        match value {
            Core::Pending => Self::Pending,
            Core::Safe => Self::Safe,
            Core::Finalized => Self::Finalized,
        }
    }
}
impl From<BedrockStatus> for common::block::BedrockStatus {
fn from(value: BedrockStatus) -> Self {
match value {
BedrockStatus::Pending => Self::Pending,
BedrockStatus::Safe => Self::Safe,
BedrockStatus::Finalized => Self::Finalized,
}
}
}

View File

@ -0,0 +1,238 @@
//! This crate defines the protocol types used by the indexer service.
//!
//! Currently it mostly mimics types from `nssa_core`, but it's important to have a separate crate
//! to define a stable interface for the indexer service RPCs which evolves in its own way.
use schemars::JsonSchema;
use serde::{Deserialize, Serialize};
#[cfg(feature = "convert")]
mod convert;
/// Account nonce value.
pub type Nonce = u128;
/// Program identifier, represented as eight 32-bit words.
pub type ProgramId = [u32; 8];
/// 32-byte account identifier; serialized as a base64 string in JSON.
#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash, Serialize, Deserialize, JsonSchema)]
pub struct AccountId {
    #[serde(with = "base64::arr")]
    #[schemars(with = "String", description = "base64-encoded account ID")]
    pub value: [u8; 32],
}
/// State of a single account: owning program, balance, opaque data and nonce.
#[derive(Debug, Clone, PartialEq, Eq, Hash, Serialize, Deserialize, JsonSchema)]
pub struct Account {
    /// Program owning this account.
    pub program_owner: ProgramId,
    pub balance: u128,
    /// Opaque account payload; see [`Data`].
    pub data: Data,
    pub nonce: Nonce,
}
/// Sequential block identifier.
pub type BlockId = u64;
/// Block timestamp value (unit defined by the producer — confirm with sequencer).
pub type TimeStamp = u64;
/// A block together with its Bedrock anchoring information.
#[derive(Debug, Clone, PartialEq, Eq, Hash, Serialize, Deserialize, JsonSchema)]
pub struct Block {
    pub header: BlockHeader,
    pub body: BlockBody,
    /// Anchoring status of this block on Bedrock.
    pub bedrock_status: BedrockStatus,
    pub bedrock_parent_id: MantleMsgId,
}
/// Block metadata: id, parent/own hashes, timestamp and producer signature.
#[derive(Debug, Clone, PartialEq, Eq, Hash, Serialize, Deserialize, JsonSchema)]
pub struct BlockHeader {
    pub block_id: BlockId,
    pub prev_block_hash: Hash,
    pub hash: Hash,
    pub timestamp: TimeStamp,
    pub signature: Signature,
}
/// 64-byte signature; serialized as a base64 string in JSON.
#[derive(Debug, Clone, PartialEq, Eq, Hash, Serialize, Deserialize, JsonSchema)]
pub struct Signature(
    #[serde(with = "base64::arr")]
    #[schemars(with = "String", description = "base64-encoded signature")]
    pub [u8; 64],
);
/// The ordered list of transactions included in a block.
#[derive(Debug, Clone, PartialEq, Eq, Hash, Serialize, Deserialize, JsonSchema)]
pub struct BlockBody {
    pub transactions: Vec<Transaction>,
}
/// Any transaction kind that can appear in a block body.
#[derive(Debug, Clone, PartialEq, Eq, Hash, Serialize, Deserialize, JsonSchema)]
pub enum Transaction {
    Public(PublicTransaction),
    PrivacyPreserving(PrivacyPreservingTransaction),
    ProgramDeployment(ProgramDeploymentTransaction),
}
/// A transaction operating on public accounts only.
#[derive(Debug, Clone, PartialEq, Eq, Hash, Serialize, Deserialize, JsonSchema)]
pub struct PublicTransaction {
    pub message: PublicMessage,
    pub witness_set: WitnessSet,
}
/// A transaction that also carries private state updates
/// (encrypted post-states, commitments and nullifiers).
#[derive(Debug, Clone, PartialEq, Eq, Hash, Serialize, Deserialize, JsonSchema)]
pub struct PrivacyPreservingTransaction {
    pub message: PrivacyPreservingMessage,
    pub witness_set: WitnessSet,
}
/// Payload of a public transaction: target program, involved accounts,
/// their nonces and the raw instruction data.
#[derive(Debug, Clone, PartialEq, Eq, Hash, Serialize, Deserialize, JsonSchema)]
pub struct PublicMessage {
    pub program_id: ProgramId,
    pub account_ids: Vec<AccountId>,
    pub nonces: Vec<Nonce>,
    pub instruction_data: InstructionData,
}
/// Raw instruction words passed to a program.
pub type InstructionData = Vec<u32>;
/// Payload of a privacy-preserving transaction: public-side effects plus the
/// encrypted private post-states and the commitment/nullifier updates.
#[derive(Debug, Clone, PartialEq, Eq, Hash, Serialize, Deserialize, JsonSchema)]
pub struct PrivacyPreservingMessage {
    pub public_account_ids: Vec<AccountId>,
    pub nonces: Vec<Nonce>,
    pub public_post_states: Vec<Account>,
    pub encrypted_private_post_states: Vec<EncryptedAccountData>,
    pub new_commitments: Vec<Commitment>,
    /// Each nullifier is paired with the commitment-set digest it was computed against.
    pub new_nullifiers: Vec<(Nullifier, CommitmentSetDigest)>,
}
/// Authorization data for a transaction: signature/key pairs and a proof.
#[derive(Debug, Clone, PartialEq, Eq, Hash, Serialize, Deserialize, JsonSchema)]
pub struct WitnessSet {
    pub signatures_and_public_keys: Vec<(Signature, PublicKey)>,
    /// Empty for public transactions, which carry no proof.
    pub proof: Proof,
}
/// Opaque proof bytes; serialized as a base64 string in JSON.
#[derive(Debug, Clone, PartialEq, Eq, Hash, Serialize, Deserialize, JsonSchema)]
pub struct Proof(
    #[serde(with = "base64")]
    #[schemars(with = "String", description = "base64-encoded proof")]
    pub Vec<u8>,
);
/// Encrypted private account state plus the ephemeral key material and view
/// tag accompanying it.
#[derive(Debug, Clone, PartialEq, Eq, Hash, Serialize, Deserialize, JsonSchema)]
pub struct EncryptedAccountData {
    pub ciphertext: Ciphertext,
    /// Ephemeral public key associated with this ciphertext.
    pub epk: EphemeralPublicKey,
    pub view_tag: ViewTag,
}
/// Deploys a new program. Carries only a message — no witness set.
#[derive(Debug, Clone, PartialEq, Eq, Hash, Serialize, Deserialize, JsonSchema)]
pub struct ProgramDeploymentTransaction {
    pub message: ProgramDeploymentMessage,
}
/// One-byte view tag accompanying encrypted account data.
pub type ViewTag = u8;
/// Opaque ciphertext bytes; base64-encoded in JSON.
#[derive(Debug, Clone, PartialEq, Eq, Hash, Serialize, Deserialize, JsonSchema)]
pub struct Ciphertext(
    #[serde(with = "base64")]
    #[schemars(with = "String", description = "base64-encoded ciphertext")]
    pub Vec<u8>,
);
/// 32-byte public key; base64-encoded in JSON.
#[derive(Debug, Clone, PartialEq, Eq, Hash, Serialize, Deserialize, JsonSchema)]
pub struct PublicKey(
    #[serde(with = "base64::arr")]
    #[schemars(with = "String", description = "base64-encoded public key")]
    pub [u8; 32],
);
/// Variable-length ephemeral public key bytes; base64-encoded in JSON.
#[derive(Debug, Clone, PartialEq, Eq, Hash, Serialize, Deserialize, JsonSchema)]
pub struct EphemeralPublicKey(
    #[serde(with = "base64")]
    #[schemars(with = "String", description = "base64-encoded ephemeral public key")]
    pub Vec<u8>,
);
/// 32-byte commitment to private account state; base64-encoded in JSON.
#[derive(Debug, Clone, PartialEq, Eq, Hash, Serialize, Deserialize, JsonSchema)]
pub struct Commitment(
    #[serde(with = "base64::arr")]
    #[schemars(with = "String", description = "base64-encoded commitment")]
    pub [u8; 32],
);
/// 32-byte nullifier; base64-encoded in JSON.
#[derive(Debug, Clone, PartialEq, Eq, Hash, Serialize, Deserialize, JsonSchema)]
pub struct Nullifier(
    #[serde(with = "base64::arr")]
    #[schemars(with = "String", description = "base64-encoded nullifier")]
    pub [u8; 32],
);
/// 32-byte digest of a commitment set; base64-encoded in JSON.
#[derive(Debug, Clone, PartialEq, Eq, Hash, Serialize, Deserialize, JsonSchema)]
pub struct CommitmentSetDigest(
    #[serde(with = "base64::arr")]
    #[schemars(with = "String", description = "base64-encoded commitment set digest")]
    pub [u8; 32],
);
/// Message carrying the bytecode of a program to deploy.
#[derive(Debug, Clone, PartialEq, Eq, Hash, Serialize, Deserialize, JsonSchema)]
pub struct ProgramDeploymentMessage {
    #[serde(with = "base64")]
    #[schemars(with = "String", description = "base64-encoded program bytecode")]
    pub bytecode: Vec<u8>,
}
/// Opaque account data bytes; base64-encoded in JSON.
#[derive(Debug, Clone, PartialEq, Eq, Hash, Serialize, Deserialize, JsonSchema)]
pub struct Data(
    #[serde(with = "base64")]
    #[schemars(with = "String", description = "base64-encoded account data")]
    pub Vec<u8>,
);
/// 32-byte hash; base64-encoded in JSON.
#[derive(Debug, Clone, PartialEq, Eq, Hash, Serialize, Deserialize, JsonSchema)]
pub struct Hash(
    #[serde(with = "base64::arr")]
    #[schemars(with = "String", description = "base64-encoded hash")]
    pub [u8; 32],
);
/// 32-byte identifier of a Bedrock (Mantle) message; base64-encoded in JSON.
#[derive(Debug, Clone, PartialEq, Eq, Hash, Serialize, Deserialize, JsonSchema)]
pub struct MantleMsgId(
    #[serde(with = "base64::arr")]
    #[schemars(with = "String", description = "base64-encoded Bedrock message id")]
    pub [u8; 32],
);
/// Anchoring state of a block on Bedrock.
#[derive(Debug, Clone, PartialEq, Eq, Hash, Serialize, Deserialize, JsonSchema)]
pub enum BedrockStatus {
    Pending,
    Safe,
    Finalized,
}
/// Serde helpers that (de)serialize byte payloads as standard base64 strings.
mod base64 {
    use base64::prelude::{BASE64_STANDARD, Engine as _};
    use serde::{Deserialize, Deserializer, Serialize, Serializer};
    /// Variants for fixed-size byte arrays (`[u8; N]`): shares the `Vec<u8>`
    /// implementation and enforces the expected length on deserialization.
    pub mod arr {
        use super::*;
        pub fn serialize<S: Serializer>(v: &[u8], s: S) -> Result<S::Ok, S::Error> {
            super::serialize(v, s)
        }
        pub fn deserialize<'de, const N: usize, D: Deserializer<'de>>(
            d: D,
        ) -> Result<[u8; N], D::Error> {
            let bytes = super::deserialize(d)?;
            <[u8; N]>::try_from(bytes).map_err(|_| {
                serde::de::Error::custom(format!("Invalid length, expected {N} bytes"))
            })
        }
    }
    pub fn serialize<S: Serializer>(v: &[u8], s: S) -> Result<S::Ok, S::Error> {
        BASE64_STANDARD.encode(v).serialize(s)
    }
    pub fn deserialize<'de, D: Deserializer<'de>>(d: D) -> Result<Vec<u8>, D::Error> {
        let encoded = String::deserialize(d)?;
        BASE64_STANDARD
            .decode(encoded)
            .map_err(serde::de::Error::custom)
    }
}

View File

@ -0,0 +1,15 @@
# Crate defining the indexer service JSON-RPC interface (trait + generated client/server).
[package]
name = "indexer_service_rpc"
version = "0.1.0"
edition = "2024"
[dependencies]
indexer_service_protocol = { workspace = true }
# `macros` is required for the `#[rpc]` proc macro used to define the API trait.
jsonrpsee = { workspace = true, features = ["macros"] }
serde_json.workspace = true
schemars.workspace = true
[features]
# At least one of these must be enabled; the crate emits a compile error otherwise.
client = ["jsonrpsee/client"]
server = ["jsonrpsee/server"]

View File

@ -0,0 +1,40 @@
use indexer_service_protocol::{Account, AccountId, Block, BlockId, Hash, Transaction};
use jsonrpsee::{core::SubscriptionResult, proc_macros::rpc, types::ErrorObjectOwned};
#[cfg(all(not(feature = "server"), not(feature = "client")))]
compile_error!("At least one of `server` or `client` features must be enabled.");
#[cfg_attr(feature = "server", rpc(server))]
#[cfg_attr(feature = "client", rpc(client))]
pub trait Rpc {
#[method(name = "get_schema")]
fn get_schema(&self) -> Result<serde_json::Value, ErrorObjectOwned> {
// TODO: Canonical solution would be to provide `describe` method returning OpenRPC spec,
// But for now it's painful to implement, although can be done if really needed.
// Currently we can wait until we can auto-generated it: https://github.com/paritytech/jsonrpsee/issues/737
// and just return JSON schema.
// Block schema contains all other types used in the protocol, so it's sufficient to return
// its schema.
let block_schema = schemars::schema_for!(Block);
Ok(serde_json::to_value(block_schema).expect("Schema serialization should not fail"))
}
#[subscription(name = "subscribeToBlocks", item = Vec<Block>)]
async fn subscribe_to_blocks(&self, from: BlockId) -> SubscriptionResult;
#[method(name = "getBlockById")]
async fn get_block_by_id(&self, block_id: BlockId) -> Result<Block, ErrorObjectOwned>;
#[method(name = "getBlockByHash")]
async fn get_block_by_hash(&self, block_hash: Hash) -> Result<Block, ErrorObjectOwned>;
#[method(name = "getLastBlockId")]
async fn get_last_block_id(&self) -> Result<BlockId, ErrorObjectOwned>;
#[method(name = "getAccount")]
async fn get_account(&self, account_id: AccountId) -> Result<Account, ErrorObjectOwned>;
#[method(name = "getTransaction")]
async fn get_transaction(&self, tx_hash: Hash) -> Result<Transaction, ErrorObjectOwned>;
}

View File

@ -0,0 +1 @@
pub mod service;

View File

@ -0,0 +1,72 @@
use std::net::SocketAddr;
use anyhow::{Context as _, Result};
use clap::Parser;
use indexer_service_rpc::RpcServer as _;
use jsonrpsee::server::Server;
use log::{error, info};
use tokio_util::sync::CancellationToken;
/// Command-line arguments for the indexer service binary.
#[derive(Debug, Parser)]
#[clap(version)]
struct Args {
    /// TCP port the JSON-RPC server listens on.
    #[clap(short, long, default_value = "8779")]
    port: u16,
}
/// Entry point: starts the RPC server and runs until Ctrl-C is received or the
/// server stops on its own.
#[tokio::main]
async fn main() -> Result<()> {
    env_logger::init();
    let args = Args::parse();
    let shutdown = listen_for_shutdown_signal();
    let server_handle = run_server(args.port).await?;
    // `stopped` consumes the handle, so wait on a clone and keep the original alive.
    let stopped = server_handle.clone().stopped();
    tokio::select! {
        _ = shutdown.cancelled() => {
            info!("Shutting down server...");
        }
        _ = stopped => {
            error!("Server stopped unexpectedly");
        }
    }
    info!("Server shutdown complete");
    Ok(())
}
/// Builds and starts the JSON-RPC server bound to `0.0.0.0:<port>`, returning
/// the handle used to stop or await it.
async fn run_server(port: u16) -> Result<jsonrpsee::server::ServerHandle> {
    let bind_addr = SocketAddr::from(([0, 0, 0, 0], port));
    let server = Server::builder()
        .build(bind_addr)
        .await
        .context("Failed to build RPC server")?;
    let addr = server
        .local_addr()
        .context("Failed to get local address of RPC server")?;
    info!("Starting Indexer Service RPC server on {addr}");
    Ok(server.start(indexer_service::service::IndexerService.into_rpc()))
}
fn listen_for_shutdown_signal() -> CancellationToken {
let cancellation_token = CancellationToken::new();
let cancellation_token_clone = cancellation_token.clone();
tokio::spawn(async move {
if let Err(err) = tokio::signal::ctrl_c().await {
error!("Failed to listen for Ctrl-C signal: {err}");
return;
}
info!("Received Ctrl-C signal");
cancellation_token_clone.cancel();
});
cancellation_token
}

View File

@ -0,0 +1,36 @@
use indexer_service_protocol::{Account, AccountId, Block, BlockId, Hash, Transaction};
use jsonrpsee::{core::SubscriptionResult, types::ErrorObjectOwned};
/// Stub implementation of the indexer JSON-RPC server.
///
/// Every method currently panics via `todo!()`; real storage-backed handlers
/// are yet to be implemented.
pub struct IndexerService;
// `async_trait` is required by `jsonrpsee`
#[async_trait::async_trait]
impl indexer_service_rpc::RpcServer for IndexerService {
    // Subscription stub: should stream blocks starting from `_from`.
    async fn subscribe_to_blocks(
        &self,
        _subscription_sink: jsonrpsee::PendingSubscriptionSink,
        _from: BlockId,
    ) -> SubscriptionResult {
        todo!()
    }
    async fn get_block_by_id(&self, _block_id: BlockId) -> Result<Block, ErrorObjectOwned> {
        todo!()
    }
    async fn get_block_by_hash(&self, _block_hash: Hash) -> Result<Block, ErrorObjectOwned> {
        todo!()
    }
    async fn get_last_block_id(&self) -> Result<BlockId, ErrorObjectOwned> {
        todo!()
    }
    async fn get_account(&self, _account_id: AccountId) -> Result<Account, ErrorObjectOwned> {
        todo!()
    }
    async fn get_transaction(&self, _tx_hash: Hash) -> Result<Transaction, ErrorObjectOwned> {
        todo!()
    }
}

View File

@ -11,6 +11,8 @@ sequencer_runner.workspace = true
wallet.workspace = true
common.workspace = true
key_protocol.workspace = true
indexer_core.workspace = true
url.workspace = true
anyhow.workspace = true
env_logger.workspace = true

View File

@ -0,0 +1,17 @@
{
"bedrock_client_config": {
"addr": "http://127.0.0.1:8080",
"auth": {
"username": "user"
}
},
"channel_id": "0101010101010101010101010101010101010101010101010101010101010101",
"backoff": {
"max_retries": 10,
"start_delay_millis": 100
},
"resubscribe_interval_millis": 1000,
"sequencer_client_config": {
"addr": "will_be_replaced_in_runtime"
}
}

View File

@ -0,0 +1,165 @@
{
"home": "",
"override_rust_log": null,
"genesis_id": 1,
"is_genesis_random": true,
"max_num_tx_in_block": 20,
"mempool_max_size": 10000,
"block_create_timeout_millis": 10000,
"port": 0,
"initial_accounts": [
{
"account_id": "BLgCRDXYdQPMMWVHYRFGQZbgeHx9frkipa8GtpG2Syqy",
"balance": 10000
},
{
"account_id": "Gj1mJy5W7J5pfmLRujmQaLfLMWidNxQ6uwnhb666ZwHw",
"balance": 20000
}
],
"initial_commitments": [
{
"npk": [
63,
202,
178,
231,
183,
82,
237,
212,
216,
221,
215,
255,
153,
101,
177,
161,
254,
210,
128,
122,
54,
190,
230,
151,
183,
64,
225,
229,
113,
1,
228,
97
],
"account": {
"program_owner": [
0,
0,
0,
0,
0,
0,
0,
0
],
"balance": 10000,
"data": [],
"nonce": 0
}
},
{
"npk": [
192,
251,
166,
243,
167,
236,
84,
249,
35,
136,
130,
172,
219,
225,
161,
139,
229,
89,
243,
125,
194,
213,
209,
30,
23,
174,
100,
244,
124,
74,
140,
47
],
"account": {
"program_owner": [
0,
0,
0,
0,
0,
0,
0,
0
],
"balance": 20000,
"data": [],
"nonce": 0
}
}
],
"signing_key": [
37,
37,
37,
37,
37,
37,
37,
37,
37,
37,
37,
37,
37,
37,
37,
37,
37,
37,
37,
37,
37,
37,
37,
37,
37,
37,
37,
37,
37,
37,
37,
37
],
"bedrock_config": {
"channel_id": "0101010101010101010101010101010101010101010101010101010101010101",
"node_url": "http://127.0.0.1:8080",
"auth": {
"username": "user"
}
}
}

View File

@ -6,6 +6,7 @@
"max_num_tx_in_block": 20,
"mempool_max_size": 10000,
"block_create_timeout_millis": 10000,
"retry_pending_blocks_timeout_millis": 240000,
"port": 0,
"initial_accounts": [
{
@ -155,4 +156,4 @@
37,
37
]
}
}

View File

@ -3,19 +3,21 @@
use std::{net::SocketAddr, path::PathBuf, sync::LazyLock};
use actix_web::dev::ServerHandle;
use anyhow::{Context as _, Result};
use anyhow::{Context, Result};
use base64::{Engine, engine::general_purpose::STANDARD as BASE64};
use common::{
sequencer_client::SequencerClient,
transaction::{EncodedTransaction, NSSATransaction},
};
use futures::FutureExt as _;
use indexer_core::{IndexerCore, config::IndexerConfig};
use log::debug;
use nssa::PrivacyPreservingTransaction;
use nssa_core::Commitment;
use sequencer_core::config::SequencerConfig;
use tempfile::TempDir;
use tokio::task::JoinHandle;
use url::Url;
use wallet::{WalletCore, config::WalletConfigOverrides};
// TODO: Remove this and control time from tests
@ -38,6 +40,8 @@ static LOGGER: LazyLock<()> = LazyLock::new(env_logger::init);
pub struct TestContext {
sequencer_server_handle: ServerHandle,
sequencer_loop_handle: JoinHandle<Result<()>>,
sequencer_retry_pending_blocks_handle: JoinHandle<Result<()>>,
indexer_loop_handle: Option<JoinHandle<Result<()>>>,
sequencer_client: SequencerClient,
wallet: WalletCore,
_temp_sequencer_dir: TempDir,
@ -45,33 +49,61 @@ pub struct TestContext {
}
impl TestContext {
/// Create new test context.
/// Create new test context in detached mode. Default.
pub async fn new() -> Result<Self> {
let manifest_dir = env!("CARGO_MANIFEST_DIR");
let sequencer_config_path =
PathBuf::from(manifest_dir).join("configs/sequencer/sequencer_config.json");
PathBuf::from(manifest_dir).join("configs/sequencer/detached/sequencer_config.json");
let sequencer_config = SequencerConfig::from_path(&sequencer_config_path)
.context("Failed to create sequencer config from file")?;
Self::new_with_sequencer_config(sequencer_config).await
Self::new_with_sequencer_and_maybe_indexer_configs(sequencer_config, None).await
}
/// Create new test context with custom sequencer config.
/// Create new test context in local bedrock node attached mode.
pub async fn new_bedrock_local_attached() -> Result<Self> {
let manifest_dir = env!("CARGO_MANIFEST_DIR");
let sequencer_config_path = PathBuf::from(manifest_dir)
.join("configs/sequencer/bedrock_local_attached/sequencer_config.json");
let sequencer_config = SequencerConfig::from_path(&sequencer_config_path)
.context("Failed to create sequencer config from file")?;
let indexer_config_path =
PathBuf::from(manifest_dir).join("configs/indexer/indexer_config.json");
let indexer_config = IndexerConfig::from_path(&indexer_config_path)
.context("Failed to create indexer config from file")?;
Self::new_with_sequencer_and_maybe_indexer_configs(sequencer_config, Some(indexer_config))
.await
}
/// Create new test context with custom sequencer config and maybe indexer config.
///
/// `home` and `port` fields of the provided config will be overridden to meet tests parallelism
/// requirements.
pub async fn new_with_sequencer_config(sequencer_config: SequencerConfig) -> Result<Self> {
pub async fn new_with_sequencer_and_maybe_indexer_configs(
sequencer_config: SequencerConfig,
indexer_config: Option<IndexerConfig>,
) -> Result<Self> {
// Ensure logger is initialized only once
*LOGGER;
debug!("Test context setup");
let (sequencer_server_handle, sequencer_addr, sequencer_loop_handle, temp_sequencer_dir) =
Self::setup_sequencer(sequencer_config)
.await
.context("Failed to setup sequencer")?;
let (
sequencer_server_handle,
sequencer_addr,
sequencer_loop_handle,
sequencer_retry_pending_blocks_handle,
temp_sequencer_dir,
) = Self::setup_sequencer(sequencer_config)
.await
.context("Failed to setup sequencer")?;
// Convert 0.0.0.0 to 127.0.0.1 for client connections
// When binding to port 0, the server binds to 0.0.0.0:<random_port>
@ -86,22 +118,54 @@ impl TestContext {
.await
.context("Failed to setup wallet")?;
let sequencer_client =
SequencerClient::new(sequencer_addr).context("Failed to create sequencer client")?;
let sequencer_client = SequencerClient::new(
Url::parse(&sequencer_addr).context("Failed to parse sequencer addr")?,
)
.context("Failed to create sequencer client")?;
Ok(Self {
sequencer_server_handle,
sequencer_loop_handle,
sequencer_client,
wallet,
_temp_sequencer_dir: temp_sequencer_dir,
_temp_wallet_dir: temp_wallet_dir,
})
if let Some(mut indexer_config) = indexer_config {
indexer_config.sequencer_client_config.addr =
Url::parse(&sequencer_addr).context("Failed to parse sequencer addr")?;
let indexer_core = IndexerCore::new(indexer_config)?;
let indexer_loop_handle = Some(tokio::spawn(async move {
indexer_core.subscribe_parse_block_stream().await
}));
Ok(Self {
sequencer_server_handle,
sequencer_loop_handle,
sequencer_retry_pending_blocks_handle,
indexer_loop_handle,
sequencer_client,
wallet,
_temp_sequencer_dir: temp_sequencer_dir,
_temp_wallet_dir: temp_wallet_dir,
})
} else {
Ok(Self {
sequencer_server_handle,
sequencer_loop_handle,
sequencer_retry_pending_blocks_handle,
indexer_loop_handle: None,
sequencer_client,
wallet,
_temp_sequencer_dir: temp_sequencer_dir,
_temp_wallet_dir: temp_wallet_dir,
})
}
}
async fn setup_sequencer(
mut config: SequencerConfig,
) -> Result<(ServerHandle, SocketAddr, JoinHandle<Result<()>>, TempDir)> {
) -> Result<(
ServerHandle,
SocketAddr,
JoinHandle<Result<()>>,
JoinHandle<Result<()>>,
TempDir,
)> {
let temp_sequencer_dir =
tempfile::tempdir().context("Failed to create temp dir for sequencer home")?;
@ -113,13 +177,18 @@ impl TestContext {
// Setting port to 0 lets the OS choose a free port for us
config.port = 0;
let (sequencer_server_handle, sequencer_addr, sequencer_loop_handle) =
sequencer_runner::startup_sequencer(config).await?;
let (
sequencer_server_handle,
sequencer_addr,
sequencer_loop_handle,
sequencer_retry_pending_blocks_handle,
) = sequencer_runner::startup_sequencer(config).await?;
Ok((
sequencer_server_handle,
sequencer_addr,
sequencer_loop_handle,
sequencer_retry_pending_blocks_handle,
temp_sequencer_dir,
))
}
@ -180,6 +249,8 @@ impl Drop for TestContext {
let Self {
sequencer_server_handle,
sequencer_loop_handle,
sequencer_retry_pending_blocks_handle,
indexer_loop_handle,
sequencer_client: _,
wallet: _,
_temp_sequencer_dir,
@ -187,6 +258,10 @@ impl Drop for TestContext {
} = self;
sequencer_loop_handle.abort();
sequencer_retry_pending_blocks_handle.abort();
if let Some(indexer_loop_handle) = indexer_loop_handle {
indexer_loop_handle.abort();
}
// Can't wait here as Drop can't be async, but anyway stop signal should be sent
sequencer_server_handle.stop(true).now_or_never();

View File

@ -0,0 +1,23 @@
use anyhow::Result;
use integration_tests::TestContext;
use log::info;
use tokio::test;
#[ignore = "needs complicated setup"]
#[test]
// To run this test properly, you need nomos node running in the background.
// For instructions in building nomos node, refer to [this](https://github.com/logos-blockchain/logos-blockchain?tab=readme-ov-file#running-a-logos-blockchain-node).
//
// Recommended to run node locally from build binary.
async fn indexer_run_local_node() -> Result<()> {
let _ctx = TestContext::new_bedrock_local_attached().await?;
info!("Let's observe behaviour");
tokio::time::sleep(std::time::Duration::from_secs(180)).await;
// No way to check state of indexer now
// When it will be a service, then it will become possible.
Ok(())
}

View File

@ -11,11 +11,13 @@ use tokio::test;
use wallet::cli::{
Command, SubcommandReturnValue,
account::{AccountSubcommand, NewSubcommand},
programs::pinata::PinataProgramAgnosticSubcommand,
programs::{
native_token_transfer::AuthTransferSubcommand, pinata::PinataProgramAgnosticSubcommand,
},
};
#[test]
async fn claim_pinata_to_public_account() -> Result<()> {
async fn claim_pinata_to_existing_public_account() -> Result<()> {
let mut ctx = TestContext::new().await?;
let pinata_prize = 150;
@ -120,8 +122,26 @@ async fn claim_pinata_to_new_private_account() -> Result<()> {
anyhow::bail!("Expected RegisterAccount return value");
};
let winner_account_id_formatted = format_private_account_id(&winner_account_id.to_string());
// Initialize account under auth transfer program
let command = Command::AuthTransfer(AuthTransferSubcommand::Init {
account_id: winner_account_id_formatted.clone(),
});
wallet::cli::execute_subcommand(ctx.wallet_mut(), command).await?;
info!("Waiting for next block creation");
tokio::time::sleep(Duration::from_secs(TIME_TO_WAIT_FOR_BLOCK_SECONDS)).await;
let new_commitment = ctx
.wallet()
.get_private_account_commitment(&winner_account_id)
.context("Failed to get private account commitment")?;
assert!(verify_commitment_is_in_state(new_commitment, ctx.sequencer_client()).await);
// Claim pinata to the new private account
let command = Command::Pinata(PinataProgramAgnosticSubcommand::Claim {
to: format_private_account_id(&winner_account_id.to_string()),
to: winner_account_id_formatted,
});
let pinata_balance_pre = ctx

View File

@ -25,7 +25,11 @@ pub async fn tps_test() -> Result<()> {
let target_tps = 12;
let tps_test = TpsTestManager::new(target_tps, num_transactions);
let ctx = TestContext::new_with_sequencer_config(tps_test.generate_sequencer_config()).await?;
let ctx = TestContext::new_with_sequencer_and_maybe_indexer_configs(
tps_test.generate_sequencer_config(),
None,
)
.await?;
let target_time = tps_test.target_time();
info!(
@ -185,6 +189,8 @@ impl TpsTestManager {
initial_accounts: initial_public_accounts,
initial_commitments: vec![initial_commitment],
signing_key: [37; 32],
bedrock_config: None,
retry_pending_blocks_timeout_millis: 1000 * 60 * 4,
}
}
}
@ -234,16 +240,16 @@ fn build_privacy_transaction() -> PrivacyPreservingTransaction {
]],
);
let (output, proof) = circuit::execute_and_prove(
&[sender_pre, recipient_pre],
&Program::serialize_instruction(balance_to_move).unwrap(),
&[1, 2],
&[0xdeadbeef1, 0xdeadbeef2],
&[
vec![sender_pre, recipient_pre],
Program::serialize_instruction(balance_to_move).unwrap(),
vec![1, 2],
vec![0xdeadbeef1, 0xdeadbeef2],
vec![
(sender_npk.clone(), sender_ss),
(recipient_npk.clone(), recipient_ss),
],
&[sender_nsk],
&[Some(proof)],
vec![sender_nsk],
vec![Some(proof)],
&program.into(),
)
.unwrap();

View File

@ -24,8 +24,9 @@ risc0-binfmt = "3.0.2"
[dev-dependencies]
test_program_methods.workspace = true
hex-literal = "1.0.0"
env_logger.workspace = true
hex-literal = "1.0.0"
test-case = "3.3.1"
[features]
default = []

View File

@ -15,9 +15,8 @@ pub type Nonce = u128;
/// Account to be used both in public and private contexts
#[derive(
Clone, Default, Eq, PartialEq, Serialize, Deserialize, BorshSerialize, BorshDeserialize,
Debug, Default, Clone, Eq, PartialEq, Serialize, Deserialize, BorshSerialize, BorshDeserialize,
)]
#[cfg_attr(any(feature = "host", test), derive(Debug))]
pub struct Account {
pub program_owner: ProgramId,
pub balance: u128,
@ -25,8 +24,7 @@ pub struct Account {
pub nonce: Nonce,
}
#[derive(Clone, Eq, PartialEq, Serialize, Deserialize)]
#[cfg_attr(any(feature = "host", test), derive(Debug))]
#[derive(Debug, Clone, Eq, PartialEq, Serialize, Deserialize)]
pub struct AccountWithMetadata {
pub account: Account,
pub is_authorized: bool,
@ -45,6 +43,7 @@ impl AccountWithMetadata {
}
#[derive(
Debug,
Default,
Copy,
Clone,
@ -56,7 +55,7 @@ impl AccountWithMetadata {
BorshSerialize,
BorshDeserialize,
)]
#[cfg_attr(any(feature = "host", test), derive(Debug, PartialOrd, Ord))]
#[cfg_attr(any(feature = "host", test), derive(PartialOrd, Ord))]
pub struct AccountId {
value: [u8; 32],
}
@ -69,6 +68,10 @@ impl AccountId {
pub fn value(&self) -> &[u8; 32] {
&self.value
}
pub fn into_value(self) -> [u8; 32] {
self.value
}
}
impl AsRef<[u8]> for AccountId {

View File

@ -5,8 +5,7 @@ use serde::{Deserialize, Serialize};
pub const DATA_MAX_LENGTH_IN_BYTES: usize = 100 * 1024; // 100 KiB
#[derive(Default, Clone, PartialEq, Eq, Serialize, BorshSerialize)]
#[cfg_attr(any(feature = "host", test), derive(Debug))]
#[derive(Debug, Default, Clone, PartialEq, Eq, Serialize, BorshSerialize)]
pub struct Data(Vec<u8>);
impl Data {

View File

@ -5,7 +5,10 @@ use serde::{Deserialize, Serialize};
use crate::{NullifierPublicKey, account::Account};
#[derive(Serialize, Deserialize, BorshSerialize, BorshDeserialize)]
#[cfg_attr(any(feature = "host", test), derive(Debug, Clone, PartialEq, Eq, Hash))]
#[cfg_attr(
any(feature = "host", test),
derive(Debug, Clone, PartialEq, Eq, Hash, PartialOrd, Ord)
)]
pub struct Commitment(pub(super) [u8; 32]);
/// A commitment to all zero data.

View File

@ -69,6 +69,11 @@ impl Commitment {
self.0
}
#[cfg(feature = "host")]
pub fn from_byte_array(bytes: [u8; 32]) -> Self {
Self(bytes)
}
#[cfg(feature = "host")]
pub fn from_cursor(cursor: &mut Cursor<&[u8]>) -> Result<Self, NssaCoreError> {
let mut bytes = [0u8; 32];
@ -89,6 +94,11 @@ impl Nullifier {
self.0
}
#[cfg(feature = "host")]
pub fn from_byte_array(bytes: [u8; 32]) -> Self {
Self(bytes)
}
pub fn from_cursor(cursor: &mut Cursor<&[u8]>) -> Result<Self, NssaCoreError> {
let mut bytes = [0u8; 32];
cursor.read_exact(&mut bytes)?;
@ -106,6 +116,16 @@ impl Ciphertext {
bytes
}
#[cfg(feature = "host")]
pub fn into_inner(self) -> Vec<u8> {
self.0
}
#[cfg(feature = "host")]
pub fn from_inner(inner: Vec<u8>) -> Self {
Self(inner)
}
#[cfg(feature = "host")]
pub fn from_cursor(cursor: &mut Cursor<&[u8]>) -> Result<Self, NssaCoreError> {
let mut u32_bytes = [0; 4];

View File

@ -4,8 +4,8 @@ use serde::{Deserialize, Serialize};
use crate::{Commitment, account::AccountId};
#[derive(Serialize, Deserialize, PartialEq, Eq)]
#[cfg_attr(any(feature = "host", test), derive(Debug, Clone, Hash))]
#[derive(Debug, Serialize, Deserialize, PartialEq, Eq)]
#[cfg_attr(any(feature = "host", test), derive(Clone, Hash))]
pub struct NullifierPublicKey(pub [u8; 32]);
impl From<&NullifierPublicKey> for AccountId {
@ -42,7 +42,10 @@ impl From<&NullifierSecretKey> for NullifierPublicKey {
pub type NullifierSecretKey = [u8; 32];
#[derive(Serialize, Deserialize, BorshSerialize, BorshDeserialize)]
#[cfg_attr(any(feature = "host", test), derive(Debug, Clone, PartialEq, Eq, Hash))]
#[cfg_attr(
any(feature = "host", test),
derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord, Hash)
)]
pub struct Nullifier(pub(super) [u8; 32]);
impl Nullifier {

View File

@ -30,6 +30,20 @@ impl PdaSeed {
}
}
pub fn compute_authorized_pdas(
caller_program_id: Option<ProgramId>,
pda_seeds: &[PdaSeed],
) -> HashSet<AccountId> {
caller_program_id
.map(|caller_program_id| {
pda_seeds
.iter()
.map(|pda_seed| AccountId::from((&caller_program_id, pda_seed)))
.collect()
})
.unwrap_or_default()
}
impl From<(&ProgramId, &PdaSeed)> for AccountId {
fn from(value: (&ProgramId, &PdaSeed)) -> Self {
use risc0_zkvm::sha::{Impl, Sha256};
@ -93,6 +107,13 @@ impl AccountPostState {
}
}
/// Creates a post state that requests ownership of the account
/// if the account's program owner is the default program ID.
pub fn new_claimed_if_default(account: Account) -> Self {
let claim = account.program_owner == DEFAULT_PROGRAM_ID;
Self { account, claim }
}
/// Returns `true` if this post state requests that the account
/// be claimed (owned) by the executing program.
pub fn requires_claim(&self) -> bool {
@ -108,6 +129,11 @@ impl AccountPostState {
pub fn account_mut(&mut self) -> &mut Account {
&mut self.account
}
/// Consumes the post state and returns the underlying account
pub fn into_account(self) -> Account {
self.account
}
}
#[derive(Serialize, Deserialize, Clone)]

View File

@ -1,3 +1,4 @@
use borsh::{BorshDeserialize, BorshSerialize};
use sha2::{Digest, Sha256};
mod default_values;
@ -20,6 +21,7 @@ fn hash_value(value: &Value) -> Node {
}
#[cfg_attr(test, derive(Debug, PartialEq, Eq))]
#[derive(BorshSerialize, BorshDeserialize)]
pub struct MerkleTree {
nodes: Vec<Node>,
capacity: usize,

View File

@ -1,11 +1,11 @@
use std::collections::HashMap;
use std::collections::{HashMap, VecDeque};
use borsh::{BorshDeserialize, BorshSerialize};
use nssa_core::{
MembershipProof, NullifierPublicKey, NullifierSecretKey, PrivacyPreservingCircuitInput,
PrivacyPreservingCircuitOutput, SharedSecretKey,
account::AccountWithMetadata,
program::{InstructionData, ProgramId, ProgramOutput},
program::{ChainedCall, InstructionData, ProgramId, ProgramOutput},
};
use risc0_zkvm::{ExecutorEnv, InnerReceipt, Receipt, default_prover};
@ -20,6 +20,16 @@ use crate::{
#[derive(Debug, Clone, PartialEq, Eq, BorshSerialize, BorshDeserialize)]
pub struct Proof(pub(crate) Vec<u8>);
impl Proof {
pub fn into_inner(self) -> Vec<u8> {
self.0
}
pub fn from_inner(inner: Vec<u8>) -> Self {
Self(inner)
}
}
#[derive(Clone)]
pub struct ProgramWithDependencies {
pub program: Program,
@ -43,27 +53,44 @@ impl From<Program> for ProgramWithDependencies {
}
/// Generates a proof of the execution of a NSSA program inside the privacy preserving execution
/// circuit
/// circuit.
#[expect(clippy::too_many_arguments, reason = "TODO: fix later")]
pub fn execute_and_prove(
pre_states: &[AccountWithMetadata],
instruction_data: &InstructionData,
visibility_mask: &[u8],
private_account_nonces: &[u128],
private_account_keys: &[(NullifierPublicKey, SharedSecretKey)],
private_account_nsks: &[NullifierSecretKey],
private_account_membership_proofs: &[Option<MembershipProof>],
pre_states: Vec<AccountWithMetadata>,
instruction_data: InstructionData,
visibility_mask: Vec<u8>,
private_account_nonces: Vec<u128>,
private_account_keys: Vec<(NullifierPublicKey, SharedSecretKey)>,
private_account_nsks: Vec<NullifierSecretKey>,
private_account_membership_proofs: Vec<Option<MembershipProof>>,
program_with_dependencies: &ProgramWithDependencies,
) -> Result<(PrivacyPreservingCircuitOutput, Proof), NssaError> {
let mut program = &program_with_dependencies.program;
let dependencies = &program_with_dependencies.dependencies;
let mut instruction_data = instruction_data.clone();
let mut pre_states = pre_states.to_vec();
let ProgramWithDependencies {
program,
dependencies,
} = program_with_dependencies;
let mut env_builder = ExecutorEnv::builder();
let mut program_outputs = Vec::new();
for _i in 0..MAX_NUMBER_CHAINED_CALLS {
let inner_receipt = execute_and_prove_program(program, &pre_states, &instruction_data)?;
let initial_call = ChainedCall {
program_id: program.id(),
instruction_data: instruction_data.clone(),
pre_states,
pda_seeds: vec![],
};
let mut chained_calls = VecDeque::from_iter([(initial_call, program)]);
let mut chain_calls_counter = 0;
while let Some((chained_call, program)) = chained_calls.pop_front() {
if chain_calls_counter >= MAX_NUMBER_CHAINED_CALLS {
return Err(NssaError::MaxChainedCallsDepthExceeded);
}
let inner_receipt = execute_and_prove_program(
program,
&chained_call.pre_states,
&chained_call.instruction_data,
)?;
let program_output: ProgramOutput = inner_receipt
.journal
@ -76,39 +103,23 @@ pub fn execute_and_prove(
// Prove circuit.
env_builder.add_assumption(inner_receipt);
// TODO: Remove when multi-chain calls are supported in the circuit
assert!(program_output.chained_calls.len() <= 1);
// TODO: Modify when multi-chain calls are supported in the circuit
if let Some(next_call) = program_output.chained_calls.first() {
program = dependencies
.get(&next_call.program_id)
for new_call in program_output.chained_calls.into_iter().rev() {
let next_program = dependencies
.get(&new_call.program_id)
.ok_or(NssaError::InvalidProgramBehavior)?;
instruction_data = next_call.instruction_data.clone();
// Build post states with metadata for next call
let mut post_states_with_metadata = Vec::new();
for (pre, post) in program_output
.pre_states
.iter()
.zip(program_output.post_states)
{
let mut post_with_metadata = pre.clone();
post_with_metadata.account = post.account().clone();
post_states_with_metadata.push(post_with_metadata);
}
pre_states = next_call.pre_states.clone();
} else {
break;
chained_calls.push_front((new_call, next_program));
}
chain_calls_counter += 1;
}
let circuit_input = PrivacyPreservingCircuitInput {
program_outputs,
visibility_mask: visibility_mask.to_vec(),
private_account_nonces: private_account_nonces.to_vec(),
private_account_keys: private_account_keys.to_vec(),
private_account_nsks: private_account_nsks.to_vec(),
private_account_membership_proofs: private_account_membership_proofs.to_vec(),
visibility_mask,
private_account_nonces,
private_account_keys,
private_account_nsks,
private_account_membership_proofs,
program_id: program_with_dependencies.program.id(),
};
@ -215,13 +226,13 @@ mod tests {
let shared_secret = SharedSecretKey::new(&esk, &recipient_keys.ivk());
let (output, proof) = execute_and_prove(
&[sender, recipient],
&Program::serialize_instruction(balance_to_move).unwrap(),
&[0, 2],
&[0xdeadbeef],
&[(recipient_keys.npk(), shared_secret)],
&[],
&[None],
vec![sender, recipient],
Program::serialize_instruction(balance_to_move).unwrap(),
vec![0, 2],
vec![0xdeadbeef],
vec![(recipient_keys.npk(), shared_secret)],
vec![],
vec![None],
&Program::authenticated_transfer_program().into(),
)
.unwrap();
@ -311,16 +322,16 @@ mod tests {
let shared_secret_2 = SharedSecretKey::new(&esk_2, &recipient_keys.ivk());
let (output, proof) = execute_and_prove(
&[sender_pre.clone(), recipient],
&Program::serialize_instruction(balance_to_move).unwrap(),
&[1, 2],
&[0xdeadbeef1, 0xdeadbeef2],
&[
vec![sender_pre.clone(), recipient],
Program::serialize_instruction(balance_to_move).unwrap(),
vec![1, 2],
vec![0xdeadbeef1, 0xdeadbeef2],
vec![
(sender_keys.npk(), shared_secret_1),
(recipient_keys.npk(), shared_secret_2),
],
&[sender_keys.nsk],
&[commitment_set.get_proof_for(&commitment_sender), None],
vec![sender_keys.nsk],
vec![commitment_set.get_proof_for(&commitment_sender), None],
&program.into(),
)
.unwrap();

View File

@ -45,12 +45,12 @@ impl EncryptedAccountData {
#[derive(Debug, Clone, PartialEq, Eq, BorshSerialize, BorshDeserialize)]
pub struct Message {
pub(crate) public_account_ids: Vec<AccountId>,
pub(crate) nonces: Vec<Nonce>,
pub(crate) public_post_states: Vec<Account>,
pub public_account_ids: Vec<AccountId>,
pub nonces: Vec<Nonce>,
pub public_post_states: Vec<Account>,
pub encrypted_private_post_states: Vec<EncryptedAccountData>,
pub new_commitments: Vec<Commitment>,
pub(crate) new_nullifiers: Vec<(Nullifier, CommitmentSetDigest)>,
pub new_nullifiers: Vec<(Nullifier, CommitmentSetDigest)>,
}
impl Message {

View File

@ -16,7 +16,7 @@ use crate::{
#[derive(Debug, Clone, PartialEq, Eq, BorshSerialize, BorshDeserialize)]
pub struct PrivacyPreservingTransaction {
pub message: Message,
witness_set: WitnessSet,
pub witness_set: WitnessSet,
}
impl PrivacyPreservingTransaction {

View File

@ -46,4 +46,18 @@ impl WitnessSet {
pub fn proof(&self) -> &Proof {
&self.proof
}
pub fn into_raw_parts(self) -> (Vec<(Signature, PublicKey)>, Proof) {
(self.signatures_and_public_keys, self.proof)
}
pub fn from_raw_parts(
signatures_and_public_keys: Vec<(Signature, PublicKey)>,
proof: Proof,
) -> Self {
Self {
signatures_and_public_keys,
proof,
}
}
}

View File

@ -1,3 +1,4 @@
use borsh::{BorshDeserialize, BorshSerialize};
use nssa_core::{
account::AccountWithMetadata,
program::{InstructionData, ProgramId, ProgramOutput},
@ -14,7 +15,7 @@ use crate::{
/// TODO: Make this variable when fees are implemented
const MAX_NUM_CYCLES_PUBLIC_EXECUTION: u64 = 1024 * 1024 * 32; // 32M cycles
#[derive(Clone, Debug, PartialEq, Eq)]
#[derive(Clone, Debug, PartialEq, Eq, BorshSerialize, BorshDeserialize)]
pub struct Program {
id: ProgramId,
elf: Vec<u8>,
@ -226,6 +227,15 @@ mod tests {
}
}
pub fn changer_claimer() -> Self {
use test_program_methods::{CHANGER_CLAIMER_ELF, CHANGER_CLAIMER_ID};
Program {
id: CHANGER_CLAIMER_ID,
elf: CHANGER_CLAIMER_ELF.to_vec(),
}
}
pub fn noop() -> Self {
use test_program_methods::{NOOP_ELF, NOOP_ID};
@ -235,6 +245,17 @@ mod tests {
}
}
pub fn malicious_authorization_changer() -> Self {
use test_program_methods::{
MALICIOUS_AUTHORIZATION_CHANGER_ELF, MALICIOUS_AUTHORIZATION_CHANGER_ID,
};
Program {
id: MALICIOUS_AUTHORIZATION_CHANGER_ID,
elf: MALICIOUS_AUTHORIZATION_CHANGER_ELF.to_vec(),
}
}
pub fn modified_transfer_program() -> Self {
use test_program_methods::MODIFIED_TRANSFER_ELF;
// This unwrap won't panic since the `MODIFIED_TRANSFER_ELF` comes from risc0 build of

View File

@ -9,4 +9,8 @@ impl Message {
pub fn new(bytecode: Vec<u8>) -> Self {
Self { bytecode }
}
pub fn into_bytecode(self) -> Vec<u8> {
self.bytecode
}
}

View File

@ -14,6 +14,10 @@ impl ProgramDeploymentTransaction {
Self { message }
}
pub fn into_message(self) -> Message {
self.message
}
pub(crate) fn validate_and_produce_public_state_diff(
&self,
state: &V02State,

View File

@ -9,10 +9,10 @@ use crate::{AccountId, error::NssaError, program::Program};
#[derive(Debug, Clone, PartialEq, Eq, BorshSerialize, BorshDeserialize)]
pub struct Message {
pub(crate) program_id: ProgramId,
pub(crate) account_ids: Vec<AccountId>,
pub(crate) nonces: Vec<Nonce>,
pub(crate) instruction_data: InstructionData,
pub program_id: ProgramId,
pub account_ids: Vec<AccountId>,
pub nonces: Vec<Nonce>,
pub instruction_data: InstructionData,
}
impl Message {

View File

@ -4,7 +4,7 @@ use borsh::{BorshDeserialize, BorshSerialize};
use log::debug;
use nssa_core::{
account::{Account, AccountId, AccountWithMetadata},
program::{ChainedCall, DEFAULT_PROGRAM_ID, PdaSeed, ProgramId, validate_execution},
program::{ChainedCall, DEFAULT_PROGRAM_ID, validate_execution},
};
use sha2::{Digest, digest::FixedOutput};
@ -119,7 +119,7 @@ impl PublicTransaction {
return Err(NssaError::MaxChainedCallsDepthExceeded);
}
// Check the `program_id` corresponds to a deployed program
// Check that the `program_id` corresponds to a deployed program
let Some(program) = state.programs().get(&chained_call.program_id) else {
return Err(NssaError::InvalidInput("Unknown program".into()));
};
@ -135,12 +135,14 @@ impl PublicTransaction {
chained_call.program_id, program_output
);
let authorized_pdas =
self.compute_authorized_pdas(&caller_program_id, &chained_call.pda_seeds);
let authorized_pdas = nssa_core::program::compute_authorized_pdas(
caller_program_id,
&chained_call.pda_seeds,
);
for pre in &program_output.pre_states {
let account_id = pre.account_id;
// Check that the program output pre_states coinicide with the values in the public
// Check that the program output pre_states coincide with the values in the public
// state or with any modifications to those values during the chain of calls.
let expected_pre = state_diff
.get(&account_id)
@ -198,22 +200,23 @@ impl PublicTransaction {
chain_calls_counter += 1;
}
Ok(state_diff)
}
fn compute_authorized_pdas(
&self,
caller_program_id: &Option<ProgramId>,
pda_seeds: &[PdaSeed],
) -> HashSet<AccountId> {
if let Some(caller_program_id) = caller_program_id {
pda_seeds
.iter()
.map(|pda_seed| AccountId::from((caller_program_id, pda_seed)))
.collect()
} else {
HashSet::new()
// Check that all modified uninitialized accounts where claimed
for post in state_diff.iter().filter_map(|(account_id, post)| {
let pre = state.get_account_by_id(account_id);
if pre.program_owner != DEFAULT_PROGRAM_ID {
return None;
}
if pre == *post {
return None;
}
Some(post)
}) {
if post.program_owner == DEFAULT_PROGRAM_ID {
return Err(NssaError::InvalidProgramBehavior);
}
}
Ok(state_diff)
}
}

View File

@ -37,6 +37,16 @@ impl WitnessSet {
pub fn signatures_and_public_keys(&self) -> &[(Signature, PublicKey)] {
&self.signatures_and_public_keys
}
pub fn into_raw_parts(self) -> Vec<(Signature, PublicKey)> {
self.signatures_and_public_keys
}
pub fn from_raw_parts(signatures_and_public_keys: Vec<(Signature, PublicKey)>) -> Self {
Self {
signatures_and_public_keys,
}
}
}
#[cfg(test)]

View File

@ -8,7 +8,7 @@ use rand::{RngCore, rngs::OsRng};
#[derive(Debug, Clone, PartialEq, Eq, BorshSerialize, BorshDeserialize)]
pub struct Signature {
value: [u8; 64],
pub value: [u8; 64],
}
impl Signature {

View File

@ -1,5 +1,6 @@
use std::collections::{HashMap, HashSet};
use std::collections::{BTreeSet, HashMap, HashSet};
use borsh::{BorshDeserialize, BorshSerialize};
use nssa_core::{
Commitment, CommitmentSetDigest, DUMMY_COMMITMENT, MembershipProof, Nullifier,
account::{Account, AccountId},
@ -15,6 +16,8 @@ use crate::{
pub const MAX_NUMBER_CHAINED_CALLS: usize = 10;
#[derive(BorshSerialize, BorshDeserialize)]
#[cfg_attr(test, derive(Debug, PartialEq, Eq))]
pub(crate) struct CommitmentSet {
merkle_tree: MerkleTree,
commitments: HashMap<Commitment, usize>,
@ -60,8 +63,49 @@ impl CommitmentSet {
}
}
type NullifierSet = HashSet<Nullifier>;
#[cfg_attr(test, derive(Debug, PartialEq, Eq))]
struct NullifierSet(BTreeSet<Nullifier>);
impl NullifierSet {
fn new() -> Self {
Self(BTreeSet::new())
}
fn extend(&mut self, new_nullifiers: Vec<Nullifier>) {
self.0.extend(new_nullifiers);
}
fn contains(&self, nullifier: &Nullifier) -> bool {
self.0.contains(nullifier)
}
}
impl BorshSerialize for NullifierSet {
fn serialize<W: std::io::Write>(&self, writer: &mut W) -> std::io::Result<()> {
self.0.iter().collect::<Vec<_>>().serialize(writer)
}
}
impl BorshDeserialize for NullifierSet {
fn deserialize_reader<R: std::io::Read>(reader: &mut R) -> std::io::Result<Self> {
let vec = Vec::<Nullifier>::deserialize_reader(reader)?;
let mut set = BTreeSet::new();
for n in vec {
if !set.insert(n) {
return Err(std::io::Error::new(
std::io::ErrorKind::InvalidData,
"duplicate nullifier in NullifierSet",
));
}
}
Ok(Self(set))
}
}
#[derive(BorshSerialize, BorshDeserialize)]
#[cfg_attr(test, derive(Debug, PartialEq, Eq))]
pub struct V02State {
public_state: HashMap<AccountId, Account>,
private_state: (CommitmentSet, NullifierSet),
@ -504,6 +548,7 @@ pub mod tests {
self.insert_program(Program::chain_caller());
self.insert_program(Program::amm());
self.insert_program(Program::claimer());
self.insert_program(Program::changer_claimer());
self
}
@ -865,13 +910,13 @@ pub mod tests {
let epk = EphemeralPublicKey::from_scalar(esk);
let (output, proof) = circuit::execute_and_prove(
&[sender, recipient],
&Program::serialize_instruction(balance_to_move).unwrap(),
&[0, 2],
&[0xdeadbeef],
&[(recipient_keys.npk(), shared_secret)],
&[],
&[None],
vec![sender, recipient],
Program::serialize_instruction(balance_to_move).unwrap(),
vec![0, 2],
vec![0xdeadbeef],
vec![(recipient_keys.npk(), shared_secret)],
vec![],
vec![None],
&Program::authenticated_transfer_program().into(),
)
.unwrap();
@ -912,16 +957,16 @@ pub mod tests {
let epk_2 = EphemeralPublicKey::from_scalar(esk_2);
let (output, proof) = circuit::execute_and_prove(
&[sender_pre, recipient_pre],
&Program::serialize_instruction(balance_to_move).unwrap(),
&[1, 2],
&new_nonces,
&[
vec![sender_pre, recipient_pre],
Program::serialize_instruction(balance_to_move).unwrap(),
vec![1, 2],
new_nonces.to_vec(),
vec![
(sender_keys.npk(), shared_secret_1),
(recipient_keys.npk(), shared_secret_2),
],
&[sender_keys.nsk],
&[state.get_proof_for_commitment(&sender_commitment), None],
vec![sender_keys.nsk],
vec![state.get_proof_for_commitment(&sender_commitment), None],
&program.into(),
)
.unwrap();
@ -965,13 +1010,13 @@ pub mod tests {
let epk = EphemeralPublicKey::from_scalar(esk);
let (output, proof) = circuit::execute_and_prove(
&[sender_pre, recipient_pre],
&Program::serialize_instruction(balance_to_move).unwrap(),
&[1, 0],
&[new_nonce],
&[(sender_keys.npk(), shared_secret)],
&[sender_keys.nsk],
&[state.get_proof_for_commitment(&sender_commitment)],
vec![sender_pre, recipient_pre],
Program::serialize_instruction(balance_to_move).unwrap(),
vec![1, 0],
vec![new_nonce],
vec![(sender_keys.npk(), shared_secret)],
vec![sender_keys.nsk],
vec![state.get_proof_for_commitment(&sender_commitment)],
&program.into(),
)
.unwrap();
@ -1179,13 +1224,13 @@ pub mod tests {
);
let result = execute_and_prove(
&[public_account],
&Program::serialize_instruction(10u128).unwrap(),
&[0],
&[],
&[],
&[],
&[],
vec![public_account],
Program::serialize_instruction(10u128).unwrap(),
vec![0],
vec![],
vec![],
vec![],
vec![],
&program.into(),
);
@ -1206,13 +1251,13 @@ pub mod tests {
);
let result = execute_and_prove(
&[public_account],
&Program::serialize_instruction(10u128).unwrap(),
&[0],
&[],
&[],
&[],
&[],
vec![public_account],
Program::serialize_instruction(10u128).unwrap(),
vec![0],
vec![],
vec![],
vec![],
vec![],
&program.into(),
);
@ -1233,13 +1278,13 @@ pub mod tests {
);
let result = execute_and_prove(
&[public_account],
&Program::serialize_instruction(()).unwrap(),
&[0],
&[],
&[],
&[],
&[],
vec![public_account],
Program::serialize_instruction(()).unwrap(),
vec![0],
vec![],
vec![],
vec![],
vec![],
&program.into(),
);
@ -1260,13 +1305,13 @@ pub mod tests {
);
let result = execute_and_prove(
&[public_account],
&Program::serialize_instruction(vec![0]).unwrap(),
&[0],
&[],
&[],
&[],
&[],
vec![public_account],
Program::serialize_instruction(vec![0]).unwrap(),
vec![0],
vec![],
vec![],
vec![],
vec![],
&program.into(),
);
@ -1289,13 +1334,13 @@ pub mod tests {
let large_data: Vec<u8> = vec![0; nssa_core::account::data::DATA_MAX_LENGTH_IN_BYTES + 1];
let result = execute_and_prove(
&[public_account],
&Program::serialize_instruction(large_data).unwrap(),
&[0],
&[],
&[],
&[],
&[],
vec![public_account],
Program::serialize_instruction(large_data).unwrap(),
vec![0],
vec![],
vec![],
vec![],
vec![],
&program.to_owned().into(),
);
@ -1316,13 +1361,13 @@ pub mod tests {
);
let result = execute_and_prove(
&[public_account],
&Program::serialize_instruction(()).unwrap(),
&[0],
&[],
&[],
&[],
&[],
vec![public_account],
Program::serialize_instruction(()).unwrap(),
vec![0],
vec![],
vec![],
vec![],
vec![],
&program.into(),
);
@ -1352,13 +1397,13 @@ pub mod tests {
);
let result = execute_and_prove(
&[public_account_1, public_account_2],
&Program::serialize_instruction(()).unwrap(),
&[0, 0],
&[],
&[],
&[],
&[],
vec![public_account_1, public_account_2],
Program::serialize_instruction(()).unwrap(),
vec![0, 0],
vec![],
vec![],
vec![],
vec![],
&program.into(),
);
@ -1379,13 +1424,13 @@ pub mod tests {
);
let result = execute_and_prove(
&[public_account],
&Program::serialize_instruction(()).unwrap(),
&[0],
&[],
&[],
&[],
&[],
vec![public_account],
Program::serialize_instruction(()).unwrap(),
vec![0],
vec![],
vec![],
vec![],
vec![],
&program.into(),
);
@ -1415,13 +1460,13 @@ pub mod tests {
);
let result = execute_and_prove(
&[public_account_1, public_account_2],
&Program::serialize_instruction(10u128).unwrap(),
&[0, 0],
&[],
&[],
&[],
&[],
vec![public_account_1, public_account_2],
Program::serialize_instruction(10u128).unwrap(),
vec![0, 0],
vec![],
vec![],
vec![],
vec![],
&program.into(),
);
@ -1453,13 +1498,13 @@ pub mod tests {
// Setting only one visibility mask for a circuit execution with two pre_state accounts.
let visibility_mask = [0];
let result = execute_and_prove(
&[public_account_1, public_account_2],
&Program::serialize_instruction(10u128).unwrap(),
&visibility_mask,
&[],
&[],
&[],
&[],
vec![public_account_1, public_account_2],
Program::serialize_instruction(10u128).unwrap(),
visibility_mask.to_vec(),
vec![],
vec![],
vec![],
vec![],
&program.into(),
);
@ -1486,11 +1531,11 @@ pub mod tests {
// Setting only one nonce for an execution with two private accounts.
let private_account_nonces = [0xdeadbeef1];
let result = execute_and_prove(
&[private_account_1, private_account_2],
&Program::serialize_instruction(10u128).unwrap(),
&[1, 2],
&private_account_nonces,
&[
vec![private_account_1, private_account_2],
Program::serialize_instruction(10u128).unwrap(),
vec![1, 2],
private_account_nonces.to_vec(),
vec![
(
sender_keys.npk(),
SharedSecretKey::new(&[55; 32], &sender_keys.ivk()),
@ -1500,8 +1545,8 @@ pub mod tests {
SharedSecretKey::new(&[56; 32], &recipient_keys.ivk()),
),
],
&[sender_keys.nsk],
&[Some((0, vec![]))],
vec![sender_keys.nsk],
vec![Some((0, vec![]))],
&program.into(),
);
@ -1530,13 +1575,13 @@ pub mod tests {
SharedSecretKey::new(&[55; 32], &sender_keys.ivk()),
)];
let result = execute_and_prove(
&[private_account_1, private_account_2],
&Program::serialize_instruction(10u128).unwrap(),
&[1, 2],
&[0xdeadbeef1, 0xdeadbeef2],
&private_account_keys,
&[sender_keys.nsk],
&[Some((0, vec![]))],
vec![private_account_1, private_account_2],
Program::serialize_instruction(10u128).unwrap(),
vec![1, 2],
vec![0xdeadbeef1, 0xdeadbeef2],
private_account_keys.to_vec(),
vec![sender_keys.nsk],
vec![Some((0, vec![]))],
&program.into(),
);
@ -1563,11 +1608,11 @@ pub mod tests {
// Setting no second commitment proof.
let private_account_membership_proofs = [Some((0, vec![]))];
let result = execute_and_prove(
&[private_account_1, private_account_2],
&Program::serialize_instruction(10u128).unwrap(),
&[1, 2],
&[0xdeadbeef1, 0xdeadbeef2],
&[
vec![private_account_1, private_account_2],
Program::serialize_instruction(10u128).unwrap(),
vec![1, 2],
vec![0xdeadbeef1, 0xdeadbeef2],
vec![
(
sender_keys.npk(),
SharedSecretKey::new(&[55; 32], &sender_keys.ivk()),
@ -1577,8 +1622,8 @@ pub mod tests {
SharedSecretKey::new(&[56; 32], &recipient_keys.ivk()),
),
],
&[sender_keys.nsk],
&private_account_membership_proofs,
vec![sender_keys.nsk],
private_account_membership_proofs.to_vec(),
&program.into(),
);
@ -1605,11 +1650,11 @@ pub mod tests {
// Setting no auth key for an execution with one non default private accounts.
let private_account_nsks = [];
let result = execute_and_prove(
&[private_account_1, private_account_2],
&Program::serialize_instruction(10u128).unwrap(),
&[1, 2],
&[0xdeadbeef1, 0xdeadbeef2],
&[
vec![private_account_1, private_account_2],
Program::serialize_instruction(10u128).unwrap(),
vec![1, 2],
vec![0xdeadbeef1, 0xdeadbeef2],
vec![
(
sender_keys.npk(),
SharedSecretKey::new(&[55; 32], &sender_keys.ivk()),
@ -1619,8 +1664,8 @@ pub mod tests {
SharedSecretKey::new(&[56; 32], &recipient_keys.ivk()),
),
],
&private_account_nsks,
&[],
private_account_nsks.to_vec(),
vec![],
&program.into(),
);
@ -1663,13 +1708,13 @@ pub mod tests {
let private_account_nsks = [recipient_keys.nsk];
let private_account_membership_proofs = [Some((0, vec![]))];
let result = execute_and_prove(
&[private_account_1, private_account_2],
&Program::serialize_instruction(10u128).unwrap(),
&[1, 2],
&[0xdeadbeef1, 0xdeadbeef2],
&private_account_keys,
&private_account_nsks,
&private_account_membership_proofs,
vec![private_account_1, private_account_2],
Program::serialize_instruction(10u128).unwrap(),
vec![1, 2],
vec![0xdeadbeef1, 0xdeadbeef2],
private_account_keys.to_vec(),
private_account_nsks.to_vec(),
private_account_membership_proofs.to_vec(),
&program.into(),
);
@ -1701,11 +1746,11 @@ pub mod tests {
);
let result = execute_and_prove(
&[private_account_1, private_account_2],
&Program::serialize_instruction(10u128).unwrap(),
&[1, 2],
&[0xdeadbeef1, 0xdeadbeef2],
&[
vec![private_account_1, private_account_2],
Program::serialize_instruction(10u128).unwrap(),
vec![1, 2],
vec![0xdeadbeef1, 0xdeadbeef2],
vec![
(
sender_keys.npk(),
SharedSecretKey::new(&[55; 32], &sender_keys.ivk()),
@ -1715,8 +1760,8 @@ pub mod tests {
SharedSecretKey::new(&[56; 32], &recipient_keys.ivk()),
),
],
&[sender_keys.nsk],
&[Some((0, vec![]))],
vec![sender_keys.nsk],
vec![Some((0, vec![]))],
&program.into(),
);
@ -1749,11 +1794,11 @@ pub mod tests {
);
let result = execute_and_prove(
&[private_account_1, private_account_2],
&Program::serialize_instruction(10u128).unwrap(),
&[1, 2],
&[0xdeadbeef1, 0xdeadbeef2],
&[
vec![private_account_1, private_account_2],
Program::serialize_instruction(10u128).unwrap(),
vec![1, 2],
vec![0xdeadbeef1, 0xdeadbeef2],
vec![
(
sender_keys.npk(),
SharedSecretKey::new(&[55; 32], &sender_keys.ivk()),
@ -1763,8 +1808,8 @@ pub mod tests {
SharedSecretKey::new(&[56; 32], &recipient_keys.ivk()),
),
],
&[sender_keys.nsk],
&[Some((0, vec![]))],
vec![sender_keys.nsk],
vec![Some((0, vec![]))],
&program.into(),
);
@ -1796,11 +1841,11 @@ pub mod tests {
);
let result = execute_and_prove(
&[private_account_1, private_account_2],
&Program::serialize_instruction(10u128).unwrap(),
&[1, 2],
&[0xdeadbeef1, 0xdeadbeef2],
&[
vec![private_account_1, private_account_2],
Program::serialize_instruction(10u128).unwrap(),
vec![1, 2],
vec![0xdeadbeef1, 0xdeadbeef2],
vec![
(
sender_keys.npk(),
SharedSecretKey::new(&[55; 32], &sender_keys.ivk()),
@ -1810,8 +1855,8 @@ pub mod tests {
SharedSecretKey::new(&[56; 32], &recipient_keys.ivk()),
),
],
&[sender_keys.nsk],
&[Some((0, vec![]))],
vec![sender_keys.nsk],
vec![Some((0, vec![]))],
&program.into(),
);
@ -1843,11 +1888,11 @@ pub mod tests {
);
let result = execute_and_prove(
&[private_account_1, private_account_2],
&Program::serialize_instruction(10u128).unwrap(),
&[1, 2],
&[0xdeadbeef1, 0xdeadbeef2],
&[
vec![private_account_1, private_account_2],
Program::serialize_instruction(10u128).unwrap(),
vec![1, 2],
vec![0xdeadbeef1, 0xdeadbeef2],
vec![
(
sender_keys.npk(),
SharedSecretKey::new(&[55; 32], &sender_keys.ivk()),
@ -1857,8 +1902,8 @@ pub mod tests {
SharedSecretKey::new(&[56; 32], &recipient_keys.ivk()),
),
],
&[sender_keys.nsk],
&[Some((0, vec![]))],
vec![sender_keys.nsk],
vec![Some((0, vec![]))],
&program.into(),
);
@ -1888,11 +1933,11 @@ pub mod tests {
);
let result = execute_and_prove(
&[private_account_1, private_account_2],
&Program::serialize_instruction(10u128).unwrap(),
&[1, 2],
&[0xdeadbeef1, 0xdeadbeef2],
&[
vec![private_account_1, private_account_2],
Program::serialize_instruction(10u128).unwrap(),
vec![1, 2],
vec![0xdeadbeef1, 0xdeadbeef2],
vec![
(
sender_keys.npk(),
SharedSecretKey::new(&[55; 32], &sender_keys.ivk()),
@ -1902,8 +1947,8 @@ pub mod tests {
SharedSecretKey::new(&[56; 32], &recipient_keys.ivk()),
),
],
&[sender_keys.nsk],
&[Some((0, vec![]))],
vec![sender_keys.nsk],
vec![Some((0, vec![]))],
&program.into(),
);
@ -1927,13 +1972,13 @@ pub mod tests {
let visibility_mask = [0, 3];
let result = execute_and_prove(
&[public_account_1, public_account_2],
&Program::serialize_instruction(10u128).unwrap(),
&visibility_mask,
&[],
&[],
&[],
&[],
vec![public_account_1, public_account_2],
Program::serialize_instruction(10u128).unwrap(),
visibility_mask.to_vec(),
vec![],
vec![],
vec![],
vec![],
&program.into(),
);
@ -1961,11 +2006,11 @@ pub mod tests {
// accounts.
let private_account_nonces = [0xdeadbeef1, 0xdeadbeef2, 0xdeadbeef3];
let result = execute_and_prove(
&[private_account_1, private_account_2],
&Program::serialize_instruction(10u128).unwrap(),
&[1, 2],
&private_account_nonces,
&[
vec![private_account_1, private_account_2],
Program::serialize_instruction(10u128).unwrap(),
vec![1, 2],
private_account_nonces.to_vec(),
vec![
(
sender_keys.npk(),
SharedSecretKey::new(&[55; 32], &sender_keys.ivk()),
@ -1975,8 +2020,8 @@ pub mod tests {
SharedSecretKey::new(&[56; 32], &recipient_keys.ivk()),
),
],
&[sender_keys.nsk],
&[Some((0, vec![]))],
vec![sender_keys.nsk],
vec![Some((0, vec![]))],
&program.into(),
);
@ -2017,13 +2062,13 @@ pub mod tests {
),
];
let result = execute_and_prove(
&[private_account_1, private_account_2],
&Program::serialize_instruction(10u128).unwrap(),
&[1, 2],
&[0xdeadbeef1, 0xdeadbeef2],
&private_account_keys,
&[sender_keys.nsk],
&[Some((0, vec![]))],
vec![private_account_1, private_account_2],
Program::serialize_instruction(10u128).unwrap(),
vec![1, 2],
vec![0xdeadbeef1, 0xdeadbeef2],
private_account_keys.to_vec(),
vec![sender_keys.nsk],
vec![Some((0, vec![]))],
&program.into(),
);
@ -2053,11 +2098,11 @@ pub mod tests {
let private_account_nsks = [sender_keys.nsk, recipient_keys.nsk];
let private_account_membership_proofs = [Some((0, vec![])), Some((1, vec![]))];
let result = execute_and_prove(
&[private_account_1, private_account_2],
&Program::serialize_instruction(10u128).unwrap(),
&visibility_mask,
&[0xdeadbeef1, 0xdeadbeef2],
&[
vec![private_account_1, private_account_2],
Program::serialize_instruction(10u128).unwrap(),
visibility_mask.to_vec(),
vec![0xdeadbeef1, 0xdeadbeef2],
vec![
(
sender_keys.npk(),
SharedSecretKey::new(&[55; 32], &sender_keys.ivk()),
@ -2067,8 +2112,8 @@ pub mod tests {
SharedSecretKey::new(&[56; 32], &recipient_keys.ivk()),
),
],
&private_account_nsks,
&private_account_membership_proofs,
private_account_nsks.to_vec(),
private_account_membership_proofs.to_vec(),
&program.into(),
);
@ -2149,16 +2194,16 @@ pub mod tests {
let private_account_membership_proofs = [Some((1, vec![])), Some((1, vec![]))];
let shared_secret = SharedSecretKey::new(&[55; 32], &sender_keys.ivk());
let result = execute_and_prove(
&[private_account_1.clone(), private_account_1],
&Program::serialize_instruction(100u128).unwrap(),
&visibility_mask,
&[0xdeadbeef1, 0xdeadbeef2],
&[
vec![private_account_1.clone(), private_account_1],
Program::serialize_instruction(100u128).unwrap(),
visibility_mask.to_vec(),
vec![0xdeadbeef1, 0xdeadbeef2],
vec![
(sender_keys.npk(), shared_secret),
(sender_keys.npk(), shared_secret),
],
&private_account_nsks,
&private_account_membership_proofs,
private_account_nsks.to_vec(),
private_account_membership_proofs.to_vec(),
&program.into(),
);
@ -3941,8 +3986,9 @@ pub mod tests {
assert_eq!(to_post, expected_to_post);
}
#[test]
fn test_private_chained_call() {
#[test_case::test_case(1; "single call")]
#[test_case::test_case(2; "two calls")]
fn test_private_chained_call(number_of_calls: u32) {
// Arrange
let chain_caller = Program::chain_caller();
let auth_transfers = Program::authenticated_transfer_program();
@ -3978,7 +4024,7 @@ pub mod tests {
let instruction: (u128, ProgramId, u32, Option<PdaSeed>) = (
amount,
Program::authenticated_transfer_program().id(),
1,
number_of_calls,
None,
);
@ -3999,14 +4045,14 @@ pub mod tests {
let to_new_nonce = 0xdeadbeef2;
let from_expected_post = Account {
balance: initial_balance - amount,
balance: initial_balance - number_of_calls as u128 * amount,
nonce: from_new_nonce,
..from_account.account.clone()
};
let from_expected_commitment = Commitment::new(&from_keys.npk(), &from_expected_post);
let to_expected_post = Account {
balance: amount,
balance: number_of_calls as u128 * amount,
nonce: to_new_nonce,
..to_account.account.clone()
};
@ -4014,13 +4060,13 @@ pub mod tests {
// Act
let (output, proof) = execute_and_prove(
&[to_account, from_account],
&Program::serialize_instruction(instruction).unwrap(),
&[1, 1],
&[from_new_nonce, to_new_nonce],
&[(from_keys.npk(), to_ss), (to_keys.npk(), from_ss)],
&[from_keys.nsk, to_keys.nsk],
&[
vec![to_account, from_account],
Program::serialize_instruction(instruction).unwrap(),
vec![1, 1],
vec![from_new_nonce, to_new_nonce],
vec![(from_keys.npk(), to_ss), (to_keys.npk(), from_ss)],
vec![from_keys.nsk, to_keys.nsk],
vec![
state.get_proof_for_commitment(&from_commitment),
state.get_proof_for_commitment(&to_commitment),
],
@ -4255,13 +4301,13 @@ pub mod tests {
// Execute and prove the circuit with the authorized account but no commitment proof
let (output, proof) = execute_and_prove(
std::slice::from_ref(&authorized_account),
&Program::serialize_instruction(balance).unwrap(),
&[1],
&[nonce],
&[(private_keys.npk(), shared_secret)],
&[private_keys.nsk],
&[None],
vec![authorized_account],
Program::serialize_instruction(balance).unwrap(),
vec![1],
vec![nonce],
vec![(private_keys.npk(), shared_secret)],
vec![private_keys.nsk],
vec![None],
&program.into(),
)
.unwrap();
@ -4308,13 +4354,13 @@ pub mod tests {
// Step 2: Execute claimer program to claim the account with authentication
let (output, proof) = execute_and_prove(
std::slice::from_ref(&authorized_account),
&Program::serialize_instruction(balance).unwrap(),
&[1],
&[nonce],
&[(private_keys.npk(), shared_secret)],
&[private_keys.nsk],
&[None],
vec![authorized_account.clone()],
Program::serialize_instruction(balance).unwrap(),
vec![1],
vec![nonce],
vec![(private_keys.npk(), shared_secret)],
vec![private_keys.nsk],
vec![None],
&claimer_program.into(),
)
.unwrap();
@ -4356,16 +4402,185 @@ pub mod tests {
// Step 3: Try to execute noop program with authentication but without initialization
let res = execute_and_prove(
std::slice::from_ref(&account_metadata),
&Program::serialize_instruction(()).unwrap(),
&[1],
&[nonce2],
&[(private_keys.npk(), shared_secret2)],
&[private_keys.nsk],
&[None],
vec![account_metadata],
Program::serialize_instruction(()).unwrap(),
vec![1],
vec![nonce2],
vec![(private_keys.npk(), shared_secret2)],
vec![private_keys.nsk],
vec![None],
&noop_program.into(),
);
assert!(matches!(res, Err(NssaError::CircuitProvingError(_))));
}
#[test]
fn test_public_changer_claimer_no_data_change_no_claim_succeeds() {
let initial_data = [];
let mut state =
V02State::new_with_genesis_accounts(&initial_data, &[]).with_test_programs();
let account_id = AccountId::new([1; 32]);
let program_id = Program::changer_claimer().id();
// Don't change data (None) and don't claim (false)
let instruction: (Option<Vec<u8>>, bool) = (None, false);
let message =
public_transaction::Message::try_new(program_id, vec![account_id], vec![], instruction)
.unwrap();
let witness_set = public_transaction::WitnessSet::for_message(&message, &[]);
let tx = PublicTransaction::new(message, witness_set);
let result = state.transition_from_public_transaction(&tx);
// Should succeed - no changes made, no claim needed
assert!(result.is_ok());
// Account should remain default/unclaimed
assert_eq!(state.get_account_by_id(&account_id), Account::default());
}
#[test]
fn test_public_changer_claimer_data_change_no_claim_fails() {
let initial_data = [];
let mut state =
V02State::new_with_genesis_accounts(&initial_data, &[]).with_test_programs();
let account_id = AccountId::new([1; 32]);
let program_id = Program::changer_claimer().id();
// Change data but don't claim (false) - should fail
let new_data = vec![1, 2, 3, 4, 5];
let instruction: (Option<Vec<u8>>, bool) = (Some(new_data), false);
let message =
public_transaction::Message::try_new(program_id, vec![account_id], vec![], instruction)
.unwrap();
let witness_set = public_transaction::WitnessSet::for_message(&message, &[]);
let tx = PublicTransaction::new(message, witness_set);
let result = state.transition_from_public_transaction(&tx);
// Should fail - cannot modify data without claiming the account
assert!(matches!(result, Err(NssaError::InvalidProgramBehavior)));
}
#[test]
fn test_private_changer_claimer_no_data_change_no_claim_succeeds() {
let program = Program::changer_claimer();
let sender_keys = test_private_account_keys_1();
let private_account =
AccountWithMetadata::new(Account::default(), true, &sender_keys.npk());
// Don't change data (None) and don't claim (false)
let instruction: (Option<Vec<u8>>, bool) = (None, false);
let result = execute_and_prove(
vec![private_account],
Program::serialize_instruction(instruction).unwrap(),
vec![1],
vec![2],
vec![(
sender_keys.npk(),
SharedSecretKey::new(&[3; 32], &sender_keys.ivk()),
)],
vec![sender_keys.nsk],
vec![Some((0, vec![]))],
&program.into(),
);
// Should succeed - no changes made, no claim needed
assert!(result.is_ok());
}
#[test]
fn test_private_changer_claimer_data_change_no_claim_fails() {
let program = Program::changer_claimer();
let sender_keys = test_private_account_keys_1();
let private_account =
AccountWithMetadata::new(Account::default(), true, &sender_keys.npk());
// Change data but don't claim (false) - should fail
let new_data = vec![1, 2, 3, 4, 5];
let instruction: (Option<Vec<u8>>, bool) = (Some(new_data), false);
let result = execute_and_prove(
vec![private_account],
Program::serialize_instruction(instruction).unwrap(),
vec![1],
vec![2],
vec![(
sender_keys.npk(),
SharedSecretKey::new(&[3; 32], &sender_keys.ivk()),
)],
vec![sender_keys.nsk],
vec![Some((0, vec![]))],
&program.into(),
);
// Should fail - cannot modify data without claiming the account
assert!(matches!(result, Err(NssaError::CircuitProvingError(_))));
}
#[test]
fn test_malicious_authorization_changer_should_fail_in_privacy_preserving_circuit() {
// Arrange
let malicious_program = Program::malicious_authorization_changer();
let auth_transfers = Program::authenticated_transfer_program();
let sender_keys = test_public_account_keys_1();
let recipient_keys = test_private_account_keys_1();
let sender_account = AccountWithMetadata::new(
Account {
program_owner: auth_transfers.id(),
balance: 100,
..Default::default()
},
false,
sender_keys.account_id(),
);
let recipient_account =
AccountWithMetadata::new(Account::default(), true, &recipient_keys.npk());
let recipient_commitment =
Commitment::new(&recipient_keys.npk(), &recipient_account.account);
let state = V02State::new_with_genesis_accounts(
&[(sender_account.account_id, sender_account.account.balance)],
std::slice::from_ref(&recipient_commitment),
)
.with_test_programs();
let balance_to_transfer = 10u128;
let instruction = (balance_to_transfer, auth_transfers.id());
let recipient_esk = [3; 32];
let recipient = SharedSecretKey::new(&recipient_esk, &recipient_keys.ivk());
let mut dependencies = HashMap::new();
dependencies.insert(auth_transfers.id(), auth_transfers);
let program_with_deps = ProgramWithDependencies::new(malicious_program, dependencies);
let recipient_new_nonce = 0xdeadbeef1;
// Act - execute the malicious program - this should fail during proving
let result = execute_and_prove(
vec![sender_account, recipient_account],
Program::serialize_instruction(instruction).unwrap(),
vec![0, 1],
vec![recipient_new_nonce],
vec![(recipient_keys.npk(), recipient)],
vec![recipient_keys.nsk],
vec![state.get_proof_for_commitment(&recipient_commitment)],
&program_with_deps,
);
// Assert - should fail because the malicious program tries to manipulate is_authorized
assert!(matches!(result, Err(NssaError::CircuitProvingError(_))));
}
#[test]
fn test_state_serialization_roundtrip() {
let account_id_1 = AccountId::new([1; 32]);
let account_id_2 = AccountId::new([2; 32]);
let initial_data = [(account_id_1, 100u128), (account_id_2, 151u128)];
let state = V02State::new_with_genesis_accounts(&initial_data, &[]).with_test_programs();
let bytes = borsh::to_vec(&state).unwrap();
let state_from_bytes: V02State = borsh::from_slice(&bytes).unwrap();
assert_eq!(state, state_from_bytes);
}
}

View File

@ -77,7 +77,7 @@ fn main() {
instruction_words,
vec![pinata, winner],
vec![
AccountPostState::new(pinata_post),
AccountPostState::new_claimed_if_default(pinata_post),
AccountPostState::new(winner_post),
],
);

View File

@ -1,12 +1,18 @@
use std::collections::HashMap;
use std::{
collections::{HashMap, HashSet, VecDeque, hash_map::Entry},
convert::Infallible,
};
use nssa_core::{
Commitment, CommitmentSetDigest, DUMMY_COMMITMENT_HASH, EncryptionScheme, Nullifier,
NullifierPublicKey, PrivacyPreservingCircuitInput, PrivacyPreservingCircuitOutput,
account::{Account, AccountId, AccountWithMetadata},
Commitment, CommitmentSetDigest, DUMMY_COMMITMENT_HASH, EncryptionScheme, MembershipProof,
Nullifier, NullifierPublicKey, NullifierSecretKey, PrivacyPreservingCircuitInput,
PrivacyPreservingCircuitOutput, SharedSecretKey,
account::{Account, AccountId, AccountWithMetadata, Nonce},
compute_digest_for_path,
encryption::Ciphertext,
program::{DEFAULT_PROGRAM_ID, MAX_NUMBER_CHAINED_CALLS, validate_execution},
program::{
AccountPostState, ChainedCall, DEFAULT_PROGRAM_ID, MAX_NUMBER_CHAINED_CALLS, ProgramId,
ProgramOutput, validate_execution,
},
};
use risc0_zkvm::{guest::env, serde::to_vec};
@ -18,118 +24,224 @@ fn main() {
private_account_keys,
private_account_nsks,
private_account_membership_proofs,
mut program_id,
program_id,
} = env::read();
let mut pre_states: Vec<AccountWithMetadata> = Vec::new();
let mut state_diff: HashMap<AccountId, Account> = HashMap::new();
let execution_state = ExecutionState::derive_from_outputs(program_id, program_outputs);
let num_calls = program_outputs.len();
if num_calls > MAX_NUMBER_CHAINED_CALLS {
panic!("Max chained calls depth is exceeded");
}
let output = compute_circuit_output(
execution_state,
&visibility_mask,
&private_account_nonces,
&private_account_keys,
&private_account_nsks,
&private_account_membership_proofs,
);
let Some(last_program_call) = program_outputs.last() else {
panic!("Program outputs is empty")
};
env::commit(&output);
}
if !last_program_call.chained_calls.is_empty() {
panic!("Call stack is incomplete");
}
/// State of the involved accounts before and after program execution.
struct ExecutionState {
pre_states: Vec<AccountWithMetadata>,
post_states: HashMap<AccountId, Account>,
}
for window in program_outputs.windows(2) {
let caller = &window[0];
let callee = &window[1];
if caller.chained_calls.len() > 1 {
panic!("Privacy Multi-chained calls are not supported yet");
}
// TODO: Modify when multi-chain calls are supported in the circuit
let Some(caller_chained_call) = &caller.chained_calls.first() else {
panic!("Expected chained call");
impl ExecutionState {
/// Validate program outputs and derive the overall execution state.
pub fn derive_from_outputs(program_id: ProgramId, program_outputs: Vec<ProgramOutput>) -> Self {
let Some(first_output) = program_outputs.first() else {
panic!("No program outputs provided");
};
// Check that instruction data in caller is the instruction data in callee
if caller_chained_call.instruction_data != callee.instruction_data {
panic!("Invalid instruction data");
}
// Check that account pre_states in caller are the ones in calle
if caller_chained_call.pre_states != callee.pre_states {
panic!("Invalid pre states");
}
}
for (i, program_output) in program_outputs.iter().enumerate() {
let mut program_output = program_output.clone();
// Check that `program_output` is consistent with the execution of the corresponding
// program.
let program_output_words =
&to_vec(&program_output).expect("program_output must be serializable");
env::verify(program_id, program_output_words)
.expect("program output must match the program's execution");
// Check that the program is well behaved.
// See the # Programs section for the definition of the `validate_execution` method.
if !validate_execution(
&program_output.pre_states,
&program_output.post_states,
let initial_call = ChainedCall {
program_id,
) {
panic!("Bad behaved program");
}
instruction_data: first_output.instruction_data.clone(),
pre_states: first_output.pre_states.clone(),
pda_seeds: Vec::new(),
};
let mut chained_calls = VecDeque::from_iter([(initial_call, None)]);
// The invoked program claims the accounts with default program id.
for post in program_output
.post_states
.iter_mut()
.filter(|post| post.requires_claim())
{
// The invoked program can only claim accounts with default program id.
if post.account().program_owner == DEFAULT_PROGRAM_ID {
post.account_mut().program_owner = program_id;
} else {
panic!("Cannot claim an initialized account")
let mut execution_state = ExecutionState {
pre_states: Vec::new(),
post_states: HashMap::new(),
};
let mut program_outputs_iter = program_outputs.into_iter();
let mut chain_calls_counter = 0;
while let Some((chained_call, caller_program_id)) = chained_calls.pop_front() {
assert!(
chain_calls_counter <= MAX_NUMBER_CHAINED_CALLS,
"Max chained calls depth is exceeded"
);
let Some(program_output) = program_outputs_iter.next() else {
panic!("Insufficient program outputs for chained calls");
};
// Check that instruction data in chained call is the instruction data in program output
assert_eq!(
chained_call.instruction_data, program_output.instruction_data,
"Mismatched instruction data between chained call and program output"
);
// Check that `program_output` is consistent with the execution of the corresponding
// program.
let program_output_words =
&to_vec(&program_output).expect("program_output must be serializable");
env::verify(chained_call.program_id, program_output_words).unwrap_or_else(
|_: Infallible| unreachable!("Infallible error is never constructed"),
);
// Check that the program is well behaved.
// See the # Programs section for the definition of the `validate_execution` method.
let execution_valid = validate_execution(
&program_output.pre_states,
&program_output.post_states,
chained_call.program_id,
);
assert!(execution_valid, "Bad behaved program");
for next_call in program_output.chained_calls.iter().rev() {
chained_calls.push_front((next_call.clone(), Some(chained_call.program_id)));
}
let authorized_pdas = nssa_core::program::compute_authorized_pdas(
caller_program_id,
&chained_call.pda_seeds,
);
execution_state.validate_and_sync_states(
chained_call.program_id,
authorized_pdas,
program_output.pre_states,
program_output.post_states,
);
chain_calls_counter += 1;
}
for (pre, post) in program_output
assert!(
program_outputs_iter.next().is_none(),
"Inner call without a chained call found",
);
// Check that all modified uninitialized accounts were claimed
for (account_id, post) in execution_state
.pre_states
.iter()
.zip(&program_output.post_states)
.filter(|a| a.account.program_owner == DEFAULT_PROGRAM_ID)
.map(|a| {
let post = execution_state
.post_states
.get(&a.account_id)
.expect("Post state must exist for pre state");
(a, post)
})
.filter(|(pre_default, post)| pre_default.account != **post)
.map(|(pre, post)| (pre.account_id, post))
{
if let Some(account_pre) = state_diff.get(&pre.account_id) {
if account_pre != &pre.account {
panic!("Invalid input");
}
} else {
pre_states.push(pre.clone());
}
state_diff.insert(pre.account_id, post.account().clone());
assert_ne!(
post.program_owner, DEFAULT_PROGRAM_ID,
"Account {account_id:?} was modified but not claimed"
);
}
// TODO: Modify when multi-chain calls are supported in the circuit
if let Some(next_chained_call) = &program_output.chained_calls.first() {
program_id = next_chained_call.program_id;
} else if i != program_outputs.len() - 1 {
panic!("Inner call without a chained call found")
};
execution_state
}
let n_accounts = pre_states.len();
if visibility_mask.len() != n_accounts {
panic!("Invalid visibility mask length");
/// Validate program pre and post states and populate the execution state.
fn validate_and_sync_states(
&mut self,
program_id: ProgramId,
authorized_pdas: HashSet<AccountId>,
pre_states: Vec<AccountWithMetadata>,
post_states: Vec<AccountPostState>,
) {
for (pre, mut post) in pre_states.into_iter().zip(post_states) {
let pre_account_id = pre.account_id;
let post_states_entry = self.post_states.entry(pre.account_id);
match &post_states_entry {
Entry::Occupied(occupied) => {
// Ensure that new pre state is the same as known post state
assert_eq!(
occupied.get(),
&pre.account,
"Inconsistent pre state for account {pre_account_id:?}",
);
let previous_is_authorized = self
.pre_states
.iter()
.find(|acc| acc.account_id == pre_account_id)
.map(|acc| acc.is_authorized)
.unwrap_or_else(|| {
panic!(
"Pre state must exist in execution state for account {pre_account_id:?}",
)
});
let is_authorized =
previous_is_authorized || authorized_pdas.contains(&pre_account_id);
assert_eq!(
pre.is_authorized, is_authorized,
"Inconsistent authorization for account {pre_account_id:?}",
);
}
Entry::Vacant(_) => {
self.pre_states.push(pre);
}
}
if post.requires_claim() {
// The invoked program can only claim accounts with default program id.
if post.account().program_owner == DEFAULT_PROGRAM_ID {
post.account_mut().program_owner = program_id;
} else {
panic!("Cannot claim an initialized account {pre_account_id:?}");
}
}
post_states_entry.insert_entry(post.into_account());
}
}
// These lists will be the public outputs of this circuit
// and will be populated next.
let mut public_pre_states: Vec<AccountWithMetadata> = Vec::new();
let mut public_post_states: Vec<Account> = Vec::new();
let mut ciphertexts: Vec<Ciphertext> = Vec::new();
let mut new_commitments: Vec<Commitment> = Vec::new();
let mut new_nullifiers: Vec<(Nullifier, CommitmentSetDigest)> = Vec::new();
/// Get an iterator over pre and post states of each account involved in the execution.
pub fn into_states_iter(
mut self,
) -> impl ExactSizeIterator<Item = (AccountWithMetadata, Account)> {
self.pre_states.into_iter().map(move |pre| {
let post = self
.post_states
.remove(&pre.account_id)
.expect("Account from pre states should exist in state diff");
(pre, post)
})
}
}
fn compute_circuit_output(
execution_state: ExecutionState,
visibility_mask: &[u8],
private_account_nonces: &[Nonce],
private_account_keys: &[(NullifierPublicKey, SharedSecretKey)],
private_account_nsks: &[NullifierSecretKey],
private_account_membership_proofs: &[Option<MembershipProof>],
) -> PrivacyPreservingCircuitOutput {
let mut output = PrivacyPreservingCircuitOutput {
public_pre_states: Vec::new(),
public_post_states: Vec::new(),
ciphertexts: Vec::new(),
new_commitments: Vec::new(),
new_nullifiers: Vec::new(),
};
let states_iter = execution_state.into_states_iter();
assert_eq!(
visibility_mask.len(),
states_iter.len(),
"Invalid visibility mask length"
);
let mut private_nonces_iter = private_account_nonces.iter();
let mut private_keys_iter = private_account_keys.iter();
@ -137,141 +249,156 @@ fn main() {
let mut private_membership_proofs_iter = private_account_membership_proofs.iter();
let mut output_index = 0;
for i in 0..n_accounts {
match visibility_mask[i] {
for (visibility_mask, (pre_state, post_state)) in
visibility_mask.iter().copied().zip(states_iter)
{
match visibility_mask {
0 => {
// Public account
public_pre_states.push(pre_states[i].clone());
let mut post = state_diff.get(&pre_states[i].account_id).unwrap().clone();
if post.program_owner == DEFAULT_PROGRAM_ID {
// Claim account
post.program_owner = program_id;
}
public_post_states.push(post);
output.public_pre_states.push(pre_state);
output.public_post_states.push(post_state);
}
1 | 2 => {
let new_nonce = private_nonces_iter.next().expect("Missing private nonce");
let (npk, shared_secret) = private_keys_iter.next().expect("Missing keys");
let Some((npk, shared_secret)) = private_keys_iter.next() else {
panic!("Missing private account key");
};
if AccountId::from(npk) != pre_states[i].account_id {
panic!("AccountId mismatch");
}
assert_eq!(
AccountId::from(npk),
pre_state.account_id,
"AccountId mismatch"
);
if visibility_mask[i] == 1 {
let new_nullifier = if visibility_mask == 1 {
// Private account with authentication
let nsk = private_nsks_iter.next().expect("Missing nsk");
let Some(nsk) = private_nsks_iter.next() else {
panic!("Missing private account nullifier secret key");
};
// Verify the nullifier public key
let expected_npk = NullifierPublicKey::from(nsk);
if &expected_npk != npk {
panic!("Nullifier public key mismatch");
}
assert_eq!(
npk,
&NullifierPublicKey::from(nsk),
"Nullifier public key mismatch"
);
// Check pre_state authorization
if !pre_states[i].is_authorized {
panic!("Pre-state not authorized");
}
assert!(
pre_state.is_authorized,
"Pre-state not authorized for authenticated private account"
);
let membership_proof_opt = private_membership_proofs_iter
.next()
.expect("Missing membership proof");
let (nullifier, set_digest) = membership_proof_opt
.as_ref()
.map(|membership_proof| {
// Compute commitment set digest associated with provided auth path
let commitment_pre = Commitment::new(npk, &pre_states[i].account);
let set_digest =
compute_digest_for_path(&commitment_pre, membership_proof);
let Some(membership_proof_opt) = private_membership_proofs_iter.next() else {
panic!("Missing membership proof");
};
// Compute update nullifier
let nullifier = Nullifier::for_account_update(&commitment_pre, nsk);
(nullifier, set_digest)
})
.unwrap_or_else(|| {
if pre_states[i].account != Account::default() {
panic!("Found new private account with non default values.");
}
// Compute initialization nullifier
let nullifier = Nullifier::for_account_initialization(npk);
(nullifier, DUMMY_COMMITMENT_HASH)
});
new_nullifiers.push((nullifier, set_digest));
compute_nullifier_and_set_digest(
membership_proof_opt.as_ref(),
&pre_state.account,
npk,
nsk,
)
} else {
// Private account without authentication
if pre_states[i].account != Account::default() {
panic!("Found new private account with non default values.");
}
if pre_states[i].is_authorized {
panic!("Found new private account marked as authorized.");
}
assert_eq!(
pre_state.account,
Account::default(),
"Found new private account with non default values",
);
assert!(
!pre_state.is_authorized,
"Found new private account marked as authorized."
);
let Some(membership_proof_opt) = private_membership_proofs_iter.next() else {
panic!("Missing membership proof");
};
let membership_proof_opt = private_membership_proofs_iter
.next()
.expect("Missing membership proof");
assert!(
membership_proof_opt.is_none(),
"Membership proof must be None for unauthorized accounts"
);
let nullifier = Nullifier::for_account_initialization(npk);
new_nullifiers.push((nullifier, DUMMY_COMMITMENT_HASH));
}
(nullifier, DUMMY_COMMITMENT_HASH)
};
output.new_nullifiers.push(new_nullifier);
// Update post-state with new nonce
let mut post_with_updated_values =
state_diff.get(&pre_states[i].account_id).unwrap().clone();
post_with_updated_values.nonce = *new_nonce;
if post_with_updated_values.program_owner == DEFAULT_PROGRAM_ID {
// Claim account
post_with_updated_values.program_owner = program_id;
}
let mut post_with_updated_nonce = post_state;
let Some(new_nonce) = private_nonces_iter.next() else {
panic!("Missing private account nonce");
};
post_with_updated_nonce.nonce = *new_nonce;
// Compute commitment
let commitment_post = Commitment::new(npk, &post_with_updated_values);
let commitment_post = Commitment::new(npk, &post_with_updated_nonce);
// Encrypt and push post state
let encrypted_account = EncryptionScheme::encrypt(
&post_with_updated_values,
&post_with_updated_nonce,
shared_secret,
&commitment_post,
output_index,
);
new_commitments.push(commitment_post);
ciphertexts.push(encrypted_account);
output.new_commitments.push(commitment_post);
output.ciphertexts.push(encrypted_account);
output_index += 1;
}
_ => panic!("Invalid visibility mask value"),
}
}
if private_nonces_iter.next().is_some() {
panic!("Too many nonces");
}
assert!(private_nonces_iter.next().is_none(), "Too many nonces");
if private_keys_iter.next().is_some() {
panic!("Too many private account keys");
}
assert!(
private_keys_iter.next().is_none(),
"Too many private account keys"
);
if private_nsks_iter.next().is_some() {
panic!("Too many private account authentication keys");
}
assert!(
private_nsks_iter.next().is_none(),
"Too many private account nullifier secret keys"
);
if private_membership_proofs_iter.next().is_some() {
panic!("Too many private account membership proofs");
}
assert!(
private_membership_proofs_iter.next().is_none(),
"Too many private account membership proofs"
);
let output = PrivacyPreservingCircuitOutput {
public_pre_states,
public_post_states,
ciphertexts,
new_commitments,
new_nullifiers,
};
env::commit(&output);
output
}
fn compute_nullifier_and_set_digest(
membership_proof_opt: Option<&MembershipProof>,
pre_account: &Account,
npk: &NullifierPublicKey,
nsk: &NullifierSecretKey,
) -> (Nullifier, CommitmentSetDigest) {
membership_proof_opt
.as_ref()
.map(|membership_proof| {
// Compute commitment set digest associated with provided auth path
let commitment_pre = Commitment::new(npk, pre_account);
let set_digest = compute_digest_for_path(&commitment_pre, membership_proof);
// Compute update nullifier
let nullifier = Nullifier::for_account_update(&commitment_pre, nsk);
(nullifier, set_digest)
})
.unwrap_or_else(|| {
assert_eq!(
*pre_account,
Account::default(),
"Found new private account with non default values"
);
// Compute initialization nullifier
let nullifier = Nullifier::for_account_initialization(npk);
(nullifier, DUMMY_COMMITMENT_HASH)
})
}

View File

@ -17,11 +17,17 @@ serde_json.workspace = true
tempfile.workspace = true
chrono.workspace = true
log.workspace = true
tokio = { workspace = true, features = ["rt-multi-thread", "macros"] }
bedrock_client.workspace = true
logos-blockchain-key-management-system-service.workspace = true
logos-blockchain-core.workspace = true
rand.workspace = true
reqwest.workspace = true
borsh.workspace = true
[features]
default = []
testnet = []
[dev-dependencies]
tokio = { workspace = true, features = ["rt-multi-thread", "macros"] }
futures.workspace = true

View File

@ -0,0 +1,115 @@
use std::{fs, path::Path, str::FromStr};
use anyhow::{Context, Result, anyhow};
use bedrock_client::BedrockClient;
use common::block::Block;
use logos_blockchain_core::mantle::{
MantleTx, Op, OpProof, SignedMantleTx, Transaction, TxHash, ledger,
ops::channel::{ChannelId, MsgId, inscribe::InscriptionOp},
};
use logos_blockchain_key_management_system_service::keys::{
ED25519_SECRET_KEY_SIZE, Ed25519Key, Ed25519PublicKey,
};
use reqwest::Url;
use crate::config::BedrockConfig;
/// A component that posts block data to logos blockchain
#[derive(Clone)]
pub struct BlockSettlementClient {
bedrock_client: BedrockClient,
bedrock_signing_key: Ed25519Key,
bedrock_channel_id: ChannelId,
}
impl BlockSettlementClient {
pub fn try_new(home: &Path, config: &BedrockConfig) -> Result<Self> {
let bedrock_signing_key = load_or_create_signing_key(&home.join("bedrock_signing_key"))
.context("Failed to load or create signing key")?;
let bedrock_url = Url::from_str(config.node_url.as_ref())
.context("Bedrock node address is not a valid url")?;
let bedrock_client =
BedrockClient::new(None, bedrock_url).context("Failed to initialize bedrock client")?;
Ok(Self {
bedrock_client,
bedrock_signing_key,
bedrock_channel_id: config.channel_id,
})
}
/// Create and sign a transaction for inscribing data
pub fn create_inscribe_tx(&self, block: &Block) -> Result<(SignedMantleTx, MsgId)> {
let inscription_data = borsh::to_vec(block)?;
let verifying_key_bytes = self.bedrock_signing_key.public_key().to_bytes();
let verifying_key =
Ed25519PublicKey::from_bytes(&verifying_key_bytes).expect("valid ed25519 public key");
let inscribe_op = InscriptionOp {
channel_id: self.bedrock_channel_id,
inscription: inscription_data,
parent: block.bedrock_parent_id.into(),
signer: verifying_key,
};
let inscribe_op_id = inscribe_op.id();
let ledger_tx = ledger::Tx::new(vec![], vec![]);
let inscribe_tx = MantleTx {
ops: vec![Op::ChannelInscribe(inscribe_op)],
ledger_tx,
// Altruistic test config
storage_gas_price: 0,
execution_gas_price: 0,
};
let tx_hash = inscribe_tx.hash();
let signature_bytes = self
.bedrock_signing_key
.sign_payload(tx_hash.as_signing_bytes().as_ref())
.to_bytes();
let signature =
logos_blockchain_key_management_system_service::keys::Ed25519Signature::from_bytes(
&signature_bytes,
);
let signed_mantle_tx = SignedMantleTx {
ops_proofs: vec![OpProof::Ed25519Sig(signature)],
ledger_tx_proof: empty_ledger_signature(&tx_hash),
mantle_tx: inscribe_tx,
};
Ok((signed_mantle_tx, inscribe_op_id))
}
/// Post a transaction to the node
pub async fn submit_block_to_bedrock(&self, block: &Block) -> Result<MsgId> {
let (tx, new_msg_id) = self.create_inscribe_tx(block)?;
// Post the transaction
self.bedrock_client.post_transaction(tx).await?;
Ok(new_msg_id)
}
}
/// Load signing key from file or generate a new one if it doesn't exist
fn load_or_create_signing_key(path: &Path) -> Result<Ed25519Key> {
if path.exists() {
let key_bytes = fs::read(path)?;
let key_array: [u8; ED25519_SECRET_KEY_SIZE] = key_bytes
.try_into()
.map_err(|_| anyhow!("Found key with incorrect length"))?;
Ok(Ed25519Key::from_bytes(&key_array))
} else {
let mut key_bytes = [0u8; ED25519_SECRET_KEY_SIZE];
rand::RngCore::fill_bytes(&mut rand::thread_rng(), &mut key_bytes);
fs::write(path, key_bytes)?;
Ok(Ed25519Key::from_bytes(&key_bytes))
}
}
/// Produce the ledger proof for a transaction that moves no notes: a
/// multi-signature over the tx hash computed with an empty key set.
fn empty_ledger_signature(
    tx_hash: &TxHash,
) -> logos_blockchain_key_management_system_service::keys::ZkSignature {
    use logos_blockchain_key_management_system_service::keys::ZkKey;
    ZkKey::multi_sign(&[], tx_hash.as_ref()).expect("multi-sign with empty key set works")
}

View File

@ -2,9 +2,10 @@ use std::{collections::HashMap, path::Path};
use anyhow::Result;
use common::{HashType, block::Block, transaction::EncodedTransaction};
use nssa::V02State;
use storage::RocksDBIO;
pub struct SequencerBlockStore {
pub struct SequencerStore {
dbio: RocksDBIO,
// TODO: Consider adding the hashmap to the database for faster recovery.
tx_hash_to_block_map: HashMap<HashType, u64>,
@ -12,7 +13,7 @@ pub struct SequencerBlockStore {
signing_key: nssa::PrivateKey,
}
impl SequencerBlockStore {
impl SequencerStore {
/// Starting database at the start of new chain.
/// Creates files if necessary.
///
@ -42,18 +43,15 @@ impl SequencerBlockStore {
/// Reopening existing database
pub fn open_db_restart(location: &Path, signing_key: nssa::PrivateKey) -> Result<Self> {
SequencerBlockStore::open_db_with_genesis(location, None, signing_key)
SequencerStore::open_db_with_genesis(location, None, signing_key)
}
pub fn get_block_at_id(&self, id: u64) -> Result<Block> {
Ok(self.dbio.get_block(id)?.into_block(&self.signing_key))
Ok(self.dbio.get_block(id)?)
}
pub fn put_block_at_id(&mut self, block: Block) -> Result<()> {
let new_transactions_map = block_to_transactions_map(&block);
self.dbio.put_block(block, false)?;
self.tx_hash_to_block_map.extend(new_transactions_map);
Ok(())
pub fn delete_block_at_id(&mut self, block_id: u64) -> Result<()> {
Ok(self.dbio.delete_block(block_id)?)
}
/// Returns the transaction corresponding to the given hash, if it exists in the blockchain.
@ -81,6 +79,21 @@ impl SequencerBlockStore {
pub fn signing_key(&self) -> &nssa::PrivateKey {
&self.signing_key
}
/// Iterates over every block stored in the database, converting storage
/// errors into `anyhow` errors.
pub fn get_all_blocks(&self) -> impl Iterator<Item = Result<Block>> {
self.dbio.get_all_blocks().map(|res| Ok(res?))
}
/// Atomically persists `block` together with the post-execution NSSA
/// `state`, then indexes the block's transactions by hash in memory.
pub(crate) fn update(&mut self, block: Block, state: &V02State) -> Result<()> {
// Build the hash -> block-id entries before `block` is moved into the db.
let new_transactions_map = block_to_transactions_map(&block);
self.dbio.atomic_update(block, state)?;
self.tx_hash_to_block_map.extend(new_transactions_map);
Ok(())
}
/// Returns the stored NSSA state, or `None` when it is absent.
/// NOTE(review): `.ok()` also maps genuine db read errors to `None`, which
/// callers treat as "fresh start from genesis" — confirm that is intended.
pub fn get_nssa_state(&self) -> Option<V02State> {
self.dbio.get_nssa_state().ok()
}
}
pub(crate) fn block_to_transactions_map(block: &Block) -> HashMap<HashType, u64> {
@ -113,11 +126,10 @@ mod tests {
transactions: vec![],
};
let genesis_block = genesis_block_hashable_data.into_block(&signing_key);
let genesis_block = genesis_block_hashable_data.into_pending_block(&signing_key, [0; 32]);
// Start an empty node store
let mut node_store =
SequencerBlockStore::open_db_with_genesis(path, Some(genesis_block), signing_key)
.unwrap();
SequencerStore::open_db_with_genesis(path, Some(genesis_block), signing_key).unwrap();
let tx = common::test_utils::produce_dummy_empty_transaction();
let block = common::test_utils::produce_dummy_block(1, None, vec![tx.clone()]);
@ -126,7 +138,8 @@ mod tests {
let retrieved_tx = node_store.get_transaction_by_hash(tx.hash());
assert_eq!(None, retrieved_tx);
// Add the block with the transaction
node_store.put_block_at_id(block).unwrap();
let dummy_state = V02State::new_with_genesis_accounts(&[], &[]);
node_store.update(block, &dummy_state).unwrap();
// Try again
let retrieved_tx = node_store.get_transaction_by_hash(tx.hash());
assert_eq!(Some(tx), retrieved_tx);

View File

@ -5,6 +5,8 @@ use std::{
};
use anyhow::Result;
use common::sequencer_client::BasicAuth;
use logos_blockchain_core::mantle::ops::channel::ChannelId;
use serde::{Deserialize, Serialize};
#[derive(Debug, Serialize, Deserialize, Clone)]
@ -39,6 +41,8 @@ pub struct SequencerConfig {
pub mempool_max_size: usize,
/// Interval in which blocks produced
pub block_create_timeout_millis: u64,
/// Interval in which pending blocks are retried
pub retry_pending_blocks_timeout_millis: u64,
/// Port to listen
pub port: u16,
/// List of initial accounts data
@ -47,6 +51,18 @@ pub struct SequencerConfig {
pub initial_commitments: Vec<CommitmentsInitialData>,
/// Sequencer own signing key
pub signing_key: [u8; 32],
/// Bedrock configuration options
pub bedrock_config: Option<BedrockConfig>,
}
#[derive(Clone, Serialize, Deserialize)]
pub struct BedrockConfig {
/// Bedrock channel ID
pub channel_id: ChannelId,
/// Bedrock Url
pub node_url: String,
/// Bedrock auth
pub auth: Option<BasicAuth>,
}
impl SequencerConfig {

View File

@ -5,25 +5,28 @@ use anyhow::Result;
use common::PINATA_BASE58;
use common::{
HashType,
block::HashableBlockData,
block::{BedrockStatus, Block, HashableBlockData, MantleMsgId},
transaction::{EncodedTransaction, NSSATransaction},
};
use config::SequencerConfig;
use log::warn;
use log::{info, warn};
use mempool::{MemPool, MemPoolHandle};
use serde::{Deserialize, Serialize};
use crate::block_store::SequencerBlockStore;
use crate::{block_settlement_client::BlockSettlementClient, block_store::SequencerStore};
mod block_settlement_client;
pub mod block_store;
pub mod config;
pub struct SequencerCore {
state: nssa::V02State,
block_store: SequencerBlockStore,
store: SequencerStore,
mempool: MemPool<EncodedTransaction>,
sequencer_config: SequencerConfig,
chain_height: u64,
block_settlement_client: Option<BlockSettlementClient>,
last_bedrock_msg_id: MantleMsgId,
}
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
@ -41,7 +44,11 @@ impl Display for TransactionMalformationError {
impl std::error::Error for TransactionMalformationError {}
impl SequencerCore {
/// Start Sequencer from configuration and construct transaction sender
/// Starts the sequencer using the provided configuration.
/// If an existing database is found, the sequencer state is loaded from it and
/// assumed to represent the correct latest state consistent with Bedrock-finalized data.
/// If no database is found, the sequencer performs a fresh start from genesis,
/// initializing its state with the accounts defined in the configuration file.
pub fn start_from_config(config: SequencerConfig) -> (Self, MemPoolHandle<EncodedTransaction>) {
let hashable_data = HashableBlockData {
block_id: config.genesis_id,
@ -51,72 +58,72 @@ impl SequencerCore {
};
let signing_key = nssa::PrivateKey::try_new(config.signing_key).unwrap();
let genesis_block = hashable_data.into_block(&signing_key);
let channel_genesis_msg_id = [0; 32];
let genesis_block = hashable_data.into_pending_block(&signing_key, channel_genesis_msg_id);
// Sequencer should panic if unable to open db,
// as fixing this issue may require actions non-native to program scope
let block_store = SequencerBlockStore::open_db_with_genesis(
let store = SequencerStore::open_db_with_genesis(
&config.home.join("rocksdb"),
Some(genesis_block),
signing_key,
)
.unwrap();
let mut initial_commitments = vec![];
for init_comm_data in config.initial_commitments.clone() {
let npk = init_comm_data.npk;
let mut state = match store.get_nssa_state() {
Some(state) => {
info!("Found local database. Loading state and pending blocks from it.");
state
}
None => {
info!(
"No database found when starting the sequencer. Creating a fresh new with the initial data in config"
);
let initial_commitments: Vec<nssa_core::Commitment> = config
.initial_commitments
.iter()
.map(|init_comm_data| {
let npk = &init_comm_data.npk;
let mut acc = init_comm_data.account;
let mut acc = init_comm_data.account.clone();
acc.program_owner = nssa::program::Program::authenticated_transfer_program().id();
acc.program_owner =
nssa::program::Program::authenticated_transfer_program().id();
let comm = nssa_core::Commitment::new(&npk, &acc);
nssa_core::Commitment::new(npk, &acc)
})
.collect();
initial_commitments.push(comm);
}
let init_accs: Vec<(nssa::AccountId, u128)> = config
.initial_accounts
.iter()
.map(|acc_data| (acc_data.account_id.parse().unwrap(), acc_data.balance))
.collect();
let init_accs: Vec<(nssa::AccountId, u128)> = config
.initial_accounts
.iter()
.map(|acc_data| (acc_data.account_id.parse().unwrap(), acc_data.balance))
.collect();
let mut state = nssa::V02State::new_with_genesis_accounts(&init_accs, &initial_commitments);
nssa::V02State::new_with_genesis_accounts(&init_accs, &initial_commitments)
}
};
#[cfg(feature = "testnet")]
state.add_pinata_program(PINATA_BASE58.parse().unwrap());
let (mempool, mempool_handle) = MemPool::new(config.mempool_max_size);
let mut this = Self {
let block_settlement_client = config.bedrock_config.as_ref().map(|bedrock_config| {
BlockSettlementClient::try_new(&config.home, bedrock_config)
.expect("Block settlement client should be constructible")
});
let sequencer_core = Self {
state,
block_store,
store,
mempool,
chain_height: config.genesis_id,
sequencer_config: config,
block_settlement_client,
last_bedrock_msg_id: channel_genesis_msg_id,
};
this.sync_state_with_stored_blocks();
(this, mempool_handle)
}
/// If there are stored blocks ahead of the current height, this method will load and process
/// all transaction in them in the order they are stored. The NSSA state will be updated
/// accordingly.
fn sync_state_with_stored_blocks(&mut self) {
let mut next_block_id = self.sequencer_config.genesis_id + 1;
while let Ok(block) = self.block_store.get_block_at_id(next_block_id) {
for encoded_transaction in block.body.transactions {
let transaction = NSSATransaction::try_from(&encoded_transaction).unwrap();
// Process transaction and update state
self.execute_check_transaction_on_state(transaction)
.unwrap();
// Update the tx hash to block id map.
self.block_store.insert(&encoded_transaction, next_block_id);
}
self.chain_height = next_block_id;
next_block_id += 1;
}
(sequencer_core, mempool_handle)
}
fn execute_check_transaction_on_state(
@ -137,9 +144,24 @@ impl SequencerCore {
Ok(tx)
}
/// Produces a new block from mempool transactions and, when a settlement
/// client is configured, posts it to Bedrock as a pending block.
///
/// On a successful post the inscription's message id is recorded so the
/// next block references it as its Bedrock parent. Returns the current
/// chain height after block production.
pub async fn produce_new_block_and_post_to_settlement_layer(&mut self) -> Result<u64> {
let block_data = self.produce_new_block_with_mempool_transactions()?;
if let Some(client) = self.block_settlement_client.as_mut() {
let block =
block_data.into_pending_block(self.store.signing_key(), self.last_bedrock_msg_id);
let msg_id = client.submit_block_to_bedrock(&block).await?;
self.last_bedrock_msg_id = msg_id.into();
log::info!("Posted block data to Bedrock");
}
Ok(self.chain_height)
}
/// Produces new block from transactions in mempool
pub fn produce_new_block_with_mempool_transactions(&mut self) -> Result<u64> {
pub fn produce_new_block_with_mempool_transactions(&mut self) -> Result<HashableBlockData> {
let now = Instant::now();
let new_block_height = self.chain_height + 1;
let mut valid_transactions = vec![];
@ -159,16 +181,10 @@ impl SequencerCore {
}
}
let prev_block_hash = self
.block_store
.get_block_at_id(self.chain_height)?
.header
.hash;
let prev_block_hash = self.store.get_block_at_id(self.chain_height)?.header.hash;
let curr_time = chrono::Utc::now().timestamp_millis() as u64;
let num_txs_in_block = valid_transactions.len();
let hashable_data = HashableBlockData {
block_id: new_block_height,
transactions: valid_transactions,
@ -176,9 +192,11 @@ impl SequencerCore {
timestamp: curr_time,
};
let block = hashable_data.into_block(self.block_store.signing_key());
let block = hashable_data
.clone()
.into_pending_block(self.store.signing_key(), self.last_bedrock_msg_id);
self.block_store.put_block_at_id(block)?;
self.store.update(block, &self.state)?;
self.chain_height = new_block_height;
@ -194,19 +212,18 @@ impl SequencerCore {
// ```
log::info!(
"Created block with {} transactions in {} seconds",
num_txs_in_block,
hashable_data.transactions.len(),
now.elapsed().as_secs()
);
Ok(self.chain_height)
Ok(hashable_data)
}
pub fn state(&self) -> &nssa::V02State {
&self.state
}
pub fn block_store(&self) -> &SequencerBlockStore {
&self.block_store
pub fn block_store(&self) -> &SequencerStore {
&self.store
}
pub fn chain_height(&self) -> u64 {
@ -216,6 +233,39 @@ impl SequencerCore {
pub fn sequencer_config(&self) -> &SequencerConfig {
&self.sequencer_config
}
/// Deletes finalized blocks from the sequencer's pending block list.
///
/// Must be called when new blocks are finalized on Bedrock: every stored
/// pending block whose id lies in
/// `[first_pending_block_id, last_finalized_block_id]` is removed.
pub fn clean_finalized_blocks_from_db(&mut self, last_finalized_block_id: u64) -> Result<()> {
    let lowest_pending = self
        .get_pending_blocks()?
        .iter()
        .map(|block| block.header.block_id)
        .min();
    // Nothing pending means nothing to delete.
    let Some(first_pending_block_id) = lowest_pending else {
        return Ok(());
    };
    for id in first_pending_block_id..=last_finalized_block_id {
        self.store.delete_block_at_id(id)?;
    }
    Ok(())
}
/// Returns the list of stored blocks still pending Bedrock finalization.
///
/// # Errors
/// Fails if any stored block cannot be read or deserialized.
pub fn get_pending_blocks(&self) -> Result<Vec<Block>> {
    let mut pending = Vec::new();
    // Filter while streaming so non-pending blocks are never buffered in an
    // intermediate Vec (the original collected every block first).
    for block in self.store.get_all_blocks() {
        let block = block?;
        if matches!(block.bedrock_status, BedrockStatus::Pending) {
            pending.push(block);
        }
    }
    Ok(pending)
}
/// Returns a clone of the configured Bedrock settlement client, or `None`
/// when the sequencer runs without a settlement layer.
pub fn block_settlement_client(&self) -> Option<BlockSettlementClient> {
self.block_settlement_client.clone()
}
}
// TODO: Introduce type-safe wrapper around checked transaction, e.g. AuthenticatedTransaction
@ -277,6 +327,8 @@ mod tests {
initial_accounts,
initial_commitments: vec![],
signing_key: *sequencer_sign_key_for_testing().value(),
bedrock_config: None,
retry_pending_blocks_timeout_millis: 1000 * 60 * 4,
}
}
@ -619,9 +671,9 @@ mod tests {
let tx = common::test_utils::produce_dummy_empty_transaction();
mempool_handle.push(tx).await.unwrap();
let block_id = sequencer.produce_new_block_with_mempool_transactions();
assert!(block_id.is_ok());
assert_eq!(block_id.unwrap(), genesis_height + 1);
let block = sequencer.produce_new_block_with_mempool_transactions();
assert!(block.is_ok());
assert_eq!(block.unwrap().block_id, genesis_height + 1);
}
#[tokio::test]
@ -658,11 +710,9 @@ mod tests {
// Create block
let current_height = sequencer
.produce_new_block_with_mempool_transactions()
.unwrap();
let block = sequencer
.block_store
.get_block_at_id(current_height)
.unwrap();
.unwrap()
.block_id;
let block = sequencer.store.get_block_at_id(current_height).unwrap();
// Only one should be included in the block
assert_eq!(block.body.transactions, vec![tx.clone()]);
@ -697,22 +747,18 @@ mod tests {
mempool_handle.push(tx.clone()).await.unwrap();
let current_height = sequencer
.produce_new_block_with_mempool_transactions()
.unwrap();
let block = sequencer
.block_store
.get_block_at_id(current_height)
.unwrap();
.unwrap()
.block_id;
let block = sequencer.store.get_block_at_id(current_height).unwrap();
assert_eq!(block.body.transactions, vec![tx.clone()]);
// Add same transaction should fail
mempool_handle.push(tx.clone()).await.unwrap();
let current_height = sequencer
.produce_new_block_with_mempool_transactions()
.unwrap();
let block = sequencer
.block_store
.get_block_at_id(current_height)
.unwrap();
.unwrap()
.block_id;
let block = sequencer.store.get_block_at_id(current_height).unwrap();
assert!(block.body.transactions.is_empty());
}
@ -743,11 +789,9 @@ mod tests {
mempool_handle.push(tx.clone()).await.unwrap();
let current_height = sequencer
.produce_new_block_with_mempool_transactions()
.unwrap();
let block = sequencer
.block_store
.get_block_at_id(current_height)
.unwrap();
.unwrap()
.block_id;
let block = sequencer.store.get_block_at_id(current_height).unwrap();
assert_eq!(block.body.transactions, vec![tx.clone()]);
}
@ -767,4 +811,42 @@ mod tests {
config.initial_accounts[1].balance + balance_to_move
);
}
#[test]
fn test_get_pending_blocks() {
let config = setup_sequencer_config();
let (mut sequencer, _mempool_handle) = SequencerCore::start_from_config(config);
sequencer
.produce_new_block_with_mempool_transactions()
.unwrap();
sequencer
.produce_new_block_with_mempool_transactions()
.unwrap();
sequencer
.produce_new_block_with_mempool_transactions()
.unwrap();
// Three produced blocks plus the genesis block (created via
// `into_pending_block`) are all still pending: nothing was finalized.
assert_eq!(sequencer.get_pending_blocks().unwrap().len(), 4);
}
#[test]
fn test_delete_blocks() {
let config = setup_sequencer_config();
let (mut sequencer, _mempool_handle) = SequencerCore::start_from_config(config);
sequencer
.produce_new_block_with_mempool_transactions()
.unwrap();
sequencer
.produce_new_block_with_mempool_transactions()
.unwrap();
sequencer
.produce_new_block_with_mempool_transactions()
.unwrap();
// Four blocks (genesis + three produced) are pending at this point.
let last_finalized_block = 3;
sequencer
.clean_finalized_blocks_from_db(last_finalized_block)
.unwrap();
// Cleaning through id 3 removes three pending blocks; only the newest
// pending block remains.
assert_eq!(sequencer.get_pending_blocks().unwrap().len(), 1);
}
}

View File

@ -18,8 +18,8 @@ use common::{
GetInitialTestnetAccountsRequest, GetLastBlockRequest, GetLastBlockResponse,
GetProgramIdsRequest, GetProgramIdsResponse, GetProofForCommitmentRequest,
GetProofForCommitmentResponse, GetTransactionByHashRequest,
GetTransactionByHashResponse, HelloRequest, HelloResponse, SendTxRequest,
SendTxResponse,
GetTransactionByHashResponse, HelloRequest, HelloResponse, PostIndexerMessageRequest,
PostIndexerMessageResponse, SendTxRequest, SendTxResponse,
},
},
transaction::{EncodedTransaction, NSSATransaction},
@ -44,6 +44,7 @@ pub const GET_ACCOUNTS_NONCES: &str = "get_accounts_nonces";
pub const GET_ACCOUNT: &str = "get_account";
pub const GET_PROOF_FOR_COMMITMENT: &str = "get_proof_for_commitment";
pub const GET_PROGRAM_IDS: &str = "get_program_ids";
pub const POST_INDEXER_MESSAGE: &str = "post_indexer_message";
pub const HELLO_FROM_SEQUENCER: &str = "HELLO_FROM_SEQUENCER";
@ -314,6 +315,18 @@ impl JsonHandler {
respond(response)
}
/// Handles the `post_indexer_message` RPC method.
/// Currently a stub: it only validates that the params parse as a
/// `PostIndexerMessageRequest` and replies with a fixed success status.
async fn process_indexer_message(&self, request: Request) -> Result<Value, RpcErr> {
let _indexer_post_req = PostIndexerMessageRequest::parse(Some(request.params))?;
// ToDo: Add indexer messages handling
let response = PostIndexerMessageResponse {
status: "Success".to_string(),
};
respond(response)
}
pub async fn process_request_internal(&self, request: Request) -> Result<Value, RpcErr> {
match request.method.as_ref() {
HELLO => self.process_temp_hello(request).await,
@ -329,6 +342,7 @@ impl JsonHandler {
GET_TRANSACTION_BY_HASH => self.process_get_transaction_by_hash(request).await,
GET_PROOF_FOR_COMMITMENT => self.process_get_proof_by_commitment(request).await,
GET_PROGRAM_IDS => self.process_get_program_ids(request).await,
POST_INDEXER_MESSAGE => self.process_indexer_message(request).await,
_ => Err(RpcErr(RpcError::method_not_found(request.method))),
}
}
@ -340,10 +354,13 @@ mod tests {
use base58::ToBase58;
use base64::{Engine, engine::general_purpose};
use common::{test_utils::sequencer_sign_key_for_testing, transaction::EncodedTransaction};
use common::{
sequencer_client::BasicAuth, test_utils::sequencer_sign_key_for_testing,
transaction::EncodedTransaction,
};
use sequencer_core::{
SequencerCore,
config::{AccountInitialData, SequencerConfig},
config::{AccountInitialData, BedrockConfig, SequencerConfig},
};
use serde_json::Value;
use tempfile::tempdir;
@ -388,11 +405,21 @@ mod tests {
initial_accounts,
initial_commitments: vec![],
signing_key: *sequencer_sign_key_for_testing().value(),
retry_pending_blocks_timeout_millis: 1000 * 60 * 4,
bedrock_config: Some(BedrockConfig {
channel_id: [42; 32].into(),
node_url: "http://localhost:8080".to_string(),
auth: Some(BasicAuth {
username: "user".to_string(),
password: None,
}),
}),
}
}
async fn components_for_tests() -> (JsonHandler, Vec<AccountInitialData>, EncodedTransaction) {
let config = sequencer_config_for_tests();
let (mut sequencer_core, mempool_handle) = SequencerCore::start_from_config(config);
let initial_accounts = sequencer_core.sequencer_config().initial_accounts.clone();

View File

@ -5,7 +5,8 @@
"is_genesis_random": true,
"max_num_tx_in_block": 20,
"mempool_max_size": 1000,
"block_create_timeout_millis": 10000,
"block_create_timeout_millis": 5000,
"retry_pending_blocks_timeout_millis": 7000,
"port": 3040,
"initial_accounts": [
{
@ -154,5 +155,12 @@
37,
37,
37
]
}
],
"bedrock_config": {
"channel_id": "0101010101010101010101010101010101010101010101010101010101010101",
"node_url": "http://localhost:8080",
"auth": {
"username": "user"
}
}
}

View File

@ -4,7 +4,7 @@ use actix_web::dev::ServerHandle;
use anyhow::Result;
use clap::Parser;
use common::rpc_primitives::RpcConfig;
use log::info;
use log::{info, warn};
use sequencer_core::{SequencerCore, config::SequencerConfig};
use sequencer_rpc::new_http_server;
use tokio::{sync::Mutex, task::JoinHandle};
@ -20,8 +20,14 @@ struct Args {
pub async fn startup_sequencer(
app_config: SequencerConfig,
) -> Result<(ServerHandle, SocketAddr, JoinHandle<Result<()>>)> {
) -> Result<(
ServerHandle,
SocketAddr,
JoinHandle<Result<()>>,
JoinHandle<Result<()>>,
)> {
let block_timeout = app_config.block_create_timeout_millis;
let retry_pending_blocks_timeout = app_config.retry_pending_blocks_timeout_millis;
let port = app_config.port;
let (sequencer_core, mempool_handle) = SequencerCore::start_from_config(app_config);
@ -39,8 +45,41 @@ pub async fn startup_sequencer(
let http_server_handle = http_server.handle();
tokio::spawn(http_server);
info!("Starting main sequencer loop");
info!("Starting pending block retry loop");
let seq_core_wrapped_for_block_retry = seq_core_wrapped.clone();
let retry_pending_blocks_handle = tokio::spawn(async move {
loop {
tokio::time::sleep(std::time::Duration::from_millis(
retry_pending_blocks_timeout,
))
.await;
let (pending_blocks, block_settlement_client) = {
let sequencer_core = seq_core_wrapped_for_block_retry.lock().await;
let client = sequencer_core.block_settlement_client();
let pending_blocks = sequencer_core
.get_pending_blocks()
.expect("Sequencer should be able to retrieve pending blocks");
(pending_blocks, client)
};
let Some(client) = block_settlement_client else {
continue;
};
info!("Resubmitting {} pending blocks", pending_blocks.len());
for block in &pending_blocks {
if let Err(e) = client.submit_block_to_bedrock(block).await {
warn!(
"Failed to resubmit block with id {} with error {}",
block.header.block_id, e
);
}
}
}
});
info!("Starting main sequencer loop");
let main_loop_handle = tokio::spawn(async move {
loop {
tokio::time::sleep(std::time::Duration::from_millis(block_timeout)).await;
@ -50,7 +89,9 @@ pub async fn startup_sequencer(
let id = {
let mut state = seq_core_wrapped.lock().await;
state.produce_new_block_with_mempool_transactions()?
state
.produce_new_block_and_post_to_settlement_layer()
.await?
};
info!("Block with id {id} created");
@ -59,7 +100,12 @@ pub async fn startup_sequencer(
}
});
Ok((http_server_handle, addr, main_loop_handle))
Ok((
http_server_handle,
addr,
main_loop_handle,
retry_pending_blocks_handle,
))
}
pub async fn main_runner() -> Result<()> {
@ -79,9 +125,26 @@ pub async fn main_runner() -> Result<()> {
}
// ToDo: Add restart on failures
let (_, _, main_loop_handle) = startup_sequencer(app_config).await?;
let (_, _, main_loop_handle, retry_loop_handle) = startup_sequencer(app_config).await?;
main_loop_handle.await??;
info!("Sequencer running. Monitoring concurrent tasks...");
tokio::select! {
res = main_loop_handle => {
match res {
Ok(inner_res) => warn!("Main loop exited unexpectedly: {:?}", inner_res),
Err(e) => warn!("Main loop task panicked: {:?}", e),
}
}
res = retry_loop_handle => {
match res {
Ok(inner_res) => warn!("Retry loop exited unexpectedly: {:?}", inner_res),
Err(e) => warn!("Retry loop task panicked: {:?}", e),
}
}
}
info!("Shutting down sequencer...");
Ok(())
}

View File

@ -9,3 +9,4 @@ common.workspace = true
thiserror.workspace = true
borsh.workspace = true
rocksdb.workspace = true
nssa.workspace = true

View File

@ -1,9 +1,10 @@
use std::{path::Path, sync::Arc};
use common::block::{Block, HashableBlockData};
use common::block::Block;
use error::DbError;
use nssa::V02State;
use rocksdb::{
BoundColumnFamily, ColumnFamilyDescriptor, DBWithThreadMode, MultiThreaded, Options,
BoundColumnFamily, ColumnFamilyDescriptor, DBWithThreadMode, MultiThreaded, Options, WriteBatch,
};
pub mod error;
@ -26,16 +27,18 @@ pub const DB_META_FIRST_BLOCK_IN_DB_KEY: &str = "first_block_in_db";
pub const DB_META_LAST_BLOCK_IN_DB_KEY: &str = "last_block_in_db";
/// Key base for storing metainformation which describe if first block has been set
pub const DB_META_FIRST_BLOCK_SET_KEY: &str = "first_block_set";
/// Key base for storing metainformation about the last finalized block on Bedrock
pub const DB_META_LAST_FINALIZED_BLOCK_ID: &str = "last_finalized_block_id";
/// Key base for storing snapshot which describe block id
pub const DB_SNAPSHOT_BLOCK_ID_KEY: &str = "block_id";
/// Key base for storing the NSSA state
pub const DB_NSSA_STATE_KEY: &str = "nssa_state";
/// Name of block column family
pub const CF_BLOCK_NAME: &str = "cf_block";
/// Name of meta column family
pub const CF_META_NAME: &str = "cf_meta";
/// Name of snapshot column family
pub const CF_SNAPSHOT_NAME: &str = "cf_snapshot";
/// Name of state column family
pub const CF_NSSA_STATE_NAME: &str = "cf_nssa_state";
pub type DbResult<T> = Result<T, DbError>;
@ -50,7 +53,7 @@ impl RocksDBIO {
// ToDo: Add more column families for different data
let cfb = ColumnFamilyDescriptor::new(CF_BLOCK_NAME, cf_opts.clone());
let cfmeta = ColumnFamilyDescriptor::new(CF_META_NAME, cf_opts.clone());
let cfsnapshot = ColumnFamilyDescriptor::new(CF_SNAPSHOT_NAME, cf_opts.clone());
let cfstate = ColumnFamilyDescriptor::new(CF_NSSA_STATE_NAME, cf_opts.clone());
let mut db_opts = Options::default();
db_opts.create_missing_column_families(true);
@ -58,7 +61,7 @@ impl RocksDBIO {
let db = DBWithThreadMode::<MultiThreaded>::open_cf_descriptors(
&db_opts,
path,
vec![cfb, cfmeta, cfsnapshot],
vec![cfb, cfmeta, cfstate],
);
let dbio = Self {
@ -75,6 +78,7 @@ impl RocksDBIO {
dbio.put_meta_first_block_in_db(block)?;
dbio.put_meta_is_first_block_set()?;
dbio.put_meta_last_block_in_db(block_id)?;
dbio.put_meta_last_finalized_block_id(None)?;
Ok(dbio)
} else {
@ -89,7 +93,7 @@ impl RocksDBIO {
// ToDo: Add more column families for different data
let _cfb = ColumnFamilyDescriptor::new(CF_BLOCK_NAME, cf_opts.clone());
let _cfmeta = ColumnFamilyDescriptor::new(CF_META_NAME, cf_opts.clone());
let _cfsnapshot = ColumnFamilyDescriptor::new(CF_SNAPSHOT_NAME, cf_opts.clone());
let _cfstate = ColumnFamilyDescriptor::new(CF_NSSA_STATE_NAME, cf_opts.clone());
let mut db_opts = Options::default();
db_opts.create_missing_column_families(true);
@ -106,8 +110,8 @@ impl RocksDBIO {
self.db.cf_handle(CF_BLOCK_NAME).unwrap()
}
pub fn snapshot_column(&self) -> Arc<BoundColumnFamily<'_>> {
self.db.cf_handle(CF_SNAPSHOT_NAME).unwrap()
pub fn nssa_state_column(&self) -> Arc<BoundColumnFamily<'_>> {
self.db.cf_handle(CF_NSSA_STATE_NAME).unwrap()
}
pub fn get_meta_first_block_in_db(&self) -> DbResult<u64> {
@ -186,6 +190,24 @@ impl RocksDBIO {
Ok(res.is_some())
}
/// Stages a write of the Borsh-serialized NSSA `state` under
/// `DB_NSSA_STATE_KEY` into `batch`; nothing is persisted until the batch
/// itself is written to the database.
pub fn put_nssa_state_in_db(&self, state: &V02State, batch: &mut WriteBatch) -> DbResult<()> {
    let key = borsh::to_vec(&DB_NSSA_STATE_KEY).map_err(|err| {
        DbError::borsh_cast_message(
            err,
            Some("Failed to serialize DB_NSSA_STATE_KEY".to_string()),
        )
    })?;
    let value = borsh::to_vec(state).map_err(|err| {
        DbError::borsh_cast_message(err, Some("Failed to serialize NSSA state".to_string()))
    })?;
    batch.put_cf(&self.nssa_state_column(), key, value);
    Ok(())
}
pub fn put_meta_first_block_in_db(&self, block: Block) -> DbResult<()> {
let cf_meta = self.meta_column();
self.db
@ -206,7 +228,15 @@ impl RocksDBIO {
)
.map_err(|rerr| DbError::rocksdb_cast_message(rerr, None))?;
self.put_block(block, true)?;
let mut batch = WriteBatch::default();
self.put_block(block, true, &mut batch)?;
self.db.write(batch).map_err(|rerr| {
DbError::rocksdb_cast_message(
rerr,
Some("Failed to write first block in db".to_string()),
)
})?;
Ok(())
}
@ -232,6 +262,28 @@ impl RocksDBIO {
Ok(())
}
/// Persists the id of the last Bedrock-finalized block (`None` when no
/// block has been finalized yet) in the meta column family.
pub fn put_meta_last_finalized_block_id(&self, block_id: Option<u64>) -> DbResult<()> {
    let key = borsh::to_vec(&DB_META_LAST_FINALIZED_BLOCK_ID).map_err(|err| {
        DbError::borsh_cast_message(
            err,
            Some("Failed to serialize DB_META_LAST_FINALIZED_BLOCK_ID".to_string()),
        )
    })?;
    let value = borsh::to_vec(&block_id).map_err(|err| {
        DbError::borsh_cast_message(
            err,
            Some("Failed to serialize last block id".to_string()),
        )
    })?;
    self.db
        .put_cf(&self.meta_column(), key, value)
        .map_err(|rerr| DbError::rocksdb_cast_message(rerr, None))?;
    Ok(())
}
pub fn put_meta_is_first_block_set(&self) -> DbResult<()> {
let cf_meta = self.meta_column();
self.db
@ -249,7 +301,7 @@ impl RocksDBIO {
Ok(())
}
pub fn put_block(&self, block: Block, first: bool) -> DbResult<()> {
pub fn put_block(&self, block: Block, first: bool, batch: &mut WriteBatch) -> DbResult<()> {
let cf_block = self.block_column();
if !first {
@ -260,27 +312,19 @@ impl RocksDBIO {
}
}
self.db
.put_cf(
&cf_block,
borsh::to_vec(&block.header.block_id).map_err(|err| {
DbError::borsh_cast_message(
err,
Some("Failed to serialize block id".to_string()),
)
})?,
borsh::to_vec(&HashableBlockData::from(block)).map_err(|err| {
DbError::borsh_cast_message(
err,
Some("Failed to serialize block data".to_string()),
)
})?,
)
.map_err(|rerr| DbError::rocksdb_cast_message(rerr, None))?;
batch.put_cf(
&cf_block,
borsh::to_vec(&block.header.block_id).map_err(|err| {
DbError::borsh_cast_message(err, Some("Failed to serialize block id".to_string()))
})?,
borsh::to_vec(&block).map_err(|err| {
DbError::borsh_cast_message(err, Some("Failed to serialize block data".to_string()))
})?,
);
Ok(())
}
pub fn get_block(&self, block_id: u64) -> DbResult<HashableBlockData> {
pub fn get_block(&self, block_id: u64) -> DbResult<Block> {
let cf_block = self.block_column();
let res = self
.db
@ -296,14 +340,12 @@ impl RocksDBIO {
.map_err(|rerr| DbError::rocksdb_cast_message(rerr, None))?;
if let Some(data) = res {
Ok(
borsh::from_slice::<HashableBlockData>(&data).map_err(|serr| {
DbError::borsh_cast_message(
serr,
Some("Failed to deserialize block data".to_string()),
)
})?,
)
Ok(borsh::from_slice::<Block>(&data).map_err(|serr| {
DbError::borsh_cast_message(
serr,
Some("Failed to deserialize block data".to_string()),
)
})?)
} else {
Err(DbError::db_interaction_error(
"Block on this id not found".to_string(),
@ -311,32 +353,90 @@ impl RocksDBIO {
}
}
pub fn get_snapshot_block_id(&self) -> DbResult<u64> {
let cf_snapshot = self.snapshot_column();
pub fn get_nssa_state(&self) -> DbResult<V02State> {
let cf_nssa_state = self.nssa_state_column();
let res = self
.db
.get_cf(
&cf_snapshot,
borsh::to_vec(&DB_SNAPSHOT_BLOCK_ID_KEY).map_err(|err| {
&cf_nssa_state,
borsh::to_vec(&DB_NSSA_STATE_KEY).map_err(|err| {
DbError::borsh_cast_message(
err,
Some("Failed to serialize DB_SNAPSHOT_BLOCK_ID_KEY".to_string()),
Some("Failed to serialize block id".to_string()),
)
})?,
)
.map_err(|rerr| DbError::rocksdb_cast_message(rerr, None))?;
if let Some(data) = res {
Ok(borsh::from_slice::<u64>(&data).map_err(|err| {
Ok(borsh::from_slice::<V02State>(&data).map_err(|serr| {
DbError::borsh_cast_message(
err,
Some("Failed to deserialize last block".to_string()),
serr,
Some("Failed to deserialize block data".to_string()),
)
})?)
} else {
Err(DbError::db_interaction_error(
"Snapshot block ID not found".to_string(),
"Block on this id not found".to_string(),
))
}
}
/// Removes the block stored under `block_id`.
///
/// Returns a db interaction error when no block exists at that id.
pub fn delete_block(&self, block_id: u64) -> DbResult<()> {
    let cf_block = self.block_column();
    let key = borsh::to_vec(&block_id).map_err(|err| {
        DbError::borsh_cast_message(err, Some("Failed to serialize block id".to_string()))
    })?;
    // Probe first so a missing block is reported explicitly instead of
    // silently succeeding (RocksDB deletes are no-ops on absent keys).
    let existing = self
        .db
        .get_cf(&cf_block, &key)
        .map_err(|rerr| DbError::rocksdb_cast_message(rerr, None))?;
    match existing {
        Some(_) => self
            .db
            .delete_cf(&cf_block, key)
            .map_err(|rerr| DbError::rocksdb_cast_message(rerr, None)),
        None => Err(DbError::db_interaction_error(
            "Block on this id not found".to_string(),
        )),
    }
}
/// Iterates over all stored blocks, deserializing each value from its
/// Borsh encoding.
/// NOTE(review): keys are Borsh-encoded (little-endian) u64s, so RocksDB's
/// byte-lexicographic iteration is NOT numeric block-id order — confirm no
/// caller relies on ordering.
pub fn get_all_blocks(&self) -> impl Iterator<Item = DbResult<Block>> {
let cf_block = self.block_column();
self.db
.iterator_cf(&cf_block, rocksdb::IteratorMode::Start)
.map(|res| {
// Each item is a (key, value) pair; only the value (the block) is used.
let (_key, value) = res.map_err(|rerr| {
DbError::rocksdb_cast_message(
rerr,
Some("Failed to get key value pair".to_string()),
)
})?;
borsh::from_slice::<Block>(&value).map_err(|err| {
DbError::borsh_cast_message(
err,
Some("Failed to deserialize block data".to_string()),
)
})
})
}
/// Atomically persists `block` together with the NSSA `state` snapshot.
///
/// Both writes are staged into a single rocksdb `WriteBatch`, so either both
/// land or neither does.
///
/// # Errors
/// Propagates serialization failures from the `put_*` helpers, and wraps the
/// final batch-write failure with the block id for context.
pub fn atomic_update(&self, block: Block, state: &V02State) -> DbResult<()> {
    // Capture the id before `block` is moved into the batch helper.
    let block_id = block.header.block_id;
    let mut batch = WriteBatch::default();
    self.put_block(block, false, &mut batch)?;
    self.put_nssa_state_in_db(state, &mut batch)?;
    self.db.write(batch).map_err(|rerr| {
        DbError::rocksdb_cast_message(
            rerr,
            // Fixed typo in the error message: "udpate" -> "update".
            Some(format!("Failed to update db with block {block_id}")),
        )
    })
}
}

View File

@ -0,0 +1,38 @@
use nssa_core::program::{AccountPostState, ProgramInput, read_nssa_inputs, write_nssa_outputs};
type Instruction = (Option<Vec<u8>>, bool);
/// A program that optionally modifies the account data and optionally claims it.
fn main() {
    let (inputs, instruction_words) = read_nssa_inputs::<Instruction>();
    let ProgramInput {
        pre_states,
        instruction: (data_opt, should_claim),
    } = inputs;

    // This program operates on exactly one account; anything else is a no-op.
    let [pre] = match pre_states.try_into() {
        Ok(single) => single,
        Err(_) => return,
    };

    let mut updated = pre.account.clone();
    if let Some(new_data) = data_opt {
        // Replacement data was supplied; it must respect the account data limit.
        updated.data = new_data
            .try_into()
            .expect("provided data should fit into data limit");
    }

    // Claim or not based on the boolean flag.
    let post_state = match should_claim {
        true => AccountPostState::new_claimed(updated),
        false => AccountPostState::new(updated),
    };

    write_nssa_outputs(instruction_words, vec![pre], vec![post_state]);
}

View File

@ -0,0 +1,53 @@
use nssa_core::{
account::AccountWithMetadata,
program::{
AccountPostState, ChainedCall, ProgramId, ProgramInput, read_nssa_inputs,
write_nssa_outputs_with_chained_call,
},
};
use risc0_zkvm::serde::to_vec;
type Instruction = (u128, ProgramId);
/// A malicious test program that attempts to change authorization status.
/// It accepts two accounts and executes a native token transfer program via chain call,
/// but sets the `is_authorized` field of the first account to true.
fn main() {
    let (
        ProgramInput {
            pre_states,
            instruction: (balance, transfer_program_id),
        },
        instruction_words,
    ) = read_nssa_inputs::<Instruction>();

    // Expect exactly a (sender, receiver) pair; otherwise do nothing.
    let [sender, receiver] = match pre_states.try_into() {
        Ok(pair) => pair,
        Err(_) => return,
    };

    // Forge an authorized copy of the sender to smuggle into the chained call.
    let forged_sender = AccountWithMetadata {
        is_authorized: true,
        ..sender.clone()
    };

    let chained_call = ChainedCall {
        program_id: transfer_program_id,
        instruction_data: to_vec(&balance).unwrap(),
        pre_states: vec![forged_sender, receiver.clone()],
        pda_seeds: vec![],
    };

    write_nssa_outputs_with_chained_call(
        instruction_words,
        vec![sender.clone(), receiver.clone()],
        vec![
            AccountPostState::new(sender.account),
            AccountPostState::new(receiver.account),
        ],
        vec![chained_call],
    );
}

View File

@ -29,3 +29,4 @@ risc0-zkvm.workspace = true
async-stream = "0.3.6"
indicatif = { version = "0.18.3", features = ["improved_unicode"] }
optfield = "0.4.0"
url.workspace = true

View File

@ -19,7 +19,7 @@ pub enum ChainSubcommand {
/// Get transaction at hash from sequencer
Transaction {
/// hash - valid 32 byte hex string
#[arg(short, long)]
#[arg(short = 't', long)]
hash: String,
},
}

View File

@ -1,10 +1,10 @@
use std::{
io::{BufReader, Write as _},
path::Path,
str::FromStr,
};
use anyhow::{Context as _, Result};
use common::sequencer_client::BasicAuth;
use key_protocol::key_management::{
KeyChain,
key_tree::{
@ -14,49 +14,6 @@ use key_protocol::key_management::{
use log::warn;
use serde::{Deserialize, Serialize};
/// Credentials for basic authentication, parsed from `"user"` or `"user:password"`.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct BasicAuth {
    // User name; always present.
    pub username: String,
    // Optional password; `None` when only a user name was supplied.
    pub password: Option<String>,
}
impl std::fmt::Display for BasicAuth {
    /// Renders as `username` or `username:password`, mirroring the parse format.
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        match &self.password {
            Some(password) => write!(f, "{}:{password}", self.username),
            None => write!(f, "{}", self.username),
        }
    }
}
impl FromStr for BasicAuth {
    type Err = anyhow::Error;

    /// Parses `"user"` or `"user:password"`.
    ///
    /// Everything after the first `:` (including further colons) belongs to the
    /// password; an empty password (`"user:"`) is treated as absent.
    fn from_str(s: &str) -> Result<Self, Self::Err> {
        // `splitn(2, ':')` yields at most two fragments, so the dead
        // third-fragment check from the original was removed: it could
        // never trigger.
        let mut parts = s.splitn(2, ':');
        let username = parts.next().ok_or_else(|| {
            anyhow::anyhow!("Invalid auth format. Expected 'user' or 'user:password'")
        })?;
        // Treat an empty password ("user:") the same as no password at all.
        let password = parts.next().filter(|p| !p.is_empty());
        Ok(Self {
            username: username.to_string(),
            password: password.map(str::to_string),
        })
    }
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct InitialAccountDataPublic {
pub account_id: String,

View File

@ -23,6 +23,7 @@ use nssa_core::{
};
pub use privacy_preserving_tx::PrivacyPreservingAccount;
use tokio::io::AsyncWriteExt;
use url::Url;
use crate::{
config::{PersistentStorage, WalletConfigOverrides},
@ -188,13 +189,9 @@ impl WalletCore {
config.apply_overrides(config_overrides);
}
let basic_auth = config
.basic_auth
.as_ref()
.map(|auth| (auth.username.clone(), auth.password.clone()));
let sequencer_client = Arc::new(SequencerClient::new_with_auth(
config.sequencer_addr.clone(),
basic_auth,
Url::parse(&config.sequencer_addr)?,
config.basic_auth.clone(),
)?);
let tx_poller = TxPoller::new(config.clone(), Arc::clone(&sequencer_client));
@ -375,7 +372,7 @@ impl WalletCore {
pub async fn send_privacy_preserving_tx(
&self,
accounts: Vec<PrivacyPreservingAccount>,
instruction_data: &InstructionData,
instruction_data: InstructionData,
program: &ProgramWithDependencies,
) -> Result<(SendTxResponse, Vec<SharedSecretKey>), ExecutionFailureKind> {
self.send_privacy_preserving_tx_with_pre_check(accounts, instruction_data, program, |_| {
@ -387,7 +384,7 @@ impl WalletCore {
pub async fn send_privacy_preserving_tx_with_pre_check(
&self,
accounts: Vec<PrivacyPreservingAccount>,
instruction_data: &InstructionData,
instruction_data: InstructionData,
program: &ProgramWithDependencies,
tx_pre_check: impl FnOnce(&[&Account]) -> Result<(), ExecutionFailureKind>,
) -> Result<(SendTxResponse, Vec<SharedSecretKey>), ExecutionFailureKind> {
@ -403,16 +400,16 @@ impl WalletCore {
let private_account_keys = acc_manager.private_account_keys();
let (output, proof) = nssa::privacy_preserving_transaction::circuit::execute_and_prove(
&pre_states,
pre_states,
instruction_data,
acc_manager.visibility_mask(),
&produce_random_nonces(private_account_keys.len()),
&private_account_keys
acc_manager.visibility_mask().to_vec(),
produce_random_nonces(private_account_keys.len()),
private_account_keys
.iter()
.map(|keys| (keys.npk.clone(), keys.ssk))
.collect::<Vec<_>>(),
&acc_manager.private_account_auth(),
&acc_manager.private_account_membership_proofs(),
acc_manager.private_account_auth(),
acc_manager.private_account_membership_proofs(),
&program.to_owned(),
)
.unwrap();

View File

@ -19,7 +19,7 @@ impl NativeTokenTransfer<'_> {
PrivacyPreservingAccount::PrivateOwned(from),
PrivacyPreservingAccount::Public(to),
],
&instruction_data,
instruction_data,
&program.into(),
tx_pre_check,
)

View File

@ -17,7 +17,7 @@ impl NativeTokenTransfer<'_> {
self.0
.send_privacy_preserving_tx(
vec![PrivacyPreservingAccount::PrivateOwned(from)],
&Program::serialize_instruction(instruction).unwrap(),
Program::serialize_instruction(instruction).unwrap(),
&Program::authenticated_transfer_program().into(),
)
.await
@ -46,7 +46,7 @@ impl NativeTokenTransfer<'_> {
ipk: to_ipk,
},
],
&instruction_data,
instruction_data,
&program.into(),
tx_pre_check,
)
@ -73,7 +73,7 @@ impl NativeTokenTransfer<'_> {
PrivacyPreservingAccount::PrivateOwned(from),
PrivacyPreservingAccount::PrivateOwned(to),
],
&instruction_data,
instruction_data,
&program.into(),
tx_pre_check,
)

Some files were not shown because too many files have changed in this diff Show More