merge master

This commit is contained in:
Petar Radovic 2026-04-29 10:33:07 +02:00
commit 6bcbe058ed
304 changed files with 18790 additions and 9498 deletions

View File

@ -13,6 +13,7 @@ ignore = [
{ id = "RUSTSEC-2025-0055", reason = "`tracing-subscriber` v0.2.25 pulled in by ark-relations v0.4.0 - will be addressed before mainnet" },
{ id = "RUSTSEC-2025-0141", reason = "`bincode` is unmaintained, but we continue to use it for now." },
{ id = "RUSTSEC-2023-0089", reason = "atomic-polyfill is pulled transitively via risc0-zkvm; waiting on upstream fix (see https://github.com/risc0/risc0/issues/3453)" },
{ id = "RUSTSEC-2026-0097", reason = "`rand` v0.8.5 is present transitively from logos crates, modification may break integration" },
]
yanked = "deny"
unused-ignored-advisory = "deny"

View File

@ -26,11 +26,20 @@ Thumbs.db
ci_scripts/
# Documentation
docs/
*.md
!README.md
# Configs (copy selectively if needed)
# Non-build project files
completions/
configs/
# License
Justfile
clippy.toml
rustfmt.toml
flake.nix
flake.lock
LICENSE
# Docker compose files (not needed inside build)
docker-compose*.yml
**/docker-compose*.yml

View File

@ -11,6 +11,10 @@ on:
- "**.md"
- "!.github/workflows/*.yml"
permissions:
contents: read
pull-requests: read
name: General
jobs:
@ -19,7 +23,7 @@ jobs:
steps:
- uses: actions/checkout@v5
with:
ref: ${{ github.head_ref }}
ref: ${{ github.event.pull_request.head.sha || github.head_ref }}
- name: Install nightly toolchain for rustfmt
run: rustup install nightly --profile minimal --component rustfmt
@ -32,7 +36,7 @@ jobs:
steps:
- uses: actions/checkout@v5
with:
ref: ${{ github.head_ref }}
ref: ${{ github.event.pull_request.head.sha || github.head_ref }}
- name: Install taplo-cli
run: cargo install --locked taplo-cli
@ -45,7 +49,7 @@ jobs:
steps:
- uses: actions/checkout@v5
with:
ref: ${{ github.head_ref }}
ref: ${{ github.event.pull_request.head.sha || github.head_ref }}
- name: Install active toolchain
run: rustup install
@ -61,7 +65,7 @@ jobs:
steps:
- uses: actions/checkout@v5
with:
ref: ${{ github.head_ref }}
ref: ${{ github.event.pull_request.head.sha || github.head_ref }}
- name: Install cargo-deny
run: cargo install --locked cargo-deny
@ -77,7 +81,7 @@ jobs:
steps:
- uses: actions/checkout@v5
with:
ref: ${{ github.head_ref }}
ref: ${{ github.event.pull_request.head.sha || github.head_ref }}
- uses: ./.github/actions/install-system-deps
@ -106,7 +110,7 @@ jobs:
steps:
- uses: actions/checkout@v5
with:
ref: ${{ github.head_ref }}
ref: ${{ github.event.pull_request.head.sha || github.head_ref }}
- uses: ./.github/actions/install-system-deps
@ -126,7 +130,7 @@ jobs:
env:
RISC0_DEV_MODE: "1"
RUST_LOG: "info"
run: cargo nextest run --workspace --exclude integration_tests
run: cargo nextest run --workspace --exclude integration_tests --all-features
integration-tests:
runs-on: ubuntu-latest
@ -134,7 +138,7 @@ jobs:
steps:
- uses: actions/checkout@v5
with:
ref: ${{ github.head_ref }}
ref: ${{ github.event.pull_request.head.sha || github.head_ref }}
- uses: ./.github/actions/install-system-deps
@ -162,7 +166,7 @@ jobs:
steps:
- uses: actions/checkout@v5
with:
ref: ${{ github.head_ref }}
ref: ${{ github.event.pull_request.head.sha || github.head_ref }}
- uses: ./.github/actions/install-system-deps
@ -182,7 +186,7 @@ jobs:
env:
RISC0_DEV_MODE: "1"
RUST_LOG: "info"
run: cargo nextest run -p integration_tests indexer -- --skip tps_test
run: cargo nextest run -p integration_tests indexer -- --skip tps_test
valid-proof-test:
runs-on: ubuntu-latest
@ -190,7 +194,7 @@ jobs:
steps:
- uses: actions/checkout@v5
with:
ref: ${{ github.head_ref }}
ref: ${{ github.event.pull_request.head.sha || github.head_ref }}
- uses: ./.github/actions/install-system-deps
@ -216,7 +220,7 @@ jobs:
steps:
- uses: actions/checkout@v5
with:
ref: ${{ github.head_ref }}
ref: ${{ github.event.pull_request.head.sha || github.head_ref }}
- uses: ./.github/actions/install-risc0

View File

@ -12,12 +12,12 @@ jobs:
strategy:
matrix:
include:
- name: sequencer_runner
dockerfile: ./sequencer_runner/Dockerfile
- name: sequencer_service
dockerfile: ./sequencer/service/Dockerfile
build_args: |
STANDALONE=false
- name: sequencer_runner-standalone
dockerfile: ./sequencer_runner/Dockerfile
- name: sequencer_service-standalone
dockerfile: ./sequencer/service/Dockerfile
build_args: |
STANDALONE=true
- name: indexer_service
@ -50,7 +50,7 @@ jobs:
type=ref,event=pr
type=semver,pattern={{version}}
type=semver,pattern={{major}}.{{minor}}
type=sha,prefix={{branch}}-
type=sha,prefix=sha-
type=raw,value=latest,enable={{is_default_branch}}
- name: Build and push Docker image

2
.gitignore vendored
View File

@ -6,7 +6,7 @@ data/
.idea/
.vscode/
rocksdb
sequencer_runner/data/
sequencer/service/data/
storage.json
result
wallet-ffi/wallet_ffi.h

2060
Cargo.lock generated

File diff suppressed because it is too large Load Diff

View File

@ -15,11 +15,15 @@ members = [
"nssa/core",
"programs/amm/core",
"programs/amm",
"programs/clock/core",
"programs/token/core",
"programs/token",
"sequencer_core",
"sequencer_rpc",
"sequencer_runner",
"programs/associated_token_account/core",
"programs/associated_token_account",
"sequencer/core",
"sequencer/service",
"sequencer/service/protocol",
"sequencer/service/rpc",
"indexer/core",
"indexer/service",
"indexer/service/protocol",
@ -32,7 +36,8 @@ members = [
"examples/program_deployment",
"examples/program_deployment/methods",
"examples/program_deployment/methods/guest",
"bedrock_client",
"testnet_initial_state",
"indexer_ffi",
]
[workspace.dependencies]
@ -42,21 +47,26 @@ common = { path = "common" }
mempool = { path = "mempool" }
storage = { path = "storage" }
key_protocol = { path = "key_protocol" }
sequencer_core = { path = "sequencer_core" }
sequencer_rpc = { path = "sequencer_rpc" }
sequencer_runner = { path = "sequencer_runner" }
sequencer_core = { path = "sequencer/core" }
sequencer_service_protocol = { path = "sequencer/service/protocol" }
sequencer_service_rpc = { path = "sequencer/service/rpc" }
sequencer_service = { path = "sequencer/service" }
indexer_core = { path = "indexer/core" }
indexer_service = { path = "indexer/service" }
indexer_service_protocol = { path = "indexer/service/protocol" }
indexer_service_rpc = { path = "indexer/service/rpc" }
wallet = { path = "wallet" }
wallet-ffi = { path = "wallet-ffi", default-features = false }
indexer_ffi = { path = "indexer_ffi" }
clock_core = { path = "programs/clock/core" }
token_core = { path = "programs/token/core" }
token_program = { path = "programs/token" }
amm_core = { path = "programs/amm/core" }
amm_program = { path = "programs/amm" }
ata_core = { path = "programs/associated_token_account/core" }
ata_program = { path = "programs/associated_token_account" }
test_program_methods = { path = "test_program_methods" }
bedrock_client = { path = "bedrock_client" }
testnet_initial_state = { path = "testnet_initial_state" }
tokio = { version = "1.50", features = [
"net",
@ -110,12 +120,12 @@ tokio-retry = "0.3.0"
schemars = "1.2"
async-stream = "0.3.6"
logos-blockchain-common-http-client = { git = "https://github.com/logos-blockchain/logos-blockchain.git" }
logos-blockchain-key-management-system-service = { git = "https://github.com/logos-blockchain/logos-blockchain.git" }
logos-blockchain-core = { git = "https://github.com/logos-blockchain/logos-blockchain.git" }
logos-blockchain-chain-broadcast-service = { git = "https://github.com/logos-blockchain/logos-blockchain.git" }
logos-blockchain-chain-service = { git = "https://github.com/logos-blockchain/logos-blockchain.git" }
logos-blockchain-zone-sdk = { git = "https://github.com/logos-blockchain/logos-blockchain.git" }
logos-blockchain-common-http-client = { git = "https://github.com/logos-blockchain/logos-blockchain.git", rev = "f1b2a8c8e1dde0151a663795656ad3d855056d71" }
logos-blockchain-key-management-system-service = { git = "https://github.com/logos-blockchain/logos-blockchain.git", rev = "f1b2a8c8e1dde0151a663795656ad3d855056d71" }
logos-blockchain-core = { git = "https://github.com/logos-blockchain/logos-blockchain.git", rev = "f1b2a8c8e1dde0151a663795656ad3d855056d71" }
logos-blockchain-chain-broadcast-service = { git = "https://github.com/logos-blockchain/logos-blockchain.git", rev = "f1b2a8c8e1dde0151a663795656ad3d855056d71" }
logos-blockchain-chain-service = { git = "https://github.com/logos-blockchain/logos-blockchain.git", rev = "f1b2a8c8e1dde0151a663795656ad3d855056d71" }
logos-blockchain-zone-sdk = { git = "https://github.com/logos-blockchain/logos-blockchain.git", rev = "f1b2a8c8e1dde0151a663795656ad3d855056d71" }
rocksdb = { version = "0.24.0", default-features = false, features = [
"snappy",
@ -143,6 +153,14 @@ opt-level = 'z'
lto = true
codegen-units = 1
# Keep backtraces but drop full DWARF type info to avoid LLD OOM/SIGBUS when
# linking large integration-test binaries on resource-constrained CI runners.
[profile.dev]
debug = "line-tables-only"
[profile.test]
debug = "line-tables-only"
[workspace.lints.rust]
warnings = "deny"

View File

@ -30,10 +30,10 @@ run-bedrock:
docker compose up
# Run Sequencer
[working-directory: 'sequencer_runner']
[working-directory: 'sequencer/service']
run-sequencer:
@echo "🧠 Running sequencer"
RUST_LOG=info RISC0_DEV_MODE=1 cargo run --release -p sequencer_runner configs/debug
RUST_LOG=info RISC0_DEV_MODE=1 cargo run --release -p sequencer_service configs/debug/sequencer_config.json
# Run Indexer
[working-directory: 'indexer/service']
@ -62,9 +62,9 @@ run-wallet +args:
# Clean runtime data
clean:
@echo "🧹 Cleaning run artifacts"
rm -rf sequencer_runner/bedrock_signing_key
rm -rf sequencer_runner/rocksdb
rm -rf sequencer_runner/zone_sdk_checkpoint.json
rm -rf sequencer/service/bedrock_signing_key
rm -rf sequencer/service/rocksdb
rm -rf sequencer/service/zone_sdk_checkpoint.json
rm -rf indexer/service/rocksdb
rm -rf indexer/service/zone_sdk_indexer_cursor.json
rm -rf wallet/configs/debug/storage.json

View File

@ -161,7 +161,7 @@ The sequencer and logos blockchain node can be run locally:
- `RUST_LOG=info cargo run -p indexer_service indexer/service/configs/indexer_config.json`
3. On another terminal go to the `logos-blockchain/lssa` repo and run the sequencer:
- `RUST_LOG=info cargo run -p sequencer_runner sequencer_runner/configs/debug`
- `RUST_LOG=info cargo run -p sequencer_service sequencer/service/configs/debug/sequencer_config.json`
4. (To run the explorer): on another terminal go to `logos-blockchain/lssa/explorer_service` and run the following:
- `cargo install cargo-leptos`
- `cargo leptos build --release`
@ -171,8 +171,8 @@ The sequencer and logos blockchain node can be run locally:
After stopping the services above you need to remove the following items to start cleanly:
1. In the `logos-blockchain/logos-blockchain` folder `state` (not needed in case of docker setup)
2. In the `lssa` folder `sequencer_runner/rocksdb`
3. In the `lssa` file `sequencer_runner/bedrock_signing_key`
2. In the `lssa` folder `sequencer/service/rocksdb`
3. In the `lssa` folder the file `sequencer/service/bedrock_signing_key`
4. In the `lssa` folder `indexer/service/rocksdb`
### Normal mode (`just` commands)
@ -220,7 +220,7 @@ This will use a wallet binary built from this repo and not the one installed in
### Standalone mode
The sequencer can be run in standalone mode with:
```bash
RUST_LOG=info cargo run --features standalone -p sequencer_runner sequencer_runner/configs/debug
RUST_LOG=info cargo run --features standalone -p sequencer_service sequencer/service/configs/debug
```
## Running with Docker

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

View File

@ -1,12 +0,0 @@
port: 4400
n_hosts: 4
timeout: 10
# Tracing
tracing_settings:
logger: Stdout
tracing: None
filter: None
metrics: None
console: None
level: DEBUG

View File

@ -0,0 +1,82 @@
blend:
common:
num_blend_layers: 3
minimum_network_size: 30
protocol_name: /blend/integration-tests
data_replication_factor: 0
core:
scheduler:
cover:
message_frequency_per_round: 1.0
intervals_for_safety_buffer: 100
delayer:
maximum_release_delay_in_rounds: 3
minimum_messages_coefficient: 1
normalization_constant: 1.03
activity_threshold_sensitivity: 1
network:
kademlia_protocol_name: /integration/logos-blockchain/kad/1.0.0
identify_protocol_name: /integration/logos-blockchain/identify/1.0.0
chain_sync_protocol_name: /integration/logos-blockchain/chainsync/1.0.0
cryptarchia:
epoch_config:
epoch_stake_distribution_stabilization: 3
epoch_period_nonce_buffer: 3
epoch_period_nonce_stabilization: 4
security_param: 10
slot_activation_coeff:
numerator: 1
denominator: 2
learning_rate: 0.1
sdp_config:
service_params:
BN:
lock_period: 10
inactivity_period: 1
retention_period: 1
timestamp: 0
min_stake:
threshold: 1
timestamp: 0
gossipsub_protocol: /integration/logos-blockchain/cryptarchia/proto/1.0.0
genesis_state:
mantle_tx:
ops:
- opcode: 0
payload:
inputs: [ ]
outputs:
- value: 1
pk: d204000000000000000000000000000000000000000000000000000000000000
- value: 100
pk: 2e03b2eff5a45478e7e79668d2a146cf2c5c7925bce927f2b1c67f2ab4fc0d26
- opcode: 17
payload:
channel_id: "0000000000000000000000000000000000000000000000000000000000000000"
inscription: [ 103, 101, 110, 101, 115, 105, 115 ] # "genesis" in bytes
parent: "0000000000000000000000000000000000000000000000000000000000000000"
signer: "0000000000000000000000000000000000000000000000000000000000000000"
execution_gas_price: 0
storage_gas_price: 0
ops_proofs:
- !ZkSig
pi_a: [
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
]
pi_b: [
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
]
pi_c: [
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
]
- NoProof
time:
slot_duration: '1.0'
chain_start_time: PLACEHOLDER_CHAIN_START_TIME
mempool:
pubsub_topic: mantle_e2e_tests

View File

@ -1,46 +1,12 @@
services:
cfgsync:
image: ghcr.io/logos-blockchain/logos-blockchain@sha256:000982e751dfd346ca5346b8025c685fc3abc585079c59cde3bde7fd63100657
volumes:
- ./scripts:/etc/logos-blockchain/scripts
- ./cfgsync.yaml:/etc/logos-blockchain/cfgsync.yaml:z
entrypoint: /etc/logos-blockchain/scripts/run_cfgsync.sh
logos-blockchain-node-0:
image: ghcr.io/logos-blockchain/logos-blockchain@sha256:000982e751dfd346ca5346b8025c685fc3abc585079c59cde3bde7fd63100657
image: ghcr.io/logos-blockchain/logos-blockchain@sha256:c5243681b353278cabb562a176f0a5cfbefc2056f18cebc47fe0e3720c29fb12
ports:
- "${PORT:-8080}:18080/tcp"
volumes:
- ./scripts:/etc/logos-blockchain/scripts
- ./kzgrs_test_params:/kzgrs_test_params:z
depends_on:
- cfgsync
entrypoint: /etc/logos-blockchain/scripts/run_logos_blockchain_node.sh
logos-blockchain-node-1:
image: ghcr.io/logos-blockchain/logos-blockchain@sha256:000982e751dfd346ca5346b8025c685fc3abc585079c59cde3bde7fd63100657
volumes:
- ./scripts:/etc/logos-blockchain/scripts
- ./kzgrs_test_params:/kzgrs_test_params:z
depends_on:
- cfgsync
entrypoint: /etc/logos-blockchain/scripts/run_logos_blockchain_node.sh
logos-blockchain-node-2:
image: ghcr.io/logos-blockchain/logos-blockchain@sha256:000982e751dfd346ca5346b8025c685fc3abc585079c59cde3bde7fd63100657
volumes:
- ./scripts:/etc/logos-blockchain/scripts
- ./kzgrs_test_params:/kzgrs_test_params:z
depends_on:
- cfgsync
entrypoint: /etc/logos-blockchain/scripts/run_logos_blockchain_node.sh
logos-blockchain-node-3:
image: ghcr.io/logos-blockchain/logos-blockchain@sha256:000982e751dfd346ca5346b8025c685fc3abc585079c59cde3bde7fd63100657
volumes:
- ./scripts:/etc/logos-blockchain/scripts
- ./kzgrs_test_params:/kzgrs_test_params:z
depends_on:
- cfgsync
- ./node-config.yaml:/etc/logos-blockchain/node-config.yaml:z
- ./deployment-settings.yaml:/etc/logos-blockchain/deployment-settings.yaml:z
entrypoint: /etc/logos-blockchain/scripts/run_logos_blockchain_node.sh

54
bedrock/node-config.yaml Normal file
View File

@ -0,0 +1,54 @@
blend:
non_ephemeral_signing_key_id: 86c8519f00178e9eb1fe5f4247e4bed77d4c9f6da2fb10e8a1fdd7ba6bc79fa0
core:
zk:
secret_key_kms_id: 64249c75c2cb813578b75d05b215fc95f67cea5862fff047228183f22e63460e
cryptarchia:
service:
bootstrap:
prolonged_bootstrap_period: '1.000000000'
network:
network:
max_connected_peers_to_try_download: 16
max_discovered_peers_to_try_download: 16
leader:
wallet:
funding_pk: "2e03b2eff5a45478e7e79668d2a146cf2c5c7925bce927f2b1c67f2ab4fc0d26"
sdp:
wallet:
funding_pk: ed266e6e887b9b97059dc1aa1b7b2e19b934291753c6336a163fe4ebaa28e717
kms:
backend:
keys:
2e03b2eff5a45478e7e79668d2a146cf2c5c7925bce927f2b1c67f2ab4fc0d26: !Zk 6c645cd4636d9c4c36a37a9aeabcaa3300000000000000000000000000000000
64249c75c2cb813578b75d05b215fc95f67cea5862fff047228183f22e63460e: !Zk 83c851cf4436e8d2fdac33d56d2b235f66431be97e2a20bf241d431713dc720f
ed266e6e887b9b97059dc1aa1b7b2e19b934291753c6336a163fe4ebaa28e717: !Zk 7364705cd4636d9c4c36a37a9aeabcaa00000000000000000000000000000000
86c8519f00178e9eb1fe5f4247e4bed77d4c9f6da2fb10e8a1fdd7ba6bc79fa0: !Ed25519 5cd4636d9c4c36a37a9aeabcaa332c3ec796226af0af48a0b2e70167205af749
wallet:
known_keys:
2e03b2eff5a45478e7e79668d2a146cf2c5c7925bce927f2b1c67f2ab4fc0d26: "2e03b2eff5a45478e7e79668d2a146cf2c5c7925bce927f2b1c67f2ab4fc0d26"
ed266e6e887b9b97059dc1aa1b7b2e19b934291753c6336a163fe4ebaa28e717: "ed266e6e887b9b97059dc1aa1b7b2e19b934291753c6336a163fe4ebaa28e717"
voucher_master_key_id: 2e03b2eff5a45478e7e79668d2a146cf2c5c7925bce927f2b1c67f2ab4fc0d26
api:
backend:
listen_address: 0.0.0.0:18080
cors_origins: []
timeout: 30
max_body_size: 10485760
max_concurrent_requests: 500
tracing:
logger:
stdout: true
stderr: false
file:
directory: "./state/logs"
otlp: null
loki: null
gelf: null
tracing: None
filter: None
metrics: None
console: None
level: "INFO"

View File

@ -2,12 +2,19 @@
set -e
export CFG_FILE_PATH="/config.yaml" \
CFG_SERVER_ADDR="http://cfgsync:4400" \
CFG_HOST_IP=$(hostname -i) \
CFG_HOST_IDENTIFIER="validator-$(hostname -i)" \
LOG_LEVEL="INFO" \
POL_PROOF_DEV_MODE=true
export POL_PROOF_DEV_MODE=true
/usr/bin/logos-blockchain-cfgsync-client && \
exec /usr/bin/logos-blockchain-node /config.yaml
# Use static configs mounted from host. Both node-config.yaml and
# deployment-settings.yaml have matching validator keys so the node
# can produce blocks as a single-validator network.
# Copy deployment-settings to a writable path because sed -i can't
# rename on a bind-mounted file.
cp /etc/logos-blockchain/deployment-settings.yaml /deployment-settings.yaml
# Set chain_start_time to "now" so the chain starts immediately.
sed -i "s/PLACEHOLDER_CHAIN_START_TIME/$(date -u '+%Y-%m-%d %H:%M:%S.000000 +00:00:00')/" \
/deployment-settings.yaml
exec /usr/bin/logos-blockchain-node \
/etc/logos-blockchain/node-config.yaml \
--deployment /deployment-settings.yaml

View File

@ -1,23 +0,0 @@
[package]
name = "bedrock_client"
version = "0.1.0"
edition = "2024"
license = { workspace = true }
[lints]
workspace = true
[dependencies]
common.workspace = true
reqwest.workspace = true
anyhow.workspace = true
tokio-retry.workspace = true
futures.workspace = true
log.workspace = true
serde.workspace = true
humantime-serde.workspace = true
logos-blockchain-common-http-client.workspace = true
logos-blockchain-core.workspace = true
logos-blockchain-chain-broadcast-service.workspace = true
logos-blockchain-chain-service.workspace = true

View File

@ -1,121 +0,0 @@
use std::time::Duration;
use anyhow::{Context as _, Result};
use common::config::BasicAuth;
use futures::{Stream, TryFutureExt as _};
#[expect(clippy::single_component_path_imports, reason = "Satisfy machete")]
use humantime_serde;
use log::{info, warn};
pub use logos_blockchain_chain_broadcast_service::BlockInfo;
use logos_blockchain_chain_service::CryptarchiaInfo;
pub use logos_blockchain_common_http_client::{CommonHttpClient, Error};
pub use logos_blockchain_core::{block::Block, header::HeaderId, mantle::SignedMantleTx};
use reqwest::{Client, Url};
use serde::{Deserialize, Serialize};
use tokio_retry::Retry;
/// Fibonacci backoff retry strategy configuration.
#[derive(Debug, Copy, Clone, Serialize, Deserialize)]
pub struct BackoffConfig {
    /// Delay before the first retry; later delays grow along a Fibonacci
    /// sequence seeded from this value. Serialized in human-readable form
    /// (e.g. "100ms") via `humantime_serde`.
    #[serde(with = "humantime_serde")]
    pub start_delay: Duration,
    /// Maximum number of retry attempts taken from the backoff iterator.
    pub max_retries: usize,
}
impl Default for BackoffConfig {
    /// Defaults to a 100 ms initial delay and at most 5 retries.
    fn default() -> Self {
        let start_delay = Duration::from_millis(100);
        let max_retries = 5;
        Self {
            start_delay,
            max_retries,
        }
    }
}
/// Simple wrapper around [`CommonHttpClient`] for talking to a single
/// bedrock node; maybe extend in the future for our purposes.
///
/// `Clone` is cheap because `CommonHttpClient` is internally reference counted (`Arc`).
#[derive(Clone)]
pub struct BedrockClient {
    // Shared HTTP client; clones of the wrapper share the same underlying client.
    http_client: CommonHttpClient,
    // Base URL of the node every request is sent to.
    node_url: Url,
    // Retry parameters consumed by `backoff_strategy`.
    backoff: BackoffConfig,
}
impl BedrockClient {
    /// Builds a client for the node at `node_url`.
    ///
    /// The underlying HTTP client uses a 60-second request timeout; optional
    /// basic-auth credentials are attached to every request.
    ///
    /// # Errors
    /// Returns an error if the underlying HTTP client cannot be constructed.
    pub fn new(backoff: BackoffConfig, node_url: Url, auth: Option<BasicAuth>) -> Result<Self> {
        info!("Creating Bedrock client with node URL {node_url}");
        let client = Client::builder()
            //Add more fields if needed
            .timeout(std::time::Duration::from_secs(60))
            .build()
            .context("Failed to build HTTP client")?;
        let auth = auth.map(|a| {
            logos_blockchain_common_http_client::BasicAuthCredentials::new(a.username, a.password)
        });
        let http_client = CommonHttpClient::new_with_client(client, auth);
        Ok(Self {
            http_client,
            node_url,
            backoff,
        })
    }

    /// Submits a signed transaction, retrying transport-level failures with
    /// the configured backoff.
    ///
    /// The nested result separates the two failure modes: the outer `Err` is
    /// a transport (`reqwest`) error that persisted through all retries, the
    /// inner `Err` is a non-retryable server/client/URL error returned as-is.
    pub async fn post_transaction(&self, tx: SignedMantleTx) -> Result<Result<(), Error>, Error> {
        Retry::spawn(self.backoff_strategy(), || async {
            match self
                .http_client
                .post_transaction(self.node_url.clone(), tx.clone())
                .await
            {
                Ok(()) => Ok(Ok(())),
                Err(err) => match err {
                    // Retry arm.
                    // Retrying only reqwest errors: mainly connected to http.
                    Error::Request(_) => Err(err),
                    // Returning non-retryable error
                    Error::Server(_) | Error::Client(_) | Error::Url(_) => Ok(Err(err)),
                },
            }
        })
        .await
    }

    /// Opens the node's `lib` block-info stream (presumably last-irreversible-
    /// block updates — confirm against the node API). Not retried.
    pub async fn get_lib_stream(&self) -> Result<impl Stream<Item = BlockInfo>, Error> {
        self.http_client.get_lib_stream(self.node_url.clone()).await
    }

    /// Fetches a block by header id, retrying failures with the configured
    /// backoff. `Ok(None)` presumably means the node does not know the block.
    pub async fn get_block_by_id(
        &self,
        header_id: HeaderId,
    ) -> Result<Option<Block<SignedMantleTx>>, Error> {
        Retry::spawn(self.backoff_strategy(), || {
            self.http_client
                .get_block_by_id(self.node_url.clone(), header_id)
                .inspect_err(|err| warn!("Block fetching failed with error: {err:#}"))
        })
        .await
    }

    /// Fetches the node's current consensus state, retrying failures with the
    /// configured backoff.
    pub async fn get_consensus_info(&self) -> Result<CryptarchiaInfo, Error> {
        Retry::spawn(self.backoff_strategy(), || {
            self.http_client
                .consensus_info(self.node_url.clone())
                .inspect_err(|err| warn!("Block fetching failed with error: {err:#}"))
        })
        .await
    }

    /// Builds the Fibonacci-delay iterator used by the retrying calls above,
    /// seeded and capped from `self.backoff`.
    fn backoff_strategy(&self) -> impl Iterator<Item = Duration> {
        let start_delay_millis = self
            .backoff
            .start_delay
            .as_millis()
            .try_into()
            .expect("Start delay must be less than u64::MAX milliseconds");
        tokio_retry::strategy::FibonacciBackoff::from_millis(start_delay_millis)
            .take(self.backoff.max_retries)
    }
}

View File

@ -10,19 +10,15 @@ workspace = true
[dependencies]
nssa.workspace = true
nssa_core.workspace = true
clock_core.workspace = true
anyhow.workspace = true
thiserror.workspace = true
serde_json.workspace = true
serde.workspace = true
serde_with.workspace = true
reqwest.workspace = true
base64.workspace = true
sha2.workspace = true
log.workspace = true
hex.workspace = true
borsh.workspace = true
bytesize.workspace = true
base64.workspace = true
url.workspace = true
logos-blockchain-common-http-client.workspace = true
tokio-retry.workspace = true

View File

@ -1,14 +1,12 @@
use borsh::{BorshDeserialize, BorshSerialize};
use nssa::AccountId;
use nssa_core::BlockId;
pub use nssa_core::Timestamp;
use serde::{Deserialize, Serialize};
use sha2::{Digest as _, Sha256, digest::FixedOutput as _};
use crate::{HashType, transaction::NSSATransaction};
pub type MantleMsgId = [u8; 32];
pub type BlockHash = HashType;
pub type BlockId = u64;
pub type TimeStamp = u64;
#[derive(Debug, Clone, BorshSerialize, BorshDeserialize)]
pub struct BlockMeta {
@ -36,7 +34,7 @@ pub struct BlockHeader {
pub block_id: BlockId,
pub prev_block_hash: BlockHash,
pub hash: BlockHash,
pub timestamp: TimeStamp,
pub timestamp: Timestamp,
pub signature: nssa::Signature,
}
@ -60,11 +58,23 @@ pub struct Block {
pub bedrock_parent_id: MantleMsgId,
}
impl Serialize for Block {
    // Blocks are serialized as a single Base64 string of their Borsh encoding
    // (see `borsh_base64`) rather than as a field-by-field serde structure.
    fn serialize<S: serde::Serializer>(&self, serializer: S) -> Result<S::Ok, S::Error> {
        crate::borsh_base64::serialize(self, serializer)
    }
}
impl<'de> Deserialize<'de> for Block {
    // Mirror of the `Serialize` impl: decodes a Base64 string and then
    // Borsh-deserializes the resulting bytes.
    fn deserialize<D: serde::Deserializer<'de>>(deserializer: D) -> Result<Self, D::Error> {
        crate::borsh_base64::deserialize(deserializer)
    }
}
#[derive(Debug, Clone, PartialEq, Eq, BorshSerialize, BorshDeserialize)]
pub struct HashableBlockData {
pub block_id: BlockId,
pub prev_block_hash: BlockHash,
pub timestamp: TimeStamp,
pub timestamp: Timestamp,
pub transactions: Vec<NSSATransaction>,
}
@ -111,20 +121,6 @@ impl From<Block> for HashableBlockData {
}
}
/// Helper struct for account (de-)serialization.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct AccountInitialData {
pub account_id: AccountId,
pub balance: u128,
}
/// Helper struct to (de-)serialize initial commitments.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct CommitmentsInitialData {
pub npk: nssa_core::NullifierPublicKey,
pub account: nssa_core::account::Account,
}
#[cfg(test)]
mod tests {
use crate::{HashType, block::HashableBlockData, test_utils};

View File

@ -0,0 +1,25 @@
//! This module provides utilities for serializing and deserializing data by combining Borsh and
//! Base64 encodings.
use base64::{Engine as _, engine::general_purpose::STANDARD};
use borsh::{BorshDeserialize, BorshSerialize};
use serde::{Deserialize, Serialize};
pub fn serialize<T: BorshSerialize, S: serde::Serializer>(
value: &T,
serializer: S,
) -> Result<S::Ok, S::Error> {
let borsh_encoded = borsh::to_vec(value).map_err(serde::ser::Error::custom)?;
let base64_encoded = STANDARD.encode(&borsh_encoded);
Serialize::serialize(&base64_encoded, serializer)
}
pub fn deserialize<'de, T: BorshDeserialize, D: serde::Deserializer<'de>>(
deserializer: D,
) -> Result<T, D::Error> {
let base64_encoded = <String as Deserialize>::deserialize(deserializer)?;
let borsh_encoded = STANDARD
.decode(base64_encoded.as_bytes())
.map_err(serde::de::Error::custom)?;
borsh::from_slice(&borsh_encoded).map_err(serde::de::Error::custom)
}

View File

@ -1,43 +0,0 @@
use nssa::AccountId;
use serde::Deserialize;
use crate::rpc_primitives::errors::RpcError;
/// Error envelope returned by the sequencer's JSON-RPC server.
#[derive(Debug, Clone, Deserialize)]
pub struct SequencerRpcError {
    /// JSON-RPC protocol version string from the response.
    pub jsonrpc: String,
    /// The structured RPC error payload.
    pub error: RpcError,
    /// Id of the request this error responds to.
    pub id: u64,
}
/// Errors that can occur while communicating with the sequencer.
#[derive(thiserror::Error, Debug)]
pub enum SequencerClientError {
    /// Transport-level failure from `reqwest`.
    #[error("HTTP error")]
    HTTPError(#[from] reqwest::Error),
    /// Failure (de)serializing a request or response body.
    #[error("Serde error")]
    SerdeError(#[from] serde_json::Error),
    /// The sequencer answered with a structured RPC error.
    #[error("Internal error: {0:?}")]
    InternalError(SequencerRpcError),
}
impl From<SequencerRpcError> for SequencerClientError {
fn from(value: SequencerRpcError) -> Self {
Self::InternalError(value)
}
}
/// High-level failure categories surfaced when executing operations against
/// the sequencer.
#[derive(Debug, thiserror::Error)]
pub enum ExecutionFailureKind {
    /// Data could not be fetched from the sequencer.
    #[error("Failed to get data from sequencer")]
    SequencerError(#[source] anyhow::Error),
    /// The sum of input amounts does not match the sum of outputs.
    #[error("Inputs amounts does not match outputs")]
    AmountMismatchError,
    /// A required account key was not found.
    #[error("Accounts key not found")]
    KeyNotFoundError,
    /// Wrapper for lower-level sequencer client errors.
    #[error("Sequencer client error: {0:?}")]
    SequencerClientError(#[from] SequencerClientError),
    /// The payer cannot cover the cost of the operation.
    #[error("Can not pay for operation")]
    InsufficientFundsError,
    /// The stored data for the given account is malformed.
    #[error("Account {0} data is invalid")]
    AccountDataError(AccountId),
}

View File

@ -4,10 +4,8 @@ use borsh::{BorshDeserialize, BorshSerialize};
use serde_with::{DeserializeFromStr, SerializeDisplay};
pub mod block;
mod borsh_base64;
pub mod config;
pub mod error;
pub mod rpc_primitives;
pub mod sequencer_client;
pub mod transaction;
// Module for tests utility functions

View File

@ -1,194 +0,0 @@
use std::fmt;
use serde_json::{Value, to_value};
/// Newtype carrying the message of a request that failed to parse;
/// convertible into an `RpcError` via its `From` impl.
#[derive(serde::Serialize)]
pub struct RpcParseError(pub String);
/// This struct may be returned from JSON RPC server in case of error.
///
/// It is expected that this struct has `From<_>` impls for all other RPC
/// errors like [`RpcBlockError`](crate::types::blocks::RpcBlockError).
#[derive(Debug, serde::Serialize, serde::Deserialize, Clone, PartialEq, Eq)]
#[serde(deny_unknown_fields)]
pub struct RpcError {
    /// Structured error payload; flattened into the surrounding JSON object.
    #[serde(flatten)]
    pub error_struct: Option<RpcErrorKind>,
    /// Deprecated please use the `error_struct` instead.
    pub code: i64,
    /// Deprecated please use the `error_struct` instead.
    pub message: String,
    /// Deprecated please use the `error_struct` instead.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub data: Option<Value>,
}
/// Structured representation of an RPC error, serialized with the variant
/// under a `name` tag and its payload under `cause`, in SCREAMING_SNAKE_CASE.
#[derive(Debug, serde::Serialize, serde::Deserialize, Clone, PartialEq, Eq)]
#[serde(tag = "name", content = "cause", rename_all = "SCREAMING_SNAKE_CASE")]
pub enum RpcErrorKind {
    /// The request itself was invalid (unknown method or unparsable body).
    RequestValidationError(RpcRequestValidationErrorKind),
    /// The method handler rejected the request; payload is handler-specific.
    HandlerError(Value),
    /// The server failed internally while processing the request.
    InternalError(Value),
}
/// Ways a request can fail validation before reaching a method handler.
#[derive(Debug, serde::Serialize, serde::Deserialize, Clone, PartialEq, Eq)]
#[serde(tag = "name", content = "info", rename_all = "SCREAMING_SNAKE_CASE")]
pub enum RpcRequestValidationErrorKind {
    /// The requested RPC method is not registered on the server.
    MethodNotFound { method_name: String },
    /// The request body could not be parsed.
    ParseError { error_message: String },
}
/// A general Server Error.
#[derive(serde::Serialize, serde::Deserialize, Debug, PartialEq, Eq, Clone)]
pub enum ServerError {
    /// The server timed out handling the request.
    Timeout,
    /// The server/connection was closed before a response was produced
    /// (NOTE(review): semantics inferred from the name — confirm at call sites).
    Closed,
}
impl RpcError {
    /// A generic constructor.
    ///
    /// Mostly for completeness, doesn't do anything but filling in the corresponding fields.
    #[must_use]
    pub const fn new(code: i64, message: String, data: Option<Value>) -> Self {
        Self {
            code,
            message,
            data,
            error_struct: None,
        }
    }

    /// Create an Invalid Param error (JSON-RPC code -32602).
    ///
    /// Falls back to a server error if `data` itself cannot be serialized to JSON.
    pub fn invalid_params(data: impl serde::Serialize) -> Self {
        let value = match to_value(data) {
            Ok(value) => value,
            Err(err) => {
                return Self::server_error(Some(format!(
                    "Failed to serialize invalid parameters error: {:?}",
                    err.to_string()
                )));
            }
        };
        Self::new(-32_602, "Invalid params".to_owned(), Some(value))
    }

    /// Create a server error (JSON-RPC code -32000).
    ///
    /// # Panics
    /// Panics if `e` cannot be serialized to JSON.
    pub fn server_error<E: serde::Serialize>(e: Option<E>) -> Self {
        Self::new(
            -32_000,
            "Server error".to_owned(),
            e.map(|v| to_value(v).expect("Must be representable in JSON")),
        )
    }

    /// Create a parse error (JSON-RPC code -32700), populating both the
    /// deprecated flat fields and the structured `error_struct`.
    #[must_use]
    pub fn parse_error(e: String) -> Self {
        Self {
            code: -32_700,
            message: "Parse error".to_owned(),
            data: Some(Value::String(e.clone())),
            error_struct: Some(RpcErrorKind::RequestValidationError(
                RpcRequestValidationErrorKind::ParseError { error_message: e },
            )),
        }
    }

    /// Create an internal error describing a serialization failure, with `e`
    /// used both as the error data and the error message.
    #[must_use]
    pub fn serialization_error(e: &str) -> Self {
        Self::new_internal_error(Some(Value::String(e.to_owned())), e)
    }

    /// Helper method to define extract `INTERNAL_ERROR` in separate `RpcErrorKind`
    /// Returns `HANDLER_ERROR` if the error is not internal one.
    #[must_use]
    pub fn new_internal_or_handler_error(error_data: Option<Value>, error_struct: Value) -> Self {
        if error_struct["name"] == "INTERNAL_ERROR" {
            // Prefer the embedded message; fall back to a placeholder when the
            // `info.error_message` field is missing or not a string.
            let error_message = match error_struct["info"].get("error_message") {
                Some(Value::String(error_message)) => error_message.as_str(),
                _ => "InternalError happened during serializing InternalError",
            };
            Self::new_internal_error(error_data, error_message)
        } else {
            Self::new_handler_error(error_data, error_struct)
        }
    }

    /// Create an internal error (code -32000) whose `error_struct` carries an
    /// `INTERNAL_ERROR` payload with the given message.
    #[must_use]
    pub fn new_internal_error(error_data: Option<Value>, info: &str) -> Self {
        Self {
            code: -32_000,
            message: "Server error".to_owned(),
            data: error_data,
            error_struct: Some(RpcErrorKind::InternalError(serde_json::json!({
                "name": "INTERNAL_ERROR",
                "info": serde_json::json!({"error_message": info})
            }))),
        }
    }

    // Create a handler error (code -32000) wrapping the given structured payload.
    fn new_handler_error(error_data: Option<Value>, error_struct: Value) -> Self {
        Self {
            code: -32_000,
            message: "Server error".to_owned(),
            data: error_data,
            error_struct: Some(RpcErrorKind::HandlerError(error_struct)),
        }
    }

    /// Create a method not found error.
    #[must_use]
    pub fn method_not_found(method: String) -> Self {
        Self {
            code: -32_601,
            message: "Method not found".to_owned(),
            data: Some(Value::String(method.clone())),
            error_struct: Some(RpcErrorKind::RequestValidationError(
                RpcRequestValidationErrorKind::MethodNotFound {
                    method_name: method,
                },
            )),
        }
    }
}
impl fmt::Display for RpcError {
    /// Human-readable form; delegates to the `Debug` representation.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        fmt::Debug::fmt(self, f)
    }
}
impl From<RpcParseError> for RpcError {
fn from(parse_error: RpcParseError) -> Self {
Self::parse_error(parse_error.0)
}
}
impl From<std::convert::Infallible> for RpcError {
    /// `Infallible` has no values, so this conversion can never actually run.
    ///
    /// An exhaustive match on the uninhabited type lets the compiler prove the
    /// function body is unreachable, replacing the previous
    /// `unsafe { unreachable_unchecked() }` — which would have been undefined
    /// behavior if ever reached — with a zero-cost safe equivalent.
    fn from(infallible: std::convert::Infallible) -> Self {
        match infallible {}
    }
}
impl fmt::Display for ServerError {
    /// Render the error as `ServerError: <variant>`.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        let kind = match self {
            Self::Timeout => "Timeout",
            Self::Closed => "Closed",
        };
        write!(f, "ServerError: {kind}")
    }
}
impl From<ServerError> for RpcError {
    /// Attach the serialized `ServerError` as error data; if serialization
    /// itself fails, fall back to an internal error without data.
    fn from(e: ServerError) -> Self {
        match to_value(&e) {
            Ok(error_data) => Self::new_internal_error(Some(error_data), e.to_string().as_str()),
            Err(_) => Self::new_internal_error(None, "Failed to serialize ServerError"),
        }
    }
}

View File

@ -1,588 +0,0 @@
// Copyright 2017 tokio-jsonrpc Developers
//
// Licensed under the Apache License, Version 2.0, <LICENSE-APACHE or
// http://apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT or
// http://opensource.org/licenses/MIT>, at your option. This file may not be
// copied, modified, or distributed except according to those terms.
//! JSON-RPC 2.0 messages.
//!
//! The main entrypoint here is the [Message](enum.Message.html). The others are just building
//! blocks and you should generally work with `Message` instead.
use std::fmt::{Formatter, Result as FmtResult};
use serde::{
de::{Deserializer, Error, Unexpected, Visitor},
ser::{SerializeStruct as _, Serializer},
};
use serde_json::{Result as JsonResult, Value};
use super::errors::RpcError;
/// Outcome of parsing one incoming text: a valid [`Message`] or a
/// protocol-level failure described by [`Broken`].
pub type Parsed = Result<Message, Broken>;
/// Marker for the mandatory `"jsonrpc": "2.0"` version field; serializes to
/// and only deserializes from the literal string `"2.0"`.
#[derive(Debug, Clone, PartialEq, Eq, Hash)]
struct Version;
impl serde::Serialize for Version {
    /// Always emits the literal protocol version string `"2.0"`.
    fn serialize<S: Serializer>(&self, serializer: S) -> Result<S::Ok, S::Error> {
        const PROTOCOL_VERSION: &str = "2.0";
        serializer.serialize_str(PROTOCOL_VERSION)
    }
}
impl<'de> serde::Deserialize<'de> for Version {
    #[expect(
        clippy::renamed_function_params,
        reason = "More readable than original serde parameter names"
    )]
    // Accepts only the exact string "2.0"; any other value is rejected with a
    // serde `invalid_value` error, which is what makes non-2.0 payloads fail
    // to parse as JSON-RPC messages.
    fn deserialize<D: Deserializer<'de>>(deserializer: D) -> Result<Self, D::Error> {
        struct VersionVisitor;
        impl Visitor<'_> for VersionVisitor {
            type Value = Version;
            fn expecting(&self, formatter: &mut Formatter<'_>) -> FmtResult {
                formatter.write_str("a version string")
            }
            fn visit_str<E: Error>(self, value: &str) -> Result<Version, E> {
                match value {
                    "2.0" => Ok(Version),
                    _ => Err(E::invalid_value(Unexpected::Str(value), &"value 2.0")),
                }
            }
        }
        deserializer.deserialize_str(VersionVisitor)
    }
}
/// An RPC request.
#[derive(Debug, serde::Serialize, serde::Deserialize, Clone, PartialEq, Eq)]
#[serde(deny_unknown_fields)]
#[expect(
    clippy::partial_pub_fields,
    reason = "We don't want to allow access to the version, but the others are public for ease of use"
)]
pub struct Request {
    // Always "2.0"; kept private so callers cannot tamper with it.
    jsonrpc: Version,
    /// Name of the remote method to invoke.
    pub method: String,
    /// Method arguments; `Null` means "no params" and is omitted on the wire.
    #[serde(default, skip_serializing_if = "Value::is_null")]
    pub params: Value,
    /// Request id, echoed back in the matching [`Response`].
    pub id: Value,
}
impl Request {
#[must_use]
pub fn from_payload_version_2_0(method: String, payload: serde_json::Value) -> Self {
Self {
jsonrpc: Version,
method,
params: payload,
// ToDo: Correct checking of id
id: 1.into(),
}
}
/// Answer the request with a (positive) reply.
///
/// The ID is taken from the request.
#[must_use]
pub fn reply(&self, reply: Value) -> Message {
Message::Response(Response {
jsonrpc: Version,
result: Ok(reply),
id: self.id.clone(),
})
}
/// Answer the request with an error.
#[must_use]
pub fn error(&self, error: RpcError) -> Message {
Message::Response(Response {
jsonrpc: Version,
result: Err(error),
id: self.id.clone(),
})
}
}
/// A response to an RPC.
///
/// It is created by the methods on [Request](struct.Request.html).
#[expect(
    clippy::partial_pub_fields,
    reason = "We don't want to allow access to the version, but the others are public for ease of use"
)]
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct Response {
    // Always "2.0"; kept private so callers cannot tamper with it.
    jsonrpc: Version,
    /// `Ok` becomes the wire `result` field, `Err` the wire `error` field.
    pub result: Result<Value, RpcError>,
    /// Id of the request this answers; `Null` for top-level errors.
    pub id: Value,
}
impl serde::Serialize for Response {
    // Hand-written because `result: Result<_, _>` must flatten into either a
    // "result" or an "error" wire field (never both), per JSON-RPC 2.0.
    fn serialize<S: Serializer>(&self, serializer: S) -> Result<S::Ok, S::Error> {
        let mut sub = serializer.serialize_struct("Response", 3)?;
        sub.serialize_field("jsonrpc", &self.jsonrpc)?;
        match &self.result {
            Ok(value) => sub.serialize_field("result", value),
            Err(err) => sub.serialize_field("error", err),
        }?;
        sub.serialize_field("id", &self.id)?;
        sub.end()
    }
}
/// A helper trick for deserialization.
///
/// Mirrors the exact wire shape of a response (`result` and `error` as two
/// separate optional fields) so [`Response`] can be derived from it.
#[derive(serde::Deserialize)]
#[serde(deny_unknown_fields)]
struct WireResponse {
    // It is actually used to eat and sanity check the deserialized text
    #[serde(rename = "jsonrpc")]
    _jsonrpc: Version,
    // Make sure we accept null as Some(Value::Null), instead of going to None
    #[serde(default, deserialize_with = "some_value")]
    result: Option<Value>,
    error: Option<RpcError>,
    id: Value,
}
// Implementing deserialize is hard. We sidestep the difficulty by deserializing a similar
// structure that directly corresponds to whatever is on the wire and then convert it to our more
// convenient representation.
impl<'de> serde::Deserialize<'de> for Response {
    fn deserialize<D: Deserializer<'de>>(deserializer: D) -> Result<Self, D::Error> {
        let wr: WireResponse = serde::Deserialize::deserialize(deserializer)?;
        // Exactly one of result/error must be present; both or neither is a
        // malformed response per JSON-RPC 2.0.
        let result = match (wr.result, wr.error) {
            (Some(res), None) => Ok(res),
            (None, Some(err)) => Err(err),
            _ => {
                let err = D::Error::custom("Either 'error' or 'result' is expected, but not both");
                return Err(err);
            }
        };
        Ok(Self {
            jsonrpc: Version,
            result,
            id: wr.id,
        })
    }
}
/// A notification (doesn't expect an answer).
#[expect(
    clippy::partial_pub_fields,
    reason = "We don't want to allow access to the version, but the others are public for ease of use"
)]
#[derive(Debug, serde::Serialize, serde::Deserialize, Clone, PartialEq, Eq)]
#[serde(deny_unknown_fields)]
pub struct Notification {
    // Always "2.0"; kept private so callers cannot tamper with it.
    jsonrpc: Version,
    /// Name of the notified method.
    pub method: String,
    /// Arguments; `Null` means "no params" and is omitted on the wire.
    #[serde(default, skip_serializing_if = "Value::is_null")]
    pub params: Value,
}
/// One message of the JSON RPC protocol.
///
/// One message, directly mapped from the structures of the protocol. See the
/// [specification](http://www.jsonrpc.org/specification) for more details.
///
/// Since the protocol allows one endpoint to be both client and server at the same time, the
/// message can decode and encode both directions of the protocol.
///
/// The `Batch` variant is supposed to be created directly, without a constructor.
///
/// The `UnmatchedSub` variant is used when a request is an array and some of the subrequests
/// aren't recognized as valid json rpc 2.0 messages. This is never returned as a top-level
/// element, it is returned as `Err(Broken::Unmatched)`.
#[derive(Debug, Clone, PartialEq, serde::Serialize, serde::Deserialize)]
#[serde(untagged)]
pub enum Message {
    /// An RPC request.
    Request(Request),
    /// A response to a Request.
    Response(Response),
    /// A notification.
    Notification(Notification),
    /// A batch of more requests or responses.
    ///
    /// The protocol allows bundling multiple requests, notifications or responses to a single
    /// message.
    ///
    /// This variant has no direct constructor and is expected to be constructed manually.
    Batch(Vec<Self>),
    /// An unmatched sub entry in a `Batch`.
    ///
    /// When there's a `Batch` and an element doesn't conform to the JSONRPC 2.0 format, that one
    /// is represented by this. This is never produced as a top-level value when parsing, the
    /// `Err(Broken::Unmatched)` is used instead. It is not possible to serialize.
    #[serde(skip_serializing)]
    UnmatchedSub(Value),
}
impl Message {
    /// Build a request message.
    ///
    /// The ID is auto-set to dontcare.
    #[must_use]
    pub fn request(method: String, params: Value) -> Self {
        Self::Request(Request {
            jsonrpc: Version,
            method,
            params,
            id: Value::from("dontcare"),
        })
    }
    /// Build a top-level error response (without an ID).
    #[must_use]
    pub const fn error(error: RpcError) -> Self {
        Self::Response(Response {
            jsonrpc: Version,
            result: Err(error),
            id: Value::Null,
        })
    }
    /// Build a notification message.
    #[must_use]
    pub const fn notification(method: String, params: Value) -> Self {
        Self::Notification(Notification {
            jsonrpc: Version,
            method,
            params,
        })
    }
    /// Build a response message with the given `id` and `result`.
    #[must_use]
    pub const fn response(id: Value, result: Result<Value, RpcError>) -> Self {
        Self::Response(Response {
            jsonrpc: Version,
            result,
            id,
        })
    }
    /// Returns the message id, or `Null` for variants that carry none.
    #[must_use]
    pub fn id(&self) -> Value {
        match self {
            Self::Request(Request { id, .. }) => id.clone(),
            Self::Response(Response { id, .. }) => id.clone(),
            Self::Notification(_) | Self::Batch(_) | Self::UnmatchedSub(_) => Value::Null,
        }
    }
}
impl From<Message> for String {
    /// Serialize the message into its JSON text form.
    fn from(val: Message) -> Self {
        let serialized = ::serde_json::ser::to_string(&val);
        serialized.expect("message serialization to json should not fail")
    }
}
impl From<Message> for Vec<u8> {
    /// Serialize the message into its JSON byte form.
    fn from(val: Message) -> Self {
        let serialized = ::serde_json::ser::to_vec(&val);
        serialized.expect("message serialization to json bytes should not fail")
    }
}
/// A broken message.
///
/// Protocol-level errors.
#[derive(Debug, Clone, PartialEq, Eq, serde::Deserialize)]
#[serde(untagged)]
pub enum Broken {
    /// It was valid JSON, but doesn't match the form of a JSONRPC 2.0 message.
    Unmatched(Value),
    /// Invalid JSON.
    // Constructed manually from the serde_json error text; never deserialized.
    #[serde(skip_deserializing)]
    SyntaxError(String),
}
impl Broken {
/// Generate an appropriate error message.
///
/// The error message for these things are specified in the RFC, so this just creates an error
/// with the right values.
#[must_use]
pub fn reply(&self) -> Message {
match self {
Self::Unmatched(_) => Message::error(RpcError::parse_error(
"JSON RPC Request format was expected".to_owned(),
)),
Self::SyntaxError(e) => Message::error(RpcError::parse_error(e.clone())),
}
}
}
/// A trick to easily deserialize and detect valid JSON, but invalid Message.
///
/// `Message` is tried first; anything that is valid JSON but not a message
/// falls through to `Broken::Unmatched`.
#[derive(serde::Deserialize)]
#[serde(untagged)]
pub enum WireMessage {
    Message(Message),
    Broken(Broken),
}
/// Normalize a decoded [`WireMessage`] into the public [`Parsed`] shape.
///
/// A top-level `UnmatchedSub` is surfaced as `Broken::Unmatched`; JSON syntax
/// failures become `Broken::SyntaxError`.
pub fn decoded_to_parsed(res: JsonResult<WireMessage>) -> Parsed {
    match res {
        Err(e) => Err(Broken::SyntaxError(e.to_string())),
        Ok(WireMessage::Broken(b)) => Err(b),
        Ok(WireMessage::Message(Message::UnmatchedSub(value))) => Err(Broken::Unmatched(value)),
        Ok(WireMessage::Message(m)) => Ok(m),
    }
}
/// Read a [Message](enum.Message.html) from a slice.
///
/// Invalid JSON or JSONRPC messages are reported as [Broken](enum.Broken.html).
pub fn from_slice(s: &[u8]) -> Parsed {
decoded_to_parsed(::serde_json::de::from_slice(s))
}
/// Read a [Message](enum.Message.html) from a string.
///
/// Invalid JSON or JSONRPC messages are reported as [Broken](enum.Broken.html).
pub fn from_str(s: &str) -> Parsed {
    let bytes = s.as_bytes();
    from_slice(bytes)
}
/// Deserializer for `Option<Value>` that produces `Some(Value::Null)`.
///
/// The usual one produces None in that case. But we need to know the difference between
/// `{x: null}` and `{}`.
fn some_value<'de, D: Deserializer<'de>>(deserializer: D) -> Result<Option<Value>, D::Error> {
    let value: Value = serde::Deserialize::deserialize(deserializer)?;
    Ok(Some(value))
}
#[cfg(test)]
mod tests {
    use serde_json::{Value, de::from_slice, json, ser::to_vec};
    use super::*;
    /// Test serialization and deserialization of the Message.
    ///
    /// We first deserialize it from a string. That way we check deserialization works.
    /// But since serialization doesn't have to produce the exact same result (order, spaces, …),
    /// we then serialize and deserialize the thing again and check it matches.
    #[test]
    fn message_serde() {
        // A helper for running one message test
        fn one(input: &str, expected: &Message) {
            let parsed: Message = from_str(input).unwrap();
            assert_eq!(*expected, parsed);
            let serialized = to_vec(&parsed).unwrap();
            let deserialized: Message = from_slice(&serialized).unwrap();
            assert_eq!(parsed, deserialized);
        }
        // A request without parameters
        one(
            r#"{"jsonrpc": "2.0", "method": "call", "id": 1}"#,
            &Message::Request(Request {
                jsonrpc: Version,
                method: "call".to_owned(),
                params: Value::Null,
                id: json!(1),
            }),
        );
        // A request with parameters
        one(
            r#"{"jsonrpc": "2.0", "method": "call", "params": [1, 2, 3], "id": 2}"#,
            &Message::Request(Request {
                jsonrpc: Version,
                method: "call".to_owned(),
                params: json!([1, 2, 3]),
                id: json!(2),
            }),
        );
        // A notification (with parameters)
        one(
            r#"{"jsonrpc": "2.0", "method": "notif", "params": {"x": "y"}}"#,
            &Message::Notification(Notification {
                jsonrpc: Version,
                method: "notif".to_owned(),
                params: json!({"x": "y"}),
            }),
        );
        // A successful response
        one(
            r#"{"jsonrpc": "2.0", "result": 42, "id": 3}"#,
            &Message::Response(Response {
                jsonrpc: Version,
                result: Ok(json!(42)),
                id: json!(3),
            }),
        );
        // A successful response
        // (null result must still count as a result, not as "absent")
        one(
            r#"{"jsonrpc": "2.0", "result": null, "id": 3}"#,
            &Message::Response(Response {
                jsonrpc: Version,
                result: Ok(Value::Null),
                id: json!(3),
            }),
        );
        // An error
        one(
            r#"{"jsonrpc": "2.0", "error": {"code": 42, "message": "Wrong!"}, "id": null}"#,
            &Message::Response(Response {
                jsonrpc: Version,
                result: Err(RpcError::new(42, "Wrong!".to_owned(), None)),
                id: Value::Null,
            }),
        );
        // A batch
        one(
            r#"[
                {"jsonrpc": "2.0", "method": "notif"},
                {"jsonrpc": "2.0", "method": "call", "id": 42}
            ]"#,
            &Message::Batch(vec![
                Message::Notification(Notification {
                    jsonrpc: Version,
                    method: "notif".to_owned(),
                    params: Value::Null,
                }),
                Message::Request(Request {
                    jsonrpc: Version,
                    method: "call".to_owned(),
                    params: Value::Null,
                    id: json!(42),
                }),
            ]),
        );
        // Some handling of broken messages inside a batch
        let parsed = from_str(
            r#"[
                {"jsonrpc": "2.0", "method": "notif"},
                {"jsonrpc": "2.0", "method": "call", "id": 42},
                true
            ]"#,
        )
        .unwrap();
        assert_eq!(
            Message::Batch(vec![
                Message::Notification(Notification {
                    jsonrpc: Version,
                    method: "notif".to_owned(),
                    params: Value::Null,
                }),
                Message::Request(Request {
                    jsonrpc: Version,
                    method: "call".to_owned(),
                    params: Value::Null,
                    id: json!(42),
                }),
                Message::UnmatchedSub(Value::Bool(true)),
            ]),
            parsed
        );
        // UnmatchedSub must refuse to serialize
        to_vec(&Message::UnmatchedSub(Value::Null)).unwrap_err();
    }
    /// A helper for the `broken` test.
    ///
    /// Check that the given JSON string parses, but is not recognized as a valid RPC message.
    ///
    /// Test things that are almost but not entirely JSONRPC are rejected.
    ///
    /// The reject is done by returning it as Unmatched.
    #[test]
    fn broken() {
        // A helper with one test
        fn one(input: &str) {
            let msg = from_str(input);
            match msg {
                Err(Broken::Unmatched(_)) => (),
                _ => panic!("{input} recognized as an RPC message: {msg:?}!"),
            }
        }
        // Missing the version
        one(r#"{"method": "notif"}"#);
        // Wrong version
        one(r#"{"jsonrpc": 2.0, "method": "notif"}"#);
        // A response with both result and error
        one(r#"{"jsonrpc": "2.0", "result": 42, "error": {"code": 42, "message": "!"}, "id": 1}"#);
        // A response without an id
        one(r#"{"jsonrpc": "2.0", "result": 42}"#);
        // An extra field
        one(r#"{"jsonrpc": "2.0", "method": "weird", "params": 42, "others": 43, "id": 2}"#);
        // Something completely different
        one(r#"{"x": [1, 2, 3]}"#);
        // Invalid JSON is a SyntaxError, not Unmatched
        match from_str("{]") {
            Err(Broken::SyntaxError(_)) => (),
            other => panic!("Something unexpected: {other:?}"),
        }
    }
    /// Test some non-trivial aspects of the constructors.
    ///
    /// This doesn't have a full coverage, because there's not much to actually test there.
    /// Most of it is related to the ids.
    // NOTE(review): `Message::request` currently sets a fixed "dontcare" id, so
    // the `assert_ne!` on the two ids below would fail if this test ran —
    // presumably why it is `#[ignore]`d. Confirm intent before un-ignoring.
    #[test]
    #[ignore = "Not a full coverage test"]
    fn constructors() {
        let msg1 = Message::request("call".to_owned(), json!([1, 2, 3]));
        let msg2 = Message::request("call".to_owned(), json!([1, 2, 3]));
        // They differ, even when created with the same parameters
        assert_ne!(msg1, msg2);
        // And, specifically, they differ in the ID's
        let (req1, req2) = if let (Message::Request(req1), Message::Request(req2)) = (msg1, msg2) {
            assert_ne!(req1.id, req2.id);
            assert!(req1.id.is_string());
            assert!(req2.id.is_string());
            (req1, req2)
        } else {
            panic!("Non-request received");
        };
        let id1 = req1.id.clone();
        // When we answer a message, we get the same ID
        if let Message::Response(resp) = req1.reply(json!([1, 2, 3])) {
            assert_eq!(
                resp,
                Response {
                    jsonrpc: Version,
                    result: Ok(json!([1, 2, 3])),
                    id: id1
                }
            );
        } else {
            panic!("Not a response");
        }
        let id2 = req2.id.clone();
        // The same with an error
        if let Message::Response(resp) = req2.error(RpcError::new(42, "Wrong!".to_owned(), None)) {
            assert_eq!(
                resp,
                Response {
                    jsonrpc: Version,
                    result: Err(RpcError::new(42, "Wrong!".to_owned(), None)),
                    id: id2,
                }
            );
        } else {
            panic!("Not a response");
        }
        // When we have unmatched, we generate a top-level error with Null id.
        if let Message::Response(resp) =
            Message::error(RpcError::new(43, "Also wrong!".to_owned(), None))
        {
            assert_eq!(
                resp,
                Response {
                    jsonrpc: Version,
                    result: Err(RpcError::new(43, "Also wrong!".to_owned(), None)),
                    id: Value::Null,
                }
            );
        } else {
            panic!("Not a response");
        }
    }
}

View File

@ -1,57 +0,0 @@
use bytesize::ByteSize;
use serde::{Deserialize, Serialize};
pub mod errors;
pub mod message;
pub mod parser;
pub mod requests;
/// Resource limits applied to incoming RPC requests.
#[derive(Serialize, Deserialize, Clone, Debug)]
pub struct RpcLimitsConfig {
    /// Maximum byte size of the json payload.
    pub json_payload_max_size: ByteSize,
}
impl Default for RpcLimitsConfig {
    /// Defaults to a 10 MiB JSON payload cap.
    fn default() -> Self {
        let json_payload_max_size = ByteSize::mib(10);
        Self {
            json_payload_max_size,
        }
    }
}
/// Configuration of the RPC server endpoint.
#[derive(Serialize, Deserialize, Clone, Debug)]
pub struct RpcConfig {
    /// Socket address to listen on, e.g. `"0.0.0.0:3040"`.
    pub addr: String,
    /// Origins allowed by CORS; `"*"` allows all.
    pub cors_allowed_origins: Vec<String>,
    /// Request limits; falls back to [`RpcLimitsConfig::default`] when absent.
    #[serde(default)]
    pub limits_config: RpcLimitsConfig,
}
impl Default for RpcConfig {
    /// Listens on all interfaces at port 3040 with permissive CORS and
    /// default limits.
    fn default() -> Self {
        Self {
            addr: String::from("0.0.0.0:3040"),
            cors_allowed_origins: vec![String::from("*")],
            limits_config: RpcLimitsConfig::default(),
        }
    }
}
impl RpcConfig {
    /// Config listening on `addr`; all other fields take their defaults.
    #[must_use]
    pub fn new(addr: &str) -> Self {
        Self {
            addr: addr.to_owned(),
            ..Self::default()
        }
    }
    /// Config listening on all interfaces at `port`; other fields defaulted.
    #[must_use]
    pub fn with_port(port: u16) -> Self {
        Self {
            addr: format!("0.0.0.0:{port}"),
            ..Self::default()
        }
    }
}

View File

@ -1,29 +0,0 @@
use serde::de::DeserializeOwned;
use serde_json::Value;
use super::errors::RpcParseError;
/// Implement [`RpcRequest`] for a request type by delegating to `parse_params`.
#[macro_export]
macro_rules! parse_request {
    ($request_name:ty) => {
        impl RpcRequest for $request_name {
            fn parse(value: Option<Value>) -> Result<Self, RpcParseError> {
                parse_params::<Self>(value)
            }
        }
    };
}
/// A typed RPC request that can be parsed from the raw JSON-RPC `params` value.
pub trait RpcRequest: Sized {
    /// Parse `value` (the request's `params`) into this type.
    fn parse(value: Option<Value>) -> Result<Self, RpcParseError>;
}
/// Deserialize an optional JSON-RPC `params` value into `T`.
///
/// `None` (params absent) and deserialization failures both map to
/// [`RpcParseError`] with a descriptive message.
pub fn parse_params<T: DeserializeOwned>(value: Option<Value>) -> Result<T, RpcParseError> {
    match value {
        Some(params) => serde_json::from_value(params)
            .map_err(|err| RpcParseError(format!("Failed parsing args: {err}"))),
        None => Err(RpcParseError("Require at least one parameter".to_owned())),
    }
}

View File

@ -1,219 +0,0 @@
use std::collections::HashMap;
use nssa::AccountId;
use nssa_core::program::ProgramId;
use serde::{Deserialize, Serialize};
use serde_json::Value;
use super::{
errors::RpcParseError,
parser::{RpcRequest, parse_params},
};
use crate::{HashType, parse_request};
/// Serde helpers for (de)serializing byte buffers as base64 strings.
mod base64_deser {
    use base64::{Engine as _, engine::general_purpose};
    use serde::{self, Deserialize, Deserializer, Serializer, ser::SerializeSeq as _};
    /// Variant for `Vec<Vec<u8>>`: a JSON array of base64 strings.
    pub mod vec {
        use super::*;
        /// Serialize each byte buffer as one base64 element of a sequence.
        pub fn serialize<S>(bytes_vec: &[Vec<u8>], serializer: S) -> Result<S::Ok, S::Error>
        where
            S: Serializer,
        {
            let mut seq = serializer.serialize_seq(Some(bytes_vec.len()))?;
            for bytes in bytes_vec {
                seq.serialize_element(&general_purpose::STANDARD.encode(bytes))?;
            }
            seq.end()
        }
        /// Deserialize a sequence of base64 strings back into byte buffers.
        pub fn deserialize<'de, D>(deserializer: D) -> Result<Vec<Vec<u8>>, D::Error>
        where
            D: Deserializer<'de>,
        {
            let encoded: Vec<String> = Deserialize::deserialize(deserializer)?;
            let mut decoded = Vec::with_capacity(encoded.len());
            for s in encoded {
                let bytes = general_purpose::STANDARD
                    .decode(&s)
                    .map_err(serde::de::Error::custom)?;
                decoded.push(bytes);
            }
            Ok(decoded)
        }
    }
    /// Serialize one byte buffer as a single base64 string.
    pub fn serialize<S>(bytes: &[u8], serializer: S) -> Result<S::Ok, S::Error>
    where
        S: Serializer,
    {
        let base64_string = general_purpose::STANDARD.encode(bytes);
        serializer.serialize_str(&base64_string)
    }
    /// Deserialize a single base64 string into bytes.
    pub fn deserialize<'de, D>(deserializer: D) -> Result<Vec<u8>, D::Error>
    where
        D: Deserializer<'de>,
    {
        let encoded: String = Deserialize::deserialize(deserializer)?;
        general_purpose::STANDARD
            .decode(&encoded)
            .map_err(serde::de::Error::custom)
    }
}
/// Probe request with no parameters; answered by [`HelloResponse`].
#[derive(Serialize, Deserialize, Debug)]
pub struct HelloRequest;
/// Register a new account identified by its raw 32-byte id.
#[derive(Serialize, Deserialize, Debug)]
pub struct RegisterAccountRequest {
    pub account_id: [u8; 32],
}
/// Submit a transaction; the payload is base64 on the wire.
#[derive(Serialize, Deserialize, Debug)]
pub struct SendTxRequest {
    #[serde(with = "base64_deser")]
    pub transaction: Vec<u8>,
}
/// Fetch a single block by id.
#[derive(Serialize, Deserialize, Debug)]
pub struct GetBlockDataRequest {
    pub block_id: u64,
}
/// Get a range of blocks from `start_block_id` to `end_block_id` (inclusive).
#[derive(Serialize, Deserialize, Debug)]
pub struct GetBlockRangeDataRequest {
    pub start_block_id: u64,
    pub end_block_id: u64,
}
/// Fetch the genesis block id; no parameters.
#[derive(Serialize, Deserialize, Debug)]
pub struct GetGenesisIdRequest;
/// Fetch the latest block id; no parameters.
#[derive(Serialize, Deserialize, Debug)]
pub struct GetLastBlockRequest;
/// Fetch the accounts pre-funded at testnet genesis; no parameters.
#[derive(Serialize, Deserialize, Debug)]
pub struct GetInitialTestnetAccountsRequest;
/// Fetch the public balance of one account.
#[derive(Serialize, Deserialize, Debug)]
pub struct GetAccountBalanceRequest {
    pub account_id: AccountId,
}
/// Look up a transaction by its hash.
#[derive(Serialize, Deserialize, Debug)]
pub struct GetTransactionByHashRequest {
    pub hash: HashType,
}
/// Fetch the current nonces of several accounts at once.
#[derive(Serialize, Deserialize, Debug)]
pub struct GetAccountsNoncesRequest {
    pub account_ids: Vec<AccountId>,
}
/// Fetch the full state of one account.
#[derive(Serialize, Deserialize, Debug)]
pub struct GetAccountRequest {
    pub account_id: AccountId,
}
/// Fetch a membership proof for a commitment, if known.
#[derive(Serialize, Deserialize, Debug)]
pub struct GetProofForCommitmentRequest {
    pub commitment: nssa_core::Commitment,
}
/// Fetch the ids of the programs known to the node; no parameters.
#[derive(Serialize, Deserialize, Debug)]
pub struct GetProgramIdsRequest;
// Wire up `RpcRequest::parse` for every request type above.
parse_request!(HelloRequest);
parse_request!(RegisterAccountRequest);
parse_request!(SendTxRequest);
parse_request!(GetBlockDataRequest);
parse_request!(GetBlockRangeDataRequest);
parse_request!(GetGenesisIdRequest);
parse_request!(GetLastBlockRequest);
parse_request!(GetInitialTestnetAccountsRequest);
parse_request!(GetAccountBalanceRequest);
parse_request!(GetTransactionByHashRequest);
parse_request!(GetAccountsNoncesRequest);
parse_request!(GetProofForCommitmentRequest);
parse_request!(GetAccountRequest);
parse_request!(GetProgramIdsRequest);
/// Answer to [`HelloRequest`].
#[derive(Serialize, Deserialize, Debug)]
pub struct HelloResponse {
    pub greeting: String,
}
/// Answer to [`RegisterAccountRequest`].
#[derive(Serialize, Deserialize, Debug)]
pub struct RegisterAccountResponse {
    pub status: String,
}
/// Answer to [`SendTxRequest`]; echoes the hash of the submitted transaction.
#[derive(Serialize, Deserialize, Debug)]
pub struct SendTxResponse {
    pub status: String,
    pub tx_hash: HashType,
}
/// Answer to [`GetBlockDataRequest`]; the block is base64 on the wire.
#[derive(Serialize, Deserialize, Debug)]
pub struct GetBlockDataResponse {
    #[serde(with = "base64_deser")]
    pub block: Vec<u8>,
}
/// Answer to [`GetBlockRangeDataRequest`]; each block is base64 on the wire.
#[derive(Serialize, Deserialize, Debug)]
pub struct GetBlockRangeDataResponse {
    #[serde(with = "base64_deser::vec")]
    pub blocks: Vec<Vec<u8>>,
}
/// Answer to [`GetGenesisIdRequest`].
#[derive(Serialize, Deserialize, Debug)]
pub struct GetGenesisIdResponse {
    pub genesis_id: u64,
}
/// Answer to [`GetLastBlockRequest`].
#[derive(Serialize, Deserialize, Debug)]
pub struct GetLastBlockResponse {
    pub last_block: u64,
}
/// Answer to [`GetAccountBalanceRequest`].
#[derive(Serialize, Deserialize, Debug)]
pub struct GetAccountBalanceResponse {
    pub balance: u128,
}
/// Answer to [`GetAccountsNoncesRequest`]; nonces are in request order.
#[derive(Serialize, Deserialize, Debug)]
pub struct GetAccountsNoncesResponse {
    pub nonces: Vec<u128>,
}
/// Answer to [`GetTransactionByHashRequest`]; `None` when the hash is unknown.
#[derive(Serialize, Deserialize, Debug)]
pub struct GetTransactionByHashResponse {
    pub transaction: Option<String>,
}
/// Answer to [`GetAccountRequest`].
#[derive(Serialize, Deserialize, Debug)]
pub struct GetAccountResponse {
    pub account: nssa::Account,
}
/// Answer to [`GetProofForCommitmentRequest`]; `None` when the commitment is unknown.
#[derive(Serialize, Deserialize, Debug)]
pub struct GetProofForCommitmentResponse {
    pub membership_proof: Option<nssa_core::MembershipProof>,
}
/// Answer to [`GetProgramIdsRequest`]; maps program name to id.
#[derive(Serialize, Deserialize, Debug)]
pub struct GetProgramIdsResponse {
    pub program_ids: HashMap<String, ProgramId>,
}
/// One pre-funded testnet account, as returned for
/// [`GetInitialTestnetAccountsRequest`].
#[derive(Debug, Serialize, Deserialize, Clone)]
pub struct GetInitialTestnetAccountsResponse {
    /// Hex encoded account id.
    pub account_id: String,
    pub balance: u64,
}

View File

@ -1,361 +0,0 @@
use std::{collections::HashMap, ops::RangeInclusive};
use anyhow::Result;
use nssa::AccountId;
use nssa_core::program::ProgramId;
use reqwest::Client;
use serde::Deserialize;
use serde_json::Value;
use url::Url;
use super::rpc_primitives::requests::{
GetAccountBalanceRequest, GetAccountBalanceResponse, GetBlockDataRequest, GetBlockDataResponse,
GetGenesisIdRequest, GetGenesisIdResponse, GetInitialTestnetAccountsRequest,
};
use crate::{
HashType,
config::BasicAuth,
error::{SequencerClientError, SequencerRpcError},
rpc_primitives::{
self,
requests::{
GetAccountRequest, GetAccountResponse, GetAccountsNoncesRequest,
GetAccountsNoncesResponse, GetBlockRangeDataRequest, GetBlockRangeDataResponse,
GetInitialTestnetAccountsResponse, GetLastBlockRequest, GetLastBlockResponse,
GetProgramIdsRequest, GetProgramIdsResponse, GetProofForCommitmentRequest,
GetProofForCommitmentResponse, GetTransactionByHashRequest,
GetTransactionByHashResponse, SendTxRequest, SendTxResponse,
},
},
transaction::NSSATransaction,
};
/// Success envelope of a sequencer JSON-RPC response; only `result` is used,
/// the version and id fields are consumed for shape validation.
#[derive(Debug, Clone, Deserialize)]
struct SequencerRpcResponse {
    #[serde(rename = "jsonrpc")]
    _jsonrpc: String,
    result: serde_json::Value,
    #[serde(rename = "id")]
    _id: u64,
}
/// HTTP JSON-RPC client for the sequencer.
#[derive(Clone)]
pub struct SequencerClient {
    /// Shared reqwest client (cheap to clone).
    pub client: reqwest::Client,
    /// Base URL all RPC calls are POSTed to.
    pub sequencer_addr: Url,
    /// Optional basic-auth credentials attached to every request.
    pub basic_auth: Option<BasicAuth>,
}
impl SequencerClient {
    /// Create a client without basic-auth credentials.
    pub fn new(sequencer_addr: Url) -> Result<Self> {
        Self::new_with_auth(sequencer_addr, None)
    }
    /// Create a client, optionally authenticating every call with `basic_auth`.
    ///
    /// # Errors
    /// Fails if the underlying HTTP client cannot be built.
    pub fn new_with_auth(sequencer_addr: Url, basic_auth: Option<BasicAuth>) -> Result<Self> {
        Ok(Self {
            client: Client::builder()
                // Add more fields if needed
                .timeout(std::time::Duration::from_secs(60))
                // Should be kept in sync with server keep-alive settings
                .pool_idle_timeout(std::time::Duration::from_secs(5))
                .build()?,
            sequencer_addr,
            basic_auth,
        })
    }
    /// POST a JSON-RPC 2.0 call for `method` with `payload` and return the
    /// `result` field of the response.
    ///
    /// Transport/decoding failures are retried every 10 s, up to 60 attempts.
    /// A response that does not parse as a success envelope is parsed as a
    /// sequencer RPC error and returned as `Err`.
    pub async fn call_method_with_payload(
        &self,
        method: &str,
        payload: Value,
    ) -> Result<Value, SequencerClientError> {
        let request =
            rpc_primitives::message::Request::from_payload_version_2_0(method.to_owned(), payload);
        log::debug!(
            "Calling method {method} with payload {request:?} to sequencer at {}",
            self.sequencer_addr
        );
        let strategy = tokio_retry::strategy::FixedInterval::from_millis(10000).take(60);
        let response_value = tokio_retry::Retry::spawn(strategy, || async {
            let mut call_builder = self.client.post(self.sequencer_addr.clone());
            if let Some(BasicAuth { username, password }) = &self.basic_auth {
                call_builder = call_builder.basic_auth(username, password.as_deref());
            }
            match call_builder.json(&request).send().await {
                Err(err) => Err(err),
                Ok(call_res) => call_res.json::<Value>().await,
            }
        })
        .await?;
        if let Ok(response) = serde_json::from_value::<SequencerRpcResponse>(response_value.clone())
        {
            Ok(response.result)
        } else {
            // Not a success envelope: decode it as the error shape instead.
            let err_resp = serde_json::from_value::<SequencerRpcError>(response_value)?;
            Err(err_resp.into())
        }
    }
    /// Get block data at `block_id` from sequencer.
    pub async fn get_block(
        &self,
        block_id: u64,
    ) -> Result<GetBlockDataResponse, SequencerClientError> {
        let block_req = GetBlockDataRequest { block_id };
        let req = serde_json::to_value(block_req)?;
        let resp = self.call_method_with_payload("get_block", req).await?;
        let resp_deser = serde_json::from_value(resp)?;
        Ok(resp_deser)
    }
    /// Get block data for an inclusive range of block ids from sequencer.
    pub async fn get_block_range(
        &self,
        range: RangeInclusive<u64>,
    ) -> Result<GetBlockRangeDataResponse, SequencerClientError> {
        let block_req = GetBlockRangeDataRequest {
            start_block_id: *range.start(),
            end_block_id: *range.end(),
        };
        let req = serde_json::to_value(block_req)?;
        let resp = self
            .call_method_with_payload("get_block_range", req)
            .await?;
        let resp_deser = serde_json::from_value(resp)?;
        Ok(resp_deser)
    }
    /// Get last known `block_id` from sequencer.
    pub async fn get_last_block(&self) -> Result<GetLastBlockResponse, SequencerClientError> {
        let block_req = GetLastBlockRequest {};
        let req = serde_json::to_value(block_req)?;
        let resp = self.call_method_with_payload("get_last_block", req).await?;
        let resp_deser = serde_json::from_value(resp)?;
        Ok(resp_deser)
    }
    /// Get account public balance for `account_id`. `account_id` must be a valid hex-string for 32
    /// bytes.
    pub async fn get_account_balance(
        &self,
        account_id: AccountId,
    ) -> Result<GetAccountBalanceResponse, SequencerClientError> {
        let block_req = GetAccountBalanceRequest { account_id };
        let req = serde_json::to_value(block_req)?;
        let resp = self
            .call_method_with_payload("get_account_balance", req)
            .await?;
        let resp_deser = serde_json::from_value(resp)?;
        Ok(resp_deser)
    }
    /// Get accounts nonces for `account_ids`. `account_ids` must be a list of valid hex-strings for
    /// 32 bytes.
    pub async fn get_accounts_nonces(
        &self,
        account_ids: Vec<AccountId>,
    ) -> Result<GetAccountsNoncesResponse, SequencerClientError> {
        let block_req = GetAccountsNoncesRequest { account_ids };
        let req = serde_json::to_value(block_req)?;
        let resp = self
            .call_method_with_payload("get_accounts_nonces", req)
            .await?;
        let resp_deser = serde_json::from_value(resp)?;
        Ok(resp_deser)
    }
    /// Get the full state of `account_id` from sequencer.
    pub async fn get_account(
        &self,
        account_id: AccountId,
    ) -> Result<GetAccountResponse, SequencerClientError> {
        let block_req = GetAccountRequest { account_id };
        let req = serde_json::to_value(block_req)?;
        let resp = self.call_method_with_payload("get_account", req).await?;
        let resp_deser = serde_json::from_value(resp)?;
        Ok(resp_deser)
    }
    /// Get transaction details for `hash`.
    pub async fn get_transaction_by_hash(
        &self,
        hash: HashType,
    ) -> Result<GetTransactionByHashResponse, SequencerClientError> {
        let block_req = GetTransactionByHashRequest { hash };
        let req = serde_json::to_value(block_req)?;
        let resp = self
            .call_method_with_payload("get_transaction_by_hash", req)
            .await?;
        let resp_deser = serde_json::from_value(resp)?;
        Ok(resp_deser)
    }
    /// Send a public transaction to sequencer.
    pub async fn send_tx_public(
        &self,
        transaction: nssa::PublicTransaction,
    ) -> Result<SendTxResponse, SequencerClientError> {
        let transaction = NSSATransaction::Public(transaction);
        let tx_req = SendTxRequest {
            transaction: borsh::to_vec(&transaction)
                .expect("borsh serialization of a transaction should not fail"),
        };
        let req = serde_json::to_value(tx_req)?;
        let resp = self.call_method_with_payload("send_tx", req).await?;
        let resp_deser = serde_json::from_value(resp)?;
        Ok(resp_deser)
    }
    /// Send a privacy-preserving transaction to sequencer.
    pub async fn send_tx_private(
        &self,
        transaction: nssa::PrivacyPreservingTransaction,
    ) -> Result<SendTxResponse, SequencerClientError> {
        let transaction = NSSATransaction::PrivacyPreserving(transaction);
        let tx_req = SendTxRequest {
            transaction: borsh::to_vec(&transaction)
                .expect("borsh serialization of a transaction should not fail"),
        };
        let req = serde_json::to_value(tx_req)?;
        let resp = self.call_method_with_payload("send_tx", req).await?;
        let resp_deser = serde_json::from_value(resp)?;
        Ok(resp_deser)
    }
    /// Get genesis id from sequencer.
    pub async fn get_genesis_id(&self) -> Result<GetGenesisIdResponse, SequencerClientError> {
        let genesis_req = GetGenesisIdRequest {};
        let req = serde_json::to_value(genesis_req)?;
        // Propagate transport/decoding failures instead of panicking (was `.unwrap()`),
        // matching the error handling of the other methods on this client.
        let resp = self.call_method_with_payload("get_genesis", req).await?;
        let resp_deser = serde_json::from_value(resp)?;
        Ok(resp_deser)
    }
    /// Get initial testnet accounts from sequencer.
    pub async fn get_initial_testnet_accounts(
        &self,
    ) -> Result<Vec<GetInitialTestnetAccountsResponse>, SequencerClientError> {
        let acc_req = GetInitialTestnetAccountsRequest {};
        let req = serde_json::to_value(acc_req)?;
        // Propagate failures instead of panicking (was `.unwrap()`).
        let resp = self
            .call_method_with_payload("get_initial_testnet_accounts", req)
            .await?;
        let resp_deser = serde_json::from_value(resp)?;
        Ok(resp_deser)
    }
    /// Get proof for commitment; `Ok(None)` when the commitment is unknown.
    pub async fn get_proof_for_commitment(
        &self,
        commitment: nssa_core::Commitment,
    ) -> Result<Option<nssa_core::MembershipProof>, SequencerClientError> {
        let acc_req = GetProofForCommitmentRequest { commitment };
        let req = serde_json::to_value(acc_req)?;
        // Propagate failures instead of panicking (was `.unwrap()`).
        let resp = self
            .call_method_with_payload("get_proof_for_commitment", req)
            .await?;
        let resp_deser = serde_json::from_value::<GetProofForCommitmentResponse>(resp)?;
        Ok(resp_deser.membership_proof)
    }
    /// Send a program-deployment transaction to sequencer.
    pub async fn send_tx_program(
        &self,
        transaction: nssa::ProgramDeploymentTransaction,
    ) -> Result<SendTxResponse, SequencerClientError> {
        let transaction = NSSATransaction::ProgramDeployment(transaction);
        let tx_req = SendTxRequest {
            transaction: borsh::to_vec(&transaction)
                .expect("borsh serialization of a transaction should not fail"),
        };
        let req = serde_json::to_value(tx_req)?;
        let resp = self.call_method_with_payload("send_tx", req).await?;
        let resp_deser = serde_json::from_value(resp)?;
        Ok(resp_deser)
    }
    /// Get Ids of the programs used by the node.
    pub async fn get_program_ids(
        &self,
    ) -> Result<HashMap<String, ProgramId>, SequencerClientError> {
        let acc_req = GetProgramIdsRequest {};
        let req = serde_json::to_value(acc_req)?;
        // Propagate failures instead of panicking (was `.unwrap()`).
        let resp = self
            .call_method_with_payload("get_program_ids", req)
            .await?;
        let resp_deser = serde_json::from_value::<GetProgramIdsResponse>(resp)?;
        Ok(resp_deser.program_ids)
    }
}

View File

@ -3,7 +3,7 @@ use nssa::AccountId;
use crate::{
HashType,
block::{Block, HashableBlockData},
transaction::NSSATransaction,
transaction::{NSSATransaction, clock_invocation},
};
// Helpers
@ -15,7 +15,7 @@ pub fn sequencer_sign_key_for_testing() -> nssa::PrivateKey {
// Dummy producers
/// Produce dummy block with.
/// Produce dummy block with provided transactions + clock transaction at the end.
///
/// `id` - block id, provide zero for genesis.
///
@ -26,8 +26,12 @@ pub fn sequencer_sign_key_for_testing() -> nssa::PrivateKey {
pub fn produce_dummy_block(
id: u64,
prev_hash: Option<HashType>,
transactions: Vec<NSSATransaction>,
mut transactions: Vec<NSSATransaction>,
) -> Block {
transactions.push(NSSATransaction::Public(clock_invocation(
id.saturating_mul(100),
)));
let block_data = HashableBlockData {
block_id: id,
prev_block_hash: prev_hash.unwrap_or_default(),
@ -68,7 +72,7 @@ pub fn create_transaction_native_token_transfer(
signing_key: &nssa::PrivateKey,
) -> NSSATransaction {
let account_ids = vec![from, to];
let nonces = vec![nonce];
let nonces = vec![nonce.into()];
let program_id = nssa::program::Program::authenticated_transfer_program().id();
let message = nssa::public_transaction::Message::try_new(
program_id,

View File

@ -1,6 +1,7 @@
use borsh::{BorshDeserialize, BorshSerialize};
use log::warn;
use nssa::{AccountId, V02State};
use nssa::{AccountId, V03State, ValidatedStateDiff};
use nssa_core::{BlockId, Timestamp};
use serde::{Deserialize, Serialize};
use crate::HashType;
@ -12,6 +13,18 @@ pub enum NSSATransaction {
ProgramDeployment(nssa::ProgramDeploymentTransaction),
}
impl Serialize for NSSATransaction {
fn serialize<S: serde::Serializer>(&self, serializer: S) -> Result<S::Ok, S::Error> {
crate::borsh_base64::serialize(self, serializer)
}
}
impl<'de> Deserialize<'de> for NSSATransaction {
fn deserialize<D: serde::Deserializer<'de>>(deserializer: D) -> Result<Self, D::Error> {
crate::borsh_base64::deserialize(deserializer)
}
}
impl NSSATransaction {
#[must_use]
pub fn hash(&self) -> HashType {
@ -53,17 +66,53 @@ impl NSSATransaction {
}
}
/// Validates the transaction against the current state and returns the resulting diff
/// without applying it. Rejects transactions that modify clock system accounts.
pub fn validate_on_state(
&self,
state: &V03State,
block_id: BlockId,
timestamp: Timestamp,
) -> Result<ValidatedStateDiff, nssa::error::NssaError> {
let diff = match self {
Self::Public(tx) => {
ValidatedStateDiff::from_public_transaction(tx, state, block_id, timestamp)
}
Self::PrivacyPreserving(tx) => ValidatedStateDiff::from_privacy_preserving_transaction(
tx, state, block_id, timestamp,
),
Self::ProgramDeployment(tx) => {
ValidatedStateDiff::from_program_deployment_transaction(tx, state)
}
}?;
let public_diff = diff.public_diff();
let touches_clock = nssa::CLOCK_PROGRAM_ACCOUNT_IDS.iter().any(|id| {
public_diff
.get(id)
.is_some_and(|post| *post != state.get_account_by_id(*id))
});
if touches_clock {
return Err(nssa::error::NssaError::InvalidInput(
"Transaction modifies system clock accounts".into(),
));
}
Ok(diff)
}
/// Validates the transaction against the current state, rejects modifications to clock
/// system accounts, and applies the resulting diff to the state.
pub fn execute_check_on_state(
self,
state: &mut V02State,
state: &mut V03State,
block_id: BlockId,
timestamp: Timestamp,
) -> Result<Self, nssa::error::NssaError> {
match &self {
Self::Public(tx) => state.transition_from_public_transaction(tx),
Self::PrivacyPreserving(tx) => state.transition_from_privacy_preserving_transaction(tx),
Self::ProgramDeployment(tx) => state.transition_from_program_deployment_transaction(tx),
}
.inspect_err(|err| warn!("Error at transition {err:#?}"))?;
let diff = self
.validate_on_state(state, block_id, timestamp)
.inspect_err(|err| warn!("Error at transition {err:#?}"))?;
state.apply_state_diff(diff);
Ok(self)
}
}
@ -87,7 +136,7 @@ impl From<nssa::ProgramDeploymentTransaction> for NSSATransaction {
}
#[derive(
Debug, Serialize, Deserialize, Clone, Copy, PartialEq, Eq, BorshSerialize, BorshDeserialize,
Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize, BorshSerialize, BorshDeserialize,
)]
pub enum TxKind {
Public,
@ -104,3 +153,20 @@ pub enum TransactionMalformationError {
#[error("Transaction size {size} exceeds maximum allowed size of {max} bytes")]
TransactionTooLarge { size: usize, max: usize },
}
/// Returns the canonical Clock Program invocation transaction for the given block timestamp.
/// Every valid block must end with exactly one occurrence of this transaction.
#[must_use]
pub fn clock_invocation(timestamp: clock_core::Instruction) -> nssa::PublicTransaction {
let message = nssa::public_transaction::Message::try_new(
nssa::program::Program::clock().id(),
clock_core::CLOCK_PROGRAM_ACCOUNT_IDS.to_vec(),
vec![],
timestamp,
)
.expect("Clock invocation message should always be constructable");
nssa::PublicTransaction::new(
message,
nssa::public_transaction::WitnessSet::from_raw_parts(vec![]),
)
}

View File

@ -22,6 +22,20 @@ _wallet_complete_account_id() {
fi
}
# Helper function to complete account labels
_wallet_complete_account_label() {
local cur="$1"
local labels
if command -v wallet &>/dev/null; then
labels=$(wallet account list 2>/dev/null | grep -o '\[.*\]' | sed 's/^\[//;s/\]$//')
fi
if [[ -n "$labels" ]]; then
COMPREPLY=($(compgen -W "$labels" -- "$cur"))
fi
}
_wallet() {
local cur prev words cword
_init_completion 2>/dev/null || {
@ -91,20 +105,32 @@ _wallet() {
--account-id)
_wallet_complete_account_id "$cur"
;;
--account-label)
_wallet_complete_account_label "$cur"
;;
*)
COMPREPLY=($(compgen -W "--account-id" -- "$cur"))
COMPREPLY=($(compgen -W "--account-id --account-label" -- "$cur"))
;;
esac
;;
send)
case "$prev" in
--from | --to)
--from)
_wallet_complete_account_id "$cur"
;;
--from-label)
_wallet_complete_account_label "$cur"
;;
--to)
_wallet_complete_account_id "$cur"
;;
--to-label)
_wallet_complete_account_label "$cur"
;;
--to-npk | --to-vpk | --amount)
;; # no specific completion
*)
COMPREPLY=($(compgen -W "--from --to --to-npk --to-vpk --amount" -- "$cur"))
COMPREPLY=($(compgen -W "--from --from-label --to --to-label --to-npk --to-vpk --amount" -- "$cur"))
;;
esac
;;
@ -147,8 +173,11 @@ _wallet() {
-a | --account-id)
_wallet_complete_account_id "$cur"
;;
--account-label)
_wallet_complete_account_label "$cur"
;;
*)
COMPREPLY=($(compgen -W "-r --raw -k --keys -a --account-id" -- "$cur"))
COMPREPLY=($(compgen -W "-r --raw -k --keys -a --account-id --account-label" -- "$cur"))
;;
esac
;;
@ -186,10 +215,13 @@ _wallet() {
-a | --account-id)
_wallet_complete_account_id "$cur"
;;
--account-label)
_wallet_complete_account_label "$cur"
;;
-l | --label)
;; # no specific completion for label value
*)
COMPREPLY=($(compgen -W "-a --account-id -l --label" -- "$cur"))
COMPREPLY=($(compgen -W "-a --account-id --account-label -l --label" -- "$cur"))
;;
esac
;;
@ -206,8 +238,11 @@ _wallet() {
--to)
_wallet_complete_account_id "$cur"
;;
--to-label)
_wallet_complete_account_label "$cur"
;;
*)
COMPREPLY=($(compgen -W "--to" -- "$cur"))
COMPREPLY=($(compgen -W "--to --to-label" -- "$cur"))
;;
esac
;;
@ -221,49 +256,85 @@ _wallet() {
;;
new)
case "$prev" in
--definition-account-id | --supply-account-id)
--definition-account-id)
_wallet_complete_account_id "$cur"
;;
--definition-account-label)
_wallet_complete_account_label "$cur"
;;
--supply-account-id)
_wallet_complete_account_id "$cur"
;;
--supply-account-label)
_wallet_complete_account_label "$cur"
;;
-n | --name | -t | --total-supply)
;; # no specific completion
*)
COMPREPLY=($(compgen -W "--definition-account-id --supply-account-id -n --name -t --total-supply" -- "$cur"))
COMPREPLY=($(compgen -W "--definition-account-id --definition-account-label --supply-account-id --supply-account-label -n --name -t --total-supply" -- "$cur"))
;;
esac
;;
send)
case "$prev" in
--from | --to)
--from)
_wallet_complete_account_id "$cur"
;;
--from-label)
_wallet_complete_account_label "$cur"
;;
--to)
_wallet_complete_account_id "$cur"
;;
--to-label)
_wallet_complete_account_label "$cur"
;;
--to-npk | --to-vpk | --amount)
;; # no specific completion
*)
COMPREPLY=($(compgen -W "--from --to --to-npk --to-vpk --amount" -- "$cur"))
COMPREPLY=($(compgen -W "--from --from-label --to --to-label --to-npk --to-vpk --amount" -- "$cur"))
;;
esac
;;
burn)
case "$prev" in
--definition | --holder)
--definition)
_wallet_complete_account_id "$cur"
;;
--definition-label)
_wallet_complete_account_label "$cur"
;;
--holder)
_wallet_complete_account_id "$cur"
;;
--holder-label)
_wallet_complete_account_label "$cur"
;;
--amount)
;; # no specific completion
*)
COMPREPLY=($(compgen -W "--definition --holder --amount" -- "$cur"))
COMPREPLY=($(compgen -W "--definition --definition-label --holder --holder-label --amount" -- "$cur"))
;;
esac
;;
mint)
case "$prev" in
--definition | --holder)
--definition)
_wallet_complete_account_id "$cur"
;;
--definition-label)
_wallet_complete_account_label "$cur"
;;
--holder)
_wallet_complete_account_id "$cur"
;;
--holder-label)
_wallet_complete_account_label "$cur"
;;
--holder-npk | --holder-vpk | --amount)
;; # no specific completion
*)
COMPREPLY=($(compgen -W "--definition --holder --holder-npk --holder-vpk --amount" -- "$cur"))
COMPREPLY=($(compgen -W "--definition --definition-label --holder --holder-label --holder-npk --holder-vpk --amount" -- "$cur"))
;;
esac
;;
@ -277,49 +348,103 @@ _wallet() {
;;
new)
case "$prev" in
--user-holding-a | --user-holding-b | --user-holding-lp)
--user-holding-a)
_wallet_complete_account_id "$cur"
;;
--user-holding-a-label)
_wallet_complete_account_label "$cur"
;;
--user-holding-b)
_wallet_complete_account_id "$cur"
;;
--user-holding-b-label)
_wallet_complete_account_label "$cur"
;;
--user-holding-lp)
_wallet_complete_account_id "$cur"
;;
--user-holding-lp-label)
_wallet_complete_account_label "$cur"
;;
--balance-a | --balance-b)
;; # no specific completion
*)
COMPREPLY=($(compgen -W "--user-holding-a --user-holding-b --user-holding-lp --balance-a --balance-b" -- "$cur"))
COMPREPLY=($(compgen -W "--user-holding-a --user-holding-a-label --user-holding-b --user-holding-b-label --user-holding-lp --user-holding-lp-label --balance-a --balance-b" -- "$cur"))
;;
esac
;;
swap)
case "$prev" in
--user-holding-a | --user-holding-b)
--user-holding-a)
_wallet_complete_account_id "$cur"
;;
--user-holding-a-label)
_wallet_complete_account_label "$cur"
;;
--user-holding-b)
_wallet_complete_account_id "$cur"
;;
--user-holding-b-label)
_wallet_complete_account_label "$cur"
;;
--amount-in | --min-amount-out | --token-definition)
;; # no specific completion
*)
COMPREPLY=($(compgen -W "--user-holding-a --user-holding-b --amount-in --min-amount-out --token-definition" -- "$cur"))
COMPREPLY=($(compgen -W "--user-holding-a --user-holding-a-label --user-holding-b --user-holding-b-label --amount-in --min-amount-out --token-definition" -- "$cur"))
;;
esac
;;
add-liquidity)
case "$prev" in
--user-holding-a | --user-holding-b | --user-holding-lp)
--user-holding-a)
_wallet_complete_account_id "$cur"
;;
--user-holding-a-label)
_wallet_complete_account_label "$cur"
;;
--user-holding-b)
_wallet_complete_account_id "$cur"
;;
--user-holding-b-label)
_wallet_complete_account_label "$cur"
;;
--user-holding-lp)
_wallet_complete_account_id "$cur"
;;
--user-holding-lp-label)
_wallet_complete_account_label "$cur"
;;
--max-amount-a | --max-amount-b | --min-amount-lp)
;; # no specific completion
*)
COMPREPLY=($(compgen -W "--user-holding-a --user-holding-b --user-holding-lp --max-amount-a --max-amount-b --min-amount-lp" -- "$cur"))
COMPREPLY=($(compgen -W "--user-holding-a --user-holding-a-label --user-holding-b --user-holding-b-label --user-holding-lp --user-holding-lp-label --max-amount-a --max-amount-b --min-amount-lp" -- "$cur"))
;;
esac
;;
remove-liquidity)
case "$prev" in
--user-holding-a | --user-holding-b | --user-holding-lp)
--user-holding-a)
_wallet_complete_account_id "$cur"
;;
--user-holding-a-label)
_wallet_complete_account_label "$cur"
;;
--user-holding-b)
_wallet_complete_account_id "$cur"
;;
--user-holding-b-label)
_wallet_complete_account_label "$cur"
;;
--user-holding-lp)
_wallet_complete_account_id "$cur"
;;
--user-holding-lp-label)
_wallet_complete_account_label "$cur"
;;
--balance-lp | --min-amount-a | --min-amount-b)
;; # no specific completion
*)
COMPREPLY=($(compgen -W "--user-holding-a --user-holding-b --user-holding-lp --balance-lp --min-amount-a --min-amount-b" -- "$cur"))
COMPREPLY=($(compgen -W "--user-holding-a --user-holding-a-label --user-holding-b --user-holding-b-label --user-holding-lp --user-holding-lp-label --balance-lp --min-amount-a --min-amount-b" -- "$cur"))
;;
esac
;;

View File

@ -90,12 +90,15 @@ _wallet_auth_transfer() {
case $line[1] in
init)
_arguments \
'--account-id[Account ID to initialize]:account_id:_wallet_account_ids'
'--account-id[Account ID to initialize]:account_id:_wallet_account_ids' \
'--account-label[Account label (alternative to --account-id)]:label:_wallet_account_labels'
;;
send)
_arguments \
'--from[Source account ID]:from_account:_wallet_account_ids' \
'--from-label[Source account label (alternative to --from)]:label:_wallet_account_labels' \
'--to[Destination account ID (for owned accounts)]:to_account:_wallet_account_ids' \
'--to-label[Destination account label (alternative to --to)]:label:_wallet_account_labels' \
'--to-npk[Destination nullifier public key (for foreign private accounts)]:npk:' \
'--to-vpk[Destination viewing public key (for foreign private accounts)]:vpk:' \
'--amount[Amount of native tokens to send]:amount:'
@ -165,7 +168,8 @@ _wallet_account() {
_arguments \
'(-r --raw)'{-r,--raw}'[Get raw account data]' \
'(-k --keys)'{-k,--keys}'[Display keys (pk for public accounts, npk/vpk for private accounts)]' \
'(-a --account-id)'{-a,--account-id}'[Account ID to query]:account_id:_wallet_account_ids'
'(-a --account-id)'{-a,--account-id}'[Account ID to query]:account_id:_wallet_account_ids' \
'--account-label[Account label (alternative to --account-id)]:label:_wallet_account_labels'
;;
list|ls)
_arguments \
@ -189,6 +193,7 @@ _wallet_account() {
label)
_arguments \
'(-a --account-id)'{-a,--account-id}'[Account ID to label]:account_id:_wallet_account_ids' \
'--account-label[Account label (alternative to --account-id)]:label:_wallet_account_labels' \
'(-l --label)'{-l,--label}'[The label to assign to the account]:label:'
;;
esac
@ -216,7 +221,8 @@ _wallet_pinata() {
case $line[1] in
claim)
_arguments \
'--to[Destination account ID to receive claimed tokens]:to_account:_wallet_account_ids'
'--to[Destination account ID to receive claimed tokens]:to_account:_wallet_account_ids' \
'--to-label[Destination account label (alternative to --to)]:label:_wallet_account_labels'
;;
esac
;;
@ -249,12 +255,16 @@ _wallet_token() {
'--name[Token name]:name:' \
'--total-supply[Total supply of tokens to mint]:total_supply:' \
'--definition-account-id[Account ID for token definition]:definition_account:_wallet_account_ids' \
'--supply-account-id[Account ID to receive initial supply]:supply_account:_wallet_account_ids'
'--definition-account-label[Definition account label (alternative to --definition-account-id)]:label:_wallet_account_labels' \
'--supply-account-id[Account ID to receive initial supply]:supply_account:_wallet_account_ids' \
'--supply-account-label[Supply account label (alternative to --supply-account-id)]:label:_wallet_account_labels'
;;
send)
_arguments \
'--from[Source holding account ID]:from_account:_wallet_account_ids' \
'--from-label[Source account label (alternative to --from)]:label:_wallet_account_labels' \
'--to[Destination holding account ID (for owned accounts)]:to_account:_wallet_account_ids' \
'--to-label[Destination account label (alternative to --to)]:label:_wallet_account_labels' \
'--to-npk[Destination nullifier public key (for foreign private accounts)]:npk:' \
'--to-vpk[Destination viewing public key (for foreign private accounts)]:vpk:' \
'--amount[Amount of tokens to send]:amount:'
@ -262,13 +272,17 @@ _wallet_token() {
burn)
_arguments \
'--definition[Definition account ID]:definition_account:_wallet_account_ids' \
'--definition-label[Definition account label (alternative to --definition)]:label:_wallet_account_labels' \
'--holder[Holder account ID]:holder_account:_wallet_account_ids' \
'--holder-label[Holder account label (alternative to --holder)]:label:_wallet_account_labels' \
'--amount[Amount of tokens to burn]:amount:'
;;
mint)
_arguments \
'--definition[Definition account ID]:definition_account:_wallet_account_ids' \
'--definition-label[Definition account label (alternative to --definition)]:label:_wallet_account_labels' \
'--holder[Holder account ID (for owned accounts)]:holder_account:_wallet_account_ids' \
'--holder-label[Holder account label (alternative to --holder)]:label:_wallet_account_labels' \
'--holder-npk[Holder nullifier public key (for foreign private accounts)]:npk:' \
'--holder-vpk[Holder viewing public key (for foreign private accounts)]:vpk:' \
'--amount[Amount of tokens to mint]:amount:'
@ -302,15 +316,20 @@ _wallet_amm() {
new)
_arguments \
'--user-holding-a[User token A holding account ID]:holding_a:_wallet_account_ids' \
'--user-holding-a-label[User holding A account label (alternative to --user-holding-a)]:label:_wallet_account_labels' \
'--user-holding-b[User token B holding account ID]:holding_b:_wallet_account_ids' \
'--user-holding-b-label[User holding B account label (alternative to --user-holding-b)]:label:_wallet_account_labels' \
'--user-holding-lp[User LP token holding account ID]:holding_lp:_wallet_account_ids' \
'--user-holding-lp-label[User holding LP account label (alternative to --user-holding-lp)]:label:_wallet_account_labels' \
'--balance-a[Amount of token A to deposit]:balance_a:' \
'--balance-b[Amount of token B to deposit]:balance_b:'
;;
swap)
_arguments \
'--user-holding-a[User token A holding account ID]:holding_a:_wallet_account_ids' \
'--user-holding-a-label[User holding A account label (alternative to --user-holding-a)]:label:_wallet_account_labels' \
'--user-holding-b[User token B holding account ID]:holding_b:_wallet_account_ids' \
'--user-holding-b-label[User holding B account label (alternative to --user-holding-b)]:label:_wallet_account_labels' \
'--amount-in[Amount of tokens to swap]:amount_in:' \
'--min-amount-out[Minimum tokens expected in return]:min_amount_out:' \
'--token-definition[Definition ID of the token being provided]:token_def:'
@ -318,8 +337,11 @@ _wallet_amm() {
add-liquidity)
_arguments \
'--user-holding-a[User token A holding account ID]:holding_a:_wallet_account_ids' \
'--user-holding-a-label[User holding A account label (alternative to --user-holding-a)]:label:_wallet_account_labels' \
'--user-holding-b[User token B holding account ID]:holding_b:_wallet_account_ids' \
'--user-holding-b-label[User holding B account label (alternative to --user-holding-b)]:label:_wallet_account_labels' \
'--user-holding-lp[User LP token holding account ID]:holding_lp:_wallet_account_ids' \
'--user-holding-lp-label[User holding LP account label (alternative to --user-holding-lp)]:label:_wallet_account_labels' \
'--max-amount-a[Maximum amount of token A to deposit]:max_amount_a:' \
'--max-amount-b[Maximum amount of token B to deposit]:max_amount_b:' \
'--min-amount-lp[Minimum LP tokens to receive]:min_amount_lp:'
@ -327,8 +349,11 @@ _wallet_amm() {
remove-liquidity)
_arguments \
'--user-holding-a[User token A holding account ID]:holding_a:_wallet_account_ids' \
'--user-holding-a-label[User holding A account label (alternative to --user-holding-a)]:label:_wallet_account_labels' \
'--user-holding-b[User token B holding account ID]:holding_b:_wallet_account_ids' \
'--user-holding-b-label[User holding B account label (alternative to --user-holding-b)]:label:_wallet_account_labels' \
'--user-holding-lp[User LP token holding account ID]:holding_lp:_wallet_account_ids' \
'--user-holding-lp-label[User holding LP account label (alternative to --user-holding-lp)]:label:_wallet_account_labels' \
'--balance-lp[Amount of LP tokens to burn]:balance_lp:' \
'--min-amount-a[Minimum token A to receive]:min_amount_a:' \
'--min-amount-b[Minimum token B to receive]:min_amount_b:'
@ -424,7 +449,7 @@ _wallet_help() {
_wallet_account_ids() {
local -a accounts
local line
# Try to get accounts from wallet account list command
# Filter to lines starting with /N (numbered accounts) and extract the account ID
if command -v wallet &>/dev/null; then
@ -433,14 +458,35 @@ _wallet_account_ids() {
[[ -n "$line" ]] && accounts+=("${line%,}")
done < <(wallet account list 2>/dev/null | grep '^/[0-9]' | awk '{print $2}')
fi
# Provide type prefixes as fallback if command fails or returns nothing
if (( ${#accounts} == 0 )); then
compadd -S '' -- 'Public/' 'Private/'
return
fi
_multi_parts / accounts
}
# Helper function to complete account labels
# Uses `wallet account list` to get available labels
_wallet_account_labels() {
local -a labels
local line
if command -v wallet &>/dev/null; then
while IFS= read -r line; do
local label
# Extract label from [...] at end of line
label="${line##*\[}"
label="${label%\]}"
[[ -n "$label" && "$label" != "$line" ]] && labels+=("$label")
done < <(wallet account list 2>/dev/null)
fi
if (( ${#labels} > 0 )); then
compadd -a labels
fi
}
_wallet "$@"

View File

@ -1,6 +1,5 @@
{
"home": "/var/lib/sequencer_runner",
"override_rust_log": null,
"home": "/var/lib/sequencer_service",
"genesis_id": 1,
"is_genesis_random": true,
"max_num_tx_in_block": 20,
@ -8,7 +7,6 @@
"mempool_max_size": 10000,
"block_create_timeout": "10s",
"retry_pending_blocks_timeout": "7s",
"port": 3040,
"bedrock_config": {
"backoff": {
"start_delay": "100ms",

View File

@ -7,21 +7,21 @@ services:
environment:
- RUST_LOG=error
sequencer_runner:
sequencer_service:
depends_on:
- logos-blockchain-node-0
- indexer_service
volumes: !override
- ./configs/docker-all-in-one/sequencer:/etc/sequencer_runner
volumes:
- ./configs/docker-all-in-one/sequencer_config.json:/etc/sequencer_service/sequencer_config.json
indexer_service:
depends_on:
- logos-blockchain-node-0
volumes:
- ./configs/docker-all-in-one/indexer/indexer_config.json:/etc/indexer_service/indexer_config.json
- ./configs/docker-all-in-one/indexer_config.json:/etc/indexer_service/indexer_config.json
explorer_service:
depends_on:
- indexer_service
environment:
- INDEXER_RPC_URL=http://indexer_service:8779
- INDEXER_RPC_URL=http://indexer_service:8779

View File

@ -6,7 +6,7 @@ include:
- path:
bedrock/docker-compose.yml
- path:
sequencer_runner/docker-compose.yml
sequencer/service/docker-compose.yml
- path:
indexer/service/docker-compose.yml
- path:

View File

@ -0,0 +1,369 @@
# Associated Token Accounts (ATAs)
This tutorial covers Associated Token Accounts (ATAs). An ATA lets you derive a unique token holding address from an owner account and a token definition — no need to create and track holding accounts manually. Given the same inputs, anyone can compute the same ATA address without a network call. By the end, you will have practiced:
1. Deriving ATA addresses locally.
2. Creating an ATA.
3. Sending tokens via ATAs.
4. Burning tokens from an ATA.
5. Listing ATAs across multiple token definitions.
6. Creating an ATA with a private owner.
7. Sending tokens from a private owner's ATA.
8. Burning tokens from a private owner's ATA.
> [!Important]
> This tutorial assumes you have completed the [wallet-setup](wallet-setup.md) and [custom-tokens](custom-tokens.md) tutorials. You need a running wallet with accounts and at least one token definition.
## Prerequisites
### Deploy the ATA program
Unlike the Token program (which is built-in), the ATA program must be deployed before you can use it. The pre-built binary is included in the repository:
```bash
wallet deploy-program artifacts/program_methods/associated_token_account.bin
```
> [!Note]
> Program deployment is idempotent — if the ATA program has already been deployed (e.g. by another user on the same network), the command is a no-op.
You can verify the deployment succeeded by running any `wallet ata` command. If the program is not deployed, commands that submit transactions will fail.
The CLI provides commands to work with the ATA program. Run `wallet ata` to see the options:
```bash
Commands:
address Derive and print the Associated Token Account address (local only, no network)
create Create (or idempotently no-op) the Associated Token Account
send Send tokens from owner's ATA to a recipient
burn Burn tokens from holder's ATA
list List all ATAs for a given owner across multiple token definitions
help Print this message or the help of the given subcommand(s)
```
## 1. How ATA addresses work
An ATA address is deterministically derived from two inputs:
1. The **owner** account ID.
2. The **token definition** account ID.
The derivation works as follows:
```
seed = SHA256(owner_id || definition_id)
ata_address = AccountId::for_public_pda(ata_program_id, seed)
```
Because the computation is pure, anyone who knows the owner and definition can reproduce the exact same ATA address — no network call required.
> [!Note]
> All ATA commands that submit transactions accept a privacy prefix on the owner/holder argument — `Public/` for public accounts and `Private/` for private accounts. Using `Private/` generates a zero-knowledge proof locally and submits only the proof to the sequencer, keeping the owner's identity off-chain.
## 2. Deriving an ATA address (`wallet ata address`)
The `address` subcommand computes the ATA address locally without submitting a transaction.
### a. Set up an owner and token definition
If you already have a public account and a token definition from the custom-tokens tutorial, you can reuse them. Otherwise, create them now:
```bash
wallet account new public
# Output:
Generated new account with account_id Public/5FkBei8HYoSUNqh9rWCrJDnSZE5FJfGiWmTvhgBx3qTB
```
```bash
wallet account new public
# Output:
Generated new account with account_id Public/3YpK8RvVzWm6Q4h2nDAbxJfLmuRqkEkFP9C7UwTdGvE4
```
```bash
wallet token new \
--name MYTOKEN \
--total-supply 10000 \
--definition-account-id Public/3YpK8RvVzWm6Q4h2nDAbxJfLmuRqkEkFP9C7UwTdGvE4 \
--supply-account-id Public/5FkBei8HYoSUNqh9rWCrJDnSZE5FJfGiWmTvhgBx3qTB
```
### b. Derive the ATA address
```bash
wallet ata address \
--owner 5FkBei8HYoSUNqh9rWCrJDnSZE5FJfGiWmTvhgBx3qTB \
--token-definition 3YpK8RvVzWm6Q4h2nDAbxJfLmuRqkEkFP9C7UwTdGvE4
# Output:
7a2Bf9cKLm3XpRtH1wDqZs8vYjN4eU6gAoFxW5kMnE2R
```
> [!Note]
> This is a pure computation — no transaction is submitted and no network connection is needed. The same inputs will always produce the same output.
## 3. Creating an ATA (`wallet ata create`)
Before an ATA can hold tokens it must be created on-chain. The `create` subcommand submits a transaction that initializes the ATA. If it already exists, the operation is a no-op.
### a. Create the ATA
```bash
wallet ata create \
--owner Public/5FkBei8HYoSUNqh9rWCrJDnSZE5FJfGiWmTvhgBx3qTB \
--token-definition 3YpK8RvVzWm6Q4h2nDAbxJfLmuRqkEkFP9C7UwTdGvE4
```
### b. Inspect the ATA
Use the ATA address derived in the previous section:
```bash
wallet account get --account-id Public/7a2Bf9cKLm3XpRtH1wDqZs8vYjN4eU6gAoFxW5kMnE2R
# Output:
Holding account owned by ata program
{"account_type":"Token holding","definition_id":"3YpK8RvVzWm6Q4h2nDAbxJfLmuRqkEkFP9C7UwTdGvE4","balance":0}
```
> [!Tip]
> Creation is idempotent — running the same command again is a no-op.
## 4. Sending tokens via ATA (`wallet ata send`)
The `send` subcommand transfers tokens from the owner's ATA to a recipient account.
### a. Fund the ATA
First, move tokens into the ATA from the supply account created earlier:
```bash
wallet token send \
--from Public/5FkBei8HYoSUNqh9rWCrJDnSZE5FJfGiWmTvhgBx3qTB \
--to Public/7a2Bf9cKLm3XpRtH1wDqZs8vYjN4eU6gAoFxW5kMnE2R \
--amount 5000
```
### b. Create a recipient account
```bash
wallet account new public
# Output:
Generated new account with account_id Public/9Ht4Kv8pYmW2rXjN6dFcQsA7bEoLf3gUZx1wDnR5eTi
```
### c. Send tokens from the ATA to the recipient
```bash
wallet ata send \
--from Public/5FkBei8HYoSUNqh9rWCrJDnSZE5FJfGiWmTvhgBx3qTB \
--token-definition 3YpK8RvVzWm6Q4h2nDAbxJfLmuRqkEkFP9C7UwTdGvE4 \
--to 9Ht4Kv8pYmW2rXjN6dFcQsA7bEoLf3gUZx1wDnR5eTi \
--amount 2000
```
### d. Verify balances
```bash
wallet account get --account-id Public/7a2Bf9cKLm3XpRtH1wDqZs8vYjN4eU6gAoFxW5kMnE2R
# Output:
Holding account owned by ata program
{"account_type":"Token holding","definition_id":"3YpK8RvVzWm6Q4h2nDAbxJfLmuRqkEkFP9C7UwTdGvE4","balance":3000}
```
```bash
wallet account get --account-id Public/9Ht4Kv8pYmW2rXjN6dFcQsA7bEoLf3gUZx1wDnR5eTi
# Output:
Holding account owned by token program
{"account_type":"Token holding","definition_id":"3YpK8RvVzWm6Q4h2nDAbxJfLmuRqkEkFP9C7UwTdGvE4","balance":2000}
```
## 5. Burning tokens from an ATA (`wallet ata burn`)
The `burn` subcommand destroys tokens held in the owner's ATA, reducing the token's total supply.
### a. Burn tokens
```bash
wallet ata burn \
--holder Public/5FkBei8HYoSUNqh9rWCrJDnSZE5FJfGiWmTvhgBx3qTB \
--token-definition 3YpK8RvVzWm6Q4h2nDAbxJfLmuRqkEkFP9C7UwTdGvE4 \
--amount 500
```
### b. Verify the reduced balance
```bash
wallet account get --account-id Public/7a2Bf9cKLm3XpRtH1wDqZs8vYjN4eU6gAoFxW5kMnE2R
# Output:
Holding account owned by ata program
{"account_type":"Token holding","definition_id":"3YpK8RvVzWm6Q4h2nDAbxJfLmuRqkEkFP9C7UwTdGvE4","balance":2500}
```
## 6. Listing ATAs (`wallet ata list`)
The `list` subcommand queries ATAs for a given owner across one or more token definitions.
### a. Create a second token and ATA
Create a second token definition so there are multiple ATAs to list:
```bash
wallet account new public
# Output:
Generated new account with account_id Public/BxR3Lm7YkWp9vNs2hD4qJcTfA8eUoZ6gKn1wXjM5rFi
```
```bash
wallet account new public
# Output:
Generated new account with account_id Public/Ck8mVp4YhWn2rXjD6dFsQtA7bEoLf3gUZx1wDnR9eTs
```
```bash
wallet token new \
--name OTHERTOKEN \
--total-supply 5000 \
--definition-account-id Public/BxR3Lm7YkWp9vNs2hD4qJcTfA8eUoZ6gKn1wXjM5rFi \
--supply-account-id Public/Ck8mVp4YhWn2rXjD6dFsQtA7bEoLf3gUZx1wDnR9eTs
```
Create an ATA for the second token:
```bash
wallet ata create \
--owner Public/5FkBei8HYoSUNqh9rWCrJDnSZE5FJfGiWmTvhgBx3qTB \
--token-definition BxR3Lm7YkWp9vNs2hD4qJcTfA8eUoZ6gKn1wXjM5rFi
```
### b. List ATAs for both token definitions
```bash
wallet ata list \
--owner 5FkBei8HYoSUNqh9rWCrJDnSZE5FJfGiWmTvhgBx3qTB \
--token-definition \
3YpK8RvVzWm6Q4h2nDAbxJfLmuRqkEkFP9C7UwTdGvE4 \
BxR3Lm7YkWp9vNs2hD4qJcTfA8eUoZ6gKn1wXjM5rFi
# Output:
ATA 7a2Bf9cKLm3XpRtH1wDqZs8vYjN4eU6gAoFxW5kMnE2R (definition 3YpK8RvVzWm6Q4h2nDAbxJfLmuRqkEkFP9C7UwTdGvE4): balance 2500
ATA 4nPxKd8YmW7rVsH2jDfQcA9bEoLf6gUZx3wTnR1eMs5 (definition BxR3Lm7YkWp9vNs2hD4qJcTfA8eUoZ6gKn1wXjM5rFi): balance 0
```
> [!Note]
> The `list` command derives each ATA address locally and fetches its on-chain state. If an ATA has not been created for a given definition, it prints "No ATA for definition ..." instead.
## 7. Private owner operations
All three ATA operations — `create`, `send`, and `burn` — support private owner accounts. Passing a `Private/` prefix on the owner argument switches the wallet into privacy-preserving mode:
1. The wallet builds the transaction locally.
2. The ATA program is executed inside the RISC0 ZK VM to generate a proof.
3. The proof, the updated ATA state (in plaintext), and an encrypted update for the owner's private account are submitted to the sequencer.
4. The sequencer verifies the proof, writes the ATA state change to the public chain, and records the owner's new commitment in the nullifier set.
The result is that the ATA account and its token balance are **fully public** — anyone can see them. What stays private is the link between the ATA and its owner: the proof demonstrates that someone with the correct private key authorized the operation, but reveals nothing about which account that was.
> [!Note]
> The ATA address is derived from `SHA256(owner_id || definition_id)`. Because SHA256 is one-way, the ATA address does not reveal the owner's identity. However, if the owner's account ID becomes known for any other reason, all of their ATAs across every token definition can be enumerated by anyone.
### a. Create a private account
```bash
wallet account new private
# Output:
Generated new account with account_id Private/HkR7Lm2YnWp4vNs8hD3qJcTfA6eUoZ9gKn5wXjM1rFi
```
### b. Create the ATA for the private owner
Pass `Private/` on `--owner`. The token definition account has no privacy prefix — it is always a public account.
```bash
wallet ata create \
--owner Private/HkR7Lm2YnWp4vNs8hD3qJcTfA6eUoZ9gKn5wXjM1rFi \
--token-definition 3YpK8RvVzWm6Q4h2nDAbxJfLmuRqkEkFP9C7UwTdGvE4
```
> [!Note]
> Proof generation runs locally in the RISC0 ZK VM and can take up to a minute on first run.
### c. Verify the ATA was created
Derive the ATA address using the raw account ID (no privacy prefix):
```bash
wallet ata address \
--owner HkR7Lm2YnWp4vNs8hD3qJcTfA6eUoZ9gKn5wXjM1rFi \
--token-definition 3YpK8RvVzWm6Q4h2nDAbxJfLmuRqkEkFP9C7UwTdGvE4
# Output:
2pQxNf7YkWm3rVsH8jDcQaA4bEoLf9gUZx6wTnR2eMs1
```
```bash
wallet account get --account-id Public/2pQxNf7YkWm3rVsH8jDcQaA4bEoLf9gUZx6wTnR2eMs1
# Output:
Holding account owned by ata program
{"account_type":"Token holding","definition_id":"3YpK8RvVzWm6Q4h2nDAbxJfLmuRqkEkFP9C7UwTdGvE4","balance":0}
```
### d. Fund the ATA
The ATA is a public account. Fund it with a direct token transfer from any public holding account:
```bash
wallet token send \
--from Public/5FkBei8HYoSUNqh9rWCrJDnSZE5FJfGiWmTvhgBx3qTB \
--to Public/2pQxNf7YkWm3rVsH8jDcQaA4bEoLf9gUZx6wTnR2eMs1 \
--amount 500
```
### e. Send tokens from the private owner's ATA
```bash
wallet ata send \
--from Private/HkR7Lm2YnWp4vNs8hD3qJcTfA6eUoZ9gKn5wXjM1rFi \
--token-definition 3YpK8RvVzWm6Q4h2nDAbxJfLmuRqkEkFP9C7UwTdGvE4 \
--to 9Ht4Kv8pYmW2rXjN6dFcQsA7bEoLf3gUZx1wDnR5eTi \
--amount 200
```
Verify the ATA balance decreased:
```bash
wallet account get --account-id Public/2pQxNf7YkWm3rVsH8jDcQaA4bEoLf9gUZx6wTnR2eMs1
# Output:
Holding account owned by ata program
{"account_type":"Token holding","definition_id":"3YpK8RvVzWm6Q4h2nDAbxJfLmuRqkEkFP9C7UwTdGvE4","balance":300}
```
### f. Burn tokens from the private owner's ATA
```bash
wallet ata burn \
--holder Private/HkR7Lm2YnWp4vNs8hD3qJcTfA6eUoZ9gKn5wXjM1rFi \
--token-definition 3YpK8RvVzWm6Q4h2nDAbxJfLmuRqkEkFP9C7UwTdGvE4 \
--amount 100
```
Verify the balance and token supply:
```bash
wallet account get --account-id Public/2pQxNf7YkWm3rVsH8jDcQaA4bEoLf9gUZx6wTnR2eMs1
# Output:
Holding account owned by ata program
{"account_type":"Token holding","definition_id":"3YpK8RvVzWm6Q4h2nDAbxJfLmuRqkEkFP9C7UwTdGvE4","balance":200}
```

View File

@ -8,8 +8,10 @@ license = { workspace = true }
workspace = true
[dependencies]
common.workspace = true
nssa.workspace = true
nssa_core.workspace = true
sequencer_service_rpc = { workspace = true, features = ["client"] }
wallet.workspace = true
tokio = { workspace = true, features = ["macros"] }

View File

@ -1,6 +1,4 @@
use nssa_core::program::{
AccountPostState, DEFAULT_PROGRAM_ID, ProgramInput, read_nssa_inputs, write_nssa_outputs,
};
use nssa_core::program::{AccountPostState, Claim, ProgramInput, ProgramOutput, read_nssa_inputs};
// Hello-world example program.
//
@ -21,6 +19,8 @@ fn main() {
// Read inputs
let (
ProgramInput {
self_program_id,
caller_program_id,
pre_states,
instruction: greeting,
},
@ -45,16 +45,19 @@ fn main() {
// Wrap the post state account values inside a `AccountPostState` instance.
// This is used to forward the account claiming request if any
let post_state = if post_account.program_owner == DEFAULT_PROGRAM_ID {
// This produces a claim request
AccountPostState::new_claimed(post_account)
} else {
// This doesn't produce a claim request
AccountPostState::new(post_account)
};
let post_state = AccountPostState::new_claimed_if_default(post_account, Claim::Authorized);
// The output is a proposed state difference. It will only succeed if the pre states coincide
// with the previous values of the accounts, and the transition to the post states conforms
// with the NSSA program rules.
write_nssa_outputs(instruction_data, vec![pre_state], vec![post_state]);
// WARNING: constructing a `ProgramOutput` has no effect on its own. `.write()` must be
// called to commit the output.
ProgramOutput::new(
self_program_id,
caller_program_id,
instruction_data,
vec![pre_state],
vec![post_state],
)
.write();
}

View File

@ -1,6 +1,4 @@
use nssa_core::program::{
AccountPostState, DEFAULT_PROGRAM_ID, ProgramInput, read_nssa_inputs, write_nssa_outputs,
};
use nssa_core::program::{AccountPostState, Claim, ProgramInput, ProgramOutput, read_nssa_inputs};
// Hello-world with authorization example program.
//
@ -21,6 +19,8 @@ fn main() {
// Read inputs
let (
ProgramInput {
self_program_id,
caller_program_id,
pre_states,
instruction: greeting,
},
@ -52,16 +52,19 @@ fn main() {
// Wrap the post state account values inside a `AccountPostState` instance.
// This is used to forward the account claiming request if any
let post_state = if post_account.program_owner == DEFAULT_PROGRAM_ID {
// This produces a claim request
AccountPostState::new_claimed(post_account)
} else {
// This doesn't produce a claim request
AccountPostState::new(post_account)
};
let post_state = AccountPostState::new_claimed_if_default(post_account, Claim::Authorized);
// The output is a proposed state difference. It will only succeed if the pre states coincide
// with the previous values of the accounts, and the transition to the post states conforms
// with the NSSA program rules.
write_nssa_outputs(instruction_data, vec![pre_state], vec![post_state]);
// WARNING: constructing a `ProgramOutput` has no effect on its own. `.write()` must be
// called to commit the output.
ProgramOutput::new(
self_program_id,
caller_program_id,
instruction_data,
vec![pre_state],
vec![post_state],
)
.write();
}

View File

@ -1,8 +1,6 @@
use nssa_core::{
account::{Account, AccountWithMetadata, Data},
program::{
AccountPostState, DEFAULT_PROGRAM_ID, ProgramInput, read_nssa_inputs, write_nssa_outputs,
},
account::{AccountWithMetadata, Data},
program::{AccountPostState, Claim, ProgramInput, ProgramOutput, read_nssa_inputs},
};
// Hello-world with write + move_data example program.
@ -26,16 +24,6 @@ const MOVE_DATA_FUNCTION_ID: u8 = 1;
type Instruction = (u8, Vec<u8>);
fn build_post_state(post_account: Account) -> AccountPostState {
if post_account.program_owner == DEFAULT_PROGRAM_ID {
// This produces a claim request
AccountPostState::new_claimed(post_account)
} else {
// This doesn't produce a claim request
AccountPostState::new(post_account)
}
}
fn write(pre_state: AccountWithMetadata, greeting: &[u8]) -> AccountPostState {
// Construct the post state account values
let post_account = {
@ -48,7 +36,7 @@ fn write(pre_state: AccountWithMetadata, greeting: &[u8]) -> AccountPostState {
this
};
build_post_state(post_account)
AccountPostState::new_claimed_if_default(post_account, Claim::Authorized)
}
fn move_data(from_pre: AccountWithMetadata, to_pre: AccountWithMetadata) -> Vec<AccountPostState> {
@ -58,7 +46,7 @@ fn move_data(from_pre: AccountWithMetadata, to_pre: AccountWithMetadata) -> Vec<
let from_post = {
let mut this = from_pre.account;
this.data = Data::default();
build_post_state(this)
AccountPostState::new_claimed_if_default(this, Claim::Authorized)
};
let to_post = {
@ -68,7 +56,7 @@ fn move_data(from_pre: AccountWithMetadata, to_pre: AccountWithMetadata) -> Vec<
this.data = bytes
.try_into()
.expect("Data should fit within the allowed limits");
build_post_state(this)
AccountPostState::new_claimed_if_default(this, Claim::Authorized)
};
vec![from_post, to_post]
@ -78,6 +66,8 @@ fn main() {
// Read input accounts.
let (
ProgramInput {
self_program_id,
caller_program_id,
pre_states,
instruction: (function_id, data),
},
@ -95,5 +85,14 @@ fn main() {
_ => panic!("invalid params"),
};
write_nssa_outputs(instruction_words, pre_states, post_states);
// WARNING: constructing a `ProgramOutput` has no effect on its own. `.write()` must be
// called to commit the output.
ProgramOutput::new(
self_program_id,
caller_program_id,
instruction_words,
pre_states,
post_states,
)
.write();
}

View File

@ -1,6 +1,5 @@
use nssa_core::program::{
AccountPostState, ChainedCall, ProgramId, ProgramInput, read_nssa_inputs,
write_nssa_outputs_with_chained_call,
AccountPostState, ChainedCall, ProgramId, ProgramInput, ProgramOutput, read_nssa_inputs,
};
// Tail Call example program.
@ -28,6 +27,8 @@ fn main() {
// Read inputs
let (
ProgramInput {
self_program_id,
caller_program_id,
pre_states,
instruction: (),
},
@ -53,11 +54,16 @@ fn main() {
pda_seeds: vec![],
};
// Write the outputs
write_nssa_outputs_with_chained_call(
// Write the outputs.
// WARNING: constructing a `ProgramOutput` has no effect on its own. `.write()` must be
// called to commit the output.
ProgramOutput::new(
self_program_id,
caller_program_id,
instruction_data,
vec![pre_state],
vec![post_state],
vec![chained_call],
);
)
.with_chained_calls(vec![chained_call])
.write();
}

View File

@ -1,6 +1,6 @@
use nssa_core::program::{
AccountPostState, ChainedCall, PdaSeed, ProgramId, ProgramInput, read_nssa_inputs,
write_nssa_outputs_with_chained_call,
AccountPostState, ChainedCall, PdaSeed, ProgramId, ProgramInput, ProgramOutput,
read_nssa_inputs,
};
// Tail Call with PDA example program.
@ -33,6 +33,8 @@ fn main() {
// Read inputs
let (
ProgramInput {
self_program_id,
caller_program_id,
pre_states,
instruction: (),
},
@ -65,11 +67,16 @@ fn main() {
pda_seeds: vec![PDA_SEED],
};
// Write the outputs
write_nssa_outputs_with_chained_call(
// Write the outputs.
// WARNING: constructing a `ProgramOutput` has no effect on its own. `.write()` must be
// called to commit the output.
ProgramOutput::new(
self_program_id,
caller_program_id,
instruction_data,
vec![pre_state],
vec![post_state],
vec![chained_call],
);
)
.with_chained_calls(vec![chained_call])
.write();
}

View File

@ -1,8 +1,10 @@
use common::transaction::NSSATransaction;
use nssa::{
AccountId, PublicTransaction,
program::Program,
public_transaction::{Message, WitnessSet},
};
use sequencer_service_rpc::RpcClient as _;
use wallet::WalletCore;
// Before running this example, compile the `hello_world.rs` guest program with:
@ -58,7 +60,7 @@ async fn main() {
// Submit the transaction
let _response = wallet_core
.sequencer_client
.send_tx_public(tx)
.send_transaction(NSSATransaction::Public(tx))
.await
.unwrap();
}

View File

@ -1,8 +1,10 @@
use common::transaction::NSSATransaction;
use nssa::{
AccountId, PublicTransaction,
program::Program,
public_transaction::{Message, WitnessSet},
};
use sequencer_service_rpc::RpcClient as _;
use wallet::WalletCore;
// Before running this example, compile the `simple_tail_call.rs` guest program with:
@ -54,7 +56,7 @@ async fn main() {
// Submit the transaction
let _response = wallet_core
.sequencer_client
.send_tx_public(tx)
.send_transaction(NSSATransaction::Public(tx))
.await
.unwrap();
}

View File

@ -1,8 +1,10 @@
use common::transaction::NSSATransaction;
use nssa::{
AccountId, PublicTransaction,
program::Program,
public_transaction::{Message, WitnessSet},
};
use sequencer_service_rpc::RpcClient as _;
use wallet::WalletCore;
// Before running this example, compile the `hello_world_with_authorization.rs` guest program with:
@ -71,7 +73,7 @@ async fn main() {
// Submit the transaction
let _response = wallet_core
.sequencer_client
.send_tx_public(tx)
.send_transaction(NSSATransaction::Public(tx))
.await
.unwrap();
}

View File

@ -3,12 +3,14 @@
reason = "This is an example program, it's fine to print to stdout"
)]
use common::transaction::NSSATransaction;
use nssa::{
AccountId, PublicTransaction,
program::Program,
public_transaction::{Message, WitnessSet},
};
use nssa_core::program::PdaSeed;
use sequencer_service_rpc::RpcClient as _;
use wallet::WalletCore;
// Before running this example, compile the `simple_tail_call.rs` guest program with:
@ -44,7 +46,7 @@ async fn main() {
let program = Program::new(bytecode).unwrap();
// Compute the PDA to pass it as input account to the public execution
let pda = AccountId::from((&program.id(), &PDA_SEED));
let pda = AccountId::for_public_pda(&program.id(), &PDA_SEED);
let account_ids = vec![pda];
let instruction_data = ();
let nonces = vec![];
@ -56,7 +58,7 @@ async fn main() {
// Submit the transaction
let _response = wallet_core
.sequencer_client
.send_tx_public(tx)
.send_transaction(NSSATransaction::Public(tx))
.await
.unwrap();

View File

@ -1,5 +1,7 @@
use clap::{Parser, Subcommand};
use common::transaction::NSSATransaction;
use nssa::{PublicTransaction, program::Program, public_transaction};
use sequencer_service_rpc::RpcClient as _;
use wallet::{PrivacyPreservingAccount, WalletCore};
// Before running this example, compile the `hello_world_with_move_function.rs` guest program with:
@ -87,7 +89,7 @@ async fn main() {
// Submit the transaction
let _response = wallet_core
.sequencer_client
.send_tx_public(tx)
.send_transaction(NSSATransaction::Public(tx))
.await
.unwrap();
}
@ -126,7 +128,7 @@ async fn main() {
// Submit the transaction
let _response = wallet_core
.sequencer_client
.send_tx_public(tx)
.send_transaction(NSSATransaction::Public(tx))
.await
.unwrap();
}

View File

@ -22,7 +22,13 @@ WORKDIR /explorer_service
COPY . .
# Build the app
RUN cargo leptos build --release -vv
RUN --mount=type=cache,target=/usr/local/cargo/registry/index \
--mount=type=cache,target=/usr/local/cargo/registry/cache \
--mount=type=cache,target=/usr/local/cargo/git \
--mount=type=cache,target=/explorer_service/target \
cargo leptos build --release -vv \
&& cp /explorer_service/target/release/explorer_service /usr/local/bin/explorer_service \
&& cp -r /explorer_service/target/site /explorer_service/site_output
FROM debian:trixie-slim AS runtime
WORKDIR /explorer_service
@ -33,10 +39,10 @@ RUN apt-get update -y \
&& rm -rf /var/lib/apt/lists/*
# Copy the server binary to the /explorer_service directory
COPY --from=builder /explorer_service/target/release/explorer_service /explorer_service/
COPY --from=builder /usr/local/bin/explorer_service /explorer_service/
# /target/site contains our JS/WASM/CSS, etc.
COPY --from=builder /explorer_service/target/site /explorer_service/site
COPY --from=builder /explorer_service/site_output /explorer_service/site
# Copy Cargo.toml as it's needed at runtime
COPY --from=builder /explorer_service/Cargo.toml /explorer_service/

View File

@ -41,12 +41,12 @@ pub async fn search(query: String) -> Result<SearchResults, ServerFnError> {
// Try as hash
if let Ok(hash) = HashType::from_str(&query) {
// Try as block hash
if let Ok(block) = client.get_block_by_hash(hash).await {
if let Ok(Some(block)) = client.get_block_by_hash(hash).await {
blocks.push(block);
}
// Try as transaction hash
if let Ok(tx) = client.get_transaction(hash).await {
if let Ok(Some(tx)) = client.get_transaction(hash).await {
transactions.push(tx);
}
}
@ -60,7 +60,7 @@ pub async fn search(query: String) -> Result<SearchResults, ServerFnError> {
// Try as block ID
if let Ok(block_id) = query.parse::<u64>()
&& let Ok(block) = client.get_block_by_id(block_id).await
&& let Ok(Some(block)) = client.get_block_by_id(block_id).await
{
blocks.push(block);
}
@ -81,6 +81,7 @@ pub async fn get_block_by_id(block_id: BlockId) -> Result<Block, ServerFnError>
.get_block_by_id(block_id)
.await
.map_err(|e| ServerFnError::ServerError(format!("RPC error: {e}")))
.and_then(|opt| opt.ok_or_else(|| ServerFnError::ServerError("Block not found".to_owned())))
}
/// Get latest block ID
@ -103,6 +104,7 @@ pub async fn get_block_by_hash(block_hash: HashType) -> Result<Block, ServerFnEr
.get_block_by_hash(block_hash)
.await
.map_err(|e| ServerFnError::ServerError(format!("RPC error: {e}")))
.and_then(|opt| opt.ok_or_else(|| ServerFnError::ServerError("Block not found".to_owned())))
}
/// Get transaction by hash
@ -114,6 +116,9 @@ pub async fn get_transaction(tx_hash: HashType) -> Result<Transaction, ServerFnE
.get_transaction(tx_hash)
.await
.map_err(|e| ServerFnError::ServerError(format!("RPC error: {e}")))
.and_then(|opt| {
opt.ok_or_else(|| ServerFnError::ServerError("Transaction not found".to_owned()))
})
}
/// Get blocks with pagination

View File

@ -84,7 +84,7 @@ pub fn TransactionPage() -> impl IntoView {
} = witness_set;
let program_id_str = program_id.to_string();
let proof_len = proof.0.len();
let proof_len = proof.map_or(0, |p| p.0.len());
let signatures_count = signatures_and_public_keys.len();
view! {
@ -177,13 +177,14 @@ pub fn TransactionPage() -> impl IntoView {
encrypted_private_post_states,
new_commitments,
new_nullifiers,
block_validity_window,
timestamp_validity_window,
} = message;
let WitnessSet {
signatures_and_public_keys: _,
proof,
} = witness_set;
let proof_len = proof.0.len();
let proof_len = proof.map_or(0, |p| p.0.len());
view! {
<div class="transaction-details">
<h2>"Privacy-Preserving Transaction Details"</h2>
@ -212,6 +213,14 @@ pub fn TransactionPage() -> impl IntoView {
<span class="info-label">"Proof Size:"</span>
<span class="info-value">{format!("{proof_len} bytes")}</span>
</div>
<div class="info-row">
<span class="info-label">"Block Validity Window:"</span>
<span class="info-value">{block_validity_window.to_string()}</span>
</div>
<div class="info-row">
<span class="info-label">"Timestamp Validity Window:"</span>
<span class="info-value">{timestamp_validity_window.to_string()}</span>
</div>
</div>
<h3>"Public Accounts"</h3>

24
flake.lock generated
View File

@ -2,11 +2,11 @@
"nodes": {
"crane": {
"locked": {
"lastModified": 1769737823,
"narHash": "sha256-DrBaNpZ+sJ4stXm+0nBX7zqZT9t9P22zbk6m5YhQxS4=",
"lastModified": 1776396856,
"narHash": "sha256-aRJpIJUlZLaf06ekPvqjuU46zvO9K90IxJGpbqodkPs=",
"owner": "ipetkov",
"repo": "crane",
"rev": "b2f45c3830aa96b7456a4c4bc327d04d7a43e1ba",
"rev": "28462d6d55c33206ffa5a56c7907ca3125ed788f",
"type": "github"
},
"original": {
@ -20,11 +20,11 @@
"nixpkgs": "nixpkgs"
},
"locked": {
"lastModified": 1770979891,
"narHash": "sha256-cvkVnE7btuFLzv70ORAZve9K1Huiplq0iECgXSXb0ZY=",
"lastModified": 1775835011,
"narHash": "sha256-SQDLyyRUa5J9QHjNiHbeZw4rQOZnTEo61TcaUpjtLBs=",
"owner": "logos-blockchain",
"repo": "logos-blockchain-circuits",
"rev": "ec7d298e5a3a0507bb8570df86cdf78dc452d024",
"rev": "d6cf41f66500d4afc157b4f43de0f0d5bfa01443",
"type": "github"
},
"original": {
@ -51,11 +51,11 @@
},
"nixpkgs_2": {
"locked": {
"lastModified": 1770019141,
"narHash": "sha256-VKS4ZLNx4PNrABoB0L8KUpc1fE7CLpQXQs985tGfaCU=",
"lastModified": 1776169885,
"narHash": "sha256-l/iNYDZ4bGOAFQY2q8y5OAfBBtrDAaPuRQqWaFHVRXM=",
"owner": "NixOS",
"repo": "nixpkgs",
"rev": "cb369ef2efd432b3cdf8622b0ffc0a97a02f3137",
"rev": "4bd9165a9165d7b5e33ae57f3eecbcb28fb231c9",
"type": "github"
},
"original": {
@ -80,11 +80,11 @@
]
},
"locked": {
"lastModified": 1770088046,
"narHash": "sha256-4hfYDnUTvL1qSSZEA4CEThxfz+KlwSFQ30Z9jgDguO0=",
"lastModified": 1776395632,
"narHash": "sha256-Mi1uF5f2FsdBIvy+v7MtsqxD3Xjhd0ARJdwoqqqPtJo=",
"owner": "oxalica",
"repo": "rust-overlay",
"rev": "71f9daa4e05e49c434d08627e755495ae222bc34",
"rev": "8087ff1f47fff983a1fba70fa88b759f2fd8ae97",
"type": "github"
},
"original": {

View File

@ -130,9 +130,26 @@
'';
}
);
indexerFfiPackage = craneLib.buildPackage (
commonArgs
// {
pname = "logos-execution-zone-indexer-ffi";
version = "0.1.0";
cargoExtraArgs = "-p indexer_ffi";
postInstall = ''
mkdir -p $out/include
cp indexer_ffi/indexer_ffi.h $out/include/
''
+ pkgs.lib.optionalString pkgs.stdenv.isDarwin ''
install_name_tool -id @rpath/libindexer_ffi.dylib $out/lib/libindexer_ffi.dylib
'';
}
);
in
{
wallet = walletFfiPackage;
indexer = indexerFfiPackage;
default = walletFfiPackage;
}
);
@ -144,9 +161,14 @@
walletFfiShell = pkgs.mkShell {
inputsFrom = [ walletFfiPackage ];
};
indexerFfiPackage = self.packages.${system}.indexer;
indexerFfiShell = pkgs.mkShell {
inputsFrom = [ indexerFfiPackage ];
};
in
{
wallet = walletFfiShell;
indexer = indexerFfiShell;
default = walletFfiShell;
}
);

View File

@ -9,11 +9,11 @@ workspace = true
[dependencies]
common.workspace = true
bedrock_client.workspace = true
logos-blockchain-zone-sdk.workspace = true
nssa.workspace = true
nssa_core.workspace = true
storage.workspace = true
testnet_initial_state.workspace = true
anyhow.workspace = true
log.workspace = true
@ -29,4 +29,3 @@ tokio.workspace = true
[dev-dependencies]
tempfile.workspace = true

View File

@ -1,19 +1,20 @@
use std::{path::Path, sync::Arc};
use anyhow::Result;
use bedrock_client::HeaderId;
use common::{
block::{BedrockStatus, Block, BlockId},
transaction::NSSATransaction,
block::{BedrockStatus, Block},
transaction::{NSSATransaction, clock_invocation},
};
use nssa::{Account, AccountId, V02State};
use logos_blockchain_core::header::HeaderId;
use nssa::{Account, AccountId, V03State};
use nssa_core::BlockId;
use storage::indexer::RocksDBIO;
use tokio::sync::RwLock;
#[derive(Clone)]
pub struct IndexerStore {
dbio: Arc<RocksDBIO>,
current_state: Arc<RwLock<V02State>>,
current_state: Arc<RwLock<V03State>>,
}
impl IndexerStore {
@ -24,7 +25,7 @@ impl IndexerStore {
pub fn open_db_with_genesis(
location: &Path,
genesis_block: &Block,
initial_state: &V02State,
initial_state: &V03State,
) -> Result<Self> {
let dbio = RocksDBIO::open_or_create(location, genesis_block, initial_state)?;
let current_state = dbio.final_state()?;
@ -46,7 +47,7 @@ impl IndexerStore {
Ok(self.dbio.get_meta_last_block_in_db()?)
}
pub fn get_block_at_id(&self, id: u64) -> Result<Block> {
pub fn get_block_at_id(&self, id: u64) -> Result<Option<Block>> {
Ok(self.dbio.get_block(id)?)
}
@ -54,20 +55,25 @@ impl IndexerStore {
Ok(self.dbio.get_block_batch(before, limit)?)
}
pub fn get_transaction_by_hash(&self, tx_hash: [u8; 32]) -> Result<NSSATransaction> {
let block = self.get_block_at_id(self.dbio.get_block_id_by_tx_hash(tx_hash)?)?;
let transaction = block
pub fn get_transaction_by_hash(&self, tx_hash: [u8; 32]) -> Result<Option<NSSATransaction>> {
let Some(block_id) = self.dbio.get_block_id_by_tx_hash(tx_hash)? else {
return Ok(None);
};
let Some(block) = self.get_block_at_id(block_id)? else {
return Ok(None);
};
Ok(block
.body
.transactions
.iter()
.find(|enc_tx| enc_tx.hash().0 == tx_hash)
.ok_or_else(|| anyhow::anyhow!("Transaction not found in DB"))?;
Ok(transaction.clone())
.into_iter()
.find(|enc_tx| enc_tx.hash().0 == tx_hash))
}
pub fn get_block_by_hash(&self, hash: [u8; 32]) -> Result<Block> {
self.get_block_at_id(self.dbio.get_block_id_by_hash(hash)?)
pub fn get_block_by_hash(&self, hash: [u8; 32]) -> Result<Option<Block>> {
let Some(id) = self.dbio.get_block_id_by_hash(hash)? else {
return Ok(None);
};
self.get_block_at_id(id)
}
pub fn get_transactions_by_account(
@ -93,14 +99,14 @@ impl IndexerStore {
.expect("Must be set at the DB startup")
}
pub fn get_state_at_block(&self, block_id: u64) -> Result<V02State> {
pub fn get_state_at_block(&self, block_id: u64) -> Result<V03State> {
Ok(self.dbio.calculate_state_for_id(block_id)?)
}
/// Recalculation of final state directly from DB.
///
/// Used for indexer healthcheck.
pub fn recalculate_final_state(&self) -> Result<V02State> {
pub fn recalculate_final_state(&self) -> Result<V03State> {
Ok(self.dbio.final_state()?)
}
@ -116,12 +122,37 @@ impl IndexerStore {
{
let mut state_guard = self.current_state.write().await;
for transaction in &block.body.transactions {
let (clock_tx, user_txs) = block
.body
.transactions
.split_last()
.ok_or_else(|| anyhow::anyhow!("Block has no transactions"))?;
anyhow::ensure!(
*clock_tx == NSSATransaction::Public(clock_invocation(block.header.timestamp)),
"Last transaction in block must be the clock invocation for the block timestamp"
);
for transaction in user_txs {
transaction
.clone()
.transaction_stateless_check()?
.execute_check_on_state(&mut state_guard)?;
.execute_check_on_state(
&mut state_guard,
block.header.block_id,
block.header.timestamp,
)?;
}
// Apply the clock invocation directly (it is expected to modify clock accounts).
let NSSATransaction::Public(clock_public_tx) = clock_tx else {
anyhow::bail!("Clock invocation must be a public transaction");
};
state_guard.transition_from_public_transaction(
clock_public_tx,
block.header.block_id,
block.header.timestamp,
)?;
}
// ToDo: Currently we are fetching only finalized blocks
@ -167,11 +198,15 @@ mod tests {
let storage = IndexerStore::open_db_with_genesis(
home.as_ref(),
&genesis_block(),
&nssa::V02State::new_with_genesis_accounts(&[(acc1(), 10000), (acc2(), 20000)], &[]),
&nssa::V03State::new_with_genesis_accounts(
&[(acc1(), 10000), (acc2(), 20000)],
vec![],
0,
),
)
.unwrap();
let block = storage.get_block_at_id(1).unwrap();
let block = storage.get_block_at_id(1).unwrap().unwrap();
let final_id = storage.get_last_block_id().unwrap();
assert_eq!(block.header.hash, genesis_block().header.hash);
@ -185,7 +220,11 @@ mod tests {
let storage = IndexerStore::open_db_with_genesis(
home.as_ref(),
&genesis_block(),
&nssa::V02State::new_with_genesis_accounts(&[(acc1(), 10000), (acc2(), 20000)], &[]),
&nssa::V03State::new_with_genesis_accounts(
&[(acc1(), 10000), (acc2(), 20000)],
vec![],
0,
),
)
.unwrap();
@ -203,12 +242,10 @@ mod tests {
10,
&sign_key,
);
let block_id = u64::try_from(i).unwrap();
let next_block = common::test_utils::produce_dummy_block(
u64::try_from(i).unwrap(),
Some(prev_hash),
vec![tx],
);
let next_block =
common::test_utils::produce_dummy_block(block_id, Some(prev_hash), vec![tx]);
prev_hash = next_block.header.hash;
storage

View File

@ -6,20 +6,15 @@ use std::{
};
use anyhow::{Context as _, Result};
pub use bedrock_client::BackoffConfig;
use common::{
block::{AccountInitialData, CommitmentsInitialData},
config::BasicAuth,
};
use common::config::BasicAuth;
use humantime_serde;
pub use logos_blockchain_core::mantle::ops::channel::ChannelId;
use serde::{Deserialize, Serialize};
use testnet_initial_state::{PrivateAccountPublicInitialData, PublicAccountPublicInitialData};
use url::Url;
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ClientConfig {
/// For individual RPC requests we use Fibonacci backoff retry strategy.
pub backoff: BackoffConfig,
pub addr: Url,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub auth: Option<BasicAuth>,
@ -29,16 +24,16 @@ pub struct ClientConfig {
pub struct IndexerConfig {
/// Home dir of sequencer storage.
pub home: PathBuf,
/// List of initial accounts data.
pub initial_accounts: Vec<AccountInitialData>,
/// List of initial commitments.
pub initial_commitments: Vec<CommitmentsInitialData>,
/// Sequencers signing key.
pub signing_key: [u8; 32],
#[serde(with = "humantime_serde")]
pub consensus_info_polling_interval: Duration,
pub bedrock_client_config: ClientConfig,
pub channel_id: ChannelId,
#[serde(skip_serializing_if = "Option::is_none")]
pub initial_public_accounts: Option<Vec<PublicAccountPublicInitialData>>,
#[serde(skip_serializing_if = "Option::is_none")]
pub initial_private_accounts: Option<Vec<PrivateAccountPublicInitialData>>,
}
impl IndexerConfig {

View File

@ -7,7 +7,11 @@ use common::{HashType, PINATA_BASE58};
use futures::StreamExt as _;
use log::{error, info, warn};
use logos_blockchain_core::header::HeaderId;
use logos_blockchain_zone_sdk::indexer::ZoneIndexer;
use logos_blockchain_zone_sdk::{
CommonHttpClient, ZoneMessage, adapter::NodeHttpClient, indexer::ZoneIndexer,
};
use nssa::V03State;
use testnet_initial_state::initial_state_testnet;
use crate::{block_store::IndexerStore, config::IndexerConfig};
@ -16,7 +20,7 @@ pub mod config;
#[derive(Clone)]
pub struct IndexerCore {
pub zone_indexer: Arc<ZoneIndexer>,
pub zone_indexer: Arc<ZoneIndexer<NodeHttpClient>>,
pub config: IndexerConfig,
pub store: IndexerStore,
}
@ -38,45 +42,61 @@ impl IndexerCore {
let channel_genesis_msg_id = [0; 32];
let genesis_block = hashable_data.into_pending_block(&signing_key, channel_genesis_msg_id);
// This is a troubling moment, because changes in the key protocol can
// affect this. And the indexer cannot reliably ask the sequencer for this data,
// because the indexer must be independent from it.
// ToDo: move initial state generation into common and use the same method
// for the indexer and sequencer. This way both services built at the same version
// could be in sync.
let initial_commitments: Vec<nssa_core::Commitment> = config
.initial_commitments
.iter()
.map(|init_comm_data| {
let npk = &init_comm_data.npk;
let initial_private_accounts: Option<Vec<(nssa_core::Commitment, nssa_core::Nullifier)>> =
config.initial_private_accounts.as_ref().map(|accounts| {
accounts
.iter()
.map(|init_comm_data| {
let npk = &init_comm_data.npk;
let mut acc = init_comm_data.account.clone();
let mut acc = init_comm_data.account.clone();
acc.program_owner = nssa::program::Program::authenticated_transfer_program().id();
acc.program_owner =
nssa::program::Program::authenticated_transfer_program().id();
nssa_core::Commitment::new(npk, &acc)
})
.collect();
(
nssa_core::Commitment::new(npk, &acc),
nssa_core::Nullifier::for_account_initialization(npk),
)
})
.collect()
});
let init_accs: Vec<(nssa::AccountId, u128)> = config
.initial_accounts
.iter()
.map(|acc_data| (acc_data.account_id, acc_data.balance))
.collect();
let init_accs: Option<Vec<(nssa::AccountId, u128)>> = config
.initial_public_accounts
.as_ref()
.map(|initial_accounts| {
initial_accounts
.iter()
.map(|acc_data| (acc_data.account_id, acc_data.balance))
.collect()
});
let mut state = nssa::V02State::new_with_genesis_accounts(&init_accs, &initial_commitments);
// If initial commitments or accounts are present in config, need to construct state from
// them
let state = if initial_private_accounts.is_some() || init_accs.is_some() {
let mut state = V03State::new_with_genesis_accounts(
&init_accs.unwrap_or_default(),
initial_private_accounts.unwrap_or_default(),
genesis_block.header.timestamp,
);
// ToDo: Remove after testnet
state.add_pinata_program(PINATA_BASE58.parse().unwrap());
// ToDo: Remove after testnet
state.add_pinata_program(PINATA_BASE58.parse().unwrap());
state
} else {
initial_state_testnet()
};
let home = config.home.join("rocksdb");
let auth = config.bedrock_client_config.auth.clone().map(Into::into);
let zone_indexer = ZoneIndexer::new(
config.channel_id,
let basic_auth = config.bedrock_client_config.auth.clone().map(Into::into);
let node = NodeHttpClient::new(
CommonHttpClient::new(basic_auth),
config.bedrock_client_config.addr.clone(),
auth,
);
let zone_indexer = ZoneIndexer::new(config.channel_id, node);
Ok(Self {
zone_indexer: Arc::new(zone_indexer),
@ -99,7 +119,11 @@ impl IndexerCore {
let mut follow_stream = std::pin::pin!(follow_stream);
while let Some(zone_block) = follow_stream.next().await {
while let Some(zone_message) = follow_stream.next().await {
let zone_block = match zone_message {
ZoneMessage::Block(b) => b,
ZoneMessage::Deposit(_) | ZoneMessage::Withdraw(_) => continue,
};
let block: Block = match borsh::from_slice(&zone_block.data) {
Ok(b) => b,
Err(e) => {

View File

@ -21,7 +21,6 @@ log.workspace = true
jsonrpsee.workspace = true
serde_json.workspace = true
futures.workspace = true
async-trait = "0.1.89"
arc-swap = "1.8.1"
[features]

View File

@ -51,32 +51,34 @@ RUN cargo chef prepare --bin indexer_service --recipe-path recipe.json
FROM chef AS builder
COPY --from=planner /indexer_service/recipe.json recipe.json
# Build dependencies only (this layer will be cached)
RUN cargo chef cook --bin indexer_service --release --recipe-path recipe.json
RUN --mount=type=cache,target=/usr/local/cargo/registry/index \
--mount=type=cache,target=/usr/local/cargo/registry/cache \
--mount=type=cache,target=/usr/local/cargo/git \
--mount=type=cache,target=/indexer_service/target \
cargo chef cook --bin indexer_service --release --recipe-path recipe.json
# Copy source code
COPY . .
# Build the actual application
RUN cargo build --release --bin indexer_service
# Strip debug symbols to reduce binary size
RUN strip /indexer_service/target/release/indexer_service
# Build the actual application and copy the binary out of the cache mount
RUN --mount=type=cache,target=/usr/local/cargo/registry/index \
--mount=type=cache,target=/usr/local/cargo/registry/cache \
--mount=type=cache,target=/usr/local/cargo/git \
--mount=type=cache,target=/indexer_service/target \
cargo build --release --bin indexer_service \
&& strip /indexer_service/target/release/indexer_service \
&& cp /indexer_service/target/release/indexer_service /usr/local/bin/indexer_service
# Runtime stage - minimal image
FROM debian:trixie-slim
# Install runtime dependencies
RUN apt-get update \
&& apt-get install -y gosu jq \
&& rm -rf /var/lib/apt/lists/*
# Create non-root user for security
RUN useradd -m -u 1000 -s /bin/bash indexer_service_user && \
mkdir -p /indexer_service /etc/indexer_service && \
chown -R indexer_service_user:indexer_service_user /indexer_service /etc/indexer_service
mkdir -p /indexer_service /etc/indexer_service /var/lib/indexer_service && \
chown -R indexer_service_user:indexer_service_user /indexer_service /etc/indexer_service /var/lib/indexer_service
# Copy binary from builder
COPY --from=builder --chown=indexer_service_user:indexer_service_user /indexer_service/target/release/indexer_service /usr/local/bin/indexer_service
COPY --from=builder --chown=indexer_service_user:indexer_service_user /usr/local/bin/indexer_service /usr/local/bin/indexer_service
# Copy r0vm binary from builder
COPY --from=builder --chown=indexer_service_user:indexer_service_user /usr/local/bin/r0vm /usr/local/bin/r0vm
@ -84,9 +86,7 @@ COPY --from=builder --chown=indexer_service_user:indexer_service_user /usr/local
# Copy logos blockchain circuits from builder
COPY --from=builder --chown=indexer_service_user:indexer_service_user /root/.logos-blockchain-circuits /home/indexer_service_user/.logos-blockchain-circuits
# Copy entrypoint script
COPY indexer/service/docker-entrypoint.sh /docker-entrypoint.sh
RUN chmod +x /docker-entrypoint.sh
VOLUME /var/lib/indexer_service
# Expose default port
EXPOSE 8779
@ -105,9 +105,7 @@ HEALTHCHECK --interval=30s --timeout=3s --start-period=5s --retries=3 \
# Run the application
ENV RUST_LOG=info
USER root
ENTRYPOINT ["/docker-entrypoint.sh"]
USER indexer_service_user
WORKDIR /indexer_service
CMD ["indexer_service", "/etc/indexer_service/indexer_config.json"]

View File

@ -11,50 +11,50 @@
"channel_id": "0101010101010101010101010101010101010101010101010101010101010101",
"initial_accounts": [
{
"account_id": "6iArKUXxhUJqS7kCaPNhwMWt3ro71PDyBj7jwAyE2VQV",
"account_id": "CbgR6tj5kWx5oziiFptM7jMvrQeYY3Mzaao6ciuhSr2r",
"balance": 10000
},
{
"account_id": "7wHg9sbJwc6h3NP1S9bekfAzB8CHifEcxKswCKUt3YQo",
"account_id": "2RHZhw9h534Zr3eq2RGhQete2Hh667foECzXPmSkGni2",
"balance": 20000
}
],
"initial_commitments": [
{
"npk":[
177,
64,
1,
"npk": [
139,
19,
158,
11,
87,
38,
254,
159,
155,
231,
165,
1,
94,
64,
137,
243,
76,
249,
101,
251,
129,
33,
101,
189,
30,
42,
11,
191,
34,
103,
186,
227,
230
] ,
85,
206,
132,
228,
220,
114,
145,
89,
113,
156,
238,
142,
242,
74,
182,
91,
43,
100,
6,
190,
31,
15,
31,
88,
96,
204
],
"account": {
"program_owner": [
0,
@ -73,38 +73,38 @@
},
{
"npk": [
32,
67,
72,
164,
106,
53,
66,
239,
141,
15,
52,
230,
136,
177,
2,
236,
207,
243,
173,
134,
135,
210,
143,
87,
232,
33,
223,
54,
226,
10,
71,
215,
128,
194,
120,
113,
224,
4,
165
254,
143,
172,
24,
244,
243,
208,
65,
112,
118,
70,
217,
240,
69,
100,
129,
3,
121,
25,
213,
132,
42,
45
],
"account": {
"program_owner": [
@ -157,4 +157,4 @@
37,
37
]
}
}

View File

@ -10,5 +10,8 @@ services:
volumes:
# Mount configuration
- ./configs/indexer_config.json:/etc/indexer_service/indexer_config.json
# Mount data folder
- ./data:/var/lib/indexer_service
# Mount data volume
- indexer_data:/var/lib/indexer_service
volumes:
indexer_data:

View File

@ -1,29 +0,0 @@
#!/bin/sh
# This is an entrypoint script for the indexer_service Docker container,
# it's not meant to be executed outside of the container.
set -e
CONFIG="/etc/indexer_service/indexer_config.json"
# Check config file exists
if [ ! -f "$CONFIG" ]; then
echo "Config file not found: $CONFIG" >&2
exit 1
fi
# Parse home dir
HOME_DIR=$(jq -r '.home' "$CONFIG")
if [ -z "$HOME_DIR" ] || [ "$HOME_DIR" = "null" ]; then
echo "'home' key missing in config" >&2
exit 1
fi
# Give permissions to the data directory and switch to non-root user
if [ "$(id -u)" = "0" ]; then
mkdir -p "$HOME_DIR"
chown -R indexer_service_user:indexer_service_user "$HOME_DIR"
exec gosu indexer_service_user "$@"
fi

View File

@ -1,11 +1,13 @@
//! Conversions between `indexer_service_protocol` types and `nssa/nssa_core` types.
use nssa_core::account::Nonce;
use crate::{
Account, AccountId, BedrockStatus, Block, BlockBody, BlockHeader, Ciphertext, Commitment,
CommitmentSetDigest, Data, EncryptedAccountData, EphemeralPublicKey, HashType, MantleMsgId,
Nullifier, PrivacyPreservingMessage, PrivacyPreservingTransaction, ProgramDeploymentMessage,
ProgramDeploymentTransaction, ProgramId, Proof, PublicKey, PublicMessage, PublicTransaction,
Signature, Transaction, WitnessSet,
Signature, Transaction, ValidityWindow, WitnessSet,
};
// ============================================================================
@ -52,7 +54,7 @@ impl From<nssa_core::account::Account> for Account {
program_owner: program_owner.into(),
balance,
data: data.into(),
nonce,
nonce: nonce.0,
}
}
}
@ -72,7 +74,7 @@ impl TryFrom<Account> for nssa_core::account::Account {
program_owner: program_owner.into(),
balance,
data: data.try_into()?,
nonce,
nonce: Nonce(nonce),
})
}
}
@ -250,7 +252,7 @@ impl From<nssa::public_transaction::Message> for PublicMessage {
Self {
program_id: program_id.into(),
account_ids: account_ids.into_iter().map(Into::into).collect(),
nonces,
nonces: nonces.iter().map(|x| x.0).collect(),
instruction_data,
}
}
@ -267,7 +269,10 @@ impl From<PublicMessage> for nssa::public_transaction::Message {
Self::new_preserialized(
program_id.into(),
account_ids.into_iter().map(Into::into).collect(),
nonces,
nonces
.iter()
.map(|x| nssa_core::account::Nonce(*x))
.collect(),
instruction_data,
)
}
@ -282,10 +287,12 @@ impl From<nssa::privacy_preserving_transaction::message::Message> for PrivacyPre
encrypted_private_post_states,
new_commitments,
new_nullifiers,
block_validity_window,
timestamp_validity_window,
} = value;
Self {
public_account_ids: public_account_ids.into_iter().map(Into::into).collect(),
nonces,
nonces: nonces.iter().map(|x| x.0).collect(),
public_post_states: public_post_states.into_iter().map(Into::into).collect(),
encrypted_private_post_states: encrypted_private_post_states
.into_iter()
@ -296,12 +303,14 @@ impl From<nssa::privacy_preserving_transaction::message::Message> for PrivacyPre
.into_iter()
.map(|(n, d)| (n.into(), d.into()))
.collect(),
block_validity_window: block_validity_window.into(),
timestamp_validity_window: timestamp_validity_window.into(),
}
}
}
impl TryFrom<PrivacyPreservingMessage> for nssa::privacy_preserving_transaction::message::Message {
type Error = nssa_core::account::data::DataTooBigError;
type Error = nssa::error::NssaError;
fn try_from(value: PrivacyPreservingMessage) -> Result<Self, Self::Error> {
let PrivacyPreservingMessage {
@ -311,14 +320,20 @@ impl TryFrom<PrivacyPreservingMessage> for nssa::privacy_preserving_transaction:
encrypted_private_post_states,
new_commitments,
new_nullifiers,
block_validity_window,
timestamp_validity_window,
} = value;
Ok(Self {
public_account_ids: public_account_ids.into_iter().map(Into::into).collect(),
nonces,
nonces: nonces
.iter()
.map(|x| nssa_core::account::Nonce(*x))
.collect(),
public_post_states: public_post_states
.into_iter()
.map(TryInto::try_into)
.collect::<Result<Vec<_>, _>>()?,
.collect::<Result<Vec<_>, _>>()
.map_err(|e| nssa::error::NssaError::InvalidInput(format!("{e}")))?,
encrypted_private_post_states: encrypted_private_post_states
.into_iter()
.map(Into::into)
@ -328,6 +343,12 @@ impl TryFrom<PrivacyPreservingMessage> for nssa::privacy_preserving_transaction:
.into_iter()
.map(|(n, d)| (n.into(), d.into()))
.collect(),
block_validity_window: block_validity_window
.try_into()
.map_err(|e| nssa::error::NssaError::InvalidInput(format!("{e}")))?,
timestamp_validity_window: timestamp_validity_window
.try_into()
.map_err(|e| nssa::error::NssaError::InvalidInput(format!("{e}")))?,
})
}
}
@ -351,12 +372,16 @@ impl From<ProgramDeploymentMessage> for nssa::program_deployment_transaction::Me
// WitnessSet conversions
// ============================================================================
impl TryFrom<nssa::public_transaction::WitnessSet> for WitnessSet {
type Error = ();
fn try_from(_value: nssa::public_transaction::WitnessSet) -> Result<Self, Self::Error> {
// Public transaction witness sets don't have proofs, so we can't convert them directly
Err(())
impl From<nssa::public_transaction::WitnessSet> for WitnessSet {
fn from(value: nssa::public_transaction::WitnessSet) -> Self {
Self {
signatures_and_public_keys: value
.signatures_and_public_keys()
.iter()
.map(|(sig, pk)| (sig.clone().into(), pk.clone().into()))
.collect(),
proof: None,
}
}
}
@ -368,7 +393,7 @@ impl From<nssa::privacy_preserving_transaction::witness_set::WitnessSet> for Wit
.into_iter()
.map(|(sig, pk)| (sig.into(), pk.into()))
.collect(),
proof: proof.into(),
proof: Some(proof.into()),
}
}
}
@ -388,7 +413,9 @@ impl TryFrom<WitnessSet> for nssa::privacy_preserving_transaction::witness_set::
Ok(Self::from_raw_parts(
signatures_and_public_keys,
proof.into(),
proof
.map(Into::into)
.ok_or_else(|| nssa::error::NssaError::InvalidInput("Missing proof".to_owned()))?,
))
}
}
@ -408,14 +435,7 @@ impl From<nssa::PublicTransaction> for PublicTransaction {
Self {
hash,
message: message.into(),
witness_set: WitnessSet {
signatures_and_public_keys: witness_set
.signatures_and_public_keys()
.iter()
.map(|(sig, pk)| (sig.clone().into(), pk.clone().into()))
.collect(),
proof: Proof(vec![]), // Public transactions don't have proofs
},
witness_set: witness_set.into(),
}
}
}
@ -472,14 +492,7 @@ impl TryFrom<PrivacyPreservingTransaction> for nssa::PrivacyPreservingTransactio
witness_set,
} = value;
Ok(Self::new(
message
.try_into()
.map_err(|err: nssa_core::account::data::DataTooBigError| {
nssa::error::NssaError::InvalidInput(err.to_string())
})?,
witness_set.try_into()?,
))
Ok(Self::new(message.try_into()?, witness_set.try_into()?))
}
}
@ -680,3 +693,21 @@ impl From<HashType> for common::HashType {
Self(value.0)
}
}
// ============================================================================
// ValidityWindow conversions
// ============================================================================
impl From<nssa_core::program::ValidityWindow<u64>> for ValidityWindow {
fn from(value: nssa_core::program::ValidityWindow<u64>) -> Self {
Self((value.start(), value.end()))
}
}
impl TryFrom<ValidityWindow> for nssa_core::program::ValidityWindow<u64> {
type Error = nssa_core::program::InvalidWindow;
fn try_from(value: ValidityWindow) -> Result<Self, Self::Error> {
value.0.try_into()
}
}

View File

@ -138,7 +138,7 @@ pub struct Account {
}
pub type BlockId = u64;
pub type TimeStamp = u64;
pub type Timestamp = u64;
#[derive(Debug, Clone, PartialEq, Eq, Hash, Serialize, Deserialize, JsonSchema)]
pub struct Block {
@ -153,7 +153,7 @@ pub struct BlockHeader {
pub block_id: BlockId,
pub prev_block_hash: HashType,
pub hash: HashType,
pub timestamp: TimeStamp,
pub timestamp: Timestamp,
pub signature: Signature,
}
@ -235,12 +235,14 @@ pub struct PrivacyPreservingMessage {
pub encrypted_private_post_states: Vec<EncryptedAccountData>,
pub new_commitments: Vec<Commitment>,
pub new_nullifiers: Vec<(Nullifier, CommitmentSetDigest)>,
pub block_validity_window: ValidityWindow,
pub timestamp_validity_window: ValidityWindow,
}
#[derive(Debug, Clone, PartialEq, Eq, Hash, Serialize, Deserialize, JsonSchema)]
pub struct WitnessSet {
pub signatures_and_public_keys: Vec<(Signature, PublicKey)>,
pub proof: Proof,
pub proof: Option<Proof>,
}
#[derive(Debug, Clone, PartialEq, Eq, Hash, Serialize, Deserialize, JsonSchema)]
@ -300,6 +302,20 @@ pub struct Nullifier(
pub [u8; 32],
);
#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash, Serialize, Deserialize, JsonSchema)]
pub struct ValidityWindow(pub (Option<BlockId>, Option<BlockId>));
impl Display for ValidityWindow {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
match self.0 {
(Some(start), Some(end)) => write!(f, "[{start}, {end})"),
(Some(start), None) => write!(f, "[{start}, \u{221e})"),
(None, Some(end)) => write!(f, "(-\u{221e}, {end})"),
(None, None) => write!(f, "(-\u{221e}, \u{221e})"),
}
}
}
#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash, Serialize, Deserialize, JsonSchema)]
pub struct CommitmentSetDigest(
#[serde(with = "base64::arr")]

View File

@ -30,16 +30,22 @@ pub trait Rpc {
async fn get_last_finalized_block_id(&self) -> Result<BlockId, ErrorObjectOwned>;
#[method(name = "getBlockById")]
async fn get_block_by_id(&self, block_id: BlockId) -> Result<Block, ErrorObjectOwned>;
async fn get_block_by_id(&self, block_id: BlockId) -> Result<Option<Block>, ErrorObjectOwned>;
#[method(name = "getBlockByHash")]
async fn get_block_by_hash(&self, block_hash: HashType) -> Result<Block, ErrorObjectOwned>;
async fn get_block_by_hash(
&self,
block_hash: HashType,
) -> Result<Option<Block>, ErrorObjectOwned>;
#[method(name = "getAccount")]
async fn get_account(&self, account_id: AccountId) -> Result<Account, ErrorObjectOwned>;
#[method(name = "getTransaction")]
async fn get_transaction(&self, tx_hash: HashType) -> Result<Transaction, ErrorObjectOwned>;
async fn get_transaction(
&self,
tx_hash: HashType,
) -> Result<Option<Transaction>, ErrorObjectOwned>;
#[method(name = "getBlocks")]
async fn get_blocks(

Some files were not shown because too many files have changed in this diff Show More