Merge branch 'main' into Pravdyvy/indexer-final-state

This commit is contained in:
Pravdyvy 2026-03-18 09:15:45 +02:00
commit 94faaa6d64
245 changed files with 8246 additions and 6274 deletions

960
Cargo.lock generated

File diff suppressed because it is too large Load Diff

View File

@ -58,7 +58,7 @@ amm_program = { path = "programs/amm" }
test_program_methods = { path = "test_program_methods" }
bedrock_client = { path = "bedrock_client" }
tokio = { version = "1.28.2", features = [
tokio = { version = "1.50", features = [
"net",
"rt-multi-thread",
"sync",
@ -75,15 +75,15 @@ serde = { version = "1.0.60", default-features = false, features = ["derive"] }
serde_json = "1.0.81"
serde_with = "3.16.1"
actix = "0.13.0"
actix-cors = "0.6.1"
actix-cors = "0.7.1"
jsonrpsee = "0.26.0"
futures = "0.3"
actix-rt = "*"
lazy_static = "1.5.0"
env_logger = "0.10"
env_logger = "0.11"
log = "0.4.28"
lru = "0.7.8"
thiserror = "2.0.12"
lru = "0.16.3"
thiserror = "2.0"
sha2 = "0.10.8"
hex = "0.4.3"
bytemuck = "1.24.0"
@ -91,7 +91,7 @@ bytesize = { version = "2.3.1", features = ["serde"] }
humantime-serde = "1.1"
humantime = "2.1"
aes-gcm = "0.10.3"
toml = "0.7.4"
toml = "0.9.8"
bincode = "1.3.3"
tempfile = "3.14.0"
light-poseidon = "0.3.0"
@ -107,7 +107,7 @@ base58 = "0.2.0"
itertools = "0.14.0"
url = { version = "2.5.4", features = ["serde"] }
tokio-retry = "0.3.0"
schemars = "1.2.0"
schemars = "1.2"
async-stream = "0.3.6"
logos-blockchain-common-http-client = { git = "https://github.com/logos-blockchain/logos-blockchain.git" }
@ -129,7 +129,7 @@ k256 = { version = "0.13.3", features = [
"pem",
] }
elliptic-curve = { version = "0.13.8", features = ["arithmetic"] }
actix-web = { version = "=4.1.0", default-features = false, features = [
actix-web = { version = "4.13.0", default-features = false, features = [
"macros",
] }
clap = { version = "4.5.42", features = ["derive", "env"] }
@ -141,3 +141,142 @@ inherits = "release"
opt-level = 'z'
lto = true
codegen-units = 1
[workspace.lints.rust]
warnings = "deny"
[workspace.lints]
clippy.all = { level = "deny", priority = -1 }
# Pedantic
clippy.pedantic = { level = "deny", priority = -1 }
# Reason: documenting every function returning Result is too verbose and doesn't add much value when you have good error types.
clippy.missing-errors-doc = "allow"
# Reason: most of the panics are internal and not part of the public API, so documenting them is not necessary.
clippy.missing-panics-doc = "allow"
# Reason: this isn't always bad and actually works well for our financial and cryptography code.
clippy.similar-names = "allow"
# Reason: this lint is too strict and hard to fix.
clippy.too-many-lines = "allow"
# Reason: std hasher is fine for us in public functions.
clippy.implicit-hasher = "allow"
# Restriction
clippy.restriction = { level = "deny", priority = -1 }
# Reason: we deny the whole `restriction` group but we allow things that don't make sense for us.
# That way we can still benefit from new lints added to the `restriction` group without having to
# explicitly allow them.
# As a downside, our contributors can't tell whether a lint from this group was allowed
# intentionally or simply because no one has hit it yet, but we can handle this during code reviews.
clippy.blanket-clippy-restriction-lints = "allow"
# Reason: we can't avoid using unwrap for now.
clippy.unwrap-used = "allow"
# Reason: we can't avoid using expect for now.
clippy.expect-used = "allow"
# Reason: unreachable is good in many cases.
clippy.unreachable = "allow"
# Reason: this is ridiculously strict in our codebase and doesn't add any value.
clippy.single-call-fn = "allow"
# Reason: we use panic in some places and it's okay.
clippy.panic = "allow"
# Reason: shadowing is good most of the time.
clippy.shadow-reuse = "allow"
# Reason: implicit return is good.
clippy.implicit-return = "allow"
# Reason: std is fine for us, we don't need to use core.
clippy.std-instead-of-core = "allow"
# Reason: std is fine for us, we don't need to use alloc.
clippy.std-instead-of-alloc = "allow"
# Reason: default methods are good most of the time.
clippy.missing-trait-methods = "allow"
# Reason: this is too verbose and doesn't help much if you have rust analyzer.
clippy.pattern-type-mismatch = "allow"
# Reason: decreases readability.
clippy.assertions-on-result-states = "allow"
# Reason: documenting every assert is too verbose.
clippy.missing-assert-message = "allow"
# Reason: documenting private items is too verbose and doesn't add much value.
clippy.missing-docs-in-private-items = "allow"
# Reason: we use separated suffix style.
clippy.separated_literal_suffix = "allow"
# Reason: sometimes absolute paths are more readable.
clippy.absolute-paths = "allow"
# Reason: sometimes it's as readable as full variable naming.
clippy.min-ident-chars = "allow"
# Reason: it's very common and handy.
clippy.indexing-slicing = "allow"
# Reason: we use little endian style.
clippy.little-endian-bytes = "allow"
# Reason: we use this style of pub visibility.
clippy.pub-with-shorthand = "allow"
# Reason: question mark operator is very cool.
clippy.question-mark-used = "allow"
# Reason: it's fine to panic in tests and some functions where it makes sense.
clippy.panic-in-result-fn = "allow"
# Reason: we don't care that much about inlining and LTO should take care of it.
clippy.missing_inline_in_public_items = "allow"
# Reason: it's okay for us.
clippy.default-numeric-fallback = "allow"
# Reason: this is fine for us.
clippy.exhaustive-enums = "allow"
# Reason: this is fine for us.
clippy.exhaustive-structs = "allow"
# Reason: this helps readability when item is imported in other modules.
clippy.module-name-repetitions = "allow"
# Reason: mostly historical reasons, maybe we'll address this in future.
clippy.mod-module-files = "allow"
# Reason: named module files is our preferred way.
clippy.self-named-module-files = "allow"
# Reason: this is actually quite handy.
clippy.impl-trait-in-params = "allow"
# Reason: this is often useful.
clippy.use-debug = "allow"
# Reason: this is sometimes useful.
clippy.field-scoped-visibility-modifiers = "allow"
# Reason: `pub use` is good for re-exports and hiding unnecessary details.
clippy.pub-use = "allow"
# Reason: we prefer semicolons inside blocks.
clippy.semicolon-outside-block = "allow"
# Reason: we don't do it blindly, this is mostly internal constraints checks.
clippy.unwrap-in-result = "allow"
# Reason: we don't see any problems with that.
clippy.shadow-same = "allow"
# Reason: this lint is too verbose.
clippy.let-underscore-untyped = "allow"
# Reason: this lint is actually bad as it forces to use wildcard `..` instead of
# field-by-field `_` which may lead to subtle bugs when new fields are added to the struct.
clippy.unneeded-field-pattern = "allow"
# Nursery
clippy.nursery = { level = "deny", priority = -1 }
# Reason: this is okay if it compiles.
clippy.future-not-send = "allow"
# Reason: this is actually a good lint, but currently it gives a lot of false-positives.
clippy.significant-drop-tightening = "allow"
# Correctness
clippy.correctness = { level = "deny", priority = -1 }
# Complexity
clippy.complexity = { level = "deny", priority = -1 }
# Perf
clippy.perf = { level = "deny", priority = -1 }
# Suspicious
clippy.suspicious = { level = "deny", priority = -1 }
# Style
clippy.style = { level = "deny", priority = -1 }
# Cargo
clippy.cargo = { level = "deny", priority = -1 }
# Reason: we're not at this stage yet and it will be a pain to create a new crate.
clippy.cargo-common-metadata = "allow"
# Reason: hard to address right now and mostly comes from dependencies
# so the fix would be just a long list of exceptions.
clippy.multiple-crate-versions = "allow"

View File

@ -37,9 +37,15 @@ run-sequencer:
# Run Indexer
[working-directory: 'indexer/service']
run-indexer:
run-indexer mock="":
@echo "🔍 Running indexer"
RUST_LOG=info RISC0_DEV_MODE=1 cargo run --release -p indexer_service configs/indexer_config.json
@if [ "{{mock}}" = "mock" ]; then \
echo "🧪 Using mock data"; \
RUST_LOG=info RISC0_DEV_MODE=1 cargo run --release --features mock-responses -p indexer_service configs/indexer_config.json; \
else \
echo "🚀 Using real data"; \
RUST_LOG=info RISC0_DEV_MODE=1 cargo run --release -p indexer_service configs/indexer_config.json; \
fi
# Run Explorer
[working-directory: 'explorer_service']
@ -58,4 +64,6 @@ clean:
@echo "🧹 Cleaning run artifacts"
rm -rf sequencer_runner/bedrock_signing_key
rm -rf sequencer_runner/rocksdb
rm -rf indexer/service/rocksdb
rm -rf wallet/configs/debug/storage.json
cd bedrock && docker compose down -v

View File

@ -69,6 +69,7 @@ Both public and private executions use the same Risc0 VM bytecode. Public transa
This design keeps public transactions as fast as any RISC-V-based VM and makes private transactions efficient for validators. It also supports parallel execution similar to Solana, improving throughput. The main computational cost for privacy-preserving transactions is on the user side, where ZK proofs are generated.
---
---
---
@ -130,29 +131,31 @@ RUST_LOG=info RISC0_DEV_MODE=1 cargo run $(pwd)/configs/debug all
```
# Run the sequencer and node
## Running Manually
### Normal mode
The sequencer and logos blockchain node can be run locally:
1. On one terminal go to the `logos-blockchain/logos-blockchain` repo and run a local logos blockchain node:
- `git checkout master; git pull`
- `cargo clean`
- `rm -r ~/.logos-blockchain-circuits`
- `./scripts/setup-logos-blockchain-circuits.sh`
- `cargo build --all-features`
- `./target/debug/logos-blockchain-node --deployment nodes/node/standalone-deployment-config.yaml nodes/node/standalone-node-config.yaml`
2. Alternatively (WARNING: This node is outdated) go to the `logos-blockchain/lssa` repo and run the node from Docker:
- `cd bedrock`
- Change line 14 of `docker-compose.yml` from `"0:18080/tcp"` into `"8080:18080/tcp"`
- `docker compose up`
3. On another terminal go to the `logos-blockchain/lssa` repo and run indexer service:
- `git checkout master; git pull`
- `cargo clean`
- `rm -r ~/.logos-blockchain-circuits`
- `./scripts/setup-logos-blockchain-circuits.sh`
- `cargo build --all-features`
- `./target/debug/logos-blockchain-node --deployment nodes/node/standalone-deployment-config.yaml nodes/node/standalone-node-config.yaml`
- Alternatively (WARNING: This node is outdated) go to `logos-blockchain/lssa/` repo and run the node from docker:
- `cd bedrock`
- Change line 14 of `docker-compose.yml` from `"0:18080/tcp"` into `"8080:18080/tcp"`
- `docker compose up`
2. On another terminal go to the `logos-blockchain/lssa` repo and run indexer service:
- `RUST_LOG=info cargo run -p indexer_service indexer/service/configs/indexer_config.json`
4. On another terminal go to the `logos-blockchain/lssa` repo and run the sequencer:
3. On another terminal go to the `logos-blockchain/lssa` repo and run the sequencer:
- `RUST_LOG=info cargo run -p sequencer_runner sequencer_runner/configs/debug`
4. (To run the explorer): on another terminal go to `logos-blockchain/lssa/explorer_service` and run the following:
- `cargo install cargo-leptos`
- `cargo leptos build --release`
- `cargo leptos serve --release`
### Notes on cleanup

Binary file not shown.

Binary file not shown.

Binary file not shown.

View File

@ -4,6 +4,9 @@ version = "0.1.0"
edition = "2024"
license = { workspace = true }
[lints]
workspace = true
[dependencies]
common.workspace = true

View File

@ -2,7 +2,7 @@ use std::time::Duration;
use anyhow::{Context as _, Result};
use common::config::BasicAuth;
use futures::{Stream, TryFutureExt};
use futures::{Stream, TryFutureExt as _};
#[expect(clippy::single_component_path_imports, reason = "Satisfy machete")]
use humantime_serde;
use log::{info, warn};
@ -14,7 +14,7 @@ use reqwest::{Client, Url};
use serde::{Deserialize, Serialize};
use tokio_retry::Retry;
/// Fibonacci backoff retry strategy configuration
/// Fibonacci backoff retry strategy configuration.
#[derive(Debug, Copy, Clone, Serialize, Deserialize)]
pub struct BackoffConfig {
#[serde(with = "humantime_serde")]
@ -31,9 +31,9 @@ impl Default for BackoffConfig {
}
}
// Simple wrapper
// maybe extend in the future for our purposes
// `Clone` is cheap because `CommonHttpClient` is internally reference counted (`Arc`).
/// Simple wrapper
/// maybe extend in the future for our purposes
/// `Clone` is cheap because `CommonHttpClient` is internally reference counted (`Arc`).
#[derive(Clone)]
pub struct BedrockClient {
http_client: CommonHttpClient,
@ -62,10 +62,22 @@ impl BedrockClient {
})
}
pub async fn post_transaction(&self, tx: SignedMantleTx) -> Result<(), Error> {
Retry::spawn(self.backoff_strategy(), || {
self.http_client
pub async fn post_transaction(&self, tx: SignedMantleTx) -> Result<Result<(), Error>, Error> {
Retry::spawn(self.backoff_strategy(), || async {
match self
.http_client
.post_transaction(self.node_url.clone(), tx.clone())
.await
{
Ok(()) => Ok(Ok(())),
Err(err) => match err {
// Retry arm.
// Retrying only reqwest errors: mainly connected to http.
Error::Request(_) => Err(err),
// Returning non-retryable error
Error::Server(_) | Error::Client(_) | Error::Url(_) => Ok(Err(err)),
},
}
})
.await
}
@ -96,9 +108,14 @@ impl BedrockClient {
}
fn backoff_strategy(&self) -> impl Iterator<Item = Duration> {
tokio_retry::strategy::FibonacciBackoff::from_millis(
self.backoff.start_delay.as_millis() as u64
)
.take(self.backoff.max_retries)
let start_delay_millis = self
.backoff
.start_delay
.as_millis()
.try_into()
.expect("Start delay must be less than u64::MAX milliseconds");
tokio_retry::strategy::FibonacciBackoff::from_millis(start_delay_millis)
.take(self.backoff.max_retries)
}
}

54
clippy.toml Normal file
View File

@ -0,0 +1,54 @@
module-item-order-groupings = [
[
"use",
[
"use",
],
],
[
"modules",
[
"extern_crate",
"mod",
"foreign_mod",
],
],
[
"macros",
[
"macro",
],
],
[
"global_asm",
[
"global_asm",
],
],
[
"UPPER_SNAKE_CASE",
[
"static",
"const",
],
],
[
"PascalCase",
[
"ty_alias",
"enum",
"struct",
"union",
"trait",
"trait_alias",
"impl",
],
],
[
"lower_snake_case",
[
"fn",
],
],
]
source-item-ordering = ["module"]

View File

@ -4,6 +4,9 @@ version = "0.1.0"
edition = "2024"
license = { workspace = true }
[lints]
workspace = true
[dependencies]
nssa.workspace = true
nssa_core.workspace = true

View File

@ -1,7 +1,7 @@
use borsh::{BorshDeserialize, BorshSerialize};
use nssa::AccountId;
use serde::{Deserialize, Serialize};
use sha2::{Digest, Sha256, digest::FixedOutput};
use sha2::{Digest as _, Sha256, digest::FixedOutput as _};
use crate::{HashType, transaction::NSSATransaction};
@ -20,7 +20,7 @@ pub struct BlockMeta {
#[derive(Debug, Clone)]
/// Our own hasher.
/// Currently it is SHA256 hasher wrapper. May change in a future.
pub struct OwnHasher {}
pub struct OwnHasher;
impl OwnHasher {
fn hash(data: &[u8]) -> HashType {
@ -69,6 +69,7 @@ pub struct HashableBlockData {
}
impl HashableBlockData {
#[must_use]
pub fn into_pending_block(
self,
signing_key: &nssa::PrivateKey,
@ -93,6 +94,7 @@ impl HashableBlockData {
}
}
#[must_use]
pub fn block_hash(&self) -> BlockHash {
OwnHasher::hash(&borsh::to_vec(&self).unwrap())
}
@ -109,14 +111,14 @@ impl From<Block> for HashableBlockData {
}
}
/// Helper struct for account (de-)serialization
/// Helper struct for account (de-)serialization.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct AccountInitialData {
pub account_id: AccountId,
pub balance: u128,
}
/// Helper struct to (de-)serialize initial commitments
/// Helper struct to (de-)serialize initial commitments.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct CommitmentsInitialData {
pub npk: nssa_core::NullifierPublicKey,
@ -128,7 +130,7 @@ mod tests {
use crate::{HashType, block::HashableBlockData, test_utils};
#[test]
fn test_encoding_roundtrip() {
fn encoding_roundtrip() {
let transactions = vec![test_utils::produce_dummy_empty_transaction()];
let block = test_utils::produce_dummy_block(1, Some(HashType([1; 32])), transactions);
let hashable = HashableBlockData::from(block);

View File

@ -42,14 +42,14 @@ impl FromStr for BasicAuth {
})?;
Ok(Self {
username: username.to_string(),
password: password.map(|p| p.to_string()),
username: username.to_owned(),
password: password.map(std::string::ToString::to_string),
})
}
}
impl From<BasicAuth> for BasicAuthCredentials {
fn from(value: BasicAuth) -> Self {
BasicAuthCredentials::new(value.username, value.password)
Self::new(value.username, value.password)
}
}

View File

@ -22,14 +22,14 @@ pub enum SequencerClientError {
impl From<SequencerRpcError> for SequencerClientError {
fn from(value: SequencerRpcError) -> Self {
SequencerClientError::InternalError(value)
Self::InternalError(value)
}
}
#[derive(Debug, thiserror::Error)]
pub enum ExecutionFailureKind {
#[error("Failed to get account data from sequencer")]
SequencerError,
#[error("Failed to get data from sequencer")]
SequencerError(#[source] anyhow::Error),
#[error("Inputs amounts does not match outputs")]
AmountMismatchError,
#[error("Accounts key not found")]

View File

@ -17,7 +17,6 @@ pub mod test_utils;
pub const PINATA_BASE58: &str = "EfQhKQAkX2FJiwNii2WFQsGndjvF1Mzd7RuVe7QdPLw7";
#[derive(
Debug,
Default,
Copy,
Clone,
@ -37,13 +36,19 @@ impl Display for HashType {
}
}
impl std::fmt::Debug for HashType {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
write!(f, "{}", hex::encode(self.0))
}
}
impl FromStr for HashType {
type Err = hex::FromHexError;
fn from_str(s: &str) -> Result<Self, Self::Err> {
let mut bytes = [0u8; 32];
let mut bytes = [0_u8; 32];
hex::decode_to_slice(s, &mut bytes)?;
Ok(HashType(bytes))
Ok(Self(bytes))
}
}
@ -61,7 +66,7 @@ impl From<HashType> for [u8; 32] {
impl From<[u8; 32]> for HashType {
fn from(bytes: [u8; 32]) -> Self {
HashType(bytes)
Self(bytes)
}
}
@ -69,7 +74,7 @@ impl TryFrom<Vec<u8>> for HashType {
type Error = <[u8; 32] as TryFrom<Vec<u8>>>::Error;
fn try_from(value: Vec<u8>) -> Result<Self, Self::Error> {
Ok(HashType(value.try_into()?))
Ok(Self(value.try_into()?))
}
}
@ -85,7 +90,7 @@ mod tests {
#[test]
fn serialization_roundtrip() {
let original = HashType([1u8; 32]);
let original = HashType([1_u8; 32]);
let serialized = original.to_string();
let deserialized = HashType::from_str(&serialized).unwrap();
assert_eq!(original, deserialized);

View File

@ -5,25 +5,25 @@ use serde_json::{Value, to_value};
#[derive(serde::Serialize)]
pub struct RpcParseError(pub String);
#[allow(clippy::too_long_first_doc_paragraph)]
/// This struct may be returned from JSON RPC server in case of error
/// This struct may be returned from JSON RPC server in case of error.
///
/// It is expected that this struct has `From<_>` impls for all other RPC errors
/// like [`RpcBlockError`](crate::types::blocks::RpcBlockError)
#[derive(Debug, serde::Serialize, serde::Deserialize, Clone, PartialEq)]
/// like [`RpcBlockError`](crate::types::blocks::RpcBlockError).
#[derive(Debug, serde::Serialize, serde::Deserialize, Clone, PartialEq, Eq)]
#[serde(deny_unknown_fields)]
pub struct RpcError {
#[serde(flatten)]
pub error_struct: Option<RpcErrorKind>,
/// Deprecated please use the `error_struct` instead
/// Deprecated please use the `error_struct` instead.
pub code: i64,
/// Deprecated please use the `error_struct` instead
/// Deprecated please use the `error_struct` instead.
pub message: String,
/// Deprecated please use the `error_struct` instead
/// Deprecated please use the `error_struct` instead.
#[serde(skip_serializing_if = "Option::is_none")]
pub data: Option<Value>,
}
#[derive(Debug, serde::Serialize, serde::Deserialize, Clone, PartialEq)]
#[derive(Debug, serde::Serialize, serde::Deserialize, Clone, PartialEq, Eq)]
#[serde(tag = "name", content = "cause", rename_all = "SCREAMING_SNAKE_CASE")]
pub enum RpcErrorKind {
RequestValidationError(RpcRequestValidationErrorKind),
@ -31,14 +31,14 @@ pub enum RpcErrorKind {
InternalError(Value),
}
#[derive(Debug, serde::Serialize, serde::Deserialize, Clone, PartialEq)]
#[derive(Debug, serde::Serialize, serde::Deserialize, Clone, PartialEq, Eq)]
#[serde(tag = "name", content = "info", rename_all = "SCREAMING_SNAKE_CASE")]
pub enum RpcRequestValidationErrorKind {
MethodNotFound { method_name: String },
ParseError { error_message: String },
}
/// A general Server Error
/// A general Server Error.
#[derive(serde::Serialize, serde::Deserialize, Debug, PartialEq, Eq, Clone)]
pub enum ServerError {
Timeout,
@ -49,8 +49,9 @@ impl RpcError {
/// A generic constructor.
///
/// Mostly for completeness; doesn't do anything but fill in the corresponding fields.
pub fn new(code: i64, message: String, data: Option<Value>) -> Self {
RpcError {
#[must_use]
pub const fn new(code: i64, message: String, data: Option<Value>) -> Self {
Self {
code,
message,
data,
@ -69,12 +70,12 @@ impl RpcError {
)));
}
};
RpcError::new(-32_602, "Invalid params".to_owned(), Some(value))
Self::new(-32_602, "Invalid params".to_owned(), Some(value))
}
/// Create a server error.
pub fn server_error<E: serde::Serialize>(e: Option<E>) -> Self {
RpcError::new(
Self::new(
-32_000,
"Server error".to_owned(),
e.map(|v| to_value(v).expect("Must be representable in JSON")),
@ -82,8 +83,9 @@ impl RpcError {
}
/// Create a parse error.
#[must_use]
pub fn parse_error(e: String) -> Self {
RpcError {
Self {
code: -32_700,
message: "Parse error".to_owned(),
data: Some(Value::String(e.clone())),
@ -93,12 +95,14 @@ impl RpcError {
}
}
#[must_use]
pub fn serialization_error(e: &str) -> Self {
RpcError::new_internal_error(Some(Value::String(e.to_owned())), e)
Self::new_internal_error(Some(Value::String(e.to_owned())), e)
}
/// Helper method to extract `INTERNAL_ERROR` into a separate `RpcErrorKind`
/// Returns `HANDLER_ERROR` if the error is not internal one
/// Returns `HANDLER_ERROR` if the error is not internal one.
#[must_use]
pub fn new_internal_or_handler_error(error_data: Option<Value>, error_struct: Value) -> Self {
if error_struct["name"] == "INTERNAL_ERROR" {
let error_message = match error_struct["info"].get("error_message") {
@ -111,8 +115,9 @@ impl RpcError {
}
}
#[must_use]
pub fn new_internal_error(error_data: Option<Value>, info: &str) -> Self {
RpcError {
Self {
code: -32_000,
message: "Server error".to_owned(),
data: error_data,
@ -124,7 +129,7 @@ impl RpcError {
}
fn new_handler_error(error_data: Option<Value>, error_struct: Value) -> Self {
RpcError {
Self {
code: -32_000,
message: "Server error".to_owned(),
data: error_data,
@ -133,8 +138,9 @@ impl RpcError {
}
/// Create a method not found error.
#[must_use]
pub fn method_not_found(method: String) -> Self {
RpcError {
Self {
code: -32_601,
message: "Method not found".to_owned(),
data: Some(Value::String(method.clone())),
@ -161,6 +167,7 @@ impl From<RpcParseError> for RpcError {
impl From<std::convert::Infallible> for RpcError {
fn from(_: std::convert::Infallible) -> Self {
// SAFETY: Infallible error can never be constructed, so this code can never be reached.
unsafe { core::hint::unreachable_unchecked() }
}
}
@ -168,20 +175,20 @@ impl From<std::convert::Infallible> for RpcError {
impl fmt::Display for ServerError {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self {
ServerError::Timeout => write!(f, "ServerError: Timeout"),
ServerError::Closed => write!(f, "ServerError: Closed"),
Self::Timeout => write!(f, "ServerError: Timeout"),
Self::Closed => write!(f, "ServerError: Closed"),
}
}
}
impl From<ServerError> for RpcError {
fn from(e: ServerError) -> RpcError {
fn from(e: ServerError) -> Self {
let error_data = match to_value(&e) {
Ok(value) => value,
Err(_err) => {
return RpcError::new_internal_error(None, "Failed to serialize ServerError");
return Self::new_internal_error(None, "Failed to serialize ServerError");
}
};
RpcError::new_internal_error(Some(error_data), e.to_string().as_str())
Self::new_internal_error(Some(error_data), e.to_string().as_str())
}
}

View File

@ -13,12 +13,14 @@ use std::fmt::{Formatter, Result as FmtResult};
use serde::{
de::{Deserializer, Error, Unexpected, Visitor},
ser::{SerializeStruct, Serializer},
ser::{SerializeStruct as _, Serializer},
};
use serde_json::{Result as JsonResult, Value};
use super::errors::RpcError;
pub type Parsed = Result<Message, Broken>;
#[derive(Debug, Clone, PartialEq, Eq, Hash)]
struct Version;
@ -29,10 +31,13 @@ impl serde::Serialize for Version {
}
impl<'de> serde::Deserialize<'de> for Version {
#[expect(
clippy::renamed_function_params,
reason = "More readable than original serde parameter names"
)]
fn deserialize<D: Deserializer<'de>>(deserializer: D) -> Result<Self, D::Error> {
struct VersionVisitor;
#[allow(clippy::needless_lifetimes)]
impl<'de> Visitor<'de> for VersionVisitor {
impl Visitor<'_> for VersionVisitor {
type Value = Version;
fn expecting(&self, formatter: &mut Formatter<'_>) -> FmtResult {
@ -51,8 +56,12 @@ impl<'de> serde::Deserialize<'de> for Version {
}
/// An RPC request.
#[derive(Debug, serde::Serialize, serde::Deserialize, Clone, PartialEq)]
#[derive(Debug, serde::Serialize, serde::Deserialize, Clone, PartialEq, Eq)]
#[serde(deny_unknown_fields)]
#[expect(
clippy::partial_pub_fields,
reason = "We don't want to allow access to the version, but the others are public for ease of use"
)]
pub struct Request {
jsonrpc: Version,
pub method: String,
@ -62,6 +71,7 @@ pub struct Request {
}
impl Request {
#[must_use]
pub fn from_payload_version_2_0(method: String, payload: serde_json::Value) -> Self {
Self {
jsonrpc: Version,
@ -75,6 +85,7 @@ impl Request {
/// Answer the request with a (positive) reply.
///
/// The ID is taken from the request.
#[must_use]
pub fn reply(&self, reply: Value) -> Message {
Message::Response(Response {
jsonrpc: Version,
@ -84,6 +95,7 @@ impl Request {
}
/// Answer the request with an error.
#[must_use]
pub fn error(&self, error: RpcError) -> Message {
Message::Response(Response {
jsonrpc: Version,
@ -96,7 +108,11 @@ impl Request {
/// A response to an RPC.
///
/// It is created by the methods on [Request](struct.Request.html).
#[derive(Debug, Clone, PartialEq)]
#[expect(
clippy::partial_pub_fields,
reason = "We don't want to allow access to the version, but the others are public for ease of use"
)]
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct Response {
jsonrpc: Version,
pub result: Result<Value, RpcError>,
@ -107,30 +123,22 @@ impl serde::Serialize for Response {
fn serialize<S: Serializer>(&self, serializer: S) -> Result<S::Ok, S::Error> {
let mut sub = serializer.serialize_struct("Response", 3)?;
sub.serialize_field("jsonrpc", &self.jsonrpc)?;
match self.result {
Ok(ref value) => sub.serialize_field("result", value),
Err(ref err) => sub.serialize_field("error", err),
match &self.result {
Ok(value) => sub.serialize_field("result", value),
Err(err) => sub.serialize_field("error", err),
}?;
sub.serialize_field("id", &self.id)?;
sub.end()
}
}
/// Deserializer for `Option<Value>` that produces `Some(Value::Null)`.
///
/// The usual one produces None in that case. But we need to know the difference between
/// `{x: null}` and `{}`.
fn some_value<'de, D: Deserializer<'de>>(deserializer: D) -> Result<Option<Value>, D::Error> {
serde::Deserialize::deserialize(deserializer).map(Some)
}
/// A helper trick for deserialization.
#[derive(serde::Deserialize)]
#[serde(deny_unknown_fields)]
struct WireResponse {
// It is actually used to eat and sanity check the deserialized text
#[allow(dead_code)]
jsonrpc: Version,
#[serde(rename = "jsonrpc")]
_jsonrpc: Version,
// Make sure we accept null as Some(Value::Null), instead of going to None
#[serde(default, deserialize_with = "some_value")]
result: Option<Value>,
@ -152,7 +160,7 @@ impl<'de> serde::Deserialize<'de> for Response {
return Err(err);
}
};
Ok(Response {
Ok(Self {
jsonrpc: Version,
result,
id: wr.id,
@ -161,7 +169,11 @@ impl<'de> serde::Deserialize<'de> for Response {
}
/// A notification (doesn't expect an answer).
#[derive(Debug, serde::Serialize, serde::Deserialize, Clone, PartialEq)]
#[expect(
clippy::partial_pub_fields,
reason = "We don't want to allow access to the version, but the others are public for ease of use"
)]
#[derive(Debug, serde::Serialize, serde::Deserialize, Clone, PartialEq, Eq)]
#[serde(deny_unknown_fields)]
pub struct Notification {
jsonrpc: Version,
@ -198,7 +210,7 @@ pub enum Message {
/// message.
///
/// This variant has no direct constructor and is expected to be constructed manually.
Batch(Vec<Message>),
Batch(Vec<Self>),
/// An unmatched sub entry in a `Batch`.
///
/// When there's a `Batch` and an element doesn't comform to the JSONRPC 2.0 format, that one
@ -212,9 +224,10 @@ impl Message {
/// A constructor for a request.
///
/// The ID is auto-set to dontcare.
#[must_use]
pub fn request(method: String, params: Value) -> Self {
let id = Value::from("dontcare");
Message::Request(Request {
Self::Request(Request {
jsonrpc: Version,
method,
params,
@ -223,8 +236,9 @@ impl Message {
}
/// Create a top-level error (without an ID).
pub fn error(error: RpcError) -> Self {
Message::Response(Response {
#[must_use]
pub const fn error(error: RpcError) -> Self {
Self::Response(Response {
jsonrpc: Version,
result: Err(error),
id: Value::Null,
@ -232,8 +246,9 @@ impl Message {
}
/// A constructor for a notification.
pub fn notification(method: String, params: Value) -> Self {
Message::Notification(Notification {
#[must_use]
pub const fn notification(method: String, params: Value) -> Self {
Self::Notification(Notification {
jsonrpc: Version,
method,
params,
@ -241,8 +256,9 @@ impl Message {
}
/// A constructor for a response.
pub fn response(id: Value, result: Result<Value, RpcError>) -> Self {
Message::Response(Response {
#[must_use]
pub const fn response(id: Value, result: Result<Value, RpcError>) -> Self {
Self::Response(Response {
jsonrpc: Version,
result,
id,
@ -250,18 +266,33 @@ impl Message {
}
/// Returns id or Null if there is no id.
#[must_use]
pub fn id(&self) -> Value {
match self {
Message::Request(req) => req.id.clone(),
_ => Value::Null,
Self::Request(req) => req.id.clone(),
Self::Response(response) => response.id.clone(),
Self::Notification(_) | Self::Batch(_) | Self::UnmatchedSub(_) => Value::Null,
}
}
}
impl From<Message> for String {
fn from(val: Message) -> Self {
::serde_json::ser::to_string(&val).expect("message serialization to json should not fail")
}
}
impl From<Message> for Vec<u8> {
fn from(val: Message) -> Self {
::serde_json::ser::to_vec(&val)
.expect("message serialization to json bytes should not fail")
}
}
/// A broken message.
///
/// Protocol-level errors.
#[derive(Debug, Clone, PartialEq, serde::Deserialize)]
#[derive(Debug, Clone, PartialEq, Eq, serde::Deserialize)]
#[serde(untagged)]
pub enum Broken {
/// It was valid JSON, but doesn't match the form of a JSONRPC 2.0 message.
@ -276,12 +307,13 @@ impl Broken {
///
/// The error message for these things are specified in the RFC, so this just creates an error
/// with the right values.
#[must_use]
pub fn reply(&self) -> Message {
match *self {
Broken::Unmatched(_) => Message::error(RpcError::parse_error(
match self {
Self::Unmatched(_) => Message::error(RpcError::parse_error(
"JSON RPC Request format was expected".to_owned(),
)),
Broken::SyntaxError(ref e) => Message::error(RpcError::parse_error(e.clone())),
Self::SyntaxError(e) => Message::error(RpcError::parse_error(e.clone())),
}
}
}
@ -303,8 +335,6 @@ pub fn decoded_to_parsed(res: JsonResult<WireMessage>) -> Parsed {
}
}
pub type Parsed = Result<Message, Broken>;
/// Read a [Message](enum.Message.html) from a slice.
///
/// Invalid JSON or JSONRPC messages are reported as [Broken](enum.Broken.html).
@ -319,16 +349,12 @@ pub fn from_str(s: &str) -> Parsed {
from_slice(s.as_bytes())
}
impl From<Message> for String {
fn from(val: Message) -> Self {
::serde_json::ser::to_string(&val).unwrap()
}
}
impl From<Message> for Vec<u8> {
fn from(val: Message) -> Self {
::serde_json::ser::to_vec(&val).unwrap()
}
/// Deserializer for `Option<Value>` that produces `Some(Value::Null)`.
///
/// The usual one produces None in that case. But we need to know the difference between
/// `{x: null}` and `{}`.
fn some_value<'de, D: Deserializer<'de>>(deserializer: D) -> Result<Option<Value>, D::Error> {
serde::Deserialize::deserialize(deserializer).map(Some)
}
#[cfg(test)]
@ -337,13 +363,12 @@ mod tests {
use super::*;
/// Test serialization and deserialization of the Message
/// Test serialization and deserialization of the Message.
///
/// We first deserialize it from a string. That way we check deserialization works.
/// But since serialization doesn't have to produce the exact same result (order, spaces, …),
/// we then serialize and deserialize the thing again and check it matches.
#[test]
#[allow(clippy::too_many_lines)]
fn message_serde() {
// A helper for running one message test
fn one(input: &str, expected: &Message) {
@ -463,11 +488,10 @@ mod tests {
///
/// Check that the given JSON string parses, but is not recognized as a valid RPC message.
///
/// Test things that are almost but not entirely JSONRPC are rejected
/// Test things that are almost but not entirely JSONRPC are rejected.
///
/// The reject is done by returning it as Unmatched.
#[test]
#[allow(clippy::panic)]
fn broken() {
// A helper with one test
fn one(input: &str) {
@ -491,19 +515,18 @@ mod tests {
// Something completely different
one(r#"{"x": [1, 2, 3]}"#);
match from_str(r#"{]"#) {
match from_str("{]") {
Err(Broken::SyntaxError(_)) => (),
other => panic!("Something unexpected: {other:?}"),
};
}
}
/// Test some non-trivial aspects of the constructors
/// Test some non-trivial aspects of the constructors.
///
/// This doesn't have a full coverage, because there's not much to actually test there.
/// Most of it is related to the ids.
#[test]
#[allow(clippy::panic)]
#[ignore]
#[ignore = "Not a full coverage test"]
fn constructors() {
let msg1 = Message::request("call".to_owned(), json!([1, 2, 3]));
let msg2 = Message::request("call".to_owned(), json!([1, 2, 3]));
@ -520,9 +543,9 @@ mod tests {
};
let id1 = req1.id.clone();
// When we answer a message, we get the same ID
if let Message::Response(ref resp) = req1.reply(json!([1, 2, 3])) {
if let Message::Response(resp) = req1.reply(json!([1, 2, 3])) {
assert_eq!(
*resp,
resp,
Response {
jsonrpc: Version,
result: Ok(json!([1, 2, 3])),
@ -534,11 +557,9 @@ mod tests {
}
let id2 = req2.id.clone();
// The same with an error
if let Message::Response(ref resp) =
req2.error(RpcError::new(42, "Wrong!".to_owned(), None))
{
if let Message::Response(resp) = req2.error(RpcError::new(42, "Wrong!".to_owned(), None)) {
assert_eq!(
*resp,
resp,
Response {
jsonrpc: Version,
result: Err(RpcError::new(42, "Wrong!".to_owned(), None)),
@ -549,11 +570,11 @@ mod tests {
panic!("Not a response");
}
// When we have unmatched, we generate a top-level error with Null id.
if let Message::Response(ref resp) =
if let Message::Response(resp) =
Message::error(RpcError::new(43, "Also wrong!".to_owned(), None))
{
assert_eq!(
*resp,
resp,
Response {
jsonrpc: Version,
result: Err(RpcError::new(43, "Also wrong!".to_owned(), None)),

View File

@ -30,7 +30,7 @@ pub struct RpcConfig {
impl Default for RpcConfig {
fn default() -> Self {
RpcConfig {
Self {
addr: "0.0.0.0:3040".to_owned(),
cors_allowed_origins: vec!["*".to_owned()],
limits_config: RpcLimitsConfig::default(),
@ -39,15 +39,17 @@ impl Default for RpcConfig {
}
impl RpcConfig {
#[must_use]
pub fn new(addr: &str) -> Self {
RpcConfig {
Self {
addr: addr.to_owned(),
..Default::default()
}
}
#[must_use]
pub fn with_port(port: u16) -> Self {
RpcConfig {
Self {
addr: format!("0.0.0.0:{port}"),
..Default::default()
}

View File

@ -3,18 +3,6 @@ use serde_json::Value;
use super::errors::RpcParseError;
pub trait RpcRequest: Sized {
fn parse(value: Option<Value>) -> Result<Self, RpcParseError>;
}
pub fn parse_params<T: DeserializeOwned>(value: Option<Value>) -> Result<T, RpcParseError> {
if let Some(value) = value {
serde_json::from_value(value)
.map_err(|err| RpcParseError(format!("Failed parsing args: {err}")))
} else {
Err(RpcParseError("Require at least one parameter".to_owned()))
}
}
#[macro_export]
macro_rules! parse_request {
($request_name:ty) => {
@ -25,3 +13,17 @@ macro_rules! parse_request {
}
};
}
pub trait RpcRequest: Sized {
fn parse(value: Option<Value>) -> Result<Self, RpcParseError>;
}
/// Deserialize the JSON-RPC `params` value into the requested type.
///
/// Returns an error when no parameters were supplied at all, or when the
/// supplied value does not deserialize into `T`.
pub fn parse_params<T: DeserializeOwned>(value: Option<Value>) -> Result<T, RpcParseError> {
    let value =
        value.ok_or_else(|| RpcParseError("Require at least one parameter".to_owned()))?;
    serde_json::from_value(value)
        .map_err(|err| RpcParseError(format!("Failed parsing args: {err}")))
}

View File

@ -11,8 +11,62 @@ use super::{
};
use crate::{HashType, parse_request};
/// Serde helpers for (de)serializing raw bytes as base64 strings.
///
/// Intended for use with `#[serde(with = "base64_deser")]` on `Vec<u8>`
/// fields, and `#[serde(with = "base64_deser::vec")]` on `Vec<Vec<u8>>`
/// fields. Uses the standard (padded) base64 alphabet.
mod base64_deser {
    use base64::{Engine as _, engine::general_purpose};
    use serde::{self, Deserialize, Deserializer, Serializer, ser::SerializeSeq as _};
    /// Helpers for a list of byte buffers, encoded as a JSON array of
    /// base64 strings.
    pub mod vec {
        use super::*;
        /// Serialize each inner buffer as a base64 string element of a sequence.
        pub fn serialize<S>(bytes_vec: &[Vec<u8>], serializer: S) -> Result<S::Ok, S::Error>
        where
            S: Serializer,
        {
            let mut seq = serializer.serialize_seq(Some(bytes_vec.len()))?;
            for bytes in bytes_vec {
                let s = general_purpose::STANDARD.encode(bytes);
                seq.serialize_element(&s)?;
            }
            seq.end()
        }
        /// Deserialize a sequence of base64 strings back into byte buffers.
        /// Fails on the first string that is not valid base64.
        pub fn deserialize<'de, D>(deserializer: D) -> Result<Vec<Vec<u8>>, D::Error>
        where
            D: Deserializer<'de>,
        {
            let base64_strings: Vec<String> = Deserialize::deserialize(deserializer)?;
            base64_strings
                .into_iter()
                .map(|s| {
                    general_purpose::STANDARD
                        .decode(&s)
                        .map_err(serde::de::Error::custom)
                })
                .collect()
        }
    }
    /// Serialize a single byte buffer as one base64 string.
    pub fn serialize<S>(bytes: &[u8], serializer: S) -> Result<S::Ok, S::Error>
    where
        S: Serializer,
    {
        let base64_string = general_purpose::STANDARD.encode(bytes);
        serializer.serialize_str(&base64_string)
    }
    /// Deserialize one base64 string into a byte buffer, surfacing decode
    /// failures as serde custom errors.
    pub fn deserialize<'de, D>(deserializer: D) -> Result<Vec<u8>, D::Error>
    where
        D: Deserializer<'de>,
    {
        let base64_string: String = Deserialize::deserialize(deserializer)?;
        general_purpose::STANDARD
            .decode(&base64_string)
            .map_err(serde::de::Error::custom)
    }
}
#[derive(Serialize, Deserialize, Debug)]
pub struct HelloRequest {}
pub struct HelloRequest;
#[derive(Serialize, Deserialize, Debug)]
pub struct RegisterAccountRequest {
@ -30,7 +84,7 @@ pub struct GetBlockDataRequest {
pub block_id: u64,
}
/// Get a range of blocks from `start_block_id` to `end_block_id` (inclusive)
/// Get a range of blocks from `start_block_id` to `end_block_id` (inclusive).
#[derive(Serialize, Deserialize, Debug)]
pub struct GetBlockRangeDataRequest {
pub start_block_id: u64,
@ -38,13 +92,13 @@ pub struct GetBlockRangeDataRequest {
}
#[derive(Serialize, Deserialize, Debug)]
pub struct GetGenesisIdRequest {}
pub struct GetGenesisIdRequest;
#[derive(Serialize, Deserialize, Debug)]
pub struct GetLastBlockRequest {}
pub struct GetLastBlockRequest;
#[derive(Serialize, Deserialize, Debug)]
pub struct GetInitialTestnetAccountsRequest {}
pub struct GetInitialTestnetAccountsRequest;
#[derive(Serialize, Deserialize, Debug)]
pub struct GetAccountBalanceRequest {
@ -72,7 +126,7 @@ pub struct GetProofForCommitmentRequest {
}
#[derive(Serialize, Deserialize, Debug)]
pub struct GetProgramIdsRequest {}
pub struct GetProgramIdsRequest;
parse_request!(HelloRequest);
parse_request!(RegisterAccountRequest);
@ -117,60 +171,6 @@ pub struct GetBlockRangeDataResponse {
pub blocks: Vec<Vec<u8>>,
}
mod base64_deser {
use base64::{Engine as _, engine::general_purpose};
use serde::{self, Deserialize, Deserializer, Serializer, ser::SerializeSeq as _};
pub fn serialize<S>(bytes: &[u8], serializer: S) -> Result<S::Ok, S::Error>
where
S: Serializer,
{
let base64_string = general_purpose::STANDARD.encode(bytes);
serializer.serialize_str(&base64_string)
}
pub fn deserialize<'de, D>(deserializer: D) -> Result<Vec<u8>, D::Error>
where
D: Deserializer<'de>,
{
let base64_string: String = Deserialize::deserialize(deserializer)?;
general_purpose::STANDARD
.decode(&base64_string)
.map_err(serde::de::Error::custom)
}
pub mod vec {
use super::*;
pub fn serialize<S>(bytes_vec: &[Vec<u8>], serializer: S) -> Result<S::Ok, S::Error>
where
S: Serializer,
{
let mut seq = serializer.serialize_seq(Some(bytes_vec.len()))?;
for bytes in bytes_vec {
let s = general_purpose::STANDARD.encode(bytes);
seq.serialize_element(&s)?;
}
seq.end()
}
pub fn deserialize<'de, D>(deserializer: D) -> Result<Vec<Vec<u8>>, D::Error>
where
D: Deserializer<'de>,
{
let base64_strings: Vec<String> = Deserialize::deserialize(deserializer)?;
base64_strings
.into_iter()
.map(|s| {
general_purpose::STANDARD
.decode(&s)
.map_err(serde::de::Error::custom)
})
.collect()
}
}
}
#[derive(Serialize, Deserialize, Debug)]
pub struct GetGenesisIdResponse {
pub genesis_id: u64,
@ -213,7 +213,7 @@ pub struct GetProgramIdsResponse {
#[derive(Debug, Serialize, Deserialize, Clone)]
pub struct GetInitialTestnetAccountsResponse {
/// Hex encoded account id
/// Hex encoded account id.
pub account_id: String,
pub balance: u64,
}

View File

@ -30,6 +30,15 @@ use crate::{
transaction::NSSATransaction,
};
/// JSON-RPC response envelope returned by the sequencer.
///
/// Only `result` is read by the RPC helper below; the protocol version tag
/// and the echoed request id are deserialized (via `serde(rename)`, since
/// the fields are underscore-prefixed) but intentionally unused.
#[derive(Debug, Clone, Deserialize)]
struct SequencerRpcResponse {
    // Wire name "jsonrpc"; unused after parsing.
    #[serde(rename = "jsonrpc")]
    _jsonrpc: String,
    // The actual RPC payload handed back to callers.
    result: serde_json::Value,
    // Wire name "id"; echoed request id, unused after parsing.
    #[serde(rename = "id")]
    _id: u64,
}
#[derive(Clone)]
pub struct SequencerClient {
pub client: reqwest::Client,
@ -61,7 +70,7 @@ impl SequencerClient {
payload: Value,
) -> Result<Value, SequencerClientError> {
let request =
rpc_primitives::message::Request::from_payload_version_2_0(method.to_string(), payload);
rpc_primitives::message::Request::from_payload_version_2_0(method.to_owned(), payload);
log::debug!(
"Calling method {method} with payload {request:?} to sequencer at {}",
@ -86,14 +95,6 @@ impl SequencerClient {
})
.await?;
#[derive(Debug, Clone, Deserialize)]
#[allow(dead_code)]
pub struct SequencerRpcResponse {
pub jsonrpc: String,
pub result: serde_json::Value,
pub id: u64,
}
if let Ok(response) = serde_json::from_value::<SequencerRpcResponse>(response_vall.clone())
{
Ok(response.result)
@ -104,7 +105,7 @@ impl SequencerClient {
}
}
/// Get block data at `block_id` from sequencer
/// Get block data at `block_id` from sequencer.
pub async fn get_block(
&self,
block_id: u64,
@ -140,7 +141,7 @@ impl SequencerClient {
Ok(resp_deser)
}
/// Get last known `blokc_id` from sequencer
/// Get last known `block_id` from sequencer.
pub async fn get_last_block(&self) -> Result<GetLastBlockResponse, SequencerClientError> {
let block_req = GetLastBlockRequest {};
@ -224,7 +225,7 @@ impl SequencerClient {
Ok(resp_deser)
}
/// Send transaction to sequencer
/// Send transaction to sequencer.
pub async fn send_tx_public(
&self,
transaction: nssa::PublicTransaction,
@ -244,7 +245,7 @@ impl SequencerClient {
Ok(resp_deser)
}
/// Send transaction to sequencer
/// Send transaction to sequencer.
pub async fn send_tx_private(
&self,
transaction: nssa::PrivacyPreservingTransaction,
@ -264,7 +265,7 @@ impl SequencerClient {
Ok(resp_deser)
}
/// Get genesis id from sequencer
/// Get genesis id from sequencer.
pub async fn get_genesis_id(&self) -> Result<GetGenesisIdResponse, SequencerClientError> {
let genesis_req = GetGenesisIdRequest {};
@ -280,7 +281,7 @@ impl SequencerClient {
Ok(resp_deser)
}
/// Get initial testnet accounts from sequencer
/// Get initial testnet accounts from sequencer.
pub async fn get_initial_testnet_accounts(
&self,
) -> Result<Vec<GetInitialTestnetAccountsResponse>, SequencerClientError> {
@ -298,7 +299,7 @@ impl SequencerClient {
Ok(resp_deser)
}
/// Get proof for commitment
/// Get proof for commitment.
pub async fn get_proof_for_commitment(
&self,
commitment: nssa_core::Commitment,
@ -338,7 +339,7 @@ impl SequencerClient {
Ok(resp_deser)
}
/// Get Ids of the programs used by the node
/// Get Ids of the programs used by the node.
pub async fn get_program_ids(
&self,
) -> Result<HashMap<String, ProgramId>, SequencerClientError> {

View File

@ -8,19 +8,21 @@ use crate::{
// Helpers
#[must_use]
pub fn sequencer_sign_key_for_testing() -> nssa::PrivateKey {
nssa::PrivateKey::try_new([37; 32]).unwrap()
}
// Dummy producers
/// Produce dummy block with
/// Produce a dummy block with the given parameters.
///
/// `id` - block id, provide zero for genesis
/// `id` - block id, provide zero for genesis.
///
/// `prev_hash` - hash of previous block, provide None for genesis
/// `prev_hash` - hash of previous block, provide None for genesis.
///
/// `transactions` - vector of `EncodedTransaction` objects
/// `transactions` - vector of `EncodedTransaction` objects.
#[must_use]
pub fn produce_dummy_block(
id: u64,
prev_hash: Option<HashType>,
@ -29,13 +31,14 @@ pub fn produce_dummy_block(
let block_data = HashableBlockData {
block_id: id,
prev_block_hash: prev_hash.unwrap_or_default(),
timestamp: id * 100,
timestamp: id.saturating_mul(100),
transactions,
};
block_data.into_pending_block(&sequencer_sign_key_for_testing(), [0; 32])
}
#[must_use]
pub fn produce_dummy_empty_transaction() -> NSSATransaction {
let program_id = nssa::program::Program::authenticated_transfer_program().id();
let account_ids = vec![];
@ -56,12 +59,13 @@ pub fn produce_dummy_empty_transaction() -> NSSATransaction {
NSSATransaction::Public(nssa_tx)
}
#[must_use]
pub fn create_transaction_native_token_transfer(
from: AccountId,
nonce: u128,
to: AccountId,
balance_to_move: u128,
signing_key: nssa::PrivateKey,
signing_key: &nssa::PrivateKey,
) -> NSSATransaction {
let account_ids = vec![from, to];
let nonces = vec![nonce];
@ -73,7 +77,7 @@ pub fn create_transaction_native_token_transfer(
balance_to_move,
)
.unwrap();
let witness_set = nssa::public_transaction::WitnessSet::for_message(&message, &[&signing_key]);
let witness_set = nssa::public_transaction::WitnessSet::for_message(&message, &[signing_key]);
let nssa_tx = nssa::PublicTransaction::new(message, witness_set);

View File

@ -13,19 +13,21 @@ pub enum NSSATransaction {
}
impl NSSATransaction {
#[must_use]
pub fn hash(&self) -> HashType {
HashType(match self {
NSSATransaction::Public(tx) => tx.hash(),
NSSATransaction::PrivacyPreserving(tx) => tx.hash(),
NSSATransaction::ProgramDeployment(tx) => tx.hash(),
Self::Public(tx) => tx.hash(),
Self::PrivacyPreserving(tx) => tx.hash(),
Self::ProgramDeployment(tx) => tx.hash(),
})
}
#[must_use]
pub fn affected_public_account_ids(&self) -> Vec<AccountId> {
match self {
NSSATransaction::ProgramDeployment(tx) => tx.affected_public_account_ids(),
NSSATransaction::Public(tx) => tx.affected_public_account_ids(),
NSSATransaction::PrivacyPreserving(tx) => tx.affected_public_account_ids(),
Self::ProgramDeployment(tx) => tx.affected_public_account_ids(),
Self::Public(tx) => tx.affected_public_account_ids(),
Self::PrivacyPreserving(tx) => tx.affected_public_account_ids(),
}
}
@ -33,21 +35,21 @@ impl NSSATransaction {
pub fn transaction_stateless_check(self) -> Result<Self, TransactionMalformationError> {
// Stateless checks here
match self {
NSSATransaction::Public(tx) => {
Self::Public(tx) => {
if tx.witness_set().is_valid_for(tx.message()) {
Ok(NSSATransaction::Public(tx))
Ok(Self::Public(tx))
} else {
Err(TransactionMalformationError::InvalidSignature)
}
}
NSSATransaction::PrivacyPreserving(tx) => {
Self::PrivacyPreserving(tx) => {
if tx.witness_set().signatures_are_valid_for(tx.message()) {
Ok(NSSATransaction::PrivacyPreserving(tx))
Ok(Self::PrivacyPreserving(tx))
} else {
Err(TransactionMalformationError::InvalidSignature)
}
}
NSSATransaction::ProgramDeployment(tx) => Ok(NSSATransaction::ProgramDeployment(tx)),
Self::ProgramDeployment(tx) => Ok(Self::ProgramDeployment(tx)),
}
}
@ -56,13 +58,9 @@ impl NSSATransaction {
state: &mut V02State,
) -> Result<Self, nssa::error::NssaError> {
match &self {
NSSATransaction::Public(tx) => state.transition_from_public_transaction(tx),
NSSATransaction::PrivacyPreserving(tx) => {
state.transition_from_privacy_preserving_transaction(tx)
}
NSSATransaction::ProgramDeployment(tx) => {
state.transition_from_program_deployment_transaction(tx)
}
Self::Public(tx) => state.transition_from_public_transaction(tx),
Self::PrivacyPreserving(tx) => state.transition_from_privacy_preserving_transaction(tx),
Self::ProgramDeployment(tx) => state.transition_from_program_deployment_transaction(tx),
}
.inspect_err(|err| warn!("Error at transition {err:#?}"))?;
@ -97,7 +95,7 @@ pub enum TxKind {
ProgramDeployment,
}
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize, thiserror::Error)]
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize, thiserror::Error)]
pub enum TransactionMalformationError {
#[error("Invalid signature(-s)")]
InvalidSignature,

View File

@ -1,6 +1,6 @@
# Wallet CLI Completion
Completion scripts for the LSSA `wallet` command.
Completion scripts for the LSSA `wallet` command.
## ZSH
@ -19,9 +19,9 @@ Preconfigured accounts and accounts only with `/` (no number) are not completed.
e.g.:
```
▶ wallet account list
Preconfigured Public/Gj1mJy5W7J5pfmLRujmQaLfLMWidNxQ6uwnhb666ZwHw,
Preconfigured Public/BLgCRDXYdQPMMWVHYRFGQZbgeHx9frkipa8GtpG2Syqy,
▶ wallet account list
Preconfigured Public/7wHg9sbJwc6h3NP1S9bekfAzB8CHifEcxKswCKUt3YQo,
Preconfigured Public/6iArKUXxhUJqS7kCaPNhwMWt3ro71PDyBj7jwAyE2VQV,
Preconfigured Private/3oCG8gqdKLMegw4rRfyaMQvuPHpcASt7xwttsmnZLSkw,
Preconfigured Private/AKTcXgJ1xoynta1Ec7y6Jso1z1JQtHqd7aPQ1h9er6xX,
/ Public/8DstRgMQrB2N9a7ymv98RDDbt8nctrP9ZzaNRSpKDZSu,
@ -118,9 +118,80 @@ wallet account get --account-id <TAB>
# Shows: Public/... Private/...
```
## Bash
Works with bash 4+. The `bash-completion` package is required for auto-sourcing from
`/etc/bash_completion.d/`; without it, source the file directly from `~/.bashrc` instead.
### Features
- Full completion for all wallet subcommands
- Contextual option completion for each command
- Dynamic account ID completion via `wallet account list`
- Falls back to `Public/` / `Private/` prefixes when no accounts are available
Note that only accounts created by the user auto-complete (same filtering as zsh — see above).
### Installation
#### Option A — source directly from `~/.bashrc` (works everywhere)
```sh
echo "source $(pwd)/completions/bash/wallet" >> ~/.bashrc
exec bash
```
#### Option B — system-wide via `bash-completion`
1. Copy the file:
```sh
cp ./bash/wallet /etc/bash_completion.d/wallet
```
2. Ensure `bash-completion` is initialised in every interactive shell. On many Linux
distributions (e.g. Fedora) it is only sourced for **login** shells via
`/etc/profile.d/bash_completion.sh`. For non-login shells (e.g. a bash session started
inside zsh), add this to `~/.bashrc`:
```sh
[[ -f /usr/share/bash-completion/bash_completion ]] && source /usr/share/bash-completion/bash_completion
```
3. Reload your shell:
```sh
exec bash
```
### Requirements
The completion script calls `wallet account list` to dynamically fetch account IDs. Ensure the `wallet` command is in your `$PATH`.
### Usage
```sh
# Main commands
wallet <TAB>
# Account subcommands
wallet account <TAB>
# Options for auth-transfer send
wallet auth-transfer send --<TAB>
# Account types when creating
wallet account new <TAB>
# Shows: public private
# Account IDs (fetched dynamically)
wallet account get --account-id <TAB>
# Shows: Public/... Private/...
```
## Troubleshooting
### Completions not appearing
### Zsh completions not appearing
1. Check that `compinit` is called in your `.zshrc`
2. Rebuild the completion cache:

382
completions/bash/wallet Normal file
View File

@ -0,0 +1,382 @@
#!/usr/bin/env bash
# Bash completion script for the wallet CLI
# See instructions in ../README.md
# Helper function to complete account IDs
# Uses `wallet account list` to get available accounts
# Only includes accounts with /N prefix (where N is a number)
# Fill COMPREPLY with account IDs matching the word under the cursor.
#
# IDs come from `wallet account list`, keeping only lines that begin with
# "/<digit>" (user-created accounts; preconfigured ones are excluded).
# When no accounts can be fetched, fall back to the bare "Public/" and
# "Private/" prefixes and suppress the trailing space so typing can continue.
_wallet_complete_account_id() {
    local word="$1"
    local ids=""

    if command -v wallet &>/dev/null; then
        ids="$(wallet account list 2>/dev/null | grep '^/[0-9]' | awk '{print $2}' | tr -d ',')"
    fi

    if [[ -z "$ids" ]]; then
        COMPREPLY=($(compgen -W "Public/ Private/" -- "$word"))
        compopt -o nospace 2>/dev/null
    else
        COMPREPLY=($(compgen -W "$ids" -- "$word"))
    fi
}
# Top-level completion entry point for the `wallet` CLI.
#
# Identifies the main command and subcommand typed so far, then completes
# subcommands, contextual options, config keys, file names, and (via
# _wallet_complete_account_id) dynamically fetched account IDs.
#
# Fix: removed the `cmd_idx` local, which was assigned but never read.
_wallet() {
    local cur prev words cword
    # Prefer bash-completion's initializer; fall back to a manual setup when
    # the bash-completion package is not available.
    _init_completion 2>/dev/null || {
        COMPREPLY=()
        cur="${COMP_WORDS[COMP_CWORD]}"
        prev="${COMP_WORDS[COMP_CWORD-1]}"
        words=("${COMP_WORDS[@]}")
        cword=$COMP_CWORD
    }

    local commands="auth-transfer chain-info account pinata token amm check-health config restore-keys deploy-program help"

    # Find the main command and subcommand by scanning words before the cursor.
    # Global options that take a value are skipped along with their argument.
    local cmd="" subcmd=""
    local subcmd_idx=0
    local i
    for ((i = 1; i < cword; i++)); do
        local w="${words[$i]}"
        case "$w" in
            --auth)
                ((i++)) # skip the auth value
                ;;
            -c | --continuous-run)
                # boolean flag, no value
                ;;
            -*)
                # unrecognised option, skip
                ;;
            *)
                if [[ -z "$cmd" ]]; then
                    cmd="$w"
                elif [[ -z "$subcmd" ]]; then
                    subcmd="$w"
                    subcmd_idx=$i
                fi
                ;;
        esac
    done

    local config_keys="override_rust_log sequencer_addr seq_poll_timeout seq_tx_poll_max_blocks seq_poll_max_retries seq_block_poll_max_amount initial_accounts basic_auth"

    case "$cmd" in
        "")
            # Completing the main command or a global option
            if [[ "$prev" == "--auth" ]]; then
                return # completing the --auth value; no suggestions
            fi
            case "$cur" in
                -*)
                    COMPREPLY=($(compgen -W "-c --continuous-run --auth" -- "$cur"))
                    ;;
                *)
                    COMPREPLY=($(compgen -W "$commands" -- "$cur"))
                    ;;
            esac
            ;;
        auth-transfer)
            case "$subcmd" in
                "")
                    COMPREPLY=($(compgen -W "init send help" -- "$cur"))
                    ;;
                init)
                    case "$prev" in
                        --account-id)
                            _wallet_complete_account_id "$cur"
                            ;;
                        *)
                            COMPREPLY=($(compgen -W "--account-id" -- "$cur"))
                            ;;
                    esac
                    ;;
                send)
                    case "$prev" in
                        --from | --to)
                            _wallet_complete_account_id "$cur"
                            ;;
                        --to-npk | --to-vpk | --amount)
                            ;; # no specific completion
                        *)
                            COMPREPLY=($(compgen -W "--from --to --to-npk --to-vpk --amount" -- "$cur"))
                            ;;
                    esac
                    ;;
            esac
            ;;
        chain-info)
            case "$subcmd" in
                "")
                    COMPREPLY=($(compgen -W "current-block-id block transaction help" -- "$cur"))
                    ;;
                block)
                    case "$prev" in
                        -i | --id)
                            ;; # no specific completion for block ID
                        *)
                            COMPREPLY=($(compgen -W "-i --id" -- "$cur"))
                            ;;
                    esac
                    ;;
                transaction)
                    case "$prev" in
                        -t | --hash)
                            ;; # no specific completion for tx hash
                        *)
                            COMPREPLY=($(compgen -W "-t --hash" -- "$cur"))
                            ;;
                    esac
                    ;;
            esac
            ;;
        account)
            case "$subcmd" in
                "")
                    COMPREPLY=($(compgen -W "get new sync-private list ls label help" -- "$cur"))
                    ;;
                get)
                    case "$prev" in
                        -a | --account-id)
                            _wallet_complete_account_id "$cur"
                            ;;
                        *)
                            COMPREPLY=($(compgen -W "-r --raw -k --keys -a --account-id" -- "$cur"))
                            ;;
                    esac
                    ;;
                list | ls)
                    COMPREPLY=($(compgen -W "-l --long" -- "$cur"))
                    ;;
                sync-private)
                    ;; # no options
                new)
                    # `account new` is itself a subcommand: public | private
                    local new_subcmd=""
                    for ((i = subcmd_idx + 1; i < cword; i++)); do
                        case "${words[$i]}" in
                            public | private)
                                new_subcmd="${words[$i]}"
                                break
                                ;;
                        esac
                    done
                    if [[ -z "$new_subcmd" ]]; then
                        COMPREPLY=($(compgen -W "public private" -- "$cur"))
                    else
                        case "$prev" in
                            --cci | -l | --label)
                                ;; # no specific completion
                            *)
                                COMPREPLY=($(compgen -W "--cci -l --label" -- "$cur"))
                                ;;
                        esac
                    fi
                    ;;
                label)
                    case "$prev" in
                        -a | --account-id)
                            _wallet_complete_account_id "$cur"
                            ;;
                        -l | --label)
                            ;; # no specific completion for label value
                        *)
                            COMPREPLY=($(compgen -W "-a --account-id -l --label" -- "$cur"))
                            ;;
                    esac
                    ;;
            esac
            ;;
        pinata)
            case "$subcmd" in
                "")
                    COMPREPLY=($(compgen -W "claim help" -- "$cur"))
                    ;;
                claim)
                    case "$prev" in
                        --to)
                            _wallet_complete_account_id "$cur"
                            ;;
                        *)
                            COMPREPLY=($(compgen -W "--to" -- "$cur"))
                            ;;
                    esac
                    ;;
            esac
            ;;
        token)
            case "$subcmd" in
                "")
                    COMPREPLY=($(compgen -W "new send burn mint help" -- "$cur"))
                    ;;
                new)
                    case "$prev" in
                        --definition-account-id | --supply-account-id)
                            _wallet_complete_account_id "$cur"
                            ;;
                        -n | --name | -t | --total-supply)
                            ;; # no specific completion
                        *)
                            COMPREPLY=($(compgen -W "--definition-account-id --supply-account-id -n --name -t --total-supply" -- "$cur"))
                            ;;
                    esac
                    ;;
                send)
                    case "$prev" in
                        --from | --to)
                            _wallet_complete_account_id "$cur"
                            ;;
                        --to-npk | --to-vpk | --amount)
                            ;; # no specific completion
                        *)
                            COMPREPLY=($(compgen -W "--from --to --to-npk --to-vpk --amount" -- "$cur"))
                            ;;
                    esac
                    ;;
                burn)
                    case "$prev" in
                        --definition | --holder)
                            _wallet_complete_account_id "$cur"
                            ;;
                        --amount)
                            ;; # no specific completion
                        *)
                            COMPREPLY=($(compgen -W "--definition --holder --amount" -- "$cur"))
                            ;;
                    esac
                    ;;
                mint)
                    case "$prev" in
                        --definition | --holder)
                            _wallet_complete_account_id "$cur"
                            ;;
                        --holder-npk | --holder-vpk | --amount)
                            ;; # no specific completion
                        *)
                            COMPREPLY=($(compgen -W "--definition --holder --holder-npk --holder-vpk --amount" -- "$cur"))
                            ;;
                    esac
                    ;;
            esac
            ;;
        amm)
            case "$subcmd" in
                "")
                    COMPREPLY=($(compgen -W "new swap add-liquidity remove-liquidity help" -- "$cur"))
                    ;;
                new)
                    case "$prev" in
                        --user-holding-a | --user-holding-b | --user-holding-lp)
                            _wallet_complete_account_id "$cur"
                            ;;
                        --balance-a | --balance-b)
                            ;; # no specific completion
                        *)
                            COMPREPLY=($(compgen -W "--user-holding-a --user-holding-b --user-holding-lp --balance-a --balance-b" -- "$cur"))
                            ;;
                    esac
                    ;;
                swap)
                    case "$prev" in
                        --user-holding-a | --user-holding-b)
                            _wallet_complete_account_id "$cur"
                            ;;
                        --amount-in | --min-amount-out | --token-definition)
                            ;; # no specific completion
                        *)
                            COMPREPLY=($(compgen -W "--user-holding-a --user-holding-b --amount-in --min-amount-out --token-definition" -- "$cur"))
                            ;;
                    esac
                    ;;
                add-liquidity)
                    case "$prev" in
                        --user-holding-a | --user-holding-b | --user-holding-lp)
                            _wallet_complete_account_id "$cur"
                            ;;
                        --max-amount-a | --max-amount-b | --min-amount-lp)
                            ;; # no specific completion
                        *)
                            COMPREPLY=($(compgen -W "--user-holding-a --user-holding-b --user-holding-lp --max-amount-a --max-amount-b --min-amount-lp" -- "$cur"))
                            ;;
                    esac
                    ;;
                remove-liquidity)
                    case "$prev" in
                        --user-holding-a | --user-holding-b | --user-holding-lp)
                            _wallet_complete_account_id "$cur"
                            ;;
                        --balance-lp | --min-amount-a | --min-amount-b)
                            ;; # no specific completion
                        *)
                            COMPREPLY=($(compgen -W "--user-holding-a --user-holding-b --user-holding-lp --balance-lp --min-amount-a --min-amount-b" -- "$cur"))
                            ;;
                    esac
                    ;;
            esac
            ;;
        config)
            case "$subcmd" in
                "")
                    COMPREPLY=($(compgen -W "get set description help" -- "$cur"))
                    ;;
                get)
                    # Accepts optional -a/--all flag and an optional positional key
                    COMPREPLY=($(compgen -W "--all -a $config_keys" -- "$cur"))
                    ;;
                set)
                    # set <key> <value> — only complete the key; no completion for the value
                    local set_args=0
                    for ((i = subcmd_idx + 1; i < cword; i++)); do
                        [[ "${words[$i]}" != -* ]] && ((set_args++))
                    done
                    if [[ $set_args -eq 0 ]]; then
                        COMPREPLY=($(compgen -W "$config_keys" -- "$cur"))
                    fi
                    ;;
                description)
                    # description <key> — only complete if no key provided yet
                    local has_key=false
                    for ((i = subcmd_idx + 1; i < cword; i++)); do
                        [[ "${words[$i]}" != -* ]] && has_key=true && break
                    done
                    if ! $has_key; then
                        COMPREPLY=($(compgen -W "$config_keys" -- "$cur"))
                    fi
                    ;;
            esac
            ;;
        restore-keys)
            case "$prev" in
                -d | --depth)
                    ;; # no specific completion for depth value
                *)
                    COMPREPLY=($(compgen -W "-d --depth" -- "$cur"))
                    ;;
            esac
            ;;
        deploy-program)
            # Positional argument is a path to the program binary
            COMPREPLY=($(compgen -f -- "$cur"))
            compopt -o filenames 2>/dev/null
            ;;
        help)
            COMPREPLY=($(compgen -W "$commands" -- "$cur"))
            ;;
    esac
}
# Register the _wallet function as the completion handler for `wallet`.
complete -F _wallet wallet

View File

@ -181,7 +181,8 @@ _wallet_account() {
;;
new_args)
_arguments \
'--cci[Chain index of a parent node]:chain_index:'
'--cci[Chain index of a parent node]:chain_index:' \
'(-l --label)'{-l,--label}'[Label to assign to the new account]:label:'
;;
esac
;;
@ -343,7 +344,6 @@ _wallet_config() {
local -a config_keys
config_keys=(
'all'
'override_rust_log'
'sequencer_addr'
'seq_poll_timeout'
@ -370,7 +370,12 @@ _wallet_config() {
;;
args)
case $line[1] in
get|description)
get)
_arguments \
'(-a --all)'{-a,--all}'[Print all config fields]' \
'::key:compadd -a config_keys'
;;
description)
compadd -a config_keys
;;
set)

View File

@ -1,6 +1,6 @@
{
"home": "./indexer/service",
"consensus_info_polling_interval": "60s",
"consensus_info_polling_interval": "1s",
"bedrock_client_config": {
"addr": "http://logos-blockchain-node-0:18080",
"backoff": {
@ -11,50 +11,50 @@
"channel_id": "0101010101010101010101010101010101010101010101010101010101010101",
"initial_accounts": [
{
"account_id": "BLgCRDXYdQPMMWVHYRFGQZbgeHx9frkipa8GtpG2Syqy",
"account_id": "6iArKUXxhUJqS7kCaPNhwMWt3ro71PDyBj7jwAyE2VQV",
"balance": 10000
},
{
"account_id": "Gj1mJy5W7J5pfmLRujmQaLfLMWidNxQ6uwnhb666ZwHw",
"account_id": "7wHg9sbJwc6h3NP1S9bekfAzB8CHifEcxKswCKUt3YQo",
"balance": 20000
}
],
"initial_commitments": [
{
"npk": [
63,
202,
178,
231,
183,
82,
237,
212,
216,
221,
215,
255,
153,
101,
"npk": [
177,
161,
254,
210,
128,
122,
54,
190,
230,
151,
183,
64,
225,
229,
113,
1,
228,
97
],
11,
87,
38,
254,
159,
231,
165,
1,
94,
64,
137,
243,
76,
249,
101,
251,
129,
33,
101,
189,
30,
42,
11,
191,
34,
103,
186,
227,
230
],
"account": {
"program_owner": [
0,
@ -73,38 +73,38 @@
},
{
"npk": [
192,
251,
166,
243,
167,
236,
84,
249,
35,
32,
67,
72,
164,
106,
53,
66,
239,
141,
15,
52,
230,
136,
130,
172,
219,
225,
161,
139,
229,
89,
177,
2,
236,
207,
243,
125,
134,
135,
210,
143,
87,
232,
215,
128,
194,
213,
209,
30,
23,
174,
100,
244,
124,
74,
140,
47
120,
113,
224,
4,
165
],
"account": {
"program_owner": [

View File

@ -20,17 +20,50 @@
"indexer_rpc_url": "ws://indexer_service:8779",
"initial_accounts": [
{
"account_id": "BLgCRDXYdQPMMWVHYRFGQZbgeHx9frkipa8GtpG2Syqy",
"account_id": "6iArKUXxhUJqS7kCaPNhwMWt3ro71PDyBj7jwAyE2VQV",
"balance": 10000
},
{
"account_id": "Gj1mJy5W7J5pfmLRujmQaLfLMWidNxQ6uwnhb666ZwHw",
"account_id": "7wHg9sbJwc6h3NP1S9bekfAzB8CHifEcxKswCKUt3YQo",
"balance": 20000
}
],
"initial_commitments": [
{
"npk": [13, 25, 40, 5, 198, 248, 210, 248, 237, 121, 124, 145, 186, 142, 253, 216, 236, 69, 193, 32, 166, 167, 49, 133, 172, 111, 159, 46, 84, 17, 157, 23],
"npk": [
177,
64,
1,
11,
87,
38,
254,
159,
231,
165,
1,
94,
64,
137,
243,
76,
249,
101,
251,
129,
33,
101,
189,
30,
42,
11,
191,
34,
103,
186,
227,
230
],
"account": {
"program_owner": [
0,
@ -48,7 +81,40 @@
}
},
{
"npk": [32, 67, 72, 164, 106, 53, 66, 239, 141, 15, 52, 230, 136, 177, 2, 236, 207, 243, 134, 135, 210, 143, 87, 232, 215, 128, 194, 120, 113, 224, 4, 165],
"npk": [
32,
67,
72,
164,
106,
53,
66,
239,
141,
15,
52,
230,
136,
177,
2,
236,
207,
243,
134,
135,
210,
143,
87,
232,
215,
128,
194,
120,
113,
224,
4,
165
],
"account": {
"program_owner": [
0,

View File

@ -4,6 +4,9 @@ version = "0.1.0"
edition = "2024"
license = { workspace = true }
[lints]
workspace = true
[dependencies]
nssa.workspace = true
nssa_core.workspace = true

View File

@ -4,6 +4,9 @@ version = "0.1.0"
edition = "2024"
license = { workspace = true }
[lints]
workspace = true
[build-dependencies]
risc0-build.workspace = true

View File

@ -4,6 +4,9 @@ version = "0.1.0"
edition = "2024"
license = { workspace = true }
[lints]
workspace = true
[dependencies]
nssa_core.workspace = true

View File

@ -36,9 +36,7 @@ fn main() {
// Fail if the input account is not authorized
// The `is_authorized` field will be correctly populated or verified by the system if
// authorization is provided.
if !pre_state.is_authorized {
panic!("Missing required authorization");
}
assert!(pre_state.is_authorized, "Missing required authorization");
// ####
// Construct the post state account values

View File

@ -1,5 +1,5 @@
use nssa_core::{
account::{Account, AccountWithMetadata},
account::{Account, AccountWithMetadata, Data},
program::{
AccountPostState, DEFAULT_PROGRAM_ID, ProgramInput, read_nssa_inputs, write_nssa_outputs,
},
@ -21,10 +21,11 @@ use nssa_core::{
// In case an input account is uninitialized, the program will claim it when
// producing the post-state.
type Instruction = (u8, Vec<u8>);
const WRITE_FUNCTION_ID: u8 = 0;
const MOVE_DATA_FUNCTION_ID: u8 = 1;
type Instruction = (u8, Vec<u8>);
fn build_post_state(post_account: Account) -> AccountPostState {
if post_account.program_owner == DEFAULT_PROGRAM_ID {
// This produces a claim request
@ -35,12 +36,12 @@ fn build_post_state(post_account: Account) -> AccountPostState {
}
}
fn write(pre_state: AccountWithMetadata, greeting: Vec<u8>) -> AccountPostState {
fn write(pre_state: AccountWithMetadata, greeting: &[u8]) -> AccountPostState {
// Construct the post state account values
let post_account = {
let mut this = pre_state.account.clone();
let mut this = pre_state.account;
let mut bytes = this.data.into_inner();
bytes.extend_from_slice(&greeting);
bytes.extend_from_slice(greeting);
this.data = bytes
.try_into()
.expect("Data should fit within the allowed limits");
@ -50,21 +51,18 @@ fn write(pre_state: AccountWithMetadata, greeting: Vec<u8>) -> AccountPostState
build_post_state(post_account)
}
fn move_data(
from_pre: &AccountWithMetadata,
to_pre: &AccountWithMetadata,
) -> Vec<AccountPostState> {
fn move_data(from_pre: AccountWithMetadata, to_pre: AccountWithMetadata) -> Vec<AccountPostState> {
// Construct the post state account values
let from_data: Vec<u8> = from_pre.account.data.clone().into();
let from_post = {
let mut this = from_pre.account.clone();
this.data = Default::default();
let mut this = from_pre.account;
this.data = Data::default();
build_post_state(this)
};
let to_post = {
let mut this = to_pre.account.clone();
let mut this = to_pre.account;
let mut bytes = this.data.into_inner();
bytes.extend_from_slice(&from_data);
this.data = bytes
@ -88,11 +86,11 @@ fn main() {
let post_states = match (pre_states.as_slice(), function_id, data.len()) {
([account_pre], WRITE_FUNCTION_ID, _) => {
let post = write(account_pre.clone(), data);
let post = write(account_pre.clone(), &data);
vec![post]
}
([account_from_pre, account_to_pre], MOVE_DATA_FUNCTION_ID, 0) => {
move_data(account_from_pre, account_to_pre)
move_data(account_from_pre.clone(), account_to_pre.clone())
}
_ => panic!("invalid params"),
};

View File

@ -29,7 +29,7 @@ fn main() {
let (
ProgramInput {
pre_states,
instruction: _,
instruction: (),
},
instruction_data,
) = read_nssa_inputs::<()>();

View File

@ -34,14 +34,13 @@ fn main() {
let (
ProgramInput {
pre_states,
instruction: _,
instruction: (),
},
instruction_data,
) = read_nssa_inputs::<()>();
// Unpack the input account pre state
let [pre_state] = pre_states
.clone()
.try_into()
.unwrap_or_else(|_| panic!("Input pre states should consist of a single account"));

View File

@ -48,7 +48,7 @@ async fn main() {
let hello_world_bytecode: Vec<u8> = std::fs::read(hello_world_path).unwrap();
let hello_world = Program::new(hello_world_bytecode).unwrap();
let dependencies: HashMap<ProgramId, Program> =
[(hello_world.id(), hello_world)].into_iter().collect();
std::iter::once((hello_world.id(), hello_world)).collect();
let program_with_dependencies = ProgramWithDependencies::new(simple_tail_call, dependencies);
let accounts = vec![PrivacyPreservingAccount::PrivateOwned(account_id)];

View File

@ -1,3 +1,8 @@
#![expect(
clippy::print_stdout,
reason = "This is an example program, it's fine to print to stdout"
)]
use nssa::{
AccountId, PublicTransaction,
program::Program,

View File

@ -19,13 +19,14 @@ use wallet::{PrivacyPreservingAccount, WalletCore};
// methods/guest/target/riscv32im-risc0-zkvm-elf/docker/hello_world_with_move_function.bin \
// write-public Ds8q5PjLcKwwV97Zi7duhRVF9uwA2PuYMoLL7FwCzsXE Hola
type Instruction = (u8, Vec<u8>);
const WRITE_FUNCTION_ID: u8 = 0;
const MOVE_DATA_FUNCTION_ID: u8 = 1;
type Instruction = (u8, Vec<u8>);
#[derive(Parser, Debug)]
struct Cli {
/// Path to program binary
/// Path to program binary.
program_path: String,
#[command(subcommand)]
@ -34,7 +35,7 @@ struct Cli {
#[derive(Subcommand, Debug)]
enum Command {
/// Write instruction into one account
/// Write instruction into one account.
WritePublic {
account_id: String,
greeting: String,
@ -43,7 +44,7 @@ enum Command {
account_id: String,
greeting: String,
},
/// Move data between two accounts
/// Move data between two accounts.
MoveDataPublicToPublic {
from: String,
to: String,
@ -148,5 +149,5 @@ async fn main() {
.await
.unwrap();
}
};
}
}

View File

@ -4,6 +4,9 @@ version = "0.1.0"
edition = "2024"
license.workspace = true
[lints]
workspace = true
[lib]
crate-type = ["cdylib", "rlib"]

View File

@ -1,4 +1,4 @@
FROM rust:1.91.1-trixie AS builder
FROM rust:1.94.0-trixie AS builder
# Install cargo-binstall, which makes it easier to install other
# cargo extensions like cargo-leptos

View File

@ -2,7 +2,7 @@ use indexer_service_protocol::{Account, AccountId, Block, BlockId, HashType, Tra
use leptos::prelude::*;
use serde::{Deserialize, Serialize};
/// Search results structure
/// Search results structure.
#[derive(Clone, Debug, Serialize, Deserialize)]
pub struct SearchResults {
pub blocks: Vec<Block>,
@ -10,7 +10,7 @@ pub struct SearchResults {
pub accounts: Vec<(AccountId, Account)>,
}
/// RPC client type
/// RPC client type.
#[cfg(feature = "ssr")]
pub type IndexerRpcClient = jsonrpsee::http_client::HttpClient;
@ -22,7 +22,7 @@ pub async fn get_account(account_id: AccountId) -> Result<Account, ServerFnError
client
.get_account(account_id)
.await
.map_err(|e| ServerFnError::ServerError(format!("RPC error: {}", e)))
.map_err(|e| ServerFnError::ServerError(format!("RPC error: {e}")))
}
/// Search for a block, transaction, or account by query string
@ -80,7 +80,7 @@ pub async fn get_block_by_id(block_id: BlockId) -> Result<Block, ServerFnError>
client
.get_block_by_id(block_id)
.await
.map_err(|e| ServerFnError::ServerError(format!("RPC error: {}", e)))
.map_err(|e| ServerFnError::ServerError(format!("RPC error: {e}")))
}
/// Get latest block ID
@ -91,7 +91,7 @@ pub async fn get_latest_block_id() -> Result<BlockId, ServerFnError> {
client
.get_last_finalized_block_id()
.await
.map_err(|e| ServerFnError::ServerError(format!("RPC error: {}", e)))
.map_err(|e| ServerFnError::ServerError(format!("RPC error: {e}")))
}
/// Get block by hash
@ -102,7 +102,7 @@ pub async fn get_block_by_hash(block_hash: HashType) -> Result<Block, ServerFnEr
client
.get_block_by_hash(block_hash)
.await
.map_err(|e| ServerFnError::ServerError(format!("RPC error: {}", e)))
.map_err(|e| ServerFnError::ServerError(format!("RPC error: {e}")))
}
/// Get transaction by hash
@ -113,36 +113,36 @@ pub async fn get_transaction(tx_hash: HashType) -> Result<Transaction, ServerFnE
client
.get_transaction(tx_hash)
.await
.map_err(|e| ServerFnError::ServerError(format!("RPC error: {}", e)))
.map_err(|e| ServerFnError::ServerError(format!("RPC error: {e}")))
}
/// Get blocks with pagination
#[server]
pub async fn get_blocks(offset: u32, limit: u32) -> Result<Vec<Block>, ServerFnError> {
pub async fn get_blocks(before: Option<BlockId>, limit: u64) -> Result<Vec<Block>, ServerFnError> {
use indexer_service_rpc::RpcClient as _;
let client = expect_context::<IndexerRpcClient>();
client
.get_blocks(offset, limit)
.get_blocks(before, limit)
.await
.map_err(|e| ServerFnError::ServerError(format!("RPC error: {}", e)))
.map_err(|e| ServerFnError::ServerError(format!("RPC error: {e}")))
}
/// Get transactions by account
#[server]
pub async fn get_transactions_by_account(
account_id: AccountId,
limit: u32,
offset: u32,
offset: u64,
limit: u64,
) -> Result<Vec<Transaction>, ServerFnError> {
use indexer_service_rpc::RpcClient as _;
let client = expect_context::<IndexerRpcClient>();
client
.get_transactions_by_account(account_id, limit, offset)
.get_transactions_by_account(account_id, offset, limit)
.await
.map_err(|e| ServerFnError::ServerError(format!("RPC error: {}", e)))
.map_err(|e| ServerFnError::ServerError(format!("RPC error: {e}")))
}
/// Create the RPC client for the indexer service (server-side only)
/// Create the RPC client for the indexer service (server-side only).
#[cfg(feature = "ssr")]
pub fn create_indexer_rpc_client(url: &url::Url) -> Result<IndexerRpcClient, String> {
use jsonrpsee::http_client::HttpClientBuilder;

View File

@ -4,8 +4,8 @@ use leptos_router::components::A;
use crate::format_utils;
/// Get CSS class for bedrock status
fn status_class(status: &BedrockStatus) -> &'static str {
/// Get CSS class for bedrock status.
const fn status_class(status: &BedrockStatus) -> &'static str {
match status {
BedrockStatus::Pending => "status-pending",
BedrockStatus::Safe => "status-safe",

View File

@ -1,7 +1,7 @@
pub mod account_preview;
pub mod block_preview;
pub mod transaction_preview;
pub use account_preview::AccountPreview;
pub use block_preview::BlockPreview;
pub use transaction_preview::TransactionPreview;
pub mod account_preview;
pub mod block_preview;
pub mod transaction_preview;

View File

@ -2,8 +2,8 @@ use indexer_service_protocol::Transaction;
use leptos::prelude::*;
use leptos_router::components::A;
/// Get transaction type name and CSS class
fn transaction_type_info(tx: &Transaction) -> (&'static str, &'static str) {
/// Get transaction type name and CSS class.
const fn transaction_type_info(tx: &Transaction) -> (&'static str, &'static str) {
match tx {
Transaction::Public(_) => ("Public", "tx-type-public"),
Transaction::PrivacyPreserving(_) => ("Privacy-Preserving", "tx-type-private"),
@ -13,6 +13,10 @@ fn transaction_type_info(tx: &Transaction) -> (&'static str, &'static str) {
/// Transaction preview component
#[component]
#[expect(
clippy::needless_pass_by_value,
reason = "Leptos component props are passed by value by framework convention"
)]
pub fn TransactionPreview(transaction: Transaction) -> impl IntoView {
let hash = transaction.hash();
let hash_str = hash.to_string();

View File

@ -1,9 +1,17 @@
//! Formatting utilities for the explorer
//! Formatting utilities for the explorer.
/// Format timestamp to human-readable string
/// Format timestamp to human-readable string.
#[expect(
clippy::integer_division,
clippy::integer_division_remainder_used,
reason = "We need to convert milliseconds to seconds, and this is the most straightforward way to do it"
)]
pub fn format_timestamp(timestamp: u64) -> String {
let seconds = timestamp / 1000;
let datetime = chrono::DateTime::from_timestamp(seconds as i64, 0)
.unwrap_or_else(|| chrono::DateTime::from_timestamp(0, 0).unwrap());
let datetime = chrono::DateTime::from_timestamp(
i64::try_from(seconds).expect("Timestamp out of range"),
0,
)
.unwrap_or_else(|| chrono::DateTime::from_timestamp(0, 0).unwrap());
datetime.format("%Y-%m-%d %H:%M:%S UTC").to_string()
}

View File

@ -1,3 +1,9 @@
#![expect(
clippy::must_use_candidate,
clippy::same_name_method,
reason = "Warns on code generated by leptos macros"
)]
use leptos::prelude::*;
use leptos_meta::{Meta, Stylesheet, Title, provide_meta_context};
use leptos_router::{

View File

@ -1,3 +1,7 @@
#[expect(
clippy::print_stdout,
reason = "This is just simple and handy for such a small server"
)]
#[cfg(feature = "ssr")]
#[tokio::main]
async fn main() {
@ -5,20 +9,20 @@ async fn main() {
use clap::Parser;
use explorer_service::App;
use leptos::prelude::*;
use leptos_axum::{LeptosRoutes, generate_route_list};
use leptos_axum::{LeptosRoutes as _, generate_route_list};
use leptos_meta::MetaTags;
env_logger::init();
/// LEZ Block Explorer Server CLI arguments.
#[derive(Parser, Debug)]
#[command(version, about, long_about = None)]
struct Args {
/// Indexer RPC URL
/// Indexer RPC URL.
#[arg(long, env = "INDEXER_RPC_URL", default_value = "http://localhost:8779")]
indexer_rpc_url: url::Url,
}
env_logger::init();
let args = Args::parse();
let conf = get_configuration(None).unwrap();

View File

@ -10,11 +10,11 @@ use crate::{api, components::TransactionPreview};
#[component]
pub fn AccountPage() -> impl IntoView {
let params = use_params_map();
let (tx_offset, set_tx_offset) = signal(0u32);
let (tx_offset, set_tx_offset) = signal(0_u64);
let (all_transactions, set_all_transactions) = signal(Vec::new());
let (is_loading, set_is_loading) = signal(false);
let (has_more, set_has_more) = signal(true);
let tx_limit = 10u32;
let tx_limit = 10_u64;
// Parse account ID from URL params
let account_id = move || {
@ -27,7 +27,7 @@ pub fn AccountPage() -> impl IntoView {
match acc_id_opt {
Some(acc_id) => api::get_account(acc_id).await,
None => Err(leptos::prelude::ServerFnError::ServerError(
"Invalid account ID".to_string(),
"Invalid account ID".to_owned(),
)),
}
});
@ -35,9 +35,9 @@ pub fn AccountPage() -> impl IntoView {
// Load initial transactions
let transactions_resource = Resource::new(account_id, move |acc_id_opt| async move {
match acc_id_opt {
Some(acc_id) => api::get_transactions_by_account(acc_id, tx_limit, 0).await,
Some(acc_id) => api::get_transactions_by_account(acc_id, 0, tx_limit).await,
None => Err(leptos::prelude::ServerFnError::ServerError(
"Invalid account ID".to_string(),
"Invalid account ID".to_owned(),
)),
}
});
@ -46,7 +46,9 @@ pub fn AccountPage() -> impl IntoView {
Effect::new(move || {
if let Some(Ok(txs)) = transactions_resource.get() {
set_all_transactions.set(txs.clone());
set_has_more.set(txs.len() as u32 == tx_limit);
set_has_more.set(
u64::try_from(txs.len()).expect("Transaction count should fit in u64") == tx_limit,
);
}
});
@ -57,18 +59,19 @@ pub fn AccountPage() -> impl IntoView {
};
set_is_loading.set(true);
let current_offset = tx_offset.get() + tx_limit;
let current_offset = tx_offset.get().saturating_add(tx_limit);
set_tx_offset.set(current_offset);
leptos::task::spawn_local(async move {
match api::get_transactions_by_account(acc_id, tx_limit, current_offset).await {
match api::get_transactions_by_account(acc_id, current_offset, tx_limit).await {
Ok(new_txs) => {
let txs_count = new_txs.len() as u32;
let txs_count =
u64::try_from(new_txs.len()).expect("Transaction count should fit in u64");
set_all_transactions.update(|txs| txs.extend(new_txs));
set_has_more.set(txs_count == tx_limit);
}
Err(e) => {
log::error!("Failed to load more transactions: {}", e);
log::error!("Failed to load more transactions: {e}");
}
}
set_is_loading.set(false);
@ -108,114 +111,111 @@ pub fn AccountPage() -> impl IntoView {
<div class="info-row">
<span class="info-label">"Account ID:"</span>
<span class="info-value hash">{account_id_str}</span>
</div>
<div class="info-row">
<span class="info-label">"Balance:"</span>
<span class="info-value">{balance_str}</span>
</div>
<div class="info-row">
<span class="info-label">"Program Owner:"</span>
<span class="info-value hash">{program_id}</span>
</div>
<div class="info-row">
<span class="info-label">"Nonce:"</span>
<span class="info-value">{nonce_str}</span>
</div>
<div class="info-row">
<span class="info-label">"Data:"</span>
<span class="info-value">{format!("{} bytes", data_len)}</span>
</div>
</div>
</div>
</div>
<div class="info-row">
<span class="info-label">"Balance:"</span>
<span class="info-value">{balance_str}</span>
</div>
<div class="info-row">
<span class="info-label">"Program Owner:"</span>
<span class="info-value hash">{program_id}</span>
</div>
<div class="info-row">
<span class="info-label">"Nonce:"</span>
<span class="info-value">{nonce_str}</span>
</div>
<div class="info-row">
<span class="info-label">"Data:"</span>
<span class="info-value">{format!("{data_len} bytes")}</span>
</div>
</div>
</div>
<div class="account-transactions">
<h2>"Transactions"</h2>
<Suspense fallback=move || {
view! { <div class="loading">"Loading transactions..."</div> }
}>
{move || {
transactions_resource
.get()
.map(|result| match result {
Ok(_) => {
let txs = all_transactions.get();
if txs.is_empty() {
view! {
<div class="no-transactions">
"No transactions found"
</div>
}
.into_any()
} else {
view! {
<div>
<div class="transactions-list">
{txs
.into_iter()
.map(|tx| {
view! { <TransactionPreview transaction=tx /> }
})
.collect::<Vec<_>>()}
</div>
{move || {
if has_more.get() {
view! {
<button
class="load-more-button"
on:click=load_more
disabled=move || is_loading.get()
>
{move || {
if is_loading.get() {
"Loading..."
} else {
"Load More"
}
}}
</button>
}
.into_any()
} else {
().into_any()
<div class="account-transactions">
<h2>"Transactions"</h2>
<Suspense fallback=move || {
view! { <div class="loading">"Loading transactions..."</div> }
}>
{move || {
transactions_resource
.get()
.map(|load_tx_result| match load_tx_result {
Ok(_) => {
let txs = all_transactions.get();
if txs.is_empty() {
view! {
<div class="no-transactions">
"No transactions found"
</div>
}
}}
.into_any()
} else {
view! {
<div>
<div class="transactions-list">
{txs
.into_iter()
.map(|tx| {
view! { <TransactionPreview transaction=tx /> }
})
.collect::<Vec<_>>()}
</div>
{move || {
if has_more.get() {
view! {
<button
class="load-more-button"
on:click=load_more
disabled=move || is_loading.get()
>
{move || {
if is_loading.get() {
"Loading..."
} else {
"Load More"
}
}}
</div>
}
.into_any()
}
}
Err(e) => {
view! {
<div class="error">
{format!("Failed to load transactions: {}", e)}
</div>
}
.into_any()
}
})
}}
</button>
}
.into_any()
} else {
().into_any()
}
}}
</Suspense>
</div>
</div>
}
.into_any()
</div>
}
.into_any()
}
}
Err(e) => {
view! {
<div class="error">
{format!("Failed to load transactions: {e}")}
</div>
}
.into_any()
}
})
}}
</Suspense>
</div>
</div>
}
.into_any()
}
Err(e) => {
view! {
<div class="error-page">
<h1>"Error"</h1>
<p>{format!("Failed to load account: {}", e)}</p>
<p>{format!("Failed to load account: {e}")}</p>
</div>
}
.into_any()
}
})
}}
</Suspense>
</div>
}

View File

@ -38,7 +38,7 @@ pub fn BlockPage() -> impl IntoView {
Some(BlockIdOrHash::BlockId(id)) => api::get_block_by_id(id).await,
Some(BlockIdOrHash::Hash(hash)) => api::get_block_by_hash(hash).await,
None => Err(leptos::prelude::ServerFnError::ServerError(
"Invalid block ID or hash".to_string(),
"Invalid block ID or hash".to_owned(),
)),
}
},
@ -144,7 +144,7 @@ pub fn BlockPage() -> impl IntoView {
view! {
<div class="error-page">
<h1>"Error"</h1>
<p>{format!("Failed to load block: {}", e)}</p>
<p>{format!("Failed to load block: {e}")}</p>
</div>
}
.into_any()

View File

@ -1,5 +1,8 @@
use leptos::prelude::*;
use leptos_router::hooks::{use_navigate, use_query_map};
use leptos_router::{
NavigateOptions,
hooks::{use_navigate, use_query_map},
};
use web_sys::SubmitEvent;
use crate::{
@ -33,41 +36,79 @@ pub fn MainPage() -> impl IntoView {
match api::search(query).await {
Ok(result) => Some(result),
Err(e) => {
log::error!("Search error: {}", e);
log::error!("Search error: {e}");
None
}
}
});
// Pagination state for blocks
let (all_blocks, set_all_blocks) = signal(Vec::new());
let (is_loading_blocks, set_is_loading_blocks) = signal(false);
let (has_more_blocks, set_has_more_blocks) = signal(true);
let (oldest_loaded_block_id, set_oldest_loaded_block_id) = signal(None::<u64>);
// Load recent blocks on mount
let recent_blocks_resource = Resource::new(
|| (),
|_| async {
match api::get_latest_block_id().await {
Ok(last_id) => {
api::get_blocks(
std::cmp::max(last_id.saturating_sub(RECENT_BLOCKS_LIMIT) as u32, 1),
(RECENT_BLOCKS_LIMIT + 1) as u32,
)
.await
}
Err(err) => Err(err),
}
},
|()| async { api::get_blocks(None, RECENT_BLOCKS_LIMIT).await },
);
// Update all_blocks when initial load completes
Effect::new(move || {
if let Some(Ok(blocks)) = recent_blocks_resource.get() {
let oldest_id = blocks.last().map(|b| b.header.block_id);
set_all_blocks.set(blocks.clone());
set_oldest_loaded_block_id.set(oldest_id);
set_has_more_blocks.set(
u64::try_from(blocks.len()).expect("usize should fit in u64")
== RECENT_BLOCKS_LIMIT
&& oldest_id.unwrap_or(0) > 1,
);
}
});
// Load more blocks handler
let load_more_blocks = move |_| {
let before_id = oldest_loaded_block_id.get();
if before_id.is_none() {
return;
}
set_is_loading_blocks.set(true);
leptos::task::spawn_local(async move {
match api::get_blocks(before_id, RECENT_BLOCKS_LIMIT).await {
Ok(new_blocks) => {
let blocks_count =
u64::try_from(new_blocks.len()).expect("usize should fit in u64");
let new_oldest_id = new_blocks.last().map(|b| b.header.block_id);
set_all_blocks.update(|blocks| blocks.extend(new_blocks));
set_oldest_loaded_block_id.set(new_oldest_id);
set_has_more_blocks
.set(blocks_count == RECENT_BLOCKS_LIMIT && new_oldest_id.unwrap_or(0) > 1);
}
Err(e) => {
log::error!("Failed to load more blocks: {e}");
}
}
set_is_loading_blocks.set(false);
});
};
// Handle search - update URL parameter
let on_search = move |ev: SubmitEvent| {
ev.prevent_default();
let query = search_query.get();
if query.is_empty() {
navigate("?", Default::default());
navigate("?", NavigateOptions::default());
return;
}
navigate(
&format!("?q={}", urlencoding::encode(&query)),
Default::default(),
NavigateOptions::default(),
);
};
@ -108,78 +149,78 @@ pub fn MainPage() -> impl IntoView {
view! {
<div class="search-results">
<h2>"Search Results"</h2>
{if !has_results {
view! { <div class="not-found">"No results found"</div> }
.into_any()
} else {
view! {
<div class="results-container">
{if !blocks.is_empty() {
view! {
<div class="results-section">
<h3>"Blocks"</h3>
<div class="results-list">
{blocks
.into_iter()
.map(|block| {
view! { <BlockPreview block=block /> }
})
.collect::<Vec<_>>()}
{if has_results {
view! {
<div class="results-container">
{if blocks.is_empty() {
().into_any()
} else {
view! {
<div class="results-section">
<h3>"Blocks"</h3>
<div class="results-list">
{blocks
.into_iter()
.map(|block| {
view! { <BlockPreview block=block /> }
})
.collect::<Vec<_>>()}
</div>
</div>
</div>
}
.into_any()
} else {
().into_any()
}}
}
.into_any()
}}
{if !transactions.is_empty() {
view! {
<div class="results-section">
<h3>"Transactions"</h3>
<div class="results-list">
{transactions
.into_iter()
.map(|tx| {
view! { <TransactionPreview transaction=tx /> }
})
.collect::<Vec<_>>()}
{if transactions.is_empty() {
().into_any()
} else {
view! {
<div class="results-section">
<h3>"Transactions"</h3>
<div class="results-list">
{transactions
.into_iter()
.map(|tx| {
view! { <TransactionPreview transaction=tx /> }
})
.collect::<Vec<_>>()}
</div>
</div>
</div>
}
.into_any()
} else {
().into_any()
}}
}
.into_any()
}}
{if !accounts.is_empty() {
view! {
<div class="results-section">
<h3>"Accounts"</h3>
<div class="results-list">
{accounts
.into_iter()
.map(|(id, account)| {
view! {
<AccountPreview
account_id=id
account=account
/>
}
})
.collect::<Vec<_>>()}
{if accounts.is_empty() {
().into_any()
} else {
view! {
<div class="results-section">
<h3>"Accounts"</h3>
<div class="results-list">
{accounts
.into_iter()
.map(|(id, account)| {
view! {
<AccountPreview
account_id=id
account=account
/>
}
})
.collect::<Vec<_>>()}
</div>
</div>
</div>
}
.into_any()
} else {
().into_any()
}}
}
.into_any()
}}
</div>
}
.into_any()
}}
</div>
}
.into_any()
} else {
view! { <div class="not-found">"No results found"</div> }
.into_any()
}}
</div>
}
.into_any()
@ -196,22 +237,51 @@ pub fn MainPage() -> impl IntoView {
recent_blocks_resource
.get()
.map(|result| match result {
Ok(blocks) if !blocks.is_empty() => {
view! {
<div class="blocks-list">
{blocks
.into_iter()
.map(|block| view! { <BlockPreview block=block /> })
.collect::<Vec<_>>()}
</div>
}
.into_any()
}
Ok(_) => {
view! { <div class="no-blocks">"No blocks found"</div> }.into_any()
let blocks = all_blocks.get();
if blocks.is_empty() {
view! { <div class="no-blocks">"No blocks found"</div> }
.into_any()
} else {
view! {
<div>
<div class="blocks-list">
{blocks
.into_iter()
.map(|block| view! { <BlockPreview block=block /> })
.collect::<Vec<_>>()}
</div>
{move || {
if has_more_blocks.get() {
view! {
<button
class="load-more-button"
on:click=load_more_blocks
disabled=move || is_loading_blocks.get()
>
{move || {
if is_loading_blocks.get() {
"Loading..."
} else {
"Load More"
}
}}
</button>
}
.into_any()
} else {
().into_any()
}
}}
</div>
}
.into_any()
}
}
Err(e) => {
view! { <div class="error">{format!("Error: {}", e)}</div> }
view! { <div class="error">{format!("Error: {e}")}</div> }
.into_any()
}
})

View File

@ -1,9 +1,9 @@
pub mod account_page;
pub mod block_page;
pub mod main_page;
pub mod transaction_page;
pub use account_page::AccountPage;
pub use block_page::BlockPage;
pub use main_page::MainPage;
pub use transaction_page::TransactionPage;
pub mod account_page;
pub mod block_page;
pub mod main_page;
pub mod transaction_page;

View File

@ -4,7 +4,7 @@ use indexer_service_protocol::{
HashType, PrivacyPreservingMessage, PrivacyPreservingTransaction, ProgramDeploymentMessage,
ProgramDeploymentTransaction, PublicMessage, PublicTransaction, Transaction, WitnessSet,
};
use itertools::{EitherOrBoth, Itertools};
use itertools::{EitherOrBoth, Itertools as _};
use leptos::prelude::*;
use leptos_router::{components::A, hooks::use_params_map};
@ -17,16 +17,14 @@ pub fn TransactionPage() -> impl IntoView {
let transaction_resource = Resource::new(
move || {
params
.read()
.get("hash")
.and_then(|s| HashType::from_str(&s).ok())
let s = params.read().get("hash")?;
HashType::from_str(&s).ok()
},
|hash_opt| async move {
match hash_opt {
Some(hash) => api::get_transaction(hash).await,
None => Err(leptos::prelude::ServerFnError::ServerError(
"Invalid transaction hash".to_string(),
"Invalid transaction hash".to_owned(),
)),
}
},
@ -105,7 +103,7 @@ pub fn TransactionPage() -> impl IntoView {
</div>
<div class="info-row">
<span class="info-label">"Proof Size:"</span>
<span class="info-value">{format!("{} bytes", proof_len)}</span>
<span class="info-value">{format!("{proof_len} bytes")}</span>
</div>
<div class="info-row">
<span class="info-label">"Signatures:"</span>
@ -141,7 +139,7 @@ pub fn TransactionPage() -> impl IntoView {
<span class="hash">{account_id_str}</span>
</A>
<span class="nonce">
" (nonce: "{"Not affected by this transaction".to_string()}" )"
" (nonce: "{"Not affected by this transaction".to_owned()}" )"
</span>
</div>
}
@ -153,7 +151,7 @@ pub fn TransactionPage() -> impl IntoView {
<span class="hash">{"Account not found"}</span>
</A>
<span class="nonce">
" (nonce: "{"Account not found".to_string()}" )"
" (nonce: "{"Account not found".to_owned()}" )"
</span>
</div>
}
@ -212,7 +210,7 @@ pub fn TransactionPage() -> impl IntoView {
</div>
<div class="info-row">
<span class="info-label">"Proof Size:"</span>
<span class="info-value">{format!("{} bytes", proof_len)}</span>
<span class="info-value">{format!("{proof_len} bytes")}</span>
</div>
</div>
@ -244,7 +242,7 @@ pub fn TransactionPage() -> impl IntoView {
<span class="hash">{account_id_str}</span>
</A>
<span class="nonce">
" (nonce: "{"Not affected by this transaction".to_string()}" )"
" (nonce: "{"Not affected by this transaction".to_owned()}" )"
</span>
</div>
}
@ -256,7 +254,7 @@ pub fn TransactionPage() -> impl IntoView {
<span class="hash">{"Account not found"}</span>
</A>
<span class="nonce">
" (nonce: "{"Account not found".to_string()}" )"
" (nonce: "{"Account not found".to_owned()}" )"
</span>
</div>
}
@ -284,7 +282,7 @@ pub fn TransactionPage() -> impl IntoView {
<div class="info-row">
<span class="info-label">"Bytecode Size:"</span>
<span class="info-value">
{format!("{} bytes", bytecode_len)}
{format!("{bytecode_len} bytes")}
</span>
</div>
</div>
@ -302,7 +300,7 @@ pub fn TransactionPage() -> impl IntoView {
view! {
<div class="error-page">
<h1>"Error"</h1>
<p>{format!("Failed to load transaction: {}", e)}</p>
<p>{format!("Failed to load transaction: {e}")}</p>
</div>
}
.into_any()

View File

@ -4,6 +4,9 @@ version = "0.1.0"
edition = "2024"
license = { workspace = true }
[lints]
workspace = true
[dependencies]
common.workspace = true
bedrock_client.workspace = true

View File

@ -3,7 +3,7 @@ use std::{path::Path, sync::Arc};
use anyhow::Result;
use bedrock_client::HeaderId;
use common::{
block::{BedrockStatus, Block},
block::{BedrockStatus, Block, BlockId},
transaction::NSSATransaction,
};
use nssa::{Account, AccountId, V02State};
@ -23,9 +23,10 @@ impl IndexerStore {
/// ATTENTION: Will overwrite genesis block.
pub fn open_db_with_genesis(
location: &Path,
start_data: Option<(Block, V02State)>,
genesis_block: &Block,
initial_state: &V02State,
) -> Result<Self> {
let dbio = RocksDBIO::open_or_create(location, start_data)?;
let dbio = RocksDBIO::open_or_create(location, genesis_block, initial_state)?;
let current_state = dbio.final_state()?;
Ok(Self {
@ -34,11 +35,6 @@ impl IndexerStore {
})
}
/// Reopening existing database
pub fn open_db_restart(location: &Path) -> Result<Self> {
Self::open_db_with_genesis(location, None)
}
pub fn last_observed_l1_lib_header(&self) -> Result<Option<HeaderId>> {
Ok(self
.dbio
@ -54,8 +50,8 @@ impl IndexerStore {
Ok(self.dbio.get_block(id)?)
}
pub fn get_block_batch(&self, offset: u64, limit: u64) -> Result<Vec<Block>> {
Ok(self.dbio.get_block_batch(offset, limit)?)
pub fn get_block_batch(&self, before: Option<BlockId>, limit: u64) -> Result<Vec<Block>> {
Ok(self.dbio.get_block_batch(before, limit)?)
}
pub fn get_transaction_by_hash(&self, tx_hash: [u8; 32]) -> Result<NSSATransaction> {
@ -83,12 +79,14 @@ impl IndexerStore {
Ok(self.dbio.get_acc_transactions(acc_id, offset, limit)?)
}
#[must_use]
pub fn genesis_id(&self) -> u64 {
self.dbio
.get_meta_first_block_in_db()
.expect("Must be set at the DB startup")
}
#[must_use]
pub fn last_block(&self) -> u64 {
self.dbio
.get_meta_last_block_in_db()
@ -99,9 +97,9 @@ impl IndexerStore {
Ok(self.dbio.calculate_state_for_id(block_id)?)
}
/// Recalculation of final state directly from DB
/// Recalculation of final state directly from DB.
///
/// Used for indexer healthcheck
/// Used for indexer healthcheck.
pub fn recalculate_final_state(&self) -> Result<V02State> {
Ok(self.dbio.final_state()?)
}
@ -114,7 +112,7 @@ impl IndexerStore {
.get_account_by_id(*account_id))
}
pub async fn put_block(&mut self, mut block: Block, l1_header: HeaderId) -> Result<()> {
pub async fn put_block(&self, mut block: Block, l1_header: HeaderId) -> Result<()> {
{
let mut state_guard = self.current_state.write().await;
@ -131,7 +129,7 @@ impl IndexerStore {
// to represent correct block finality
block.bedrock_status = BedrockStatus::Finalized;
Ok(self.dbio.put_block(block, l1_header.into())?)
Ok(self.dbio.put_block(&block, l1_header.into())?)
}
}
@ -163,15 +161,13 @@ mod tests {
}
#[test]
fn test_correct_startup() {
fn correct_startup() {
let home = tempdir().unwrap();
let storage = IndexerStore::open_db_with_genesis(
home.as_ref(),
Some((
genesis_block(),
nssa::V02State::new_with_genesis_accounts(&[(acc1(), 10000), (acc2(), 20000)], &[]),
)),
&genesis_block(),
&nssa::V02State::new_with_genesis_accounts(&[(acc1(), 10000), (acc2(), 20000)], &[]),
)
.unwrap();
@ -183,15 +179,13 @@ mod tests {
}
#[tokio::test]
async fn test_state_transition() {
async fn state_transition() {
let home = tempdir().unwrap();
let mut storage = IndexerStore::open_db_with_genesis(
let storage = IndexerStore::open_db_with_genesis(
home.as_ref(),
Some((
genesis_block(),
nssa::V02State::new_with_genesis_accounts(&[(acc1(), 10000), (acc2(), 20000)], &[]),
)),
&genesis_block(),
&nssa::V02State::new_with_genesis_accounts(&[(acc1(), 10000), (acc2(), 20000)], &[]),
)
.unwrap();
@ -207,15 +201,15 @@ mod tests {
i - 2,
to,
10,
sign_key.clone(),
&sign_key,
);
let next_block =
common::test_utils::produce_dummy_block(i as u64, Some(prev_hash), vec![tx]);
common::test_utils::produce_dummy_block(u64::try_from(i).unwrap(), Some(prev_hash), vec![tx]);
prev_hash = next_block.header.hash;
storage
.put_block(next_block, HeaderId::from([i as u8; 32]))
.put_block(next_block, HeaderId::from([u8::try_from(i).unwrap(); 32]))
.await
.unwrap();
}

View File

@ -27,13 +27,13 @@ pub struct ClientConfig {
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct IndexerConfig {
/// Home dir of sequencer storage
/// Home dir of sequencer storage.
pub home: PathBuf,
/// List of initial accounts data
/// List of initial accounts data.
pub initial_accounts: Vec<AccountInitialData>,
/// List of initial commitments
/// List of initial commitments.
pub initial_commitments: Vec<CommitmentsInitialData>,
/// Sequencer's signing key
/// Sequencer's signing key.
pub signing_key: [u8; 32],
#[serde(with = "humantime_serde")]
pub consensus_info_polling_interval: Duration,
@ -42,12 +42,17 @@ pub struct IndexerConfig {
}
impl IndexerConfig {
pub fn from_path(config_path: &Path) -> Result<IndexerConfig> {
let file = File::open(config_path)
.with_context(|| format!("Failed to open indexer config at {config_path:?}"))?;
pub fn from_path(config_path: &Path) -> Result<Self> {
let file = File::open(config_path).with_context(|| {
format!("Failed to open indexer config at {}", config_path.display())
})?;
let reader = BufReader::new(file);
serde_json::from_reader(reader)
.with_context(|| format!("Failed to parse indexer config at {config_path:?}"))
serde_json::from_reader(reader).with_context(|| {
format!(
"Failed to parse indexer config at {}",
config_path.display()
)
})
}
}

View File

@ -24,14 +24,14 @@ pub struct IndexerCore {
}
#[derive(Clone)]
/// This struct represents one L1 block data fetched from backfilling
/// This struct represents one L1 block data fetched from backfilling.
pub struct BackfillBlockData {
l2_blocks: Vec<Block>,
l1_header: HeaderId,
}
#[derive(Clone)]
/// This struct represents data fetched fom backfilling in one iteration
/// This struct represents data fetched fom backfilling in one iteration.
pub struct BackfillData {
block_data: VecDeque<BackfillBlockData>,
curr_fin_l1_lib_header: HeaderId,
@ -52,7 +52,7 @@ impl IndexerCore {
// ToDo: remove key from indexer config, use some default.
let signing_key = nssa::PrivateKey::try_new(config.signing_key).unwrap();
let channel_genesis_msg_id = [0; 32];
let start_block = hashable_data.into_pending_block(&signing_key, channel_genesis_msg_id);
let genesis_block = hashable_data.into_pending_block(&signing_key, channel_genesis_msg_id);
// This is a troubling moment, because changes in key protocol can
// affect this. And indexer can not reliably ask this data from sequencer
@ -94,49 +94,44 @@ impl IndexerCore {
config.bedrock_client_config.auth.clone(),
)?,
config,
store: IndexerStore::open_db_with_genesis(&home, Some((start_block, state)))?,
store: IndexerStore::open_db_with_genesis(&home, &genesis_block, &state)?,
})
}
pub async fn subscribe_parse_block_stream(
&mut self,
) -> impl futures::Stream<Item = Result<Block>> {
pub fn subscribe_parse_block_stream(&self) -> impl futures::Stream<Item = Result<Block>> {
async_stream::stream! {
info!("Searching for initial header");
let last_l1_lib_header = self.store.last_observed_l1_lib_header()?;
let last_stored_l1_lib_header = self.store.last_observed_l1_lib_header()?;
let mut prev_last_l1_lib_header = match last_l1_lib_header {
Some(last_l1_lib_header) => {
info!("Last l1 lib header found: {last_l1_lib_header}");
last_l1_lib_header
},
None => {
info!("Last l1 lib header not found in DB");
info!("Searching for the start of a channel");
let mut prev_last_l1_lib_header = if let Some(last_l1_lib_header) = last_stored_l1_lib_header {
info!("Last l1 lib header found: {last_l1_lib_header}");
last_l1_lib_header
} else {
info!("Last l1 lib header not found in DB");
info!("Searching for the start of a channel");
let BackfillData {
block_data: start_buff,
curr_fin_l1_lib_header: last_l1_lib_header,
} = self.search_for_channel_start().await?;
let BackfillData {
block_data: start_buff,
curr_fin_l1_lib_header: last_l1_lib_header,
} = self.search_for_channel_start().await?;
for BackfillBlockData {
l2_blocks: l2_block_vec,
l1_header,
} in start_buff {
let mut l2_blocks_parsed_ids: Vec<_> = l2_block_vec.iter().map(|block| block.header.block_id).collect();
l2_blocks_parsed_ids.sort();
info!("Parsed {} L2 blocks with ids {:?}", l2_block_vec.len(), l2_blocks_parsed_ids);
for BackfillBlockData {
l2_blocks: l2_block_vec,
l1_header,
} in start_buff {
let mut l2_blocks_parsed_ids: Vec<_> = l2_block_vec.iter().map(|block| block.header.block_id).collect();
l2_blocks_parsed_ids.sort_unstable();
info!("Parsed {} L2 blocks with ids {:?}", l2_block_vec.len(), l2_blocks_parsed_ids);
for l2_block in l2_block_vec {
self.store.put_block(l2_block.clone(), l1_header).await?;
yield Ok(l2_block);
}
yield Ok(l2_block);
}
}
last_l1_lib_header
},
last_l1_lib_header
};
info!("Searching for initial header finished");
@ -159,7 +154,7 @@ impl IndexerCore {
l1_header: header,
} in buff {
let mut l2_blocks_parsed_ids: Vec<_> = l2_block_vec.iter().map(|block| block.header.block_id).collect();
l2_blocks_parsed_ids.sort();
l2_blocks_parsed_ids.sort_unstable();
info!("Parsed {} L2 blocks with ids {:?}", l2_block_vec.len(), l2_blocks_parsed_ids);
for l2_block in l2_block_vec {
@ -179,20 +174,20 @@ impl IndexerCore {
async fn get_next_lib(&self, prev_lib: HeaderId) -> Result<HeaderId> {
loop {
let next_lib = self.get_lib().await?;
if next_lib != prev_lib {
break Ok(next_lib);
} else {
if next_lib == prev_lib {
info!(
"Wait {:?} to not spam the node",
self.config.consensus_info_polling_interval
);
tokio::time::sleep(self.config.consensus_info_polling_interval).await;
} else {
break Ok(next_lib);
}
}
}
/// WARNING: depending on channel state,
/// may take indefinite amount of time
/// may take indefinite amount of time.
pub async fn search_for_channel_start(&self) -> Result<BackfillData> {
let mut curr_last_l1_lib_header = self.get_lib().await?;
let mut backfill_start = curr_last_l1_lib_header;
@ -206,15 +201,13 @@ impl IndexerCore {
let mut cycle_header = curr_last_l1_lib_header;
loop {
let cycle_block =
if let Some(block) = self.bedrock_client.get_block_by_id(cycle_header).await? {
block
} else {
// First run can reach root easily
// so here we are optimistic about L1
// failing to get parent.
break;
};
let Some(cycle_block) = self.bedrock_client.get_block_by_id(cycle_header).await?
else {
// First run can reach root easily
// so here we are optimistic about L1
// failing to get parent.
break;
};
// It would be better to have id, but block does not have it, so slot will do.
info!(
@ -291,10 +284,9 @@ impl IndexerCore {
if cycle_block.header().id() == last_fin_l1_lib_header {
break;
} else {
// Step back to parent
cycle_header = cycle_block.header().parent();
}
// Step back to parent
cycle_header = cycle_block.header().parent();
// It would be better to have id, but block does not have it, so slot will do.
info!(
@ -326,6 +318,10 @@ fn parse_block_owned(
decoded_channel_id: &ChannelId,
) -> (Vec<Block>, HeaderId) {
(
#[expect(
clippy::wildcard_enum_match_arm,
reason = "We are only interested in channel inscription ops, so it's fine to ignore the rest"
)]
l1_block
.transactions()
.flat_map(|tx| {
@ -337,7 +333,7 @@ fn parse_block_owned(
}) if channel_id == decoded_channel_id => {
borsh::from_slice::<Block>(inscription)
.inspect_err(|err| {
error!("Failed to deserialize our inscription with err: {err:#?}")
error!("Failed to deserialize our inscription with err: {err:#?}");
})
.ok()
}

View File

@ -4,6 +4,9 @@ version = "0.1.0"
edition = "2024"
license = { workspace = true }
[lints]
workspace = true
[dependencies]
indexer_service_protocol = { workspace = true, features = ["convert"] }
indexer_service_rpc = { workspace = true, features = ["server"] }

View File

@ -1,5 +1,5 @@
# Chef stage - uses pre-built cargo-chef image
FROM lukemathwalker/cargo-chef:latest-rust-1.91.1-slim-trixie AS chef
FROM lukemathwalker/cargo-chef:latest-rust-1.94.0-slim-trixie AS chef
# Install build dependencies
RUN apt-get update && apt-get install -y \

View File

@ -1,6 +1,6 @@
{
"home": "./indexer/service",
"consensus_info_polling_interval": "60s",
"home": ".",
"consensus_info_polling_interval": "1s",
"bedrock_client_config": {
"addr": "http://localhost:8080",
"backoff": {
@ -11,50 +11,50 @@
"channel_id": "0101010101010101010101010101010101010101010101010101010101010101",
"initial_accounts": [
{
"account_id": "BLgCRDXYdQPMMWVHYRFGQZbgeHx9frkipa8GtpG2Syqy",
"account_id": "6iArKUXxhUJqS7kCaPNhwMWt3ro71PDyBj7jwAyE2VQV",
"balance": 10000
},
{
"account_id": "Gj1mJy5W7J5pfmLRujmQaLfLMWidNxQ6uwnhb666ZwHw",
"account_id": "7wHg9sbJwc6h3NP1S9bekfAzB8CHifEcxKswCKUt3YQo",
"balance": 20000
}
],
"initial_commitments": [
{
"npk": [
63,
202,
178,
231,
183,
82,
237,
212,
216,
221,
215,
255,
153,
101,
"npk":[
177,
161,
254,
210,
128,
122,
54,
190,
230,
151,
183,
64,
225,
229,
113,
1,
228,
97
],
11,
87,
38,
254,
159,
231,
165,
1,
94,
64,
137,
243,
76,
249,
101,
251,
129,
33,
101,
189,
30,
42,
11,
191,
34,
103,
186,
227,
230
] ,
"account": {
"program_owner": [
0,
@ -73,38 +73,38 @@
},
{
"npk": [
192,
251,
166,
243,
167,
236,
84,
249,
35,
32,
67,
72,
164,
106,
53,
66,
239,
141,
15,
52,
230,
136,
130,
172,
219,
225,
161,
139,
229,
89,
177,
2,
236,
207,
243,
125,
134,
135,
210,
143,
87,
232,
215,
128,
194,
213,
209,
30,
23,
174,
100,
244,
124,
74,
140,
47
120,
113,
224,
4,
165
],
"account": {
"program_owner": [

View File

@ -4,6 +4,9 @@ version = "0.1.0"
edition = "2024"
license = { workspace = true }
[lints]
workspace = true
[dependencies]
nssa_core = { workspace = true, optional = true, features = ["host"] }
nssa = { workspace = true, optional = true }

View File

@ -1,6 +1,12 @@
//! Conversions between indexer_service_protocol types and nssa/nssa_core types
//! Conversions between `indexer_service_protocol` types and `nssa/nssa_core` types.
use crate::*;
use crate::{
Account, AccountId, BedrockStatus, Block, BlockBody, BlockHeader, Ciphertext, Commitment,
CommitmentSetDigest, Data, EncryptedAccountData, EphemeralPublicKey, HashType, MantleMsgId,
Nullifier, PrivacyPreservingMessage, PrivacyPreservingTransaction, ProgramDeploymentMessage,
ProgramDeploymentTransaction, ProgramId, Proof, PublicKey, PublicMessage, PublicTransaction,
Signature, Transaction, WitnessSet,
};
// ============================================================================
// Account-related conversions
@ -29,7 +35,7 @@ impl From<nssa_core::account::AccountId> for AccountId {
impl From<AccountId> for nssa_core::account::AccountId {
fn from(value: AccountId) -> Self {
let AccountId { value } = value;
nssa_core::account::AccountId::new(value)
Self::new(value)
}
}
@ -62,7 +68,7 @@ impl TryFrom<Account> for nssa_core::account::Account {
nonce,
} = value;
Ok(nssa_core::account::Account {
Ok(Self {
program_owner: program_owner.into(),
balance,
data: data.try_into()?,
@ -81,7 +87,7 @@ impl TryFrom<Data> for nssa_core::account::Data {
type Error = nssa_core::account::data::DataTooBigError;
fn try_from(value: Data) -> Result<Self, Self::Error> {
nssa_core::account::Data::try_from(value.0)
Self::try_from(value.0)
}
}
@ -97,7 +103,7 @@ impl From<nssa_core::Commitment> for Commitment {
impl From<Commitment> for nssa_core::Commitment {
fn from(value: Commitment) -> Self {
nssa_core::Commitment::from_byte_array(value.0)
Self::from_byte_array(value.0)
}
}
@ -109,7 +115,7 @@ impl From<nssa_core::Nullifier> for Nullifier {
impl From<Nullifier> for nssa_core::Nullifier {
fn from(value: Nullifier) -> Self {
nssa_core::Nullifier::from_byte_array(value.0)
Self::from_byte_array(value.0)
}
}
@ -137,7 +143,7 @@ impl From<nssa_core::encryption::Ciphertext> for Ciphertext {
impl From<Ciphertext> for nssa_core::encryption::Ciphertext {
fn from(value: Ciphertext) -> Self {
nssa_core::encryption::Ciphertext::from_inner(value.0)
Self::from_inner(value.0)
}
}
@ -149,7 +155,7 @@ impl From<nssa_core::encryption::EphemeralPublicKey> for EphemeralPublicKey {
impl From<EphemeralPublicKey> for nssa_core::encryption::EphemeralPublicKey {
fn from(value: EphemeralPublicKey) -> Self {
nssa_core::encryption::shared_key_derivation::Secp256k1Point(value.0)
Self(value.0)
}
}
@ -167,7 +173,7 @@ impl From<nssa::Signature> for Signature {
impl From<Signature> for nssa::Signature {
fn from(value: Signature) -> Self {
let Signature(sig_value) = value;
nssa::Signature { value: sig_value }
Self { value: sig_value }
}
}
@ -181,7 +187,7 @@ impl TryFrom<PublicKey> for nssa::PublicKey {
type Error = nssa::error::NssaError;
fn try_from(value: PublicKey) -> Result<Self, Self::Error> {
nssa::PublicKey::try_new(value.0)
Self::try_new(value.0)
}
}
@ -197,7 +203,7 @@ impl From<nssa::privacy_preserving_transaction::circuit::Proof> for Proof {
impl From<Proof> for nssa::privacy_preserving_transaction::circuit::Proof {
fn from(value: Proof) -> Self {
nssa::privacy_preserving_transaction::circuit::Proof::from_inner(value.0)
Self::from_inner(value.0)
}
}
@ -499,12 +505,12 @@ impl From<ProgramDeploymentTransaction> for nssa::ProgramDeploymentTransaction {
impl From<common::transaction::NSSATransaction> for Transaction {
fn from(value: common::transaction::NSSATransaction) -> Self {
match value {
common::transaction::NSSATransaction::Public(tx) => Transaction::Public(tx.into()),
common::transaction::NSSATransaction::Public(tx) => Self::Public(tx.into()),
common::transaction::NSSATransaction::PrivacyPreserving(tx) => {
Transaction::PrivacyPreserving(tx.into())
Self::PrivacyPreserving(tx.into())
}
common::transaction::NSSATransaction::ProgramDeployment(tx) => {
Transaction::ProgramDeployment(tx.into())
Self::ProgramDeployment(tx.into())
}
}
}
@ -515,15 +521,9 @@ impl TryFrom<Transaction> for common::transaction::NSSATransaction {
fn try_from(value: Transaction) -> Result<Self, Self::Error> {
match value {
Transaction::Public(tx) => {
Ok(common::transaction::NSSATransaction::Public(tx.try_into()?))
}
Transaction::PrivacyPreserving(tx) => Ok(
common::transaction::NSSATransaction::PrivacyPreserving(tx.try_into()?),
),
Transaction::ProgramDeployment(tx) => Ok(
common::transaction::NSSATransaction::ProgramDeployment(tx.into()),
),
Transaction::Public(tx) => Ok(Self::Public(tx.try_into()?)),
Transaction::PrivacyPreserving(tx) => Ok(Self::PrivacyPreserving(tx.try_into()?)),
Transaction::ProgramDeployment(tx) => Ok(Self::ProgramDeployment(tx.into())),
}
}
}
@ -677,6 +677,6 @@ impl From<common::HashType> for HashType {
impl From<HashType> for common::HashType {
fn from(value: HashType) -> Self {
common::HashType(value.0)
Self(value.0)
}
}

View File

@ -14,6 +14,40 @@ use serde_with::{DeserializeFromStr, SerializeDisplay};
#[cfg(feature = "convert")]
mod convert;
mod base64 {
use base64::prelude::{BASE64_STANDARD, Engine as _};
use serde::{Deserialize as _, Deserializer, Serialize as _, Serializer};
pub mod arr {
use super::{Deserializer, Serializer};
pub fn serialize<S: Serializer>(v: &[u8], s: S) -> Result<S::Ok, S::Error> {
super::serialize(v, s)
}
pub fn deserialize<'de, const N: usize, D: Deserializer<'de>>(
d: D,
) -> Result<[u8; N], D::Error> {
let vec = super::deserialize(d)?;
vec.try_into().map_err(|_bytes| {
serde::de::Error::custom(format!("Invalid length, expected {N} bytes"))
})
}
}
pub fn serialize<S: Serializer>(v: &[u8], s: S) -> Result<S::Ok, S::Error> {
let base64 = BASE64_STANDARD.encode(v);
String::serialize(&base64, s)
}
pub fn deserialize<'de, D: Deserializer<'de>>(d: D) -> Result<Vec<u8>, D::Error> {
let base64 = String::deserialize(d)?;
BASE64_STANDARD
.decode(base64.as_bytes())
.map_err(serde::de::Error::custom)
}
}
pub type Nonce = u128;
#[derive(
@ -23,26 +57,43 @@ pub struct ProgramId(pub [u32; 8]);
impl Display for ProgramId {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let bytes: Vec<u8> = self.0.iter().flat_map(|n| n.to_be_bytes()).collect();
let bytes: Vec<u8> = self.0.iter().flat_map(|n| n.to_le_bytes()).collect();
write!(f, "{}", bytes.to_base58())
}
}
#[derive(Debug)]
pub enum ProgramIdParseError {
InvalidBase58(base58::FromBase58Error),
InvalidLength(usize),
}
impl Display for ProgramIdParseError {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
match self {
Self::InvalidBase58(err) => write!(f, "invalid base58: {err:?}"),
Self::InvalidLength(len) => {
write!(f, "invalid length: expected 32 bytes, got {len}")
}
}
}
}
impl FromStr for ProgramId {
type Err = hex::FromHexError;
type Err = ProgramIdParseError;
fn from_str(s: &str) -> Result<Self, Self::Err> {
let bytes = s
.from_base58()
.map_err(|_| hex::FromHexError::InvalidStringLength)?;
.map_err(ProgramIdParseError::InvalidBase58)?;
if bytes.len() != 32 {
return Err(hex::FromHexError::InvalidStringLength);
return Err(ProgramIdParseError::InvalidLength(bytes.len()));
}
let mut arr = [0u32; 8];
let mut arr = [0_u32; 8];
for (i, chunk) in bytes.chunks_exact(4).enumerate() {
arr[i] = u32::from_be_bytes(chunk.try_into().unwrap());
arr[i] = u32::from_le_bytes(chunk.try_into().unwrap());
}
Ok(ProgramId(arr))
Ok(Self(arr))
}
}
@ -72,9 +123,9 @@ impl FromStr for AccountId {
bytes.len()
));
}
let mut value = [0u8; 32];
let mut value = [0_u8; 32];
value.copy_from_slice(&bytes);
Ok(AccountId { value })
Ok(Self { value })
}
}
@ -121,9 +172,9 @@ impl FromStr for Signature {
type Err = hex::FromHexError;
fn from_str(s: &str) -> Result<Self, Self::Err> {
let mut bytes = [0u8; 64];
let mut bytes = [0_u8; 64];
hex::decode_to_slice(s, &mut bytes)?;
Ok(Signature(bytes))
Ok(Self(bytes))
}
}
@ -140,12 +191,14 @@ pub enum Transaction {
}
impl Transaction {
/// Get the hash of the transaction
pub fn hash(&self) -> &self::HashType {
/// Get the hash of the transaction.
#[expect(clippy::same_name_method, reason = "This is handy")]
#[must_use]
pub const fn hash(&self) -> &self::HashType {
match self {
Transaction::Public(tx) => &tx.hash,
Transaction::PrivacyPreserving(tx) => &tx.hash,
Transaction::ProgramDeployment(tx) => &tx.hash,
Self::Public(tx) => &tx.hash,
Self::PrivacyPreserving(tx) => &tx.hash,
Self::ProgramDeployment(tx) => &tx.hash,
}
}
}
@ -283,9 +336,9 @@ impl FromStr for HashType {
type Err = hex::FromHexError;
fn from_str(s: &str) -> Result<Self, Self::Err> {
let mut bytes = [0u8; 32];
let mut bytes = [0_u8; 32];
hex::decode_to_slice(s, &mut bytes)?;
Ok(HashType(bytes))
Ok(Self(bytes))
}
}
@ -302,37 +355,3 @@ pub enum BedrockStatus {
Safe,
Finalized,
}
mod base64 {
use base64::prelude::{BASE64_STANDARD, Engine as _};
use serde::{Deserialize, Deserializer, Serialize, Serializer};
pub mod arr {
use super::*;
pub fn serialize<S: Serializer>(v: &[u8], s: S) -> Result<S::Ok, S::Error> {
super::serialize(v, s)
}
pub fn deserialize<'de, const N: usize, D: Deserializer<'de>>(
d: D,
) -> Result<[u8; N], D::Error> {
let vec = super::deserialize(d)?;
vec.try_into().map_err(|_| {
serde::de::Error::custom(format!("Invalid length, expected {N} bytes"))
})
}
}
pub fn serialize<S: Serializer>(v: &[u8], s: S) -> Result<S::Ok, S::Error> {
let base64 = BASE64_STANDARD.encode(v);
String::serialize(&base64, s)
}
pub fn deserialize<'de, D: Deserializer<'de>>(d: D) -> Result<Vec<u8>, D::Error> {
let base64 = String::deserialize(d)?;
BASE64_STANDARD
.decode(base64.as_bytes())
.map_err(serde::de::Error::custom)
}
}

View File

@ -4,6 +4,9 @@ version = "0.1.0"
edition = "2024"
license = { workspace = true }
[lints]
workspace = true
[dependencies]
indexer_service_protocol.workspace = true

View File

@ -42,14 +42,18 @@ pub trait Rpc {
async fn get_transaction(&self, tx_hash: HashType) -> Result<Transaction, ErrorObjectOwned>;
#[method(name = "getBlocks")]
async fn get_blocks(&self, offset: u32, limit: u32) -> Result<Vec<Block>, ErrorObjectOwned>;
async fn get_blocks(
&self,
before: Option<BlockId>,
limit: u64,
) -> Result<Vec<Block>, ErrorObjectOwned>;
#[method(name = "getTransactionsByAccount")]
async fn get_transactions_by_account(
&self,
account_id: AccountId,
limit: u32,
offset: u32,
offset: u64,
limit: u64,
) -> Result<Vec<Transaction>, ErrorObjectOwned>;
// ToDo: expand healthcheck response into some kind of report

View File

@ -16,14 +16,15 @@ pub struct IndexerHandle {
server_handle: Option<jsonrpsee::server::ServerHandle>,
}
impl IndexerHandle {
fn new(addr: SocketAddr, server_handle: jsonrpsee::server::ServerHandle) -> Self {
const fn new(addr: SocketAddr, server_handle: jsonrpsee::server::ServerHandle) -> Self {
Self {
addr,
server_handle: Some(server_handle),
}
}
pub fn addr(&self) -> SocketAddr {
#[must_use]
pub const fn addr(&self) -> SocketAddr {
self.addr
}
@ -33,9 +34,14 @@ impl IndexerHandle {
.take()
.expect("Indexer server handle is set");
handle.stopped().await
handle.stopped().await;
}
#[expect(
clippy::redundant_closure_for_method_calls,
reason = "Clippy suggested path jsonrpsee::jsonrpsee_server::ServerHandle is not accessible"
)]
#[must_use]
pub fn is_stopped(&self) -> bool {
self.server_handle
.as_ref()

View File

@ -15,6 +15,10 @@ struct Args {
}
#[tokio::main]
#[expect(
clippy::integer_division_remainder_used,
reason = "Generated by select! macro, can't be easily rewritten to avoid this lint"
)]
async fn main() -> Result<()> {
env_logger::init();
@ -26,10 +30,10 @@ async fn main() -> Result<()> {
let indexer_handle = indexer_service::run_server(config, port).await?;
tokio::select! {
_ = cancellation_token.cancelled() => {
() = cancellation_token.cancelled() => {
info!("Shutting down server...");
}
_ = indexer_handle.stopped() => {
() = indexer_handle.stopped() => {
error!("Server stopped unexpectedly");
}
}

View File

@ -1,3 +1,11 @@
#![expect(
clippy::as_conversions,
clippy::arithmetic_side_effects,
clippy::cast_possible_truncation,
clippy::cast_lossless,
clippy::integer_division_remainder_used,
reason = "Mock service uses intentional casts and format patterns for test data generation"
)]
use std::collections::HashMap;
use indexer_service_protocol::{
@ -9,7 +17,7 @@ use indexer_service_protocol::{
};
use jsonrpsee::{core::SubscriptionResult, types::ErrorObjectOwned};
/// A mock implementation of the IndexerService RPC for testing purposes.
/// A mock implementation of the `IndexerService` RPC for testing purposes.
pub struct MockIndexerService {
blocks: Vec<Block>,
accounts: HashMap<AccountId, Account>,
@ -17,6 +25,7 @@ pub struct MockIndexerService {
}
impl MockIndexerService {
#[must_use]
pub fn new_with_mock_blocks() -> Self {
let mut blocks = Vec::new();
let mut accounts = HashMap::new();
@ -25,7 +34,7 @@ impl MockIndexerService {
// Create some mock accounts
let account_ids: Vec<AccountId> = (0..5)
.map(|i| {
let mut value = [0u8; 32];
let mut value = [0_u8; 32];
value[0] = i;
AccountId { value }
})
@ -43,12 +52,12 @@ impl MockIndexerService {
);
}
// Create 10 blocks with transactions
let mut prev_hash = HashType([0u8; 32]);
// Create 100 blocks with transactions
let mut prev_hash = HashType([0_u8; 32]);
for block_id in 0..10 {
for block_id in 1..=100 {
let block_hash = {
let mut hash = [0u8; 32];
let mut hash = [0_u8; 32];
hash[0] = block_id as u8;
hash[1] = 0xff;
HashType(hash)
@ -61,7 +70,7 @@ impl MockIndexerService {
for tx_idx in 0..num_txs {
let tx_hash = {
let mut hash = [0u8; 32];
let mut hash = [0_u8; 32];
hash[0] = block_id as u8;
hash[1] = tx_idx as u8;
HashType(hash)
@ -73,7 +82,7 @@ impl MockIndexerService {
0 | 1 => Transaction::Public(PublicTransaction {
hash: tx_hash,
message: PublicMessage {
program_id: ProgramId([1u32; 8]),
program_id: ProgramId([1_u32; 8]),
account_ids: vec![
account_ids[tx_idx as usize % account_ids.len()],
account_ids[(tx_idx as usize + 1) % account_ids.len()],
@ -95,7 +104,7 @@ impl MockIndexerService {
],
nonces: vec![block_id as u128],
public_post_states: vec![Account {
program_owner: ProgramId([1u32; 8]),
program_owner: ProgramId([1_u32; 8]),
balance: 500,
data: Data(vec![0xdd, 0xee]),
nonce: block_id as u128,
@ -136,8 +145,8 @@ impl MockIndexerService {
block_id,
prev_block_hash: prev_hash,
hash: block_hash,
timestamp: 1704067200000 + (block_id * 12000), // ~12 seconds per block
signature: Signature([0u8; 64]),
timestamp: 1_704_067_200_000 + (block_id * 12_000), // ~12 seconds per block
signature: Signature([0_u8; 64]),
},
body: BlockBody {
transactions: block_transactions,
@ -185,7 +194,7 @@ impl indexer_service_rpc::RpcServer for MockIndexerService {
.last()
.map(|bl| bl.header.block_id)
.ok_or_else(|| {
ErrorObjectOwned::owned(-32001, "Last block not found".to_string(), None::<()>)
ErrorObjectOwned::owned(-32001, "Last block not found".to_owned(), None::<()>)
})
}
@ -197,7 +206,7 @@ impl indexer_service_rpc::RpcServer for MockIndexerService {
.ok_or_else(|| {
ErrorObjectOwned::owned(
-32001,
format!("Block with ID {} not found", block_id),
format!("Block with ID {block_id} not found"),
None::<()>,
)
})
@ -225,30 +234,30 @@ impl indexer_service_rpc::RpcServer for MockIndexerService {
.ok_or_else(|| ErrorObjectOwned::owned(-32001, "Transaction not found", None::<()>))
}
async fn get_blocks(&self, offset: u32, limit: u32) -> Result<Vec<Block>, ErrorObjectOwned> {
let offset = offset as usize;
let limit = limit as usize;
let total = self.blocks.len();
async fn get_blocks(
&self,
before: Option<BlockId>,
limit: u64,
) -> Result<Vec<Block>, ErrorObjectOwned> {
let start_id = before.map_or_else(
|| self.blocks.len(),
|id| usize::try_from(id.saturating_sub(1)).expect("u64 should fit in usize"),
);
// Return blocks in reverse order (newest first), with pagination
let start = offset.min(total);
let end = (offset + limit).min(total);
Ok(self
.blocks
.iter()
let result = (1..=start_id)
.rev()
.skip(start)
.take(end - start)
.cloned()
.collect())
.take(limit as usize)
.map_while(|block_id| self.blocks.get(block_id - 1).cloned())
.collect();
Ok(result)
}
async fn get_transactions_by_account(
&self,
account_id: AccountId,
limit: u32,
offset: u32,
offset: u64,
limit: u64,
) -> Result<Vec<Transaction>, ErrorObjectOwned> {
let mut account_txs: Vec<_> = self
.transactions

View File

@ -89,17 +89,21 @@ impl indexer_service_rpc::RpcServer for IndexerService {
.into())
}
async fn get_blocks(&self, offset: u32, limit: u32) -> Result<Vec<Block>, ErrorObjectOwned> {
async fn get_blocks(
&self,
before: Option<BlockId>,
limit: u64,
) -> Result<Vec<Block>, ErrorObjectOwned> {
let blocks = self
.indexer
.store
.get_block_batch(offset as u64, limit as u64)
.get_block_batch(before, limit)
.map_err(db_error)?;
let mut block_res = vec![];
for block in blocks {
block_res.push(block.into())
block_res.push(block.into());
}
Ok(block_res)
@ -108,19 +112,19 @@ impl indexer_service_rpc::RpcServer for IndexerService {
async fn get_transactions_by_account(
&self,
account_id: AccountId,
limit: u32,
offset: u32,
offset: u64,
limit: u64,
) -> Result<Vec<Transaction>, ErrorObjectOwned> {
let transactions = self
.indexer
.store
.get_transactions_by_account(account_id.value, offset as u64, limit as u64)
.get_transactions_by_account(account_id.value, offset, limit)
.map_err(db_error)?;
let mut tx_res = vec![];
for tx in transactions {
tx_res.push(tx.into())
tx_res.push(tx.into());
}
Ok(tx_res)
@ -155,8 +159,10 @@ impl SubscriptionService {
pub async fn add_subscription(&self, subscription: Subscription<BlockId>) -> Result<()> {
let guard = self.parts.load();
if let Err(err) = guard.new_subscription_sender.send(subscription) {
error!("Failed to send new subscription to subscription service with error: {err:#?}");
if let Err(send_err) = guard.new_subscription_sender.send(subscription) {
error!(
"Failed to send new subscription to subscription service with error: {send_err:#?}"
);
// Respawn the subscription service loop if it has finished (either with error or panic)
if guard.handle.is_finished() {
@ -178,21 +184,25 @@ impl SubscriptionService {
}
}
bail!(err);
};
bail!(send_err)
}
Ok(())
}
fn spawn_respond_subscribers_loop(mut indexer: IndexerCore) -> SubscriptionLoopParts {
fn spawn_respond_subscribers_loop(indexer: IndexerCore) -> SubscriptionLoopParts {
let (new_subscription_sender, mut sub_receiver) =
tokio::sync::mpsc::unbounded_channel::<Subscription<BlockId>>();
let handle = tokio::spawn(async move {
let mut subscribers = Vec::new();
let mut block_stream = pin!(indexer.subscribe_parse_block_stream().await);
let mut block_stream = pin!(indexer.subscribe_parse_block_stream());
#[expect(
clippy::integer_division_remainder_used,
reason = "Generated by select! macro, can't be easily rewritten to avoid this lint"
)]
loop {
tokio::select! {
sub = sub_receiver.recv() => {
@ -247,7 +257,7 @@ struct Subscription<T> {
}
impl<T> Subscription<T> {
fn new(sink: SubscriptionSink) -> Self {
const fn new(sink: SubscriptionSink) -> Self {
Self {
sink,
_marker: std::marker::PhantomData,
@ -274,6 +284,7 @@ impl<T> Drop for Subscription<T> {
}
}
#[must_use]
pub fn not_yet_implemented_error() -> ErrorObjectOwned {
ErrorObject::owned(
ErrorCode::InternalError.code(),
@ -282,10 +293,14 @@ pub fn not_yet_implemented_error() -> ErrorObjectOwned {
)
}
#[expect(
clippy::needless_pass_by_value,
reason = "Error is consumed to extract details for error response"
)]
fn db_error(err: anyhow::Error) -> ErrorObjectOwned {
ErrorObjectOwned::owned(
ErrorCode::InternalError.code(),
"DBError".to_string(),
"DBError".to_owned(),
Some(format!("{err:#?}")),
)
}

View File

@ -4,6 +4,9 @@ version = "0.1.0"
edition = "2024"
license = { workspace = true }
[lints]
workspace = true
[dependencies]
nssa_core = { workspace = true, features = ["host"] }
nssa.workspace = true

View File

@ -1,6 +1,6 @@
use std::{net::SocketAddr, path::PathBuf, time::Duration};
use anyhow::{Context, Result};
use anyhow::{Context as _, Result};
use bytesize::ByteSize;
use common::block::{AccountInitialData, CommitmentsInitialData};
use indexer_service::{BackoffConfig, ChannelId, ClientConfig, IndexerConfig};
@ -13,31 +13,8 @@ use wallet::config::{
InitialAccountData, InitialAccountDataPrivate, InitialAccountDataPublic, WalletConfig,
};
pub fn indexer_config(
bedrock_addr: SocketAddr,
home: PathBuf,
initial_data: &InitialData,
) -> Result<IndexerConfig> {
Ok(IndexerConfig {
home,
consensus_info_polling_interval: Duration::from_secs(1),
bedrock_client_config: ClientConfig {
addr: addr_to_url(UrlProtocol::Http, bedrock_addr)
.context("Failed to convert bedrock addr to URL")?,
auth: None,
backoff: BackoffConfig {
start_delay: Duration::from_millis(100),
max_retries: 10,
},
},
initial_accounts: initial_data.sequencer_initial_accounts(),
initial_commitments: initial_data.sequencer_initial_commitments(),
signing_key: [37; 32],
channel_id: bedrock_channel_id(),
})
}
/// Sequencer config options available for custom changes in integration tests.
#[derive(Debug, Clone, Copy)]
pub struct SequencerPartialConfig {
pub max_num_tx_in_block: usize,
pub max_block_size: ByteSize,
@ -56,72 +33,13 @@ impl Default for SequencerPartialConfig {
}
}
pub fn sequencer_config(
partial: SequencerPartialConfig,
home: PathBuf,
bedrock_addr: SocketAddr,
indexer_addr: SocketAddr,
initial_data: &InitialData,
) -> Result<SequencerConfig> {
let SequencerPartialConfig {
max_num_tx_in_block,
max_block_size,
mempool_max_size,
block_create_timeout,
} = partial;
Ok(SequencerConfig {
home,
override_rust_log: None,
genesis_id: 1,
is_genesis_random: true,
max_num_tx_in_block,
max_block_size,
mempool_max_size,
block_create_timeout,
retry_pending_blocks_timeout: Duration::from_secs(120),
port: 0,
initial_accounts: initial_data.sequencer_initial_accounts(),
initial_commitments: initial_data.sequencer_initial_commitments(),
signing_key: [37; 32],
bedrock_config: BedrockConfig {
backoff: BackoffConfig {
start_delay: Duration::from_millis(100),
max_retries: 5,
},
channel_id: bedrock_channel_id(),
node_url: addr_to_url(UrlProtocol::Http, bedrock_addr)
.context("Failed to convert bedrock addr to URL")?,
auth: None,
},
indexer_rpc_url: addr_to_url(UrlProtocol::Ws, indexer_addr)
.context("Failed to convert indexer addr to URL")?,
})
}
pub fn wallet_config(
sequencer_addr: SocketAddr,
initial_data: &InitialData,
) -> Result<WalletConfig> {
Ok(WalletConfig {
override_rust_log: None,
sequencer_addr: addr_to_url(UrlProtocol::Http, sequencer_addr)
.context("Failed to convert sequencer addr to URL")?,
seq_poll_timeout: Duration::from_secs(30),
seq_tx_poll_max_blocks: 15,
seq_poll_max_retries: 10,
seq_block_poll_max_amount: 100,
initial_accounts: initial_data.wallet_initial_accounts(),
basic_auth: None,
})
}
pub struct InitialData {
pub public_accounts: Vec<(PrivateKey, u128)>,
pub private_accounts: Vec<(KeyChain, Account)>,
}
impl InitialData {
#[must_use]
pub fn with_two_public_and_two_private_initialized_accounts() -> Self {
let mut public_alice_private_key = PrivateKey::new_os_random();
let mut public_alice_public_key =
@ -221,16 +139,17 @@ impl InitialData {
})
.chain(self.private_accounts.iter().map(|(key_chain, account)| {
let account_id = AccountId::from(&key_chain.nullifer_public_key);
InitialAccountData::Private(InitialAccountDataPrivate {
InitialAccountData::Private(Box::new(InitialAccountDataPrivate {
account_id,
account: account.clone(),
key_chain: key_chain.clone(),
})
}))
}))
.collect()
}
}
#[derive(Debug, Clone, Copy)]
pub enum UrlProtocol {
Http,
Ws,
@ -239,12 +158,96 @@ pub enum UrlProtocol {
impl std::fmt::Display for UrlProtocol {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
match self {
UrlProtocol::Http => write!(f, "http"),
UrlProtocol::Ws => write!(f, "ws"),
Self::Http => write!(f, "http"),
Self::Ws => write!(f, "ws"),
}
}
}
/// Build an [`IndexerConfig`] suitable for integration tests.
///
/// The bedrock client points at `bedrock_addr` over HTTP, indexer state lives
/// under `home`, and the genesis accounts/commitments come from
/// `initial_data`.
///
/// # Errors
///
/// Returns an error when `bedrock_addr` cannot be converted into a URL.
pub fn indexer_config(
    bedrock_addr: SocketAddr,
    home: PathBuf,
    initial_data: &InitialData,
) -> Result<IndexerConfig> {
    // Clients reach bedrock over plain HTTP in tests.
    let bedrock_url = addr_to_url(UrlProtocol::Http, bedrock_addr)
        .context("Failed to convert bedrock addr to URL")?;
    let bedrock_client_config = ClientConfig {
        addr: bedrock_url,
        auth: None,
        // Quick first retry, generous retry budget for slow CI startups.
        backoff: BackoffConfig {
            start_delay: Duration::from_millis(100),
            max_retries: 10,
        },
    };
    Ok(IndexerConfig {
        home,
        consensus_info_polling_interval: Duration::from_secs(1),
        bedrock_client_config,
        initial_accounts: initial_data.sequencer_initial_accounts(),
        initial_commitments: initial_data.sequencer_initial_commitments(),
        // Fixed test-only signing key; must match the sequencer's.
        signing_key: [37; 32],
        channel_id: bedrock_channel_id(),
    })
}
/// Build a [`SequencerConfig`] for integration tests from a partial config
/// (block/mempool limits) plus the bedrock and indexer endpoints.
///
/// # Errors
///
/// Returns an error when either `bedrock_addr` or `indexer_addr` cannot be
/// converted into a URL.
pub fn sequencer_config(
    partial: SequencerPartialConfig,
    home: PathBuf,
    bedrock_addr: SocketAddr,
    indexer_addr: SocketAddr,
    initial_data: &InitialData,
) -> Result<SequencerConfig> {
    // Resolve both endpoints up front; bedrock first, matching the order the
    // struct literal would evaluate them in.
    let bedrock_node_url = addr_to_url(UrlProtocol::Http, bedrock_addr)
        .context("Failed to convert bedrock addr to URL")?;
    let indexer_rpc_url = addr_to_url(UrlProtocol::Ws, indexer_addr)
        .context("Failed to convert indexer addr to URL")?;
    let bedrock_config = BedrockConfig {
        backoff: BackoffConfig {
            start_delay: Duration::from_millis(100),
            max_retries: 5,
        },
        channel_id: bedrock_channel_id(),
        node_url: bedrock_node_url,
        auth: None,
    };
    Ok(SequencerConfig {
        home,
        override_rust_log: None,
        genesis_id: 1,
        is_genesis_random: true,
        // Limits supplied by the caller via the partial config.
        max_num_tx_in_block: partial.max_num_tx_in_block,
        max_block_size: partial.max_block_size,
        mempool_max_size: partial.mempool_max_size,
        block_create_timeout: partial.block_create_timeout,
        retry_pending_blocks_timeout: Duration::from_secs(120),
        // Port 0: let the OS pick, avoiding clashes between parallel tests.
        port: 0,
        initial_accounts: initial_data.sequencer_initial_accounts(),
        initial_commitments: initial_data.sequencer_initial_commitments(),
        // Fixed test-only signing key; must match the indexer's.
        signing_key: [37; 32],
        bedrock_config,
        indexer_rpc_url,
    })
}
/// Build a [`WalletConfig`] for integration tests targeting `sequencer_addr`.
///
/// # Errors
///
/// Fails when `sequencer_addr` cannot be turned into an HTTP URL.
pub fn wallet_config(
    sequencer_addr: SocketAddr,
    initial_data: &InitialData,
) -> Result<WalletConfig> {
    Ok(WalletConfig {
        override_rust_log: None,
        // The wallet reaches the sequencer over plain HTTP in tests.
        sequencer_addr: addr_to_url(UrlProtocol::Http, sequencer_addr)
            .context("Failed to convert sequencer addr to URL")?,
        // Polling knobs tuned for CI: short timeout, modest retry budget.
        seq_poll_timeout: Duration::from_secs(30),
        seq_tx_poll_max_blocks: 15,
        seq_poll_max_retries: 10,
        seq_block_poll_max_amount: 100,
        initial_accounts: initial_data.wallet_initial_accounts(),
        basic_auth: None,
    })
}
pub fn addr_to_url(protocol: UrlProtocol, addr: SocketAddr) -> Result<Url> {
// Convert 0.0.0.0 to 127.0.0.1 for client connections
// When binding to port 0, the server binds to 0.0.0.0:<random_port>
@ -259,7 +262,7 @@ pub fn addr_to_url(protocol: UrlProtocol, addr: SocketAddr) -> Result<Url> {
}
fn bedrock_channel_id() -> ChannelId {
let channel_id: [u8; 32] = [0u8, 1]
let channel_id: [u8; 32] = [0_u8, 1]
.repeat(16)
.try_into()
.unwrap_or_else(|_| unreachable!());

View File

@ -2,15 +2,15 @@
use std::{net::SocketAddr, path::PathBuf, sync::LazyLock};
use anyhow::{Context, Result, bail};
use base64::{Engine, engine::general_purpose::STANDARD as BASE64};
use anyhow::{Context as _, Result, bail};
use base64::{Engine as _, engine::general_purpose::STANDARD as BASE64};
use common::{HashType, sequencer_client::SequencerClient, transaction::NSSATransaction};
use futures::FutureExt as _;
use indexer_service::IndexerHandle;
use log::{debug, error, warn};
use nssa::{AccountId, PrivacyPreservingTransaction};
use nssa_core::Commitment;
use sequencer_core::indexer_client::{IndexerClient, IndexerClientTrait};
use sequencer_core::indexer_client::{IndexerClient, IndexerClientTrait as _};
use sequencer_runner::SequencerHandle;
use tempfile::TempDir;
use testcontainers::compose::DockerCompose;
@ -52,7 +52,8 @@ impl TestContext {
Self::builder().build().await
}
pub fn builder() -> TestContextBuilder {
#[must_use]
pub const fn builder() -> TestContextBuilder {
TestContextBuilder::new()
}
@ -120,6 +121,10 @@ impl TestContext {
// Setting port to 0 to avoid conflicts between parallel tests, actual port will be retrieved after container is up
.with_env("PORT", "0");
#[expect(
clippy::items_after_statements,
reason = "This is more readable is this function used just after its definition"
)]
async fn up_and_retrieve_port(compose: &mut DockerCompose) -> Result<u16> {
compose
.up()
@ -151,10 +156,12 @@ impl TestContext {
}
let mut port = None;
let mut attempt = 0;
let max_attempts = 5;
let mut attempt = 0_u32;
let max_attempts = 5_u32;
while port.is_none() && attempt < max_attempts {
attempt += 1;
attempt = attempt
.checked_add(1)
.expect("We check that attempt < max_attempts, so this won't overflow");
match up_and_retrieve_port(&mut compose).await {
Ok(p) => {
port = Some(p);
@ -181,7 +188,10 @@ impl TestContext {
let temp_indexer_dir =
tempfile::tempdir().context("Failed to create temp dir for indexer home")?;
debug!("Using temp indexer home at {:?}", temp_indexer_dir.path());
debug!(
"Using temp indexer home at {}",
temp_indexer_dir.path().display()
);
let indexer_config = config::indexer_config(
bedrock_addr,
@ -206,8 +216,8 @@ impl TestContext {
tempfile::tempdir().context("Failed to create temp dir for sequencer home")?;
debug!(
"Using temp sequencer home at {:?}",
temp_sequencer_dir.path()
"Using temp sequencer home at {}",
temp_sequencer_dir.path().display()
);
let config = config::sequencer_config(
@ -260,30 +270,35 @@ impl TestContext {
}
/// Get reference to the wallet.
pub fn wallet(&self) -> &WalletCore {
#[must_use]
pub const fn wallet(&self) -> &WalletCore {
&self.wallet
}
#[must_use]
pub fn wallet_password(&self) -> &str {
&self.wallet_password
}
/// Get mutable reference to the wallet.
pub fn wallet_mut(&mut self) -> &mut WalletCore {
pub const fn wallet_mut(&mut self) -> &mut WalletCore {
&mut self.wallet
}
/// Get reference to the sequencer client.
pub fn sequencer_client(&self) -> &SequencerClient {
#[must_use]
pub const fn sequencer_client(&self) -> &SequencerClient {
&self.sequencer_client
}
/// Get reference to the indexer client.
pub fn indexer_client(&self) -> &IndexerClient {
#[must_use]
pub const fn indexer_client(&self) -> &IndexerClient {
&self.indexer_client
}
/// Get existing public account IDs in the wallet.
#[must_use]
pub fn existing_public_accounts(&self) -> Vec<AccountId> {
self.wallet
.storage()
@ -293,6 +308,7 @@ impl TestContext {
}
/// Get existing private account IDs in the wallet.
#[must_use]
pub fn existing_private_accounts(&self) -> Vec<AccountId> {
self.wallet
.storage()
@ -352,7 +368,7 @@ impl Drop for TestContext {
}
}
/// A test context to be used in normal #[test] tests
/// A test context to be used in normal #[test] tests.
pub struct BlockingTestContext {
ctx: Option<TestContext>,
runtime: tokio::runtime::Runtime,
@ -368,7 +384,7 @@ impl BlockingTestContext {
})
}
pub fn ctx(&self) -> &TestContext {
pub const fn ctx(&self) -> &TestContext {
self.ctx.as_ref().expect("TestContext is set")
}
}
@ -379,19 +395,21 @@ pub struct TestContextBuilder {
}
impl TestContextBuilder {
fn new() -> Self {
const fn new() -> Self {
Self {
initial_data: None,
sequencer_partial_config: None,
}
}
#[must_use]
pub fn with_initial_data(mut self, initial_data: config::InitialData) -> Self {
self.initial_data = Some(initial_data);
self
}
pub fn with_sequencer_partial_config(
#[must_use]
pub const fn with_sequencer_partial_config(
mut self,
sequencer_partial_config: config::SequencerPartialConfig,
) -> Self {
@ -419,18 +437,24 @@ impl Drop for BlockingTestContext {
if let Some(ctx) = ctx.take() {
drop(ctx);
}
})
});
}
}
#[must_use]
pub fn format_public_account_id(account_id: AccountId) -> String {
format!("Public/{account_id}")
}
#[must_use]
pub fn format_private_account_id(account_id: AccountId) -> String {
format!("Private/{account_id}")
}
#[expect(
clippy::wildcard_enum_match_arm,
reason = "We want the code to panic if the transaction type is not PrivacyPreserving"
)]
pub async fn fetch_privacy_preserving_tx(
seq_client: &SequencerClient,
tx_hash: HashType,

View File

@ -1,3 +1,8 @@
#![expect(
clippy::tests_outside_test_module,
reason = "We don't care about these in tests"
)]
use anyhow::Result;
use integration_tests::TestContext;
use log::info;
@ -36,7 +41,7 @@ async fn get_existing_account() -> Result<()> {
async fn new_public_account_with_label() -> Result<()> {
let mut ctx = TestContext::new().await?;
let label = "my-test-public-account".to_string();
let label = "my-test-public-account".to_owned();
let command = Command::Account(AccountSubcommand::New(NewSubcommand::Public {
cci: None,
label: Some(label.clone()),
@ -45,9 +50,8 @@ async fn new_public_account_with_label() -> Result<()> {
let result = execute_subcommand(ctx.wallet_mut(), command).await?;
// Extract the account_id from the result
let account_id = match result {
wallet::cli::SubcommandReturnValue::RegisterAccount { account_id } => account_id,
_ => panic!("Expected RegisterAccount return value"),
let wallet::cli::SubcommandReturnValue::RegisterAccount { account_id } = result else {
panic!("Expected RegisterAccount return value")
};
// Verify the label was stored
@ -69,7 +73,7 @@ async fn new_public_account_with_label() -> Result<()> {
async fn new_private_account_with_label() -> Result<()> {
let mut ctx = TestContext::new().await?;
let label = "my-test-private-account".to_string();
let label = "my-test-private-account".to_owned();
let command = Command::Account(AccountSubcommand::New(NewSubcommand::Private {
cci: None,
label: Some(label.clone()),
@ -78,9 +82,9 @@ async fn new_private_account_with_label() -> Result<()> {
let result = execute_subcommand(ctx.wallet_mut(), command).await?;
// Extract the account_id from the result
let account_id = match result {
wallet::cli::SubcommandReturnValue::RegisterAccount { account_id } => account_id,
_ => panic!("Expected RegisterAccount return value"),
let wallet::cli::SubcommandReturnValue::RegisterAccount { account_id } = result else {
panic!("Expected RegisterAccount return value")
};
// Verify the label was stored
@ -110,9 +114,9 @@ async fn new_public_account_without_label() -> Result<()> {
let result = execute_subcommand(ctx.wallet_mut(), command).await?;
// Extract the account_id from the result
let account_id = match result {
wallet::cli::SubcommandReturnValue::RegisterAccount { account_id } => account_id,
_ => panic!("Expected RegisterAccount return value"),
let wallet::cli::SubcommandReturnValue::RegisterAccount { account_id } = result else {
panic!("Expected RegisterAccount return value")
};
// Verify no label was stored

View File

@ -1,3 +1,9 @@
#![expect(
clippy::shadow_unrelated,
clippy::tests_outside_test_module,
reason = "We don't care about these in tests"
)]
use std::time::Duration;
use anyhow::Result;
@ -108,7 +114,7 @@ async fn amm_public() -> Result<()> {
let subcommand = TokenProgramAgnosticSubcommand::New {
definition_account_id: format_public_account_id(definition_account_id_1),
supply_account_id: format_public_account_id(supply_account_id_1),
name: "A NAM1".to_string(),
name: "A NAM1".to_owned(),
total_supply: 37,
};
wallet::cli::execute_subcommand(ctx.wallet_mut(), Command::Token(subcommand)).await?;
@ -132,7 +138,7 @@ async fn amm_public() -> Result<()> {
let subcommand = TokenProgramAgnosticSubcommand::New {
definition_account_id: format_public_account_id(definition_account_id_2),
supply_account_id: format_public_account_id(supply_account_id_2),
name: "A NAM2".to_string(),
name: "A NAM2".to_owned(),
total_supply: 37,
};
wallet::cli::execute_subcommand(ctx.wallet_mut(), Command::Token(subcommand)).await?;

View File

@ -1,2 +1,8 @@
#![expect(
clippy::shadow_unrelated,
clippy::tests_outside_test_module,
reason = "We don't care about these in tests"
)]
mod private;
mod public;

View File

@ -86,7 +86,7 @@ async fn private_transfer_to_foreign_account() -> Result<()> {
assert_eq!(tx.message.new_commitments[0], new_commitment1);
assert_eq!(tx.message.new_commitments.len(), 2);
for commitment in tx.message.new_commitments.into_iter() {
for commitment in tx.message.new_commitments {
assert!(verify_commitment_is_in_state(commitment, ctx.sequencer_client()).await);
}
@ -198,7 +198,7 @@ async fn private_transfer_to_owned_account_using_claiming_path() -> Result<()> {
assert_eq!(tx.message.new_commitments[0], new_commitment1);
assert_eq!(tx.message.new_commitments.len(), 2);
for commitment in tx.message.new_commitments.into_iter() {
for commitment in tx.message.new_commitments {
assert!(verify_commitment_is_in_state(commitment, ctx.sequencer_client()).await);
}
@ -353,7 +353,7 @@ async fn private_transfer_to_owned_account_continuous_run_path() -> Result<()> {
// Verify commitments are in state
assert_eq!(tx.message.new_commitments.len(), 2);
for commitment in tx.message.new_commitments.into_iter() {
for commitment in tx.message.new_commitments {
assert!(verify_commitment_is_in_state(commitment, ctx.sequencer_client()).await);
}

View File

@ -112,7 +112,7 @@ async fn failed_transfer_with_insufficient_balance() -> Result<()> {
to: Some(format_public_account_id(ctx.existing_public_accounts()[1])),
to_npk: None,
to_vpk: None,
amount: 1000000,
amount: 1_000_000,
});
let failed_send = wallet::cli::execute_subcommand(ctx.wallet_mut(), command).await;

View File

@ -1,3 +1,9 @@
#![expect(
clippy::as_conversions,
clippy::tests_outside_test_module,
reason = "We don't care about these in tests"
)]
use std::time::Duration;
use anyhow::Result;
@ -24,7 +30,7 @@ async fn reject_oversized_transaction() -> Result<()> {
// Create a transaction that's definitely too large
// Block size is 1 MiB (1,048,576 bytes), minus ~200 bytes for header = ~1,048,376 bytes max tx
// Create a 1.1 MiB binary to ensure it exceeds the limit
let oversized_binary = vec![0u8; 1100 * 1024]; // 1.1 MiB binary
let oversized_binary = vec![0_u8; 1100 * 1024]; // 1.1 MiB binary
let message = nssa::program_deployment_transaction::Message::new(oversized_binary);
let tx = nssa::ProgramDeploymentTransaction::new(message);
@ -38,13 +44,12 @@ async fn reject_oversized_transaction() -> Result<()> {
);
let err = result.unwrap_err();
let err_str = format!("{:?}", err);
let err_str = format!("{err:?}");
// Check if the error contains information about transaction being too large
assert!(
err_str.contains("TransactionTooLarge") || err_str.contains("too large"),
"Expected TransactionTooLarge error, got: {}",
err_str
"Expected TransactionTooLarge error, got: {err_str}"
);
Ok(())
@ -63,7 +68,7 @@ async fn accept_transaction_within_limit() -> Result<()> {
.await?;
// Create a small program deployment that should fit
let small_binary = vec![0u8; 1024]; // 1 KiB binary
let small_binary = vec![0_u8; 1024]; // 1 KiB binary
let message = nssa::program_deployment_transaction::Message::new(small_binary);
let tx = nssa::ProgramDeploymentTransaction::new(message);

View File

@ -1,3 +1,9 @@
#![expect(
clippy::shadow_unrelated,
clippy::tests_outside_test_module,
reason = "We don't care about these in tests"
)]
use anyhow::Result;
use integration_tests::TestContext;
use log::info;
@ -12,8 +18,8 @@ async fn modify_config_field() -> Result<()> {
// Change config field
let command = Command::Config(ConfigSubcommand::Set {
key: "seq_poll_timeout".to_string(),
value: "1s".to_string(),
key: "seq_poll_timeout".to_owned(),
value: "1s".to_owned(),
});
wallet::cli::execute_subcommand(ctx.wallet_mut(), command).await?;
@ -22,8 +28,8 @@ async fn modify_config_field() -> Result<()> {
// Return how it was at the beginning
let command = Command::Config(ConfigSubcommand::Set {
key: "seq_poll_timeout".to_string(),
value: format!("{:?}", old_seq_poll_timeout),
key: "seq_poll_timeout".to_owned(),
value: format!("{old_seq_poll_timeout:?}"),
});
wallet::cli::execute_subcommand(ctx.wallet_mut(), command).await?;

View File

@ -1,14 +1,19 @@
#![expect(
clippy::tests_outside_test_module,
reason = "We don't care about these in tests"
)]
use std::time::Duration;
use anyhow::Result;
use indexer_service_rpc::RpcClient;
use indexer_service_rpc::RpcClient as _;
use integration_tests::{TIME_TO_WAIT_FOR_BLOCK_SECONDS, TestContext, format_public_account_id};
use log::info;
use tokio::test;
use wallet::cli::{Command, programs::native_token_transfer::AuthTransferSubcommand};
/// Timeout in milliseconds to reliably await for block finalization
const L2_TO_L1_TIMEOUT_MILLIS: u64 = 600000;
/// Timeout in milliseconds to reliably await for block finalization.
const L2_TO_L1_TIMEOUT_MILLIS: u64 = 600_000;
#[test]
async fn indexer_test_run() -> Result<()> {
@ -57,8 +62,11 @@ async fn indexer_block_batching() -> Result<()> {
assert!(last_block_indexer > 1);
// Getting wide batch to fit all blocks
let block_batch = ctx.indexer_client().get_blocks(1, 100).await.unwrap();
// Getting wide batch to fit all blocks (from latest backwards)
let mut block_batch = ctx.indexer_client().get_blocks(None, 100).await.unwrap();
// Reverse to check chain consistency from oldest to newest
block_batch.reverse();
// Checking chain consistency
let mut prev_block_hash = block_batch.first().unwrap().header.hash;

View File

@ -1,9 +1,15 @@
use std::{str::FromStr, time::Duration};
#![expect(
clippy::shadow_unrelated,
clippy::tests_outside_test_module,
reason = "We don't care about these in tests"
)]
use anyhow::Result;
use std::{str::FromStr as _, time::Duration};
use anyhow::{Context as _, Result};
use integration_tests::{
TIME_TO_WAIT_FOR_BLOCK_SECONDS, TestContext, format_private_account_id,
format_public_account_id, verify_commitment_is_in_state,
TIME_TO_WAIT_FOR_BLOCK_SECONDS, TestContext, fetch_privacy_preserving_tx,
format_private_account_id, format_public_account_id, verify_commitment_is_in_state,
};
use key_protocol::key_management::key_tree::chain_index::ChainIndex;
use log::info;
@ -15,6 +21,93 @@ use wallet::cli::{
programs::native_token_transfer::AuthTransferSubcommand,
};
/// Verify that `SyncPrivate` can claim an incoming transfer for a private
/// account whose key-tree child index is greater than zero: three throwaway
/// accounts are created first to shift the index, then funds are sent to the
/// fourth account via its public keys (claiming path) and recovered by sync.
#[test]
async fn sync_private_account_with_non_zero_chain_index() -> Result<()> {
    let mut ctx = TestContext::new().await?;
    // Sender: the first pre-funded private account from the test genesis.
    let from: AccountId = ctx.existing_private_accounts()[0];
    // Create a new private account
    let command = Command::Account(AccountSubcommand::New(NewSubcommand::Private {
        cci: None,
        label: None,
    }));
    for _ in 0..3 {
        // Key Tree shift
        // This way we have account with child index > 0.
        let result = wallet::cli::execute_subcommand(
            ctx.wallet_mut(),
            Command::Account(AccountSubcommand::New(NewSubcommand::Private {
                cci: None,
                label: None,
            })),
        )
        .await?;
        // Only the return-value variant matters here; the ids are discarded.
        let SubcommandReturnValue::RegisterAccount { account_id: _ } = result else {
            anyhow::bail!("Expected RegisterAccount return value");
        };
    }
    // The account under test: created after the shift, so its child index > 0.
    let sub_ret = wallet::cli::execute_subcommand(ctx.wallet_mut(), command).await?;
    let SubcommandReturnValue::RegisterAccount {
        account_id: to_account_id,
    } = sub_ret
    else {
        anyhow::bail!("Expected RegisterAccount return value");
    };
    // Get the keys for the newly created account
    let (to_keys, _) = ctx
        .wallet()
        .storage()
        .user_data
        .get_private_account(to_account_id)
        .cloned()
        .context("Failed to get private account")?;
    // Send to this account using claiming path (using npk and vpk instead of account ID)
    // NOTE(review): `nullifer_public_key` looks like a typo for "nullifier",
    // but it mirrors the field name declared in the key-management crate.
    let command = Command::AuthTransfer(AuthTransferSubcommand::Send {
        from: format_private_account_id(from),
        to: None,
        to_npk: Some(hex::encode(to_keys.nullifer_public_key.0)),
        to_vpk: Some(hex::encode(to_keys.viewing_public_key.0)),
        amount: 100,
    });
    let sub_ret = wallet::cli::execute_subcommand(ctx.wallet_mut(), command).await?;
    let SubcommandReturnValue::PrivacyPreservingTransfer { tx_hash } = sub_ret else {
        anyhow::bail!("Expected PrivacyPreservingTransfer return value");
    };
    // Fetch the sequenced transaction so its commitments can be checked below.
    let tx = fetch_privacy_preserving_tx(ctx.sequencer_client(), tx_hash).await;
    // Sync the wallet to claim the new account
    let command = Command::Account(AccountSubcommand::SyncPrivate {});
    wallet::cli::execute_subcommand(ctx.wallet_mut(), command).await?;
    // The sender's post-transfer commitment must be the first one in the tx.
    let new_commitment1 = ctx
        .wallet()
        .get_private_account_commitment(from)
        .context("Failed to get private account commitment for sender")?;
    assert_eq!(tx.message.new_commitments[0], new_commitment1);
    // Exactly two commitments are expected: sender change + recipient.
    assert_eq!(tx.message.new_commitments.len(), 2);
    for commitment in tx.message.new_commitments {
        assert!(verify_commitment_is_in_state(commitment, ctx.sequencer_client()).await);
    }
    // After sync, the recipient account must hold the transferred amount.
    let to_res_acc = ctx
        .wallet()
        .get_account_private(to_account_id)
        .context("Failed to get recipient's private account")?;
    assert_eq!(to_res_acc.balance, 100);
    info!("Successfully transferred using claiming path");
    Ok(())
}
#[test]
async fn restore_keys_from_seed() -> Result<()> {
let mut ctx = TestContext::new().await?;

View File

@ -1,3 +1,9 @@
#![expect(
clippy::shadow_unrelated,
clippy::tests_outside_test_module,
reason = "We don't care about these in tests"
)]
use std::time::Duration;
use anyhow::{Context as _, Result};
@ -16,6 +22,118 @@ use wallet::cli::{
},
};
/// Claiming the pinata into a freshly registered (never initialized) public
/// account must fail fast: the error should steer the user towards
/// `auth-transfer init`, and no funds may leave the pinata account.
#[test]
async fn claim_pinata_to_uninitialized_public_account_fails_fast() -> Result<()> {
    let mut ctx = TestContext::new().await?;

    // Register a brand-new public account; it is never initialized on-chain.
    let new_account_cmd = Command::Account(AccountSubcommand::New(NewSubcommand::Public {
        cci: None,
        label: None,
    }));
    let create_ret = wallet::cli::execute_subcommand(ctx.wallet_mut(), new_account_cmd).await?;
    let SubcommandReturnValue::RegisterAccount {
        account_id: winner_account_id,
    } = create_ret
    else {
        anyhow::bail!("Expected RegisterAccount return value");
    };
    let winner = format_public_account_id(winner_account_id);

    // Snapshot the pinata balance before the claim that is expected to fail.
    let balance_before = ctx
        .sequencer_client()
        .get_account_balance(PINATA_BASE58.parse().unwrap())
        .await?
        .balance;

    let claim_result = wallet::cli::execute_subcommand(
        ctx.wallet_mut(),
        Command::Pinata(PinataProgramAgnosticSubcommand::Claim { to: winner }),
    )
    .await;

    assert!(
        claim_result.is_err(),
        "Expected uninitialized account error"
    );
    // The error text must tell the user exactly how to initialize the account.
    let err = claim_result.unwrap_err().to_string();
    assert!(
        err.contains("wallet auth-transfer init --account-id Public/"),
        "Expected init guidance, got: {err}",
    );

    // The failed claim must not have moved any funds out of the pinata.
    let balance_after = ctx
        .sequencer_client()
        .get_account_balance(PINATA_BASE58.parse().unwrap())
        .await?
        .balance;
    assert_eq!(balance_after, balance_before);

    Ok(())
}
/// Private-account twin of the public fail-fast test: claiming the pinata
/// into a freshly registered (never initialized) private account must error
/// out with `auth-transfer init` guidance, leaving the pinata balance intact.
#[test]
async fn claim_pinata_to_uninitialized_private_account_fails_fast() -> Result<()> {
    let mut ctx = TestContext::new().await?;

    // Register a brand-new private account; it is never initialized on-chain.
    let new_account_cmd = Command::Account(AccountSubcommand::New(NewSubcommand::Private {
        cci: None,
        label: None,
    }));
    let create_ret = wallet::cli::execute_subcommand(ctx.wallet_mut(), new_account_cmd).await?;
    let SubcommandReturnValue::RegisterAccount {
        account_id: winner_account_id,
    } = create_ret
    else {
        anyhow::bail!("Expected RegisterAccount return value");
    };
    let winner = format_private_account_id(winner_account_id);

    // Snapshot the pinata balance before the claim that is expected to fail.
    let balance_before = ctx
        .sequencer_client()
        .get_account_balance(PINATA_BASE58.parse().unwrap())
        .await?
        .balance;

    let claim_result = wallet::cli::execute_subcommand(
        ctx.wallet_mut(),
        Command::Pinata(PinataProgramAgnosticSubcommand::Claim { to: winner }),
    )
    .await;

    assert!(
        claim_result.is_err(),
        "Expected uninitialized account error"
    );
    // The error text must tell the user exactly how to initialize the account.
    let err = claim_result.unwrap_err().to_string();
    assert!(
        err.contains("wallet auth-transfer init --account-id Private/"),
        "Expected init guidance, got: {err}",
    );

    // The failed claim must not have moved any funds out of the pinata.
    let balance_after = ctx
        .sequencer_client()
        .get_account_balance(PINATA_BASE58.parse().unwrap())
        .await?
        .balance;
    assert_eq!(balance_after, balance_before);

    Ok(())
}
#[test]
async fn claim_pinata_to_existing_public_account() -> Result<()> {
let mut ctx = TestContext::new().await?;

View File

@ -1,3 +1,8 @@
#![expect(
clippy::tests_outside_test_module,
reason = "We don't care about these in tests"
)]
use std::{path::PathBuf, time::Duration};
use anyhow::Result;

View File

@ -1,3 +1,9 @@
#![expect(
clippy::shadow_unrelated,
clippy::tests_outside_test_module,
reason = "We don't care about these in tests"
)]
use std::time::Duration;
use anyhow::{Context as _, Result};
@ -69,7 +75,7 @@ async fn create_and_transfer_public_token() -> Result<()> {
};
// Create new token
let name = "A NAME".to_string();
let name = "A NAME".to_owned();
let total_supply = 37;
let subcommand = TokenProgramAgnosticSubcommand::New {
definition_account_id: format_public_account_id(definition_account_id),
@ -317,7 +323,7 @@ async fn create_and_transfer_token_with_private_supply() -> Result<()> {
};
// Create new token
let name = "A NAME".to_string();
let name = "A NAME".to_owned();
let total_supply = 37;
let subcommand = TokenProgramAgnosticSubcommand::New {
definition_account_id: format_public_account_id(definition_account_id),
@ -475,7 +481,7 @@ async fn create_token_with_private_definition() -> Result<()> {
};
// Create token with private definition
let name = "A NAME".to_string();
let name = "A NAME".to_owned();
let total_supply = 37;
let subcommand = TokenProgramAgnosticSubcommand::New {
definition_account_id: format_private_account_id(definition_account_id),
@ -671,7 +677,7 @@ async fn create_token_with_private_definition_and_supply() -> Result<()> {
};
// Create token with both private definition and supply
let name = "A NAME".to_string();
let name = "A NAME".to_owned();
let total_supply = 37;
let subcommand = TokenProgramAgnosticSubcommand::New {
definition_account_id: format_private_account_id(definition_account_id),
@ -843,7 +849,7 @@ async fn shielded_token_transfer() -> Result<()> {
};
// Create token
let name = "A NAME".to_string();
let name = "A NAME".to_owned();
let total_supply = 37;
let subcommand = TokenProgramAgnosticSubcommand::New {
definition_account_id: format_public_account_id(definition_account_id),
@ -966,7 +972,7 @@ async fn deshielded_token_transfer() -> Result<()> {
};
// Create token with private supply
let name = "A NAME".to_string();
let name = "A NAME".to_owned();
let total_supply = 37;
let subcommand = TokenProgramAgnosticSubcommand::New {
definition_account_id: format_public_account_id(definition_account_id),
@ -1073,7 +1079,7 @@ async fn token_claiming_path_with_private_accounts() -> Result<()> {
};
// Create token
let name = "A NAME".to_string();
let name = "A NAME".to_owned();
let total_supply = 37;
let subcommand = TokenProgramAgnosticSubcommand::New {
definition_account_id: format_private_account_id(definition_account_id),

Some files were not shown because too many files have changed in this diff Show More