Merge branch 'main' into marvin/refactor-keys-to-wallet

This commit is contained in:
jonesmarvin8 2026-04-08 10:08:15 -04:00
commit d67269e666
125 changed files with 5211 additions and 1854 deletions

View File

@ -50,7 +50,7 @@ jobs:
type=ref,event=pr
type=semver,pattern={{version}}
type=semver,pattern={{major}}.{{minor}}
type=sha,prefix={{branch}}-
type=sha,prefix=sha-
type=raw,value=latest,enable={{is_default_branch}}
- name: Build and push Docker image

14
Cargo.lock generated
View File

@ -1462,6 +1462,14 @@ version = "1.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3a822ea5bc7590f9d40f1ba12c0dc3c2760f3482c6984db1573ad11031420831"
[[package]]
name = "clock_core"
version = "0.1.0"
dependencies = [
"borsh",
"nssa_core",
]
[[package]]
name = "cobs"
version = "0.3.0"
@ -1511,6 +1519,7 @@ dependencies = [
"anyhow",
"base64 0.22.1",
"borsh",
"clock_core",
"hex",
"log",
"logos-blockchain-common-http-client",
@ -5259,6 +5268,7 @@ version = "0.1.0"
dependencies = [
"anyhow",
"borsh",
"clock_core",
"env_logger",
"hex",
"hex-literal 1.1.0",
@ -5897,6 +5907,7 @@ dependencies = [
"amm_program",
"ata_core",
"ata_program",
"clock_core",
"nssa_core",
"risc0-zkvm",
"serde",
@ -7151,6 +7162,7 @@ dependencies = [
"serde_json",
"storage",
"tempfile",
"test_program_methods",
"testnet_initial_state",
"tokio",
"url",
@ -7831,8 +7843,10 @@ dependencies = [
name = "test_programs"
version = "0.1.0"
dependencies = [
"clock_core",
"nssa_core",
"risc0-zkvm",
"serde",
]
[[package]]

View File

@ -15,6 +15,7 @@ members = [
"nssa/core",
"programs/amm/core",
"programs/amm",
"programs/clock/core",
"programs/token/core",
"programs/token",
"programs/associated_token_account/core",
@ -56,6 +57,7 @@ indexer_service_protocol = { path = "indexer/service/protocol" }
indexer_service_rpc = { path = "indexer/service/rpc" }
wallet = { path = "wallet" }
wallet-ffi = { path = "wallet-ffi", default-features = false }
clock_core = { path = "programs/clock/core" }
token_core = { path = "programs/token/core" }
token_program = { path = "programs/token" }
amm_core = { path = "programs/amm/core" }

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

View File

@ -10,6 +10,7 @@ workspace = true
[dependencies]
nssa.workspace = true
nssa_core.workspace = true
clock_core.workspace = true
anyhow.workspace = true
thiserror.workspace = true

View File

@ -1,10 +1,10 @@
use borsh::{BorshDeserialize, BorshSerialize};
use nssa_core::{BlockId, Timestamp};
use nssa_core::BlockId;
pub use nssa_core::Timestamp;
use serde::{Deserialize, Serialize};
use sha2::{Digest as _, Sha256, digest::FixedOutput as _};
use crate::{HashType, transaction::NSSATransaction};
pub type MantleMsgId = [u8; 32];
pub type BlockHash = HashType;

View File

@ -1,6 +1,6 @@
use borsh::{BorshDeserialize, BorshSerialize};
use log::warn;
use nssa::{AccountId, V03State};
use nssa::{AccountId, V03State, ValidatedStateDiff};
use nssa_core::{BlockId, Timestamp};
use serde::{Deserialize, Serialize};
@ -66,21 +66,53 @@ impl NSSATransaction {
}
}
/// Validates the transaction against the current state and returns the resulting diff
/// without applying it. Rejects transactions that modify clock system accounts.
///
/// # Errors
/// Propagates any [`nssa::error::NssaError`] produced while building the
/// [`ValidatedStateDiff`] for the transaction variant, and returns
/// `NssaError::InvalidInput` if the resulting public diff would change any account
/// listed in `nssa::CLOCK_PROGRAM_ACCOUNT_IDS` (only the canonical clock invocation
/// is allowed to mutate those — see `clock_invocation`).
pub fn validate_on_state(
&self,
state: &V03State,
block_id: BlockId,
timestamp: Timestamp,
) -> Result<ValidatedStateDiff, nssa::error::NssaError> {
// Build the candidate diff for this variant without mutating `state`.
let diff = match self {
Self::Public(tx) => {
ValidatedStateDiff::from_public_transaction(tx, state, block_id, timestamp)
}
Self::PrivacyPreserving(tx) => ValidatedStateDiff::from_privacy_preserving_transaction(
tx, state, block_id, timestamp,
),
Self::ProgramDeployment(tx) => {
ValidatedStateDiff::from_program_deployment_transaction(tx, state)
}
}?;
// A clock account counts as "touched" only when the diff's post-state for it
// differs from its current state; an identical post-state is a no-op and allowed.
// NOTE(review): only the public portion of the diff is inspected here — presumably
// clock accounts can only appear as public accounts; confirm against nssa_core.
let public_diff = diff.public_diff();
let touches_clock = nssa::CLOCK_PROGRAM_ACCOUNT_IDS.iter().any(|id| {
public_diff
.get(id)
.is_some_and(|post| *post != state.get_account_by_id(*id))
});
if touches_clock {
return Err(nssa::error::NssaError::InvalidInput(
"Transaction modifies system clock accounts".into(),
));
}
Ok(diff)
}
/// Validates the transaction against the current state, rejects modifications to clock
/// system accounts, and applies the resulting diff to the state.
///
/// Consumes `self` and returns it on success so the caller can keep using the
/// transaction after it has been applied.
///
/// # Errors
/// Returns any [`nssa::error::NssaError`] raised during validation/transition;
/// failures are additionally logged at `warn` level.
pub fn execute_check_on_state(
self,
state: &mut V03State,
block_id: BlockId,
timestamp: Timestamp,
) -> Result<Self, nssa::error::NssaError> {
// NOTE(review): this span appears to be a merged diff view — the `match` below is
// the PRE-change body (direct `transition_from_*` calls that mutate `state`), and
// the `validate_on_state` + `apply_state_diff` sequence after it is the
// POST-change body. In the actual file only one of the two should exist; as
// displayed here the transaction would be applied twice. Verify against the
// repository, not this rendering.
match &self {
Self::Public(tx) => state.transition_from_public_transaction(tx, block_id, timestamp),
Self::PrivacyPreserving(tx) => {
state.transition_from_privacy_preserving_transaction(tx, block_id, timestamp)
}
Self::ProgramDeployment(tx) => state.transition_from_program_deployment_transaction(tx),
}
.inspect_err(|err| warn!("Error at transition {err:#?}"))?;
// Validate first (no mutation), then apply the already-validated diff.
let diff = self
.validate_on_state(state, block_id, timestamp)
.inspect_err(|err| warn!("Error at transition {err:#?}"))?;
state.apply_state_diff(diff);
Ok(self)
}
}
@ -121,3 +153,20 @@ pub enum TransactionMalformationError {
#[error("Transaction size {size} exceeds maximum allowed size of {max} bytes")]
TransactionTooLarge { size: usize, max: usize },
}
/// Returns the canonical Clock Program invocation transaction for the given block timestamp.
/// Every valid block must end with exactly one occurrence of this transaction.
///
/// The message targets the clock program and pre-declares `CLOCK_PROGRAM_ACCOUNT_IDS`
/// as the touched accounts; the witness set is empty, so callers elsewhere (e.g. block
/// validation) can compare against this value by equality.
/// NOTE(review): the parameter is named `timestamp` but typed `clock_core::Instruction`;
/// call sites pass a `u64` block timestamp, so `Instruction` presumably wraps or
/// converts from the timestamp — confirm in `clock_core`.
#[must_use]
pub fn clock_invocation(timestamp: clock_core::Instruction) -> nssa::PublicTransaction {
let message = nssa::public_transaction::Message::try_new(
nssa::program::Program::clock().id(),
clock_core::CLOCK_PROGRAM_ACCOUNT_IDS.to_vec(),
vec![],
timestamp,
)
// Construction is infallible for the fixed clock account list; a failure here
// would indicate a broken invariant in `Message::try_new`.
.expect("Clock invocation message should always be constructable");
nssa::PublicTransaction::new(
message,
// No signatures required: the clock invocation carries an empty witness set.
nssa::public_transaction::WitnessSet::from_raw_parts(vec![]),
)
}

View File

@ -22,6 +22,20 @@ _wallet_complete_account_id() {
fi
}
# Helper function to complete account labels.
# $1 - the current word being completed.
# Scrapes `wallet account list` output for bracketed "[label]" tokens, strips the
# brackets, and offers the labels via compgen. If the wallet binary is missing or
# produces no labels, COMPREPLY is left untouched (no completions offered).
_wallet_complete_account_label() {
local cur="$1"
local labels
if command -v wallet &>/dev/null; then
# grep -o '\[.*\]' extracts the bracketed label; sed removes the brackets.
labels=$(wallet account list 2>/dev/null | grep -o '\[.*\]' | sed 's/^\[//;s/\]$//')
fi
if [[ -n "$labels" ]]; then
COMPREPLY=($(compgen -W "$labels" -- "$cur"))
fi
}
_wallet() {
local cur prev words cword
_init_completion 2>/dev/null || {
@ -91,20 +105,32 @@ _wallet() {
--account-id)
_wallet_complete_account_id "$cur"
;;
--account-label)
_wallet_complete_account_label "$cur"
;;
*)
COMPREPLY=($(compgen -W "--account-id" -- "$cur"))
COMPREPLY=($(compgen -W "--account-id --account-label" -- "$cur"))
;;
esac
;;
send)
case "$prev" in
--from | --to)
--from)
_wallet_complete_account_id "$cur"
;;
--from-label)
_wallet_complete_account_label "$cur"
;;
--to)
_wallet_complete_account_id "$cur"
;;
--to-label)
_wallet_complete_account_label "$cur"
;;
--to-npk | --to-vpk | --amount)
;; # no specific completion
*)
COMPREPLY=($(compgen -W "--from --to --to-npk --to-vpk --amount" -- "$cur"))
COMPREPLY=($(compgen -W "--from --from-label --to --to-label --to-npk --to-vpk --amount" -- "$cur"))
;;
esac
;;
@ -147,8 +173,11 @@ _wallet() {
-a | --account-id)
_wallet_complete_account_id "$cur"
;;
--account-label)
_wallet_complete_account_label "$cur"
;;
*)
COMPREPLY=($(compgen -W "-r --raw -k --keys -a --account-id" -- "$cur"))
COMPREPLY=($(compgen -W "-r --raw -k --keys -a --account-id --account-label" -- "$cur"))
;;
esac
;;
@ -186,10 +215,13 @@ _wallet() {
-a | --account-id)
_wallet_complete_account_id "$cur"
;;
--account-label)
_wallet_complete_account_label "$cur"
;;
-l | --label)
;; # no specific completion for label value
*)
COMPREPLY=($(compgen -W "-a --account-id -l --label" -- "$cur"))
COMPREPLY=($(compgen -W "-a --account-id --account-label -l --label" -- "$cur"))
;;
esac
;;
@ -206,8 +238,11 @@ _wallet() {
--to)
_wallet_complete_account_id "$cur"
;;
--to-label)
_wallet_complete_account_label "$cur"
;;
*)
COMPREPLY=($(compgen -W "--to" -- "$cur"))
COMPREPLY=($(compgen -W "--to --to-label" -- "$cur"))
;;
esac
;;
@ -221,49 +256,85 @@ _wallet() {
;;
new)
case "$prev" in
--definition-account-id | --supply-account-id)
--definition-account-id)
_wallet_complete_account_id "$cur"
;;
--definition-account-label)
_wallet_complete_account_label "$cur"
;;
--supply-account-id)
_wallet_complete_account_id "$cur"
;;
--supply-account-label)
_wallet_complete_account_label "$cur"
;;
-n | --name | -t | --total-supply)
;; # no specific completion
*)
COMPREPLY=($(compgen -W "--definition-account-id --supply-account-id -n --name -t --total-supply" -- "$cur"))
COMPREPLY=($(compgen -W "--definition-account-id --definition-account-label --supply-account-id --supply-account-label -n --name -t --total-supply" -- "$cur"))
;;
esac
;;
send)
case "$prev" in
--from | --to)
--from)
_wallet_complete_account_id "$cur"
;;
--from-label)
_wallet_complete_account_label "$cur"
;;
--to)
_wallet_complete_account_id "$cur"
;;
--to-label)
_wallet_complete_account_label "$cur"
;;
--to-npk | --to-vpk | --amount)
;; # no specific completion
*)
COMPREPLY=($(compgen -W "--from --to --to-npk --to-vpk --amount" -- "$cur"))
COMPREPLY=($(compgen -W "--from --from-label --to --to-label --to-npk --to-vpk --amount" -- "$cur"))
;;
esac
;;
burn)
case "$prev" in
--definition | --holder)
--definition)
_wallet_complete_account_id "$cur"
;;
--definition-label)
_wallet_complete_account_label "$cur"
;;
--holder)
_wallet_complete_account_id "$cur"
;;
--holder-label)
_wallet_complete_account_label "$cur"
;;
--amount)
;; # no specific completion
*)
COMPREPLY=($(compgen -W "--definition --holder --amount" -- "$cur"))
COMPREPLY=($(compgen -W "--definition --definition-label --holder --holder-label --amount" -- "$cur"))
;;
esac
;;
mint)
case "$prev" in
--definition | --holder)
--definition)
_wallet_complete_account_id "$cur"
;;
--definition-label)
_wallet_complete_account_label "$cur"
;;
--holder)
_wallet_complete_account_id "$cur"
;;
--holder-label)
_wallet_complete_account_label "$cur"
;;
--holder-npk | --holder-vpk | --amount)
;; # no specific completion
*)
COMPREPLY=($(compgen -W "--definition --holder --holder-npk --holder-vpk --amount" -- "$cur"))
COMPREPLY=($(compgen -W "--definition --definition-label --holder --holder-label --holder-npk --holder-vpk --amount" -- "$cur"))
;;
esac
;;
@ -277,49 +348,103 @@ _wallet() {
;;
new)
case "$prev" in
--user-holding-a | --user-holding-b | --user-holding-lp)
--user-holding-a)
_wallet_complete_account_id "$cur"
;;
--user-holding-a-label)
_wallet_complete_account_label "$cur"
;;
--user-holding-b)
_wallet_complete_account_id "$cur"
;;
--user-holding-b-label)
_wallet_complete_account_label "$cur"
;;
--user-holding-lp)
_wallet_complete_account_id "$cur"
;;
--user-holding-lp-label)
_wallet_complete_account_label "$cur"
;;
--balance-a | --balance-b)
;; # no specific completion
*)
COMPREPLY=($(compgen -W "--user-holding-a --user-holding-b --user-holding-lp --balance-a --balance-b" -- "$cur"))
COMPREPLY=($(compgen -W "--user-holding-a --user-holding-a-label --user-holding-b --user-holding-b-label --user-holding-lp --user-holding-lp-label --balance-a --balance-b" -- "$cur"))
;;
esac
;;
swap)
case "$prev" in
--user-holding-a | --user-holding-b)
--user-holding-a)
_wallet_complete_account_id "$cur"
;;
--user-holding-a-label)
_wallet_complete_account_label "$cur"
;;
--user-holding-b)
_wallet_complete_account_id "$cur"
;;
--user-holding-b-label)
_wallet_complete_account_label "$cur"
;;
--amount-in | --min-amount-out | --token-definition)
;; # no specific completion
*)
COMPREPLY=($(compgen -W "--user-holding-a --user-holding-b --amount-in --min-amount-out --token-definition" -- "$cur"))
COMPREPLY=($(compgen -W "--user-holding-a --user-holding-a-label --user-holding-b --user-holding-b-label --amount-in --min-amount-out --token-definition" -- "$cur"))
;;
esac
;;
add-liquidity)
case "$prev" in
--user-holding-a | --user-holding-b | --user-holding-lp)
--user-holding-a)
_wallet_complete_account_id "$cur"
;;
--user-holding-a-label)
_wallet_complete_account_label "$cur"
;;
--user-holding-b)
_wallet_complete_account_id "$cur"
;;
--user-holding-b-label)
_wallet_complete_account_label "$cur"
;;
--user-holding-lp)
_wallet_complete_account_id "$cur"
;;
--user-holding-lp-label)
_wallet_complete_account_label "$cur"
;;
--max-amount-a | --max-amount-b | --min-amount-lp)
;; # no specific completion
*)
COMPREPLY=($(compgen -W "--user-holding-a --user-holding-b --user-holding-lp --max-amount-a --max-amount-b --min-amount-lp" -- "$cur"))
COMPREPLY=($(compgen -W "--user-holding-a --user-holding-a-label --user-holding-b --user-holding-b-label --user-holding-lp --user-holding-lp-label --max-amount-a --max-amount-b --min-amount-lp" -- "$cur"))
;;
esac
;;
remove-liquidity)
case "$prev" in
--user-holding-a | --user-holding-b | --user-holding-lp)
--user-holding-a)
_wallet_complete_account_id "$cur"
;;
--user-holding-a-label)
_wallet_complete_account_label "$cur"
;;
--user-holding-b)
_wallet_complete_account_id "$cur"
;;
--user-holding-b-label)
_wallet_complete_account_label "$cur"
;;
--user-holding-lp)
_wallet_complete_account_id "$cur"
;;
--user-holding-lp-label)
_wallet_complete_account_label "$cur"
;;
--balance-lp | --min-amount-a | --min-amount-b)
;; # no specific completion
*)
COMPREPLY=($(compgen -W "--user-holding-a --user-holding-b --user-holding-lp --balance-lp --min-amount-a --min-amount-b" -- "$cur"))
COMPREPLY=($(compgen -W "--user-holding-a --user-holding-a-label --user-holding-b --user-holding-b-label --user-holding-lp --user-holding-lp-label --balance-lp --min-amount-a --min-amount-b" -- "$cur"))
;;
esac
;;

View File

@ -90,12 +90,15 @@ _wallet_auth_transfer() {
case $line[1] in
init)
_arguments \
'--account-id[Account ID to initialize]:account_id:_wallet_account_ids'
'--account-id[Account ID to initialize]:account_id:_wallet_account_ids' \
'--account-label[Account label (alternative to --account-id)]:label:_wallet_account_labels'
;;
send)
_arguments \
'--from[Source account ID]:from_account:_wallet_account_ids' \
'--from-label[Source account label (alternative to --from)]:label:_wallet_account_labels' \
'--to[Destination account ID (for owned accounts)]:to_account:_wallet_account_ids' \
'--to-label[Destination account label (alternative to --to)]:label:_wallet_account_labels' \
'--to-npk[Destination nullifier public key (for foreign private accounts)]:npk:' \
'--to-vpk[Destination viewing public key (for foreign private accounts)]:vpk:' \
'--amount[Amount of native tokens to send]:amount:'
@ -165,7 +168,8 @@ _wallet_account() {
_arguments \
'(-r --raw)'{-r,--raw}'[Get raw account data]' \
'(-k --keys)'{-k,--keys}'[Display keys (pk for public accounts, npk/vpk for private accounts)]' \
'(-a --account-id)'{-a,--account-id}'[Account ID to query]:account_id:_wallet_account_ids'
'(-a --account-id)'{-a,--account-id}'[Account ID to query]:account_id:_wallet_account_ids' \
'--account-label[Account label (alternative to --account-id)]:label:_wallet_account_labels'
;;
list|ls)
_arguments \
@ -189,6 +193,7 @@ _wallet_account() {
label)
_arguments \
'(-a --account-id)'{-a,--account-id}'[Account ID to label]:account_id:_wallet_account_ids' \
'--account-label[Account label (alternative to --account-id)]:label:_wallet_account_labels' \
'(-l --label)'{-l,--label}'[The label to assign to the account]:label:'
;;
esac
@ -216,7 +221,8 @@ _wallet_pinata() {
case $line[1] in
claim)
_arguments \
'--to[Destination account ID to receive claimed tokens]:to_account:_wallet_account_ids'
'--to[Destination account ID to receive claimed tokens]:to_account:_wallet_account_ids' \
'--to-label[Destination account label (alternative to --to)]:label:_wallet_account_labels'
;;
esac
;;
@ -249,12 +255,16 @@ _wallet_token() {
'--name[Token name]:name:' \
'--total-supply[Total supply of tokens to mint]:total_supply:' \
'--definition-account-id[Account ID for token definition]:definition_account:_wallet_account_ids' \
'--supply-account-id[Account ID to receive initial supply]:supply_account:_wallet_account_ids'
'--definition-account-label[Definition account label (alternative to --definition-account-id)]:label:_wallet_account_labels' \
'--supply-account-id[Account ID to receive initial supply]:supply_account:_wallet_account_ids' \
'--supply-account-label[Supply account label (alternative to --supply-account-id)]:label:_wallet_account_labels'
;;
send)
_arguments \
'--from[Source holding account ID]:from_account:_wallet_account_ids' \
'--from-label[Source account label (alternative to --from)]:label:_wallet_account_labels' \
'--to[Destination holding account ID (for owned accounts)]:to_account:_wallet_account_ids' \
'--to-label[Destination account label (alternative to --to)]:label:_wallet_account_labels' \
'--to-npk[Destination nullifier public key (for foreign private accounts)]:npk:' \
'--to-vpk[Destination viewing public key (for foreign private accounts)]:vpk:' \
'--amount[Amount of tokens to send]:amount:'
@ -262,13 +272,17 @@ _wallet_token() {
burn)
_arguments \
'--definition[Definition account ID]:definition_account:_wallet_account_ids' \
'--definition-label[Definition account label (alternative to --definition)]:label:_wallet_account_labels' \
'--holder[Holder account ID]:holder_account:_wallet_account_ids' \
'--holder-label[Holder account label (alternative to --holder)]:label:_wallet_account_labels' \
'--amount[Amount of tokens to burn]:amount:'
;;
mint)
_arguments \
'--definition[Definition account ID]:definition_account:_wallet_account_ids' \
'--definition-label[Definition account label (alternative to --definition)]:label:_wallet_account_labels' \
'--holder[Holder account ID (for owned accounts)]:holder_account:_wallet_account_ids' \
'--holder-label[Holder account label (alternative to --holder)]:label:_wallet_account_labels' \
'--holder-npk[Holder nullifier public key (for foreign private accounts)]:npk:' \
'--holder-vpk[Holder viewing public key (for foreign private accounts)]:vpk:' \
'--amount[Amount of tokens to mint]:amount:'
@ -302,15 +316,20 @@ _wallet_amm() {
new)
_arguments \
'--user-holding-a[User token A holding account ID]:holding_a:_wallet_account_ids' \
'--user-holding-a-label[User holding A account label (alternative to --user-holding-a)]:label:_wallet_account_labels' \
'--user-holding-b[User token B holding account ID]:holding_b:_wallet_account_ids' \
'--user-holding-b-label[User holding B account label (alternative to --user-holding-b)]:label:_wallet_account_labels' \
'--user-holding-lp[User LP token holding account ID]:holding_lp:_wallet_account_ids' \
'--user-holding-lp-label[User holding LP account label (alternative to --user-holding-lp)]:label:_wallet_account_labels' \
'--balance-a[Amount of token A to deposit]:balance_a:' \
'--balance-b[Amount of token B to deposit]:balance_b:'
;;
swap)
_arguments \
'--user-holding-a[User token A holding account ID]:holding_a:_wallet_account_ids' \
'--user-holding-a-label[User holding A account label (alternative to --user-holding-a)]:label:_wallet_account_labels' \
'--user-holding-b[User token B holding account ID]:holding_b:_wallet_account_ids' \
'--user-holding-b-label[User holding B account label (alternative to --user-holding-b)]:label:_wallet_account_labels' \
'--amount-in[Amount of tokens to swap]:amount_in:' \
'--min-amount-out[Minimum tokens expected in return]:min_amount_out:' \
'--token-definition[Definition ID of the token being provided]:token_def:'
@ -318,8 +337,11 @@ _wallet_amm() {
add-liquidity)
_arguments \
'--user-holding-a[User token A holding account ID]:holding_a:_wallet_account_ids' \
'--user-holding-a-label[User holding A account label (alternative to --user-holding-a)]:label:_wallet_account_labels' \
'--user-holding-b[User token B holding account ID]:holding_b:_wallet_account_ids' \
'--user-holding-b-label[User holding B account label (alternative to --user-holding-b)]:label:_wallet_account_labels' \
'--user-holding-lp[User LP token holding account ID]:holding_lp:_wallet_account_ids' \
'--user-holding-lp-label[User holding LP account label (alternative to --user-holding-lp)]:label:_wallet_account_labels' \
'--max-amount-a[Maximum amount of token A to deposit]:max_amount_a:' \
'--max-amount-b[Maximum amount of token B to deposit]:max_amount_b:' \
'--min-amount-lp[Minimum LP tokens to receive]:min_amount_lp:'
@ -327,8 +349,11 @@ _wallet_amm() {
remove-liquidity)
_arguments \
'--user-holding-a[User token A holding account ID]:holding_a:_wallet_account_ids' \
'--user-holding-a-label[User holding A account label (alternative to --user-holding-a)]:label:_wallet_account_labels' \
'--user-holding-b[User token B holding account ID]:holding_b:_wallet_account_ids' \
'--user-holding-b-label[User holding B account label (alternative to --user-holding-b)]:label:_wallet_account_labels' \
'--user-holding-lp[User LP token holding account ID]:holding_lp:_wallet_account_ids' \
'--user-holding-lp-label[User holding LP account label (alternative to --user-holding-lp)]:label:_wallet_account_labels' \
'--balance-lp[Amount of LP tokens to burn]:balance_lp:' \
'--min-amount-a[Minimum token A to receive]:min_amount_a:' \
'--min-amount-b[Minimum token B to receive]:min_amount_b:'
@ -424,7 +449,7 @@ _wallet_help() {
_wallet_account_ids() {
local -a accounts
local line
# Try to get accounts from wallet account list command
# Filter to lines starting with /N (numbered accounts) and extract the account ID
if command -v wallet &>/dev/null; then
@ -433,14 +458,35 @@ _wallet_account_ids() {
[[ -n "$line" ]] && accounts+=("${line%,}")
done < <(wallet account list 2>/dev/null | grep '^/[0-9]' | awk '{print $2}')
fi
# Provide type prefixes as fallback if command fails or returns nothing
if (( ${#accounts} == 0 )); then
compadd -S '' -- 'Public/' 'Private/'
return
fi
_multi_parts / accounts
}
# Helper function to complete account labels
# Uses `wallet account list` to get available labels.
# For each output line, extracts the trailing "[label]" token via parameter
# expansion; lines without a bracketed label are skipped (the "$label" != "$line"
# guard detects that the "##*\[" expansion removed nothing). Offers the collected
# labels with compadd; offers nothing when the wallet binary is absent or no
# labels were found.
_wallet_account_labels() {
local -a labels
local line
if command -v wallet &>/dev/null; then
while IFS= read -r line; do
local label
# Extract label from [...] at end of line
label="${line##*\[}"
label="${label%\]}"
[[ -n "$label" && "$label" != "$line" ]] && labels+=("$label")
done < <(wallet account list 2>/dev/null)
fi
if (( ${#labels} > 0 )); then
compadd -a labels
fi
}
_wallet "$@"

View File

@ -20,6 +20,7 @@ fn main() {
let (
ProgramInput {
self_program_id,
caller_program_id,
pre_states,
instruction: greeting,
},
@ -53,6 +54,7 @@ fn main() {
// called to commit the output.
ProgramOutput::new(
self_program_id,
caller_program_id,
instruction_data,
vec![pre_state],
vec![post_state],

View File

@ -20,6 +20,7 @@ fn main() {
let (
ProgramInput {
self_program_id,
caller_program_id,
pre_states,
instruction: greeting,
},
@ -60,6 +61,7 @@ fn main() {
// called to commit the output.
ProgramOutput::new(
self_program_id,
caller_program_id,
instruction_data,
vec![pre_state],
vec![post_state],

View File

@ -67,6 +67,7 @@ fn main() {
let (
ProgramInput {
self_program_id,
caller_program_id,
pre_states,
instruction: (function_id, data),
},
@ -86,5 +87,12 @@ fn main() {
// WARNING: constructing a `ProgramOutput` has no effect on its own. `.write()` must be
// called to commit the output.
ProgramOutput::new(self_program_id, instruction_words, pre_states, post_states).write();
ProgramOutput::new(
self_program_id,
caller_program_id,
instruction_words,
pre_states,
post_states,
)
.write();
}

View File

@ -28,6 +28,7 @@ fn main() {
let (
ProgramInput {
self_program_id,
caller_program_id,
pre_states,
instruction: (),
},
@ -58,6 +59,7 @@ fn main() {
// called to commit the output.
ProgramOutput::new(
self_program_id,
caller_program_id,
instruction_data,
vec![pre_state],
vec![post_state],

View File

@ -34,6 +34,7 @@ fn main() {
let (
ProgramInput {
self_program_id,
caller_program_id,
pre_states,
instruction: (),
},
@ -71,6 +72,7 @@ fn main() {
// called to commit the output.
ProgramOutput::new(
self_program_id,
caller_program_id,
instruction_data,
vec![pre_state],
vec![post_state],

View File

@ -4,7 +4,7 @@ use anyhow::Result;
use bedrock_client::HeaderId;
use common::{
block::{BedrockStatus, Block},
transaction::NSSATransaction,
transaction::{NSSATransaction, clock_invocation},
};
use nssa::{Account, AccountId, V03State};
use nssa_core::BlockId;
@ -122,7 +122,18 @@ impl IndexerStore {
{
let mut state_guard = self.current_state.write().await;
for transaction in &block.body.transactions {
let (clock_tx, user_txs) = block
.body
.transactions
.split_last()
.ok_or_else(|| anyhow::anyhow!("Block has no transactions"))?;
anyhow::ensure!(
*clock_tx == NSSATransaction::Public(clock_invocation(block.header.timestamp)),
"Last transaction in block must be the clock invocation for the block timestamp"
);
for transaction in user_txs {
transaction
.clone()
.transaction_stateless_check()?
@ -132,6 +143,16 @@ impl IndexerStore {
block.header.timestamp,
)?;
}
// Apply the clock invocation directly (it is expected to modify clock accounts).
let NSSATransaction::Public(clock_public_tx) = clock_tx else {
anyhow::bail!("Clock invocation must be a public transaction");
};
state_guard.transition_from_public_transaction(
clock_public_tx,
block.header.block_id,
block.header.timestamp,
)?;
}
// ToDo: Currently we are fetching only finalized blocks
@ -177,7 +198,7 @@ mod tests {
let storage = IndexerStore::open_db_with_genesis(
home.as_ref(),
&genesis_block(),
&nssa::V03State::new_with_genesis_accounts(&[(acc1(), 10000), (acc2(), 20000)], &[]),
&nssa::V03State::new_with_genesis_accounts(&[(acc1(), 10000), (acc2(), 20000)], &[], 0),
)
.unwrap();
@ -195,7 +216,7 @@ mod tests {
let storage = IndexerStore::open_db_with_genesis(
home.as_ref(),
&genesis_block(),
&nssa::V03State::new_with_genesis_accounts(&[(acc1(), 10000), (acc2(), 20000)], &[]),
&nssa::V03State::new_with_genesis_accounts(&[(acc1(), 10000), (acc2(), 20000)], &[], 0),
)
.unwrap();
@ -213,11 +234,14 @@ mod tests {
10,
&sign_key,
);
let block_id = u64::try_from(i).unwrap();
let block_timestamp = block_id.saturating_mul(100);
let clock_tx = NSSATransaction::Public(clock_invocation(block_timestamp));
let next_block = common::test_utils::produce_dummy_block(
u64::try_from(i).unwrap(),
block_id,
Some(prev_hash),
vec![tx],
vec![tx, clock_tx],
);
prev_hash = next_block.header.hash;

View File

@ -92,6 +92,7 @@ impl IndexerCore {
let mut state = V03State::new_with_genesis_accounts(
&init_accs.unwrap_or_default(),
&initial_commitments.unwrap_or_default(),
genesis_block.header.timestamp,
);
// ToDo: Remove after testnet

View File

@ -138,7 +138,7 @@ pub struct Account {
}
pub type BlockId = u64;
pub type TimeStamp = u64;
pub type Timestamp = u64;
#[derive(Debug, Clone, PartialEq, Eq, Hash, Serialize, Deserialize, JsonSchema)]
pub struct Block {
@ -153,7 +153,7 @@ pub struct BlockHeader {
pub block_id: BlockId,
pub prev_block_hash: HashType,
pub hash: HashType,
pub timestamp: TimeStamp,
pub timestamp: Timestamp,
pub signature: Signature,
}

View File

@ -113,9 +113,12 @@ async fn amm_public() -> Result<()> {
// Create new token
let subcommand = TokenProgramAgnosticSubcommand::New {
definition_account_id: format_public_account_id(definition_account_id_1),
supply_account_id: format_public_account_id(supply_account_id_1),
definition_account_id: Some(format_public_account_id(definition_account_id_1)),
definition_account_label: None,
supply_account_id: Some(format_public_account_id(supply_account_id_1)),
supply_account_label: None,
name: "A NAM1".to_owned(),
total_supply: 37,
};
wallet::cli::execute_subcommand(ctx.wallet_mut(), Command::Token(subcommand)).await?;
@ -124,8 +127,10 @@ async fn amm_public() -> Result<()> {
// Transfer 7 tokens from `supply_acc` to the account at account_id `recipient_account_id_1`
let subcommand = TokenProgramAgnosticSubcommand::Send {
from: format_public_account_id(supply_account_id_1),
from: Some(format_public_account_id(supply_account_id_1)),
from_label: None,
to: Some(format_public_account_id(recipient_account_id_1)),
to_label: None,
to_npk: None,
to_vpk: None,
amount: 7,
@ -137,9 +142,12 @@ async fn amm_public() -> Result<()> {
// Create new token
let subcommand = TokenProgramAgnosticSubcommand::New {
definition_account_id: format_public_account_id(definition_account_id_2),
supply_account_id: format_public_account_id(supply_account_id_2),
definition_account_id: Some(format_public_account_id(definition_account_id_2)),
definition_account_label: None,
supply_account_id: Some(format_public_account_id(supply_account_id_2)),
supply_account_label: None,
name: "A NAM2".to_owned(),
total_supply: 37,
};
wallet::cli::execute_subcommand(ctx.wallet_mut(), Command::Token(subcommand)).await?;
@ -148,8 +156,10 @@ async fn amm_public() -> Result<()> {
// Transfer 7 tokens from `supply_acc` to the account at account_id `recipient_account_id_2`
let subcommand = TokenProgramAgnosticSubcommand::Send {
from: format_public_account_id(supply_account_id_2),
from: Some(format_public_account_id(supply_account_id_2)),
from_label: None,
to: Some(format_public_account_id(recipient_account_id_2)),
to_label: None,
to_npk: None,
to_vpk: None,
amount: 7,
@ -181,9 +191,12 @@ async fn amm_public() -> Result<()> {
// Send creation tx
let subcommand = AmmProgramAgnosticSubcommand::New {
user_holding_a: format_public_account_id(recipient_account_id_1),
user_holding_b: format_public_account_id(recipient_account_id_2),
user_holding_lp: format_public_account_id(user_holding_lp),
user_holding_a: Some(format_public_account_id(recipient_account_id_1)),
user_holding_a_label: None,
user_holding_b: Some(format_public_account_id(recipient_account_id_2)),
user_holding_b_label: None,
user_holding_lp: Some(format_public_account_id(user_holding_lp)),
user_holding_lp_label: None,
balance_a: 3,
balance_b: 3,
};
@ -224,8 +237,10 @@ async fn amm_public() -> Result<()> {
// Make swap
let subcommand = AmmProgramAgnosticSubcommand::SwapExactInput {
user_holding_a: format_public_account_id(recipient_account_id_1),
user_holding_b: format_public_account_id(recipient_account_id_2),
user_holding_a: Some(format_public_account_id(recipient_account_id_1)),
user_holding_a_label: None,
user_holding_b: Some(format_public_account_id(recipient_account_id_2)),
user_holding_b_label: None,
amount_in: 2,
min_amount_out: 1,
token_definition: definition_account_id_1.to_string(),
@ -267,8 +282,10 @@ async fn amm_public() -> Result<()> {
// Make swap
let subcommand = AmmProgramAgnosticSubcommand::SwapExactInput {
user_holding_a: format_public_account_id(recipient_account_id_1),
user_holding_b: format_public_account_id(recipient_account_id_2),
user_holding_a: Some(format_public_account_id(recipient_account_id_1)),
user_holding_a_label: None,
user_holding_b: Some(format_public_account_id(recipient_account_id_2)),
user_holding_b_label: None,
amount_in: 2,
min_amount_out: 1,
token_definition: definition_account_id_2.to_string(),
@ -310,9 +327,12 @@ async fn amm_public() -> Result<()> {
// Add liquidity
let subcommand = AmmProgramAgnosticSubcommand::AddLiquidity {
user_holding_a: format_public_account_id(recipient_account_id_1),
user_holding_b: format_public_account_id(recipient_account_id_2),
user_holding_lp: format_public_account_id(user_holding_lp),
user_holding_a: Some(format_public_account_id(recipient_account_id_1)),
user_holding_a_label: None,
user_holding_b: Some(format_public_account_id(recipient_account_id_2)),
user_holding_b_label: None,
user_holding_lp: Some(format_public_account_id(user_holding_lp)),
user_holding_lp_label: None,
min_amount_lp: 1,
max_amount_a: 2,
max_amount_b: 2,
@ -354,9 +374,12 @@ async fn amm_public() -> Result<()> {
// Remove liquidity
let subcommand = AmmProgramAgnosticSubcommand::RemoveLiquidity {
user_holding_a: format_public_account_id(recipient_account_id_1),
user_holding_b: format_public_account_id(recipient_account_id_2),
user_holding_lp: format_public_account_id(user_holding_lp),
user_holding_a: Some(format_public_account_id(recipient_account_id_1)),
user_holding_a_label: None,
user_holding_b: Some(format_public_account_id(recipient_account_id_2)),
user_holding_b_label: None,
user_holding_lp: Some(format_public_account_id(user_holding_lp)),
user_holding_lp_label: None,
balance_lp: 2,
min_amount_a: 1,
min_amount_b: 1,
@ -397,3 +420,188 @@ async fn amm_public() -> Result<()> {
Ok(())
}
// Verifies that a new AMM pool can be created when all three user holding
// accounts (token A, token B, LP) are referenced by wallet labels instead of
// explicit account IDs.
#[test]
async fn amm_new_pool_using_labels() -> Result<()> {
    let mut ctx = TestContext::new().await?;
    // Create token 1 accounts
    // Unlabeled public account that will hold token 1's definition.
    let SubcommandReturnValue::RegisterAccount {
        account_id: definition_account_id_1,
    } = wallet::cli::execute_subcommand(
        ctx.wallet_mut(),
        Command::Account(AccountSubcommand::New(NewSubcommand::Public {
            cci: None,
            label: None,
        })),
    )
    .await?
    else {
        anyhow::bail!("Expected RegisterAccount return value");
    };
    // Unlabeled public account that will receive token 1's initial supply.
    let SubcommandReturnValue::RegisterAccount {
        account_id: supply_account_id_1,
    } = wallet::cli::execute_subcommand(
        ctx.wallet_mut(),
        Command::Account(AccountSubcommand::New(NewSubcommand::Public {
            cci: None,
            label: None,
        })),
    )
    .await?
    else {
        anyhow::bail!("Expected RegisterAccount return value");
    };
    // Create holding_a with a label
    // This label is later used (instead of the ID) when creating the pool.
    let holding_a_label = "amm-holding-a-label".to_owned();
    let SubcommandReturnValue::RegisterAccount {
        account_id: holding_a_id,
    } = wallet::cli::execute_subcommand(
        ctx.wallet_mut(),
        Command::Account(AccountSubcommand::New(NewSubcommand::Public {
            cci: None,
            label: Some(holding_a_label.clone()),
        })),
    )
    .await?
    else {
        anyhow::bail!("Expected RegisterAccount return value");
    };
    // Create token 2 accounts
    let SubcommandReturnValue::RegisterAccount {
        account_id: definition_account_id_2,
    } = wallet::cli::execute_subcommand(
        ctx.wallet_mut(),
        Command::Account(AccountSubcommand::New(NewSubcommand::Public {
            cci: None,
            label: None,
        })),
    )
    .await?
    else {
        anyhow::bail!("Expected RegisterAccount return value");
    };
    let SubcommandReturnValue::RegisterAccount {
        account_id: supply_account_id_2,
    } = wallet::cli::execute_subcommand(
        ctx.wallet_mut(),
        Command::Account(AccountSubcommand::New(NewSubcommand::Public {
            cci: None,
            label: None,
        })),
    )
    .await?
    else {
        anyhow::bail!("Expected RegisterAccount return value");
    };
    // Create holding_b with a label
    let holding_b_label = "amm-holding-b-label".to_owned();
    let SubcommandReturnValue::RegisterAccount {
        account_id: holding_b_id,
    } = wallet::cli::execute_subcommand(
        ctx.wallet_mut(),
        Command::Account(AccountSubcommand::New(NewSubcommand::Public {
            cci: None,
            label: Some(holding_b_label.clone()),
        })),
    )
    .await?
    else {
        anyhow::bail!("Expected RegisterAccount return value");
    };
    // Create holding_lp with a label
    // The ID is still captured so the LP balance can be read back at the end.
    let holding_lp_label = "amm-holding-lp-label".to_owned();
    let SubcommandReturnValue::RegisterAccount {
        account_id: holding_lp_id,
    } = wallet::cli::execute_subcommand(
        ctx.wallet_mut(),
        Command::Account(AccountSubcommand::New(NewSubcommand::Public {
            cci: None,
            label: Some(holding_lp_label.clone()),
        })),
    )
    .await?
    else {
        anyhow::bail!("Expected RegisterAccount return value");
    };
    // Create token 1 and distribute to holding_a
    let subcommand = TokenProgramAgnosticSubcommand::New {
        definition_account_id: Some(format_public_account_id(definition_account_id_1)),
        definition_account_label: None,
        supply_account_id: Some(format_public_account_id(supply_account_id_1)),
        supply_account_label: None,
        name: "TOKEN1".to_owned(),
        total_supply: 10,
    };
    wallet::cli::execute_subcommand(ctx.wallet_mut(), Command::Token(subcommand)).await?;
    // Wait for the transaction to be included in a block before the next step.
    tokio::time::sleep(Duration::from_secs(TIME_TO_WAIT_FOR_BLOCK_SECONDS)).await;
    let subcommand = TokenProgramAgnosticSubcommand::Send {
        from: Some(format_public_account_id(supply_account_id_1)),
        from_label: None,
        to: Some(format_public_account_id(holding_a_id)),
        to_label: None,
        to_npk: None,
        to_vpk: None,
        amount: 5,
    };
    wallet::cli::execute_subcommand(ctx.wallet_mut(), Command::Token(subcommand)).await?;
    tokio::time::sleep(Duration::from_secs(TIME_TO_WAIT_FOR_BLOCK_SECONDS)).await;
    // Create token 2 and distribute to holding_b
    let subcommand = TokenProgramAgnosticSubcommand::New {
        definition_account_id: Some(format_public_account_id(definition_account_id_2)),
        definition_account_label: None,
        supply_account_id: Some(format_public_account_id(supply_account_id_2)),
        supply_account_label: None,
        name: "TOKEN2".to_owned(),
        total_supply: 10,
    };
    wallet::cli::execute_subcommand(ctx.wallet_mut(), Command::Token(subcommand)).await?;
    tokio::time::sleep(Duration::from_secs(TIME_TO_WAIT_FOR_BLOCK_SECONDS)).await;
    let subcommand = TokenProgramAgnosticSubcommand::Send {
        from: Some(format_public_account_id(supply_account_id_2)),
        from_label: None,
        to: Some(format_public_account_id(holding_b_id)),
        to_label: None,
        to_npk: None,
        to_vpk: None,
        amount: 5,
    };
    wallet::cli::execute_subcommand(ctx.wallet_mut(), Command::Token(subcommand)).await?;
    tokio::time::sleep(Duration::from_secs(TIME_TO_WAIT_FOR_BLOCK_SECONDS)).await;
    // Create AMM pool using account labels instead of IDs
    // All three `user_holding_*` IDs are None; resolution happens via labels.
    let subcommand = AmmProgramAgnosticSubcommand::New {
        user_holding_a: None,
        user_holding_a_label: Some(holding_a_label),
        user_holding_b: None,
        user_holding_b_label: Some(holding_b_label),
        user_holding_lp: None,
        user_holding_lp_label: Some(holding_lp_label),
        balance_a: 3,
        balance_b: 3,
    };
    wallet::cli::execute_subcommand(ctx.wallet_mut(), Command::AMM(subcommand)).await?;
    tokio::time::sleep(Duration::from_secs(TIME_TO_WAIT_FOR_BLOCK_SECONDS)).await;
    let holding_lp_acc = ctx.sequencer_client().get_account(holding_lp_id).await?;
    // LP balance should be 3 (geometric mean of 3, 3)
    // NOTE(review): assumes the token holding account layout stores the u128
    // balance as little-endian bytes at offset 33 of `data` — confirm against
    // the token account serialization in token_core.
    assert_eq!(
        u128::from_le_bytes(holding_lp_acc.data[33..].try_into().unwrap()),
        3
    );
    info!("Successfully created AMM pool using account labels");
    Ok(())
}

View File

@ -68,8 +68,10 @@ async fn create_ata_initializes_holding_account() -> Result<()> {
wallet::cli::execute_subcommand(
ctx.wallet_mut(),
Command::Token(TokenProgramAgnosticSubcommand::New {
definition_account_id: format_public_account_id(definition_account_id),
supply_account_id: format_public_account_id(supply_account_id),
definition_account_id: Some(format_public_account_id(definition_account_id)),
definition_account_label: None,
supply_account_id: Some(format_public_account_id(supply_account_id)),
supply_account_label: None,
name: "TEST".to_owned(),
total_supply,
}),
@ -130,8 +132,10 @@ async fn create_ata_is_idempotent() -> Result<()> {
wallet::cli::execute_subcommand(
ctx.wallet_mut(),
Command::Token(TokenProgramAgnosticSubcommand::New {
definition_account_id: format_public_account_id(definition_account_id),
supply_account_id: format_public_account_id(supply_account_id),
definition_account_id: Some(format_public_account_id(definition_account_id)),
definition_account_label: None,
supply_account_id: Some(format_public_account_id(supply_account_id)),
supply_account_label: None,
name: "TEST".to_owned(),
total_supply: 100,
}),
@ -208,8 +212,10 @@ async fn transfer_and_burn_via_ata() -> Result<()> {
wallet::cli::execute_subcommand(
ctx.wallet_mut(),
Command::Token(TokenProgramAgnosticSubcommand::New {
definition_account_id: format_public_account_id(definition_account_id),
supply_account_id: format_public_account_id(supply_account_id),
definition_account_id: Some(format_public_account_id(definition_account_id)),
definition_account_label: None,
supply_account_id: Some(format_public_account_id(supply_account_id)),
supply_account_label: None,
name: "TEST".to_owned(),
total_supply,
}),
@ -256,8 +262,10 @@ async fn transfer_and_burn_via_ata() -> Result<()> {
wallet::cli::execute_subcommand(
ctx.wallet_mut(),
Command::Token(TokenProgramAgnosticSubcommand::Send {
from: format_public_account_id(supply_account_id),
from: Some(format_public_account_id(supply_account_id)),
from_label: None,
to: Some(format_public_account_id(sender_ata_id)),
to_label: None,
to_npk: None,
to_vpk: None,
amount: fund_amount,
@ -362,8 +370,10 @@ async fn create_ata_with_private_owner() -> Result<()> {
wallet::cli::execute_subcommand(
ctx.wallet_mut(),
Command::Token(TokenProgramAgnosticSubcommand::New {
definition_account_id: format_public_account_id(definition_account_id),
supply_account_id: format_public_account_id(supply_account_id),
definition_account_id: Some(format_public_account_id(definition_account_id)),
definition_account_label: None,
supply_account_id: Some(format_public_account_id(supply_account_id)),
supply_account_label: None,
name: "TEST".to_owned(),
total_supply: 100,
}),
@ -434,8 +444,10 @@ async fn transfer_via_ata_private_owner() -> Result<()> {
wallet::cli::execute_subcommand(
ctx.wallet_mut(),
Command::Token(TokenProgramAgnosticSubcommand::New {
definition_account_id: format_public_account_id(definition_account_id),
supply_account_id: format_public_account_id(supply_account_id),
definition_account_id: Some(format_public_account_id(definition_account_id)),
definition_account_label: None,
supply_account_id: Some(format_public_account_id(supply_account_id)),
supply_account_label: None,
name: "TEST".to_owned(),
total_supply,
}),
@ -482,8 +494,10 @@ async fn transfer_via_ata_private_owner() -> Result<()> {
wallet::cli::execute_subcommand(
ctx.wallet_mut(),
Command::Token(TokenProgramAgnosticSubcommand::Send {
from: format_public_account_id(supply_account_id),
from: Some(format_public_account_id(supply_account_id)),
from_label: None,
to: Some(format_public_account_id(sender_ata_id)),
to_label: None,
to_npk: None,
to_vpk: None,
amount: fund_amount,
@ -556,8 +570,10 @@ async fn burn_via_ata_private_owner() -> Result<()> {
wallet::cli::execute_subcommand(
ctx.wallet_mut(),
Command::Token(TokenProgramAgnosticSubcommand::New {
definition_account_id: format_public_account_id(definition_account_id),
supply_account_id: format_public_account_id(supply_account_id),
definition_account_id: Some(format_public_account_id(definition_account_id)),
definition_account_label: None,
supply_account_id: Some(format_public_account_id(supply_account_id)),
supply_account_label: None,
name: "TEST".to_owned(),
total_supply,
}),
@ -592,8 +608,10 @@ async fn burn_via_ata_private_owner() -> Result<()> {
wallet::cli::execute_subcommand(
ctx.wallet_mut(),
Command::Token(TokenProgramAgnosticSubcommand::Send {
from: format_public_account_id(supply_account_id),
from: Some(format_public_account_id(supply_account_id)),
from_label: None,
to: Some(format_public_account_id(holder_ata_id)),
to_label: None,
to_npk: None,
to_vpk: None,
amount: fund_amount,

View File

@ -24,8 +24,10 @@ async fn private_transfer_to_owned_account() -> Result<()> {
let to: AccountId = ctx.existing_private_accounts()[1];
let command = Command::AuthTransfer(AuthTransferSubcommand::Send {
from: format_private_account_id(from),
from: Some(format_private_account_id(from)),
from_label: None,
to: Some(format_private_account_id(to)),
to_label: None,
to_npk: None,
to_vpk: None,
amount: 100,
@ -63,8 +65,10 @@ async fn private_transfer_to_foreign_account() -> Result<()> {
let to_vpk = Secp256k1Point::from_scalar(to_npk.0);
let command = Command::AuthTransfer(AuthTransferSubcommand::Send {
from: format_private_account_id(from),
from: Some(format_private_account_id(from)),
from_label: None,
to: None,
to_label: None,
to_npk: Some(to_npk_string),
to_vpk: Some(hex::encode(to_vpk.0)),
amount: 100,
@ -111,8 +115,10 @@ async fn deshielded_transfer_to_public_account() -> Result<()> {
assert_eq!(from_acc.balance, 10000);
let command = Command::AuthTransfer(AuthTransferSubcommand::Send {
from: format_private_account_id(from),
from: Some(format_private_account_id(from)),
from_label: None,
to: Some(format_public_account_id(to)),
to_label: None,
to_npk: None,
to_vpk: None,
amount: 100,
@ -175,8 +181,10 @@ async fn private_transfer_to_owned_account_using_claiming_path() -> Result<()> {
// Send to this account using claiming path (using npk and vpk instead of account ID)
let command = Command::AuthTransfer(AuthTransferSubcommand::Send {
from: format_private_account_id(from),
from: Some(format_private_account_id(from)),
from_label: None,
to: None,
to_label: None,
to_npk: Some(hex::encode(to_keys.nullifier_public_key.0)),
to_vpk: Some(hex::encode(to_keys.viewing_public_key.0)),
amount: 100,
@ -223,8 +231,10 @@ async fn shielded_transfer_to_owned_private_account() -> Result<()> {
let to: AccountId = ctx.existing_private_accounts()[1];
let command = Command::AuthTransfer(AuthTransferSubcommand::Send {
from: format_public_account_id(from),
from: Some(format_public_account_id(from)),
from_label: None,
to: Some(format_private_account_id(to)),
to_label: None,
to_npk: None,
to_vpk: None,
amount: 100,
@ -265,8 +275,10 @@ async fn shielded_transfer_to_foreign_account() -> Result<()> {
let from: AccountId = ctx.existing_public_accounts()[0];
let command = Command::AuthTransfer(AuthTransferSubcommand::Send {
from: format_public_account_id(from),
from: Some(format_public_account_id(from)),
from_label: None,
to: None,
to_label: None,
to_npk: Some(to_npk_string),
to_vpk: Some(hex::encode(to_vpk.0)),
amount: 100,
@ -336,8 +348,10 @@ async fn private_transfer_to_owned_account_continuous_run_path() -> Result<()> {
// Send transfer using nullifier and viewing public keys
let command = Command::AuthTransfer(AuthTransferSubcommand::Send {
from: format_private_account_id(from),
from: Some(format_private_account_id(from)),
from_label: None,
to: None,
to_label: None,
to_npk: Some(hex::encode(to_keys.nullifier_public_key.0)),
to_vpk: Some(hex::encode(to_keys.viewing_public_key.0)),
amount: 100,
@ -385,7 +399,8 @@ async fn initialize_private_account() -> Result<()> {
};
let command = Command::AuthTransfer(AuthTransferSubcommand::Init {
account_id: format_private_account_id(account_id),
account_id: Some(format_private_account_id(account_id)),
account_label: None,
});
wallet::cli::execute_subcommand(ctx.wallet_mut(), command).await?;
@ -417,3 +432,100 @@ async fn initialize_private_account() -> Result<()> {
Ok(())
}
// Sends a private transfer where the sender is resolved through its wallet
// label rather than an explicit account ID, then checks that both updated
// account commitments landed in the on-chain state.
#[test]
async fn private_transfer_using_from_label() -> Result<()> {
    let mut ctx = TestContext::new().await?;
    let sender: AccountId = ctx.existing_private_accounts()[0];
    let receiver: AccountId = ctx.existing_private_accounts()[1];
    // Attach a label to the sender account so the transfer can name it.
    let label = "private-sender-label".to_owned();
    let label_cmd = Command::Account(AccountSubcommand::Label {
        account_id: Some(format_private_account_id(sender)),
        account_label: None,
        label: label.clone(),
    });
    wallet::cli::execute_subcommand(ctx.wallet_mut(), label_cmd).await?;
    // Issue the transfer, identifying the sender via `from_label` only.
    let send_cmd = Command::AuthTransfer(AuthTransferSubcommand::Send {
        from: None,
        from_label: Some(label),
        to: Some(format_private_account_id(receiver)),
        to_label: None,
        to_npk: None,
        to_vpk: None,
        amount: 100,
    });
    wallet::cli::execute_subcommand(ctx.wallet_mut(), send_cmd).await?;
    info!("Waiting for next block creation");
    tokio::time::sleep(Duration::from_secs(TIME_TO_WAIT_FOR_BLOCK_SECONDS)).await;
    // Both post-transfer commitments must be present in the sequencer state.
    let sender_commitment = ctx
        .wallet()
        .get_private_account_commitment(sender)
        .context("Failed to get private account commitment for sender")?;
    assert!(verify_commitment_is_in_state(sender_commitment, ctx.sequencer_client()).await);
    let receiver_commitment = ctx
        .wallet()
        .get_private_account_commitment(receiver)
        .context("Failed to get private account commitment for receiver")?;
    assert!(verify_commitment_is_in_state(receiver_commitment, ctx.sequencer_client()).await);
    info!("Successfully transferred privately using from_label");
    Ok(())
}
// Creates a labeled private account and initializes it under the
// authenticated-transfer program, addressing it purely by its wallet label.
#[test]
async fn initialize_private_account_using_label() -> Result<()> {
    let mut ctx = TestContext::new().await?;
    // Register a new private account that carries a label from the start.
    let label = "init-private-label".to_owned();
    let new_cmd = Command::Account(AccountSubcommand::New(NewSubcommand::Private {
        cci: None,
        label: Some(label.clone()),
    }));
    let SubcommandReturnValue::RegisterAccount { account_id } =
        wallet::cli::execute_subcommand(ctx.wallet_mut(), new_cmd).await?
    else {
        anyhow::bail!("Expected RegisterAccount return value");
    };
    // Run Init referencing the account by label instead of account ID.
    let init_cmd = Command::AuthTransfer(AuthTransferSubcommand::Init {
        account_id: None,
        account_label: Some(label),
    });
    wallet::cli::execute_subcommand(ctx.wallet_mut(), init_cmd).await?;
    tokio::time::sleep(Duration::from_secs(TIME_TO_WAIT_FOR_BLOCK_SECONDS)).await;
    // Sync private state so the wallet observes the new commitment.
    let sync_cmd = Command::Account(AccountSubcommand::SyncPrivate {});
    wallet::cli::execute_subcommand(ctx.wallet_mut(), sync_cmd).await?;
    let new_commitment = ctx
        .wallet()
        .get_private_account_commitment(account_id)
        .context("Failed to get private account commitment")?;
    assert!(verify_commitment_is_in_state(new_commitment, ctx.sequencer_client()).await);
    // The initialized account must now be owned by the auth-transfer program.
    let account = ctx
        .wallet()
        .get_account_private(account_id)
        .context("Failed to get private account")?;
    assert_eq!(
        account.program_owner,
        Program::authenticated_transfer_program().id()
    );
    info!("Successfully initialized private account using label");
    Ok(())
}

View File

@ -17,8 +17,10 @@ async fn successful_transfer_to_existing_account() -> Result<()> {
let mut ctx = TestContext::new().await?;
let command = Command::AuthTransfer(AuthTransferSubcommand::Send {
from: format_public_account_id(ctx.existing_public_accounts()[0]),
from: Some(format_public_account_id(ctx.existing_public_accounts()[0])),
from_label: None,
to: Some(format_public_account_id(ctx.existing_public_accounts()[1])),
to_label: None,
to_npk: None,
to_vpk: None,
amount: 100,
@ -73,8 +75,10 @@ pub async fn successful_transfer_to_new_account() -> Result<()> {
.expect("Failed to find newly created account in the wallet storage");
let command = Command::AuthTransfer(AuthTransferSubcommand::Send {
from: format_public_account_id(ctx.existing_public_accounts()[0]),
from: Some(format_public_account_id(ctx.existing_public_accounts()[0])),
from_label: None,
to: Some(format_public_account_id(new_persistent_account_id)),
to_label: None,
to_npk: None,
to_vpk: None,
amount: 100,
@ -109,8 +113,10 @@ async fn failed_transfer_with_insufficient_balance() -> Result<()> {
let mut ctx = TestContext::new().await?;
let command = Command::AuthTransfer(AuthTransferSubcommand::Send {
from: format_public_account_id(ctx.existing_public_accounts()[0]),
from: Some(format_public_account_id(ctx.existing_public_accounts()[0])),
from_label: None,
to: Some(format_public_account_id(ctx.existing_public_accounts()[1])),
to_label: None,
to_npk: None,
to_vpk: None,
amount: 1_000_000,
@ -147,8 +153,10 @@ async fn two_consecutive_successful_transfers() -> Result<()> {
// First transfer
let command = Command::AuthTransfer(AuthTransferSubcommand::Send {
from: format_public_account_id(ctx.existing_public_accounts()[0]),
from: Some(format_public_account_id(ctx.existing_public_accounts()[0])),
from_label: None,
to: Some(format_public_account_id(ctx.existing_public_accounts()[1])),
to_label: None,
to_npk: None,
to_vpk: None,
amount: 100,
@ -179,8 +187,10 @@ async fn two_consecutive_successful_transfers() -> Result<()> {
// Second transfer
let command = Command::AuthTransfer(AuthTransferSubcommand::Send {
from: format_public_account_id(ctx.existing_public_accounts()[0]),
from: Some(format_public_account_id(ctx.existing_public_accounts()[0])),
from_label: None,
to: Some(format_public_account_id(ctx.existing_public_accounts()[1])),
to_label: None,
to_npk: None,
to_vpk: None,
amount: 100,
@ -226,7 +236,8 @@ async fn initialize_public_account() -> Result<()> {
};
let command = Command::AuthTransfer(AuthTransferSubcommand::Init {
account_id: format_public_account_id(account_id),
account_id: Some(format_public_account_id(account_id)),
account_label: None,
});
wallet::cli::execute_subcommand(ctx.wallet_mut(), command).await?;
@ -245,3 +256,97 @@ async fn initialize_public_account() -> Result<()> {
Ok(())
}
// Public transfer where the sender is resolved by wallet label; verifies the
// expected balance movement on both accounts afterwards.
#[test]
async fn successful_transfer_using_from_label() -> Result<()> {
    let mut ctx = TestContext::new().await?;
    let sender = ctx.existing_public_accounts()[0];
    let receiver = ctx.existing_public_accounts()[1];
    // Attach a label to the sender so the transfer can reference it by name.
    let label = "sender-label".to_owned();
    let label_cmd = Command::Account(AccountSubcommand::Label {
        account_id: Some(format_public_account_id(sender)),
        account_label: None,
        label: label.clone(),
    });
    wallet::cli::execute_subcommand(ctx.wallet_mut(), label_cmd).await?;
    // Issue the transfer with `from: None`, identifying the sender via label.
    let send_cmd = Command::AuthTransfer(AuthTransferSubcommand::Send {
        from: None,
        from_label: Some(label),
        to: Some(format_public_account_id(receiver)),
        to_label: None,
        to_npk: None,
        to_vpk: None,
        amount: 100,
    });
    wallet::cli::execute_subcommand(ctx.wallet_mut(), send_cmd).await?;
    info!("Waiting for next block creation");
    tokio::time::sleep(Duration::from_secs(TIME_TO_WAIT_FOR_BLOCK_SECONDS)).await;
    info!("Checking correct balance move");
    // 100 units should have moved from sender to receiver.
    let sender_balance = ctx.sequencer_client().get_account_balance(sender).await?;
    let receiver_balance = ctx.sequencer_client().get_account_balance(receiver).await?;
    assert_eq!(sender_balance, 9900);
    assert_eq!(receiver_balance, 20100);
    info!("Successfully transferred using from_label");
    Ok(())
}
// Public transfer where the recipient is resolved by wallet label; verifies
// the expected balance movement on both accounts afterwards.
#[test]
async fn successful_transfer_using_to_label() -> Result<()> {
    let mut ctx = TestContext::new().await?;
    let sender = ctx.existing_public_accounts()[0];
    let receiver = ctx.existing_public_accounts()[1];
    // Attach a label to the receiver so the transfer can reference it by name.
    let label = "receiver-label".to_owned();
    let label_cmd = Command::Account(AccountSubcommand::Label {
        account_id: Some(format_public_account_id(receiver)),
        account_label: None,
        label: label.clone(),
    });
    wallet::cli::execute_subcommand(ctx.wallet_mut(), label_cmd).await?;
    // Issue the transfer with `to: None`, identifying the recipient via label.
    let send_cmd = Command::AuthTransfer(AuthTransferSubcommand::Send {
        from: Some(format_public_account_id(sender)),
        from_label: None,
        to: None,
        to_label: Some(label),
        to_npk: None,
        to_vpk: None,
        amount: 100,
    });
    wallet::cli::execute_subcommand(ctx.wallet_mut(), send_cmd).await?;
    info!("Waiting for next block creation");
    tokio::time::sleep(Duration::from_secs(TIME_TO_WAIT_FOR_BLOCK_SECONDS)).await;
    info!("Checking correct balance move");
    // 100 units should have moved from sender to receiver.
    let sender_balance = ctx.sequencer_client().get_account_balance(sender).await?;
    let receiver_balance = ctx.sequencer_client().get_account_balance(receiver).await?;
    assert_eq!(sender_balance, 9900);
    assert_eq!(receiver_balance, 20100);
    info!("Successfully transferred using to_label");
    Ok(())
}

View File

@ -1,14 +1,19 @@
#![expect(
clippy::shadow_unrelated,
clippy::tests_outside_test_module,
reason = "We don't care about these in tests"
)]
use std::time::Duration;
use anyhow::Result;
use anyhow::{Context as _, Result};
use indexer_service_rpc::RpcClient as _;
use integration_tests::{TIME_TO_WAIT_FOR_BLOCK_SECONDS, TestContext, format_public_account_id};
use integration_tests::{
TIME_TO_WAIT_FOR_BLOCK_SECONDS, TestContext, format_private_account_id,
format_public_account_id, verify_commitment_is_in_state,
};
use log::info;
use nssa::AccountId;
use tokio::test;
use wallet::cli::{Command, programs::native_token_transfer::AuthTransferSubcommand};
@ -83,8 +88,10 @@ async fn indexer_state_consistency() -> Result<()> {
let mut ctx = TestContext::new().await?;
let command = Command::AuthTransfer(AuthTransferSubcommand::Send {
from: format_public_account_id(ctx.existing_public_accounts()[0]),
from: Some(format_public_account_id(ctx.existing_public_accounts()[0])),
from_label: None,
to: Some(format_public_account_id(ctx.existing_public_accounts()[1])),
to_label: None,
to_npk: None,
to_vpk: None,
amount: 100,
@ -113,6 +120,38 @@ async fn indexer_state_consistency() -> Result<()> {
assert_eq!(acc_1_balance, 9900);
assert_eq!(acc_2_balance, 20100);
let from: AccountId = ctx.existing_private_accounts()[0];
let to: AccountId = ctx.existing_private_accounts()[1];
let command = Command::AuthTransfer(AuthTransferSubcommand::Send {
from: Some(format_private_account_id(from)),
from_label: None,
to: Some(format_private_account_id(to)),
to_label: None,
to_npk: None,
to_vpk: None,
amount: 100,
});
wallet::cli::execute_subcommand(ctx.wallet_mut(), command).await?;
info!("Waiting for next block creation");
tokio::time::sleep(Duration::from_secs(TIME_TO_WAIT_FOR_BLOCK_SECONDS)).await;
let new_commitment1 = ctx
.wallet()
.get_private_account_commitment(from)
.context("Failed to get private account commitment for sender")?;
assert!(verify_commitment_is_in_state(new_commitment1, ctx.sequencer_client()).await);
let new_commitment2 = ctx
.wallet()
.get_private_account_commitment(to)
.context("Failed to get private account commitment for receiver")?;
assert!(verify_commitment_is_in_state(new_commitment2, ctx.sequencer_client()).await);
info!("Successfully transferred privately to owned account");
// WAIT
info!("Waiting for indexer to parse blocks");
tokio::time::sleep(std::time::Duration::from_millis(L2_TO_L1_TIMEOUT_MILLIS)).await;
@ -147,3 +186,76 @@ async fn indexer_state_consistency() -> Result<()> {
Ok(())
}
// Performs a label-based public transfer and then checks that the indexer's
// view of the sender account matches the sequencer's after the indexer has
// had time to process the new blocks.
#[test]
async fn indexer_state_consistency_with_labels() -> Result<()> {
    let mut ctx = TestContext::new().await?;
    // Assign labels to both accounts
    let from_label = "idx-sender-label".to_owned();
    let to_label_str = "idx-receiver-label".to_owned();
    let label_cmd = Command::Account(wallet::cli::account::AccountSubcommand::Label {
        account_id: Some(format_public_account_id(ctx.existing_public_accounts()[0])),
        account_label: None,
        label: from_label.clone(),
    });
    wallet::cli::execute_subcommand(ctx.wallet_mut(), label_cmd).await?;
    let label_cmd = Command::Account(wallet::cli::account::AccountSubcommand::Label {
        account_id: Some(format_public_account_id(ctx.existing_public_accounts()[1])),
        account_label: None,
        label: to_label_str.clone(),
    });
    wallet::cli::execute_subcommand(ctx.wallet_mut(), label_cmd).await?;
    // Send using labels instead of account IDs
    let command = Command::AuthTransfer(AuthTransferSubcommand::Send {
        from: None,
        from_label: Some(from_label),
        to: None,
        to_label: Some(to_label_str),
        to_npk: None,
        to_vpk: None,
        amount: 100,
    });
    wallet::cli::execute_subcommand(ctx.wallet_mut(), command).await?;
    info!("Waiting for next block creation");
    tokio::time::sleep(Duration::from_secs(TIME_TO_WAIT_FOR_BLOCK_SECONDS)).await;
    // Fully-qualified call: disambiguates the sequencer RpcClient trait from
    // the indexer_service_rpc::RpcClient trait that is also in scope here.
    let acc_1_balance = sequencer_service_rpc::RpcClient::get_account_balance(
        ctx.sequencer_client(),
        ctx.existing_public_accounts()[0],
    )
    .await?;
    let acc_2_balance = sequencer_service_rpc::RpcClient::get_account_balance(
        ctx.sequencer_client(),
        ctx.existing_public_accounts()[1],
    )
    .await?;
    // 100 units should have moved between the two pre-funded accounts.
    assert_eq!(acc_1_balance, 9900);
    assert_eq!(acc_2_balance, 20100);
    info!("Waiting for indexer to parse blocks");
    tokio::time::sleep(std::time::Duration::from_millis(L2_TO_L1_TIMEOUT_MILLIS)).await;
    // Indexer and sequencer must agree on the sender account's state.
    let acc1_ind_state = ctx
        .indexer_client()
        .get_account(ctx.existing_public_accounts()[0].into())
        .await
        .unwrap();
    let acc1_seq_state = sequencer_service_rpc::RpcClient::get_account(
        ctx.sequencer_client(),
        ctx.existing_public_accounts()[0],
    )
    .await?;
    assert_eq!(acc1_ind_state, acc1_seq_state.into());
    info!("Indexer state is consistent after label-based transfer");
    Ok(())
}

View File

@ -69,8 +69,10 @@ async fn sync_private_account_with_non_zero_chain_index() -> Result<()> {
// Send to this account using claiming path (using npk and vpk instead of account ID)
let command = Command::AuthTransfer(AuthTransferSubcommand::Send {
from: format_private_account_id(from),
from: Some(format_private_account_id(from)),
from_label: None,
to: None,
to_label: None,
to_npk: Some(hex::encode(to_keys.nullifier_public_key.0)),
to_vpk: Some(hex::encode(to_keys.viewing_public_key.0)),
amount: 100,
@ -143,8 +145,10 @@ async fn restore_keys_from_seed() -> Result<()> {
// Send to first private account
let command = Command::AuthTransfer(AuthTransferSubcommand::Send {
from: format_private_account_id(from),
from: Some(format_private_account_id(from)),
from_label: None,
to: Some(format_private_account_id(to_account_id1)),
to_label: None,
to_npk: None,
to_vpk: None,
amount: 100,
@ -153,8 +157,10 @@ async fn restore_keys_from_seed() -> Result<()> {
// Send to second private account
let command = Command::AuthTransfer(AuthTransferSubcommand::Send {
from: format_private_account_id(from),
from: Some(format_private_account_id(from)),
from_label: None,
to: Some(format_private_account_id(to_account_id2)),
to_label: None,
to_npk: None,
to_vpk: None,
amount: 101,
@ -191,8 +197,10 @@ async fn restore_keys_from_seed() -> Result<()> {
// Send to first public account
let command = Command::AuthTransfer(AuthTransferSubcommand::Send {
from: format_public_account_id(from),
from: Some(format_public_account_id(from)),
from_label: None,
to: Some(format_public_account_id(to_account_id3)),
to_label: None,
to_npk: None,
to_vpk: None,
amount: 102,
@ -201,8 +209,10 @@ async fn restore_keys_from_seed() -> Result<()> {
// Send to second public account
let command = Command::AuthTransfer(AuthTransferSubcommand::Send {
from: format_public_account_id(from),
from: Some(format_public_account_id(from)),
from_label: None,
to: Some(format_public_account_id(to_account_id4)),
to_label: None,
to_npk: None,
to_vpk: None,
amount: 103,
@ -264,8 +274,10 @@ async fn restore_keys_from_seed() -> Result<()> {
// Test that restored accounts can send transactions
let command = Command::AuthTransfer(AuthTransferSubcommand::Send {
from: format_private_account_id(to_account_id1),
from: Some(format_private_account_id(to_account_id1)),
from_label: None,
to: Some(format_private_account_id(to_account_id2)),
to_label: None,
to_npk: None,
to_vpk: None,
amount: 10,
@ -273,8 +285,10 @@ async fn restore_keys_from_seed() -> Result<()> {
wallet::cli::execute_subcommand(ctx.wallet_mut(), command).await?;
let command = Command::AuthTransfer(AuthTransferSubcommand::Send {
from: format_public_account_id(to_account_id3),
from: Some(format_public_account_id(to_account_id3)),
from_label: None,
to: Some(format_public_account_id(to_account_id4)),
to_label: None,
to_npk: None,
to_vpk: None,
amount: 11,

View File

@ -52,7 +52,8 @@ async fn claim_pinata_to_uninitialized_public_account_fails_fast() -> Result<()>
let claim_result = wallet::cli::execute_subcommand(
ctx.wallet_mut(),
Command::Pinata(PinataProgramAgnosticSubcommand::Claim {
to: winner_account_id_formatted,
to: Some(winner_account_id_formatted),
to_label: None,
}),
)
.await;
@ -106,7 +107,8 @@ async fn claim_pinata_to_uninitialized_private_account_fails_fast() -> Result<()
let claim_result = wallet::cli::execute_subcommand(
ctx.wallet_mut(),
Command::Pinata(PinataProgramAgnosticSubcommand::Claim {
to: winner_account_id_formatted,
to: Some(winner_account_id_formatted),
to_label: None,
}),
)
.await;
@ -137,7 +139,8 @@ async fn claim_pinata_to_existing_public_account() -> Result<()> {
let pinata_prize = 150;
let command = Command::Pinata(PinataProgramAgnosticSubcommand::Claim {
to: format_public_account_id(ctx.existing_public_accounts()[0]),
to: Some(format_public_account_id(ctx.existing_public_accounts()[0])),
to_label: None,
});
let pinata_balance_pre = ctx
@ -175,7 +178,10 @@ async fn claim_pinata_to_existing_private_account() -> Result<()> {
let pinata_prize = 150;
let command = Command::Pinata(PinataProgramAgnosticSubcommand::Claim {
to: format_private_account_id(ctx.existing_private_accounts()[0]),
to: Some(format_private_account_id(
ctx.existing_private_accounts()[0],
)),
to_label: None,
});
let pinata_balance_pre = ctx
@ -239,7 +245,8 @@ async fn claim_pinata_to_new_private_account() -> Result<()> {
// Initialize account under auth transfer program
let command = Command::AuthTransfer(AuthTransferSubcommand::Init {
account_id: winner_account_id_formatted.clone(),
account_id: Some(winner_account_id_formatted.clone()),
account_label: None,
});
wallet::cli::execute_subcommand(ctx.wallet_mut(), command).await?;
@ -254,7 +261,8 @@ async fn claim_pinata_to_new_private_account() -> Result<()> {
// Claim pinata to the new private account
let command = Command::Pinata(PinataProgramAgnosticSubcommand::Claim {
to: winner_account_id_formatted,
to: Some(winner_account_id_formatted),
to_label: None,
});
let pinata_balance_pre = ctx

View File

@ -79,8 +79,10 @@ async fn create_and_transfer_public_token() -> Result<()> {
let name = "A NAME".to_owned();
let total_supply = 37;
let subcommand = TokenProgramAgnosticSubcommand::New {
definition_account_id: format_public_account_id(definition_account_id),
supply_account_id: format_public_account_id(supply_account_id),
definition_account_id: Some(format_public_account_id(definition_account_id)),
definition_account_label: None,
supply_account_id: Some(format_public_account_id(supply_account_id)),
supply_account_label: None,
name: name.clone(),
total_supply,
};
@ -126,8 +128,10 @@ async fn create_and_transfer_public_token() -> Result<()> {
// Transfer 7 tokens from supply_acc to recipient_account_id
let transfer_amount = 7;
let subcommand = TokenProgramAgnosticSubcommand::Send {
from: format_public_account_id(supply_account_id),
from: Some(format_public_account_id(supply_account_id)),
from_label: None,
to: Some(format_public_account_id(recipient_account_id)),
to_label: None,
to_npk: None,
to_vpk: None,
amount: transfer_amount,
@ -171,8 +175,10 @@ async fn create_and_transfer_public_token() -> Result<()> {
// Burn 3 tokens from recipient_acc
let burn_amount = 3;
let subcommand = TokenProgramAgnosticSubcommand::Burn {
definition: format_public_account_id(definition_account_id),
holder: format_public_account_id(recipient_account_id),
definition: Some(format_public_account_id(definition_account_id)),
definition_label: None,
holder: Some(format_public_account_id(recipient_account_id)),
holder_label: None,
amount: burn_amount,
};
@ -215,8 +221,10 @@ async fn create_and_transfer_public_token() -> Result<()> {
// Mint 10 tokens at recipient_acc
let mint_amount = 10;
let subcommand = TokenProgramAgnosticSubcommand::Mint {
definition: format_public_account_id(definition_account_id),
definition: Some(format_public_account_id(definition_account_id)),
definition_label: None,
holder: Some(format_public_account_id(recipient_account_id)),
holder_label: None,
holder_npk: None,
holder_vpk: None,
amount: mint_amount,
@ -319,8 +327,10 @@ async fn create_and_transfer_token_with_private_supply() -> Result<()> {
let name = "A NAME".to_owned();
let total_supply = 37;
let subcommand = TokenProgramAgnosticSubcommand::New {
definition_account_id: format_public_account_id(definition_account_id),
supply_account_id: format_private_account_id(supply_account_id),
definition_account_id: Some(format_public_account_id(definition_account_id)),
definition_account_label: None,
supply_account_id: Some(format_private_account_id(supply_account_id)),
supply_account_label: None,
name: name.clone(),
total_supply,
};
@ -356,8 +366,10 @@ async fn create_and_transfer_token_with_private_supply() -> Result<()> {
// Transfer 7 tokens from supply_acc to recipient_account_id
let transfer_amount = 7;
let subcommand = TokenProgramAgnosticSubcommand::Send {
from: format_private_account_id(supply_account_id),
from: Some(format_private_account_id(supply_account_id)),
from_label: None,
to: Some(format_private_account_id(recipient_account_id)),
to_label: None,
to_npk: None,
to_vpk: None,
amount: transfer_amount,
@ -383,8 +395,10 @@ async fn create_and_transfer_token_with_private_supply() -> Result<()> {
// Burn 3 tokens from recipient_acc
let burn_amount = 3;
let subcommand = TokenProgramAgnosticSubcommand::Burn {
definition: format_public_account_id(definition_account_id),
holder: format_private_account_id(recipient_account_id),
definition: Some(format_public_account_id(definition_account_id)),
definition_label: None,
holder: Some(format_private_account_id(recipient_account_id)),
holder_label: None,
amount: burn_amount,
};
@ -475,8 +489,10 @@ async fn create_token_with_private_definition() -> Result<()> {
let name = "A NAME".to_owned();
let total_supply = 37;
let subcommand = TokenProgramAgnosticSubcommand::New {
definition_account_id: format_private_account_id(definition_account_id),
supply_account_id: format_public_account_id(supply_account_id),
definition_account_id: Some(format_private_account_id(definition_account_id)),
definition_account_label: None,
supply_account_id: Some(format_public_account_id(supply_account_id)),
supply_account_label: None,
name: name.clone(),
total_supply,
};
@ -544,8 +560,10 @@ async fn create_token_with_private_definition() -> Result<()> {
// Mint to public account
let mint_amount_public = 10;
let subcommand = TokenProgramAgnosticSubcommand::Mint {
definition: format_private_account_id(definition_account_id),
definition: Some(format_private_account_id(definition_account_id)),
definition_label: None,
holder: Some(format_public_account_id(recipient_account_id_public)),
holder_label: None,
holder_npk: None,
holder_vpk: None,
amount: mint_amount_public,
@ -590,8 +608,10 @@ async fn create_token_with_private_definition() -> Result<()> {
// Mint to private account
let mint_amount_private = 5;
let subcommand = TokenProgramAgnosticSubcommand::Mint {
definition: format_private_account_id(definition_account_id),
definition: Some(format_private_account_id(definition_account_id)),
definition_label: None,
holder: Some(format_private_account_id(recipient_account_id_private)),
holder_label: None,
holder_npk: None,
holder_vpk: None,
amount: mint_amount_private,
@ -669,8 +689,10 @@ async fn create_token_with_private_definition_and_supply() -> Result<()> {
let name = "A NAME".to_owned();
let total_supply = 37;
let subcommand = TokenProgramAgnosticSubcommand::New {
definition_account_id: format_private_account_id(definition_account_id),
supply_account_id: format_private_account_id(supply_account_id),
definition_account_id: Some(format_private_account_id(definition_account_id)),
definition_account_label: None,
supply_account_id: Some(format_private_account_id(supply_account_id)),
supply_account_label: None,
name,
total_supply,
};
@ -728,8 +750,10 @@ async fn create_token_with_private_definition_and_supply() -> Result<()> {
// Transfer tokens
let transfer_amount = 7;
let subcommand = TokenProgramAgnosticSubcommand::Send {
from: format_private_account_id(supply_account_id),
from: Some(format_private_account_id(supply_account_id)),
from_label: None,
to: Some(format_private_account_id(recipient_account_id)),
to_label: None,
to_npk: None,
to_vpk: None,
amount: transfer_amount,
@ -841,8 +865,10 @@ async fn shielded_token_transfer() -> Result<()> {
let name = "A NAME".to_owned();
let total_supply = 37;
let subcommand = TokenProgramAgnosticSubcommand::New {
definition_account_id: format_public_account_id(definition_account_id),
supply_account_id: format_public_account_id(supply_account_id),
definition_account_id: Some(format_public_account_id(definition_account_id)),
definition_account_label: None,
supply_account_id: Some(format_public_account_id(supply_account_id)),
supply_account_label: None,
name,
total_supply,
};
@ -855,8 +881,10 @@ async fn shielded_token_transfer() -> Result<()> {
// Perform shielded transfer: public supply -> private recipient
let transfer_amount = 7;
let subcommand = TokenProgramAgnosticSubcommand::Send {
from: format_public_account_id(supply_account_id),
from: Some(format_public_account_id(supply_account_id)),
from_label: None,
to: Some(format_private_account_id(recipient_account_id)),
to_label: None,
to_npk: None,
to_vpk: None,
amount: transfer_amount,
@ -963,8 +991,10 @@ async fn deshielded_token_transfer() -> Result<()> {
let name = "A NAME".to_owned();
let total_supply = 37;
let subcommand = TokenProgramAgnosticSubcommand::New {
definition_account_id: format_public_account_id(definition_account_id),
supply_account_id: format_private_account_id(supply_account_id),
definition_account_id: Some(format_public_account_id(definition_account_id)),
definition_account_label: None,
supply_account_id: Some(format_private_account_id(supply_account_id)),
supply_account_label: None,
name,
total_supply,
};
@ -977,8 +1007,10 @@ async fn deshielded_token_transfer() -> Result<()> {
// Perform deshielded transfer: private supply -> public recipient
let transfer_amount = 7;
let subcommand = TokenProgramAgnosticSubcommand::Send {
from: format_private_account_id(supply_account_id),
from: Some(format_private_account_id(supply_account_id)),
from_label: None,
to: Some(format_public_account_id(recipient_account_id)),
to_label: None,
to_npk: None,
to_vpk: None,
amount: transfer_amount,
@ -1069,8 +1101,10 @@ async fn token_claiming_path_with_private_accounts() -> Result<()> {
let name = "A NAME".to_owned();
let total_supply = 37;
let subcommand = TokenProgramAgnosticSubcommand::New {
definition_account_id: format_private_account_id(definition_account_id),
supply_account_id: format_private_account_id(supply_account_id),
definition_account_id: Some(format_private_account_id(definition_account_id)),
definition_account_label: None,
supply_account_id: Some(format_private_account_id(supply_account_id)),
supply_account_label: None,
name,
total_supply,
};
@ -1108,8 +1142,10 @@ async fn token_claiming_path_with_private_accounts() -> Result<()> {
// Mint using claiming path (foreign account)
let mint_amount = 9;
let subcommand = TokenProgramAgnosticSubcommand::Mint {
definition: format_private_account_id(definition_account_id),
definition: Some(format_private_account_id(definition_account_id)),
definition_label: None,
holder: None,
holder_label: None,
holder_npk: Some(hex::encode(holder_keys.nullifier_public_key.0)),
holder_vpk: Some(hex::encode(holder_keys.viewing_public_key.0)),
amount: mint_amount,
@ -1149,3 +1185,193 @@ async fn token_claiming_path_with_private_accounts() -> Result<()> {
Ok(())
}
#[test]
async fn create_token_using_labels() -> Result<()> {
let mut ctx = TestContext::new().await?;
// Create definition and supply accounts with labels
let def_label = "token-definition-label".to_owned();
let supply_label = "token-supply-label".to_owned();
let result = wallet::cli::execute_subcommand(
ctx.wallet_mut(),
Command::Account(AccountSubcommand::New(NewSubcommand::Public {
cci: None,
label: Some(def_label.clone()),
})),
)
.await?;
let SubcommandReturnValue::RegisterAccount {
account_id: definition_account_id,
} = result
else {
anyhow::bail!("Expected RegisterAccount return value");
};
let result = wallet::cli::execute_subcommand(
ctx.wallet_mut(),
Command::Account(AccountSubcommand::New(NewSubcommand::Public {
cci: None,
label: Some(supply_label.clone()),
})),
)
.await?;
let SubcommandReturnValue::RegisterAccount {
account_id: supply_account_id,
} = result
else {
anyhow::bail!("Expected RegisterAccount return value");
};
// Create token using account labels instead of IDs
let name = "LABELED TOKEN".to_owned();
let total_supply = 100;
let subcommand = TokenProgramAgnosticSubcommand::New {
definition_account_id: None,
definition_account_label: Some(def_label),
supply_account_id: None,
supply_account_label: Some(supply_label),
name: name.clone(),
total_supply,
};
wallet::cli::execute_subcommand(ctx.wallet_mut(), Command::Token(subcommand)).await?;
info!("Waiting for next block creation");
tokio::time::sleep(Duration::from_secs(TIME_TO_WAIT_FOR_BLOCK_SECONDS)).await;
let definition_acc = ctx
.sequencer_client()
.get_account(definition_account_id)
.await?;
let token_definition = TokenDefinition::try_from(&definition_acc.data)?;
assert_eq!(definition_acc.program_owner, Program::token().id());
assert_eq!(
token_definition,
TokenDefinition::Fungible {
name,
total_supply,
metadata_id: None
}
);
let supply_acc = ctx
.sequencer_client()
.get_account(supply_account_id)
.await?;
let token_holding = TokenHolding::try_from(&supply_acc.data)?;
assert_eq!(
token_holding,
TokenHolding::Fungible {
definition_id: definition_account_id,
balance: total_supply
}
);
info!("Successfully created token using definition and supply account labels");
Ok(())
}
#[test]
async fn transfer_token_using_from_label() -> Result<()> {
let mut ctx = TestContext::new().await?;
// Create definition account
let result = wallet::cli::execute_subcommand(
ctx.wallet_mut(),
Command::Account(AccountSubcommand::New(NewSubcommand::Public {
cci: None,
label: None,
})),
)
.await?;
let SubcommandReturnValue::RegisterAccount {
account_id: definition_account_id,
} = result
else {
anyhow::bail!("Expected RegisterAccount return value");
};
// Create supply account with a label
let supply_label = "token-supply-sender".to_owned();
let result = wallet::cli::execute_subcommand(
ctx.wallet_mut(),
Command::Account(AccountSubcommand::New(NewSubcommand::Public {
cci: None,
label: Some(supply_label.clone()),
})),
)
.await?;
let SubcommandReturnValue::RegisterAccount {
account_id: supply_account_id,
} = result
else {
anyhow::bail!("Expected RegisterAccount return value");
};
// Create recipient account
let result = wallet::cli::execute_subcommand(
ctx.wallet_mut(),
Command::Account(AccountSubcommand::New(NewSubcommand::Public {
cci: None,
label: None,
})),
)
.await?;
let SubcommandReturnValue::RegisterAccount {
account_id: recipient_account_id,
} = result
else {
anyhow::bail!("Expected RegisterAccount return value");
};
// Create token
let total_supply = 50;
let subcommand = TokenProgramAgnosticSubcommand::New {
definition_account_id: Some(format_public_account_id(definition_account_id)),
definition_account_label: None,
supply_account_id: Some(format_public_account_id(supply_account_id)),
supply_account_label: None,
name: "LABEL TEST TOKEN".to_owned(),
total_supply,
};
wallet::cli::execute_subcommand(ctx.wallet_mut(), Command::Token(subcommand)).await?;
info!("Waiting for next block creation");
tokio::time::sleep(Duration::from_secs(TIME_TO_WAIT_FOR_BLOCK_SECONDS)).await;
// Transfer token using from_label instead of from
let transfer_amount = 20;
let subcommand = TokenProgramAgnosticSubcommand::Send {
from: None,
from_label: Some(supply_label),
to: Some(format_public_account_id(recipient_account_id)),
to_label: None,
to_npk: None,
to_vpk: None,
amount: transfer_amount,
};
wallet::cli::execute_subcommand(ctx.wallet_mut(), Command::Token(subcommand)).await?;
info!("Waiting for next block creation");
tokio::time::sleep(Duration::from_secs(TIME_TO_WAIT_FOR_BLOCK_SECONDS)).await;
let recipient_acc = ctx
.sequencer_client()
.get_account(recipient_account_id)
.await?;
let token_holding = TokenHolding::try_from(&recipient_acc.data)?;
assert_eq!(
token_holding,
TokenHolding::Fungible {
definition_id: definition_account_id,
balance: transfer_amount
}
);
info!("Successfully transferred token using from_label");
Ok(())
}

View File

@ -9,6 +9,7 @@ workspace = true
[dependencies]
nssa_core = { workspace = true, features = ["host"] }
clock_core.workspace = true
anyhow.workspace = true
thiserror.workspace = true

View File

@ -55,7 +55,7 @@ pub type NullifierSecretKey = [u8; 32];
#[derive(Serialize, Deserialize, BorshSerialize, BorshDeserialize)]
#[cfg_attr(
any(feature = "host", test),
derive(Clone, PartialEq, Eq, PartialOrd, Ord, Hash)
derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)
)]
pub struct Nullifier(pub(super) [u8; 32]);

View File

@ -17,6 +17,7 @@ pub type ProgramId = [u32; 8];
pub type InstructionData = Vec<u32>;
pub struct ProgramInput<T> {
pub self_program_id: ProgramId,
pub caller_program_id: Option<ProgramId>,
pub pre_states: Vec<AccountWithMetadata>,
pub instruction: T,
}
@ -284,6 +285,9 @@ pub struct InvalidWindow;
pub struct ProgramOutput {
/// The program ID of the program that produced this output.
pub self_program_id: ProgramId,
/// The program ID of the caller that invoked this program via a chained call,
/// or `None` if this is a top-level call.
pub caller_program_id: Option<ProgramId>,
/// The instruction data the program received to produce this output.
pub instruction_data: InstructionData,
/// The account pre states the program received to produce this output.
@ -301,12 +305,14 @@ pub struct ProgramOutput {
impl ProgramOutput {
pub const fn new(
self_program_id: ProgramId,
caller_program_id: Option<ProgramId>,
instruction_data: InstructionData,
pre_states: Vec<AccountWithMetadata>,
post_states: Vec<AccountPostState>,
) -> Self {
Self {
self_program_id,
caller_program_id,
instruction_data,
pre_states,
post_states,
@ -421,12 +427,14 @@ pub fn compute_authorized_pdas(
#[must_use]
pub fn read_nssa_inputs<T: DeserializeOwned>() -> (ProgramInput<T>, InstructionData) {
let self_program_id: ProgramId = env::read();
let caller_program_id: Option<ProgramId> = env::read();
let pre_states: Vec<AccountWithMetadata> = env::read();
let instruction_words: InstructionData = env::read();
let instruction = T::deserialize(&mut Deserializer::new(instruction_words.as_ref())).unwrap();
(
ProgramInput {
self_program_id,
caller_program_id,
pre_states,
instruction,
},
@ -627,7 +635,7 @@ mod tests {
#[test]
fn program_output_try_with_block_validity_window_range() {
let output = ProgramOutput::new(DEFAULT_PROGRAM_ID, vec![], vec![], vec![])
let output = ProgramOutput::new(DEFAULT_PROGRAM_ID, None, vec![], vec![], vec![])
.try_with_block_validity_window(10_u64..100)
.unwrap();
assert_eq!(output.block_validity_window.start(), Some(10));
@ -636,7 +644,7 @@ mod tests {
#[test]
fn program_output_with_block_validity_window_range_from() {
let output = ProgramOutput::new(DEFAULT_PROGRAM_ID, vec![], vec![], vec![])
let output = ProgramOutput::new(DEFAULT_PROGRAM_ID, None, vec![], vec![], vec![])
.with_block_validity_window(10_u64..);
assert_eq!(output.block_validity_window.start(), Some(10));
assert_eq!(output.block_validity_window.end(), None);
@ -644,7 +652,7 @@ mod tests {
#[test]
fn program_output_with_block_validity_window_range_to() {
let output = ProgramOutput::new(DEFAULT_PROGRAM_ID, vec![], vec![], vec![])
let output = ProgramOutput::new(DEFAULT_PROGRAM_ID, None, vec![], vec![], vec![])
.with_block_validity_window(..100_u64);
assert_eq!(output.block_validity_window.start(), None);
assert_eq!(output.block_validity_window.end(), Some(100));
@ -652,7 +660,7 @@ mod tests {
#[test]
fn program_output_try_with_block_validity_window_empty_range_fails() {
let result = ProgramOutput::new(DEFAULT_PROGRAM_ID, vec![], vec![], vec![])
let result = ProgramOutput::new(DEFAULT_PROGRAM_ID, None, vec![], vec![], vec![])
.try_with_block_validity_window(5_u64..5);
assert!(result.is_err());
}

View File

@ -16,7 +16,11 @@ pub use program_deployment_transaction::ProgramDeploymentTransaction;
pub use program_methods::PRIVACY_PRESERVING_CIRCUIT_ID;
pub use public_transaction::PublicTransaction;
pub use signature::{PrivateKey, PublicKey, Signature};
pub use state::V03State;
pub use state::{
CLOCK_01_PROGRAM_ACCOUNT_ID, CLOCK_10_PROGRAM_ACCOUNT_ID, CLOCK_50_PROGRAM_ACCOUNT_ID,
CLOCK_PROGRAM_ACCOUNT_IDS, V03State,
};
pub use validated_state_diff::ValidatedStateDiff;
pub mod encoding;
pub mod error;
@ -27,6 +31,7 @@ pub mod program_deployment_transaction;
pub mod public_transaction;
mod signature;
mod state;
mod validated_state_diff;
pub mod program_methods {
include!(concat!(env!("OUT_DIR"), "/program_methods/mod.rs"));

View File

@ -87,15 +87,16 @@ pub fn execute_and_prove(
pda_seeds: vec![],
};
let mut chained_calls = VecDeque::from_iter([(initial_call, initial_program)]);
let mut chained_calls = VecDeque::from_iter([(initial_call, initial_program, None)]);
let mut chain_calls_counter = 0;
while let Some((chained_call, program)) = chained_calls.pop_front() {
while let Some((chained_call, program, caller_program_id)) = chained_calls.pop_front() {
if chain_calls_counter >= MAX_NUMBER_CHAINED_CALLS {
return Err(NssaError::MaxChainedCallsDepthExceeded);
}
let inner_receipt = execute_and_prove_program(
program,
caller_program_id,
&chained_call.pre_states,
&chained_call.instruction_data,
)?;
@ -115,7 +116,7 @@ pub fn execute_and_prove(
let next_program = dependencies
.get(&new_call.program_id)
.ok_or(NssaError::InvalidProgramBehavior)?;
chained_calls.push_front((new_call, next_program));
chained_calls.push_front((new_call, next_program, Some(chained_call.program_id)));
}
chain_calls_counter = chain_calls_counter
@ -153,12 +154,19 @@ pub fn execute_and_prove(
fn execute_and_prove_program(
program: &Program,
caller_program_id: Option<ProgramId>,
pre_states: &[AccountWithMetadata],
instruction_data: &InstructionData,
) -> Result<Receipt, NssaError> {
// Write inputs to the program
let mut env_builder = ExecutorEnv::builder();
Program::write_inputs(program.id(), pre_states, instruction_data, &mut env_builder)?;
Program::write_inputs(
program.id(),
caller_program_id,
pre_states,
instruction_data,
&mut env_builder,
)?;
let env = env_builder.build().unwrap();
// Prove the program

View File

@ -1,19 +1,10 @@
use std::{
collections::{HashMap, HashSet},
hash::Hash,
};
use std::collections::HashSet;
use borsh::{BorshDeserialize, BorshSerialize};
use nssa_core::{
BlockId, PrivacyPreservingCircuitOutput, Timestamp,
account::{Account, AccountWithMetadata},
};
use nssa_core::account::AccountId;
use sha2::{Digest as _, digest::FixedOutput as _};
use super::{message::Message, witness_set::WitnessSet};
use crate::{
AccountId, V03State, error::NssaError, privacy_preserving_transaction::circuit::Proof,
};
#[derive(Debug, Clone, PartialEq, Eq, BorshSerialize, BorshDeserialize)]
pub struct PrivacyPreservingTransaction {
@ -30,108 +21,6 @@ impl PrivacyPreservingTransaction {
}
}
pub(crate) fn validate_and_produce_public_state_diff(
&self,
state: &V03State,
block_id: BlockId,
timestamp: Timestamp,
) -> Result<HashMap<AccountId, Account>, NssaError> {
let message = &self.message;
let witness_set = &self.witness_set;
// 1. Commitments or nullifiers are non empty
if message.new_commitments.is_empty() && message.new_nullifiers.is_empty() {
return Err(NssaError::InvalidInput(
"Empty commitments and empty nullifiers found in message".into(),
));
}
// 2. Check there are no duplicate account_ids in the public_account_ids list.
if n_unique(&message.public_account_ids) != message.public_account_ids.len() {
return Err(NssaError::InvalidInput(
"Duplicate account_ids found in message".into(),
));
}
// Check there are no duplicate nullifiers in the new_nullifiers list
if n_unique(&message.new_nullifiers) != message.new_nullifiers.len() {
return Err(NssaError::InvalidInput(
"Duplicate nullifiers found in message".into(),
));
}
// Check there are no duplicate commitments in the new_commitments list
if n_unique(&message.new_commitments) != message.new_commitments.len() {
return Err(NssaError::InvalidInput(
"Duplicate commitments found in message".into(),
));
}
// 3. Nonce checks and Valid signatures
// Check exactly one nonce is provided for each signature
if message.nonces.len() != witness_set.signatures_and_public_keys.len() {
return Err(NssaError::InvalidInput(
"Mismatch between number of nonces and signatures/public keys".into(),
));
}
// Check the signatures are valid
if !witness_set.signatures_are_valid_for(message) {
return Err(NssaError::InvalidInput(
"Invalid signature for given message and public key".into(),
));
}
let signer_account_ids = self.signer_account_ids();
// Check nonces corresponds to the current nonces on the public state.
for (account_id, nonce) in signer_account_ids.iter().zip(&message.nonces) {
let current_nonce = state.get_account_by_id(*account_id).nonce;
if current_nonce != *nonce {
return Err(NssaError::InvalidInput("Nonce mismatch".into()));
}
}
// Verify validity window
if !message.block_validity_window.is_valid_for(block_id)
|| !message.timestamp_validity_window.is_valid_for(timestamp)
{
return Err(NssaError::OutOfValidityWindow);
}
// Build pre_states for proof verification
let public_pre_states: Vec<_> = message
.public_account_ids
.iter()
.map(|account_id| {
AccountWithMetadata::new(
state.get_account_by_id(*account_id),
signer_account_ids.contains(account_id),
*account_id,
)
})
.collect();
// 4. Proof verification
check_privacy_preserving_circuit_proof_is_valid(
&witness_set.proof,
&public_pre_states,
message,
)?;
// 5. Commitment freshness
state.check_commitments_are_new(&message.new_commitments)?;
// 6. Nullifier uniqueness
state.check_nullifiers_are_valid(&message.new_nullifiers)?;
Ok(message
.public_account_ids
.iter()
.copied()
.zip(message.public_post_states.clone())
.collect())
}
#[must_use]
pub const fn message(&self) -> &Message {
&self.message
@ -170,36 +59,6 @@ impl PrivacyPreservingTransaction {
}
}
fn check_privacy_preserving_circuit_proof_is_valid(
proof: &Proof,
public_pre_states: &[AccountWithMetadata],
message: &Message,
) -> Result<(), NssaError> {
let output = PrivacyPreservingCircuitOutput {
public_pre_states: public_pre_states.to_vec(),
public_post_states: message.public_post_states.clone(),
ciphertexts: message
.encrypted_private_post_states
.iter()
.cloned()
.map(|value| value.ciphertext)
.collect(),
new_commitments: message.new_commitments.clone(),
new_nullifiers: message.new_nullifiers.clone(),
block_validity_window: message.block_validity_window,
timestamp_validity_window: message.timestamp_validity_window,
};
proof
.is_valid_for(&output)
.then_some(())
.ok_or(NssaError::InvalidPrivacyPreservingProof)
}
fn n_unique<T: Eq + Hash>(data: &[T]) -> usize {
let set: HashSet<&T> = data.iter().collect();
set.len()
}
#[cfg(test)]
mod tests {
use crate::{

View File

@ -9,7 +9,9 @@ use serde::Serialize;
use crate::{
error::NssaError,
program_methods::{
AMM_ELF, ASSOCIATED_TOKEN_ACCOUNT_ELF, AUTHENTICATED_TRANSFER_ELF, PINATA_ELF, TOKEN_ELF,
AMM_ELF, AMM_ID, ASSOCIATED_TOKEN_ACCOUNT_ELF, ASSOCIATED_TOKEN_ACCOUNT_ID,
AUTHENTICATED_TRANSFER_ELF, AUTHENTICATED_TRANSFER_ID, CLOCK_ELF, CLOCK_ID, PINATA_ELF,
PINATA_ID, TOKEN_ELF, TOKEN_ID,
},
};
@ -52,13 +54,20 @@ impl Program {
pub(crate) fn execute(
&self,
caller_program_id: Option<ProgramId>,
pre_states: &[AccountWithMetadata],
instruction_data: &InstructionData,
) -> Result<ProgramOutput, NssaError> {
// Write inputs to the program
let mut env_builder = ExecutorEnv::builder();
env_builder.session_limit(Some(MAX_NUM_CYCLES_PUBLIC_EXECUTION));
Self::write_inputs(self.id, pre_states, instruction_data, &mut env_builder)?;
Self::write_inputs(
self.id,
caller_program_id,
pre_states,
instruction_data,
&mut env_builder,
)?;
let env = env_builder.build().unwrap();
// Execute the program (without proving)
@ -79,6 +88,7 @@ impl Program {
/// Writes inputs to `env_builder` in the order expected by the programs.
pub(crate) fn write_inputs(
program_id: ProgramId,
caller_program_id: Option<ProgramId>,
pre_states: &[AccountWithMetadata],
instruction_data: &[u32],
env_builder: &mut ExecutorEnvBuilder,
@ -86,6 +96,9 @@ impl Program {
env_builder
.write(&program_id)
.map_err(|e| NssaError::ProgramWriteInputFailed(e.to_string()))?;
env_builder
.write(&caller_program_id)
.map_err(|e| NssaError::ProgramWriteInputFailed(e.to_string()))?;
let pre_states = pre_states.to_vec();
env_builder
.write(&pre_states)
@ -98,27 +111,42 @@ impl Program {
#[must_use]
pub fn authenticated_transfer_program() -> Self {
// This unwrap won't panic since the `AUTHENTICATED_TRANSFER_ELF` comes from risc0 build of
// `program_methods`
Self::new(AUTHENTICATED_TRANSFER_ELF.to_vec()).unwrap()
Self {
id: AUTHENTICATED_TRANSFER_ID,
elf: AUTHENTICATED_TRANSFER_ELF.to_vec(),
}
}
#[must_use]
pub fn token() -> Self {
// This unwrap won't panic since the `TOKEN_ELF` comes from risc0 build of
// `program_methods`
Self::new(TOKEN_ELF.to_vec()).unwrap()
Self {
id: TOKEN_ID,
elf: TOKEN_ELF.to_vec(),
}
}
#[must_use]
pub fn amm() -> Self {
Self::new(AMM_ELF.to_vec()).expect("The AMM program must be a valid Risc0 program")
Self {
id: AMM_ID,
elf: AMM_ELF.to_vec(),
}
}
#[must_use]
pub fn clock() -> Self {
Self {
id: CLOCK_ID,
elf: CLOCK_ELF.to_vec(),
}
}
#[must_use]
pub fn ata() -> Self {
Self::new(ASSOCIATED_TOKEN_ACCOUNT_ELF.to_vec())
.expect("The ATA program must be a valid Risc0 program")
Self {
id: ASSOCIATED_TOKEN_ACCOUNT_ID,
elf: ASSOCIATED_TOKEN_ACCOUNT_ELF.to_vec(),
}
}
}
@ -126,16 +154,19 @@ impl Program {
impl Program {
#[must_use]
pub fn pinata() -> Self {
// This unwrap won't panic since the `PINATA_ELF` comes from risc0 build of
// `program_methods`
Self::new(PINATA_ELF.to_vec()).unwrap()
Self {
id: PINATA_ID,
elf: PINATA_ELF.to_vec(),
}
}
#[must_use]
#[expect(clippy::non_ascii_literal, reason = "More readable")]
pub fn pinata_token() -> Self {
use crate::program_methods::PINATA_TOKEN_ELF;
Self::new(PINATA_TOKEN_ELF.to_vec()).expect("Piñata program must be a valid R0BF file")
use crate::program_methods::{PINATA_TOKEN_ELF, PINATA_TOKEN_ID};
Self {
id: PINATA_TOKEN_ID,
elf: PINATA_TOKEN_ELF.to_vec(),
}
}
}
@ -146,8 +177,9 @@ mod tests {
use crate::{
program::Program,
program_methods::{
AUTHENTICATED_TRANSFER_ELF, AUTHENTICATED_TRANSFER_ID, PINATA_ELF, PINATA_ID,
TOKEN_ELF, TOKEN_ID,
AMM_ELF, AMM_ID, ASSOCIATED_TOKEN_ACCOUNT_ELF, ASSOCIATED_TOKEN_ACCOUNT_ID,
AUTHENTICATED_TRANSFER_ELF, AUTHENTICATED_TRANSFER_ID, CLOCK_ELF, CLOCK_ID, PINATA_ELF,
PINATA_ID, PINATA_TOKEN_ELF, PINATA_TOKEN_ID, TOKEN_ELF, TOKEN_ID,
},
};
@ -294,24 +326,71 @@ mod tests {
#[must_use]
pub fn modified_transfer_program() -> Self {
use test_program_methods::MODIFIED_TRANSFER_ELF;
// This unwrap won't panic since the `MODIFIED_TRANSFER_ELF` comes from risc0 build of
// `program_methods`
Self::new(MODIFIED_TRANSFER_ELF.to_vec()).unwrap()
use test_program_methods::{MODIFIED_TRANSFER_ELF, MODIFIED_TRANSFER_ID};
Self {
id: MODIFIED_TRANSFER_ID,
elf: MODIFIED_TRANSFER_ELF.to_vec(),
}
}
#[must_use]
pub fn validity_window() -> Self {
use test_program_methods::VALIDITY_WINDOW_ELF;
// This unwrap won't panic since the `VALIDITY_WINDOW_ELF` comes from risc0 build of
// `program_methods`
Self::new(VALIDITY_WINDOW_ELF.to_vec()).unwrap()
use test_program_methods::{VALIDITY_WINDOW_ELF, VALIDITY_WINDOW_ID};
Self {
id: VALIDITY_WINDOW_ID,
elf: VALIDITY_WINDOW_ELF.to_vec(),
}
}
#[must_use]
pub fn validity_window_chain_caller() -> Self {
use test_program_methods::VALIDITY_WINDOW_CHAIN_CALLER_ELF;
Self::new(VALIDITY_WINDOW_CHAIN_CALLER_ELF.to_vec()).unwrap()
use test_program_methods::{
VALIDITY_WINDOW_CHAIN_CALLER_ELF, VALIDITY_WINDOW_CHAIN_CALLER_ID,
};
Self {
id: VALIDITY_WINDOW_CHAIN_CALLER_ID,
elf: VALIDITY_WINDOW_CHAIN_CALLER_ELF.to_vec(),
}
}
#[must_use]
pub fn flash_swap_initiator() -> Self {
use test_program_methods::FLASH_SWAP_INITIATOR_ELF;
Self::new(FLASH_SWAP_INITIATOR_ELF.to_vec())
.expect("flash_swap_initiator must be a valid Risc0 program")
}
#[must_use]
pub fn flash_swap_callback() -> Self {
use test_program_methods::FLASH_SWAP_CALLBACK_ELF;
Self::new(FLASH_SWAP_CALLBACK_ELF.to_vec())
.expect("flash_swap_callback must be a valid Risc0 program")
}
#[must_use]
pub fn malicious_self_program_id() -> Self {
use test_program_methods::MALICIOUS_SELF_PROGRAM_ID_ELF;
Self::new(MALICIOUS_SELF_PROGRAM_ID_ELF.to_vec())
.expect("malicious_self_program_id must be a valid Risc0 program")
}
#[must_use]
pub fn malicious_caller_program_id() -> Self {
use test_program_methods::MALICIOUS_CALLER_PROGRAM_ID_ELF;
Self::new(MALICIOUS_CALLER_PROGRAM_ID_ELF.to_vec())
.expect("malicious_caller_program_id must be a valid Risc0 program")
}
#[must_use]
pub fn time_locked_transfer() -> Self {
use test_program_methods::TIME_LOCKED_TRANSFER_ELF;
Self::new(TIME_LOCKED_TRANSFER_ELF.to_vec()).unwrap()
}
#[must_use]
pub fn pinata_cooldown() -> Self {
use test_program_methods::PINATA_COOLDOWN_ELF;
Self::new(PINATA_COOLDOWN_ELF.to_vec()).unwrap()
}
}
@ -340,7 +419,7 @@ mod tests {
..Account::default()
};
let program_output = program
.execute(&[sender, recipient], &instruction_data)
.execute(None, &[sender, recipient], &instruction_data)
.unwrap();
let [sender_post, recipient_post] = program_output.post_states.try_into().unwrap();
@ -362,4 +441,21 @@ mod tests {
assert_eq!(pinata_program.id, PINATA_ID);
assert_eq!(pinata_program.elf, PINATA_ELF);
}
#[test]
fn builtin_program_ids_match_elfs() {
let cases: &[(&[u8], [u32; 8])] = &[
(AMM_ELF, AMM_ID),
(AUTHENTICATED_TRANSFER_ELF, AUTHENTICATED_TRANSFER_ID),
(ASSOCIATED_TOKEN_ACCOUNT_ELF, ASSOCIATED_TOKEN_ACCOUNT_ID),
(CLOCK_ELF, CLOCK_ID),
(PINATA_ELF, PINATA_ID),
(PINATA_TOKEN_ELF, PINATA_TOKEN_ID),
(TOKEN_ELF, TOKEN_ID),
];
for (elf, expected_id) in cases {
let program = Program::new(elf.to_vec()).unwrap();
assert_eq!(program.id(), *expected_id);
}
}
}

View File

@ -2,9 +2,7 @@ use borsh::{BorshDeserialize, BorshSerialize};
use nssa_core::account::AccountId;
use sha2::{Digest as _, digest::FixedOutput as _};
use crate::{
V03State, error::NssaError, program::Program, program_deployment_transaction::message::Message,
};
use crate::program_deployment_transaction::message::Message;
#[derive(Debug, Clone, PartialEq, Eq, BorshSerialize, BorshDeserialize)]
pub struct ProgramDeploymentTransaction {
@ -22,19 +20,6 @@ impl ProgramDeploymentTransaction {
self.message
}
pub(crate) fn validate_and_produce_public_state_diff(
&self,
state: &V03State,
) -> Result<Program, NssaError> {
// TODO: remove clone
let program = Program::new(self.message.bytecode.clone())?;
if state.programs().contains_key(&program.id()) {
Err(NssaError::ProgramAlreadyExists)
} else {
Ok(program)
}
}
#[must_use]
pub fn hash(&self) -> [u8; 32] {
let bytes = self.to_bytes();

View File

@ -1,20 +1,10 @@
use std::collections::{HashMap, HashSet, VecDeque};
use std::collections::HashSet;
use borsh::{BorshDeserialize, BorshSerialize};
use log::debug;
use nssa_core::{
BlockId, Timestamp,
account::{Account, AccountId, AccountWithMetadata},
program::{ChainedCall, Claim, DEFAULT_PROGRAM_ID, validate_execution},
};
use nssa_core::account::AccountId;
use sha2::{Digest as _, digest::FixedOutput as _};
use crate::{
V03State, ensure,
error::NssaError,
public_transaction::{Message, WitnessSet},
state::MAX_NUMBER_CHAINED_CALLS,
};
use crate::public_transaction::{Message, WitnessSet};
#[derive(Debug, Clone, PartialEq, Eq, BorshSerialize, BorshDeserialize)]
pub struct PublicTransaction {
@ -67,217 +57,6 @@ impl PublicTransaction {
hasher.update(&bytes);
hasher.finalize_fixed().into()
}
pub(crate) fn validate_and_produce_public_state_diff(
&self,
state: &V03State,
block_id: BlockId,
timestamp: Timestamp,
) -> Result<HashMap<AccountId, Account>, NssaError> {
let message = self.message();
let witness_set = self.witness_set();
// All account_ids must be different
ensure!(
message.account_ids.iter().collect::<HashSet<_>>().len() == message.account_ids.len(),
NssaError::InvalidInput("Duplicate account_ids found in message".into(),)
);
// Check exactly one nonce is provided for each signature
ensure!(
message.nonces.len() == witness_set.signatures_and_public_keys.len(),
NssaError::InvalidInput(
"Mismatch between number of nonces and signatures/public keys".into(),
)
);
// Check the signatures are valid
ensure!(
witness_set.is_valid_for(message),
NssaError::InvalidInput("Invalid signature for given message and public key".into())
);
let signer_account_ids = self.signer_account_ids();
// Check nonces corresponds to the current nonces on the public state.
for (account_id, nonce) in signer_account_ids.iter().zip(&message.nonces) {
let current_nonce = state.get_account_by_id(*account_id).nonce;
ensure!(
current_nonce == *nonce,
NssaError::InvalidInput("Nonce mismatch".into())
);
}
// Build pre_states for execution
let input_pre_states: Vec<_> = message
.account_ids
.iter()
.map(|account_id| {
AccountWithMetadata::new(
state.get_account_by_id(*account_id),
signer_account_ids.contains(account_id),
*account_id,
)
})
.collect();
let mut state_diff: HashMap<AccountId, Account> = HashMap::new();
let initial_call = ChainedCall {
program_id: message.program_id,
instruction_data: message.instruction_data.clone(),
pre_states: input_pre_states,
pda_seeds: vec![],
};
let mut chained_calls = VecDeque::from_iter([(initial_call, None)]);
let mut chain_calls_counter = 0;
while let Some((chained_call, caller_program_id)) = chained_calls.pop_front() {
ensure!(
chain_calls_counter <= MAX_NUMBER_CHAINED_CALLS,
NssaError::MaxChainedCallsDepthExceeded
);
// Check that the `program_id` corresponds to a deployed program
let Some(program) = state.programs().get(&chained_call.program_id) else {
return Err(NssaError::InvalidInput("Unknown program".into()));
};
debug!(
"Program {:?} pre_states: {:?}, instruction_data: {:?}",
chained_call.program_id, chained_call.pre_states, chained_call.instruction_data
);
let mut program_output =
program.execute(&chained_call.pre_states, &chained_call.instruction_data)?;
debug!(
"Program {:?} output: {:?}",
chained_call.program_id, program_output
);
let authorized_pdas = nssa_core::program::compute_authorized_pdas(
caller_program_id,
&chained_call.pda_seeds,
);
let is_authorized = |account_id: &AccountId| {
signer_account_ids.contains(account_id) || authorized_pdas.contains(account_id)
};
for pre in &program_output.pre_states {
let account_id = pre.account_id;
// Check that the program output pre_states coincide with the values in the public
// state or with any modifications to those values during the chain of calls.
let expected_pre = state_diff
.get(&account_id)
.cloned()
.unwrap_or_else(|| state.get_account_by_id(account_id));
ensure!(
pre.account == expected_pre,
NssaError::InvalidProgramBehavior
);
// Check that authorization flags are consistent with the provided ones or
// authorized by program through the PDA mechanism
ensure!(
pre.is_authorized == is_authorized(&account_id),
NssaError::InvalidProgramBehavior
);
}
// Verify that the program output's self_program_id matches the expected program ID.
ensure!(
program_output.self_program_id == chained_call.program_id,
NssaError::InvalidProgramBehavior
);
// Verify execution corresponds to a well-behaved program.
// See the # Programs section for the definition of the `validate_execution` method.
ensure!(
validate_execution(
&program_output.pre_states,
&program_output.post_states,
chained_call.program_id,
),
NssaError::InvalidProgramBehavior
);
// Verify validity window
ensure!(
program_output.block_validity_window.is_valid_for(block_id)
&& program_output
.timestamp_validity_window
.is_valid_for(timestamp),
NssaError::OutOfValidityWindow
);
for (i, post) in program_output.post_states.iter_mut().enumerate() {
let Some(claim) = post.required_claim() else {
continue;
};
// The invoked program can only claim accounts with default program id.
ensure!(
post.account().program_owner == DEFAULT_PROGRAM_ID,
NssaError::InvalidProgramBehavior
);
let account_id = program_output.pre_states[i].account_id;
match claim {
Claim::Authorized => {
// The program can only claim accounts that were authorized by the signer.
ensure!(
is_authorized(&account_id),
NssaError::InvalidProgramBehavior
);
}
Claim::Pda(seed) => {
// The program can only claim accounts that correspond to the PDAs it is
// authorized to claim.
let pda = AccountId::from((&chained_call.program_id, &seed));
ensure!(account_id == pda, NssaError::InvalidProgramBehavior);
}
}
post.account_mut().program_owner = chained_call.program_id;
}
// Update the state diff
for (pre, post) in program_output
.pre_states
.iter()
.zip(program_output.post_states.iter())
{
state_diff.insert(pre.account_id, post.account().clone());
}
for new_call in program_output.chained_calls.into_iter().rev() {
chained_calls.push_front((new_call, Some(chained_call.program_id)));
}
chain_calls_counter = chain_calls_counter
.checked_add(1)
.expect("we check the max depth at the beginning of the loop");
}
// Check that all modified uninitialized accounts where claimed
for post in state_diff.iter().filter_map(|(account_id, post)| {
let pre = state.get_account_by_id(*account_id);
if pre.program_owner != DEFAULT_PROGRAM_ID {
return None;
}
if pre == *post {
return None;
}
Some(post)
}) {
ensure!(
post.program_owner != DEFAULT_PROGRAM_ID,
NssaError::InvalidProgramBehavior
);
}
Ok(state_diff)
}
}
#[cfg(test)]
@ -289,6 +68,7 @@ pub mod tests {
error::NssaError,
program::Program,
public_transaction::{Message, WitnessSet},
validated_state_diff::ValidatedStateDiff,
};
fn keys_for_tests() -> (PrivateKey, PrivateKey, AccountId, AccountId) {
@ -302,7 +82,7 @@ pub mod tests {
fn state_for_tests() -> V03State {
let (_, _, addr1, addr2) = keys_for_tests();
let initial_data = [(addr1, 10000), (addr2, 20000)];
V03State::new_with_genesis_accounts(&initial_data, &[])
V03State::new_with_genesis_accounts(&initial_data, &[], 0)
}
fn transaction_for_tests() -> PublicTransaction {
@ -397,7 +177,7 @@ pub mod tests {
let witness_set = WitnessSet::for_message(&message, &[&key1, &key1]);
let tx = PublicTransaction::new(message, witness_set);
let result = tx.validate_and_produce_public_state_diff(&state, 1, 0);
let result = ValidatedStateDiff::from_public_transaction(&tx, &state, 1, 0);
assert!(matches!(result, Err(NssaError::InvalidInput(_))));
}
@ -417,7 +197,7 @@ pub mod tests {
let witness_set = WitnessSet::for_message(&message, &[&key1, &key2]);
let tx = PublicTransaction::new(message, witness_set);
let result = tx.validate_and_produce_public_state_diff(&state, 1, 0);
let result = ValidatedStateDiff::from_public_transaction(&tx, &state, 1, 0);
assert!(matches!(result, Err(NssaError::InvalidInput(_))));
}
@ -438,7 +218,7 @@ pub mod tests {
let mut witness_set = WitnessSet::for_message(&message, &[&key1, &key2]);
witness_set.signatures_and_public_keys[0].0 = Signature::new_for_tests([1; 64]);
let tx = PublicTransaction::new(message, witness_set);
let result = tx.validate_and_produce_public_state_diff(&state, 1, 0);
let result = ValidatedStateDiff::from_public_transaction(&tx, &state, 1, 0);
assert!(matches!(result, Err(NssaError::InvalidInput(_))));
}
@ -458,7 +238,7 @@ pub mod tests {
let witness_set = WitnessSet::for_message(&message, &[&key1, &key2]);
let tx = PublicTransaction::new(message, witness_set);
let result = tx.validate_and_produce_public_state_diff(&state, 1, 0);
let result = ValidatedStateDiff::from_public_transaction(&tx, &state, 1, 0);
assert!(matches!(result, Err(NssaError::InvalidInput(_))));
}
@ -474,7 +254,7 @@ pub mod tests {
let witness_set = WitnessSet::for_message(&message, &[&key1, &key2]);
let tx = PublicTransaction::new(message, witness_set);
let result = tx.validate_and_produce_public_state_diff(&state, 1, 0);
let result = ValidatedStateDiff::from_public_transaction(&tx, &state, 1, 0);
assert!(matches!(result, Err(NssaError::InvalidInput(_))));
}
}

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,442 @@
use std::{
collections::{HashMap, HashSet, VecDeque},
hash::Hash,
};
use log::debug;
use nssa_core::{
BlockId, Commitment, Nullifier, PrivacyPreservingCircuitOutput, Timestamp,
account::{Account, AccountId, AccountWithMetadata},
program::{
ChainedCall, Claim, DEFAULT_PROGRAM_ID, compute_authorized_pdas, validate_execution,
},
};
use crate::{
V03State, ensure,
error::NssaError,
privacy_preserving_transaction::{
PrivacyPreservingTransaction, circuit::Proof, message::Message,
},
program::Program,
program_deployment_transaction::ProgramDeploymentTransaction,
public_transaction::PublicTransaction,
state::MAX_NUMBER_CHAINED_CALLS,
};
/// The raw set of changes a validated transaction applies to the state.
pub struct StateDiff {
    /// Account ids derived from the transaction's signing public keys.
    pub signer_account_ids: Vec<AccountId>,
    /// New values for the public accounts modified by the transaction.
    pub public_diff: HashMap<AccountId, Account>,
    /// Commitments added by a privacy-preserving transaction (empty otherwise).
    pub new_commitments: Vec<Commitment>,
    /// Nullifiers spent by a privacy-preserving transaction (empty otherwise).
    pub new_nullifiers: Vec<Nullifier>,
    /// Newly deployed program, when produced by a program deployment transaction.
    pub program: Option<Program>,
}
/// The validated output of executing or verifying a transaction, ready to be applied to the state.
///
/// Can only be constructed by the transaction validation functions inside this crate, ensuring the
/// diff has been checked before any state mutation occurs.
pub struct ValidatedStateDiff(StateDiff);
impl ValidatedStateDiff {
    /// Validates `tx` against the current public `state` and computes the resulting diff.
    ///
    /// Checks for duplicate accounts, signature validity, and nonce freshness, then executes
    /// the program call chain (bounded by `MAX_NUMBER_CHAINED_CALLS`), validating every
    /// program output (pre-state consistency, authorization, claims, caller identity, and
    /// validity windows) before folding it into the diff.
    ///
    /// # Errors
    ///
    /// Returns an [`NssaError`] if any input is malformed, a signature or nonce check fails,
    /// a program misbehaves, or an output falls outside its validity window.
    pub fn from_public_transaction(
        tx: &PublicTransaction,
        state: &V03State,
        block_id: BlockId,
        timestamp: Timestamp,
    ) -> Result<Self, NssaError> {
        let message = tx.message();
        let witness_set = tx.witness_set();

        // All account_ids must be different
        ensure!(
            message.account_ids.iter().collect::<HashSet<_>>().len() == message.account_ids.len(),
            NssaError::InvalidInput("Duplicate account_ids found in message".into(),)
        );

        // Check exactly one nonce is provided for each signature
        ensure!(
            message.nonces.len() == witness_set.signatures_and_public_keys.len(),
            NssaError::InvalidInput(
                "Mismatch between number of nonces and signatures/public keys".into(),
            )
        );

        // Check the signatures are valid
        ensure!(
            witness_set.is_valid_for(message),
            NssaError::InvalidInput("Invalid signature for given message and public key".into())
        );

        let signer_account_ids = tx.signer_account_ids();

        // Check nonces correspond to the current nonces on the public state.
        for (account_id, nonce) in signer_account_ids.iter().zip(&message.nonces) {
            let current_nonce = state.get_account_by_id(*account_id).nonce;
            ensure!(
                current_nonce == *nonce,
                NssaError::InvalidInput("Nonce mismatch".into())
            );
        }

        // Build pre_states for execution
        let input_pre_states: Vec<_> = message
            .account_ids
            .iter()
            .map(|account_id| {
                AccountWithMetadata::new(
                    state.get_account_by_id(*account_id),
                    signer_account_ids.contains(account_id),
                    *account_id,
                )
            })
            .collect();

        // Accumulates the latest post-state seen for each touched account across the chain.
        let mut state_diff: HashMap<AccountId, Account> = HashMap::new();

        // The user-initiated call has no caller program (caller is `None` below).
        let initial_call = ChainedCall {
            program_id: message.program_id,
            instruction_data: message.instruction_data.clone(),
            pre_states: input_pre_states,
            pda_seeds: vec![],
        };
        let mut chained_calls = VecDeque::from_iter([(initial_call, None)]);
        let mut chain_calls_counter = 0;

        while let Some((chained_call, caller_program_id)) = chained_calls.pop_front() {
            ensure!(
                chain_calls_counter <= MAX_NUMBER_CHAINED_CALLS,
                NssaError::MaxChainedCallsDepthExceeded
            );

            // Check that the `program_id` corresponds to a deployed program
            let Some(program) = state.programs().get(&chained_call.program_id) else {
                return Err(NssaError::InvalidInput("Unknown program".into()));
            };

            debug!(
                "Program {:?} pre_states: {:?}, instruction_data: {:?}",
                chained_call.program_id, chained_call.pre_states, chained_call.instruction_data
            );
            let mut program_output = program.execute(
                caller_program_id,
                &chained_call.pre_states,
                &chained_call.instruction_data,
            )?;
            debug!(
                "Program {:?} output: {:?}",
                chained_call.program_id, program_output
            );

            let authorized_pdas =
                compute_authorized_pdas(caller_program_id, &chained_call.pda_seeds);
            let is_authorized = |account_id: &AccountId| {
                signer_account_ids.contains(account_id) || authorized_pdas.contains(account_id)
            };

            for pre in &program_output.pre_states {
                let account_id = pre.account_id;

                // Check that the program output pre_states coincide with the values in the public
                // state or with any modifications to those values during the chain of calls.
                let expected_pre = state_diff
                    .get(&account_id)
                    .cloned()
                    .unwrap_or_else(|| state.get_account_by_id(account_id));
                ensure!(
                    pre.account == expected_pre,
                    NssaError::InvalidProgramBehavior
                );

                // Check that authorization flags are consistent with the provided ones or
                // authorized by program through the PDA mechanism
                ensure!(
                    pre.is_authorized == is_authorized(&account_id),
                    NssaError::InvalidProgramBehavior
                );
            }

            // Verify that the program output's self_program_id matches the expected program ID.
            ensure!(
                program_output.self_program_id == chained_call.program_id,
                NssaError::InvalidProgramBehavior
            );

            // Verify that the program output's caller_program_id matches the actual caller.
            ensure!(
                program_output.caller_program_id == caller_program_id,
                NssaError::InvalidProgramBehavior
            );

            // Verify execution corresponds to a well-behaved program.
            // See the # Programs section for the definition of the `validate_execution` method.
            ensure!(
                validate_execution(
                    &program_output.pre_states,
                    &program_output.post_states,
                    chained_call.program_id,
                ),
                NssaError::InvalidProgramBehavior
            );

            // Verify validity window
            ensure!(
                program_output.block_validity_window.is_valid_for(block_id)
                    && program_output
                        .timestamp_validity_window
                        .is_valid_for(timestamp),
                NssaError::OutOfValidityWindow
            );

            for (i, post) in program_output.post_states.iter_mut().enumerate() {
                let Some(claim) = post.required_claim() else {
                    continue;
                };

                // The invoked program can only claim accounts with default program id.
                ensure!(
                    post.account().program_owner == DEFAULT_PROGRAM_ID,
                    NssaError::InvalidProgramBehavior
                );

                let account_id = program_output.pre_states[i].account_id;
                match claim {
                    Claim::Authorized => {
                        // The program can only claim accounts that were authorized by the signer.
                        ensure!(
                            is_authorized(&account_id),
                            NssaError::InvalidProgramBehavior
                        );
                    }
                    Claim::Pda(seed) => {
                        // The program can only claim accounts that correspond to the PDAs it is
                        // authorized to claim.
                        let pda = AccountId::from((&chained_call.program_id, &seed));
                        ensure!(account_id == pda, NssaError::InvalidProgramBehavior);
                    }
                }
                post.account_mut().program_owner = chained_call.program_id;
            }

            // Update the state diff
            for (pre, post) in program_output
                .pre_states
                .iter()
                .zip(program_output.post_states.iter())
            {
                state_diff.insert(pre.account_id, post.account().clone());
            }

            // Push nested calls at the front (reversed) so they run depth-first, in order.
            for new_call in program_output.chained_calls.into_iter().rev() {
                chained_calls.push_front((new_call, Some(chained_call.program_id)));
            }

            chain_calls_counter = chain_calls_counter
                .checked_add(1)
                .expect("we check the max depth at the beginning of the loop");
        }

        // Check that all modified uninitialized accounts were claimed
        for post in state_diff.iter().filter_map(|(account_id, post)| {
            let pre = state.get_account_by_id(*account_id);
            if pre.program_owner != DEFAULT_PROGRAM_ID {
                return None;
            }
            if pre == *post {
                return None;
            }
            Some(post)
        }) {
            ensure!(
                post.program_owner != DEFAULT_PROGRAM_ID,
                NssaError::InvalidProgramBehavior
            );
        }

        Ok(Self(StateDiff {
            signer_account_ids,
            public_diff: state_diff,
            new_commitments: vec![],
            new_nullifiers: vec![],
            program: None,
        }))
    }
    /// Validates a privacy-preserving `tx` against `state` and computes the resulting diff.
    ///
    /// Checks message well-formedness (non-empty commitments/nullifiers, no duplicates),
    /// signatures, nonces, and the validity window, then verifies the zero-knowledge proof
    /// against the claimed public outputs, and finally the freshness of commitments and
    /// uniqueness of nullifiers.
    ///
    /// # Errors
    ///
    /// Returns an [`NssaError`] if any of the above checks fails, including
    /// [`NssaError::InvalidPrivacyPreservingProof`] for a proof mismatch.
    pub fn from_privacy_preserving_transaction(
        tx: &PrivacyPreservingTransaction,
        state: &V03State,
        block_id: BlockId,
        timestamp: Timestamp,
    ) -> Result<Self, NssaError> {
        let message = &tx.message;
        let witness_set = &tx.witness_set;

        // 1. Commitments or nullifiers are non empty
        if message.new_commitments.is_empty() && message.new_nullifiers.is_empty() {
            return Err(NssaError::InvalidInput(
                "Empty commitments and empty nullifiers found in message".into(),
            ));
        }

        // 2. Check there are no duplicate account_ids in the public_account_ids list.
        if n_unique(&message.public_account_ids) != message.public_account_ids.len() {
            return Err(NssaError::InvalidInput(
                "Duplicate account_ids found in message".into(),
            ));
        }

        // Check there are no duplicate nullifiers in the new_nullifiers list
        if n_unique(&message.new_nullifiers) != message.new_nullifiers.len() {
            return Err(NssaError::InvalidInput(
                "Duplicate nullifiers found in message".into(),
            ));
        }

        // Check there are no duplicate commitments in the new_commitments list
        if n_unique(&message.new_commitments) != message.new_commitments.len() {
            return Err(NssaError::InvalidInput(
                "Duplicate commitments found in message".into(),
            ));
        }

        // 3. Nonce checks and Valid signatures
        // Check exactly one nonce is provided for each signature
        if message.nonces.len() != witness_set.signatures_and_public_keys.len() {
            return Err(NssaError::InvalidInput(
                "Mismatch between number of nonces and signatures/public keys".into(),
            ));
        }

        // Check the signatures are valid
        if !witness_set.signatures_are_valid_for(message) {
            return Err(NssaError::InvalidInput(
                "Invalid signature for given message and public key".into(),
            ));
        }

        let signer_account_ids = tx.signer_account_ids();

        // Check nonces correspond to the current nonces on the public state.
        for (account_id, nonce) in signer_account_ids.iter().zip(&message.nonces) {
            let current_nonce = state.get_account_by_id(*account_id).nonce;
            if current_nonce != *nonce {
                return Err(NssaError::InvalidInput("Nonce mismatch".into()));
            }
        }

        // Verify validity window
        if !message.block_validity_window.is_valid_for(block_id)
            || !message.timestamp_validity_window.is_valid_for(timestamp)
        {
            return Err(NssaError::OutOfValidityWindow);
        }

        // Build pre_states for proof verification
        let public_pre_states: Vec<_> = message
            .public_account_ids
            .iter()
            .map(|account_id| {
                AccountWithMetadata::new(
                    state.get_account_by_id(*account_id),
                    signer_account_ids.contains(account_id),
                    *account_id,
                )
            })
            .collect();

        // 4. Proof verification
        check_privacy_preserving_circuit_proof_is_valid(
            &witness_set.proof,
            &public_pre_states,
            message,
        )?;

        // 5. Commitment freshness
        state.check_commitments_are_new(&message.new_commitments)?;

        // 6. Nullifier uniqueness
        state.check_nullifiers_are_valid(&message.new_nullifiers)?;

        // Public post-states come verified from the proof, so they can be applied directly.
        let public_diff = message
            .public_account_ids
            .iter()
            .copied()
            .zip(message.public_post_states.clone())
            .collect();

        let new_nullifiers = message
            .new_nullifiers
            .iter()
            .copied()
            .map(|(nullifier, _)| nullifier)
            .collect();

        Ok(Self(StateDiff {
            signer_account_ids,
            public_diff,
            new_commitments: message.new_commitments.clone(),
            new_nullifiers,
            program: None,
        }))
    }
pub fn from_program_deployment_transaction(
tx: &ProgramDeploymentTransaction,
state: &V03State,
) -> Result<Self, NssaError> {
// TODO: remove clone
let program = Program::new(tx.message.bytecode.clone())?;
if state.programs().contains_key(&program.id()) {
return Err(NssaError::ProgramAlreadyExists);
}
Ok(Self(StateDiff {
signer_account_ids: vec![],
public_diff: HashMap::new(),
new_commitments: vec![],
new_nullifiers: vec![],
program: Some(program),
}))
}
    /// Returns the public account changes produced by this transaction.
    ///
    /// Used by callers (e.g. the sequencer) to inspect the diff before committing it, for example
    /// to enforce that system accounts are not modified by user transactions.
    #[must_use]
    pub fn public_diff(&self) -> HashMap<AccountId, Account> {
        self.0.public_diff.clone()
    }

    /// Consumes the wrapper, yielding the underlying [`StateDiff`] for application to the state.
    pub(crate) fn into_state_diff(self) -> StateDiff {
        self.0
    }
}
/// Verifies that `proof` attests to the privacy-preserving circuit producing exactly the
/// public outputs claimed by `message` over `public_pre_states`.
///
/// # Errors
///
/// Returns [`NssaError::InvalidPrivacyPreservingProof`] when the proof does not match.
fn check_privacy_preserving_circuit_proof_is_valid(
    proof: &Proof,
    public_pre_states: &[AccountWithMetadata],
    message: &Message,
) -> Result<(), NssaError> {
    let ciphertexts = message
        .encrypted_private_post_states
        .iter()
        .map(|value| value.ciphertext.clone())
        .collect();
    // Reconstruct the public output the circuit must have committed to.
    let expected_output = PrivacyPreservingCircuitOutput {
        public_pre_states: public_pre_states.to_vec(),
        public_post_states: message.public_post_states.clone(),
        ciphertexts,
        new_commitments: message.new_commitments.clone(),
        new_nullifiers: message.new_nullifiers.clone(),
        block_validity_window: message.block_validity_window,
        timestamp_validity_window: message.timestamp_validity_window,
    };
    if proof.is_valid_for(&expected_output) {
        Ok(())
    } else {
        Err(NssaError::InvalidPrivacyPreservingProof)
    }
}
/// Counts the number of distinct elements in `data`.
fn n_unique<T: Eq + Hash>(data: &[T]) -> usize {
    data.iter().collect::<HashSet<&T>>().len()
}

View File

@ -9,6 +9,7 @@ workspace = true
[dependencies]
nssa_core.workspace = true
clock_core.workspace = true
token_core.workspace = true
token_program.workspace = true
amm_core.workspace = true

View File

@ -15,6 +15,7 @@ fn main() {
let (
ProgramInput {
self_program_id,
caller_program_id,
pre_states,
instruction,
},
@ -155,6 +156,7 @@ fn main() {
ProgramOutput::new(
self_program_id,
caller_program_id,
instruction_words,
pre_states_clone,
post_states,

View File

@ -5,6 +5,7 @@ fn main() {
let (
ProgramInput {
self_program_id,
caller_program_id,
pre_states,
instruction,
},
@ -59,6 +60,7 @@ fn main() {
ProgramOutput::new(
self_program_id,
caller_program_id,
instruction_words,
pre_states_clone,
post_states,

View File

@ -68,6 +68,7 @@ fn main() {
let (
ProgramInput {
self_program_id,
caller_program_id,
pre_states,
instruction: balance_to_move,
},
@ -85,5 +86,12 @@ fn main() {
_ => panic!("invalid params"),
};
ProgramOutput::new(self_program_id, instruction_words, pre_states, post_states).write();
ProgramOutput::new(
self_program_id,
caller_program_id,
instruction_words,
pre_states,
post_states,
)
.write();
}

View File

@ -0,0 +1,94 @@
//! Clock Program.
//!
//! A system program that records the current block ID and timestamp into dedicated clock accounts.
//! Three accounts are maintained, updated at different block intervals (every 1, 10, and 50
//! blocks), allowing programs to read recent timestamps at various granularities.
//!
//! This program is invoked exclusively by the sequencer, as the last transaction in every
//! block. Clock accounts are assigned to the clock program at genesis, so no claiming is required
//! here.
use clock_core::{
CLOCK_01_PROGRAM_ACCOUNT_ID, CLOCK_10_PROGRAM_ACCOUNT_ID, CLOCK_50_PROGRAM_ACCOUNT_ID,
ClockAccountData, Instruction,
};
use nssa_core::{
account::AccountWithMetadata,
program::{AccountPostState, ProgramInput, ProgramOutput, read_nssa_inputs},
};
/// Produces the post-state for one clock account: its data is replaced with `updated_data`
/// only when `current_block_id` is a multiple of `divisor`; otherwise it is left unchanged.
///
/// Returns the (unchanged) pre-state together with the computed post-state.
fn update_if_multiple(
    pre: AccountWithMetadata,
    divisor: u64,
    current_block_id: u64,
    updated_data: &[u8],
) -> (AccountWithMetadata, AccountPostState) {
    let mut post_account = pre.account.clone();
    if current_block_id.is_multiple_of(divisor) {
        post_account.data = updated_data
            .to_vec()
            .try_into()
            .expect("Clock account data should fit in account data");
    }
    (pre, AccountPostState::new(post_account))
}
// Guest entry point: advances the clock accounts by one block.
fn main() {
    // The instruction payload is the current block timestamp, supplied by the sequencer.
    let (
        ProgramInput {
            self_program_id,
            caller_program_id,
            pre_states,
            instruction: timestamp,
        },
        instruction_words,
    ) = read_nssa_inputs::<Instruction>();

    let Ok([pre_01, pre_10, pre_50]) = <[_; 3]>::try_from(pre_states) else {
        panic!("Invalid number of input accounts");
    };

    // Verify pre-states correspond to the expected clock account IDs.
    if pre_01.account_id != CLOCK_01_PROGRAM_ACCOUNT_ID
        || pre_10.account_id != CLOCK_10_PROGRAM_ACCOUNT_ID
        || pre_50.account_id != CLOCK_50_PROGRAM_ACCOUNT_ID
    {
        panic!("Invalid input accounts");
    }

    // Verify all clock accounts are owned by this program (assigned at genesis).
    if pre_01.account.program_owner != self_program_id
        || pre_10.account.program_owner != self_program_id
        || pre_50.account.program_owner != self_program_id
    {
        panic!("Clock accounts must be owned by the clock program");
    }

    // The divisor-1 account is updated every block, so it always holds the latest block id;
    // derive the current block id by incrementing it.
    let prev_data = ClockAccountData::from_bytes(&pre_01.account.data.clone().into_inner());
    let current_block_id = prev_data
        .block_id
        .checked_add(1)
        .expect("Next block id should be within u64 boundaries");
    let updated_data = ClockAccountData {
        block_id: current_block_id,
        timestamp,
    }
    .to_bytes();

    // Divisor-1 updates every block; the 10/50 accounts only on matching multiples.
    let (pre_01, post_01) = update_if_multiple(pre_01, 1, current_block_id, &updated_data);
    let (pre_10, post_10) = update_if_multiple(pre_10, 10, current_block_id, &updated_data);
    let (pre_50, post_50) = update_if_multiple(pre_50, 50, current_block_id, &updated_data);

    ProgramOutput::new(
        self_program_id,
        caller_program_id,
        instruction_words,
        vec![pre_01, pre_10, pre_50],
        vec![post_01, post_10, post_50],
    )
    .write();
}

View File

@ -47,6 +47,7 @@ fn main() {
let (
ProgramInput {
self_program_id,
caller_program_id,
pre_states,
instruction: solution,
},
@ -81,6 +82,7 @@ fn main() {
ProgramOutput::new(
self_program_id,
caller_program_id,
instruction_words,
vec![pinata, winner],
vec![

View File

@ -53,6 +53,7 @@ fn main() {
let (
ProgramInput {
self_program_id,
caller_program_id,
pre_states,
instruction: solution,
},
@ -99,6 +100,7 @@ fn main() {
ProgramOutput::new(
self_program_id,
caller_program_id,
instruction_words,
vec![
pinata_definition,

View File

@ -114,6 +114,15 @@ impl ExecutionState {
"Program output self_program_id does not match chained call program_id"
);
// Verify that the program output's caller_program_id matches the actual caller.
// This prevents a malicious user from privately executing an internal function
// by spoofing caller_program_id (e.g. passing caller_program_id = self_program_id
// to bypass access control checks).
assert_eq!(
program_output.caller_program_id, caller_program_id,
"Program output caller_program_id does not match actual caller"
);
// Check that the program is well behaved.
// See the # Programs section for the definition of the `validate_execution` method.
let execution_valid = validate_execution(

View File

@ -13,6 +13,7 @@ fn main() {
let (
ProgramInput {
self_program_id,
caller_program_id,
pre_states,
instruction,
},
@ -84,6 +85,7 @@ fn main() {
ProgramOutput::new(
self_program_id,
caller_program_id,
instruction_words,
pre_states_clone,
post_states,

View File

@ -3036,7 +3036,7 @@ fn new_definition_lp_symmetric_amounts() {
fn state_for_amm_tests() -> V03State {
let initial_data = [];
let mut state = V03State::new_with_genesis_accounts(&initial_data, &[]);
let mut state = V03State::new_with_genesis_accounts(&initial_data, &[], 0);
state.force_insert_account(
IdForExeTests::pool_definition_id(),
AccountsForExeTests::pool_definition_init(),
@ -3079,7 +3079,7 @@ fn state_for_amm_tests() -> V03State {
fn state_for_amm_tests_with_new_def() -> V03State {
let initial_data = [];
let mut state = V03State::new_with_genesis_accounts(&initial_data, &[]);
let mut state = V03State::new_with_genesis_accounts(&initial_data, &[], 0);
state.force_insert_account(
IdForExeTests::token_a_definition_id(),
AccountsForExeTests::token_a_definition_account(),

View File

@ -0,0 +1,12 @@
[package]
name = "clock_core"
version = "0.1.0"
edition = "2024"
license = { workspace = true }
[lints]
workspace = true
[dependencies]
nssa_core.workspace = true
borsh.workspace = true

View File

@ -0,0 +1,42 @@
//! Core data structures and constants for the Clock Program.
use borsh::{BorshDeserialize, BorshSerialize};
use nssa_core::{Timestamp, account::AccountId};
/// Account id of the clock account updated every block.
pub const CLOCK_01_PROGRAM_ACCOUNT_ID: AccountId =
    AccountId::new(*b"/LEZ/ClockProgramAccount/0000001");
/// Account id of the clock account updated every 10 blocks.
pub const CLOCK_10_PROGRAM_ACCOUNT_ID: AccountId =
    AccountId::new(*b"/LEZ/ClockProgramAccount/0000010");
/// Account id of the clock account updated every 50 blocks.
pub const CLOCK_50_PROGRAM_ACCOUNT_ID: AccountId =
    AccountId::new(*b"/LEZ/ClockProgramAccount/0000050");

/// All clock program account ids, in the order expected by the clock program.
pub const CLOCK_PROGRAM_ACCOUNT_IDS: [AccountId; 3] = [
    CLOCK_01_PROGRAM_ACCOUNT_ID,
    CLOCK_10_PROGRAM_ACCOUNT_ID,
    CLOCK_50_PROGRAM_ACCOUNT_ID,
];

/// The instruction type for the Clock Program. The sequencer passes the current block timestamp.
pub type Instruction = Timestamp;
/// The data stored in a clock account.
#[derive(Debug, Clone, Copy, PartialEq, Eq, BorshSerialize, BorshDeserialize)]
pub struct ClockAccountData {
    /// Block id recorded when this account was last updated.
    pub block_id: u64,
    /// Block timestamp recorded when this account was last updated.
    pub timestamp: Timestamp,
}

impl ClockAccountData {
    /// Borsh-serializes the data for storage in a clock account.
    ///
    /// # Panics
    ///
    /// Panics if Borsh serialization fails, which is not expected for this fixed-layout struct.
    #[must_use]
    pub fn to_bytes(self) -> Vec<u8> {
        borsh::to_vec(&self).expect("ClockAccountData serialization should not fail")
    }

    /// Borsh-deserializes clock account data from `bytes`.
    ///
    /// # Panics
    ///
    /// Panics if `bytes` is not a valid Borsh encoding of [`ClockAccountData`].
    #[must_use]
    pub fn from_bytes(bytes: &[u8]) -> Self {
        borsh::from_slice(bytes).expect("ClockAccountData deserialization should not fail")
    }
}

View File

@ -40,3 +40,5 @@ mock = []
[dev-dependencies]
futures.workspace = true
test_program_methods.workspace = true
nssa = { workspace = true, features = ["test-utils"] }

View File

@ -150,7 +150,7 @@ mod tests {
let retrieved_tx = node_store.get_transaction_by_hash(tx.hash());
assert_eq!(None, retrieved_tx);
// Add the block with the transaction
let dummy_state = V03State::new_with_genesis_accounts(&[], &[]);
let dummy_state = V03State::new_with_genesis_accounts(&[], &[], 0);
node_store.update(&block, [1; 32], &dummy_state).unwrap();
// Try again
let retrieved_tx = node_store.get_transaction_by_hash(tx.hash());
@ -209,7 +209,7 @@ mod tests {
let block_hash = block.header.hash;
let block_msg_id = [1; 32];
let dummy_state = V03State::new_with_genesis_accounts(&[], &[]);
let dummy_state = V03State::new_with_genesis_accounts(&[], &[], 0);
node_store
.update(&block, block_msg_id, &dummy_state)
.unwrap();
@ -244,7 +244,7 @@ mod tests {
let block = common::test_utils::produce_dummy_block(1, None, vec![tx]);
let block_id = block.header.block_id;
let dummy_state = V03State::new_with_genesis_accounts(&[], &[]);
let dummy_state = V03State::new_with_genesis_accounts(&[], &[], 0);
node_store.update(&block, [1; 32], &dummy_state).unwrap();
// Verify initial status is Pending

View File

@ -24,9 +24,10 @@ pub struct SequencerConfig {
pub genesis_id: u64,
/// If `True`, then adds random sequence of bytes to genesis block.
pub is_genesis_random: bool,
/// Maximum number of transactions in block.
/// Maximum number of user transactions in a block (excludes the mandatory clock transaction).
pub max_num_tx_in_block: usize,
/// Maximum block size (includes header and transactions).
/// Maximum block size (includes header, user transactions, and the mandatory clock
/// transaction).
#[serde(default = "default_max_block_size")]
pub max_block_size: ByteSize,
/// Mempool maximum size.

View File

@ -7,7 +7,7 @@ use common::PINATA_BASE58;
use common::{
HashType,
block::{BedrockStatus, Block, HashableBlockData},
transaction::NSSATransaction,
transaction::{NSSATransaction, clock_invocation},
};
use config::SequencerConfig;
use log::{error, info, warn};
@ -16,7 +16,6 @@ use mempool::{MemPool, MemPoolHandle};
#[cfg(feature = "mock")]
pub use mock::SequencerCoreWithMockClients;
use nssa::V03State;
use nssa_core::{BlockId, Timestamp};
pub use storage::error::DbError;
use testnet_initial_state::initial_state;
@ -139,6 +138,7 @@ impl<BC: BlockSettlementClientTrait, IC: IndexerClientTrait> SequencerCore<BC, I
V03State::new_with_genesis_accounts(
&init_accs.unwrap_or_default(),
&initial_commitments.unwrap_or_default(),
genesis_block.header.timestamp,
)
} else {
initial_state()
@ -163,28 +163,6 @@ impl<BC: BlockSettlementClientTrait, IC: IndexerClientTrait> SequencerCore<BC, I
(sequencer_core, mempool_handle)
}
fn execute_check_transaction_on_state(
&mut self,
tx: NSSATransaction,
block_id: BlockId,
timestamp: Timestamp,
) -> Result<NSSATransaction, nssa::error::NssaError> {
match &tx {
NSSATransaction::Public(tx) => self
.state
.transition_from_public_transaction(tx, block_id, timestamp),
NSSATransaction::PrivacyPreserving(tx) => self
.state
.transition_from_privacy_preserving_transaction(tx, block_id, timestamp),
NSSATransaction::ProgramDeployment(tx) => self
.state
.transition_from_program_deployment_transaction(tx),
}
.inspect_err(|err| warn!("Error at transition {err:#?}"))?;
Ok(tx)
}
pub async fn produce_new_block(&mut self) -> Result<u64> {
let (tx, _msg_id) = self
.produce_new_block_with_mempool_transactions()
@ -224,12 +202,20 @@ impl<BC: BlockSettlementClientTrait, IC: IndexerClientTrait> SequencerCore<BC, I
let new_block_timestamp = u64::try_from(chrono::Utc::now().timestamp_millis())
.expect("Timestamp must be positive");
// Pre-create the mandatory clock tx so its size is included in the block size check.
let clock_tx = clock_invocation(new_block_timestamp);
let clock_nssa_tx = NSSATransaction::Public(clock_tx.clone());
while let Some(tx) = self.mempool.pop() {
let tx_hash = tx.hash();
// Check if block size exceeds limit
let temp_valid_transactions =
[valid_transactions.as_slice(), std::slice::from_ref(&tx)].concat();
// Check if block size exceeds limit (including the mandatory clock tx).
let temp_valid_transactions = [
valid_transactions.as_slice(),
std::slice::from_ref(&tx),
std::slice::from_ref(&clock_nssa_tx),
]
.concat();
let temp_hashable_data = HashableBlockData {
block_id: new_block_height,
transactions: temp_valid_transactions,
@ -252,26 +238,35 @@ impl<BC: BlockSettlementClientTrait, IC: IndexerClientTrait> SequencerCore<BC, I
break;
}
match self.execute_check_transaction_on_state(tx, new_block_height, new_block_timestamp)
{
Ok(valid_tx) => {
valid_transactions.push(valid_tx);
info!("Validated transaction with hash {tx_hash}, including it in block");
if valid_transactions.len() >= self.sequencer_config.max_num_tx_in_block {
break;
}
}
let validated_diff = match tx.validate_on_state(
&self.state,
new_block_height,
new_block_timestamp,
) {
Ok(diff) => diff,
Err(err) => {
error!(
"Transaction with hash {tx_hash} failed execution check with error: {err:#?}, skipping it",
);
// TODO: Probably need to handle unsuccessful transaction execution?
continue;
}
};
self.state.apply_state_diff(validated_diff);
valid_transactions.push(tx);
info!("Validated transaction with hash {tx_hash}, including it in block");
if valid_transactions.len() >= self.sequencer_config.max_num_tx_in_block {
break;
}
}
// Append the Clock Program invocation as the mandatory last transaction.
self.state
.transition_from_public_transaction(&clock_tx, new_block_height, new_block_timestamp)
.context("Clock transaction failed. Aborting block production.")?;
valid_transactions.push(clock_nssa_tx);
let hashable_data = HashableBlockData {
block_id: new_block_height,
transactions: valid_transactions,
@ -395,7 +390,10 @@ mod tests {
use std::{pin::pin, time::Duration};
use bedrock_client::BackoffConfig;
use common::{test_utils::sequencer_sign_key_for_testing, transaction::NSSATransaction};
use common::{
test_utils::sequencer_sign_key_for_testing,
transaction::{NSSATransaction, clock_invocation},
};
use logos_blockchain_core::mantle::ops::channel::ChannelId;
use mempool::MemPoolHandle;
use testnet_initial_state::{initial_accounts, initial_pub_accounts_private_keys};
@ -524,7 +522,7 @@ mod tests {
let tx = tx.transaction_stateless_check().unwrap();
// Signature is not from sender. Execution fails
let result = sequencer.execute_check_transaction_on_state(tx, 0, 0);
let result = tx.execute_check_on_state(&mut sequencer.state, 0, 0);
assert!(matches!(
result,
@ -550,7 +548,9 @@ mod tests {
// Passed pre-check
assert!(result.is_ok());
let result = sequencer.execute_check_transaction_on_state(result.unwrap(), 0, 0);
let result = result
.unwrap()
.execute_check_on_state(&mut sequencer.state, 0, 0);
let is_failed_at_balance_mismatch = matches!(
result.err().unwrap(),
nssa::error::NssaError::ProgramExecutionFailed(_)
@ -572,8 +572,7 @@ mod tests {
acc1, 0, acc2, 100, &sign_key1,
);
sequencer
.execute_check_transaction_on_state(tx, 0, 0)
tx.execute_check_on_state(&mut sequencer.state, 0, 0)
.unwrap();
let bal_from = sequencer.state.get_account_by_id(acc1).balance;
@ -652,8 +651,14 @@ mod tests {
.unwrap()
.unwrap();
// Only one should be included in the block
assert_eq!(block.body.transactions, vec![tx.clone()]);
// Only one user tx should be included; the clock tx is always appended last.
assert_eq!(
block.body.transactions,
vec![
tx.clone(),
NSSATransaction::Public(clock_invocation(block.header.timestamp))
]
);
}
#[tokio::test]
@ -679,7 +684,13 @@ mod tests {
.get_block_at_id(sequencer.chain_height)
.unwrap()
.unwrap();
assert_eq!(block.body.transactions, vec![tx.clone()]);
assert_eq!(
block.body.transactions,
vec![
tx.clone(),
NSSATransaction::Public(clock_invocation(block.header.timestamp))
]
);
// Add same transaction should fail
mempool_handle.push(tx.clone()).await.unwrap();
@ -691,7 +702,13 @@ mod tests {
.get_block_at_id(sequencer.chain_height)
.unwrap()
.unwrap();
assert!(block.body.transactions.is_empty());
// The replay is rejected, so only the clock tx is in the block.
assert_eq!(
block.body.transactions,
vec![NSSATransaction::Public(clock_invocation(
block.header.timestamp
))]
);
}
#[tokio::test]
@ -726,7 +743,13 @@ mod tests {
.get_block_at_id(sequencer.chain_height)
.unwrap()
.unwrap();
assert_eq!(block.body.transactions, vec![tx.clone()]);
assert_eq!(
block.body.transactions,
vec![
tx.clone(),
NSSATransaction::Public(clock_invocation(block.header.timestamp))
]
);
}
// Instantiating a new sequencer from the same config. This should load the existing block
@ -856,8 +879,54 @@ mod tests {
);
assert_eq!(
new_block.body.transactions,
vec![tx],
"New block should contain the submitted transaction"
vec![
tx,
NSSATransaction::Public(clock_invocation(new_block.header.timestamp))
],
"New block should contain the submitted transaction and the clock invocation"
);
}
// Verifies the sequencer drops any mempool transaction whose state diff touches the
// clock accounts — both a byte-for-byte canonical clock invocation and a crafted
// variant with a different timestamp. Only the sequencer itself may append the
// clock transaction.
#[tokio::test]
async fn transactions_touching_clock_account_are_dropped_from_block() {
    let (mut sequencer, mempool_handle) = common_setup().await;
    // Canonical clock invocation and a crafted variant with a different timestamp — both must
    // be dropped because their diffs touch the clock accounts.
    let crafted_clock_tx = {
        // Same program and accounts as the real clock invocation, but timestamp 42.
        let message = nssa::public_transaction::Message::try_new(
            nssa::program::Program::clock().id(),
            nssa::CLOCK_PROGRAM_ACCOUNT_IDS.to_vec(),
            vec![],
            42_u64,
        )
        .unwrap();
        NSSATransaction::Public(nssa::PublicTransaction::new(
            message,
            nssa::public_transaction::WitnessSet::from_raw_parts(vec![]),
        ))
    };
    mempool_handle
        .push(NSSATransaction::Public(clock_invocation(0)))
        .await
        .unwrap();
    mempool_handle.push(crafted_clock_tx).await.unwrap();
    sequencer
        .produce_new_block_with_mempool_transactions()
        .unwrap();
    let block = sequencer
        .store
        .get_block_at_id(sequencer.chain_height)
        .unwrap()
        .unwrap();
    // Both transactions were dropped. Only the system-appended clock tx remains.
    assert_eq!(
        block.body.transactions,
        vec![NSSATransaction::Public(clock_invocation(
            block.header.timestamp
        ))]
    );
}
@ -909,4 +978,86 @@ mod tests {
"Chain height should NOT match the modified config.genesis_id"
);
}
// Verifies the clock-account protection also catches indirect writes: a user
// transaction that reaches the clock program via a chain call (not a direct
// invocation) must be dropped from the block.
#[tokio::test]
async fn user_tx_that_chain_calls_clock_is_dropped() {
    let (mut sequencer, mempool_handle) = common_setup().await;
    // Deploy the clock_chain_caller test program.
    let deploy_tx =
        NSSATransaction::ProgramDeployment(nssa::ProgramDeploymentTransaction::new(
            nssa::program_deployment_transaction::Message::new(
                test_program_methods::CLOCK_CHAIN_CALLER_ELF.to_vec(),
            ),
        ));
    mempool_handle.push(deploy_tx).await.unwrap();
    // First block: lands the deployment so the program is callable next block.
    sequencer
        .produce_new_block_with_mempool_transactions()
        .unwrap();
    // Build a user transaction that invokes clock_chain_caller, which in turn chain-calls the
    // clock program with the clock accounts. The sequencer should detect that the resulting
    // state diff modifies clock accounts and drop the transaction.
    let clock_chain_caller_id =
        nssa::program::Program::new(test_program_methods::CLOCK_CHAIN_CALLER_ELF.to_vec())
            .unwrap()
            .id();
    let clock_program_id = nssa::program::Program::clock().id();
    let timestamp: u64 = 0;
    // Instruction data tells clock_chain_caller which program to chain-call and with
    // what timestamp argument.
    let message = nssa::public_transaction::Message::try_new(
        clock_chain_caller_id,
        nssa::CLOCK_PROGRAM_ACCOUNT_IDS.to_vec(),
        vec![], // no signers
        (clock_program_id, timestamp),
    )
    .unwrap();
    let user_tx = NSSATransaction::Public(nssa::PublicTransaction::new(
        message,
        nssa::public_transaction::WitnessSet::from_raw_parts(vec![]),
    ));
    mempool_handle.push(user_tx).await.unwrap();
    sequencer
        .produce_new_block_with_mempool_transactions()
        .unwrap();
    let block = sequencer
        .store
        .get_block_at_id(sequencer.chain_height)
        .unwrap()
        .unwrap();
    // The user tx must have been dropped; only the mandatory clock invocation remains.
    assert_eq!(
        block.body.transactions,
        vec![NSSATransaction::Public(clock_invocation(
            block.header.timestamp
        ))]
    );
}
// Verifies that block production fails outright (rather than producing a block
// without a clock tx) when the mandatory clock invocation cannot execute.
#[tokio::test]
async fn block_production_aborts_when_clock_account_data_is_corrupted() {
    let (mut sequencer, mempool_handle) = common_setup().await;
    // Corrupt the clock 01 account data so the clock program panics on deserialization.
    let clock_account_id = nssa::CLOCK_01_PROGRAM_ACCOUNT_ID;
    let mut corrupted = sequencer.state.get_account_by_id(clock_account_id);
    // 3 bytes of 0xff is not a valid serialized clock payload.
    corrupted.data = vec![0xff; 3].try_into().unwrap();
    sequencer
        .state
        .force_insert_account(clock_account_id, corrupted);
    // Push a dummy transaction so the mempool is non-empty.
    let tx = common::test_utils::produce_dummy_empty_transaction();
    mempool_handle.push(tx).await.unwrap();
    // Block production must fail because the appended clock tx cannot execute.
    let result = sequencer.produce_new_block_with_mempool_transactions();
    assert!(
        result.is_err(),
        "Block production should abort when clock account data is corrupted"
    );
}
}

96
storage/src/cells/mod.rs Normal file
View File

@ -0,0 +1,96 @@
use std::sync::Arc;
use borsh::{BorshDeserialize, BorshSerialize};
use rocksdb::{BoundColumnFamily, DBWithThreadMode, MultiThreaded, WriteBatch};
use crate::{DbResult, error::DbError};
pub mod shared_cells;
/// Describes where and how a typed "cell" is stored in RocksDB: its column
/// family, a constant cell name, and how its key is derived from `KeyParams`.
///
/// Read/write behavior is layered on top via [`SimpleReadableCell`] and
/// [`SimpleWritableCell`].
pub trait SimpleStorableCell {
    /// Name of the column family this cell lives in.
    const CF_NAME: &'static str;
    /// Human-readable cell name; used in the default key and in error messages.
    const CELL_NAME: &'static str;

    /// Parameters required to build this cell's key — `()` for singleton meta
    /// cells, a block id / hash / account id for keyed cells.
    type KeyParams;

    /// Builds the storage key. The default ignores `_params` and keys the cell
    /// by the borsh encoding of `CELL_NAME`, which suits singleton cells;
    /// keyed cells override this to encode the params instead.
    fn key_constructor(_params: Self::KeyParams) -> DbResult<Vec<u8>> {
        borsh::to_vec(&Self::CELL_NAME).map_err(|err| {
            DbError::borsh_cast_message(
                err,
                Some(format!("Failed to serialize {:?}", Self::CELL_NAME)),
            )
        })
    }

    /// Resolves the column family handle. Panics when the column family is
    /// missing: that indicates a database-setup bug, not a runtime condition.
    fn column_ref(db: &DBWithThreadMode<MultiThreaded>) -> Arc<BoundColumnFamily<'_>> {
        db.cf_handle(Self::CF_NAME)
            .unwrap_or_else(|| panic!("Column family {:?} must be present", Self::CF_NAME))
    }
}
/// Read access for a storable cell whose value is borsh-deserializable.
pub trait SimpleReadableCell: SimpleStorableCell + BorshDeserialize {
    /// Reads the cell, turning an absent key into a "not found" database error.
    fn get(db: &DBWithThreadMode<MultiThreaded>, params: Self::KeyParams) -> DbResult<Self> {
        match Self::get_opt(db, params)? {
            Some(cell) => Ok(cell),
            None => Err(DbError::db_interaction_error(format!(
                "{:?} not found",
                Self::CELL_NAME
            ))),
        }
    }

    /// Reads the cell, yielding `None` when the key is absent.
    fn get_opt(
        db: &DBWithThreadMode<MultiThreaded>,
        params: Self::KeyParams,
    ) -> DbResult<Option<Self>> {
        let handle = Self::column_ref(db);
        let key = Self::key_constructor(params)?;
        let raw = db.get_cf(&handle, key).map_err(|db_err| {
            DbError::rocksdb_cast_message(
                db_err,
                Some(format!("Failed to read {:?}", Self::CELL_NAME)),
            )
        })?;
        match raw {
            None => Ok(None),
            Some(bytes) => match borsh::from_slice::<Self>(&bytes) {
                Ok(cell) => Ok(Some(cell)),
                Err(de_err) => Err(DbError::borsh_cast_message(
                    de_err,
                    Some(format!("Failed to deserialize {:?}", Self::CELL_NAME)),
                )),
            },
        }
    }
}
/// Write access for a storable cell whose value is borsh-serializable.
pub trait SimpleWritableCell: SimpleStorableCell + BorshSerialize {
    /// Serializes this cell's payload into the bytes stored as the value.
    fn value_constructor(&self) -> DbResult<Vec<u8>>;

    /// Writes the cell directly (non-batched) to the database.
    fn put(&self, db: &DBWithThreadMode<MultiThreaded>, params: Self::KeyParams) -> DbResult<()> {
        let handle = Self::column_ref(db);
        let key = Self::key_constructor(params)?;
        let value = self.value_constructor()?;
        db.put_cf(&handle, key, value).map_err(|db_err| {
            DbError::rocksdb_cast_message(
                db_err,
                Some(format!("Failed to write {:?}", Self::CELL_NAME)),
            )
        })
    }

    /// Queues the write into `write_batch` for a later atomic commit.
    fn put_batch(
        &self,
        db: &DBWithThreadMode<MultiThreaded>,
        params: Self::KeyParams,
        write_batch: &mut WriteBatch,
    ) -> DbResult<()> {
        let handle = Self::column_ref(db);
        let key = Self::key_constructor(params)?;
        let value = self.value_constructor()?;
        write_batch.put_cf(&handle, key, value);
        Ok(())
    }
}

View File

@ -0,0 +1,89 @@
use borsh::{BorshDeserialize, BorshSerialize};
use common::block::Block;
use crate::{
BLOCK_CELL_NAME, CF_BLOCK_NAME, CF_META_NAME, DB_META_FIRST_BLOCK_IN_DB_KEY,
DB_META_FIRST_BLOCK_SET_KEY, DB_META_LAST_BLOCK_IN_DB_KEY, DbResult,
cells::{SimpleReadableCell, SimpleStorableCell, SimpleWritableCell},
error::DbError,
};
/// Meta cell holding the id of the last block stored in the database.
#[derive(Debug, BorshSerialize, BorshDeserialize)]
pub struct LastBlockCell(pub u64);

impl SimpleStorableCell for LastBlockCell {
    type KeyParams = ();

    const CELL_NAME: &'static str = DB_META_LAST_BLOCK_IN_DB_KEY;
    const CF_NAME: &'static str = CF_META_NAME;
}

impl SimpleReadableCell for LastBlockCell {}

impl SimpleWritableCell for LastBlockCell {
    fn value_constructor(&self) -> DbResult<Vec<u8>> {
        match borsh::to_vec(self) {
            Ok(bytes) => Ok(bytes),
            Err(err) => Err(DbError::borsh_cast_message(
                err,
                Some("Failed to serialize last block id".to_owned()),
            )),
        }
    }
}
/// Meta cell flagging that the first block in the database has been set.
#[derive(Debug, BorshSerialize, BorshDeserialize)]
pub struct FirstBlockSetCell(pub bool);

impl SimpleStorableCell for FirstBlockSetCell {
    type KeyParams = ();

    const CELL_NAME: &'static str = DB_META_FIRST_BLOCK_SET_KEY;
    const CF_NAME: &'static str = CF_META_NAME;
}

impl SimpleReadableCell for FirstBlockSetCell {}

impl SimpleWritableCell for FirstBlockSetCell {
    fn value_constructor(&self) -> DbResult<Vec<u8>> {
        match borsh::to_vec(self) {
            Ok(bytes) => Ok(bytes),
            Err(err) => Err(DbError::borsh_cast_message(
                err,
                Some("Failed to serialize first block set flag".to_owned()),
            )),
        }
    }
}
/// Meta cell holding the id of the first block stored in the database.
/// Read-only here: no `SimpleWritableCell` impl is provided in this module.
#[derive(Debug, BorshSerialize, BorshDeserialize)]
pub struct FirstBlockCell(pub u64);

impl SimpleStorableCell for FirstBlockCell {
    type KeyParams = ();

    const CELL_NAME: &'static str = DB_META_FIRST_BLOCK_IN_DB_KEY;
    const CF_NAME: &'static str = CF_META_NAME;
}

impl SimpleReadableCell for FirstBlockCell {}
/// Cell mapping a block id to the full [`Block`] payload.
#[derive(Debug, BorshSerialize, BorshDeserialize)]
pub struct BlockCell(pub Block);

impl SimpleStorableCell for BlockCell {
    type KeyParams = u64;

    const CELL_NAME: &'static str = BLOCK_CELL_NAME;
    const CF_NAME: &'static str = CF_BLOCK_NAME;

    /// Keys by the borsh-encoded block id.
    fn key_constructor(params: Self::KeyParams) -> DbResult<Vec<u8>> {
        // ToDo: Replace with increasing ordering serialization
        match borsh::to_vec(&params) {
            Ok(key) => Ok(key),
            Err(err) => Err(DbError::borsh_cast_message(
                err,
                Some(format!(
                    "Failed to serialize {:?} key params",
                    Self::CELL_NAME
                )),
            )),
        }
    }
}

impl SimpleReadableCell for BlockCell {}

View File

@ -0,0 +1,230 @@
use borsh::{BorshDeserialize, BorshSerialize};
use nssa::V03State;
use crate::{
CF_META_NAME, DbResult,
cells::{SimpleReadableCell, SimpleStorableCell, SimpleWritableCell},
error::DbError,
indexer::{
ACC_NUM_CELL_NAME, BLOCK_HASH_CELL_NAME, BREAKPOINT_CELL_NAME, CF_ACC_META,
CF_BREAKPOINT_NAME, CF_HASH_TO_ID, CF_TX_TO_ID, DB_META_LAST_BREAKPOINT_ID,
DB_META_LAST_OBSERVED_L1_LIB_HEADER_ID_IN_DB_KEY, TX_HASH_CELL_NAME,
},
};
/// Meta cell holding the id of the last observed L1 lib header.
#[derive(Debug, BorshSerialize, BorshDeserialize)]
pub struct LastObservedL1LibHeaderCell(pub [u8; 32]);

impl SimpleStorableCell for LastObservedL1LibHeaderCell {
    type KeyParams = ();

    const CELL_NAME: &'static str = DB_META_LAST_OBSERVED_L1_LIB_HEADER_ID_IN_DB_KEY;
    const CF_NAME: &'static str = CF_META_NAME;
}

impl SimpleReadableCell for LastObservedL1LibHeaderCell {}

impl SimpleWritableCell for LastObservedL1LibHeaderCell {
    fn value_constructor(&self) -> DbResult<Vec<u8>> {
        match borsh::to_vec(self) {
            Ok(bytes) => Ok(bytes),
            Err(err) => Err(DbError::borsh_cast_message(
                err,
                Some("Failed to serialize last observed l1 header".to_owned()),
            )),
        }
    }
}
/// Meta cell holding the id of the most recent state breakpoint.
#[derive(Debug, BorshSerialize, BorshDeserialize)]
pub struct LastBreakpointIdCell(pub u64);

impl SimpleStorableCell for LastBreakpointIdCell {
    type KeyParams = ();

    const CELL_NAME: &'static str = DB_META_LAST_BREAKPOINT_ID;
    const CF_NAME: &'static str = CF_META_NAME;
}

impl SimpleReadableCell for LastBreakpointIdCell {}

impl SimpleWritableCell for LastBreakpointIdCell {
    fn value_constructor(&self) -> DbResult<Vec<u8>> {
        borsh::to_vec(&self).map_err(|err| {
            DbError::borsh_cast_message(
                err,
                Some("Failed to serialize last breakpoint id".to_owned()),
            )
        })
    }
}
/// Read side of a breakpoint cell: an owned `V03State` snapshot keyed by
/// breakpoint id. Deserialize-only; writes go through the borrowing
/// [`BreakpointCellRef`] so the state need not be cloned to persist it.
#[derive(BorshDeserialize)]
pub struct BreakpointCellOwned(pub V03State);

impl SimpleStorableCell for BreakpointCellOwned {
    type KeyParams = u64;

    const CELL_NAME: &'static str = BREAKPOINT_CELL_NAME;
    const CF_NAME: &'static str = CF_BREAKPOINT_NAME;

    /// Keys by the borsh-encoded breakpoint id. Must match
    /// `BreakpointCellRef::key_constructor` byte-for-byte.
    fn key_constructor(params: Self::KeyParams) -> DbResult<Vec<u8>> {
        borsh::to_vec(&params).map_err(|err| {
            DbError::borsh_cast_message(
                err,
                Some(format!(
                    "Failed to serialize {:?} key params",
                    Self::CELL_NAME
                )),
            )
        })
    }
}

impl SimpleReadableCell for BreakpointCellOwned {}
/// Write side of a breakpoint cell: borrows the `V03State` so a snapshot can be
/// serialized and stored without cloning. Serialize-only counterpart of
/// [`BreakpointCellOwned`]; the two must share cell name, column family, and
/// key encoding (see the `uniform_tests` module below).
#[derive(BorshSerialize)]
pub struct BreakpointCellRef<'state>(pub &'state V03State);

impl SimpleStorableCell for BreakpointCellRef<'_> {
    type KeyParams = u64;

    const CELL_NAME: &'static str = BREAKPOINT_CELL_NAME;
    const CF_NAME: &'static str = CF_BREAKPOINT_NAME;

    /// Keys by the borsh-encoded breakpoint id. Must match
    /// `BreakpointCellOwned::key_constructor` byte-for-byte.
    fn key_constructor(params: Self::KeyParams) -> DbResult<Vec<u8>> {
        borsh::to_vec(&params).map_err(|err| {
            DbError::borsh_cast_message(
                err,
                Some(format!(
                    "Failed to serialize {:?} key params",
                    Self::CELL_NAME
                )),
            )
        })
    }
}

impl SimpleWritableCell for BreakpointCellRef<'_> {
    fn value_constructor(&self) -> DbResult<Vec<u8>> {
        borsh::to_vec(&self).map_err(|err| {
            DbError::borsh_cast_message(err, Some("Failed to serialize breakpoint".to_owned()))
        })
    }
}
/// Mapping cell from a 32-byte block hash (the key) to the block id (the value).
#[derive(Debug, BorshSerialize, BorshDeserialize)]
pub struct BlockHashToBlockIdMapCell(pub u64);

impl SimpleStorableCell for BlockHashToBlockIdMapCell {
    type KeyParams = [u8; 32];

    const CELL_NAME: &'static str = BLOCK_HASH_CELL_NAME;
    const CF_NAME: &'static str = CF_HASH_TO_ID;

    /// Keys by the borsh-encoded block hash.
    fn key_constructor(params: Self::KeyParams) -> DbResult<Vec<u8>> {
        borsh::to_vec(&params).map_err(|err| {
            DbError::borsh_cast_message(
                err,
                Some(format!(
                    "Failed to serialize {:?} key params",
                    Self::CELL_NAME
                )),
            )
        })
    }
}

impl SimpleReadableCell for BlockHashToBlockIdMapCell {}

impl SimpleWritableCell for BlockHashToBlockIdMapCell {
    fn value_constructor(&self) -> DbResult<Vec<u8>> {
        borsh::to_vec(&self).map_err(|err| {
            DbError::borsh_cast_message(err, Some("Failed to serialize block id".to_owned()))
        })
    }
}
/// Mapping cell from a 32-byte transaction hash (the key) to the id of the
/// block containing that transaction (the value).
#[derive(Debug, BorshSerialize, BorshDeserialize)]
pub struct TxHashToBlockIdMapCell(pub u64);

impl SimpleStorableCell for TxHashToBlockIdMapCell {
    type KeyParams = [u8; 32];

    const CELL_NAME: &'static str = TX_HASH_CELL_NAME;
    const CF_NAME: &'static str = CF_TX_TO_ID;

    /// Keys by the borsh-encoded transaction hash.
    fn key_constructor(params: Self::KeyParams) -> DbResult<Vec<u8>> {
        borsh::to_vec(&params).map_err(|err| {
            DbError::borsh_cast_message(
                err,
                Some(format!(
                    "Failed to serialize {:?} key params",
                    Self::CELL_NAME
                )),
            )
        })
    }
}

impl SimpleReadableCell for TxHashToBlockIdMapCell {}

impl SimpleWritableCell for TxHashToBlockIdMapCell {
    fn value_constructor(&self) -> DbResult<Vec<u8>> {
        borsh::to_vec(&self).map_err(|err| {
            DbError::borsh_cast_message(err, Some("Failed to serialize block id".to_owned()))
        })
    }
}
/// Per-account transaction counter, keyed by the 32-byte account id.
#[derive(Debug, BorshSerialize, BorshDeserialize)]
pub struct AccNumTxCell(pub u64);

impl SimpleStorableCell for AccNumTxCell {
    type KeyParams = [u8; 32];

    const CELL_NAME: &'static str = ACC_NUM_CELL_NAME;
    const CF_NAME: &'static str = CF_ACC_META;

    /// Keys by the borsh-encoded account id (for a fixed `[u8; 32]`, borsh
    /// emits the raw 32 bytes with no length prefix).
    fn key_constructor(params: Self::KeyParams) -> DbResult<Vec<u8>> {
        borsh::to_vec(&params).map_err(|err| {
            DbError::borsh_cast_message(
                err,
                Some(format!(
                    "Failed to serialize {:?} key params",
                    Self::CELL_NAME
                )),
            )
        })
    }
}

impl SimpleReadableCell for AccNumTxCell {}

impl SimpleWritableCell for AccNumTxCell {
    fn value_constructor(&self) -> DbResult<Vec<u8>> {
        borsh::to_vec(&self).map_err(|err| {
            DbError::borsh_cast_message(
                err,
                Some("Failed to serialize number of transactions".to_owned()),
            )
        })
    }
}
#[cfg(test)]
mod uniform_tests {
    use crate::{
        cells::SimpleStorableCell as _,
        indexer::indexer_cells::{BreakpointCellOwned, BreakpointCellRef},
    };

    /// The borrowing write-side cell and the owned read-side cell must address
    /// the exact same storage location: same cell name, same column family,
    /// and byte-identical keys for the same breakpoint id.
    #[test]
    fn breakpoint_ref_and_owned_is_aligned() {
        assert_eq!(BreakpointCellRef::CELL_NAME, BreakpointCellOwned::CELL_NAME);
        assert_eq!(BreakpointCellRef::CF_NAME, BreakpointCellOwned::CF_NAME);
        assert_eq!(
            BreakpointCellRef::key_constructor(1000).unwrap(),
            BreakpointCellOwned::key_constructor(1000).unwrap()
        );
    }
}

View File

@ -6,44 +6,29 @@ use rocksdb::{
BoundColumnFamily, ColumnFamilyDescriptor, DBWithThreadMode, MultiThreaded, Options,
};
use crate::error::DbError;
use crate::{BREAKPOINT_INTERVAL, CF_BLOCK_NAME, CF_META_NAME, DBIO, DbResult, error::DbError};
pub mod indexer_cells;
pub mod read_multiple;
pub mod read_once;
pub mod write_atomic;
pub mod write_non_atomic;
/// Maximal size of stored blocks in base.
///
/// Used to control db size.
///
/// Currently effectively unbounded.
pub const BUFF_SIZE_ROCKSDB: usize = usize::MAX;
/// Size of stored blocks cache in memory.
///
/// Keeping small to not run out of memory.
pub const CACHE_SIZE: usize = 1000;
/// Key base for storing metainformation about id of first block in db.
pub const DB_META_FIRST_BLOCK_IN_DB_KEY: &str = "first_block_in_db";
/// Key base for storing metainformation about id of last current block in db.
pub const DB_META_LAST_BLOCK_IN_DB_KEY: &str = "last_block_in_db";
/// Key base for storing metainformation about id of last observed L1 lib header in db.
pub const DB_META_LAST_OBSERVED_L1_LIB_HEADER_ID_IN_DB_KEY: &str =
"last_observed_l1_lib_header_in_db";
/// Key base for storing metainformation which describe if first block has been set.
pub const DB_META_FIRST_BLOCK_SET_KEY: &str = "first_block_set";
/// Key base for storing metainformation about the last breakpoint.
pub const DB_META_LAST_BREAKPOINT_ID: &str = "last_breakpoint_id";
/// Interval between state breakpoints.
pub const BREAKPOINT_INTERVAL: u8 = 100;
/// Cell name for a breakpoint.
pub const BREAKPOINT_CELL_NAME: &str = "breakpoint";
/// Cell name for a block hash to block id map.
pub const BLOCK_HASH_CELL_NAME: &str = "block hash";
/// Cell name for a tx hash to block id map.
pub const TX_HASH_CELL_NAME: &str = "tx hash";
/// Cell name for a account number of transactions.
pub const ACC_NUM_CELL_NAME: &str = "acc id";
/// Name of block column family.
pub const CF_BLOCK_NAME: &str = "cf_block";
/// Name of meta column family.
pub const CF_META_NAME: &str = "cf_meta";
/// Name of breakpoint column family.
pub const CF_BREAKPOINT_NAME: &str = "cf_breakpoint";
/// Name of hash to id map column family.
@ -55,12 +40,16 @@ pub const CF_ACC_META: &str = "cf_acc_meta";
/// Name of account id to tx hash map column family.
pub const CF_ACC_TO_TX: &str = "cf_acc_to_tx";
pub type DbResult<T> = Result<T, DbError>;
pub struct RocksDBIO {
pub db: DBWithThreadMode<MultiThreaded>,
}
impl DBIO for RocksDBIO {
fn db(&self) -> &DBWithThreadMode<MultiThreaded> {
&self.db
}
}
impl RocksDBIO {
pub fn open_or_create(
path: &Path,
@ -257,7 +246,7 @@ mod tests {
let dbio = RocksDBIO::open_or_create(
temdir_path,
&genesis_block(),
&nssa::V03State::new_with_genesis_accounts(&[(acc1(), 10000), (acc2(), 20000)], &[]),
&nssa::V03State::new_with_genesis_accounts(&[(acc1(), 10000), (acc2(), 20000)], &[], 0),
)
.unwrap();
@ -294,7 +283,7 @@ mod tests {
let dbio = RocksDBIO::open_or_create(
temdir_path,
&genesis_block(),
&nssa::V03State::new_with_genesis_accounts(&[(acc1(), 10000), (acc2(), 20000)], &[]),
&nssa::V03State::new_with_genesis_accounts(&[(acc1(), 10000), (acc2(), 20000)], &[], 0),
)
.unwrap();
@ -347,7 +336,7 @@ mod tests {
let dbio = RocksDBIO::open_or_create(
temdir_path,
&genesis_block(),
&nssa::V03State::new_with_genesis_accounts(&[(acc1(), 10000), (acc2(), 20000)], &[]),
&nssa::V03State::new_with_genesis_accounts(&[(acc1(), 10000), (acc2(), 20000)], &[], 0),
)
.unwrap();
@ -420,7 +409,7 @@ mod tests {
let dbio = RocksDBIO::open_or_create(
temdir_path,
&genesis_block(),
&nssa::V03State::new_with_genesis_accounts(&[(acc1(), 10000), (acc2(), 20000)], &[]),
&nssa::V03State::new_with_genesis_accounts(&[(acc1(), 10000), (acc2(), 20000)], &[], 0),
)
.unwrap();
@ -503,7 +492,7 @@ mod tests {
let dbio = RocksDBIO::open_or_create(
temdir_path,
&genesis_block(),
&nssa::V03State::new_with_genesis_accounts(&[(acc1(), 10000), (acc2(), 20000)], &[]),
&nssa::V03State::new_with_genesis_accounts(&[(acc1(), 10000), (acc2(), 20000)], &[], 0),
)
.unwrap();
@ -599,7 +588,7 @@ mod tests {
let dbio = RocksDBIO::open_or_create(
temdir_path,
&genesis_block(),
&nssa::V03State::new_with_genesis_accounts(&[(acc1(), 10000), (acc2(), 20000)], &[]),
&nssa::V03State::new_with_genesis_accounts(&[(acc1(), 10000), (acc2(), 20000)], &[], 0),
)
.unwrap();

View File

@ -1,7 +1,11 @@
use super::{
Block, DB_META_FIRST_BLOCK_IN_DB_KEY, DB_META_FIRST_BLOCK_SET_KEY,
DB_META_LAST_BLOCK_IN_DB_KEY, DB_META_LAST_BREAKPOINT_ID,
DB_META_LAST_OBSERVED_L1_LIB_HEADER_ID_IN_DB_KEY, DbError, DbResult, RocksDBIO, V03State,
use super::{Block, DbResult, RocksDBIO, V03State};
use crate::{
DBIO as _,
cells::shared_cells::{BlockCell, FirstBlockCell, FirstBlockSetCell, LastBlockCell},
indexer::indexer_cells::{
AccNumTxCell, BlockHashToBlockIdMapCell, BreakpointCellOwned, LastBreakpointIdCell,
LastObservedL1LibHeaderCell, TxHashToBlockIdMapCell,
},
};
#[expect(clippy::multiple_inherent_impl, reason = "Readability")]
@ -9,264 +13,55 @@ impl RocksDBIO {
// Meta
pub fn get_meta_first_block_in_db(&self) -> DbResult<u64> {
let cf_meta = self.meta_column();
let res = self
.db
.get_cf(
&cf_meta,
borsh::to_vec(&DB_META_FIRST_BLOCK_IN_DB_KEY).map_err(|err| {
DbError::borsh_cast_message(
err,
Some("Failed to serialize DB_META_FIRST_BLOCK_IN_DB_KEY".to_owned()),
)
})?,
)
.map_err(|rerr| DbError::rocksdb_cast_message(rerr, None))?;
if let Some(data) = res {
Ok(borsh::from_slice::<u64>(&data).map_err(|err| {
DbError::borsh_cast_message(
err,
Some("Failed to deserialize first block".to_owned()),
)
})?)
} else {
Err(DbError::db_interaction_error(
"First block not found".to_owned(),
))
}
self.get::<FirstBlockCell>(()).map(|cell| cell.0)
}
pub fn get_meta_last_block_in_db(&self) -> DbResult<u64> {
let cf_meta = self.meta_column();
let res = self
.db
.get_cf(
&cf_meta,
borsh::to_vec(&DB_META_LAST_BLOCK_IN_DB_KEY).map_err(|err| {
DbError::borsh_cast_message(
err,
Some("Failed to serialize DB_META_LAST_BLOCK_IN_DB_KEY".to_owned()),
)
})?,
)
.map_err(|rerr| DbError::rocksdb_cast_message(rerr, None))?;
if let Some(data) = res {
Ok(borsh::from_slice::<u64>(&data).map_err(|err| {
DbError::borsh_cast_message(
err,
Some("Failed to deserialize last block".to_owned()),
)
})?)
} else {
Err(DbError::db_interaction_error(
"Last block not found".to_owned(),
))
}
self.get::<LastBlockCell>(()).map(|cell| cell.0)
}
pub fn get_meta_last_observed_l1_lib_header_in_db(&self) -> DbResult<Option<[u8; 32]>> {
let cf_meta = self.meta_column();
let res = self
.db
.get_cf(
&cf_meta,
borsh::to_vec(&DB_META_LAST_OBSERVED_L1_LIB_HEADER_ID_IN_DB_KEY).map_err(
|err| {
DbError::borsh_cast_message(
err,
Some(
"Failed to serialize DB_META_LAST_OBSERVED_L1_LIB_HEADER_ID_IN_DB_KEY"
.to_owned(),
),
)
},
)?,
)
.map_err(|rerr| DbError::rocksdb_cast_message(rerr, None))?;
res.map(|data| {
borsh::from_slice::<[u8; 32]>(&data).map_err(|err| {
DbError::borsh_cast_message(
err,
Some("Failed to deserialize last l1 lib header".to_owned()),
)
})
})
.transpose()
self.get_opt::<LastObservedL1LibHeaderCell>(())
.map(|opt| opt.map(|val| val.0))
}
pub fn get_meta_is_first_block_set(&self) -> DbResult<bool> {
let cf_meta = self.meta_column();
let res = self
.db
.get_cf(
&cf_meta,
borsh::to_vec(&DB_META_FIRST_BLOCK_SET_KEY).map_err(|err| {
DbError::borsh_cast_message(
err,
Some("Failed to serialize DB_META_FIRST_BLOCK_SET_KEY".to_owned()),
)
})?,
)
.map_err(|rerr| DbError::rocksdb_cast_message(rerr, None))?;
Ok(res.is_some())
Ok(self.get_opt::<FirstBlockSetCell>(())?.is_some())
}
pub fn get_meta_last_breakpoint_id(&self) -> DbResult<u64> {
let cf_meta = self.meta_column();
let res = self
.db
.get_cf(
&cf_meta,
borsh::to_vec(&DB_META_LAST_BREAKPOINT_ID).map_err(|err| {
DbError::borsh_cast_message(
err,
Some("Failed to serialize DB_META_LAST_BREAKPOINT_ID".to_owned()),
)
})?,
)
.map_err(|rerr| DbError::rocksdb_cast_message(rerr, None))?;
if let Some(data) = res {
Ok(borsh::from_slice::<u64>(&data).map_err(|err| {
DbError::borsh_cast_message(
err,
Some("Failed to deserialize last breakpoint id".to_owned()),
)
})?)
} else {
Err(DbError::db_interaction_error(
"Last breakpoint id not found".to_owned(),
))
}
self.get::<LastBreakpointIdCell>(()).map(|cell| cell.0)
}
// Block
pub fn get_block(&self, block_id: u64) -> DbResult<Option<Block>> {
let cf_block = self.block_column();
let res = self
.db
.get_cf(
&cf_block,
borsh::to_vec(&block_id).map_err(|err| {
DbError::borsh_cast_message(
err,
Some("Failed to serialize block id".to_owned()),
)
})?,
)
.map_err(|rerr| DbError::rocksdb_cast_message(rerr, None))?;
if let Some(data) = res {
Ok(Some(borsh::from_slice::<Block>(&data).map_err(|serr| {
DbError::borsh_cast_message(
serr,
Some("Failed to deserialize block data".to_owned()),
)
})?))
} else {
Ok(None)
}
self.get_opt::<BlockCell>(block_id)
.map(|opt| opt.map(|val| val.0))
}
// State
pub fn get_breakpoint(&self, br_id: u64) -> DbResult<V03State> {
let cf_br = self.breakpoint_column();
let res = self
.db
.get_cf(
&cf_br,
borsh::to_vec(&br_id).map_err(|err| {
DbError::borsh_cast_message(
err,
Some("Failed to serialize breakpoint id".to_owned()),
)
})?,
)
.map_err(|rerr| DbError::rocksdb_cast_message(rerr, None))?;
if let Some(data) = res {
Ok(borsh::from_slice::<V03State>(&data).map_err(|serr| {
DbError::borsh_cast_message(
serr,
Some("Failed to deserialize breakpoint data".to_owned()),
)
})?)
} else {
Err(DbError::db_interaction_error(
"Breakpoint on this id not found".to_owned(),
))
}
self.get::<BreakpointCellOwned>(br_id).map(|cell| cell.0)
}
// Mappings
pub fn get_block_id_by_hash(&self, hash: [u8; 32]) -> DbResult<Option<u64>> {
let cf_hti = self.hash_to_id_column();
let res = self
.db
.get_cf(
&cf_hti,
borsh::to_vec(&hash).map_err(|err| {
DbError::borsh_cast_message(
err,
Some("Failed to serialize block hash".to_owned()),
)
})?,
)
.map_err(|rerr| DbError::rocksdb_cast_message(rerr, None))?;
if let Some(data) = res {
Ok(Some(borsh::from_slice::<u64>(&data).map_err(|serr| {
DbError::borsh_cast_message(serr, Some("Failed to deserialize block id".to_owned()))
})?))
} else {
Ok(None)
}
self.get_opt::<BlockHashToBlockIdMapCell>(hash)
.map(|opt| opt.map(|cell| cell.0))
}
pub fn get_block_id_by_tx_hash(&self, tx_hash: [u8; 32]) -> DbResult<Option<u64>> {
let cf_tti = self.tx_hash_to_id_column();
let res = self
.db
.get_cf(
&cf_tti,
borsh::to_vec(&tx_hash).map_err(|err| {
DbError::borsh_cast_message(
err,
Some("Failed to serialize transaction hash".to_owned()),
)
})?,
)
.map_err(|rerr| DbError::rocksdb_cast_message(rerr, None))?;
if let Some(data) = res {
Ok(Some(borsh::from_slice::<u64>(&data).map_err(|serr| {
DbError::borsh_cast_message(serr, Some("Failed to deserialize block id".to_owned()))
})?))
} else {
Ok(None)
}
self.get_opt::<TxHashToBlockIdMapCell>(tx_hash)
.map(|opt| opt.map(|cell| cell.0))
}
// Accounts meta
pub(crate) fn get_acc_meta_num_tx(&self, acc_id: [u8; 32]) -> DbResult<Option<u64>> {
let cf_ameta = self.account_meta_column();
let res = self.db.get_cf(&cf_ameta, acc_id).map_err(|rerr| {
DbError::rocksdb_cast_message(rerr, Some("Failed to read from acc meta cf".to_owned()))
})?;
res.map(|data| {
borsh::from_slice::<u64>(&data).map_err(|serr| {
DbError::borsh_cast_message(serr, Some("Failed to deserialize num tx".to_owned()))
})
})
.transpose()
self.get_opt::<AccNumTxCell>(acc_id)
.map(|opt| opt.map(|cell| cell.0))
}
}

View File

@ -2,10 +2,14 @@ use std::collections::HashMap;
use rocksdb::WriteBatch;
use super::{
Arc, BREAKPOINT_INTERVAL, Block, BoundColumnFamily, DB_META_FIRST_BLOCK_IN_DB_KEY,
DB_META_FIRST_BLOCK_SET_KEY, DB_META_LAST_BLOCK_IN_DB_KEY, DB_META_LAST_BREAKPOINT_ID,
DB_META_LAST_OBSERVED_L1_LIB_HEADER_ID_IN_DB_KEY, DbError, DbResult, RocksDBIO,
use super::{BREAKPOINT_INTERVAL, Block, DbError, DbResult, RocksDBIO};
use crate::{
DB_META_FIRST_BLOCK_IN_DB_KEY, DBIO as _,
cells::shared_cells::{FirstBlockSetCell, LastBlockCell},
indexer::indexer_cells::{
AccNumTxCell, BlockHashToBlockIdMapCell, LastBreakpointIdCell, LastObservedL1LibHeaderCell,
TxHashToBlockIdMapCell,
},
};
#[expect(clippy::multiple_inherent_impl, reason = "Readability")]
@ -18,22 +22,27 @@ impl RocksDBIO {
num_tx: u64,
write_batch: &mut WriteBatch,
) -> DbResult<()> {
let cf_ameta = self.account_meta_column();
self.put_batch(&AccNumTxCell(num_tx), acc_id, write_batch)
}
write_batch.put_cf(
&cf_ameta,
borsh::to_vec(&acc_id).map_err(|err| {
DbError::borsh_cast_message(err, Some("Failed to serialize account id".to_owned()))
})?,
borsh::to_vec(&num_tx).map_err(|err| {
DbError::borsh_cast_message(
err,
Some("Failed to serialize acc metadata".to_owned()),
)
})?,
);
// Mappings
Ok(())
pub fn put_block_id_by_hash_batch(
&self,
hash: [u8; 32],
block_id: u64,
write_batch: &mut WriteBatch,
) -> DbResult<()> {
self.put_batch(&BlockHashToBlockIdMapCell(block_id), hash, write_batch)
}
pub fn put_block_id_by_tx_hash_batch(
&self,
tx_hash: [u8; 32],
block_id: u64,
write_batch: &mut WriteBatch,
) -> DbResult<()> {
self.put_batch(&TxHashToBlockIdMapCell(block_id), tx_hash, write_batch)
}
// Account
@ -163,23 +172,7 @@ impl RocksDBIO {
block_id: u64,
write_batch: &mut WriteBatch,
) -> DbResult<()> {
let cf_meta = self.meta_column();
write_batch.put_cf(
&cf_meta,
borsh::to_vec(&DB_META_LAST_BLOCK_IN_DB_KEY).map_err(|err| {
DbError::borsh_cast_message(
err,
Some("Failed to serialize DB_META_LAST_BLOCK_IN_DB_KEY".to_owned()),
)
})?,
borsh::to_vec(&block_id).map_err(|err| {
DbError::borsh_cast_message(
err,
Some("Failed to serialize last block id".to_owned()),
)
})?,
);
Ok(())
self.put_batch(&LastBlockCell(block_id), (), write_batch)
}
pub fn put_meta_last_observed_l1_lib_header_in_db_batch(
@ -187,26 +180,7 @@ impl RocksDBIO {
l1_lib_header: [u8; 32],
write_batch: &mut WriteBatch,
) -> DbResult<()> {
let cf_meta = self.meta_column();
write_batch.put_cf(
&cf_meta,
borsh::to_vec(&DB_META_LAST_OBSERVED_L1_LIB_HEADER_ID_IN_DB_KEY).map_err(|err| {
DbError::borsh_cast_message(
err,
Some(
"Failed to serialize DB_META_LAST_OBSERVED_L1_LIB_HEADER_ID_IN_DB_KEY"
.to_owned(),
),
)
})?,
borsh::to_vec(&l1_lib_header).map_err(|err| {
DbError::borsh_cast_message(
err,
Some("Failed to serialize last l1 block header".to_owned()),
)
})?,
);
Ok(())
self.put_batch(&LastObservedL1LibHeaderCell(l1_lib_header), (), write_batch)
}
pub fn put_meta_last_breakpoint_id_batch(
@ -214,46 +188,17 @@ impl RocksDBIO {
br_id: u64,
write_batch: &mut WriteBatch,
) -> DbResult<()> {
let cf_meta = self.meta_column();
write_batch.put_cf(
&cf_meta,
borsh::to_vec(&DB_META_LAST_BREAKPOINT_ID).map_err(|err| {
DbError::borsh_cast_message(
err,
Some("Failed to serialize DB_META_LAST_BREAKPOINT_ID".to_owned()),
)
})?,
borsh::to_vec(&br_id).map_err(|err| {
DbError::borsh_cast_message(
err,
Some("Failed to serialize last block id".to_owned()),
)
})?,
);
Ok(())
self.put_batch(&LastBreakpointIdCell(br_id), (), write_batch)
}
pub fn put_meta_is_first_block_set_batch(&self, write_batch: &mut WriteBatch) -> DbResult<()> {
let cf_meta = self.meta_column();
write_batch.put_cf(
&cf_meta,
borsh::to_vec(&DB_META_FIRST_BLOCK_SET_KEY).map_err(|err| {
DbError::borsh_cast_message(
err,
Some("Failed to serialize DB_META_FIRST_BLOCK_SET_KEY".to_owned()),
)
})?,
[1_u8; 1],
);
Ok(())
self.put_batch(&FirstBlockSetCell(true), (), write_batch)
}
// Block
pub fn put_block(&self, block: &Block, l1_lib_header: [u8; 32]) -> DbResult<()> {
let cf_block = self.block_column();
let cf_hti = self.hash_to_id_column();
let cf_tti: Arc<BoundColumnFamily<'_>> = self.tx_hash_to_id_column();
let last_curr_block = self.get_meta_last_block_in_db()?;
let mut write_batch = WriteBatch::default();
@ -272,33 +217,22 @@ impl RocksDBIO {
self.put_meta_last_observed_l1_lib_header_in_db_batch(l1_lib_header, &mut write_batch)?;
}
write_batch.put_cf(
&cf_hti,
borsh::to_vec(&block.header.hash).map_err(|err| {
DbError::borsh_cast_message(err, Some("Failed to serialize block hash".to_owned()))
})?,
borsh::to_vec(&block.header.block_id).map_err(|err| {
DbError::borsh_cast_message(err, Some("Failed to serialize block id".to_owned()))
})?,
);
self.put_block_id_by_hash_batch(
block.header.hash.into(),
block.header.block_id,
&mut write_batch,
)?;
let mut acc_to_tx_map: HashMap<[u8; 32], Vec<[u8; 32]>> = HashMap::new();
for tx in &block.body.transactions {
let tx_hash = tx.hash();
write_batch.put_cf(
&cf_tti,
borsh::to_vec(&tx_hash).map_err(|err| {
DbError::borsh_cast_message(err, Some("Failed to serialize tx hash".to_owned()))
})?,
borsh::to_vec(&block.header.block_id).map_err(|err| {
DbError::borsh_cast_message(
err,
Some("Failed to serialize block id".to_owned()),
)
})?,
);
self.put_block_id_by_tx_hash_batch(
tx_hash.into(),
block.header.block_id,
&mut write_batch,
)?;
let acc_ids = tx
.affected_public_account_ids()

View File

@ -1,7 +1,10 @@
use super::{
BREAKPOINT_INTERVAL, DB_META_FIRST_BLOCK_SET_KEY, DB_META_LAST_BLOCK_IN_DB_KEY,
DB_META_LAST_BREAKPOINT_ID, DB_META_LAST_OBSERVED_L1_LIB_HEADER_ID_IN_DB_KEY, DbError,
DbResult, RocksDBIO, V03State,
use super::{BREAKPOINT_INTERVAL, DbError, DbResult, RocksDBIO, V03State};
use crate::{
DBIO as _,
cells::shared_cells::{FirstBlockSetCell, LastBlockCell},
indexer::indexer_cells::{
BreakpointCellRef, LastBreakpointIdCell, LastObservedL1LibHeaderCell,
},
};
#[expect(clippy::multiple_inherent_impl, reason = "Readability")]
@ -9,118 +12,28 @@ impl RocksDBIO {
// Meta
pub fn put_meta_last_block_in_db(&self, block_id: u64) -> DbResult<()> {
let cf_meta = self.meta_column();
self.db
.put_cf(
&cf_meta,
borsh::to_vec(&DB_META_LAST_BLOCK_IN_DB_KEY).map_err(|err| {
DbError::borsh_cast_message(
err,
Some("Failed to serialize DB_META_LAST_BLOCK_IN_DB_KEY".to_owned()),
)
})?,
borsh::to_vec(&block_id).map_err(|err| {
DbError::borsh_cast_message(
err,
Some("Failed to serialize last block id".to_owned()),
)
})?,
)
.map_err(|rerr| DbError::rocksdb_cast_message(rerr, None))?;
Ok(())
self.put(&LastBlockCell(block_id), ())
}
pub fn put_meta_last_observed_l1_lib_header_in_db(
&self,
l1_lib_header: [u8; 32],
) -> DbResult<()> {
let cf_meta = self.meta_column();
self.db
.put_cf(
&cf_meta,
borsh::to_vec(&DB_META_LAST_OBSERVED_L1_LIB_HEADER_ID_IN_DB_KEY).map_err(
|err| {
DbError::borsh_cast_message(
err,
Some(
"Failed to serialize DB_META_LAST_OBSERVED_L1_LIB_HEADER_ID_IN_DB_KEY"
.to_owned(),
),
)
},
)?,
borsh::to_vec(&l1_lib_header).map_err(|err| {
DbError::borsh_cast_message(
err,
Some("Failed to serialize last l1 block header".to_owned()),
)
})?,
)
.map_err(|rerr| DbError::rocksdb_cast_message(rerr, None))?;
Ok(())
self.put(&LastObservedL1LibHeaderCell(l1_lib_header), ())
}
pub fn put_meta_last_breakpoint_id(&self, br_id: u64) -> DbResult<()> {
let cf_meta = self.meta_column();
self.db
.put_cf(
&cf_meta,
borsh::to_vec(&DB_META_LAST_BREAKPOINT_ID).map_err(|err| {
DbError::borsh_cast_message(
err,
Some("Failed to serialize DB_META_LAST_BREAKPOINT_ID".to_owned()),
)
})?,
borsh::to_vec(&br_id).map_err(|err| {
DbError::borsh_cast_message(
err,
Some("Failed to serialize last block id".to_owned()),
)
})?,
)
.map_err(|rerr| DbError::rocksdb_cast_message(rerr, None))?;
Ok(())
self.put(&LastBreakpointIdCell(br_id), ())
}
pub fn put_meta_is_first_block_set(&self) -> DbResult<()> {
let cf_meta = self.meta_column();
self.db
.put_cf(
&cf_meta,
borsh::to_vec(&DB_META_FIRST_BLOCK_SET_KEY).map_err(|err| {
DbError::borsh_cast_message(
err,
Some("Failed to serialize DB_META_FIRST_BLOCK_SET_KEY".to_owned()),
)
})?,
[1_u8; 1],
)
.map_err(|rerr| DbError::rocksdb_cast_message(rerr, None))?;
Ok(())
self.put(&FirstBlockSetCell(true), ())
}
// State
pub fn put_breakpoint(&self, br_id: u64, breakpoint: &V03State) -> DbResult<()> {
let cf_br = self.breakpoint_column();
self.db
.put_cf(
&cf_br,
borsh::to_vec(&br_id).map_err(|err| {
DbError::borsh_cast_message(
err,
Some("Failed to serialize breakpoint id".to_owned()),
)
})?,
borsh::to_vec(breakpoint).map_err(|err| {
DbError::borsh_cast_message(
err,
Some("Failed to serialize breakpoint data".to_owned()),
)
})?,
)
.map_err(|rerr| DbError::rocksdb_cast_message(rerr, None))
self.put(&BreakpointCellRef(breakpoint), br_id)
}
pub fn put_next_breakpoint(&self) -> DbResult<()> {

View File

@ -1,3 +1,69 @@
use rocksdb::{DBWithThreadMode, MultiThreaded, WriteBatch};
use crate::{
cells::{SimpleReadableCell, SimpleWritableCell},
error::DbError,
};
pub mod cells;
pub mod error;
pub mod indexer;
pub mod sequencer;
/// Maximal size of stored blocks in base.
///
/// Used to control db size.
///
/// Currently effectively unbounded.
pub const BUFF_SIZE_ROCKSDB: usize = usize::MAX;
/// Size of stored blocks cache in memory.
///
/// Keeping small to not run out of memory.
pub const CACHE_SIZE: usize = 1000;
/// Key base for storing metainformation which describe if first block has been set.
pub const DB_META_FIRST_BLOCK_SET_KEY: &str = "first_block_set";
/// Key base for storing metainformation about id of first block in db.
pub const DB_META_FIRST_BLOCK_IN_DB_KEY: &str = "first_block_in_db";
/// Key base for storing metainformation about id of last current block in db.
pub const DB_META_LAST_BLOCK_IN_DB_KEY: &str = "last_block_in_db";
/// Cell name for a block.
pub const BLOCK_CELL_NAME: &str = "block";
/// Interval between state breakpoints.
pub const BREAKPOINT_INTERVAL: u8 = 100;
/// Name of block column family.
pub const CF_BLOCK_NAME: &str = "cf_block";
/// Name of meta column family.
pub const CF_META_NAME: &str = "cf_meta";
pub type DbResult<T> = Result<T, DbError>;
/// Minimal requirements for DB IO.
pub trait DBIO {
    /// Handle to the underlying multi-threaded RocksDB instance.
    fn db(&self) -> &DBWithThreadMode<MultiThreaded>;
    /// Reads cell `T` at the key derived from `params`.
    ///
    /// Delegates to `T::get`; presumably errors when the key is absent —
    /// use [`Self::get_opt`] when absence is an expected outcome.
    fn get<T: SimpleReadableCell>(&self, params: T::KeyParams) -> DbResult<T> {
        T::get(self.db(), params)
    }
    /// Reads cell `T`, returning `Ok(None)` when the key is absent.
    fn get_opt<T: SimpleReadableCell>(&self, params: T::KeyParams) -> DbResult<Option<T>> {
        T::get_opt(self.db(), params)
    }
    /// Writes `cell` immediately at the key derived from `params`.
    fn put<T: SimpleWritableCell>(&self, cell: &T, params: T::KeyParams) -> DbResult<()> {
        cell.put(self.db(), params)
    }
    /// Queues a write of `cell` into `write_batch`; nothing reaches the DB
    /// until the caller commits the batch.
    fn put_batch<T: SimpleWritableCell>(
        &self,
        cell: &T,
        params: T::KeyParams,
        write_batch: &mut WriteBatch,
    ) -> DbResult<()> {
        cell.put_batch(self.db(), params, write_batch)
    }
}

View File

@ -1,596 +0,0 @@
use std::{path::Path, sync::Arc};
use common::block::{BedrockStatus, Block, BlockMeta, MantleMsgId};
use nssa::V03State;
use rocksdb::{
BoundColumnFamily, ColumnFamilyDescriptor, DBWithThreadMode, MultiThreaded, Options, WriteBatch,
};
use crate::error::DbError;
/// Maximal size of stored blocks in base.
///
/// Used to control db size.
///
/// Currently effectively unbounded.
pub const BUFF_SIZE_ROCKSDB: usize = usize::MAX;
/// Size of stored blocks cache in memory.
///
/// Keeping small to not run out of memory.
pub const CACHE_SIZE: usize = 1000;
/// Key base for storing metainformation about id of first block in db.
pub const DB_META_FIRST_BLOCK_IN_DB_KEY: &str = "first_block_in_db";
/// Key base for storing metainformation about id of last current block in db.
pub const DB_META_LAST_BLOCK_IN_DB_KEY: &str = "last_block_in_db";
/// Key base for storing metainformation which describe if first block has been set.
pub const DB_META_FIRST_BLOCK_SET_KEY: &str = "first_block_set";
/// Key base for storing metainformation about the last finalized block on Bedrock.
pub const DB_META_LAST_FINALIZED_BLOCK_ID: &str = "last_finalized_block_id";
/// Key base for storing metainformation about the latest block meta.
pub const DB_META_LATEST_BLOCK_META_KEY: &str = "latest_block_meta";
/// Key base for storing the NSSA state.
pub const DB_NSSA_STATE_KEY: &str = "nssa_state";
/// Name of block column family.
pub const CF_BLOCK_NAME: &str = "cf_block";
/// Name of meta column family.
pub const CF_META_NAME: &str = "cf_meta";
/// Name of state column family.
pub const CF_NSSA_STATE_NAME: &str = "cf_nssa_state";
pub type DbResult<T> = Result<T, DbError>;
pub struct RocksDBIO {
pub db: DBWithThreadMode<MultiThreaded>,
}
impl RocksDBIO {
pub fn open_or_create(
path: &Path,
genesis_block: &Block,
genesis_msg_id: MantleMsgId,
) -> DbResult<Self> {
let mut cf_opts = Options::default();
cf_opts.set_max_write_buffer_number(16);
// ToDo: Add more column families for different data
let cfb = ColumnFamilyDescriptor::new(CF_BLOCK_NAME, cf_opts.clone());
let cfmeta = ColumnFamilyDescriptor::new(CF_META_NAME, cf_opts.clone());
let cfstate = ColumnFamilyDescriptor::new(CF_NSSA_STATE_NAME, cf_opts.clone());
let mut db_opts = Options::default();
db_opts.create_missing_column_families(true);
db_opts.create_if_missing(true);
let db = DBWithThreadMode::<MultiThreaded>::open_cf_descriptors(
&db_opts,
path,
vec![cfb, cfmeta, cfstate],
)
.map_err(|err| DbError::RocksDbError {
error: err,
additional_info: Some("Failed to open or create DB".to_owned()),
})?;
let dbio = Self { db };
let is_start_set = dbio.get_meta_is_first_block_set()?;
if !is_start_set {
let block_id = genesis_block.header.block_id;
dbio.put_meta_first_block_in_db(genesis_block, genesis_msg_id)?;
dbio.put_meta_is_first_block_set()?;
dbio.put_meta_last_block_in_db(block_id)?;
dbio.put_meta_last_finalized_block_id(None)?;
dbio.put_meta_latest_block_meta(&BlockMeta {
id: genesis_block.header.block_id,
hash: genesis_block.header.hash,
msg_id: genesis_msg_id,
})?;
}
Ok(dbio)
}
/// Destroys the RocksDB instance at `path`, removing all stored data.
///
/// # Errors
///
/// Returns a [`DbError`] if the underlying RocksDB destroy call fails.
pub fn destroy(path: &Path) -> DbResult<()> {
    // Column family descriptors are not needed to destroy a database; the
    // former unused `_cfb`/`_cfmeta`/`_cfstate` locals were dead code.
    let mut db_opts = Options::default();
    db_opts.create_missing_column_families(true);
    db_opts.create_if_missing(true);
    DBWithThreadMode::<MultiThreaded>::destroy(&db_opts, path)
        .map_err(|rerr| DbError::rocksdb_cast_message(rerr, None))
}
/// Returns a handle to the meta column family.
///
/// # Panics
///
/// Panics if the column family is missing, which indicates the DB was not
/// opened with the expected descriptors.
pub fn meta_column(&self) -> Arc<BoundColumnFamily<'_>> {
    // `expect` with a message instead of bare `unwrap`, consistent with the
    // other column accessors in this crate.
    self.db
        .cf_handle(CF_META_NAME)
        .expect("Meta column should exist")
}
/// Returns a handle to the block column family.
///
/// # Panics
///
/// Panics if the column family is missing, which indicates the DB was not
/// opened with the expected descriptors.
pub fn block_column(&self) -> Arc<BoundColumnFamily<'_>> {
    // `expect` with a message instead of bare `unwrap`, consistent with the
    // other column accessors in this crate.
    self.db
        .cf_handle(CF_BLOCK_NAME)
        .expect("Block column should exist")
}
/// Returns a handle to the NSSA state column family.
///
/// # Panics
///
/// Panics if the column family is missing, which indicates the DB was not
/// opened with the expected descriptors.
pub fn nssa_state_column(&self) -> Arc<BoundColumnFamily<'_>> {
    // `expect` with a message instead of bare `unwrap`, consistent with the
    // other column accessors in this crate.
    self.db
        .cf_handle(CF_NSSA_STATE_NAME)
        .expect("State should exist")
}
/// Reads the id of the first block stored in the DB.
///
/// # Errors
///
/// Fails with "First block not found" when the key has never been written
/// (i.e. the DB was not initialized via the genesis path), or on
/// serialization / RocksDB errors.
pub fn get_meta_first_block_in_db(&self) -> DbResult<u64> {
    let cf_meta = self.meta_column();
    // Key is the borsh-serialized constant key string.
    let res = self
        .db
        .get_cf(
            &cf_meta,
            borsh::to_vec(&DB_META_FIRST_BLOCK_IN_DB_KEY).map_err(|err| {
                DbError::borsh_cast_message(
                    err,
                    Some("Failed to serialize DB_META_FIRST_BLOCK_IN_DB_KEY".to_owned()),
                )
            })?,
        )
        .map_err(|rerr| DbError::rocksdb_cast_message(rerr, None))?;
    if let Some(data) = res {
        // Stored value is a borsh-encoded u64 block id.
        Ok(borsh::from_slice::<u64>(&data).map_err(|err| {
            DbError::borsh_cast_message(
                err,
                Some("Failed to deserialize first block".to_owned()),
            )
        })?)
    } else {
        // Missing key is an initialization error, not an Ok(None).
        Err(DbError::db_interaction_error(
            "First block not found".to_owned(),
        ))
    }
}
/// Reads the id of the last block currently stored in the DB.
///
/// # Errors
///
/// Fails with "Last block not found" when the key has never been written,
/// or on serialization / RocksDB errors.
pub fn get_meta_last_block_in_db(&self) -> DbResult<u64> {
    let cf_meta = self.meta_column();
    let key = borsh::to_vec(&DB_META_LAST_BLOCK_IN_DB_KEY).map_err(|err| {
        DbError::borsh_cast_message(
            err,
            Some("Failed to serialize DB_META_LAST_BLOCK_IN_DB_KEY".to_owned()),
        )
    })?;
    let stored = self
        .db
        .get_cf(&cf_meta, key)
        .map_err(|rerr| DbError::rocksdb_cast_message(rerr, None))?;
    match stored {
        // Stored value is a borsh-encoded u64 block id.
        Some(bytes) => borsh::from_slice::<u64>(&bytes).map_err(|err| {
            DbError::borsh_cast_message(
                err,
                Some("Failed to deserialize last block".to_owned()),
            )
        }),
        None => Err(DbError::db_interaction_error(
            "Last block not found".to_owned(),
        )),
    }
}
/// Reports whether the "first block set" marker exists in the meta column
/// family, i.e. whether the DB has been initialized with its first block.
pub fn get_meta_is_first_block_set(&self) -> DbResult<bool> {
    let key = borsh::to_vec(&DB_META_FIRST_BLOCK_SET_KEY).map_err(|err| {
        DbError::borsh_cast_message(
            err,
            Some("Failed to serialize DB_META_FIRST_BLOCK_SET_KEY".to_owned()),
        )
    })?;
    let marker = self
        .db
        .get_cf(&self.meta_column(), key)
        .map_err(|rerr| DbError::rocksdb_cast_message(rerr, None))?;
    // Only presence matters; the stored bytes themselves are ignored.
    Ok(marker.is_some())
}
/// Queues the borsh-serialized NSSA `state` into `batch` under the fixed
/// `DB_NSSA_STATE_KEY`; nothing is persisted until the batch is written.
pub fn put_nssa_state_in_db(&self, state: &V03State, batch: &mut WriteBatch) -> DbResult<()> {
    let cf_nssa_state = self.nssa_state_column();
    batch.put_cf(
        &cf_nssa_state,
        borsh::to_vec(&DB_NSSA_STATE_KEY).map_err(|err| {
            DbError::borsh_cast_message(
                err,
                Some("Failed to serialize DB_NSSA_STATE_KEY".to_owned()),
            )
        })?,
        borsh::to_vec(state).map_err(|err| {
            DbError::borsh_cast_message(err, Some("Failed to serialize NSSA state".to_owned()))
        })?,
    );
    Ok(())
}
/// Records `block` as the first block in the DB: stores its id under
/// `DB_META_FIRST_BLOCK_IN_DB_KEY`, then writes the block itself (as
/// "first", skipping last-block bookkeeping) via a write batch.
///
/// NOTE(review): the id goes through a standalone `put_cf` while the block
/// goes through a separate batch, so the two writes are not atomic — a
/// crash in between leaves the id without the block. Confirm this is
/// acceptable for the init path.
pub fn put_meta_first_block_in_db(&self, block: &Block, msg_id: MantleMsgId) -> DbResult<()> {
    let cf_meta = self.meta_column();
    self.db
        .put_cf(
            &cf_meta,
            borsh::to_vec(&DB_META_FIRST_BLOCK_IN_DB_KEY).map_err(|err| {
                DbError::borsh_cast_message(
                    err,
                    Some("Failed to serialize DB_META_FIRST_BLOCK_IN_DB_KEY".to_owned()),
                )
            })?,
            borsh::to_vec(&block.header.block_id).map_err(|err| {
                DbError::borsh_cast_message(
                    err,
                    Some("Failed to serialize first block id".to_owned()),
                )
            })?,
        )
        .map_err(|rerr| DbError::rocksdb_cast_message(rerr, None))?;
    // `first = true` tells put_block to skip the last-block comparison.
    let mut batch = WriteBatch::default();
    self.put_block(block, msg_id, true, &mut batch)?;
    self.db.write(batch).map_err(|rerr| {
        DbError::rocksdb_cast_message(
            rerr,
            Some("Failed to write first block in db".to_owned()),
        )
    })?;
    Ok(())
}
/// Stores `block_id` as the id of the last block currently held in the DB.
pub fn put_meta_last_block_in_db(&self, block_id: u64) -> DbResult<()> {
    let cf_meta = self.meta_column();
    let key = borsh::to_vec(&DB_META_LAST_BLOCK_IN_DB_KEY).map_err(|err| {
        DbError::borsh_cast_message(
            err,
            Some("Failed to serialize DB_META_LAST_BLOCK_IN_DB_KEY".to_owned()),
        )
    })?;
    let value = borsh::to_vec(&block_id).map_err(|err| {
        DbError::borsh_cast_message(
            err,
            Some("Failed to serialize last block id".to_owned()),
        )
    })?;
    self.db
        .put_cf(&cf_meta, key, value)
        .map_err(|rerr| DbError::rocksdb_cast_message(rerr, None))?;
    Ok(())
}
fn put_meta_last_block_in_db_batch(
&self,
block_id: u64,
batch: &mut WriteBatch,
) -> DbResult<()> {
let cf_meta = self.meta_column();
batch.put_cf(
&cf_meta,
borsh::to_vec(&DB_META_LAST_BLOCK_IN_DB_KEY).map_err(|err| {
DbError::borsh_cast_message(
err,
Some("Failed to serialize DB_META_LAST_BLOCK_IN_DB_KEY".to_owned()),
)
})?,
borsh::to_vec(&block_id).map_err(|err| {
DbError::borsh_cast_message(
err,
Some("Failed to serialize last block id".to_owned()),
)
})?,
);
Ok(())
}
pub fn put_meta_last_finalized_block_id(&self, block_id: Option<u64>) -> DbResult<()> {
let cf_meta = self.meta_column();
self.db
.put_cf(
&cf_meta,
borsh::to_vec(&DB_META_LAST_FINALIZED_BLOCK_ID).map_err(|err| {
DbError::borsh_cast_message(
err,
Some("Failed to serialize DB_META_LAST_FINALIZED_BLOCK_ID".to_owned()),
)
})?,
borsh::to_vec(&block_id).map_err(|err| {
DbError::borsh_cast_message(
err,
Some("Failed to serialize last block id".to_owned()),
)
})?,
)
.map_err(|rerr| DbError::rocksdb_cast_message(rerr, None))?;
Ok(())
}
pub fn put_meta_is_first_block_set(&self) -> DbResult<()> {
let cf_meta = self.meta_column();
self.db
.put_cf(
&cf_meta,
borsh::to_vec(&DB_META_FIRST_BLOCK_SET_KEY).map_err(|err| {
DbError::borsh_cast_message(
err,
Some("Failed to serialize DB_META_FIRST_BLOCK_SET_KEY".to_owned()),
)
})?,
[1_u8; 1],
)
.map_err(|rerr| DbError::rocksdb_cast_message(rerr, None))?;
Ok(())
}
fn put_meta_latest_block_meta(&self, block_meta: &BlockMeta) -> DbResult<()> {
let cf_meta = self.meta_column();
self.db
.put_cf(
&cf_meta,
borsh::to_vec(&DB_META_LATEST_BLOCK_META_KEY).map_err(|err| {
DbError::borsh_cast_message(
err,
Some("Failed to serialize DB_META_LATEST_BLOCK_META_KEY".to_owned()),
)
})?,
borsh::to_vec(&block_meta).map_err(|err| {
DbError::borsh_cast_message(
err,
Some("Failed to serialize latest block meta".to_owned()),
)
})?,
)
.map_err(|rerr| DbError::rocksdb_cast_message(rerr, None))?;
Ok(())
}
fn put_meta_latest_block_meta_batch(
&self,
block_meta: &BlockMeta,
batch: &mut WriteBatch,
) -> DbResult<()> {
let cf_meta = self.meta_column();
batch.put_cf(
&cf_meta,
borsh::to_vec(&DB_META_LATEST_BLOCK_META_KEY).map_err(|err| {
DbError::borsh_cast_message(
err,
Some("Failed to serialize DB_META_LATEST_BLOCK_META_KEY".to_owned()),
)
})?,
borsh::to_vec(&block_meta).map_err(|err| {
DbError::borsh_cast_message(
err,
Some("Failed to serialize latest block meta".to_owned()),
)
})?,
);
Ok(())
}
pub fn latest_block_meta(&self) -> DbResult<BlockMeta> {
let cf_meta = self.meta_column();
let res = self
.db
.get_cf(
&cf_meta,
borsh::to_vec(&DB_META_LATEST_BLOCK_META_KEY).map_err(|err| {
DbError::borsh_cast_message(
err,
Some("Failed to serialize DB_META_LATEST_BLOCK_META_KEY".to_owned()),
)
})?,
)
.map_err(|rerr| DbError::rocksdb_cast_message(rerr, None))?;
if let Some(data) = res {
Ok(borsh::from_slice::<BlockMeta>(&data).map_err(|err| {
DbError::borsh_cast_message(
err,
Some("Failed to deserialize latest block meta".to_owned()),
)
})?)
} else {
Err(DbError::db_interaction_error(
"Latest block meta not found".to_owned(),
))
}
}
pub fn put_block(
&self,
block: &Block,
msg_id: MantleMsgId,
first: bool,
batch: &mut WriteBatch,
) -> DbResult<()> {
let cf_block = self.block_column();
if !first {
let last_curr_block = self.get_meta_last_block_in_db()?;
if block.header.block_id > last_curr_block {
self.put_meta_last_block_in_db_batch(block.header.block_id, batch)?;
self.put_meta_latest_block_meta_batch(
&BlockMeta {
id: block.header.block_id,
hash: block.header.hash,
msg_id,
},
batch,
)?;
}
}
batch.put_cf(
&cf_block,
borsh::to_vec(&block.header.block_id).map_err(|err| {
DbError::borsh_cast_message(err, Some("Failed to serialize block id".to_owned()))
})?,
borsh::to_vec(block).map_err(|err| {
DbError::borsh_cast_message(err, Some("Failed to serialize block data".to_owned()))
})?,
);
Ok(())
}
pub fn get_block(&self, block_id: u64) -> DbResult<Option<Block>> {
let cf_block = self.block_column();
let res = self
.db
.get_cf(
&cf_block,
borsh::to_vec(&block_id).map_err(|err| {
DbError::borsh_cast_message(
err,
Some("Failed to serialize block id".to_owned()),
)
})?,
)
.map_err(|rerr| DbError::rocksdb_cast_message(rerr, None))?;
if let Some(data) = res {
Ok(Some(borsh::from_slice::<Block>(&data).map_err(|serr| {
DbError::borsh_cast_message(
serr,
Some("Failed to deserialize block data".to_owned()),
)
})?))
} else {
Ok(None)
}
}
pub fn get_nssa_state(&self) -> DbResult<V03State> {
let cf_nssa_state = self.nssa_state_column();
let res = self
.db
.get_cf(
&cf_nssa_state,
borsh::to_vec(&DB_NSSA_STATE_KEY).map_err(|err| {
DbError::borsh_cast_message(
err,
Some("Failed to serialize block id".to_owned()),
)
})?,
)
.map_err(|rerr| DbError::rocksdb_cast_message(rerr, None))?;
if let Some(data) = res {
Ok(borsh::from_slice::<V03State>(&data).map_err(|serr| {
DbError::borsh_cast_message(
serr,
Some("Failed to deserialize block data".to_owned()),
)
})?)
} else {
Err(DbError::db_interaction_error(
"NSSA state not found".to_owned(),
))
}
}
pub fn delete_block(&self, block_id: u64) -> DbResult<()> {
let cf_block = self.block_column();
let key = borsh::to_vec(&block_id).map_err(|err| {
DbError::borsh_cast_message(err, Some("Failed to serialize block id".to_owned()))
})?;
if self
.db
.get_cf(&cf_block, &key)
.map_err(|rerr| DbError::rocksdb_cast_message(rerr, None))?
.is_none()
{
return Err(DbError::db_interaction_error(format!(
"Block with id {block_id} not found"
)));
}
self.db
.delete_cf(&cf_block, key)
.map_err(|rerr| DbError::rocksdb_cast_message(rerr, None))?;
Ok(())
}
pub fn mark_block_as_finalized(&self, block_id: u64) -> DbResult<()> {
let mut block = self.get_block(block_id)?.ok_or_else(|| {
DbError::db_interaction_error(format!("Block with id {block_id} not found"))
})?;
block.bedrock_status = BedrockStatus::Finalized;
let cf_block = self.block_column();
self.db
.put_cf(
&cf_block,
borsh::to_vec(&block_id).map_err(|err| {
DbError::borsh_cast_message(
err,
Some("Failed to serialize block id".to_owned()),
)
})?,
borsh::to_vec(&block).map_err(|err| {
DbError::borsh_cast_message(
err,
Some("Failed to serialize block data".to_owned()),
)
})?,
)
.map_err(|rerr| {
DbError::rocksdb_cast_message(
rerr,
Some(format!("Failed to mark block {block_id} as finalized")),
)
})?;
Ok(())
}
pub fn get_all_blocks(&self) -> impl Iterator<Item = DbResult<Block>> {
let cf_block = self.block_column();
self.db
.iterator_cf(&cf_block, rocksdb::IteratorMode::Start)
.map(|res| {
let (_key, value) = res.map_err(|rerr| {
DbError::rocksdb_cast_message(
rerr,
Some("Failed to get key value pair".to_owned()),
)
})?;
borsh::from_slice::<Block>(&value).map_err(|err| {
DbError::borsh_cast_message(
err,
Some("Failed to deserialize block data".to_owned()),
)
})
})
}
/// Atomically persists `block` and the post-execution NSSA `state` in a
/// single RocksDB write batch, so the stored block and state cannot
/// diverge if the process dies between the two writes.
///
/// # Errors
///
/// Returns a [`DbError`] if serialization or the batched write fails.
pub fn atomic_update(
    &self,
    block: &Block,
    msg_id: MantleMsgId,
    state: &V03State,
) -> DbResult<()> {
    let block_id = block.header.block_id;
    let mut batch = WriteBatch::default();
    // `first = false`: update last-block metadata when this block advances it.
    self.put_block(block, msg_id, false, &mut batch)?;
    self.put_nssa_state_in_db(state, &mut batch)?;
    self.db.write(batch).map_err(|rerr| {
        DbError::rocksdb_cast_message(
            rerr,
            // Fixed typo in the error message: "udpate" -> "update".
            Some(format!("Failed to update db with block {block_id}")),
        )
    })
}
}

View File

@ -0,0 +1,349 @@
use std::{path::Path, sync::Arc};
use common::block::{BedrockStatus, Block, BlockMeta, MantleMsgId};
use nssa::V03State;
use rocksdb::{
BoundColumnFamily, ColumnFamilyDescriptor, DBWithThreadMode, MultiThreaded, Options, WriteBatch,
};
use crate::{
CF_BLOCK_NAME, CF_META_NAME, DB_META_FIRST_BLOCK_IN_DB_KEY, DBIO, DbResult,
cells::shared_cells::{BlockCell, FirstBlockCell, FirstBlockSetCell, LastBlockCell},
error::DbError,
sequencer::sequencer_cells::{
LastFinalizedBlockIdCell, LatestBlockMetaCellOwned, LatestBlockMetaCellRef,
NSSAStateCellOwned, NSSAStateCellRef,
},
};
pub mod sequencer_cells;
/// Key base for storing metainformation about the last finalized block on Bedrock.
pub const DB_META_LAST_FINALIZED_BLOCK_ID: &str = "last_finalized_block_id";
/// Key base for storing metainformation about the latest block meta.
pub const DB_META_LATEST_BLOCK_META_KEY: &str = "latest_block_meta";
/// Key base for storing the NSSA state.
pub const DB_NSSA_STATE_KEY: &str = "nssa_state";
/// Name of state column family.
pub const CF_NSSA_STATE_NAME: &str = "cf_nssa_state";
/// RocksDB-backed storage handle for blocks, metadata and NSSA state.
pub struct RocksDBIO {
    // Underlying multi-threaded RocksDB handle; public so callers can issue
    // raw operations (e.g. committing a prepared `WriteBatch`).
    pub db: DBWithThreadMode<MultiThreaded>,
}
impl DBIO for RocksDBIO {
    // Exposes the raw handle so the blanket `DBIO` get/put helpers work.
    fn db(&self) -> &DBWithThreadMode<MultiThreaded> {
        &self.db
    }
}
impl RocksDBIO {
/// Opens the RocksDB instance at `path`, creating it (and its column
/// families) if missing, and seeds it with `genesis_block` on first use.
///
/// Initialization is detected via the "first block set" marker: when the
/// marker is absent, the genesis block, last-block id, last-finalized id
/// (None) and latest block meta are all written.
///
/// # Errors
///
/// Returns a [`DbError`] if the DB cannot be opened or any of the
/// initialization writes fail.
pub fn open_or_create(
    path: &Path,
    genesis_block: &Block,
    genesis_msg_id: MantleMsgId,
) -> DbResult<Self> {
    let mut cf_opts = Options::default();
    cf_opts.set_max_write_buffer_number(16);
    // ToDo: Add more column families for different data
    let cfb = ColumnFamilyDescriptor::new(CF_BLOCK_NAME, cf_opts.clone());
    let cfmeta = ColumnFamilyDescriptor::new(CF_META_NAME, cf_opts.clone());
    let cfstate = ColumnFamilyDescriptor::new(CF_NSSA_STATE_NAME, cf_opts.clone());
    let mut db_opts = Options::default();
    db_opts.create_missing_column_families(true);
    db_opts.create_if_missing(true);
    let db = DBWithThreadMode::<MultiThreaded>::open_cf_descriptors(
        &db_opts,
        path,
        vec![cfb, cfmeta, cfstate],
    )
    .map_err(|err| DbError::RocksDbError {
        error: err,
        additional_info: Some("Failed to open or create DB".to_owned()),
    })?;
    let dbio = Self { db };
    // First-run detection: marker absent => seed the DB with genesis data.
    let is_start_set = dbio.get_meta_is_first_block_set()?;
    if !is_start_set {
        let block_id = genesis_block.header.block_id;
        dbio.put_meta_first_block_in_db(genesis_block, genesis_msg_id)?;
        dbio.put_meta_is_first_block_set()?;
        dbio.put_meta_last_block_in_db(block_id)?;
        dbio.put_meta_last_finalized_block_id(None)?;
        dbio.put_meta_latest_block_meta(&BlockMeta {
            id: genesis_block.header.block_id,
            hash: genesis_block.header.hash,
            msg_id: genesis_msg_id,
        })?;
    }
    Ok(dbio)
}
/// Destroys the RocksDB instance at `path`, removing all stored data.
///
/// # Errors
///
/// Returns a [`DbError`] if the underlying RocksDB destroy call fails.
pub fn destroy(path: &Path) -> DbResult<()> {
    // Column family descriptors are not needed to destroy a database; the
    // former unused `_cfb`/`_cfmeta`/`_cfstate` locals were dead code.
    let mut db_opts = Options::default();
    db_opts.create_missing_column_families(true);
    db_opts.create_if_missing(true);
    DBWithThreadMode::<MultiThreaded>::destroy(&db_opts, path)
        .map_err(|rerr| DbError::rocksdb_cast_message(rerr, None))
}
// Columns
pub fn meta_column(&self) -> Arc<BoundColumnFamily<'_>> {
self.db
.cf_handle(CF_META_NAME)
.expect("Meta column should exist")
}
pub fn block_column(&self) -> Arc<BoundColumnFamily<'_>> {
self.db
.cf_handle(CF_BLOCK_NAME)
.expect("Block column should exist")
}
pub fn nssa_state_column(&self) -> Arc<BoundColumnFamily<'_>> {
self.db
.cf_handle(CF_NSSA_STATE_NAME)
.expect("State should exist")
}
// Meta
pub fn get_meta_first_block_in_db(&self) -> DbResult<u64> {
self.get::<FirstBlockCell>(()).map(|cell| cell.0)
}
pub fn get_meta_last_block_in_db(&self) -> DbResult<u64> {
self.get::<LastBlockCell>(()).map(|cell| cell.0)
}
pub fn get_meta_is_first_block_set(&self) -> DbResult<bool> {
Ok(self.get_opt::<FirstBlockSetCell>(())?.is_some())
}
pub fn put_nssa_state_in_db(&self, state: &V03State, batch: &mut WriteBatch) -> DbResult<()> {
self.put_batch(&NSSAStateCellRef(state), (), batch)
}
pub fn put_meta_first_block_in_db(&self, block: &Block, msg_id: MantleMsgId) -> DbResult<()> {
let cf_meta = self.meta_column();
self.db
.put_cf(
&cf_meta,
borsh::to_vec(&DB_META_FIRST_BLOCK_IN_DB_KEY).map_err(|err| {
DbError::borsh_cast_message(
err,
Some("Failed to serialize DB_META_FIRST_BLOCK_IN_DB_KEY".to_owned()),
)
})?,
borsh::to_vec(&block.header.block_id).map_err(|err| {
DbError::borsh_cast_message(
err,
Some("Failed to serialize first block id".to_owned()),
)
})?,
)
.map_err(|rerr| DbError::rocksdb_cast_message(rerr, None))?;
let mut batch = WriteBatch::default();
self.put_block(block, msg_id, true, &mut batch)?;
self.db.write(batch).map_err(|rerr| {
DbError::rocksdb_cast_message(
rerr,
Some("Failed to write first block in db".to_owned()),
)
})?;
Ok(())
}
pub fn put_meta_last_block_in_db(&self, block_id: u64) -> DbResult<()> {
self.put(&LastBlockCell(block_id), ())
}
fn put_meta_last_block_in_db_batch(
&self,
block_id: u64,
batch: &mut WriteBatch,
) -> DbResult<()> {
self.put_batch(&LastBlockCell(block_id), (), batch)
}
pub fn put_meta_last_finalized_block_id(&self, block_id: Option<u64>) -> DbResult<()> {
self.put(&LastFinalizedBlockIdCell(block_id), ())
}
pub fn put_meta_is_first_block_set(&self) -> DbResult<()> {
self.put(&FirstBlockSetCell(true), ())
}
fn put_meta_latest_block_meta(&self, block_meta: &BlockMeta) -> DbResult<()> {
self.put(&LatestBlockMetaCellRef(block_meta), ())
}
fn put_meta_latest_block_meta_batch(
&self,
block_meta: &BlockMeta,
batch: &mut WriteBatch,
) -> DbResult<()> {
self.put_batch(&LatestBlockMetaCellRef(block_meta), (), batch)
}
pub fn latest_block_meta(&self) -> DbResult<BlockMeta> {
self.get::<LatestBlockMetaCellOwned>(()).map(|val| val.0)
}
/// Queues `block` into `batch` keyed by its block id, updating last-block
/// and latest-block-meta entries in the same batch when the block advances
/// the chain tip.
///
/// `first = true` skips the tip comparison entirely — used only on the
/// genesis path, where `get_meta_last_block_in_db` would fail.
pub fn put_block(
    &self,
    block: &Block,
    msg_id: MantleMsgId,
    first: bool,
    batch: &mut WriteBatch,
) -> DbResult<()> {
    let cf_block = self.block_column();
    if !first {
        // Only advance tip metadata when this block is strictly newer.
        let last_curr_block = self.get_meta_last_block_in_db()?;
        if block.header.block_id > last_curr_block {
            self.put_meta_last_block_in_db_batch(block.header.block_id, batch)?;
            self.put_meta_latest_block_meta_batch(
                &BlockMeta {
                    id: block.header.block_id,
                    hash: block.header.hash,
                    msg_id,
                },
                batch,
            )?;
        }
    }
    // The block itself: key = borsh(block_id), value = borsh(block).
    batch.put_cf(
        &cf_block,
        borsh::to_vec(&block.header.block_id).map_err(|err| {
            DbError::borsh_cast_message(err, Some("Failed to serialize block id".to_owned()))
        })?,
        borsh::to_vec(block).map_err(|err| {
            DbError::borsh_cast_message(err, Some("Failed to serialize block data".to_owned()))
        })?,
    );
    Ok(())
}
pub fn get_block(&self, block_id: u64) -> DbResult<Option<Block>> {
self.get_opt::<BlockCell>(block_id)
.map(|opt| opt.map(|val| val.0))
}
pub fn get_nssa_state(&self) -> DbResult<V03State> {
self.get::<NSSAStateCellOwned>(()).map(|val| val.0)
}
/// Deletes the block stored under `block_id`.
///
/// # Errors
///
/// Fails when no block with that id exists, or on serialization / RocksDB
/// errors.
pub fn delete_block(&self, block_id: u64) -> DbResult<()> {
    let cf_block = self.block_column();
    let key = borsh::to_vec(&block_id).map_err(|err| {
        DbError::borsh_cast_message(err, Some("Failed to serialize block id".to_owned()))
    })?;
    // Existence check first so callers get a descriptive error instead of a
    // silent no-op delete.
    let existing = self
        .db
        .get_cf(&cf_block, &key)
        .map_err(|rerr| DbError::rocksdb_cast_message(rerr, None))?;
    if existing.is_none() {
        return Err(DbError::db_interaction_error(format!(
            "Block with id {block_id} not found"
        )));
    }
    self.db
        .delete_cf(&cf_block, key)
        .map_err(|rerr| DbError::rocksdb_cast_message(rerr, None))
}
/// Marks the block with `block_id` as finalized by rewriting the stored
/// block with `BedrockStatus::Finalized`.
///
/// Errors when the block does not exist or when serialization / the RocksDB
/// write fails.
pub fn mark_block_as_finalized(&self, block_id: u64) -> DbResult<()> {
    let mut block = match self.get_block(block_id)? {
        Some(block) => block,
        None => {
            return Err(DbError::db_interaction_error(format!(
                "Block with id {block_id} not found"
            )));
        }
    };
    block.bedrock_status = BedrockStatus::Finalized;
    let key = borsh::to_vec(&block_id).map_err(|err| {
        DbError::borsh_cast_message(err, Some("Failed to serialize block id".to_owned()))
    })?;
    let value = borsh::to_vec(&block).map_err(|err| {
        DbError::borsh_cast_message(err, Some("Failed to serialize block data".to_owned()))
    })?;
    self.db
        .put_cf(&self.block_column(), key, value)
        .map_err(|rerr| {
            DbError::rocksdb_cast_message(
                rerr,
                Some(format!("Failed to mark block {block_id} as finalized")),
            )
        })?;
    Ok(())
}
/// Iterates over every stored block in the block column family, in key
/// order, deserializing each entry lazily as the iterator is consumed.
pub fn get_all_blocks(&self) -> impl Iterator<Item = DbResult<Block>> {
    let cf_block = self.block_column();
    let entries = self.db.iterator_cf(&cf_block, rocksdb::IteratorMode::Start);
    entries.map(|entry| {
        let (_key, bytes) = entry.map_err(|rerr| {
            DbError::rocksdb_cast_message(
                rerr,
                Some("Failed to get key value pair".to_owned()),
            )
        })?;
        borsh::from_slice::<Block>(&bytes).map_err(|err| {
            DbError::borsh_cast_message(
                err,
                Some("Failed to deserialize block data".to_owned()),
            )
        })
    })
}
/// Atomically writes `block` and the new NSSA `state` in a single RocksDB
/// write batch, so the stored block and state cannot diverge on a crash
/// between the two writes.
///
/// # Errors
/// Returns a `DbError` if building the batch entries or committing the
/// batch fails.
pub fn atomic_update(
    &self,
    block: &Block,
    msg_id: MantleMsgId,
    state: &V03State,
) -> DbResult<()> {
    let block_id = block.header.block_id;
    let mut batch = WriteBatch::default();
    // `first = false`: presumably atomic updates only happen after the
    // initial block is set — TODO confirm against callers.
    self.put_block(block, msg_id, false, &mut batch)?;
    self.put_nssa_state_in_db(state, &mut batch)?;
    self.db.write(batch).map_err(|rerr| {
        DbError::rocksdb_cast_message(
            rerr,
            // Fixed typo in the error message: "udpate" -> "update".
            Some(format!("Failed to update db with block {block_id}")),
        )
    })
}
}

View File

@ -0,0 +1,132 @@
use borsh::{BorshDeserialize, BorshSerialize};
use common::block::BlockMeta;
use nssa::V03State;
use crate::{
CF_META_NAME, DbResult,
cells::{SimpleReadableCell, SimpleStorableCell, SimpleWritableCell},
error::DbError,
sequencer::{
CF_NSSA_STATE_NAME, DB_META_LAST_FINALIZED_BLOCK_ID, DB_META_LATEST_BLOCK_META_KEY,
DB_NSSA_STATE_KEY,
},
};
/// Owned deserialization target for the persisted NSSA state value.
///
/// Read-side counterpart of [`NSSAStateCellRef`]: both use the same cell
/// name and column family, so reads and writes address the same key.
#[derive(BorshDeserialize)]
pub struct NSSAStateCellOwned(pub V03State);
impl SimpleStorableCell for NSSAStateCellOwned {
    type KeyParams = ();
    const CELL_NAME: &'static str = DB_NSSA_STATE_KEY;
    const CF_NAME: &'static str = CF_NSSA_STATE_NAME;
}
// Read-only cell: no `SimpleWritableCell` impl on the owned side.
impl SimpleReadableCell for NSSAStateCellOwned {}
/// Borrowing serialization source for the persisted NSSA state value.
///
/// Write-side counterpart of [`NSSAStateCellOwned`]; holding a reference
/// avoids cloning the state just to serialize it.
#[derive(BorshSerialize)]
pub struct NSSAStateCellRef<'state>(pub &'state V03State);
impl SimpleStorableCell for NSSAStateCellRef<'_> {
    type KeyParams = ();
    const CELL_NAME: &'static str = DB_NSSA_STATE_KEY;
    const CF_NAME: &'static str = CF_NSSA_STATE_NAME;
}
impl SimpleWritableCell for NSSAStateCellRef<'_> {
    /// Borsh-encodes the referenced state into the cell's stored bytes.
    fn value_constructor(&self) -> DbResult<Vec<u8>> {
        let to_db_err = |err| {
            DbError::borsh_cast_message(err, Some("Failed to serialize last state".to_owned()))
        };
        borsh::to_vec(&self).map_err(to_db_err)
    }
}
/// Cell storing the id of the most recently finalized block.
///
/// `None` means no block has been finalized yet. Unlike the state and
/// block-meta cells, this one is both readable and writable through the
/// same owned type.
#[derive(Debug, BorshSerialize, BorshDeserialize)]
pub struct LastFinalizedBlockIdCell(pub Option<u64>);
impl SimpleStorableCell for LastFinalizedBlockIdCell {
    type KeyParams = ();
    const CELL_NAME: &'static str = DB_META_LAST_FINALIZED_BLOCK_ID;
    const CF_NAME: &'static str = CF_META_NAME;
}
impl SimpleReadableCell for LastFinalizedBlockIdCell {}
impl SimpleWritableCell for LastFinalizedBlockIdCell {
    /// Borsh-encodes the wrapped optional block id into the stored bytes.
    fn value_constructor(&self) -> DbResult<Vec<u8>> {
        let to_db_err = |err| {
            DbError::borsh_cast_message(
                err,
                Some("Failed to serialize last finalized block id".to_owned()),
            )
        };
        borsh::to_vec(&self).map_err(to_db_err)
    }
}
/// Owned deserialization target for the latest-block metadata value.
///
/// Read-side counterpart of [`LatestBlockMetaCellRef`]: both use the same
/// cell name and column family, so reads and writes address the same key.
#[derive(BorshDeserialize)]
pub struct LatestBlockMetaCellOwned(pub BlockMeta);
impl SimpleStorableCell for LatestBlockMetaCellOwned {
    type KeyParams = ();
    const CELL_NAME: &'static str = DB_META_LATEST_BLOCK_META_KEY;
    const CF_NAME: &'static str = CF_META_NAME;
}
// Read-only cell: no `SimpleWritableCell` impl on the owned side.
impl SimpleReadableCell for LatestBlockMetaCellOwned {}
/// Borrowing serialization source for the latest-block metadata value.
///
/// Write-side counterpart of [`LatestBlockMetaCellOwned`]; holding a
/// reference avoids cloning the metadata just to serialize it.
#[derive(BorshSerialize)]
pub struct LatestBlockMetaCellRef<'blockmeta>(pub &'blockmeta BlockMeta);
impl SimpleStorableCell for LatestBlockMetaCellRef<'_> {
    type KeyParams = ();
    const CELL_NAME: &'static str = DB_META_LATEST_BLOCK_META_KEY;
    const CF_NAME: &'static str = CF_META_NAME;
}
impl SimpleWritableCell for LatestBlockMetaCellRef<'_> {
    /// Borsh-encodes the referenced block metadata into the stored bytes.
    fn value_constructor(&self) -> DbResult<Vec<u8>> {
        let to_db_err = |err| {
            DbError::borsh_cast_message(err, Some("Failed to serialize last block meta".to_owned()))
        };
        borsh::to_vec(&self).map_err(to_db_err)
    }
}
#[cfg(test)]
mod uniform_tests {
    //! Guards the invariant that each Ref/Owned cell pair addresses the
    //! same storage location: a write through the Ref type must be readable
    //! through the matching Owned type, which requires identical cell name,
    //! column family, and key encoding.
    use crate::{
        cells::SimpleStorableCell as _,
        sequencer::sequencer_cells::{
            LatestBlockMetaCellOwned, LatestBlockMetaCellRef, NSSAStateCellOwned, NSSAStateCellRef,
        },
    };
    /// The NSSA-state Ref and Owned cells must resolve to the same key.
    #[test]
    fn state_ref_and_owned_is_aligned() {
        assert_eq!(NSSAStateCellRef::CELL_NAME, NSSAStateCellOwned::CELL_NAME);
        assert_eq!(NSSAStateCellRef::CF_NAME, NSSAStateCellOwned::CF_NAME);
        assert_eq!(
            NSSAStateCellRef::key_constructor(()).unwrap(),
            NSSAStateCellOwned::key_constructor(()).unwrap()
        );
    }
    /// The latest-block-meta Ref and Owned cells must resolve to the same key.
    #[test]
    fn block_meta_ref_and_owned_is_aligned() {
        assert_eq!(
            LatestBlockMetaCellRef::CELL_NAME,
            LatestBlockMetaCellOwned::CELL_NAME
        );
        assert_eq!(
            LatestBlockMetaCellRef::CF_NAME,
            LatestBlockMetaCellOwned::CF_NAME
        );
        assert_eq!(
            LatestBlockMetaCellRef::key_constructor(()).unwrap(),
            LatestBlockMetaCellOwned::key_constructor(()).unwrap()
        );
    }
}

View File

@ -9,5 +9,7 @@ workspace = true
[dependencies]
nssa_core.workspace = true
clock_core.workspace = true
risc0-zkvm.workspace = true
serde = { workspace = true, default-features = false }

View File

@ -6,6 +6,7 @@ fn main() {
let (
ProgramInput {
self_program_id,
caller_program_id,
pre_states,
instruction: balance_to_burn,
},
@ -22,6 +23,7 @@ fn main() {
ProgramOutput::new(
self_program_id,
caller_program_id,
instruction_words,
vec![pre],
vec![AccountPostState::new(account_post)],

View File

@ -14,6 +14,7 @@ fn main() {
let (
ProgramInput {
self_program_id,
caller_program_id,
pre_states,
instruction: (balance, auth_transfer_id, num_chain_calls, pda_seed),
},
@ -57,6 +58,7 @@ fn main() {
ProgramOutput::new(
self_program_id,
caller_program_id,
instruction_words,
vec![sender_pre.clone(), recipient_pre.clone()],
vec![

View File

@ -7,6 +7,7 @@ fn main() {
let (
ProgramInput {
self_program_id,
caller_program_id,
pre_states,
instruction: (data_opt, should_claim),
},
@ -36,6 +37,7 @@ fn main() {
ProgramOutput::new(
self_program_id,
caller_program_id,
instruction_words,
vec![pre],
vec![post_state],

View File

@ -6,6 +6,7 @@ fn main() {
let (
ProgramInput {
self_program_id,
caller_program_id,
pre_states,
instruction: (),
},
@ -20,6 +21,7 @@ fn main() {
ProgramOutput::new(
self_program_id,
caller_program_id,
instruction_words,
vec![pre],
vec![account_post],

View File

@ -0,0 +1,46 @@
use nssa_core::{
Timestamp,
program::{
AccountPostState, ChainedCall, ProgramId, ProgramInput, ProgramOutput, read_nssa_inputs,
},
};
use risc0_zkvm::serde::to_vec;
// Instruction payload: the clock program to chain-call and the timestamp
// to forward to it.
type Instruction = (ProgramId, Timestamp); // (clock_program_id, timestamp)
/// A program that chain-calls the clock program with the clock accounts it received as pre-states.
/// Used in tests to verify that user transactions cannot modify clock accounts, even indirectly
/// via chain calls.
fn main() {
    let (
        ProgramInput {
            self_program_id,
            caller_program_id,
            pre_states,
            instruction: (clock_program_id, timestamp),
        },
        instruction_words,
    ) = read_nssa_inputs::<Instruction>();
    // Echo every pre-state account back unchanged: this program itself
    // mutates nothing, so any rejection must come from the chained call.
    let mut post_states = Vec::with_capacity(pre_states.len());
    for pre in &pre_states {
        post_states.push(AccountPostState::new(pre.account.clone()));
    }
    // Forward all received accounts to the clock program, attempting to set
    // the given timestamp via a chained call.
    let chained_call = ChainedCall {
        program_id: clock_program_id,
        instruction_data: to_vec(&timestamp).unwrap(),
        pre_states: pre_states.clone(),
        pda_seeds: vec![],
    };
    let output = ProgramOutput::new(
        self_program_id,
        caller_program_id,
        instruction_words,
        pre_states,
        post_states,
    );
    output.with_chained_calls(vec![chained_call]).write();
}

Some files were not shown because too many files have changed in this diff Show More