Merge branch 'main' into schouhy/fix-slow-tests-because-of-program-id-computation

This commit is contained in:
Sergio Chouhy 2026-04-07 17:39:01 -03:00
commit 99bedb970d
108 changed files with 3572 additions and 1653 deletions

14
Cargo.lock generated
View File

@ -1462,6 +1462,14 @@ version = "1.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3a822ea5bc7590f9d40f1ba12c0dc3c2760f3482c6984db1573ad11031420831"
[[package]]
name = "clock_core"
version = "0.1.0"
dependencies = [
"borsh",
"nssa_core",
]
[[package]]
name = "cobs"
version = "0.3.0"
@ -1511,6 +1519,7 @@ dependencies = [
"anyhow",
"base64 0.22.1",
"borsh",
"clock_core",
"hex",
"log",
"logos-blockchain-common-http-client",
@ -5259,6 +5268,7 @@ version = "0.1.0"
dependencies = [
"anyhow",
"borsh",
"clock_core",
"env_logger",
"hex",
"hex-literal 1.1.0",
@ -5897,6 +5907,7 @@ dependencies = [
"amm_program",
"ata_core",
"ata_program",
"clock_core",
"nssa_core",
"risc0-zkvm",
"serde",
@ -7151,6 +7162,7 @@ dependencies = [
"serde_json",
"storage",
"tempfile",
"test_program_methods",
"testnet_initial_state",
"tokio",
"url",
@ -7831,8 +7843,10 @@ dependencies = [
name = "test_programs"
version = "0.1.0"
dependencies = [
"clock_core",
"nssa_core",
"risc0-zkvm",
"serde",
]
[[package]]

View File

@ -15,6 +15,7 @@ members = [
"nssa/core",
"programs/amm/core",
"programs/amm",
"programs/clock/core",
"programs/token/core",
"programs/token",
"programs/associated_token_account/core",
@ -56,6 +57,7 @@ indexer_service_protocol = { path = "indexer/service/protocol" }
indexer_service_rpc = { path = "indexer/service/rpc" }
wallet = { path = "wallet" }
wallet-ffi = { path = "wallet-ffi", default-features = false }
clock_core = { path = "programs/clock/core" }
token_core = { path = "programs/token/core" }
token_program = { path = "programs/token" }
amm_core = { path = "programs/amm/core" }

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

View File

@ -10,6 +10,7 @@ workspace = true
[dependencies]
nssa.workspace = true
nssa_core.workspace = true
clock_core.workspace = true
anyhow.workspace = true
thiserror.workspace = true

View File

@ -1,10 +1,10 @@
use borsh::{BorshDeserialize, BorshSerialize};
use nssa_core::{BlockId, Timestamp};
use nssa_core::BlockId;
pub use nssa_core::Timestamp;
use serde::{Deserialize, Serialize};
use sha2::{Digest as _, Sha256, digest::FixedOutput as _};
use crate::{HashType, transaction::NSSATransaction};
pub type MantleMsgId = [u8; 32];
pub type BlockHash = HashType;

View File

@ -1,6 +1,6 @@
use borsh::{BorshDeserialize, BorshSerialize};
use log::warn;
use nssa::{AccountId, V03State};
use nssa::{AccountId, V03State, ValidatedStateDiff};
use nssa_core::{BlockId, Timestamp};
use serde::{Deserialize, Serialize};
@ -66,21 +66,53 @@ impl NSSATransaction {
}
}
/// Validates the transaction against the current state and returns the resulting diff
/// without applying it. Rejects transactions that modify clock system accounts.
pub fn validate_on_state(
&self,
state: &V03State,
block_id: BlockId,
timestamp: Timestamp,
) -> Result<ValidatedStateDiff, nssa::error::NssaError> {
let diff = match self {
Self::Public(tx) => {
ValidatedStateDiff::from_public_transaction(tx, state, block_id, timestamp)
}
Self::PrivacyPreserving(tx) => ValidatedStateDiff::from_privacy_preserving_transaction(
tx, state, block_id, timestamp,
),
Self::ProgramDeployment(tx) => {
ValidatedStateDiff::from_program_deployment_transaction(tx, state)
}
}?;
let public_diff = diff.public_diff();
let touches_clock = nssa::CLOCK_PROGRAM_ACCOUNT_IDS.iter().any(|id| {
public_diff
.get(id)
.is_some_and(|post| *post != state.get_account_by_id(*id))
});
if touches_clock {
return Err(nssa::error::NssaError::InvalidInput(
"Transaction modifies system clock accounts".into(),
));
}
Ok(diff)
}
/// Validates the transaction against the current state, rejects modifications to clock
/// system accounts, and applies the resulting diff to the state.
pub fn execute_check_on_state(
self,
state: &mut V03State,
block_id: BlockId,
timestamp: Timestamp,
) -> Result<Self, nssa::error::NssaError> {
match &self {
Self::Public(tx) => state.transition_from_public_transaction(tx, block_id, timestamp),
Self::PrivacyPreserving(tx) => {
state.transition_from_privacy_preserving_transaction(tx, block_id, timestamp)
}
Self::ProgramDeployment(tx) => state.transition_from_program_deployment_transaction(tx),
}
.inspect_err(|err| warn!("Error at transition {err:#?}"))?;
let diff = self
.validate_on_state(state, block_id, timestamp)
.inspect_err(|err| warn!("Error at transition {err:#?}"))?;
state.apply_state_diff(diff);
Ok(self)
}
}
@ -121,3 +153,20 @@ pub enum TransactionMalformationError {
#[error("Transaction size {size} exceeds maximum allowed size of {max} bytes")]
TransactionTooLarge { size: usize, max: usize },
}
/// Returns the canonical Clock Program invocation transaction for the given block timestamp.
/// Every valid block must end with exactly one occurrence of this transaction.
#[must_use]
pub fn clock_invocation(timestamp: clock_core::Instruction) -> nssa::PublicTransaction {
let message = nssa::public_transaction::Message::try_new(
nssa::program::Program::clock().id(),
clock_core::CLOCK_PROGRAM_ACCOUNT_IDS.to_vec(),
vec![],
timestamp,
)
.expect("Clock invocation message should always be constructable");
nssa::PublicTransaction::new(
message,
nssa::public_transaction::WitnessSet::from_raw_parts(vec![]),
)
}

View File

@ -20,6 +20,7 @@ fn main() {
let (
ProgramInput {
self_program_id,
caller_program_id,
pre_states,
instruction: greeting,
},
@ -53,6 +54,7 @@ fn main() {
// called to commit the output.
ProgramOutput::new(
self_program_id,
caller_program_id,
instruction_data,
vec![pre_state],
vec![post_state],

View File

@ -20,6 +20,7 @@ fn main() {
let (
ProgramInput {
self_program_id,
caller_program_id,
pre_states,
instruction: greeting,
},
@ -60,6 +61,7 @@ fn main() {
// called to commit the output.
ProgramOutput::new(
self_program_id,
caller_program_id,
instruction_data,
vec![pre_state],
vec![post_state],

View File

@ -67,6 +67,7 @@ fn main() {
let (
ProgramInput {
self_program_id,
caller_program_id,
pre_states,
instruction: (function_id, data),
},
@ -86,5 +87,12 @@ fn main() {
// WARNING: constructing a `ProgramOutput` has no effect on its own. `.write()` must be
// called to commit the output.
ProgramOutput::new(self_program_id, instruction_words, pre_states, post_states).write();
ProgramOutput::new(
self_program_id,
caller_program_id,
instruction_words,
pre_states,
post_states,
)
.write();
}

View File

@ -28,6 +28,7 @@ fn main() {
let (
ProgramInput {
self_program_id,
caller_program_id,
pre_states,
instruction: (),
},
@ -58,6 +59,7 @@ fn main() {
// called to commit the output.
ProgramOutput::new(
self_program_id,
caller_program_id,
instruction_data,
vec![pre_state],
vec![post_state],

View File

@ -34,6 +34,7 @@ fn main() {
let (
ProgramInput {
self_program_id,
caller_program_id,
pre_states,
instruction: (),
},
@ -71,6 +72,7 @@ fn main() {
// called to commit the output.
ProgramOutput::new(
self_program_id,
caller_program_id,
instruction_data,
vec![pre_state],
vec![post_state],

View File

@ -4,7 +4,7 @@ use anyhow::Result;
use bedrock_client::HeaderId;
use common::{
block::{BedrockStatus, Block},
transaction::NSSATransaction,
transaction::{NSSATransaction, clock_invocation},
};
use nssa::{Account, AccountId, V03State};
use nssa_core::BlockId;
@ -122,7 +122,18 @@ impl IndexerStore {
{
let mut state_guard = self.current_state.write().await;
for transaction in &block.body.transactions {
let (clock_tx, user_txs) = block
.body
.transactions
.split_last()
.ok_or_else(|| anyhow::anyhow!("Block has no transactions"))?;
anyhow::ensure!(
*clock_tx == NSSATransaction::Public(clock_invocation(block.header.timestamp)),
"Last transaction in block must be the clock invocation for the block timestamp"
);
for transaction in user_txs {
transaction
.clone()
.transaction_stateless_check()?
@ -132,6 +143,16 @@ impl IndexerStore {
block.header.timestamp,
)?;
}
// Apply the clock invocation directly (it is expected to modify clock accounts).
let NSSATransaction::Public(clock_public_tx) = clock_tx else {
anyhow::bail!("Clock invocation must be a public transaction");
};
state_guard.transition_from_public_transaction(
clock_public_tx,
block.header.block_id,
block.header.timestamp,
)?;
}
// ToDo: Currently we are fetching only finalized blocks
@ -177,7 +198,7 @@ mod tests {
let storage = IndexerStore::open_db_with_genesis(
home.as_ref(),
&genesis_block(),
&nssa::V03State::new_with_genesis_accounts(&[(acc1(), 10000), (acc2(), 20000)], &[]),
&nssa::V03State::new_with_genesis_accounts(&[(acc1(), 10000), (acc2(), 20000)], &[], 0),
)
.unwrap();
@ -195,7 +216,7 @@ mod tests {
let storage = IndexerStore::open_db_with_genesis(
home.as_ref(),
&genesis_block(),
&nssa::V03State::new_with_genesis_accounts(&[(acc1(), 10000), (acc2(), 20000)], &[]),
&nssa::V03State::new_with_genesis_accounts(&[(acc1(), 10000), (acc2(), 20000)], &[], 0),
)
.unwrap();
@ -213,11 +234,14 @@ mod tests {
10,
&sign_key,
);
let block_id = u64::try_from(i).unwrap();
let block_timestamp = block_id.saturating_mul(100);
let clock_tx = NSSATransaction::Public(clock_invocation(block_timestamp));
let next_block = common::test_utils::produce_dummy_block(
u64::try_from(i).unwrap(),
block_id,
Some(prev_hash),
vec![tx],
vec![tx, clock_tx],
);
prev_hash = next_block.header.hash;

View File

@ -92,6 +92,7 @@ impl IndexerCore {
let mut state = V03State::new_with_genesis_accounts(
&init_accs.unwrap_or_default(),
&initial_commitments.unwrap_or_default(),
genesis_block.header.timestamp,
);
// ToDo: Remove after testnet

View File

@ -138,7 +138,7 @@ pub struct Account {
}
pub type BlockId = u64;
pub type TimeStamp = u64;
pub type Timestamp = u64;
#[derive(Debug, Clone, PartialEq, Eq, Hash, Serialize, Deserialize, JsonSchema)]
pub struct Block {
@ -153,7 +153,7 @@ pub struct BlockHeader {
pub block_id: BlockId,
pub prev_block_hash: HashType,
pub hash: HashType,
pub timestamp: TimeStamp,
pub timestamp: Timestamp,
pub signature: Signature,
}

View File

@ -9,6 +9,7 @@ workspace = true
[dependencies]
nssa_core = { workspace = true, features = ["host"] }
clock_core.workspace = true
anyhow.workspace = true
thiserror.workspace = true

View File

@ -55,7 +55,7 @@ pub type NullifierSecretKey = [u8; 32];
#[derive(Serialize, Deserialize, BorshSerialize, BorshDeserialize)]
#[cfg_attr(
any(feature = "host", test),
derive(Clone, PartialEq, Eq, PartialOrd, Ord, Hash)
derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)
)]
pub struct Nullifier(pub(super) [u8; 32]);

View File

@ -17,6 +17,7 @@ pub type ProgramId = [u32; 8];
pub type InstructionData = Vec<u32>;
pub struct ProgramInput<T> {
pub self_program_id: ProgramId,
pub caller_program_id: Option<ProgramId>,
pub pre_states: Vec<AccountWithMetadata>,
pub instruction: T,
}
@ -284,6 +285,9 @@ pub struct InvalidWindow;
pub struct ProgramOutput {
/// The program ID of the program that produced this output.
pub self_program_id: ProgramId,
/// The program ID of the caller that invoked this program via a chained call,
/// or `None` if this is a top-level call.
pub caller_program_id: Option<ProgramId>,
/// The instruction data the program received to produce this output.
pub instruction_data: InstructionData,
/// The account pre states the program received to produce this output.
@ -301,12 +305,14 @@ pub struct ProgramOutput {
impl ProgramOutput {
pub const fn new(
self_program_id: ProgramId,
caller_program_id: Option<ProgramId>,
instruction_data: InstructionData,
pre_states: Vec<AccountWithMetadata>,
post_states: Vec<AccountPostState>,
) -> Self {
Self {
self_program_id,
caller_program_id,
instruction_data,
pre_states,
post_states,
@ -421,12 +427,14 @@ pub fn compute_authorized_pdas(
#[must_use]
pub fn read_nssa_inputs<T: DeserializeOwned>() -> (ProgramInput<T>, InstructionData) {
let self_program_id: ProgramId = env::read();
let caller_program_id: Option<ProgramId> = env::read();
let pre_states: Vec<AccountWithMetadata> = env::read();
let instruction_words: InstructionData = env::read();
let instruction = T::deserialize(&mut Deserializer::new(instruction_words.as_ref())).unwrap();
(
ProgramInput {
self_program_id,
caller_program_id,
pre_states,
instruction,
},
@ -627,7 +635,7 @@ mod tests {
#[test]
fn program_output_try_with_block_validity_window_range() {
let output = ProgramOutput::new(DEFAULT_PROGRAM_ID, vec![], vec![], vec![])
let output = ProgramOutput::new(DEFAULT_PROGRAM_ID, None, vec![], vec![], vec![])
.try_with_block_validity_window(10_u64..100)
.unwrap();
assert_eq!(output.block_validity_window.start(), Some(10));
@ -636,7 +644,7 @@ mod tests {
#[test]
fn program_output_with_block_validity_window_range_from() {
let output = ProgramOutput::new(DEFAULT_PROGRAM_ID, vec![], vec![], vec![])
let output = ProgramOutput::new(DEFAULT_PROGRAM_ID, None, vec![], vec![], vec![])
.with_block_validity_window(10_u64..);
assert_eq!(output.block_validity_window.start(), Some(10));
assert_eq!(output.block_validity_window.end(), None);
@ -644,7 +652,7 @@ mod tests {
#[test]
fn program_output_with_block_validity_window_range_to() {
let output = ProgramOutput::new(DEFAULT_PROGRAM_ID, vec![], vec![], vec![])
let output = ProgramOutput::new(DEFAULT_PROGRAM_ID, None, vec![], vec![], vec![])
.with_block_validity_window(..100_u64);
assert_eq!(output.block_validity_window.start(), None);
assert_eq!(output.block_validity_window.end(), Some(100));
@ -652,7 +660,7 @@ mod tests {
#[test]
fn program_output_try_with_block_validity_window_empty_range_fails() {
let result = ProgramOutput::new(DEFAULT_PROGRAM_ID, vec![], vec![], vec![])
let result = ProgramOutput::new(DEFAULT_PROGRAM_ID, None, vec![], vec![], vec![])
.try_with_block_validity_window(5_u64..5);
assert!(result.is_err());
}

View File

@ -16,7 +16,11 @@ pub use program_deployment_transaction::ProgramDeploymentTransaction;
pub use program_methods::PRIVACY_PRESERVING_CIRCUIT_ID;
pub use public_transaction::PublicTransaction;
pub use signature::{PrivateKey, PublicKey, Signature};
pub use state::V03State;
pub use state::{
CLOCK_01_PROGRAM_ACCOUNT_ID, CLOCK_10_PROGRAM_ACCOUNT_ID, CLOCK_50_PROGRAM_ACCOUNT_ID,
CLOCK_PROGRAM_ACCOUNT_IDS, V03State,
};
pub use validated_state_diff::ValidatedStateDiff;
pub mod encoding;
pub mod error;
@ -27,6 +31,7 @@ pub mod program_deployment_transaction;
pub mod public_transaction;
mod signature;
mod state;
mod validated_state_diff;
pub mod program_methods {
include!(concat!(env!("OUT_DIR"), "/program_methods/mod.rs"));

View File

@ -87,15 +87,16 @@ pub fn execute_and_prove(
pda_seeds: vec![],
};
let mut chained_calls = VecDeque::from_iter([(initial_call, initial_program)]);
let mut chained_calls = VecDeque::from_iter([(initial_call, initial_program, None)]);
let mut chain_calls_counter = 0;
while let Some((chained_call, program)) = chained_calls.pop_front() {
while let Some((chained_call, program, caller_program_id)) = chained_calls.pop_front() {
if chain_calls_counter >= MAX_NUMBER_CHAINED_CALLS {
return Err(NssaError::MaxChainedCallsDepthExceeded);
}
let inner_receipt = execute_and_prove_program(
program,
caller_program_id,
&chained_call.pre_states,
&chained_call.instruction_data,
)?;
@ -115,7 +116,7 @@ pub fn execute_and_prove(
let next_program = dependencies
.get(&new_call.program_id)
.ok_or(NssaError::InvalidProgramBehavior)?;
chained_calls.push_front((new_call, next_program));
chained_calls.push_front((new_call, next_program, Some(chained_call.program_id)));
}
chain_calls_counter = chain_calls_counter
@ -153,12 +154,19 @@ pub fn execute_and_prove(
fn execute_and_prove_program(
program: &Program,
caller_program_id: Option<ProgramId>,
pre_states: &[AccountWithMetadata],
instruction_data: &InstructionData,
) -> Result<Receipt, NssaError> {
// Write inputs to the program
let mut env_builder = ExecutorEnv::builder();
Program::write_inputs(program.id(), pre_states, instruction_data, &mut env_builder)?;
Program::write_inputs(
program.id(),
caller_program_id,
pre_states,
instruction_data,
&mut env_builder,
)?;
let env = env_builder.build().unwrap();
// Prove the program

View File

@ -1,19 +1,10 @@
use std::{
collections::{HashMap, HashSet},
hash::Hash,
};
use std::collections::HashSet;
use borsh::{BorshDeserialize, BorshSerialize};
use nssa_core::{
BlockId, PrivacyPreservingCircuitOutput, Timestamp,
account::{Account, AccountWithMetadata},
};
use nssa_core::account::AccountId;
use sha2::{Digest as _, digest::FixedOutput as _};
use super::{message::Message, witness_set::WitnessSet};
use crate::{
AccountId, V03State, error::NssaError, privacy_preserving_transaction::circuit::Proof,
};
#[derive(Debug, Clone, PartialEq, Eq, BorshSerialize, BorshDeserialize)]
pub struct PrivacyPreservingTransaction {
@ -30,108 +21,6 @@ impl PrivacyPreservingTransaction {
}
}
pub(crate) fn validate_and_produce_public_state_diff(
&self,
state: &V03State,
block_id: BlockId,
timestamp: Timestamp,
) -> Result<HashMap<AccountId, Account>, NssaError> {
let message = &self.message;
let witness_set = &self.witness_set;
// 1. Commitments or nullifiers are non empty
if message.new_commitments.is_empty() && message.new_nullifiers.is_empty() {
return Err(NssaError::InvalidInput(
"Empty commitments and empty nullifiers found in message".into(),
));
}
// 2. Check there are no duplicate account_ids in the public_account_ids list.
if n_unique(&message.public_account_ids) != message.public_account_ids.len() {
return Err(NssaError::InvalidInput(
"Duplicate account_ids found in message".into(),
));
}
// Check there are no duplicate nullifiers in the new_nullifiers list
if n_unique(&message.new_nullifiers) != message.new_nullifiers.len() {
return Err(NssaError::InvalidInput(
"Duplicate nullifiers found in message".into(),
));
}
// Check there are no duplicate commitments in the new_commitments list
if n_unique(&message.new_commitments) != message.new_commitments.len() {
return Err(NssaError::InvalidInput(
"Duplicate commitments found in message".into(),
));
}
// 3. Nonce checks and Valid signatures
// Check exactly one nonce is provided for each signature
if message.nonces.len() != witness_set.signatures_and_public_keys.len() {
return Err(NssaError::InvalidInput(
"Mismatch between number of nonces and signatures/public keys".into(),
));
}
// Check the signatures are valid
if !witness_set.signatures_are_valid_for(message) {
return Err(NssaError::InvalidInput(
"Invalid signature for given message and public key".into(),
));
}
let signer_account_ids = self.signer_account_ids();
// Check nonces corresponds to the current nonces on the public state.
for (account_id, nonce) in signer_account_ids.iter().zip(&message.nonces) {
let current_nonce = state.get_account_by_id(*account_id).nonce;
if current_nonce != *nonce {
return Err(NssaError::InvalidInput("Nonce mismatch".into()));
}
}
// Verify validity window
if !message.block_validity_window.is_valid_for(block_id)
|| !message.timestamp_validity_window.is_valid_for(timestamp)
{
return Err(NssaError::OutOfValidityWindow);
}
// Build pre_states for proof verification
let public_pre_states: Vec<_> = message
.public_account_ids
.iter()
.map(|account_id| {
AccountWithMetadata::new(
state.get_account_by_id(*account_id),
signer_account_ids.contains(account_id),
*account_id,
)
})
.collect();
// 4. Proof verification
check_privacy_preserving_circuit_proof_is_valid(
&witness_set.proof,
&public_pre_states,
message,
)?;
// 5. Commitment freshness
state.check_commitments_are_new(&message.new_commitments)?;
// 6. Nullifier uniqueness
state.check_nullifiers_are_valid(&message.new_nullifiers)?;
Ok(message
.public_account_ids
.iter()
.copied()
.zip(message.public_post_states.clone())
.collect())
}
#[must_use]
pub const fn message(&self) -> &Message {
&self.message
@ -170,36 +59,6 @@ impl PrivacyPreservingTransaction {
}
}
fn check_privacy_preserving_circuit_proof_is_valid(
proof: &Proof,
public_pre_states: &[AccountWithMetadata],
message: &Message,
) -> Result<(), NssaError> {
let output = PrivacyPreservingCircuitOutput {
public_pre_states: public_pre_states.to_vec(),
public_post_states: message.public_post_states.clone(),
ciphertexts: message
.encrypted_private_post_states
.iter()
.cloned()
.map(|value| value.ciphertext)
.collect(),
new_commitments: message.new_commitments.clone(),
new_nullifiers: message.new_nullifiers.clone(),
block_validity_window: message.block_validity_window,
timestamp_validity_window: message.timestamp_validity_window,
};
proof
.is_valid_for(&output)
.then_some(())
.ok_or(NssaError::InvalidPrivacyPreservingProof)
}
fn n_unique<T: Eq + Hash>(data: &[T]) -> usize {
let set: HashSet<&T> = data.iter().collect();
set.len()
}
#[cfg(test)]
mod tests {
use crate::{

View File

@ -10,8 +10,8 @@ use crate::{
error::NssaError,
program_methods::{
AMM_ELF, AMM_ID, ASSOCIATED_TOKEN_ACCOUNT_ELF, ASSOCIATED_TOKEN_ACCOUNT_ID,
AUTHENTICATED_TRANSFER_ELF, AUTHENTICATED_TRANSFER_ID, PINATA_ELF, PINATA_ID, TOKEN_ELF,
TOKEN_ID,
AUTHENTICATED_TRANSFER_ELF, AUTHENTICATED_TRANSFER_ID, CLOCK_ELF, CLOCK_ID, PINATA_ELF,
PINATA_ID, TOKEN_ELF, TOKEN_ID,
},
};
@ -54,13 +54,20 @@ impl Program {
pub(crate) fn execute(
&self,
caller_program_id: Option<ProgramId>,
pre_states: &[AccountWithMetadata],
instruction_data: &InstructionData,
) -> Result<ProgramOutput, NssaError> {
// Write inputs to the program
let mut env_builder = ExecutorEnv::builder();
env_builder.session_limit(Some(MAX_NUM_CYCLES_PUBLIC_EXECUTION));
Self::write_inputs(self.id, pre_states, instruction_data, &mut env_builder)?;
Self::write_inputs(
self.id,
caller_program_id,
pre_states,
instruction_data,
&mut env_builder,
)?;
let env = env_builder.build().unwrap();
// Execute the program (without proving)
@ -81,6 +88,7 @@ impl Program {
/// Writes inputs to `env_builder` in the order expected by the programs.
pub(crate) fn write_inputs(
program_id: ProgramId,
caller_program_id: Option<ProgramId>,
pre_states: &[AccountWithMetadata],
instruction_data: &[u32],
env_builder: &mut ExecutorEnvBuilder,
@ -88,6 +96,9 @@ impl Program {
env_builder
.write(&program_id)
.map_err(|e| NssaError::ProgramWriteInputFailed(e.to_string()))?;
env_builder
.write(&caller_program_id)
.map_err(|e| NssaError::ProgramWriteInputFailed(e.to_string()))?;
let pre_states = pre_states.to_vec();
env_builder
.write(&pre_states)
@ -122,6 +133,14 @@ impl Program {
}
}
#[must_use]
pub fn clock() -> Self {
Self {
id: CLOCK_ID,
elf: CLOCK_ELF.to_vec(),
}
}
#[must_use]
pub fn ata() -> Self {
Self {
@ -159,8 +178,8 @@ mod tests {
program::Program,
program_methods::{
AMM_ELF, AMM_ID, ASSOCIATED_TOKEN_ACCOUNT_ELF, ASSOCIATED_TOKEN_ACCOUNT_ID,
AUTHENTICATED_TRANSFER_ELF, AUTHENTICATED_TRANSFER_ID, PINATA_ELF, PINATA_ID,
PINATA_TOKEN_ELF, PINATA_TOKEN_ID, TOKEN_ELF, TOKEN_ID,
AUTHENTICATED_TRANSFER_ELF, AUTHENTICATED_TRANSFER_ID, CLOCK_ELF, CLOCK_ID, PINATA_ELF,
PINATA_ID, PINATA_TOKEN_ELF, PINATA_TOKEN_ID, TOKEN_ELF, TOKEN_ID,
},
};
@ -333,6 +352,46 @@ mod tests {
elf: VALIDITY_WINDOW_CHAIN_CALLER_ELF.to_vec(),
}
}
#[must_use]
pub fn flash_swap_initiator() -> Self {
use test_program_methods::FLASH_SWAP_INITIATOR_ELF;
Self::new(FLASH_SWAP_INITIATOR_ELF.to_vec())
.expect("flash_swap_initiator must be a valid Risc0 program")
}
#[must_use]
pub fn flash_swap_callback() -> Self {
use test_program_methods::FLASH_SWAP_CALLBACK_ELF;
Self::new(FLASH_SWAP_CALLBACK_ELF.to_vec())
.expect("flash_swap_callback must be a valid Risc0 program")
}
#[must_use]
pub fn malicious_self_program_id() -> Self {
use test_program_methods::MALICIOUS_SELF_PROGRAM_ID_ELF;
Self::new(MALICIOUS_SELF_PROGRAM_ID_ELF.to_vec())
.expect("malicious_self_program_id must be a valid Risc0 program")
}
#[must_use]
pub fn malicious_caller_program_id() -> Self {
use test_program_methods::MALICIOUS_CALLER_PROGRAM_ID_ELF;
Self::new(MALICIOUS_CALLER_PROGRAM_ID_ELF.to_vec())
.expect("malicious_caller_program_id must be a valid Risc0 program")
}
#[must_use]
pub fn time_locked_transfer() -> Self {
use test_program_methods::TIME_LOCKED_TRANSFER_ELF;
Self::new(TIME_LOCKED_TRANSFER_ELF.to_vec()).unwrap()
}
#[must_use]
pub fn pinata_cooldown() -> Self {
use test_program_methods::PINATA_COOLDOWN_ELF;
Self::new(PINATA_COOLDOWN_ELF.to_vec()).unwrap()
}
}
#[test]
@ -360,7 +419,7 @@ mod tests {
..Account::default()
};
let program_output = program
.execute(&[sender, recipient], &instruction_data)
.execute(None, &[sender, recipient], &instruction_data)
.unwrap();
let [sender_post, recipient_post] = program_output.post_states.try_into().unwrap();
@ -386,12 +445,13 @@ mod tests {
#[test]
fn builtin_program_ids_match_elfs() {
let cases: &[(&[u8], [u32; 8])] = &[
(AUTHENTICATED_TRANSFER_ELF, AUTHENTICATED_TRANSFER_ID),
(TOKEN_ELF, TOKEN_ID),
(AMM_ELF, AMM_ID),
(AUTHENTICATED_TRANSFER_ELF, AUTHENTICATED_TRANSFER_ID),
(ASSOCIATED_TOKEN_ACCOUNT_ELF, ASSOCIATED_TOKEN_ACCOUNT_ID),
(CLOCK_ELF, CLOCK_ID),
(PINATA_ELF, PINATA_ID),
(PINATA_TOKEN_ELF, PINATA_TOKEN_ID),
(TOKEN_ELF, TOKEN_ID),
];
for (elf, expected_id) in cases {
let program = Program::new(elf.to_vec()).unwrap();

View File

@ -2,9 +2,7 @@ use borsh::{BorshDeserialize, BorshSerialize};
use nssa_core::account::AccountId;
use sha2::{Digest as _, digest::FixedOutput as _};
use crate::{
V03State, error::NssaError, program::Program, program_deployment_transaction::message::Message,
};
use crate::program_deployment_transaction::message::Message;
#[derive(Debug, Clone, PartialEq, Eq, BorshSerialize, BorshDeserialize)]
pub struct ProgramDeploymentTransaction {
@ -22,19 +20,6 @@ impl ProgramDeploymentTransaction {
self.message
}
pub(crate) fn validate_and_produce_public_state_diff(
&self,
state: &V03State,
) -> Result<Program, NssaError> {
// TODO: remove clone
let program = Program::new(self.message.bytecode.clone())?;
if state.programs().contains_key(&program.id()) {
Err(NssaError::ProgramAlreadyExists)
} else {
Ok(program)
}
}
#[must_use]
pub fn hash(&self) -> [u8; 32] {
let bytes = self.to_bytes();

View File

@ -1,20 +1,10 @@
use std::collections::{HashMap, HashSet, VecDeque};
use std::collections::HashSet;
use borsh::{BorshDeserialize, BorshSerialize};
use log::debug;
use nssa_core::{
BlockId, Timestamp,
account::{Account, AccountId, AccountWithMetadata},
program::{ChainedCall, Claim, DEFAULT_PROGRAM_ID, validate_execution},
};
use nssa_core::account::AccountId;
use sha2::{Digest as _, digest::FixedOutput as _};
use crate::{
V03State, ensure,
error::NssaError,
public_transaction::{Message, WitnessSet},
state::MAX_NUMBER_CHAINED_CALLS,
};
use crate::public_transaction::{Message, WitnessSet};
#[derive(Debug, Clone, PartialEq, Eq, BorshSerialize, BorshDeserialize)]
pub struct PublicTransaction {
@ -67,217 +57,6 @@ impl PublicTransaction {
hasher.update(&bytes);
hasher.finalize_fixed().into()
}
pub(crate) fn validate_and_produce_public_state_diff(
&self,
state: &V03State,
block_id: BlockId,
timestamp: Timestamp,
) -> Result<HashMap<AccountId, Account>, NssaError> {
let message = self.message();
let witness_set = self.witness_set();
// All account_ids must be different
ensure!(
message.account_ids.iter().collect::<HashSet<_>>().len() == message.account_ids.len(),
NssaError::InvalidInput("Duplicate account_ids found in message".into(),)
);
// Check exactly one nonce is provided for each signature
ensure!(
message.nonces.len() == witness_set.signatures_and_public_keys.len(),
NssaError::InvalidInput(
"Mismatch between number of nonces and signatures/public keys".into(),
)
);
// Check the signatures are valid
ensure!(
witness_set.is_valid_for(message),
NssaError::InvalidInput("Invalid signature for given message and public key".into())
);
let signer_account_ids = self.signer_account_ids();
// Check nonces corresponds to the current nonces on the public state.
for (account_id, nonce) in signer_account_ids.iter().zip(&message.nonces) {
let current_nonce = state.get_account_by_id(*account_id).nonce;
ensure!(
current_nonce == *nonce,
NssaError::InvalidInput("Nonce mismatch".into())
);
}
// Build pre_states for execution
let input_pre_states: Vec<_> = message
.account_ids
.iter()
.map(|account_id| {
AccountWithMetadata::new(
state.get_account_by_id(*account_id),
signer_account_ids.contains(account_id),
*account_id,
)
})
.collect();
let mut state_diff: HashMap<AccountId, Account> = HashMap::new();
let initial_call = ChainedCall {
program_id: message.program_id,
instruction_data: message.instruction_data.clone(),
pre_states: input_pre_states,
pda_seeds: vec![],
};
let mut chained_calls = VecDeque::from_iter([(initial_call, None)]);
let mut chain_calls_counter = 0;
while let Some((chained_call, caller_program_id)) = chained_calls.pop_front() {
ensure!(
chain_calls_counter <= MAX_NUMBER_CHAINED_CALLS,
NssaError::MaxChainedCallsDepthExceeded
);
// Check that the `program_id` corresponds to a deployed program
let Some(program) = state.programs().get(&chained_call.program_id) else {
return Err(NssaError::InvalidInput("Unknown program".into()));
};
debug!(
"Program {:?} pre_states: {:?}, instruction_data: {:?}",
chained_call.program_id, chained_call.pre_states, chained_call.instruction_data
);
let mut program_output =
program.execute(&chained_call.pre_states, &chained_call.instruction_data)?;
debug!(
"Program {:?} output: {:?}",
chained_call.program_id, program_output
);
let authorized_pdas = nssa_core::program::compute_authorized_pdas(
caller_program_id,
&chained_call.pda_seeds,
);
let is_authorized = |account_id: &AccountId| {
signer_account_ids.contains(account_id) || authorized_pdas.contains(account_id)
};
for pre in &program_output.pre_states {
let account_id = pre.account_id;
// Check that the program output pre_states coincide with the values in the public
// state or with any modifications to those values during the chain of calls.
let expected_pre = state_diff
.get(&account_id)
.cloned()
.unwrap_or_else(|| state.get_account_by_id(account_id));
ensure!(
pre.account == expected_pre,
NssaError::InvalidProgramBehavior
);
// Check that authorization flags are consistent with the provided ones or
// authorized by program through the PDA mechanism
ensure!(
pre.is_authorized == is_authorized(&account_id),
NssaError::InvalidProgramBehavior
);
}
// Verify that the program output's self_program_id matches the expected program ID.
ensure!(
program_output.self_program_id == chained_call.program_id,
NssaError::InvalidProgramBehavior
);
// Verify execution corresponds to a well-behaved program.
// See the # Programs section for the definition of the `validate_execution` method.
ensure!(
validate_execution(
&program_output.pre_states,
&program_output.post_states,
chained_call.program_id,
),
NssaError::InvalidProgramBehavior
);
// Verify validity window
ensure!(
program_output.block_validity_window.is_valid_for(block_id)
&& program_output
.timestamp_validity_window
.is_valid_for(timestamp),
NssaError::OutOfValidityWindow
);
for (i, post) in program_output.post_states.iter_mut().enumerate() {
let Some(claim) = post.required_claim() else {
continue;
};
// The invoked program can only claim accounts with default program id.
ensure!(
post.account().program_owner == DEFAULT_PROGRAM_ID,
NssaError::InvalidProgramBehavior
);
let account_id = program_output.pre_states[i].account_id;
match claim {
Claim::Authorized => {
// The program can only claim accounts that were authorized by the signer.
ensure!(
is_authorized(&account_id),
NssaError::InvalidProgramBehavior
);
}
Claim::Pda(seed) => {
// The program can only claim accounts that correspond to the PDAs it is
// authorized to claim.
let pda = AccountId::from((&chained_call.program_id, &seed));
ensure!(account_id == pda, NssaError::InvalidProgramBehavior);
}
}
post.account_mut().program_owner = chained_call.program_id;
}
// Update the state diff
for (pre, post) in program_output
.pre_states
.iter()
.zip(program_output.post_states.iter())
{
state_diff.insert(pre.account_id, post.account().clone());
}
for new_call in program_output.chained_calls.into_iter().rev() {
chained_calls.push_front((new_call, Some(chained_call.program_id)));
}
chain_calls_counter = chain_calls_counter
.checked_add(1)
.expect("we check the max depth at the beginning of the loop");
}
// Check that all modified uninitialized accounts were claimed
for post in state_diff.iter().filter_map(|(account_id, post)| {
let pre = state.get_account_by_id(*account_id);
if pre.program_owner != DEFAULT_PROGRAM_ID {
return None;
}
if pre == *post {
return None;
}
Some(post)
}) {
ensure!(
post.program_owner != DEFAULT_PROGRAM_ID,
NssaError::InvalidProgramBehavior
);
}
Ok(state_diff)
}
}
#[cfg(test)]
@ -289,6 +68,7 @@ pub mod tests {
error::NssaError,
program::Program,
public_transaction::{Message, WitnessSet},
validated_state_diff::ValidatedStateDiff,
};
fn keys_for_tests() -> (PrivateKey, PrivateKey, AccountId, AccountId) {
@ -302,7 +82,7 @@ pub mod tests {
fn state_for_tests() -> V03State {
let (_, _, addr1, addr2) = keys_for_tests();
let initial_data = [(addr1, 10000), (addr2, 20000)];
V03State::new_with_genesis_accounts(&initial_data, &[])
V03State::new_with_genesis_accounts(&initial_data, &[], 0)
}
fn transaction_for_tests() -> PublicTransaction {
@ -397,7 +177,7 @@ pub mod tests {
let witness_set = WitnessSet::for_message(&message, &[&key1, &key1]);
let tx = PublicTransaction::new(message, witness_set);
let result = tx.validate_and_produce_public_state_diff(&state, 1, 0);
let result = ValidatedStateDiff::from_public_transaction(&tx, &state, 1, 0);
assert!(matches!(result, Err(NssaError::InvalidInput(_))));
}
@ -417,7 +197,7 @@ pub mod tests {
let witness_set = WitnessSet::for_message(&message, &[&key1, &key2]);
let tx = PublicTransaction::new(message, witness_set);
let result = tx.validate_and_produce_public_state_diff(&state, 1, 0);
let result = ValidatedStateDiff::from_public_transaction(&tx, &state, 1, 0);
assert!(matches!(result, Err(NssaError::InvalidInput(_))));
}
@ -438,7 +218,7 @@ pub mod tests {
let mut witness_set = WitnessSet::for_message(&message, &[&key1, &key2]);
witness_set.signatures_and_public_keys[0].0 = Signature::new_for_tests([1; 64]);
let tx = PublicTransaction::new(message, witness_set);
let result = tx.validate_and_produce_public_state_diff(&state, 1, 0);
let result = ValidatedStateDiff::from_public_transaction(&tx, &state, 1, 0);
assert!(matches!(result, Err(NssaError::InvalidInput(_))));
}
@ -458,7 +238,7 @@ pub mod tests {
let witness_set = WitnessSet::for_message(&message, &[&key1, &key2]);
let tx = PublicTransaction::new(message, witness_set);
let result = tx.validate_and_produce_public_state_diff(&state, 1, 0);
let result = ValidatedStateDiff::from_public_transaction(&tx, &state, 1, 0);
assert!(matches!(result, Err(NssaError::InvalidInput(_))));
}
@ -474,7 +254,7 @@ pub mod tests {
let witness_set = WitnessSet::for_message(&message, &[&key1, &key2]);
let tx = PublicTransaction::new(message, witness_set);
let result = tx.validate_and_produce_public_state_diff(&state, 1, 0);
let result = ValidatedStateDiff::from_public_transaction(&tx, &state, 1, 0);
assert!(matches!(result, Err(NssaError::InvalidInput(_))));
}
}

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,442 @@
use std::{
collections::{HashMap, HashSet, VecDeque},
hash::Hash,
};
use log::debug;
use nssa_core::{
BlockId, Commitment, Nullifier, PrivacyPreservingCircuitOutput, Timestamp,
account::{Account, AccountId, AccountWithMetadata},
program::{
ChainedCall, Claim, DEFAULT_PROGRAM_ID, compute_authorized_pdas, validate_execution,
},
};
use crate::{
V03State, ensure,
error::NssaError,
privacy_preserving_transaction::{
PrivacyPreservingTransaction, circuit::Proof, message::Message,
},
program::Program,
program_deployment_transaction::ProgramDeploymentTransaction,
public_transaction::PublicTransaction,
state::MAX_NUMBER_CHAINED_CALLS,
};
/// The set of changes a validated transaction produces, ready to be applied to the state.
pub struct StateDiff {
    /// Account ids that signed the transaction.
    pub signer_account_ids: Vec<AccountId>,
    /// New values for the public accounts touched by the transaction.
    pub public_diff: HashMap<AccountId, Account>,
    /// Commitments introduced by a privacy preserving transaction (empty otherwise).
    pub new_commitments: Vec<Commitment>,
    /// Nullifiers spent by a privacy preserving transaction (empty otherwise).
    pub new_nullifiers: Vec<Nullifier>,
    /// Newly deployed program, if this diff comes from a program deployment transaction.
    pub program: Option<Program>,
}
/// The validated output of executing or verifying a transaction, ready to be applied to the state.
///
/// Can only be constructed by the transaction validation functions inside this crate, ensuring the
/// diff has been checked before any state mutation occurs.
///
/// Newtype wrapper around [`StateDiff`]; unwrapped via `into_state_diff`.
pub struct ValidatedStateDiff(StateDiff);
impl ValidatedStateDiff {
    /// Validates a public transaction against the current `state` and, on success, returns the
    /// resulting state diff.
    ///
    /// Validation covers: uniqueness of the message's account ids, one nonce per signature,
    /// signature validity, nonce freshness, and — for the initial call plus every chained call —
    /// program existence, consistency of reported pre-states with the evolving diff,
    /// authorization flags, `self_program_id`/`caller_program_id` integrity, well-behavedness
    /// via `validate_execution`, validity windows, and account-claim rules.
    ///
    /// # Errors
    /// Returns an [`NssaError`] describing the first validation step that failed.
    pub fn from_public_transaction(
        tx: &PublicTransaction,
        state: &V03State,
        block_id: BlockId,
        timestamp: Timestamp,
    ) -> Result<Self, NssaError> {
        let message = tx.message();
        let witness_set = tx.witness_set();
        // All account_ids must be different
        ensure!(
            message.account_ids.iter().collect::<HashSet<_>>().len() == message.account_ids.len(),
            NssaError::InvalidInput("Duplicate account_ids found in message".into(),)
        );
        // Check exactly one nonce is provided for each signature
        ensure!(
            message.nonces.len() == witness_set.signatures_and_public_keys.len(),
            NssaError::InvalidInput(
                "Mismatch between number of nonces and signatures/public keys".into(),
            )
        );
        // Check the signatures are valid
        ensure!(
            witness_set.is_valid_for(message),
            NssaError::InvalidInput("Invalid signature for given message and public key".into())
        );
        let signer_account_ids = tx.signer_account_ids();
        // Check nonces corresponds to the current nonces on the public state.
        for (account_id, nonce) in signer_account_ids.iter().zip(&message.nonces) {
            let current_nonce = state.get_account_by_id(*account_id).nonce;
            ensure!(
                current_nonce == *nonce,
                NssaError::InvalidInput("Nonce mismatch".into())
            );
        }
        // Build pre_states for execution
        let input_pre_states: Vec<_> = message
            .account_ids
            .iter()
            .map(|account_id| {
                AccountWithMetadata::new(
                    state.get_account_by_id(*account_id),
                    signer_account_ids.contains(account_id),
                    *account_id,
                )
            })
            .collect();
        let mut state_diff: HashMap<AccountId, Account> = HashMap::new();
        let initial_call = ChainedCall {
            program_id: message.program_id,
            instruction_data: message.instruction_data.clone(),
            pre_states: input_pre_states,
            pda_seeds: vec![],
        };
        // `None` marks the initial call, which has no caller program.
        let mut chained_calls = VecDeque::from_iter([(initial_call, None)]);
        let mut chain_calls_counter = 0;
        while let Some((chained_call, caller_program_id)) = chained_calls.pop_front() {
            ensure!(
                chain_calls_counter <= MAX_NUMBER_CHAINED_CALLS,
                NssaError::MaxChainedCallsDepthExceeded
            );
            // Check that the `program_id` corresponds to a deployed program
            let Some(program) = state.programs().get(&chained_call.program_id) else {
                return Err(NssaError::InvalidInput("Unknown program".into()));
            };
            debug!(
                "Program {:?} pre_states: {:?}, instruction_data: {:?}",
                chained_call.program_id, chained_call.pre_states, chained_call.instruction_data
            );
            let mut program_output = program.execute(
                caller_program_id,
                &chained_call.pre_states,
                &chained_call.instruction_data,
            )?;
            debug!(
                "Program {:?} output: {:?}",
                chained_call.program_id, program_output
            );
            let authorized_pdas =
                compute_authorized_pdas(caller_program_id, &chained_call.pda_seeds);
            let is_authorized = |account_id: &AccountId| {
                signer_account_ids.contains(account_id) || authorized_pdas.contains(account_id)
            };
            for pre in &program_output.pre_states {
                let account_id = pre.account_id;
                // Check that the program output pre_states coincide with the values in the public
                // state or with any modifications to those values during the chain of calls.
                let expected_pre = state_diff
                    .get(&account_id)
                    .cloned()
                    .unwrap_or_else(|| state.get_account_by_id(account_id));
                ensure!(
                    pre.account == expected_pre,
                    NssaError::InvalidProgramBehavior
                );
                // Check that authorization flags are consistent with the provided ones or
                // authorized by program through the PDA mechanism
                ensure!(
                    pre.is_authorized == is_authorized(&account_id),
                    NssaError::InvalidProgramBehavior
                );
            }
            // Verify that the program output's self_program_id matches the expected program ID.
            ensure!(
                program_output.self_program_id == chained_call.program_id,
                NssaError::InvalidProgramBehavior
            );
            // Verify that the program output's caller_program_id matches the actual caller.
            ensure!(
                program_output.caller_program_id == caller_program_id,
                NssaError::InvalidProgramBehavior
            );
            // Verify execution corresponds to a well-behaved program.
            // See the # Programs section for the definition of the `validate_execution` method.
            ensure!(
                validate_execution(
                    &program_output.pre_states,
                    &program_output.post_states,
                    chained_call.program_id,
                ),
                NssaError::InvalidProgramBehavior
            );
            // Verify validity window
            ensure!(
                program_output.block_validity_window.is_valid_for(block_id)
                    && program_output
                        .timestamp_validity_window
                        .is_valid_for(timestamp),
                NssaError::OutOfValidityWindow
            );
            for (i, post) in program_output.post_states.iter_mut().enumerate() {
                let Some(claim) = post.required_claim() else {
                    continue;
                };
                // The invoked program can only claim accounts with default program id.
                ensure!(
                    post.account().program_owner == DEFAULT_PROGRAM_ID,
                    NssaError::InvalidProgramBehavior
                );
                let account_id = program_output.pre_states[i].account_id;
                match claim {
                    Claim::Authorized => {
                        // The program can only claim accounts that were authorized by the signer.
                        ensure!(
                            is_authorized(&account_id),
                            NssaError::InvalidProgramBehavior
                        );
                    }
                    Claim::Pda(seed) => {
                        // The program can only claim accounts that correspond to the PDAs it is
                        // authorized to claim.
                        let pda = AccountId::from((&chained_call.program_id, &seed));
                        ensure!(account_id == pda, NssaError::InvalidProgramBehavior);
                    }
                }
                post.account_mut().program_owner = chained_call.program_id;
            }
            // Update the state diff
            for (pre, post) in program_output
                .pre_states
                .iter()
                .zip(program_output.post_states.iter())
            {
                state_diff.insert(pre.account_id, post.account().clone());
            }
            // Nested calls are pushed to the front in reverse so they execute depth-first,
            // in the order the program emitted them.
            for new_call in program_output.chained_calls.into_iter().rev() {
                chained_calls.push_front((new_call, Some(chained_call.program_id)));
            }
            chain_calls_counter = chain_calls_counter
                .checked_add(1)
                .expect("we check the max depth at the beginning of the loop");
        }
        // Check that all modified uninitialized accounts were claimed
        for post in state_diff.iter().filter_map(|(account_id, post)| {
            let pre = state.get_account_by_id(*account_id);
            if pre.program_owner != DEFAULT_PROGRAM_ID {
                return None;
            }
            if pre == *post {
                return None;
            }
            Some(post)
        }) {
            ensure!(
                post.program_owner != DEFAULT_PROGRAM_ID,
                NssaError::InvalidProgramBehavior
            );
        }
        Ok(Self(StateDiff {
            signer_account_ids,
            public_diff: state_diff,
            new_commitments: vec![],
            new_nullifiers: vec![],
            program: None,
        }))
    }

    /// Validates a privacy preserving transaction against the current `state` and, on success,
    /// returns the resulting state diff (public account changes plus the new commitments and
    /// nullifiers from the message).
    ///
    /// # Errors
    /// Returns an [`NssaError`] if any structural check, signature/nonce check, validity window,
    /// proof verification, commitment freshness or nullifier validity check fails.
    pub fn from_privacy_preserving_transaction(
        tx: &PrivacyPreservingTransaction,
        state: &V03State,
        block_id: BlockId,
        timestamp: Timestamp,
    ) -> Result<Self, NssaError> {
        let message = &tx.message;
        let witness_set = &tx.witness_set;
        // 1. Commitments or nullifiers are non empty
        if message.new_commitments.is_empty() && message.new_nullifiers.is_empty() {
            return Err(NssaError::InvalidInput(
                "Empty commitments and empty nullifiers found in message".into(),
            ));
        }
        // 2. Check there are no duplicate account_ids in the public_account_ids list.
        if n_unique(&message.public_account_ids) != message.public_account_ids.len() {
            return Err(NssaError::InvalidInput(
                "Duplicate account_ids found in message".into(),
            ));
        }
        // Check there are no duplicate nullifiers in the new_nullifiers list
        if n_unique(&message.new_nullifiers) != message.new_nullifiers.len() {
            return Err(NssaError::InvalidInput(
                "Duplicate nullifiers found in message".into(),
            ));
        }
        // Check there are no duplicate commitments in the new_commitments list
        if n_unique(&message.new_commitments) != message.new_commitments.len() {
            return Err(NssaError::InvalidInput(
                "Duplicate commitments found in message".into(),
            ));
        }
        // 3. Nonce checks and Valid signatures
        // Check exactly one nonce is provided for each signature
        if message.nonces.len() != witness_set.signatures_and_public_keys.len() {
            return Err(NssaError::InvalidInput(
                "Mismatch between number of nonces and signatures/public keys".into(),
            ));
        }
        // Check the signatures are valid
        if !witness_set.signatures_are_valid_for(message) {
            return Err(NssaError::InvalidInput(
                "Invalid signature for given message and public key".into(),
            ));
        }
        let signer_account_ids = tx.signer_account_ids();
        // Check nonces corresponds to the current nonces on the public state.
        for (account_id, nonce) in signer_account_ids.iter().zip(&message.nonces) {
            let current_nonce = state.get_account_by_id(*account_id).nonce;
            if current_nonce != *nonce {
                return Err(NssaError::InvalidInput("Nonce mismatch".into()));
            }
        }
        // Verify validity window
        if !message.block_validity_window.is_valid_for(block_id)
            || !message.timestamp_validity_window.is_valid_for(timestamp)
        {
            return Err(NssaError::OutOfValidityWindow);
        }
        // Build pre_states for proof verification
        let public_pre_states: Vec<_> = message
            .public_account_ids
            .iter()
            .map(|account_id| {
                AccountWithMetadata::new(
                    state.get_account_by_id(*account_id),
                    signer_account_ids.contains(account_id),
                    *account_id,
                )
            })
            .collect();
        // 4. Proof verification
        check_privacy_preserving_circuit_proof_is_valid(
            &witness_set.proof,
            &public_pre_states,
            message,
        )?;
        // 5. Commitment freshness
        state.check_commitments_are_new(&message.new_commitments)?;
        // 6. Nullifier uniqueness
        state.check_nullifiers_are_valid(&message.new_nullifiers)?;
        let public_diff = message
            .public_account_ids
            .iter()
            .copied()
            .zip(message.public_post_states.clone())
            .collect();
        let new_nullifiers = message
            .new_nullifiers
            .iter()
            .copied()
            .map(|(nullifier, _)| nullifier)
            .collect();
        Ok(Self(StateDiff {
            signer_account_ids,
            public_diff,
            new_commitments: message.new_commitments.clone(),
            new_nullifiers,
            program: None,
        }))
    }

    /// Validates a program deployment transaction and, on success, returns a state diff whose
    /// only effect is registering the new program.
    ///
    /// # Errors
    /// Returns [`NssaError::ProgramAlreadyExists`] if a program with the same id is already
    /// deployed, or propagates the error from `Program::new` if the bytecode is rejected.
    pub fn from_program_deployment_transaction(
        tx: &ProgramDeploymentTransaction,
        state: &V03State,
    ) -> Result<Self, NssaError> {
        // TODO: remove clone
        let program = Program::new(tx.message.bytecode.clone())?;
        if state.programs().contains_key(&program.id()) {
            return Err(NssaError::ProgramAlreadyExists);
        }
        Ok(Self(StateDiff {
            signer_account_ids: vec![],
            public_diff: HashMap::new(),
            new_commitments: vec![],
            new_nullifiers: vec![],
            program: Some(program),
        }))
    }

    /// Returns the public account changes produced by this transaction.
    ///
    /// Used by callers (e.g. the sequencer) to inspect the diff before committing it, for example
    /// to enforce that system accounts are not modified by user transactions.
    #[must_use]
    pub fn public_diff(&self) -> HashMap<AccountId, Account> {
        self.0.public_diff.clone()
    }

    /// Consumes the validated wrapper, yielding the raw diff for application to the state.
    pub(crate) fn into_state_diff(self) -> StateDiff {
        self.0
    }
}
/// Checks that `proof` attests to the circuit output implied by `message` and the current
/// public pre-states.
fn check_privacy_preserving_circuit_proof_is_valid(
    proof: &Proof,
    public_pre_states: &[AccountWithMetadata],
    message: &Message,
) -> Result<(), NssaError> {
    // Reconstruct the circuit output the proof is expected to be valid for.
    let ciphertexts = message
        .encrypted_private_post_states
        .iter()
        .map(|value| value.ciphertext.clone())
        .collect();
    let expected_output = PrivacyPreservingCircuitOutput {
        public_pre_states: public_pre_states.to_vec(),
        public_post_states: message.public_post_states.clone(),
        ciphertexts,
        new_commitments: message.new_commitments.clone(),
        new_nullifiers: message.new_nullifiers.clone(),
        block_validity_window: message.block_validity_window,
        timestamp_validity_window: message.timestamp_validity_window,
    };
    if proof.is_valid_for(&expected_output) {
        Ok(())
    } else {
        Err(NssaError::InvalidPrivacyPreservingProof)
    }
}
/// Counts the number of distinct elements in `data`.
fn n_unique<T: Eq + Hash>(data: &[T]) -> usize {
    data.iter().collect::<HashSet<&T>>().len()
}

View File

@ -9,6 +9,7 @@ workspace = true
[dependencies]
nssa_core.workspace = true
clock_core.workspace = true
token_core.workspace = true
token_program.workspace = true
amm_core.workspace = true

View File

@ -15,6 +15,7 @@ fn main() {
let (
ProgramInput {
self_program_id,
caller_program_id,
pre_states,
instruction,
},
@ -155,6 +156,7 @@ fn main() {
ProgramOutput::new(
self_program_id,
caller_program_id,
instruction_words,
pre_states_clone,
post_states,

View File

@ -5,6 +5,7 @@ fn main() {
let (
ProgramInput {
self_program_id,
caller_program_id,
pre_states,
instruction,
},
@ -59,6 +60,7 @@ fn main() {
ProgramOutput::new(
self_program_id,
caller_program_id,
instruction_words,
pre_states_clone,
post_states,

View File

@ -68,6 +68,7 @@ fn main() {
let (
ProgramInput {
self_program_id,
caller_program_id,
pre_states,
instruction: balance_to_move,
},
@ -85,5 +86,12 @@ fn main() {
_ => panic!("invalid params"),
};
ProgramOutput::new(self_program_id, instruction_words, pre_states, post_states).write();
ProgramOutput::new(
self_program_id,
caller_program_id,
instruction_words,
pre_states,
post_states,
)
.write();
}

View File

@ -0,0 +1,94 @@
//! Clock Program.
//!
//! A system program that records the current block ID and timestamp into dedicated clock accounts.
//! Three accounts are maintained, updated at different block intervals (every 1, 10, and 50
//! blocks), allowing programs to read recent timestamps at various granularities.
//!
//! This program can be invoked only by the sequencer as the last transaction in every
//! block. Clock accounts are assigned to the clock program at genesis, so no claiming is required
//! here.
use clock_core::{
CLOCK_01_PROGRAM_ACCOUNT_ID, CLOCK_10_PROGRAM_ACCOUNT_ID, CLOCK_50_PROGRAM_ACCOUNT_ID,
ClockAccountData, Instruction,
};
use nssa_core::{
account::AccountWithMetadata,
program::{AccountPostState, ProgramInput, ProgramOutput, read_nssa_inputs},
};
/// Returns the pre-state paired with its post-state.
///
/// When `current_block_id` is a multiple of `divisor` the post account carries `updated_data`;
/// otherwise the account is carried over unchanged.
fn update_if_multiple(
    pre: AccountWithMetadata,
    divisor: u64,
    current_block_id: u64,
    updated_data: &[u8],
) -> (AccountWithMetadata, AccountPostState) {
    let mut post_account = pre.account.clone();
    if current_block_id.is_multiple_of(divisor) {
        post_account.data = updated_data
            .to_vec()
            .try_into()
            .expect("Clock account data should fit in account data");
    }
    (pre, AccountPostState::new(post_account))
}
fn main() {
    // The instruction carries the current block timestamp, supplied by the sequencer.
    let (
        ProgramInput {
            self_program_id,
            caller_program_id,
            pre_states,
            instruction: timestamp,
        },
        instruction_words,
    ) = read_nssa_inputs::<Instruction>();

    // Exactly three clock accounts are expected as input.
    let (pre_01, pre_10, pre_50) = match <[_; 3]>::try_from(pre_states) {
        Ok([a, b, c]) => (a, b, c),
        Err(_) => panic!("Invalid number of input accounts"),
    };

    // Verify pre-states correspond to the expected clock account IDs.
    assert!(
        pre_01.account_id == CLOCK_01_PROGRAM_ACCOUNT_ID
            && pre_10.account_id == CLOCK_10_PROGRAM_ACCOUNT_ID
            && pre_50.account_id == CLOCK_50_PROGRAM_ACCOUNT_ID,
        "Invalid input accounts"
    );

    // Verify all clock accounts are owned by this program (assigned at genesis).
    assert!(
        pre_01.account.program_owner == self_program_id
            && pre_10.account.program_owner == self_program_id
            && pre_50.account.program_owner == self_program_id,
        "Clock accounts must be owned by the clock program"
    );

    // Derive the new block id from the every-block clock account and serialize the new data.
    let raw_prev = pre_01.account.data.clone().into_inner();
    let prev_data = ClockAccountData::from_bytes(&raw_prev);
    let current_block_id = prev_data
        .block_id
        .checked_add(1)
        .expect("Next block id should be within u64 boundaries");
    let updated_data = ClockAccountData {
        block_id: current_block_id,
        timestamp,
    }
    .to_bytes();

    // Each account refreshes at its own cadence: every 1, 10, and 50 blocks.
    let (pre_01, post_01) = update_if_multiple(pre_01, 1, current_block_id, &updated_data);
    let (pre_10, post_10) = update_if_multiple(pre_10, 10, current_block_id, &updated_data);
    let (pre_50, post_50) = update_if_multiple(pre_50, 50, current_block_id, &updated_data);

    ProgramOutput::new(
        self_program_id,
        caller_program_id,
        instruction_words,
        vec![pre_01, pre_10, pre_50],
        vec![post_01, post_10, post_50],
    )
    .write();
}

View File

@ -47,6 +47,7 @@ fn main() {
let (
ProgramInput {
self_program_id,
caller_program_id,
pre_states,
instruction: solution,
},
@ -81,6 +82,7 @@ fn main() {
ProgramOutput::new(
self_program_id,
caller_program_id,
instruction_words,
vec![pinata, winner],
vec![

View File

@ -53,6 +53,7 @@ fn main() {
let (
ProgramInput {
self_program_id,
caller_program_id,
pre_states,
instruction: solution,
},
@ -99,6 +100,7 @@ fn main() {
ProgramOutput::new(
self_program_id,
caller_program_id,
instruction_words,
vec![
pinata_definition,

View File

@ -114,6 +114,15 @@ impl ExecutionState {
"Program output self_program_id does not match chained call program_id"
);
// Verify that the program output's caller_program_id matches the actual caller.
// This prevents a malicious user from privately executing an internal function
// by spoofing caller_program_id (e.g. passing caller_program_id = self_program_id
// to bypass access control checks).
assert_eq!(
program_output.caller_program_id, caller_program_id,
"Program output caller_program_id does not match actual caller"
);
// Check that the program is well behaved.
// See the # Programs section for the definition of the `validate_execution` method.
let execution_valid = validate_execution(

View File

@ -13,6 +13,7 @@ fn main() {
let (
ProgramInput {
self_program_id,
caller_program_id,
pre_states,
instruction,
},
@ -84,6 +85,7 @@ fn main() {
ProgramOutput::new(
self_program_id,
caller_program_id,
instruction_words,
pre_states_clone,
post_states,

View File

@ -3036,7 +3036,7 @@ fn new_definition_lp_symmetric_amounts() {
fn state_for_amm_tests() -> V03State {
let initial_data = [];
let mut state = V03State::new_with_genesis_accounts(&initial_data, &[]);
let mut state = V03State::new_with_genesis_accounts(&initial_data, &[], 0);
state.force_insert_account(
IdForExeTests::pool_definition_id(),
AccountsForExeTests::pool_definition_init(),
@ -3079,7 +3079,7 @@ fn state_for_amm_tests() -> V03State {
fn state_for_amm_tests_with_new_def() -> V03State {
let initial_data = [];
let mut state = V03State::new_with_genesis_accounts(&initial_data, &[]);
let mut state = V03State::new_with_genesis_accounts(&initial_data, &[], 0);
state.force_insert_account(
IdForExeTests::token_a_definition_id(),
AccountsForExeTests::token_a_definition_account(),

View File

@ -0,0 +1,12 @@
[package]
name = "clock_core"
version = "0.1.0"
edition = "2024"
license = { workspace = true }
[lints]
workspace = true
[dependencies]
nssa_core.workspace = true
borsh.workspace = true

View File

@ -0,0 +1,42 @@
//! Core data structures and constants for the Clock Program.
use borsh::{BorshDeserialize, BorshSerialize};
use nssa_core::{Timestamp, account::AccountId};
/// Id of the clock account refreshed every block.
pub const CLOCK_01_PROGRAM_ACCOUNT_ID: AccountId =
    AccountId::new(*b"/LEZ/ClockProgramAccount/0000001");

/// Id of the clock account refreshed every 10 blocks.
pub const CLOCK_10_PROGRAM_ACCOUNT_ID: AccountId =
    AccountId::new(*b"/LEZ/ClockProgramAccount/0000010");

/// Id of the clock account refreshed every 50 blocks.
pub const CLOCK_50_PROGRAM_ACCOUNT_ID: AccountId =
    AccountId::new(*b"/LEZ/ClockProgramAccount/0000050");

/// All clock program account IDs in the order expected by the clock program.
pub const CLOCK_PROGRAM_ACCOUNT_IDS: [AccountId; 3] = [
    CLOCK_01_PROGRAM_ACCOUNT_ID,
    CLOCK_10_PROGRAM_ACCOUNT_ID,
    CLOCK_50_PROGRAM_ACCOUNT_ID,
];

/// The instruction type for the Clock Program. The sequencer passes the current block timestamp.
pub type Instruction = Timestamp;
/// The data stored in a clock account.
#[derive(Debug, Clone, Copy, PartialEq, Eq, BorshSerialize, BorshDeserialize)]
pub struct ClockAccountData {
    // Block id recorded at the last update of this account.
    pub block_id: u64,
    // Timestamp recorded at the last update of this account.
    pub timestamp: Timestamp,
}
impl ClockAccountData {
    /// Borsh-serializes the clock data for storage in a clock account.
    ///
    /// # Panics
    /// Panics if borsh serialization fails (not expected for this plain struct).
    #[must_use]
    pub fn to_bytes(self) -> Vec<u8> {
        borsh::to_vec(&self).expect("ClockAccountData serialization should not fail")
    }

    /// Deserializes clock data previously produced by [`Self::to_bytes`].
    ///
    /// # Panics
    /// Panics if `bytes` is not a valid borsh encoding of [`ClockAccountData`].
    #[must_use]
    pub fn from_bytes(bytes: &[u8]) -> Self {
        borsh::from_slice(bytes).expect("ClockAccountData deserialization should not fail")
    }
}

View File

@ -40,3 +40,5 @@ mock = []
[dev-dependencies]
futures.workspace = true
test_program_methods.workspace = true
nssa = { workspace = true, features = ["test-utils"] }

View File

@ -150,7 +150,7 @@ mod tests {
let retrieved_tx = node_store.get_transaction_by_hash(tx.hash());
assert_eq!(None, retrieved_tx);
// Add the block with the transaction
let dummy_state = V03State::new_with_genesis_accounts(&[], &[]);
let dummy_state = V03State::new_with_genesis_accounts(&[], &[], 0);
node_store.update(&block, [1; 32], &dummy_state).unwrap();
// Try again
let retrieved_tx = node_store.get_transaction_by_hash(tx.hash());
@ -209,7 +209,7 @@ mod tests {
let block_hash = block.header.hash;
let block_msg_id = [1; 32];
let dummy_state = V03State::new_with_genesis_accounts(&[], &[]);
let dummy_state = V03State::new_with_genesis_accounts(&[], &[], 0);
node_store
.update(&block, block_msg_id, &dummy_state)
.unwrap();
@ -244,7 +244,7 @@ mod tests {
let block = common::test_utils::produce_dummy_block(1, None, vec![tx]);
let block_id = block.header.block_id;
let dummy_state = V03State::new_with_genesis_accounts(&[], &[]);
let dummy_state = V03State::new_with_genesis_accounts(&[], &[], 0);
node_store.update(&block, [1; 32], &dummy_state).unwrap();
// Verify initial status is Pending

View File

@ -24,9 +24,10 @@ pub struct SequencerConfig {
pub genesis_id: u64,
/// If `True`, then adds random sequence of bytes to genesis block.
pub is_genesis_random: bool,
/// Maximum number of transactions in block.
/// Maximum number of user transactions in a block (excludes the mandatory clock transaction).
pub max_num_tx_in_block: usize,
/// Maximum block size (includes header and transactions).
/// Maximum block size (includes header, user transactions, and the mandatory clock
/// transaction).
#[serde(default = "default_max_block_size")]
pub max_block_size: ByteSize,
/// Mempool maximum size.

View File

@ -7,7 +7,7 @@ use common::PINATA_BASE58;
use common::{
HashType,
block::{BedrockStatus, Block, HashableBlockData},
transaction::NSSATransaction,
transaction::{NSSATransaction, clock_invocation},
};
use config::SequencerConfig;
use log::{error, info, warn};
@ -16,7 +16,6 @@ use mempool::{MemPool, MemPoolHandle};
#[cfg(feature = "mock")]
pub use mock::SequencerCoreWithMockClients;
use nssa::V03State;
use nssa_core::{BlockId, Timestamp};
pub use storage::error::DbError;
use testnet_initial_state::initial_state;
@ -139,6 +138,7 @@ impl<BC: BlockSettlementClientTrait, IC: IndexerClientTrait> SequencerCore<BC, I
V03State::new_with_genesis_accounts(
&init_accs.unwrap_or_default(),
&initial_commitments.unwrap_or_default(),
genesis_block.header.timestamp,
)
} else {
initial_state()
@ -163,28 +163,6 @@ impl<BC: BlockSettlementClientTrait, IC: IndexerClientTrait> SequencerCore<BC, I
(sequencer_core, mempool_handle)
}
fn execute_check_transaction_on_state(
&mut self,
tx: NSSATransaction,
block_id: BlockId,
timestamp: Timestamp,
) -> Result<NSSATransaction, nssa::error::NssaError> {
match &tx {
NSSATransaction::Public(tx) => self
.state
.transition_from_public_transaction(tx, block_id, timestamp),
NSSATransaction::PrivacyPreserving(tx) => self
.state
.transition_from_privacy_preserving_transaction(tx, block_id, timestamp),
NSSATransaction::ProgramDeployment(tx) => self
.state
.transition_from_program_deployment_transaction(tx),
}
.inspect_err(|err| warn!("Error at transition {err:#?}"))?;
Ok(tx)
}
pub async fn produce_new_block(&mut self) -> Result<u64> {
let (tx, _msg_id) = self
.produce_new_block_with_mempool_transactions()
@ -224,12 +202,20 @@ impl<BC: BlockSettlementClientTrait, IC: IndexerClientTrait> SequencerCore<BC, I
let new_block_timestamp = u64::try_from(chrono::Utc::now().timestamp_millis())
.expect("Timestamp must be positive");
// Pre-create the mandatory clock tx so its size is included in the block size check.
let clock_tx = clock_invocation(new_block_timestamp);
let clock_nssa_tx = NSSATransaction::Public(clock_tx.clone());
while let Some(tx) = self.mempool.pop() {
let tx_hash = tx.hash();
// Check if block size exceeds limit
let temp_valid_transactions =
[valid_transactions.as_slice(), std::slice::from_ref(&tx)].concat();
// Check if block size exceeds limit (including the mandatory clock tx).
let temp_valid_transactions = [
valid_transactions.as_slice(),
std::slice::from_ref(&tx),
std::slice::from_ref(&clock_nssa_tx),
]
.concat();
let temp_hashable_data = HashableBlockData {
block_id: new_block_height,
transactions: temp_valid_transactions,
@ -252,26 +238,35 @@ impl<BC: BlockSettlementClientTrait, IC: IndexerClientTrait> SequencerCore<BC, I
break;
}
match self.execute_check_transaction_on_state(tx, new_block_height, new_block_timestamp)
{
Ok(valid_tx) => {
valid_transactions.push(valid_tx);
info!("Validated transaction with hash {tx_hash}, including it in block");
if valid_transactions.len() >= self.sequencer_config.max_num_tx_in_block {
break;
}
}
let validated_diff = match tx.validate_on_state(
&self.state,
new_block_height,
new_block_timestamp,
) {
Ok(diff) => diff,
Err(err) => {
error!(
"Transaction with hash {tx_hash} failed execution check with error: {err:#?}, skipping it",
);
// TODO: Probably need to handle unsuccessful transaction execution?
continue;
}
};
self.state.apply_state_diff(validated_diff);
valid_transactions.push(tx);
info!("Validated transaction with hash {tx_hash}, including it in block");
if valid_transactions.len() >= self.sequencer_config.max_num_tx_in_block {
break;
}
}
// Append the Clock Program invocation as the mandatory last transaction.
self.state
.transition_from_public_transaction(&clock_tx, new_block_height, new_block_timestamp)
.context("Clock transaction failed. Aborting block production.")?;
valid_transactions.push(clock_nssa_tx);
let hashable_data = HashableBlockData {
block_id: new_block_height,
transactions: valid_transactions,
@ -395,7 +390,10 @@ mod tests {
use std::{pin::pin, time::Duration};
use bedrock_client::BackoffConfig;
use common::{test_utils::sequencer_sign_key_for_testing, transaction::NSSATransaction};
use common::{
test_utils::sequencer_sign_key_for_testing,
transaction::{NSSATransaction, clock_invocation},
};
use logos_blockchain_core::mantle::ops::channel::ChannelId;
use mempool::MemPoolHandle;
use testnet_initial_state::{initial_accounts, initial_pub_accounts_private_keys};
@ -524,7 +522,7 @@ mod tests {
let tx = tx.transaction_stateless_check().unwrap();
// Signature is not from sender. Execution fails
let result = sequencer.execute_check_transaction_on_state(tx, 0, 0);
let result = tx.execute_check_on_state(&mut sequencer.state, 0, 0);
assert!(matches!(
result,
@ -550,7 +548,9 @@ mod tests {
// Passed pre-check
assert!(result.is_ok());
let result = sequencer.execute_check_transaction_on_state(result.unwrap(), 0, 0);
let result = result
.unwrap()
.execute_check_on_state(&mut sequencer.state, 0, 0);
let is_failed_at_balance_mismatch = matches!(
result.err().unwrap(),
nssa::error::NssaError::ProgramExecutionFailed(_)
@ -572,8 +572,7 @@ mod tests {
acc1, 0, acc2, 100, &sign_key1,
);
sequencer
.execute_check_transaction_on_state(tx, 0, 0)
tx.execute_check_on_state(&mut sequencer.state, 0, 0)
.unwrap();
let bal_from = sequencer.state.get_account_by_id(acc1).balance;
@ -652,8 +651,14 @@ mod tests {
.unwrap()
.unwrap();
// Only one should be included in the block
assert_eq!(block.body.transactions, vec![tx.clone()]);
// Only one user tx should be included; the clock tx is always appended last.
assert_eq!(
block.body.transactions,
vec![
tx.clone(),
NSSATransaction::Public(clock_invocation(block.header.timestamp))
]
);
}
#[tokio::test]
@ -679,7 +684,13 @@ mod tests {
.get_block_at_id(sequencer.chain_height)
.unwrap()
.unwrap();
assert_eq!(block.body.transactions, vec![tx.clone()]);
assert_eq!(
block.body.transactions,
vec![
tx.clone(),
NSSATransaction::Public(clock_invocation(block.header.timestamp))
]
);
// Add same transaction should fail
mempool_handle.push(tx.clone()).await.unwrap();
@ -691,7 +702,13 @@ mod tests {
.get_block_at_id(sequencer.chain_height)
.unwrap()
.unwrap();
assert!(block.body.transactions.is_empty());
// The replay is rejected, so only the clock tx is in the block.
assert_eq!(
block.body.transactions,
vec![NSSATransaction::Public(clock_invocation(
block.header.timestamp
))]
);
}
#[tokio::test]
@ -726,7 +743,13 @@ mod tests {
.get_block_at_id(sequencer.chain_height)
.unwrap()
.unwrap();
assert_eq!(block.body.transactions, vec![tx.clone()]);
assert_eq!(
block.body.transactions,
vec![
tx.clone(),
NSSATransaction::Public(clock_invocation(block.header.timestamp))
]
);
}
// Instantiating a new sequencer from the same config. This should load the existing block
@ -856,8 +879,54 @@ mod tests {
);
assert_eq!(
new_block.body.transactions,
vec![tx],
"New block should contain the submitted transaction"
vec![
tx,
NSSATransaction::Public(clock_invocation(new_block.header.timestamp))
],
"New block should contain the submitted transaction and the clock invocation"
);
}
#[tokio::test]
async fn transactions_touching_clock_account_are_dropped_from_block() {
    let (mut sequencer, mempool_handle) = common_setup().await;
    // Canonical clock invocation and a crafted variant with a different timestamp — both must
    // be dropped because their diffs touch the clock accounts.
    let crafted_clock_tx = {
        // Same shape as the system clock tx, but with an attacker-chosen timestamp (42)
        // as the instruction payload.
        let message = nssa::public_transaction::Message::try_new(
            nssa::program::Program::clock().id(),
            nssa::CLOCK_PROGRAM_ACCOUNT_IDS.to_vec(),
            vec![], // no signers
            42_u64,
        )
        .unwrap();
        NSSATransaction::Public(nssa::PublicTransaction::new(
            message,
            nssa::public_transaction::WitnessSet::from_raw_parts(vec![]),
        ))
    };
    // Push both the byte-identical clock invocation (timestamp 0) and the crafted one.
    // `clock_invocation` is a helper defined elsewhere in this test module — presumably
    // it builds the canonical clock tx for a given timestamp (TODO confirm).
    mempool_handle
        .push(NSSATransaction::Public(clock_invocation(0)))
        .await
        .unwrap();
    mempool_handle.push(crafted_clock_tx).await.unwrap();
    sequencer
        .produce_new_block_with_mempool_transactions()
        .unwrap();
    let block = sequencer
        .store
        .get_block_at_id(sequencer.chain_height)
        .unwrap()
        .unwrap();
    // Both transactions were dropped. Only the system-appended clock tx remains.
    assert_eq!(
        block.body.transactions,
        vec![NSSATransaction::Public(clock_invocation(
            block.header.timestamp
        ))]
    );
}
@ -909,4 +978,86 @@ mod tests {
"Chain height should NOT match the modified config.genesis_id"
);
}
#[tokio::test]
async fn user_tx_that_chain_calls_clock_is_dropped() {
    let (mut sequencer, mempool_handle) = common_setup().await;
    // Deploy the clock_chain_caller test program.
    let deploy_tx =
        NSSATransaction::ProgramDeployment(nssa::ProgramDeploymentTransaction::new(
            nssa::program_deployment_transaction::Message::new(
                test_program_methods::CLOCK_CHAIN_CALLER_ELF.to_vec(),
            ),
        ));
    mempool_handle.push(deploy_tx).await.unwrap();
    // Mine the deployment into its own block so the program is available before the
    // user tx below is executed.
    sequencer
        .produce_new_block_with_mempool_transactions()
        .unwrap();
    // Build a user transaction that invokes clock_chain_caller, which in turn chain-calls the
    // clock program with the clock accounts. The sequencer should detect that the resulting
    // state diff modifies clock accounts and drop the transaction.
    let clock_chain_caller_id =
        nssa::program::Program::new(test_program_methods::CLOCK_CHAIN_CALLER_ELF.to_vec())
            .unwrap()
            .id();
    let clock_program_id = nssa::program::Program::clock().id();
    let timestamp: u64 = 0;
    // Instruction payload: the program id + timestamp the caller forwards in its chain call.
    let message = nssa::public_transaction::Message::try_new(
        clock_chain_caller_id,
        nssa::CLOCK_PROGRAM_ACCOUNT_IDS.to_vec(),
        vec![], // no signers
        (clock_program_id, timestamp),
    )
    .unwrap();
    let user_tx = NSSATransaction::Public(nssa::PublicTransaction::new(
        message,
        nssa::public_transaction::WitnessSet::from_raw_parts(vec![]),
    ));
    mempool_handle.push(user_tx).await.unwrap();
    sequencer
        .produce_new_block_with_mempool_transactions()
        .unwrap();
    let block = sequencer
        .store
        .get_block_at_id(sequencer.chain_height)
        .unwrap()
        .unwrap();
    // The user tx must have been dropped; only the mandatory clock invocation remains.
    assert_eq!(
        block.body.transactions,
        vec![NSSATransaction::Public(clock_invocation(
            block.header.timestamp
        ))]
    );
}
#[tokio::test]
async fn block_production_aborts_when_clock_account_data_is_corrupted() {
    let (mut sequencer, mempool_handle) = common_setup().await;
    // Corrupt the clock 01 account data so the clock program panics on deserialization.
    let clock_account_id = nssa::CLOCK_01_PROGRAM_ACCOUNT_ID;
    let mut corrupted = sequencer.state.get_account_by_id(clock_account_id);
    // Three 0xff bytes — presumably not a valid encoding of the clock state
    // (NOTE(review): confirm against the clock program's data layout).
    corrupted.data = vec![0xff; 3].try_into().unwrap();
    sequencer
        .state
        .force_insert_account(clock_account_id, corrupted);
    // Push a dummy transaction so the mempool is non-empty.
    let tx = common::test_utils::produce_dummy_empty_transaction();
    mempool_handle.push(tx).await.unwrap();
    // Block production must fail because the appended clock tx cannot execute.
    let result = sequencer.produce_new_block_with_mempool_transactions();
    assert!(
        result.is_err(),
        "Block production should abort when clock account data is corrupted"
    );
}
}

96
storage/src/cells/mod.rs Normal file
View File

@ -0,0 +1,96 @@
use std::sync::Arc;
use borsh::{BorshDeserialize, BorshSerialize};
use rocksdb::{BoundColumnFamily, DBWithThreadMode, MultiThreaded, WriteBatch};
use crate::{DbResult, error::DbError};
pub mod shared_cells;
/// Base trait for a value stored at a fixed, named key ("cell") inside a
/// RocksDB column family.
///
/// `CF_NAME` selects the column family; `CELL_NAME` is the logical name used
/// as the default key and in error messages. `KeyParams` lets keyed cells
/// (e.g. per-block or per-hash cells) derive their key from a parameter
/// instead of the constant name.
pub trait SimpleStorableCell {
    const CF_NAME: &'static str;
    const CELL_NAME: &'static str;

    type KeyParams;

    /// Default key: the borsh-serialized cell name. Keyed cells override this.
    fn key_constructor(_params: Self::KeyParams) -> DbResult<Vec<u8>> {
        match borsh::to_vec(&Self::CELL_NAME) {
            Ok(key) => Ok(key),
            Err(err) => Err(DbError::borsh_cast_message(
                err,
                Some(format!("Failed to serialize {:?}", Self::CELL_NAME)),
            )),
        }
    }

    /// Resolve the column-family handle; a missing CF is a configuration bug,
    /// so this panics rather than returning an error.
    fn column_ref(db: &DBWithThreadMode<MultiThreaded>) -> Arc<BoundColumnFamily<'_>> {
        match db.cf_handle(Self::CF_NAME) {
            Some(handle) => handle,
            None => panic!("Column family {:?} must be present", Self::CF_NAME),
        }
    }
}
/// Read side of a cell: borsh-deserializes the stored bytes back into `Self`.
pub trait SimpleReadableCell: SimpleStorableCell + BorshDeserialize {
    /// Read the cell, treating an absent key as an error.
    fn get(db: &DBWithThreadMode<MultiThreaded>, params: Self::KeyParams) -> DbResult<Self> {
        match Self::get_opt(db, params)? {
            Some(value) => Ok(value),
            None => Err(DbError::db_interaction_error(format!(
                "{:?} not found",
                Self::CELL_NAME
            ))),
        }
    }

    /// Read the cell, returning `Ok(None)` when the key is absent.
    fn get_opt(
        db: &DBWithThreadMode<MultiThreaded>,
        params: Self::KeyParams,
    ) -> DbResult<Option<Self>> {
        let cf_ref = Self::column_ref(db);
        let key = Self::key_constructor(params)?;
        let raw = db.get_cf(&cf_ref, key).map_err(|rerr| {
            DbError::rocksdb_cast_message(
                rerr,
                Some(format!("Failed to read {:?}", Self::CELL_NAME)),
            )
        })?;
        match raw {
            None => Ok(None),
            Some(bytes) => borsh::from_slice::<Self>(&bytes).map(Some).map_err(|err| {
                DbError::borsh_cast_message(
                    err,
                    Some(format!("Failed to deserialize {:?}", Self::CELL_NAME)),
                )
            }),
        }
    }
}
/// Write side of a cell: the implementor supplies `value_constructor` (its
/// borsh encoding plus a cell-specific error message).
pub trait SimpleWritableCell: SimpleStorableCell + BorshSerialize {
    /// Borsh-encode `self` for storage.
    fn value_constructor(&self) -> DbResult<Vec<u8>>;

    /// Write the cell immediately as a single-key put.
    fn put(&self, db: &DBWithThreadMode<MultiThreaded>, params: Self::KeyParams) -> DbResult<()> {
        let cf_ref = Self::column_ref(db);
        let key = Self::key_constructor(params)?;
        let value = self.value_constructor()?;
        if let Err(rerr) = db.put_cf(&cf_ref, key, value) {
            return Err(DbError::rocksdb_cast_message(
                rerr,
                Some(format!("Failed to write {:?}", Self::CELL_NAME)),
            ));
        }
        Ok(())
    }

    /// Queue the write onto `write_batch`; only key/value encoding can fail here,
    /// since the batch put itself is infallible.
    fn put_batch(
        &self,
        db: &DBWithThreadMode<MultiThreaded>,
        params: Self::KeyParams,
        write_batch: &mut WriteBatch,
    ) -> DbResult<()> {
        let cf_ref = Self::column_ref(db);
        let key = Self::key_constructor(params)?;
        let value = self.value_constructor()?;
        write_batch.put_cf(&cf_ref, key, value);
        Ok(())
    }
}

View File

@ -0,0 +1,89 @@
use borsh::{BorshDeserialize, BorshSerialize};
use common::block::Block;
use crate::{
BLOCK_CELL_NAME, CF_BLOCK_NAME, CF_META_NAME, DB_META_FIRST_BLOCK_IN_DB_KEY,
DB_META_FIRST_BLOCK_SET_KEY, DB_META_LAST_BLOCK_IN_DB_KEY, DbResult,
cells::{SimpleReadableCell, SimpleStorableCell, SimpleWritableCell},
error::DbError,
};
/// Meta cell: id of the newest block currently stored in the DB.
#[derive(Debug, BorshSerialize, BorshDeserialize)]
pub struct LastBlockCell(pub u64);

impl SimpleStorableCell for LastBlockCell {
    type KeyParams = ();

    const CELL_NAME: &'static str = DB_META_LAST_BLOCK_IN_DB_KEY;
    const CF_NAME: &'static str = CF_META_NAME;
}

impl SimpleReadableCell for LastBlockCell {}

impl SimpleWritableCell for LastBlockCell {
    fn value_constructor(&self) -> DbResult<Vec<u8>> {
        match borsh::to_vec(self) {
            Ok(bytes) => Ok(bytes),
            Err(err) => Err(DbError::borsh_cast_message(
                err,
                Some("Failed to serialize last block id".to_owned()),
            )),
        }
    }
}
/// Meta cell: flag recording whether the first block has ever been set.
/// Readers only check for the key's presence, so the stored bool value is
/// effectively a marker.
#[derive(Debug, BorshSerialize, BorshDeserialize)]
pub struct FirstBlockSetCell(pub bool);

impl SimpleStorableCell for FirstBlockSetCell {
    type KeyParams = ();

    const CELL_NAME: &'static str = DB_META_FIRST_BLOCK_SET_KEY;
    const CF_NAME: &'static str = CF_META_NAME;
}

impl SimpleReadableCell for FirstBlockSetCell {}

impl SimpleWritableCell for FirstBlockSetCell {
    fn value_constructor(&self) -> DbResult<Vec<u8>> {
        match borsh::to_vec(self) {
            Ok(bytes) => Ok(bytes),
            Err(err) => Err(DbError::borsh_cast_message(
                err,
                Some("Failed to serialize first block set flag".to_owned()),
            )),
        }
    }
}
/// Meta cell: id of the oldest block retained in the DB.
///
/// NOTE(review): no `SimpleWritableCell` impl is visible for this cell —
/// presumably the value is written through another code path; confirm.
#[derive(Debug, BorshSerialize, BorshDeserialize)]
pub struct FirstBlockCell(pub u64);

impl SimpleStorableCell for FirstBlockCell {
    type KeyParams = ();

    const CELL_NAME: &'static str = DB_META_FIRST_BLOCK_IN_DB_KEY;
    const CF_NAME: &'static str = CF_META_NAME;
}

impl SimpleReadableCell for FirstBlockCell {}
/// Per-block cell: a full [`Block`], keyed by its numeric block id.
#[derive(Debug, BorshSerialize, BorshDeserialize)]
pub struct BlockCell(pub Block);

impl SimpleStorableCell for BlockCell {
    type KeyParams = u64;

    const CELL_NAME: &'static str = BLOCK_CELL_NAME;
    const CF_NAME: &'static str = CF_BLOCK_NAME;

    // ToDo: Replace with increasing ordering serialization
    fn key_constructor(params: Self::KeyParams) -> DbResult<Vec<u8>> {
        borsh::to_vec(&params).map_err(|err| {
            let msg = format!("Failed to serialize {:?} key params", Self::CELL_NAME);
            DbError::borsh_cast_message(err, Some(msg))
        })
    }
}

impl SimpleReadableCell for BlockCell {}

View File

@ -0,0 +1,230 @@
use borsh::{BorshDeserialize, BorshSerialize};
use nssa::V03State;
use crate::{
CF_META_NAME, DbResult,
cells::{SimpleReadableCell, SimpleStorableCell, SimpleWritableCell},
error::DbError,
indexer::{
ACC_NUM_CELL_NAME, BLOCK_HASH_CELL_NAME, BREAKPOINT_CELL_NAME, CF_ACC_META,
CF_BREAKPOINT_NAME, CF_HASH_TO_ID, CF_TX_TO_ID, DB_META_LAST_BREAKPOINT_ID,
DB_META_LAST_OBSERVED_L1_LIB_HEADER_ID_IN_DB_KEY, TX_HASH_CELL_NAME,
},
};
/// Meta cell: hash of the last L1 lib header this node has observed.
#[derive(Debug, BorshSerialize, BorshDeserialize)]
pub struct LastObservedL1LibHeaderCell(pub [u8; 32]);

impl SimpleStorableCell for LastObservedL1LibHeaderCell {
    type KeyParams = ();

    const CELL_NAME: &'static str = DB_META_LAST_OBSERVED_L1_LIB_HEADER_ID_IN_DB_KEY;
    const CF_NAME: &'static str = CF_META_NAME;
}

impl SimpleReadableCell for LastObservedL1LibHeaderCell {}

impl SimpleWritableCell for LastObservedL1LibHeaderCell {
    fn value_constructor(&self) -> DbResult<Vec<u8>> {
        match borsh::to_vec(self) {
            Ok(bytes) => Ok(bytes),
            Err(err) => Err(DbError::borsh_cast_message(
                err,
                Some("Failed to serialize last observed l1 header".to_owned()),
            )),
        }
    }
}
/// Meta cell: id of the most recently written state breakpoint.
#[derive(Debug, BorshSerialize, BorshDeserialize)]
pub struct LastBreakpointIdCell(pub u64);

impl SimpleStorableCell for LastBreakpointIdCell {
    type KeyParams = ();

    const CELL_NAME: &'static str = DB_META_LAST_BREAKPOINT_ID;
    const CF_NAME: &'static str = CF_META_NAME;
}

impl SimpleReadableCell for LastBreakpointIdCell {}

impl SimpleWritableCell for LastBreakpointIdCell {
    fn value_constructor(&self) -> DbResult<Vec<u8>> {
        match borsh::to_vec(self) {
            Ok(bytes) => Ok(bytes),
            Err(err) => Err(DbError::borsh_cast_message(
                err,
                Some("Failed to serialize last breakpoint id".to_owned()),
            )),
        }
    }
}
/// Breakpoint cell, owned flavor: the read path, deserializing the stored
/// bytes into a fresh [`V03State`]. Must stay key/CF-aligned with
/// `BreakpointCellRef`, the write-side twin.
#[derive(BorshDeserialize)]
pub struct BreakpointCellOwned(pub V03State);

impl SimpleStorableCell for BreakpointCellOwned {
    type KeyParams = u64;

    const CELL_NAME: &'static str = BREAKPOINT_CELL_NAME;
    const CF_NAME: &'static str = CF_BREAKPOINT_NAME;

    fn key_constructor(params: Self::KeyParams) -> DbResult<Vec<u8>> {
        borsh::to_vec(&params).map_err(|err| {
            let msg = format!("Failed to serialize {:?} key params", Self::CELL_NAME);
            DbError::borsh_cast_message(err, Some(msg))
        })
    }
}

impl SimpleReadableCell for BreakpointCellOwned {}
/// Breakpoint cell, borrowed flavor: the write path, serializing a borrowed
/// [`V03State`] without cloning it. Must stay key/CF-aligned with
/// `BreakpointCellOwned`, the read-side twin.
#[derive(BorshSerialize)]
pub struct BreakpointCellRef<'state>(pub &'state V03State);

impl SimpleStorableCell for BreakpointCellRef<'_> {
    type KeyParams = u64;

    const CELL_NAME: &'static str = BREAKPOINT_CELL_NAME;
    const CF_NAME: &'static str = CF_BREAKPOINT_NAME;

    fn key_constructor(params: Self::KeyParams) -> DbResult<Vec<u8>> {
        borsh::to_vec(&params).map_err(|err| {
            let msg = format!("Failed to serialize {:?} key params", Self::CELL_NAME);
            DbError::borsh_cast_message(err, Some(msg))
        })
    }
}

impl SimpleWritableCell for BreakpointCellRef<'_> {
    fn value_constructor(&self) -> DbResult<Vec<u8>> {
        match borsh::to_vec(self) {
            Ok(bytes) => Ok(bytes),
            Err(err) => Err(DbError::borsh_cast_message(
                err,
                Some("Failed to serialize breakpoint".to_owned()),
            )),
        }
    }
}
/// Mapping cell: block hash (key) -> block id (value).
#[derive(Debug, BorshSerialize, BorshDeserialize)]
pub struct BlockHashToBlockIdMapCell(pub u64);

impl SimpleStorableCell for BlockHashToBlockIdMapCell {
    type KeyParams = [u8; 32];

    const CELL_NAME: &'static str = BLOCK_HASH_CELL_NAME;
    const CF_NAME: &'static str = CF_HASH_TO_ID;

    fn key_constructor(params: Self::KeyParams) -> DbResult<Vec<u8>> {
        borsh::to_vec(&params).map_err(|err| {
            let msg = format!("Failed to serialize {:?} key params", Self::CELL_NAME);
            DbError::borsh_cast_message(err, Some(msg))
        })
    }
}

impl SimpleReadableCell for BlockHashToBlockIdMapCell {}

impl SimpleWritableCell for BlockHashToBlockIdMapCell {
    fn value_constructor(&self) -> DbResult<Vec<u8>> {
        match borsh::to_vec(self) {
            Ok(bytes) => Ok(bytes),
            Err(err) => Err(DbError::borsh_cast_message(
                err,
                Some("Failed to serialize block id".to_owned()),
            )),
        }
    }
}
/// Mapping cell: transaction hash (key) -> id of the containing block (value).
#[derive(Debug, BorshSerialize, BorshDeserialize)]
pub struct TxHashToBlockIdMapCell(pub u64);

impl SimpleStorableCell for TxHashToBlockIdMapCell {
    type KeyParams = [u8; 32];

    const CELL_NAME: &'static str = TX_HASH_CELL_NAME;
    const CF_NAME: &'static str = CF_TX_TO_ID;

    fn key_constructor(params: Self::KeyParams) -> DbResult<Vec<u8>> {
        borsh::to_vec(&params).map_err(|err| {
            let msg = format!("Failed to serialize {:?} key params", Self::CELL_NAME);
            DbError::borsh_cast_message(err, Some(msg))
        })
    }
}

impl SimpleReadableCell for TxHashToBlockIdMapCell {}

impl SimpleWritableCell for TxHashToBlockIdMapCell {
    fn value_constructor(&self) -> DbResult<Vec<u8>> {
        match borsh::to_vec(self) {
            Ok(bytes) => Ok(bytes),
            Err(err) => Err(DbError::borsh_cast_message(
                err,
                Some("Failed to serialize block id".to_owned()),
            )),
        }
    }
}
/// Account-meta cell: account id (key) -> number of transactions touching
/// that account (value).
#[derive(Debug, BorshSerialize, BorshDeserialize)]
pub struct AccNumTxCell(pub u64);

impl SimpleStorableCell for AccNumTxCell {
    type KeyParams = [u8; 32];

    const CELL_NAME: &'static str = ACC_NUM_CELL_NAME;
    const CF_NAME: &'static str = CF_ACC_META;

    fn key_constructor(params: Self::KeyParams) -> DbResult<Vec<u8>> {
        borsh::to_vec(&params).map_err(|err| {
            let msg = format!("Failed to serialize {:?} key params", Self::CELL_NAME);
            DbError::borsh_cast_message(err, Some(msg))
        })
    }
}

impl SimpleReadableCell for AccNumTxCell {}

impl SimpleWritableCell for AccNumTxCell {
    fn value_constructor(&self) -> DbResult<Vec<u8>> {
        match borsh::to_vec(self) {
            Ok(bytes) => Ok(bytes),
            Err(err) => Err(DbError::borsh_cast_message(
                err,
                Some("Failed to serialize number of transactions".to_owned()),
            )),
        }
    }
}
#[cfg(test)]
mod uniform_tests {
    use crate::{
        cells::SimpleStorableCell as _,
        indexer::indexer_cells::{BreakpointCellOwned, BreakpointCellRef},
    };

    /// The borrowed (write-side) and owned (read-side) breakpoint cells must
    /// address the same storage location: same cell name, same column family,
    /// and identical keys for identical params.
    #[test]
    fn breakpoint_ref_and_owned_is_aligned() {
        assert_eq!(BreakpointCellRef::CELL_NAME, BreakpointCellOwned::CELL_NAME);
        assert_eq!(BreakpointCellRef::CF_NAME, BreakpointCellOwned::CF_NAME);

        let br_id = 1000;
        let ref_key = BreakpointCellRef::key_constructor(br_id).unwrap();
        let owned_key = BreakpointCellOwned::key_constructor(br_id).unwrap();
        assert_eq!(ref_key, owned_key);
    }
}

View File

@ -6,44 +6,29 @@ use rocksdb::{
BoundColumnFamily, ColumnFamilyDescriptor, DBWithThreadMode, MultiThreaded, Options,
};
use crate::error::DbError;
use crate::{BREAKPOINT_INTERVAL, CF_BLOCK_NAME, CF_META_NAME, DBIO, DbResult, error::DbError};
pub mod indexer_cells;
pub mod read_multiple;
pub mod read_once;
pub mod write_atomic;
pub mod write_non_atomic;
/// Maximal size of stored blocks in base.
///
/// Used to control db size.
///
/// Currently effectively unbounded.
pub const BUFF_SIZE_ROCKSDB: usize = usize::MAX;
/// Size of stored blocks cache in memory.
///
/// Keeping small to not run out of memory.
pub const CACHE_SIZE: usize = 1000;
/// Key base for storing metainformation about id of first block in db.
pub const DB_META_FIRST_BLOCK_IN_DB_KEY: &str = "first_block_in_db";
/// Key base for storing metainformation about id of last current block in db.
pub const DB_META_LAST_BLOCK_IN_DB_KEY: &str = "last_block_in_db";
/// Key base for storing metainformation about id of last observed L1 lib header in db.
pub const DB_META_LAST_OBSERVED_L1_LIB_HEADER_ID_IN_DB_KEY: &str =
"last_observed_l1_lib_header_in_db";
/// Key base for storing metainformation which describe if first block has been set.
pub const DB_META_FIRST_BLOCK_SET_KEY: &str = "first_block_set";
/// Key base for storing metainformation about the last breakpoint.
pub const DB_META_LAST_BREAKPOINT_ID: &str = "last_breakpoint_id";
/// Interval between state breakpoints.
pub const BREAKPOINT_INTERVAL: u8 = 100;
/// Cell name for a breakpoint.
pub const BREAKPOINT_CELL_NAME: &str = "breakpoint";
/// Cell name for a block hash to block id map.
pub const BLOCK_HASH_CELL_NAME: &str = "block hash";
/// Cell name for a tx hash to block id map.
pub const TX_HASH_CELL_NAME: &str = "tx hash";
/// Cell name for a account number of transactions.
pub const ACC_NUM_CELL_NAME: &str = "acc id";
/// Name of block column family.
pub const CF_BLOCK_NAME: &str = "cf_block";
/// Name of meta column family.
pub const CF_META_NAME: &str = "cf_meta";
/// Name of breakpoint column family.
pub const CF_BREAKPOINT_NAME: &str = "cf_breakpoint";
/// Name of hash to id map column family.
@ -55,12 +40,16 @@ pub const CF_ACC_META: &str = "cf_acc_meta";
/// Name of account id to tx hash map column family.
pub const CF_ACC_TO_TX: &str = "cf_acc_to_tx";
pub type DbResult<T> = Result<T, DbError>;
pub struct RocksDBIO {
pub db: DBWithThreadMode<MultiThreaded>,
}
impl DBIO for RocksDBIO {
fn db(&self) -> &DBWithThreadMode<MultiThreaded> {
&self.db
}
}
impl RocksDBIO {
pub fn open_or_create(
path: &Path,
@ -257,7 +246,7 @@ mod tests {
let dbio = RocksDBIO::open_or_create(
temdir_path,
&genesis_block(),
&nssa::V03State::new_with_genesis_accounts(&[(acc1(), 10000), (acc2(), 20000)], &[]),
&nssa::V03State::new_with_genesis_accounts(&[(acc1(), 10000), (acc2(), 20000)], &[], 0),
)
.unwrap();
@ -294,7 +283,7 @@ mod tests {
let dbio = RocksDBIO::open_or_create(
temdir_path,
&genesis_block(),
&nssa::V03State::new_with_genesis_accounts(&[(acc1(), 10000), (acc2(), 20000)], &[]),
&nssa::V03State::new_with_genesis_accounts(&[(acc1(), 10000), (acc2(), 20000)], &[], 0),
)
.unwrap();
@ -347,7 +336,7 @@ mod tests {
let dbio = RocksDBIO::open_or_create(
temdir_path,
&genesis_block(),
&nssa::V03State::new_with_genesis_accounts(&[(acc1(), 10000), (acc2(), 20000)], &[]),
&nssa::V03State::new_with_genesis_accounts(&[(acc1(), 10000), (acc2(), 20000)], &[], 0),
)
.unwrap();
@ -420,7 +409,7 @@ mod tests {
let dbio = RocksDBIO::open_or_create(
temdir_path,
&genesis_block(),
&nssa::V03State::new_with_genesis_accounts(&[(acc1(), 10000), (acc2(), 20000)], &[]),
&nssa::V03State::new_with_genesis_accounts(&[(acc1(), 10000), (acc2(), 20000)], &[], 0),
)
.unwrap();
@ -503,7 +492,7 @@ mod tests {
let dbio = RocksDBIO::open_or_create(
temdir_path,
&genesis_block(),
&nssa::V03State::new_with_genesis_accounts(&[(acc1(), 10000), (acc2(), 20000)], &[]),
&nssa::V03State::new_with_genesis_accounts(&[(acc1(), 10000), (acc2(), 20000)], &[], 0),
)
.unwrap();
@ -599,7 +588,7 @@ mod tests {
let dbio = RocksDBIO::open_or_create(
temdir_path,
&genesis_block(),
&nssa::V03State::new_with_genesis_accounts(&[(acc1(), 10000), (acc2(), 20000)], &[]),
&nssa::V03State::new_with_genesis_accounts(&[(acc1(), 10000), (acc2(), 20000)], &[], 0),
)
.unwrap();

View File

@ -1,7 +1,11 @@
use super::{
Block, DB_META_FIRST_BLOCK_IN_DB_KEY, DB_META_FIRST_BLOCK_SET_KEY,
DB_META_LAST_BLOCK_IN_DB_KEY, DB_META_LAST_BREAKPOINT_ID,
DB_META_LAST_OBSERVED_L1_LIB_HEADER_ID_IN_DB_KEY, DbError, DbResult, RocksDBIO, V03State,
use super::{Block, DbResult, RocksDBIO, V03State};
use crate::{
DBIO as _,
cells::shared_cells::{BlockCell, FirstBlockCell, FirstBlockSetCell, LastBlockCell},
indexer::indexer_cells::{
AccNumTxCell, BlockHashToBlockIdMapCell, BreakpointCellOwned, LastBreakpointIdCell,
LastObservedL1LibHeaderCell, TxHashToBlockIdMapCell,
},
};
#[expect(clippy::multiple_inherent_impl, reason = "Readability")]
@ -9,264 +13,55 @@ impl RocksDBIO {
// Meta
pub fn get_meta_first_block_in_db(&self) -> DbResult<u64> {
let cf_meta = self.meta_column();
let res = self
.db
.get_cf(
&cf_meta,
borsh::to_vec(&DB_META_FIRST_BLOCK_IN_DB_KEY).map_err(|err| {
DbError::borsh_cast_message(
err,
Some("Failed to serialize DB_META_FIRST_BLOCK_IN_DB_KEY".to_owned()),
)
})?,
)
.map_err(|rerr| DbError::rocksdb_cast_message(rerr, None))?;
if let Some(data) = res {
Ok(borsh::from_slice::<u64>(&data).map_err(|err| {
DbError::borsh_cast_message(
err,
Some("Failed to deserialize first block".to_owned()),
)
})?)
} else {
Err(DbError::db_interaction_error(
"First block not found".to_owned(),
))
}
self.get::<FirstBlockCell>(()).map(|cell| cell.0)
}
pub fn get_meta_last_block_in_db(&self) -> DbResult<u64> {
let cf_meta = self.meta_column();
let res = self
.db
.get_cf(
&cf_meta,
borsh::to_vec(&DB_META_LAST_BLOCK_IN_DB_KEY).map_err(|err| {
DbError::borsh_cast_message(
err,
Some("Failed to serialize DB_META_LAST_BLOCK_IN_DB_KEY".to_owned()),
)
})?,
)
.map_err(|rerr| DbError::rocksdb_cast_message(rerr, None))?;
if let Some(data) = res {
Ok(borsh::from_slice::<u64>(&data).map_err(|err| {
DbError::borsh_cast_message(
err,
Some("Failed to deserialize last block".to_owned()),
)
})?)
} else {
Err(DbError::db_interaction_error(
"Last block not found".to_owned(),
))
}
self.get::<LastBlockCell>(()).map(|cell| cell.0)
}
pub fn get_meta_last_observed_l1_lib_header_in_db(&self) -> DbResult<Option<[u8; 32]>> {
let cf_meta = self.meta_column();
let res = self
.db
.get_cf(
&cf_meta,
borsh::to_vec(&DB_META_LAST_OBSERVED_L1_LIB_HEADER_ID_IN_DB_KEY).map_err(
|err| {
DbError::borsh_cast_message(
err,
Some(
"Failed to serialize DB_META_LAST_OBSERVED_L1_LIB_HEADER_ID_IN_DB_KEY"
.to_owned(),
),
)
},
)?,
)
.map_err(|rerr| DbError::rocksdb_cast_message(rerr, None))?;
res.map(|data| {
borsh::from_slice::<[u8; 32]>(&data).map_err(|err| {
DbError::borsh_cast_message(
err,
Some("Failed to deserialize last l1 lib header".to_owned()),
)
})
})
.transpose()
self.get_opt::<LastObservedL1LibHeaderCell>(())
.map(|opt| opt.map(|val| val.0))
}
pub fn get_meta_is_first_block_set(&self) -> DbResult<bool> {
let cf_meta = self.meta_column();
let res = self
.db
.get_cf(
&cf_meta,
borsh::to_vec(&DB_META_FIRST_BLOCK_SET_KEY).map_err(|err| {
DbError::borsh_cast_message(
err,
Some("Failed to serialize DB_META_FIRST_BLOCK_SET_KEY".to_owned()),
)
})?,
)
.map_err(|rerr| DbError::rocksdb_cast_message(rerr, None))?;
Ok(res.is_some())
Ok(self.get_opt::<FirstBlockSetCell>(())?.is_some())
}
pub fn get_meta_last_breakpoint_id(&self) -> DbResult<u64> {
let cf_meta = self.meta_column();
let res = self
.db
.get_cf(
&cf_meta,
borsh::to_vec(&DB_META_LAST_BREAKPOINT_ID).map_err(|err| {
DbError::borsh_cast_message(
err,
Some("Failed to serialize DB_META_LAST_BREAKPOINT_ID".to_owned()),
)
})?,
)
.map_err(|rerr| DbError::rocksdb_cast_message(rerr, None))?;
if let Some(data) = res {
Ok(borsh::from_slice::<u64>(&data).map_err(|err| {
DbError::borsh_cast_message(
err,
Some("Failed to deserialize last breakpoint id".to_owned()),
)
})?)
} else {
Err(DbError::db_interaction_error(
"Last breakpoint id not found".to_owned(),
))
}
self.get::<LastBreakpointIdCell>(()).map(|cell| cell.0)
}
// Block
pub fn get_block(&self, block_id: u64) -> DbResult<Option<Block>> {
let cf_block = self.block_column();
let res = self
.db
.get_cf(
&cf_block,
borsh::to_vec(&block_id).map_err(|err| {
DbError::borsh_cast_message(
err,
Some("Failed to serialize block id".to_owned()),
)
})?,
)
.map_err(|rerr| DbError::rocksdb_cast_message(rerr, None))?;
if let Some(data) = res {
Ok(Some(borsh::from_slice::<Block>(&data).map_err(|serr| {
DbError::borsh_cast_message(
serr,
Some("Failed to deserialize block data".to_owned()),
)
})?))
} else {
Ok(None)
}
self.get_opt::<BlockCell>(block_id)
.map(|opt| opt.map(|val| val.0))
}
// State
pub fn get_breakpoint(&self, br_id: u64) -> DbResult<V03State> {
let cf_br = self.breakpoint_column();
let res = self
.db
.get_cf(
&cf_br,
borsh::to_vec(&br_id).map_err(|err| {
DbError::borsh_cast_message(
err,
Some("Failed to serialize breakpoint id".to_owned()),
)
})?,
)
.map_err(|rerr| DbError::rocksdb_cast_message(rerr, None))?;
if let Some(data) = res {
Ok(borsh::from_slice::<V03State>(&data).map_err(|serr| {
DbError::borsh_cast_message(
serr,
Some("Failed to deserialize breakpoint data".to_owned()),
)
})?)
} else {
Err(DbError::db_interaction_error(
"Breakpoint on this id not found".to_owned(),
))
}
self.get::<BreakpointCellOwned>(br_id).map(|cell| cell.0)
}
// Mappings
pub fn get_block_id_by_hash(&self, hash: [u8; 32]) -> DbResult<Option<u64>> {
let cf_hti = self.hash_to_id_column();
let res = self
.db
.get_cf(
&cf_hti,
borsh::to_vec(&hash).map_err(|err| {
DbError::borsh_cast_message(
err,
Some("Failed to serialize block hash".to_owned()),
)
})?,
)
.map_err(|rerr| DbError::rocksdb_cast_message(rerr, None))?;
if let Some(data) = res {
Ok(Some(borsh::from_slice::<u64>(&data).map_err(|serr| {
DbError::borsh_cast_message(serr, Some("Failed to deserialize block id".to_owned()))
})?))
} else {
Ok(None)
}
self.get_opt::<BlockHashToBlockIdMapCell>(hash)
.map(|opt| opt.map(|cell| cell.0))
}
pub fn get_block_id_by_tx_hash(&self, tx_hash: [u8; 32]) -> DbResult<Option<u64>> {
let cf_tti = self.tx_hash_to_id_column();
let res = self
.db
.get_cf(
&cf_tti,
borsh::to_vec(&tx_hash).map_err(|err| {
DbError::borsh_cast_message(
err,
Some("Failed to serialize transaction hash".to_owned()),
)
})?,
)
.map_err(|rerr| DbError::rocksdb_cast_message(rerr, None))?;
if let Some(data) = res {
Ok(Some(borsh::from_slice::<u64>(&data).map_err(|serr| {
DbError::borsh_cast_message(serr, Some("Failed to deserialize block id".to_owned()))
})?))
} else {
Ok(None)
}
self.get_opt::<TxHashToBlockIdMapCell>(tx_hash)
.map(|opt| opt.map(|cell| cell.0))
}
// Accounts meta
pub(crate) fn get_acc_meta_num_tx(&self, acc_id: [u8; 32]) -> DbResult<Option<u64>> {
let cf_ameta = self.account_meta_column();
let res = self.db.get_cf(&cf_ameta, acc_id).map_err(|rerr| {
DbError::rocksdb_cast_message(rerr, Some("Failed to read from acc meta cf".to_owned()))
})?;
res.map(|data| {
borsh::from_slice::<u64>(&data).map_err(|serr| {
DbError::borsh_cast_message(serr, Some("Failed to deserialize num tx".to_owned()))
})
})
.transpose()
self.get_opt::<AccNumTxCell>(acc_id)
.map(|opt| opt.map(|cell| cell.0))
}
}

View File

@ -2,10 +2,14 @@ use std::collections::HashMap;
use rocksdb::WriteBatch;
use super::{
Arc, BREAKPOINT_INTERVAL, Block, BoundColumnFamily, DB_META_FIRST_BLOCK_IN_DB_KEY,
DB_META_FIRST_BLOCK_SET_KEY, DB_META_LAST_BLOCK_IN_DB_KEY, DB_META_LAST_BREAKPOINT_ID,
DB_META_LAST_OBSERVED_L1_LIB_HEADER_ID_IN_DB_KEY, DbError, DbResult, RocksDBIO,
use super::{BREAKPOINT_INTERVAL, Block, DbError, DbResult, RocksDBIO};
use crate::{
DB_META_FIRST_BLOCK_IN_DB_KEY, DBIO as _,
cells::shared_cells::{FirstBlockSetCell, LastBlockCell},
indexer::indexer_cells::{
AccNumTxCell, BlockHashToBlockIdMapCell, LastBreakpointIdCell, LastObservedL1LibHeaderCell,
TxHashToBlockIdMapCell,
},
};
#[expect(clippy::multiple_inherent_impl, reason = "Readability")]
@ -18,22 +22,27 @@ impl RocksDBIO {
num_tx: u64,
write_batch: &mut WriteBatch,
) -> DbResult<()> {
let cf_ameta = self.account_meta_column();
self.put_batch(&AccNumTxCell(num_tx), acc_id, write_batch)
}
write_batch.put_cf(
&cf_ameta,
borsh::to_vec(&acc_id).map_err(|err| {
DbError::borsh_cast_message(err, Some("Failed to serialize account id".to_owned()))
})?,
borsh::to_vec(&num_tx).map_err(|err| {
DbError::borsh_cast_message(
err,
Some("Failed to serialize acc metadata".to_owned()),
)
})?,
);
// Mappings
Ok(())
pub fn put_block_id_by_hash_batch(
&self,
hash: [u8; 32],
block_id: u64,
write_batch: &mut WriteBatch,
) -> DbResult<()> {
self.put_batch(&BlockHashToBlockIdMapCell(block_id), hash, write_batch)
}
pub fn put_block_id_by_tx_hash_batch(
&self,
tx_hash: [u8; 32],
block_id: u64,
write_batch: &mut WriteBatch,
) -> DbResult<()> {
self.put_batch(&TxHashToBlockIdMapCell(block_id), tx_hash, write_batch)
}
// Account
@ -163,23 +172,7 @@ impl RocksDBIO {
block_id: u64,
write_batch: &mut WriteBatch,
) -> DbResult<()> {
let cf_meta = self.meta_column();
write_batch.put_cf(
&cf_meta,
borsh::to_vec(&DB_META_LAST_BLOCK_IN_DB_KEY).map_err(|err| {
DbError::borsh_cast_message(
err,
Some("Failed to serialize DB_META_LAST_BLOCK_IN_DB_KEY".to_owned()),
)
})?,
borsh::to_vec(&block_id).map_err(|err| {
DbError::borsh_cast_message(
err,
Some("Failed to serialize last block id".to_owned()),
)
})?,
);
Ok(())
self.put_batch(&LastBlockCell(block_id), (), write_batch)
}
pub fn put_meta_last_observed_l1_lib_header_in_db_batch(
@ -187,26 +180,7 @@ impl RocksDBIO {
l1_lib_header: [u8; 32],
write_batch: &mut WriteBatch,
) -> DbResult<()> {
let cf_meta = self.meta_column();
write_batch.put_cf(
&cf_meta,
borsh::to_vec(&DB_META_LAST_OBSERVED_L1_LIB_HEADER_ID_IN_DB_KEY).map_err(|err| {
DbError::borsh_cast_message(
err,
Some(
"Failed to serialize DB_META_LAST_OBSERVED_L1_LIB_HEADER_ID_IN_DB_KEY"
.to_owned(),
),
)
})?,
borsh::to_vec(&l1_lib_header).map_err(|err| {
DbError::borsh_cast_message(
err,
Some("Failed to serialize last l1 block header".to_owned()),
)
})?,
);
Ok(())
self.put_batch(&LastObservedL1LibHeaderCell(l1_lib_header), (), write_batch)
}
pub fn put_meta_last_breakpoint_id_batch(
@ -214,46 +188,17 @@ impl RocksDBIO {
br_id: u64,
write_batch: &mut WriteBatch,
) -> DbResult<()> {
let cf_meta = self.meta_column();
write_batch.put_cf(
&cf_meta,
borsh::to_vec(&DB_META_LAST_BREAKPOINT_ID).map_err(|err| {
DbError::borsh_cast_message(
err,
Some("Failed to serialize DB_META_LAST_BREAKPOINT_ID".to_owned()),
)
})?,
borsh::to_vec(&br_id).map_err(|err| {
DbError::borsh_cast_message(
err,
Some("Failed to serialize last block id".to_owned()),
)
})?,
);
Ok(())
self.put_batch(&LastBreakpointIdCell(br_id), (), write_batch)
}
pub fn put_meta_is_first_block_set_batch(&self, write_batch: &mut WriteBatch) -> DbResult<()> {
let cf_meta = self.meta_column();
write_batch.put_cf(
&cf_meta,
borsh::to_vec(&DB_META_FIRST_BLOCK_SET_KEY).map_err(|err| {
DbError::borsh_cast_message(
err,
Some("Failed to serialize DB_META_FIRST_BLOCK_SET_KEY".to_owned()),
)
})?,
[1_u8; 1],
);
Ok(())
self.put_batch(&FirstBlockSetCell(true), (), write_batch)
}
// Block
pub fn put_block(&self, block: &Block, l1_lib_header: [u8; 32]) -> DbResult<()> {
let cf_block = self.block_column();
let cf_hti = self.hash_to_id_column();
let cf_tti: Arc<BoundColumnFamily<'_>> = self.tx_hash_to_id_column();
let last_curr_block = self.get_meta_last_block_in_db()?;
let mut write_batch = WriteBatch::default();
@ -272,33 +217,22 @@ impl RocksDBIO {
self.put_meta_last_observed_l1_lib_header_in_db_batch(l1_lib_header, &mut write_batch)?;
}
write_batch.put_cf(
&cf_hti,
borsh::to_vec(&block.header.hash).map_err(|err| {
DbError::borsh_cast_message(err, Some("Failed to serialize block hash".to_owned()))
})?,
borsh::to_vec(&block.header.block_id).map_err(|err| {
DbError::borsh_cast_message(err, Some("Failed to serialize block id".to_owned()))
})?,
);
self.put_block_id_by_hash_batch(
block.header.hash.into(),
block.header.block_id,
&mut write_batch,
)?;
let mut acc_to_tx_map: HashMap<[u8; 32], Vec<[u8; 32]>> = HashMap::new();
for tx in &block.body.transactions {
let tx_hash = tx.hash();
write_batch.put_cf(
&cf_tti,
borsh::to_vec(&tx_hash).map_err(|err| {
DbError::borsh_cast_message(err, Some("Failed to serialize tx hash".to_owned()))
})?,
borsh::to_vec(&block.header.block_id).map_err(|err| {
DbError::borsh_cast_message(
err,
Some("Failed to serialize block id".to_owned()),
)
})?,
);
self.put_block_id_by_tx_hash_batch(
tx_hash.into(),
block.header.block_id,
&mut write_batch,
)?;
let acc_ids = tx
.affected_public_account_ids()

View File

@ -1,7 +1,10 @@
use super::{
BREAKPOINT_INTERVAL, DB_META_FIRST_BLOCK_SET_KEY, DB_META_LAST_BLOCK_IN_DB_KEY,
DB_META_LAST_BREAKPOINT_ID, DB_META_LAST_OBSERVED_L1_LIB_HEADER_ID_IN_DB_KEY, DbError,
DbResult, RocksDBIO, V03State,
use super::{BREAKPOINT_INTERVAL, DbError, DbResult, RocksDBIO, V03State};
use crate::{
DBIO as _,
cells::shared_cells::{FirstBlockSetCell, LastBlockCell},
indexer::indexer_cells::{
BreakpointCellRef, LastBreakpointIdCell, LastObservedL1LibHeaderCell,
},
};
#[expect(clippy::multiple_inherent_impl, reason = "Readability")]
@ -9,118 +12,28 @@ impl RocksDBIO {
// Meta
pub fn put_meta_last_block_in_db(&self, block_id: u64) -> DbResult<()> {
let cf_meta = self.meta_column();
self.db
.put_cf(
&cf_meta,
borsh::to_vec(&DB_META_LAST_BLOCK_IN_DB_KEY).map_err(|err| {
DbError::borsh_cast_message(
err,
Some("Failed to serialize DB_META_LAST_BLOCK_IN_DB_KEY".to_owned()),
)
})?,
borsh::to_vec(&block_id).map_err(|err| {
DbError::borsh_cast_message(
err,
Some("Failed to serialize last block id".to_owned()),
)
})?,
)
.map_err(|rerr| DbError::rocksdb_cast_message(rerr, None))?;
Ok(())
self.put(&LastBlockCell(block_id), ())
}
pub fn put_meta_last_observed_l1_lib_header_in_db(
&self,
l1_lib_header: [u8; 32],
) -> DbResult<()> {
let cf_meta = self.meta_column();
self.db
.put_cf(
&cf_meta,
borsh::to_vec(&DB_META_LAST_OBSERVED_L1_LIB_HEADER_ID_IN_DB_KEY).map_err(
|err| {
DbError::borsh_cast_message(
err,
Some(
"Failed to serialize DB_META_LAST_OBSERVED_L1_LIB_HEADER_ID_IN_DB_KEY"
.to_owned(),
),
)
},
)?,
borsh::to_vec(&l1_lib_header).map_err(|err| {
DbError::borsh_cast_message(
err,
Some("Failed to serialize last l1 block header".to_owned()),
)
})?,
)
.map_err(|rerr| DbError::rocksdb_cast_message(rerr, None))?;
Ok(())
self.put(&LastObservedL1LibHeaderCell(l1_lib_header), ())
}
pub fn put_meta_last_breakpoint_id(&self, br_id: u64) -> DbResult<()> {
let cf_meta = self.meta_column();
self.db
.put_cf(
&cf_meta,
borsh::to_vec(&DB_META_LAST_BREAKPOINT_ID).map_err(|err| {
DbError::borsh_cast_message(
err,
Some("Failed to serialize DB_META_LAST_BREAKPOINT_ID".to_owned()),
)
})?,
borsh::to_vec(&br_id).map_err(|err| {
DbError::borsh_cast_message(
err,
Some("Failed to serialize last block id".to_owned()),
)
})?,
)
.map_err(|rerr| DbError::rocksdb_cast_message(rerr, None))?;
Ok(())
self.put(&LastBreakpointIdCell(br_id), ())
}
pub fn put_meta_is_first_block_set(&self) -> DbResult<()> {
let cf_meta = self.meta_column();
self.db
.put_cf(
&cf_meta,
borsh::to_vec(&DB_META_FIRST_BLOCK_SET_KEY).map_err(|err| {
DbError::borsh_cast_message(
err,
Some("Failed to serialize DB_META_FIRST_BLOCK_SET_KEY".to_owned()),
)
})?,
[1_u8; 1],
)
.map_err(|rerr| DbError::rocksdb_cast_message(rerr, None))?;
Ok(())
self.put(&FirstBlockSetCell(true), ())
}
// State
pub fn put_breakpoint(&self, br_id: u64, breakpoint: &V03State) -> DbResult<()> {
let cf_br = self.breakpoint_column();
self.db
.put_cf(
&cf_br,
borsh::to_vec(&br_id).map_err(|err| {
DbError::borsh_cast_message(
err,
Some("Failed to serialize breakpoint id".to_owned()),
)
})?,
borsh::to_vec(breakpoint).map_err(|err| {
DbError::borsh_cast_message(
err,
Some("Failed to serialize breakpoint data".to_owned()),
)
})?,
)
.map_err(|rerr| DbError::rocksdb_cast_message(rerr, None))
self.put(&BreakpointCellRef(breakpoint), br_id)
}
pub fn put_next_breakpoint(&self) -> DbResult<()> {

View File

@ -1,3 +1,69 @@
use rocksdb::{DBWithThreadMode, MultiThreaded, WriteBatch};
use crate::{
cells::{SimpleReadableCell, SimpleWritableCell},
error::DbError,
};
pub mod cells;
pub mod error;
pub mod indexer;
pub mod sequencer;
/// Maximum size of stored blocks in the database.
///
/// Used to control db size.
///
/// Currently effectively unbounded.
pub const BUFF_SIZE_ROCKSDB: usize = usize::MAX;
/// Size of stored blocks cache in memory.
///
/// Keeping small to not run out of memory.
pub const CACHE_SIZE: usize = 1000;
/// Key base for storing metainformation which describe if first block has been set.
pub const DB_META_FIRST_BLOCK_SET_KEY: &str = "first_block_set";
/// Key base for storing metainformation about id of first block in db.
pub const DB_META_FIRST_BLOCK_IN_DB_KEY: &str = "first_block_in_db";
/// Key base for storing metainformation about id of last current block in db.
pub const DB_META_LAST_BLOCK_IN_DB_KEY: &str = "last_block_in_db";
/// Cell name for a block.
pub const BLOCK_CELL_NAME: &str = "block";
/// Interval between state breakpoints.
pub const BREAKPOINT_INTERVAL: u8 = 100;
/// Name of block column family.
pub const CF_BLOCK_NAME: &str = "cf_block";
/// Name of meta column family.
pub const CF_META_NAME: &str = "cf_meta";
pub type DbResult<T> = Result<T, DbError>;
/// Minimal requirements for DB IO.
pub trait DBIO {
    /// Returns a handle to the underlying RocksDB instance.
    fn db(&self) -> &DBWithThreadMode<MultiThreaded>;

    /// Reads the cell `T` keyed by `params`, failing if it is absent.
    fn get<T: SimpleReadableCell>(&self, params: T::KeyParams) -> DbResult<T> {
        T::get(self.db(), params)
    }

    /// Reads the cell `T` keyed by `params`, returning `None` if it is absent.
    fn get_opt<T: SimpleReadableCell>(&self, params: T::KeyParams) -> DbResult<Option<T>> {
        T::get_opt(self.db(), params)
    }

    /// Writes `cell` to the database immediately.
    fn put<T: SimpleWritableCell>(&self, cell: &T, params: T::KeyParams) -> DbResult<()> {
        cell.put(self.db(), params)
    }

    /// Queues a write of `cell` into `write_batch`; the write becomes visible
    /// only when the caller commits the batch.
    fn put_batch<T: SimpleWritableCell>(
        &self,
        cell: &T,
        params: T::KeyParams,
        write_batch: &mut WriteBatch,
    ) -> DbResult<()> {
        cell.put_batch(self.db(), params, write_batch)
    }
}

View File

@ -1,596 +0,0 @@
use std::{path::Path, sync::Arc};
use common::block::{BedrockStatus, Block, BlockMeta, MantleMsgId};
use nssa::V03State;
use rocksdb::{
BoundColumnFamily, ColumnFamilyDescriptor, DBWithThreadMode, MultiThreaded, Options, WriteBatch,
};
use crate::error::DbError;
/// Maximal size of stored blocks in base.
///
/// Used to control db size.
///
/// Currently effectively unbounded.
pub const BUFF_SIZE_ROCKSDB: usize = usize::MAX;
/// Size of stored blocks cache in memory.
///
/// Keeping small to not run out of memory.
pub const CACHE_SIZE: usize = 1000;
/// Key base for storing metainformation about id of first block in db.
pub const DB_META_FIRST_BLOCK_IN_DB_KEY: &str = "first_block_in_db";
/// Key base for storing metainformation about id of last current block in db.
pub const DB_META_LAST_BLOCK_IN_DB_KEY: &str = "last_block_in_db";
/// Key base for storing metainformation which describe if first block has been set.
pub const DB_META_FIRST_BLOCK_SET_KEY: &str = "first_block_set";
/// Key base for storing metainformation about the last finalized block on Bedrock.
pub const DB_META_LAST_FINALIZED_BLOCK_ID: &str = "last_finalized_block_id";
/// Key base for storing metainformation about the latest block meta.
pub const DB_META_LATEST_BLOCK_META_KEY: &str = "latest_block_meta";
/// Key base for storing the NSSA state.
pub const DB_NSSA_STATE_KEY: &str = "nssa_state";
/// Name of block column family.
pub const CF_BLOCK_NAME: &str = "cf_block";
/// Name of meta column family.
pub const CF_META_NAME: &str = "cf_meta";
/// Name of state column family.
pub const CF_NSSA_STATE_NAME: &str = "cf_nssa_state";
pub type DbResult<T> = Result<T, DbError>;
pub struct RocksDBIO {
pub db: DBWithThreadMode<MultiThreaded>,
}
impl RocksDBIO {
pub fn open_or_create(
path: &Path,
genesis_block: &Block,
genesis_msg_id: MantleMsgId,
) -> DbResult<Self> {
let mut cf_opts = Options::default();
cf_opts.set_max_write_buffer_number(16);
// ToDo: Add more column families for different data
let cfb = ColumnFamilyDescriptor::new(CF_BLOCK_NAME, cf_opts.clone());
let cfmeta = ColumnFamilyDescriptor::new(CF_META_NAME, cf_opts.clone());
let cfstate = ColumnFamilyDescriptor::new(CF_NSSA_STATE_NAME, cf_opts.clone());
let mut db_opts = Options::default();
db_opts.create_missing_column_families(true);
db_opts.create_if_missing(true);
let db = DBWithThreadMode::<MultiThreaded>::open_cf_descriptors(
&db_opts,
path,
vec![cfb, cfmeta, cfstate],
)
.map_err(|err| DbError::RocksDbError {
error: err,
additional_info: Some("Failed to open or create DB".to_owned()),
})?;
let dbio = Self { db };
let is_start_set = dbio.get_meta_is_first_block_set()?;
if !is_start_set {
let block_id = genesis_block.header.block_id;
dbio.put_meta_first_block_in_db(genesis_block, genesis_msg_id)?;
dbio.put_meta_is_first_block_set()?;
dbio.put_meta_last_block_in_db(block_id)?;
dbio.put_meta_last_finalized_block_id(None)?;
dbio.put_meta_latest_block_meta(&BlockMeta {
id: genesis_block.header.block_id,
hash: genesis_block.header.hash,
msg_id: genesis_msg_id,
})?;
}
Ok(dbio)
}
pub fn destroy(path: &Path) -> DbResult<()> {
let mut cf_opts = Options::default();
cf_opts.set_max_write_buffer_number(16);
// ToDo: Add more column families for different data
let _cfb = ColumnFamilyDescriptor::new(CF_BLOCK_NAME, cf_opts.clone());
let _cfmeta = ColumnFamilyDescriptor::new(CF_META_NAME, cf_opts.clone());
let _cfstate = ColumnFamilyDescriptor::new(CF_NSSA_STATE_NAME, cf_opts.clone());
let mut db_opts = Options::default();
db_opts.create_missing_column_families(true);
db_opts.create_if_missing(true);
DBWithThreadMode::<MultiThreaded>::destroy(&db_opts, path)
.map_err(|rerr| DbError::rocksdb_cast_message(rerr, None))
}
pub fn meta_column(&self) -> Arc<BoundColumnFamily<'_>> {
self.db.cf_handle(CF_META_NAME).unwrap()
}
pub fn block_column(&self) -> Arc<BoundColumnFamily<'_>> {
self.db.cf_handle(CF_BLOCK_NAME).unwrap()
}
pub fn nssa_state_column(&self) -> Arc<BoundColumnFamily<'_>> {
self.db.cf_handle(CF_NSSA_STATE_NAME).unwrap()
}
pub fn get_meta_first_block_in_db(&self) -> DbResult<u64> {
let cf_meta = self.meta_column();
let res = self
.db
.get_cf(
&cf_meta,
borsh::to_vec(&DB_META_FIRST_BLOCK_IN_DB_KEY).map_err(|err| {
DbError::borsh_cast_message(
err,
Some("Failed to serialize DB_META_FIRST_BLOCK_IN_DB_KEY".to_owned()),
)
})?,
)
.map_err(|rerr| DbError::rocksdb_cast_message(rerr, None))?;
if let Some(data) = res {
Ok(borsh::from_slice::<u64>(&data).map_err(|err| {
DbError::borsh_cast_message(
err,
Some("Failed to deserialize first block".to_owned()),
)
})?)
} else {
Err(DbError::db_interaction_error(
"First block not found".to_owned(),
))
}
}
pub fn get_meta_last_block_in_db(&self) -> DbResult<u64> {
let cf_meta = self.meta_column();
let res = self
.db
.get_cf(
&cf_meta,
borsh::to_vec(&DB_META_LAST_BLOCK_IN_DB_KEY).map_err(|err| {
DbError::borsh_cast_message(
err,
Some("Failed to serialize DB_META_LAST_BLOCK_IN_DB_KEY".to_owned()),
)
})?,
)
.map_err(|rerr| DbError::rocksdb_cast_message(rerr, None))?;
if let Some(data) = res {
Ok(borsh::from_slice::<u64>(&data).map_err(|err| {
DbError::borsh_cast_message(
err,
Some("Failed to deserialize last block".to_owned()),
)
})?)
} else {
Err(DbError::db_interaction_error(
"Last block not found".to_owned(),
))
}
}
pub fn get_meta_is_first_block_set(&self) -> DbResult<bool> {
let cf_meta = self.meta_column();
let res = self
.db
.get_cf(
&cf_meta,
borsh::to_vec(&DB_META_FIRST_BLOCK_SET_KEY).map_err(|err| {
DbError::borsh_cast_message(
err,
Some("Failed to serialize DB_META_FIRST_BLOCK_SET_KEY".to_owned()),
)
})?,
)
.map_err(|rerr| DbError::rocksdb_cast_message(rerr, None))?;
Ok(res.is_some())
}
pub fn put_nssa_state_in_db(&self, state: &V03State, batch: &mut WriteBatch) -> DbResult<()> {
let cf_nssa_state = self.nssa_state_column();
batch.put_cf(
&cf_nssa_state,
borsh::to_vec(&DB_NSSA_STATE_KEY).map_err(|err| {
DbError::borsh_cast_message(
err,
Some("Failed to serialize DB_NSSA_STATE_KEY".to_owned()),
)
})?,
borsh::to_vec(state).map_err(|err| {
DbError::borsh_cast_message(err, Some("Failed to serialize NSSA state".to_owned()))
})?,
);
Ok(())
}
pub fn put_meta_first_block_in_db(&self, block: &Block, msg_id: MantleMsgId) -> DbResult<()> {
let cf_meta = self.meta_column();
self.db
.put_cf(
&cf_meta,
borsh::to_vec(&DB_META_FIRST_BLOCK_IN_DB_KEY).map_err(|err| {
DbError::borsh_cast_message(
err,
Some("Failed to serialize DB_META_FIRST_BLOCK_IN_DB_KEY".to_owned()),
)
})?,
borsh::to_vec(&block.header.block_id).map_err(|err| {
DbError::borsh_cast_message(
err,
Some("Failed to serialize first block id".to_owned()),
)
})?,
)
.map_err(|rerr| DbError::rocksdb_cast_message(rerr, None))?;
let mut batch = WriteBatch::default();
self.put_block(block, msg_id, true, &mut batch)?;
self.db.write(batch).map_err(|rerr| {
DbError::rocksdb_cast_message(
rerr,
Some("Failed to write first block in db".to_owned()),
)
})?;
Ok(())
}
pub fn put_meta_last_block_in_db(&self, block_id: u64) -> DbResult<()> {
let cf_meta = self.meta_column();
self.db
.put_cf(
&cf_meta,
borsh::to_vec(&DB_META_LAST_BLOCK_IN_DB_KEY).map_err(|err| {
DbError::borsh_cast_message(
err,
Some("Failed to serialize DB_META_LAST_BLOCK_IN_DB_KEY".to_owned()),
)
})?,
borsh::to_vec(&block_id).map_err(|err| {
DbError::borsh_cast_message(
err,
Some("Failed to serialize last block id".to_owned()),
)
})?,
)
.map_err(|rerr| DbError::rocksdb_cast_message(rerr, None))?;
Ok(())
}
fn put_meta_last_block_in_db_batch(
&self,
block_id: u64,
batch: &mut WriteBatch,
) -> DbResult<()> {
let cf_meta = self.meta_column();
batch.put_cf(
&cf_meta,
borsh::to_vec(&DB_META_LAST_BLOCK_IN_DB_KEY).map_err(|err| {
DbError::borsh_cast_message(
err,
Some("Failed to serialize DB_META_LAST_BLOCK_IN_DB_KEY".to_owned()),
)
})?,
borsh::to_vec(&block_id).map_err(|err| {
DbError::borsh_cast_message(
err,
Some("Failed to serialize last block id".to_owned()),
)
})?,
);
Ok(())
}
pub fn put_meta_last_finalized_block_id(&self, block_id: Option<u64>) -> DbResult<()> {
let cf_meta = self.meta_column();
self.db
.put_cf(
&cf_meta,
borsh::to_vec(&DB_META_LAST_FINALIZED_BLOCK_ID).map_err(|err| {
DbError::borsh_cast_message(
err,
Some("Failed to serialize DB_META_LAST_FINALIZED_BLOCK_ID".to_owned()),
)
})?,
borsh::to_vec(&block_id).map_err(|err| {
DbError::borsh_cast_message(
err,
Some("Failed to serialize last block id".to_owned()),
)
})?,
)
.map_err(|rerr| DbError::rocksdb_cast_message(rerr, None))?;
Ok(())
}
pub fn put_meta_is_first_block_set(&self) -> DbResult<()> {
let cf_meta = self.meta_column();
self.db
.put_cf(
&cf_meta,
borsh::to_vec(&DB_META_FIRST_BLOCK_SET_KEY).map_err(|err| {
DbError::borsh_cast_message(
err,
Some("Failed to serialize DB_META_FIRST_BLOCK_SET_KEY".to_owned()),
)
})?,
[1_u8; 1],
)
.map_err(|rerr| DbError::rocksdb_cast_message(rerr, None))?;
Ok(())
}
fn put_meta_latest_block_meta(&self, block_meta: &BlockMeta) -> DbResult<()> {
let cf_meta = self.meta_column();
self.db
.put_cf(
&cf_meta,
borsh::to_vec(&DB_META_LATEST_BLOCK_META_KEY).map_err(|err| {
DbError::borsh_cast_message(
err,
Some("Failed to serialize DB_META_LATEST_BLOCK_META_KEY".to_owned()),
)
})?,
borsh::to_vec(&block_meta).map_err(|err| {
DbError::borsh_cast_message(
err,
Some("Failed to serialize latest block meta".to_owned()),
)
})?,
)
.map_err(|rerr| DbError::rocksdb_cast_message(rerr, None))?;
Ok(())
}
fn put_meta_latest_block_meta_batch(
&self,
block_meta: &BlockMeta,
batch: &mut WriteBatch,
) -> DbResult<()> {
let cf_meta = self.meta_column();
batch.put_cf(
&cf_meta,
borsh::to_vec(&DB_META_LATEST_BLOCK_META_KEY).map_err(|err| {
DbError::borsh_cast_message(
err,
Some("Failed to serialize DB_META_LATEST_BLOCK_META_KEY".to_owned()),
)
})?,
borsh::to_vec(&block_meta).map_err(|err| {
DbError::borsh_cast_message(
err,
Some("Failed to serialize latest block meta".to_owned()),
)
})?,
);
Ok(())
}
pub fn latest_block_meta(&self) -> DbResult<BlockMeta> {
let cf_meta = self.meta_column();
let res = self
.db
.get_cf(
&cf_meta,
borsh::to_vec(&DB_META_LATEST_BLOCK_META_KEY).map_err(|err| {
DbError::borsh_cast_message(
err,
Some("Failed to serialize DB_META_LATEST_BLOCK_META_KEY".to_owned()),
)
})?,
)
.map_err(|rerr| DbError::rocksdb_cast_message(rerr, None))?;
if let Some(data) = res {
Ok(borsh::from_slice::<BlockMeta>(&data).map_err(|err| {
DbError::borsh_cast_message(
err,
Some("Failed to deserialize latest block meta".to_owned()),
)
})?)
} else {
Err(DbError::db_interaction_error(
"Latest block meta not found".to_owned(),
))
}
}
pub fn put_block(
&self,
block: &Block,
msg_id: MantleMsgId,
first: bool,
batch: &mut WriteBatch,
) -> DbResult<()> {
let cf_block = self.block_column();
if !first {
let last_curr_block = self.get_meta_last_block_in_db()?;
if block.header.block_id > last_curr_block {
self.put_meta_last_block_in_db_batch(block.header.block_id, batch)?;
self.put_meta_latest_block_meta_batch(
&BlockMeta {
id: block.header.block_id,
hash: block.header.hash,
msg_id,
},
batch,
)?;
}
}
batch.put_cf(
&cf_block,
borsh::to_vec(&block.header.block_id).map_err(|err| {
DbError::borsh_cast_message(err, Some("Failed to serialize block id".to_owned()))
})?,
borsh::to_vec(block).map_err(|err| {
DbError::borsh_cast_message(err, Some("Failed to serialize block data".to_owned()))
})?,
);
Ok(())
}
pub fn get_block(&self, block_id: u64) -> DbResult<Option<Block>> {
let cf_block = self.block_column();
let res = self
.db
.get_cf(
&cf_block,
borsh::to_vec(&block_id).map_err(|err| {
DbError::borsh_cast_message(
err,
Some("Failed to serialize block id".to_owned()),
)
})?,
)
.map_err(|rerr| DbError::rocksdb_cast_message(rerr, None))?;
if let Some(data) = res {
Ok(Some(borsh::from_slice::<Block>(&data).map_err(|serr| {
DbError::borsh_cast_message(
serr,
Some("Failed to deserialize block data".to_owned()),
)
})?))
} else {
Ok(None)
}
}
pub fn get_nssa_state(&self) -> DbResult<V03State> {
let cf_nssa_state = self.nssa_state_column();
let res = self
.db
.get_cf(
&cf_nssa_state,
borsh::to_vec(&DB_NSSA_STATE_KEY).map_err(|err| {
DbError::borsh_cast_message(
err,
Some("Failed to serialize block id".to_owned()),
)
})?,
)
.map_err(|rerr| DbError::rocksdb_cast_message(rerr, None))?;
if let Some(data) = res {
Ok(borsh::from_slice::<V03State>(&data).map_err(|serr| {
DbError::borsh_cast_message(
serr,
Some("Failed to deserialize block data".to_owned()),
)
})?)
} else {
Err(DbError::db_interaction_error(
"NSSA state not found".to_owned(),
))
}
}
pub fn delete_block(&self, block_id: u64) -> DbResult<()> {
let cf_block = self.block_column();
let key = borsh::to_vec(&block_id).map_err(|err| {
DbError::borsh_cast_message(err, Some("Failed to serialize block id".to_owned()))
})?;
if self
.db
.get_cf(&cf_block, &key)
.map_err(|rerr| DbError::rocksdb_cast_message(rerr, None))?
.is_none()
{
return Err(DbError::db_interaction_error(format!(
"Block with id {block_id} not found"
)));
}
self.db
.delete_cf(&cf_block, key)
.map_err(|rerr| DbError::rocksdb_cast_message(rerr, None))?;
Ok(())
}
pub fn mark_block_as_finalized(&self, block_id: u64) -> DbResult<()> {
let mut block = self.get_block(block_id)?.ok_or_else(|| {
DbError::db_interaction_error(format!("Block with id {block_id} not found"))
})?;
block.bedrock_status = BedrockStatus::Finalized;
let cf_block = self.block_column();
self.db
.put_cf(
&cf_block,
borsh::to_vec(&block_id).map_err(|err| {
DbError::borsh_cast_message(
err,
Some("Failed to serialize block id".to_owned()),
)
})?,
borsh::to_vec(&block).map_err(|err| {
DbError::borsh_cast_message(
err,
Some("Failed to serialize block data".to_owned()),
)
})?,
)
.map_err(|rerr| {
DbError::rocksdb_cast_message(
rerr,
Some(format!("Failed to mark block {block_id} as finalized")),
)
})?;
Ok(())
}
pub fn get_all_blocks(&self) -> impl Iterator<Item = DbResult<Block>> {
let cf_block = self.block_column();
self.db
.iterator_cf(&cf_block, rocksdb::IteratorMode::Start)
.map(|res| {
let (_key, value) = res.map_err(|rerr| {
DbError::rocksdb_cast_message(
rerr,
Some("Failed to get key value pair".to_owned()),
)
})?;
borsh::from_slice::<Block>(&value).map_err(|err| {
DbError::borsh_cast_message(
err,
Some("Failed to deserialize block data".to_owned()),
)
})
})
}
pub fn atomic_update(
&self,
block: &Block,
msg_id: MantleMsgId,
state: &V03State,
) -> DbResult<()> {
let block_id = block.header.block_id;
let mut batch = WriteBatch::default();
self.put_block(block, msg_id, false, &mut batch)?;
self.put_nssa_state_in_db(state, &mut batch)?;
self.db.write(batch).map_err(|rerr| {
DbError::rocksdb_cast_message(
rerr,
Some(format!("Failed to udpate db with block {block_id}")),
)
})
}
}

View File

@ -0,0 +1,349 @@
use std::{path::Path, sync::Arc};
use common::block::{BedrockStatus, Block, BlockMeta, MantleMsgId};
use nssa::V03State;
use rocksdb::{
BoundColumnFamily, ColumnFamilyDescriptor, DBWithThreadMode, MultiThreaded, Options, WriteBatch,
};
use crate::{
CF_BLOCK_NAME, CF_META_NAME, DB_META_FIRST_BLOCK_IN_DB_KEY, DBIO, DbResult,
cells::shared_cells::{BlockCell, FirstBlockCell, FirstBlockSetCell, LastBlockCell},
error::DbError,
sequencer::sequencer_cells::{
LastFinalizedBlockIdCell, LatestBlockMetaCellOwned, LatestBlockMetaCellRef,
NSSAStateCellOwned, NSSAStateCellRef,
},
};
pub mod sequencer_cells;
/// Key base for storing metainformation about the last finalized block on Bedrock.
pub const DB_META_LAST_FINALIZED_BLOCK_ID: &str = "last_finalized_block_id";
/// Key base for storing metainformation about the latest block meta.
pub const DB_META_LATEST_BLOCK_META_KEY: &str = "latest_block_meta";
/// Key base for storing the NSSA state.
pub const DB_NSSA_STATE_KEY: &str = "nssa_state";
/// Name of state column family.
pub const CF_NSSA_STATE_NAME: &str = "cf_nssa_state";
/// Sequencer-side RocksDB wrapper; owns the database handle used by all
/// block/meta/state accessors below.
pub struct RocksDBIO {
    pub db: DBWithThreadMode<MultiThreaded>,
}
impl DBIO for RocksDBIO {
    /// Exposes the raw database handle so the generic cell-based `DBIO`
    /// helpers (`get`, `put`, `put_batch`, ...) can operate on it.
    fn db(&self) -> &DBWithThreadMode<MultiThreaded> {
        &self.db
    }
}
impl RocksDBIO {
    /// Opens the database at `path`, creating it and its column families if
    /// they do not exist yet.
    ///
    /// On first creation the genesis block is persisted and the meta cells
    /// (first/last block, first-block-set flag, last finalized block id,
    /// latest block meta) are initialized from `genesis_block`.
    ///
    /// # Errors
    /// Returns a `DbError` if RocksDB cannot be opened or any of the initial
    /// writes fail.
    pub fn open_or_create(
        path: &Path,
        genesis_block: &Block,
        genesis_msg_id: MantleMsgId,
    ) -> DbResult<Self> {
        let mut cf_opts = Options::default();
        cf_opts.set_max_write_buffer_number(16);
        // ToDo: Add more column families for different data
        let cfb = ColumnFamilyDescriptor::new(CF_BLOCK_NAME, cf_opts.clone());
        let cfmeta = ColumnFamilyDescriptor::new(CF_META_NAME, cf_opts.clone());
        let cfstate = ColumnFamilyDescriptor::new(CF_NSSA_STATE_NAME, cf_opts.clone());
        let mut db_opts = Options::default();
        db_opts.create_missing_column_families(true);
        db_opts.create_if_missing(true);
        let db = DBWithThreadMode::<MultiThreaded>::open_cf_descriptors(
            &db_opts,
            path,
            vec![cfb, cfmeta, cfstate],
        )
        .map_err(|err| DbError::RocksDbError {
            error: err,
            additional_info: Some("Failed to open or create DB".to_owned()),
        })?;
        let dbio = Self { db };
        let is_start_set = dbio.get_meta_is_first_block_set()?;
        if !is_start_set {
            // Fresh database: seed the genesis block and the initial meta cells.
            let block_id = genesis_block.header.block_id;
            dbio.put_meta_first_block_in_db(genesis_block, genesis_msg_id)?;
            dbio.put_meta_is_first_block_set()?;
            dbio.put_meta_last_block_in_db(block_id)?;
            dbio.put_meta_last_finalized_block_id(None)?;
            dbio.put_meta_latest_block_meta(&BlockMeta {
                id: genesis_block.header.block_id,
                hash: genesis_block.header.hash,
                msg_id: genesis_msg_id,
            })?;
        }
        Ok(dbio)
    }

    /// Destroys the database at `path`, deleting all persisted data.
    pub fn destroy(path: &Path) -> DbResult<()> {
        let mut cf_opts = Options::default();
        cf_opts.set_max_write_buffer_number(16);
        // ToDo: Add more column families for different data
        let _cfb = ColumnFamilyDescriptor::new(CF_BLOCK_NAME, cf_opts.clone());
        let _cfmeta = ColumnFamilyDescriptor::new(CF_META_NAME, cf_opts.clone());
        let _cfstate = ColumnFamilyDescriptor::new(CF_NSSA_STATE_NAME, cf_opts.clone());
        let mut db_opts = Options::default();
        db_opts.create_missing_column_families(true);
        db_opts.create_if_missing(true);
        DBWithThreadMode::<MultiThreaded>::destroy(&db_opts, path)
            .map_err(|rerr| DbError::rocksdb_cast_message(rerr, None))
    }

    // Columns

    /// Handle to the meta column family.
    ///
    /// # Panics
    /// Panics if the column family was not created at open time.
    pub fn meta_column(&self) -> Arc<BoundColumnFamily<'_>> {
        self.db
            .cf_handle(CF_META_NAME)
            .expect("Meta column should exist")
    }

    /// Handle to the block column family.
    ///
    /// # Panics
    /// Panics if the column family was not created at open time.
    pub fn block_column(&self) -> Arc<BoundColumnFamily<'_>> {
        self.db
            .cf_handle(CF_BLOCK_NAME)
            .expect("Block column should exist")
    }

    /// Handle to the NSSA-state column family.
    ///
    /// # Panics
    /// Panics if the column family was not created at open time.
    pub fn nssa_state_column(&self) -> Arc<BoundColumnFamily<'_>> {
        self.db
            .cf_handle(CF_NSSA_STATE_NAME)
            .expect("State should exist")
    }

    // Meta

    /// Id of the first block stored in the db; errors if the cell is missing.
    pub fn get_meta_first_block_in_db(&self) -> DbResult<u64> {
        self.get::<FirstBlockCell>(()).map(|cell| cell.0)
    }

    /// Id of the last block stored in the db; errors if the cell is missing.
    pub fn get_meta_last_block_in_db(&self) -> DbResult<u64> {
        self.get::<LastBlockCell>(()).map(|cell| cell.0)
    }

    /// Whether the genesis block has already been written (the flag cell exists).
    pub fn get_meta_is_first_block_set(&self) -> DbResult<bool> {
        Ok(self.get_opt::<FirstBlockSetCell>(())?.is_some())
    }

    /// Queues the NSSA state into `batch`; written when the batch is committed.
    pub fn put_nssa_state_in_db(&self, state: &V03State, batch: &mut WriteBatch) -> DbResult<()> {
        self.put_batch(&NSSAStateCellRef(state), (), batch)
    }

    /// Records `block` as the first block in the db and persists the block itself.
    ///
    /// NOTE(review): this still writes the first-block-id key via a raw
    /// `put_cf`, unlike the sibling methods that were migrated to cells
    /// (`FirstBlockCell` is imported and used on the read side) — consider
    /// migrating the write side too; verify `FirstBlockCell` is writable.
    pub fn put_meta_first_block_in_db(&self, block: &Block, msg_id: MantleMsgId) -> DbResult<()> {
        let cf_meta = self.meta_column();
        self.db
            .put_cf(
                &cf_meta,
                borsh::to_vec(&DB_META_FIRST_BLOCK_IN_DB_KEY).map_err(|err| {
                    DbError::borsh_cast_message(
                        err,
                        Some("Failed to serialize DB_META_FIRST_BLOCK_IN_DB_KEY".to_owned()),
                    )
                })?,
                borsh::to_vec(&block.header.block_id).map_err(|err| {
                    DbError::borsh_cast_message(
                        err,
                        Some("Failed to serialize first block id".to_owned()),
                    )
                })?,
            )
            .map_err(|rerr| DbError::rocksdb_cast_message(rerr, None))?;
        let mut batch = WriteBatch::default();
        self.put_block(block, msg_id, true, &mut batch)?;
        self.db.write(batch).map_err(|rerr| {
            DbError::rocksdb_cast_message(
                rerr,
                Some("Failed to write first block in db".to_owned()),
            )
        })?;
        Ok(())
    }

    /// Persists the id of the last block in the db.
    pub fn put_meta_last_block_in_db(&self, block_id: u64) -> DbResult<()> {
        self.put(&LastBlockCell(block_id), ())
    }

    /// Batch variant of [`Self::put_meta_last_block_in_db`].
    fn put_meta_last_block_in_db_batch(
        &self,
        block_id: u64,
        batch: &mut WriteBatch,
    ) -> DbResult<()> {
        self.put_batch(&LastBlockCell(block_id), (), batch)
    }

    /// Persists the id of the last block finalized on Bedrock (`None` = none yet).
    pub fn put_meta_last_finalized_block_id(&self, block_id: Option<u64>) -> DbResult<()> {
        self.put(&LastFinalizedBlockIdCell(block_id), ())
    }

    /// Sets the flag marking that the first (genesis) block has been written.
    pub fn put_meta_is_first_block_set(&self) -> DbResult<()> {
        self.put(&FirstBlockSetCell(true), ())
    }

    /// Persists the latest block meta.
    fn put_meta_latest_block_meta(&self, block_meta: &BlockMeta) -> DbResult<()> {
        self.put(&LatestBlockMetaCellRef(block_meta), ())
    }

    /// Batch variant of [`Self::put_meta_latest_block_meta`].
    fn put_meta_latest_block_meta_batch(
        &self,
        block_meta: &BlockMeta,
        batch: &mut WriteBatch,
    ) -> DbResult<()> {
        self.put_batch(&LatestBlockMetaCellRef(block_meta), (), batch)
    }

    /// Latest block meta stored in the db; errors if the cell is missing.
    pub fn latest_block_meta(&self) -> DbResult<BlockMeta> {
        self.get::<LatestBlockMetaCellOwned>(()).map(|val| val.0)
    }

    /// Queues `block` into `batch`, and — unless `first` — also advances the
    /// last-block-id and latest-block-meta cells when `block` is newer than
    /// the current tip.
    pub fn put_block(
        &self,
        block: &Block,
        msg_id: MantleMsgId,
        first: bool,
        batch: &mut WriteBatch,
    ) -> DbResult<()> {
        let cf_block = self.block_column();
        if !first {
            let last_curr_block = self.get_meta_last_block_in_db()?;
            // Only move the tip forward; out-of-order older blocks are stored
            // without touching the meta cells.
            if block.header.block_id > last_curr_block {
                self.put_meta_last_block_in_db_batch(block.header.block_id, batch)?;
                self.put_meta_latest_block_meta_batch(
                    &BlockMeta {
                        id: block.header.block_id,
                        hash: block.header.hash,
                        msg_id,
                    },
                    batch,
                )?;
            }
        }
        batch.put_cf(
            &cf_block,
            borsh::to_vec(&block.header.block_id).map_err(|err| {
                DbError::borsh_cast_message(err, Some("Failed to serialize block id".to_owned()))
            })?,
            borsh::to_vec(block).map_err(|err| {
                DbError::borsh_cast_message(err, Some("Failed to serialize block data".to_owned()))
            })?,
        );
        Ok(())
    }

    /// Fetches the block with id `block_id`, or `None` if it is not stored.
    pub fn get_block(&self, block_id: u64) -> DbResult<Option<Block>> {
        self.get_opt::<BlockCell>(block_id)
            .map(|opt| opt.map(|val| val.0))
    }

    /// Fetches the persisted NSSA state; errors if the cell is missing.
    pub fn get_nssa_state(&self) -> DbResult<V03State> {
        self.get::<NSSAStateCellOwned>(()).map(|val| val.0)
    }

    /// Deletes the block with id `block_id`.
    ///
    /// # Errors
    /// Returns an error if no block with that id is stored.
    pub fn delete_block(&self, block_id: u64) -> DbResult<()> {
        let cf_block = self.block_column();
        let key = borsh::to_vec(&block_id).map_err(|err| {
            DbError::borsh_cast_message(err, Some("Failed to serialize block id".to_owned()))
        })?;
        // Existence check first so callers get a descriptive error rather
        // than a silent no-op delete.
        if self
            .db
            .get_cf(&cf_block, &key)
            .map_err(|rerr| DbError::rocksdb_cast_message(rerr, None))?
            .is_none()
        {
            return Err(DbError::db_interaction_error(format!(
                "Block with id {block_id} not found"
            )));
        }
        self.db
            .delete_cf(&cf_block, key)
            .map_err(|rerr| DbError::rocksdb_cast_message(rerr, None))?;
        Ok(())
    }

    /// Rewrites the stored block `block_id` with its `bedrock_status` set to
    /// `Finalized`.
    ///
    /// # Errors
    /// Returns an error if the block is not stored or the rewrite fails.
    pub fn mark_block_as_finalized(&self, block_id: u64) -> DbResult<()> {
        let mut block = self.get_block(block_id)?.ok_or_else(|| {
            DbError::db_interaction_error(format!("Block with id {block_id} not found"))
        })?;
        block.bedrock_status = BedrockStatus::Finalized;
        let cf_block = self.block_column();
        self.db
            .put_cf(
                &cf_block,
                borsh::to_vec(&block_id).map_err(|err| {
                    DbError::borsh_cast_message(
                        err,
                        Some("Failed to serialize block id".to_owned()),
                    )
                })?,
                borsh::to_vec(&block).map_err(|err| {
                    DbError::borsh_cast_message(
                        err,
                        Some("Failed to serialize block data".to_owned()),
                    )
                })?,
            )
            .map_err(|rerr| {
                DbError::rocksdb_cast_message(
                    rerr,
                    Some(format!("Failed to mark block {block_id} as finalized")),
                )
            })?;
        Ok(())
    }

    /// Iterates over every stored block in key order, deserializing lazily.
    pub fn get_all_blocks(&self) -> impl Iterator<Item = DbResult<Block>> {
        let cf_block = self.block_column();
        self.db
            .iterator_cf(&cf_block, rocksdb::IteratorMode::Start)
            .map(|res| {
                let (_key, value) = res.map_err(|rerr| {
                    DbError::rocksdb_cast_message(
                        rerr,
                        Some("Failed to get key value pair".to_owned()),
                    )
                })?;
                borsh::from_slice::<Block>(&value).map_err(|err| {
                    DbError::borsh_cast_message(
                        err,
                        Some("Failed to deserialize block data".to_owned()),
                    )
                })
            })
    }

    /// Writes `block` and the new NSSA `state` in one atomic RocksDB batch,
    /// so the stored state never gets ahead of (or behind) the stored blocks.
    pub fn atomic_update(
        &self,
        block: &Block,
        msg_id: MantleMsgId,
        state: &V03State,
    ) -> DbResult<()> {
        let block_id = block.header.block_id;
        let mut batch = WriteBatch::default();
        self.put_block(block, msg_id, false, &mut batch)?;
        self.put_nssa_state_in_db(state, &mut batch)?;
        self.db.write(batch).map_err(|rerr| {
            DbError::rocksdb_cast_message(
                rerr,
                // Fixed typo in the error message ("udpate" -> "update").
                Some(format!("Failed to update db with block {block_id}")),
            )
        })
    }
}

View File

@ -0,0 +1,132 @@
use borsh::{BorshDeserialize, BorshSerialize};
use common::block::BlockMeta;
use nssa::V03State;
use crate::{
CF_META_NAME, DbResult,
cells::{SimpleReadableCell, SimpleStorableCell, SimpleWritableCell},
error::DbError,
sequencer::{
CF_NSSA_STATE_NAME, DB_META_LAST_FINALIZED_BLOCK_ID, DB_META_LATEST_BLOCK_META_KEY,
DB_NSSA_STATE_KEY,
},
};
/// Read-side cell for the persisted NSSA state; deserializes an owned
/// `V03State` from the dedicated NSSA-state column family.
#[derive(BorshDeserialize)]
pub struct NSSAStateCellOwned(pub V03State);

// Key/column must stay aligned with `NSSAStateCellRef` (checked by
// `uniform_tests` in this file).
impl SimpleStorableCell for NSSAStateCellOwned {
    type KeyParams = ();

    const CELL_NAME: &'static str = DB_NSSA_STATE_KEY;
    const CF_NAME: &'static str = CF_NSSA_STATE_NAME;
}

impl SimpleReadableCell for NSSAStateCellOwned {}
/// Write-side cell for the persisted NSSA state; borrows the state so callers
/// can persist without cloning the (potentially large) `V03State`.
#[derive(BorshSerialize)]
pub struct NSSAStateCellRef<'state>(pub &'state V03State);

// Same key/column as `NSSAStateCellOwned` so reads see what writes stored.
impl SimpleStorableCell for NSSAStateCellRef<'_> {
    type KeyParams = ();

    const CELL_NAME: &'static str = DB_NSSA_STATE_KEY;
    const CF_NAME: &'static str = CF_NSSA_STATE_NAME;
}

impl SimpleWritableCell for NSSAStateCellRef<'_> {
    /// Borsh-serializes the borrowed state into the raw value bytes.
    fn value_constructor(&self) -> DbResult<Vec<u8>> {
        borsh::to_vec(&self).map_err(|err| {
            DbError::borsh_cast_message(err, Some("Failed to serialize last state".to_owned()))
        })
    }
}
/// Meta cell holding the id of the last block finalized on Bedrock.
/// `None` means no block has been finalized yet.
#[derive(Debug, BorshSerialize, BorshDeserialize)]
pub struct LastFinalizedBlockIdCell(pub Option<u64>);

impl SimpleStorableCell for LastFinalizedBlockIdCell {
    type KeyParams = ();

    const CELL_NAME: &'static str = DB_META_LAST_FINALIZED_BLOCK_ID;
    const CF_NAME: &'static str = CF_META_NAME;
}

impl SimpleReadableCell for LastFinalizedBlockIdCell {}

impl SimpleWritableCell for LastFinalizedBlockIdCell {
    /// Borsh-serializes the optional block id into the raw value bytes.
    fn value_constructor(&self) -> DbResult<Vec<u8>> {
        borsh::to_vec(&self).map_err(|err| {
            DbError::borsh_cast_message(
                err,
                Some("Failed to serialize last finalized block id".to_owned()),
            )
        })
    }
}
/// Read-side cell for the latest block meta; deserializes an owned `BlockMeta`.
#[derive(BorshDeserialize)]
pub struct LatestBlockMetaCellOwned(pub BlockMeta);

// Key/column must stay aligned with `LatestBlockMetaCellRef` (checked by
// `uniform_tests` in this file).
impl SimpleStorableCell for LatestBlockMetaCellOwned {
    type KeyParams = ();

    const CELL_NAME: &'static str = DB_META_LATEST_BLOCK_META_KEY;
    const CF_NAME: &'static str = CF_META_NAME;
}

impl SimpleReadableCell for LatestBlockMetaCellOwned {}
/// Write-side cell for the latest block meta; borrows the `BlockMeta` so
/// callers can persist without cloning.
#[derive(BorshSerialize)]
pub struct LatestBlockMetaCellRef<'blockmeta>(pub &'blockmeta BlockMeta);

// Same key/column as `LatestBlockMetaCellOwned` so reads see what writes stored.
impl SimpleStorableCell for LatestBlockMetaCellRef<'_> {
    type KeyParams = ();

    const CELL_NAME: &'static str = DB_META_LATEST_BLOCK_META_KEY;
    const CF_NAME: &'static str = CF_META_NAME;
}

impl SimpleWritableCell for LatestBlockMetaCellRef<'_> {
    /// Borsh-serializes the borrowed block meta into the raw value bytes.
    fn value_constructor(&self) -> DbResult<Vec<u8>> {
        borsh::to_vec(&self).map_err(|err| {
            DbError::borsh_cast_message(err, Some("Failed to serialize last block meta".to_owned()))
        })
    }
}
#[cfg(test)]
mod uniform_tests {
    //! Guards that each Ref/Owned cell pair targets the same cell name,
    //! column family, and key — i.e. that a value written through the `Ref`
    //! variant is readable through the `Owned` variant.
    use crate::{
        cells::SimpleStorableCell as _,
        sequencer::sequencer_cells::{
            LatestBlockMetaCellOwned, LatestBlockMetaCellRef, NSSAStateCellOwned, NSSAStateCellRef,
        },
    };

    // NSSA state: write-side and read-side cells must address the same slot.
    #[test]
    fn state_ref_and_owned_is_aligned() {
        assert_eq!(NSSAStateCellRef::CELL_NAME, NSSAStateCellOwned::CELL_NAME);
        assert_eq!(NSSAStateCellRef::CF_NAME, NSSAStateCellOwned::CF_NAME);
        assert_eq!(
            NSSAStateCellRef::key_constructor(()).unwrap(),
            NSSAStateCellOwned::key_constructor(()).unwrap()
        );
    }

    // Latest block meta: write-side and read-side cells must address the same slot.
    #[test]
    fn block_meta_ref_and_owned_is_aligned() {
        assert_eq!(
            LatestBlockMetaCellRef::CELL_NAME,
            LatestBlockMetaCellOwned::CELL_NAME
        );
        assert_eq!(
            LatestBlockMetaCellRef::CF_NAME,
            LatestBlockMetaCellOwned::CF_NAME
        );
        assert_eq!(
            LatestBlockMetaCellRef::key_constructor(()).unwrap(),
            LatestBlockMetaCellOwned::key_constructor(()).unwrap()
        );
    }
}

View File

@ -9,5 +9,7 @@ workspace = true
[dependencies]
nssa_core.workspace = true
clock_core.workspace = true
risc0-zkvm.workspace = true
serde = { workspace = true, default-features = false }

View File

@ -6,6 +6,7 @@ fn main() {
let (
ProgramInput {
self_program_id,
caller_program_id,
pre_states,
instruction: balance_to_burn,
},
@ -22,6 +23,7 @@ fn main() {
ProgramOutput::new(
self_program_id,
caller_program_id,
instruction_words,
vec![pre],
vec![AccountPostState::new(account_post)],

View File

@ -14,6 +14,7 @@ fn main() {
let (
ProgramInput {
self_program_id,
caller_program_id,
pre_states,
instruction: (balance, auth_transfer_id, num_chain_calls, pda_seed),
},
@ -57,6 +58,7 @@ fn main() {
ProgramOutput::new(
self_program_id,
caller_program_id,
instruction_words,
vec![sender_pre.clone(), recipient_pre.clone()],
vec![

View File

@ -7,6 +7,7 @@ fn main() {
let (
ProgramInput {
self_program_id,
caller_program_id,
pre_states,
instruction: (data_opt, should_claim),
},
@ -36,6 +37,7 @@ fn main() {
ProgramOutput::new(
self_program_id,
caller_program_id,
instruction_words,
vec![pre],
vec![post_state],

View File

@ -6,6 +6,7 @@ fn main() {
let (
ProgramInput {
self_program_id,
caller_program_id,
pre_states,
instruction: (),
},
@ -20,6 +21,7 @@ fn main() {
ProgramOutput::new(
self_program_id,
caller_program_id,
instruction_words,
vec![pre],
vec![account_post],

View File

@ -0,0 +1,46 @@
use nssa_core::{
Timestamp,
program::{
AccountPostState, ChainedCall, ProgramId, ProgramInput, ProgramOutput, read_nssa_inputs,
},
};
use risc0_zkvm::serde::to_vec;
type Instruction = (ProgramId, Timestamp); // (clock_program_id, timestamp)
/// A program that chain-calls the clock program with the clock accounts it received as pre-states.
/// Used in tests to verify that user transactions cannot modify clock accounts, even indirectly
/// via chain calls.
fn main() {
    let (
        ProgramInput {
            self_program_id,
            caller_program_id,
            pre_states,
            instruction: (clock_program_id, timestamp),
        },
        instruction_words,
    ) = read_nssa_inputs::<Instruction>();

    // This program itself touches nothing: every account is echoed back
    // unchanged. The only mutation attempt happens via the chained call below.
    let echoed_post_states: Vec<_> = pre_states
        .iter()
        .map(|state| AccountPostState::new(state.account.clone()))
        .collect();

    // Forward the received accounts to the clock program as a chained call,
    // asking it to write `timestamp`.
    let clock_call = ChainedCall {
        program_id: clock_program_id,
        instruction_data: to_vec(&timestamp).unwrap(),
        pre_states: pre_states.clone(),
        pda_seeds: vec![],
    };

    ProgramOutput::new(
        self_program_id,
        caller_program_id,
        instruction_words,
        pre_states,
        echoed_post_states,
    )
    .with_chained_calls(vec![clock_call])
    .write();
}

View File

@ -7,6 +7,7 @@ fn main() {
let (
ProgramInput {
self_program_id,
caller_program_id,
pre_states,
instruction: data,
},
@ -25,6 +26,7 @@ fn main() {
ProgramOutput::new(
self_program_id,
caller_program_id,
instruction_words,
vec![pre],
vec![AccountPostState::new_claimed(

View File

@ -9,6 +9,7 @@ fn main() {
let (
ProgramInput {
self_program_id,
caller_program_id,
pre_states,
..
},
@ -23,6 +24,7 @@ fn main() {
ProgramOutput::new(
self_program_id,
caller_program_id,
instruction_words,
vec![pre],
vec![

View File

@ -0,0 +1,94 @@
//! Flash swap callback, the user logic step in the "prep → callback → assert" pattern.
//!
//! # Role
//!
//! This program is called as chained call 2 in the flash swap sequence:
//! 1. Token transfer out (vault → receiver)
//! 2. **This callback** (user logic)
//! 3. Invariant check (assert vault balance restored)
//!
//! In a real flash swap, this would contain the user's arbitrage or other logic.
//! In this test program, it is controlled by `return_funds`:
//!
//! - `return_funds = true`: emits a token transfer (receiver → vault) to return the funds. The
//! invariant check will pass and the transaction will succeed.
//!
//! - `return_funds = false`: emits no transfers. Funds stay with the receiver. The invariant check
//! will fail (vault balance < initial), causing full atomic rollback. This simulates a malicious
//! or buggy callback that does not repay the flash loan.
//!
//! # Note on `caller_program_id`
//!
//! This program does not enforce any access control on `caller_program_id`.
//! It is designed to be called by the flash swap initiator but could in principle be
//! called by any program. In production, a callback would typically verify the caller
//! if it needs to trust the context it is called from.
use nssa_core::program::{
AccountPostState, ChainedCall, PdaSeed, ProgramId, ProgramInput, ProgramOutput,
read_nssa_inputs,
};
use serde::{Deserialize, Serialize};
/// Instruction payload for the flash swap callback program.
#[derive(Serialize, Deserialize)]
pub struct CallbackInstruction {
    /// If true, return the borrowed funds to the vault (happy path).
    /// If false, keep the funds (simulates a malicious callback, triggers rollback).
    pub return_funds: bool,
    /// Program id of the token program used to transfer funds back to the vault.
    pub token_program_id: ProgramId,
    /// Token amount transferred back to the vault when `return_funds` is true.
    pub amount: u128,
}
fn main() {
    let (
        ProgramInput {
            self_program_id,
            caller_program_id, // deliberately unvalidated: any caller may invoke this callback
            pre_states,
            instruction,
        },
        instruction_words,
    ) = read_nssa_inputs::<CallbackInstruction>();

    // Expected layout: [vault (after transfer out), receiver (after transfer out)].
    let Ok([vault_pre, receiver_pre]) = <[_; 2]>::try_from(pre_states) else {
        panic!("Callback requires exactly 2 accounts: vault, receiver");
    };

    let chained_calls = if instruction.return_funds {
        // Happy path: repay the flash loan with a receiver → vault token transfer.
        // The receiver is a PDA of this callback program (seed = [1_u8; 32]), so
        // flag it as authorized, the PDA seed below authorizes it in the call.
        let mut authorized_receiver = receiver_pre.clone();
        authorized_receiver.is_authorized = true;
        let repay_instruction = risc0_zkvm::serde::to_vec(&instruction.amount)
            .expect("transfer instruction serialization");
        vec![ChainedCall {
            program_id: instruction.token_program_id,
            pre_states: vec![authorized_receiver, vault_pre.clone()],
            instruction_data: repay_instruction,
            pda_seeds: vec![PdaSeed::new([1_u8; 32])],
        }]
    } else {
        // Malicious path: emit no transfers, so the vault balance is never
        // restored. The initiator's invariant check will then panic and the
        // entire transaction (including the transfer out) rolls back.
        Vec::new()
    };

    // The callback never mutates state directly, accounts pass through
    // unchanged; all mutation flows through the token program chained call.
    ProgramOutput::new(
        self_program_id,
        caller_program_id,
        instruction_words,
        vec![vault_pre.clone(), receiver_pre.clone()],
        vec![
            AccountPostState::new(vault_pre.account),
            AccountPostState::new(receiver_pre.account),
        ],
    )
    .with_chained_calls(chained_calls)
    .write();
}

View File

@ -0,0 +1,216 @@
//! Flash swap initiator, demonstrates the "prep → callback → assert" pattern using
//! generalized multi tail-calls with `self_program_id` and `caller_program_id`.
//!
//! # Pattern
//!
//! A flash swap lets a program optimistically transfer tokens out, run arbitrary user
//! logic (the callback), then assert that invariants hold after the callback. The entire
//! sequence is a single atomic transaction: if any step fails, all state changes roll back.
//!
//! # How it works
//!
//! This program handles two instruction variants:
//!
//! - `Initiate` (external): the top-level entrypoint. Emits 3 chained calls:
//! 1. Token transfer out (vault → receiver)
//! 2. User callback (arbitrary logic, e.g. arbitrage)
//! 3. Self-call to `InvariantCheck` (using `self_program_id` to reference itself)
//!
//! - `InvariantCheck` (internal): enforces that the vault balance was restored after the callback.
//! Uses `caller_program_id == Some(self_program_id)` to prevent standalone calls (this is the
//! visibility enforcement mechanism).
//!
//! # What this demonstrates
//!
//! - `self_program_id`: enables a program to chain back to itself (step 3 above)
//! - `caller_program_id`: enables a program to restrict which callers can invoke an instruction
//! - Computed intermediate states: the initiator computes expected intermediate account states from
//! the `pre_states` and amount, keeping the instruction minimal.
//! - Atomic rollback: if the callback doesn't return funds, the invariant check fails, and all
//! state changes from steps 1 and 2 are rolled back automatically.
//!
//! # Tests
//!
//! See `nssa/src/state.rs` for integration tests:
//! - `flash_swap_successful`: full round-trip, funds returned, state unchanged
//! - `flash_swap_callback_keeps_funds_rollback`: callback keeps funds, full rollback
//! - `flash_swap_self_call_targets_correct_program`: zero-amount self-call isolation test
//! - `flash_swap_standalone_invariant_check_rejected`: `caller_program_id` access control
use nssa_core::program::{
AccountPostState, ChainedCall, PdaSeed, ProgramId, ProgramInput, ProgramOutput,
read_nssa_inputs,
};
use serde::{Deserialize, Serialize};
/// Instruction set of the flash swap initiator program.
#[derive(Serialize, Deserialize)]
pub enum FlashSwapInstruction {
    /// External entrypoint: initiate a flash swap.
    ///
    /// Emits 3 chained calls:
    /// 1. Token transfer (vault → receiver, `amount_out`)
    /// 2. Callback (user logic, e.g. arbitrage)
    /// 3. Self-call `InvariantCheck` (verify vault balance did not decrease)
    ///
    /// Intermediate account states are computed inside the program from `pre_states` and
    /// `amount_out`.
    Initiate {
        /// Token program invoked for the transfer out (vault → receiver).
        token_program_id: ProgramId,
        /// Program invoked as chained call 2 with the post-transfer account states.
        callback_program_id: ProgramId,
        /// Amount optimistically transferred out of the vault.
        amount_out: u128,
        /// Instruction words forwarded verbatim to the callback program.
        callback_instruction_data: Vec<u32>,
    },
    /// Internal: verify the vault invariant holds after callback execution.
    ///
    /// Access control: only callable as a chained call from this program itself.
    /// This is enforced by checking `caller_program_id == Some(self_program_id)`.
    /// Any attempt to call this instruction as a standalone top-level transaction
    /// will be rejected because `caller_program_id` will be `None`.
    InvariantCheck { min_vault_balance: u128 },
}
/// Entry point: dispatches between the external `Initiate` flow and the
/// internal `InvariantCheck` flow (see module docs for the full pattern).
fn main() {
    let (
        ProgramInput {
            self_program_id,
            caller_program_id,
            pre_states,
            instruction,
        },
        instruction_words,
    ) = read_nssa_inputs::<FlashSwapInstruction>();
    match instruction {
        FlashSwapInstruction::Initiate {
            token_program_id,
            callback_program_id,
            amount_out,
            callback_instruction_data,
        } => {
            // Account layout for `Initiate`: [vault, receiver].
            let Ok([vault_pre, receiver_pre]) = <[_; 2]>::try_from(pre_states) else {
                panic!("Initiate requires exactly 2 accounts: vault, receiver");
            };
            // Capture initial vault balance, the invariant check will verify it is restored.
            let min_vault_balance = vault_pre.account.balance;
            // Compute intermediate account states from pre_states and amount_out.
            // Checked arithmetic is used throughout so an over/underflow aborts
            // the transaction instead of wrapping silently.
            let mut vault_after_transfer = vault_pre.clone();
            vault_after_transfer.account.balance = vault_pre
                .account
                .balance
                .checked_sub(amount_out)
                .expect("vault has insufficient balance for flash swap");
            let mut receiver_after_transfer = receiver_pre.clone();
            receiver_after_transfer.account.balance = receiver_pre
                .account
                .balance
                .checked_add(amount_out)
                .expect("receiver balance overflow");
            // Expected vault state after a well-behaved callback has repaid
            // `amount_out`; this is what the invariant check will receive.
            let mut vault_after_callback = vault_after_transfer.clone();
            vault_after_callback.account.balance = vault_after_transfer
                .account
                .balance
                .checked_add(amount_out)
                .expect("vault balance overflow after callback");
            // Chained call 1: Token transfer (vault → receiver).
            // The vault is a PDA of this initiator program (seed = [0_u8; 32]), so we provide
            // the PDA seed to authorize the token program to debit the vault on our behalf.
            // Mark the vault as authorized since it will be PDA-authorized in this chained call.
            let mut vault_authorized = vault_pre.clone();
            vault_authorized.is_authorized = true;
            let transfer_instruction =
                risc0_zkvm::serde::to_vec(&amount_out).expect("transfer instruction serialization");
            let call_1 = ChainedCall {
                program_id: token_program_id,
                pre_states: vec![vault_authorized, receiver_pre.clone()],
                instruction_data: transfer_instruction,
                pda_seeds: vec![PdaSeed::new([0_u8; 32])],
            };
            // Chained call 2: User callback.
            // Receives the post-transfer states as its pre_states. The callback may run
            // arbitrary logic (arbitrage, etc.) and is expected to return funds to the vault.
            let call_2 = ChainedCall {
                program_id: callback_program_id,
                pre_states: vec![vault_after_transfer, receiver_after_transfer],
                instruction_data: callback_instruction_data,
                pda_seeds: vec![],
            };
            // Chained call 3: Self-call to enforce the invariant.
            // Uses `self_program_id` to reference this program, the key feature that enables
            // the "prep → callback → assert" pattern without a separate checker program.
            // If the callback did not return funds, vault_after_callback.balance <
            // min_vault_balance and this call will panic, rolling back the entire
            // transaction.
            let invariant_instruction =
                risc0_zkvm::serde::to_vec(&FlashSwapInstruction::InvariantCheck {
                    min_vault_balance,
                })
                .expect("invariant instruction serialization");
            let call_3 = ChainedCall {
                program_id: self_program_id, // self-referential chained call
                pre_states: vec![vault_after_callback],
                instruction_data: invariant_instruction,
                pda_seeds: vec![],
            };
            // The initiator itself makes no direct state changes.
            // All mutations happen inside the chained calls (token transfers).
            ProgramOutput::new(
                self_program_id,
                caller_program_id,
                instruction_words,
                vec![vault_pre.clone(), receiver_pre.clone()],
                vec![
                    AccountPostState::new(vault_pre.account),
                    AccountPostState::new(receiver_pre.account),
                ],
            )
            .with_chained_calls(vec![call_1, call_2, call_3])
            .write();
        }
        FlashSwapInstruction::InvariantCheck { min_vault_balance } => {
            // Visibility enforcement: `InvariantCheck` is an internal instruction.
            // It must only be called as a chained call from this program itself (via `Initiate`).
            // When called as a top-level transaction, `caller_program_id` is `None` → panics.
            // When called as a chained call from `Initiate`, `caller_program_id` is
            // `Some(self_program_id)` → passes.
            assert_eq!(
                caller_program_id,
                Some(self_program_id),
                "InvariantCheck is an internal instruction: must be called by flash_swap_initiator \
                 via a chained call",
            );
            // Account layout for `InvariantCheck`: [vault].
            let Ok([vault]) = <[_; 1]>::try_from(pre_states) else {
                panic!("InvariantCheck requires exactly 1 account: vault");
            };
            // The core invariant: vault balance must not have decreased.
            // If the callback returned funds, this passes. If not, this panics and
            // the entire transaction (including the prior token transfer) rolls back.
            assert!(
                vault.account.balance >= min_vault_balance,
                "Flash swap invariant violated: vault balance {} < minimum {}",
                vault.account.balance,
                min_vault_balance
            );
            // Pass-through: no state changes in the invariant check step.
            ProgramOutput::new(
                self_program_id,
                caller_program_id,
                instruction_words,
                vec![vault.clone()],
                vec![AccountPostState::new(vault.account)],
            )
            .write();
        }
    }
}

View File

@ -15,6 +15,7 @@ fn main() {
let (
ProgramInput {
self_program_id,
caller_program_id,
pre_states,
instruction: (balance, transfer_program_id),
},
@ -42,6 +43,7 @@ fn main() {
ProgramOutput::new(
self_program_id,
caller_program_id,
instruction_words,
vec![sender.clone(), receiver.clone()],
vec![

View File

@ -0,0 +1,34 @@
use nssa_core::program::{
AccountPostState, DEFAULT_PROGRAM_ID, ProgramInput, ProgramOutput, read_nssa_inputs,
};
type Instruction = ();
fn main() {
    let (
        ProgramInput {
            self_program_id,
            caller_program_id: _, // the real caller is ignored on purpose
            pre_states,
            instruction: (),
        },
        instruction_words,
    ) = read_nssa_inputs::<Instruction>();

    // Echo every account back unchanged; this program only lies about metadata.
    let post_states = pre_states
        .iter()
        .map(|state| AccountPostState::new(state.account.clone()))
        .collect();

    // Spoof the caller: a top-level call has caller_program_id == None, but this
    // program claims Some(DEFAULT_PROGRAM_ID), pretending another program invoked
    // it when none did. The host is expected to reject this output.
    ProgramOutput::new(
        self_program_id,
        Some(DEFAULT_PROGRAM_ID), // deliberately wrong: must be None for a top-level call
        instruction_words,
        pre_states,
        post_states,
    )
    .write();
}

View File

@ -0,0 +1,32 @@
use nssa_core::program::{
AccountPostState, DEFAULT_PROGRAM_ID, ProgramInput, ProgramOutput, read_nssa_inputs,
};
type Instruction = ();
fn main() {
    let (
        ProgramInput {
            self_program_id: _, // the genuine program id is ignored on purpose
            caller_program_id,
            pre_states,
            instruction: (),
        },
        instruction_words,
    ) = read_nssa_inputs::<Instruction>();

    // Echo every account back unchanged; this program only lies about metadata.
    let post_states = pre_states
        .iter()
        .map(|state| AccountPostState::new(state.account.clone()))
        .collect();

    // Claim DEFAULT_PROGRAM_ID as this program's identity instead of the real
    // self_program_id. The host is expected to detect the mismatch and reject.
    ProgramOutput::new(
        DEFAULT_PROGRAM_ID, // deliberately wrong: must be self_program_id
        caller_program_id,
        instruction_words,
        pre_states,
        post_states,
    )
    .write();
}

View File

@ -6,6 +6,7 @@ fn main() {
let (
ProgramInput {
self_program_id,
caller_program_id,
pre_states,
..
},
@ -25,6 +26,7 @@ fn main() {
ProgramOutput::new(
self_program_id,
caller_program_id,
instruction_words,
vec![pre],
vec![AccountPostState::new(account_post)],

View File

@ -6,6 +6,7 @@ fn main() {
let (
ProgramInput {
self_program_id,
caller_program_id,
pre_states,
..
},
@ -20,6 +21,7 @@ fn main() {
ProgramOutput::new(
self_program_id,
caller_program_id,
instruction_words,
vec![pre1, pre2],
vec![AccountPostState::new(account_pre1)],

View File

@ -65,6 +65,7 @@ fn main() {
let (
ProgramInput {
self_program_id,
caller_program_id,
pre_states,
instruction: balance_to_move,
},
@ -81,5 +82,12 @@ fn main() {
}
_ => panic!("invalid params"),
};
ProgramOutput::new(self_program_id, instruction_data, pre_states, post_states).write();
ProgramOutput::new(
self_program_id,
caller_program_id,
instruction_data,
pre_states,
post_states,
)
.write();
}

View File

@ -6,6 +6,7 @@ fn main() {
let (
ProgramInput {
self_program_id,
caller_program_id,
pre_states,
..
},
@ -22,6 +23,7 @@ fn main() {
ProgramOutput::new(
self_program_id,
caller_program_id,
instruction_words,
vec![pre],
vec![AccountPostState::new(account_post)],

Some files were not shown because too many files have changed in this diff Show More