From fe4a89191cf13da41ebe38c15bb1c255f7420904 Mon Sep 17 00:00:00 2001 From: Sergio Chouhy Date: Tue, 27 Jan 2026 01:20:17 -0300 Subject: [PATCH] add state to rocksdb --- Cargo.lock | 1 + nssa/core/src/commitment.rs | 2 +- nssa/core/src/nullifier.rs | 2 +- nssa/src/merkle_tree/mod.rs | 2 + nssa/src/program.rs | 3 +- nssa/src/state.rs | 46 ++++++++++- sequencer_core/src/block_store.rs | 15 ++-- sequencer_core/src/lib.rs | 2 +- storage/Cargo.toml | 1 + storage/src/lib.rs | 124 ++++++++++++++++-------------- 10 files changed, 125 insertions(+), 73 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index c8f0b26a..f17e34f1 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5455,6 +5455,7 @@ version = "0.1.0" dependencies = [ "borsh", "common", + "nssa", "rocksdb", "thiserror 2.0.17", ] diff --git a/nssa/core/src/commitment.rs b/nssa/core/src/commitment.rs index 52344177..90f3132c 100644 --- a/nssa/core/src/commitment.rs +++ b/nssa/core/src/commitment.rs @@ -5,7 +5,7 @@ use serde::{Deserialize, Serialize}; use crate::{NullifierPublicKey, account::Account}; #[derive(Serialize, Deserialize, BorshSerialize, BorshDeserialize)] -#[cfg_attr(any(feature = "host", test), derive(Debug, Clone, PartialEq, Eq, Hash))] +#[cfg_attr(any(feature = "host", test), derive(Debug, Clone, PartialEq, Eq, Hash, PartialOrd, Ord))] pub struct Commitment(pub(super) [u8; 32]); /// A commitment to all zero data. 
diff --git a/nssa/core/src/nullifier.rs b/nssa/core/src/nullifier.rs index 8d9d59fa..09446779 100644 --- a/nssa/core/src/nullifier.rs +++ b/nssa/core/src/nullifier.rs @@ -42,7 +42,7 @@ impl From<&NullifierSecretKey> for NullifierPublicKey { pub type NullifierSecretKey = [u8; 32]; #[derive(Serialize, Deserialize, BorshSerialize, BorshDeserialize)] -#[cfg_attr(any(feature = "host", test), derive(Debug, Clone, PartialEq, Eq, Hash))] +#[cfg_attr(any(feature = "host", test), derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord, Hash))] pub struct Nullifier(pub(super) [u8; 32]); impl Nullifier { diff --git a/nssa/src/merkle_tree/mod.rs b/nssa/src/merkle_tree/mod.rs index c4501cf8..b3637b13 100644 --- a/nssa/src/merkle_tree/mod.rs +++ b/nssa/src/merkle_tree/mod.rs @@ -1,3 +1,4 @@ +use borsh::{BorshDeserialize, BorshSerialize}; use sha2::{Digest, Sha256}; mod default_values; @@ -20,6 +21,7 @@ fn hash_value(value: &Value) -> Node { } #[cfg_attr(test, derive(Debug, PartialEq, Eq))] +#[derive(BorshSerialize, BorshDeserialize)] pub struct MerkleTree { nodes: Vec, capacity: usize, diff --git a/nssa/src/program.rs b/nssa/src/program.rs index 943b16ed..06c7ad29 100644 --- a/nssa/src/program.rs +++ b/nssa/src/program.rs @@ -1,3 +1,4 @@ +use borsh::{BorshDeserialize, BorshSerialize}; use nssa_core::{ account::AccountWithMetadata, program::{InstructionData, ProgramId, ProgramOutput}, @@ -14,7 +15,7 @@ use crate::{ /// TODO: Make this variable when fees are implemented const MAX_NUM_CYCLES_PUBLIC_EXECUTION: u64 = 1024 * 1024 * 32; // 32M cycles -#[derive(Clone, Debug, PartialEq, Eq)] +#[derive(Clone, Debug, PartialEq, Eq, BorshSerialize, BorshDeserialize)] pub struct Program { id: ProgramId, elf: Vec, diff --git a/nssa/src/state.rs b/nssa/src/state.rs index 1a384b2f..9888f583 100644 --- a/nssa/src/state.rs +++ b/nssa/src/state.rs @@ -1,4 +1,4 @@ -use std::collections::{HashMap, HashSet}; +use std::collections::{BTreeSet, HashMap, HashSet}; use nssa_core::{ Commitment, 
CommitmentSetDigest, DUMMY_COMMITMENT, MembershipProof, Nullifier, @@ -15,6 +15,7 @@ use crate::{ pub const MAX_NUMBER_CHAINED_CALLS: usize = 10; +#[derive(BorshSerialize, BorshDeserialize)] pub(crate) struct CommitmentSet { merkle_tree: MerkleTree, commitments: HashMap, @@ -60,8 +61,49 @@ impl CommitmentSet { } } -type NullifierSet = HashSet; +struct NullifierSet(BTreeSet); +impl NullifierSet { + fn new() -> Self { + Self(BTreeSet::new()) + } + + fn extend(&mut self, new_nullifiers: Vec) { + self.0.extend(new_nullifiers.into_iter()); + } + + fn contains(&self, nullifier: &Nullifier) -> bool { + self.0.contains(nullifier) + } +} + +impl BorshSerialize for NullifierSet { + fn serialize(&self, writer: &mut W) -> std::io::Result<()> { + self.0.iter().collect::>().serialize(writer) + } +} + +impl BorshDeserialize for NullifierSet { + fn deserialize_reader(reader: &mut R) -> std::io::Result { + let vec = Vec::::deserialize_reader(reader)?; + + let mut set = BTreeSet::new(); + for n in vec { + if !set.insert(n) { + return Err(std::io::Error::new( + std::io::ErrorKind::InvalidData, + "duplicate nullifier in NullifierSet", + )); + } + } + + Ok(Self(set)) + } +} + +use borsh::{BorshDeserialize, BorshSerialize}; + +#[derive(BorshSerialize, BorshDeserialize)] pub struct V02State { public_state: HashMap, private_state: (CommitmentSet, NullifierSet), diff --git a/sequencer_core/src/block_store.rs b/sequencer_core/src/block_store.rs index 92543be7..108538c2 100644 --- a/sequencer_core/src/block_store.rs +++ b/sequencer_core/src/block_store.rs @@ -6,6 +6,7 @@ use common::{ block::{Block, BlockHash}, transaction::EncodedTransaction, }; +use nssa::V02State; use storage::RocksDBIO; pub struct SequencerBlockStore { @@ -53,13 +54,6 @@ impl SequencerBlockStore { Ok(self.dbio.get_block(id)?) 
} - pub fn put_block_at_id(&mut self, block: Block) -> Result<()> { - let new_transactions_map = block_to_transactions_map(&block); - self.dbio.put_block(block, false)?; - self.tx_hash_to_block_map.extend(new_transactions_map); - Ok(()) - } - pub fn delete_block_at_id(&mut self, block_id: u64) -> Result<()> { Ok(self.dbio.delete_block(block_id)?) } @@ -93,6 +87,10 @@ impl SequencerBlockStore { pub(crate) fn get_pending_blocks(&self) -> impl Iterator> { self.dbio.get_all_blocks().map(|res| Ok(res?)) } + + pub(crate) fn update(&self, block: Block, state: &V02State) -> Result<()> { + Ok(self.dbio.atomic_update(block, state)?) + } } pub(crate) fn block_to_transactions_map(block: &Block) -> HashMap { @@ -140,7 +138,8 @@ mod tests { let retrieved_tx = node_store.get_transaction_by_hash(tx.hash()); assert_eq!(None, retrieved_tx); // Add the block with the transaction - node_store.put_block_at_id(block).unwrap(); + let dummy_state = V02State::new_with_genesis_accounts(&[], &[]); + node_store.update(block, &dummy_state).unwrap(); // Try again let retrieved_tx = node_store.get_transaction_by_hash(tx.hash()); assert_eq!(Some(tx), retrieved_tx); diff --git a/sequencer_core/src/lib.rs b/sequencer_core/src/lib.rs index 5183ac43..f161521c 100644 --- a/sequencer_core/src/lib.rs +++ b/sequencer_core/src/lib.rs @@ -210,7 +210,7 @@ impl SequencerCore { .clone() .into_pending_block(self.block_store.signing_key(), bedrock_parent_id); - self.block_store.put_block_at_id(block)?; + self.block_store.update(block, &self.state)?; self.chain_height = new_block_height; diff --git a/storage/Cargo.toml b/storage/Cargo.toml index 4678560e..98257526 100644 --- a/storage/Cargo.toml +++ b/storage/Cargo.toml @@ -9,3 +9,4 @@ common.workspace = true thiserror.workspace = true borsh.workspace = true rocksdb.workspace = true +nssa.workspace = true diff --git a/storage/src/lib.rs b/storage/src/lib.rs index 498197b6..27d1f38a 100644 --- a/storage/src/lib.rs +++ b/storage/src/lib.rs @@ -1,9 +1,10 @@ use 
std::{path::Path, sync::Arc}; -use common::block::{Block, BlockHash}; +use common::block::Block; use error::DbError; +use nssa::V02State; use rocksdb::{ - BoundColumnFamily, ColumnFamilyDescriptor, DBWithThreadMode, MultiThreaded, Options, + BoundColumnFamily, ColumnFamilyDescriptor, DBWithThreadMode, MultiThreaded, Options, WriteBatch, }; pub mod error; @@ -29,15 +30,15 @@ pub const DB_META_FIRST_BLOCK_SET_KEY: &str = "first_block_set"; /// Key base for storing metainformation about the last finalized block on Bedrock pub const DB_META_LAST_FINALIZED_BLOCK_ID: &str = "last_finalized_block_id"; -/// Key base for storing snapshot which describe block id -pub const DB_SNAPSHOT_BLOCK_ID_KEY: &str = "block_id"; +/// Key base for storing the NSSA state +pub const DB_NSSA_STATE_KEY: &str = "nssa_state"; /// Name of block column family pub const CF_BLOCK_NAME: &str = "cf_block"; /// Name of meta column family pub const CF_META_NAME: &str = "cf_meta"; -/// Name of snapshot column family -pub const CF_SNAPSHOT_NAME: &str = "cf_snapshot"; +/// Name of state column family +pub const CF_NSSA_STATE_NAME: &str = "cf_state"; pub type DbResult = Result; @@ -52,7 +53,7 @@ impl RocksDBIO { // ToDo: Add more column families for different data let cfb = ColumnFamilyDescriptor::new(CF_BLOCK_NAME, cf_opts.clone()); let cfmeta = ColumnFamilyDescriptor::new(CF_META_NAME, cf_opts.clone()); - let cfsnapshot = ColumnFamilyDescriptor::new(CF_SNAPSHOT_NAME, cf_opts.clone()); + let cfstate = ColumnFamilyDescriptor::new(CF_NSSA_STATE_NAME, cf_opts.clone()); let mut db_opts = Options::default(); db_opts.create_missing_column_families(true); @@ -60,7 +61,7 @@ impl RocksDBIO { let db = DBWithThreadMode::::open_cf_descriptors( &db_opts, path, - vec![cfb, cfmeta, cfsnapshot], + vec![cfb, cfmeta, cfstate], ); let dbio = Self { @@ -92,7 +93,7 @@ impl RocksDBIO { // ToDo: Add more column families for different data let _cfb = ColumnFamilyDescriptor::new(CF_BLOCK_NAME, cf_opts.clone()); let _cfmeta = 
ColumnFamilyDescriptor::new(CF_META_NAME, cf_opts.clone()); - let _cfsnapshot = ColumnFamilyDescriptor::new(CF_SNAPSHOT_NAME, cf_opts.clone()); + let _cfstate = ColumnFamilyDescriptor::new(CF_NSSA_STATE_NAME, cf_opts.clone()); let mut db_opts = Options::default(); db_opts.create_missing_column_families(true); @@ -109,8 +110,8 @@ impl RocksDBIO { self.db.cf_handle(CF_BLOCK_NAME).unwrap() } - pub fn snapshot_column(&self) -> Arc> { - self.db.cf_handle(CF_SNAPSHOT_NAME).unwrap() + pub fn nssa_state_column(&self) -> Arc> { + self.db.cf_handle(CF_NSSA_STATE_NAME).unwrap() } pub fn get_meta_first_block_in_db(&self) -> DbResult { @@ -189,6 +190,27 @@ impl RocksDBIO { Ok(res.is_some()) } + pub fn put_nssa_state_in_db(&self, state: &V02State, batch: &mut WriteBatch) -> DbResult<()> { + let cf_state = self.nssa_state_column(); + batch.put_cf( + &cf_state, + borsh::to_vec(&DB_NSSA_STATE_KEY).map_err(|err| { + DbError::borsh_cast_message( + err, + Some("Failed to serialize DB_NSSA_STATE_KEY".to_string()), + ) + })?, + borsh::to_vec(state).map_err(|err| { + DbError::borsh_cast_message( + err, + Some("Failed to serialize NSSA state".to_string()), + ) + })?, + ); + + Ok(()) + } + pub fn put_meta_first_block_in_db(&self, block: Block) -> DbResult<()> { let cf_meta = self.meta_column(); self.db @@ -209,7 +231,15 @@ impl RocksDBIO { ) .map_err(|rerr| DbError::rocksdb_cast_message(rerr, None))?; - self.put_block(block, true)?; + let mut batch = WriteBatch::default(); + self.put_block(block, true, &mut batch)?; + self.db.write(batch).map_err(|rerr| { + DbError::rocksdb_cast_message( + rerr, + Some("Failed to write first block in db".to_string()), + ) + })?; + Ok(()) } @@ -274,7 +304,7 @@ impl RocksDBIO { Ok(()) } - pub fn put_block(&self, block: Block, first: bool) -> DbResult<()> { + pub fn put_block(&self, block: Block, first: bool, batch: &mut WriteBatch) -> DbResult<()> { let cf_block = self.block_column(); if !first { @@ -285,23 +315,15 @@ impl RocksDBIO { } } - self.db -
.put_cf( - &cf_block, - borsh::to_vec(&block.header.block_id).map_err(|err| { - DbError::borsh_cast_message( - err, - Some("Failed to serialize block id".to_string()), - ) - })?, - borsh::to_vec(&block).map_err(|err| { - DbError::borsh_cast_message( - err, - Some("Failed to serialize block data".to_string()), - ) - })?, - ) - .map_err(|rerr| DbError::rocksdb_cast_message(rerr, None))?; + batch.put_cf( + &cf_block, + borsh::to_vec(&block.header.block_id).map_err(|err| { + DbError::borsh_cast_message(err, Some("Failed to serialize block id".to_string())) + })?, + borsh::to_vec(&block).map_err(|err| { + DbError::borsh_cast_message(err, Some("Failed to serialize block data".to_string())) + })?, + ); Ok(()) } @@ -334,35 +356,6 @@ impl RocksDBIO { } } - pub fn get_snapshot_block_id(&self) -> DbResult { - let cf_snapshot = self.snapshot_column(); - let res = self - .db - .get_cf( - &cf_snapshot, - borsh::to_vec(&DB_SNAPSHOT_BLOCK_ID_KEY).map_err(|err| { - DbError::borsh_cast_message( - err, - Some("Failed to serialize DB_SNAPSHOT_BLOCK_ID_KEY".to_string()), - ) - })?, - ) - .map_err(|rerr| DbError::rocksdb_cast_message(rerr, None))?; - - if let Some(data) = res { - Ok(borsh::from_slice::(&data).map_err(|err| { - DbError::borsh_cast_message( - err, - Some("Failed to deserialize last block".to_string()), - ) - })?) 
- } else { - Err(DbError::db_interaction_error( - "Snapshot block ID not found".to_string(), - )) - } - } - pub fn delete_block(&self, block_id: u64) -> DbResult<()> { let cf_block = self.block_column(); let key = borsh::to_vec(&block_id).map_err(|err| { @@ -407,4 +400,17 @@ impl RocksDBIO { }) }) } + + pub fn atomic_update(&self, block: Block, state: &V02State) -> DbResult<()> { + let block_id = block.header.block_id; + let mut batch = WriteBatch::default(); + self.put_block(block, false, &mut batch)?; + self.put_nssa_state_in_db(state, &mut batch)?; + self.db.write(batch).map_err(|rerr| { + DbError::rocksdb_cast_message( + rerr, + Some(format!("Failed to update db with block {block_id}")), + ) + }) + } }