add state to rocksdb

Sergio Chouhy 2026-01-27 01:20:17 -03:00
parent e897fd6076
commit fe4a89191c
10 changed files with 125 additions and 73 deletions

Cargo.lock generated
View File

@@ -5455,6 +5455,7 @@ version = "0.1.0"
dependencies = [
"borsh",
"common",
"nssa",
"rocksdb",
"thiserror 2.0.17",
]

View File

@@ -5,7 +5,7 @@ use serde::{Deserialize, Serialize};
use crate::{NullifierPublicKey, account::Account};
#[derive(Serialize, Deserialize, BorshSerialize, BorshDeserialize)]
#[cfg_attr(any(feature = "host", test), derive(Debug, Clone, PartialEq, Eq, Hash))]
#[cfg_attr(any(feature = "host", test), derive(Debug, Clone, PartialEq, Eq, Hash, PartialOrd, Ord))]
pub struct Commitment(pub(super) [u8; 32]);
/// A commitment to all zero data.

View File

@@ -42,7 +42,7 @@ impl From<&NullifierSecretKey> for NullifierPublicKey {
pub type NullifierSecretKey = [u8; 32];
#[derive(Serialize, Deserialize, BorshSerialize, BorshDeserialize)]
#[cfg_attr(any(feature = "host", test), derive(Debug, Clone, PartialEq, Eq, Hash))]
#[cfg_attr(any(feature = "host", test), derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord, Hash))]
pub struct Nullifier(pub(super) [u8; 32]);
impl Nullifier {

View File

@@ -1,3 +1,4 @@
use borsh::{BorshDeserialize, BorshSerialize};
use sha2::{Digest, Sha256};
mod default_values;
@@ -20,6 +21,7 @@ fn hash_value(value: &Value) -> Node {
}
#[cfg_attr(test, derive(Debug, PartialEq, Eq))]
#[derive(BorshSerialize, BorshDeserialize)]
pub struct MerkleTree {
nodes: Vec<Node>,
capacity: usize,

View File

@@ -1,3 +1,4 @@
use borsh::{BorshDeserialize, BorshSerialize};
use nssa_core::{
account::AccountWithMetadata,
program::{InstructionData, ProgramId, ProgramOutput},
@@ -14,7 +15,7 @@ use crate::{
/// TODO: Make this variable when fees are implemented
const MAX_NUM_CYCLES_PUBLIC_EXECUTION: u64 = 1024 * 1024 * 32; // 32M cycles
#[derive(Clone, Debug, PartialEq, Eq)]
#[derive(Clone, Debug, PartialEq, Eq, BorshSerialize, BorshDeserialize)]
pub struct Program {
id: ProgramId,
elf: Vec<u8>,

View File

@@ -1,4 +1,4 @@
use std::collections::{HashMap, HashSet};
use std::collections::{BTreeSet, HashMap, HashSet};
use nssa_core::{
Commitment, CommitmentSetDigest, DUMMY_COMMITMENT, MembershipProof, Nullifier,
@@ -15,6 +15,7 @@ use crate::{
pub const MAX_NUMBER_CHAINED_CALLS: usize = 10;
#[derive(BorshSerialize, BorshDeserialize)]
pub(crate) struct CommitmentSet {
merkle_tree: MerkleTree,
commitments: HashMap<Commitment, usize>,
@@ -60,8 +61,49 @@ impl CommitmentSet {
}
}
type NullifierSet = HashSet<Nullifier>;
struct NullifierSet(BTreeSet<Nullifier>);
impl NullifierSet {
fn new() -> Self {
Self(BTreeSet::new())
}
fn extend(&mut self, new_nullifiers: Vec<Nullifier>) {
self.0.extend(new_nullifiers);
}
fn contains(&self, nullifier: &Nullifier) -> bool {
self.0.contains(nullifier)
}
}
impl BorshSerialize for NullifierSet {
fn serialize<W: std::io::Write>(&self, writer: &mut W) -> std::io::Result<()> {
self.0.iter().collect::<Vec<_>>().serialize(writer)
}
}
impl BorshDeserialize for NullifierSet {
fn deserialize_reader<R: std::io::Read>(reader: &mut R) -> std::io::Result<Self> {
let vec = Vec::<Nullifier>::deserialize_reader(reader)?;
let mut set = BTreeSet::new();
for n in vec {
if !set.insert(n) {
return Err(std::io::Error::new(
std::io::ErrorKind::InvalidData,
"duplicate nullifier in NullifierSet",
));
}
}
Ok(Self(set))
}
}
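Switching the alias to a BTreeSet newtype (together with the PartialOrd/Ord derives above) makes iteration order, and therefore the Borsh byte layout, deterministic. A minimal round-trip sketch of the manual impls above; this is hypothetical test code, not part of the commit, and assumes it sits in the same module so the Nullifier tuple constructor is visible:
#[test]
fn nullifier_set_borsh_round_trip() {
    // Serialize a two-element set and read it back through the manual impls.
    let mut set = NullifierSet::new();
    set.extend(vec![Nullifier([1u8; 32]), Nullifier([2u8; 32])]);
    let bytes = borsh::to_vec(&set).unwrap();
    let decoded: NullifierSet = borsh::from_slice(&bytes).unwrap();
    assert!(decoded.contains(&Nullifier([1u8; 32])));
    // A payload that encodes the same nullifier twice must be rejected.
    let dup = borsh::to_vec(&vec![Nullifier([3u8; 32]), Nullifier([3u8; 32])]).unwrap();
    assert!(borsh::from_slice::<NullifierSet>(&dup).is_err());
}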
use borsh::{BorshDeserialize, BorshSerialize};
#[derive(BorshSerialize, BorshDeserialize)]
pub struct V02State {
public_state: HashMap<AccountId, Account>,
private_state: (CommitmentSet, NullifierSet),

View File

@@ -6,6 +6,7 @@ use common::{
block::{Block, BlockHash},
transaction::EncodedTransaction,
};
use nssa::V02State;
use storage::RocksDBIO;
pub struct SequencerBlockStore {
@@ -53,13 +54,6 @@ impl SequencerBlockStore {
Ok(self.dbio.get_block(id)?)
}
pub fn put_block_at_id(&mut self, block: Block) -> Result<()> {
let new_transactions_map = block_to_transactions_map(&block);
self.dbio.put_block(block, false)?;
self.tx_hash_to_block_map.extend(new_transactions_map);
Ok(())
}
pub fn delete_block_at_id(&mut self, block_id: u64) -> Result<()> {
Ok(self.dbio.delete_block(block_id)?)
}
@@ -93,6 +87,10 @@ impl SequencerBlockStore {
pub(crate) fn get_pending_blocks(&self) -> impl Iterator<Item = Result<Block>> {
self.dbio.get_all_blocks().map(|res| Ok(res?))
}
pub(crate) fn update(&self, block: Block, state: &V02State) -> Result<()> {
Ok(self.dbio.atomic_update(block, state)?)
}
}
pub(crate) fn block_to_transactions_map(block: &Block) -> HashMap<HashType, u64> {
@@ -140,7 +138,8 @@ mod tests {
let retrieved_tx = node_store.get_transaction_by_hash(tx.hash());
assert_eq!(None, retrieved_tx);
// Add the block with the transaction
node_store.put_block_at_id(block).unwrap();
let dummy_state = V02State::new_with_genesis_accounts(&[], &[]);
node_store.update(block, &dummy_state).unwrap();
// Try again
let retrieved_tx = node_store.get_transaction_by_hash(tx.hash());
assert_eq!(Some(tx), retrieved_tx);

View File

@@ -210,7 +210,7 @@ impl SequencerCore {
.clone()
.into_pending_block(self.block_store.signing_key(), bedrock_parent_id);
self.block_store.put_block_at_id(block)?;
self.block_store.update(block, &self.state)?;
self.chain_height = new_block_height;

View File

@@ -9,3 +9,4 @@ common.workspace = true
thiserror.workspace = true
borsh.workspace = true
rocksdb.workspace = true
nssa.workspace = true

View File

@@ -1,9 +1,10 @@
use std::{path::Path, sync::Arc};
use common::block::{Block, BlockHash};
use common::block::Block;
use error::DbError;
use nssa::V02State;
use rocksdb::{
BoundColumnFamily, ColumnFamilyDescriptor, DBWithThreadMode, MultiThreaded, Options,
BoundColumnFamily, ColumnFamilyDescriptor, DBWithThreadMode, MultiThreaded, Options, WriteBatch,
};
pub mod error;
@@ -29,15 +30,15 @@ pub const DB_META_FIRST_BLOCK_SET_KEY: &str = "first_block_set";
/// Key base for storing metainformation about the last finalized block on Bedrock
pub const DB_META_LAST_FINALIZED_BLOCK_ID: &str = "last_finalized_block_id";
/// Key base for storing snapshot which describe block id
pub const DB_SNAPSHOT_BLOCK_ID_KEY: &str = "block_id";
/// Key base for storing the NSSA state
pub const DB_NSSA_STATE_KEY: &str = "nssa_state";
/// Name of block column family
pub const CF_BLOCK_NAME: &str = "cf_block";
/// Name of meta column family
pub const CF_META_NAME: &str = "cf_meta";
/// Name of snapshot column family
pub const CF_SNAPSHOT_NAME: &str = "cf_snapshot";
/// Name of state column family
pub const CF_NSSA_STATE_NAME: &str = "cf_state";
pub type DbResult<T> = Result<T, DbError>;
@@ -52,7 +53,7 @@ impl RocksDBIO {
// ToDo: Add more column families for different data
let cfb = ColumnFamilyDescriptor::new(CF_BLOCK_NAME, cf_opts.clone());
let cfmeta = ColumnFamilyDescriptor::new(CF_META_NAME, cf_opts.clone());
let cfsnapshot = ColumnFamilyDescriptor::new(CF_SNAPSHOT_NAME, cf_opts.clone());
let cfstate = ColumnFamilyDescriptor::new(CF_NSSA_STATE_NAME, cf_opts.clone());
let mut db_opts = Options::default();
db_opts.create_missing_column_families(true);
@@ -60,7 +61,7 @@ impl RocksDBIO {
let db = DBWithThreadMode::<MultiThreaded>::open_cf_descriptors(
&db_opts,
path,
vec![cfb, cfmeta, cfsnapshot],
vec![cfb, cfmeta, cfstate],
);
let dbio = Self {
@@ -92,7 +93,7 @@ impl RocksDBIO {
// ToDo: Add more column families for different data
let _cfb = ColumnFamilyDescriptor::new(CF_BLOCK_NAME, cf_opts.clone());
let _cfmeta = ColumnFamilyDescriptor::new(CF_META_NAME, cf_opts.clone());
let _cfsnapshot = ColumnFamilyDescriptor::new(CF_SNAPSHOT_NAME, cf_opts.clone());
let _cfstate = ColumnFamilyDescriptor::new(CF_NSSA_STATE_NAME, cf_opts.clone());
let mut db_opts = Options::default();
db_opts.create_missing_column_families(true);
@@ -109,8 +110,8 @@ impl RocksDBIO {
self.db.cf_handle(CF_BLOCK_NAME).unwrap()
}
pub fn snapshot_column(&self) -> Arc<BoundColumnFamily<'_>> {
self.db.cf_handle(CF_SNAPSHOT_NAME).unwrap()
pub fn nssa_state_column(&self) -> Arc<BoundColumnFamily<'_>> {
self.db.cf_handle(CF_NSSA_STATE_NAME).unwrap()
}
pub fn get_meta_first_block_in_db(&self) -> DbResult<u64> {
@@ -189,6 +190,27 @@ impl RocksDBIO {
Ok(res.is_some())
}
pub fn put_nssa_state_in_db(&self, state: &V02State, batch: &mut WriteBatch) -> DbResult<()> {
let cf_state = self.nssa_state_column();
batch.put_cf(
&cf_state,
borsh::to_vec(&DB_NSSA_STATE_KEY).map_err(|err| {
DbError::borsh_cast_message(
err,
Some("Failed to serialize DB_NSSA_STATE_KEY".to_string()),
)
})?,
borsh::to_vec(state).map_err(|err| {
DbError::borsh_cast_message(
err,
Some("Failed to serialize first block id".to_string()),
)
})?,
);
Ok(())
}
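Only the write path is in this commit. A hypothetical read-side counterpart could look like the sketch below; the name get_nssa_state_from_db and the Option-returning signature are assumptions, while the column family, key constant, and error helpers mirror the surrounding code:
// Hypothetical (not in this commit): fetch the persisted NSSA state, if any.
pub fn get_nssa_state_from_db(&self) -> DbResult<Option<V02State>> {
    // Look up the state under the same Borsh-encoded key used by the write path.
    let cf_state = self.nssa_state_column();
    let key = borsh::to_vec(&DB_NSSA_STATE_KEY).map_err(|err| {
        DbError::borsh_cast_message(
            err,
            Some("Failed to serialize DB_NSSA_STATE_KEY".to_string()),
        )
    })?;
    let res = self
        .db
        .get_cf(&cf_state, key)
        .map_err(|rerr| DbError::rocksdb_cast_message(rerr, None))?;
    // Decode the stored bytes, if present, back into a V02State.
    res.map(|data| {
        borsh::from_slice::<V02State>(&data).map_err(|err| {
            DbError::borsh_cast_message(
                err,
                Some("Failed to deserialize NSSA state".to_string()),
            )
        })
    })
    .transpose()
}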
pub fn put_meta_first_block_in_db(&self, block: Block) -> DbResult<()> {
let cf_meta = self.meta_column();
self.db
@@ -209,7 +231,15 @@ impl RocksDBIO {
)
.map_err(|rerr| DbError::rocksdb_cast_message(rerr, None))?;
self.put_block(block, true)?;
let mut batch = WriteBatch::default();
self.put_block(block, true, &mut batch)?;
self.db.write(batch).map_err(|rerr| {
DbError::rocksdb_cast_message(
rerr,
Some("Failed to write first block in db".to_string()),
)
})?;
Ok(())
}
@@ -274,7 +304,7 @@ impl RocksDBIO {
Ok(())
}
pub fn put_block(&self, block: Block, first: bool) -> DbResult<()> {
pub fn put_block(&self, block: Block, first: bool, batch: &mut WriteBatch) -> DbResult<()> {
let cf_block = self.block_column();
if !first {
@ -285,23 +315,15 @@ impl RocksDBIO {
}
}
self.db
.put_cf(
&cf_block,
borsh::to_vec(&block.header.block_id).map_err(|err| {
DbError::borsh_cast_message(
err,
Some("Failed to serialize block id".to_string()),
)
})?,
borsh::to_vec(&block).map_err(|err| {
DbError::borsh_cast_message(
err,
Some("Failed to serialize block data".to_string()),
)
})?,
)
.map_err(|rerr| DbError::rocksdb_cast_message(rerr, None))?;
batch.put_cf(
&cf_block,
borsh::to_vec(&block.header.block_id).map_err(|err| {
DbError::borsh_cast_message(err, Some("Failed to serialize block id".to_string()))
})?,
borsh::to_vec(&block).map_err(|err| {
DbError::borsh_cast_message(err, Some("Failed to serialize block data".to_string()))
})?,
);
Ok(())
}
@ -334,35 +356,6 @@ impl RocksDBIO {
}
}
pub fn get_snapshot_block_id(&self) -> DbResult<u64> {
let cf_snapshot = self.snapshot_column();
let res = self
.db
.get_cf(
&cf_snapshot,
borsh::to_vec(&DB_SNAPSHOT_BLOCK_ID_KEY).map_err(|err| {
DbError::borsh_cast_message(
err,
Some("Failed to serialize DB_SNAPSHOT_BLOCK_ID_KEY".to_string()),
)
})?,
)
.map_err(|rerr| DbError::rocksdb_cast_message(rerr, None))?;
if let Some(data) = res {
Ok(borsh::from_slice::<u64>(&data).map_err(|err| {
DbError::borsh_cast_message(
err,
Some("Failed to deserialize last block".to_string()),
)
})?)
} else {
Err(DbError::db_interaction_error(
"Snapshot block ID not found".to_string(),
))
}
}
pub fn delete_block(&self, block_id: u64) -> DbResult<()> {
let cf_block = self.block_column();
let key = borsh::to_vec(&block_id).map_err(|err| {
@ -407,4 +400,17 @@ impl RocksDBIO {
})
})
}
pub fn atomic_update(&self, block: Block, state: &V02State) -> DbResult<()> {
let block_id = block.header.block_id;
let mut batch = WriteBatch::default();
self.put_block(block, false, &mut batch)?;
self.put_nssa_state_in_db(state, &mut batch)?;
self.db.write(batch).map_err(|rerr| {
DbError::rocksdb_cast_message(
rerr,
Some(format!("Failed to udpate db with block {block_id}")),
)
})
}
}
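Since atomic_update writes the block and the V02State through a single WriteBatch, a crash between the two writes can no longer leave them inconsistent. A sketch of how a node could restore its state on startup, assuming the hypothetical get_nssa_state_from_db above and V02State::new_with_genesis_accounts as used in the block-store test:
// Hypothetical recovery path: reuse the persisted state when present,
// otherwise start from an empty genesis state.
fn load_or_init_state(dbio: &RocksDBIO) -> DbResult<V02State> {
    match dbio.get_nssa_state_from_db()? {
        Some(state) => Ok(state), // written by a previous atomic_update
        None => Ok(V02State::new_with_genesis_accounts(&[], &[])),
    }
}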