Mirror of https://github.com/logos-blockchain/lssa.git, synced 2026-01-04 06:13:10 +00:00
Merge pull request #53 from vacp2p/Pravdyvy/db-sc-public-data-storage
Smart contract public state storage in DB
This commit is contained in: commit df57293ebb
744  Cargo.lock  (generated): file diff suppressed because it is too large
@@ -7,6 +7,7 @@ use merkle_tree_public::TreeHashType;
 use rocksdb::{
     BoundColumnFamily, ColumnFamilyDescriptor, DBWithThreadMode, MultiThreaded, Options,
 };
+use sc_db_utils::{produce_blob_from_fit_vec, DataBlob, DataBlobChangeVariant};
 
 pub mod block;
 pub mod commitment;
@@ -15,6 +16,7 @@ pub mod error;
 pub mod merkle_tree_public;
 pub mod nullifier;
 pub mod nullifier_sparse_merkle_tree;
+pub mod sc_db_utils;
 pub mod transaction;
 pub mod utxo_commitment;
 
@@ -33,17 +35,27 @@ pub const BUFF_SIZE_ROCKSDB: usize = usize::MAX;
 ///Keeping small to not run out of memory
 pub const CACHE_SIZE: usize = 1000;
 
+///Size in bytes of a singular smart contract data blob, stored in db
+pub const SC_DATA_BLOB_SIZE: usize = 256;
+
 ///Key base for storing metainformation about id of first block in db
 pub const DB_META_FIRST_BLOCK_IN_DB_KEY: &str = "first_block_in_db";
 ///Key base for storing metainformation about id of last current block in db
 pub const DB_META_LAST_BLOCK_IN_DB_KEY: &str = "last_block_in_db";
 ///Key base for storing metainformation which describe if first block has been set
 pub const DB_META_FIRST_BLOCK_SET_KEY: &str = "first_block_set";
+///Key to list of all known smart contract addresses
+pub const DB_META_SC_LIST: &str = "sc_list";
 
 ///Name of block column family
 pub const CF_BLOCK_NAME: &str = "cf_block";
 ///Name of meta column family
 pub const CF_META_NAME: &str = "cf_meta";
+///Name of smart contract column family
+pub const CF_SC_NAME: &str = "cf_sc";
+
+///Suffix, used to mark field, which contain length of smart contract
+pub const SC_LEN_SUFFIX: &str = "sc_len";
 
 pub type DbResult<T> = Result<T, DbError>;
 
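
The new constants define a small per-contract key scheme inside the cf_sc column family: one length key per contract plus one key per 256-byte blob. The sketch below is not part of the diff; it mirrors the key construction used by the methods added further down in this file, with illustrative names.

    // Illustrative sketch only: how the new constants combine into cf_sc keys,
    // mirroring the helpers added later in this diff.
    const SC_LEN_SUFFIX: &str = "sc_len";

    // Length key: Debug-formatted address (so it is wrapped in quotes) plus the
    // suffix. Its value is the blob count; each blob value is SC_DATA_BLOB_SIZE
    // (256) bytes.
    fn sc_len_key(sc_addr: &str) -> Vec<u8> {
        format!("{sc_addr:?}{SC_LEN_SUFFIX}").into_bytes()
    }

    // Blob key: raw address bytes followed by the blob id as a big-endian usize.
    fn sc_blob_key(sc_addr: &str, id: usize) -> Vec<u8> {
        let mut key = sc_addr.as_bytes().to_vec();
        key.extend_from_slice(&id.to_be_bytes());
        key
    }

    fn main() {
        assert_eq!(sc_len_key("contract_1"), b"\"contract_1\"sc_len".to_vec());
        // 8 extra bytes: size_of::<usize>() on a 64-bit target.
        assert_eq!(sc_blob_key("contract_1", 3).len(), "contract_1".len() + 8);
    }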
@@ -58,6 +70,7 @@ impl RocksDBIO {
         //ToDo: Add more column families for different data
         let cfb = ColumnFamilyDescriptor::new(CF_BLOCK_NAME, cf_opts.clone());
         let cfmeta = ColumnFamilyDescriptor::new(CF_META_NAME, cf_opts.clone());
+        let cfsc = ColumnFamilyDescriptor::new(CF_SC_NAME, cf_opts.clone());
 
         let mut db_opts = Options::default();
         db_opts.create_missing_column_families(true);
@@ -65,7 +78,7 @@
         let db = DBWithThreadMode::<MultiThreaded>::open_cf_descriptors(
             &db_opts,
             path,
-            vec![cfb, cfmeta],
+            vec![cfb, cfmeta, cfsc],
         );
 
         let dbio = Self {
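
With the third descriptor registered, a database directory created by this code carries cf_block, cf_meta and cf_sc in addition to RocksDB's built-in default column family. A hedged way to check that from outside, using the rust-rocksdb list_cf call and an illustrative path:

    // Sketch only: list the column families of a DB created by the code above.
    use rocksdb::{Options, DB};

    fn main() {
        let cfs = DB::list_cf(&Options::default(), "/tmp/lssa_storage_db")
            .expect("failed to list column families");
        // Expected to contain "default", "cf_block", "cf_meta" and "cf_sc"
        // for a database opened by the changed open_cf_descriptors call.
        println!("{cfs:?}");
    }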
@@ -84,6 +97,8 @@
 
             dbio.put_meta_last_block_in_db(block_id)?;
 
+            dbio.put_meta_sc_list(vec![])?;
+
             Ok(dbio)
         } else {
             warn!("Starting db in unset mode, will have to set starting block manually");
@@ -114,6 +129,10 @@
         self.db.cf_handle(CF_BLOCK_NAME).unwrap()
     }
 
+    pub fn sc_column(&self) -> Arc<BoundColumnFamily> {
+        self.db.cf_handle(CF_SC_NAME).unwrap()
+    }
+
     pub fn get_meta_first_block_in_db(&self) -> DbResult<u64> {
         let cf_meta = self.meta_column();
         let res = self
@@ -182,6 +201,19 @@
         Ok(())
     }
 
+    ///Setting list of known smart contracts in a DB as a `sc_list`
+    pub fn put_meta_sc_list(&self, sc_list: Vec<String>) -> DbResult<()> {
+        let cf_meta = self.meta_column();
+        self.db
+            .put_cf(
+                &cf_meta,
+                DB_META_SC_LIST.as_bytes(),
+                serde_json::to_vec(&sc_list).unwrap(),
+            )
+            .map_err(|rerr| DbError::rocksdb_cast_message(rerr, None))?;
+        Ok(())
+    }
+
     pub fn put_meta_is_first_block_set(&self) -> DbResult<()> {
         let cf_meta = self.meta_column();
         self.db
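
put_meta_sc_list stores the contract list as its plain JSON encoding under the sc_list meta key, and get_meta_sc_list (added further down) reverses it. A stand-alone sketch of that round trip, with illustrative addresses:

    // Stand-alone sketch of the serialization used by put_meta_sc_list /
    // get_meta_sc_list: the Vec<String> is stored as its JSON bytes.
    fn main() -> Result<(), serde_json::Error> {
        let sc_list = vec!["contract_1".to_string(), "contract_2".to_string()];

        // What ends up as the value under the "sc_list" key in cf_meta.
        let stored: Vec<u8> = serde_json::to_vec(&sc_list)?;
        assert_eq!(stored, br#"["contract_1","contract_2"]"#.to_vec());

        // What get_meta_sc_list recovers from those bytes.
        let recovered: Vec<String> = serde_json::from_slice(&stored)?;
        assert_eq!(recovered, sc_list);
        Ok(())
    }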
@@ -233,4 +265,146 @@
             ))
         }
     }
+
+    ///Getting list of known smart contracts in a DB
+    pub fn get_meta_sc_list(&self) -> DbResult<Vec<String>> {
+        let cf_meta = self.meta_column();
+        let sc_list = self
+            .db
+            .get_cf(&cf_meta, DB_META_SC_LIST)
+            .map_err(|rerr| DbError::rocksdb_cast_message(rerr, None))?;
+        if let Some(data) = sc_list {
+            Ok(
+                serde_json::from_slice::<Vec<String>>(&data).map_err(|serr| {
+                    DbError::serde_cast_message(
+                        serr,
+                        Some("List of Sc Deserialization failed".to_string()),
+                    )
+                })?,
+            )
+        } else {
+            Err(DbError::db_interaction_error(
+                "Sc list not found".to_string(),
+            ))
+        }
+    }
+
+    ///Push additional contract into list of known contracts in a DB
+    pub fn put_meta_sc(&self, sc_addr: String) -> DbResult<()> {
+        let mut sc_list = self.get_meta_sc_list()?;
+        sc_list.push(sc_addr);
+        self.put_meta_sc_list(sc_list)?;
+        Ok(())
+    }
+
+    ///Put/Modify sc state in db
+    pub fn put_sc_sc_state(
+        &self,
+        sc_addr: &str,
+        length: usize,
+        modifications: Vec<DataBlobChangeVariant>,
+    ) -> DbResult<()> {
+        self.put_meta_sc(sc_addr.to_string())?;
+
+        let cf_sc = self.sc_column();
+
+        let sc_addr_loc = format!("{sc_addr:?}{SC_LEN_SUFFIX}");
+        let sc_len_addr = sc_addr_loc.as_str().as_bytes();
+
+        self.db
+            .put_cf(&cf_sc, sc_len_addr, length.to_be_bytes())
+            .map_err(|rerr| DbError::rocksdb_cast_message(rerr, None))?;
+
+        for data_change in modifications {
+            match data_change {
+                DataBlobChangeVariant::Created { id, blob } => {
+                    let blob_addr = produce_address_for_data_blob_at_id(sc_addr, id);
+
+                    self.db
+                        .put_cf(&cf_sc, blob_addr, blob)
+                        .map_err(|rerr| DbError::rocksdb_cast_message(rerr, None))?;
+                }
+                DataBlobChangeVariant::Modified {
+                    id,
+                    blob_old: _,
+                    blob_new,
+                } => {
+                    let blob_addr = produce_address_for_data_blob_at_id(sc_addr, id);
+
+                    self.db
+                        .put_cf(&cf_sc, blob_addr, blob_new)
+                        .map_err(|rerr| DbError::rocksdb_cast_message(rerr, None))?;
+                }
+                DataBlobChangeVariant::Deleted { id } => {
+                    let blob_addr = produce_address_for_data_blob_at_id(sc_addr, id);
+
+                    self.db
+                        .delete_cf(&cf_sc, blob_addr)
+                        .map_err(|rerr| DbError::rocksdb_cast_message(rerr, None))?;
+                }
+            }
+        }
+
+        Ok(())
+    }
+
+    ///Get sc state length in blobs from DB
+    pub fn get_sc_sc_state_len(&self, sc_addr: &str) -> DbResult<usize> {
+        let cf_sc = self.sc_column();
+        let sc_addr_loc = format!("{sc_addr:?}{SC_LEN_SUFFIX}");
+
+        let sc_len_addr = sc_addr_loc.as_str().as_bytes();
+
+        let sc_len = self
+            .db
+            .get_cf(&cf_sc, sc_len_addr)
+            .map_err(|rerr| DbError::rocksdb_cast_message(rerr, None))?;
+
+        if let Some(sc_len) = sc_len {
+            Ok(usize::from_be_bytes(sc_len.as_slice().try_into().unwrap()))
+        } else {
+            Err(DbError::db_interaction_error(format!(
+                "Sc len for {sc_addr:?} not found"
+            )))
+        }
+    }
+
+    ///Get full sc state from DB
+    pub fn get_sc_sc_state(&self, sc_addr: &str) -> DbResult<Vec<DataBlob>> {
+        let cf_sc = self.sc_column();
+        let sc_len = self.get_sc_sc_state_len(&sc_addr)?;
+        let mut data_blob_list = vec![];
+
+        for id in 0..sc_len {
+            let blob_addr = produce_address_for_data_blob_at_id(&sc_addr, id);
+
+            let blob = self
+                .db
+                .get_cf(&cf_sc, blob_addr)
+                .map_err(|rerr| DbError::rocksdb_cast_message(rerr, None))?;
+
+            if let Some(blob_data) = blob {
+                data_blob_list.push(produce_blob_from_fit_vec(blob_data));
+            } else {
+                return Err(DbError::db_interaction_error(format!(
+                    "Blob for {sc_addr:?} at id {id} not found"
+                )));
+            }
+        }
+
+        Ok(data_blob_list)
+    }
+}
+
+///Creates address for sc data blob at corresponding id
+fn produce_address_for_data_blob_at_id(sc_addr: &str, id: usize) -> Vec<u8> {
+    let mut prefix_bytes: Vec<u8> = sc_addr.as_bytes().iter().cloned().collect();
+
+    let id_bytes = id.to_be_bytes();
+
+    for byte in id_bytes {
+        prefix_bytes.push(byte);
+    }
+
+    prefix_bytes
 }
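
The new methods combine into a simple write-then-read flow. A usage sketch, assuming an already opened RocksDBIO handle; the crate name storage, the import paths, and the contract address are assumptions made for illustration:

    // Usage sketch; `storage` as the crate name and the example address are assumptions.
    use storage::sc_db_utils::{DataBlob, DataBlobChangeVariant};
    use storage::{DbResult, RocksDBIO, SC_DATA_BLOB_SIZE};

    fn write_and_read_state(db: &RocksDBIO) -> DbResult<Vec<DataBlob>> {
        let blob0: DataBlob = [1u8; SC_DATA_BLOB_SIZE];
        let blob1: DataBlob = [2u8; SC_DATA_BLOB_SIZE];

        // Record a two-blob public state for one contract address. `length` is
        // the blob count stored under the contract's sc_len key; the change
        // list fills blob ids 0 and 1.
        db.put_sc_sc_state(
            "contract_1",
            2,
            vec![
                DataBlobChangeVariant::Created { id: 0, blob: blob0 },
                DataBlobChangeVariant::Created { id: 1, blob: blob1 },
            ],
        )?;

        // Reads back exactly `length` blobs and errors if any id is missing.
        db.get_sc_sc_state("contract_1")
    }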
118  storage/src/sc_db_utils.rs  (new file)
@@ -0,0 +1,118 @@
+use serde::Serialize;
+
+use crate::SC_DATA_BLOB_SIZE;
+
+pub type DataBlob = [u8; SC_DATA_BLOB_SIZE];
+
+#[derive(Debug, Clone, Copy)]
+pub enum DataBlobChangeVariant {
+    Created {
+        id: usize,
+        blob: DataBlob,
+    },
+    Modified {
+        id: usize,
+        blob_old: DataBlob,
+        blob_new: DataBlob,
+    },
+    Deleted {
+        id: usize,
+    },
+}
+
+///Produce `DataBlob` from vector of size <= `SC_DATA_BLOB_SIZE`
+///
+///Extends to `SC_DATA_BLOB_SIZE`, if necessary.
+///
+///Panics, if size > `SC_DATA_BLOB_SIZE`
+pub fn produce_blob_from_fit_vec(data: Vec<u8>) -> DataBlob {
+    let data_len = data.len();
+
+    assert!(data_len <= SC_DATA_BLOB_SIZE);
+    let mut blob: DataBlob = [0; SC_DATA_BLOB_SIZE];
+
+    for (idx, item) in data.into_iter().enumerate() {
+        blob[idx] = item
+    }
+
+    blob
+}
+
+///Creates blob list from generic serializable state
+///
+///`ToDo`: Find a way to align data in a way, to minimize read and write operations in db
+pub fn produce_blob_list_from_sc_public_state<S: Serialize>(
+    state: &S,
+) -> Result<Vec<DataBlob>, serde_json::Error> {
+    let mut blob_list = vec![];
+
+    let ser_data = serde_json::to_vec(state)?;
+
+    //`ToDo` Replace with `next_chunk` usage, when feature stabilizes in Rust
+    for i in 0..(ser_data.len() / SC_DATA_BLOB_SIZE) {
+        let next_chunk: Vec<u8>;
+
+        if (i + 1) * SC_DATA_BLOB_SIZE < ser_data.len() {
+            next_chunk = ser_data[(i * SC_DATA_BLOB_SIZE)..((i + 1) * SC_DATA_BLOB_SIZE)]
+                .iter()
+                .cloned()
+                .collect();
+        } else {
+            next_chunk = ser_data[(i * SC_DATA_BLOB_SIZE)..(ser_data.len())]
+                .iter()
+                .cloned()
+                .collect();
+        }
+
+        blob_list.push(produce_blob_from_fit_vec(next_chunk));
+    }
+
+    Ok(blob_list)
+}
+
+///Compare two consecutive in time blob lists to produce list of modified ids
+pub fn compare_blob_lists(
+    blob_list_old: &[DataBlob],
+    blob_list_new: &[DataBlob],
+) -> Vec<DataBlobChangeVariant> {
+    let mut changed_ids = vec![];
+    let mut id_end = 0;
+
+    let old_len = blob_list_old.len();
+    let new_len = blob_list_new.len();
+
+    if old_len > new_len {
+        for id in new_len..old_len {
+            changed_ids.push(DataBlobChangeVariant::Deleted { id });
+        }
+    } else if new_len > old_len {
+        for id in old_len..new_len {
+            changed_ids.push(DataBlobChangeVariant::Created {
+                id,
+                blob: blob_list_new[id],
+            });
+        }
+    }
+
+    loop {
+        let old_blob = blob_list_old.get(id_end);
+        let new_blob = blob_list_new.get(id_end);
+
+        match (old_blob, new_blob) {
+            (Some(old), Some(new)) => {
+                if old != new {
+                    changed_ids.push(DataBlobChangeVariant::Modified {
+                        id: id_end,
+                        blob_old: *old,
+                        blob_new: *new,
+                    });
+                }
+            }
+            _ => break,
+        }
+
+        id_end += 1;
+    }
+
+    changed_ids
+}
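
A usage sketch for the helpers in this new module: diff two versions of a contract's blob list and inspect the change records, which are exactly what put_sc_sc_state expects as its modifications argument. The crate name storage in the imports is an assumption.

    // Usage sketch; `storage` as the crate/module path is an assumption.
    use storage::sc_db_utils::{compare_blob_lists, DataBlob, DataBlobChangeVariant};
    use storage::SC_DATA_BLOB_SIZE;

    fn main() {
        let old_state: Vec<DataBlob> = vec![[0u8; SC_DATA_BLOB_SIZE]; 3];

        // New version: blob 1 is rewritten and a fourth blob is appended.
        let mut new_state = old_state.clone();
        new_state[1] = [7u8; SC_DATA_BLOB_SIZE];
        new_state.push([9u8; SC_DATA_BLOB_SIZE]);

        let changes = compare_blob_lists(&old_state, &new_state);

        // One Created { id: 3, .. } record (pushed first, for the length growth)
        // and one Modified { id: 1, .. } record from the element-wise comparison.
        assert_eq!(changes.len(), 2);
        assert!(matches!(changes[0], DataBlobChangeVariant::Created { id: 3, .. }));
        assert!(matches!(changes[1], DataBlobChangeVariant::Modified { id: 1, .. }));
    }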