fix: code cleanup and final removals

Oleksandr Pravdyvyi 2025-08-05 14:59:20 +03:00
parent 6565e0af18
commit 4f95cef08f
No known key found for this signature in database
GPG Key ID: 9F8955C63C443871
40 changed files with 212 additions and 1854 deletions

Cargo.lock (generated), 94 changed lines
View File

@ -1016,6 +1016,7 @@ dependencies = [
"hex",
"k256",
"log",
"rand 0.8.5",
"reqwest 0.11.27",
"risc0-zkvm",
"rs_merkle",
@ -1026,19 +1027,6 @@ dependencies = [
"thiserror 1.0.69",
]
[[package]]
name = "consensus"
version = "0.1.0"
dependencies = [
"anyhow",
"env_logger",
"log",
"networking",
"serde",
"serde_json",
"tokio",
]
[[package]]
name = "const-oid"
version = "0.9.6"
@ -2395,8 +2383,6 @@ dependencies = [
"hex",
"log",
"node_core",
"node_rpc",
"node_runner",
"sequencer_core",
"sequencer_rpc",
"sequencer_runner",
@ -2943,17 +2929,6 @@ dependencies = [
"rayon",
]
[[package]]
name = "networking"
version = "0.1.0"
dependencies = [
"anyhow",
"env_logger",
"log",
"serde",
"serde_json",
]
[[package]]
name = "no_std_strings"
version = "0.1.3"
@ -2990,58 +2965,6 @@ dependencies = [
"zkvm",
]
[[package]]
name = "node_rpc"
version = "0.1.0"
dependencies = [
"accounts",
"actix",
"actix-cors",
"actix-web",
"anyhow",
"common",
"consensus",
"env_logger",
"futures",
"hex",
"log",
"networking",
"node_core",
"serde",
"serde_json",
"storage",
"tokio",
"utxo",
"vm",
"zkvm",
]
[[package]]
name = "node_runner"
version = "0.1.0"
dependencies = [
"accounts",
"actix",
"actix-web",
"anyhow",
"clap",
"common",
"consensus",
"env_logger",
"hex",
"log",
"networking",
"node_core",
"node_rpc",
"serde",
"serde_json",
"storage",
"tokio",
"utxo",
"vm",
"zkvm",
]
[[package]]
name = "nom"
version = "7.1.3"
@ -4429,13 +4352,11 @@ dependencies = [
"actix-web",
"anyhow",
"common",
"consensus",
"env_logger",
"futures",
"hex",
"log",
"mempool",
"networking",
"sequencer_core",
"serde",
"serde_json",
@ -4453,11 +4374,9 @@ dependencies = [
"anyhow",
"clap",
"common",
"consensus",
"env_logger",
"log",
"mempool",
"networking",
"sequencer_core",
"sequencer_rpc",
"serde",
@ -5293,17 +5212,6 @@ version = "0.9.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0b928f33d975fc6ad9f86c8f283853ad26bdd5b10b7f1542aa2fa15e2289105a"
[[package]]
name = "vm"
version = "0.1.0"
dependencies = [
"anyhow",
"env_logger",
"log",
"serde",
"serde_json",
]
[[package]]
name = "want"
version = "0.3.1"

View File

@ -1,15 +1,10 @@
[workspace]
resolver = "2"
members = [
"node_runner",
"sequencer_runner",
"storage",
"accounts",
"utxo",
"vm",
"networking",
"consensus",
"node_rpc",
"sequencer_rpc",
"mempool",
"zkvm",

View File

@ -11,6 +11,7 @@ serde.workspace = true
reqwest.workspace = true
risc0-zkvm = { git = "https://github.com/risc0/risc0.git", branch = "release-2.3" }
k256.workspace = true
rand.workspace = true
rs_merkle.workspace = true
sha2.workspace = true

View File

@ -26,19 +26,19 @@ pub struct HashableBlockData {
pub data: Data,
}
impl Block {
pub fn produce_block_from_hashable_data(hashable_data: HashableBlockData) -> Self {
let data = serde_json::to_vec(&hashable_data).unwrap();
impl From<HashableBlockData> for Block {
fn from(value: HashableBlockData) -> Self {
let data = serde_json::to_vec(&value).unwrap();
let hash = OwnHasher::hash(&data);
Self {
block_id: hashable_data.block_id,
prev_block_id: hashable_data.prev_block_id,
block_id: value.block_id,
prev_block_id: value.prev_block_id,
hash,
transactions: hashable_data.transactions,
data: hashable_data.data,
prev_block_hash: hashable_data.prev_block_hash,
transactions: value.transactions,
data: value.data,
prev_block_hash: value.prev_block_hash,
}
}
}
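For orientation, a minimal call-site sketch of the new conversion (the wrapper function below is hypothetical; only the From impl itself comes from this commit):

use common::block::{Block, HashableBlockData};

fn build_block(hashable: HashableBlockData) -> Block {
    // Equivalent to Block::from(hashable); replaces the removed
    // Block::produce_block_from_hashable_data constructor.
    hashable.into()
}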

View File

@ -10,6 +10,9 @@ pub mod rpc_primitives;
pub mod transaction;
pub mod utxo_commitment;
//Module for test utility functions
pub mod test_utils;
use rpc_primitives::errors::RpcError;
///Account id on blockchain

common/src/test_utils.rs (new file), 116 lines
View File

@ -0,0 +1,116 @@
use k256::ecdsa::SigningKey;
use secp256k1_zkp::Tweak;
use crate::{
block::{Block, HashableBlockData}, execution_input::PublicNativeTokenSend, transaction::{SignaturePrivateKey, Transaction, TransactionBody, TxKind}
};
//Dummy producers
///Produce a dummy block with:
///
/// `id` - block id, provide zero for genesis
///
/// `prev_hash` - hash of previous block, provide None for genesis
///
/// `transactions` - vector of `Transaction` objects
///
/// `additional_data` - vector with additional data
pub fn produce_dummy_block(
id: u64,
prev_hash: Option<[u8; 32]>,
transactions: Vec<Transaction>,
additional_data: Vec<u8>,
) -> Block {
let block_data = HashableBlockData {
block_id: id,
prev_block_id: id.saturating_sub(1),
prev_block_hash: prev_hash.unwrap_or_default(),
transactions,
data: additional_data,
};
block_data.into()
}
pub fn produce_dummy_empty_transaction() -> Transaction {
let body = TransactionBody {
tx_kind: TxKind::Public,
execution_input: Default::default(),
execution_output: Default::default(),
utxo_commitments_spent_hashes: Default::default(),
utxo_commitments_created_hashes: Default::default(),
nullifier_created_hashes: Default::default(),
execution_proof_private: Default::default(),
encoded_data: Default::default(),
ephemeral_pub_key: Default::default(),
commitment: Default::default(),
tweak: Default::default(),
secret_r: Default::default(),
sc_addr: Default::default(),
state_changes: Default::default(),
};
Transaction::new(body, SignaturePrivateKey::from_slice(&[1; 32]).unwrap())
}
pub fn create_dummy_private_transaction_random_signer(
nullifier_created_hashes: Vec<[u8; 32]>,
utxo_commitments_spent_hashes: Vec<[u8; 32]>,
utxo_commitments_created_hashes: Vec<[u8; 32]>,
) -> Transaction {
let mut rng = rand::thread_rng();
let body = TransactionBody {
tx_kind: TxKind::Private,
execution_input: vec![],
execution_output: vec![],
utxo_commitments_spent_hashes,
utxo_commitments_created_hashes,
nullifier_created_hashes,
execution_proof_private: "dummy_proof".to_string(),
encoded_data: vec![],
ephemeral_pub_key: vec![10, 11, 12],
commitment: vec![],
tweak: Tweak::new(&mut rng),
secret_r: [0; 32],
sc_addr: "sc_addr".to_string(),
state_changes: (serde_json::Value::Null, 0),
};
Transaction::new(body, SignaturePrivateKey::random(&mut rng))
}
pub fn create_dummy_transaction_native_token_transfer(
from: [u8; 32],
nonce: u64,
to: [u8; 32],
balance_to_move: u64,
signing_key: SigningKey,
) -> Transaction {
let mut rng = rand::thread_rng();
let native_token_transfer = PublicNativeTokenSend {
from,
nonce,
to,
balance_to_move,
};
let body = TransactionBody {
tx_kind: TxKind::Public,
execution_input: serde_json::to_vec(&native_token_transfer).unwrap(),
execution_output: vec![],
utxo_commitments_spent_hashes: vec![],
utxo_commitments_created_hashes: vec![],
nullifier_created_hashes: vec![],
execution_proof_private: "".to_string(),
encoded_data: vec![],
ephemeral_pub_key: vec![10, 11, 12],
commitment: vec![],
tweak: Tweak::new(&mut rng),
secret_r: [0; 32],
sc_addr: "sc_addr".to_string(),
state_changes: (serde_json::Value::Null, 0),
};
Transaction::new(body, signing_key)
}
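A minimal sketch of how a test might use these helpers (the test itself is hypothetical; the same helpers are called from the sequencer_core and block_store tests later in this diff):

#[cfg(test)]
mod tests {
    use common::test_utils::{produce_dummy_block, produce_dummy_empty_transaction};

    #[test]
    fn builds_a_genesis_style_dummy_block() {
        // id 0 and prev_hash None mimic a genesis block.
        let tx = produce_dummy_empty_transaction();
        let block = produce_dummy_block(0, None, vec![tx], vec![]);
        assert_eq!(block.block_id, 0);
    }
}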

View File

@ -1,15 +0,0 @@
[package]
name = "consensus"
version = "0.1.0"
edition = "2021"
[dependencies]
anyhow.workspace = true
serde_json.workspace = true
env_logger.workspace = true
log.workspace = true
serde.workspace = true
tokio.workspace = true
[dependencies.networking]
path = "../networking"

View File

@ -1,22 +0,0 @@
use std::sync::Arc;
use networking::peer_manager::PeerManager;
use tokio::sync::Mutex;
#[derive(Debug)]
///Entrypoint to consensus.
/// Manages consensus protocol.
pub struct ConsensusManager {
pub peer_manager: Arc<Mutex<PeerManager>>,
}
impl ConsensusManager {
pub fn new(peer_manager: Arc<Mutex<PeerManager>>) -> Self {
Self { peer_manager }
}
//ToDo: change block from generic value into struct, when data block will be defined
pub fn vote(&self, _block: serde_json::Value) -> bool {
todo!()
}
}

View File

@ -30,15 +30,9 @@ path = "../sequencer_core"
[dependencies.sequencer_runner]
path = "../sequencer_runner"
[dependencies.node_rpc]
path = "../node_rpc"
[dependencies.node_core]
path = "../node_core"
[dependencies.node_runner]
path = "../node_runner"
[dependencies.common]
path = "../common"

View File

@ -3,7 +3,6 @@ use std::{path::PathBuf, sync::Arc, time::Duration};
use actix_web::dev::ServerHandle;
use anyhow::Result;
use clap::Parser;
use common::rpc_primitives::RpcConfig;
use log::info;
use node_core::{NodeCore, config::NodeConfig};
use sequencer_core::config::SequencerConfig;
@ -31,7 +30,6 @@ pub async fn pre_test(
) -> Result<(
ServerHandle,
JoinHandle<Result<()>>,
ServerHandle,
TempDir,
TempDir,
Arc<Mutex<NodeCore>>,
@ -43,7 +41,7 @@ pub async fn pre_test(
sequencer_runner::config::from_file(home_dir_sequencer.join("sequencer_config.json"))
.unwrap();
let mut node_config =
node_runner::config::from_file(home_dir_node.join("node_config.json")).unwrap();
node_core::config::from_file(home_dir_node.join("node_config.json")).unwrap();
let (temp_dir_node, temp_dir_sequencer) =
replace_home_dir_with_temp_dir_in_configs(&mut node_config, &mut sequencer_config);
@ -51,25 +49,13 @@ pub async fn pre_test(
let (seq_http_server_handle, sequencer_loop_handle) =
startup_sequencer(sequencer_config).await?;
let node_port = node_config.port;
let node_core = NodeCore::start_from_config_update_chain(node_config.clone()).await?;
let wrapped_node_core = Arc::new(Mutex::new(node_core));
let http_server = node_rpc::new_http_server(
RpcConfig::with_port(node_port),
node_config.clone(),
wrapped_node_core.clone(),
)?;
info!("HTTP server started");
let node_http_server_handle = http_server.handle();
tokio::spawn(http_server);
Ok((
seq_http_server_handle,
sequencer_loop_handle,
node_http_server_handle,
temp_dir_node,
temp_dir_sequencer,
wrapped_node_core,
@ -94,18 +80,15 @@ pub async fn post_test(
residual: (
ServerHandle,
JoinHandle<Result<()>>,
ServerHandle,
TempDir,
TempDir,
Arc<Mutex<NodeCore>>,
),
) {
let (seq_http_server_handle, sequencer_loop_handle, node_http_server_handle, _, _, _) =
residual;
let (seq_http_server_handle, sequencer_loop_handle, _, _, _) = residual;
info!("Cleanup");
node_http_server_handle.stop(true).await;
sequencer_loop_handle.abort();
seq_http_server_handle.stop(true).await;
@ -224,7 +207,7 @@ macro_rules! test_cleanup_wrap {
($home_dir:ident, $test_func:ident) => {{
let res = pre_test($home_dir.clone()).await.unwrap();
let wrapped_node_core = res.5.clone();
let wrapped_node_core = res.4.clone();
info!("Waiting for first block creation");
tokio::time::sleep(Duration::from_secs(TIME_TO_WAIT_FOR_BLOCK_SECONDS)).await;

View File

@ -1,11 +0,0 @@
[package]
name = "networking"
version = "0.1.0"
edition = "2021"
[dependencies]
anyhow.workspace = true
serde_json.workspace = true
env_logger.workspace = true
log.workspace = true
serde.workspace = true

View File

@ -1,5 +0,0 @@
pub mod network_protocol;
pub mod peer;
pub mod peer_manager;
pub mod rate_limiter;
pub mod tcp;

View File

@ -1,19 +0,0 @@
#[derive(Debug)]
pub enum MessageKind {}
pub type PeerId = u64;
pub type PeerDistance = u32;
#[derive(Debug)]
pub struct PeerAddr {
pub id: PeerId,
//Probably will be socket address in the future
pub addr: String,
}
#[derive(Debug)]
///Structure, which contains all necessary fields for handshake
pub struct Handshake {}
#[derive(Debug)]
pub enum HandshakeFailedReason {}

View File

@ -1,18 +0,0 @@
use crate::{
network_protocol::{HandshakeFailedReason, PeerAddr},
tcp::Connection,
};
#[derive(Debug)]
/// Structure, which stores all of the peer interaction data.
/// Created at per-peer connection basis at `PeerManager`
pub struct Peer {
pub connection: Connection,
pub peer_addr: PeerAddr,
}
impl Peer {
pub fn handshake(&mut self) -> Result<(), HandshakeFailedReason> {
todo!();
}
}

View File

@ -1,20 +0,0 @@
use anyhow::Result;
use crate::{network_protocol::PeerId, peer::Peer};
#[derive(Debug)]
///Entrypoint to network module.
/// Manages connections with peers in network
pub struct PeerManager {
pub my_peer_id: PeerId,
}
impl PeerManager {
pub async fn start_peer_manager(_num_threads: u8, my_peer_id: PeerId) -> Result<Self> {
Ok(Self { my_peer_id })
}
pub async fn connect(&self, _peer_id: PeerId) -> Peer {
todo!()
}
}

View File

@ -1,16 +0,0 @@
use std::collections::HashMap;
use crate::network_protocol::MessageKind;
#[derive(Debug)]
/// Object responsible to manage the rate limits of all network messages
/// for a single connection/peer.
pub struct RateLimiter {
pub limits: HashMap<MessageKind, u64>,
}
impl RateLimiter {
pub fn is_allowed(&self, _message: MessageKind) -> bool {
todo!();
}
}

View File

@ -1,11 +0,0 @@
use crate::network_protocol::PeerAddr;
#[derive(Debug)]
///Structure, representing peer connection
pub struct Connection {}
#[derive(Debug)]
pub enum ConnectionType {
Inbound { conn: Connection },
Outbound { conn: Connection, peer: PeerAddr },
}

View File

@ -1,269 +0,0 @@
use std::collections::{HashMap, HashSet};
use std::path::Path;
use accounts::account_core::Account;
use anyhow::{anyhow, Result};
use common::block::Block;
use common::merkle_tree_public::merkle_tree::HashStorageMerkleTree;
use common::nullifier::UTXONullifier;
use common::transaction::Transaction;
use common::utxo_commitment::UTXOCommitment;
use log::error;
use storage::sc_db_utils::{DataBlob, DataBlobChangeVariant};
use storage::RocksDBIO;
use crate::chain_storage::AccMap;
pub struct NodeBlockStore {
dbio: RocksDBIO,
}
impl NodeBlockStore {
///Starting database at the start of new chain.
/// Creates files if necessary.
///
/// ATTENTION: Will overwrite genesis block.
pub fn open_db_with_genesis(location: &Path, genesis_block: Option<Block>) -> Result<Self> {
Ok(Self {
dbio: RocksDBIO::new(location, genesis_block)?,
})
}
///Reopening existing database
pub fn open_db_restart(location: &Path, genesis_block: Block) -> Result<Self> {
NodeBlockStore::db_destroy(location)?;
NodeBlockStore::open_db_with_genesis(location, Some(genesis_block))
}
///Reloading existing database
pub fn open_db_reload(location: &Path) -> Result<Self> {
NodeBlockStore::open_db_with_genesis(location, None)
}
///Destroying existing database
fn db_destroy(location: &Path) -> Result<()> {
RocksDBIO::destroy(location).map_err(|err| anyhow!("RocksDBIO error: {}", err))
}
pub fn get_block_at_id(&self, id: u64) -> Result<Block> {
Ok(self.dbio.get_block(id)?)
}
pub fn put_block_at_id(&self, block: Block) -> Result<()> {
Ok(self.dbio.put_block(block, false)?)
}
pub fn put_sc_sc_state(
&self,
sc_addr: &str,
length: usize,
modifications: Vec<DataBlobChangeVariant>,
) -> Result<()> {
Ok(self.dbio.put_sc_sc_state(sc_addr, length, modifications)?)
}
pub fn get_sc_sc_state(&self, sc_addr: &str) -> Result<Vec<DataBlob>> {
Ok(self.dbio.get_sc_sc_state(sc_addr)?)
}
pub fn get_snapshot_block_id(&self) -> Result<u64> {
Ok(self.dbio.get_snapshot_block_id()?)
}
pub fn get_snapshot_account(&self) -> Result<HashMap<[u8; 32], Account>> {
let temp: AccMap = serde_json::from_slice(&self.dbio.get_snapshot_account()?)?;
Ok(temp.into())
}
pub fn get_snapshot_commitment(&self) -> Result<HashStorageMerkleTree<UTXOCommitment>> {
Ok(serde_json::from_slice(
&self.dbio.get_snapshot_commitment()?,
)?)
}
pub fn get_snapshot_nullifier(&self) -> Result<HashSet<UTXONullifier>> {
Ok(serde_json::from_slice(
&self.dbio.get_snapshot_nullifier()?,
)?)
}
pub fn get_snapshot_transaction(&self) -> Result<HashStorageMerkleTree<Transaction>> {
Ok(serde_json::from_slice(
&self.dbio.get_snapshot_transaction()?,
)?)
}
pub fn put_snapshot_at_block_id(
&self,
id: u64,
accounts_ser: Vec<u8>,
comm_ser: Vec<u8>,
txs_ser: Vec<u8>,
nullifiers_ser: Vec<u8>,
) -> Result<()> {
//Error notification for writing into DB error
self.dbio
.put_snapshot_block_id_db(id)
.inspect_err(|err| error!("Failed to store snapshot block id with error {err:#?}"))?;
self.dbio
.put_snapshot_account_db(accounts_ser)
.inspect_err(|err| error!("Failed to store snapshot accounts with error {err:#?}"))?;
self.dbio
.put_snapshot_commitement_db(comm_ser)
.inspect_err(|err| {
error!("Failed to store snapshot commitments with error {err:#?}")
})?;
self.dbio
.put_snapshot_transaction_db(txs_ser)
.inspect_err(|err| {
error!("Failed to store snapshot transactions with error {err:#?}")
})?;
self.dbio
.put_snapshot_nullifier_db(nullifiers_ser)
.inspect_err(|err| error!("Failed to store snapshot nullifiers with error {err:#?}"))?;
Ok(())
}
}
#[cfg(test)]
mod tests {
use super::*;
use common::block::Data;
use tempfile::tempdir;
fn create_genesis_block() -> Block {
Block {
block_id: 0,
prev_block_id: 0,
prev_block_hash: [0; 32],
hash: [1; 32],
transactions: vec![],
data: Data::default(),
}
}
fn create_sample_block(block_id: u64, prev_block_id: u64) -> Block {
Block {
block_id,
prev_block_id,
prev_block_hash: [0; 32],
hash: [1; 32],
transactions: vec![],
data: Data::default(),
}
}
#[test]
fn test_open_db_with_genesis() {
let temp_dir = tempdir().unwrap();
let path = temp_dir.path();
let genesis_block = create_genesis_block();
let node_store =
NodeBlockStore::open_db_with_genesis(path, Some(genesis_block.clone())).unwrap();
// Verify the genesis block is stored
let stored_block = node_store.get_block_at_id(0).unwrap();
assert_eq!(stored_block.block_id, genesis_block.block_id);
assert_eq!(stored_block.hash, genesis_block.hash);
}
#[test]
fn test_open_db_restart() {
let temp_dir = tempdir().unwrap();
let path = temp_dir.path();
let genesis_block = create_genesis_block();
{
let node_store_old =
NodeBlockStore::open_db_with_genesis(path, Some(genesis_block.clone())).unwrap();
let block = create_sample_block(1, 0);
node_store_old.put_block_at_id(block.clone()).unwrap();
}
// Check that the first block is still in the old database
{
let node_store_old = NodeBlockStore::open_db_reload(path).unwrap();
let result = node_store_old.get_block_at_id(1);
assert!(result.is_ok());
}
// Restart the database
let node_store = NodeBlockStore::open_db_restart(path, genesis_block).unwrap();
// The block should no longer be available since no first block is set on restart
let result = node_store.get_block_at_id(1);
assert!(result.is_err());
}
#[test]
fn test_open_db_reload() {
let temp_dir = tempdir().unwrap();
let path = temp_dir.path();
let genesis_block = create_genesis_block();
let _ = NodeBlockStore::open_db_with_genesis(path, Some(genesis_block)).unwrap();
// Reload the database
let node_store = NodeBlockStore::open_db_reload(path).unwrap();
// The genesis block should be available on reload
let result = node_store.get_block_at_id(0);
assert!(result.is_ok());
}
#[test]
fn test_put_and_get_block() {
let temp_dir = tempdir().unwrap();
let path = temp_dir.path();
let genesis_block = create_genesis_block();
let node_store = NodeBlockStore::open_db_with_genesis(path, Some(genesis_block)).unwrap();
let block = create_sample_block(1, 0);
node_store.put_block_at_id(block.clone()).unwrap();
let retrieved_block = node_store.get_block_at_id(1).unwrap();
assert_eq!(retrieved_block.block_id, block.block_id);
assert_eq!(retrieved_block.hash, block.hash);
}
#[test]
fn test_put_snapshot_at_block_id() {
let temp_dir = tempdir().unwrap();
let path = temp_dir.path();
let genesis_block = create_genesis_block();
let node_store = NodeBlockStore::open_db_with_genesis(path, Some(genesis_block)).unwrap();
let id = 3;
let accounts_ser = vec![1, 2, 3, 4];
let comm_ser = vec![5, 6, 7, 8];
let txs_ser = vec![9, 10, 11, 12];
let nullifiers_ser = vec![13, 14, 15, 16];
node_store
.put_snapshot_at_block_id(
id,
accounts_ser.clone(),
comm_ser.clone(),
txs_ser.clone(),
nullifiers_ser.clone(),
)
.unwrap();
assert_eq!(node_store.dbio.get_snapshot_block_id().unwrap(), id);
assert_eq!(
node_store.dbio.get_snapshot_account().unwrap(),
accounts_ser
);
assert_eq!(node_store.dbio.get_snapshot_commitment().unwrap(), comm_ser);
assert_eq!(node_store.dbio.get_snapshot_transaction().unwrap(), txs_ser);
assert_eq!(
node_store.dbio.get_snapshot_nullifier().unwrap(),
nullifiers_ser
);
}
}

View File

@ -3,7 +3,6 @@ use std::collections::{BTreeMap, HashMap, HashSet};
use accounts::account_core::{address::AccountAddress, Account};
use anyhow::Result;
use common::{
block::Block,
merkle_tree_public::merkle_tree::{PublicTransactionMerkleTree, UTXOCommitmentsMerkleTree},
nullifier::UTXONullifier,
};
@ -13,7 +12,6 @@ use serde::{Deserialize, Serialize};
use crate::config::NodeConfig;
pub mod accounts_store;
//pub mod block_store;
#[derive(Deserialize, Serialize)]
pub struct AccMap {
@ -50,23 +48,21 @@ pub struct NodeChainStore {
}
impl NodeChainStore {
pub fn new(config: NodeConfig, genesis_block: Block) -> Result<(Self, u64)> {
pub fn new(config: NodeConfig) -> Result<Self> {
let acc_map = HashMap::new();
let nullifier_store = HashSet::new();
let utxo_commitments_store = UTXOCommitmentsMerkleTree::new(vec![]);
let pub_tx_store = PublicTransactionMerkleTree::new(vec![]);
let block_id = genesis_block.block_id;
Ok((
Ok(
Self {
acc_map,
nullifier_store,
utxo_commitments_store,
pub_tx_store,
node_config: config,
},
block_id,
))
}
)
}
pub fn produce_context(&self, caller: AccountAddress) -> PublicSCContext {
@ -97,9 +93,6 @@ mod tests {
use super::*;
use crate::config::GasConfig;
use accounts::account_core::Account;
use common::block::{Block, Data};
use common::transaction::{SignaturePrivateKey, Transaction, TransactionBody, TxKind};
use secp256k1_zkp::Tweak;
use std::path::PathBuf;
use tempfile::tempdir;
@ -277,55 +270,16 @@ mod tests {
initial_accounts
}
fn create_genesis_block() -> Block {
Block {
block_id: 0,
prev_block_id: 0,
prev_block_hash: [0; 32],
hash: [1; 32],
transactions: vec![],
data: Data::default(),
}
}
//ToDo: Continue refactor
fn create_dummy_transaction(
nullifier_created_hashes: Vec<[u8; 32]>,
utxo_commitments_spent_hashes: Vec<[u8; 32]>,
utxo_commitments_created_hashes: Vec<[u8; 32]>,
) -> Transaction {
let mut rng = rand::thread_rng();
let body = TransactionBody {
tx_kind: TxKind::Private,
execution_input: vec![],
execution_output: vec![],
utxo_commitments_spent_hashes,
utxo_commitments_created_hashes,
nullifier_created_hashes,
execution_proof_private: "dummy_proof".to_string(),
encoded_data: vec![],
ephemeral_pub_key: vec![10, 11, 12],
commitment: vec![],
tweak: Tweak::new(&mut rng),
secret_r: [0; 32],
sc_addr: "sc_addr".to_string(),
state_changes: (serde_json::Value::Null, 0),
};
Transaction::new(body, SignaturePrivateKey::random(&mut rng))
}
//ToDo: Continue refactor
fn create_sample_block(block_id: u64, prev_block_id: u64) -> Block {
Block {
block_id,
prev_block_id,
prev_block_hash: [0; 32],
hash: [1; 32],
transactions: vec![],
data: Data::default(),
}
}
// fn create_genesis_block() -> Block {
// Block {
// block_id: 0,
// prev_block_id: 0,
// prev_block_hash: [0; 32],
// hash: [1; 32],
// transactions: vec![],
// data: Data::default(),
// }
// }
fn create_sample_node_config(home: PathBuf) -> NodeConfig {
NodeConfig {
@ -359,11 +313,8 @@ mod tests {
let config = create_sample_node_config(path.to_path_buf());
let genesis_block = create_genesis_block();
let store = NodeChainStore::new(config.clone()).unwrap();
let (store, block_id) = NodeChainStore::new(config.clone(), genesis_block.clone()).unwrap();
assert_eq!(block_id, 0);
assert!(store.acc_map.is_empty());
assert!(store.nullifier_store.is_empty());
assert_eq!(

View File

@ -4,6 +4,11 @@ use accounts::account_core::Account;
use serde::{Deserialize, Serialize};
use zkvm::gas_calculator::GasCalculator;
use anyhow::Result;
use std::fs::File;
use std::io::BufReader;
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct GasConfig {
/// Gas spent per deploying one byte of data
@ -55,3 +60,10 @@ pub struct NodeConfig {
///Initial accounts for wallet
pub initial_accounts: Vec<Account>,
}
pub fn from_file(config_home: PathBuf) -> Result<NodeConfig> {
let file = File::open(config_home)?;
let reader = BufReader::new(file);
Ok(serde_json::from_reader(reader)?)
}
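A minimal usage sketch of the relocated loader (the wrapper function is hypothetical; the call mirrors the updated sites in the integration tests and, formerly, node_runner):

use std::path::PathBuf;

use anyhow::Result;
use node_core::config::NodeConfig;

fn load_node_config(home_dir: PathBuf) -> Result<NodeConfig> {
    // from_file now lives in node_core::config instead of node_runner::config.
    node_core::config::from_file(home_dir.join("node_config.json"))
}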

View File

@ -1,4 +1,4 @@
use std::sync::{atomic::AtomicU64, Arc};
use std::sync::Arc;
use common::{
execution_input::PublicNativeTokenSend, transaction::Transaction, ExecutionFailureKind,
@ -9,7 +9,6 @@ use anyhow::Result;
use chain_storage::NodeChainStore;
use common::transaction::TransactionBody;
use config::NodeConfig;
use log::info;
use sc_core::proofs_circuits::{generate_commitments, pedersen_commitment_vec};
use sequencer_client::{json::SendTxResponse, SequencerClient};
use serde::{Deserialize, Serialize};
@ -73,7 +72,6 @@ pub enum ActionData {
pub struct NodeCore {
pub storage: Arc<RwLock<NodeChainStore>>,
pub curr_height: Arc<AtomicU64>,
pub node_config: NodeConfig,
pub sequencer_client: Arc<SequencerClient>,
pub gas_calculator: GasCalculator,
@ -83,22 +81,15 @@ impl NodeCore {
pub async fn start_from_config_update_chain(config: NodeConfig) -> Result<Self> {
let client = Arc::new(SequencerClient::new(config.clone())?);
let genesis_id = client.get_genesis_id().await?;
info!("Genesis id is {genesis_id:?}");
let genesis_block = client.get_block(genesis_id.genesis_id).await?.block;
let (mut storage, chain_height) = NodeChainStore::new(config.clone(), genesis_block)?;
let mut storage = NodeChainStore::new(config.clone())?;
for acc in config.clone().initial_accounts {
storage.acc_map.insert(acc.address, acc);
}
let wrapped_storage = Arc::new(RwLock::new(storage));
let chain_height_wrapped = Arc::new(AtomicU64::new(chain_height));
Ok(Self {
storage: wrapped_storage,
curr_height: chain_height_wrapped,
node_config: config.clone(),
sequencer_client: client.clone(),
gas_calculator: GasCalculator::from(config.gas_config),

View File

@ -1,45 +0,0 @@
[package]
name = "node_rpc"
version = "0.1.0"
edition = "2021"
[dependencies]
anyhow.workspace = true
serde_json.workspace = true
env_logger.workspace = true
log.workspace = true
serde.workspace = true
actix.workspace = true
actix-cors.workspace = true
futures.workspace = true
tokio.workspace = true
hex.workspace = true
actix-web.workspace = true
[dependencies.accounts]
path = "../accounts"
[dependencies.consensus]
path = "../consensus"
[dependencies.networking]
path = "../networking"
[dependencies.storage]
path = "../storage"
[dependencies.utxo]
path = "../utxo"
[dependencies.vm]
path = "../vm"
[dependencies.zkvm]
path = "../zkvm"
[dependencies.node_core]
path = "../node_core"
[dependencies.common]
path = "../common"

View File

@ -1,45 +0,0 @@
pub mod net_utils;
pub mod process;
pub mod types;
use std::sync::Arc;
use common::rpc_primitives::{
errors::{RpcError, RpcErrorKind},
RpcPollingConfig,
};
use node_core::{config::NodeConfig, NodeCore};
use serde::Serialize;
use serde_json::Value;
pub use net_utils::*;
use tokio::sync::Mutex;
use self::types::err_rpc::RpcErr;
//ToDo: Add necessary fields
pub struct JsonHandler {
pub polling_config: RpcPollingConfig,
pub node_core_config: NodeConfig,
pub node_chain_store: Arc<Mutex<NodeCore>>,
}
fn respond<T: Serialize>(val: T) -> Result<Value, RpcErr> {
Ok(serde_json::to_value(val)?)
}
pub fn rpc_error_responce_inverter(err: RpcError) -> RpcError {
let mut content: Option<Value> = None;
if err.error_struct.is_some() {
content = match err.error_struct.clone().unwrap() {
RpcErrorKind::HandlerError(val) | RpcErrorKind::InternalError(val) => Some(val),
RpcErrorKind::RequestValidationError(vall) => Some(serde_json::to_value(vall).unwrap()),
};
}
RpcError {
error_struct: None,
code: err.code,
message: err.message,
data: content,
}
}

View File

@ -1,76 +0,0 @@
use std::io;
use std::sync::Arc;
use actix_cors::Cors;
use actix_web::{http, middleware, web, App, Error as HttpError, HttpResponse, HttpServer};
use futures::Future;
use futures::FutureExt;
use log::info;
use common::rpc_primitives::message::Message;
use common::rpc_primitives::RpcConfig;
use node_core::config::NodeConfig;
use node_core::NodeCore;
use tokio::sync::Mutex;
use super::JsonHandler;
pub const SHUTDOWN_TIMEOUT_SECS: u64 = 10;
fn rpc_handler(
message: web::Json<Message>,
handler: web::Data<JsonHandler>,
) -> impl Future<Output = Result<HttpResponse, HttpError>> {
let response = async move {
let message = handler.process(message.0).await?;
Ok(HttpResponse::Ok().json(&message))
};
response.boxed()
}
fn get_cors(cors_allowed_origins: &[String]) -> Cors {
let mut cors = Cors::permissive();
if cors_allowed_origins != ["*".to_string()] {
for origin in cors_allowed_origins {
cors = cors.allowed_origin(origin);
}
}
cors.allowed_methods(vec!["GET", "POST"])
.allowed_headers(vec![http::header::AUTHORIZATION, http::header::ACCEPT])
.allowed_header(http::header::CONTENT_TYPE)
.max_age(3600)
}
#[allow(clippy::too_many_arguments)]
pub fn new_http_server(
config: RpcConfig,
node_config: NodeConfig,
node_chain_store: Arc<Mutex<NodeCore>>,
) -> io::Result<actix_web::dev::Server> {
let RpcConfig {
addr,
cors_allowed_origins,
polling_config,
limits_config,
} = config;
info!(target:"network", "Starting http server at {addr}");
let handler = web::Data::new(JsonHandler {
polling_config,
node_core_config: node_config,
node_chain_store,
});
// HTTP server
Ok(HttpServer::new(move || {
App::new()
.wrap(get_cors(&cors_allowed_origins))
.app_data(handler.clone())
.app_data(web::JsonConfig::default().limit(limits_config.json_payload_max_size))
.wrap(middleware::Logger::default())
.service(web::resource("/").route(web::post().to(rpc_handler)))
})
.bind(addr)?
.shutdown_timeout(SHUTDOWN_TIMEOUT_SECS)
.disable_signals()
.run())
}

View File

@ -1,257 +0,0 @@
use std::sync::atomic::Ordering;
use actix_web::Error as HttpError;
use serde_json::Value;
use common::rpc_primitives::{
errors::RpcError,
message::{Message, Request},
parser::RpcRequest,
};
use common::transaction::ActionData;
use common::rpc_primitives::requests::{GetLastBlockRequest, GetLastBlockResponse};
use crate::types::rpc_structs::{
CreateAccountRequest, CreateAccountResponse, ShowAccountPublicBalanceRequest,
ShowAccountPublicBalanceResponse, ShowAccountUTXORequest, ShowAccountUTXOResponse,
ShowTransactionRequest, ShowTransactionResponse,
};
pub const CREATE_ACCOUNT: &str = "create_account";
pub const EXECUTE_SUBSCENARIO: &str = "execute_subscenario";
pub const GET_BLOCK: &str = "get_block";
pub const GET_LAST_BLOCK: &str = "get_last_block";
pub const EXECUTE_SCENARIO_SPLIT: &str = "execute_scenario_split";
pub const EXECUTE_SCENARIO_MULTIPLE_SEND: &str = "execute_scenario_multiple_send";
pub const SHOW_ACCOUNT_PUBLIC_BALANCE: &str = "show_account_public_balance";
pub const SHOW_ACCOUNT_UTXO: &str = "show_account_utxo";
pub const SHOW_TRANSACTION: &str = "show_transaction";
pub const WRITE_MINT_UTXO: &str = "write_mint_utxo";
pub const WRITE_MINT_UTXO_MULTIPLE_ASSETS: &str = "write_mint_utxo_multiple_assets";
pub const WRITE_SEND_UTXO_PRIVATE: &str = "write_send_utxo_private";
pub const WRITE_SEND_UTXO_SHIELDED: &str = "write_send_utxo_shielded";
pub const WRITE_SEND_UTXO_DESHIELDED: &str = "write_send_utxo_deshielded";
pub const WRITE_SPLIT_UTXO: &str = "write_split_utxo";
pub const SUCCESS: &str = "success";
pub const ACCOUNT_NOT_FOUND: &str = "Account not found";
pub const TRANSACTION_NOT_FOUND: &str = "Transaction not found";
use super::{respond, types::err_rpc::RpcErr, JsonHandler};
impl JsonHandler {
pub async fn process(&self, message: Message) -> Result<Message, HttpError> {
let id = message.id();
if let Message::Request(request) = message {
let message_inner = self
.process_request_internal(request)
.await
.map_err(|e| e.0);
Ok(Message::response(id, message_inner))
} else {
Ok(Message::error(RpcError::parse_error(
"JSON RPC Request format was expected".to_owned(),
)))
}
}
async fn process_create_account(&self, request: Request) -> Result<Value, RpcErr> {
let _req = CreateAccountRequest::parse(Some(request.params))?;
let acc_addr = {
let mut guard = self.node_chain_store.lock().await;
guard.create_new_account().await
};
let helperstruct = CreateAccountResponse {
status: hex::encode(acc_addr),
};
respond(helperstruct)
}
async fn process_get_last_block(&self, request: Request) -> Result<Value, RpcErr> {
let _req = GetLastBlockRequest::parse(Some(request.params))?;
let last_block = {
let guard = self.node_chain_store.lock().await;
guard.curr_height.load(Ordering::Relaxed)
};
let helperstruct = GetLastBlockResponse { last_block };
respond(helperstruct)
}
async fn process_show_account_public_balance(&self, request: Request) -> Result<Value, RpcErr> {
let req = ShowAccountPublicBalanceRequest::parse(Some(request.params))?;
let acc_addr_hex_dec = hex::decode(req.account_addr.clone()).map_err(|_| {
RpcError::parse_error("Failed to decode account address from hex string".to_string())
})?;
let acc_addr: [u8; 32] = acc_addr_hex_dec.try_into().map_err(|_| {
RpcError::parse_error("Failed to parse account address from bytes".to_string())
})?;
let balance = {
let cover_guard = self.node_chain_store.lock().await;
{
let under_guard = cover_guard.storage.read().await;
let acc = under_guard
.acc_map
.get(&acc_addr)
.ok_or(RpcError::new_internal_error(None, ACCOUNT_NOT_FOUND))?;
acc.balance
}
};
let helperstruct = ShowAccountPublicBalanceResponse {
addr: req.account_addr,
balance,
};
respond(helperstruct)
}
async fn process_show_account_utxo_request(&self, request: Request) -> Result<Value, RpcErr> {
let req = ShowAccountUTXORequest::parse(Some(request.params))?;
let acc_addr_hex_dec = hex::decode(req.account_addr.clone()).map_err(|_| {
RpcError::parse_error("Failed to decode account address from hex string".to_string())
})?;
let acc_addr: [u8; 32] = acc_addr_hex_dec.try_into().map_err(|_| {
RpcError::parse_error("Failed to parse account address from bytes".to_string())
})?;
let utxo_hash_hex_dec = hex::decode(req.utxo_hash.clone()).map_err(|_| {
RpcError::parse_error("Failed to decode hash from hex string".to_string())
})?;
let utxo_hash: [u8; 32] = utxo_hash_hex_dec
.try_into()
.map_err(|_| RpcError::parse_error("Failed to parse hash from bytes".to_string()))?;
let (asset, amount) = {
let cover_guard = self.node_chain_store.lock().await;
{
let mut under_guard = cover_guard.storage.write().await;
let acc = under_guard
.acc_map
.get_mut(&acc_addr)
.ok_or(RpcError::new_internal_error(None, ACCOUNT_NOT_FOUND))?;
let utxo = acc
.utxos
.get(&utxo_hash)
.ok_or(RpcError::new_internal_error(
None,
"UTXO does not exist in the tree",
))?;
(utxo.asset.clone(), utxo.amount)
}
};
let helperstruct = ShowAccountUTXOResponse {
hash: req.utxo_hash,
asset,
amount,
};
respond(helperstruct)
}
async fn process_show_transaction(&self, request: Request) -> Result<Value, RpcErr> {
let req = ShowTransactionRequest::parse(Some(request.params))?;
let tx_hash_hex_dec = hex::decode(req.tx_hash.clone()).map_err(|_| {
RpcError::parse_error("Failed to decode hash from hex string".to_string())
})?;
let tx_hash: [u8; 32] = tx_hash_hex_dec
.try_into()
.map_err(|_| RpcError::parse_error("Failed to parse hash from bytes".to_string()))?;
let helperstruct = {
let cover_guard = self.node_chain_store.lock().await;
{
let under_guard = cover_guard.storage.read().await;
let tx = under_guard
.pub_tx_store
.get_tx(tx_hash)
.ok_or(RpcError::new_internal_error(None, TRANSACTION_NOT_FOUND))?;
ShowTransactionResponse {
hash: req.tx_hash,
tx_kind: tx.body().tx_kind,
public_input: if let Ok(action) =
serde_json::from_slice::<ActionData>(&tx.body().execution_input)
{
action.into_hexed_print()
} else {
"".to_string()
},
public_output: if let Ok(action) =
serde_json::from_slice::<ActionData>(&tx.body().execution_output)
{
action.into_hexed_print()
} else {
"".to_string()
},
utxo_commitments_created_hashes: tx
.body()
.utxo_commitments_created_hashes
.iter()
.map(hex::encode)
.collect::<Vec<_>>(),
utxo_commitments_spent_hashes: tx
.body()
.utxo_commitments_spent_hashes
.iter()
.map(hex::encode)
.collect::<Vec<_>>(),
utxo_nullifiers_created_hashes: tx
.body()
.nullifier_created_hashes
.iter()
.map(hex::encode)
.collect::<Vec<_>>(),
encoded_data: tx
.body()
.encoded_data
.iter()
.map(|val| (hex::encode(val.0.clone()), hex::encode(val.1.clone())))
.collect::<Vec<_>>(),
ephemeral_pub_key: hex::encode(tx.body().ephemeral_pub_key.clone()),
}
}
};
respond(helperstruct)
}
pub async fn process_request_internal(&self, request: Request) -> Result<Value, RpcErr> {
match request.method.as_ref() {
//Todo : Add handling of more JSON RPC methods
CREATE_ACCOUNT => self.process_create_account(request).await,
GET_LAST_BLOCK => self.process_get_last_block(request).await,
SHOW_ACCOUNT_PUBLIC_BALANCE => self.process_show_account_public_balance(request).await,
SHOW_ACCOUNT_UTXO => self.process_show_account_utxo_request(request).await,
SHOW_TRANSACTION => self.process_show_transaction(request).await,
_ => Err(RpcErr(RpcError::method_not_found(request.method))),
}
}
}

View File

@ -1,85 +0,0 @@
use common::{ExecutionFailureKind, SequencerClientError};
use log::debug;
use common::rpc_primitives::errors::{RpcError, RpcParseError};
pub struct RpcErr(pub RpcError);
pub type RpcErrInternal = anyhow::Error;
pub trait RpcErrKind: 'static {
fn into_rpc_err(self) -> RpcError;
}
impl<T: RpcErrKind> From<T> for RpcErr {
fn from(e: T) -> Self {
Self(e.into_rpc_err())
}
}
macro_rules! standard_rpc_err_kind {
($type_name:path) => {
impl RpcErrKind for $type_name {
fn into_rpc_err(self) -> RpcError {
self.into()
}
}
};
}
standard_rpc_err_kind!(RpcError);
standard_rpc_err_kind!(RpcParseError);
impl RpcErrKind for serde_json::Error {
fn into_rpc_err(self) -> RpcError {
RpcError::serialization_error(&self.to_string())
}
}
impl RpcErrKind for RpcErrInternal {
fn into_rpc_err(self) -> RpcError {
RpcError::new_internal_error(None, &format!("{self:#?}"))
}
}
#[allow(clippy::needless_pass_by_value)]
pub fn from_rpc_err_into_anyhow_err(rpc_err: RpcError) -> anyhow::Error {
debug!("Rpc error cast to anyhow error : err {rpc_err:?}");
anyhow::anyhow!(format!("{rpc_err:#?}"))
}
pub fn cast_seq_client_error_into_rpc_error(seq_cli_err: SequencerClientError) -> RpcError {
let error_string = seq_cli_err.to_string();
match seq_cli_err {
SequencerClientError::SerdeError(_) => RpcError::serialization_error(&error_string),
SequencerClientError::HTTPError(_) => RpcError::new_internal_error(None, &error_string),
SequencerClientError::InternalError(err) => RpcError::new_internal_error(
err.error.data,
&serde_json::to_string(&err.error.error_struct).unwrap_or(String::default()),
),
}
}
pub fn cast_common_execution_error_into_rpc_error(comm_exec_err: ExecutionFailureKind) -> RpcError {
let error_string = comm_exec_err.to_string();
match comm_exec_err {
ExecutionFailureKind::BuilderError(_) => RpcError::new_internal_error(None, &error_string),
ExecutionFailureKind::WriteError(_) => RpcError::new_internal_error(None, &error_string),
ExecutionFailureKind::DBError(_) => RpcError::new_internal_error(None, &error_string),
ExecutionFailureKind::DecodeError(_) => RpcError::new_internal_error(None, &error_string),
ExecutionFailureKind::ProveError(_) => RpcError::new_internal_error(None, &error_string),
ExecutionFailureKind::AmountMismatchError => {
RpcError::new_internal_error(None, &error_string)
}
ExecutionFailureKind::InsufficientGasError => {
RpcError::new_internal_error(None, &error_string)
}
ExecutionFailureKind::InsufficientFundsError => {
RpcError::new_internal_error(None, &error_string)
}
ExecutionFailureKind::SequencerClientError(seq_cli_err) => {
cast_seq_client_error_into_rpc_error(seq_cli_err)
}
}
}

View File

@ -1,2 +0,0 @@
pub mod err_rpc;
pub mod rpc_structs;

View File

@ -1,206 +0,0 @@
use common::parse_request;
use common::rpc_primitives::errors::RpcParseError;
use common::rpc_primitives::parser::parse_params;
use common::rpc_primitives::parser::RpcRequest;
use common::transaction::TxKind;
use serde::{Deserialize, Serialize};
use serde_json::Value;
#[derive(Serialize, Deserialize, Debug)]
pub struct ExecuteSubscenarioRequest {
pub scenario_id: u64,
}
#[derive(Serialize, Deserialize, Debug)]
pub struct ExecuteScenarioSplitRequest {
pub visibility_list: [bool; 3],
pub publication_index: usize,
}
#[derive(Serialize, Deserialize, Debug)]
pub struct ExecuteScenarioMultipleSendRequest {
pub number_of_assets: usize,
pub number_to_send: usize,
}
#[derive(Serialize, Deserialize, Debug)]
pub struct ShowAccountPublicBalanceRequest {
pub account_addr: String,
}
#[derive(Serialize, Deserialize, Debug)]
pub struct ShowAccountUTXORequest {
pub account_addr: String,
pub utxo_hash: String,
}
#[derive(Serialize, Deserialize, Debug)]
pub struct ShowTransactionRequest {
pub tx_hash: String,
}
#[derive(Serialize, Deserialize, Debug)]
pub struct WriteDepositPublicBalanceRequest {
pub account_addr: String,
pub amount: u64,
}
#[derive(Serialize, Deserialize, Debug)]
pub struct WriteMintPrivateUTXORequest {
pub account_addr: String,
pub amount: u64,
}
#[derive(Serialize, Deserialize, Debug)]
pub struct WriteMintPrivateUTXOMultipleAssetsRequest {
pub account_addr: String,
pub num_of_assets: usize,
pub amount: u64,
}
#[derive(Serialize, Deserialize, Debug)]
pub struct WriteSendPrivateUTXORequest {
pub account_addr_sender: String,
pub account_addr_receiver: String,
pub utxo_hash: String,
pub utxo_commitment: String,
}
#[derive(Serialize, Deserialize, Debug)]
pub struct WriteSendShieldedUTXORequest {
pub account_addr_sender: String,
pub account_addr_receiver: String,
pub amount: u64,
}
#[derive(Serialize, Deserialize, Debug)]
pub struct WriteSendDeshieldedBalanceRequest {
pub account_addr_sender: String,
pub account_addr_receiver: String,
pub utxo_hash: String,
pub utxo_commitment: String,
}
#[derive(Serialize, Deserialize, Debug)]
pub struct WriteSplitUTXORequest {
pub account_addr_sender: String,
pub account_addr_receivers: [String; 3],
pub visibility_list: [bool; 3],
pub utxo_hash: String,
pub utxo_commitment: String,
}
#[derive(Serialize, Deserialize, Debug)]
pub struct CreateAccountRequest {}
// parse_request!(GetGenesisIdRequest);
parse_request!(ExecuteSubscenarioRequest);
parse_request!(ExecuteScenarioSplitRequest);
parse_request!(ExecuteScenarioMultipleSendRequest);
// parse_request!(GetLastBlockRequest);
parse_request!(ShowAccountPublicBalanceRequest);
parse_request!(ShowAccountUTXORequest);
parse_request!(ShowTransactionRequest);
parse_request!(WriteDepositPublicBalanceRequest);
parse_request!(WriteMintPrivateUTXORequest);
parse_request!(WriteMintPrivateUTXOMultipleAssetsRequest);
parse_request!(WriteSendPrivateUTXORequest);
parse_request!(WriteSendShieldedUTXORequest);
parse_request!(WriteSendDeshieldedBalanceRequest);
parse_request!(WriteSplitUTXORequest);
parse_request!(CreateAccountRequest);
#[derive(Serialize, Deserialize, Debug)]
pub struct ExecuteSubscenarioResponse {
pub scenario_result: String,
}
#[derive(Serialize, Deserialize, Debug)]
pub struct ExecuteScenarioSplitResponse {
pub scenario_result: String,
}
#[derive(Serialize, Deserialize, Debug)]
pub struct ExecuteScenarioMultipleSendResponse {
pub scenario_result: String,
}
#[derive(Serialize, Deserialize, Debug)]
pub struct ShowAccountPublicBalanceResponse {
pub addr: String,
pub balance: u64,
}
#[derive(Serialize, Deserialize, Debug)]
pub struct ShowAccountUTXOResponse {
pub hash: String,
pub asset: Vec<u8>,
pub amount: u128,
}
#[derive(Serialize, Deserialize, Debug)]
pub struct ShowTransactionResponse {
pub hash: String,
pub tx_kind: TxKind,
pub public_input: String,
pub public_output: String,
pub utxo_commitments_created_hashes: Vec<String>,
pub utxo_commitments_spent_hashes: Vec<String>,
pub utxo_nullifiers_created_hashes: Vec<String>,
pub encoded_data: Vec<(String, String)>,
pub ephemeral_pub_key: String,
}
#[derive(Serialize, Deserialize, Debug)]
pub struct WriteDepositPublicBalanceResponse {
pub status: String,
}
#[derive(Debug, Serialize, Deserialize)]
pub struct UTXOShortEssentialStruct {
pub hash: String,
pub commitment_hash: String,
pub asset: Vec<u8>,
}
#[derive(Serialize, Deserialize, Debug)]
pub struct WriteMintPrivateUTXOResponse {
pub status: String,
pub utxo: UTXOShortEssentialStruct,
}
#[derive(Serialize, Deserialize, Debug)]
pub struct WriteMintPrivateUTXOMultipleAssetsResponse {
pub status: String,
pub utxos: Vec<UTXOShortEssentialStruct>,
}
#[derive(Serialize, Deserialize, Debug)]
pub struct WriteSendPrivateUTXOResponse {
pub status: String,
pub utxo_result: UTXOShortEssentialStruct,
}
#[derive(Serialize, Deserialize, Debug)]
pub struct WriteSendShieldedUTXOResponse {
pub status: String,
pub utxo_result: UTXOShortEssentialStruct,
}
#[derive(Serialize, Deserialize, Debug)]
pub struct WriteSendDeshieldedUTXOResponse {
pub status: String,
}
#[derive(Serialize, Deserialize, Debug)]
pub struct WriteSendSplitUTXOResponse {
pub status: String,
pub utxo_results: Vec<UTXOShortEssentialStruct>,
}
#[derive(Serialize, Deserialize, Debug)]
pub struct CreateAccountResponse {
pub status: String,
}

View File

@ -1,50 +0,0 @@
[package]
name = "node_runner"
version = "0.1.0"
edition = "2021"
[dependencies]
hex.workspace = true
anyhow.workspace = true
serde_json.workspace = true
env_logger.workspace = true
log.workspace = true
serde.workspace = true
actix.workspace = true
actix-web.workspace = true
tokio.workspace = true
[dependencies.clap]
features = ["derive", "env"]
workspace = true
[dependencies.accounts]
path = "../accounts"
[dependencies.consensus]
path = "../consensus"
[dependencies.networking]
path = "../networking"
[dependencies.storage]
path = "../storage"
[dependencies.utxo]
path = "../utxo"
[dependencies.vm]
path = "../vm"
[dependencies.zkvm]
path = "../zkvm"
[dependencies.node_rpc]
path = "../node_rpc"
[dependencies.node_core]
path = "../node_core"
[dependencies.common]
path = "../common"

View File

@ -1,253 +0,0 @@
{
"home": ".",
"override_rust_log": null,
"sequencer_addr": "http://127.0.0.1:3040",
"seq_poll_timeout_secs": 10,
"port": 3041,
"gas_config": {
"gas_fee_per_byte_deploy": 100,
"gas_fee_per_input_buffer_runtime": 1000,
"gas_fee_per_byte_runtime": 10,
"gas_cost_runtime": 100,
"gas_cost_deploy": 1000,
"gas_limit_deploy": 30000000,
"gas_limit_runtime": 30000000
},
"shapshot_frequency_in_blocks": 10,
"initial_accounts": [
{
"address": [
13,
150,
223,
204,
65,
64,
25,
56,
12,
157,
222,
12,
211,
220,
229,
170,
201,
15,
181,
68,
59,
248,
113,
16,
135,
65,
174,
175,
222,
85,
42,
215
],
"balance": 10000,
"key_holder": {
"address": [
13,
150,
223,
204,
65,
64,
25,
56,
12,
157,
222,
12,
211,
220,
229,
170,
201,
15,
181,
68,
59,
248,
113,
16,
135,
65,
174,
175,
222,
85,
42,
215
],
"nullifer_public_key": "03A340BECA9FAAB444CED0140681D72EA1318B5C611704FEE017DA9836B17DB718",
"pub_account_signing_key": [
133,
143,
177,
187,
252,
66,
237,
236,
234,
252,
244,
138,
5,
151,
3,
99,
217,
231,
112,
217,
77,
211,
58,
218,
176,
68,
99,
53,
152,
228,
198,
190
],
"top_secret_key_holder": {
"secret_spending_key": "7BC46784DB1BC67825D8F029436846712BFDF9B5D79EA3AB11D39A52B9B229D4"
},
"utxo_secret_key_holder": {
"nullifier_secret_key": "BB54A8D3C9C51B82C431082D1845A74677B0EF829A11B517E1D9885DE3139506",
"viewing_secret_key": "AD923E92F6A5683E30140CEAB2702AFB665330C1EE4EFA70FAF29767B6B52BAF"
},
"viewing_public_key": "0361220C5D277E7A1709340FD31A52600C1432B9C45B9BCF88A43581D58824A8B6"
},
"utxos": {}
},
{
"address": [
151,
72,
112,
233,
190,
141,
10,
192,
138,
168,
59,
63,
199,
167,
166,
134,
41,
29,
135,
50,
80,
138,
186,
152,
179,
96,
128,
243,
156,
44,
243,
100
],
"balance": 20000,
"key_holder": {
"address": [
151,
72,
112,
233,
190,
141,
10,
192,
138,
168,
59,
63,
199,
167,
166,
134,
41,
29,
135,
50,
80,
138,
186,
152,
179,
96,
128,
243,
156,
44,
243,
100
],
"nullifer_public_key": "02172F50274DE67C4087C344F5D58E11DF761D90285B095060E0994FAA6BCDE271",
"pub_account_signing_key": [
54,
90,
62,
225,
71,
225,
228,
148,
143,
53,
210,
23,
137,
158,
171,
156,
48,
7,
139,
52,
117,
242,
214,
7,
99,
29,
122,
184,
59,
116,
144,
107
],
"top_secret_key_holder": {
"secret_spending_key": "80A186737C8D38B4288A03F0F589957D9C040D79C19F3E0CC4BA80F8494E5179"
},
"utxo_secret_key_holder": {
"nullifier_secret_key": "746928E63F0984F6F4818933493CE9C067562D9CB932FDC06D82C86CDF6D7122",
"viewing_secret_key": "89176CF4BC9E673807643FD52110EF99D4894335AFB10D881AC0B5041FE1FCB7"
},
"viewing_public_key": "026072A8F83FEC3472E30CDD4767683F30B91661D25B1040AD9A5FC2E01D659F99"
},
"utxos": {}
}
]
}

View File

@ -1,14 +0,0 @@
use std::path::PathBuf;
use anyhow::Result;
use node_core::config::NodeConfig;
use std::fs::File;
use std::io::BufReader;
pub fn from_file(config_home: PathBuf) -> Result<NodeConfig> {
let file = File::open(config_home)?;
let reader = BufReader::new(file);
Ok(serde_json::from_reader(reader)?)
}

View File

@ -1,56 +0,0 @@
use std::{path::PathBuf, sync::Arc};
use anyhow::Result;
use clap::Parser;
use common::rpc_primitives::RpcConfig;
use consensus::ConsensusManager;
use log::info;
use networking::peer_manager::PeerManager;
use node_core::NodeCore;
use node_rpc::new_http_server;
use tokio::sync::Mutex;
pub mod config;
#[derive(Parser, Debug)]
#[clap(version)]
struct Args {
/// Path to configs
home_dir: PathBuf,
}
pub async fn main_runner() -> Result<()> {
env_logger::init();
let args = Args::parse();
let Args { home_dir } = args;
let app_config = config::from_file(home_dir.join("node_config.json"))?;
let port = app_config.port;
let node_core = NodeCore::start_from_config_update_chain(app_config.clone()).await?;
let wrapped_node_core = Arc::new(Mutex::new(node_core));
let http_server = new_http_server(
RpcConfig::with_port(port),
app_config.clone(),
wrapped_node_core.clone(),
)?;
info!("HTTP server started");
let _http_server_handle = http_server.handle();
tokio::spawn(http_server);
let peer_manager = PeerManager::start_peer_manager(4, 0).await?;
info!("Peer manager mock started");
let peer_manager_shared = Arc::new(Mutex::new(peer_manager));
let _consensus_manager = ConsensusManager::new(peer_manager_shared.clone());
info!("Consensus manger mock started");
#[allow(clippy::empty_loop)]
loop {
//ToDo: Insert activity into main loop
}
}

View File

@ -1,16 +0,0 @@
use anyhow::Result;
use node_runner::main_runner;
pub const NUM_THREADS: usize = 4;
fn main() -> Result<()> {
actix::System::with_tokio_rt(|| {
tokio::runtime::Builder::new_multi_thread()
.worker_threads(NUM_THREADS)
.enable_all()
.build()
.unwrap()
})
.block_on(main_runner())
}

View File

@ -3,7 +3,7 @@ use std::fmt::Display;
use accounts::account_core::address::{self, AccountAddress};
use anyhow::Result;
use common::{
block::{Block, HashableBlockData},
block::HashableBlockData,
execution_input::PublicNativeTokenSend,
merkle_tree_public::TreeHashType,
nullifier::UTXONullifier,
@ -321,7 +321,7 @@ impl SequencerCore {
prev_block_hash,
};
let block = Block::produce_block_from_hashable_data(hashable_data);
let block = hashable_data.into();
self.store.block_store.put_block_at_id(block)?;
@ -336,11 +336,8 @@ mod tests {
use crate::config::AccountInitialData;
use super::*;
use common::transaction::{SignaturePrivateKey, Transaction, TransactionBody, TxKind};
use k256::{ecdsa::SigningKey, FieldBytes};
use mempool_transaction::MempoolTransaction;
use secp256k1_zkp::Tweak;
fn setup_sequencer_config_variable_initial_accounts(
initial_accounts: Vec<AccountInitialData>,
@ -386,67 +383,6 @@ mod tests {
setup_sequencer_config_variable_initial_accounts(initial_accounts)
}
fn create_dummy_transaction(
nullifier_created_hashes: Vec<[u8; 32]>,
utxo_commitments_spent_hashes: Vec<[u8; 32]>,
utxo_commitments_created_hashes: Vec<[u8; 32]>,
) -> Transaction {
let mut rng = rand::thread_rng();
let body = TransactionBody {
tx_kind: TxKind::Private,
execution_input: vec![],
execution_output: vec![],
utxo_commitments_spent_hashes,
utxo_commitments_created_hashes,
nullifier_created_hashes,
execution_proof_private: "dummy_proof".to_string(),
encoded_data: vec![],
ephemeral_pub_key: vec![10, 11, 12],
commitment: vec![],
tweak: Tweak::new(&mut rng),
secret_r: [0; 32],
sc_addr: "sc_addr".to_string(),
state_changes: (serde_json::Value::Null, 0),
};
Transaction::new(body, SignaturePrivateKey::random(&mut rng))
}
fn create_dummy_transaction_native_token_transfer(
from: [u8; 32],
nonce: u64,
to: [u8; 32],
balance_to_move: u64,
signing_key: SigningKey,
) -> Transaction {
let mut rng = rand::thread_rng();
let native_token_transfer = PublicNativeTokenSend {
from,
nonce,
to,
balance_to_move,
};
let body = TransactionBody {
tx_kind: TxKind::Public,
execution_input: serde_json::to_vec(&native_token_transfer).unwrap(),
execution_output: vec![],
utxo_commitments_spent_hashes: vec![],
utxo_commitments_created_hashes: vec![],
nullifier_created_hashes: vec![],
execution_proof_private: "".to_string(),
encoded_data: vec![],
ephemeral_pub_key: vec![10, 11, 12],
commitment: vec![],
tweak: Tweak::new(&mut rng),
secret_r: [0; 32],
sc_addr: "sc_addr".to_string(),
state_changes: (serde_json::Value::Null, 0),
};
Transaction::new(body, signing_key)
}
fn create_signing_key_for_account1() -> SigningKey {
let pub_sign_key_acc1 = [
133, 143, 177, 187, 252, 66, 237, 236, 234, 252, 244, 138, 5, 151, 3, 99, 217, 231,
@ -468,7 +404,11 @@ mod tests {
}
fn common_setup(sequencer: &mut SequencerCore) {
let tx = create_dummy_transaction(vec![[9; 32]], vec![[7; 32]], vec![[8; 32]]);
let tx = common::test_utils::create_dummy_private_transaction_random_signer(
vec![[9; 32]],
vec![[7; 32]],
vec![[8; 32]],
);
let mempool_tx = MempoolTransaction {
auth_tx: tx.into_authenticated().unwrap(),
};
@ -581,7 +521,11 @@ mod tests {
common_setup(&mut sequencer);
let tx = create_dummy_transaction(vec![[91; 32]], vec![[71; 32]], vec![[81; 32]]);
let tx = common::test_utils::create_dummy_private_transaction_random_signer(
vec![[91; 32]],
vec![[71; 32]],
vec![[81; 32]],
);
let tx_roots = sequencer.get_tree_roots();
let result = sequencer.transaction_pre_check(tx, tx_roots);
@ -606,7 +550,9 @@ mod tests {
let sign_key1 = create_signing_key_for_account1();
let tx = create_dummy_transaction_native_token_transfer(acc1, 0, acc2, 10, sign_key1);
let tx = common::test_utils::create_dummy_transaction_native_token_transfer(
acc1, 0, acc2, 10, sign_key1,
);
let tx_roots = sequencer.get_tree_roots();
let result = sequencer.transaction_pre_check(tx, tx_roots);
@ -631,7 +577,9 @@ mod tests {
let sign_key2 = create_signing_key_for_account2();
let tx = create_dummy_transaction_native_token_transfer(acc1, 0, acc2, 10, sign_key2);
let tx = common::test_utils::create_dummy_transaction_native_token_transfer(
acc1, 0, acc2, 10, sign_key2,
);
let tx_roots = sequencer.get_tree_roots();
let result = sequencer.transaction_pre_check(tx, tx_roots);
@ -659,7 +607,9 @@ mod tests {
let sign_key1 = create_signing_key_for_account1();
let tx = create_dummy_transaction_native_token_transfer(acc1, 0, acc2, 10000000, sign_key1);
let tx = common::test_utils::create_dummy_transaction_native_token_transfer(
acc1, 0, acc2, 10000000, sign_key1,
);
let tx_roots = sequencer.get_tree_roots();
let result = sequencer.transaction_pre_check(tx, tx_roots);
@ -693,7 +643,9 @@ mod tests {
let sign_key1 = create_signing_key_for_account1();
let tx = create_dummy_transaction_native_token_transfer(acc1, 0, acc2, 100, sign_key1);
let tx = common::test_utils::create_dummy_transaction_native_token_transfer(
acc1, 0, acc2, 100, sign_key1,
);
sequencer
.execute_check_transaction_on_state(&tx.into_authenticated().unwrap().into())
@ -716,7 +668,11 @@ mod tests {
common_setup(&mut sequencer);
let tx = create_dummy_transaction(vec![[92; 32]], vec![[72; 32]], vec![[82; 32]]);
let tx = common::test_utils::create_dummy_private_transaction_random_signer(
vec![[92; 32]],
vec![[72; 32]],
vec![[82; 32]],
);
let tx_roots = sequencer.get_tree_roots();
// Fill the mempool
@ -740,7 +696,11 @@ mod tests {
common_setup(&mut sequencer);
let tx = create_dummy_transaction(vec![[93; 32]], vec![[73; 32]], vec![[83; 32]]);
let tx = common::test_utils::create_dummy_private_transaction_random_signer(
vec![[93; 32]],
vec![[73; 32]],
vec![[83; 32]],
);
let tx_roots = sequencer.get_tree_roots();
let result = sequencer.push_tx_into_mempool_pre_check(tx, tx_roots);
@ -754,7 +714,11 @@ mod tests {
let mut sequencer = SequencerCore::start_from_config(config);
let genesis_height = sequencer.chain_height;
let tx = create_dummy_transaction(vec![[94; 32]], vec![[7; 32]], vec![[8; 32]]);
let tx = common::test_utils::create_dummy_private_transaction_random_signer(
vec![[94; 32]],
vec![[7; 32]],
vec![[8; 32]],
);
let tx_mempool = MempoolTransaction {
auth_tx: tx.into_authenticated().unwrap(),
};
@ -783,7 +747,9 @@ mod tests {
let sign_key1 = create_signing_key_for_account1();
let tx = create_dummy_transaction_native_token_transfer(acc1, 0, acc2, 100, sign_key1);
let tx = common::test_utils::create_dummy_transaction_native_token_transfer(
acc1, 0, acc2, 100, sign_key1,
);
let tx_mempool_original = MempoolTransaction {
auth_tx: tx.clone().into_authenticated().unwrap(),
@ -828,7 +794,9 @@ mod tests {
let sign_key1 = create_signing_key_for_account1();
let tx = create_dummy_transaction_native_token_transfer(acc1, 0, acc2, 100, sign_key1);
let tx = common::test_utils::create_dummy_transaction_native_token_transfer(
acc1, 0, acc2, 100, sign_key1,
);
// The transaction should be included the first time
let tx_mempool_original = MempoolTransaction {

View File

@ -76,40 +76,8 @@ fn block_to_transactions_map(block: &Block) -> HashMap<TreeHashType, u64> {
#[cfg(test)]
mod tests {
use super::*;
use common::transaction::{SignaturePrivateKey, TransactionBody};
use tempfile::tempdir;
fn create_dummy_block_with_transaction(block_id: u64) -> (Block, Transaction) {
let body = TransactionBody {
tx_kind: common::transaction::TxKind::Public,
execution_input: Default::default(),
execution_output: Default::default(),
utxo_commitments_spent_hashes: Default::default(),
utxo_commitments_created_hashes: Default::default(),
nullifier_created_hashes: Default::default(),
execution_proof_private: Default::default(),
encoded_data: Default::default(),
ephemeral_pub_key: Default::default(),
commitment: Default::default(),
tweak: Default::default(),
secret_r: Default::default(),
sc_addr: Default::default(),
state_changes: Default::default(),
};
let tx = Transaction::new(body, SignaturePrivateKey::from_slice(&[1; 32]).unwrap());
(
Block {
block_id,
prev_block_id: block_id - 1,
prev_block_hash: [0; 32],
hash: [1; 32],
transactions: vec![tx.clone()],
data: vec![],
},
tx,
)
}
#[test]
fn test_get_transaction_by_hash() {
let temp_dir = tempdir().unwrap();
@ -125,7 +93,10 @@ mod tests {
// Start an empty node store
let mut node_store =
SequecerBlockStore::open_db_with_genesis(path, Some(genesis_block)).unwrap();
let (block, tx) = create_dummy_block_with_transaction(1);
let tx = common::test_utils::produce_dummy_empty_transaction();
let block = common::test_utils::produce_dummy_block(1, None, vec![tx.clone()], vec![]);
// Try retrieve a tx that's not in the chain yet.
let retrieved_tx = node_store.get_transaction_by_hash(tx.body().hash());
assert_eq!(None, retrieved_tx);

View File

@ -3,7 +3,7 @@ use std::{collections::HashSet, path::Path};
use accounts_store::SequencerAccountsStore;
use block_store::SequecerBlockStore;
use common::{
block::{Block, HashableBlockData},
block::HashableBlockData,
merkle_tree_public::merkle_tree::{PublicTransactionMerkleTree, UTXOCommitmentsMerkleTree},
nullifier::UTXONullifier,
};
@ -63,7 +63,7 @@ impl SequecerChainStore {
prev_block_hash,
};
let genesis_block = Block::produce_block_from_hashable_data(hashable_data);
let genesis_block = hashable_data.into();
//Sequencer should panic if unable to open db,
//as fixing this issue may require actions non-native to program scope

View File

@ -24,12 +24,6 @@ path = "../mempool"
[dependencies.accounts]
path = "../accounts"
[dependencies.consensus]
path = "../consensus"
[dependencies.networking]
path = "../networking"
[dependencies.sequencer_core]
path = "../sequencer_core"

View File

@ -22,12 +22,6 @@ workspace = true
[dependencies.mempool]
path = "../mempool"
[dependencies.consensus]
path = "../consensus"
[dependencies.networking]
path = "../networking"
[dependencies.sequencer_rpc]
path = "../sequencer_rpc"

View File

@ -1,11 +0,0 @@
[package]
name = "vm"
version = "0.1.0"
edition = "2021"
[dependencies]
anyhow.workspace = true
serde_json.workspace = true
env_logger.workspace = true
log.workspace = true
serde.workspace = true

View File

@ -1 +0,0 @@
//ToDo: Add vm module