Merge branch 'main' into Pravdyvy/block-parsing-validation

Pravdyvy 2026-01-27 07:57:41 +02:00
commit 516feee101
14 changed files with 616 additions and 696 deletions


@ -99,7 +99,7 @@ jobs:
run: rustup install
- name: Install nextest
run: cargo install cargo-nextest
run: cargo install --locked cargo-nextest
- name: Run tests
env:

Cargo.lock (generated): 1086 changed lines; file diff suppressed because it is too large.


@ -82,10 +82,9 @@ itertools = "0.14.0"
url = "2.5.4"
tokio-retry = "0.3.0"
common-http-client = { git = "https://github.com/logos-blockchain/logos-blockchain.git" }
key-management-system-service = { git = "https://github.com/logos-blockchain/logos-blockchain.git" }
nomos-core = { git = "https://github.com/logos-blockchain/logos-blockchain.git" }
broadcast-service = { git = "https://github.com/logos-blockchain/logos-blockchain.git" }
logos-blockchain-common-http-client = { git = "https://github.com/logos-blockchain/logos-blockchain.git" }
logos-blockchain-key-management-system-service = { git = "https://github.com/logos-blockchain/logos-blockchain.git" }
logos-blockchain-core = { git = "https://github.com/logos-blockchain/logos-blockchain.git" }
rocksdb = { version = "0.24.0", default-features = false, features = [
"snappy",


@ -6,10 +6,5 @@ edition = "2024"
[dependencies]
reqwest.workspace = true
anyhow.workspace = true
common-http-client.workspace = true
nomos-core.workspace = true
broadcast-service.workspace = true
url.workspace = true
futures.workspace = true
tokio-retry.workspace = true
log.workspace = true
logos-blockchain-common-http-client.workspace = true
logos-blockchain-core.workspace = true


@ -1,28 +1,33 @@
use anyhow::Result;
use broadcast_service::BlockInfo;
use common_http_client::CommonHttpClient;
pub use common_http_client::{BasicAuthCredentials, Error};
use futures::{Stream, TryFutureExt};
use log::warn;
use nomos_core::{block::Block, header::HeaderId, mantle::SignedMantleTx};
use reqwest::Client;
use tokio_retry::Retry;
use url::Url;
pub use logos_blockchain_common_http_client::{BasicAuthCredentials, CommonHttpClient, Error};
use logos_blockchain_core::mantle::SignedMantleTx;
use reqwest::{Client, Url};
// Simple wrapper
// maybe extend in the future for our purposes
pub struct BedrockClient(pub CommonHttpClient);
pub struct BedrockClient {
http_client: CommonHttpClient,
node_url: Url,
}
impl BedrockClient {
pub fn new(auth: Option<BasicAuthCredentials>) -> Result<Self> {
pub fn new(auth: Option<BasicAuthCredentials>, node_url: Url) -> Result<Self> {
let client = Client::builder()
//Add more fiedls if needed
//Add more fields if needed
.timeout(std::time::Duration::from_secs(60))
.build()?;
Ok(BedrockClient(CommonHttpClient::new_with_client(
client, auth,
)))
let http_client = CommonHttpClient::new_with_client(client, auth);
Ok(Self {
http_client,
node_url,
})
}
pub async fn post_transaction(&self, tx: SignedMantleTx) -> Result<(), Error> {
self.http_client
.post_transaction(self.node_url.clone(), tx)
.await
}
pub async fn get_lib_stream(&self, url: Url) -> Result<impl Stream<Item = BlockInfo>, Error> {
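
For context, a minimal usage sketch of the reworked client, assuming the constructor and `post_transaction` signatures shown in this hunk and that `Error` converts into `anyhow::Error`; the node URL below is a placeholder.

```rust
use anyhow::Result;
use bedrock_client::BedrockClient;
use logos_blockchain_core::mantle::SignedMantleTx;
use reqwest::Url;

async fn submit(tx: SignedMantleTx) -> Result<()> {
    // The node URL is captured once at construction time...
    let node_url = Url::parse("http://localhost:18080")?;
    let client = BedrockClient::new(None, node_url)?;
    // ...so call sites no longer pass it with every request.
    client.post_transaction(tx).await?;
    Ok(())
}
```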


@ -23,7 +23,7 @@ pub type BlockHash = [u8; 32];
pub type BlockId = u64;
pub type TimeStamp = u64;
#[derive(Debug, Clone)]
#[derive(Debug, Clone, BorshSerialize, BorshDeserialize)]
pub struct BlockHeader {
pub block_id: BlockId,
pub prev_block_hash: BlockHash,
@ -32,15 +32,23 @@ pub struct BlockHeader {
pub signature: nssa::Signature,
}
#[derive(Debug, Clone)]
#[derive(Debug, Clone, BorshSerialize, BorshDeserialize)]
pub struct BlockBody {
pub transactions: Vec<EncodedTransaction>,
}
#[derive(Debug, Clone)]
#[derive(Debug, Clone, BorshSerialize, BorshDeserialize)]
pub enum BedrockStatus {
Pending,
Safe,
Finalized,
}
#[derive(Debug, BorshSerialize, BorshDeserialize)]
pub struct Block {
pub header: BlockHeader,
pub body: BlockBody,
pub bedrock_status: BedrockStatus,
}
#[derive(Debug, Clone, PartialEq, Eq, BorshSerialize, BorshDeserialize)]
@ -52,7 +60,7 @@ pub struct HashableBlockData {
}
impl HashableBlockData {
pub fn into_block(self, signing_key: &nssa::PrivateKey) -> Block {
pub fn into_pending_block(self, signing_key: &nssa::PrivateKey) -> Block {
let data_bytes = borsh::to_vec(&self).unwrap();
let signature = nssa::Signature::new(signing_key, &data_bytes);
let hash = OwnHasher::hash(&data_bytes);
@ -67,6 +75,7 @@ impl HashableBlockData {
body: BlockBody {
transactions: self.transactions,
},
bedrock_status: BedrockStatus::Pending,
}
}
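
A small sketch of what the added derives enable, assuming the `common::block` paths above: a whole `Block`, status included, now round-trips through borsh, which the storage changes further down rely on when persisting blocks directly.

```rust
use common::block::Block;

// Serialize a full block (header, body, bedrock_status) and read it back,
// mirroring what the RocksDB layer now stores per block id.
fn roundtrip(block: &Block) -> anyhow::Result<Block> {
    let bytes = borsh::to_vec(block)?;
    Ok(borsh::from_slice::<Block>(&bytes)?)
}
```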


@ -30,7 +30,7 @@ pub fn produce_dummy_block(
transactions,
};
block_data.into_block(&sequencer_sign_key_for_testing())
block_data.into_pending_block(&sequencer_sign_key_for_testing())
}
pub fn produce_dummy_empty_transaction() -> EncodedTransaction {


@ -19,8 +19,8 @@ chrono.workspace = true
log.workspace = true
tokio = { workspace = true, features = ["rt-multi-thread", "macros"] }
bedrock_client.workspace = true
key-management-system-service.workspace = true
nomos-core.workspace = true
logos-blockchain-key-management-system-service.workspace = true
logos-blockchain-core.workspace = true
rand.workspace = true
reqwest.workspace = true
borsh.workspace = true


@ -1,20 +1,20 @@
use std::{fs, path::Path};
use anyhow::Result;
use anyhow::{Context, Result, anyhow};
use bedrock_client::BedrockClient;
use common::block::HashableBlockData;
use key_management_system_service::keys::{ED25519_SECRET_KEY_SIZE, Ed25519Key, Ed25519PublicKey};
use nomos_core::mantle::{
use logos_blockchain_core::mantle::{
MantleTx, Op, OpProof, SignedMantleTx, Transaction, TxHash, ledger,
ops::channel::{ChannelId, MsgId, inscribe::InscriptionOp},
};
use reqwest::Url;
use logos_blockchain_key_management_system_service::keys::{
ED25519_SECRET_KEY_SIZE, Ed25519Key, Ed25519PublicKey,
};
use crate::config::BedrockConfig;
/// A component that posts block data to logos blockchain
pub struct BlockSettlementClient {
bedrock_node_url: Url,
bedrock_client: BedrockClient,
bedrock_signing_key: Ed25519Key,
bedrock_channel_id: ChannelId,
@ -22,25 +22,23 @@ pub struct BlockSettlementClient {
}
impl BlockSettlementClient {
pub fn new(home: &Path, config: &BedrockConfig) -> Self {
pub fn try_new(home: &Path, config: &BedrockConfig) -> Result<Self> {
let bedrock_signing_key = load_or_create_signing_key(&home.join("bedrock_signing_key"))
.expect("Signing key should load or be created successfully");
let bedrock_node_url =
Url::parse(&config.node_url).expect("Bedrock URL should be a valid URL");
let bedrock_channel_id = config.channel_id;
let bedrock_client =
BedrockClient::new(None).expect("Bedrock client should be able to initialize");
Self {
bedrock_node_url,
.context("Failed to load or create signing key")?;
let bedrock_channel_id = ChannelId::from(config.channel_id);
let bedrock_client = BedrockClient::new(None, config.node_url.clone())
.context("Failed to initialize bedrock client")?;
let channel_genesis_msg = MsgId::from([0; 32]);
Ok(Self {
bedrock_client,
bedrock_signing_key,
bedrock_channel_id,
last_message_id: MsgId::from([0; 32]),
}
last_message_id: channel_genesis_msg,
})
}
/// Create and sign a transaction for inscribing data
pub fn create_inscribe_tx(&self, data: Vec<u8>) -> SignedMantleTx {
pub fn create_inscribe_tx(&self, data: Vec<u8>) -> (SignedMantleTx, MsgId) {
let verifying_key_bytes = self.bedrock_signing_key.public_key().to_bytes();
let verifying_key =
Ed25519PublicKey::from_bytes(&verifying_key_bytes).expect("valid ed25519 public key");
@ -51,12 +49,14 @@ impl BlockSettlementClient {
parent: self.last_message_id,
signer: verifying_key,
};
let inscribe_op_id = inscribe_op.id();
let ledger_tx = ledger::Tx::new(vec![], vec![]);
let inscribe_tx = MantleTx {
ops: vec![Op::ChannelInscribe(inscribe_op)],
ledger_tx,
// Altruistic test config
storage_gas_price: 0,
execution_gas_price: 0,
};
@ -67,54 +67,51 @@ impl BlockSettlementClient {
.sign_payload(tx_hash.as_signing_bytes().as_ref())
.to_bytes();
let signature =
key_management_system_service::keys::Ed25519Signature::from_bytes(&signature_bytes);
logos_blockchain_key_management_system_service::keys::Ed25519Signature::from_bytes(
&signature_bytes,
);
SignedMantleTx {
let signed_mantle_tx = SignedMantleTx {
ops_proofs: vec![OpProof::Ed25519Sig(signature)],
ledger_tx_proof: empty_ledger_signature(&tx_hash),
mantle_tx: inscribe_tx,
}
};
(signed_mantle_tx, inscribe_op_id)
}
/// Post a transaction to the node and wait for inclusion
pub async fn post_and_wait(&mut self, block_data: &HashableBlockData) -> Result<u64> {
let inscription_data = borsh::to_vec(&block_data)?;
let tx = self.create_inscribe_tx(inscription_data);
let (tx, new_msg_id) = self.create_inscribe_tx(inscription_data);
// Post the transaction
self.bedrock_client
.0
.post_transaction(self.bedrock_node_url.clone(), tx.clone())
.await?;
self.bedrock_client.post_transaction(tx).await?;
if let Some(Op::ChannelInscribe(inscribe)) = tx.mantle_tx.ops.first() {
self.last_message_id = inscribe.id()
}
self.last_message_id = new_msg_id;
Ok(block_data.block_id)
}
}
/// Load signing key from file or generate a new one if it doesn't exist
fn load_or_create_signing_key(path: &Path) -> Result<Ed25519Key, ()> {
fn load_or_create_signing_key(path: &Path) -> Result<Ed25519Key> {
if path.exists() {
let key_bytes = fs::read(path).map_err(|_| ())?;
if key_bytes.len() != ED25519_SECRET_KEY_SIZE {
// TODO: proper error
return Err(());
}
let key_array: [u8; ED25519_SECRET_KEY_SIZE] =
key_bytes.try_into().expect("length already checked");
let key_bytes = fs::read(path)?;
let key_array: [u8; ED25519_SECRET_KEY_SIZE] = key_bytes
.try_into()
.map_err(|_| anyhow!("Found key with incorrect length"))?;
Ok(Ed25519Key::from_bytes(&key_array))
} else {
let mut key_bytes = [0u8; ED25519_SECRET_KEY_SIZE];
rand::RngCore::fill_bytes(&mut rand::thread_rng(), &mut key_bytes);
fs::write(path, key_bytes).map_err(|_| ())?;
fs::write(path, key_bytes)?;
Ok(Ed25519Key::from_bytes(&key_bytes))
}
}
fn empty_ledger_signature(tx_hash: &TxHash) -> key_management_system_service::keys::ZkSignature {
key_management_system_service::keys::ZkKey::multi_sign(&[], tx_hash.as_ref())
fn empty_ledger_signature(
tx_hash: &TxHash,
) -> logos_blockchain_key_management_system_service::keys::ZkSignature {
logos_blockchain_key_management_system_service::keys::ZkKey::multi_sign(&[], tx_hash.as_ref())
.expect("multi-sign with empty key set works")
}
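
A hedged sketch of the settlement flow under the fallible constructor, assuming the signatures in this hunk; the module paths, home directory, and caller are illustrative only.

```rust
use std::path::Path;

use anyhow::Result;
use common::block::HashableBlockData;

// Module paths below are assumptions; the types are the ones defined in this file.
use crate::config::BedrockConfig;
use crate::settlement::BlockSettlementClient;

async fn settle(config: &BedrockConfig, block_data: &HashableBlockData) -> Result<u64> {
    // try_new surfaces key-loading and client-construction failures instead of panicking.
    let mut client = BlockSettlementClient::try_new(Path::new("/var/lib/sequencer"), config)?;
    // post_and_wait inscribes the borsh-encoded block data and advances
    // last_message_id with the MsgId returned by create_inscribe_tx.
    client.post_and_wait(block_data).await
}
```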


@ -46,7 +46,7 @@ impl SequencerBlockStore {
}
pub fn get_block_at_id(&self, id: u64) -> Result<Block> {
Ok(self.dbio.get_block(id)?.into_block(&self.signing_key))
Ok(self.dbio.get_block(id)?)
}
pub fn put_block_at_id(&mut self, block: Block) -> Result<()> {
@ -113,7 +113,7 @@ mod tests {
transactions: vec![],
};
let genesis_block = genesis_block_hashable_data.into_block(&signing_key);
let genesis_block = genesis_block_hashable_data.into_pending_block(&signing_key);
// Start an empty node store
let mut node_store =
SequencerBlockStore::open_db_with_genesis(path, Some(genesis_block), signing_key)


@ -5,7 +5,7 @@ use std::{
};
use anyhow::Result;
use nomos_core::mantle::ops::channel::ChannelId;
use reqwest::Url;
use serde::{Deserialize, Serialize};
#[derive(Debug, Serialize, Deserialize, Clone)]
@ -57,11 +57,7 @@ pub struct BedrockConfig {
/// Bedrock channel ID
pub channel_id: ChannelId,
/// Bedrock Url
pub node_url: String,
/// Bedrock user
pub user: String,
/// Bedrock password(optional)
pub password: Option<String>,
pub node_url: Url,
}
impl SequencerConfig {


@ -53,7 +53,7 @@ impl SequencerCore {
};
let signing_key = nssa::PrivateKey::try_new(config.signing_key).unwrap();
let genesis_block = hashable_data.into_block(&signing_key);
let genesis_block = hashable_data.into_pending_block(&signing_key);
// Sequencer should panic if unable to open db,
// as fixing this issue may require actions non-native to program scope
@ -89,10 +89,10 @@ impl SequencerCore {
state.add_pinata_program(PINATA_BASE58.parse().unwrap());
let (mempool, mempool_handle) = MemPool::new(config.mempool_max_size);
let block_settlement = config
.bedrock_config
.as_ref()
.map(|bedrock_config| BlockSettlementClient::new(&config.home, bedrock_config));
let block_settlement_client = config.bedrock_config.as_ref().map(|bedrock_config| {
BlockSettlementClient::try_new(&config.home, bedrock_config)
.expect("Block settlement client should be constructible")
});
let mut this = Self {
state,
@ -100,7 +100,7 @@ impl SequencerCore {
mempool,
chain_height: config.genesis_id,
sequencer_config: config,
block_settlement_client: block_settlement,
block_settlement_client,
};
this.sync_state_with_stored_blocks();
@ -196,14 +196,14 @@ impl SequencerCore {
let block = hashable_data
.clone()
.into_block(self.block_store.signing_key());
.into_pending_block(self.block_store.signing_key());
self.block_store.put_block_at_id(block)?;
self.chain_height = new_block_height;
// TODO: Consider switching to `tracing` crate to have more structured and consistent logs
// // e.g.
// e.g.
//
// ```
// info!(


@ -1,6 +1,6 @@
use std::{path::Path, sync::Arc};
use common::block::{Block, HashableBlockData};
use common::block::Block;
use error::DbError;
use rocksdb::{
BoundColumnFamily, ColumnFamilyDescriptor, DBWithThreadMode, MultiThreaded, Options,
@ -26,6 +26,8 @@ pub const DB_META_FIRST_BLOCK_IN_DB_KEY: &str = "first_block_in_db";
pub const DB_META_LAST_BLOCK_IN_DB_KEY: &str = "last_block_in_db";
/// Key base for storing metainformation which describe if first block has been set
pub const DB_META_FIRST_BLOCK_SET_KEY: &str = "first_block_set";
/// Key base for storing metainformation about the last finalized block on Bedrock
pub const DB_META_LAST_FINALIZED_BLOCK_ID: &str = "last_finalized_block_id";
/// Key base for storing snapshot which describe block id
pub const DB_SNAPSHOT_BLOCK_ID_KEY: &str = "block_id";
@ -75,6 +77,7 @@ impl RocksDBIO {
dbio.put_meta_first_block_in_db(block)?;
dbio.put_meta_is_first_block_set()?;
dbio.put_meta_last_block_in_db(block_id)?;
dbio.put_meta_last_finalized_block_id(None)?;
Ok(dbio)
} else {
@ -232,6 +235,28 @@ impl RocksDBIO {
Ok(())
}
pub fn put_meta_last_finalized_block_id(&self, block_id: Option<u64>) -> DbResult<()> {
let cf_meta = self.meta_column();
self.db
.put_cf(
&cf_meta,
borsh::to_vec(&DB_META_LAST_FINALIZED_BLOCK_ID).map_err(|err| {
DbError::borsh_cast_message(
err,
Some("Failed to serialize DB_META_LAST_FINALIZED_BLOCK_ID".to_string()),
)
})?,
borsh::to_vec(&block_id).map_err(|err| {
DbError::borsh_cast_message(
err,
Some("Failed to serialize last block id".to_string()),
)
})?,
)
.map_err(|rerr| DbError::rocksdb_cast_message(rerr, None))?;
Ok(())
}
pub fn put_meta_is_first_block_set(&self) -> DbResult<()> {
let cf_meta = self.meta_column();
self.db
@ -269,7 +294,7 @@ impl RocksDBIO {
Some("Failed to serialize block id".to_string()),
)
})?,
borsh::to_vec(&HashableBlockData::from(block)).map_err(|err| {
borsh::to_vec(&block).map_err(|err| {
DbError::borsh_cast_message(
err,
Some("Failed to serialize block data".to_string()),
@ -280,7 +305,7 @@ impl RocksDBIO {
Ok(())
}
pub fn get_block(&self, block_id: u64) -> DbResult<HashableBlockData> {
pub fn get_block(&self, block_id: u64) -> DbResult<Block> {
let cf_block = self.block_column();
let res = self
.db
@ -296,14 +321,12 @@ impl RocksDBIO {
.map_err(|rerr| DbError::rocksdb_cast_message(rerr, None))?;
if let Some(data) = res {
Ok(
borsh::from_slice::<HashableBlockData>(&data).map_err(|serr| {
DbError::borsh_cast_message(
serr,
Some("Failed to deserialize block data".to_string()),
)
})?,
)
Ok(borsh::from_slice::<Block>(&data).map_err(|serr| {
DbError::borsh_cast_message(
serr,
Some("Failed to deserialize block data".to_string()),
)
})?)
} else {
Err(DbError::db_interaction_error(
"Block on this id not found".to_string(),


@ -19,7 +19,7 @@ pub enum ChainSubcommand {
/// Get transaction at hash from sequencer
Transaction {
/// hash - valid 32 byte hex string
#[arg(short, long)]
#[arg(short = 't', long)]
hash: String,
},
}
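
For reference, a minimal clap sketch of the resulting flag, with an illustrative struct name instead of the `ChainSubcommand` enum above: a bare `short` would derive `-h` from `hash`, which clap reserves for help, so the explicit `short = 't'` exposes the hash as `-t` or `--hash`.

```rust
use clap::Parser;

#[derive(Parser)]
struct TransactionArgs {
    /// hash - valid 32 byte hex string
    #[arg(short = 't', long)]
    hash: String, // accepted as `-t <HASH>` or `--hash <HASH>`
}
```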