diff --git a/Cargo.toml b/Cargo.toml index 3aa60f50..e90045b7 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,6 +1,9 @@ [workspace] members = [ "nomos-core", + "nomos-da/kzgrs", + "nomos-da/kzgrs-backend", + "nomos-da/storage", "nomos-libp2p", "nomos-services/api", "nomos-services/log", @@ -9,18 +12,19 @@ members = [ "nomos-services/cryptarchia-consensus", "nomos-services/mempool", "nomos-services/metrics", - "nomos-services/data-availability", "nomos-services/system-sig", - "nomos-da/reed-solomon", - "nomos-da/kzg", + "nomos-services/data-availability/indexer", + "nomos-services/data-availability/verifier", + "nomos-services/data-availability/tests", "nomos-da/full-replication", - "nomos-cli", + # TODO: add it again and reimplement full replication + # "nomos-cli", "nomos-utils", "nodes/nomos-node", "mixnet", "consensus/carnot-engine", "consensus/cryptarchia-engine", "ledger/cryptarchia-ledger", - "tests", + "tests" ] resolver = "2" diff --git a/nodes/nomos-node/Cargo.toml b/nodes/nomos-node/Cargo.toml index ebb4aa4d..123ef8db 100644 --- a/nodes/nomos-node/Cargo.toml +++ b/nodes/nomos-node/Cargo.toml @@ -31,9 +31,6 @@ nomos-metrics = { path = "../../nomos-services/metrics" } nomos-storage = { path = "../../nomos-services/storage", features = ["rocksdb"] } cryptarchia-consensus = { path = "../../nomos-services/cryptarchia-consensus", features = ["libp2p"] } nomos-libp2p = { path = "../../nomos-libp2p" } -nomos-da = { path = "../../nomos-services/data-availability", features = [ - "libp2p", -] } nomos-system-sig = { path = "../../nomos-services/system-sig" } tracing-subscriber = "0.3" cryptarchia-engine = { path = "../../consensus/cryptarchia-engine" } diff --git a/nodes/nomos-node/src/api.rs b/nodes/nomos-node/src/api.rs index f6a201a1..5d53cd51 100644 --- a/nodes/nomos-node/src/api.rs +++ b/nodes/nomos-node/src/api.rs @@ -19,8 +19,7 @@ use tower_http::{ use utoipa::OpenApi; use utoipa_swagger_ui::SwaggerUi; -use full_replication::{Blob, Certificate}; -use nomos_core::{da::blob, header::HeaderId, tx::Transaction}; +use nomos_core::{header::HeaderId, tx::Transaction}; use nomos_mempool::{ network::adapters::libp2p::Libp2pAdapter as MempoolNetworkAdapter, tx::service::openapi::Status, MempoolMetrics, @@ -29,7 +28,7 @@ use nomos_network::backends::libp2p::Libp2p as NetworkBackend; use nomos_storage::backends::StorageSerde; use nomos_api::{ - http::{cl, consensus, da, libp2p, mempool, metrics, storage}, + http::{cl, consensus, libp2p, mempool, metrics, storage}, Backend, }; @@ -51,8 +50,6 @@ pub struct AxumBackend { #[derive(OpenApi)] #[openapi( paths( - da_metrics, - da_status, ), components( schemas(Status, MempoolMetrics) @@ -117,9 +114,6 @@ where ) .layer(TraceLayer::new_for_http()) .merge(SwaggerUi::new("/swagger-ui").url("/api-docs/openapi.json", ApiDoc::openapi())) - .route("/da/metrics", routing::get(da_metrics)) - .route("/da/status", routing::post(da_status)) - .route("/da/blobs", routing::post(da_blobs)) .route("/cl/metrics", routing::get(cl_metrics::)) .route("/cl/status", routing::post(cl_status::)) .route( @@ -133,7 +127,6 @@ where .route("/network/info", routing::get(libp2p_info)) .route("/storage/block", routing::post(block::)) .route("/mempool/add/tx", routing::post(add_tx::)) - .route("/mempool/add/cert", routing::post(add_cert)) .route("/metrics", routing::get(get_metrics)) .with_state(handle); @@ -158,48 +151,6 @@ macro_rules! 
make_request_and_return_response { }}; } -#[utoipa::path( - get, - path = "/da/metrics", - responses( - (status = 200, description = "Get the mempool metrics of the da service", body = MempoolMetrics), - (status = 500, description = "Internal server error", body = String), - ) -)] -async fn da_metrics(State(handle): State) -> Response { - make_request_and_return_response!(da::da_mempool_metrics(&handle)) -} - -#[utoipa::path( - post, - path = "/da/status", - responses( - (status = 200, description = "Query the mempool status of the da service", body = Vec<::Hash>), - (status = 500, description = "Internal server error", body = String), - ) -)] -async fn da_status( - State(handle): State, - Json(items): Json::Hash>>, -) -> Response { - make_request_and_return_response!(da::da_mempool_status(&handle, items)) -} - -#[utoipa::path( - post, - path = "/da/blobs", - responses( - (status = 200, description = "Get pending blobs", body = Vec), - (status = 500, description = "Internal server error", body = String), - ) -)] -async fn da_blobs( - State(handle): State, - Json(items): Json::Hash>>, -) -> Response { - make_request_and_return_response!(da::da_blobs(&handle, items)) -} - #[utoipa::path( get, path = "/cl/metrics", @@ -359,30 +310,6 @@ where >(&handle, tx, Transaction::hash)) } -#[utoipa::path( - post, - path = "/mempool/add/cert", - responses( - (status = 200, description = "Add certificate to the mempool"), - (status = 500, description = "Internal server error", body = String), - ) -)] -async fn add_cert( - State(handle): State, - Json(cert): Json, -) -> Response { - make_request_and_return_response!(mempool::add_cert::< - NetworkBackend, - MempoolNetworkAdapter::Hash>, - Certificate, - ::Hash, - >( - &handle, - cert, - nomos_core::da::certificate::Certificate::hash - )) -} - #[utoipa::path( get, path = "/metrics", diff --git a/nodes/nomos-node/src/config.rs b/nodes/nomos-node/src/config.rs index b649c60d..9286a57b 100644 --- a/nodes/nomos-node/src/config.rs +++ b/nodes/nomos-node/src/config.rs @@ -4,7 +4,6 @@ use std::{ }; use crate::api::AxumBackend; -use crate::DataAvailability; use crate::{Tx, Wire, MB16}; use clap::{Parser, ValueEnum}; use color_eyre::eyre::{eyre, Result}; @@ -107,12 +106,6 @@ pub struct CryptarchiaArgs { coin_value: Option, } -#[derive(Parser, Debug, Clone)] -pub struct DaArgs { - #[clap(long = "da-voter", env = "DA_VOTER")] - da_voter: Option, -} - #[derive(Parser, Debug, Clone)] pub struct MetricsArgs { #[clap(long = "with-metrics", env = "WITH_METRICS")] @@ -125,7 +118,6 @@ pub struct Config { pub network: as ServiceData>::Settings, pub http: > as ServiceData>::Settings, pub cryptarchia: ::Settings, - pub da: ::Settings, } impl Config { @@ -255,15 +247,4 @@ impl Config { Ok(self) } - - pub fn update_da(mut self, da_args: DaArgs) -> Result { - let DaArgs { da_voter } = da_args; - - if let Some(voter) = da_voter { - let bytes = <[u8; 32]>::from_hex(voter)?; - self.da.da_protocol.voter = bytes; - } - - Ok(self) - } } diff --git a/nodes/nomos-node/src/lib.rs b/nodes/nomos-node/src/lib.rs index b2df8fe3..2c27f89c 100644 --- a/nodes/nomos-node/src/lib.rs +++ b/nodes/nomos-node/src/lib.rs @@ -3,24 +3,15 @@ mod config; mod tx; use color_eyre::eyre::Result; -use full_replication::Certificate; -use full_replication::{AbsoluteNumber, Attestation, Blob, FullReplication}; +use full_replication::{Certificate, VidCertificate}; use api::AxumBackend; use bytes::Bytes; -pub use config::{Config, CryptarchiaArgs, DaArgs, HttpArgs, LogArgs, MetricsArgs, NetworkArgs}; +pub use 
config::{Config, CryptarchiaArgs, HttpArgs, LogArgs, MetricsArgs, NetworkArgs}; use nomos_api::ApiService; -use nomos_core::{ - da::{blob, certificate}, - header::HeaderId, - tx::Transaction, - wire, -}; -use nomos_da::{ - backend::memory_cache::BlobCache, network::adapters::libp2p::Libp2pAdapter as DaNetworkAdapter, - DataAvailabilityService, -}; +use nomos_core::{da::certificate, header::HeaderId, tx::Transaction, wire}; use nomos_log::Logger; +use nomos_mempool::da::verify::fullreplication::DaVerificationProvider as MempoolVerificationProvider; use nomos_mempool::network::adapters::libp2p::Libp2pAdapter as MempoolNetworkAdapter; use nomos_mempool::{backend::mockpool::MockPool, TxMempoolService}; #[cfg(feature = "metrics")] @@ -51,46 +42,36 @@ pub const DA_TOPIC: &str = "da"; const MB16: usize = 1024 * 1024 * 16; pub type Cryptarchia = cryptarchia_consensus::CryptarchiaConsensus< - cryptarchia_consensus::network::adapters::libp2p::LibP2pAdapter, + cryptarchia_consensus::network::adapters::libp2p::LibP2pAdapter, MockPool::Hash>, MempoolNetworkAdapter::Hash>, MockPool< HeaderId, - Certificate, - <::Blob as blob::Blob>::Hash, - >, - MempoolNetworkAdapter< - Certificate, - <::Blob as blob::Blob>::Hash, + VidCertificate, + ::CertificateId, >, + MempoolNetworkAdapter::Id>, + MempoolVerificationProvider, FillSizeWithTx, - FillSizeWithBlobsCertificate, + FillSizeWithBlobsCertificate, RocksBackend, >; -pub type DataAvailability = DataAvailabilityService< - FullReplication>, - BlobCache<::Hash, Blob>, - DaNetworkAdapter, ->; - -pub type DaMempool = DaMempoolService< - MempoolNetworkAdapter< - Certificate, - <::Blob as blob::Blob>::Hash, - >, - MockPool< - HeaderId, - Certificate, - <::Blob as blob::Blob>::Hash, - >, ->; - pub type TxMempool = TxMempoolService< MempoolNetworkAdapter::Hash>, MockPool::Hash>, >; +pub type DaMempool = DaMempoolService< + MempoolNetworkAdapter::Id>, + MockPool< + HeaderId, + VidCertificate, + ::CertificateId, + >, + MempoolVerificationProvider, +>; + #[derive(Services)] pub struct Nomos { logging: ServiceHandle, @@ -99,7 +80,6 @@ pub struct Nomos { da_mempool: ServiceHandle, cryptarchia: ServiceHandle, http: ServiceHandle>>, - da: ServiceHandle, storage: ServiceHandle>>, #[cfg(feature = "metrics")] metrics: ServiceHandle, diff --git a/nodes/nomos-node/src/main.rs b/nodes/nomos-node/src/main.rs index bd4af96f..36ca5c2d 100644 --- a/nodes/nomos-node/src/main.rs +++ b/nodes/nomos-node/src/main.rs @@ -1,17 +1,14 @@ -use full_replication::{Blob, Certificate}; +use full_replication::Certificate; #[cfg(feature = "metrics")] use nomos_metrics::MetricsSettings; use nomos_node::{ - Config, CryptarchiaArgs, DaArgs, HttpArgs, LogArgs, MetricsArgs, NetworkArgs, Nomos, + Config, CryptarchiaArgs, HttpArgs, LogArgs, MetricsArgs, NetworkArgs, Nomos, NomosServiceSettings, Tx, }; use clap::Parser; use color_eyre::eyre::{eyre, Result}; -use nomos_core::{ - da::{blob, certificate}, - tx::Transaction, -}; +use nomos_core::{da::certificate, tx::Transaction}; use nomos_mempool::network::adapters::libp2p::Settings as AdapterSettings; @@ -35,9 +32,6 @@ struct Args { http_args: HttpArgs, #[clap(flatten)] cryptarchia_args: CryptarchiaArgs, - /// Overrides da config. - #[clap(flatten)] - da_args: DaArgs, /// Overrides metrics config. 
#[clap(flatten)] metrics_args: MetricsArgs, @@ -46,7 +40,6 @@ struct Args { fn main() -> Result<()> { let Args { config, - da_args, log_args, http_args, network_args, @@ -54,7 +47,6 @@ fn main() -> Result<()> { metrics_args, } = Args::parse(); let config = serde_yaml::from_reader::<_, Config>(std::fs::File::open(config)?)? - .update_da(da_args)? .update_log(log_args)? .update_http(http_args)? .update_network(network_args)? @@ -85,14 +77,16 @@ fn main() -> Result<()> { backend: (), network: AdapterSettings { topic: String::from(nomos_node::DA_TOPIC), - id: cert_id, + id: ::id, + }, + verification_provider: full_replication::CertificateVerificationParameters { + threshold: 0, }, registry: registry.clone(), }, cryptarchia: config.cryptarchia, #[cfg(feature = "metrics")] metrics: MetricsSettings { registry }, - da: config.da, storage: nomos_storage::backends::rocksdb::RocksBackendSettings { db_path: std::path::PathBuf::from(DEFAULT_DB_PATH), read_only: false, @@ -106,8 +100,3 @@ fn main() -> Result<()> { app.wait_finished(); Ok(()) } - -fn cert_id(cert: &Certificate) -> ::Hash { - use certificate::Certificate; - cert.hash() -} diff --git a/nomos-cli/Cargo.toml b/nomos-cli/Cargo.toml index 4053e6a2..0cbe127c 100644 --- a/nomos-cli/Cargo.toml +++ b/nomos-cli/Cargo.toml @@ -18,9 +18,6 @@ tokio = { version = "1", features = ["sync"] } overwatch-rs = { git = "https://github.com/logos-co/Overwatch", rev = "2f70806" } overwatch-derive = { git = "https://github.com/logos-co/Overwatch", rev = "ac28d01" } nomos-network = { path = "../nomos-services/network", features = ["libp2p"] } -nomos-da = { path = "../nomos-services/data-availability", features = [ - "libp2p", -] } cryptarchia-consensus = { path = "../nomos-services/cryptarchia-consensus" } nomos-log = { path = "../nomos-services/log" } nomos-libp2p = { path = "../nomos-libp2p" } diff --git a/nomos-cli/src/api/da.rs b/nomos-cli/src/api/da.rs index 7a3d3c07..8b137891 100644 --- a/nomos-cli/src/api/da.rs +++ b/nomos-cli/src/api/da.rs @@ -1,18 +1 @@ -use super::CLIENT; -use full_replication::Blob; -use nomos_core::da::blob; -use reqwest::Url; -pub async fn get_blobs( - node: &Url, - ids: Vec<::Hash>, -) -> Result, reqwest::Error> { - const BLOBS_PATH: &str = "da/blobs"; - CLIENT - .post(node.join(BLOBS_PATH).unwrap()) - .json(&ids) - .send() - .await? 
- .json() - .await -} diff --git a/nomos-cli/src/cmds/chat/mod.rs b/nomos-cli/src/cmds/chat/mod.rs index 69a604d7..62d3c9df 100644 --- a/nomos-cli/src/cmds/chat/mod.rs +++ b/nomos-cli/src/cmds/chat/mod.rs @@ -6,19 +6,19 @@ mod ui; use crate::{ api::consensus::get_headers_info, - da::{ - disseminate::{ - DaProtocolChoice, DisseminateApp, DisseminateAppServiceSettings, Settings, Status, - }, - retrieve::get_block_blobs, - }, + //da::{ + // disseminate::{ + // DaProtocolChoice, DisseminateApp, DisseminateAppServiceSettings, Settings, Status, + // }, + // retrieve::get_block_blobs, + //}, }; use clap::Args; use full_replication::{ AbsoluteNumber, Attestation, Certificate, FullReplication, Settings as DaSettings, }; use futures::{stream, StreamExt}; -use nomos_core::{da::DaProtocol, header::HeaderId, wire}; +use nomos_core::{header::HeaderId, wire}; use nomos_log::{LoggerBackend, LoggerSettings, SharedWriter}; use nomos_network::backends::libp2p::Libp2p as NetworkBackend; use nomos_network::NetworkService; diff --git a/nomos-cli/src/cmds/mod.rs b/nomos-cli/src/cmds/mod.rs index bf2648ac..4d4752d7 100644 --- a/nomos-cli/src/cmds/mod.rs +++ b/nomos-cli/src/cmds/mod.rs @@ -1,21 +1,22 @@ use clap::Subcommand; -pub mod chat; -pub mod disseminate; +// pub mod chat; +// pub mod disseminate; #[derive(Debug, Subcommand)] pub enum Command { - /// Send a blob to the network and collect attestations to create a DA proof - Disseminate(disseminate::Disseminate), - /// (Almost) Instant messaging protocol on top of the Nomos network - Chat(chat::NomosChat), + // /// Send a blob to the network and collect attestations to create a DA proof + // Disseminate(disseminate::Disseminate), + // /// (Almost) Instant messaging protocol on top of the Nomos network + // Chat(chat::NomosChat), } impl Command { pub fn run(&self) -> Result<(), Box> { - match self { - Command::Disseminate(cmd) => cmd.run(), - Command::Chat(cmd) => cmd.run(), - } + // match self { + // Command::Disseminate(cmd) => cmd.run(), + // Command::Chat(cmd) => cmd.run(), + // } + Ok(()) } } diff --git a/nomos-cli/src/da/disseminate.rs b/nomos-cli/src/da/disseminate.rs index ec4fc5bd..729547ea 100644 --- a/nomos-cli/src/da/disseminate.rs +++ b/nomos-cli/src/da/disseminate.rs @@ -3,7 +3,7 @@ use clap::{Args, ValueEnum}; use full_replication::{AbsoluteNumber, Attestation, Certificate, FullReplication, Voter}; use futures::StreamExt; use hex::FromHex; -use nomos_core::{da::DaProtocol, wire}; +use nomos_core::wire; use nomos_da::network::{adapters::libp2p::Libp2pAdapter as DaNetworkAdapter, NetworkAdapter}; use nomos_log::Logger; use nomos_network::backends::libp2p::Libp2p as NetworkBackend; diff --git a/nomos-cli/src/lib.rs b/nomos-cli/src/lib.rs index 27e0099a..ffc18115 100644 --- a/nomos-cli/src/lib.rs +++ b/nomos-cli/src/lib.rs @@ -1,6 +1,6 @@ pub mod api; pub mod cmds; -pub mod da; +// pub mod da; use clap::Parser; use cmds::Command; diff --git a/nomos-core/src/block/builder.rs b/nomos-core/src/block/builder.rs index f3c8c85f..47c1a8e3 100644 --- a/nomos-core/src/block/builder.rs +++ b/nomos-core/src/block/builder.rs @@ -7,8 +7,8 @@ use serde::Serialize; // internal use crate::block::Block; use crate::crypto::Blake2b; +use crate::da::certificate::vid::VidCertificate; use crate::da::certificate::BlobCertificateSelect; -use crate::da::certificate::Certificate; use crate::header::{ carnot::Builder as CarnotBuilder, cryptarchia::Builder as CryptarchiaBuilder, Header, HeaderId, }; @@ -64,7 +64,7 @@ where impl BlockBuilder where Tx: Transaction + Clone + Eq + 
Hash + Serialize + DeserializeOwned, - C: Certificate + Clone + Eq + Hash + Serialize + DeserializeOwned, + C: VidCertificate + Clone + Eq + Hash + Serialize + DeserializeOwned, TxSelector: TxSelect, BlobSelector: BlobCertificateSelect, { diff --git a/nomos-core/src/da/attestation/mod.rs b/nomos-core/src/da/attestation/mod.rs index a3dfd8dd..9709ede5 100644 --- a/nomos-core/src/da/attestation/mod.rs +++ b/nomos-core/src/da/attestation/mod.rs @@ -1,11 +1,9 @@ -use crate::da::blob::Blob; -use bytes::Bytes; use std::hash::Hash; pub trait Attestation { - type Blob: Blob; type Hash: Hash + Eq + Clone; - fn blob(&self) -> ::Hash; + + fn blob_hash(&self) -> Self::Hash; fn hash(&self) -> Self::Hash; - fn as_bytes(&self) -> Bytes; + fn signature(&self) -> &[u8]; } diff --git a/nomos-core/src/da/blob/mod.rs b/nomos-core/src/da/blob/mod.rs index f7780db4..d76deaca 100644 --- a/nomos-core/src/da/blob/mod.rs +++ b/nomos-core/src/da/blob/mod.rs @@ -1,13 +1,5 @@ -use bytes::Bytes; -use std::hash::Hash; - -pub type BlobHasher = fn(&T) -> ::Hash; - pub trait Blob { - const HASHER: BlobHasher; - type Hash: Hash + Eq + Clone; - fn hash(&self) -> Self::Hash { - Self::HASHER(self) - } - fn as_bytes(&self) -> Bytes; + type BlobId; + + fn id(&self) -> Self::BlobId; } diff --git a/nomos-core/src/da/certificate/metadata.rs b/nomos-core/src/da/certificate/metadata.rs new file mode 100644 index 00000000..c2aaccae --- /dev/null +++ b/nomos-core/src/da/certificate/metadata.rs @@ -0,0 +1,10 @@ +pub trait Next { + fn next(self) -> Self; +} + +pub trait Metadata { + type AppId; + type Index: Next; + + fn metadata(&self) -> (Self::AppId, Self::Index); +} diff --git a/nomos-core/src/da/certificate/mock.rs b/nomos-core/src/da/certificate/mock.rs new file mode 100644 index 00000000..02f72a77 --- /dev/null +++ b/nomos-core/src/da/certificate/mock.rs @@ -0,0 +1,63 @@ +use crate::da::{attestation::Attestation, certificate::Certificate}; + +#[derive(Clone, Debug, PartialEq)] +pub struct MockAttestation { + voter: [u8; 32], + signature: Vec, +} + +impl MockAttestation { + pub fn new(voter: &[u8; 32], signature: &[u8]) -> Self { + MockAttestation { + voter: *voter, + signature: signature.to_vec(), + } + } +} + +impl Attestation for MockAttestation { + type Hash = Vec; + + fn blob_hash(&self) -> Self::Hash { + unimplemented!() + } + fn hash(&self) -> Self::Hash { + vec![0u8] + } + fn signature(&self) -> &[u8] { + &self.signature + } +} + +#[derive(Clone, Debug, PartialEq)] +pub struct MockCertificate { + attestations: Vec, +} + +impl MockCertificate { + pub fn new(attestations: Vec) -> Self { + MockCertificate { attestations } + } +} + +impl Certificate for MockCertificate { + type Signature = [u8; 32]; + type Id = [u8; 32]; + type VerificationParameters = (); + + fn signers(&self) -> Vec { + todo!() + } + + fn signature(&self) -> Self::Signature { + todo!() + } + + fn id(&self) -> Self::Id { + todo!() + } + + fn verify(&self, _: Self::VerificationParameters) -> bool { + todo!() + } +} diff --git a/nomos-core/src/da/certificate/mod.rs b/nomos-core/src/da/certificate/mod.rs index cd1238af..434e781e 100644 --- a/nomos-core/src/da/certificate/mod.rs +++ b/nomos-core/src/da/certificate/mod.rs @@ -1,19 +1,21 @@ +pub mod metadata; +pub mod mock; pub mod select; - -use crate::da::blob::Blob; -use bytes::Bytes; -use std::hash::Hash; +pub mod vid; pub trait Certificate { - type Blob: Blob; - type Hash: Hash + Eq + Clone; - fn blob(&self) -> ::Hash; - fn hash(&self) -> Self::Hash; - fn as_bytes(&self) -> Bytes; + type Signature; + type Id; + 
type VerificationParameters; + + fn signers(&self) -> Vec; + fn signature(&self) -> Self::Signature; + fn id(&self) -> Self::Id; + fn verify(&self, verification_params: Self::VerificationParameters) -> bool; } pub trait BlobCertificateSelect { - type Certificate: Certificate; + type Certificate: vid::VidCertificate; type Settings: Clone; fn new(settings: Self::Settings) -> Self; @@ -22,3 +24,17 @@ pub trait BlobCertificateSelect { certificates: I, ) -> impl Iterator + 'i; } + +pub trait CertificateStrategy { + type Attestation; + type Certificate; + type Metadata: metadata::Metadata; + + fn can_build(&self, attestations: &[Self::Attestation]) -> bool; + fn build( + &self, + attestations: Vec, + app_id: ::AppId, + index: ::Index, + ) -> Self::Certificate; +} diff --git a/nomos-core/src/da/certificate/select.rs b/nomos-core/src/da/certificate/select.rs index fe2f08f0..75eebf3c 100644 --- a/nomos-core/src/da/certificate/select.rs +++ b/nomos-core/src/da/certificate/select.rs @@ -3,7 +3,7 @@ use std::marker::PhantomData; // crates // internal -use crate::da::certificate::{BlobCertificateSelect, Certificate}; +use crate::da::certificate::{vid::VidCertificate, BlobCertificateSelect}; use crate::utils; #[derive(Default, Clone, Copy)] @@ -19,8 +19,9 @@ impl FillSize { } } -impl BlobCertificateSelect for FillSize { +impl BlobCertificateSelect for FillSize { type Certificate = C; + type Settings = (); fn new(_settings: Self::Settings) -> Self { @@ -32,7 +33,7 @@ impl BlobCertificateSelect for FillSize impl Iterator + 'i { utils::select::select_from_till_fill_size::( - |blob| blob.as_bytes().len(), + |c| c.size(), certificates, ) } diff --git a/nomos-core/src/da/certificate/vid.rs b/nomos-core/src/da/certificate/vid.rs new file mode 100644 index 00000000..d1036604 --- /dev/null +++ b/nomos-core/src/da/certificate/vid.rs @@ -0,0 +1,8 @@ +use super::metadata::Metadata; + +pub trait VidCertificate: Metadata { + type CertificateId; + + fn certificate_id(&self) -> Self::CertificateId; + fn size(&self) -> usize; +} diff --git a/nomos-core/src/da/mod.rs b/nomos-core/src/da/mod.rs index db301fba..5e342928 100644 --- a/nomos-core/src/da/mod.rs +++ b/nomos-core/src/da/mod.rs @@ -1,40 +1,31 @@ +use std::error::Error; // crates -use bytes::Bytes; // internal -use crate::da::attestation::Attestation; -use crate::da::blob::Blob; -use crate::da::certificate::Certificate; pub mod attestation; pub mod blob; pub mod certificate; -pub trait DaProtocol { - type Blob: Blob; - type Attestation: Attestation; - type Certificate: Certificate; - type Settings: Clone; - - // Construct a new instance - fn new(settings: Self::Settings) -> Self; - /// Encode bytes into blobs - fn encode>(&self, data: T) -> Vec; - /// Feed a blob for decoding. - /// Depending on the protocol, it may be necessary to feed multiple blobs to - /// recover the initial data. - fn recv_blob(&mut self, blob: Self::Blob); - /// Attempt to recover the initial data from fed blobs. - /// If the protocol is not yet ready to return the data, return None. - fn extract(&mut self) -> Option; - /// Attest that we have received and stored a blob. - fn attest(&self, blob: &Self::Blob) -> Self::Attestation; - /// Validate that an attestation is valid for a blob. - fn validate_attestation(&self, blob: &Self::Blob, attestation: &Self::Attestation) -> bool; - /// Buffer attestations to produce a certificate of correct dispersal. - fn recv_attestation(&mut self, attestation: Self::Attestation); - /// Attempt to produce a certificate of correct disperal for a blob. 
- /// If the protocol is not yet ready to return the certificate, return None. - fn certify_dispersal(&mut self) -> Option; - /// Validate a certificate. - fn validate_certificate(&self, certificate: &Self::Certificate) -> bool; +pub trait DaEncoder { + type EncodedData; + fn encode(b: &[u8]) -> Result; +} + +pub trait DaVerifier { + type DaBlob; + type Attestation; + type Error; + + fn verify(&self, blob: &Self::DaBlob) -> Result; +} + +pub trait DaDispersal { + type EncodedData; + type Certificate; + + fn disperse(&self, encoded_data: Self::EncodedData) -> Result; +} + +pub trait Signer { + fn sign(&self, message: &[u8]) -> Vec; } diff --git a/nomos-da/full-replication/src/attestation.rs b/nomos-da/full-replication/src/attestation.rs new file mode 100644 index 00000000..812e280c --- /dev/null +++ b/nomos-da/full-replication/src/attestation.rs @@ -0,0 +1,40 @@ +use nomos_core::da::{attestation, Signer}; +use serde::{Deserialize, Serialize}; + +use crate::{hash, Voter}; + +#[derive(Debug, Clone, Serialize, Deserialize, Eq, PartialEq, PartialOrd, Ord)] +#[cfg_attr(feature = "openapi", derive(utoipa::ToSchema))] +pub struct Attestation { + blob_hash: [u8; 32], + attester: Voter, + sig: Vec, +} + +impl Attestation { + pub fn new_signed(blob_hash: [u8; 32], attester: Voter, key_pair: &S) -> Self { + let attestation_hash = hash([blob_hash, attester].concat()); + let sig = key_pair.sign(&attestation_hash); + Self { + blob_hash, + attester, + sig, + } + } +} + +impl attestation::Attestation for Attestation { + type Hash = [u8; 32]; + + fn blob_hash(&self) -> Self::Hash { + self.blob_hash + } + + fn hash(&self) -> Self::Hash { + hash([self.blob_hash, self.attester].concat()) + } + + fn signature(&self) -> &[u8] { + self.sig.as_ref() + } +} diff --git a/nomos-da/full-replication/src/lib.rs b/nomos-da/full-replication/src/lib.rs index e39c6d7b..d68833ae 100644 --- a/nomos-da/full-replication/src/lib.rs +++ b/nomos-da/full-replication/src/lib.rs @@ -1,9 +1,11 @@ +pub mod attestation; + +use attestation::Attestation; +use nomos_core::da::attestation::Attestation as _; +use nomos_core::da::certificate::metadata::Next; +use nomos_core::da::certificate::CertificateStrategy; // internal -use nomos_core::da::{ - attestation::{self, Attestation as _}, - blob::{self, BlobHasher}, - certificate, DaProtocol, -}; +use nomos_core::da::certificate::{self, metadata}; // std use std::collections::HashSet; use std::hash::{Hash, Hasher}; @@ -13,43 +15,15 @@ use blake2::{ Blake2bVar, }; use bytes::Bytes; -use nomos_core::wire; use serde::{Deserialize, Serialize}; +#[derive(Copy, Clone, Default, Debug, PartialEq, PartialOrd, Ord, Eq, Serialize, Deserialize)] +pub struct Index([u8; 8]); + /// Re-export the types for OpenAPI #[cfg(feature = "openapi")] pub mod openapi { - pub use super::{Attestation, Certificate}; -} - -#[derive(Debug, Clone)] -pub struct FullReplication { - voter: Voter, - certificate_strategy: CertificateStrategy, - output_buffer: Vec, - attestations: Vec, - output_certificate_buf: Vec, -} - -impl FullReplication { - pub fn new(voter: Voter, strategy: S) -> Self { - Self { - voter, - certificate_strategy: strategy, - output_buffer: Vec::new(), - attestations: Vec::new(), - output_certificate_buf: Vec::new(), - } - } -} - -// TODO: maybe abstract in a general library? 
-trait CertificateStrategy { - type Attestation: attestation::Attestation; - type Certificate: certificate::Certificate; - - fn can_build(&self, attestations: &[Self::Attestation]) -> bool; - fn build(&self, attestations: Vec) -> Certificate; + pub use super::Certificate; } #[derive(Debug, Clone)] @@ -78,20 +52,29 @@ pub struct Settings { impl CertificateStrategy for AbsoluteNumber { type Attestation = Attestation; type Certificate = Certificate; + type Metadata = Certificate; fn can_build(&self, attestations: &[Self::Attestation]) -> bool { attestations.len() >= self.num_attestations && attestations .iter() - .map(|a| &a.blob) + .map(|a| a.blob_hash()) .collect::>() .len() == 1 } - fn build(&self, attestations: Vec) -> Certificate { + fn build( + &self, + attestations: Vec, + app_id: [u8; 32], + index: Index, + ) -> Certificate { assert!(self.can_build(&attestations)); - Certificate { attestations } + Certificate { + attestations, + metadata: Metadata { app_id, index }, + } } } @@ -103,46 +86,15 @@ pub struct Blob { data: Bytes, } -fn hasher(blob: &Blob) -> [u8; 32] { - let mut hasher = Blake2bVar::new(32).unwrap(); - hasher.update(&blob.data); - let mut output = [0; 32]; - hasher.finalize_variable(&mut output).unwrap(); - output +#[derive(Default, Debug, Copy, Clone, Serialize, Deserialize, Eq, PartialEq)] +pub struct Metadata { + app_id: [u8; 32], + index: Index, } -impl blob::Blob for Blob { - const HASHER: BlobHasher = hasher as BlobHasher; - type Hash = [u8; 32]; - - fn as_bytes(&self) -> bytes::Bytes { - self.data.clone() - } -} - -#[derive(Debug, Clone, Serialize, Deserialize, Eq, PartialEq)] -#[cfg_attr(feature = "openapi", derive(utoipa::ToSchema))] -pub struct Attestation { - blob: [u8; 32], - voter: Voter, -} - -impl attestation::Attestation for Attestation { - type Blob = Blob; - type Hash = [u8; 32]; - - fn blob(&self) -> [u8; 32] { - self.blob - } - - fn hash(&self) -> ::Hash { - hash([self.blob, self.voter].concat()) - } - - fn as_bytes(&self) -> Bytes { - wire::serialize(self) - .expect("Attestation shouldn't fail to be serialized") - .into() +impl Metadata { + fn size(&self) -> usize { + std::mem::size_of_val(&self.app_id) + std::mem::size_of_val(&self.index) } } @@ -150,96 +102,130 @@ impl attestation::Attestation for Attestation { #[cfg_attr(feature = "openapi", derive(utoipa::ToSchema))] pub struct Certificate { attestations: Vec, + metadata: Metadata, } impl Hash for Certificate { fn hash(&self, state: &mut H) { - state.write(certificate::Certificate::as_bytes(self).as_ref()); + state.write(::id(self).as_ref()); } } -impl certificate::Certificate for Certificate { - type Blob = Blob; - type Hash = [u8; 32]; +#[derive(Clone, Debug)] +pub struct CertificateVerificationParameters { + pub threshold: usize, +} - fn blob(&self) -> ::Hash { - self.attestations[0].blob +impl certificate::Certificate for Certificate { + type Id = [u8; 32]; + type Signature = [u8; 32]; + type VerificationParameters = CertificateVerificationParameters; + + fn signature(&self) -> Self::Signature { + let mut attestations = self.attestations.clone(); + attestations.sort(); + let mut signatures = Vec::new(); + for attestation in &attestations { + signatures.extend_from_slice(attestation.signature()); + } + hash(signatures) } - fn hash(&self) -> ::Hash { + fn id(&self) -> Self::Id { let mut input = self .attestations .iter() - .map(|a| a.hash()) + .map(|a| a.signature()) .collect::>(); // sort to make the hash deterministic input.sort(); hash(input.concat()) } - fn as_bytes(&self) -> Bytes { - 
wire::serialize(self) - .expect("Certificate shouldn't fail to be serialized") - .into() + fn signers(&self) -> Vec { + unimplemented!() + } + + fn verify(&self, params: Self::VerificationParameters) -> bool { + self.attestations.len() >= params.threshold + } } -// TODO: add generic impl when the trait for Certificate is expanded -impl DaProtocol for FullReplication> { - type Blob = Blob; - type Attestation = Attestation; - type Certificate = Certificate; - type Settings = Settings; +#[derive(Debug, Clone, Serialize, Deserialize, Eq, PartialEq)] +pub struct VidCertificate { + id: [u8; 32], + metadata: Metadata, +} - fn new(settings: Self::Settings) -> Self { - Self::new( - settings.voter, - AbsoluteNumber::new(settings.num_attestations), - ) +impl certificate::vid::VidCertificate for VidCertificate { + type CertificateId = [u8; 32]; + + fn certificate_id(&self) -> Self::CertificateId { + self.id } - fn encode>(&self, data: T) -> Vec { - vec![Blob { - data: Bytes::copy_from_slice(data.as_ref()), - }] + fn size(&self) -> usize { + std::mem::size_of_val(&self.id) + self.metadata.size() } +} - fn recv_blob(&mut self, blob: Self::Blob) { - self.output_buffer.push(blob.data); +impl metadata::Metadata for VidCertificate { + type AppId = [u8; 32]; + type Index = Index; + + fn metadata(&self) -> (Self::AppId, Self::Index) { + (self.metadata.app_id, self.metadata.index) } +} - fn extract(&mut self) -> Option { - self.output_buffer.pop() +impl Hash for VidCertificate { + fn hash(&self, state: &mut H) { + state.write( + ::certificate_id(self).as_ref(), + ); } +} - fn attest(&self, blob: &Self::Blob) -> Self::Attestation { - Attestation { - blob: hasher(blob), - voter: self.voter, +impl From for VidCertificate { + fn from(cert: Certificate) -> Self { + // To simulate the property of aggregate commitment + row commitment in the Nomos DA protocol, + // when the full replication certificate is converted into the VID (which should happen after + // the verification in the mempool), the id is set to the blob hash to allow identification + // of the distributed data across Nomos nodes.
+ let id = cert.attestations[0].blob_hash(); + Self { + id, + metadata: cert.metadata, } } +} - fn validate_attestation(&self, blob: &Self::Blob, attestation: &Self::Attestation) -> bool { - hasher(blob) == attestation.blob +impl metadata::Metadata for Certificate { + type AppId = [u8; 32]; + type Index = Index; + + fn metadata(&self) -> (Self::AppId, Self::Index) { + (self.metadata.app_id, self.metadata.index) } +} - fn recv_attestation(&mut self, attestation: Self::Attestation) { - self.attestations.push(attestation); - if self.certificate_strategy.can_build(&self.attestations) { - self.output_certificate_buf.push( - self.certificate_strategy - .build(std::mem::take(&mut self.attestations)), - ); - } +impl From for Index { + fn from(value: u64) -> Self { + Self(value.to_be_bytes()) } +} - fn certify_dispersal(&mut self) -> Option { - self.output_certificate_buf.pop() +impl Next for Index { + fn next(self) -> Self { + let num = u64::from_be_bytes(self.0); + let incremented_num = num.wrapping_add(1); + Self(incremented_num.to_be_bytes()) } +} - fn validate_certificate(&self, certificate: &Self::Certificate) -> bool { - self.certificate_strategy - .can_build(&certificate.attestations) +impl AsRef<[u8]> for Index { + fn as_ref(&self) -> &[u8] { + self.0.as_ref() } } diff --git a/nomos-da/kzg/Cargo.toml b/nomos-da/kzg/Cargo.toml deleted file mode 100644 index 46363f19..00000000 --- a/nomos-da/kzg/Cargo.toml +++ /dev/null @@ -1,17 +0,0 @@ -[package] -name = "nomos-kzg" -version = "0.1.0" -edition = "2021" - -# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html - -[dependencies] -kzg = { git = "https://github.com/sifraitech/rust-kzg.git", rev = "222a61df62ef26e91448ad2c934ab8b408e45a61", package = "rust-kzg-blst", features = ["parallel"] } -kzg_traits = { git = "https://github.com/sifraitech/rust-kzg.git", rev = "222a61df62ef26e91448ad2c934ab8b408e45a61", package = "kzg" } - -[dev-dependencies] -criterion = "0.5.1" - -[[bench]] -name = "nomos_kzg" -harness = false diff --git a/nomos-da/kzg/benches/nomos_kzg.rs b/nomos-da/kzg/benches/nomos_kzg.rs deleted file mode 100644 index 094cdfac..00000000 --- a/nomos-da/kzg/benches/nomos_kzg.rs +++ /dev/null @@ -1,36 +0,0 @@ -use criterion::{black_box, criterion_group, criterion_main, Criterion}; -use kzg::{types::kzg_settings::FsKZGSettings, utils::generate_trusted_setup}; -use kzg_traits::{FFTSettings, KZGSettings}; -use nomos_kzg::{Blob, KzgSettings}; - -fn nomos_dynamic_vs_external(c: &mut Criterion) { - let (g1s, g2s) = generate_trusted_setup(4096, [0; 32]); - let fft_settings = kzg::types::fft_settings::FsFFTSettings::new(8).unwrap(); - let settings = FsKZGSettings::new(&g1s, &g2s, 4096, &fft_settings).unwrap(); - let kzg_settings = KzgSettings { - settings: settings.clone(), - bytes_per_field_element: 32, - }; - let data = [5; 4096 * 32]; - let blob = Blob::from_bytes(&data, &kzg_settings).unwrap(); - - let mut group = c.benchmark_group("KZG Commitment Benchmarks"); - - group.bench_function("nomos blob commitment", |b| { - b.iter(|| nomos_kzg::compute_commitment(black_box(&data), black_box(&kzg_settings))) - }); - - group.bench_function("external blob commitment", |b| { - b.iter(|| { - kzg::eip_4844::blob_to_kzg_commitment_rust( - black_box(&blob.inner()), - black_box(&settings), - ) - }) - }); - - group.finish(); -} - -criterion_group!(benches, nomos_dynamic_vs_external); -criterion_main!(benches); diff --git a/nomos-da/kzg/src/dynamic_kzg.rs b/nomos-da/kzg/src/dynamic_kzg.rs deleted file mode 100644 
index 26363683..00000000 --- a/nomos-da/kzg/src/dynamic_kzg.rs +++ /dev/null @@ -1,245 +0,0 @@ -//! Custom variant of rust-kzg that supports dynamic sized blobs. -//! https://github.com/sifraitech/rust-kzg -//! Some types were changed to fit our API for comfort. -//! Blob related constants were removed and we use a config based approach. - -use crate::types::{Blob, Commitment, KzgSettings, Proof}; -use kzg::kzg_proofs::g1_linear_combination; -use kzg::types::fr::FsFr; -use kzg::types::g1::FsG1; -use kzg::types::kzg_settings::FsKZGSettings; -use kzg::types::poly::FsPoly; -use kzg_traits::eip_4844::{ - bytes_of_uint64, hash, hash_to_bls_field, verify_kzg_proof_rust, CHALLENGE_INPUT_SIZE, - FIAT_SHAMIR_PROTOCOL_DOMAIN, -}; -use kzg_traits::{Fr, Poly, G1}; - -pub fn blob_to_kzg_commitment( - blob: &Blob, - s: &FsKZGSettings, - field_elements_per_blob: usize, -) -> FsG1 { - let mut out = FsG1::default(); - g1_linear_combination(&mut out, &s.secret_g1, &blob.inner, field_elements_per_blob); - out -} - -pub fn compute_blob_kzg_proof( - blob: &Blob, - commitment: &Commitment, - settings: &KzgSettings, -) -> Result { - if !commitment.0.is_valid() { - return Err("Invalid commitment".to_string()); - } - - let evaluation_challenge_fr = compute_challenge(blob, &commitment.0, settings); - let (proof, _) = compute_kzg_proof(blob, &evaluation_challenge_fr, settings); - Ok(proof) -} - -pub fn verify_blob_kzg_proof( - blob: &Blob, - commitment: &Commitment, - proof: &Proof, - settings: &KzgSettings, -) -> Result { - if !commitment.0.is_valid() { - return Err("Invalid commitment".to_string()); - } - if !proof.0.is_valid() { - return Err("Invalid proof".to_string()); - } - - let polynomial = blob_to_polynomial(&blob.inner); - let evaluation_challenge_fr = compute_challenge(blob, &commitment.0, settings); - let y_fr = evaluate_polynomial_in_evaluation_form_rust( - &polynomial, - &evaluation_challenge_fr, - &settings.settings, - ); - verify_kzg_proof_rust( - &commitment.0, - &evaluation_challenge_fr, - &y_fr, - &proof.0, - &settings.settings, - ) -} - -fn compute_challenge(blob: &Blob, commitment: &FsG1, settings: &KzgSettings) -> FsFr { - let mut bytes: Vec = vec![0; CHALLENGE_INPUT_SIZE]; - - // Copy domain separator - bytes[..16].copy_from_slice(&FIAT_SHAMIR_PROTOCOL_DOMAIN); - bytes_of_uint64(&mut bytes[16..24], blob.len() as u64); - // Set all other bytes of this 16-byte (little-endian) field to zero - bytes_of_uint64(&mut bytes[24..32], 0); - - // Copy blob - for i in 0..blob.len() { - let v = blob.inner[i].to_bytes(); - bytes[(32 + i * settings.bytes_per_field_element) - ..(32 + (i + 1) * settings.bytes_per_field_element)] - .copy_from_slice(&v); - } - - // Copy commitment - let v = commitment.to_bytes(); - for i in 0..v.len() { - bytes[32 + settings.bytes_per_field_element * blob.len() + i] = v[i]; - } - - // Now let's create the challenge! 
- let eval_challenge = hash(&bytes); - hash_to_bls_field(&eval_challenge) -} - -fn compute_kzg_proof(blob: &Blob, z: &FsFr, s: &KzgSettings) -> (FsG1, FsFr) { - let polynomial = blob_to_polynomial(blob.inner.as_slice()); - let poly_len = polynomial.coeffs.len(); - let y = evaluate_polynomial_in_evaluation_form_rust(&polynomial, z, &s.settings); - - let mut tmp: FsFr; - let roots_of_unity: &Vec = &s.settings.fs.roots_of_unity; - - let mut m: usize = 0; - let mut q: FsPoly = FsPoly::new(poly_len); - - let mut inverses_in: Vec = vec![FsFr::default(); poly_len]; - let mut inverses: Vec = vec![FsFr::default(); poly_len]; - - for i in 0..poly_len { - if z.equals(&roots_of_unity[i]) { - // We are asked to compute a KZG proof inside the domain - m = i + 1; - inverses_in[i] = FsFr::one(); - continue; - } - // (p_i - y) / (ω_i - z) - q.coeffs[i] = polynomial.coeffs[i].sub(&y); - inverses_in[i] = roots_of_unity[i].sub(z); - } - - fr_batch_inv(&mut inverses, &inverses_in, poly_len); - - for (i, inverse) in inverses.iter().enumerate().take(poly_len) { - q.coeffs[i] = q.coeffs[i].mul(inverse); - } - - if m != 0 { - // ω_{m-1} == z - m -= 1; - q.coeffs[m] = FsFr::zero(); - for i in 0..poly_len { - if i == m { - continue; - } - // Build denominator: z * (z - ω_i) - tmp = z.sub(&roots_of_unity[i]); - inverses_in[i] = tmp.mul(z); - } - - fr_batch_inv(&mut inverses, &inverses_in, poly_len); - - for i in 0..poly_len { - if i == m { - continue; - } - // Build numerator: ω_i * (p_i - y) - tmp = polynomial.coeffs[i].sub(&y); - tmp = tmp.mul(&roots_of_unity[i]); - // Do the division: (p_i - y) * ω_i / (z * (z - ω_i)) - tmp = tmp.mul(&inverses[i]); - q.coeffs[m] = q.coeffs[m].add(&tmp); - } - } - - let proof = g1_lincomb(&s.settings.secret_g1, &q.coeffs, poly_len); - (proof, y) -} - -fn evaluate_polynomial_in_evaluation_form_rust(p: &FsPoly, x: &FsFr, s: &FsKZGSettings) -> FsFr { - let poly_len = p.coeffs.len(); - let roots_of_unity: &Vec = &s.fs.roots_of_unity; - let mut inverses_in: Vec = vec![FsFr::default(); poly_len]; - let mut inverses: Vec = vec![FsFr::default(); poly_len]; - - for i in 0..poly_len { - if x.equals(&roots_of_unity[i]) { - return p.get_coeff_at(i); - } - inverses_in[i] = x.sub(&roots_of_unity[i]); - } - - fr_batch_inv(&mut inverses, &inverses_in, poly_len); - - let mut tmp: FsFr; - let mut out = FsFr::zero(); - - for i in 0..poly_len { - tmp = inverses[i].mul(&roots_of_unity[i]); - tmp = tmp.mul(&p.coeffs[i]); - out = out.add(&tmp); - } - - tmp = FsFr::from_u64(poly_len as u64); - out = out.div(&tmp).unwrap(); - tmp = x.pow(poly_len); - tmp = tmp.sub(&FsFr::one()); - out = out.mul(&tmp); - out -} - -fn fr_batch_inv(out: &mut [FsFr], a: &[FsFr], len: usize) { - assert!(len > 0); - - let mut accumulator = FsFr::one(); - - for i in 0..len { - out[i] = accumulator; - accumulator = accumulator.mul(&a[i]); - } - - accumulator = accumulator.eucl_inverse(); - - for i in (0..len).rev() { - out[i] = out[i].mul(&accumulator); - accumulator = accumulator.mul(&a[i]); - } -} - -fn g1_lincomb(points: &[FsG1], scalars: &[FsFr], length: usize) -> FsG1 { - let mut out = FsG1::default(); - g1_linear_combination(&mut out, points, scalars, length); - out -} - -fn blob_to_polynomial(blob: &[FsFr]) -> FsPoly { - let mut p: FsPoly = FsPoly::new(blob.len()); - p.coeffs = blob.to_vec(); - p -} - -#[cfg(test)] -mod test { - use super::*; - use kzg::utils::generate_trusted_setup; - use kzg_traits::{eip_4844::blob_to_kzg_commitment_rust, FFTSettings, KZGSettings}; - - #[test] - fn test_blob_to_kzg_commitment() { - let 
(g1s, g2s) = generate_trusted_setup(4096, [0; 32]); - let fft_settings = kzg::types::fft_settings::FsFFTSettings::new(8).unwrap(); - let settings = FsKZGSettings::new(&g1s, &g2s, 4096, &fft_settings).unwrap(); - let kzg_settings = KzgSettings { - settings, - bytes_per_field_element: 32, - }; - let blob = Blob::from_bytes(&[5; 4096 * 32], &kzg_settings).unwrap(); - let commitment = blob_to_kzg_commitment(&blob, &kzg_settings.settings, 4096); - let commitment2 = blob_to_kzg_commitment_rust(&blob.inner, &kzg_settings.settings).unwrap(); - assert_eq!(commitment, commitment2); - } -} diff --git a/nomos-da/kzg/src/lib.rs b/nomos-da/kzg/src/lib.rs deleted file mode 100644 index 00527293..00000000 --- a/nomos-da/kzg/src/lib.rs +++ /dev/null @@ -1,82 +0,0 @@ -mod dynamic_kzg; -mod types; - -pub use crate::types::{Blob, Commitment, KzgSettings, Proof}; -pub use dynamic_kzg::{blob_to_kzg_commitment, compute_blob_kzg_proof, verify_blob_kzg_proof}; -use std::error::Error; - -pub const BYTES_PER_PROOF: usize = 48; -pub const BYTES_PER_COMMITMENT: usize = 48; - -/// Compute a kzg commitment for the given data. -/// It works for arbitrary data, but the data must be a multiple of **32 bytes**. -/// The data is interpreted as a sequence of field elements. Each consisting of **32 bytes**. -pub fn compute_commitment( - data: &[u8], - settings: &KzgSettings, -) -> Result> { - let blob = Blob::from_bytes(data, settings)?; - Ok(Commitment(blob_to_kzg_commitment( - &blob, - &settings.settings, - data.len() / settings.bytes_per_field_element, - ))) -} - -/// Compute a kzg proof for each field element in the given data. -/// It works for arbitrary data, but the data must be a multiple of **32 bytes**. -/// The data is interpreted as a sequence of field elements. Each consisting of **32 bytes**. -pub fn compute_proofs( - data: &[u8], - commitment: &Commitment, - settings: &KzgSettings, -) -> Result, Box> { - let blobs = data - .chunks(settings.bytes_per_field_element) - .map(|b| Blob::from_bytes(b, settings)); - let mut res = Vec::new(); - for blob in blobs { - let blob = blob?; - res.push(Proof(compute_blob_kzg_proof(&blob, commitment, settings)?)) - } - Ok(res) -} - -/// Verify a kzg proof for the given blob. -/// It works for arbitrary data, but the data must be a multiple of **32 bytes**. -/// The data is interpreted as a sequence of field elements. Each consisting of **32 bytes**. 
-pub fn verify_blob( - blob: &[u8], - proof: &Proof, - commitment: &Commitment, - settings: &KzgSettings, -) -> Result> { - let blob = Blob::from_bytes(blob, settings)?; - verify_blob_kzg_proof(&blob, commitment, proof, settings).map_err(|e| e.into()) -} - -#[cfg(test)] -mod test { - use super::*; - use kzg::types::kzg_settings::FsKZGSettings; - use kzg::utils::generate_trusted_setup; - use kzg_traits::{FFTSettings, KZGSettings}; - - #[test] - fn test_compute_and_verify() -> Result<(), Box> { - let (g1s, g2s) = generate_trusted_setup(4096, [0; 32]); - let fft_settings = kzg::types::fft_settings::FsFFTSettings::new(8).unwrap(); - let settings = FsKZGSettings::new(&g1s, &g2s, 4096, &fft_settings).unwrap(); - let kzg_settings = KzgSettings { - settings, - bytes_per_field_element: 32, - }; - let blob = vec![0; 4096]; - let commitment = compute_commitment(&blob, &kzg_settings)?; - let proofs = compute_proofs(&blob, &commitment, &kzg_settings)?; - for proof in proofs { - assert!(verify_blob(&blob, &proof, &commitment, &kzg_settings)?); - } - Ok(()) - } -} diff --git a/nomos-da/kzg/src/types.rs b/nomos-da/kzg/src/types.rs deleted file mode 100644 index 3d6054ff..00000000 --- a/nomos-da/kzg/src/types.rs +++ /dev/null @@ -1,63 +0,0 @@ -use crate::{BYTES_PER_COMMITMENT, BYTES_PER_PROOF}; -use kzg::types::fr::FsFr; -use kzg::types::g1::FsG1; -use kzg::types::kzg_settings::FsKZGSettings; -use kzg_traits::{Fr, G1}; -use std::error::Error; - -/// A wrapper around the KZG settings that also stores the number of bytes per field element. -pub struct KzgSettings { - pub settings: FsKZGSettings, - pub bytes_per_field_element: usize, -} - -/// A KZG commitment. -pub struct Commitment(pub(crate) FsG1); - -/// A KZG proof. -pub struct Proof(pub(crate) FsG1); - -/// A blob of data. 
-pub struct Blob { - pub(crate) inner: Vec, -} - -impl Commitment { - pub fn as_bytes_owned(&self) -> [u8; BYTES_PER_COMMITMENT] { - self.0.to_bytes() - } -} - -impl Proof { - pub fn as_bytes_owned(&self) -> [u8; BYTES_PER_PROOF] { - self.0.to_bytes() - } -} - -impl Blob { - pub fn from_bytes(data: &[u8], settings: &KzgSettings) -> Result> { - let mut inner = Vec::with_capacity(data.len() / settings.bytes_per_field_element); - for chunk in data.chunks(settings.bytes_per_field_element) { - if chunk.len() < settings.bytes_per_field_element { - let mut padded_chunk = vec![0; settings.bytes_per_field_element]; - padded_chunk[..chunk.len()].copy_from_slice(chunk); - inner.push(FsFr::from_bytes(&padded_chunk)?); - } else { - inner.push(FsFr::from_bytes(chunk)?); - } - } - Ok(Self { inner }) - } - - pub fn len(&self) -> usize { - self.inner.len() - } - - pub fn is_empty(&self) -> bool { - self.inner.is_empty() - } - - pub fn inner(&self) -> Vec { - self.inner.clone() - } -} diff --git a/nomos-da/kzgrs-backend/Cargo.toml b/nomos-da/kzgrs-backend/Cargo.toml new file mode 100644 index 00000000..6c110e58 --- /dev/null +++ b/nomos-da/kzgrs-backend/Cargo.toml @@ -0,0 +1,26 @@ +[package] +name = "kzgrs-backend" +version = "0.1.0" +edition = "2021" + +# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html + +[dependencies] +ark-ff = "0.4" +ark-serialize = "0.4.2" +ark-poly = "0.4.2" +bitvec = "1.0.1" +blake2 = "0.10" +blst = { version = "0.3.11", features = ["serde"] } +itertools = "0.12" +kzgrs = { path = "../kzgrs" } +nomos-core = { path = "../../nomos-core" } +num-bigint = "0.4.4" +rand = "0.8.5" +once_cell = "1.19" +sha3 = "0.10" +serde = { version = "1.0", features = ["derive"] } + +[dev-dependencies] +rand = "0.8" + diff --git a/nomos-da/kzgrs-backend/src/common/attestation.rs b/nomos-da/kzgrs-backend/src/common/attestation.rs new file mode 100644 index 00000000..1882f701 --- /dev/null +++ b/nomos-da/kzgrs-backend/src/common/attestation.rs @@ -0,0 +1,32 @@ +// std +// crates +use blake2::Blake2b; +use nomos_core::da::attestation; +use serde::{Deserialize, Serialize}; +use sha3::Digest; +// internal + +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct Attestation { + pub blob_hash: [u8; 32], + pub signature: Vec, +} + +impl attestation::Attestation for Attestation { + type Hash = [u8; 32]; + + fn blob_hash(&self) -> Self::Hash { + self.blob_hash + } + + fn hash(&self) -> Self::Hash { + Blake2b::new() + .chain_update(self.blob_hash) + .finalize() + .into() + } + + fn signature(&self) -> &[u8] { + self.signature.as_ref() + } +} diff --git a/nomos-da/kzgrs-backend/src/common/blob.rs b/nomos-da/kzgrs-backend/src/common/blob.rs new file mode 100644 index 00000000..cde61f5b --- /dev/null +++ b/nomos-da/kzgrs-backend/src/common/blob.rs @@ -0,0 +1,116 @@ +// std +use std::io::Cursor; +// crates +use ark_serialize::*; +use kzgrs::Proof; +use nomos_core::da::blob; +use serde::ser::SerializeSeq; +use serde::{Deserialize, Deserializer, Serialize, Serializer}; +use sha3::{Digest, Sha3_256}; +// internal +use super::build_attestation_message; +use crate::common::Column; +use crate::common::Commitment; + +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct DaBlob { + pub column: Column, + #[serde( + serialize_with = "serialize_canonical", + deserialize_with = "deserialize_canonical" + )] + pub column_commitment: Commitment, + #[serde( + serialize_with = "serialize_canonical", + deserialize_with = "deserialize_canonical" + )] + pub 
aggregated_column_commitment: Commitment, + #[serde( + serialize_with = "serialize_canonical", + deserialize_with = "deserialize_canonical" + )] + pub aggregated_column_proof: Proof, + #[serde( + serialize_with = "serialize_vec_canonical", + deserialize_with = "deserialize_vec_canonical" + )] + pub rows_commitments: Vec, + #[serde( + serialize_with = "serialize_vec_canonical", + deserialize_with = "deserialize_vec_canonical" + )] + pub rows_proofs: Vec, +} + +impl DaBlob { + pub fn id(&self) -> Vec { + build_attestation_message(&self.aggregated_column_commitment, &self.rows_commitments) + } + + pub fn column_id(&self) -> Vec { + let mut hasher = Sha3_256::new(); + hasher.update(self.column.as_bytes()); + hasher.finalize().as_slice().to_vec() + } +} + +impl blob::Blob for DaBlob { + type BlobId = Vec; + + fn id(&self) -> Self::BlobId { + build_attestation_message(&self.aggregated_column_commitment, &self.rows_commitments) + } +} + +fn serialize_canonical(value: &T, serializer: S) -> Result +where + S: Serializer, + T: CanonicalSerialize, +{ + let mut bytes = Vec::new(); + value + .serialize_compressed(&mut bytes) + .map_err(serde::ser::Error::custom)?; + serializer.serialize_bytes(&bytes) +} + +fn deserialize_canonical<'de, D, T>(deserializer: D) -> Result +where + D: Deserializer<'de>, + T: CanonicalDeserialize, +{ + let bytes: Vec = serde::Deserialize::deserialize(deserializer)?; + let mut cursor = Cursor::new(bytes); + T::deserialize_compressed(&mut cursor).map_err(serde::de::Error::custom) +} + +fn serialize_vec_canonical(values: &[T], serializer: S) -> Result +where + S: Serializer, + T: CanonicalSerialize, +{ + let mut container = serializer.serialize_seq(Some(values.len()))?; + for value in values { + let mut bytes = Vec::new(); + value + .serialize_compressed(&mut bytes) + .map_err(serde::ser::Error::custom)?; + container.serialize_element(&bytes)?; + } + container.end() +} + +fn deserialize_vec_canonical<'de, D, T>(deserializer: D) -> Result, D::Error> +where + D: Deserializer<'de>, + T: CanonicalDeserialize, +{ + let bytes_vecs: Vec> = Deserialize::deserialize(deserializer)?; + bytes_vecs + .iter() + .map(|bytes| { + let mut cursor = Cursor::new(bytes); + T::deserialize_compressed(&mut cursor).map_err(serde::de::Error::custom) + }) + .collect() +} diff --git a/nomos-da/kzgrs-backend/src/common/mod.rs b/nomos-da/kzgrs-backend/src/common/mod.rs new file mode 100644 index 00000000..46cd6732 --- /dev/null +++ b/nomos-da/kzgrs-backend/src/common/mod.rs @@ -0,0 +1,171 @@ +pub mod attestation; +pub mod blob; + +// std +use ark_serialize::CanonicalSerialize; +use serde::{Deserialize, Serialize}; +use std::io::Cursor; +// crates +use blake2::digest::{Update, VariableOutput}; +use sha3::{Digest, Sha3_256}; +// internal +use kzgrs::Commitment; + +#[derive(Clone, Eq, PartialEq, Debug, Serialize, Deserialize)] +pub struct Chunk(pub Vec); +pub struct Row(pub Vec); +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct Column(pub Vec); +pub struct ChunksMatrix(pub Vec); + +pub const NOMOS_DA_DST: &[u8] = b"NOMOS_DA_AVAIL"; + +impl Chunk { + pub fn len(&self) -> usize { + self.0.len() + } + pub fn is_empty(&self) -> bool { + self.len() == 0 + } + pub fn as_bytes(&self) -> Vec { + self.0.to_vec() + } + pub const fn empty() -> Self { + Self(vec![]) + } +} + +impl From<&[u8]> for Chunk { + fn from(value: &[u8]) -> Self { + Self(value.to_vec()) + } +} + +impl Row { + pub fn iter(&self) -> impl Iterator { + self.0.iter() + } + pub fn len(&self) -> usize { + self.0.len() + } + pub fn 
is_empty(&self) -> bool { + self.len() == 0 + } + pub fn as_bytes(&self) -> Vec { + self.0.iter().flat_map(Chunk::as_bytes).collect() + } +} + +impl Column { + #[allow(unused)] + pub fn iter(&self) -> impl Iterator { + self.0.iter() + } + #[allow(unused)] + pub fn len(&self) -> usize { + self.0.len() + } + pub fn is_empty(&self) -> bool { + self.len() == 0 + } + pub fn as_bytes(&self) -> Vec { + self.0.iter().flat_map(Chunk::as_bytes).collect() + } +} + +impl FromIterator for Row { + fn from_iter>(iter: T) -> Self { + Self(iter.into_iter().collect()) + } +} + +impl FromIterator for Column { + fn from_iter>(iter: T) -> Self { + Self(iter.into_iter().collect()) + } +} + +impl AsRef<[Chunk]> for Row { + fn as_ref(&self) -> &[Chunk] { + &self.0 + } +} + +impl AsRef<[Chunk]> for Column { + fn as_ref(&self) -> &[Chunk] { + &self.0 + } +} + +impl ChunksMatrix { + pub fn len(&self) -> usize { + self.0.len() + } + pub fn is_empty(&self) -> bool { + self.len() == 0 + } + pub fn rows(&self) -> impl Iterator + '_ { + self.0.iter() + } + pub fn columns(&self) -> impl Iterator + '_ { + let size = self.0.first().map(|r| r.0.len()).unwrap_or(0); + (0..size).map(|i| { + self.0 + .iter() + .map(|row| row.0.get(i).cloned().unwrap_or_else(Chunk::empty)) + .collect::() + }) + } + + pub fn transposed(&self) -> Self { + Self(self.columns().map(|c| Row(c.0)).collect()) + } + + pub fn iter(&self) -> impl Iterator { + self.0.iter() + } +} + +impl FromIterator for ChunksMatrix { + fn from_iter>(iter: T) -> Self { + Self(iter.into_iter().collect()) + } +} + +pub fn hash_column_and_commitment( + column: &Column, + commitment: &Commitment, +) -> [u8; HASH_SIZE] { + let mut hasher = blake2::Blake2bVar::new(HASH_SIZE) + .unwrap_or_else(|e| panic!("Blake2b should work for size {HASH_SIZE}, {e}")); + hasher.update(column.as_bytes().as_ref()); + hasher.update(commitment_to_bytes(commitment).as_ref()); + hasher + .finalize_boxed() + .to_vec() + .try_into() + .unwrap_or_else(|_| panic!("Size is guaranteed by constant {HASH_SIZE:?}")) +} + +pub fn build_attestation_message( + aggregated_column_commitment: &Commitment, + rows_commitments: &[Commitment], +) -> Vec { + let mut hasher = Sha3_256::new(); + Digest::update( + &mut hasher, + commitment_to_bytes(aggregated_column_commitment), + ); + for c in rows_commitments { + Digest::update(&mut hasher, commitment_to_bytes(c)); + } + hasher.finalize().to_vec() +} + +pub fn commitment_to_bytes(commitment: &Commitment) -> Vec { + let mut buff = Cursor::new(vec![]); + commitment + .serialize_uncompressed(&mut buff) + .expect("Serialization of commitment should work"); + buff.into_inner() +} diff --git a/nomos-da/kzgrs-backend/src/dispersal.rs b/nomos-da/kzgrs-backend/src/dispersal.rs new file mode 100644 index 00000000..a87cd694 --- /dev/null +++ b/nomos-da/kzgrs-backend/src/dispersal.rs @@ -0,0 +1,448 @@ +// std +use std::hash::{Hash, Hasher}; + +// crates +use bitvec::prelude::*; +use blst::min_sig::{AggregateSignature, PublicKey, Signature}; +use blst::BLST_ERROR; +use kzgrs::{Commitment, KzgRsError}; +use nomos_core::da::certificate::metadata::Next; +use nomos_core::da::certificate::{self, metadata}; + +// internal +use crate::common::{attestation::Attestation, build_attestation_message, NOMOS_DA_DST}; +use crate::encoder::EncodedData; + +#[derive(Debug, Clone, PartialEq)] +pub struct Certificate { + aggregated_signatures: Signature, + signers: BitVec, + aggregated_column_commitment: Commitment, + row_commitments: Vec, + metadata: Metadata, +} + +impl Certificate { + pub fn 
id(&self) -> Vec { + build_attestation_message(&self.aggregated_column_commitment, &self.row_commitments) + } + + pub fn verify(&self, nodes_public_keys: &[PublicKey]) -> bool { + let signers_keys: Vec<&PublicKey> = nodes_public_keys + .iter() + .enumerate() + .filter(|(index, _)| self.signers[*index]) + .map(|(_, pk)| pk) + .collect(); + + let message = self.id(); + let messages: Vec<&[u8]> = std::iter::repeat(message.as_slice()) + .take(signers_keys.len()) + .collect(); + + verify_aggregate_signature(&self.aggregated_signatures, &signers_keys, &messages) + } + + pub fn build_certificate( + encoded_data: &EncodedData, + attestations: &[Attestation], + signers: BitVec, + threshold: usize, + metadata: Metadata, + ) -> Result { + if attestations.len() < threshold { + return Err(KzgRsError::NotEnoughAttestations { + required: threshold, + received: attestations.len(), + }); + } + + if attestations.len() != signers.count_ones() { + return Err(KzgRsError::AttestationSignersMismatch { + attestations_count: attestations.len(), + signers_count: signers.count_ones(), + }); + } + + let signatures: Vec = attestations + .iter() + .filter_map(|att| Signature::from_bytes(&att.signature).ok()) + .collect(); + + // Certificate will fail to be built if number of valid signatures from the attestations + // doesn't satisfy the same threshold used for attestations. + if signatures.len() < threshold { + return Err(KzgRsError::NotEnoughAttestations { + required: threshold, + received: signatures.len(), + }); + } + + let aggregated_signatures = aggregate_signatures(signatures)?; + + Ok(Self { + aggregated_signatures, + signers, + aggregated_column_commitment: encoded_data.aggregated_column_commitment, + row_commitments: encoded_data.row_commitments.clone(), + metadata, + }) + } +} + +fn aggregate_signatures(signatures: Vec) -> Result { + let refs: Vec<&Signature> = signatures.iter().collect(); + AggregateSignature::aggregate(&refs, true).map(|agg_sig| agg_sig.to_signature()) +} + +fn verify_aggregate_signature( + aggregate_signature: &Signature, + public_keys: &[&PublicKey], + messages: &[&[u8]], +) -> bool { + BLST_ERROR::BLST_SUCCESS + == aggregate_signature.aggregate_verify(true, messages, NOMOS_DA_DST, public_keys, true) +} + +#[derive(Clone, Debug)] +pub struct CertificateVerificationParameters { + pub nodes_public_keys: Vec, +} + +impl certificate::Certificate for Certificate { + type Signature = Signature; + type Id = Vec; + type VerificationParameters = CertificateVerificationParameters; + + fn signers(&self) -> Vec { + self.signers.iter().map(|b| *b).collect() + } + + fn signature(&self) -> Self::Signature { + self.aggregated_signatures + } + + fn id(&self) -> Self::Id { + build_attestation_message(&self.aggregated_column_commitment, &self.row_commitments) + } + + fn verify(&self, params: Self::VerificationParameters) -> bool { + self.verify(¶ms.nodes_public_keys) + } +} + +#[derive(Copy, Clone, Default, Debug, Ord, PartialOrd, PartialEq, Eq)] +pub struct Index([u8; 8]); + +#[derive(Default, Debug, Copy, Clone, Eq, PartialEq)] +pub struct Metadata { + app_id: [u8; 32], + index: Index, +} + +impl Metadata { + pub fn size(&self) -> usize { + std::mem::size_of_val(&self.app_id) + std::mem::size_of_val(&self.index) + } +} + +#[derive(Debug, Clone, Eq, PartialEq)] +pub struct VidCertificate { + id: Vec, + metadata: Metadata, +} + +impl certificate::vid::VidCertificate for VidCertificate { + type CertificateId = Vec; + + fn certificate_id(&self) -> Self::CertificateId { + self.id.clone() + } + + fn 
size(&self) -> usize { + std::mem::size_of_val(&self.id) + self.metadata.size() + } +} + +impl metadata::Metadata for VidCertificate { + type AppId = [u8; 32]; + type Index = Index; + + fn metadata(&self) -> (Self::AppId, Self::Index) { + (self.metadata.app_id, self.metadata.index) + } +} + +impl Hash for VidCertificate { + fn hash(&self, state: &mut H) { + state.write( + ::certificate_id(self).as_ref(), + ); + } +} + +impl From for VidCertificate { + fn from(cert: Certificate) -> Self { + Self { + id: cert.id(), + metadata: cert.metadata, + } + } +} + +impl metadata::Metadata for Certificate { + type AppId = [u8; 32]; + type Index = Index; + + fn metadata(&self) -> (Self::AppId, Self::Index) { + (self.metadata.app_id, self.metadata.index) + } +} + +impl From for Index { + fn from(value: u64) -> Self { + Self(value.to_be_bytes()) + } +} + +impl Next for Index { + fn next(self) -> Self { + let num = u64::from_be_bytes(self.0); + let incremented_num = num.wrapping_add(1); + Self(incremented_num.to_be_bytes()) + } +} + +impl AsRef<[u8]> for Index { + fn as_ref(&self) -> &[u8] { + self.0.as_ref() + } +} + +#[cfg(test)] +mod tests { + use bitvec::prelude::*; + use blst::min_sig::{PublicKey, SecretKey}; + use rand::{rngs::OsRng, thread_rng, Rng, RngCore}; + + use crate::{ + common::{attestation::Attestation, blob::DaBlob, NOMOS_DA_DST}, + dispersal::{aggregate_signatures, verify_aggregate_signature, Metadata}, + encoder::{ + test::{rand_data, ENCODER}, + EncodedData, + }, + verifier::DaVerifier, + }; + + use super::Certificate; + + fn generate_keys() -> (PublicKey, SecretKey) { + let mut rng = OsRng; + let sk_bytes: [u8; 32] = rng.gen(); + let sk = SecretKey::key_gen(&sk_bytes, &[]).unwrap(); + let pk = sk.sk_to_pk(); + (pk, sk) + } + + fn attest_encoded_data( + encoded_data: &EncodedData, + verifiers: &[DaVerifier], + ) -> Vec { + let mut attestations = Vec::new(); + for (i, column) in encoded_data.extended_data.columns().enumerate() { + let verifier = &verifiers[i]; + let da_blob = DaBlob { + column, + column_commitment: encoded_data.column_commitments[i], + aggregated_column_commitment: encoded_data.aggregated_column_commitment, + aggregated_column_proof: encoded_data.aggregated_column_proofs[i], + rows_commitments: encoded_data.row_commitments.clone(), + rows_proofs: encoded_data + .rows_proofs + .iter() + .map(|proofs| proofs.get(i).cloned().unwrap()) + .collect(), + }; + attestations.push(verifier.verify(da_blob).unwrap()); + } + attestations + } + + #[test] + fn test_signature_aggregation_and_verification() { + let (pk1, sk1) = generate_keys(); + let (pk2, sk2) = generate_keys(); + let (pk3, sk3) = generate_keys(); + + let message = b"Test message"; + let sig1 = sk1.sign(message, NOMOS_DA_DST, &[]); + let sig2 = sk2.sign(message, NOMOS_DA_DST, &[]); + let sig3 = sk3.sign(message, NOMOS_DA_DST, &[]); + + let aggregated_signature = aggregate_signatures(vec![sig1, sig2, sig3]).unwrap(); + + let public_keys = vec![&pk1, &pk2, &pk3]; + let messages = vec![message.as_ref(), message.as_ref(), message.as_ref()]; + let result = verify_aggregate_signature(&aggregated_signature, &public_keys, &messages); + + assert!(result, "Aggregated signature should be valid."); + } + + #[test] + fn test_invalid_signature_aggregation() { + let (pk1, sk1) = generate_keys(); + let (pk2, sk2) = generate_keys(); + let (_, sk3) = generate_keys(); + + let message = b"Test message"; + let sig1 = sk1.sign(message, NOMOS_DA_DST, &[]); + let sig2 = sk2.sign(message, NOMOS_DA_DST, &[]); + let sig3 = sk3.sign(message, 
NOMOS_DA_DST, &[]); + + let aggregated_signature = aggregate_signatures(vec![sig1, sig2, sig3]).unwrap(); + + let (wrong_pk3, _) = generate_keys(); // Generate another key pair for the "wrong" public key + + let public_keys = vec![&pk1, &pk2, &wrong_pk3]; // Incorrect public key for sig3 to demonstrate failure. + let messages = vec![message.as_ref(), message.as_ref(), message.as_ref()]; + let result = verify_aggregate_signature(&aggregated_signature, &public_keys, &messages); + + assert!( + !result, + "Aggregated signature with a mismatched public key should not be valid." + ); + } + + #[test] + fn test_encoded_data_verification() { + const THRESHOLD: usize = 16; + + let encoder = &ENCODER; + let data = rand_data(8); + let mut rng = thread_rng(); + + let sks: Vec = (0..16) + .map(|_| { + let mut buff = [0u8; 32]; + rng.fill_bytes(&mut buff); + SecretKey::key_gen(&buff, &[]).unwrap() + }) + .collect(); + + let verifiers: Vec = sks + .clone() + .into_iter() + .enumerate() + .map(|(index, sk)| DaVerifier { sk, index }) + .collect(); + + let encoded_data = encoder.encode(&data).unwrap(); + + let attestations = attest_encoded_data(&encoded_data, &verifiers); + + let signers = bitvec![u8, Lsb0; 1; 16]; + let cert = Certificate::build_certificate( + &encoded_data, + &attestations, + signers, + THRESHOLD, + Metadata::default(), + ) + .unwrap(); + + let public_keys: Vec = sks.iter().map(|sk| sk.sk_to_pk()).collect(); + assert!(cert.verify(&public_keys)); + } + + #[test] + fn test_encoded_data_insufficient_verification() { + const THRESHOLD: usize = 16; + + let encoder = &ENCODER; + let data = rand_data(8); + let mut rng = thread_rng(); + + let sks: Vec = (0..16) + .map(|_| { + let mut buff = [0u8; 32]; + rng.fill_bytes(&mut buff); + SecretKey::key_gen(&buff, &[]).unwrap() + }) + .collect(); + + let verifiers: Vec = sks + .clone() + .into_iter() + .enumerate() + .map(|(index, sk)| DaVerifier { sk, index }) + .collect(); + + let encoded_data = encoder.encode(&data).unwrap(); + + let mut attestations = attest_encoded_data(&encoded_data, &verifiers); + + // Imitate missing attestation. + attestations.pop(); + + let signers = bitvec![u8, Lsb0; 1; 16]; + let cert_result = Certificate::build_certificate( + &encoded_data, + &attestations, + signers, + THRESHOLD, + Metadata::default(), + ); + + // Certificate won't be created because of not reaching required threshold. + assert!(cert_result.is_err()); + } + + #[test] + fn test_encoded_data_wrong_pk_verification() { + const THRESHOLD: usize = 16; + + let encoder = &ENCODER; + let data = rand_data(8); + let mut rng = thread_rng(); + + let sks: Vec = (0..16) + .map(|_| { + let mut buff = [0u8; 32]; + rng.fill_bytes(&mut buff); + SecretKey::key_gen(&buff, &[]).unwrap() + }) + .collect(); + + let verifiers: Vec = sks + .clone() + .into_iter() + .enumerate() + .map(|(index, sk)| DaVerifier { sk, index }) + .collect(); + + let encoded_data = encoder.encode(&data).unwrap(); + + let attestations = attest_encoded_data(&encoded_data, &verifiers); + + let signers = bitvec![u8, Lsb0; 1; 16]; + let cert = Certificate::build_certificate( + &encoded_data, + &attestations, + signers, + THRESHOLD, + Metadata::default(), + ) + .unwrap(); + + let mut public_keys: Vec = sks.iter().map(|sk| sk.sk_to_pk()).collect(); + + // Imitate different set of public keys on the verifier side. + let (wrong_pk, _) = generate_keys(); + public_keys.pop(); + public_keys.push(wrong_pk); + + // Certificate should fail to be verified. 
+ assert!(!cert.verify(&public_keys)); + } +} diff --git a/nomos-da/kzgrs-backend/src/encoder.rs b/nomos-da/kzgrs-backend/src/encoder.rs new file mode 100644 index 00000000..f3cdbe6b --- /dev/null +++ b/nomos-da/kzgrs-backend/src/encoder.rs @@ -0,0 +1,382 @@ +// std +use std::ops::Div; + +// crates +use ark_ff::{BigInteger, PrimeField}; +use kzgrs::common::bytes_to_polynomial_unchecked; +use kzgrs::{ + bytes_to_polynomial, commit_polynomial, encode, generate_element_proof, Commitment, + Evaluations, KzgRsError, Polynomial, Proof, BYTES_PER_FIELD_ELEMENT, +}; + +// internal +use crate::common::{hash_column_and_commitment, Chunk, ChunksMatrix, Row}; +use crate::global::{DOMAIN, GLOBAL_PARAMETERS}; + +#[derive(Copy, Clone)] +pub struct DaEncoderParams { + column_count: usize, +} + +impl DaEncoderParams { + pub const MAX_BLS12_381_ENCODING_CHUNK_SIZE: usize = 31; + + pub const fn default_with(column_count: usize) -> Self { + Self { column_count } + } +} + +pub struct EncodedData { + pub data: Vec, + pub chunked_data: ChunksMatrix, + pub extended_data: ChunksMatrix, + pub row_commitments: Vec, + pub rows_proofs: Vec>, + pub column_commitments: Vec, + pub aggregated_column_commitment: Commitment, + pub aggregated_column_proofs: Vec, +} + +pub struct DaEncoder { + params: DaEncoderParams, +} + +impl DaEncoder { + pub const fn new(settings: DaEncoderParams) -> Self { + Self { params: settings } + } + + fn chunkify(&self, data: &[u8]) -> ChunksMatrix { + let chunk_size = + // column count is divided by two, as later on rows are encoded to twice the size + self.params.column_count.div(2) * DaEncoderParams::MAX_BLS12_381_ENCODING_CHUNK_SIZE; + data.chunks(chunk_size) + .map(|d| { + d.chunks(DaEncoderParams::MAX_BLS12_381_ENCODING_CHUNK_SIZE) + .map(|chunk| { + let mut buff = [0u8; BYTES_PER_FIELD_ELEMENT]; + buff[..DaEncoderParams::MAX_BLS12_381_ENCODING_CHUNK_SIZE] + .copy_from_slice(chunk); + Chunk::from(buff.as_slice()) + }) + .collect() + }) + .collect() + } + + #[allow(clippy::type_complexity)] + fn compute_kzg_row_commitments( + matrix: &ChunksMatrix, + ) -> Result, KzgRsError> { + matrix + .rows() + .map(|r| { + // Using the unchecked version here. Because during the process of chunkifiying + // we already make sure to have the chunks of proper elements. + // Also, after rs encoding, we are sure all `Fr` elements already fits within modulus. 
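// (Concretely: `chunkify` copies each 31-byte chunk into the low bytes of a zeroed
// 32-byte buffer, so every little-endian-decoded `Fr` lies below the BLS12-381
// scalar modulus.)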
+ let (evals, poly) = bytes_to_polynomial_unchecked::( + r.as_bytes().as_ref(), + *DOMAIN, + ); + commit_polynomial(&poly, &GLOBAL_PARAMETERS) + .map(|commitment| ((evals, poly), commitment)) + }) + .collect() + } + + fn rs_encode_row(evaluations: &Evaluations, row: &Polynomial) -> Evaluations { + encode(row, evaluations, 2, *DOMAIN) + } + + fn rs_encode_rows(rows: &[(Evaluations, Polynomial)]) -> Vec { + rows.iter() + .map(|(eval, poly)| Self::rs_encode_row(eval, poly)) + .collect() + } + + fn compute_rows_proofs( + polynomials: &[Polynomial], + evals: &[Evaluations], + proof_count: usize, + ) -> Result>, KzgRsError> { + polynomials + .iter() + .zip(evals) + .map(|(poly, eval)| { + (0..proof_count) + .map(|i| generate_element_proof(i, poly, eval, &GLOBAL_PARAMETERS, *DOMAIN)) + .collect() + }) + .collect() + } + + #[allow(clippy::type_complexity)] + fn compute_kzg_column_commitments( + matrix: &ChunksMatrix, + ) -> Result, KzgRsError> { + Self::compute_kzg_row_commitments(&matrix.transposed()) + } + + fn compute_aggregated_column_commitment( + matrix: &ChunksMatrix, + commitments: &[Commitment], + ) -> Result<((Evaluations, Polynomial), Commitment), KzgRsError> { + let hashes: Vec = + matrix + .columns() + .zip(commitments) + .flat_map(|(column, commitment)| { + hash_column_and_commitment::< + { DaEncoderParams::MAX_BLS12_381_ENCODING_CHUNK_SIZE }, + >(&column, commitment) + }) + .collect(); + let (evals, poly) = bytes_to_polynomial::< + { DaEncoderParams::MAX_BLS12_381_ENCODING_CHUNK_SIZE }, + >(hashes.as_ref(), *DOMAIN)?; + let commitment = commit_polynomial(&poly, &GLOBAL_PARAMETERS)?; + Ok(((evals, poly), commitment)) + } + + fn compute_aggregated_column_proofs( + polynomial: &Polynomial, + evals: &Evaluations, + proof_count: usize, + ) -> Result, KzgRsError> { + (0..proof_count) + .map(|i| generate_element_proof(i, polynomial, evals, &GLOBAL_PARAMETERS, *DOMAIN)) + .collect() + } + + fn evals_to_chunk_matrix(evals: &[Evaluations]) -> ChunksMatrix { + ChunksMatrix( + evals + .iter() + .map(|eval| { + Row(eval + .evals + .iter() + .map(|point| Chunk(point.into_bigint().to_bytes_le())) + .collect()) + }) + .collect(), + ) + } + + pub fn encode(&self, data: &[u8]) -> Result { + let chunked_data = self.chunkify(data); + let (row_polynomials, row_commitments): (Vec<_>, Vec<_>) = + Self::compute_kzg_row_commitments(&chunked_data)? + .into_iter() + .unzip(); + let encoded_evaluations = Self::rs_encode_rows(&row_polynomials); + let extended_data = Self::evals_to_chunk_matrix(&encoded_evaluations); + let row_polynomials: Vec<_> = row_polynomials.into_iter().map(|(_, p)| p).collect(); + let rows_proofs = Self::compute_rows_proofs( + &row_polynomials, + &encoded_evaluations, + self.params.column_count, + )?; + let (_column_polynomials, column_commitments): (Vec<_>, Vec<_>) = + Self::compute_kzg_column_commitments(&extended_data)? 
+ .into_iter() + .unzip(); + let ((aggregated_evals, aggregated_polynomial), aggregated_column_commitment) = + Self::compute_aggregated_column_commitment(&extended_data, &column_commitments)?; + let aggregated_column_proofs = Self::compute_aggregated_column_proofs( + &aggregated_polynomial, + &aggregated_evals, + column_commitments.len(), + )?; + Ok(EncodedData { + data: data.to_vec(), + chunked_data, + extended_data, + row_commitments, + rows_proofs, + column_commitments, + aggregated_column_commitment, + aggregated_column_proofs, + }) + } +} + +#[cfg(test)] +pub mod test { + use crate::encoder::{DaEncoder, DaEncoderParams}; + use crate::global::{DOMAIN, GLOBAL_PARAMETERS}; + use ark_ff::PrimeField; + use itertools::izip; + use kzgrs::common::bytes_to_polynomial_unchecked; + use kzgrs::{decode, verify_element_proof, FieldElement, BYTES_PER_FIELD_ELEMENT}; + use rand::RngCore; + use std::ops::Div; + + pub const PARAMS: DaEncoderParams = DaEncoderParams::default_with(16); + pub const ENCODER: DaEncoder = DaEncoder::new(PARAMS); + + pub fn rand_data(elements_count: usize) -> Vec { + let mut buff = vec![0; elements_count * DaEncoderParams::MAX_BLS12_381_ENCODING_CHUNK_SIZE]; + rand::thread_rng().fill_bytes(&mut buff); + buff + } + + #[test] + fn test_chunkify() { + let params = DaEncoderParams::default_with(2); + let elements = 10usize; + let data = rand_data(elements); + let encoder = DaEncoder::new(params); + let matrix = encoder.chunkify(&data); + assert_eq!(matrix.len(), elements.div(params.column_count.div(2))); + for row in matrix.rows() { + assert_eq!(row.len(), params.column_count.div(2)); + assert_eq!(row.0[0].len(), BYTES_PER_FIELD_ELEMENT); + } + } + + #[test] + fn test_compute_row_kzg_commitments() { + let data = rand_data(32); + let matrix = ENCODER.chunkify(data.as_ref()); + let commitments_data = DaEncoder::compute_kzg_row_commitments(&matrix).unwrap(); + assert_eq!(commitments_data.len(), matrix.len()); + } + + #[test] + fn test_evals_to_chunk_matrix() { + let data = rand_data(32); + let matrix = ENCODER.chunkify(data.as_ref()); + let (poly_data, _): (Vec<_>, Vec<_>) = DaEncoder::compute_kzg_row_commitments(&matrix) + .unwrap() + .into_iter() + .unzip(); + let extended_rows = DaEncoder::rs_encode_rows(&poly_data); + let extended_matrix = DaEncoder::evals_to_chunk_matrix(&extended_rows); + for (r1, r2) in izip!(matrix.iter(), extended_matrix.iter()) { + for (c1, c2) in izip!(r1.iter(), r2.iter()) { + assert_eq!(c1, c2); + } + } + } + + #[test] + fn test_rs_encode_rows() { + let data = rand_data(32); + let matrix = ENCODER.chunkify(data.as_ref()); + let (poly_data, _): (Vec<_>, Vec<_>) = DaEncoder::compute_kzg_row_commitments(&matrix) + .unwrap() + .into_iter() + .unzip(); + let extended_rows = DaEncoder::rs_encode_rows(&poly_data); + let (evals, _): (Vec<_>, Vec<_>) = poly_data.into_iter().unzip(); + // check encoding went well, original evaluation points vs extended ones + for (e1, e2) in izip!(evals.iter(), extended_rows.iter()) { + for (c1, c2) in izip!(&e1.evals, &e2.evals) { + assert_eq!(c1, c2); + } + } + let extended_matrix = DaEncoder::evals_to_chunk_matrix(&extended_rows); + for (r1, r2, evals) in izip!(matrix.iter(), extended_matrix.iter(), extended_rows) { + assert_eq!(r1.len(), r2.len().div(2)); + for (c1, c2) in izip!(r1.iter(), r2.iter()) { + assert_eq!(c1, c2); + } + let points: Vec<_> = evals.evals.iter().cloned().map(Some).collect(); + let poly_2 = decode(r1.len(), &points, *DOMAIN); + let (poly_1, _) = bytes_to_polynomial_unchecked::( + r1.as_bytes().as_ref(), 
+ *DOMAIN, + ); + assert_eq!(poly_1, poly_2); + } + } + + #[test] + fn test_compute_row_proofs() { + let data = rand_data(32); + let matrix = ENCODER.chunkify(data.as_ref()); + let (poly_data, commitments): (Vec<_>, Vec<_>) = + DaEncoder::compute_kzg_row_commitments(&matrix) + .unwrap() + .into_iter() + .unzip(); + let extended_evaluations = DaEncoder::rs_encode_rows(&poly_data); + let (evals, polynomials): (Vec<_>, Vec<_>) = poly_data.into_iter().unzip(); + let extended_matrix = DaEncoder::evals_to_chunk_matrix(&extended_evaluations); + let original_proofs = + DaEncoder::compute_rows_proofs(&polynomials, &evals, PARAMS.column_count.div(2)) + .unwrap(); + let extended_proofs = DaEncoder::compute_rows_proofs( + &polynomials, + &extended_evaluations, + PARAMS.column_count, + ) + .unwrap(); + + let checks = izip!(matrix.iter(), &commitments, &original_proofs); + for (row, commitment, proofs) in checks { + assert_eq!(proofs.len(), row.len()); + for (i, chunk) in row.iter().enumerate() { + let element = FieldElement::from_le_bytes_mod_order(chunk.as_bytes().as_ref()); + assert!(verify_element_proof( + i, + &element, + &commitment, + &proofs[i], + *DOMAIN, + &GLOBAL_PARAMETERS + )); + } + } + let checks = izip!(extended_matrix.iter(), &commitments, &extended_proofs); + for (row, commitment, proofs) in checks { + assert_eq!(proofs.len(), row.len()); + for (i, chunk) in row.iter().enumerate() { + let element = FieldElement::from_le_bytes_mod_order(chunk.as_bytes().as_ref()); + assert!(verify_element_proof( + i, + &element, + &commitment, + &proofs[i], + *DOMAIN, + &GLOBAL_PARAMETERS + )); + } + } + } + + #[test] + fn test_compute_column_kzg_commitments() { + let data = rand_data(32); + let matrix = ENCODER.chunkify(data.as_ref()); + let commitments_data = DaEncoder::compute_kzg_column_commitments(&matrix).unwrap(); + assert_eq!(commitments_data.len(), matrix.columns().count()); + } + + #[test] + fn test_compute_aggregated_column_kzg_commitment() { + let data = rand_data(32); + let matrix = ENCODER.chunkify(data.as_ref()); + let (_, commitments): (Vec<_>, Vec<_>) = DaEncoder::compute_kzg_column_commitments(&matrix) + .unwrap() + .into_iter() + .unzip(); + let _ = DaEncoder::compute_aggregated_column_commitment(&matrix, &commitments).unwrap(); + } + + #[test] + fn test_compute_aggregated_column_kzg_proofs() { + let data = rand_data(32); + let matrix = ENCODER.chunkify(data.as_ref()); + let (_poly_data, commitments): (Vec<_>, Vec<_>) = + DaEncoder::compute_kzg_column_commitments(&matrix) + .unwrap() + .into_iter() + .unzip(); + let ((evals, polynomial), _aggregated_commitment) = + DaEncoder::compute_aggregated_column_commitment(&matrix, &commitments).unwrap(); + DaEncoder::compute_aggregated_column_proofs(&polynomial, &evals, commitments.len()) + .unwrap(); + } +} diff --git a/nomos-da/kzgrs-backend/src/global.rs b/nomos-da/kzgrs-backend/src/global.rs new file mode 100644 index 00000000..a0695fa6 --- /dev/null +++ b/nomos-da/kzgrs-backend/src/global.rs @@ -0,0 +1,11 @@ +use ark_poly::EvaluationDomain; +use kzgrs::{global_parameters_from_randomness, GlobalParameters, PolynomialEvaluationDomain}; +use once_cell::sync::Lazy; + +pub static GLOBAL_PARAMETERS: Lazy = Lazy::new(|| { + let mut rng = rand::thread_rng(); + global_parameters_from_randomness(&mut rng) +}); + +pub static DOMAIN: Lazy = + Lazy::new(|| PolynomialEvaluationDomain::new(8192).unwrap()); diff --git a/nomos-da/kzgrs-backend/src/lib.rs b/nomos-da/kzgrs-backend/src/lib.rs new file mode 100644 index 00000000..065a5ca5 --- /dev/null +++ 
b/nomos-da/kzgrs-backend/src/lib.rs @@ -0,0 +1,5 @@ +pub mod common; +pub mod dispersal; +pub mod encoder; +pub mod global; +pub mod verifier; diff --git a/nomos-da/kzgrs-backend/src/verifier.rs b/nomos-da/kzgrs-backend/src/verifier.rs new file mode 100644 index 00000000..af0469b3 --- /dev/null +++ b/nomos-da/kzgrs-backend/src/verifier.rs @@ -0,0 +1,236 @@ +// std + +// crates +use blst::min_sig::{PublicKey, SecretKey}; +use itertools::{izip, Itertools}; +use kzgrs::common::field_element_from_bytes_le; +use kzgrs::{ + bytes_to_polynomial, commit_polynomial, verify_element_proof, Commitment, Proof, + BYTES_PER_FIELD_ELEMENT, +}; + +use crate::common::blob::DaBlob; +use crate::common::NOMOS_DA_DST; +// internal +use crate::common::{ + attestation::Attestation, build_attestation_message, hash_column_and_commitment, Chunk, Column, +}; +use crate::encoder::DaEncoderParams; +use crate::global::{DOMAIN, GLOBAL_PARAMETERS}; + +pub struct DaVerifier { + // TODO: substitute this for an abstraction to sign things over + pub sk: SecretKey, + pub index: usize, +} + +impl DaVerifier { + pub fn new(sk: SecretKey, nodes_public_keys: &[PublicKey]) -> Self { + // TODO: `is_sorted` is experimental, and by contract `nodes_public_keys` should be shorted + // but not sure how we could enforce it here without re-sorting anyway. + // assert!(nodes_public_keys.is_sorted()); + let self_pk = sk.sk_to_pk(); + let (index, _) = nodes_public_keys + .iter() + .find_position(|&pk| pk == &self_pk) + .expect("Self pk should be registered"); + Self { sk, index } + } + + fn verify_column( + column: &Column, + column_commitment: &Commitment, + aggregated_column_commitment: &Commitment, + aggregated_column_proof: &Proof, + index: usize, + ) -> bool { + // 1. compute commitment for column + let Ok((_, polynomial)) = + bytes_to_polynomial::(column.as_bytes().as_slice(), *DOMAIN) + else { + return false; + }; + let Ok(computed_column_commitment) = commit_polynomial(&polynomial, &GLOBAL_PARAMETERS) + else { + return false; + }; + // 2. if computed column commitment != column commitment, fail + if &computed_column_commitment != column_commitment { + return false; + } + // 3. compute column hash + let column_hash = hash_column_and_commitment::< + { DaEncoderParams::MAX_BLS12_381_ENCODING_CHUNK_SIZE }, + >(column, column_commitment); + // 4. 
check proof with commitment and proof over the aggregated column commitment + let element = field_element_from_bytes_le(column_hash.as_slice()); + verify_element_proof( + index, + &element, + aggregated_column_commitment, + aggregated_column_proof, + *DOMAIN, + &GLOBAL_PARAMETERS, + ) + } + + fn verify_chunk(chunk: &Chunk, commitment: &Commitment, proof: &Proof, index: usize) -> bool { + let element = field_element_from_bytes_le(chunk.as_bytes().as_slice()); + verify_element_proof( + index, + &element, + commitment, + proof, + *DOMAIN, + &GLOBAL_PARAMETERS, + ) + } + + fn verify_chunks( + chunks: &[Chunk], + commitments: &[Commitment], + proofs: &[Proof], + index: usize, + ) -> bool { + if ![chunks.len(), commitments.len(), proofs.len()] + .iter() + .all_equal() + { + return false; + } + for (chunk, commitment, proof) in izip!(chunks, commitments, proofs) { + if !DaVerifier::verify_chunk(chunk, commitment, proof, index) { + return false; + } + } + true + } + + fn build_attestation(&self, blob: &DaBlob) -> Attestation { + let message = + build_attestation_message(&blob.aggregated_column_commitment, &blob.rows_commitments); + let signature = self.sk.sign(&message, NOMOS_DA_DST, b""); + + let blob_id = blob.id(); + let blob_hash: [u8; 32] = blob_id + .try_into() + .expect("Blob ID must be exactly 32 bytes long"); + + Attestation { + signature: signature.to_bytes().to_vec(), + blob_hash, + } + } + + pub fn verify(&self, blob: DaBlob) -> Option { + let is_column_verified = DaVerifier::verify_column( + &blob.column, + &blob.column_commitment, + &blob.aggregated_column_commitment, + &blob.aggregated_column_proof, + self.index, + ); + if !is_column_verified { + return None; + } + + let are_chunks_verified = DaVerifier::verify_chunks( + blob.column.as_ref(), + &blob.rows_commitments, + &blob.rows_proofs, + self.index, + ); + if !are_chunks_verified { + return None; + } + Some(self.build_attestation(&blob)) + } +} + +#[cfg(test)] +mod test { + use crate::common::blob::DaBlob; + use crate::common::{hash_column_and_commitment, Chunk, Column}; + use crate::encoder::test::{rand_data, ENCODER}; + use crate::encoder::DaEncoderParams; + use crate::global::{DOMAIN, GLOBAL_PARAMETERS}; + use crate::verifier::DaVerifier; + use blst::min_sig::SecretKey; + use kzgrs::{ + bytes_to_polynomial, commit_polynomial, generate_element_proof, BYTES_PER_FIELD_ELEMENT, + }; + use rand::{thread_rng, RngCore}; + + #[test] + fn test_verify_column() { + let column: Column = (0..10).map(|i| Chunk(vec![i; 32])).collect(); + let (_, column_poly) = + bytes_to_polynomial::(column.as_bytes().as_slice(), *DOMAIN) + .unwrap(); + let column_commitment = commit_polynomial(&column_poly, &GLOBAL_PARAMETERS).unwrap(); + let (aggregated_evals, aggregated_poly) = bytes_to_polynomial::< + { DaEncoderParams::MAX_BLS12_381_ENCODING_CHUNK_SIZE }, + >( + hash_column_and_commitment::<{ DaEncoderParams::MAX_BLS12_381_ENCODING_CHUNK_SIZE }>( + &column, + &column_commitment, + ) + .as_slice(), + *DOMAIN, + ) + .unwrap(); + let aggregated_commitment = + commit_polynomial(&aggregated_poly, &GLOBAL_PARAMETERS).unwrap(); + let column_proof = generate_element_proof( + 0, + &aggregated_poly, + &aggregated_evals, + &GLOBAL_PARAMETERS, + *DOMAIN, + ) + .unwrap(); + assert!(DaVerifier::verify_column( + &column, + &column_commitment, + &aggregated_commitment, + &column_proof, + 0 + )); + } + + #[test] + fn test_verify() { + let encoder = &ENCODER; + let data = rand_data(8); + let mut rng = thread_rng(); + let sks: Vec = (0..16) + .map(|_| { + let mut buff = 
[0u8; 32]; + rng.fill_bytes(&mut buff); + SecretKey::key_gen(&buff, &[]).unwrap() + }) + .collect(); + let verifiers: Vec = sks + .into_iter() + .enumerate() + .map(|(index, sk)| DaVerifier { sk, index }) + .collect(); + let encoded_data = encoder.encode(&data).unwrap(); + for (i, column) in encoded_data.extended_data.columns().enumerate() { + let verifier = &verifiers[i]; + let da_blob = DaBlob { + column, + column_commitment: encoded_data.column_commitments[i], + aggregated_column_commitment: encoded_data.aggregated_column_commitment, + aggregated_column_proof: encoded_data.aggregated_column_proofs[i], + rows_commitments: encoded_data.row_commitments.clone(), + rows_proofs: encoded_data + .rows_proofs + .iter() + .map(|proofs| proofs.get(i).cloned().unwrap()) + .collect(), + }; + assert!(verifier.verify(da_blob).is_some()); + } + } +} diff --git a/nomos-da/kzgrs/Cargo.toml b/nomos-da/kzgrs/Cargo.toml new file mode 100644 index 00000000..ba7dfb07 --- /dev/null +++ b/nomos-da/kzgrs/Cargo.toml @@ -0,0 +1,42 @@ +[package] +name = "kzgrs" +version = "0.1.0" +edition = "2021" + +# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html + +[dependencies] +once_cell = "1.19" +ark-ec = "0.4.2" +ark-bls12-381 = { version = "0.4.0" } +ark-bls12-381-ext = "0.4.1" +ark-ff = { version = "0.4.2" } +ark-poly = { version = "0.4.2" } +ark-poly-commit = { version = "0.4.0" } +ark-serialize = { version = "0.4" } +blst = "0.3.11" +num-bigint = "0.4.4" +thiserror = "1.0.58" +num-traits = "0.2.18" +rand = "0.8.5" + +[dev-dependencies] +divan = "0.1" +rayon = "1.10" + +[[bench]] +name = "kzg" +harness = false + +[features] +default = ["single"] +single = [] +parallel = [ + "ark-ff/parallel", + "ark-ff/asm", + "ark-ff/rayon", + "ark-poly/parallel", + "ark-poly/rayon", + "ark-poly-commit/rayon", + "ark-poly-commit/parallel" +] diff --git a/nomos-da/kzgrs/benches/kzg.rs b/nomos-da/kzgrs/benches/kzg.rs new file mode 100644 index 00000000..18c26eb6 --- /dev/null +++ b/nomos-da/kzgrs/benches/kzg.rs @@ -0,0 +1,162 @@ +use ark_bls12_381::{Bls12_381, Fr}; +use ark_poly::univariate::DensePolynomial; +use ark_poly::{EvaluationDomain, GeneralEvaluationDomain}; +use ark_poly_commit::kzg10::{UniversalParams, KZG10}; +use divan::counter::ItemsCount; +use divan::{black_box, counter::BytesCount, AllocProfiler, Bencher}; +use once_cell::sync::Lazy; +use rand::RngCore; +use rayon::iter::IntoParallelIterator; +use rayon::iter::ParallelIterator; + +use kzgrs::{common::bytes_to_polynomial_unchecked, kzg::*}; + +fn main() { + divan::main() +} + +// This allocator setting seems like it doesn't work on windows. Disable for now, but letting +// it here in case it's needed at some specific point. 
+// #[global_allocator] +// static ALLOC: AllocProfiler = AllocProfiler::system(); + +static GLOBAL_PARAMETERS: Lazy> = Lazy::new(|| { + let mut rng = rand::thread_rng(); + KZG10::>::setup(4096, true, &mut rng).unwrap() +}); + +fn rand_data_elements(elements_count: usize, chunk_size: usize) -> Vec { + let mut buff = vec![0u8; elements_count * chunk_size]; + rand::thread_rng().fill_bytes(&mut buff); + buff +} + +const CHUNK_SIZE: usize = 31; + +#[allow(non_snake_case)] +#[divan::bench(args = [16, 32, 64, 128, 256, 512, 1024, 2048, 4096])] +fn commit_single_polynomial_with_element_count(bencher: Bencher, element_count: usize) { + bencher + .with_inputs(|| { + let domain = GeneralEvaluationDomain::new(element_count).unwrap(); + let data = rand_data_elements(element_count, CHUNK_SIZE); + bytes_to_polynomial_unchecked::(&data, domain) + }) + .input_counter(move |(_evals, _poly)| ItemsCount::new(1usize)) + .bench_refs(|(_evals, poly)| black_box(commit_polynomial(poly, &GLOBAL_PARAMETERS))); +} + +#[allow(non_snake_case)] +#[divan::bench(args = [16, 32, 64, 128, 256, 512, 1024, 2048, 4096])] +fn commit_polynomial_with_element_count_parallelized(bencher: Bencher, element_count: usize) { + let threads = 8usize; + bencher + .with_inputs(|| { + let domain = GeneralEvaluationDomain::new(element_count).unwrap(); + let data = rand_data_elements(element_count, CHUNK_SIZE); + bytes_to_polynomial_unchecked::(&data, domain) + }) + .input_counter(move |(_evals, _poly)| ItemsCount::new(threads)) + .bench_refs(|(_evals, poly)| { + let commitments: Vec<_> = (0..threads) + .into_par_iter() + .map(|_| commit_polynomial(poly, &GLOBAL_PARAMETERS)) + .collect(); + }); +} + +#[allow(non_snake_case)] +#[divan::bench(args = [128, 256, 512, 1024, 2048, 4096])] +fn compute_single_proof(bencher: Bencher, element_count: usize) { + bencher + .with_inputs(|| { + let domain = GeneralEvaluationDomain::new(element_count).unwrap(); + let data = rand_data_elements(element_count, CHUNK_SIZE); + ( + bytes_to_polynomial_unchecked::(&data, domain), + domain, + ) + }) + .input_counter(|_| ItemsCount::new(1usize)) + .bench_refs(|((evals, poly), domain)| { + black_box(generate_element_proof( + 7, + poly, + evals, + &GLOBAL_PARAMETERS, + *domain, + )) + }); +} + +#[allow(non_snake_case)] +#[divan::bench(args = [128, 256, 512, 1024], sample_count = 3, sample_size = 5)] +fn compute_batch_proofs(bencher: Bencher, element_count: usize) { + bencher + .with_inputs(|| { + let domain = GeneralEvaluationDomain::new(element_count).unwrap(); + let data = rand_data_elements(element_count, CHUNK_SIZE); + ( + bytes_to_polynomial_unchecked::(&data, domain), + domain, + ) + }) + .input_counter(move |_| ItemsCount::new(element_count)) + .bench_refs(|((evals, poly), domain)| { + for i in 0..element_count { + black_box( + generate_element_proof(i, poly, evals, &GLOBAL_PARAMETERS, *domain).unwrap(), + ); + } + }); +} + +// This is a test on how will perform by having a wrapping rayon on top of the proof computation +// ark libraries already use rayon underneath so no great improvements are probably come up from this. +// But it should help reusing the same thread pool for all jobs saving a little time. 
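// Related note: per the Cargo manifest above, the crate also exposes a `parallel`
// feature wiring ark-ff/ark-poly onto rayon, so these parallelized benches can be
// exercised with something like `cargo bench -p kzgrs --features parallel`.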
+#[allow(non_snake_case)] +#[divan::bench(args = [128, 256, 512, 1024], sample_count = 3, sample_size = 5)] +fn compute_parallelize_batch_proofs(bencher: Bencher, element_count: usize) { + bencher + .with_inputs(|| { + let domain = GeneralEvaluationDomain::new(element_count).unwrap(); + let data = rand_data_elements(element_count, CHUNK_SIZE); + ( + bytes_to_polynomial_unchecked::(&data, domain), + domain, + ) + }) + .input_counter(move |_| ItemsCount::new(element_count)) + .bench_refs(|((evals, poly), domain)| { + black_box((0..element_count).into_par_iter().for_each(|i| { + generate_element_proof(i, poly, evals, &GLOBAL_PARAMETERS, *domain).unwrap(); + })); + }); +} + +#[allow(non_snake_case)] +#[divan::bench] +fn verify_single_proof(bencher: Bencher) { + bencher + .with_inputs(|| { + let element_count = 10; + let domain = GeneralEvaluationDomain::new(element_count).unwrap(); + let data = rand_data_elements(element_count, CHUNK_SIZE); + let (eval, poly) = bytes_to_polynomial_unchecked::(&data, domain); + let commitment = commit_polynomial(&poly, &GLOBAL_PARAMETERS).unwrap(); + let proof = + generate_element_proof(0, &poly, &eval, &GLOBAL_PARAMETERS, domain).unwrap(); + (0usize, eval.evals[0], commitment, proof, domain) + }) + .input_counter(|_| ItemsCount::new(1usize)) + .bench_refs(|(index, elemnent, commitment, proof, domain)| { + black_box(verify_element_proof( + index.clone(), + elemnent, + commitment, + proof, + *domain, + &GLOBAL_PARAMETERS, + )) + }); +} diff --git a/nomos-da/kzgrs/src/common.rs b/nomos-da/kzgrs/src/common.rs new file mode 100644 index 00000000..35f7bc28 --- /dev/null +++ b/nomos-da/kzgrs/src/common.rs @@ -0,0 +1,176 @@ +use std::fmt; + +// std +// crates +use crate::{FieldElement, BYTES_PER_FIELD_ELEMENT}; +use ark_bls12_381::fr::Fr; +use ark_ff::Zero; +use ark_poly::domain::general::GeneralEvaluationDomain; +use ark_poly::evaluations::univariate::Evaluations; +use ark_poly::univariate::DensePolynomial; +use blst::BLST_ERROR; +use num_bigint::BigUint; +use thiserror::Error; +// internal + +#[derive(Error, Debug)] +pub struct BlstError(pub BLST_ERROR); + +impl fmt::Display for BlstError { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + match self.0 { + BLST_ERROR::BLST_SUCCESS => write!(f, "Operation successful"), + BLST_ERROR::BLST_BAD_ENCODING => write!(f, "Bad encoding"), + BLST_ERROR::BLST_POINT_NOT_ON_CURVE => write!(f, "Point not on curve"), + BLST_ERROR::BLST_POINT_NOT_IN_GROUP => write!(f, "Point not in group"), + BLST_ERROR::BLST_AGGR_TYPE_MISMATCH => write!(f, "Aggregate type mismatch"), + BLST_ERROR::BLST_VERIFY_FAIL => write!(f, "Verification failed"), + BLST_ERROR::BLST_PK_IS_INFINITY => write!(f, "Public key is infinity"), + BLST_ERROR::BLST_BAD_SCALAR => write!(f, "Bad scalar value"), + } + } +} + +impl From for KzgRsError { + fn from(err: BLST_ERROR) -> Self { + KzgRsError::BlstError(BlstError(err)) + } +} + +#[derive(Error, Debug)] +pub enum KzgRsError { + #[error("Data isn't properly padded, data len must match modulus {expected_modulus} but it is {current_size}")] + UnpaddedDataError { + expected_modulus: usize, + current_size: usize, + }, + #[error("ChunkSize should be <= 32 (bytes), got {0}")] + ChunkSizeTooBig(usize), + #[error("Not enough attestations, required {required} but received {received}")] + NotEnoughAttestations { required: usize, received: usize }, + #[error("Mismatch between number of attestations ({attestations_count}) and number of signers ({signers_count})")] + AttestationSignersMismatch { + attestations_count: 
usize, + signers_count: usize, + }, + #[error(transparent)] + PolyCommitError(#[from] ark_poly_commit::Error), + #[error("BLST error: {0}")] + BlstError(BlstError), +} + +/// Transform chunks of bytes (of size `CHUNK_SIZE`) into `Fr` which are considered evaluations of a +/// polynomial. +pub fn bytes_to_evaluations( + data: &[u8], + domain: GeneralEvaluationDomain, +) -> Evaluations { + assert!((data.len() % CHUNK_SIZE).is_zero()); + Evaluations::from_vec_and_domain( + data.chunks(CHUNK_SIZE) + .map( + // use little endian for convenience as shortening 1 byte (<32 supported) + // do not matter in this endianness + field_element_from_bytes_le, + ) + .collect(), + domain, + ) +} + +/// Transform chunks of bytes (of size `CHUNK_SIZE`) into `Fr` which are considered evaluations of a +/// polynomial. Then use FFT to transform that polynomial into coefficient form. +/// `CHUNK_SIZE` needs to be 31 (bytes) or less, otherwise it cannot be encoded. +/// The input data need to be padded, so it fits in a len modulus of `CHUNK_SIZE`. +/// Returns the polynomial in evaluation form and in coefficient form +pub fn bytes_to_polynomial( + data: &[u8], + domain: GeneralEvaluationDomain, +) -> Result<(Evaluations, DensePolynomial), KzgRsError> { + if CHUNK_SIZE > BYTES_PER_FIELD_ELEMENT { + return Err(KzgRsError::ChunkSizeTooBig(CHUNK_SIZE)); + } + if data.len() % CHUNK_SIZE != 0 { + return Err(KzgRsError::UnpaddedDataError { + expected_modulus: CHUNK_SIZE, + current_size: data.len(), + }); + } + Ok(bytes_to_polynomial_unchecked::(data, domain)) +} + +/// Transform chunks of bytes (of size `CHUNK_SIZE`) into `Fr` which are considered evaluations of a +/// polynomial. Then use FFT to transform that polynomial into coefficient form. +/// No extra checks are done for the caller. +/// Caller need to ensure that `CHUNK_SIZE` is not bigger than the underlying `Fr` element can be +/// decoded from. +pub fn bytes_to_polynomial_unchecked( + data: &[u8], + domain: GeneralEvaluationDomain, +) -> (Evaluations, DensePolynomial) { + let evals = bytes_to_evaluations::(data, domain); + let coefficients = evals.interpolate_by_ref(); + (evals, coefficients) +} + +/// Transform arbitrary bytes into a field element +/// This transformation is bounds unchecked, it's up to the caller to know if +/// data fits within the bls modulus. 
+/// Data len cannot be higher than `BYTES_PER_FIELD_ELEMENT` +pub fn field_element_from_bytes_le(b: &[u8]) -> FieldElement { + assert!(b.len() <= BYTES_PER_FIELD_ELEMENT); + FieldElement::from(BigUint::from_bytes_le(b)) +} + +#[cfg(test)] +mod test { + use super::{bytes_to_evaluations, bytes_to_polynomial, KzgRsError}; + use ark_bls12_381::fr::Fr; + use ark_ff::{BigInteger, PrimeField}; + use ark_poly::{EvaluationDomain, GeneralEvaluationDomain, Polynomial}; + use once_cell::sync::Lazy; + use rand::{thread_rng, Fill}; + + const CHUNK_SIZE: usize = 31; + static DOMAIN: Lazy> = + Lazy::new(|| GeneralEvaluationDomain::new(128).unwrap()); + #[test] + fn encode_random_polynomial() { + const N: usize = 100; + let mut bytes: [u8; CHUNK_SIZE * N] = [0; CHUNK_SIZE * N]; + let mut rng = thread_rng(); + bytes.try_fill(&mut rng).unwrap(); + let evals = bytes_to_evaluations::<31>(&bytes, *DOMAIN); + let (_, poly) = bytes_to_polynomial::<31>(&bytes, *DOMAIN).unwrap(); + for i in 0..N { + let eval_point = DOMAIN.element(i); + let point = poly.evaluate(&eval_point); + // check point is the same + assert_eq!(evals[i], point); + // check point bytes are the same + assert_eq!( + &bytes[CHUNK_SIZE * i..CHUNK_SIZE * i + CHUNK_SIZE], + &point.into_bigint().to_bytes_le()[..CHUNK_SIZE] + ) + } + } + + #[test] + fn encode_chunk_size_too_big() { + assert!(matches!( + bytes_to_polynomial::<33>(&[], *DOMAIN), + Err(KzgRsError::ChunkSizeTooBig(33)) + )); + } + + #[test] + fn encode_not_padded_data() { + assert!(matches!( + bytes_to_polynomial::<31>(&[0; 12], *DOMAIN), + Err(KzgRsError::UnpaddedDataError { + expected_modulus: 31, + current_size: 12 + }) + )); + } +} diff --git a/nomos-da/kzgrs/src/global_parameters.rs b/nomos-da/kzgrs/src/global_parameters.rs new file mode 100644 index 00000000..417e4120 --- /dev/null +++ b/nomos-da/kzgrs/src/global_parameters.rs @@ -0,0 +1,9 @@ +use super::GlobalParameters; +use ark_bls12_381::{fr::Fr, Bls12_381}; +use ark_poly::polynomial::univariate::DensePolynomial; +use ark_poly_commit::kzg10::KZG10; +use rand::Rng; + +pub fn global_parameters_from_randomness(rng: &mut R) -> GlobalParameters { + KZG10::>::setup(8192, true, rng).unwrap() +} diff --git a/nomos-da/kzgrs/src/kzg.rs b/nomos-da/kzgrs/src/kzg.rs new file mode 100644 index 00000000..8ff0230e --- /dev/null +++ b/nomos-da/kzgrs/src/kzg.rs @@ -0,0 +1,135 @@ +use crate::common::KzgRsError; +use crate::Evaluations; +use ark_bls12_381::{Bls12_381, Fr}; +use ark_ec::pairing::Pairing; +use ark_poly::univariate::DensePolynomial; +use ark_poly::{DenseUVPolynomial, EvaluationDomain, GeneralEvaluationDomain}; +use ark_poly_commit::kzg10::{Commitment, Powers, Proof, UniversalParams, KZG10}; +use num_traits::One; +use std::borrow::Cow; +use std::ops::{Mul, Neg}; + +/// Commit to a polynomial where each of the evaluations are over `w(i)` for the degree +/// of the polynomial being omega (`w`) the root of unity (2^x). 
+pub fn commit_polynomial( + polynomial: &DensePolynomial, + global_parameters: &UniversalParams, +) -> Result, KzgRsError> { + let roots_of_unity = Powers { + powers_of_g: Cow::Borrowed(&global_parameters.powers_of_g), + powers_of_gamma_g: Cow::Owned(vec![]), + }; + KZG10::commit(&roots_of_unity, polynomial, None, None) + .map_err(KzgRsError::PolyCommitError) + .map(|(commitment, _)| commitment) +} + +/// Compute a witness polynomial in that satisfies `witness(x) = (f(x)-v)/(x-u)` +pub fn generate_element_proof( + element_index: usize, + polynomial: &DensePolynomial, + evaluations: &Evaluations, + global_parameters: &UniversalParams, + domain: GeneralEvaluationDomain, +) -> Result, KzgRsError> { + let u = domain.element(element_index); + // Instead of evaluating over the polynomial, we can reuse the evaluation points from the rs encoding + // let v = polynomial.evaluate(&u); + let v = evaluations.evals[element_index]; + let f_x_v = polynomial + &DensePolynomial::::from_coefficients_vec(vec![-v]); + let x_u = DensePolynomial::::from_coefficients_vec(vec![-u, Fr::one()]); + let witness_polynomial: DensePolynomial<_> = &f_x_v / &x_u; + let proof = commit_polynomial(&witness_polynomial, global_parameters)?; + let proof = Proof { + w: proof.0, + random_v: None, + }; + Ok(proof) +} + +/// Verify proof for a single element +pub fn verify_element_proof( + element_index: usize, + element: &Fr, + commitment: &Commitment, + proof: &Proof, + domain: GeneralEvaluationDomain, + global_parameters: &UniversalParams, +) -> bool { + let u = domain.element(element_index); + let v = element; + let commitment_check_g1 = commitment.0 + global_parameters.powers_of_g[0].mul(v).neg(); + let proof_check_g2 = global_parameters.beta_h + global_parameters.h.mul(u).neg(); + let lhs = Bls12_381::pairing(commitment_check_g1, global_parameters.h); + let rhs = Bls12_381::pairing(proof.w, proof_check_g2); + lhs == rhs +} + +#[cfg(test)] +mod test { + use crate::common::{bytes_to_evaluations, bytes_to_polynomial}; + use crate::kzg::{commit_polynomial, generate_element_proof, verify_element_proof}; + use ark_bls12_381::{Bls12_381, Fr}; + use ark_poly::univariate::DensePolynomial; + use ark_poly::{DenseUVPolynomial, EvaluationDomain, GeneralEvaluationDomain}; + use ark_poly_commit::kzg10::{UniversalParams, KZG10}; + use once_cell::sync::Lazy; + use rand::{thread_rng, Fill}; + + const COEFFICIENTS_SIZE: usize = 16; + static GLOBAL_PARAMETERS: Lazy> = Lazy::new(|| { + let mut rng = rand::thread_rng(); + KZG10::>::setup( + crate::kzg::test::COEFFICIENTS_SIZE - 1, + true, + &mut rng, + ) + .unwrap() + }); + + static DOMAIN: Lazy> = + Lazy::new(|| GeneralEvaluationDomain::new(COEFFICIENTS_SIZE).unwrap()); + #[test] + fn test_poly_commit() { + let poly = DensePolynomial::from_coefficients_vec((0..10).map(|i| Fr::from(i)).collect()); + assert!(matches!( + commit_polynomial(&poly, &GLOBAL_PARAMETERS), + Ok(_) + )); + } + + #[test] + fn generate_proof_and_validate() { + let mut bytes: [u8; 310] = [0; 310]; + let mut rng = thread_rng(); + bytes.try_fill(&mut rng).unwrap(); + let evaluations = bytes_to_evaluations::<31>(&bytes, *DOMAIN).evals; + let (eval, poly) = bytes_to_polynomial::<31>(&bytes, *DOMAIN).unwrap(); + let commitment = commit_polynomial(&poly, &GLOBAL_PARAMETERS).unwrap(); + let proofs: Vec<_> = (0..10) + .map(|i| generate_element_proof(i, &poly, &eval, &GLOBAL_PARAMETERS, *DOMAIN).unwrap()) + .collect(); + for (i, (element, proof)) in evaluations.iter().zip(proofs.iter()).enumerate() { + // verifying works + 
assert!(verify_element_proof( + i, + element, + &commitment, + proof, + *DOMAIN, + &GLOBAL_PARAMETERS + )); + // verification fails for other items + for ii in i + 1..10 { + assert!(!verify_element_proof( + ii, + element, + &commitment, + proof, + *DOMAIN, + &GLOBAL_PARAMETERS + )); + } + } + } +} diff --git a/nomos-da/kzgrs/src/lib.rs b/nomos-da/kzgrs/src/lib.rs new file mode 100644 index 00000000..188bee30 --- /dev/null +++ b/nomos-da/kzgrs/src/lib.rs @@ -0,0 +1,27 @@ +pub mod common; +pub mod global_parameters; +pub mod kzg; +pub mod rs; + +use ark_bls12_381::{Bls12_381, Fr}; +use ark_poly::univariate::DensePolynomial; +use ark_poly::GeneralEvaluationDomain; +use ark_poly_commit::kzg10; +use ark_poly_commit::sonic_pc::UniversalParams; +use std::mem; + +pub use common::{bytes_to_evaluations, bytes_to_polynomial, KzgRsError}; +pub use global_parameters::global_parameters_from_randomness; +pub use kzg::{commit_polynomial, generate_element_proof, verify_element_proof}; +pub use rs::{decode, encode}; + +pub type Commitment = kzg10::Commitment; +pub type Proof = kzg10::Proof; +pub type FieldElement = ark_bls12_381::Fr; +pub type Polynomial = DensePolynomial; +pub type Evaluations = ark_poly::Evaluations; +pub type PolynomialEvaluationDomain = GeneralEvaluationDomain; + +pub type GlobalParameters = UniversalParams; + +pub const BYTES_PER_FIELD_ELEMENT: usize = mem::size_of::(); diff --git a/nomos-da/kzgrs/src/rs.rs b/nomos-da/kzgrs/src/rs.rs new file mode 100644 index 00000000..68c84184 --- /dev/null +++ b/nomos-da/kzgrs/src/rs.rs @@ -0,0 +1,126 @@ +use ark_bls12_381::Fr; +use ark_ff::{BigInteger, Field, PrimeField}; +use ark_poly::univariate::DensePolynomial; +use ark_poly::{ + DenseUVPolynomial, EvaluationDomain, Evaluations, GeneralEvaluationDomain, Polynomial, +}; +use num_traits::Zero; +use std::ops::{Mul, Neg}; + +/// Extend a polynomial over some factor `polynomial.len()*factor and return the original points +/// plus the extra ones. +/// `factor` need to be `>1` +pub fn encode( + polynomial: &DensePolynomial, + evaluations: &Evaluations, + factor: usize, + domain: GeneralEvaluationDomain, +) -> Evaluations { + assert!(factor > 1); + Evaluations::from_vec_and_domain( + (0..evaluations.evals.len() * factor) + .map(|i| polynomial.evaluate(&domain.element(i))) + .collect(), + domain, + ) +} + +/// Interpolate points into a polynomial, then evaluate the polynomial in the original evaluations +/// to recover the original data. +/// `domain` need to be the same domain of the original `evaluations` and `polynomial` used for encoding. +pub fn decode( + original_chunks_len: usize, + points: &[Option], + domain: GeneralEvaluationDomain, +) -> Evaluations { + let (points, roots_of_unity): (Vec, Vec) = points + .iter() + .enumerate() + .flat_map(|(i, e)| e.map(|e| (e, domain.element(i)))) + .unzip(); + let coeffs = lagrange_interpolate(&points, &roots_of_unity); + Evaluations::from_vec_and_domain( + (0..original_chunks_len) + .map(|i| coeffs.evaluate(&domain.element(i))) + .collect(), + domain, + ) +} + +/// Interpolate a set of points using lagrange interpolation and roots of unity +/// Warning!! Be aware that the mapping between points and roots of unity is the intended: +/// A polynomial `f(x)` is derived for `w_x` (root) mapping to p_x. `[(w_1, p_1)..(w_n, p_n)]` even +/// if points are missing it is important to keep the mapping integrity. 
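// A minimal end-to-end sketch of the kzgrs API exported above (illustrative only, not
// part of the patch): chunk bytes into a polynomial, commit, RS-extend the evaluations,
// prove and verify one element, then decode back. Sizes, the payload and the helper
// name are arbitrary placeholders; it assumes the caller depends on kzgrs, ark-poly
// and rand.
use ark_poly::EvaluationDomain;
use kzgrs::{
    bytes_to_polynomial, commit_polynomial, decode, encode, generate_element_proof,
    global_parameters_from_randomness, verify_element_proof, PolynomialEvaluationDomain,
};

fn kzgrs_roundtrip_sketch() {
    let mut rng = rand::thread_rng();
    // Trusted-setup parameters (8192 powers); slow to generate, so reuse them in real code.
    let params = global_parameters_from_randomness(&mut rng);
    // Domain sized for the 2x-extended evaluations: 8 original chunks -> 16 points.
    let domain = PolynomialEvaluationDomain::new(16).unwrap();

    // 8 chunks of 31 bytes; 31-byte chunks always decode below the BLS12-381 scalar modulus.
    let data = vec![7u8; 8 * 31];
    let (evals, poly) = bytes_to_polynomial::<31>(&data, domain).unwrap();
    let commitment = commit_polynomial(&poly, &params).unwrap();

    // Extend the original evaluation points by a factor of 2, then prove/verify element 0.
    let extended = encode(&poly, &evals, 2, domain);
    let proof = generate_element_proof(0, &poly, &extended, &params, domain).unwrap();
    assert!(verify_element_proof(
        0,
        &extended.evals[0],
        &commitment,
        &proof,
        domain,
        &params
    ));

    // Interpolate through the extended points and re-evaluate the original positions.
    let points: Vec<_> = extended.evals.iter().map(|p| Some(*p)).collect();
    let recovered = decode(8, &points, domain);
    assert_eq!(recovered.evals, evals.evals);
}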
+pub fn lagrange_interpolate(points: &[Fr], roots_of_unity: &[Fr]) -> DensePolynomial { + assert_eq!(points.len(), roots_of_unity.len()); + let mut result = DensePolynomial::from_coefficients_vec(vec![Fr::zero()]); + for i in 0..roots_of_unity.len() { + let mut summand = DensePolynomial::from_coefficients_vec(vec![points[i]]); + for j in 0..points.len() { + if i != j { + let weight_adjustment = + (roots_of_unity[i] - roots_of_unity[j]) + .inverse() + .expect( + "Roots of unity are/should not repeated. If this panics it means we have no coefficients enough in the evaluation domain" + ); + summand = summand.naive_mul(&DensePolynomial::from_coefficients_vec(vec![ + weight_adjustment.mul(roots_of_unity[j]).neg(), + weight_adjustment, + ])) + } + } + result = result + summand; + } + result +} + +/// Reconstruct bytes from the polynomial evaluation points using original chunk size and a set of points +pub fn points_to_bytes(points: &[Fr]) -> Vec { + fn point_to_buff(p: &Fr) -> impl Iterator { + p.into_bigint().to_bytes_le().into_iter().take(CHUNK_SIZE) + } + points + .iter() + .flat_map(point_to_buff::) + .collect() +} + +#[cfg(test)] +mod test { + use crate::common::bytes_to_polynomial; + use crate::rs::{decode, encode, points_to_bytes}; + use ark_bls12_381::Fr; + use ark_poly::{EvaluationDomain, GeneralEvaluationDomain}; + use once_cell::sync::Lazy; + use rand::{thread_rng, Fill}; + + const COEFFICIENTS_SIZE: usize = 32; + static DOMAIN: Lazy> = + Lazy::new(|| GeneralEvaluationDomain::new(COEFFICIENTS_SIZE).unwrap()); + + #[test] + fn test_encode_decode() { + let mut bytes: [u8; 310] = [0; 310]; + let mut rng = thread_rng(); + bytes.try_fill(&mut rng).unwrap(); + + let (evals, poly) = bytes_to_polynomial::<31>(&bytes, *DOMAIN).unwrap(); + + let encoded = encode(&poly, &evals, 2, *DOMAIN); + let mut encoded: Vec> = encoded.evals.into_iter().map(Some).collect(); + + let decoded = decode(10, &encoded, *DOMAIN); + let decoded_bytes = points_to_bytes::<31>(&decoded.evals); + assert_eq!(decoded_bytes, bytes); + + // check with missing pieces + + for i in (1..encoded.len()).step_by(2) { + encoded[i] = None; + } + + let decoded_bytes = points_to_bytes::<31>(&decoded.evals); + assert_eq!(decoded_bytes, bytes); + } +} diff --git a/nomos-da/reed-solomon/Cargo.toml b/nomos-da/reed-solomon/Cargo.toml deleted file mode 100644 index 843614dc..00000000 --- a/nomos-da/reed-solomon/Cargo.toml +++ /dev/null @@ -1,9 +0,0 @@ -[package] -name = "reed-solomon" -version = "0.1.0" -edition = "2021" - -# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html - -[dependencies] -reed-solomon-erasure = "6.0" \ No newline at end of file diff --git a/nomos-da/reed-solomon/src/lib.rs b/nomos-da/reed-solomon/src/lib.rs deleted file mode 100644 index c2289fdd..00000000 --- a/nomos-da/reed-solomon/src/lib.rs +++ /dev/null @@ -1,73 +0,0 @@ -use reed_solomon_erasure::{galois_8::ReedSolomon, Error}; - -/// Reed Sololomon encode the elements with a custom parity ratio -/// # Arguments -/// * `parity_ratio` - Ratio of parity elements over original elements size -/// * `elements` - Elements to encode -pub fn encode_elements(parity_ratio: usize, elements: &[u8]) -> Result, Error> { - let mut encoded = vec![vec![0]; elements.len() * (parity_ratio + 1)]; - for (i, &e) in elements.iter().enumerate() { - // review bytes encoding - encoded[i] = e.to_be_bytes().to_vec(); - } - let encoder = ReedSolomon::new(elements.len(), elements.len() * parity_ratio)?; - encoder.encode(&mut encoded)?; - 
Ok(encoded.into_iter().flatten().collect()) -} - -/// Reed solomon decode the elements with a custom parity ratio -/// # Arguments -/// * `original_size` - Original size of encoded elements -/// * `parity_ratio` - Ratio of parity elements over original elements size (must be the same as the one used for encoding) -/// * `elements` - Elements to decode -pub fn decode_from_elements( - original_size: usize, - parity_ratio: usize, - elements: &[Option], -) -> Result, Error> { - let mut elements: Vec<_> = elements - .iter() - .map(|e| e.map(|n| n.to_be_bytes().to_vec())) - .collect(); - let decoder = ReedSolomon::new(original_size, parity_ratio * original_size)?; - decoder.reconstruct(&mut elements)?; - Ok(elements - .into_iter() - .filter_map(|e: Option>| e.map(|n| u8::from_be_bytes(n.try_into().unwrap()))) - .collect()) -} - -#[cfg(test)] -mod test { - use reed_solomon_erasure::Error; - - #[test] - fn encode_with_ratio() { - let elements = vec![1, 2, 3, 4, 5, 6, 7, 8]; - let encoded = super::encode_elements(1, &elements).unwrap(); - // check intended size - assert_eq!(encoded.len(), 16); - // check elements - assert_eq!(&encoded[0..8], &elements); - } - - #[test] - fn decode_with_ratio() { - let elements = vec![1, 2, 3, 4, 5, 6, 7, 8]; - let encoded = super::encode_elements(1, &elements).unwrap(); - let mut encoded: Vec<_> = encoded.into_iter().map(Some).collect(); - encoded[4..12].copy_from_slice(&[None; 8]); - let decoded = super::decode_from_elements(8, 1, &encoded).unwrap(); - assert_eq!(decoded[0..8], elements); - } - - #[test] - fn decode_fails_with_insufficient_shards() { - let elements = vec![1, 2, 3, 4, 5, 6, 7, 8]; - let encoded = super::encode_elements(1, &elements).unwrap(); - let mut encoded: Vec<_> = encoded.into_iter().map(Some).collect(); - encoded[7..].copy_from_slice(&[None; 9]); - let decoded = super::decode_from_elements(8, 1, &encoded); - assert!(matches!(decoded, Err(Error::TooFewShardsPresent))); - } -} diff --git a/nomos-da/storage/Cargo.toml b/nomos-da/storage/Cargo.toml new file mode 100644 index 00000000..c62abb22 --- /dev/null +++ b/nomos-da/storage/Cargo.toml @@ -0,0 +1,10 @@ +[package] +name = "nomos-da-storage" +version = "0.1.0" +edition = "2021" + +[dependencies] +bytes = "1.2" +hex = "0.4.3" +tokio = { version = "1", features = ["fs", "io-util"] } +tracing = "0.1" diff --git a/nomos-da/storage/src/fs/mod.rs b/nomos-da/storage/src/fs/mod.rs new file mode 100644 index 00000000..23dc31b1 --- /dev/null +++ b/nomos-da/storage/src/fs/mod.rs @@ -0,0 +1,60 @@ +// std +use std::path::PathBuf; +// crates +use bytes::Bytes; +use tokio::{ + fs::{File, OpenOptions}, + io::{AsyncReadExt, AsyncWriteExt}, +}; +// internal + +// TODO: Rocksdb has a feature called BlobDB that handles largo blob storing, but further +// investigation needs to be done to see if rust wrapper supports it. 
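// A minimal usage sketch for the helpers below (illustrative, not part of the patch):
// write a blob under its hex-encoded id and read it back. The directory, id and the
// hypothetical function name are placeholders; a tokio runtime is assumed.
async fn blob_roundtrip_sketch() -> std::io::Result<()> {
    let dir = std::path::PathBuf::from("/tmp/nomos-da-blobs");
    let id = [0xABu8; 32];
    write_blob(dir.clone(), &id, b"payload").await?;
    // `load_blob` logs and returns `None` when the file is missing or unreadable.
    let loaded = load_blob(dir, &id).await;
    assert_eq!(loaded.as_deref(), Some(&b"payload"[..]));
    Ok(())
}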
+pub async fn load_blob(base_dir: PathBuf, blob_id: &[u8]) -> Option { + let blob_id = hex::encode(blob_id); + + let mut path = base_dir; + path.push(blob_id); + + let mut file = match File::open(path).await { + Ok(file) => file, + Err(e) => { + tracing::error!("Failed to open file: {}", e); + return None; + } + }; + + let mut contents = vec![]; + if let Err(e) = file.read_to_end(&mut contents).await { + tracing::error!("Failed to read file: {}", e); + return None; + } + + Some(Bytes::from(contents)) +} + +pub async fn write_blob( + base_dir: PathBuf, + blob_id: &[u8], + data: &[u8], +) -> Result<(), std::io::Error> { + let blob_id = hex::encode(blob_id); + + let mut path = base_dir; + path.push(blob_id); + + if let Some(parent) = path.parent() { + tokio::fs::create_dir_all(parent).await?; + } + + let mut file = OpenOptions::new() + .write(true) + .create(true) + // In an unlikely scenario when a file already exists, rewrite the + // contents. + .truncate(true) + .open(path) + .await?; + + file.write_all(data).await +} diff --git a/nomos-da/storage/src/lib.rs b/nomos-da/storage/src/lib.rs new file mode 100644 index 00000000..222e956d --- /dev/null +++ b/nomos-da/storage/src/lib.rs @@ -0,0 +1,2 @@ +pub mod fs; +pub mod rocksdb; diff --git a/nomos-da/storage/src/rocksdb/mod.rs b/nomos-da/storage/src/rocksdb/mod.rs new file mode 100644 index 00000000..6cb55845 --- /dev/null +++ b/nomos-da/storage/src/rocksdb/mod.rs @@ -0,0 +1,13 @@ +use bytes::{Bytes, BytesMut}; + +pub const DA_VID_KEY_PREFIX: &str = "da/vid/"; +pub const DA_ATTESTED_KEY_PREFIX: &str = "da/attested/"; + +pub fn key_bytes(prefix: &str, id: impl AsRef<[u8]>) -> Bytes { + let mut buffer = BytesMut::new(); + + buffer.extend_from_slice(prefix.as_bytes()); + buffer.extend_from_slice(id.as_ref()); + + buffer.freeze() +} diff --git a/nomos-services/api/Cargo.toml b/nomos-services/api/Cargo.toml index 95a87285..acab9f8c 100644 --- a/nomos-services/api/Cargo.toml +++ b/nomos-services/api/Cargo.toml @@ -15,7 +15,6 @@ tracing = "0.1" nomos-core = { path = "../../nomos-core" } cryptarchia-consensus = { path = "../cryptarchia-consensus" } nomos-network = { path = "../../nomos-services/network" } -nomos-da = { path = "../../nomos-services/data-availability" } nomos-mempool = { path = "../../nomos-services/mempool", features = [ "mock", "libp2p", diff --git a/nomos-services/api/src/http/consensus/cryptarchia.rs b/nomos-services/api/src/http/consensus/cryptarchia.rs index 7fff5f87..ba7a7cfe 100644 --- a/nomos-services/api/src/http/consensus/cryptarchia.rs +++ b/nomos-services/api/src/http/consensus/cryptarchia.rs @@ -9,17 +9,16 @@ use cryptarchia_consensus::{ network::adapters::libp2p::LibP2pAdapter as ConsensusNetworkAdapter, ConsensusMsg, CryptarchiaConsensus, CryptarchiaInfo, }; -use full_replication::Certificate; +use full_replication::{Certificate, VidCertificate}; use nomos_core::{ - da::{ - blob, - certificate::{self, select::FillSize as FillSizeWithBlobsCertificate}, - }, + da::certificate::{self, select::FillSize as FillSizeWithBlobsCertificate}, header::HeaderId, tx::{select::FillSize as FillSizeWithTx, Transaction}, }; use nomos_mempool::{ - backend::mockpool::MockPool, network::adapters::libp2p::Libp2pAdapter as MempoolNetworkAdapter, + backend::mockpool::MockPool, + da::verify::fullreplication::DaVerificationProvider as MempoolVerificationProvider, + network::adapters::libp2p::Libp2pAdapter as MempoolNetworkAdapter, }; use nomos_storage::backends::{rocksdb::RocksBackend, StorageSerde}; @@ -29,15 +28,13 @@ pub type Cryptarchia = 
CryptarchiaConsensus< MempoolNetworkAdapter::Hash>, MockPool< HeaderId, - Certificate, - <::Blob as blob::Blob>::Hash, - >, - MempoolNetworkAdapter< - Certificate, - <::Blob as blob::Blob>::Hash, + VidCertificate, + ::CertificateId, >, + MempoolNetworkAdapter::Id>, + MempoolVerificationProvider, FillSizeWithTx, - FillSizeWithBlobsCertificate, + FillSizeWithBlobsCertificate, RocksBackend, >; diff --git a/nomos-services/api/src/http/da.rs b/nomos-services/api/src/http/da.rs deleted file mode 100644 index 45c0ed66..00000000 --- a/nomos-services/api/src/http/da.rs +++ /dev/null @@ -1,74 +0,0 @@ -use full_replication::{AbsoluteNumber, Attestation, Blob, Certificate, FullReplication}; -use nomos_core::da::blob; -use nomos_core::header::HeaderId; -use nomos_da::{ - backend::memory_cache::BlobCache, network::adapters::libp2p::Libp2pAdapter as DaNetworkAdapter, - DaMsg, DataAvailabilityService, -}; -use nomos_mempool::da::service::DaMempoolService; -use nomos_mempool::{ - backend::mockpool::MockPool, network::adapters::libp2p::Libp2pAdapter as MempoolNetworkAdapter, - tx::service::openapi::Status, -}; -use nomos_mempool::{MempoolMetrics, MempoolMsg}; -use tokio::sync::oneshot; - -pub type MempoolServiceDa = DaMempoolService< - MempoolNetworkAdapter::Hash>, - MockPool::Hash>, ->; - -pub type DataAvailability = DataAvailabilityService< - FullReplication>, - BlobCache<::Hash, Blob>, - DaNetworkAdapter, ->; - -pub async fn da_mempool_metrics( - handle: &overwatch_rs::overwatch::handle::OverwatchHandle, -) -> Result { - let relay = handle.relay::().connect().await?; - let (sender, receiver) = oneshot::channel(); - relay - .send(MempoolMsg::Metrics { - reply_channel: sender, - }) - .await - .map_err(|(e, _)| e)?; - - Ok(receiver.await.unwrap()) -} - -pub async fn da_mempool_status( - handle: &overwatch_rs::overwatch::handle::OverwatchHandle, - items: Vec<::Hash>, -) -> Result>, super::DynError> { - let relay = handle.relay::().connect().await?; - let (sender, receiver) = oneshot::channel(); - relay - .send(MempoolMsg::Status { - items, - reply_channel: sender, - }) - .await - .map_err(|(e, _)| e)?; - - Ok(receiver.await.unwrap()) -} - -pub async fn da_blobs( - handle: &overwatch_rs::overwatch::handle::OverwatchHandle, - ids: Vec<::Hash>, -) -> Result, super::DynError> { - let relay = handle.relay::().connect().await?; - let (reply_channel, receiver) = oneshot::channel(); - relay - .send(DaMsg::Get { - ids: Box::new(ids.into_iter()), - reply_channel, - }) - .await - .map_err(|(e, _)| e)?; - - Ok(receiver.await?) 
-} diff --git a/nomos-services/api/src/http/mempool.rs b/nomos-services/api/src/http/mempool.rs index 870465ac..d1363480 100644 --- a/nomos-services/api/src/http/mempool.rs +++ b/nomos-services/api/src/http/mempool.rs @@ -1,8 +1,8 @@ use core::{fmt::Debug, hash::Hash}; -use nomos_core::header::HeaderId; +use nomos_core::{da::certificate::Certificate, header::HeaderId}; use nomos_mempool::{ - backend::mockpool::MockPool, network::NetworkAdapter, DaMempoolService, MempoolMsg, - TxMempoolService, + backend::mockpool::MockPool, network::NetworkAdapter, verify::MempoolVerificationProvider, + DaMempoolService, MempoolMsg, TxMempoolService, }; use nomos_network::backends::NetworkBackend; use tokio::sync::oneshot; @@ -14,7 +14,7 @@ pub async fn add_tx( ) -> Result<(), super::DynError> where N: NetworkBackend, - A: NetworkAdapter + Send + Sync + 'static, + A: NetworkAdapter + Send + Sync + 'static, A::Settings: Send + Sync, Item: Clone + Debug + Send + Sync + 'static + Hash, Key: Clone + Debug + Ord + Hash + 'static, @@ -28,7 +28,7 @@ where relay .send(MempoolMsg::Add { key: converter(&item), - item, + payload: item, reply_channel: sender, }) .await @@ -41,20 +41,25 @@ where } } -pub async fn add_cert( +pub async fn add_cert( handle: &overwatch_rs::overwatch::handle::OverwatchHandle, - item: Item, - converter: impl Fn(&Item) -> Key, + item: A::Payload, + converter: impl Fn(&A::Payload) -> Key, ) -> Result<(), super::DynError> where N: NetworkBackend, - A: NetworkAdapter + Send + Sync + 'static, + A: NetworkAdapter + Send + Sync + 'static, + A::Payload: Certificate + Into + Debug, A::Settings: Send + Sync, + V: MempoolVerificationProvider< + Payload = A::Payload, + Parameters = ::VerificationParameters, + >, Item: Clone + Debug + Send + Sync + 'static + Hash, Key: Clone + Debug + Ord + Hash + 'static, { let relay = handle - .relay::>>() + .relay::, V>>() .connect() .await?; let (sender, receiver) = oneshot::channel(); @@ -62,7 +67,7 @@ where relay .send(MempoolMsg::Add { key: converter(&item), - item, + payload: item, reply_channel: sender, }) .await diff --git a/nomos-services/api/src/http/mod.rs b/nomos-services/api/src/http/mod.rs index 6ab13b2a..d63a1021 100644 --- a/nomos-services/api/src/http/mod.rs +++ b/nomos-services/api/src/http/mod.rs @@ -1,7 +1,6 @@ pub type DynError = Box; pub mod cl; pub mod consensus; -pub mod da; pub mod libp2p; pub mod mempool; pub mod metrics; diff --git a/nomos-services/cryptarchia-consensus/src/lib.rs b/nomos-services/cryptarchia-consensus/src/lib.rs index 01f31983..d5951487 100644 --- a/nomos-services/cryptarchia-consensus/src/lib.rs +++ b/nomos-services/cryptarchia-consensus/src/lib.rs @@ -7,13 +7,16 @@ use cryptarchia_engine::Slot; use cryptarchia_ledger::{Coin, LeaderProof, LedgerState}; use futures::StreamExt; use network::{messages::NetworkMessage, NetworkAdapter}; -use nomos_core::da::certificate::{BlobCertificateSelect, Certificate}; +use nomos_core::da::certificate::{ + metadata::Metadata, vid::VidCertificate, BlobCertificateSelect, Certificate, +}; use nomos_core::header::{cryptarchia::Header, HeaderId}; use nomos_core::tx::{Transaction, TxSelect}; use nomos_core::{ block::{builder::BlockBuilder, Block}, header::cryptarchia::Builder, }; +use nomos_mempool::verify::MempoolVerificationProvider; use nomos_mempool::{ backend::MemPool, network::NetworkAdapter as MempoolAdapter, DaMempoolService, MempoolMsg, TxMempoolService, @@ -37,6 +40,8 @@ use tokio::sync::{broadcast, oneshot}; use tokio_stream::wrappers::IntervalStream; use tracing::{error, instrument}; 
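// Orientation note for the widened generic parameter list used below (a restatement of the
// trait bounds declared in this file, not new behaviour):
//   A                      - consensus network adapter
//   ClPool / ClPoolAdapter - transaction mempool and its network adapter
//   DaPool / DaPoolAdapter - DA mempool and its network adapter; the adapter's Payload is a
//                            Certificate that converts Into the pool's item (a VID certificate)
//   DaVerificationProvider - verification provider shared with the DA mempool service; it checks
//                            the Certificate payload against the certificate's VerificationParameters
//   TxS / BS               - transaction and blob-certificate selectors used when building blocks
//   Storage                - storage backend used to persist blocks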
+type MempoolRelay = OutboundRelay>; + // Limit the number of blocks returned by GetHeaders const HEADERS_LIMIT: usize = 512; @@ -128,14 +133,27 @@ impl CryptarchiaSettings { } } -pub struct CryptarchiaConsensus -where +pub struct CryptarchiaConsensus< + A, + ClPool, + ClPoolAdapter, + DaPool, + DaPoolAdapter, + DaVerificationProvider, + TxS, + BS, + Storage, +> where A: NetworkAdapter, - ClPoolAdapter: MempoolAdapter, + ClPoolAdapter: MempoolAdapter, ClPool: MemPool, DaPool: MemPool, - DaPoolAdapter: MempoolAdapter, - + DaPoolAdapter: MempoolAdapter, + DaPoolAdapter::Payload: Certificate + Into + Debug, + DaVerificationProvider: MempoolVerificationProvider< + Payload = DaPoolAdapter::Payload, + Parameters = ::VerificationParameters, + >, ClPool::Item: Clone + Eq + Hash + Debug + 'static, ClPool::Key: Debug + 'static, DaPool::Item: Clone + Eq + Hash + Debug + 'static, @@ -150,13 +168,24 @@ where // when implementing ServiceCore for CryptarchiaConsensus network_relay: Relay>, cl_mempool_relay: Relay>, - da_mempool_relay: Relay>, + da_mempool_relay: Relay>, block_subscription_sender: broadcast::Sender>, storage_relay: Relay>, } -impl ServiceData - for CryptarchiaConsensus +impl + ServiceData + for CryptarchiaConsensus< + A, + ClPool, + ClPoolAdapter, + DaPool, + DaPoolAdapter, + DaVerificationProvider, + TxS, + BS, + Storage, + > where A: NetworkAdapter, ClPool: MemPool, @@ -165,8 +194,13 @@ where DaPool: MemPool, DaPool::Item: Clone + Eq + Hash + Debug, DaPool::Key: Debug, - ClPoolAdapter: MempoolAdapter, - DaPoolAdapter: MempoolAdapter, + ClPoolAdapter: MempoolAdapter, + DaPoolAdapter: MempoolAdapter, + DaPoolAdapter::Payload: Certificate + Into + Debug, + DaVerificationProvider: MempoolVerificationProvider< + Payload = DaPoolAdapter::Payload, + Parameters = ::VerificationParameters, + >, TxS: TxSelect, BS: BlobCertificateSelect, Storage: StorageBackend + Send + Sync + 'static, @@ -179,8 +213,19 @@ where } #[async_trait::async_trait] -impl ServiceCore - for CryptarchiaConsensus +impl + ServiceCore + for CryptarchiaConsensus< + A, + ClPool, + ClPoolAdapter, + DaPool, + DaPoolAdapter, + DaVerificationProvider, + TxS, + BS, + Storage, + > where A: NetworkAdapter + Clone @@ -201,7 +246,9 @@ where + Send + Sync + 'static, - DaPool::Item: Certificate + // TODO: Change to specific certificate bounds here + DaPool::Item: VidCertificate + + Metadata + Debug + Clone + Eq @@ -213,8 +260,16 @@ where + 'static, ClPool::Key: Debug + Send + Sync, DaPool::Key: Debug + Send + Sync, - ClPoolAdapter: MempoolAdapter + Send + Sync + 'static, - DaPoolAdapter: MempoolAdapter + Send + Sync + 'static, + ClPoolAdapter: + MempoolAdapter + Send + Sync + 'static, + DaPoolAdapter: MempoolAdapter + Send + Sync + 'static, + DaPoolAdapter::Payload: Certificate + Into + Debug, + DaVerificationProvider: MempoolVerificationProvider< + Payload = DaPoolAdapter::Payload, + Parameters = ::VerificationParameters, + > + Send + + Sync + + 'static, TxS: TxSelect + Clone + Send + Sync + 'static, TxS::Settings: Send + Sync + 'static, BS: BlobCertificateSelect + Clone + Send + Sync + 'static, @@ -350,8 +405,18 @@ where } } -impl - CryptarchiaConsensus +impl + CryptarchiaConsensus< + A, + ClPool, + ClPoolAdapter, + DaPool, + DaPoolAdapter, + DaVerificationProvider, + TxS, + BS, + Storage, + > where A: NetworkAdapter + Clone + Send + Sync + 'static, ClPool: MemPool + Send + Sync + 'static, @@ -368,7 +433,8 @@ where + Send + Sync + 'static, - DaPool::Item: Certificate + DaPool::Item: VidCertificate + + Metadata + Debug + Clone + Eq @@ 
-382,8 +448,16 @@ where BS: BlobCertificateSelect + Clone + Send + Sync + 'static, ClPool::Key: Debug + Send + Sync, DaPool::Key: Debug + Send + Sync, - ClPoolAdapter: MempoolAdapter + Send + Sync + 'static, - DaPoolAdapter: MempoolAdapter + Send + Sync + 'static, + ClPoolAdapter: + MempoolAdapter + Send + Sync + 'static, + DaPoolAdapter: MempoolAdapter + Send + Sync + 'static, + DaPoolAdapter::Payload: Certificate + Into + Debug, + DaVerificationProvider: MempoolVerificationProvider< + Payload = DaPoolAdapter::Payload, + Parameters = ::VerificationParameters, + > + Send + + Sync + + 'static, Storage: StorageBackend + Send + Sync + 'static, { async fn should_stop_service(message: LifecycleMessage) -> bool { @@ -466,8 +540,12 @@ where leader: &mut leadership::Leader, block: Block, storage_relay: OutboundRelay>, - cl_mempool_relay: OutboundRelay>, - da_mempool_relay: OutboundRelay>, + cl_mempool_relay: OutboundRelay< + MempoolMsg, + >, + da_mempool_relay: OutboundRelay< + MempoolMsg, + >, block_broadcaster: &mut broadcast::Sender>, ) -> Cryptarchia { tracing::debug!("received proposal {:?}", block); @@ -489,7 +567,12 @@ where ) .await; - mark_in_block(da_mempool_relay, block.blobs().map(Certificate::hash), id).await; + mark_in_block( + da_mempool_relay, + block.blobs().map(VidCertificate::certificate_id), + id, + ) + .await; // store block let msg = >::new_store_message(header.id(), block.clone()); @@ -522,8 +605,8 @@ where proof: LeaderProof, tx_selector: TxS, blob_selector: BS, - cl_mempool_relay: OutboundRelay>, - da_mempool_relay: OutboundRelay>, + cl_mempool_relay: MempoolRelay, + da_mempool_relay: MempoolRelay, ) -> Option> { let mut output = None; let cl_txs = get_mempool_contents(cl_mempool_relay); @@ -575,8 +658,8 @@ pub struct CryptarchiaInfo { pub height: u64, } -async fn get_mempool_contents( - mempool: OutboundRelay>, +async fn get_mempool_contents( + mempool: OutboundRelay>, ) -> Result + Send>, tokio::sync::oneshot::error::RecvError> { let (reply_channel, rx) = tokio::sync::oneshot::channel(); @@ -591,8 +674,8 @@ async fn get_mempool_contents( rx.await } -async fn mark_in_block( - mempool: OutboundRelay>, +async fn mark_in_block( + mempool: OutboundRelay>, ids: impl Iterator, block: HeaderId, ) { diff --git a/nomos-services/data-availability/Cargo.toml b/nomos-services/data-availability/Cargo.toml deleted file mode 100644 index 069ae646..00000000 --- a/nomos-services/data-availability/Cargo.toml +++ /dev/null @@ -1,21 +0,0 @@ -[package] -name = "nomos-da" -version = "0.1.0" -edition = "2021" - -# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html - -[dependencies] -async-trait = "0.1" -futures = "0.3" -moka = { version = "0.11", features = ["future"] } -nomos-core = { path = "../../nomos-core" } -nomos-network = { path = "../network" } -overwatch-rs = { git = "https://github.com/logos-co/Overwatch", rev = "2f70806" } -serde = "1.0" -tracing = "0.1" -tokio = { version = "1", features = ["sync", "macros"] } -tokio-stream = "0.1" - -[features] -libp2p = ["nomos-network/libp2p"] diff --git a/nomos-services/data-availability/indexer/Cargo.toml b/nomos-services/data-availability/indexer/Cargo.toml new file mode 100644 index 00000000..d4513202 --- /dev/null +++ b/nomos-services/data-availability/indexer/Cargo.toml @@ -0,0 +1,25 @@ +[package] +name = "nomos-da-indexer" +version = "0.1.0" +edition = "2021" + +# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html + +[dependencies] +async-trait = "0.1" 
+bytes = "1.2" +futures = "0.3" +nomos-core = { path = "../../../nomos-core" } +nomos-da-storage = { path = "../../../nomos-da/storage" } +nomos-storage = { path = "../../../nomos-services/storage" } +nomos-mempool = { path = "../../../nomos-services/mempool" } +cryptarchia-consensus = { path = "../../../nomos-services/cryptarchia-consensus" } +overwatch-rs = { git = "https://github.com/logos-co/Overwatch", rev = "2f70806" } +overwatch-derive = { git = "https://github.com/logos-co/Overwatch", rev = "ac28d01" } +tokio = { version = "1", features = ["sync"] } +serde = { version = "1.0", features = ["derive"] } +tracing = "0.1" +tokio-stream = "0.1.15" + +[features] +rocksdb-backend = ["nomos-storage/rocksdb-backend"] diff --git a/nomos-services/data-availability/indexer/src/consensus/adapters/cryptarchia.rs b/nomos-services/data-availability/indexer/src/consensus/adapters/cryptarchia.rs new file mode 100644 index 00000000..c06d3cb5 --- /dev/null +++ b/nomos-services/data-availability/indexer/src/consensus/adapters/cryptarchia.rs @@ -0,0 +1,58 @@ +use cryptarchia_consensus::ConsensusMsg; +use futures::Stream; +use nomos_core::block::Block; +use overwatch_rs::services::relay::OutboundRelay; +use tokio::sync::oneshot; +use tokio_stream::{wrappers::BroadcastStream, StreamExt}; +use tracing::debug; + +use crate::consensus::ConsensusAdapter; + +pub struct CryptarchiaConsensusAdapter +where + Tx: Clone + Eq + std::hash::Hash, + C: Clone + Eq + std::hash::Hash, +{ + consensus_relay: OutboundRelay>>, +} + +#[async_trait::async_trait] +impl ConsensusAdapter for CryptarchiaConsensusAdapter +where + Tx: Clone + Eq + std::hash::Hash + Send + Sync + 'static + std::fmt::Debug, + C: Clone + Eq + std::hash::Hash + Send + Sync + 'static + std::fmt::Debug, +{ + type Tx = Tx; + type Cert = C; + + async fn new( + consensus_relay: OutboundRelay>>, + ) -> Self { + Self { consensus_relay } + } + + async fn block_stream( + &self, + ) -> Box> + Unpin + Send> { + let (sender, receiver) = oneshot::channel(); + + self.consensus_relay + .send(ConsensusMsg::BlockSubscribe { sender }) + .await + .expect("Failed to send BlockSubscribe message"); + + let broadcast_receiver = receiver + .await + .expect("Failed to receive broadcast receiver"); + + Box::new( + BroadcastStream::new(broadcast_receiver).filter_map(|result| match result { + Ok(block) => Some(block), + Err(e) => { + debug!("Unrecognized message: {e}"); + None + } + }), + ) + } +} diff --git a/nomos-services/data-availability/indexer/src/consensus/adapters/mod.rs b/nomos-services/data-availability/indexer/src/consensus/adapters/mod.rs new file mode 100644 index 00000000..85421e87 --- /dev/null +++ b/nomos-services/data-availability/indexer/src/consensus/adapters/mod.rs @@ -0,0 +1 @@ +pub mod cryptarchia; diff --git a/nomos-services/data-availability/indexer/src/consensus/mod.rs b/nomos-services/data-availability/indexer/src/consensus/mod.rs new file mode 100644 index 00000000..495fc6f8 --- /dev/null +++ b/nomos-services/data-availability/indexer/src/consensus/mod.rs @@ -0,0 +1,19 @@ +pub mod adapters; + +use cryptarchia_consensus::ConsensusMsg; +use futures::Stream; +use nomos_core::block::Block; +use overwatch_rs::services::relay::OutboundRelay; + +#[async_trait::async_trait] +pub trait ConsensusAdapter { + type Tx: Clone + Eq + std::hash::Hash; + type Cert: Clone + Eq + std::hash::Hash; + + async fn new(consensus_relay: OutboundRelay>>) + -> Self; + + async fn block_stream( + &self, + ) -> Box> + Unpin + Send>; +} diff --git 
a/nomos-services/data-availability/indexer/src/lib.rs b/nomos-services/data-availability/indexer/src/lib.rs new file mode 100644 index 00000000..879e3242 --- /dev/null +++ b/nomos-services/data-availability/indexer/src/lib.rs @@ -0,0 +1,425 @@ +pub mod consensus; +pub mod storage; + +use std::fmt::{Debug, Formatter}; +use std::hash::Hash; +use std::ops::Range; + +use consensus::ConsensusAdapter; +use cryptarchia_consensus::network::NetworkAdapter; +use cryptarchia_consensus::CryptarchiaConsensus; +use futures::StreamExt; +use nomos_core::block::Block; +use nomos_core::da::certificate::metadata::Metadata; +use nomos_core::da::certificate::vid::VidCertificate; +use nomos_core::da::certificate::{BlobCertificateSelect, Certificate}; +use nomos_core::header::HeaderId; +use nomos_core::tx::{Transaction, TxSelect}; +use nomos_mempool::verify::MempoolVerificationProvider; +use nomos_mempool::{backend::MemPool, network::NetworkAdapter as MempoolAdapter}; +use nomos_storage::backends::StorageBackend; +use nomos_storage::StorageService; +use overwatch_rs::services::handle::ServiceStateHandle; +use overwatch_rs::services::life_cycle::LifecycleMessage; +use overwatch_rs::services::relay::{Relay, RelayMessage}; +use overwatch_rs::services::state::{NoOperator, NoState}; +use overwatch_rs::services::{ServiceCore, ServiceData, ServiceId}; +use overwatch_rs::DynError; +use serde::de::DeserializeOwned; +use serde::Serialize; +use storage::DaStorageAdapter; +use tokio::sync::oneshot::Sender; +use tracing::error; + +pub type ConsensusRelay< + A, + ClPool, + ClPoolAdapter, + DaPool, + DaPoolAdapter, + DaVerificationProvider, + TxS, + BS, + Storage, +> = Relay< + CryptarchiaConsensus< + A, + ClPool, + ClPoolAdapter, + DaPool, + DaPoolAdapter, + DaVerificationProvider, + TxS, + BS, + Storage, + >, +>; + +pub struct DataIndexerService< + B, + DaStorage, + Consensus, + A, + ClPool, + ClPoolAdapter, + DaPool, + DaPoolAdapter, + DaVerificationProvider, + TxS, + BS, + ConsensusStorage, +> where + B: 'static, + A: NetworkAdapter, + ClPoolAdapter: MempoolAdapter, + ClPool: MemPool, + DaPool: MemPool, + DaPoolAdapter: MempoolAdapter, + DaPoolAdapter::Payload: Certificate + Into + Debug, + DaVerificationProvider: MempoolVerificationProvider< + Payload = DaPoolAdapter::Payload, + Parameters = ::VerificationParameters, + >, + ClPool::Item: Clone + Eq + Hash + Debug + 'static, + ClPool::Key: Debug + 'static, + DaPool::Item: Metadata + Clone + Eq + Hash + Debug + 'static, + DaPool::Key: Debug + 'static, + A::Backend: 'static, + TxS: TxSelect, + BS: BlobCertificateSelect, + DaStorage: DaStorageAdapter, + ConsensusStorage: StorageBackend + Send + Sync + 'static, +{ + service_state: ServiceStateHandle, + storage_relay: Relay>, + consensus_relay: ConsensusRelay< + A, + ClPool, + ClPoolAdapter, + DaPool, + DaPoolAdapter, + DaVerificationProvider, + TxS, + BS, + ConsensusStorage, + >, +} + +pub enum DaMsg { + AddIndex { + vid: V, + }, + GetRange { + app_id: ::AppId, + range: Range<::Index>, + reply_channel: Sender::Index, Option)>>, + }, +} + +impl Debug for DaMsg { + fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { + match self { + DaMsg::AddIndex { .. } => { + write!(f, "DaMsg::AddIndex") + } + DaMsg::GetRange { .. 
} => { + write!(f, "DaMsg::GetRange") + } + } + } +} + +impl RelayMessage for DaMsg {} + +impl< + B, + DaStorage, + Consensus, + A, + ClPool, + ClPoolAdapter, + DaPool, + DaPoolAdapter, + DaVerificationProvider, + TxS, + BS, + ConsensusStorage, + > ServiceData + for DataIndexerService< + B, + DaStorage, + Consensus, + A, + ClPool, + ClPoolAdapter, + DaPool, + DaPoolAdapter, + DaVerificationProvider, + TxS, + BS, + ConsensusStorage, + > +where + B: 'static, + A: NetworkAdapter, + ClPoolAdapter: MempoolAdapter, + ClPool: MemPool, + DaPool: MemPool, + DaPoolAdapter: MempoolAdapter, + DaPoolAdapter::Payload: Certificate + Into + Debug, + DaVerificationProvider: MempoolVerificationProvider< + Payload = DaPoolAdapter::Payload, + Parameters = ::VerificationParameters, + >, + ClPool::Item: Clone + Eq + Hash + Debug + 'static, + ClPool::Key: Debug + 'static, + DaPool::Item: Metadata + Clone + Eq + Hash + Debug + 'static, + DaPool::Key: Debug + 'static, + A::Backend: 'static, + TxS: TxSelect, + BS: BlobCertificateSelect, + DaStorage: DaStorageAdapter, + ConsensusStorage: StorageBackend + Send + Sync + 'static, +{ + const SERVICE_ID: ServiceId = "DaIndexer"; + type Settings = IndexerSettings; + type State = NoState; + type StateOperator = NoOperator; + type Message = DaMsg; +} + +impl< + B, + DaStorage, + Consensus, + A, + ClPool, + ClPoolAdapter, + DaPool, + DaPoolAdapter, + DaVerificationProvider, + TxS, + BS, + ConsensusStorage, + > + DataIndexerService< + B, + DaStorage, + Consensus, + A, + ClPool, + ClPoolAdapter, + DaPool, + DaPoolAdapter, + DaVerificationProvider, + TxS, + BS, + ConsensusStorage, + > +where + B: Send + Sync + 'static, + A: NetworkAdapter, + ClPoolAdapter: MempoolAdapter, + ClPool: MemPool, + DaPool: MemPool, + DaPoolAdapter: MempoolAdapter, + DaPoolAdapter::Payload: Certificate + Into + Debug, + DaVerificationProvider: MempoolVerificationProvider< + Payload = DaPoolAdapter::Payload, + Parameters = ::VerificationParameters, + >, + ClPool::Item: Clone + Eq + Hash + Debug + 'static, + ClPool::Key: Debug + 'static, + DaPool::Item: Metadata + Clone + Eq + Hash + Debug + 'static, + DaPool::Key: Debug + 'static, + ::Index: Send + Sync, + A::Backend: 'static, + TxS: TxSelect, + BS: BlobCertificateSelect, + DaStorage: DaStorageAdapter, + ConsensusStorage: StorageBackend + Send + Sync + 'static, +{ + async fn handle_new_block( + storage_adapter: &DaStorage, + block: Block, + ) -> Result<(), DynError> { + for vid in block.blobs() { + storage_adapter.add_index(vid).await?; + } + Ok(()) + } + + async fn handle_da_msg( + storage_adapter: &DaStorage, + msg: DaMsg, + ) -> Result<(), DynError> { + match msg { + DaMsg::AddIndex { vid } => storage_adapter.add_index(&vid).await, + DaMsg::GetRange { + app_id, + range, + reply_channel, + } => { + let stream = storage_adapter.get_range_stream(app_id, range).await; + let results = stream.collect::>().await; + + reply_channel + .send(results) + .map_err(|_| "Error sending range response".into()) + } + } + } + + async fn should_stop_service(message: LifecycleMessage) -> bool { + match message { + LifecycleMessage::Shutdown(sender) => { + if sender.send(()).is_err() { + error!( + "Error sending successful shutdown signal from service {}", + Self::SERVICE_ID + ); + } + true + } + LifecycleMessage::Kill => true, + } + } +} + +#[async_trait::async_trait] +impl< + B, + DaStorage, + Consensus, + A, + ClPool, + ClPoolAdapter, + DaPool, + DaPoolAdapter, + DaVerificationProvider, + TxS, + BS, + ConsensusStorage, + > ServiceCore + for DataIndexerService< + B, + 
DaStorage, + Consensus, + A, + ClPool, + ClPoolAdapter, + DaPool, + DaPoolAdapter, + DaVerificationProvider, + TxS, + BS, + ConsensusStorage, + > +where + B: Debug + Send + Sync, + A: NetworkAdapter, + ClPoolAdapter: MempoolAdapter, + ClPool: MemPool, + DaPool: MemPool, + DaPoolAdapter: MempoolAdapter, + DaPoolAdapter::Payload: Certificate + Into + Debug, + DaVerificationProvider: MempoolVerificationProvider< + Payload = DaPoolAdapter::Payload, + Parameters = ::VerificationParameters, + >, + ClPool::Key: Debug + 'static, + DaPool::Key: Debug + 'static, + ClPool::Item: Transaction + + Debug + + Clone + + Eq + + Hash + + Serialize + + serde::de::DeserializeOwned + + Send + + Sync + + 'static, + DaPool::Item: VidCertificate + + Metadata + + Debug + + Clone + + Eq + + Hash + + Serialize + + DeserializeOwned + + Send + + Sync + + 'static, + ::AppId: Send + Sync, + ::Index: Send + Sync, + + A::Backend: 'static, + TxS: TxSelect, + BS: BlobCertificateSelect, + DaStorage: DaStorageAdapter + Send + Sync + 'static, + DaStorage::Settings: Clone + Send + Sync + 'static, + ConsensusStorage: StorageBackend + Send + Sync + 'static, + Consensus: ConsensusAdapter + Send + Sync, +{ + fn init(service_state: ServiceStateHandle) -> Result { + let consensus_relay = service_state.overwatch_handle.relay(); + let storage_relay = service_state.overwatch_handle.relay(); + + Ok(Self { + service_state, + storage_relay, + consensus_relay, + }) + } + + async fn run(self) -> Result<(), DynError> { + let Self { + mut service_state, + consensus_relay, + storage_relay, + } = self; + let consensus_relay = consensus_relay + .connect() + .await + .expect("Relay connection with ConsensusService should succeed"); + let storage_relay = storage_relay + .connect() + .await + .expect("Relay connection with StorageService should succeed"); + + let consensus_adapter = Consensus::new(consensus_relay).await; + let mut consensus_blocks = consensus_adapter.block_stream().await; + let storage_adapter = DaStorage::new( + service_state.settings_reader.get_updated_settings().storage, + storage_relay, + ) + .await; + + let mut lifecycle_stream = service_state.lifecycle_handle.message_stream(); + loop { + tokio::select! 
{ + Some(block) = consensus_blocks.next() => { + if let Err(e) = Self::handle_new_block(&storage_adapter, block).await { + tracing::debug!("Failed to add a new received block: {e:?}"); + } + } + Some(msg) = service_state.inbound_relay.recv() => { + if let Err(e) = Self::handle_da_msg(&storage_adapter, msg).await { + tracing::debug!("Failed to handle da msg: {e:?}"); + } + } + Some(msg) = lifecycle_stream.next() => { + if Self::should_stop_service(msg).await { + break; + } + } + } + } + Ok(()) + } +} + +#[derive(Debug, Clone)] +pub struct IndexerSettings { + pub storage: S, +} diff --git a/nomos-services/data-availability/indexer/src/storage/adapters/mod.rs b/nomos-services/data-availability/indexer/src/storage/adapters/mod.rs new file mode 100644 index 00000000..663a2ff3 --- /dev/null +++ b/nomos-services/data-availability/indexer/src/storage/adapters/mod.rs @@ -0,0 +1,2 @@ +#[cfg(feature = "rocksdb-backend")] +pub mod rocksdb; diff --git a/nomos-services/data-availability/indexer/src/storage/adapters/rocksdb.rs b/nomos-services/data-availability/indexer/src/storage/adapters/rocksdb.rs new file mode 100644 index 00000000..493e0d51 --- /dev/null +++ b/nomos-services/data-availability/indexer/src/storage/adapters/rocksdb.rs @@ -0,0 +1,148 @@ +use std::path::PathBuf; +use std::{marker::PhantomData, ops::Range}; + +use bytes::Bytes; +use futures::{stream::FuturesUnordered, Stream}; +use nomos_core::da::certificate::{ + metadata::{Metadata, Next}, + vid::VidCertificate, +}; +use nomos_da_storage::fs::load_blob; +use nomos_da_storage::rocksdb::{key_bytes, DA_ATTESTED_KEY_PREFIX, DA_VID_KEY_PREFIX}; +use nomos_storage::{ + backends::{rocksdb::RocksBackend, StorageSerde}, + StorageMsg, StorageService, +}; +use overwatch_rs::{ + services::{relay::OutboundRelay, ServiceData}, + DynError, +}; + +use crate::storage::DaStorageAdapter; + +pub struct RocksAdapter +where + S: StorageSerde + Send + Sync + 'static, + V: VidCertificate + Metadata + Send + Sync, +{ + settings: RocksAdapterSettings, + storage_relay: OutboundRelay>>, + _vid: PhantomData, +} + +#[async_trait::async_trait] +impl DaStorageAdapter for RocksAdapter +where + S: StorageSerde + Send + Sync + 'static, + V: VidCertificate + Metadata + Send + Sync, + V::Index: AsRef<[u8]> + Next + Clone + PartialOrd + Send + Sync + 'static, + V::AppId: AsRef<[u8]> + Clone + Send + Sync + 'static, +{ + type Backend = RocksBackend; + type Blob = Bytes; + type VID = V; + type Settings = RocksAdapterSettings; + + async fn new( + settings: Self::Settings, + storage_relay: OutboundRelay< as ServiceData>::Message>, + ) -> Self { + Self { + settings, + storage_relay, + _vid: PhantomData, + } + } + + async fn add_index(&self, vid: &Self::VID) -> Result<(), DynError> { + let (app_id, idx) = vid.metadata(); + + // Check if VID in a block is something that the node've seen before. + let attested_key = key_bytes(DA_ATTESTED_KEY_PREFIX, vid.certificate_id()); + let (reply_tx, reply_rx) = tokio::sync::oneshot::channel(); + + self.storage_relay + .send(StorageMsg::Load { + key: attested_key, + reply_channel: reply_tx, + }) + .await + .expect("Failed to send load request to storage relay"); + + // If node haven't attested this vid, return early. + if reply_rx.await?.is_none() { + return Ok(()); + } + + let vid_key = key_bytes( + DA_VID_KEY_PREFIX, + [app_id.clone().as_ref(), idx.as_ref()].concat(), + ); + + // We are only persisting the id part of VID, the metadata can be derived from the key. 
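        // Resulting RocksDB layout used by this adapter (informal; the byte encodings are
        // whatever `AsRef<[u8]>` yields for the concrete app id / index types):
        //   "da/attested/" ++ certificate_id  -> marker checked above, written elsewhere when
        //                                        the blob was attested
        //   "da/vid/" ++ app_id ++ index      -> certificate_id bytes (written below)
        // `get_range_stream` rebuilds the "da/vid/" key from (app_id, index) and uses the stored
        // id to fetch the blob itself from the filesystem store via `load_blob`.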
+ let value = Bytes::from(vid.certificate_id().to_vec()); + + self.storage_relay + .send(StorageMsg::Store { + key: vid_key, + value, + }) + .await + .map_err(|(e, _)| e.into()) + } + + async fn get_range_stream( + &self, + app_id: ::AppId, + index_range: Range<::Index>, + ) -> Box::Index, Option)> + Unpin + Send> + { + let futures = FuturesUnordered::new(); + + // TODO: Using while loop here until `Step` trait is stable. + // + // For index_range to be used as Range with the stepping capabilities (eg. `for idx in + // item_range`), Metadata::Index needs to implement `Step` trait, which is unstable. + // See issue #42168 for more information. + let mut current_index = index_range.start.clone(); + while current_index <= index_range.end { + let idx = current_index.clone(); + let app_id = app_id.clone(); + let settings = self.settings.clone(); + + let (reply_tx, reply_rx) = tokio::sync::oneshot::channel(); + let key = key_bytes( + DA_VID_KEY_PREFIX, + [app_id.as_ref(), current_index.as_ref()].concat(), + ); + + self.storage_relay + .send(StorageMsg::Load { + key, + reply_channel: reply_tx, + }) + .await + .expect("Failed to send load request to storage relay"); + + futures.push(async move { + match reply_rx.await { + Ok(Some(id)) => (idx, load_blob(settings.blob_storage_directory, &id).await), + Ok(None) => (idx, None), + Err(_) => { + tracing::error!("Failed to receive storage response"); + (idx, None) + } + } + }); + + current_index = current_index.next(); + } + + Box::new(futures) + } +} + +#[derive(Debug, Clone)] +pub struct RocksAdapterSettings { + pub blob_storage_directory: PathBuf, +} diff --git a/nomos-services/data-availability/indexer/src/storage/mod.rs b/nomos-services/data-availability/indexer/src/storage/mod.rs new file mode 100644 index 00000000..183639f0 --- /dev/null +++ b/nomos-services/data-availability/indexer/src/storage/mod.rs @@ -0,0 +1,32 @@ +pub mod adapters; + +use std::ops::Range; + +use futures::Stream; +use nomos_core::da::certificate::{metadata::Metadata, vid::VidCertificate}; +use nomos_storage::{backends::StorageBackend, StorageService}; +use overwatch_rs::{ + services::{relay::OutboundRelay, ServiceData}, + DynError, +}; + +#[async_trait::async_trait] +pub trait DaStorageAdapter { + type Backend: StorageBackend + Send + Sync + 'static; + type Settings: Clone; + + type Blob; + type VID: VidCertificate; + + async fn new( + config: Self::Settings, + storage_relay: OutboundRelay< as ServiceData>::Message>, + ) -> Self; + + async fn add_index(&self, vid: &Self::VID) -> Result<(), DynError>; + async fn get_range_stream( + &self, + app_id: ::AppId, + range: Range<::Index>, + ) -> Box::Index, Option)> + Unpin + Send>; +} diff --git a/nomos-services/data-availability/src/backend/memory_cache.rs b/nomos-services/data-availability/src/backend/memory_cache.rs deleted file mode 100644 index 8b64e37c..00000000 --- a/nomos-services/data-availability/src/backend/memory_cache.rs +++ /dev/null @@ -1,68 +0,0 @@ -use crate::backend::{DaBackend, DaError}; -use moka::future::{Cache, CacheBuilder}; -use nomos_core::da::blob::Blob; -use serde::{Deserialize, Serialize}; -use std::time::Duration; - -#[derive(Clone, Copy, Debug, Serialize, Deserialize)] -pub struct BlobCacheSettings { - pub max_capacity: usize, - pub evicting_period: Duration, -} - -pub struct BlobCache(Cache); - -impl BlobCache -where - B: Clone + Blob + Send + Sync + 'static, - B::Hash: Send + Sync + 'static, -{ - pub fn new(settings: BlobCacheSettings) -> Self { - let BlobCacheSettings { - max_capacity, - 
evicting_period, - } = settings; - let cache = CacheBuilder::new(max_capacity as u64) - .time_to_live(evicting_period) - // can we leverage this to evict really old blobs? - .time_to_idle(evicting_period) - .build(); - Self(cache) - } - - pub async fn add(&self, blob: B) { - self.0.insert(blob.hash(), blob).await - } - - pub async fn remove(&self, hash: &B::Hash) { - self.0.remove(hash).await; - } -} - -#[async_trait::async_trait] -impl DaBackend for BlobCache -where - B: Clone + Blob + Send + Sync + 'static, - B::Hash: Send + Sync + 'static, -{ - type Settings = BlobCacheSettings; - type Blob = B; - - fn new(settings: Self::Settings) -> Self { - BlobCache::new(settings) - } - - async fn add_blob(&self, blob: Self::Blob) -> Result<(), DaError> { - self.add(blob).await; - Ok(()) - } - - async fn remove_blob(&self, blob: &::Hash) -> Result<(), DaError> { - self.remove(blob).await; - Ok(()) - } - - fn get_blob(&self, id: &::Hash) -> Option { - self.0.get(id) - } -} diff --git a/nomos-services/data-availability/src/backend/mod.rs b/nomos-services/data-availability/src/backend/mod.rs deleted file mode 100644 index 6ed72c33..00000000 --- a/nomos-services/data-availability/src/backend/mod.rs +++ /dev/null @@ -1,24 +0,0 @@ -pub mod memory_cache; - -use nomos_core::da::blob::Blob; -use overwatch_rs::DynError; - -#[derive(Debug)] -pub enum DaError { - Dyn(DynError), -} - -#[async_trait::async_trait] -pub trait DaBackend { - type Settings: Clone; - - type Blob: Blob; - - fn new(settings: Self::Settings) -> Self; - - async fn add_blob(&self, blob: Self::Blob) -> Result<(), DaError>; - - async fn remove_blob(&self, blob: &::Hash) -> Result<(), DaError>; - - fn get_blob(&self, id: &::Hash) -> Option; -} diff --git a/nomos-services/data-availability/src/lib.rs b/nomos-services/data-availability/src/lib.rs deleted file mode 100644 index edffb8fb..00000000 --- a/nomos-services/data-availability/src/lib.rs +++ /dev/null @@ -1,214 +0,0 @@ -pub mod backend; -pub mod network; - -// std -use overwatch_rs::DynError; -use std::fmt::{Debug, Formatter}; -// crates -use futures::StreamExt; -use serde::{Deserialize, Serialize}; -use tokio::sync::oneshot::Sender; -// internal -use crate::backend::{DaBackend, DaError}; -use crate::network::NetworkAdapter; -use nomos_core::da::{blob::Blob, DaProtocol}; -use nomos_network::NetworkService; -use overwatch_rs::services::handle::ServiceStateHandle; -use overwatch_rs::services::life_cycle::LifecycleMessage; -use overwatch_rs::services::relay::{Relay, RelayMessage}; -use overwatch_rs::services::state::{NoOperator, NoState}; -use overwatch_rs::services::{ServiceCore, ServiceData, ServiceId}; -use tracing::error; - -pub struct DataAvailabilityService -where - Protocol: DaProtocol, - Backend: DaBackend, - Backend::Blob: 'static, - Network: NetworkAdapter, -{ - service_state: ServiceStateHandle, - backend: Backend, - da: Protocol, - network_relay: Relay>, -} - -pub enum DaMsg { - RemoveBlobs { - blobs: Box::Hash> + Send>, - }, - Get { - ids: Box::Hash> + Send>, - reply_channel: Sender>, - }, -} - -impl Debug for DaMsg { - fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { - match self { - DaMsg::RemoveBlobs { .. } => { - write!(f, "DaMsg::RemoveBlobs") - } - DaMsg::Get { .. 
} => { - write!(f, "DaMsg::Get") - } - } - } -} - -impl RelayMessage for DaMsg {} - -impl ServiceData for DataAvailabilityService -where - Protocol: DaProtocol, - Backend: DaBackend, - Backend::Blob: 'static, - Network: NetworkAdapter, -{ - const SERVICE_ID: ServiceId = "DA"; - type Settings = Settings; - type State = NoState; - type StateOperator = NoOperator; - type Message = DaMsg; -} - -impl DataAvailabilityService -where - Protocol: DaProtocol + Send + Sync, - Backend: DaBackend + Send + Sync, - Protocol::Settings: Clone + Send + Sync + 'static, - Protocol::Blob: 'static, - Backend::Settings: Clone + Send + Sync + 'static, - Protocol::Blob: Send, - Protocol::Attestation: Send, - ::Hash: Debug + Send + Sync, - Network: - NetworkAdapter + Send + Sync, -{ - async fn handle_new_blob( - da: &Protocol, - backend: &Backend, - adapter: &Network, - blob: Protocol::Blob, - ) -> Result<(), DaError> { - // we need to handle the reply (verification + signature) - let attestation = da.attest(&blob); - backend.add_blob(blob).await?; - // we do not call `da.recv_blob` here because that is meant to - // be called to retrieve the original data, while here we're only interested - // in storing the blob. - // We might want to refactor the backend to be part of implementations of the - // Da protocol instead of this service and clear this confusion. - adapter - .send_attestation(attestation) - .await - .map_err(DaError::Dyn) - } - - async fn handle_da_msg(backend: &Backend, msg: DaMsg) -> Result<(), DaError> { - match msg { - DaMsg::RemoveBlobs { blobs } => { - futures::stream::iter(blobs) - .for_each_concurrent(None, |blob| async move { - if let Err(e) = backend.remove_blob(&blob).await { - tracing::debug!("Could not remove blob {blob:?} due to: {e:?}"); - } - }) - .await; - } - DaMsg::Get { ids, reply_channel } => { - let res = ids.filter_map(|id| backend.get_blob(&id)).collect(); - if reply_channel.send(res).is_err() { - tracing::error!("Could not returns blobs"); - } - } - } - Ok(()) - } - - async fn should_stop_service(message: LifecycleMessage) -> bool { - match message { - LifecycleMessage::Shutdown(sender) => { - if sender.send(()).is_err() { - error!( - "Error sending successful shutdown signal from service {}", - Self::SERVICE_ID - ); - } - true - } - LifecycleMessage::Kill => true, - } - } -} - -#[async_trait::async_trait] -impl ServiceCore for DataAvailabilityService -where - Protocol: DaProtocol + Send + Sync, - Backend: DaBackend + Send + Sync, - Protocol::Settings: Clone + Send + Sync + 'static, - Backend::Settings: Clone + Send + Sync + 'static, - Protocol::Blob: Send, - Protocol::Attestation: Send, - ::Hash: Debug + Send + Sync, - Network: - NetworkAdapter + Send + Sync, -{ - fn init(service_state: ServiceStateHandle) -> Result { - let network_relay = service_state.overwatch_handle.relay(); - let settings = service_state.settings_reader.get_updated_settings(); - let backend = Backend::new(settings.backend); - let da = Protocol::new(settings.da_protocol); - Ok(Self { - service_state, - backend, - da, - network_relay, - }) - } - - async fn run(self) -> Result<(), DynError> { - let Self { - mut service_state, - backend, - da, - network_relay, - } = self; - - let network_relay = network_relay - .connect() - .await - .expect("Relay connection with NetworkService should succeed"); - - let adapter = Network::new(network_relay).await; - let mut network_blobs = adapter.blob_stream().await; - let mut lifecycle_stream = service_state.lifecycle_handle.message_stream(); - loop { - tokio::select! 
{ - Some(blob) = network_blobs.next() => { - if let Err(e) = Self::handle_new_blob(&da, &backend, &adapter, blob).await { - tracing::debug!("Failed to add a new received blob: {e:?}"); - } - } - Some(msg) = service_state.inbound_relay.recv() => { - if let Err(e) = Self::handle_da_msg(&backend, msg).await { - tracing::debug!("Failed to handle da msg: {e:?}"); - } - } - Some(msg) = lifecycle_stream.next() => { - if Self::should_stop_service(msg).await { - break; - } - } - } - } - Ok(()) - } -} - -#[derive(Debug, Clone, Serialize, Deserialize, Default)] -pub struct Settings { - pub da_protocol: P, - pub backend: B, -} diff --git a/nomos-services/data-availability/tests/Cargo.toml b/nomos-services/data-availability/tests/Cargo.toml new file mode 100644 index 00000000..58bc1014 --- /dev/null +++ b/nomos-services/data-availability/tests/Cargo.toml @@ -0,0 +1,35 @@ +[package] +name = "nomos-da-tests" +version = "0.1.0" +edition = "2021" + +[dependencies] +blst = "0.3.11" +bytes = "1.2" +cryptarchia-consensus = { path = "../../../nomos-services/cryptarchia-consensus" } +cryptarchia-engine = { path = "../../../consensus/cryptarchia-engine", features = ["serde"] } +cryptarchia-ledger = { path = "../../../ledger/cryptarchia-ledger", features = ["serde"] } +full-replication = { path = "../../../nomos-da/full-replication" } +kzgrs-backend = { path = "../../../nomos-da/kzgrs-backend" } +nomos-core = { path = "../../../nomos-core" } +nomos-da-indexer = { path = "../indexer", features = ["rocksdb-backend"] } +nomos-da-verifier = { path = "../verifier", features = ["rocksdb-backend", "libp2p"] } +nomos-da-storage = { path = "../../../nomos-da/storage" } +nomos-node = { path = "../../../nodes/nomos-node" } +nomos-mempool = { path = "../../../nomos-services/mempool" } +nomos-storage = { path = "../../../nomos-services/storage", features = ["rocksdb-backend"] } +nomos-log = { path = "../../log" } +nomos-network = { path = "../../network", features = ["mock"] } +nomos-libp2p = { path = "../../../nomos-libp2p" } +overwatch-rs = { git = "https://github.com/logos-co/Overwatch", rev = "2f70806" } +overwatch-derive = { git = "https://github.com/logos-co/Overwatch", rev = "ac28d01" } +tokio = { version = "1", features = ["sync"] } +tempfile = "3.6" +tracing = "0.1" +time = "0.3" +rand = "0.8" + +[features] +default = [] +libp2p = [] +mixnet = [] diff --git a/nomos-services/data-availability/tests/src/common.rs b/nomos-services/data-availability/tests/src/common.rs new file mode 100644 index 00000000..0a0530e4 --- /dev/null +++ b/nomos-services/data-availability/tests/src/common.rs @@ -0,0 +1,88 @@ +use bytes::Bytes; +use full_replication::{Certificate, VidCertificate}; +use kzgrs_backend::common::attestation::Attestation; +use kzgrs_backend::common::blob::DaBlob; +use nomos_core::{da::certificate, header::HeaderId, tx::Transaction}; +use nomos_da_indexer::consensus::adapters::cryptarchia::CryptarchiaConsensusAdapter; +use nomos_da_indexer::storage::adapters::rocksdb::RocksAdapter as IndexerStorageAdapter; +use nomos_da_indexer::DataIndexerService; +use nomos_da_verifier::backend::kzgrs::KzgrsDaVerifier; +use nomos_da_verifier::network::adapters::libp2p::Libp2pAdapter; +use nomos_da_verifier::storage::adapters::rocksdb::RocksAdapter as VerifierStorageAdapter; +use nomos_da_verifier::DaVerifierService; +use nomos_libp2p::{Multiaddr, Swarm, SwarmConfig}; +use nomos_mempool::da::verify::fullreplication::DaVerificationProvider as MempoolVerificationProvider; +use nomos_mempool::network::adapters::libp2p::Libp2pAdapter as 
MempoolNetworkAdapter; +use nomos_mempool::{backend::mockpool::MockPool, TxMempoolService}; +use nomos_storage::backends::rocksdb::RocksBackend; + +pub use nomos_core::{ + da::certificate::select::FillSize as FillSizeWithBlobsCertificate, + tx::select::FillSize as FillSizeWithTx, +}; +use nomos_mempool::da::service::DaMempoolService; +use nomos_node::{Tx, Wire}; + +pub(crate) type Cryptarchia = cryptarchia_consensus::CryptarchiaConsensus< + cryptarchia_consensus::network::adapters::libp2p::LibP2pAdapter, + MockPool::Hash>, + MempoolNetworkAdapter::Hash>, + MockPool< + HeaderId, + VidCertificate, + ::CertificateId, + >, + MempoolNetworkAdapter::Id>, + MempoolVerificationProvider, + FillSizeWithTx, + FillSizeWithBlobsCertificate, + RocksBackend, +>; + +pub(crate) type DaIndexer = DataIndexerService< + // Indexer specific. + Bytes, + IndexerStorageAdapter, + CryptarchiaConsensusAdapter, + // Cryptarchia specific, should be the same as in `Cryptarchia` type above. + cryptarchia_consensus::network::adapters::libp2p::LibP2pAdapter, + MockPool::Hash>, + MempoolNetworkAdapter::Hash>, + MockPool< + HeaderId, + VidCertificate, + ::CertificateId, + >, + MempoolNetworkAdapter::Id>, + MempoolVerificationProvider, + FillSizeWithTx, + FillSizeWithBlobsCertificate, + RocksBackend, +>; + +pub(crate) type TxMempool = TxMempoolService< + MempoolNetworkAdapter::Hash>, + MockPool::Hash>, +>; + +pub(crate) type DaMempool = DaMempoolService< + MempoolNetworkAdapter::Id>, + MockPool< + HeaderId, + VidCertificate, + ::CertificateId, + >, + MempoolVerificationProvider, +>; + +pub(crate) type DaVerifier = DaVerifierService< + KzgrsDaVerifier, + Libp2pAdapter, + VerifierStorageAdapter, +>; + +pub(crate) const MB16: usize = 1024 * 1024 * 16; + +pub fn node_address(config: &SwarmConfig) -> Multiaddr { + Swarm::multiaddr(std::net::Ipv4Addr::new(127, 0, 0, 1), config.port) +} diff --git a/nomos-services/data-availability/tests/src/indexer_integration.rs b/nomos-services/data-availability/tests/src/indexer_integration.rs new file mode 100644 index 00000000..64b5170d --- /dev/null +++ b/nomos-services/data-availability/tests/src/indexer_integration.rs @@ -0,0 +1,270 @@ +use std::path::PathBuf; +use std::sync::atomic::AtomicBool; +use std::sync::atomic::Ordering::SeqCst; +use std::sync::Arc; +use std::time::Duration; + +use bytes::Bytes; +use cryptarchia_consensus::TimeConfig; +use cryptarchia_ledger::{Coin, LedgerState}; +use full_replication::attestation::Attestation; +use full_replication::{Certificate, VidCertificate}; +use nomos_core::da::certificate::vid::VidCertificate as _; +use nomos_core::da::certificate::Certificate as _; +use nomos_core::da::certificate::CertificateStrategy; +use nomos_core::da::Signer; +use nomos_core::{da::certificate, tx::Transaction}; +use nomos_da_indexer::storage::adapters::rocksdb::RocksAdapterSettings; +use nomos_da_indexer::IndexerSettings; +use nomos_da_storage::fs::write_blob; +use nomos_libp2p::{Multiaddr, SwarmConfig}; +use nomos_mempool::network::adapters::libp2p::Settings as AdapterSettings; +use nomos_mempool::{DaMempoolSettings, TxMempoolSettings}; +use nomos_network::backends::libp2p::{Libp2p as NetworkBackend, Libp2pConfig}; +use nomos_network::{NetworkConfig, NetworkService}; +use nomos_node::{Tx, Wire}; +use nomos_storage::{backends::rocksdb::RocksBackend, StorageService}; +use overwatch_derive::*; +use overwatch_rs::overwatch::{Overwatch, OverwatchRunner}; +use overwatch_rs::services::handle::ServiceHandle; +use rand::{thread_rng, Rng}; +use tempfile::{NamedTempFile, 
TempDir}; +use time::OffsetDateTime; + +use crate::common::*; + +#[derive(Services)] +struct IndexerNode { + network: ServiceHandle>, + cl_mempool: ServiceHandle, + da_mempool: ServiceHandle, + storage: ServiceHandle>>, + cryptarchia: ServiceHandle, + indexer: ServiceHandle, +} + +fn new_node( + coin: &Coin, + ledger_config: &cryptarchia_ledger::Config, + genesis_state: &LedgerState, + time_config: &TimeConfig, + swarm_config: &SwarmConfig, + db_path: PathBuf, + blobs_dir: &PathBuf, + initial_peers: Vec, +) -> Overwatch { + OverwatchRunner::::run( + IndexerNodeServiceSettings { + network: NetworkConfig { + backend: Libp2pConfig { + inner: swarm_config.clone(), + initial_peers, + }, + }, + cl_mempool: TxMempoolSettings { + backend: (), + network: AdapterSettings { + topic: String::from(nomos_node::CL_TOPIC), + id: ::hash, + }, + registry: None, + }, + da_mempool: DaMempoolSettings { + backend: (), + network: AdapterSettings { + topic: String::from(nomos_node::DA_TOPIC), + id: ::id, + }, + verification_provider: full_replication::CertificateVerificationParameters { + threshold: 0, + }, + registry: None, + }, + storage: nomos_storage::backends::rocksdb::RocksBackendSettings { + db_path, + read_only: false, + column_family: Some("blocks".into()), + }, + indexer: IndexerSettings { + storage: RocksAdapterSettings { + blob_storage_directory: blobs_dir.clone(), + }, + }, + cryptarchia: cryptarchia_consensus::CryptarchiaSettings { + transaction_selector_settings: (), + blob_selector_settings: (), + config: ledger_config.clone(), + genesis_state: genesis_state.clone(), + time: time_config.clone(), + coins: vec![coin.clone()], + }, + }, + None, + ) + .map_err(|e| eprintln!("Error encountered: {}", e)) + .unwrap() +} + +// TODO: When verifier is implemented this test should be removed and a new one +// performed in integration tests crate using the real node. 
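// Outline of the flow exercised by the test below:
//   1. spawn two connected nodes and grab node1's DA mempool, storage and indexer relays;
//   2. mock the attestation step: write the blob bytes under the certificate id in the shared
//      blob directory and put a "da/attested/" marker keyed by the blob hash into node1's store;
//   3. push the full-replication certificate into the DA mempool and wait for it to be included
//      in a block, at which point the indexer (following the consensus block stream) writes the
//      "da/vid/" entry;
//   4. ask the indexer for the index range 0..1 (treated as inclusive by the storage adapter)
//      and expect the blob bytes at index 0 and None at index 1.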
+#[test] +fn test_indexer() { + let performed_tx = Arc::new(AtomicBool::new(false)); + let performed_rx = performed_tx.clone(); + let is_success_tx = Arc::new(AtomicBool::new(false)); + let is_success_rx = is_success_tx.clone(); + + let mut ids = vec![[0; 32]; 2]; + for id in &mut ids { + thread_rng().fill(id); + } + + let coins = ids + .iter() + .map(|&id| Coin::new(id, id.into(), 1.into())) + .collect::>(); + let genesis_state = LedgerState::from_commitments( + coins.iter().map(|c| c.commitment()), + (ids.len() as u32).into(), + ); + let ledger_config = cryptarchia_ledger::Config { + epoch_stake_distribution_stabilization: 3, + epoch_period_nonce_buffer: 3, + epoch_period_nonce_stabilization: 4, + consensus_config: cryptarchia_engine::Config { + security_param: 10, + active_slot_coeff: 0.9, + }, + }; + let time_config = TimeConfig { + slot_duration: Duration::from_secs(1), + chain_start_time: OffsetDateTime::now_utc(), + }; + + let swarm_config1 = SwarmConfig { + port: 7771, + ..Default::default() + }; + let swarm_config2 = SwarmConfig { + port: 7772, + ..Default::default() + }; + + let blobs_dir = TempDir::new().unwrap().path().to_path_buf(); + + let node1 = new_node( + &coins[0], + &ledger_config, + &genesis_state, + &time_config, + &swarm_config1, + NamedTempFile::new().unwrap().path().to_path_buf(), + &blobs_dir, + vec![node_address(&swarm_config2)], + ); + + let _node2 = new_node( + &coins[1], + &ledger_config, + &genesis_state, + &time_config, + &swarm_config2, + NamedTempFile::new().unwrap().path().to_path_buf(), + &blobs_dir, + vec![node_address(&swarm_config1)], + ); + + let mempool = node1.handle().relay::(); + let storage = node1.handle().relay::>>(); + let indexer = node1.handle().relay::(); + + let blob_hash = [9u8; 32]; + let app_id = [7u8; 32]; + let index = 0.into(); + + let attestation = Attestation::new_signed(blob_hash, ids[0], &MockKeyPair); + let certificate_strategy = full_replication::AbsoluteNumber::new(1); + let cert = certificate_strategy.build(vec![attestation], app_id, index); + let cert_id = cert.id(); + let vid: VidCertificate = cert.clone().into(); + let range = 0.into()..1.into(); // get idx 0 and 1. + + // Mock attestation step where blob is persisted in nodes blob storage. + let rt = tokio::runtime::Runtime::new().unwrap(); + rt.block_on(write_blob( + blobs_dir, + vid.certificate_id().as_ref(), + b"blob", + )) + .unwrap(); + + node1.spawn(async move { + let mempool_outbound = mempool.connect().await.unwrap(); + let storage_outbound = storage.connect().await.unwrap(); + let indexer_outbound = indexer.connect().await.unwrap(); + + // Mock attested blob by writting directly into the da storage. + let mut attested_key = Vec::from(b"da/attested/" as &[u8]); + attested_key.extend_from_slice(&blob_hash); + + storage_outbound + .send(nomos_storage::StorageMsg::Store { + key: attested_key.into(), + value: Bytes::new(), + }) + .await + .unwrap(); + + // Put cert into the mempool. + let (mempool_tx, mempool_rx) = tokio::sync::oneshot::channel(); + mempool_outbound + .send(nomos_mempool::MempoolMsg::Add { + payload: cert, + key: cert_id, + reply_channel: mempool_tx, + }) + .await + .unwrap(); + let _ = mempool_rx.await.unwrap(); + + // Wait for block in the network. + tokio::time::sleep(Duration::from_secs(2)).await; + + // Request range of vids from indexer. 
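            // The reply is a Vec of (index, optional blob bytes) pairs covering the requested
            // range; only indices whose certificate was both attested and indexed resolve to
            // Some(..), which is what the sorting and checks below rely on.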
+ let (indexer_tx, indexer_rx) = tokio::sync::oneshot::channel(); + indexer_outbound + .send(nomos_da_indexer::DaMsg::GetRange { + app_id, + range, + reply_channel: indexer_tx, + }) + .await + .unwrap(); + let mut app_id_blobs = indexer_rx.await.unwrap(); + + // Since we've only attested to certificate at idx 0, the first + // item should have "some" data, other indexes should be None. + app_id_blobs.sort_by(|(a, _), (b, _)| a.partial_cmp(b).unwrap()); + let app_id_blobs = app_id_blobs.iter().map(|(_, b)| b).collect::>(); + if let Some(blob) = app_id_blobs[0] { + if **blob == *b"blob" && app_id_blobs[1].is_none() { + is_success_tx.store(true, SeqCst); + } + } + + performed_tx.store(true, SeqCst); + }); + + while !performed_rx.load(SeqCst) { + std::thread::sleep(std::time::Duration::from_millis(200)); + } + assert!(is_success_rx.load(SeqCst)); +} + +struct MockKeyPair; + +impl Signer for MockKeyPair { + fn sign(&self, _message: &[u8]) -> Vec { + vec![] + } +} diff --git a/nomos-services/data-availability/tests/src/lib.rs b/nomos-services/data-availability/tests/src/lib.rs new file mode 100644 index 00000000..03302be9 --- /dev/null +++ b/nomos-services/data-availability/tests/src/lib.rs @@ -0,0 +1,9 @@ +// Networking is not essential for verifier and indexer tests. +// Libp2p network is chosen for consensus requirement, mixnet is ignored. + +#[cfg(all(test, feature = "libp2p", not(feature = "mixnet")))] +mod common; +#[cfg(all(test, feature = "libp2p", not(feature = "mixnet")))] +mod indexer_integration; +#[cfg(all(test, feature = "libp2p", not(feature = "mixnet")))] +mod verifier_integration; diff --git a/nomos-services/data-availability/tests/src/verifier_integration.rs b/nomos-services/data-availability/tests/src/verifier_integration.rs new file mode 100644 index 00000000..3f4f16b5 --- /dev/null +++ b/nomos-services/data-availability/tests/src/verifier_integration.rs @@ -0,0 +1,301 @@ +use std::path::PathBuf; +use std::sync::atomic::AtomicBool; +use std::sync::atomic::Ordering::SeqCst; +use std::sync::Arc; +use std::time::Duration; + +use cryptarchia_consensus::TimeConfig; +use cryptarchia_ledger::{Coin, LedgerState}; +use full_replication::Certificate; +use kzgrs_backend::common::blob::DaBlob; +use kzgrs_backend::encoder::{DaEncoder, DaEncoderParams}; +use nomos_core::{da::certificate, tx::Transaction}; +use nomos_da_indexer::storage::adapters::rocksdb::RocksAdapterSettings as IndexerStorageSettings; +use nomos_da_indexer::IndexerSettings; +use nomos_da_verifier::backend::kzgrs::KzgrsDaVerifierSettings; +use nomos_da_verifier::storage::adapters::rocksdb::RocksAdapterSettings as VerifierStorageSettings; +use nomos_da_verifier::DaVerifierServiceSettings; +use nomos_libp2p::{Multiaddr, SwarmConfig}; +use nomos_mempool::network::adapters::libp2p::Settings as AdapterSettings; +use nomos_mempool::{DaMempoolSettings, TxMempoolSettings}; +use nomos_network::backends::libp2p::{Libp2p as NetworkBackend, Libp2pConfig}; +use nomos_network::{NetworkConfig, NetworkService}; +use nomos_node::{Tx, Wire}; +use nomos_storage::{backends::rocksdb::RocksBackend, StorageService}; +use overwatch_derive::*; +use overwatch_rs::overwatch::{Overwatch, OverwatchRunner}; +use overwatch_rs::services::handle::ServiceHandle; +use rand::{thread_rng, Rng, RngCore}; +use tempfile::{NamedTempFile, TempDir}; +use time::OffsetDateTime; + +use crate::common::*; + +// Client node is only created for asyncroniously interact with nodes in the test. +// The services defined in it are not used. 
+#[derive(Services)] +struct ClientNode { + storage: ServiceHandle>>, +} + +#[derive(Services)] +struct VerifierNode { + network: ServiceHandle>, + cl_mempool: ServiceHandle, + da_mempool: ServiceHandle, + storage: ServiceHandle>>, + cryptarchia: ServiceHandle, + indexer: ServiceHandle, + verifier: ServiceHandle, +} + +// Client node is just an empty overwatch service to spawn a task that could communicate with other +// nodes and manage the data availability cycle during tests. +fn new_client(db_path: PathBuf) -> Overwatch { + OverwatchRunner::::run( + ClientNodeServiceSettings { + storage: nomos_storage::backends::rocksdb::RocksBackendSettings { + db_path, + read_only: false, + column_family: None, + }, + }, + None, + ) + .map_err(|e| eprintln!("Error encountered: {}", e)) + .unwrap() +} + +fn new_node( + coin: &Coin, + ledger_config: &cryptarchia_ledger::Config, + genesis_state: &LedgerState, + time_config: &TimeConfig, + swarm_config: &SwarmConfig, + db_path: PathBuf, + blobs_dir: &PathBuf, + initial_peers: Vec, + verifier_settings: KzgrsDaVerifierSettings, +) -> Overwatch { + OverwatchRunner::::run( + VerifierNodeServiceSettings { + network: NetworkConfig { + backend: Libp2pConfig { + inner: swarm_config.clone(), + initial_peers, + }, + }, + cl_mempool: TxMempoolSettings { + backend: (), + network: AdapterSettings { + topic: String::from(nomos_node::CL_TOPIC), + id: ::hash, + }, + registry: None, + }, + da_mempool: DaMempoolSettings { + backend: (), + network: AdapterSettings { + topic: String::from(nomos_node::DA_TOPIC), + id: ::id, + }, + verification_provider: full_replication::CertificateVerificationParameters { + threshold: 0, + }, + registry: None, + }, + storage: nomos_storage::backends::rocksdb::RocksBackendSettings { + db_path, + read_only: false, + column_family: Some("blocks".into()), + }, + indexer: IndexerSettings { + storage: IndexerStorageSettings { + blob_storage_directory: blobs_dir.clone(), + }, + }, + cryptarchia: cryptarchia_consensus::CryptarchiaSettings { + transaction_selector_settings: (), + blob_selector_settings: (), + config: ledger_config.clone(), + genesis_state: genesis_state.clone(), + time: time_config.clone(), + coins: vec![coin.clone()], + }, + verifier: DaVerifierServiceSettings { + verifier_settings, + network_adapter_settings: (), + storage_adapter_settings: VerifierStorageSettings { + blob_storage_directory: blobs_dir.clone(), + }, + }, + }, + None, + ) + .map_err(|e| eprintln!("Error encountered: {}", e)) + .unwrap() +} + +fn generate_keys() -> (blst::min_sig::SecretKey, blst::min_sig::PublicKey) { + let mut rng = rand::thread_rng(); + let sk_bytes: [u8; 32] = rng.gen(); + let sk = blst::min_sig::SecretKey::key_gen(&sk_bytes, &[]).unwrap(); + + let pk = sk.sk_to_pk(); + (sk, pk) +} + +pub fn rand_data(elements_count: usize) -> Vec { + let mut buff = vec![0; elements_count * DaEncoderParams::MAX_BLS12_381_ENCODING_CHUNK_SIZE]; + rand::thread_rng().fill_bytes(&mut buff); + buff +} + +pub const PARAMS: DaEncoderParams = DaEncoderParams::default_with(2); +pub const ENCODER: DaEncoder = DaEncoder::new(PARAMS); + +#[test] +fn test_verifier() { + let performed_tx = Arc::new(AtomicBool::new(false)); + let performed_rx = performed_tx.clone(); + let is_success_tx = Arc::new(AtomicBool::new(false)); + let is_success_rx = is_success_tx.clone(); + + let mut ids = vec![[0; 32]; 2]; + for id in &mut ids { + thread_rng().fill(id); + } + + let coins = ids + .iter() + .map(|&id| Coin::new(id, id.into(), 1.into())) + .collect::>(); + let genesis_state = 
LedgerState::from_commitments( + coins.iter().map(|c| c.commitment()), + (ids.len() as u32).into(), + ); + let ledger_config = cryptarchia_ledger::Config { + epoch_stake_distribution_stabilization: 3, + epoch_period_nonce_buffer: 3, + epoch_period_nonce_stabilization: 4, + consensus_config: cryptarchia_engine::Config { + security_param: 10, + active_slot_coeff: 0.9, + }, + }; + let time_config = TimeConfig { + slot_duration: Duration::from_secs(1), + chain_start_time: OffsetDateTime::now_utc(), + }; + + let swarm_config1 = SwarmConfig { + port: 7773, + ..Default::default() + }; + let swarm_config2 = SwarmConfig { + port: 7774, + ..Default::default() + }; + + let blobs_dir = TempDir::new().unwrap().path().to_path_buf(); + + let (node1_sk, node1_pk) = generate_keys(); + let (node2_sk, node2_pk) = generate_keys(); + + let client_zone = new_client(NamedTempFile::new().unwrap().path().to_path_buf()); + + let node1 = new_node( + &coins[0], + &ledger_config, + &genesis_state, + &time_config, + &swarm_config1, + NamedTempFile::new().unwrap().path().to_path_buf(), + &blobs_dir, + vec![node_address(&swarm_config2)], + KzgrsDaVerifierSettings { + sk: node1_sk, + nodes_public_keys: vec![node1_pk, node2_pk], + }, + ); + + let node2 = new_node( + &coins[1], + &ledger_config, + &genesis_state, + &time_config, + &swarm_config2, + NamedTempFile::new().unwrap().path().to_path_buf(), + &blobs_dir, + vec![node_address(&swarm_config1)], + KzgrsDaVerifierSettings { + sk: node2_sk, + nodes_public_keys: vec![node1_pk, node2_pk], + }, + ); + + let node1_verifier = node1.handle().relay::(); + + let node2_verifier = node2.handle().relay::(); + + client_zone.spawn(async move { + let node1_verifier = node1_verifier.connect().await.unwrap(); + let (node1_reply_tx, node1_reply_rx) = tokio::sync::oneshot::channel(); + + let node2_verifier = node2_verifier.connect().await.unwrap(); + let (node2_reply_tx, node2_reply_rx) = tokio::sync::oneshot::channel(); + + let verifiers = vec![ + (node1_verifier, node1_reply_tx), + (node2_verifier, node2_reply_tx), + ]; + + // Encode data + let encoder = &ENCODER; + let data = rand_data(10); + + let encoded_data = encoder.encode(&data).unwrap(); + let columns: Vec<_> = encoded_data.extended_data.columns().collect(); + + for (i, (verifier, reply_tx)) in verifiers.into_iter().enumerate() { + let column = &columns[i]; + + let da_blob = DaBlob { + column: column.clone(), + column_commitment: encoded_data.column_commitments[i], + aggregated_column_commitment: encoded_data.aggregated_column_commitment, + aggregated_column_proof: encoded_data.aggregated_column_proofs[i], + rows_commitments: encoded_data.row_commitments.clone(), + rows_proofs: encoded_data + .rows_proofs + .iter() + .map(|proofs| proofs.get(i).cloned().unwrap()) + .collect(), + }; + + verifier + .send(nomos_da_verifier::DaVerifierMsg::AddBlob { + blob: da_blob, + reply_channel: reply_tx, + }) + .await + .unwrap(); + } + + // Create cert + let a1 = node1_reply_rx.await.unwrap(); + let a2 = node2_reply_rx.await.unwrap(); + + if a1.is_some() && a2.is_some() { + is_success_tx.store(true, SeqCst); + } + + // TODO: Create cert and check indexer integration. 
+ performed_tx.store(true, SeqCst); + }); + + while !performed_rx.load(SeqCst) { + std::thread::sleep(std::time::Duration::from_millis(200)); + } + assert!(is_success_rx.load(SeqCst)); +} diff --git a/nomos-services/data-availability/verifier/Cargo.toml b/nomos-services/data-availability/verifier/Cargo.toml new file mode 100644 index 00000000..3ea0931a --- /dev/null +++ b/nomos-services/data-availability/verifier/Cargo.toml @@ -0,0 +1,25 @@ +[package] +name = "nomos-da-verifier" +version = "0.1.0" +edition = "2021" + +[dependencies] +async-trait = "0.1" +blst = "0.3.11" +bytes = "1.2" +futures = "0.3" +kzgrs-backend = { path = "../../../nomos-da/kzgrs-backend" } +nomos-core = { path = "../../../nomos-core" } +nomos-da-storage = { path = "../../../nomos-da/storage" } +nomos-network = { path = "../../../nomos-services/network" } +nomos-storage = { path = "../../../nomos-services/storage" } +overwatch-rs = { git = "https://github.com/logos-co/Overwatch", rev = "2f70806" } +overwatch-derive = { git = "https://github.com/logos-co/Overwatch", rev = "ac28d01" } +serde = { version = "1.0", features = ["derive"] } +tokio = { version = "1", features = ["sync", "macros"] } +tokio-stream = "0.1.15" +tracing = "0.1" + +[features] +rocksdb-backend = ["nomos-storage/rocksdb-backend"] +libp2p = ["nomos-network/libp2p"] diff --git a/nomos-services/data-availability/verifier/src/backend/kzgrs.rs b/nomos-services/data-availability/verifier/src/backend/kzgrs.rs new file mode 100644 index 00000000..f8dc8f09 --- /dev/null +++ b/nomos-services/data-availability/verifier/src/backend/kzgrs.rs @@ -0,0 +1,60 @@ +// std +use core::fmt; +// crates +use blst::{min_sig::PublicKey, min_sig::SecretKey}; +use kzgrs_backend::{ + common::{attestation::Attestation, blob::DaBlob}, + verifier::DaVerifier as NomosKzgrsVerifier, +}; +use nomos_core::da::DaVerifier; +// internal +use super::VerifierBackend; + +#[derive(Debug)] +pub enum KzgrsDaVerifierError { + VerificationError, +} + +impl fmt::Display for KzgrsDaVerifierError { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + match *self { + KzgrsDaVerifierError::VerificationError => write!(f, "Verification failed"), + } + } +} + +impl std::error::Error for KzgrsDaVerifierError {} + +pub struct KzgrsDaVerifier { + verifier: NomosKzgrsVerifier, +} + +impl VerifierBackend for KzgrsDaVerifier { + type Settings = KzgrsDaVerifierSettings; + + fn new(settings: Self::Settings) -> Self { + let verifier = NomosKzgrsVerifier::new(settings.sk, &settings.nodes_public_keys); + Self { verifier } + } +} + +impl DaVerifier for KzgrsDaVerifier { + type DaBlob = DaBlob; + type Attestation = Attestation; + type Error = KzgrsDaVerifierError; + + fn verify(&self, blob: &Self::DaBlob) -> Result { + let blob = blob.clone(); + match self.verifier.verify(blob) { + Some(attestation) => Ok(attestation), + None => Err(KzgrsDaVerifierError::VerificationError), + } + } +} + +// TODO: `sk` and `nodes_public_keys` need to be fetched from the params provider service. 
+#[derive(Debug, Clone)] +pub struct KzgrsDaVerifierSettings { + pub sk: SecretKey, + pub nodes_public_keys: Vec, +} diff --git a/nomos-services/data-availability/verifier/src/backend/mod.rs b/nomos-services/data-availability/verifier/src/backend/mod.rs new file mode 100644 index 00000000..e2764502 --- /dev/null +++ b/nomos-services/data-availability/verifier/src/backend/mod.rs @@ -0,0 +1,8 @@ +pub mod kzgrs; + +use nomos_core::da::DaVerifier; + +pub trait VerifierBackend: DaVerifier { + type Settings; + fn new(settings: Self::Settings) -> Self; +} diff --git a/nomos-services/data-availability/verifier/src/lib.rs b/nomos-services/data-availability/verifier/src/lib.rs new file mode 100644 index 00000000..95c2c703 --- /dev/null +++ b/nomos-services/data-availability/verifier/src/lib.rs @@ -0,0 +1,218 @@ +pub mod backend; +pub mod network; +pub mod storage; + +// std +use nomos_storage::StorageService; +use overwatch_rs::services::life_cycle::LifecycleMessage; +use std::error::Error; +use std::fmt::{Debug, Formatter}; +use storage::DaStorageAdapter; +use tokio::sync::oneshot::Sender; +// crates +use tokio_stream::StreamExt; +use tracing::error; +// internal +use backend::VerifierBackend; +use network::NetworkAdapter; +use nomos_network::NetworkService; +use overwatch_rs::services::handle::ServiceStateHandle; +use overwatch_rs::services::relay::{Relay, RelayMessage}; +use overwatch_rs::services::state::{NoOperator, NoState}; +use overwatch_rs::services::{ServiceCore, ServiceData, ServiceId}; +use overwatch_rs::DynError; + +pub enum DaVerifierMsg { + AddBlob { + blob: B, + reply_channel: Sender>, + }, +} + +impl Debug for DaVerifierMsg { + fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { + match self { + DaVerifierMsg::AddBlob { .. } => { + write!(f, "DaVerifierMsg::AddBlob") + } + } + } +} + +impl RelayMessage for DaVerifierMsg {} + +pub struct DaVerifierService +where + Backend: VerifierBackend, + Backend::Settings: Clone, + Backend::DaBlob: 'static, + Backend::Attestation: 'static, + Backend::Error: Error, + N: NetworkAdapter, + N::Settings: Clone, + S: DaStorageAdapter, +{ + network_relay: Relay>, + service_state: ServiceStateHandle, + storage_relay: Relay>, + verifier: Backend, +} + +impl DaVerifierService +where + Backend: VerifierBackend + Send + 'static, + Backend::DaBlob: Debug + Send, + Backend::Attestation: Debug + Send, + Backend::Error: Error + Send + Sync, + Backend::Settings: Clone, + N: NetworkAdapter + Send + 'static, + N::Settings: Clone, + S: DaStorageAdapter + + Send + + 'static, +{ + async fn handle_new_blob( + verifier: &Backend, + storage_adapter: &S, + blob: &Backend::DaBlob, + ) -> Result { + if let Some(attestation) = storage_adapter.get_attestation(blob).await? 
{ + Ok(attestation) + } else { + let attestation = verifier.verify(blob)?; + storage_adapter.add_blob(blob, &attestation).await?; + Ok(attestation) + } + } + + async fn should_stop_service(message: LifecycleMessage) -> bool { + match message { + LifecycleMessage::Shutdown(sender) => { + if sender.send(()).is_err() { + error!( + "Error sending successful shutdown signal from service {}", + Self::SERVICE_ID + ); + } + true + } + LifecycleMessage::Kill => true, + } + } +} + +impl ServiceData for DaVerifierService +where + Backend: VerifierBackend, + Backend::Settings: Clone, + Backend::Error: Error, + N: NetworkAdapter, + N::Settings: Clone, + S: DaStorageAdapter, + S::Settings: Clone, +{ + const SERVICE_ID: ServiceId = "DaVerifier"; + type Settings = DaVerifierServiceSettings; + type State = NoState; + type StateOperator = NoOperator; + type Message = DaVerifierMsg; +} + +#[async_trait::async_trait] +impl ServiceCore for DaVerifierService +where + Backend: VerifierBackend + Send + Sync + 'static, + Backend::Settings: Clone + Send + Sync + 'static, + Backend::DaBlob: Debug + Send + Sync + 'static, + Backend::Attestation: Debug + Send + Sync + 'static, + Backend::Error: Error + Send + Sync + 'static, + N: NetworkAdapter + Send + 'static, + N::Settings: Clone + Send + Sync + 'static, + S: DaStorageAdapter + + Send + + Sync + + 'static, + S::Settings: Clone + Send + Sync + 'static, +{ + fn init(service_state: ServiceStateHandle) -> Result { + let DaVerifierServiceSettings { + verifier_settings, .. + } = service_state.settings_reader.get_updated_settings(); + let network_relay = service_state.overwatch_handle.relay(); + let storage_relay = service_state.overwatch_handle.relay(); + Ok(Self { + network_relay, + storage_relay, + service_state, + verifier: Backend::new(verifier_settings), + }) + } + + async fn run(self) -> Result<(), DynError> { + // This service will likely have to be modified later on. + // Most probably the verifier itself will need to be constructed/updated for every message + // with an updated list of the available nodes, as it needs its own index, which comes from + // the position of its BLS public key in that list. + let Self { + network_relay, + storage_relay, + mut service_state, + verifier, + } = self; + + let DaVerifierServiceSettings { + network_adapter_settings, + storage_adapter_settings, + .. + } = service_state.settings_reader.get_updated_settings(); + + let network_relay = network_relay.connect().await?; + let network_adapter = N::new(network_adapter_settings, network_relay).await; + let mut blob_stream = network_adapter.blob_stream().await; + + let storage_relay = storage_relay.connect().await?; + let storage_adapter = S::new(storage_adapter_settings, storage_relay).await; + + let mut lifecycle_stream = service_state.lifecycle_handle.message_stream(); + loop { + tokio::select! 
{ + Some(blob) = blob_stream.next() => { + match Self::handle_new_blob(&verifier,&storage_adapter, &blob).await { + Ok(attestation) => if let Err(err) = network_adapter.send_attestation(attestation).await { + error!("Error replying attestation {err:?}"); + }, + Err(err) => error!("Error handling blob {blob:?} due to {err:?}"), + } + } + Some(msg) = service_state.inbound_relay.recv() => { + let DaVerifierMsg::AddBlob { blob, reply_channel } = msg; + match Self::handle_new_blob(&verifier, &storage_adapter, &blob).await { + Ok(attestation) => if let Err(err) = reply_channel.send(Some(attestation)) { + error!("Error replying attestation {err:?}"); + }, + Err(err) => { + error!("Error handling blob {blob:?} due to {err:?}"); + if let Err(err) = reply_channel.send(None) { + error!("Error replying attestation {err:?}"); + } + }, + }; + } + Some(msg) = lifecycle_stream.next() => { + if Self::should_stop_service(msg).await { + break; + } + } + } + } + + Ok(()) + } +} + +#[derive(Debug, Clone)] +pub struct DaVerifierServiceSettings { + pub verifier_settings: BackendSettings, + pub network_adapter_settings: NetworkSettings, + pub storage_adapter_settings: StorageSettings, +} diff --git a/nomos-services/data-availability/src/network/adapters/libp2p.rs b/nomos-services/data-availability/verifier/src/network/adapters/libp2p.rs similarity index 92% rename from nomos-services/data-availability/src/network/adapters/libp2p.rs rename to nomos-services/data-availability/verifier/src/network/adapters/libp2p.rs index b219833f..3386e20b 100644 --- a/nomos-services/data-availability/src/network/adapters/libp2p.rs +++ b/nomos-services/data-availability/verifier/src/network/adapters/libp2p.rs @@ -76,10 +76,12 @@ where A: Serialize + DeserializeOwned + Send + Sync + 'static, { type Backend = Libp2p; + type Settings = (); type Blob = B; type Attestation = A; async fn new( + _settings: Self::Settings, network_relay: OutboundRelay< as ServiceData>::Message>, ) -> Self { network_relay @@ -99,15 +101,7 @@ where self.stream_for::().await } - async fn attestation_stream(&self) -> Box + Unpin + Send> { - self.stream_for::().await - } - async fn send_attestation(&self, attestation: Self::Attestation) -> Result<(), DynError> { self.send(attestation).await } - - async fn send_blob(&self, blob: Self::Blob) -> Result<(), DynError> { - self.send(blob).await - } } diff --git a/nomos-services/data-availability/src/network/adapters/mod.rs b/nomos-services/data-availability/verifier/src/network/adapters/mod.rs similarity index 100% rename from nomos-services/data-availability/src/network/adapters/mod.rs rename to nomos-services/data-availability/verifier/src/network/adapters/mod.rs diff --git a/nomos-services/data-availability/src/network/mod.rs b/nomos-services/data-availability/verifier/src/network/mod.rs similarity index 61% rename from nomos-services/data-availability/src/network/mod.rs rename to nomos-services/data-availability/verifier/src/network/mod.rs index 360a63da..83e63735 100644 --- a/nomos-services/data-availability/src/network/mod.rs +++ b/nomos-services/data-availability/verifier/src/network/mod.rs @@ -1,33 +1,25 @@ pub mod adapters; -// std -// crates use futures::Stream; -// internal use nomos_network::backends::NetworkBackend; use nomos_network::NetworkService; use overwatch_rs::services::relay::OutboundRelay; use overwatch_rs::services::ServiceData; use overwatch_rs::DynError; -use serde::de::DeserializeOwned; -use serde::Serialize; #[async_trait::async_trait] pub trait NetworkAdapter { type Backend: 
NetworkBackend + 'static; + type Settings; - type Blob: Serialize + DeserializeOwned + Send + Sync + 'static; - type Attestation: Serialize + DeserializeOwned + Send + Sync + 'static; + type Blob; + type Attestation; async fn new( + settings: Self::Settings, network_relay: OutboundRelay< as ServiceData>::Message>, ) -> Self; async fn blob_stream(&self) -> Box + Unpin + Send>; - - async fn attestation_stream(&self) -> Box + Unpin + Send>; - async fn send_attestation(&self, attestation: Self::Attestation) -> Result<(), DynError>; - - async fn send_blob(&self, blob: Self::Blob) -> Result<(), DynError>; } diff --git a/nomos-services/data-availability/verifier/src/storage/adapters/mod.rs b/nomos-services/data-availability/verifier/src/storage/adapters/mod.rs new file mode 100644 index 00000000..663a2ff3 --- /dev/null +++ b/nomos-services/data-availability/verifier/src/storage/adapters/mod.rs @@ -0,0 +1,2 @@ +#[cfg(feature = "rocksdb-backend")] +pub mod rocksdb; diff --git a/nomos-services/data-availability/verifier/src/storage/adapters/rocksdb.rs b/nomos-services/data-availability/verifier/src/storage/adapters/rocksdb.rs new file mode 100644 index 00000000..bca73d72 --- /dev/null +++ b/nomos-services/data-availability/verifier/src/storage/adapters/rocksdb.rs @@ -0,0 +1,113 @@ +// std +use serde::{de::DeserializeOwned, Serialize}; +use std::{marker::PhantomData, path::PathBuf}; +// crates +use nomos_core::da::{attestation::Attestation, blob::Blob}; +use nomos_da_storage::{ + fs::write_blob, + rocksdb::{key_bytes, DA_ATTESTED_KEY_PREFIX}, +}; +use nomos_storage::{ + backends::{rocksdb::RocksBackend, StorageSerde}, + StorageMsg, StorageService, +}; +use overwatch_rs::{ + services::{relay::OutboundRelay, ServiceData}, + DynError, +}; +// internal
+use crate::storage::DaStorageAdapter; + +pub struct RocksAdapter +where + S: StorageSerde + Send + Sync + 'static, +{ + settings: RocksAdapterSettings, + storage_relay: OutboundRelay>>, + _blob: PhantomData, + _attestation: PhantomData, +} + +#[async_trait::async_trait] +impl DaStorageAdapter for RocksAdapter +where + A: Attestation + Serialize + DeserializeOwned + Clone + Send + Sync, + B: Blob + Serialize + Clone + Send + Sync + 'static, + B::BlobId: AsRef<[u8]> + Send + Sync + 'static, + S: StorageSerde + Send + Sync + 'static, +{ + type Backend = RocksBackend; + type Blob = B; + type Attestation = A; + type Settings = RocksAdapterSettings; + + async fn new( + settings: Self::Settings, + storage_relay: OutboundRelay< as ServiceData>::Message>, + ) -> Self { + Self { + settings, + storage_relay, + _blob: PhantomData, + _attestation: PhantomData, + } + } + + async fn add_blob( + &self, + blob: &Self::Blob, + attestation: &Self::Attestation, + ) -> Result<(), DynError> { + let blob_bytes = S::serialize(blob); + + write_blob( + self.settings.blob_storage_directory.clone(), + blob.id().as_ref(), + &blob_bytes, + ) + .await?; + + // Mark the blob as attested for later use in the Indexer and attestation cache. 
+ let blob_key = key_bytes(DA_ATTESTED_KEY_PREFIX, blob.id().as_ref()); + self.storage_relay + .send(StorageMsg::Store { + key: blob_key, + value: S::serialize(attestation), + }) + .await + .map_err(|(e, _)| e.into()) + } + + async fn get_attestation( + &self, + blob: &Self::Blob, + ) -> Result, DynError> { + let attestation_key = key_bytes(DA_ATTESTED_KEY_PREFIX, blob.id().as_ref()); + let (reply_tx, reply_rx) = tokio::sync::oneshot::channel(); + self.storage_relay + .send(StorageMsg::Load { + key: attestation_key, + reply_channel: reply_tx, + }) + .await + .expect("Failed to send load request to storage relay"); + + // TODO: Use the storage backend's ser/de functionality. + // + // The storage backend already handles ser/de, but lacks the ability to separate storage + // domains using prefixed keys. Once that is implemented, the Indexer and Verifier can be simplified. + reply_rx + .await + .map(|maybe_bytes| { + maybe_bytes.map(|bytes| { + S::deserialize(bytes).expect("Attestation should be deserialized from bytes") + }) + }) + .map_err(|_| "".into()) + } +} + +#[derive(Debug, Clone)] +pub struct RocksAdapterSettings { + pub blob_storage_directory: PathBuf, +} diff --git a/nomos-services/data-availability/verifier/src/storage/mod.rs b/nomos-services/data-availability/verifier/src/storage/mod.rs new file mode 100644 index 00000000..78ab2a56 --- /dev/null +++ b/nomos-services/data-availability/verifier/src/storage/mod.rs @@ -0,0 +1,30 @@ +pub mod adapters; + +use nomos_storage::{backends::StorageBackend, StorageService}; +use overwatch_rs::{ + services::{relay::OutboundRelay, ServiceData}, + DynError, +}; + +#[async_trait::async_trait] +pub trait DaStorageAdapter { + type Backend: StorageBackend + Send + Sync + 'static; + type Settings: Clone; + type Blob: Clone; + type Attestation: Clone; + + async fn new( + settings: Self::Settings, + storage_relay: OutboundRelay< as ServiceData>::Message>, + ) -> Self; + + async fn add_blob( + &self, + blob: &Self::Blob, + attestation: &Self::Attestation, + ) -> Result<(), DynError>; + async fn get_attestation( + &self, + blob: &Self::Blob, + ) -> Result, DynError>; +} diff --git a/nomos-services/mempool/Cargo.toml b/nomos-services/mempool/Cargo.toml index a31a531d..16dcacfb 100644 --- a/nomos-services/mempool/Cargo.toml +++ b/nomos-services/mempool/Cargo.toml @@ -13,6 +13,7 @@ linked-hash-map = { version = "0.5.6", optional = true } nomos-metrics = { path = "../../nomos-services/metrics" } nomos-network = { path = "../network" } nomos-core = { path = "../../nomos-core" } +full-replication = { path = "../../nomos-da/full-replication" } overwatch-rs = { git = "https://github.com/logos-co/Overwatch", rev = "2f70806" } rand = { version = "0.8", optional = true } serde = { version = "1.0", features = ["derive"] } diff --git a/nomos-services/mempool/src/backend/mockpool.rs b/nomos-services/mempool/src/backend/mockpool.rs index 6c1de925..0e9e1751 100644 --- a/nomos-services/mempool/src/backend/mockpool.rs +++ b/nomos-services/mempool/src/backend/mockpool.rs @@ -55,11 +55,15 @@ where Self::new() } - fn add_item(&mut self, key: Self::Key, item: Self::Item) -> Result<(), MempoolError> { + fn add_item>( + &mut self, + key: Self::Key, + item: I, + ) -> Result<(), MempoolError> { if self.pending_items.contains_key(&key) || self.in_block_items_by_id.contains_key(&key) { return Err(MempoolError::ExistingItem); } - self.pending_items.insert(key, item); + self.pending_items.insert(key, item.into()); self.last_item_timestamp = SystemTime::now() .duration_since(UNIX_EPOCH) .unwrap() diff --git 
a/nomos-services/mempool/src/backend/mod.rs b/nomos-services/mempool/src/backend/mod.rs index e97f25be..955b2367 100644 --- a/nomos-services/mempool/src/backend/mod.rs +++ b/nomos-services/mempool/src/backend/mod.rs @@ -21,7 +21,11 @@ pub trait MemPool { fn new(settings: Self::Settings) -> Self; /// Add a new item to the mempool, for example because we received it from the network - fn add_item(&mut self, key: Self::Key, item: Self::Item) -> Result<(), MempoolError>; + fn add_item>( + &mut self, + key: Self::Key, + item: I, + ) -> Result<(), MempoolError>; /// Return a view over items contained in the mempool. /// Implementations should provide *at least* all the items which have not been marked as diff --git a/nomos-services/mempool/src/da/mod.rs b/nomos-services/mempool/src/da/mod.rs index 1f278a4d..84ec6fd2 100644 --- a/nomos-services/mempool/src/da/mod.rs +++ b/nomos-services/mempool/src/da/mod.rs @@ -1 +1,2 @@ pub mod service; +pub mod verify; diff --git a/nomos-services/mempool/src/da/service.rs b/nomos-services/mempool/src/da/service.rs index 3644ded3..7c8c35f6 100644 --- a/nomos-services/mempool/src/da/service.rs +++ b/nomos-services/mempool/src/da/service.rs @@ -12,10 +12,12 @@ use std::fmt::Debug; // #[cfg(feature = "metrics")] // use super::metrics::Metrics; use futures::StreamExt; +use nomos_core::da::certificate::Certificate; use nomos_metrics::NomosRegistry; // internal use crate::backend::MemPool; use crate::network::NetworkAdapter; +use crate::verify::MempoolVerificationProvider; use crate::{MempoolMetrics, MempoolMsg}; use nomos_network::{NetworkMsg, NetworkService}; use overwatch_rs::services::life_cycle::LifecycleMessage; @@ -27,49 +29,73 @@ use overwatch_rs::services::{ }; use tracing::error; -pub struct DaMempoolService +pub struct DaMempoolService where - N: NetworkAdapter, + N: NetworkAdapter, + N::Payload: Certificate + Into + Debug + 'static, P: MemPool, P::Settings: Clone, P::Item: Debug + 'static, P::Key: Debug + 'static, P::BlockId: Debug + 'static, + V: MempoolVerificationProvider< + Payload = N::Payload, + Parameters = ::VerificationParameters, + >, { service_state: ServiceStateHandle, network_relay: Relay>, pool: P, + verification_provider: V, // TODO: Add again after metrics refactor // #[cfg(feature = "metrics")] // metrics: Option, } -impl ServiceData for DaMempoolService +impl ServiceData for DaMempoolService where - N: NetworkAdapter, + N: NetworkAdapter, + N::Payload: Certificate + Debug + Into + 'static, P: MemPool, P::Settings: Clone, P::Item: Debug + 'static, P::Key: Debug + 'static, P::BlockId: Debug + 'static, + V: MempoolVerificationProvider< + Payload = N::Payload, + Parameters = ::VerificationParameters, + >, { const SERVICE_ID: ServiceId = "mempool-da"; - type Settings = DaMempoolSettings; + type Settings = DaMempoolSettings; type State = NoState; type StateOperator = NoOperator; - type Message = MempoolMsg<

<P as MemPool>::BlockId, <P as MemPool>::Item, <P as MemPool>::Key>;
+    type Message = MempoolMsg<
+        <P as MemPool>::BlockId,
+        <N as NetworkAdapter>::Payload,
+        <P as MemPool>::Item,
+        <P as MemPool>

::Key, + >; } #[async_trait::async_trait] -impl ServiceCore for DaMempoolService +impl ServiceCore for DaMempoolService where P: MemPool + Send + 'static, P::Settings: Clone + Send + Sync + 'static, N::Settings: Clone + Send + Sync + 'static, + V::Settings: Clone + Send + Sync + 'static, P::Item: Clone + Debug + Send + Sync + 'static, P::Key: Debug + Send + Sync + 'static, P::BlockId: Send + Debug + 'static, - N: NetworkAdapter + Send + Sync + 'static, + N::Payload: Certificate + Into + Clone + Debug + Send + 'static, + N: NetworkAdapter + Send + Sync + 'static, + V: MempoolVerificationProvider< + Payload = N::Payload, + Parameters = ::VerificationParameters, + > + Send + + Sync + + 'static, { fn init(service_state: ServiceStateHandle) -> Result { let network_relay = service_state.overwatch_handle.relay(); @@ -85,6 +111,7 @@ where service_state, network_relay, pool: P::new(settings.backend), + verification_provider: V::new(settings.verification_provider), // #[cfg(feature = "metrics")] // metrics, }) @@ -109,7 +136,7 @@ where ); let adapter = adapter.await; - let mut network_items = adapter.transactions_stream().await; + let mut network_items = adapter.payload_stream().await; let mut lifecycle_stream = service_state.lifecycle_handle.message_stream(); loop { @@ -120,10 +147,13 @@ where // if let Some(metrics) = &self.metrics { metrics.record(&msg) } Self::handle_mempool_message(msg, &mut pool, &mut network_relay, &mut service_state).await; } - Some((key, item )) = network_items.next() => { - pool.add_item(key, item).unwrap_or_else(|e| { - tracing::debug!("could not add item to the pool due to: {}", e) - }); + Some((key, item)) = network_items.next() => { + let params = self.verification_provider.get_parameters(&item).await; + if item.verify(params) { + pool.add_item(key, item).unwrap_or_else(|e| { + tracing::debug!("could not add item to the pool due to: {}", e) + }); + } } Some(msg) = lifecycle_stream.next() => { if Self::should_stop_service(msg).await { @@ -136,7 +166,7 @@ where } } -impl DaMempoolService +impl DaMempoolService where P: MemPool + Send + 'static, P::Settings: Clone + Send + Sync + 'static, @@ -144,7 +174,14 @@ where P::Item: Clone + Debug + Send + Sync + 'static, P::Key: Debug + Send + Sync + 'static, P::BlockId: Debug + Send + 'static, - N: NetworkAdapter + Send + Sync + 'static, + N::Payload: Certificate + Into + Debug + Clone + Send + 'static, + N: NetworkAdapter + Send + Sync + 'static, + V: MempoolVerificationProvider< + Payload = N::Payload, + Parameters = ::VerificationParameters, + > + Send + + Sync + + 'static, { async fn should_stop_service(message: LifecycleMessage) -> bool { match message { @@ -162,14 +199,14 @@ where } async fn handle_mempool_message( - message: MempoolMsg, + message: MempoolMsg, pool: &mut P, network_relay: &mut OutboundRelay>, service_state: &mut ServiceStateHandle, ) { match message { MempoolMsg::Add { - item, + payload: item, key, reply_channel, } => { @@ -237,8 +274,9 @@ where } #[derive(Clone, Debug)] -pub struct DaMempoolSettings { +pub struct DaMempoolSettings { pub backend: B, pub network: N, + pub verification_provider: V, pub registry: Option, } diff --git a/nomos-services/mempool/src/da/verify/fullreplication.rs b/nomos-services/mempool/src/da/verify/fullreplication.rs new file mode 100644 index 00000000..00a4c881 --- /dev/null +++ b/nomos-services/mempool/src/da/verify/fullreplication.rs @@ -0,0 +1,22 @@ +use full_replication::{Certificate, CertificateVerificationParameters}; + +use crate::verify::MempoolVerificationProvider; + 
+pub struct DaVerificationProvider { + settings: CertificateVerificationParameters, +} + +#[async_trait::async_trait] +impl MempoolVerificationProvider for DaVerificationProvider { + type Payload = Certificate; + type Parameters = CertificateVerificationParameters; + type Settings = CertificateVerificationParameters; + + fn new(settings: Self::Settings) -> Self { + Self { settings } + } + + async fn get_parameters(&self, _: &Self::Payload) -> Self::Parameters { + self.settings.clone() + } +} diff --git a/nomos-services/mempool/src/da/verify/mod.rs b/nomos-services/mempool/src/da/verify/mod.rs new file mode 100644 index 00000000..fc98c3dc --- /dev/null +++ b/nomos-services/mempool/src/da/verify/mod.rs @@ -0,0 +1 @@ +pub mod fullreplication; diff --git a/nomos-services/mempool/src/lib.rs b/nomos-services/mempool/src/lib.rs index c7a1f4d1..99eb5c82 100644 --- a/nomos-services/mempool/src/lib.rs +++ b/nomos-services/mempool/src/lib.rs @@ -2,6 +2,7 @@ pub mod backend; pub mod da; pub mod network; pub mod tx; +pub mod verify; use backend::Status; use overwatch_rs::services::relay::RelayMessage; @@ -11,9 +12,9 @@ use tokio::sync::oneshot::Sender; pub use da::service::{DaMempoolService, DaMempoolSettings}; pub use tx::service::{TxMempoolService, TxMempoolSettings}; -pub enum MempoolMsg { +pub enum MempoolMsg { Add { - item: Item, + payload: Payload, key: Key, reply_channel: Sender>, }, @@ -42,9 +43,10 @@ pub enum MempoolMsg { }, } -impl Debug for MempoolMsg +impl Debug for MempoolMsg where BlockId: Debug, + Payload: Debug, Item: Debug, Key: Debug, { @@ -53,7 +55,7 @@ where Self::View { ancestor_hint, .. } => { write!(f, "MempoolMsg::View {{ ancestor_hint: {ancestor_hint:?}}}") } - Self::Add { item, .. } => write!(f, "MempoolMsg::Add{{item: {item:?}}}"), + Self::Add { payload, .. 
} => write!(f, "MempoolMsg::Add{{payload: {payload:?}}}"), Self::Prune { ids } => write!(f, "MempoolMsg::Prune{{ids: {ids:?}}}"), Self::MarkInBlock { ids, block } => { write!( @@ -78,7 +80,7 @@ pub struct MempoolMetrics { pub last_item_timestamp: u64, } -impl RelayMessage - for MempoolMsg +impl RelayMessage + for MempoolMsg { } diff --git a/nomos-services/mempool/src/network/adapters/libp2p.rs b/nomos-services/mempool/src/network/adapters/libp2p.rs index e6dd6bca..3a0a55fc 100644 --- a/nomos-services/mempool/src/network/adapters/libp2p.rs +++ b/nomos-services/mempool/src/network/adapters/libp2p.rs @@ -24,7 +24,7 @@ where { type Backend = Libp2p; type Settings = Settings; - type Item = Item; + type Payload = Item; type Key = Key; async fn new( @@ -42,9 +42,9 @@ where settings, } } - async fn transactions_stream( + async fn payload_stream( &self, - ) -> Box + Unpin + Send> { + ) -> Box + Unpin + Send> { let topic_hash = TopicHash::from_raw(self.settings.topic.clone()); let id = self.settings.id; let (sender, receiver) = tokio::sync::oneshot::channel(); diff --git a/nomos-services/mempool/src/network/adapters/mock.rs b/nomos-services/mempool/src/network/adapters/mock.rs index fd677256..805e7044 100644 --- a/nomos-services/mempool/src/network/adapters/mock.rs +++ b/nomos-services/mempool/src/network/adapters/mock.rs @@ -26,7 +26,7 @@ pub struct MockAdapter { impl NetworkAdapter for MockAdapter { type Backend = Mock; type Settings = (); - type Item = MockTransaction; + type Payload = MockTransaction; type Key = MockTxId; async fn new( @@ -60,9 +60,9 @@ impl NetworkAdapter for MockAdapter { Self { network_relay } } - async fn transactions_stream( + async fn payload_stream( &self, - ) -> Box + Unpin + Send> { + ) -> Box + Unpin + Send> { let (sender, receiver) = tokio::sync::oneshot::channel(); if let Err((_, e)) = self .network_relay @@ -94,7 +94,7 @@ impl NetworkAdapter for MockAdapter { ))) } - async fn send(&self, msg: Self::Item) { + async fn send(&self, msg: Self::Payload) { if let Err((e, _)) = self .network_relay .send(NetworkMsg::Process(MockBackendMessage::Broadcast { diff --git a/nomos-services/mempool/src/network/mod.rs b/nomos-services/mempool/src/network/mod.rs index 01a593cf..95e56d0a 100644 --- a/nomos-services/mempool/src/network/mod.rs +++ b/nomos-services/mempool/src/network/mod.rs @@ -15,16 +15,17 @@ use overwatch_rs::services::ServiceData; pub trait NetworkAdapter { type Backend: NetworkBackend + 'static; type Settings: Clone; - - type Item: Send + Sync + 'static; + type Payload: Send + Sync + 'static; type Key: Send + Sync + 'static; + async fn new( settings: Self::Settings, network_relay: OutboundRelay< as ServiceData>::Message>, ) -> Self; - async fn transactions_stream( - &self, - ) -> Box + Unpin + Send>; - async fn send(&self, item: Self::Item); + async fn payload_stream( + &self, + ) -> Box + Unpin + Send>; + + async fn send(&self, payload: Self::Payload); } diff --git a/nomos-services/mempool/src/tx/service.rs b/nomos-services/mempool/src/tx/service.rs index 1820a9a7..a03e3328 100644 --- a/nomos-services/mempool/src/tx/service.rs +++ b/nomos-services/mempool/src/tx/service.rs @@ -28,7 +28,7 @@ use tracing::error; pub struct TxMempoolService where - N: NetworkAdapter, + N: NetworkAdapter, P: MemPool, P::Settings: Clone, P::Item: Debug + 'static, @@ -44,7 +44,7 @@ where impl ServiceData for TxMempoolService where - N: NetworkAdapter, + N: NetworkAdapter, P: MemPool, P::Settings: Clone, P::Item: Debug + 'static, @@ -55,7 +55,12 @@ where type Settings = TxMempoolSettings; 
type State = NoState; type StateOperator = NoOperator; - type Message = MempoolMsg<

<P as MemPool>::BlockId, <P as MemPool>::Item, <P as MemPool>::Key>;
+    type Message = MempoolMsg<
+        <P as MemPool>::BlockId,
+        <P as MemPool>::Item,
+        <P as MemPool>::Item,
+        <P as MemPool>

::Key, + >; } #[async_trait::async_trait] @@ -67,7 +72,7 @@ where P::Item: Clone + Debug + Send + Sync + 'static, P::Key: Debug + Send + Sync + 'static, P::BlockId: Send + Debug + 'static, - N: NetworkAdapter + Send + Sync + 'static, + N: NetworkAdapter + Send + Sync + 'static, { fn init(service_state: ServiceStateHandle) -> Result { let network_relay = service_state.overwatch_handle.relay(); @@ -106,7 +111,7 @@ where ); let adapter = adapter.await; - let mut network_items = adapter.transactions_stream().await; + let mut network_items = adapter.payload_stream().await; let mut lifecycle_stream = service_state.lifecycle_handle.message_stream(); loop { @@ -140,7 +145,7 @@ where P::Item: Clone + Debug + Send + Sync + 'static, P::Key: Debug + Send + Sync + 'static, P::BlockId: Debug + Send + 'static, - N: NetworkAdapter + Send + Sync + 'static, + N: NetworkAdapter + Send + Sync + 'static, { async fn should_stop_service(message: LifecycleMessage) -> bool { match message { @@ -158,14 +163,14 @@ where } async fn handle_mempool_message( - message: MempoolMsg, + message: MempoolMsg, pool: &mut P, network_relay: &mut OutboundRelay>, service_state: &mut ServiceStateHandle, ) { match message { MempoolMsg::Add { - item, + payload: item, key, reply_channel, } => { diff --git a/nomos-services/mempool/src/verify/mod.rs b/nomos-services/mempool/src/verify/mod.rs new file mode 100644 index 00000000..c10466f0 --- /dev/null +++ b/nomos-services/mempool/src/verify/mod.rs @@ -0,0 +1,12 @@ +#[async_trait::async_trait] +pub trait MempoolVerificationProvider { + type Payload; + type Parameters; + type Settings: Clone; + + // TODO: Payload verification parameters most likely will come from another Overwatch service. + // Once it's decided, update the `new` method to require service relay as parameter. 
+ fn new(settings: Self::Settings) -> Self; + + async fn get_parameters(&self, payload: &Self::Payload) -> Self::Parameters; +} diff --git a/tests/Cargo.toml b/tests/Cargo.toml index d26c6545..88bb4a87 100644 --- a/tests/Cargo.toml +++ b/tests/Cargo.toml @@ -16,7 +16,6 @@ nomos-core = { path = "../nomos-core" } cryptarchia-engine = { path = "../consensus/cryptarchia-engine", features = ["serde"] } cryptarchia-ledger = { path = "../ledger/cryptarchia-ledger", features = ["serde"] } nomos-mempool = { path = "../nomos-services/mempool", features = ["mock", "libp2p"] } -nomos-da = { path = "../nomos-services/data-availability" } full-replication = { path = "../nomos-da/full-replication" } rand = "0.8" once_cell = "1" @@ -40,11 +39,6 @@ time = "0.3" name = "test_cryptarchia_happy_path" path = "src/tests/cryptarchia/happy.rs" -[[test]] -name = "test_cli" -path = "src/tests/cli.rs" - - [features] mixnet = ["nomos-network/mixnet"] -metrics = ["nomos-node/metrics"] \ No newline at end of file +metrics = ["nomos-node/metrics"] diff --git a/tests/src/lib.rs b/tests/src/lib.rs index 708da679..dffd18c5 100644 --- a/tests/src/lib.rs +++ b/tests/src/lib.rs @@ -1,5 +1,5 @@ pub mod nodes; -pub use nodes::NomosNode; +// pub use nodes::NomosNode; use once_cell::sync::Lazy; use std::env; diff --git a/tests/src/nodes/nomos.rs b/tests/src/nodes/nomos.rs index 4680f539..c9303d4d 100644 --- a/tests/src/nodes/nomos.rs +++ b/tests/src/nodes/nomos.rs @@ -107,7 +107,7 @@ impl NomosNode { async fn wait_online(&self) { loop { - let res = self.get("da/metrics").await; + let res = self.get("cl/metrics").await; if res.is_ok() && res.unwrap().status().is_success() { break; } @@ -343,7 +343,7 @@ fn build_mixnet_topology(mixnode_candidates: &[&Config]) -> MixnetTopology { } fn create_node_config( - id: [u8; 32], + _id: [u8; 32], genesis_state: LedgerState, config: cryptarchia_ledger::Config, coins: Vec, @@ -376,16 +376,6 @@ fn create_node_config( cors_origins: vec![], }, }, - da: nomos_da::Settings { - da_protocol: full_replication::Settings { - voter: id, - num_attestations: 1, - }, - backend: nomos_da::backend::memory_cache::BlobCacheSettings { - max_capacity: usize::MAX, - evicting_period: Duration::from_secs(60 * 60 * 24), // 1 day - }, - }, }; config.network.backend.inner.port = get_available_port(); diff --git a/tests/src/tests/cli.rs b/tests/src/tests/cli.rs deleted file mode 100644 index 09a87498..00000000 --- a/tests/src/tests/cli.rs +++ /dev/null @@ -1,140 +0,0 @@ -use full_replication::{AbsoluteNumber, Attestation, Certificate, FullReplication}; -use nomos_cli::{ - api::da::get_blobs, - cmds::disseminate::Disseminate, - da::disseminate::{DaProtocolChoice, FullReplicationSettings, Protocol, ProtocolSettings}, -}; -use nomos_core::da::{blob::Blob as _, DaProtocol}; -use std::{io::Write, time::Duration}; -use tempfile::NamedTempFile; -use tests::{adjust_timeout, nodes::nomos::Pool, SpawnConfig}; -use tests::{Node, NomosNode}; - -const CLI_BIN: &str = "../target/debug/nomos-cli"; - -use std::process::Command; - -const TIMEOUT_SECS: u64 = 20; - -fn run_disseminate(disseminate: &Disseminate) { - let mut binding = Command::new(CLI_BIN); - let c = binding - .args(["disseminate", "--network-config"]) - .arg(disseminate.network_config.as_os_str()) - .arg("--node-addr") - .arg(disseminate.node_addr.as_ref().unwrap().as_str()); - - match (&disseminate.data, &disseminate.file) { - (Some(data), None) => c.args(["--data", &data]), - (None, Some(file)) => c.args(["--file", file.as_os_str().to_str().unwrap()]), - (_, _) => 
panic!("Either data or file needs to be provided, but not both"), - }; - - c.status().expect("failed to execute nomos cli"); -} - -async fn disseminate(config: &mut Disseminate) { - let node_configs = NomosNode::node_configs(SpawnConfig::chain_happy(2)); - let first_node = NomosNode::spawn(node_configs[0].clone()).await; - - let mut file = NamedTempFile::new().unwrap(); - let config_path = file.path().to_owned(); - serde_yaml::to_writer(&mut file, &node_configs[1].network).unwrap(); - let da_protocol = DaProtocolChoice { - da_protocol: Protocol::FullReplication, - settings: ProtocolSettings { - full_replication: FullReplicationSettings { - voter: [0; 32], - num_attestations: 1, - }, - }, - }; - - let da = - >>::try_from(da_protocol.clone()) - .unwrap(); - - config.timeout = 20; - config.network_config = config_path; - config.da_protocol = da_protocol; - config.node_addr = Some( - format!( - "http://{}", - first_node.config().http.backend_settings.address.clone() - ) - .parse() - .unwrap(), - ); - - run_disseminate(&config); - // let thread = std::thread::spawn(move || cmd.run().unwrap()); - - tokio::time::timeout( - adjust_timeout(Duration::from_secs(TIMEOUT_SECS)), - wait_for_cert_in_mempool(&first_node), - ) - .await - .unwrap(); - - let (blob, bytes) = if let Some(data) = &config.data { - let bytes = data.as_bytes().to_vec(); - (da.encode(bytes.clone())[0].hash(), bytes) - } else { - let bytes = std::fs::read(&config.file.as_ref().unwrap()).unwrap(); - (da.encode(bytes.clone())[0].hash(), bytes) - }; - - assert_eq!( - get_blobs(&first_node.url(), vec![blob]).await.unwrap()[0].as_bytes(), - bytes.clone() - ); -} - -#[tokio::test] -async fn disseminate_blob() { - let mut config = Disseminate { - data: Some("hello world".to_string()), - ..Default::default() - }; - disseminate(&mut config).await; -} - -#[tokio::test] -async fn disseminate_big_blob() { - const MSG_SIZE: usize = 1024; - let mut config = Disseminate { - data: std::iter::repeat(String::from("X")) - .take(MSG_SIZE) - .collect::>() - .join("") - .into(), - ..Default::default() - }; - disseminate(&mut config).await; -} - -#[tokio::test] -async fn disseminate_blob_from_file() { - let mut file = NamedTempFile::new().unwrap(); - file.write_all("hello world".as_bytes()).unwrap(); - - let mut config = Disseminate { - file: Some(file.path().to_path_buf()), - ..Default::default() - }; - disseminate(&mut config).await; -} - -async fn wait_for_cert_in_mempool(node: &NomosNode) { - loop { - if node - .get_mempoool_metrics(Pool::Da) - .await - .last_item_timestamp - != 0 - { - break; - } - tokio::time::sleep(Duration::from_millis(100)).await; - } -} diff --git a/tests/src/tests/cryptarchia/happy.rs b/tests/src/tests/cryptarchia/happy.rs index 02a760e4..853cee71 100644 --- a/tests/src/tests/cryptarchia/happy.rs +++ b/tests/src/tests/cryptarchia/happy.rs @@ -1,7 +1,7 @@ use futures::stream::{self, StreamExt}; use std::collections::HashSet; use std::time::Duration; -use tests::{adjust_timeout, Node, NomosNode, SpawnConfig}; +use tests::{adjust_timeout, nodes::NomosNode, Node, SpawnConfig}; // how many times more than the expected time to produce a predefined number of blocks we wait before timing out const TIMEOUT_MULTIPLIER: f64 = 3.0;