diff --git a/.gitignore b/.gitignore index 11162827..ee92cc1b 100644 --- a/.gitignore +++ b/.gitignore @@ -15,6 +15,7 @@ store.* sim_config.json *.txt .env +.idea/ # Integration test temp dirs tests/.tmp* diff --git a/Cargo.toml b/Cargo.toml index a5bc5b56..91fc6861 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -16,7 +16,6 @@ members = [ "nomos-services/storage", "nomos-services/cryptarchia-consensus", "nomos-services/mempool", - "nomos-services/metrics", "nomos-services/system-sig", "nomos-services/data-availability/indexer", "nomos-services/data-availability/network", diff --git a/README.md b/README.md index dcab9586..92e358f2 100644 --- a/README.md +++ b/README.md @@ -11,7 +11,6 @@ Nomos blockchain node mvp - http - mempool - network - - metrics - `nodes`: Nomos nodes is the collection of nodes that are used to run the Nomos mvp and experimental nodes. - `nomos-node`: main implementation of the Nomos mvp node. - `mockpool-node`: node with single mempool service, used to measure transaction dissemination. diff --git a/compose.debug.yml b/compose.debug.yml index 383f163c..aea73219 100644 --- a/compose.debug.yml +++ b/compose.debug.yml @@ -10,6 +10,7 @@ services: command: - --config.file=/etc/prometheus/prometheus.yml - --storage.tsdb.retention.time=7d + - --enable-feature=otlp-write-receiver ports: - 127.0.0.1:9090:9090 restart: on-failure diff --git a/compose.static.yml b/compose.static.yml index 7b385aff..e9dfb168 100644 --- a/compose.static.yml +++ b/compose.static.yml @@ -84,6 +84,7 @@ services: command: - --config.file=/etc/prometheus/prometheus.yml - --storage.tsdb.retention.time=7d + - --enable-feature=otlp-write-receiver ports: - 127.0.0.1:9090:9090 restart: on-failure diff --git a/nodes/nomos-executor/Cargo.toml b/nodes/nomos-executor/Cargo.toml index 6a6a5319..808a6486 100644 --- a/nodes/nomos-executor/Cargo.toml +++ b/nodes/nomos-executor/Cargo.toml @@ -22,7 +22,6 @@ nomos-mempool = { path = "../../nomos-services/mempool", features = [ "mock", "libp2p", ] } -nomos-metrics = { path = "../../nomos-services/metrics" } nomos-network = { path = "../../nomos-services/network", features = ["libp2p"] } nomos-mix-service = { path = "../../nomos-services/mix", features = ["libp2p"] } nomos-node = { path = "../nomos-node" } @@ -42,5 +41,4 @@ uuid = { version = "1.10.0", features = ["v4"] } [features] default = ["tracing"] -metrics = ["nomos-node/metrics"] tracing = ["nomos-node/tracing"] diff --git a/nodes/nomos-executor/src/api/backend.rs b/nodes/nomos-executor/src/api/backend.rs index 8282ad0d..ce6ad5cf 100644 --- a/nodes/nomos-executor/src/api/backend.rs +++ b/nodes/nomos-executor/src/api/backend.rs @@ -28,7 +28,7 @@ use nomos_libp2p::PeerId; use nomos_mempool::{tx::service::openapi::Status, MempoolMetrics}; use nomos_node::api::handlers::{ add_blob, add_blob_info, add_tx, block, cl_metrics, cl_status, cryptarchia_headers, - cryptarchia_info, get_metrics, get_range, libp2p_info, + cryptarchia_info, get_range, libp2p_info, }; use nomos_storage::backends::StorageSerde; use overwatch_rs::overwatch::handle::OverwatchHandle; @@ -327,7 +327,6 @@ where >, ), ) - .route(paths::METRICS, routing::get(get_metrics)) .route( paths::DISPERSE_DATA, routing::post( diff --git a/nodes/nomos-executor/src/lib.rs b/nodes/nomos-executor/src/lib.rs index db8f34dc..17ed141c 100644 --- a/nodes/nomos-executor/src/lib.rs +++ b/nodes/nomos-executor/src/lib.rs @@ -24,8 +24,6 @@ use nomos_mix_service::MixService; use nomos_node::DispersedBlobInfo; use nomos_node::HeaderId; use nomos_node::MempoolNetworkAdapter; 
-#[cfg(feature = "metrics")] -use nomos_node::Metrics; use nomos_node::NetworkBackend; use nomos_node::{ BlobInfo, Cryptarchia, DaIndexer, DaMempool, DaNetworkService, DaSampling, DaVerifier, @@ -101,7 +99,5 @@ pub struct NomosExecutor { cryptarchia: ServiceHandle, http: ServiceHandle, storage: ServiceHandle>>, - #[cfg(feature = "metrics")] - metrics: ServiceHandle, system_sig: ServiceHandle, } diff --git a/nodes/nomos-executor/src/main.rs b/nodes/nomos-executor/src/main.rs index 64b49a3c..310a88b9 100644 --- a/nodes/nomos-executor/src/main.rs +++ b/nodes/nomos-executor/src/main.rs @@ -4,12 +4,10 @@ use clap::Parser; use color_eyre::eyre::{eyre, Result}; use nomos_executor::config::Config as ExecutorConfig; use nomos_executor::{NomosExecutor, NomosExecutorServiceSettings}; -#[cfg(feature = "metrics")] -use nomos_node::MetricsSettings; use nomos_node::{ config::MixArgs, BlobInfo, CryptarchiaArgs, DaMempoolSettings, DispersedBlobInfo, HttpArgs, - LogArgs, MempoolAdapterSettings, MetricsArgs, NetworkArgs, Transaction, Tx, TxMempoolSettings, - CL_TOPIC, DA_TOPIC, + LogArgs, MempoolAdapterSettings, NetworkArgs, Transaction, Tx, TxMempoolSettings, CL_TOPIC, + DA_TOPIC, }; use overwatch_rs::overwatch::*; use tracing::{span, Level}; @@ -35,9 +33,6 @@ struct Args { http_args: HttpArgs, #[clap(flatten)] cryptarchia_args: CryptarchiaArgs, - /// Overrides metrics config. - #[clap(flatten)] - metrics_args: MetricsArgs, } fn main() -> Result<()> { @@ -48,7 +43,6 @@ fn main() -> Result<()> { network_args, mix_args, cryptarchia_args, - metrics_args, } = Args::parse(); let config = serde_yaml::from_reader::<_, ExecutorConfig>(std::fs::File::open(config)?)? .update_from_args( @@ -59,14 +53,6 @@ fn main() -> Result<()> { cryptarchia_args, )?; - let registry = cfg!(feature = "metrics") - .then(|| { - metrics_args - .with_metrics - .then(nomos_metrics::NomosRegistry::default) - }) - .flatten(); - #[cfg(debug_assertions)] let debug_span = { let debug_id = Uuid::new_v4(); @@ -87,7 +73,6 @@ fn main() -> Result<()> { topic: String::from(CL_TOPIC), id: ::hash, }, - registry: registry.clone(), }, da_mempool: DaMempoolSettings { backend: (), @@ -95,7 +80,6 @@ fn main() -> Result<()> { topic: String::from(DA_TOPIC), id: ::blob_id, }, - registry: registry.clone(), }, da_dispersal: config.da_dispersal, da_network: config.da_network, @@ -103,8 +87,6 @@ fn main() -> Result<()> { da_sampling: config.da_sampling, da_verifier: config.da_verifier, cryptarchia: config.cryptarchia, - #[cfg(feature = "metrics")] - metrics: MetricsSettings { registry }, storage: config.storage, system_sig: (), }, diff --git a/nodes/nomos-node/Cargo.toml b/nodes/nomos-node/Cargo.toml index 3688e2f0..3f54f53e 100644 --- a/nodes/nomos-node/Cargo.toml +++ b/nodes/nomos-node/Cargo.toml @@ -36,7 +36,6 @@ nomos-mempool = { path = "../../nomos-services/mempool", features = [ "mock", "libp2p", ] } -nomos-metrics = { path = "../../nomos-services/metrics" } nomos-storage = { path = "../../nomos-services/storage", features = ["rocksdb"] } cryptarchia-consensus = { path = "../../nomos-services/cryptarchia-consensus", features = ["libp2p"] } nomos-libp2p = { path = "../../nomos-libp2p" } @@ -65,5 +64,4 @@ rand = "0.8" [features] default = ["tracing"] -metrics = [] tracing = [] diff --git a/nodes/nomos-node/config.yaml b/nodes/nomos-node/config.yaml index dfe3ce51..339f5334 100644 --- a/nodes/nomos-node/config.yaml +++ b/nodes/nomos-node/config.yaml @@ -52,10 +52,28 @@ mix: backend: listening_address: /ip4/0.0.0.0/udp/3001/quic-v1 node_key: 
40fb62acf1604000c1b8d3bd0880e43eb2f6ae52029fde75d992ba0fed6e01c3 - membership: - - /ip4/0.0.0.0/udp/3001/quic-v1/p2p/12D3KooWMj7KgmbDJ7RSXeFhFimvqddSXzA5XWDTwvdEJYfuoPhM peering_degree: 1 - num_mix_layers: 1 + persistent_transmission: + max_emission_frequency: 1 + drop_message_probability: 0.5 + message_blend: + cryptographic_processor: + private_key: [0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0] + num_mix_layers: 1 + temporal_processor: + max_delay_seconds: 5 + cover_traffic: + epoch_duration: + secs: 432000 + nanos: 0 + slot_duration: + secs: 20 + nanos: 0 + membership: + - address: /ip4/127.0.0.1/udp/3001/quic-v1 + public_key: [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1] + - address: /ip4/127.0.0.1/udp/3002/quic-v1 + public_key: [2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2] http: backend_settings: diff --git a/nodes/nomos-node/src/api/backend.rs b/nodes/nomos-node/src/api/backend.rs index 9d61a649..e94ae6ad 100644 --- a/nodes/nomos-node/src/api/backend.rs +++ b/nodes/nomos-node/src/api/backend.rs @@ -29,7 +29,7 @@ use utoipa_swagger_ui::SwaggerUi; // internal use super::handlers::{ add_blob, add_blob_info, add_tx, block, cl_metrics, cl_status, cryptarchia_headers, - cryptarchia_info, get_metrics, get_range, libp2p_info, + cryptarchia_info, get_range, libp2p_info, }; /// Configuration for the Http Server @@ -303,7 +303,6 @@ where >, ), ) - .route(paths::METRICS, routing::get(get_metrics)) .with_state(handle); Server::bind(&self.settings.address) diff --git a/nodes/nomos-node/src/api/handlers.rs b/nodes/nomos-node/src/api/handlers.rs index 6689f781..c9e3dbfc 100644 --- a/nodes/nomos-node/src/api/handlers.rs +++ b/nodes/nomos-node/src/api/handlers.rs @@ -5,16 +5,14 @@ use std::{fmt::Debug, hash::Hash}; // crates use axum::{ extract::{Query, State}, - http::HeaderValue, - response::{IntoResponse, Response}, + response::Response, Json, }; -use hyper::{header::CONTENT_TYPE, Body, StatusCode}; use rand::{RngCore, SeedableRng}; use serde::{de::DeserializeOwned, Deserialize, Serialize}; // internal use super::paths; -use nomos_api::http::{cl, consensus, da, libp2p, mempool, metrics, storage}; +use nomos_api::http::{cl, consensus, da, libp2p, mempool, storage}; use nomos_core::da::blob::info::DispersedBlobInfo; use nomos_core::da::blob::metadata::Metadata; use nomos_core::da::{BlobId, DaVerifier as CoreDaVerifier}; @@ -418,29 +416,3 @@ where SamplingStorage, >(&handle, blob_info, DispersedBlobInfo::blob_id)) } - -#[utoipa::path( - get, - path = paths::METRICS, - responses( - (status = 200, description = "Get all metrics"), - (status = 500, description = "Internal server error", body = String), - ) -)] -pub async fn get_metrics(State(handle): State) -> Response { - match metrics::gather(&handle).await { - Ok(encoded_metrics) => Response::builder() - .status(StatusCode::OK) - .header( - CONTENT_TYPE, - HeaderValue::from_static("text/plain; version=0.0.4"), - ) - .body(Body::from(encoded_metrics)) - .unwrap() - .into_response(), - Err(e) => axum::response::IntoResponse::into_response(( - hyper::StatusCode::INTERNAL_SERVER_ERROR, - e.to_string(), - )), - } -} diff --git a/nodes/nomos-node/src/api/paths.rs b/nodes/nomos-node/src/api/paths.rs index 6f43824b..6357739c 100644 --- a/nodes/nomos-node/src/api/paths.rs +++ b/nodes/nomos-node/src/api/paths.rs @@ -8,4 +8,3 @@ pub const NETWORK_INFO: &str = "/network/info"; pub const STORAGE_BLOCK: &str = "/storage/block"; pub const MEMPOOL_ADD_TX: &str = "/mempool/add/tx"; pub const 
MEMPOOL_ADD_BLOB_INFO: &str = "/mempool/add/blobinfo"; -pub const METRICS: &str = "/metrics"; diff --git a/nodes/nomos-node/src/config.rs b/nodes/nomos-node/src/config.rs index 6ce275a5..78bf97e1 100644 --- a/nodes/nomos-node/src/config.rs +++ b/nodes/nomos-node/src/config.rs @@ -83,9 +83,6 @@ pub struct MixArgs { #[clap(long = "mix-node-key", env = "MIX_NODE_KEY")] mix_node_key: Option, - #[clap(long = "mix-membership", env = "MIX_MEMBERSHIP", num_args = 1.., value_delimiter = ',')] - pub mix_membership: Option>, - #[clap(long = "mix-peering-degree", env = "MIX_PEERING_DEGREE")] mix_peering_degree: Option, @@ -132,12 +129,6 @@ pub struct CryptarchiaArgs { note_nonce: Option, } -#[derive(Parser, Debug, Clone)] -pub struct MetricsArgs { - #[clap(long = "with-metrics", env = "WITH_METRICS")] - pub with_metrics: bool, -} - #[derive(Deserialize, Debug, Clone, Serialize)] pub struct Config { pub tracing: ::Settings, @@ -256,7 +247,6 @@ pub fn update_mix( let MixArgs { mix_addr, mix_node_key, - mix_membership, mix_peering_degree, mix_num_mix_layers, } = mix_args; @@ -270,10 +260,6 @@ pub fn update_mix( mix.backend.node_key = SecretKey::try_from_bytes(key_bytes.as_mut_slice())?; } - if let Some(membership) = mix_membership { - mix.backend.membership = membership; - } - if let Some(peering_degree) = mix_peering_degree { mix.backend.peering_degree = peering_degree; } diff --git a/nodes/nomos-node/src/lib.rs b/nodes/nomos-node/src/lib.rs index 0856688e..ea3c8031 100644 --- a/nodes/nomos-node/src/lib.rs +++ b/nodes/nomos-node/src/lib.rs @@ -7,7 +7,7 @@ mod tx; use api::backend::AxumBackend; use bytes::Bytes; use color_eyre::eyre::Result; -pub use config::{Config, CryptarchiaArgs, HttpArgs, LogArgs, MetricsArgs, NetworkArgs}; +pub use config::{Config, CryptarchiaArgs, HttpArgs, LogArgs, NetworkArgs}; use kzgrs_backend::common::blob::DaBlob; pub use kzgrs_backend::dispersal::BlobInfo; use nomos_api::ApiService; @@ -35,9 +35,6 @@ pub use nomos_mempool::network::adapters::libp2p::{ }; pub use nomos_mempool::TxMempoolSettings; use nomos_mempool::{backend::mockpool::MockPool, TxMempoolService}; -pub use nomos_metrics::NomosRegistry; -#[cfg(feature = "metrics")] -pub use nomos_metrics::{Metrics, MetricsSettings}; pub use nomos_mix_service::backends::libp2p::Libp2pMixBackend as MixBackend; pub use nomos_mix_service::network::libp2p::Libp2pAdapter as MixNetworkAdapter; pub use nomos_mix_service::MixService; @@ -173,8 +170,6 @@ pub struct Nomos { cryptarchia: ServiceHandle, http: ServiceHandle, storage: ServiceHandle>>, - #[cfg(feature = "metrics")] - metrics: ServiceHandle, system_sig: ServiceHandle, } diff --git a/nodes/nomos-node/src/main.rs b/nodes/nomos-node/src/main.rs index 1f54d228..6b44d9d7 100644 --- a/nodes/nomos-node/src/main.rs +++ b/nodes/nomos-node/src/main.rs @@ -1,8 +1,6 @@ use kzgrs_backend::dispersal::BlobInfo; -#[cfg(feature = "metrics")] -use nomos_metrics::MetricsSettings; use nomos_node::{ - config::MixArgs, Config, CryptarchiaArgs, HttpArgs, LogArgs, MetricsArgs, NetworkArgs, Nomos, + config::MixArgs, Config, CryptarchiaArgs, HttpArgs, LogArgs, NetworkArgs, Nomos, NomosServiceSettings, Tx, }; @@ -35,9 +33,6 @@ struct Args { http_args: HttpArgs, #[clap(flatten)] cryptarchia_args: CryptarchiaArgs, - /// Overrides metrics config. 
- #[clap(flatten)] - metrics_args: MetricsArgs, } fn main() -> Result<()> { @@ -48,7 +43,6 @@ fn main() -> Result<()> { network_args, mix_args, cryptarchia_args, - metrics_args, } = Args::parse(); let config = serde_yaml::from_reader::<_, Config>(std::fs::File::open(config)?)? .update_from_args( @@ -59,13 +53,6 @@ fn main() -> Result<()> { cryptarchia_args, )?; - let registry = cfg!(feature = "metrics") - .then(|| { - metrics_args - .with_metrics - .then(nomos_metrics::NomosRegistry::default) - }) - .flatten(); #[cfg(debug_assertions)] let debug_span = { let debug_id = Uuid::new_v4(); @@ -86,7 +73,6 @@ fn main() -> Result<()> { topic: String::from(nomos_node::CL_TOPIC), id: ::hash, }, - registry: registry.clone(), }, da_mempool: nomos_mempool::DaMempoolSettings { backend: (), @@ -94,15 +80,12 @@ fn main() -> Result<()> { topic: String::from(nomos_node::DA_TOPIC), id: ::blob_id, }, - registry: registry.clone(), }, da_network: config.da_network, da_indexer: config.da_indexer, da_sampling: config.da_sampling, da_verifier: config.da_verifier, cryptarchia: config.cryptarchia, - #[cfg(feature = "metrics")] - metrics: MetricsSettings { registry }, storage: config.storage, system_sig: (), }, diff --git a/nomos-da/kzgrs/Cargo.toml b/nomos-da/kzgrs/Cargo.toml index 3c004123..83e07436 100644 --- a/nomos-da/kzgrs/Cargo.toml +++ b/nomos-da/kzgrs/Cargo.toml @@ -39,6 +39,11 @@ harness = false name = "fk20" harness = false + +[[bench]] +name = "rs" +harness = false + [features] default = ["single"] single = [] diff --git a/nomos-da/kzgrs/benches/rs.rs b/nomos-da/kzgrs/benches/rs.rs new file mode 100644 index 00000000..3ed0ff81 --- /dev/null +++ b/nomos-da/kzgrs/benches/rs.rs @@ -0,0 +1,54 @@ +use ark_bls12_381::Fr; +use ark_poly::{EvaluationDomain, GeneralEvaluationDomain}; +use kzgrs::common::bytes_to_polynomial; +use kzgrs::rs::*; + +use divan::counter::BytesCount; +use divan::{black_box, Bencher}; +use rand::{thread_rng, RngCore}; + +fn main() { + divan::main() +} + +#[divan::bench(args = [3224])] +fn rs_encode(bencher: Bencher, size: usize) { + bencher + .with_inputs(move || { + let mut buffer = vec![0u8; size]; + thread_rng().fill_bytes(&mut buffer); + buffer + }) + .input_counter(move |buff| BytesCount::new(buff.len())) + .bench_refs(|buff| { + let domain = GeneralEvaluationDomain::::new(size).unwrap(); + let (_, poly) = bytes_to_polynomial::<31>(buff, domain).unwrap(); + let domain = GeneralEvaluationDomain::::new(size * 2).unwrap(); + black_box(move || encode(&poly, domain)) + }) +} + +#[divan::bench(args = [3224], sample_size = 10, sample_count = 100)] +fn rs_decode(bencher: Bencher, size: usize) { + bencher + .with_inputs(move || { + let mut buffer = vec![0u8; size]; + thread_rng().fill_bytes(&mut buffer); + let domain = GeneralEvaluationDomain::::new(size).unwrap(); + let (_, poly) = bytes_to_polynomial::<31>(&buffer, domain).unwrap(); + let domain = GeneralEvaluationDomain::::new(size * 2).unwrap(); + let encoded = encode(&poly, domain); + encoded + }) + .input_counter(move |_buff| BytesCount::new(size * 31)) + .bench_values(|buff| { + black_box(move || { + let domain = GeneralEvaluationDomain::::new(size).unwrap(); + let missing_data: Vec<_> = std::iter::repeat(None) + .take(size) + .chain(buff.evals[size..].iter().copied().map(Some)) + .collect(); + decode(size, &missing_data, domain) + }) + }) +} diff --git a/nomos-mix/core/Cargo.toml b/nomos-mix/core/Cargo.toml index fe6901cd..d55a8c19 100644 --- a/nomos-mix/core/Cargo.toml +++ b/nomos-mix/core/Cargo.toml @@ -4,6 +4,7 @@ version = "0.1.0" 
edition = "2021" [dependencies] +blake2 = "0.10" cached = "0.53" tokio = { version = "1", features = ["time", "sync", "macros"] } tokio-stream = "0.1" @@ -12,8 +13,10 @@ rand = "0.8" serde = { version = "1.0", features = ["derive"] } nomos-mix-message = { path = "../message" } futures = "0.3" -rand_chacha = "0.3" +multiaddr = "0.18" +x25519-dalek = { version = "2", features = ["getrandom", "static_secrets"] } [dev-dependencies] tokio = { version = "1", features = ["rt-multi-thread"] } +rand_chacha = "0.3" diff --git a/nomos-mix/core/src/cover_traffic.rs b/nomos-mix/core/src/cover_traffic.rs new file mode 100644 index 00000000..8042ed2b --- /dev/null +++ b/nomos-mix/core/src/cover_traffic.rs @@ -0,0 +1,130 @@ +use blake2::digest::consts::U4; +use blake2::Digest; +use futures::{Stream, StreamExt}; +use nomos_mix_message::MixMessage; +use serde::Deserialize; +use std::collections::HashSet; +use std::hash::Hash; +use std::marker::PhantomData; +use std::ops::{DerefMut, Div}; +use std::pin::Pin; +use std::task::{Context, Poll}; + +#[derive(Copy, Clone, Deserialize)] +pub struct CoverTrafficSettings { + pub node_id: [u8; 32], + pub number_of_hops: usize, + pub slots_per_epoch: usize, + pub network_size: usize, +} + +pub struct CoverTraffic { + winning_probability: f64, + settings: CoverTrafficSettings, + epoch_stream: EpochStream, + slot_stream: SlotStream, + selected_slots: HashSet, + _message: PhantomData, +} + +impl CoverTraffic +where + EpochStream: Stream + Send + Sync + Unpin, + SlotStream: Stream + Send + Sync + Unpin, +{ + pub fn new( + settings: CoverTrafficSettings, + epoch_stream: EpochStream, + slot_stream: SlotStream, + ) -> Self { + let winning_probability = winning_probability(settings.number_of_hops); + CoverTraffic { + winning_probability, + settings, + epoch_stream, + slot_stream, + selected_slots: Default::default(), + _message: Default::default(), + } + } +} + +impl Stream for CoverTraffic +where + EpochStream: Stream + Send + Sync + Unpin, + SlotStream: Stream + Send + Sync + Unpin, + Message: MixMessage + Send + Sync + Unpin, +{ + type Item = Vec; + + fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { + let Self { + winning_probability, + settings, + epoch_stream, + slot_stream, + selected_slots, + .. 
+ } = self.deref_mut(); + if let Poll::Ready(Some(epoch)) = epoch_stream.poll_next_unpin(cx) { + *selected_slots = select_slot( + settings.node_id, + epoch, + settings.network_size, + settings.slots_per_epoch, + *winning_probability, + ); + } + if let Poll::Ready(Some(slot)) = slot_stream.poll_next_unpin(cx) { + if selected_slots.contains(&(slot as u32)) { + return Poll::Ready(Some(vec![])); + } + } + Poll::Pending + } +} + +fn generate_ticket>(node_id: Id, r: usize, slot: usize) -> u32 { + let mut hasher = blake2::Blake2s::::new(); + hasher.update(node_id); + hasher.update(r.to_be_bytes()); + hasher.update(slot.to_be_bytes()); + let hash: [u8; std::mem::size_of::()] = hasher.finalize()[..].to_vec().try_into().unwrap(); + u32::from_be_bytes(hash) +} + +fn select_slot + Copy>( + node_id: Id, + r: usize, + network_size: usize, + slots_per_epoch: usize, + winning_probability: f64, +) -> HashSet { + let i = (slots_per_epoch as f64).div(network_size as f64) * winning_probability; + let size = i.ceil() as usize; + let mut w = HashSet::new(); + let mut i = 0; + while w.len() != size { + w.insert(generate_ticket(node_id, r, i) % slots_per_epoch as u32); + i += 1; + } + w +} + +fn winning_probability(number_of_hops: usize) -> f64 { + 1.0 / number_of_hops as f64 +} + +#[cfg(test)] +mod tests { + use crate::cover_traffic::{generate_ticket, select_slot, winning_probability}; + + #[test] + fn test_ticket() { + generate_ticket(10u32.to_be_bytes(), 1123, 0); + for i in 0..1u32 { + let slots = select_slot(i.to_be_bytes(), 1234, 100, 21600, winning_probability(1)); + println!("slots = {slots:?}"); + } + } +} diff --git a/nomos-mix/core/src/lib.rs b/nomos-mix/core/src/lib.rs index 0326319c..53f33bf1 100644 --- a/nomos-mix/core/src/lib.rs +++ b/nomos-mix/core/src/lib.rs @@ -1,3 +1,5 @@ +pub mod cover_traffic; +pub mod membership; pub mod message_blend; pub mod persistent_transmission; diff --git a/nomos-mix/core/src/membership.rs b/nomos-mix/core/src/membership.rs new file mode 100644 index 00000000..f1bda2ca --- /dev/null +++ b/nomos-mix/core/src/membership.rs @@ -0,0 +1,58 @@ +use multiaddr::Multiaddr; +use nomos_mix_message::MixMessage; +use rand::{seq::SliceRandom, Rng}; +use serde::{Deserialize, Serialize}; + +#[derive(Clone, Debug)] +pub struct Membership +where + M: MixMessage, +{ + remote_nodes: Vec>, + local_node: Node, +} + +#[derive(Clone, Debug, Serialize, Deserialize)] +pub struct Node { + pub address: Multiaddr, + pub public_key: K, +} + +impl Membership +where + M: MixMessage, + M::PublicKey: PartialEq, +{ + pub fn new(nodes: Vec>, local_public_key: M::PublicKey) -> Self { + let mut remote_nodes = Vec::with_capacity(nodes.len() - 1); + let mut local_node = None; + nodes.into_iter().for_each(|node| { + if node.public_key == local_public_key { + local_node = Some(node); + } else { + remote_nodes.push(node); + } + }); + + Self { + remote_nodes, + local_node: local_node.expect("Local node not found"), + } + } + + pub fn choose_remote_nodes( + &self, + rng: &mut R, + amount: usize, + ) -> Vec<&Node> { + self.remote_nodes.choose_multiple(rng, amount).collect() + } + + pub fn local_node(&self) -> &Node { + &self.local_node + } + + pub fn size(&self) -> usize { + self.remote_nodes.len() + 1 + } +} diff --git a/nomos-mix/core/src/message_blend/crypto.rs b/nomos-mix/core/src/message_blend/crypto.rs index 910b28e0..62a8a865 100644 --- a/nomos-mix/core/src/message_blend/crypto.rs +++ b/nomos-mix/core/src/message_blend/crypto.rs @@ -1,33 +1,56 @@ -use nomos_mix_message::{new_message, unwrap_message}; +use 
crate::membership::Membership; +use nomos_mix_message::MixMessage; +use rand::RngCore; use serde::{Deserialize, Serialize}; /// [`CryptographicProcessor`] is responsible for wrapping and unwrapping messages /// for the message indistinguishability. -#[derive(Clone, Copy, Debug)] -pub struct CryptographicProcessor { - settings: CryptographicProcessorSettings, +pub struct CryptographicProcessor +where + M: MixMessage, +{ + settings: CryptographicProcessorSettings, + membership: Membership, + rng: R, } -#[derive(Clone, Copy, Debug, Serialize, Deserialize)] -pub struct CryptographicProcessorSettings { +#[derive(Clone, Debug, Serialize, Deserialize)] +pub struct CryptographicProcessorSettings { + pub private_key: K, pub num_mix_layers: usize, } -impl CryptographicProcessor { - pub fn new(settings: CryptographicProcessorSettings) -> Self { - Self { settings } +impl CryptographicProcessor +where + R: RngCore, + M: MixMessage, + M::PublicKey: Clone + PartialEq, +{ + pub fn new( + settings: CryptographicProcessorSettings, + membership: Membership, + rng: R, + ) -> Self { + Self { + settings, + membership, + rng, + } } - pub fn wrap_message(&self, message: &[u8]) -> Result, nomos_mix_message::Error> { + pub fn wrap_message(&mut self, message: &[u8]) -> Result, M::Error> { // TODO: Use the actual Sphinx encoding instead of mock. - // TODO: Select `num_mix_layers` random nodes from the membership. - new_message(message, self.settings.num_mix_layers.try_into().unwrap()) + let public_keys = self + .membership + .choose_remote_nodes(&mut self.rng, self.settings.num_mix_layers) + .iter() + .map(|node| node.public_key.clone()) + .collect::>(); + + M::build_message(message, &public_keys) } - pub fn unwrap_message( - &self, - message: &[u8], - ) -> Result<(Vec, bool), nomos_mix_message::Error> { - unwrap_message(message) + pub fn unwrap_message(&self, message: &[u8]) -> Result<(Vec, bool), M::Error> { + M::unwrap_message(message, &self.settings.private_key) } } diff --git a/nomos-mix/core/src/message_blend/mod.rs b/nomos-mix/core/src/message_blend/mod.rs index 180a583f..2cc988b6 100644 --- a/nomos-mix/core/src/message_blend/mod.rs +++ b/nomos-mix/core/src/message_blend/mod.rs @@ -2,51 +2,82 @@ pub mod crypto; pub mod temporal; pub use crypto::CryptographicProcessorSettings; -use futures::stream::BoxStream; use futures::{Stream, StreamExt}; +use rand::RngCore; +use std::fmt::Debug; +use std::marker::PhantomData; use std::pin::Pin; use std::task::{Context, Poll}; -pub use temporal::TemporalProcessorSettings; +pub use temporal::TemporalSchedulerSettings; +use crate::membership::Membership; use crate::message_blend::crypto::CryptographicProcessor; use crate::message_blend::temporal::TemporalProcessorExt; use crate::MixOutgoingMessage; +use nomos_mix_message::MixMessage; +use serde::de::DeserializeOwned; use serde::{Deserialize, Serialize}; use tokio::sync::mpsc; use tokio::sync::mpsc::UnboundedSender; use tokio_stream::wrappers::UnboundedReceiverStream; #[derive(Clone, Debug, Serialize, Deserialize)] -pub struct MessageBlendSettings { - pub cryptographic_processor: CryptographicProcessorSettings, - pub temporal_processor: TemporalProcessorSettings, +pub struct MessageBlendSettings +where + M: MixMessage, + M::PrivateKey: Serialize + DeserializeOwned, +{ + pub cryptographic_processor: CryptographicProcessorSettings, + pub temporal_processor: TemporalSchedulerSettings, } /// [`MessageBlendStream`] handles the entire mixing tiers process /// - Unwraps incoming messages received from network using 
[`CryptographicProcessor`] /// - Pushes unwrapped messages to [`TemporalProcessor`] -pub struct MessageBlendStream { +pub struct MessageBlendStream +where + M: MixMessage, +{ input_stream: S, - output_stream: BoxStream<'static, MixOutgoingMessage>, + output_stream: Pin + Send + Sync + 'static>>, temporal_sender: UnboundedSender, - cryptographic_processor: CryptographicProcessor, + cryptographic_processor: CryptographicProcessor, + _rng: PhantomData, + _scheduler: PhantomData, } -impl MessageBlendStream +impl MessageBlendStream where S: Stream>, + Rng: RngCore + Unpin + Send + 'static, + M: MixMessage, + M::PrivateKey: Serialize + DeserializeOwned, + M::PublicKey: Clone + PartialEq, + M::Error: Debug, + Scheduler: Stream + Unpin + Send + Sync + 'static, { - pub fn new(input_stream: S, settings: MessageBlendSettings) -> Self { - let cryptographic_processor = CryptographicProcessor::new(settings.cryptographic_processor); + pub fn new( + input_stream: S, + settings: MessageBlendSettings, + membership: Membership, + scheduler: Scheduler, + cryptographic_processor_rng: Rng, + ) -> Self { + let cryptographic_processor = CryptographicProcessor::new( + settings.cryptographic_processor, + membership, + cryptographic_processor_rng, + ); let (temporal_sender, temporal_receiver) = mpsc::unbounded_channel(); - let output_stream = UnboundedReceiverStream::new(temporal_receiver) - .temporal_stream(settings.temporal_processor) - .boxed(); + let output_stream = + Box::pin(UnboundedReceiverStream::new(temporal_receiver).temporal_stream(scheduler)); Self { input_stream, output_stream, temporal_sender, cryptographic_processor, + _rng: Default::default(), + _scheduler: Default::default(), } } @@ -62,9 +93,6 @@ where tracing::error!("Failed to send message to the outbound channel: {e:?}"); } } - Err(nomos_mix_message::Error::MsgUnwrapNotAllowed) => { - tracing::debug!("Message cannot be unwrapped by this node"); - } Err(e) => { tracing::error!("Failed to unwrap message: {:?}", e); } @@ -72,9 +100,15 @@ where } } -impl Stream for MessageBlendStream +impl Stream for MessageBlendStream where S: Stream> + Unpin, + Rng: RngCore + Unpin + Send + 'static, + M: MixMessage + Unpin, + M::PrivateKey: Serialize + DeserializeOwned + Unpin, + M::PublicKey: Clone + PartialEq + Unpin, + M::Error: Debug, + Scheduler: Stream + Unpin + Send + Sync + 'static, { type Item = MixOutgoingMessage; @@ -86,13 +120,43 @@ where } } -pub trait MessageBlendExt: Stream> { - fn blend(self, message_blend_settings: MessageBlendSettings) -> MessageBlendStream +pub trait MessageBlendExt: Stream> +where + Rng: RngCore + Send + Unpin + 'static, + M: MixMessage, + M::PrivateKey: Serialize + DeserializeOwned, + M::PublicKey: Clone + PartialEq, + M::Error: Debug, + Scheduler: Stream + Unpin + Send + Sync + 'static, +{ + fn blend( + self, + message_blend_settings: MessageBlendSettings, + membership: Membership, + scheduler: Scheduler, + cryptographic_processor_rng: Rng, + ) -> MessageBlendStream where Self: Sized + Unpin, { - MessageBlendStream::new(self, message_blend_settings) + MessageBlendStream::new( + self, + message_blend_settings, + membership, + scheduler, + cryptographic_processor_rng, + ) } } -impl MessageBlendExt for T where T: Stream> {} +impl MessageBlendExt for T +where + T: Stream>, + Rng: RngCore + Unpin + Send + 'static, + M: MixMessage, + M::PrivateKey: Clone + Serialize + DeserializeOwned + PartialEq, + M::PublicKey: Clone + Serialize + DeserializeOwned + PartialEq, + M::Error: Debug, + S: Stream + Unpin + Send + Sync + 'static, +{ +} 
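+
+// A minimal wiring sketch (comments only; `incoming_stream`, `membership`, and
+// `settings` are assumed to come from the service layer, and the mock message type
+// stands in for a real one):
+//
+//     use rand::rngs::OsRng;
+//     let scheduler = TemporalScheduler::new(settings.temporal_processor, OsRng);
+//     let blended = incoming_stream.blend(settings, membership, scheduler, OsRng);
+//
+// `blended` then yields `MixOutgoingMessage`s once each message has been unwrapped by
+// the cryptographic processor and released by the temporal scheduler's lottery.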
diff --git a/nomos-mix/core/src/message_blend/temporal.rs b/nomos-mix/core/src/message_blend/temporal.rs
index 586254b7..f42bc97a 100644
--- a/nomos-mix/core/src/message_blend/temporal.rs
+++ b/nomos-mix/core/src/message_blend/temporal.rs
@@ -6,38 +6,29 @@ use std::{
 };

 use futures::{Future, Stream, StreamExt};
-use rand::Rng;
+use rand::{Rng, RngCore};
 use serde::{Deserialize, Serialize};
 use tokio::time;

-/// [`TemporalProcessor`] delays messages randomly to hide timing correlation
-/// between incoming and outgoing messages from a node.
-///
-/// See the [`Stream`] implementation below for more details on how it works.
-pub(crate) struct TemporalProcessor<M> {
-    settings: TemporalProcessorSettings,
-    // All scheduled messages
-    queue: VecDeque<M>,
+pub struct TemporalScheduler<Rng> {
+    settings: TemporalSchedulerSettings,
     /// Interval in seconds for running the lottery to release a message
     lottery_interval: time::Interval,
     /// To wait a few seconds after running the lottery before releasing the message.
     /// The lottery returns how long to wait before releasing the message.
     release_timer: Option<Pin<Box<time::Sleep>>>,
+    /// local lottery rng
+    rng: Rng,
 }

-impl<M> TemporalProcessor<M> {
-    pub(crate) fn new(settings: TemporalProcessorSettings) -> Self {
+impl<Rng> TemporalScheduler<Rng> {
+    pub fn new(settings: TemporalSchedulerSettings, rng: Rng) -> Self {
         let lottery_interval = Self::lottery_interval(settings.max_delay_seconds);
         Self {
             settings,
-            queue: VecDeque::new(),
             lottery_interval,
             release_timer: None,
+            rng,
         }
     }

@@ -55,25 +46,25 @@ impl<M> TemporalProcessor<M> {
     fn lottery_interval_seconds(max_delay_seconds: u64) -> u64 {
         max_delay_seconds / 2
     }
+}

+impl<Rng> TemporalScheduler<Rng>
+where
+    Rng: RngCore,
+{
     /// Run the lottery to determine the delay before releasing a message.
     /// The delay is in [0, `lottery_interval_seconds`).
-    fn run_lottery(&self) -> u64 {
+    fn run_lottery(&mut self) -> u64 {
         let interval = Self::lottery_interval_seconds(self.settings.max_delay_seconds);
-        rand::thread_rng().gen_range(0..interval)
-    }
-
-    /// Schedule a message to be released later.
-    pub(crate) fn push_message(&mut self, message: M) {
-        self.queue.push_back(message);
+        self.rng.gen_range(0..interval)
     }
 }

-impl<M> Stream for TemporalProcessor<M>
+impl<Rng> Stream for TemporalScheduler<Rng>
 where
-    M: Unpin,
+    Rng: RngCore + Unpin,
 {
-    type Item = M;
+    type Item = ();

     fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
         // Check whether it's time to run a new lottery to determine the delay.
@@ -87,31 +78,74 @@ where
         if let Some(timer) = self.release_timer.as_mut() {
             if timer.as_mut().poll(cx).is_ready() {
                 self.release_timer.take(); // Reset timer after it's done
-                if let Some(msg) = self.queue.pop_front() {
-                    // Release the 1st message in the queue if it exists.
-                    return Poll::Ready(Some(msg));
-                }
+                return Poll::Ready(Some(()));
             }
         }
-
         Poll::Pending
     }
 }

-pub struct TemporalStream<S>
-where
-    S: Stream,
-{
-    processor: TemporalProcessor<S::Item>,
-    wrapped_stream: S,
+/// [`TemporalProcessor`] delays messages randomly to hide timing correlation
+/// between incoming and outgoing messages from a node.
+///
+/// See the [`Stream`] implementation below for more details on how it works.
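+/// For example, with `max_delay_seconds = 5` the scheduler runs a lottery every
+/// 2 seconds (`max_delay_seconds / 2`, integer division) and draws a delay uniformly
+/// from `[0, 2)` seconds; when that delay elapses, the oldest queued message is
+/// released.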
+pub struct TemporalProcessor<M, S> {
+    // All scheduled messages
+    queue: VecDeque<M>,
+    scheduler: S,
 }

+#[derive(Clone, Copy, Debug, Serialize, Deserialize)]
+pub struct TemporalSchedulerSettings {
+    pub max_delay_seconds: u64,
+}
+
+impl<M, S> TemporalProcessor<M, S> {
+    pub(crate) fn new(scheduler: S) -> Self {
+        Self {
+            queue: VecDeque::new(),
+            scheduler,
+        }
+    }
+    /// Schedule a message to be released later.
+    pub(crate) fn push_message(&mut self, message: M) {
+        self.queue.push_back(message);
+    }
+}
+
+impl<M, S> Stream for TemporalProcessor<M, S>
 where
-    S: Stream + Unpin,
-    S::Item: Unpin,
+    M: Unpin,
+    S: Stream<Item = ()> + Unpin,
 {
-    type Item = S::Item;
+    type Item = M;
+
+    fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
+        if self.scheduler.poll_next_unpin(cx).is_ready() {
+            if let Some(msg) = self.queue.pop_front() {
+                return Poll::Ready(Some(msg));
+            }
+        };
+        Poll::Pending
+    }
+}
+
+pub struct TemporalStream<WrappedStream, Scheduler>
+where
+    WrappedStream: Stream,
+    Scheduler: Stream<Item = ()>,
+{
+    processor: TemporalProcessor<WrappedStream::Item, Scheduler>,
+    wrapped_stream: WrappedStream,
+}
+
+impl<WrappedStream, Scheduler> Stream for TemporalStream<WrappedStream, Scheduler>
+where
+    WrappedStream: Stream + Unpin,
+    WrappedStream::Item: Unpin,
+    Scheduler: Stream<Item = ()> + Unpin,
+{
+    type Item = WrappedStream::Item;

     fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
         if let Poll::Ready(Some(item)) = self.wrapped_stream.poll_next_unpin(cx) {
@@ -120,16 +154,24 @@ where
         self.processor.poll_next_unpin(cx)
     }
 }

-pub trait TemporalProcessorExt: Stream {
-    fn temporal_stream(self, settings: TemporalProcessorSettings) -> TemporalStream<Self>
+pub trait TemporalProcessorExt<Scheduler>: Stream
+where
+    Scheduler: Stream<Item = ()>,
+{
+    fn temporal_stream(self, scheduler: Scheduler) -> TemporalStream<Self, Scheduler>
     where
         Self: Sized,
     {
         TemporalStream {
-            processor: TemporalProcessor::new(settings),
+            processor: TemporalProcessor::<Self::Item, Scheduler>::new(scheduler),
             wrapped_stream: self,
         }
     }
 }

-impl<T> TemporalProcessorExt for T where T: Stream {}
+impl<T, S> TemporalProcessorExt<S> for T
+where
+    T: Stream,
+    S: Stream<Item = ()>,
+{
+}
diff --git a/nomos-mix/core/src/persistent_transmission.rs b/nomos-mix/core/src/persistent_transmission.rs
index a0ef0520..72531a49 100644
--- a/nomos-mix/core/src/persistent_transmission.rs
+++ b/nomos-mix/core/src/persistent_transmission.rs
@@ -1,13 +1,11 @@
-use futures::Stream;
-use nomos_mix_message::DROP_MESSAGE;
-use rand::{distributions::Uniform, prelude::Distribution, Rng, SeedableRng};
-use rand_chacha::ChaCha12Rng;
+use futures::{Stream, StreamExt};
+use nomos_mix_message::MixMessage;
+use rand::{distributions::Uniform, prelude::Distribution, Rng, RngCore};
+use serde::de::DeserializeOwned;
 use serde::{Deserialize, Serialize};
+use std::marker::PhantomData;
 use std::pin::{pin, Pin};
 use std::task::{Context, Poll};
-use std::time::Duration;
-use tokio::time;
-use tokio::time::Interval;

 #[derive(Clone, Copy, Debug, Serialize, Deserialize)]
 pub struct PersistentTransmissionSettings {
     pub max_emission_frequency: f64,
@@ -27,78 +25,97 @@ impl Default for PersistentTransmissionSettings {
 }

 /// Transmit scheduled messages with a persistent rate as a stream.
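+/// On each tick of `scheduler`, the next queued real message is emitted if one is
+/// ready; otherwise a drop message may be emitted instead, according to a coin flip
+/// with `drop_message_probability`.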
-pub struct PersistentTransmissionStream<S>
+pub struct PersistentTransmissionStream<S, Rng, M, Scheduler>
 where
     S: Stream,
+    Rng: RngCore,
 {
-    interval: Interval,
-    coin: Coin<ChaCha12Rng>,
+    coin: Coin<Rng>,
     stream: S,
+    scheduler: Scheduler,
+    _mix_message: PhantomData<M>,
 }

-impl<S> PersistentTransmissionStream<S>
+impl<S, Rng, M, Scheduler> PersistentTransmissionStream<S, Rng, M, Scheduler>
 where
     S: Stream,
+    Rng: RngCore,
+    M: MixMessage,
+    Scheduler: Stream<Item = ()>,
 {
     pub fn new(
         settings: PersistentTransmissionSettings,
         stream: S,
-    ) -> PersistentTransmissionStream<S> {
-        let interval = time::interval(Duration::from_secs_f64(
-            1.0 / settings.max_emission_frequency,
-        ));
-        let coin = Coin::<_>::new(
-            ChaCha12Rng::from_entropy(),
-            settings.drop_message_probability,
-        )
-        .unwrap();
+        scheduler: Scheduler,
+        rng: Rng,
+    ) -> PersistentTransmissionStream<S, Rng, M, Scheduler> {
+        let coin = Coin::<Rng>::new(rng, settings.drop_message_probability).unwrap();
         Self {
-            interval,
             coin,
             stream,
+            scheduler,
+            _mix_message: Default::default(),
         }
     }
 }

-impl<S> Stream for PersistentTransmissionStream<S>
+impl<S, Rng, M, Scheduler> Stream for PersistentTransmissionStream<S, Rng, M, Scheduler>
 where
     S: Stream<Item = Vec<u8>> + Unpin,
+    Rng: RngCore + Unpin,
+    M: MixMessage + Unpin,
+    Scheduler: Stream<Item = ()> + Unpin,
 {
     type Item = Vec<u8>;

     fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
         let Self {
-            ref mut interval,
+            ref mut scheduler,
             ref mut stream,
             ref mut coin,
             ..
         } = self.get_mut();
-        if pin!(interval).poll_tick(cx).is_pending() {
+        if pin!(scheduler).poll_next_unpin(cx).is_pending() {
             return Poll::Pending;
         }
         if let Poll::Ready(Some(item)) = pin!(stream).poll_next(cx) {
             Poll::Ready(Some(item))
         } else if coin.flip() {
-            Poll::Ready(Some(DROP_MESSAGE.to_vec()))
+            Poll::Ready(Some(M::DROP_MESSAGE.to_vec()))
         } else {
             Poll::Pending
         }
     }
 }

-pub trait PersistentTransmissionExt: Stream {
+pub trait PersistentTransmissionExt<Rng, M, Scheduler>: Stream
+where
+    Rng: RngCore,
+    M: MixMessage,
+    Scheduler: Stream<Item = ()>,
+{
     fn persistent_transmission(
         self,
         settings: PersistentTransmissionSettings,
-    ) -> PersistentTransmissionStream<Self>
+        rng: Rng,
+        scheduler: Scheduler,
+    ) -> PersistentTransmissionStream<Self, Rng, M, Scheduler>
     where
         Self: Sized + Unpin,
     {
-        PersistentTransmissionStream::new(settings, self)
+        PersistentTransmissionStream::new(settings, self, scheduler, rng)
     }
 }

-impl<S> PersistentTransmissionExt for S where S: Stream {}
+impl<S, Rng, M, Scheduler> PersistentTransmissionExt<Rng, M, Scheduler> for S
+where
+    S: Stream,
+    Rng: RngCore,
+    M: MixMessage,
+    M::PublicKey: Clone + Serialize + DeserializeOwned,
+    Scheduler: Stream<Item = ()>,
+{
+}

 struct Coin<R: Rng> {
     rng: R,
@@ -133,7 +150,13 @@ enum CoinError {
 mod tests {
     use super::*;
     use futures::StreamExt;
+    use nomos_mix_message::mock::MockMixMessage;
+    use rand::SeedableRng;
+    use rand_chacha::ChaCha8Rng;
+    use std::time::Duration;
     use tokio::sync::mpsc;
+    use tokio::time;
+    use tokio_stream::wrappers::IntervalStream;

     macro_rules! assert_interval {
         ($last_time:expr, $lower_bound:expr, $upper_bound:expr) => {
@@ -173,7 +196,16 @@ mod tests {
         let lower_bound = expected_emission_interval - torelance;
         let upper_bound = expected_emission_interval + torelance;
         // prepare stream
-        let mut persistent_transmission_stream = stream.persistent_transmission(settings);
+        let mut persistent_transmission_stream: PersistentTransmissionStream<
+            _,
+            _,
+            MockMixMessage,
+            _,
+        > = stream.persistent_transmission(
+            settings,
+            ChaCha8Rng::from_entropy(),
+            IntervalStream::new(time::interval(expected_emission_interval)).map(|_| ()),
+        );

         // Messages must be scheduled in non-blocking manner.
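+        // (Both sends below are accepted immediately by the unbounded channel; the
+        // pacing of emissions comes solely from the interval-backed scheduler.)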
        schedule_sender.send(vec![1]).unwrap();
        schedule_sender.send(vec![2]).unwrap();
@@ -198,16 +230,14 @@ mod tests {
         );
         assert_interval!(&mut last_time, lower_bound, upper_bound);

-        assert_eq!(
-            persistent_transmission_stream.next().await.unwrap(),
-            DROP_MESSAGE.to_vec()
-        );
+        assert!(MockMixMessage::is_drop_message(
+            &persistent_transmission_stream.next().await.unwrap()
+        ));
         assert_interval!(&mut last_time, lower_bound, upper_bound);

-        assert_eq!(
-            persistent_transmission_stream.next().await.unwrap(),
-            DROP_MESSAGE.to_vec()
-        );
+        assert!(MockMixMessage::is_drop_message(
+            &persistent_transmission_stream.next().await.unwrap()
+        ));
         assert_interval!(&mut last_time, lower_bound, upper_bound);

         // Schedule a new message and check if it is emitted at the next interval
diff --git a/nomos-mix/message/Cargo.toml b/nomos-mix/message/Cargo.toml
index ef339926..3a4c33c1 100644
--- a/nomos-mix/message/Cargo.toml
+++ b/nomos-mix/message/Cargo.toml
@@ -4,7 +4,8 @@ version = "0.1.0"
 edition = "2021"

 [dependencies]
-serde = { version = "1.0", features = ["derive"] }
+itertools = "0.13"
+rand_chacha = "0.3"
 sha2 = "0.10"
 sphinx-packet = "0.2"
 thiserror = "1.0.65"
diff --git a/nomos-mix/message/src/lib.rs b/nomos-mix/message/src/lib.rs
index cd982f94..cc8e4c27 100644
--- a/nomos-mix/message/src/lib.rs
+++ b/nomos-mix/message/src/lib.rs
@@ -1,64 +1,28 @@
-mod error;
-pub mod packet;
+pub mod mock;
+pub mod sphinx;

-pub use error::Error;
+pub trait MixMessage {
+    type PublicKey;
+    type PrivateKey;
+    type Error;
+    const DROP_MESSAGE: &'static [u8];

-use sha2::{Digest, Sha256};
-
-pub const MSG_SIZE: usize = 2048;
-pub const DROP_MESSAGE: [u8; MSG_SIZE] = [0; MSG_SIZE];
-
-// TODO: Remove all the mock below once the actual implementation is integrated to the system.
-//
-/// A mock implementation of the Sphinx encoding.
-///
-/// The length of the encoded message is fixed to [`MSG_SIZE`] bytes.
-/// The first byte of the encoded message is the number of remaining layers to be unwrapped.
-/// The remaining bytes are the payload that is zero-padded to the end.
-pub fn new_message(payload: &[u8], num_layers: u8) -> Result<Vec<u8>, Error> {
-    if payload.len() > MSG_SIZE - 1 {
-        return Err(Error::PayloadTooLarge);
-    }
-
-    let mut message: Vec<u8> = Vec::with_capacity(MSG_SIZE);
-    message.push(num_layers);
-    message.extend(payload);
-    message.extend(std::iter::repeat(0).take(MSG_SIZE - message.len()));
-    Ok(message)
-}
-
-/// SHA-256 hash of the message
-pub fn message_id(message: &[u8]) -> Vec<u8> {
-    let mut hasher = Sha256::new();
-    hasher.update(message);
-    hasher.finalize().to_vec()
-}
-
-/// Unwrap the message one layer.
-///
-/// This function returns the unwrapped message and a boolean indicating whether the message was fully unwrapped.
-/// (False if the message still has layers to be unwrapped, true otherwise)
-///
-/// If the input message was already fully unwrapped, or if its format is invalid,
-/// this function returns `[Error::InvalidMixMessage]`.
-pub fn unwrap_message(message: &[u8]) -> Result<(Vec<u8>, bool), Error> {
-    if message.is_empty() {
-        return Err(Error::InvalidMixMessage);
-    }
-
-    match message[0] {
-        0 => Err(Error::InvalidMixMessage),
-        1 => Ok((message[1..].to_vec(), true)),
-        n => {
-            let mut unwrapped: Vec<u8> = Vec::with_capacity(message.len());
-            unwrapped.push(n - 1);
-            unwrapped.extend(&message[1..]);
-            Ok((unwrapped, false))
-        }
+    fn build_message(
+        payload: &[u8],
+        public_keys: &[Self::PublicKey],
+    ) -> Result<Vec<u8>, Self::Error>;
+    /// Unwrap the message one layer.
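+    /// (i.e. remove the single outermost layer of encryption addressed to `private_key`).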
+    ///
+    /// This function returns the unwrapped message and a boolean indicating whether the message was fully unwrapped.
+    /// (False if the message still has layers to be unwrapped, true otherwise)
+    ///
+    /// If the input message was already fully unwrapped, or if its format is invalid,
+    /// this function returns `[Error::InvalidMixMessage]`.
+    fn unwrap_message(
+        message: &[u8],
+        private_key: &Self::PrivateKey,
+    ) -> Result<(Vec<u8>, bool), Self::Error>;
+    fn is_drop_message(message: &[u8]) -> bool {
+        message == Self::DROP_MESSAGE
     }
 }
-
-/// Check if the message is a drop message.
-pub fn is_drop_message(message: &[u8]) -> bool {
-    message == DROP_MESSAGE
-}
diff --git a/nomos-mix/message/src/error.rs b/nomos-mix/message/src/mock/error.rs
similarity index 69%
rename from nomos-mix/message/src/error.rs
rename to nomos-mix/message/src/mock/error.rs
index 92f3d345..bc54d0fc 100644
--- a/nomos-mix/message/src/error.rs
+++ b/nomos-mix/message/src/mock/error.rs
@@ -4,10 +4,8 @@ pub enum Error {
     InvalidMixMessage,
     #[error("Payload is too large")]
     PayloadTooLarge,
-    #[error("Too many recipients")]
-    TooManyRecipients,
-    #[error("Sphinx packet error: {0}")]
-    SphinxPacketError(#[from] sphinx_packet::Error),
+    #[error("Invalid number of layers")]
+    InvalidNumberOfLayers,
     #[error("Unwrapping a message is not allowed to this node")]
     /// e.g. the message cannot be unwrapped using the private key provided
     MsgUnwrapNotAllowed,
diff --git a/nomos-mix/message/src/mock/mod.rs b/nomos-mix/message/src/mock/mod.rs
new file mode 100644
index 00000000..fcb8cf6f
--- /dev/null
+++ b/nomos-mix/message/src/mock/mod.rs
@@ -0,0 +1,124 @@
+pub mod error;
+
+use error::Error;
+
+use crate::MixMessage;
+// TODO: Remove all the mock below once the actual implementation is integrated to the system.
+//
+/// A mock implementation of the Sphinx encoding.
+
+const NODE_ID_SIZE: usize = 32;
+
+// TODO: Move MAX_PAYLOAD_SIZE and MAX_LAYERS to the upper layer (service layer).
+const MAX_PAYLOAD_SIZE: usize = 2048;
+const PAYLOAD_PADDING_SEPARATOR: u8 = 0x01;
+const PAYLOAD_PADDING_SEPARATOR_SIZE: usize = 1;
+const MAX_LAYERS: usize = 5;
+pub const MESSAGE_SIZE: usize =
+    NODE_ID_SIZE * MAX_LAYERS + MAX_PAYLOAD_SIZE + PAYLOAD_PADDING_SEPARATOR_SIZE;
+
+#[derive(Clone, Debug)]
+pub struct MockMixMessage;
+
+impl MixMessage for MockMixMessage {
+    type PublicKey = [u8; NODE_ID_SIZE];
+    type PrivateKey = [u8; NODE_ID_SIZE];
+    type Error = Error;
+    const DROP_MESSAGE: &'static [u8] = &[0; MESSAGE_SIZE];
+
+    /// The length of the encoded message is fixed to [`MESSAGE_SIZE`] bytes.
+    /// The [`MAX_LAYERS`] number of [`NodeId`]s are concatenated in front of the payload.
+    /// The payload is zero-padded to the end.
+    ///
+    fn build_message(
+        payload: &[u8],
+        public_keys: &[Self::PublicKey],
+    ) -> Result<Vec<u8>, Self::Error> {
+        // In this mock, we don't encrypt anything. So, we use public key as just a node ID.
+        let node_ids = public_keys;
+        if node_ids.is_empty() || node_ids.len() > MAX_LAYERS {
+            return Err(Error::InvalidNumberOfLayers);
+        }
+        if payload.len() > MAX_PAYLOAD_SIZE {
+            return Err(Error::PayloadTooLarge);
+        }
+
+        let mut message: Vec<u8> = Vec::with_capacity(MESSAGE_SIZE);
+
+        node_ids.iter().for_each(|node_id| {
+            message.extend(node_id);
+        });
+        // If there are any remaining layers, fill them with zeros.
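+        // e.g. with three recipients and MAX_LAYERS = 5, the header is
+        // id0 || id1 || id2 || [0; 32] || [0; 32]: always NODE_ID_SIZE * MAX_LAYERS
+        // = 160 bytes, ahead of the 2048-byte padded payload and its 1-byte
+        // separator (so MESSAGE_SIZE = 2209).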
+        (0..MAX_LAYERS - node_ids.len()).for_each(|_| message.extend(&[0; NODE_ID_SIZE]));
+
+        // Append payload with padding
+        message.extend(payload);
+        message.push(PAYLOAD_PADDING_SEPARATOR);
+        message.extend(std::iter::repeat(0).take(MAX_PAYLOAD_SIZE - payload.len()));
+        Ok(message)
+    }
+
+    fn unwrap_message(
+        message: &[u8],
+        private_key: &Self::PrivateKey,
+    ) -> Result<(Vec<u8>, bool), Self::Error> {
+        if message.len() != MESSAGE_SIZE {
+            return Err(Error::InvalidMixMessage);
+        }
+
+        // In this mock, we don't decrypt anything. So, we use private key as just a node ID.
+        let node_id = private_key;
+        if &message[0..NODE_ID_SIZE] != node_id {
+            return Err(Error::MsgUnwrapNotAllowed);
+        }
+
+        // If this is the last layer
+        if message[NODE_ID_SIZE..NODE_ID_SIZE * 2] == [0; NODE_ID_SIZE] {
+            let padded_payload = &message[NODE_ID_SIZE * MAX_LAYERS..];
+            // remove the payload padding
+            match padded_payload
+                .iter()
+                .rposition(|&x| x == PAYLOAD_PADDING_SEPARATOR)
+            {
+                Some(pos) => {
+                    return Ok((padded_payload[0..pos].to_vec(), true));
+                }
+                _ => return Err(Error::InvalidMixMessage),
+            }
+        }
+
+        let mut new_message: Vec<u8> = Vec::with_capacity(MESSAGE_SIZE);
+        new_message.extend(&message[NODE_ID_SIZE..NODE_ID_SIZE * MAX_LAYERS]);
+        new_message.extend(&[0; NODE_ID_SIZE]);
+        new_message.extend(&message[NODE_ID_SIZE * MAX_LAYERS..]); // padded payload
+        Ok((new_message, false))
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+
+    #[test]
+    fn message() {
+        let node_ids = (0..3).map(|i| [i; NODE_ID_SIZE]).collect::<Vec<_>>();
+        let payload = [7; 10];
+        let message = MockMixMessage::build_message(&payload, &node_ids).unwrap();
+        assert_eq!(message.len(), MESSAGE_SIZE);
+
+        let (message, is_fully_unwrapped) =
+            MockMixMessage::unwrap_message(&message, &node_ids[0]).unwrap();
+        assert!(!is_fully_unwrapped);
+        assert_eq!(message.len(), MESSAGE_SIZE);
+
+        let (message, is_fully_unwrapped) =
+            MockMixMessage::unwrap_message(&message, &node_ids[1]).unwrap();
+        assert!(!is_fully_unwrapped);
+        assert_eq!(message.len(), MESSAGE_SIZE);
+
+        let (unwrapped_payload, is_fully_unwrapped) =
+            MockMixMessage::unwrap_message(&message, &node_ids[2]).unwrap();
+        assert!(is_fully_unwrapped);
+        assert_eq!(unwrapped_payload, payload);
+    }
+}
diff --git a/nomos-mix/message/src/sphinx/error.rs b/nomos-mix/message/src/sphinx/error.rs
new file mode 100644
index 00000000..55af8cd2
--- /dev/null
+++ b/nomos-mix/message/src/sphinx/error.rs
@@ -0,0 +1,15 @@
+use sphinx_packet::header::routing::RoutingFlag;
+
+#[derive(thiserror::Error, Debug)]
+pub enum Error {
+    #[error("Sphinx packet error: {0}")]
+    SphinxPacketError(#[from] sphinx_packet::Error),
+    #[error("Invalid packet bytes")]
+    InvalidPacketBytes,
+    #[error("Invalid routing flag: {0}")]
+    InvalidRoutingFlag(RoutingFlag),
+    #[error("Invalid routing length: {0} bytes")]
+    InvalidEncryptedRoutingInfoLength(usize),
+    #[error("ConsistentLengthLayeredEncryptionError: {0}")]
+    ConsistentLengthLayeredEncryptionError(#[from] super::layered_cipher::Error),
+}
diff --git a/nomos-mix/message/src/sphinx/layered_cipher.rs b/nomos-mix/message/src/sphinx/layered_cipher.rs
new file mode 100644
index 00000000..0bfcf249
--- /dev/null
+++ b/nomos-mix/message/src/sphinx/layered_cipher.rs
@@ -0,0 +1,345 @@
+use std::marker::PhantomData;
+
+use rand_chacha::{
+    rand_core::{RngCore, SeedableRng},
+    ChaCha12Rng,
+};
+use sphinx_packet::{
+    constants::HEADER_INTEGRITY_MAC_SIZE,
+    crypto::STREAM_CIPHER_INIT_VECTOR,
+    header::{
+        keys::{HeaderIntegrityMacKey, StreamCipherKey},
+        mac::HeaderIntegrityMac,
+    },
+};
+
+use super::parse_bytes;
+
+#[derive(thiserror::Error, Debug)]
+pub enum Error {
+    #[error("Invalid cipher text length")]
+    InvalidCipherTextLength,
+    #[error("Invalid encryption param")]
+    InvalidEncryptionParam,
+    #[error("Integrity MAC verification failed")]
+    IntegrityMacVerificationFailed,
+}
+
+type Result<T> = std::result::Result<T, Error>;
+
+/// A cipher to encrypt/decrypt a list of data of the same size using a list of keys.
+///
+/// The cipher performs layered encryption.
+/// The following example shows the simplified output.
+/// - Input: [[data0, k0], [data1, k1]]
+/// - Output: encrypt(k0, [data0, encrypt(k1, [data1])])
+///
+/// The max number of layers is limited to the `max_layers` parameter.
+/// Even if fewer data/key pairs than `max_layers` are provided for encryption,
+/// the cipher always produces an output of the maximum size,
+/// in order to ensure that all outputs generated by the cipher are the same size.
+///
+/// The cipher also provides length-preserved decryption.
+/// Even if one layer of encryption is decrypted, the length of the decrypted data is
+/// the same as the length of the original data.
+/// For example:
+/// len(encrypt(k0, [data0, encrypt(k1, [data1])])) == len(encrypt(k1, [data1]))
+pub struct ConsistentLengthLayeredCipher<D> {
+    /// All encrypted data produced by the cipher has the same size according to the `max_layers`.
+    pub max_layers: usize,
+    _data: PhantomData<D>,
+}
+
+pub trait ConsistentLengthLayeredCipherData {
+    // Returns the serialized bytes for an instance of the implementing type
+    fn to_bytes(&self) -> Vec<u8>;
+    // The size of the serialized data.
+    const SIZE: usize;
+}
+
+/// A parameter for one layer of encryption
+pub struct EncryptionParam<D> {
+    /// The data to be included in the layer.
+    pub data: D,
+    /// A [`Key`] to encrypt the layer that will include the [`Self::data`].
+    pub key: Key,
+}
+
+/// A set of keys to encrypt/decrypt a single layer.
+pub struct Key {
+    /// A 128-bit key for encryption/decryption
+    pub stream_cipher_key: StreamCipherKey,
+    /// A 128-bit key for computing/verifying integrity MAC
+    pub integrity_mac_key: HeaderIntegrityMacKey,
+}
+
+impl<D: ConsistentLengthLayeredCipherData> ConsistentLengthLayeredCipher<D> {
+    pub fn new(max_layers: usize) -> Self {
+        Self {
+            max_layers,
+            _data: Default::default(),
+        }
+    }
+
+    /// The total size of fully encrypted output that includes all layers.
+    /// This size is determined by [`D::SIZE`] and `max_layers`.
+    pub const fn total_size(max_layers: usize) -> usize {
+        Self::SINGLE_LAYER_SIZE * max_layers
+    }
+
+    /// The size of a single layer that contains a data and a MAC.
+    /// The MAC is used to verify integrity of the encrypted next layer.
+    const SINGLE_LAYER_SIZE: usize = D::SIZE + HEADER_INTEGRITY_MAC_SIZE;
+
+    /// Perform the layered encryption.
+    pub fn encrypt(&self, params: &[EncryptionParam<D>]) -> Result<(Vec<u8>, HeaderIntegrityMac)> {
+        if params.is_empty() || params.len() > self.max_layers {
+            return Err(Error::InvalidEncryptionParam);
+        }
+
+        params
+            .iter()
+            .take(params.len() - 1) // Exclude the last param that will be treated separately below.
+            .rev() // Data and keys must be used in reverse order to encrypt the inner-most layer first
+            .try_fold(self.build_last_layer(params)?, |(encrypted, mac), param| {
+                self.build_intermediate_layer(param, mac, encrypted)
+            })
+    }
+
+    /// Build an intermediate layer of encryption that wraps subsequent layers already encrypted.
+    /// The output has the same size as [`Self::total_size`],
+    /// regardless of how many subsequent layers this layer wraps.
+    fn build_intermediate_layer(
+        &self,
+        param: &EncryptionParam<D>,
+        next_mac: HeaderIntegrityMac,
+        next_encrypted_data: Vec<u8>,
+    ) -> Result<(Vec<u8>, HeaderIntegrityMac)> {
+        // Concatenate the data with the encrypted subsequent layers and its MAC.
+        let data = param.data.to_bytes();
+        let total_data = itertools::chain!(
+            &data,
+            next_mac.as_bytes(),
+            // Truncate last bytes for the length-preserved decryption later.
+            // They will be restored by a filler during the decryption process.
+            &next_encrypted_data[..next_encrypted_data.len() - Self::SINGLE_LAYER_SIZE],
+        )
+        .copied()
+        .collect::<Vec<u8>>();
+
+        // Encrypt the concatenated bytes, and compute MAC.
+        let mut encrypted = total_data;
+        self.apply_streamcipher(
+            &mut encrypted,
+            &param.key.stream_cipher_key,
+            StreamCipherOption::FromFront,
+        );
+        let mac = Self::compute_mac(&param.key.integrity_mac_key, &encrypted);
+
+        assert_eq!(encrypted.len(), Self::total_size(self.max_layers));
+        Ok((encrypted, mac))
+    }
+
+    /// Build the last layer of encryption.
+    /// The output has the same size as [`Self::total_size`] by using fillers,
+    /// even though it doesn't wrap any subsequent layer.
+    /// This is for the length-preserved decryption.
+    fn build_last_layer(
+        &self,
+        params: &[EncryptionParam<D>],
+    ) -> Result<(Vec<u8>, HeaderIntegrityMac)> {
+        let last_param = params.last().ok_or(Error::InvalidEncryptionParam)?;
+
+        // Build fillers that will be appended to the last data.
+        // The number of fillers must be the same as the number of intermediate layers
+        // (excluding the last layer) that will be decrypted later.
+        let fillers = self.build_fillers(&params[..params.len() - 1]);
+        // Header integrity MAC doesn't need to be included in the last layer
+        // because there is no next encrypted layer.
+        // Instead, random bytes are used to fill the space between data and fillers.
+        // The size of random bytes depends on the `self.max_layers`.
+        let random_bytes =
+            random_bytes(Self::total_size(self.max_layers) - D::SIZE - fillers.len());
+
+        // First, concat the data and the random bytes, and encrypt it.
+        let last_data = last_param.data.to_bytes();
+        let total_data_without_fillers = itertools::chain!(&last_data, &random_bytes)
+            .copied()
+            .collect::<Vec<u8>>();
+        let mut encrypted = total_data_without_fillers;
+        self.apply_streamcipher(
+            &mut encrypted,
+            &last_param.key.stream_cipher_key,
+            StreamCipherOption::FromFront,
+        );
+
+        // Append fillers to the encrypted bytes, and compute MAC.
+        encrypted.extend(fillers);
+        let mac = Self::compute_mac(&last_param.key.integrity_mac_key, &encrypted);
+
+        assert_eq!(encrypted.len(), Self::total_size(self.max_layers));
+        Ok((encrypted, mac))
+    }
+
+    /// Build as many fillers as the number of keys provided.
+    /// Fillers are encrypted in an accumulated manner by keys.
+    fn build_fillers(&self, params: &[EncryptionParam<D>]) -> Vec<u8> {
+        let mut fillers = vec![0u8; Self::SINGLE_LAYER_SIZE * params.len()];
+        params
+            .iter()
+            .map(|param| &param.key.stream_cipher_key)
+            .enumerate()
+            .for_each(|(i, key)| {
+                self.apply_streamcipher(
+                    &mut fillers[0..(i + 1) * Self::SINGLE_LAYER_SIZE],
+                    key,
+                    StreamCipherOption::FromBack,
+                )
+            });
+        fillers
+    }
+
+    /// Unpack one layer of encryption by performing the length-preserved decryption.
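+    /// Returns `(data, next_mac, next_encrypted)`, where `next_encrypted` is restored
+    /// to the full [`Self::total_size`] by decrypting over an appended zero filler.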
+    /// Unpack one layer of encryption by performing a length-preserving decryption.
+    pub fn unpack(
+        &self,
+        mac: &HeaderIntegrityMac,
+        encrypted_total_data: &[u8],
+        key: &Key,
+    ) -> Result<(Vec<u8>, HeaderIntegrityMac, Vec<u8>)> {
+        if encrypted_total_data.len() != Self::total_size(self.max_layers) {
+            return Err(Error::InvalidCipherTextLength);
+        }
+        // If a wrong key is used, the decryption should fail.
+        if !mac.verify(key.integrity_mac_key, encrypted_total_data) {
+            return Err(Error::IntegrityMacVerificationFailed);
+        }
+
+        // Extend the encrypted data by the length of a single layer
+        // in order to restore the part (an encrypted filler) truncated
+        // by [`Self::build_intermediate_layer`] during the encryption process.
+        let total_data_with_zero_filler = encrypted_total_data
+            .iter()
+            .copied()
+            .chain(std::iter::repeat(0u8).take(Self::SINGLE_LAYER_SIZE))
+            .collect::<Vec<_>>();
+
+        // Decrypt the extended data.
+        let mut decrypted = total_data_with_zero_filler;
+        self.apply_streamcipher(
+            &mut decrypted,
+            &key.stream_cipher_key,
+            StreamCipherOption::FromFront,
+        );
+
+        // Parse the decrypted data into 3 parts: data, MAC, and the next encrypted data.
+        let parsed = parse_bytes(
+            &decrypted,
+            &[
+                D::SIZE,
+                HEADER_INTEGRITY_MAC_SIZE,
+                Self::total_size(self.max_layers),
+            ],
+        )
+        .unwrap();
+        let data = parsed[0].to_vec();
+        let next_mac = HeaderIntegrityMac::from_bytes(parsed[1].try_into().unwrap());
+        let next_encrypted_data = parsed[2].to_vec();
+        Ok((data, next_mac, next_encrypted_data))
+    }
+
+    fn apply_streamcipher(&self, data: &mut [u8], key: &StreamCipherKey, opt: StreamCipherOption) {
+        let pseudorandom_bytes = sphinx_packet::crypto::generate_pseudorandom_bytes(
+            key,
+            &STREAM_CIPHER_INIT_VECTOR,
+            Self::total_size(self.max_layers) + Self::SINGLE_LAYER_SIZE,
+        );
+        let pseudorandom_bytes = match opt {
+            StreamCipherOption::FromFront => &pseudorandom_bytes[..data.len()],
+            StreamCipherOption::FromBack => {
+                &pseudorandom_bytes[pseudorandom_bytes.len() - data.len()..]
+            }
+        };
+        Self::xor_in_place(data, pseudorandom_bytes)
+    }
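Both `FromFront` and `FromBack` rely on the usual XOR stream-cipher symmetry: applying the same keystream twice is the identity, so decryption is the same operation as encryption. A self-contained sketch of that property (fixed stand-in keystream, not the sphinx-packet PRG):

```rust
// XOR with a keystream is its own inverse.
fn xor_in_place(a: &mut [u8], b: &[u8]) {
    assert_eq!(a.len(), b.len());
    a.iter_mut().zip(b).for_each(|(x, &y)| *x ^= y);
}

fn main() {
    let keystream = [0x5a_u8; 8]; // stand-in for pseudorandom bytes
    let mut data = *b"8 bytes!";
    xor_in_place(&mut data, &keystream); // "encrypt"
    xor_in_place(&mut data, &keystream); // "decrypt"
    assert_eq!(&data, b"8 bytes!");
}
```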
+    // In-place XOR: `b` is applied to `a`.
+    fn xor_in_place(a: &mut [u8], b: &[u8]) {
+        assert_eq!(a.len(), b.len());
+        a.iter_mut().zip(b.iter()).for_each(|(x1, &x2)| *x1 ^= x2);
+    }
+
+    fn compute_mac(key: &HeaderIntegrityMacKey, data: &[u8]) -> HeaderIntegrityMac {
+        let mac = sphinx_packet::crypto::compute_keyed_hmac::<sha2::Sha256>(key, data).into_bytes();
+        assert!(mac.len() >= HEADER_INTEGRITY_MAC_SIZE);
+        HeaderIntegrityMac::from_bytes(
+            mac.into_iter()
+                .take(HEADER_INTEGRITY_MAC_SIZE)
+                .collect::<Vec<_>>()
+                .try_into()
+                .unwrap(),
+        )
+    }
+}
+
+fn random_bytes(size: usize) -> Vec<u8> {
+    let mut bytes = vec![0u8; size];
+    let mut rng = ChaCha12Rng::from_entropy();
+    rng.fill_bytes(&mut bytes);
+    bytes
+}
+
+enum StreamCipherOption {
+    FromFront,
+    FromBack,
+}
+
+#[cfg(test)]
+mod tests {
+    use sphinx_packet::{constants::INTEGRITY_MAC_KEY_SIZE, crypto::STREAM_CIPHER_KEY_SIZE};
+
+    use super::*;
+
+    #[test]
+    fn build_and_unpack() {
+        let cipher = ConsistentLengthLayeredCipher::<[u8; 10]>::new(5);
+
+        let params = (0u8..3)
+            .map(|i| EncryptionParam::<[u8; 10]> {
+                data: [i; 10],
+                key: Key {
+                    stream_cipher_key: [i * 10; STREAM_CIPHER_KEY_SIZE],
+                    integrity_mac_key: [i * 20; INTEGRITY_MAC_KEY_SIZE],
+                },
+            })
+            .collect::<Vec<_>>();
+
+        let (encrypted, mac) = cipher.encrypt(&params).unwrap();
+
+        let next_encrypted = encrypted.clone();
+        let (data, next_mac, next_encrypted) = cipher
+            .unpack(&mac, &next_encrypted, &params[0].key)
+            .unwrap();
+        assert_eq!(data, params[0].data);
+        assert_eq!(next_encrypted.len(), encrypted.len());
+
+        let (data, next_mac, next_encrypted) = cipher
+            .unpack(&next_mac, &next_encrypted, &params[1].key)
+            .unwrap();
+        assert_eq!(data, params[1].data);
+        assert_eq!(next_encrypted.len(), encrypted.len());
+
+        let (data, _, next_encrypted) = cipher
+            .unpack(&next_mac, &next_encrypted, &params[2].key)
+            .unwrap();
+        assert_eq!(data, params[2].data);
+        assert_eq!(next_encrypted.len(), encrypted.len());
+    }
+
+    impl ConsistentLengthLayeredCipherData for [u8; 10] {
+        fn to_bytes(&self) -> Vec<u8> {
+            self.to_vec()
+        }
+
+        const SIZE: usize = 10;
+    }
+}
diff --git a/nomos-mix/message/src/sphinx/mod.rs b/nomos-mix/message/src/sphinx/mod.rs
new file mode 100644
index 00000000..137a3c6a
--- /dev/null
+++ b/nomos-mix/message/src/sphinx/mod.rs
@@ -0,0 +1,69 @@
+use error::Error;
+use packet::{Packet, UnpackedPacket};
+
+use crate::MixMessage;
+
+pub mod error;
+mod layered_cipher;
+pub mod packet;
+mod routing;
+
+#[derive(Clone, Debug)]
+pub struct SphinxMessage;
+
+const ASYM_KEY_SIZE: usize = 32;
+// TODO: Move these constants to the upper layer (service layer).
+const MAX_PAYLOAD_SIZE: usize = 2048;
+const MAX_LAYERS: usize = 5;
+
+impl MixMessage for SphinxMessage {
+    type PublicKey = [u8; ASYM_KEY_SIZE];
+    type PrivateKey = [u8; ASYM_KEY_SIZE];
+    type Error = Error;
+
+    const DROP_MESSAGE: &'static [u8] = &[0; Packet::size(MAX_LAYERS, MAX_PAYLOAD_SIZE)];
+
+    fn build_message(
+        payload: &[u8],
+        public_keys: &[Self::PublicKey],
+    ) -> Result<Vec<u8>, Self::Error> {
+        let packet = Packet::build(
+            &public_keys
+                .iter()
+                .map(|k| x25519_dalek::PublicKey::from(*k))
+                .collect::<Vec<_>>(),
+            MAX_LAYERS,
+            payload,
+            MAX_PAYLOAD_SIZE,
+        )?;
+        Ok(packet.to_bytes())
+    }
+
+    fn unwrap_message(
+        message: &[u8],
+        private_key: &Self::PrivateKey,
+    ) -> Result<(Vec<u8>, bool), Self::Error> {
+        let packet = Packet::from_bytes(message, MAX_LAYERS)?;
+        let unpacked_packet =
+            packet.unpack(&x25519_dalek::StaticSecret::from(*private_key), MAX_LAYERS)?;
+        match unpacked_packet {
+            UnpackedPacket::ToForward(packet) => Ok((packet.to_bytes(), false)),
+            UnpackedPacket::FullyUnpacked(payload) => Ok((payload, true)),
+        }
+    }
+}
+
+fn parse_bytes<'a>(data: &'a [u8], sizes: &[usize]) -> Result<Vec<&'a [u8]>, String> {
+    let mut i = 0;
+    sizes
+        .iter()
+        .map(|&size| {
+            if i + size > data.len() {
+                return Err("The sum of sizes exceeds the length of the input slice".to_string());
+            }
+            let slice = &data[i..i + size];
+            i += size;
+            Ok(slice)
+        })
+        .collect()
+}
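For clarity, here is how the `parse_bytes` helper above behaves, as a hypothetical usage sketch (it assumes the function is in scope):

```rust
// parse_bytes splits a buffer into consecutive fields of the given sizes,
// erroring if the requested sizes overrun the input.
fn main() {
    let data = [1u8, 2, 3, 4, 5, 6];
    let parts = parse_bytes(&data, &[2, 3, 1]).unwrap();
    assert_eq!(parts, vec![&data[0..2], &data[2..5], &data[5..6]]);

    // Requesting more bytes than available fails.
    assert!(parse_bytes(&data, &[4, 4]).is_err());
}
```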
diff --git a/nomos-mix/message/src/packet.rs b/nomos-mix/message/src/sphinx/packet.rs
similarity index 51%
rename from nomos-mix/message/src/packet.rs
rename to nomos-mix/message/src/sphinx/packet.rs
index ecf9fdee..2db9159e 100644
--- a/nomos-mix/message/src/packet.rs
+++ b/nomos-mix/message/src/sphinx/packet.rs
@@ -1,11 +1,20 @@
-use crate::Error;
-use serde::{Deserialize, Serialize};
-use sphinx_packet::constants::NODE_ADDRESS_LENGTH;
+use sphinx_packet::{
+    constants::NODE_ADDRESS_LENGTH,
+    header::{
+        keys::RoutingKeys,
+        routing::{FINAL_HOP, FORWARD_HOP},
+    },
+    payload::{Payload, PAYLOAD_OVERHEAD_SIZE},
+};
+
+use crate::sphinx::ASYM_KEY_SIZE;
+
+use super::{error::Error, parse_bytes, routing::EncryptedRoutingInformation};
 
 /// A packet that contains a header and a payload.
 /// The header and payload are encrypted for the selected recipients.
 /// This packet can be serialized and sent over the network.
-#[derive(Debug, Serialize, Deserialize)]
+#[derive(Debug)]
 pub struct Packet {
     header: Header,
     // This crate doesn't limit the payload size.
@@ -14,36 +23,29 @@ pub struct Packet {
 }
 
 /// The packet header
-#[derive(Debug, Serialize, Deserialize)]
+#[derive(Debug)]
 struct Header {
     /// The ephemeral public key for a recipient to derive the shared secret
     /// which can be used to decrypt the header and payload.
     ephemeral_public_key: x25519_dalek::PublicKey,
-    // TODO: Length-preserved layered encryption on RoutingInfo
-    routing_info: RoutingInfo,
-}
-
-#[derive(Debug, Serialize, Deserialize)]
-struct RoutingInfo {
-    // TODO: Change this to `is_final_layer: bool`
-    // by implementing the length-preserved layered encryption.
-    // It's not good to expose the info that how many layers remain to the intermediate recipients.
-    remaining_layers: u8,
-    // TODO:: Add the following fields
-    // header_integrity_hamc
-    // additional data (e.g. incentivization)
+    encrypted_routing_info: EncryptedRoutingInformation,
 }
 
 impl Packet {
     pub fn build(
         recipient_pubkeys: &[x25519_dalek::PublicKey],
+        max_layers: usize,
         payload: &[u8],
-        payload_size: usize,
+        max_payload_size: usize,
     ) -> Result<Self, Error> {
         // Derive `[sphinx_packet::header::keys::KeyMaterial]` for all recipients.
         let ephemeral_privkey = x25519_dalek::StaticSecret::random();
         let key_material = Self::derive_key_material(recipient_pubkeys, &ephemeral_privkey);
 
+        // Build the encrypted routing information.
+        let encrypted_routing_info =
+            EncryptedRoutingInformation::new(&key_material.routing_keys, max_layers)?;
+
         // Encrypt the payload for all recipients.
         let payload_keys = key_material
             .routing_keys
@@ -53,22 +55,20 @@ impl Packet {
         let payload = sphinx_packet::payload::Payload::encapsulate_message(
             payload,
             &payload_keys,
-            payload_size,
+            // sphinx_packet::payload requires this parameter to include the overhead size.
+            max_payload_size + PAYLOAD_OVERHEAD_SIZE,
         )?;
 
         Ok(Packet {
             header: Header {
                 ephemeral_public_key: x25519_dalek::PublicKey::from(&ephemeral_privkey),
-                routing_info: RoutingInfo {
-                    remaining_layers: u8::try_from(recipient_pubkeys.len())
-                        .map_err(|_| Error::TooManyRecipients)?,
-                },
+                encrypted_routing_info,
             },
             payload: payload.into_bytes(),
         })
     }
 
-    fn derive_key_material(
+    pub(crate) fn derive_key_material(
         recipient_pubkeys: &[x25519_dalek::PublicKey],
         ephemeral_privkey: &x25519_dalek::StaticSecret,
     ) -> sphinx_packet::header::keys::KeyMaterial {
@@ -90,6 +90,7 @@ impl Packet {
     pub fn unpack(
         &self,
         private_key: &x25519_dalek::StaticSecret,
+        max_layers: usize,
     ) -> Result<UnpackedPacket, Error> {
         // Derive the routing keys for the recipient
         let routing_keys = sphinx_packet::header::SphinxHeader::compute_routing_keys(
@@ -101,25 +102,40 @@ impl Packet {
         let payload = sphinx_packet::payload::Payload::from_bytes(&self.payload)?;
         let payload = payload.unwrap(&routing_keys.payload_key)?;
 
-        // If this is the last layer of encryption, return the decrypted payload.
-        if self.header.routing_info.remaining_layers == 1 {
-            return Ok(UnpackedPacket::FullyUnpacked(payload.recover_plaintext()?));
+        // Unpack the routing information
+        let (routing_info, next_encrypted_routing_info) = self
+            .header
+            .encrypted_routing_info
+            .unpack(&routing_keys, max_layers)?;
+        match routing_info.flag {
+            FORWARD_HOP => Ok(UnpackedPacket::ToForward(self.build_next_packet(
+                &routing_keys,
+                next_encrypted_routing_info,
+                payload,
+            ))),
+            FINAL_HOP => Ok(UnpackedPacket::FullyUnpacked(payload.recover_plaintext()?)),
+            _ => Err(Error::InvalidRoutingFlag(routing_info.flag)),
         }
+    }
 
+    fn build_next_packet(
+        &self,
+        routing_keys: &RoutingKeys,
+        next_encrypted_routing_info: EncryptedRoutingInformation,
+        payload: Payload,
+    ) -> Packet {
         // Derive the new ephemeral public key for the next recipient
         let next_ephemeral_pubkey = Self::derive_next_ephemeral_public_key(
             &self.header.ephemeral_public_key,
            &routing_keys.blinding_factor,
         );
-        Ok(UnpackedPacket::ToForward(Packet {
+        Packet {
             header: Header {
                 ephemeral_public_key: next_ephemeral_pubkey,
-                routing_info: RoutingInfo {
-                    remaining_layers: self.header.routing_info.remaining_layers - 1,
-                },
+                encrypted_routing_info: next_encrypted_routing_info,
             },
             payload: payload.into_bytes(),
-        }))
+        }
     }
 
     /// Derive the next ephemeral public key for the next recipient.
@@ -135,6 +151,52 @@ impl Packet {
         let new_shared_secret = blinding_factor.diffie_hellman(cur_ephemeral_pubkey);
         x25519_dalek::PublicKey::from(new_shared_secret.to_bytes())
     }
+
+    pub fn to_bytes(&self) -> Vec<u8> {
+        let ephemeral_public_key = self.header.ephemeral_public_key.to_bytes();
+        let encrypted_routing_info = self.header.encrypted_routing_info.to_bytes();
+        itertools::chain!(
+            &ephemeral_public_key,
+            &encrypted_routing_info,
+            &self.payload,
+        )
+        .copied()
+        .collect()
+    }
+
+    pub fn from_bytes(data: &[u8], max_layers: usize) -> Result<Self, Error> {
+        let ephemeral_public_key_size = ASYM_KEY_SIZE;
+        let encrypted_routing_info_size = EncryptedRoutingInformation::size(max_layers);
+        let parsed = parse_bytes(
+            data,
+            &[
+                ephemeral_public_key_size,
+                encrypted_routing_info_size,
+                data.len() - ephemeral_public_key_size - encrypted_routing_info_size,
+            ],
+        )
+        .map_err(|_| Error::InvalidPacketBytes)?;
+
+        Ok(Packet {
+            header: Header {
+                ephemeral_public_key: {
+                    let bytes: [u8; 32] = parsed[0].try_into().unwrap();
+                    x25519_dalek::PublicKey::from(bytes)
+                },
+                encrypted_routing_info: EncryptedRoutingInformation::from_bytes(
+                    parsed[1], max_layers,
+                )?,
+            },
+            payload: parsed[2].to_vec(),
+        })
+    }
+
+    pub const fn size(max_layers: usize, max_payload_size: usize) -> usize {
+        ASYM_KEY_SIZE
+            + EncryptedRoutingInformation::size(max_layers)
+            + max_payload_size
+            + PAYLOAD_OVERHEAD_SIZE
+    }
 }
 
 pub enum UnpackedPacket {
@@ -144,14 +206,12 @@ pub enum UnpackedPacket {
 
 #[cfg(test)]
 mod tests {
-    use nomos_core::wire;
-
     use super::*;
 
     #[test]
     fn unpack() {
         // Prepare keys of two recipients
-        let recipient_privkeys = (0..2)
+        let recipient_privkeys = (0..3)
             .map(|_| x25519_dalek::StaticSecret::random())
             .collect::<Vec<_>>();
         let recipient_pubkeys = recipient_privkeys
@@ -160,18 +220,26 @@ mod tests {
             .collect::<Vec<_>>();
 
         // Build a packet
+        let max_layers = 5;
         let payload = [10u8; 512];
-        let packet = Packet::build(&recipient_pubkeys, &payload, 1024).unwrap();
+        let packet = Packet::build(&recipient_pubkeys, max_layers, &payload, 1024).unwrap();
 
         // The 1st recipient unpacks the packet
-        let packet = match packet.unpack(&recipient_privkeys[0]).unwrap() {
+        let packet = match packet.unpack(&recipient_privkeys[0], max_layers).unwrap() {
             UnpackedPacket::ToForward(packet) => packet,
             UnpackedPacket::FullyUnpacked(_) => {
                 panic!("The unpacked packet should be the ToFoward type");
             }
         };
         // The 2nd recipient unpacks the packet
-        match packet.unpack(&recipient_privkeys[1]).unwrap() {
+        let packet = match packet.unpack(&recipient_privkeys[1], max_layers).unwrap() {
+            UnpackedPacket::ToForward(packet) => packet,
+            UnpackedPacket::FullyUnpacked(_) => {
+                panic!("The unpacked packet should be the ToForward type");
+            }
+        };
+        // The last recipient unpacks the packet
+        match packet.unpack(&recipient_privkeys[2], max_layers).unwrap() {
             UnpackedPacket::ToForward(_) => {
                 panic!("The unpacked packet should be the FullyUnpacked type");
             }
@@ -185,34 +253,25 @@ mod tests {
 
     #[test]
     fn unpack_with_wrong_keys() {
         // Build a packet with two public keys
+        let max_layers = 5;
         let payload = [10u8; 512];
         let packet = Packet::build(
             &(0..2)
                 .map(|_| x25519_dalek::PublicKey::from(&x25519_dalek::StaticSecret::random()))
                 .collect::<Vec<_>>(),
+            max_layers,
             &payload,
             1024,
         )
         .unwrap();
 
-        // The 1st recipient unpacks the packet with an wrong key
-        let packet = match packet
-            .unpack(&x25519_dalek::StaticSecret::random())
-            .unwrap()
-        {
-            UnpackedPacket::ToForward(packet) => packet,
-            UnpackedPacket::FullyUnpacked(_) => {
-                panic!("The unpacked packet should be the ToFoward type");
the ToFoward type"); - } - }; - // The 2nd recipient unpacks the packet with an wrong key assert!(packet - .unpack(&x25519_dalek::StaticSecret::random()) + .unpack(&x25519_dalek::StaticSecret::random(), max_layers) .is_err()); } #[test] - fn consistent_size_serialization() { + fn consistent_size_after_unpack() { // Prepare keys of two recipients let recipient_privkeys = (0..2) .map(|_| x25519_dalek::StaticSecret::random()) @@ -223,28 +282,76 @@ mod tests { .collect::>(); // Build a packet + let max_layers = 5; let payload = [10u8; 512]; - let packet = Packet::build(&recipient_pubkeys, &payload, 1024).unwrap(); + let max_payload_size = 1024; + let packet = + Packet::build(&recipient_pubkeys, max_layers, &payload, max_payload_size).unwrap(); // Calculate the expected packet size - let pubkey_size = 32; - let routing_info_size = 1; - let payload_length_enconding_size = 8; - let payload_size = 1024; - let packet_size = - pubkey_size + routing_info_size + payload_length_enconding_size + payload_size; + let packet_size = Packet::size(max_layers, max_payload_size); // The serialized packet size must be the same as the expected size. - assert_eq!(wire::serialize(&packet).unwrap().len(), packet_size); + assert_eq!(packet.to_bytes().len(), packet_size); // The unpacked packet size must be the same as the original packet size. - match packet.unpack(&recipient_privkeys[0]).unwrap() { + match packet.unpack(&recipient_privkeys[0], max_layers).unwrap() { UnpackedPacket::ToForward(packet) => { - assert_eq!(wire::serialize(&packet).unwrap().len(), packet_size); + assert_eq!(packet.to_bytes().len(), packet_size); } UnpackedPacket::FullyUnpacked(_) => { panic!("The unpacked packet should be the ToFoward type"); } } } + + #[test] + fn consistent_size_with_any_num_layers() { + let max_layers = 5; + let payload = [10u8; 512]; + + // Build a packet with 2 recipients + let recipient_pubkeys = (0..2) + .map(|_| x25519_dalek::PublicKey::from(&x25519_dalek::StaticSecret::random())) + .collect::>(); + let packet1 = Packet::build(&recipient_pubkeys, max_layers, &payload, 1024).unwrap(); + + // Build a packet with 3 recipients + let recipient_pubkeys = (0..3) + .map(|_| x25519_dalek::PublicKey::from(&x25519_dalek::StaticSecret::random())) + .collect::>(); + let packet2 = Packet::build(&recipient_pubkeys, max_layers, &payload, 1024).unwrap(); + + assert_eq!(packet1.to_bytes().len(), packet2.to_bytes().len()); + } + + #[test] + fn to_from_bytes() { + let max_layers = 5; + let payload = [10u8; 512]; + + // Build a packet with 2 recipients + let recipient_pubkeys = (0..2) + .map(|_| x25519_dalek::PublicKey::from(&x25519_dalek::StaticSecret::random())) + .collect::>(); + let packet = Packet::build(&recipient_pubkeys, max_layers, &payload, 1024).unwrap(); + + let bytes = packet.to_bytes(); + let loaded_packet = Packet::from_bytes(&bytes, max_layers).unwrap(); + + // Manually compare packets because PartialEq is not implemented + // for [`sphinx_packet::header::mac::HeaderIntegrityMac`] used in our header. 
diff --git a/nomos-mix/message/src/sphinx/routing.rs b/nomos-mix/message/src/sphinx/routing.rs
new file mode 100644
index 00000000..a0f22b75
--- /dev/null
+++ b/nomos-mix/message/src/sphinx/routing.rs
@@ -0,0 +1,140 @@
+use sphinx_packet::{
+    constants::HEADER_INTEGRITY_MAC_SIZE,
+    header::{
+        keys::RoutingKeys,
+        mac::HeaderIntegrityMac,
+        routing::{RoutingFlag, FINAL_HOP, FORWARD_HOP},
+    },
+};
+
+use super::{
+    error::Error,
+    layered_cipher::{
+        ConsistentLengthLayeredCipher, ConsistentLengthLayeredCipherData, EncryptionParam, Key,
+    },
+    parse_bytes,
+};
+
+/// Routing information that will be contained in a packet header
+/// in encrypted form.
+pub struct RoutingInformation {
+    pub flag: RoutingFlag,
+    // Add additional fields here
+}
+
+impl RoutingInformation {
+    pub fn new(flag: RoutingFlag) -> Self {
+        Self { flag }
+    }
+
+    pub fn from_bytes(data: &[u8]) -> Result<Self, Error> {
+        if data.len() != Self::SIZE {
+            return Err(Error::InvalidEncryptedRoutingInfoLength(data.len()));
+        }
+        Ok(Self { flag: data[0] })
+    }
+}
+
+impl ConsistentLengthLayeredCipherData for RoutingInformation {
+    fn to_bytes(&self) -> Vec<u8> {
+        vec![self.flag]
+    }
+
+    const SIZE: usize = std::mem::size_of::<RoutingFlag>();
+}
+
+/// Encrypted routing information that will be contained in a packet header.
+#[derive(Debug)]
+pub struct EncryptedRoutingInformation {
+    /// A MAC to verify the integrity of [`Self::encrypted_routing_info`].
+    pub mac: HeaderIntegrityMac,
+    /// The actual encrypted routing information produced by [`ConsistentLengthLayeredCipher`].
+    /// Its size should be the same as [`ConsistentLengthLayeredCipher::total_size`].
+    pub encrypted_routing_info: Vec<u8>,
+}
+
+type LayeredCipher = ConsistentLengthLayeredCipher<RoutingInformation>;
+
+impl EncryptedRoutingInformation {
+    /// Build a [`RoutingInformation`] for each of the provided keys,
+    /// and encrypt them using [`ConsistentLengthLayeredCipher`].
+    pub fn new(routing_keys: &[RoutingKeys], max_layers: usize) -> Result<Self, Error> {
+        let cipher = LayeredCipher::new(max_layers);
+        let params = routing_keys
+            .iter()
+            .enumerate()
+            .map(|(i, k)| {
+                let flag = if i == routing_keys.len() - 1 {
+                    FINAL_HOP
+                } else {
+                    FORWARD_HOP
+                };
+                EncryptionParam::<RoutingInformation> {
+                    data: RoutingInformation::new(flag),
+                    key: Self::layered_cipher_key(k),
+                }
+            })
+            .collect::<Vec<_>>();
+        let (encrypted, mac) = cipher.encrypt(&params)?;
+
+        Ok(Self {
+            mac,
+            encrypted_routing_info: encrypted,
+        })
+    }
+
+    /// Unpack one layer of encryption using the key provided.
+    /// Returns the decrypted routing information
+    /// and the next [`EncryptedRoutingInformation`] to be unpacked further.
+    pub fn unpack(
+        &self,
+        routing_key: &RoutingKeys,
+        max_layers: usize,
+    ) -> Result<(RoutingInformation, Self), Error> {
+        let cipher = LayeredCipher::new(max_layers);
+        let (routing_info, next_mac, next_encrypted_routing_info) = cipher.unpack(
+            &self.mac,
+            &self.encrypted_routing_info,
+            &Self::layered_cipher_key(routing_key),
+        )?;
+        Ok((
+            RoutingInformation::from_bytes(&routing_info)?,
+            Self {
+                mac: next_mac,
+                encrypted_routing_info: next_encrypted_routing_info,
+            },
+        ))
+    }
+
+    fn layered_cipher_key(routing_key: &RoutingKeys) -> Key {
+        Key {
+            stream_cipher_key: routing_key.stream_cipher_key,
+            integrity_mac_key: routing_key.header_integrity_hmac_key,
+        }
+    }
+
+    pub fn to_bytes(&self) -> Vec<u8> {
+        itertools::chain!(self.mac.as_bytes(), &self.encrypted_routing_info)
+            .copied()
+            .collect()
+    }
+
+    pub fn from_bytes(data: &[u8], max_layers: usize) -> Result<Self, Error> {
+        let parsed = parse_bytes(
+            data,
+            &[
+                HEADER_INTEGRITY_MAC_SIZE,
+                LayeredCipher::total_size(max_layers),
+            ],
+        )
+        .map_err(|_| Error::InvalidEncryptedRoutingInfoLength(data.len()))?;
+        Ok(Self {
+            mac: HeaderIntegrityMac::from_bytes(parsed[0].try_into().unwrap()),
+            encrypted_routing_info: parsed[1].to_vec(),
+        })
+    }
+
+    pub const fn size(max_layers: usize) -> usize {
+        HEADER_INTEGRITY_MAC_SIZE + LayeredCipher::total_size(max_layers)
+    }
+}
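Taken together, `packet.rs`, `routing.rs`, and the layered cipher implement the full `MixMessage` flow. A hedged end-to-end sketch using the public API from `sphinx/mod.rs` above (three hops out of `MAX_LAYERS = 5`; it assumes the crates from this diff are available):

```rust
use nomos_mix_message::{sphinx::SphinxMessage, MixMessage};

fn main() {
    // One x25519 key pair per hop.
    let private_keys: Vec<x25519_dalek::StaticSecret> = (0..3)
        .map(|_| x25519_dalek::StaticSecret::random())
        .collect();
    let public_keys: Vec<[u8; 32]> = private_keys
        .iter()
        .map(|sk| x25519_dalek::PublicKey::from(sk).to_bytes())
        .collect();

    // The sender wraps the payload once for all hops.
    let mut message = SphinxMessage::build_message(b"hello mix", &public_keys).unwrap();

    // Each hop peels one layer; only the last hop sees the payload.
    for (i, sk) in private_keys.iter().enumerate() {
        let (next, fully_unpacked) =
            SphinxMessage::unwrap_message(&message, &sk.to_bytes()).unwrap();
        assert_eq!(fully_unpacked, i == private_keys.len() - 1);
        message = next;
    }
}
```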
diff --git a/nomos-mix/network/Cargo.toml b/nomos-mix/network/Cargo.toml
index 9871cdc1..c8dfdc24 100644
--- a/nomos-mix/network/Cargo.toml
+++ b/nomos-mix/network/Cargo.toml
@@ -11,6 +11,7 @@ libp2p = "0.53"
 tracing = "0.1"
 nomos-mix = { path = "../core" }
 nomos-mix-message = { path = "../message" }
+sha2 = "0.10"
 
 [dev-dependencies]
 tokio = { version = "1", features = ["macros", "rt-multi-thread", "time"] }
diff --git a/nomos-mix/network/src/behaviour.rs b/nomos-mix/network/src/behaviour.rs
index 93f55f42..d2e04d84 100644
--- a/nomos-mix/network/src/behaviour.rs
+++ b/nomos-mix/network/src/behaviour.rs
@@ -1,8 +1,7 @@
-use std::{
-    collections::{HashMap, HashSet, VecDeque},
-    task::{Context, Poll, Waker},
+use crate::{
+    error::Error,
+    handler::{FromBehaviour, MixConnectionHandler, ToBehaviour},
 };
-
 use cached::{Cached, TimedCache};
 use libp2p::{
     core::Endpoint,
@@ -12,17 +11,18 @@ use libp2p::{
     },
     Multiaddr, PeerId,
 };
-use nomos_mix_message::{is_drop_message, message_id};
-
-use crate::{
-    error::Error,
-    handler::{FromBehaviour, MixConnectionHandler, ToBehaviour},
+use nomos_mix_message::MixMessage;
+use sha2::{Digest, Sha256};
+use std::marker::PhantomData;
+use std::{
+    collections::{HashMap, HashSet, VecDeque},
+    task::{Context, Poll, Waker},
 };
 
 /// A [`NetworkBehaviour`]:
 /// - forwards messages to all connected peers with deduplication.
 /// - receives messages from all connected peers.
-pub struct Behaviour {
+pub struct Behaviour<M> {
     config: Config,
     /// Peers that support the mix protocol, and their connection IDs
     negotiated_peers: HashMap<PeerId, HashSet<ConnectionId>>,
@@ -33,6 +33,7 @@ pub struct Behaviour {
     /// An LRU time cache for storing seen messages (based on their ID). This cache prevents
     /// duplicates from being propagated on the network.
     duplicate_cache: TimedCache<Vec<u8>, ()>,
+    _mix_message: PhantomData<M>,
 }
 
 #[derive(Debug)]
@@ -47,7 +48,10 @@ pub enum Event {
     Error(Error),
 }
 
-impl Behaviour {
+impl<M> Behaviour<M>
+where
+    M: MixMessage,
+{
     pub fn new(config: Config) -> Self {
         let duplicate_cache = TimedCache::with_lifespan(config.duplicate_cache_lifespan);
         Self {
@@ -56,17 +60,18 @@ impl Behaviour {
             events: VecDeque::new(),
             waker: None,
             duplicate_cache,
+            _mix_message: Default::default(),
         }
     }
 
     /// Publish a message (data or drop) to all connected peers
     pub fn publish(&mut self, message: Vec<u8>) -> Result<(), Error> {
-        if is_drop_message(&message) {
+        if M::is_drop_message(&message) {
             // Bypass deduplication for the drop message
             return self.forward_message(message, None);
         }
 
-        let msg_id = message_id(&message);
+        let msg_id = Self::message_id(&message);
         // If the message was already seen, don't forward it again
         if self.duplicate_cache.cache_get(&msg_id).is_some() {
             return Ok(());
@@ -136,6 +141,13 @@ impl Behaviour {
         }
     }
 
+    /// SHA-256 hash of the message
+    fn message_id(message: &[u8]) -> Vec<u8> {
+        let mut hasher = Sha256::new();
+        hasher.update(message);
+        hasher.finalize().to_vec()
+    }
+
     fn try_wake(&mut self) {
         if let Some(waker) = self.waker.take() {
             waker.wake();
@@ -143,7 +155,10 @@ impl Behaviour {
     }
 }
 
-impl NetworkBehaviour for Behaviour {
+impl<M> NetworkBehaviour for Behaviour<M>
+where
+    M: MixMessage + 'static,
+{
     type ConnectionHandler = MixConnectionHandler;
     type ToSwarm = Event;
 
@@ -191,14 +206,14 @@ impl NetworkBehaviour for Behaviour {
             // A message was forwarded from the peer.
             ToBehaviour::Message(message) => {
                 // Ignore drop message
-                if is_drop_message(&message) {
+                if M::is_drop_message(&message) {
                     return;
                 }
 
                 // Add the message to the cache. If it was already seen, ignore it.
                 if self
                     .duplicate_cache
-                    .cache_set(message_id(&message), ())
+                    .cache_set(Self::message_id(&message), ())
                     .is_some()
                 {
                     return;
diff --git a/nomos-mix/network/src/handler.rs b/nomos-mix/network/src/handler.rs
index 09f45415..e6bddc64 100644
--- a/nomos-mix/network/src/handler.rs
+++ b/nomos-mix/network/src/handler.rs
@@ -13,7 +13,6 @@ use libp2p::{
     },
     Stream, StreamProtocol,
 };
-use nomos_mix_message::MSG_SIZE;
 
 use crate::behaviour::Config;
 
@@ -248,15 +247,28 @@ impl ConnectionHandler for MixConnectionHandler {
 
 /// Write a message to the stream
 async fn send_msg(mut stream: Stream, msg: Vec<u8>) -> io::Result<Stream> {
+    let msg_len: u16 = msg.len().try_into().map_err(|_| {
+        std::io::Error::new(
+            std::io::ErrorKind::InvalidInput,
+            format!(
+                "Message length {} exceeds the maximum {} encodable in a u16 length prefix",
+                msg.len(),
+                u16::MAX
+            ),
+        )
+    })?;
+    stream.write_all(msg_len.to_be_bytes().as_ref()).await?;
     stream.write_all(&msg).await?;
     stream.flush().await?;
     Ok(stream)
 }
-
-/// Read a fixed-length message from the stream
-// TODO: Consider handling variable-length messages
+/// Read a message from the stream
 async fn recv_msg(mut stream: Stream) -> io::Result<(Stream, Vec<u8>)> {
-    let mut buf = vec![0; MSG_SIZE];
+    let mut msg_len = [0; std::mem::size_of::<u16>()];
+    stream.read_exact(&mut msg_len).await?;
+    let msg_len = u16::from_be_bytes(msg_len) as usize;
+
+    let mut buf = vec![0; msg_len];
     stream.read_exact(&mut buf).await?;
     Ok((stream, buf))
 }
diff --git a/nomos-mix/network/src/lib.rs b/nomos-mix/network/src/lib.rs
index 1de9838c..6db526e4 100644
--- a/nomos-mix/network/src/lib.rs
+++ b/nomos-mix/network/src/lib.rs
@@ -14,7 +14,7 @@ mod test {
         swarm::{dummy, NetworkBehaviour, SwarmEvent},
         Multiaddr, PeerId, Swarm, SwarmBuilder,
     };
-    use nomos_mix_message::MSG_SIZE;
+    use nomos_mix_message::mock::MockMixMessage;
     use tokio::select;
 
     use crate::{behaviour::Config, error::Error, Behaviour, Event};
@@ -43,7 +43,7 @@ mod test {
 
         // Swamr2 publishes a message.
         let task = async {
-            let msg = vec![1; MSG_SIZE];
+            let msg = vec![1; 10];
             let mut msg_published = false;
             let mut publish_try_interval = tokio::time::interval(Duration::from_secs(1));
             loop {
@@ -98,7 +98,7 @@ mod test {
 
         // Expect all publish attempts to fail with [`Error::NoPeers`]
         // because swarm2 doesn't have any peers that support the mix protocol.
-        let msg = vec![1; MSG_SIZE];
+        let msg = vec![1; 10];
         let mut publish_try_interval = tokio::time::interval(Duration::from_secs(1));
         let mut publish_try_count = 0;
         loop {
@@ -116,7 +116,7 @@ mod test {
         }
     }
 
-    fn new_swarm(key: Keypair) -> Swarm<Behaviour> {
+    fn new_swarm(key: Keypair) -> Swarm<Behaviour<MockMixMessage>> {
         new_swarm_with_behaviour(
             key,
             Behaviour::new(Config {
diff --git a/nomos-services/api/Cargo.toml b/nomos-services/api/Cargo.toml
index bac4ecc0..2b812678 100644
--- a/nomos-services/api/Cargo.toml
+++ b/nomos-services/api/Cargo.toml
@@ -22,7 +22,6 @@ nomos-mempool = { path = "../../nomos-services/mempool", features = [
     "libp2p",
     "openapi",
 ] }
-nomos-metrics = { path = "../../nomos-services/metrics" }
 nomos-da-dispersal = { path = "../data-availability/dispersal" }
 nomos-da-indexer = { path = "../data-availability/indexer", features = ["rocksdb-backend"] }
 nomos-da-sampling = { path = "../data-availability/sampling" }
diff --git a/nomos-services/api/src/http/metrics.rs b/nomos-services/api/src/http/metrics.rs
deleted file mode 100644
index c25deb08..00000000
--- a/nomos-services/api/src/http/metrics.rs
+++ /dev/null
@@ -1,18 +0,0 @@
-use nomos_metrics::{Metrics, MetricsMsg};
-use tokio::sync::oneshot;
-
-pub async fn gather(
-    handle: &overwatch_rs::overwatch::handle::OverwatchHandle,
-) -> Result<String, overwatch_rs::DynError> {
-    let relay = handle.relay::<Metrics>().connect().await?;
-    let (sender, receiver) = oneshot::channel();
-
-    relay
-        .send(MetricsMsg::Gather {
-            reply_channel: sender,
-        })
-        .await
-        .map_err(|(e, _)| e)?;
-
-    Ok(receiver.await?)
-} diff --git a/nomos-services/api/src/http/mod.rs b/nomos-services/api/src/http/mod.rs index 6ab13b2a..789aeee9 100644 --- a/nomos-services/api/src/http/mod.rs +++ b/nomos-services/api/src/http/mod.rs @@ -4,5 +4,4 @@ pub mod consensus; pub mod da; pub mod libp2p; pub mod mempool; -pub mod metrics; pub mod storage; diff --git a/nomos-services/data-availability/tests/Cargo.toml b/nomos-services/data-availability/tests/Cargo.toml index 600cac8d..0baffd23 100644 --- a/nomos-services/data-availability/tests/Cargo.toml +++ b/nomos-services/data-availability/tests/Cargo.toml @@ -25,6 +25,7 @@ nomos-storage = { path = "../../../nomos-services/storage", features = ["rocksdb nomos-network = { path = "../../network", features = ["mock"] } nomos-mix-service = { path = "../../mix" } nomos-mix = { path = "../../../nomos-mix/core" } +nomos-mix-message = { path = "../../../nomos-mix/message" } nomos-libp2p = { path = "../../../nomos-libp2p" } libp2p = { version = "0.53.2", features = ["ed25519"] } once_cell = "1.19" @@ -38,9 +39,11 @@ tokio-stream = "0.1.15" tempfile = "3.6" tracing = "0.1" time = "0.3" +tracing-subscriber = "0.2.25" [dev-dependencies] blake2 = { version = "0.10" } +x25519-dalek = { version = "2", features = ["getrandom", "static_secrets"] } [features] default = ["libp2p"] diff --git a/nomos-services/data-availability/tests/src/common.rs b/nomos-services/data-availability/tests/src/common.rs index edc406aa..90c1d734 100644 --- a/nomos-services/data-availability/tests/src/common.rs +++ b/nomos-services/data-availability/tests/src/common.rs @@ -1,14 +1,15 @@ use cryptarchia_consensus::LeaderConfig; // std use nomos_da_network_service::backends::libp2p::common::DaNetworkBackendSettings; +use nomos_mix::membership::Node; use nomos_mix::message_blend::{ - CryptographicProcessorSettings, MessageBlendSettings, TemporalProcessorSettings, + CryptographicProcessorSettings, MessageBlendSettings, TemporalSchedulerSettings, }; +use nomos_mix_message::{sphinx::SphinxMessage, MixMessage}; use std::path::PathBuf; use std::time::Duration; // crates use bytes::Bytes; -use cl::InputWitness; use cryptarchia_consensus::TimeConfig; use kzgrs_backend::common::blob::DaBlob; use kzgrs_backend::dispersal::BlobInfo; @@ -187,13 +188,19 @@ pub struct TestDaNetworkSettings { pub node_key: ed25519::SecretKey, } +pub struct TestMixSettings { + pub backend: Libp2pMixBackendSettings, + pub private_key: x25519_dalek::StaticSecret, + pub membership: Vec::PublicKey>>, +} + pub fn new_node( leader_config: &LeaderConfig, ledger_config: &nomos_ledger::Config, genesis_state: &LedgerState, time_config: &TimeConfig, swarm_config: &SwarmConfig, - mix_config: &Libp2pMixBackendSettings, + mix_config: &TestMixSettings, db_path: PathBuf, blobs_dir: &PathBuf, initial_peers: Vec, @@ -210,14 +217,22 @@ pub fn new_node( }, }, mix: MixConfig { - backend: mix_config.clone(), + backend: mix_config.backend.clone(), persistent_transmission: Default::default(), message_blend: MessageBlendSettings { - cryptographic_processor: CryptographicProcessorSettings { num_mix_layers: 1 }, - temporal_processor: TemporalProcessorSettings { + cryptographic_processor: CryptographicProcessorSettings { + private_key: mix_config.private_key.to_bytes(), + num_mix_layers: 1, + }, + temporal_processor: TemporalSchedulerSettings { max_delay_seconds: 2, }, }, + cover_traffic: nomos_mix_service::CoverTrafficExtSettings { + epoch_duration: Duration::from_secs(432000), + slot_duration: Duration::from_secs(20), + }, + membership: mix_config.membership.clone(), }, 
da_network: DaNetworkConfig { backend: DaNetworkBackendSettings { @@ -241,7 +256,6 @@ pub fn new_node( topic: String::from(nomos_node::CL_TOPIC), id: ::hash, }, - registry: None, }, da_mempool: DaMempoolSettings { backend: (), @@ -249,7 +263,6 @@ pub fn new_node( topic: String::from(nomos_node::DA_TOPIC), id: ::blob_id, }, - registry: None, }, storage: nomos_storage::backends::rocksdb::RocksBackendSettings { db_path, @@ -308,35 +321,40 @@ pub fn new_node( .unwrap() } -pub fn new_mix_configs(listening_addresses: Vec) -> Vec { - let mut configs = listening_addresses +pub fn new_mix_configs(listening_addresses: Vec) -> Vec { + let settings = listening_addresses .iter() - .map(|listening_address| Libp2pMixBackendSettings { - listening_address: listening_address.clone(), - node_key: ed25519::SecretKey::generate(), - membership: Vec::new(), - peering_degree: 1, + .map(|listening_address| { + ( + Libp2pMixBackendSettings { + listening_address: listening_address.clone(), + node_key: ed25519::SecretKey::generate(), + peering_degree: 1, + }, + x25519_dalek::StaticSecret::random(), + ) }) .collect::>(); - let membership = configs + let membership = settings .iter() - .map(|c| { - let peer_id = PeerId::from_public_key( - &ed25519::Keypair::from(c.node_key.clone()).public().into(), - ); - c.listening_address - .clone() - .with_p2p(peer_id) - .unwrap_or_else(|orig_addr| orig_addr) + .map(|(backend, private_key)| Node { + address: backend.listening_address.clone(), + public_key: x25519_dalek::PublicKey::from(&x25519_dalek::StaticSecret::from( + private_key.to_bytes(), + )) + .to_bytes(), }) .collect::>(); - configs - .iter_mut() - .for_each(|c| c.membership = membership.clone()); - - configs + settings + .into_iter() + .map(|(backend, private_key)| TestMixSettings { + backend, + private_key, + membership: membership.clone(), + }) + .collect() } // Client node is only created for asyncroniously interact with nodes in the test. diff --git a/nomos-services/data-availability/tests/src/indexer_integration.rs b/nomos-services/data-availability/tests/src/indexer_integration.rs index 249f73b7..7696d68f 100644 --- a/nomos-services/data-availability/tests/src/indexer_integration.rs +++ b/nomos-services/data-availability/tests/src/indexer_integration.rs @@ -33,6 +33,7 @@ use tempfile::{NamedTempFile, TempDir}; use time::OffsetDateTime; use tokio_stream::wrappers::BroadcastStream; use tokio_stream::StreamExt; + // internal use crate::common::*; @@ -229,6 +230,17 @@ fn test_indexer() { let blob_hash = ::id(&blobs[0]); let blob_info = BlobInfo::new(blob_hash, meta); + // Create orphan blob metadata - without having actual blob stored + let orphan_app_id = [10u8; 32]; + let orphan_index = 2.into(); + + let orphan_meta = Metadata::new(orphan_app_id, orphan_index); + let mut orphan_blob_hash = [0u8; 32]; + thread_rng().fill(&mut orphan_blob_hash[..]); + + let orphan_blob_info = BlobInfo::new(orphan_blob_hash, orphan_meta); + + // Prepare indexes for blobs let mut node_1_blob_0_idx = Vec::new(); node_1_blob_0_idx.extend_from_slice(&blob_hash); node_1_blob_0_idx.extend_from_slice(&0u16.to_be_bytes()); @@ -315,6 +327,18 @@ fn test_indexer() { .unwrap(); let _ = mempool_rx.await.unwrap(); + // Put orphan_blob_info into the mempool. 
+ let (mempool2_tx, mempool2_rx) = tokio::sync::oneshot::channel(); + mempool_outbound + .send(nomos_mempool::MempoolMsg::Add { + payload: orphan_blob_info, + key: orphan_blob_hash, + reply_channel: mempool2_tx, + }) + .await + .unwrap(); + let _ = mempool2_rx.await.unwrap(); + // Wait for block in the network. let timeout = tokio::time::sleep(Duration::from_secs(INDEXER_TEST_MAX_SECONDS)); tokio::pin!(timeout); @@ -347,6 +371,36 @@ fn test_indexer() { .unwrap(); let mut app_id_blobs = indexer_rx.await.unwrap(); + // Request range of orphan blob from indexer - nothing is expected in return + let (indexer2_tx, indexer2_rx) = tokio::sync::oneshot::channel(); + indexer_outbound + .send(nomos_da_indexer::DaMsg::GetRange { + app_id: orphan_app_id, + range: 0.into()..2.into(), + reply_channel: indexer2_tx, + }) + .await + .unwrap(); + let orphan_app_id_blobs = indexer2_rx.await.unwrap(); + + // Indexer should not return any blobs for orphan app_id + for v in orphan_app_id_blobs { + assert!(v.1.is_empty()); + } + + // Mempool should still contain orphan_blob_info + let (mempool3_tx, mempool3_rx) = tokio::sync::oneshot::channel(); + mempool_outbound + .send(nomos_mempool::MempoolMsg::Status { + items: vec![orphan_blob_hash], + reply_channel: mempool3_tx, + }) + .await + .unwrap(); + let blocks_with_orphan_blob_hash = mempool3_rx.await.unwrap(); + + assert_eq!(blocks_with_orphan_blob_hash.len(), 1); + // Since we've only attested to blob_info at idx 0, the first // item should have "some" data, other indexes should be None. app_id_blobs.sort_by(|(a, _), (b, _)| a.partial_cmp(b).unwrap()); diff --git a/nomos-services/data-availability/tests/src/verifier_integration.rs b/nomos-services/data-availability/tests/src/verifier_integration.rs index fe53abff..0e47dff5 100644 --- a/nomos-services/data-availability/tests/src/verifier_integration.rs +++ b/nomos-services/data-availability/tests/src/verifier_integration.rs @@ -19,17 +19,24 @@ use nomos_libp2p::SwarmConfig; use rand::{thread_rng, Rng}; use tempfile::{NamedTempFile, TempDir}; use time::OffsetDateTime; +use tracing_subscriber::fmt::TestWriter; +use tracing_subscriber::EnvFilter; // internal use crate::common::*; #[test] fn test_verifier() { + let _ = tracing_subscriber::fmt() + .with_env_filter(EnvFilter::from_default_env()) + .compact() + .with_writer(TestWriter::default()) + .try_init(); let performed_tx = Arc::new(AtomicBool::new(false)); let performed_rx = performed_tx.clone(); let is_success_tx = Arc::new(AtomicBool::new(false)); let is_success_rx = is_success_tx.clone(); - let mut ids = vec![[0; 32]; 2]; + let mut ids = vec![[0; 32]; 3]; for id in &mut ids { thread_rng().fill(id); } @@ -70,28 +77,42 @@ fn test_verifier() { port: 7774, ..Default::default() }; + + let swarm_config3 = SwarmConfig { + port: 7775, + ..Default::default() + }; + let mix_configs = new_mix_configs(vec![ Multiaddr::from_str("/ip4/127.0.0.1/udp/7783/quic-v1").unwrap(), Multiaddr::from_str("/ip4/127.0.0.1/udp/7784/quic-v1").unwrap(), + Multiaddr::from_str("/ip4/127.0.0.1/udp/7785/quic-v1").unwrap(), ]); let blobs_dir = TempDir::new().unwrap().path().to_path_buf(); let (node1_sk, _) = generate_blst_hex_keys(); let (node2_sk, _) = generate_blst_hex_keys(); + let (node3_sk, _) = generate_blst_hex_keys(); let client_zone = new_client(NamedTempFile::new().unwrap().path().to_path_buf()); let (peer_sk_1, peer_id_1) = generate_ed25519_sk_peerid(); let (peer_sk_2, peer_id_2) = generate_ed25519_sk_peerid(); + let (peer_sk_3, peer_id_3) = generate_ed25519_sk_peerid(); let addr_1 = 
Multiaddr::from_str("/ip4/127.0.0.1/udp/8880/quic-v1").unwrap(); let addr_2 = Multiaddr::from_str("/ip4/127.0.0.1/udp/8881/quic-v1").unwrap(); + let addr_3 = Multiaddr::from_str("/ip4/127.0.0.1/udp/8882/quic-v1").unwrap(); - let peer_addresses = vec![(peer_id_1, addr_1.clone()), (peer_id_2, addr_2.clone())]; + let peer_addresses = vec![ + (peer_id_1, addr_1.clone()), + (peer_id_2, addr_2.clone()), + (peer_id_3, addr_3.clone()), + ]; let num_samples = 1; - let num_subnets = 2; + let num_subnets = 3; let nodes_per_subnet = 1; let node1 = new_node( @@ -141,7 +162,7 @@ fn test_verifier() { global_params_path: GLOBAL_PARAMS_PATH.into(), }, TestDaNetworkSettings { - peer_addresses, + peer_addresses: peer_addresses.clone(), listening_address: addr_2, num_subnets, num_samples, @@ -150,9 +171,37 @@ fn test_verifier() { }, ); - let node1_verifier = node1.handle().relay::(); + let node3 = new_node( + &LeaderConfig { + notes: vec![notes[2].clone()], + nf_sk: sks[2], + }, + &ledger_config, + &genesis_state, + &time_config, + &swarm_config3, + &mix_configs[2], + NamedTempFile::new().unwrap().path().to_path_buf(), + &blobs_dir, + vec![node_address(&swarm_config2)], + KzgrsDaVerifierSettings { + sk: node3_sk, + index: [2].into(), + global_params_path: GLOBAL_PARAMS_PATH.into(), + }, + TestDaNetworkSettings { + peer_addresses, + listening_address: addr_3, + num_subnets, + num_samples, + nodes_per_subnet, + node_key: peer_sk_3, + }, + ); + let node1_verifier = node1.handle().relay::(); let node2_verifier = node2.handle().relay::(); + let node3_verifier = node3.handle().relay::(); client_zone.spawn(async move { let node1_verifier = node1_verifier.connect().await.unwrap(); @@ -161,9 +210,13 @@ fn test_verifier() { let node2_verifier = node2_verifier.connect().await.unwrap(); let (node2_reply_tx, node2_reply_rx) = tokio::sync::oneshot::channel(); + let node3_verifier = node3_verifier.connect().await.unwrap(); + let (node3_reply_tx, node3_reply_rx) = tokio::sync::oneshot::channel::>(); + let verifiers = vec![ (node1_verifier, node1_reply_tx), (node2_verifier, node2_reply_tx), + (node3_verifier, node3_reply_tx), ]; // Encode data @@ -176,32 +229,34 @@ fn test_verifier() { let encoded_data = encoder.encode(&data).unwrap(); let columns: Vec<_> = encoded_data.extended_data.columns().collect(); + drop(node3_reply_rx); + for (i, (verifier, reply_tx)) in verifiers.into_iter().enumerate() { - let column = &columns[i]; + let index = i % 2; + let column = &columns[index]; let da_blob = DaBlob { column: column.clone(), - column_idx: i + column_idx: index .try_into() .expect("Column index shouldn't overflow the target type"), - column_commitment: encoded_data.column_commitments[i], + column_commitment: encoded_data.column_commitments[index], aggregated_column_commitment: encoded_data.aggregated_column_commitment, - aggregated_column_proof: encoded_data.aggregated_column_proofs[i], + aggregated_column_proof: encoded_data.aggregated_column_proofs[index], rows_commitments: encoded_data.row_commitments.clone(), rows_proofs: encoded_data .rows_proofs .iter() - .map(|proofs| proofs.get(i).cloned().unwrap()) + .map(|proofs| proofs.get(index).cloned().unwrap()) .collect(), }; - verifier - .send(nomos_da_verifier::DaVerifierMsg::AddBlob { - blob: da_blob, - reply_channel: reply_tx, - }) - .await - .unwrap(); + let add_blob_message = nomos_da_verifier::DaVerifierMsg::AddBlob { + blob: da_blob, + reply_channel: reply_tx, + }; + + verifier.send(add_blob_message).await.unwrap(); } // Wait for response from the verifier. 
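The mempool diffs that follow drop the Prometheus registry plumbing in favor of `tracing` events whose fields carry metric prefixes (e.g. `counter.da_mempool_pending_items`). A hedged sketch of that convention, assuming a `tracing-opentelemetry` `MetricsLayer` (fed, for instance, by the OTLP receiver enabled in the compose files) is installed to turn such fields into instruments:

```rust
use tracing::info;

// Per tracing-opentelemetry's documented convention, fields prefixed with
// `counter.`, `monotonic_counter.`, or `histogram.` are interpreted as
// metric instruments; plain fields remain ordinary log attributes.
fn record_pending_items(pending: usize) {
    info!(counter.tx_mempool_pending_items = pending as i64);
}

fn main() {
    record_pending_items(42);
}
```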
diff --git a/nomos-services/mempool/Cargo.toml b/nomos-services/mempool/Cargo.toml index 8a5ec8e9..b0b4051e 100644 --- a/nomos-services/mempool/Cargo.toml +++ b/nomos-services/mempool/Cargo.toml @@ -10,7 +10,6 @@ async-trait = "0.1" bincode = { version = "2.0.0-rc.2", features = ["serde"] } futures = "0.3" linked-hash-map = { version = "0.5.6", optional = true } -nomos-metrics = { path = "../../nomos-services/metrics" } nomos-network = { path = "../network" } nomos-da-network-core = { path = "../../nomos-da/network/core" } nomos-da-sampling = { path = "../../nomos-services/data-availability/sampling/" } @@ -38,7 +37,6 @@ blake2 = "0.10" default = [] mock = ["linked-hash-map", "nomos-network/mock", "nomos-core/mock"] libp2p = ["nomos-network/libp2p"] -metrics = [] # enable to help generate OpenAPI openapi = ["dep:utoipa", "serde_json"] diff --git a/nomos-services/mempool/src/da/service.rs b/nomos-services/mempool/src/da/service.rs index 0a1384a4..a9f72738 100644 --- a/nomos-services/mempool/src/da/service.rs +++ b/nomos-services/mempool/src/da/service.rs @@ -8,9 +8,6 @@ pub mod openapi { use std::fmt::Debug; // crates -// TODO: Add again after metrics refactor -// #[cfg(feature = "metrics")] -// use super::metrics::Metrics; use futures::StreamExt; use nomos_da_sampling::storage::DaStorageAdapter; use rand::{RngCore, SeedableRng}; @@ -23,7 +20,6 @@ use nomos_da_sampling::{ backend::DaSamplingServiceBackend, network::NetworkAdapter as DaSamplingNetworkAdapter, DaSamplingService, DaSamplingServiceMsg, }; -use nomos_metrics::NomosRegistry; use nomos_network::{NetworkMsg, NetworkService}; use overwatch_rs::services::life_cycle::LifecycleMessage; use overwatch_rs::services::{ @@ -55,9 +51,6 @@ where network_relay: Relay>, sampling_relay: Relay>, pool: P, - // TODO: Add again after metrics refactor - // #[cfg(feature = "metrics")] - // metrics: Option, } impl ServiceData for DaMempoolService @@ -113,19 +106,11 @@ where let sampling_relay = service_state.overwatch_handle.relay(); let settings = service_state.settings_reader.get_updated_settings(); - // TODO: Refactor metrics to be reusable then replug it again - // #[cfg(feature = "metrics")] - // let metrics = settings - // .registry - // .map(|reg| Metrics::new(reg, service_state.id())); - Ok(Self { service_state, network_relay, sampling_relay, pool: P::new(settings.backend), - // #[cfg(feature = "metrics")] - // metrics, }) } @@ -160,9 +145,6 @@ where loop { tokio::select! 
{ Some(msg) = service_state.inbound_relay.recv() => { - // TODO: replug metrics once refactor is done - // #[cfg(feature = "metrics")] - // if let Some(metrics) = &self.metrics { metrics.record(&msg) } Self::handle_mempool_message(msg, &mut pool, &mut network_relay, &mut service_state).await; } Some((key, item)) = network_items.next() => { @@ -170,6 +152,7 @@ where pool.add_item(key, item).unwrap_or_else(|e| { tracing::debug!("could not add item to the pool due to: {}", e) }); + tracing::info!(counter.da_mempool_pending_items = pool.pending_item_count()); } Some(msg) = lifecycle_stream.next() => { if Self::should_stop_service(msg).await { @@ -294,5 +277,4 @@ where pub struct DaMempoolSettings { pub backend: B, pub network: N, - pub registry: Option, } diff --git a/nomos-services/mempool/src/tx/metrics.rs b/nomos-services/mempool/src/tx/metrics.rs deleted file mode 100644 index 0fbcd1b5..00000000 --- a/nomos-services/mempool/src/tx/metrics.rs +++ /dev/null @@ -1,80 +0,0 @@ -// std -use std::fmt::Debug; -// crates -use nomos_metrics::{ - metrics::{counter::Counter, family::Family}, - prometheus_client::{self, encoding::EncodeLabelSet, encoding::EncodeLabelValue}, - NomosRegistry, -}; -use overwatch_rs::services::ServiceId; -// internal -use crate::MempoolMsg; - -#[derive(Debug, Clone, Hash, PartialEq, Eq, EncodeLabelValue)] -enum MempoolMsgType { - Add, - View, - Prune, - MarkInBlock, -} - -impl From<&MempoolMsg> for MempoolMsgType -where - I: 'static + Debug, - K: 'static + Debug, -{ - fn from(event: &MempoolMsg) -> Self { - match event { - MempoolMsg::Add { .. } => MempoolMsgType::Add, - MempoolMsg::View { .. } => MempoolMsgType::View, - MempoolMsg::Prune { .. } => MempoolMsgType::Prune, - MempoolMsg::MarkInBlock { .. } => MempoolMsgType::MarkInBlock, - _ => unimplemented!(), - } - } -} - -#[derive(Debug, Clone, Hash, PartialEq, Eq, EncodeLabelSet)] -struct MessageLabels { - label: MempoolMsgType, -} - -pub(crate) struct Metrics { - messages: Family, -} - -impl Metrics { - pub(crate) fn new(registry: NomosRegistry, discriminant: ServiceId) -> Self { - let mut registry = registry - .lock() - .expect("should've acquired the lock for registry"); - let sub_registry = registry.sub_registry_with_prefix(discriminant); - - let messages = Family::default(); - sub_registry.register( - "messages", - "Messages emitted by the Mempool", - messages.clone(), - ); - - Self { messages } - } - - pub(crate) fn record(&self, msg: &MempoolMsg) - where - I: 'static + Debug, - K: 'static + Debug, - { - match msg { - MempoolMsg::Add { .. } - | MempoolMsg::View { .. } - | MempoolMsg::Prune { .. } - | MempoolMsg::MarkInBlock { .. 
} => { - self.messages - .get_or_create(&MessageLabels { label: msg.into() }) - .inc(); - } - _ => {} - } - } -} diff --git a/nomos-services/mempool/src/tx/mod.rs b/nomos-services/mempool/src/tx/mod.rs index 320bcdf1..1f278a4d 100644 --- a/nomos-services/mempool/src/tx/mod.rs +++ b/nomos-services/mempool/src/tx/mod.rs @@ -1,3 +1 @@ -#[cfg(feature = "metrics")] -pub mod metrics; pub mod service; diff --git a/nomos-services/mempool/src/tx/service.rs b/nomos-services/mempool/src/tx/service.rs index a03e3328..fe8fc798 100644 --- a/nomos-services/mempool/src/tx/service.rs +++ b/nomos-services/mempool/src/tx/service.rs @@ -8,10 +8,7 @@ pub mod openapi { use std::fmt::Debug; // crates -#[cfg(feature = "metrics")] -use super::metrics::Metrics; use futures::StreamExt; -use nomos_metrics::NomosRegistry; // internal use crate::backend::MemPool; use crate::network::NetworkAdapter; @@ -38,8 +35,6 @@ where service_state: ServiceStateHandle, network_relay: Relay>, pool: P, - #[cfg(feature = "metrics")] - metrics: Option, } impl ServiceData for TxMempoolService @@ -78,17 +73,10 @@ where let network_relay = service_state.overwatch_handle.relay(); let settings = service_state.settings_reader.get_updated_settings(); - #[cfg(feature = "metrics")] - let metrics = settings - .registry - .map(|reg| Metrics::new(reg, service_state.id())); - Ok(Self { service_state, network_relay, pool: P::new(settings.backend), - #[cfg(feature = "metrics")] - metrics, }) } @@ -117,14 +105,13 @@ where loop { tokio::select! { Some(msg) = service_state.inbound_relay.recv() => { - #[cfg(feature = "metrics")] - if let Some(metrics) = &self.metrics { metrics.record(&msg) } Self::handle_mempool_message(msg, &mut pool, &mut network_relay, &mut service_state).await; } Some((key, item )) = network_items.next() => { pool.add_item(key, item).unwrap_or_else(|e| { tracing::debug!("could not add item to the pool due to: {}", e) }); + tracing::info!(counter.tx_mempool_pending_items = pool.pending_item_count()); } Some(msg) = lifecycle_stream.next() => { if Self::should_stop_service(msg).await { @@ -241,5 +228,4 @@ where pub struct TxMempoolSettings { pub backend: B, pub network: N, - pub registry: Option, } diff --git a/nomos-services/mempool/tests/mock.rs b/nomos-services/mempool/tests/mock.rs index b9436f82..42676741 100644 --- a/nomos-services/mempool/tests/mock.rs +++ b/nomos-services/mempool/tests/mock.rs @@ -64,7 +64,6 @@ fn test_mockmempool() { mockpool: TxMempoolSettings { backend: (), network: (), - registry: None, }, logging: TracingSettings::default(), }, diff --git a/nomos-services/metrics/Cargo.toml b/nomos-services/metrics/Cargo.toml deleted file mode 100644 index 1867b0f1..00000000 --- a/nomos-services/metrics/Cargo.toml +++ /dev/null @@ -1,16 +0,0 @@ -[package] -name = "nomos-metrics" -version = "0.1.0" -edition = "2021" - -# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html - -[dependencies] -async-trait = "0.1" -futures = "0.3" -overwatch-rs = { git = "https://github.com/logos-co/Overwatch", rev = "2f70806" } -overwatch-derive = { git = "https://github.com/logos-co/Overwatch", rev = "ac28d01" } -prometheus-client = "0.22.0" -tracing = "0.1" -tokio = { version = "1", features = ["sync", "macros"] } -serde = { version = "1", features = ["derive"] } diff --git a/nomos-services/metrics/src/lib.rs b/nomos-services/metrics/src/lib.rs deleted file mode 100644 index d9461b38..00000000 --- a/nomos-services/metrics/src/lib.rs +++ /dev/null @@ -1,119 +0,0 @@ -pub use prometheus_client::{self, 
*}; - -// std -use std::fmt::{Debug, Error, Formatter}; -use std::sync::{Arc, Mutex}; -// crates -use futures::StreamExt; -use overwatch_rs::services::life_cycle::LifecycleMessage; -use overwatch_rs::services::{ - handle::ServiceStateHandle, - relay::RelayMessage, - state::{NoOperator, NoState}, - ServiceCore, ServiceData, -}; -use prometheus_client::encoding::text::encode; -use prometheus_client::registry::Registry; -use tokio::sync::oneshot::Sender; -use tracing::error; -// internal - -// A wrapper for prometheus_client Registry. -// Lock is only used during services initialization and prometheus pull query. -pub type NomosRegistry = Arc>; - -pub struct Metrics { - service_state: ServiceStateHandle, - registry: NomosRegistry, -} - -#[derive(Clone, Debug)] -pub struct MetricsSettings { - pub registry: Option, -} - -pub enum MetricsMsg { - Gather { reply_channel: Sender }, -} - -impl RelayMessage for MetricsMsg {} - -impl Debug for MetricsMsg { - fn fmt(&self, f: &mut Formatter<'_>) -> Result<(), Error> { - match self { - Self::Gather { .. } => { - write!(f, "MetricsMsg::Gather") - } - } - } -} - -impl ServiceData for Metrics { - const SERVICE_ID: &'static str = "Metrics"; - type Settings = MetricsSettings; - type State = NoState; - type StateOperator = NoOperator; - type Message = MetricsMsg; -} - -#[async_trait::async_trait] -impl ServiceCore for Metrics { - fn init(service_state: ServiceStateHandle) -> Result { - let config = service_state.settings_reader.get_updated_settings(); - - Ok(Self { - service_state, - registry: config.registry.ok_or("No registry provided")?, - }) - } - - async fn run(self) -> Result<(), overwatch_rs::DynError> { - let Self { - mut service_state, - registry, - } = self; - let mut lifecycle_stream = service_state.lifecycle_handle.message_stream(); - loop { - tokio::select! { - Some(msg) = service_state.inbound_relay.recv() => { - let MetricsMsg::Gather{reply_channel} = msg; - - let mut buf = String::new(); - { - let reg = registry.lock().unwrap(); - // If encoding fails, we need to stop trying process subsequent metrics gather - // requests. If it succeds, encode method returns empty unit type. 
- _ = encode(&mut buf, ®).map_err(|e| std::io::Error::new(std::io::ErrorKind::Other, e))?; - } - - reply_channel - .send(buf) - .unwrap_or_else(|_| tracing::debug!("could not send back metrics")); - } - Some(msg) = lifecycle_stream.next() => { - if Self::should_stop_service(msg).await { - break; - } - } - } - } - Ok(()) - } -} - -impl Metrics { - async fn should_stop_service(message: LifecycleMessage) -> bool { - match message { - LifecycleMessage::Shutdown(sender) => { - if sender.send(()).is_err() { - error!( - "Error sending successful shutdown signal from service {}", - Self::SERVICE_ID - ); - } - true - } - LifecycleMessage::Kill => true, - } - } -} diff --git a/nomos-services/mix/Cargo.toml b/nomos-services/mix/Cargo.toml index 79b475eb..c5ad3c75 100644 --- a/nomos-services/mix/Cargo.toml +++ b/nomos-services/mix/Cargo.toml @@ -15,10 +15,12 @@ nomos-mix-message = { path = "../../nomos-mix/message" } nomos-network = { path = "../network" } overwatch-rs = { git = "https://github.com/logos-co/Overwatch", rev = "2f70806" } rand = "0.8.5" +rand_chacha = "0.3" serde = { version = "1.0", features = ["derive"] } tokio = { version = "1", features = ["macros", "sync"] } tokio-stream = "0.1" tracing = "0.1" +x25519-dalek = { version = "2", features = ["getrandom", "static_secrets"] } [features] default = [] diff --git a/nomos-services/mix/src/backends/libp2p.rs b/nomos-services/mix/src/backends/libp2p.rs index 7a891d0b..8aa4b2b1 100644 --- a/nomos-services/mix/src/backends/libp2p.rs +++ b/nomos-services/mix/src/backends/libp2p.rs @@ -1,16 +1,19 @@ use std::{io, pin::Pin, time::Duration}; +use super::MixBackend; use async_trait::async_trait; use futures::{Stream, StreamExt}; use libp2p::{ core::transport::ListenerId, identity::{ed25519, Keypair}, swarm::SwarmEvent, - Multiaddr, PeerId, Swarm, SwarmBuilder, TransportError, + Multiaddr, Swarm, SwarmBuilder, TransportError, }; -use nomos_libp2p::{secret_key_serde, DialError, DialOpts, Protocol}; +use nomos_libp2p::{secret_key_serde, DialError, DialOpts}; +use nomos_mix::membership::Membership; +use nomos_mix_message::sphinx::SphinxMessage; use overwatch_rs::overwatch::handle::OverwatchHandle; -use rand::seq::IteratorRandom; +use rand::Rng; use serde::{Deserialize, Serialize}; use tokio::{ sync::{broadcast, mpsc}, @@ -18,8 +21,6 @@ use tokio::{ }; use tokio_stream::wrappers::BroadcastStream; -use super::MixBackend; - /// A mix backend that uses the libp2p network stack. 
pub struct Libp2pMixBackend { #[allow(dead_code)] @@ -34,7 +35,6 @@ pub struct Libp2pMixBackendSettings { // A key for deriving PeerId and establishing secure connections (TLS 1.3 by QUIC) #[serde(with = "secret_key_serde", default = "ed25519::SecretKey::generate")] pub node_key: ed25519::SecretKey, - pub membership: Vec, pub peering_degree: usize, } @@ -44,12 +44,16 @@ const CHANNEL_SIZE: usize = 64; impl MixBackend for Libp2pMixBackend { type Settings = Libp2pMixBackendSettings; - fn new(config: Self::Settings, overwatch_handle: OverwatchHandle) -> Self { + fn new( + config: Self::Settings, + overwatch_handle: OverwatchHandle, + membership: Membership, + mut rng: R, + ) -> Self { let (swarm_message_sender, swarm_message_receiver) = mpsc::channel(CHANNEL_SIZE); let (incoming_message_sender, _) = broadcast::channel(CHANNEL_SIZE); let keypair = Keypair::from(ed25519::Keypair::from(config.node_key.clone())); - let local_peer_id = keypair.public().to_peer_id(); let mut swarm = MixSwarm::new( keypair, swarm_message_receiver, @@ -63,20 +67,12 @@ impl MixBackend for Libp2pMixBackend { }); // Randomly select peering_degree number of peers, and dial to them - // TODO: Consider moving the peer seelction to the nomos_mix_network::Behaviour - config - .membership + membership + .choose_remote_nodes(&mut rng, config.peering_degree) .iter() - .filter(|addr| match extract_peer_id(addr) { - Some(peer_id) => peer_id != local_peer_id, - None => false, - }) - .choose_multiple(&mut rand::thread_rng(), config.peering_degree) - .iter() - .cloned() - .for_each(|addr| { - if let Err(e) = swarm.dial(addr.clone()) { - tracing::error!("failed to dial to {:?}: {:?}", addr, e); + .for_each(|node| { + if let Err(e) = swarm.dial(node.address.clone()) { + tracing::error!("failed to dial to {:?}: {:?}", node.address, e); } }); @@ -110,7 +106,7 @@ impl MixBackend for Libp2pMixBackend { } struct MixSwarm { - swarm: Swarm, + swarm: Swarm>, swarm_messages_receiver: mpsc::Receiver, incoming_message_sender: broadcast::Sender>, } @@ -182,7 +178,9 @@ impl MixSwarm { match event { SwarmEvent::Behaviour(nomos_mix_network::Event::Message(msg)) => { tracing::debug!("Received message from a peer: {msg:?}"); - self.incoming_message_sender.send(msg).unwrap(); + if let Err(e) = self.incoming_message_sender.send(msg) { + tracing::error!("Failed to send incoming message to channel: {e}"); + } } SwarmEvent::Behaviour(nomos_mix_network::Event::Error(e)) => { tracing::error!("Received error from mix network: {e:?}"); @@ -193,13 +191,3 @@ impl MixSwarm { } } } - -fn extract_peer_id(multiaddr: &Multiaddr) -> Option { - multiaddr.iter().find_map(|protocol| { - if let Protocol::P2p(peer_id) = protocol { - Some(peer_id) - } else { - None - } - }) -} diff --git a/nomos-services/mix/src/backends/mod.rs b/nomos-services/mix/src/backends/mod.rs index f983b2f9..8c1ee8ef 100644 --- a/nomos-services/mix/src/backends/mod.rs +++ b/nomos-services/mix/src/backends/mod.rs @@ -4,14 +4,24 @@ pub mod libp2p; use std::{fmt::Debug, pin::Pin}; use futures::Stream; +use nomos_mix::membership::Membership; +use nomos_mix_message::sphinx::SphinxMessage; use overwatch_rs::overwatch::handle::OverwatchHandle; +use rand::Rng; /// A trait for mix backends that send messages to the mix network. 
/// A trait for mix backends that send messages to the mix network. #[async_trait::async_trait] pub trait MixBackend { type Settings: Clone + Debug + Send + Sync + 'static; - fn new(config: Self::Settings, overwatch_handle: OverwatchHandle) -> Self; + fn new<R>( + config: Self::Settings, + overwatch_handle: OverwatchHandle, + membership: Membership<SphinxMessage>, + rng: R, + ) -> Self + where + R: Rng; /// Publish a message to the mix network. async fn publish(&self, msg: Vec<u8>); /// Listen to messages received from the mix network. diff --git a/nomos-services/mix/src/lib.rs b/nomos-services/mix/src/lib.rs index 8a2e3777..2338fbc4 100644 --- a/nomos-services/mix/src/lib.rs +++ b/nomos-services/mix/src/lib.rs @@ -6,12 +6,18 @@ use backends::MixBackend; use futures::StreamExt; use network::NetworkAdapter; use nomos_core::wire; -use nomos_mix::message_blend::crypto::CryptographicProcessor; +use nomos_mix::message_blend::temporal::TemporalScheduler; +use nomos_mix::message_blend::{crypto::CryptographicProcessor, CryptographicProcessorSettings}; use nomos_mix::message_blend::{MessageBlendExt, MessageBlendSettings}; use nomos_mix::persistent_transmission::{ - PersistentTransmissionExt, PersistentTransmissionSettings, + PersistentTransmissionExt, PersistentTransmissionSettings, PersistentTransmissionStream, }; use nomos_mix::MixOutgoingMessage; +use nomos_mix::{ + cover_traffic::{CoverTraffic, CoverTrafficSettings}, + membership::{Membership, Node}, +}; +use nomos_mix_message::{sphinx::SphinxMessage, MixMessage}; use nomos_network::NetworkService; use overwatch_rs::services::{ handle::ServiceStateHandle, @@ -20,10 +26,14 @@ use overwatch_rs::services::{ state::{NoOperator, NoState}, ServiceCore, ServiceData, ServiceId, }; +use rand::SeedableRng; +use rand_chacha::ChaCha12Rng; use serde::{de::DeserializeOwned, Deserialize, Serialize}; use std::fmt::Debug; +use std::time::Duration; use tokio::sync::mpsc; -use tokio_stream::wrappers::UnboundedReceiverStream; +use tokio::time; +use tokio_stream::wrappers::{IntervalStream, UnboundedReceiverStream};
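The signature change above is what makes peer sampling testable: the RNG is injected rather than created inside the backend. An illustrative helper, not part of the patch (the `nomos_mix_service::backends` import path is assumed from this workspace's layout), showing how a caller picks between OS entropy and a fixed seed:

```rust
use nomos_mix::membership::Membership;
use nomos_mix_message::sphinx::SphinxMessage;
use nomos_mix_service::backends::MixBackend;
use overwatch_rs::overwatch::handle::OverwatchHandle;
use rand::SeedableRng;
use rand_chacha::ChaCha12Rng;

// Build any MixBackend with either a reproducible or an entropy-seeded RNG.
fn build_backend<B: MixBackend>(
    settings: B::Settings,
    handle: OverwatchHandle,
    membership: Membership<SphinxMessage>,
    seed: Option<u64>,
) -> B {
    let rng = match seed {
        Some(s) => ChaCha12Rng::seed_from_u64(s), // deterministic tests
        None => ChaCha12Rng::from_entropy(),      // production entropy
    };
    B::new(settings, handle, membership, rng)
}
```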
/// A mix service that sends messages to the mix network /// and broadcasts fully unwrapped messages through the [`NetworkService`]. @@ -40,6 +50,7 @@ where backend: Backend, service_state: ServiceStateHandle<Self>, network_relay: Relay<NetworkService<Network::Backend>>, + membership: Membership<SphinxMessage>, } impl ServiceData for MixService @@ -67,13 +78,17 @@ where { fn init(service_state: ServiceStateHandle<Self>) -> Result<Self, overwatch_rs::DynError> { let network_relay = service_state.overwatch_handle.relay(); + let mix_config = service_state.settings_reader.get_updated_settings(); Ok(Self { backend: <Backend as MixBackend>::new( service_state.settings_reader.get_updated_settings().backend, service_state.overwatch_handle.clone(), + mix_config.membership(), + ChaCha12Rng::from_entropy(), ), service_state, network_relay, + membership: mix_config.membership(), }) } @@ -82,23 +97,54 @@ where service_state, mut backend, network_relay, + membership, } = self; let mix_config = service_state.settings_reader.get_updated_settings(); - let cryptographic_processor = - CryptographicProcessor::new(mix_config.message_blend.cryptographic_processor); + let mut cryptographic_processor = CryptographicProcessor::new( + mix_config.message_blend.cryptographic_processor.clone(), + membership.clone(), + ChaCha12Rng::from_entropy(), + ); let network_relay = network_relay.connect().await?; let network_adapter = Network::new(network_relay); // tier 1 persistent transmission let (persistent_sender, persistent_receiver) = mpsc::unbounded_channel(); - let mut persistent_transmission_messages = - UnboundedReceiverStream::new(persistent_receiver) - .persistent_transmission(mix_config.persistent_transmission); + let mut persistent_transmission_messages: PersistentTransmissionStream< + _, + _, + SphinxMessage, + _, + > = UnboundedReceiverStream::new(persistent_receiver).persistent_transmission( + mix_config.persistent_transmission, + ChaCha12Rng::from_entropy(), + IntervalStream::new(time::interval(Duration::from_secs_f64( + 1.0 / mix_config.persistent_transmission.max_emission_frequency, + ))) + .map(|_| ()), + ); // tier 2 blend - let mut blend_messages = backend - .listen_to_incoming_messages() - .blend(mix_config.message_blend); + let temporal_scheduler = TemporalScheduler::new( + mix_config.message_blend.temporal_processor, + ChaCha12Rng::from_entropy(), + ); + let mut blend_messages = backend.listen_to_incoming_messages().blend( + mix_config.message_blend.clone(), + membership.clone(), + temporal_scheduler, + ChaCha12Rng::from_entropy(), + ); + + // tier 3 cover traffic + let mut cover_traffic: CoverTraffic<_, _, SphinxMessage> = CoverTraffic::new( + mix_config.cover_traffic.cover_traffic_settings( + &membership, + &mix_config.message_blend.cryptographic_processor, + ), + mix_config.cover_traffic.epoch_stream(), + mix_config.cover_traffic.slot_stream(), + ); // local messages are bypassed and sent immediately let mut local_messages = service_state @@ -129,23 +175,17 @@ where network_adapter.broadcast(msg.message, msg.broadcast_settings).await; }, _ => { - tracing::error!("unrecognized message from mix backend"); + tracing::debug!("unrecognized message from mix backend"); } } } } } + Some(msg) = cover_traffic.next() => { + Self::wrap_and_send_to_persistent_transmission(msg, &mut cryptographic_processor, &persistent_sender); + } Some(msg) = local_messages.next() => { - match cryptographic_processor.wrap_message(&msg) { - Ok(wrapped_message) => { - if let Err(e) = persistent_sender.send(wrapped_message) { - tracing::error!("Error sending message to persistent stream: {e}"); - } - } - Err(e) => { - tracing::error!("Failed to wrap message: {:?}", e); - } - } + Self::wrap_and_send_to_persistent_transmission(msg, &mut cryptographic_processor,
&persistent_sender); } Some(msg) = lifecycle_stream.next() => { if Self::should_stop_service(msg).await { break; } } @@ -181,13 +221,94 @@ where } } } + + fn wrap_and_send_to_persistent_transmission( + message: Vec<u8>, + cryptographic_processor: &mut CryptographicProcessor<ChaCha12Rng, SphinxMessage>, + persistent_sender: &mpsc::UnboundedSender<Vec<u8>>, + ) { + match cryptographic_processor.wrap_message(&message) { + Ok(wrapped_message) => { + if let Err(e) = persistent_sender.send(wrapped_message) { + tracing::error!("Error sending message to persistent stream: {e}"); + } + } + Err(e) => { + tracing::error!("Failed to wrap message: {:?}", e); + } + } + } } #[derive(Serialize, Deserialize, Clone, Debug)] pub struct MixConfig { pub backend: BackendSettings, - pub message_blend: MessageBlendSettings, + pub message_blend: MessageBlendSettings<SphinxMessage>, pub persistent_transmission: PersistentTransmissionSettings, + pub cover_traffic: CoverTrafficExtSettings, + pub membership: Vec<Node<<SphinxMessage as MixMessage>::PublicKey>>, +} + +#[derive(Serialize, Deserialize, Clone, Debug)] +pub struct CoverTrafficExtSettings { + pub epoch_duration: Duration, + pub slot_duration: Duration, +} + +impl CoverTrafficExtSettings { + fn cover_traffic_settings( + &self, + membership: &Membership<SphinxMessage>, + cryptographic_processor_settings: &CryptographicProcessorSettings< + <SphinxMessage as MixMessage>::PrivateKey, + >, + ) -> CoverTrafficSettings { + CoverTrafficSettings { + node_id: membership.local_node().public_key, + number_of_hops: cryptographic_processor_settings.num_mix_layers, + slots_per_epoch: self.slots_per_epoch(), + network_size: membership.size(), + } + } + + fn slots_per_epoch(&self) -> usize { + (self.epoch_duration.as_secs() as usize) + .checked_div(self.slot_duration.as_secs() as usize) + .expect("Invalid epoch & slot duration") + } + + fn epoch_stream( + &self, + ) -> futures::stream::Map< + futures::stream::Enumerate<IntervalStream>, + impl FnMut((usize, time::Instant)) -> usize, + > { + IntervalStream::new(time::interval(self.epoch_duration)) + .enumerate() + .map(|(i, _)| i) + } + + fn slot_stream( + &self, + ) -> futures::stream::Map< + futures::stream::Enumerate<IntervalStream>, + impl FnMut((usize, time::Instant)) -> usize, + > { + let slots_per_epoch = self.slots_per_epoch(); + IntervalStream::new(time::interval(self.slot_duration)) + .enumerate() + .map(move |(i, _)| i % slots_per_epoch) + } +} + +impl MixConfig { + fn membership(&self) -> Membership<SphinxMessage> { + let public_key = x25519_dalek::PublicKey::from(&x25519_dalek::StaticSecret::from( + self.message_blend.cryptographic_processor.private_key, + )) + .to_bytes(); + Membership::new(self.membership.clone(), public_key) + } } /// A message that is handled by [`MixService`].
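A worked example of the `slots_per_epoch` arithmetic above, using the values the integration tests configure later in this patch: 432000 s epochs (5 days) and 20 s slots. Only `std` is needed:

```rust
use std::time::Duration;

// Mirrors CoverTrafficExtSettings::slots_per_epoch; the expect() only fires
// when the slot duration is zero whole seconds.
fn slots_per_epoch(epoch: Duration, slot: Duration) -> usize {
    (epoch.as_secs() as usize)
        .checked_div(slot.as_secs() as usize)
        .expect("Invalid epoch & slot duration")
}

fn main() {
    let slots = slots_per_epoch(Duration::from_secs(432_000), Duration::from_secs(20));
    assert_eq!(slots, 21_600);
    // slot_stream then emits tick_index % 21_600: 0, 1, ..., 21_599, 0, ...
}
```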
diff --git a/nomos-services/tracing/src/lib.rs b/nomos-services/tracing/src/lib.rs index 15d928fa..8b4b475d 100644 --- a/nomos-services/tracing/src/lib.rs +++ b/nomos-services/tracing/src/lib.rs @@ -8,6 +8,7 @@ use nomos_tracing::filter::envfilter::{create_envfilter_layer, EnvFilterConfig}; use nomos_tracing::logging::gelf::{create_gelf_layer, GelfConfig}; use nomos_tracing::logging::local::{create_file_layer, create_writer_layer, FileConfig}; use nomos_tracing::logging::loki::{create_loki_layer, LokiConfig}; +use nomos_tracing::metrics::otlp::{create_otlp_metrics_layer, OtlpMetricsConfig}; use overwatch_rs::services::life_cycle::LifecycleMessage; use overwatch_rs::services::{ handle::ServiceStateHandle, @@ -93,11 +94,18 @@ pub enum FilterLayer { None, } +#[derive(Clone, Debug, Serialize, Deserialize)] +pub enum MetricsLayer { + Otlp(OtlpMetricsConfig), + None, +} + #[derive(Clone, Debug, Serialize, Deserialize)] pub struct TracingSettings { pub logger: LoggerLayer, pub tracing: TracingLayer, pub filter: FilterLayer, + pub metrics: MetricsLayer, #[serde(with = "serde_level")] pub level: Level, } @@ -108,6 +116,7 @@ impl Default for TracingSettings { logger: LoggerLayer::Stdout, tracing: TracingLayer::None, filter: FilterLayer::None, + metrics: MetricsLayer::None, level: Level::DEBUG, } } @@ -119,12 +128,14 @@ impl TracingSettings { logger: LoggerLayer, tracing: TracingLayer, filter: FilterLayer, + metrics: MetricsLayer, level: Level, ) -> Self { Self { logger, tracing, filter, + metrics, level, } } @@ -194,6 +205,11 @@ impl ServiceCore for Tracing { layers.push(Box::new(filter_layer)); } + if let MetricsLayer::Otlp(config) = config.metrics { + let metrics_layer = create_otlp_metrics_layer(config)?; + layers.push(Box::new(metrics_layer)); + } + // If no layers are created, tracing subscriber is not required. 
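For reference, a settings value that exercises the new `MetricsLayer::Otlp` branch above, pointing at the Prometheus OTLP write receiver enabled in the compose files. The endpoint and identifier are examples, not defaults:

```rust
use nomos_tracing::metrics::otlp::OtlpMetricsConfig;
use nomos_tracing_service::{
    FilterLayer, LoggerLayer, MetricsLayer, TracingLayer, TracingSettings,
};
use tracing::Level;

// Stdout logs, no tracing or filter layers, OTLP metrics push.
fn settings_with_metrics() -> TracingSettings {
    TracingSettings::new(
        LoggerLayer::Stdout,
        TracingLayer::None,
        FilterLayer::None,
        MetricsLayer::Otlp(OtlpMetricsConfig {
            endpoint: "http://prometheus:9090/api/v1/otlp/v1/metrics"
                .try_into()
                .unwrap(),
            host_identifier: "node-0".to_string(),
        }),
        Level::INFO,
    )
}
```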
if layers.is_empty() { return Ok(Self { diff --git a/nomos-tracing/Cargo.toml b/nomos-tracing/Cargo.toml index 0c876cdf..9b584c2f 100644 --- a/nomos-tracing/Cargo.toml +++ b/nomos-tracing/Cargo.toml @@ -5,14 +5,17 @@ edition = "2021" [dependencies] opentelemetry = { version = "0.26" } -opentelemetry-otlp = "0.26" +opentelemetry-otlp = { version = "0.26", features = ["grpc-tonic", "http-proto", "opentelemetry-http"] } opentelemetry_sdk = { version = "0.26", features = ["rt-tokio"] } +opentelemetry-http = { version = "0.26", features = ["reqwest"] } +opentelemetry-semantic-conventions = "0.26" +reqwest = "0.12" serde = { version = "1.0", features = ["derive"] } tokio = "1" tracing = "0.1" tracing-appender = "0.2" +tracing-gelf = "0.7" tracing-loki = "0.2.5" tracing-opentelemetry = "0.27" tracing-subscriber = { version = "0.3", features = ["env-filter", "json", "registry"] } -tracing-gelf = "0.7" url = { version = "2", features = ["serde"] } diff --git a/nomos-tracing/src/lib.rs b/nomos-tracing/src/lib.rs index 68b36ec4..ac434e96 100644 --- a/nomos-tracing/src/lib.rs +++ b/nomos-tracing/src/lib.rs @@ -1,3 +1,4 @@ pub mod filter; pub mod logging; +pub mod metrics; pub mod tracing; diff --git a/nomos-tracing/src/metrics/mod.rs b/nomos-tracing/src/metrics/mod.rs new file mode 100644 index 00000000..95eb89aa --- /dev/null +++ b/nomos-tracing/src/metrics/mod.rs @@ -0,0 +1 @@ +pub mod otlp; diff --git a/nomos-tracing/src/metrics/otlp.rs b/nomos-tracing/src/metrics/otlp.rs new file mode 100644 index 00000000..5774b2c2 --- /dev/null +++ b/nomos-tracing/src/metrics/otlp.rs @@ -0,0 +1,52 @@ +// std +use std::error::Error; +// crates +use opentelemetry::{global, KeyValue}; +use opentelemetry_otlp::{ExportConfig, Protocol, WithExportConfig}; +use opentelemetry_sdk::{runtime, Resource}; +use reqwest::Client; +use serde::{Deserialize, Serialize}; +use tracing::Subscriber; +use tracing_opentelemetry::MetricsLayer; +use tracing_subscriber::registry::LookupSpan; +use url::Url; +// internal + +#[derive(Clone, Debug, Serialize, Deserialize)] +pub struct OtlpMetricsConfig { + pub endpoint: Url, + pub host_identifier: String, +} + +pub fn create_otlp_metrics_layer<S>( + config: OtlpMetricsConfig, +) -> Result<MetricsLayer<S>, Box<dyn Error + Send + Sync>> +where + S: Subscriber + for<'span> LookupSpan<'span>, +{ + let resource = Resource::new(vec![KeyValue::new( + opentelemetry_semantic_conventions::resource::SERVICE_NAME, + config.host_identifier, + )]); + + let export_config = ExportConfig { + endpoint: config.endpoint.into(), + protocol: Protocol::HttpBinary, + ..ExportConfig::default() + }; + + let client = Client::new(); + let meter_provider = opentelemetry_otlp::new_pipeline() + .metrics(runtime::Tokio) + .with_exporter( + opentelemetry_otlp::new_exporter() + .http() + .with_http_client(client) + .with_export_config(export_config), + ) + .with_resource(resource) + .build()?; + + global::set_meter_provider(meter_provider.clone()); + Ok(MetricsLayer::new(meter_provider)) } diff --git a/testnet/cfgsync.yaml b/testnet/cfgsync.yaml index 750658aa..72ee17d8 100644 --- a/testnet/cfgsync.yaml +++ b/testnet/cfgsync.yaml @@ -18,3 +18,4 @@ global_params_path: "/kzgrs_test_params" # Tracing params tempo_endpoint: "http://tempo:4317" loki_endpoint: "http://loki:3100" +metrics_endpoint: "http://prometheus:9090/api/v1/otlp/v1/metrics" diff --git a/testnet/cfgsync/Cargo.toml b/testnet/cfgsync/Cargo.toml index 4229e187..318760ab 100644 --- a/testnet/cfgsync/Cargo.toml +++ b/testnet/cfgsync/Cargo.toml @@ -9,6 +9,8 @@ clap = { version = "4", features = ["derive"] }
nomos-executor = { path = "../../nodes/nomos-executor" } nomos-libp2p = { path = "../../nomos-libp2p" } nomos-node = { path = "../../nodes/nomos-node" } +nomos-mix = { path = "../../nomos-mix/core" } +nomos-mix-message = { path = "../../nomos-mix/message" } nomos-tracing = { path = "../../nomos-tracing" } nomos-tracing-service = { path = "../../nomos-services/tracing" } rand = "0.8" diff --git a/testnet/cfgsync/src/bin/cfgsync-server.rs b/testnet/cfgsync/src/bin/cfgsync-server.rs index 276ef96b..b775f444 100644 --- a/testnet/cfgsync/src/bin/cfgsync-server.rs +++ b/testnet/cfgsync/src/bin/cfgsync-server.rs @@ -49,6 +49,7 @@ struct CfgSyncConfig { // Tracing params tempo_endpoint: Url, loki_endpoint: Url, + metrics_endpoint: Url, } impl CfgSyncConfig { @@ -83,6 +84,7 @@ impl CfgSyncConfig { TracingParams { tempo_endpoint: self.tempo_endpoint.clone(), loki_endpoint: self.loki_endpoint.clone(), + metrics_endpoint: self.metrics_endpoint.clone(), } } } diff --git a/testnet/cfgsync/src/config.rs b/testnet/cfgsync/src/config.rs index 776b57a6..44a5c978 100644 --- a/testnet/cfgsync/src/config.rs +++ b/testnet/cfgsync/src/config.rs @@ -1,9 +1,13 @@ // std use std::{collections::HashMap, net::Ipv4Addr, str::FromStr}; // crates -use nomos_libp2p::{Multiaddr, PeerId, Protocol}; -use nomos_tracing::{logging::loki::LokiConfig, tracing::otlp::OtlpTracingConfig}; -use nomos_tracing_service::{FilterLayer, LoggerLayer, TracingSettings}; +use nomos_libp2p::{Multiaddr, PeerId}; +use nomos_mix::membership::Node; +use nomos_mix_message::{sphinx::SphinxMessage, MixMessage}; +use nomos_tracing::{ + logging::loki::LokiConfig, metrics::otlp::OtlpMetricsConfig, tracing::otlp::OtlpTracingConfig, +}; +use nomos_tracing_service::{FilterLayer, LoggerLayer, MetricsLayer, TracingSettings}; use rand::{thread_rng, Rng}; use tests::topology::configs::{ api::GeneralApiConfig, @@ -91,7 +95,7 @@ pub fn create_node_configs( let host_network_init_peers = update_network_init_peers(hosts.clone()); let host_da_peer_addresses = update_da_peer_addresses(hosts.clone(), peer_addresses); let host_mix_membership = - update_mix_membership(hosts.clone(), mix_configs[0].backend.membership.clone()); + update_mix_membership(hosts.clone(), mix_configs[0].membership.clone()); let new_peer_addresses: HashMap = host_da_peer_addresses .clone() @@ -122,7 +126,7 @@ pub fn create_node_configs( let mut mix_config = mix_configs[i].to_owned(); mix_config.backend.listening_address = Multiaddr::from_str(&format!("/ip4/0.0.0.0/udp/{}/quic-v1", host.mix_port)).unwrap(); - mix_config.backend.membership = host_mix_membership.clone(); + mix_config.membership = host_mix_membership.clone(); // Tracing config. 
let tracing_config = @@ -170,44 +174,40 @@ fn update_da_peer_addresses( .collect() } -fn update_mix_membership(hosts: Vec, membership: Vec) -> Vec { +fn update_mix_membership( + hosts: Vec, + membership: Vec::PublicKey>>, +) -> Vec::PublicKey>> { membership .into_iter() .zip(hosts) - .map(|(addr, host)| { - Multiaddr::from_str(&format!( - "/ip4/{}/udp/{}/quic-v1/p2p/{}", - host.ip, - host.mix_port, - extract_peer_id(&addr).unwrap(), - )) - .unwrap() + .map(|(mut node, host)| { + node.address = + Multiaddr::from_str(&format!("/ip4/{}/udp/{}/quic-v1", host.ip, host.mix_port)) + .unwrap(); + node }) .collect() } -fn extract_peer_id(multiaddr: &Multiaddr) -> Option { - multiaddr.iter().find_map(|protocol| { - if let Protocol::P2p(peer_id) = protocol { - Some(peer_id) - } else { - None - } - }) -} - fn tracing_config_for_grafana(params: TracingParams, identifier: String) -> GeneralTracingConfig { GeneralTracingConfig { tracing_settings: TracingSettings { logger: LoggerLayer::Loki(LokiConfig { endpoint: params.loki_endpoint, - host_identifier: identifier, + host_identifier: identifier.clone(), }), tracing: nomos_tracing_service::TracingLayer::Otlp(OtlpTracingConfig { endpoint: params.tempo_endpoint, sample_ratio: 1.0, }), filter: FilterLayer::None, + metrics: MetricsLayer::Otlp(OtlpMetricsConfig { + endpoint: "http://127.0.0.1:9090/api/v1/otlp/v1/metrics" + .try_into() + .unwrap(), + host_identifier: identifier, + }), level: Level::INFO, }, } @@ -257,6 +257,7 @@ mod cfgsync_tests { TracingParams { tempo_endpoint: "http://test.com".try_into().unwrap(), loki_endpoint: "http://test.com".try_into().unwrap(), + metrics_endpoint: "http://test.com".try_into().unwrap(), }, hosts, ); diff --git a/testnet/cfgsync/src/lib.rs b/testnet/cfgsync/src/lib.rs index a432324e..c03e0708 100644 --- a/testnet/cfgsync/src/lib.rs +++ b/testnet/cfgsync/src/lib.rs @@ -7,4 +7,5 @@ pub mod repo; pub struct TracingParams { pub tempo_endpoint: Url, pub loki_endpoint: Url, + pub metrics_endpoint: Url, } diff --git a/testnet/monitoring/prometheus.yml b/testnet/monitoring/prometheus.yml index 6772dd7e..ee039b07 100644 --- a/testnet/monitoring/prometheus.yml +++ b/testnet/monitoring/prometheus.yml @@ -3,12 +3,3 @@ global: evaluation_interval: 15s external_labels: monitor: "Monitoring" - -scrape_configs: - - job_name: "nomos" - static_configs: - - targets: - - nomos-node-0:18080 - - nomos-node-1:18080 - - nomos-node-2:18080 - - nomos-node-3:18080 diff --git a/tests/Cargo.toml b/tests/Cargo.toml index 77297970..c4ae7b46 100644 --- a/tests/Cargo.toml +++ b/tests/Cargo.toml @@ -12,6 +12,7 @@ nomos-executor = { path = "../nodes/nomos-executor", default-features = false } nomos-network = { path = "../nomos-services/network", features = ["libp2p"] } nomos-mix-service = { path = "../nomos-services/mix", features = ["libp2p"] } nomos-mix = { path = "../nomos-mix/core" } +nomos-mix-message = { path = "../nomos-mix/message" } cryptarchia-consensus = { path = "../nomos-services/cryptarchia-consensus" } nomos-tracing = { path = "../nomos-tracing" } nomos-tracing-service = { path = "../nomos-services/tracing" } @@ -50,6 +51,7 @@ criterion = { version = "0.5", features = ["async_tokio"] } nomos-cli = { path = "../nomos-cli" } time = "0.3" tracing = "0.1" +x25519-dalek = { version = "2", features = ["getrandom", "static_secrets"] } [[test]] name = "test_cryptarchia_happy_path" @@ -60,5 +62,4 @@ name = "test_da" path = "src/tests/da.rs" [features] -metrics = ["nomos-node/metrics"] debug = [] diff --git a/tests/src/nodes/executor.rs 
b/tests/src/nodes/executor.rs index de525c1c..382b56eb 100644 --- a/tests/src/nodes/executor.rs +++ b/tests/src/nodes/executor.rs @@ -23,7 +23,7 @@ use nomos_da_verifier::DaVerifierServiceSettings; use nomos_executor::api::backend::AxumBackendSettings; use nomos_executor::config::Config; use nomos_mix::message_blend::{ - CryptographicProcessorSettings, MessageBlendSettings, TemporalProcessorSettings, + CryptographicProcessorSettings, MessageBlendSettings, TemporalSchedulerSettings, }; use nomos_network::{backends::libp2p::Libp2pConfig, NetworkConfig}; use nomos_node::api::paths::{CL_METRICS, DA_GET_RANGE}; @@ -158,11 +158,19 @@ pub fn create_executor_config(config: GeneralConfig) -> Config { backend: config.mix_config.backend, persistent_transmission: Default::default(), message_blend: MessageBlendSettings { - cryptographic_processor: CryptographicProcessorSettings { num_mix_layers: 1 }, - temporal_processor: TemporalProcessorSettings { + cryptographic_processor: CryptographicProcessorSettings { + private_key: config.mix_config.private_key.to_bytes(), + num_mix_layers: 1, + }, + temporal_processor: TemporalSchedulerSettings { max_delay_seconds: 2, }, }, + cover_traffic: nomos_mix_service::CoverTrafficExtSettings { + epoch_duration: Duration::from_secs(432000), + slot_duration: Duration::from_secs(20), + }, + membership: config.mix_config.membership, }, cryptarchia: CryptarchiaSettings { leader_config: config.consensus_config.leader_config, diff --git a/tests/src/nodes/validator.rs b/tests/src/nodes/validator.rs index 1a894f81..1ce45546 100644 --- a/tests/src/nodes/validator.rs +++ b/tests/src/nodes/validator.rs @@ -15,7 +15,7 @@ use nomos_da_verifier::storage::adapters::rocksdb::RocksAdapterSettings as Verif use nomos_da_verifier::{backend::kzgrs::KzgrsDaVerifierSettings, DaVerifierServiceSettings}; use nomos_mempool::MempoolMetrics; use nomos_mix::message_blend::{ - CryptographicProcessorSettings, MessageBlendSettings, TemporalProcessorSettings, + CryptographicProcessorSettings, MessageBlendSettings, TemporalSchedulerSettings, }; use nomos_network::{backends::libp2p::Libp2pConfig, NetworkConfig}; use nomos_node::api::paths::{ @@ -243,13 +243,20 @@ pub fn create_validator_config(config: GeneralConfig) -> Config { mix: nomos_mix_service::MixConfig { backend: config.mix_config.backend, persistent_transmission: Default::default(), - message_blend: MessageBlendSettings { - cryptographic_processor: CryptographicProcessorSettings { num_mix_layers: 1 }, - temporal_processor: TemporalProcessorSettings { + message_blend: MessageBlendSettings { + cryptographic_processor: CryptographicProcessorSettings { + private_key: config.mix_config.private_key.to_bytes(), + num_mix_layers: 1, + }, + temporal_processor: TemporalSchedulerSettings { max_delay_seconds: 2, }, }, + cover_traffic: nomos_mix_service::CoverTrafficExtSettings { + epoch_duration: Duration::from_secs(432000), + slot_duration: Duration::from_secs(20), + }, + membership: config.mix_config.membership, }, cryptarchia: CryptarchiaSettings { leader_config: config.consensus_config.leader_config, diff --git a/tests/src/topology/configs/mix.rs b/tests/src/topology/configs/mix.rs index 318f2e85..16454b64 100644 --- a/tests/src/topology/configs/mix.rs +++ b/tests/src/topology/configs/mix.rs @@ -1,13 +1,17 @@ use std::str::FromStr; use nomos_libp2p::{ed25519, Multiaddr}; +use nomos_mix::membership::Node; +use nomos_mix_message::{sphinx::SphinxMessage, MixMessage}; use nomos_mix_service::backends::libp2p::Libp2pMixBackendSettings; -use crate::{get_available_port, secret_key_to_peer_id}; +use
crate::get_available_port; #[derive(Clone)] pub struct GeneralMixConfig { pub backend: Libp2pMixBackendSettings, + pub private_key: x25519_dalek::StaticSecret, + pub membership: Vec<Node<<SphinxMessage as MixMessage>::PublicKey>>, } pub fn create_mix_configs(ids: &[[u8; 32]]) -> Vec<GeneralMixConfig> { @@ -26,33 +30,31 @@ pub fn create_mix_configs(ids: &[[u8; 32]]) -> Vec<GeneralMixConfig> { )) .unwrap(), node_key, - membership: Vec::new(), peering_degree: 1, }, + private_key: x25519_dalek::StaticSecret::random(), + membership: Vec::new(), } }) .collect(); - let membership = mix_membership(&configs); - + let nodes = mix_nodes(&configs); configs.iter_mut().for_each(|config| { - config.backend.membership = membership.clone(); + config.membership = nodes.clone(); }); configs } -fn mix_membership(configs: &[GeneralMixConfig]) -> Vec<Multiaddr> { +fn mix_nodes(configs: &[GeneralMixConfig]) -> Vec<Node<<SphinxMessage as MixMessage>::PublicKey>> { configs .iter() - .map(|config| { - let peer_id = secret_key_to_peer_id(config.backend.node_key.clone()); - config - .backend - .listening_address - .clone() - .with_p2p(peer_id) - .unwrap_or_else(|orig_addr| orig_addr) + .map(|config| Node { + address: config.backend.listening_address.clone(), + public_key: x25519_dalek::PublicKey::from(&x25519_dalek::StaticSecret::from( + config.private_key.to_bytes(), + )) + .to_bytes(), }) .collect() } diff --git a/tests/src/topology/configs/tracing.rs b/tests/src/topology/configs/tracing.rs index 8e445d21..7c67f3f7 100644 --- a/tests/src/topology/configs/tracing.rs +++ b/tests/src/topology/configs/tracing.rs @@ -1,5 +1,9 @@ -use nomos_tracing::{logging::loki::LokiConfig, tracing::otlp::OtlpTracingConfig}; -use nomos_tracing_service::{FilterLayer, LoggerLayer, TracingLayer, TracingSettings}; +use nomos_tracing::{ + logging::loki::LokiConfig, metrics::otlp::OtlpMetricsConfig, tracing::otlp::OtlpTracingConfig, +}; +use nomos_tracing_service::{ + FilterLayer, LoggerLayer, MetricsLayer, TracingLayer, TracingSettings, +}; use tracing::Level; #[derive(Clone, Default)] @@ -10,11 +14,12 @@ pub struct GeneralTracingConfig { impl GeneralTracingConfig { #[allow(dead_code)] fn local_debug_tracing(id: usize) -> Self { + let host_identifier = format!("node-{id}"); Self { tracing_settings: TracingSettings { logger: LoggerLayer::Loki(LokiConfig { endpoint: "http://localhost:3100".try_into().unwrap(), - host_identifier: format!("node-{id}"), + host_identifier: host_identifier.clone(), }), tracing: TracingLayer::Otlp(OtlpTracingConfig { endpoint: "http://localhost:4317".try_into().unwrap(), @@ -29,6 +34,12 @@ impl GeneralTracingConfig { .map(|(k, v)| (k.to_string(), v.to_string())) .collect(), }), + metrics: MetricsLayer::Otlp(OtlpMetricsConfig { + endpoint: "http://127.0.0.1:9090/api/v1/otlp/v1/metrics" + .try_into() + .unwrap(), + host_identifier, + }), level: Level::DEBUG, }, }
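A usage note on the metrics layer configured throughout this patch: `tracing_opentelemetry::MetricsLayer` derives OTLP instruments from tracing events whose field names carry reserved prefixes, so node code records a metric by emitting an ordinary event. The metric names below are made up for illustration:

```rust
use tracing::info;

// With the MetricsLayer installed, `monotonic_counter.` fields become OTLP
// Counters and `histogram.` fields become Histograms; Prometheus ingests
// them through the endpoint exposed by --enable-feature=otlp-write-receiver.
fn record_block_import(duration_secs: f64) {
    info!(monotonic_counter.blocks_imported = 1_u64);
    info!(histogram.block_import_seconds = duration_secs);
}
```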