diff --git a/nodes/nomos-executor/Cargo.toml b/nodes/nomos-executor/Cargo.toml index 49a5bf59..7e1408d1 100644 --- a/nodes/nomos-executor/Cargo.toml +++ b/nodes/nomos-executor/Cargo.toml @@ -4,17 +4,42 @@ version = "0.1.0" edition = "2021" [dependencies] +async-trait = "0.1" +axum = { version = "0.6" } +clap = { version = "4.5.13", features = ["derive"] } color-eyre = "0.6.0" +hyper = { version = "0.14", features = ["full"] } +kzgrs-backend = { path = "../../nomos-da/kzgrs-backend" } +nomos-api = { path = "../../nomos-services/api" } +nomos-core = { path = "../../nomos-core" } +nomos-da-network-core = { path = "../../nomos-da/network/core" } +nomos-da-network-service = { path = "../../nomos-services/data-availability/network" } +nomos-da-sampling = { path = "../../nomos-services/data-availability/sampling", features = ["rocksdb-backend"] } +nomos-da-verifier = { path = "../../nomos-services/data-availability/verifier", features = ["rocksdb-backend", "libp2p"] } +nomos-libp2p = { path = "../../nomos-libp2p" } +nomos-mempool = { path = "../../nomos-services/mempool", features = [ + "mock", + "libp2p", +] } +nomos-metrics = { path = "../../nomos-services/metrics" } +nomos-network = { path = "../../nomos-services/network", features = ["libp2p"] } nomos-node = { path = "../nomos-node" } +nomos-storage = { path = "../../nomos-services/storage", features = ["rocksdb"] } overwatch-rs = { git = "https://github.com/logos-co/Overwatch", rev = "2f70806" } overwatch-derive = { git = "https://github.com/logos-co/Overwatch", rev = "ac28d01" } -clap = { version = "4.5.13", features = ["derive"] } +rand = "0.8" +rand_chacha = "0.3" +serde = "1" +serde_yaml = "0.9" +subnetworks-assignations = { path = "../../nomos-da/network/subnetworks-assignations" } +tower-http = { version = "0.4", features = ["cors", "trace"] } tracing = "0.1.40" +utoipa = "4.0" +utoipa-swagger-ui = { version = "4.0" } uuid = { version = "1.10.0", features = ["v4"] } -serde_yaml = "0.9.34+deprecated" [features] default = ["tracing"] mixnet = ["nomos-node/mixnet"] metrics = ["nomos-node/metrics"] -tracing = ["nomos-node/tracing"] \ No newline at end of file +tracing = ["nomos-node/tracing"] diff --git a/nodes/nomos-executor/src/api/backend.rs b/nodes/nomos-executor/src/api/backend.rs new file mode 100644 index 00000000..22c0e33e --- /dev/null +++ b/nodes/nomos-executor/src/api/backend.rs @@ -0,0 +1,312 @@ +// std +use std::error::Error; +use std::{fmt::Debug, hash::Hash}; +// crates +use axum::{http::HeaderValue, routing, Router, Server}; +use hyper::header::{CONTENT_TYPE, USER_AGENT}; +use nomos_api::Backend; +use nomos_core::da::blob::info::DispersedBlobInfo; +use nomos_core::da::blob::metadata::Metadata; +use nomos_core::da::DaVerifier as CoreDaVerifier; +use nomos_core::{da::blob::Blob, header::HeaderId, tx::Transaction}; +use nomos_da_network_core::SubnetworkId; +use nomos_da_sampling::backend::DaSamplingServiceBackend; +use nomos_da_verifier::backend::VerifierBackend; +use nomos_libp2p::PeerId; +use nomos_mempool::{tx::service::openapi::Status, MempoolMetrics}; +use nomos_node::api::handlers::{ + add_blob, add_blob_info, add_tx, block, cl_metrics, cl_status, cryptarchia_headers, + cryptarchia_info, get_metrics, get_range, libp2p_info, +}; +use nomos_storage::backends::StorageSerde; +use overwatch_rs::overwatch::handle::OverwatchHandle; +use rand::{RngCore, SeedableRng}; +use serde::{de::DeserializeOwned, Deserialize, Serialize}; +use subnetworks_assignations::MembershipHandler; +use tower_http::{ + cors::{Any, CorsLayer}, + 
    trace::TraceLayer,
+};
+use utoipa::OpenApi;
+use utoipa_swagger_ui::SwaggerUi;
+// internal
+
+/// Configuration for the Http Server
+#[derive(Debug, Clone, serde::Deserialize, serde::Serialize)]
+pub struct AxumBackendSettings {
+    /// Socket where the server will be listening on for incoming requests.
+    pub address: std::net::SocketAddr,
+    /// Allowed origins for this server deployment requests.
+    pub cors_origins: Vec<String>,
+}
+
+pub struct AxumBackend<
+    DaAttestation,
+    DaBlob,
+    DaBlobInfo,
+    Membership,
+    DaVerifiedBlobInfo,
+    DaVerifierBackend,
+    Tx,
+    DaStorageSerializer,
+    SamplingBackend,
+    SamplingNetworkAdapter,
+    SamplingRng,
+    SamplingStorage,
+    const SIZE: usize,
+> {
+    settings: AxumBackendSettings,
+    _attestation: core::marker::PhantomData<DaAttestation>,
+    _blob: core::marker::PhantomData<DaBlob>,
+    _certificate: core::marker::PhantomData<DaBlobInfo>,
+    _membership: core::marker::PhantomData<Membership>,
+    _vid: core::marker::PhantomData<DaVerifiedBlobInfo>,
+    _verifier_backend: core::marker::PhantomData<DaVerifierBackend>,
+    _tx: core::marker::PhantomData<Tx>,
+    _storage_serde: core::marker::PhantomData<DaStorageSerializer>,
+    _sampling_backend: core::marker::PhantomData<SamplingBackend>,
+    _sampling_network_adapter: core::marker::PhantomData<SamplingNetworkAdapter>,
+    _sampling_rng: core::marker::PhantomData<SamplingRng>,
+    _sampling_storage: core::marker::PhantomData<SamplingStorage>,
+}
+
+#[derive(OpenApi)]
+#[openapi(
+    paths(
+    ),
+    components(
+        schemas(Status, MempoolMetrics)
+    ),
+    tags(
+        (name = "da", description = "data availability related APIs")
+    )
+)]
+struct ApiDoc;
+
+#[async_trait::async_trait]
+impl<
+        DaAttestation,
+        DaBlob,
+        DaBlobInfo,
+        Membership,
+        DaVerifiedBlobInfo,
+        DaVerifierBackend,
+        Tx,
+        DaStorageSerializer,
+        SamplingBackend,
+        SamplingNetworkAdapter,
+        SamplingRng,
+        SamplingStorage,
+        const SIZE: usize,
+    > Backend
+    for AxumBackend<
+        DaAttestation,
+        DaBlob,
+        DaBlobInfo,
+        Membership,
+        DaVerifiedBlobInfo,
+        DaVerifierBackend,
+        Tx,
+        DaStorageSerializer,
+        SamplingBackend,
+        SamplingNetworkAdapter,
+        SamplingRng,
+        SamplingStorage,
+        SIZE,
+    >
+where
+    DaAttestation: Serialize + DeserializeOwned + Clone + Send + Sync + 'static,
+    DaBlob: Blob + Serialize + DeserializeOwned + Clone + Send + Sync + 'static,
+    <DaBlob as Blob>::BlobId: AsRef<[u8]> + Send + Sync + 'static,
+    <DaBlob as Blob>::ColumnIndex: AsRef<[u8]> + Send + Sync + 'static,
+    DaBlobInfo: DispersedBlobInfo
+        + Clone
+        + Debug
+        + Serialize
+        + DeserializeOwned
+        + Send
+        + Sync
+        + 'static,
+    <DaBlobInfo as DispersedBlobInfo>::BlobId: Clone + Send + Sync,
+    Membership: MembershipHandler<NetworkId = SubnetworkId, Id = PeerId>
+        + Clone
+        + Debug
+        + Send
+        + Sync
+        + 'static,
+    DaVerifiedBlobInfo: DispersedBlobInfo
+        + From<DaBlobInfo>
+        + Eq
+        + Debug
+        + Metadata
+        + Hash
+        + Clone
+        + Serialize
+        + DeserializeOwned
+        + Send
+        + Sync
+        + 'static,
+    <DaVerifiedBlobInfo as DispersedBlobInfo>::BlobId: Debug + Clone + Ord + Hash,
+    <DaVerifiedBlobInfo as Metadata>::AppId:
+        AsRef<[u8]> + Clone + Serialize + DeserializeOwned + Send + Sync,
+    <DaVerifiedBlobInfo as Metadata>::Index:
+        AsRef<[u8]> + Clone + Serialize + DeserializeOwned + PartialOrd + Send + Sync,
+    DaVerifierBackend: VerifierBackend + CoreDaVerifier<DaBlob = DaBlob> + Send + Sync + 'static,
+    <DaVerifierBackend as VerifierBackend>::Settings: Clone,
+    <DaVerifierBackend as CoreDaVerifier>::Error: Error,
+    Tx: Transaction
+        + Clone
+        + Debug
+        + Eq
+        + Hash
+        + Serialize
+        + for<'de> Deserialize<'de>
+        + Send
+        + Sync
+        + 'static,
+    <Tx as Transaction>::Hash:
+        Serialize + for<'de> Deserialize<'de> + std::cmp::Ord + Debug + Send + Sync + 'static,
+    DaStorageSerializer: StorageSerde + Send + Sync + 'static,
+    SamplingRng: SeedableRng + RngCore + Send + 'static,
+    SamplingBackend: DaSamplingServiceBackend<
+            SamplingRng,
+            BlobId = <DaVerifiedBlobInfo as DispersedBlobInfo>::BlobId,
+        > + Send
+        + 'static,
+    SamplingBackend::Settings: Clone,
+    SamplingBackend::Blob: Debug + 'static,
+    SamplingBackend::BlobId: Debug + 'static,
+    SamplingNetworkAdapter: nomos_da_sampling::network::NetworkAdapter + Send + 'static,
+    SamplingStorage: nomos_da_sampling::storage::DaStorageAdapter + Send + 'static,
+{
+    type Error = hyper::Error;
+    type Settings = AxumBackendSettings;
+
+    async fn new(settings: Self::Settings) -> Result<Self, Self::Error>
+    where
+        Self: Sized,
+    {
+        Ok(Self {
+            settings,
+            _attestation: core::marker::PhantomData,
+            _blob: core::marker::PhantomData,
+            _certificate: core::marker::PhantomData,
+            _membership: core::marker::PhantomData,
+            _vid: core::marker::PhantomData,
+            _verifier_backend: core::marker::PhantomData,
+            _tx: core::marker::PhantomData,
+            _storage_serde: core::marker::PhantomData,
+            _sampling_backend: core::marker::PhantomData,
+            _sampling_network_adapter: core::marker::PhantomData,
+            _sampling_rng: core::marker::PhantomData,
+            _sampling_storage: core::marker::PhantomData,
+        })
+    }
+
+    async fn serve(self, handle: OverwatchHandle) -> Result<(), Self::Error> {
+        let mut builder = CorsLayer::new();
+        if self.settings.cors_origins.is_empty() {
+            builder = builder.allow_origin(Any);
+        }
+
+        for origin in &self.settings.cors_origins {
+            builder = builder.allow_origin(
+                origin
+                    .as_str()
+                    .parse::<HeaderValue>()
+                    .expect("fail to parse origin"),
+            );
+        }
+
+        let app = Router::new()
+            .layer(
+                builder
+                    .allow_headers([CONTENT_TYPE, USER_AGENT])
+                    .allow_methods(Any),
+            )
+            .layer(TraceLayer::new_for_http())
+            .merge(SwaggerUi::new("/swagger-ui").url("/api-docs/openapi.json", ApiDoc::openapi()))
+            .route("/cl/metrics", routing::get(cl_metrics::<Tx>))
+            .route("/cl/status", routing::post(cl_status::<Tx>))
+            .route(
+                "/cryptarchia/info",
+                routing::get(
+                    cryptarchia_info::<
+                        Tx,
+                        DaStorageSerializer,
+                        SamplingBackend,
+                        SamplingNetworkAdapter,
+                        SamplingRng,
+                        SamplingStorage,
+                        SIZE,
+                    >,
+                ),
+            )
+            .route(
+                "/cryptarchia/headers",
+                routing::get(
+                    cryptarchia_headers::<
+                        Tx,
+                        DaStorageSerializer,
+                        SamplingBackend,
+                        SamplingNetworkAdapter,
+                        SamplingRng,
+                        SamplingStorage,
+                        SIZE,
+                    >,
+                ),
+            )
+            .route(
+                "/da/add_blob",
+                routing::post(
+                    add_blob::<
+                        DaAttestation,
+                        DaBlob,
+                        Membership,
+                        DaVerifierBackend,
+                        DaStorageSerializer,
+                    >,
+                ),
+            )
+            .route(
+                "/da/get_range",
+                routing::post(
+                    get_range::<
+                        Tx,
+                        DaBlobInfo,
+                        DaVerifiedBlobInfo,
+                        DaStorageSerializer,
+                        SamplingBackend,
+                        SamplingNetworkAdapter,
+                        SamplingRng,
+                        SamplingStorage,
+                        SIZE,
+                    >,
+                ),
+            )
+            .route("/network/info", routing::get(libp2p_info))
+            .route(
+                "/storage/block",
+                routing::post(block::<DaStorageSerializer, Tx>),
+            )
+            .route("/mempool/add/tx", routing::post(add_tx::<Tx>))
+            .route(
+                "/mempool/add/blobinfo",
+                routing::post(
+                    add_blob_info::<
+                        DaVerifiedBlobInfo,
+                        SamplingBackend,
+                        SamplingNetworkAdapter,
+                        SamplingRng,
+                        SamplingStorage,
+                    >,
+                ),
+            )
+            .route("/metrics", routing::get(get_metrics))
+            .with_state(handle);
+
+        Server::bind(&self.settings.address)
+            .serve(app.into_make_service())
+            .await
+    }
+}
diff --git a/nodes/nomos-executor/src/api/mod.rs b/nodes/nomos-executor/src/api/mod.rs
new file mode 100644
index 00000000..fceb1419
--- /dev/null
+++ b/nodes/nomos-executor/src/api/mod.rs
@@ -0,0 +1 @@
+pub mod backend;
diff --git a/nodes/nomos-executor/src/config.rs b/nodes/nomos-executor/src/config.rs
new file mode 100644
index 00000000..d91a2f00
--- /dev/null
+++ b/nodes/nomos-executor/src/config.rs
@@ -0,0 +1,67 @@
+// std
+// crates
+use color_eyre::eyre::Result;
+use nomos_da_network_service::backends::libp2p::executor::DaNetworkExecutorBackend;
+use nomos_da_network_service::NetworkService as DaNetworkService;
+use nomos_network::backends::libp2p::Libp2p as 
NetworkBackend; +use nomos_node::{ + config::{update_cryptarchia_consensus, update_log, update_network}, + CryptarchiaArgs, HttpArgs, LogArgs, Logger, NetworkArgs, NetworkService, Wire, +}; +use nomos_storage::backends::rocksdb::RocksBackend; +use overwatch_rs::services::ServiceData; +use serde::{Deserialize, Serialize}; +use subnetworks_assignations::versions::v1::FillFromNodeList; +// internal +use crate::ExecutorApiService; + +#[derive(Deserialize, Debug, Clone, Serialize)] +pub struct Config { + pub log: ::Settings, + pub network: as ServiceData>::Settings, + pub da_network: + > as ServiceData>::Settings, + pub da_indexer: ::Settings, + pub da_verifier: ::Settings, + pub da_sampling: ::Settings, + pub http: ::Settings, + pub cryptarchia: ::Settings, + pub storage: > as ServiceData>::Settings, + pub wait_online_secs: u64, +} + +impl Config { + pub fn update_from_args( + mut self, + log_args: LogArgs, + network_args: NetworkArgs, + http_args: HttpArgs, + cryptarchia_args: CryptarchiaArgs, + ) -> Result { + update_log(&mut self.log, log_args)?; + update_network(&mut self.network, network_args)?; + update_http(&mut self.http, http_args)?; + update_cryptarchia_consensus(&mut self.cryptarchia, cryptarchia_args)?; + Ok(self) + } +} + +pub fn update_http( + http: &mut ::Settings, + http_args: HttpArgs, +) -> Result<()> { + let HttpArgs { + http_addr, + cors_origins, + } = http_args; + + if let Some(addr) = http_addr { + http.backend_settings.address = addr; + } + + if let Some(cors) = cors_origins { + http.backend_settings.cors_origins = cors; + } + + Ok(()) +} diff --git a/nodes/nomos-executor/src/lib.rs b/nodes/nomos-executor/src/lib.rs index 0283ea6e..bb677f11 100644 --- a/nodes/nomos-executor/src/lib.rs +++ b/nodes/nomos-executor/src/lib.rs @@ -1,6 +1,38 @@ +mod api; +pub mod config; + +// std +// crates +use kzgrs_backend::common::blob::DaBlob; +use nomos_api::ApiService; +use nomos_da_network_service::backends::libp2p::executor::DaNetworkExecutorBackend; +use nomos_da_sampling::backend::kzgrs::KzgrsSamplingBackend; +use nomos_da_sampling::storage::adapters::rocksdb::RocksAdapter as SamplingStorageAdapter; +use nomos_da_verifier::backend::kzgrs::KzgrsDaVerifier; use nomos_node::*; use overwatch_derive::Services; use overwatch_rs::services::handle::ServiceHandle; +use rand_chacha::ChaCha20Rng; +// internal +use api::backend::AxumBackend; + +pub type ExecutorApiService = ApiService< + AxumBackend< + (), + DaBlob, + BlobInfo, + NomosDaMembership, + BlobInfo, + KzgrsDaVerifier, + Tx, + Wire, + KzgrsSamplingBackend, + nomos_da_sampling::network::adapters::libp2p::Libp2pAdapter, + ChaCha20Rng, + SamplingStorageAdapter, + MB16, + >, +>; #[derive(Services)] pub struct NomosExecutor { @@ -10,11 +42,11 @@ pub struct NomosExecutor { da_indexer: ServiceHandle, da_verifier: ServiceHandle, da_sampling: ServiceHandle, - da_network: ServiceHandle>>, + da_network: ServiceHandle>>, cl_mempool: ServiceHandle, da_mempool: ServiceHandle, cryptarchia: ServiceHandle, - http: ServiceHandle, + http: ServiceHandle, storage: ServiceHandle>>, #[cfg(feature = "metrics")] metrics: ServiceHandle, diff --git a/nodes/nomos-executor/src/main.rs b/nodes/nomos-executor/src/main.rs index c67e2029..d4be1bdd 100644 --- a/nodes/nomos-executor/src/main.rs +++ b/nodes/nomos-executor/src/main.rs @@ -1,13 +1,20 @@ -use nomos_node::*; -#[cfg(feature = "metrics")] -use nomos_node::{MetricsSettings, NomosRegistry}; - +// std +// crates use clap::Parser; use color_eyre::eyre::{eyre, Result}; +use nomos_executor::config::Config as 
ExecutorConfig; use nomos_executor::{NomosExecutor, NomosExecutorServiceSettings}; +#[cfg(feature = "metrics")] +use nomos_node::MetricsSettings; +use nomos_node::{ + BlobInfo, CryptarchiaArgs, DaMempoolSettings, DispersedBlobInfo, HttpArgs, LogArgs, + MempoolAdapterSettings, MetricsArgs, NetworkArgs, Transaction, Tx, TxMempoolSettings, CL_TOPIC, + DA_TOPIC, +}; use overwatch_rs::overwatch::*; use tracing::{span, Level}; use uuid::Uuid; +// internal #[derive(Parser, Debug)] #[command(author, version, about, long_about = None)] @@ -39,14 +46,15 @@ fn main() -> Result<()> { cryptarchia_args, metrics_args, } = Args::parse(); - let config = serde_yaml::from_reader::<_, Config>(std::fs::File::open(config)?)? - .update_log(log_args)? - .update_http(http_args)? - .update_network(network_args)? - .update_cryptarchia_consensus(cryptarchia_args)?; + let config = serde_yaml::from_reader::<_, ExecutorConfig>(std::fs::File::open(config)?)? + .update_from_args(log_args, network_args, http_args, cryptarchia_args)?; let registry = cfg!(feature = "metrics") - .then(|| metrics_args.with_metrics.then(NomosRegistry::default)) + .then(|| { + metrics_args + .with_metrics + .then(nomos_metrics::NomosRegistry::default) + }) .flatten(); #[cfg(debug_assertions)] diff --git a/nodes/nomos-node/src/api/backend.rs b/nodes/nomos-node/src/api/backend.rs new file mode 100644 index 00000000..b260bde7 --- /dev/null +++ b/nodes/nomos-node/src/api/backend.rs @@ -0,0 +1,312 @@ +// std +use std::error::Error; +use std::{fmt::Debug, hash::Hash}; +// crates +use axum::{http::HeaderValue, routing, Router, Server}; +use hyper::header::{CONTENT_TYPE, USER_AGENT}; +use nomos_api::Backend; +use nomos_core::da::blob::info::DispersedBlobInfo; +use nomos_core::da::blob::metadata::Metadata; +use nomos_core::da::DaVerifier as CoreDaVerifier; +use nomos_core::{da::blob::Blob, header::HeaderId, tx::Transaction}; +use nomos_da_network_core::SubnetworkId; +use nomos_da_sampling::backend::DaSamplingServiceBackend; +use nomos_da_verifier::backend::VerifierBackend; +use nomos_libp2p::PeerId; +use nomos_mempool::{tx::service::openapi::Status, MempoolMetrics}; +use nomos_storage::backends::StorageSerde; +use overwatch_rs::overwatch::handle::OverwatchHandle; +use rand::{RngCore, SeedableRng}; +use serde::{de::DeserializeOwned, Deserialize, Serialize}; +use subnetworks_assignations::MembershipHandler; +use tower_http::{ + cors::{Any, CorsLayer}, + trace::TraceLayer, +}; +use utoipa::OpenApi; +use utoipa_swagger_ui::SwaggerUi; +// internal +use super::handlers::{ + add_blob, add_blob_info, add_tx, block, cl_metrics, cl_status, cryptarchia_headers, + cryptarchia_info, get_metrics, get_range, libp2p_info, +}; + +/// Configuration for the Http Server +#[derive(Debug, Clone, serde::Deserialize, serde::Serialize)] +pub struct AxumBackendSettings { + /// Socket where the server will be listening on for incoming requests. + pub address: std::net::SocketAddr, + /// Allowed origins for this server deployment requests. 
+    pub cors_origins: Vec<String>,
+}
+
+pub struct AxumBackend<
+    DaAttestation,
+    DaBlob,
+    DaBlobInfo,
+    Membership,
+    DaVerifiedBlobInfo,
+    DaVerifierBackend,
+    Tx,
+    DaStorageSerializer,
+    SamplingBackend,
+    SamplingNetworkAdapter,
+    SamplingRng,
+    SamplingStorage,
+    const SIZE: usize,
+> {
+    settings: AxumBackendSettings,
+    _attestation: core::marker::PhantomData<DaAttestation>,
+    _blob: core::marker::PhantomData<DaBlob>,
+    _certificate: core::marker::PhantomData<DaBlobInfo>,
+    _membership: core::marker::PhantomData<Membership>,
+    _vid: core::marker::PhantomData<DaVerifiedBlobInfo>,
+    _verifier_backend: core::marker::PhantomData<DaVerifierBackend>,
+    _tx: core::marker::PhantomData<Tx>,
+    _storage_serde: core::marker::PhantomData<DaStorageSerializer>,
+    _sampling_backend: core::marker::PhantomData<SamplingBackend>,
+    _sampling_network_adapter: core::marker::PhantomData<SamplingNetworkAdapter>,
+    _sampling_rng: core::marker::PhantomData<SamplingRng>,
+    _sampling_storage: core::marker::PhantomData<SamplingStorage>,
+}
+
+#[derive(OpenApi)]
+#[openapi(
+    paths(
+    ),
+    components(
+        schemas(Status, MempoolMetrics)
+    ),
+    tags(
+        (name = "da", description = "data availability related APIs")
+    )
+)]
+struct ApiDoc;
+
+#[async_trait::async_trait]
+impl<
+        DaAttestation,
+        DaBlob,
+        DaBlobInfo,
+        Membership,
+        DaVerifiedBlobInfo,
+        DaVerifierBackend,
+        Tx,
+        DaStorageSerializer,
+        SamplingBackend,
+        SamplingNetworkAdapter,
+        SamplingRng,
+        SamplingStorage,
+        const SIZE: usize,
+    > Backend
+    for AxumBackend<
+        DaAttestation,
+        DaBlob,
+        DaBlobInfo,
+        Membership,
+        DaVerifiedBlobInfo,
+        DaVerifierBackend,
+        Tx,
+        DaStorageSerializer,
+        SamplingBackend,
+        SamplingNetworkAdapter,
+        SamplingRng,
+        SamplingStorage,
+        SIZE,
+    >
+where
+    DaAttestation: Serialize + DeserializeOwned + Clone + Send + Sync + 'static,
+    DaBlob: Blob + Serialize + DeserializeOwned + Clone + Send + Sync + 'static,
+    <DaBlob as Blob>::BlobId: AsRef<[u8]> + Send + Sync + 'static,
+    <DaBlob as Blob>::ColumnIndex: AsRef<[u8]> + Send + Sync + 'static,
+    DaBlobInfo: DispersedBlobInfo
+        + Clone
+        + Debug
+        + Serialize
+        + DeserializeOwned
+        + Send
+        + Sync
+        + 'static,
+    <DaBlobInfo as DispersedBlobInfo>::BlobId: Clone + Send + Sync,
+    Membership: MembershipHandler<NetworkId = SubnetworkId, Id = PeerId>
+        + Clone
+        + Debug
+        + Send
+        + Sync
+        + 'static,
+    DaVerifiedBlobInfo: DispersedBlobInfo
+        + From<DaBlobInfo>
+        + Eq
+        + Debug
+        + Metadata
+        + Hash
+        + Clone
+        + Serialize
+        + DeserializeOwned
+        + Send
+        + Sync
+        + 'static,
+    <DaVerifiedBlobInfo as DispersedBlobInfo>::BlobId: Debug + Clone + Ord + Hash,
+    <DaVerifiedBlobInfo as Metadata>::AppId:
+        AsRef<[u8]> + Clone + Serialize + DeserializeOwned + Send + Sync,
+    <DaVerifiedBlobInfo as Metadata>::Index:
+        AsRef<[u8]> + Clone + Serialize + DeserializeOwned + PartialOrd + Send + Sync,
+    DaVerifierBackend: VerifierBackend + CoreDaVerifier<DaBlob = DaBlob> + Send + Sync + 'static,
+    <DaVerifierBackend as VerifierBackend>::Settings: Clone,
+    <DaVerifierBackend as CoreDaVerifier>::Error: Error,
+    Tx: Transaction
+        + Clone
+        + Debug
+        + Eq
+        + Hash
+        + Serialize
+        + for<'de> Deserialize<'de>
+        + Send
+        + Sync
+        + 'static,
+    <Tx as Transaction>::Hash:
+        Serialize + for<'de> Deserialize<'de> + std::cmp::Ord + Debug + Send + Sync + 'static,
+    DaStorageSerializer: StorageSerde + Send + Sync + 'static,
+    SamplingRng: SeedableRng + RngCore + Send + 'static,
+    SamplingBackend: DaSamplingServiceBackend<
+            SamplingRng,
+            BlobId = <DaVerifiedBlobInfo as DispersedBlobInfo>::BlobId,
+        > + Send
+        + 'static,
+    SamplingBackend::Settings: Clone,
+    SamplingBackend::Blob: Debug + 'static,
+    SamplingBackend::BlobId: Debug + 'static,
+    SamplingNetworkAdapter: nomos_da_sampling::network::NetworkAdapter + Send + 'static,
+    SamplingStorage: nomos_da_sampling::storage::DaStorageAdapter + Send + 'static,
+{
+    type Error = hyper::Error;
+    type Settings = AxumBackendSettings;
+
+    async fn new(settings: Self::Settings) -> Result<Self, Self::Error>
+    where
+        Self: Sized,
+    {
+        Ok(Self {
+            settings,
+            _attestation: core::marker::PhantomData,
+            _blob: core::marker::PhantomData,
+            _certificate: core::marker::PhantomData,
+            _membership: core::marker::PhantomData,
+            _vid: core::marker::PhantomData,
+            _verifier_backend: core::marker::PhantomData,
+            _tx: core::marker::PhantomData,
+            _storage_serde: core::marker::PhantomData,
+            _sampling_backend: core::marker::PhantomData,
+            _sampling_network_adapter: core::marker::PhantomData,
+            _sampling_rng: core::marker::PhantomData,
+            _sampling_storage: core::marker::PhantomData,
+        })
+    }
+
+    async fn serve(self, handle: OverwatchHandle) -> Result<(), Self::Error> {
+        let mut builder = CorsLayer::new();
+        if self.settings.cors_origins.is_empty() {
+            builder = builder.allow_origin(Any);
+        }
+
+        for origin in &self.settings.cors_origins {
+            builder = builder.allow_origin(
+                origin
+                    .as_str()
+                    .parse::<HeaderValue>()
+                    .expect("fail to parse origin"),
+            );
+        }
+
+        let app = Router::new()
+            .layer(
+                builder
+                    .allow_headers([CONTENT_TYPE, USER_AGENT])
+                    .allow_methods(Any),
+            )
+            .layer(TraceLayer::new_for_http())
+            .merge(SwaggerUi::new("/swagger-ui").url("/api-docs/openapi.json", ApiDoc::openapi()))
+            .route("/cl/metrics", routing::get(cl_metrics::<Tx>))
+            .route("/cl/status", routing::post(cl_status::<Tx>))
+            .route(
+                "/cryptarchia/info",
+                routing::get(
+                    cryptarchia_info::<
+                        Tx,
+                        DaStorageSerializer,
+                        SamplingBackend,
+                        SamplingNetworkAdapter,
+                        SamplingRng,
+                        SamplingStorage,
+                        SIZE,
+                    >,
+                ),
+            )
+            .route(
+                "/cryptarchia/headers",
+                routing::get(
+                    cryptarchia_headers::<
+                        Tx,
+                        DaStorageSerializer,
+                        SamplingBackend,
+                        SamplingNetworkAdapter,
+                        SamplingRng,
+                        SamplingStorage,
+                        SIZE,
+                    >,
+                ),
+            )
+            .route(
+                "/da/add_blob",
+                routing::post(
+                    add_blob::<
+                        DaAttestation,
+                        DaBlob,
+                        Membership,
+                        DaVerifierBackend,
+                        DaStorageSerializer,
+                    >,
+                ),
+            )
+            .route(
+                "/da/get_range",
+                routing::post(
+                    get_range::<
+                        Tx,
+                        DaBlobInfo,
+                        DaVerifiedBlobInfo,
+                        DaStorageSerializer,
+                        SamplingBackend,
+                        SamplingNetworkAdapter,
+                        SamplingRng,
+                        SamplingStorage,
+                        SIZE,
+                    >,
+                ),
+            )
+            .route("/network/info", routing::get(libp2p_info))
+            .route(
+                "/storage/block",
+                routing::post(block::<DaStorageSerializer, Tx>),
+            )
+            .route("/mempool/add/tx", routing::post(add_tx::<Tx>))
+            .route(
+                "/mempool/add/blobinfo",
+                routing::post(
+                    add_blob_info::<
+                        DaVerifiedBlobInfo,
+                        SamplingBackend,
+                        SamplingNetworkAdapter,
+                        SamplingRng,
+                        SamplingStorage,
+                    >,
+                ),
+            )
+            .route("/metrics", routing::get(get_metrics))
+            .with_state(handle);
+
+        Server::bind(&self.settings.address)
+            .serve(app.into_make_service())
+            .await
+    }
+}
diff --git a/nodes/nomos-node/src/api.rs b/nodes/nomos-node/src/api/handlers.rs
similarity index 57%
rename from nodes/nomos-node/src/api.rs
rename to nodes/nomos-node/src/api/handlers.rs
index b5c14ae6..dd654900 100644
--- a/nodes/nomos-node/src/api.rs
+++ b/nodes/nomos-node/src/api/handlers.rs
@@ -7,16 +7,10 @@ use axum::{
     extract::{Query, State},
     http::HeaderValue,
     response::{IntoResponse, Response},
-    routing, Json, Router, Server,
-};
-use hyper::{
-    header::{CONTENT_TYPE, USER_AGENT},
-    Body, StatusCode,
-};
-use nomos_api::{
-    http::{cl, consensus, da, libp2p, mempool, metrics, storage},
-    Backend,
+    Json,
 };
+use hyper::{header::CONTENT_TYPE, Body, StatusCode};
+use nomos_api::http::{cl, consensus, da, libp2p, mempool, metrics, storage};
 use nomos_core::da::blob::info::DispersedBlobInfo;
 use nomos_core::da::blob::metadata::Metadata;
 use nomos_core::da::{BlobId, DaVerifier as CoreDaVerifier};
@@ -25,287 +19,15 @@ use nomos_da_network_core::SubnetworkId;
 use nomos_da_sampling::backend::DaSamplingServiceBackend;
 use nomos_da_verifier::backend::VerifierBackend;
use nomos_libp2p::PeerId; -use nomos_mempool::{ - network::adapters::libp2p::Libp2pAdapter as MempoolNetworkAdapter, - tx::service::openapi::Status, MempoolMetrics, -}; +use nomos_mempool::network::adapters::libp2p::Libp2pAdapter as MempoolNetworkAdapter; use nomos_network::backends::libp2p::Libp2p as NetworkBackend; use nomos_storage::backends::StorageSerde; use overwatch_rs::overwatch::handle::OverwatchHandle; use rand::{RngCore, SeedableRng}; use serde::{de::DeserializeOwned, Deserialize, Serialize}; use subnetworks_assignations::MembershipHandler; -use tower_http::{ - cors::{Any, CorsLayer}, - trace::TraceLayer, -}; -use utoipa::OpenApi; -use utoipa_swagger_ui::SwaggerUi; // internal -/// Configuration for the Http Server -#[derive(Debug, Clone, serde::Deserialize, serde::Serialize)] -pub struct AxumBackendSettings { - /// Socket where the server will be listening on for incoming requests. - pub address: std::net::SocketAddr, - /// Allowed origins for this server deployment requests. - pub cors_origins: Vec, -} - -pub struct AxumBackend< - A, - B, - C, - M, - V, - VB, - T, - S, - SamplingBackend, - SamplingNetworkAdapter, - SamplingRng, - SamplingStorage, - const SIZE: usize, -> { - settings: AxumBackendSettings, - _attestation: core::marker::PhantomData, - _blob: core::marker::PhantomData, - _certificate: core::marker::PhantomData, - _membership: core::marker::PhantomData, - _vid: core::marker::PhantomData, - _verifier_backend: core::marker::PhantomData, - _tx: core::marker::PhantomData, - _storage_serde: core::marker::PhantomData, - _sampling_backend: core::marker::PhantomData, - _sampling_network_adapter: core::marker::PhantomData, - _sampling_rng: core::marker::PhantomData, - _sampling_storage: core::marker::PhantomData, -} - -#[derive(OpenApi)] -#[openapi( - paths( - ), - components( - schemas(Status, MempoolMetrics) - ), - tags( - (name = "da", description = "data availibility related APIs") - ) -)] -struct ApiDoc; - -#[async_trait::async_trait] -impl< - A, - B, - C, - M, - V, - VB, - T, - S, - SamplingBackend, - SamplingNetworkAdapter, - SamplingRng, - SamplingStorage, - const SIZE: usize, - > Backend - for AxumBackend< - A, - B, - C, - M, - V, - VB, - T, - S, - SamplingBackend, - SamplingNetworkAdapter, - SamplingRng, - SamplingStorage, - SIZE, - > -where - A: Serialize + DeserializeOwned + Clone + Send + Sync + 'static, - B: Blob + Serialize + DeserializeOwned + Clone + Send + Sync + 'static, - ::BlobId: AsRef<[u8]> + Send + Sync + 'static, - ::ColumnIndex: AsRef<[u8]> + Send + Sync + 'static, - C: DispersedBlobInfo - + Clone - + Debug - + Serialize - + DeserializeOwned - + Send - + Sync - + 'static, - ::BlobId: Clone + Send + Sync, - M: MembershipHandler - + Clone - + Debug - + Send - + Sync - + 'static, - V: DispersedBlobInfo - + From - + Eq - + Debug - + Metadata - + Hash - + Clone - + Serialize - + DeserializeOwned - + Send - + Sync - + 'static, - ::BlobId: Debug + Clone + Ord + Hash, - ::AppId: AsRef<[u8]> + Clone + Serialize + DeserializeOwned + Send + Sync, - ::Index: - AsRef<[u8]> + Clone + Serialize + DeserializeOwned + PartialOrd + Send + Sync, - VB: VerifierBackend + CoreDaVerifier + Send + Sync + 'static, - ::Settings: Clone, - ::Error: Error, - T: Transaction - + Clone - + Debug - + Eq - + Hash - + Serialize - + for<'de> Deserialize<'de> - + Send - + Sync - + 'static, - ::Hash: - Serialize + for<'de> Deserialize<'de> + std::cmp::Ord + Debug + Send + Sync + 'static, - S: StorageSerde + Send + Sync + 'static, - SamplingRng: SeedableRng + RngCore + Send + 'static, - 
SamplingBackend: DaSamplingServiceBackend::BlobId> - + Send - + 'static, - SamplingBackend::Settings: Clone, - SamplingBackend::Blob: Debug + 'static, - SamplingBackend::BlobId: Debug + 'static, - SamplingNetworkAdapter: nomos_da_sampling::network::NetworkAdapter + Send + 'static, - SamplingStorage: nomos_da_sampling::storage::DaStorageAdapter + Send + 'static, -{ - type Error = hyper::Error; - type Settings = AxumBackendSettings; - - async fn new(settings: Self::Settings) -> Result - where - Self: Sized, - { - Ok(Self { - settings, - _attestation: core::marker::PhantomData, - _blob: core::marker::PhantomData, - _certificate: core::marker::PhantomData, - _membership: core::marker::PhantomData, - _vid: core::marker::PhantomData, - _verifier_backend: core::marker::PhantomData, - _tx: core::marker::PhantomData, - _storage_serde: core::marker::PhantomData, - _sampling_backend: core::marker::PhantomData, - _sampling_network_adapter: core::marker::PhantomData, - _sampling_rng: core::marker::PhantomData, - _sampling_storage: core::marker::PhantomData, - }) - } - - async fn serve(self, handle: OverwatchHandle) -> Result<(), Self::Error> { - let mut builder = CorsLayer::new(); - if self.settings.cors_origins.is_empty() { - builder = builder.allow_origin(Any); - } - - for origin in &self.settings.cors_origins { - builder = builder.allow_origin( - origin - .as_str() - .parse::() - .expect("fail to parse origin"), - ); - } - - let app = Router::new() - .layer( - builder - .allow_headers([CONTENT_TYPE, USER_AGENT]) - .allow_methods(Any), - ) - .layer(TraceLayer::new_for_http()) - .merge(SwaggerUi::new("/swagger-ui").url("/api-docs/openapi.json", ApiDoc::openapi())) - .route("/cl/metrics", routing::get(cl_metrics::)) - .route("/cl/status", routing::post(cl_status::)) - .route( - "/cryptarchia/info", - routing::get( - cryptarchia_info::< - T, - S, - SamplingBackend, - SamplingNetworkAdapter, - SamplingRng, - SamplingStorage, - SIZE, - >, - ), - ) - .route( - "/cryptarchia/headers", - routing::get( - cryptarchia_headers::< - T, - S, - SamplingBackend, - SamplingNetworkAdapter, - SamplingRng, - SamplingStorage, - SIZE, - >, - ), - ) - .route("/da/add_blob", routing::post(add_blob::)) - .route( - "/da/get_range", - routing::post( - get_range::< - T, - C, - V, - S, - SamplingBackend, - SamplingNetworkAdapter, - SamplingRng, - SamplingStorage, - SIZE, - >, - ), - ) - .route("/network/info", routing::get(libp2p_info)) - .route("/storage/block", routing::post(block::)) - .route("/mempool/add/tx", routing::post(add_tx::)) - .route( - "/mempool/add/blobinfo", - routing::post( - add_blob_info::< - V, - SamplingBackend, - SamplingNetworkAdapter, - SamplingRng, - SamplingStorage, - >, - ), - ) - .route("/metrics", routing::get(get_metrics)) - .with_state(handle); - - Server::bind(&self.settings.address) - .serve(app.into_make_service()) - .await - } -} - macro_rules! make_request_and_return_response { ($cond:expr) => {{ match $cond.await { @@ -329,7 +51,7 @@ macro_rules! 
make_request_and_return_response { (status = 500, description = "Internal server error", body = String), ) )] -async fn cl_metrics(State(handle): State) -> Response +pub async fn cl_metrics(State(handle): State) -> Response where T: Transaction + Clone @@ -353,7 +75,7 @@ where (status = 500, description = "Internal server error", body = String), ) )] -async fn cl_status( +pub async fn cl_status( State(handle): State, Json(items): Json::Hash>>, ) -> Response @@ -366,7 +88,7 @@ where } #[derive(Deserialize)] #[allow(dead_code)] -struct QueryParams { +pub struct CryptarchiaInfoQuery { from: Option, to: Option, } @@ -379,7 +101,7 @@ struct QueryParams { (status = 500, description = "Internal server error", body = String), ) )] -async fn cryptarchia_info< +pub async fn cryptarchia_info< Tx, SS, SamplingBackend, @@ -430,7 +152,7 @@ where (status = 500, description = "Internal server error", body = String), ) )] -async fn cryptarchia_headers< +pub async fn cryptarchia_headers< Tx, SS, SamplingBackend, @@ -440,7 +162,7 @@ async fn cryptarchia_headers< const SIZE: usize, >( State(store): State, - Query(query): Query, + Query(query): Query, ) -> Response where Tx: Transaction @@ -463,7 +185,7 @@ where SamplingNetworkAdapter: nomos_da_sampling::network::NetworkAdapter, SamplingStorage: nomos_da_sampling::storage::DaStorageAdapter, { - let QueryParams { from, to } = query; + let CryptarchiaInfoQuery { from, to } = query; make_request_and_return_response!(consensus::cryptarchia_headers::< Tx, SS, @@ -483,7 +205,7 @@ where (status = 500, description = "Internal server error", body = String), ) )] -async fn add_blob( +pub async fn add_blob( State(handle): State, Json(blob): Json, ) -> Response @@ -507,7 +229,7 @@ where } #[derive(Serialize, Deserialize)] -struct GetRangeReq +pub struct GetRangeReq where ::AppId: Serialize + DeserializeOwned, ::Index: Serialize + DeserializeOwned, @@ -524,7 +246,7 @@ where (status = 500, description = "Internal server error", body = String), ) )] -async fn get_range< +pub async fn get_range< Tx, C, V, @@ -606,7 +328,7 @@ where (status = 500, description = "Internal server error", body = String), ) )] -async fn libp2p_info(State(handle): State) -> Response { +pub async fn libp2p_info(State(handle): State) -> Response { make_request_and_return_response!(libp2p::libp2p_info(&handle)) } @@ -618,7 +340,10 @@ async fn libp2p_info(State(handle): State) -> Response { (status = 500, description = "Internal server error", body = String), ) )] -async fn block(State(handle): State, Json(id): Json) -> Response +pub async fn block( + State(handle): State, + Json(id): Json, +) -> Response where Tx: serde::Serialize + serde::de::DeserializeOwned + Clone + Eq + core::hash::Hash, S: StorageSerde + Send + Sync + 'static, @@ -634,7 +359,7 @@ where (status = 500, description = "Internal server error", body = String), ) )] -async fn add_tx(State(handle): State, Json(tx): Json) -> Response +pub async fn add_tx(State(handle): State, Json(tx): Json) -> Response where Tx: Transaction + Clone + Debug + Hash + Serialize + DeserializeOwned + Send + Sync + 'static, ::Hash: std::cmp::Ord + Debug + Send + Sync + 'static, @@ -655,7 +380,7 @@ where (status = 500, description = "Internal server error", body = String), ) )] -async fn add_blob_info( +pub async fn add_blob_info( State(handle): State, Json(blob_info): Json, ) -> Response @@ -700,7 +425,7 @@ where (status = 500, description = "Internal server error", body = String), ) )] -async fn get_metrics(State(handle): State) -> Response { +pub async fn 
get_metrics(State(handle): State) -> Response { match metrics::gather(&handle).await { Ok(encoded_metrics) => Response::builder() .status(StatusCode::OK) diff --git a/nodes/nomos-node/src/api/mod.rs b/nodes/nomos-node/src/api/mod.rs new file mode 100644 index 00000000..5f457cf4 --- /dev/null +++ b/nodes/nomos-node/src/api/mod.rs @@ -0,0 +1,2 @@ +pub mod backend; +pub mod handlers; diff --git a/nodes/nomos-node/src/config.rs b/nodes/nomos-node/src/config.rs index 87d7479c..fdabc457 100644 --- a/nodes/nomos-node/src/config.rs +++ b/nodes/nomos-node/src/config.rs @@ -75,7 +75,7 @@ pub struct NetworkArgs { #[derive(Parser, Debug, Clone)] pub struct HttpArgs { #[clap(long = "http-host", env = "HTTP_HOST")] - http_addr: Option, + pub http_addr: Option, #[clap(long = "http-cors-origin", env = "HTTP_CORS_ORIGIN")] pub cors_origins: Option>, @@ -126,131 +126,151 @@ pub struct Config { } impl Config { - pub fn update_log(mut self, log_args: LogArgs) -> Result { - let LogArgs { - backend, - log_addr: addr, - directory, - prefix, - format, - level, - } = log_args; - - // Override the file config with the one from env variables. - if let Some(backend) = backend { - self.log.backend = match backend { - LoggerBackendType::Gelf => LoggerBackend::Gelf { - addr: addr - .ok_or_else(|| eyre!("Gelf backend requires an address."))? - .to_socket_addrs()? - .next() - .ok_or_else(|| eyre!("Invalid gelf address"))?, - }, - LoggerBackendType::File => LoggerBackend::File { - directory: directory - .ok_or_else(|| eyre!("File backend requires a directory."))?, - prefix, - }, - LoggerBackendType::Stdout => LoggerBackend::Stdout, - LoggerBackendType::Stderr => LoggerBackend::Stderr, - } - }; - - // Update parts of the config. - if let Some(format_str) = format { - self.log.format = match format_str.as_str() { - "Json" => LoggerFormat::Json, - "Plain" => LoggerFormat::Plain, - _ => return Err(eyre!("Invalid log format provided.")), - }; - } - if let Some(level_str) = level { - self.log.level = match level_str.as_str() { - "DEBUG" => Level::DEBUG, - _ => return Err(eyre!("Invalid log level provided.")), - }; - } - Ok(self) - } - - pub fn update_network(mut self, network_args: NetworkArgs) -> Result { - let NetworkArgs { - host, - port, - node_key, - initial_peers, - } = network_args; - - if let Some(IpAddr::V4(h)) = host { - self.network.backend.inner.host = h; - } else if host.is_some() { - return Err(eyre!("Unsupported ip version")); - } - - if let Some(port) = port { - self.network.backend.inner.port = port as u16; - } - - if let Some(node_key) = node_key { - let mut key_bytes = hex::decode(node_key)?; - self.network.backend.inner.node_key = - SecretKey::try_from_bytes(key_bytes.as_mut_slice())?; - } - - if let Some(peers) = initial_peers { - self.network.backend.initial_peers = peers; - } - - // TODO: configure mixclient and mixnode if the mixnet feature is enabled - - Ok(self) - } - - pub fn update_http(mut self, http_args: HttpArgs) -> Result { - let HttpArgs { - http_addr, - cors_origins, - } = http_args; - - if let Some(addr) = http_addr { - self.http.backend_settings.address = addr; - } - - if let Some(cors) = cors_origins { - self.http.backend_settings.cors_origins = cors; - } - - Ok(self) - } - - pub fn update_cryptarchia_consensus(mut self, consensus_args: CryptarchiaArgs) -> Result { - let CryptarchiaArgs { - chain_start_time, - slot_duration, - note_secret_key, - note_value, - } = consensus_args; - - if let Some(start_time) = chain_start_time { - self.cryptarchia.time.chain_start_time = - 
time::OffsetDateTime::from_unix_timestamp(start_time)?; - } - - if let Some(duration) = slot_duration { - self.cryptarchia.time.slot_duration = std::time::Duration::from_secs(duration); - } - - if let Some(sk) = note_secret_key { - let sk = <[u8; 16]>::from_hex(sk)?; - - let value = note_value.expect("Should be available if coin sk provided"); - - self.cryptarchia.notes.push(InputWitness::new( - NoteWitness::basic(value as u64, NMO_UNIT, &mut rand::thread_rng()), - NullifierSecret::from_bytes(sk), - )); - } - + pub fn update_from_args( + mut self, + log_args: LogArgs, + network_args: NetworkArgs, + http_args: HttpArgs, + cryptarchia_args: CryptarchiaArgs, + ) -> Result { + update_log(&mut self.log, log_args)?; + update_network(&mut self.network, network_args)?; + update_http(&mut self.http, http_args)?; + update_cryptarchia_consensus(&mut self.cryptarchia, cryptarchia_args)?; Ok(self) } } + +pub fn update_log(log: &mut ::Settings, log_args: LogArgs) -> Result<()> { + let LogArgs { + backend, + log_addr: addr, + directory, + prefix, + format, + level, + } = log_args; + + // Override the file config with the one from env variables. + if let Some(backend) = backend { + log.backend = match backend { + LoggerBackendType::Gelf => LoggerBackend::Gelf { + addr: addr + .ok_or_else(|| eyre!("Gelf backend requires an address."))? + .to_socket_addrs()? + .next() + .ok_or_else(|| eyre!("Invalid gelf address"))?, + }, + LoggerBackendType::File => LoggerBackend::File { + directory: directory.ok_or_else(|| eyre!("File backend requires a directory."))?, + prefix, + }, + LoggerBackendType::Stdout => LoggerBackend::Stdout, + LoggerBackendType::Stderr => LoggerBackend::Stderr, + } + }; + + // Update parts of the config. + if let Some(format_str) = format { + log.format = match format_str.as_str() { + "Json" => LoggerFormat::Json, + "Plain" => LoggerFormat::Plain, + _ => return Err(eyre!("Invalid log format provided.")), + }; + } + if let Some(level_str) = level { + log.level = match level_str.as_str() { + "DEBUG" => Level::DEBUG, + _ => return Err(eyre!("Invalid log level provided.")), + }; + } + Ok(()) +} + +pub fn update_network( + network: &mut as ServiceData>::Settings, + network_args: NetworkArgs, +) -> Result<()> { + let NetworkArgs { + host, + port, + node_key, + initial_peers, + } = network_args; + + if let Some(IpAddr::V4(h)) = host { + network.backend.inner.host = h; + } else if host.is_some() { + return Err(eyre!("Unsupported ip version")); + } + + if let Some(port) = port { + network.backend.inner.port = port as u16; + } + + if let Some(node_key) = node_key { + let mut key_bytes = hex::decode(node_key)?; + network.backend.inner.node_key = SecretKey::try_from_bytes(key_bytes.as_mut_slice())?; + } + + if let Some(peers) = initial_peers { + network.backend.initial_peers = peers; + } + + // TODO: configure mixclient and mixnode if the mixnet feature is enabled + + Ok(()) +} + +pub fn update_http( + http: &mut ::Settings, + http_args: HttpArgs, +) -> Result<()> { + let HttpArgs { + http_addr, + cors_origins, + } = http_args; + + if let Some(addr) = http_addr { + http.backend_settings.address = addr; + } + + if let Some(cors) = cors_origins { + http.backend_settings.cors_origins = cors; + } + + Ok(()) +} + +pub fn update_cryptarchia_consensus( + cryptarchia: &mut ::Settings, + consensus_args: CryptarchiaArgs, +) -> Result<()> { + let CryptarchiaArgs { + chain_start_time, + slot_duration, + note_secret_key, + note_value, + } = consensus_args; + + if let Some(start_time) = chain_start_time { + 
cryptarchia.time.chain_start_time = time::OffsetDateTime::from_unix_timestamp(start_time)?; + } + + if let Some(duration) = slot_duration { + cryptarchia.time.slot_duration = std::time::Duration::from_secs(duration); + } + + if let Some(sk) = note_secret_key { + let sk = <[u8; 16]>::from_hex(sk)?; + + let value = note_value.expect("Should be available if coin sk provided"); + + cryptarchia.notes.push(InputWitness::new( + NoteWitness::basic(value as u64, NMO_UNIT, &mut rand::thread_rng()), + NullifierSecret::from_bytes(sk), + )); + } + + Ok(()) +} diff --git a/nodes/nomos-node/src/lib.rs b/nodes/nomos-node/src/lib.rs index d32585b7..22bd80fe 100644 --- a/nodes/nomos-node/src/lib.rs +++ b/nodes/nomos-node/src/lib.rs @@ -1,10 +1,10 @@ pub mod api; -mod config; +pub mod config; mod tx; // std // crates -use api::AxumBackend; +use api::backend::AxumBackend; use bytes::Bytes; use color_eyre::eyre::Result; pub use config::{Config, CryptarchiaArgs, HttpArgs, LogArgs, MetricsArgs, NetworkArgs}; @@ -81,7 +81,7 @@ pub type NomosApiService = ApiService< pub const CL_TOPIC: &str = "cl"; pub const DA_TOPIC: &str = "da"; -const MB16: usize = 1024 * 1024 * 16; +pub const MB16: usize = 1024 * 1024 * 16; pub type Cryptarchia = cryptarchia_consensus::CryptarchiaConsensus< cryptarchia_consensus::network::adapters::libp2p::LibP2pAdapter, diff --git a/nodes/nomos-node/src/main.rs b/nodes/nomos-node/src/main.rs index 672d8bb8..c6160228 100644 --- a/nodes/nomos-node/src/main.rs +++ b/nodes/nomos-node/src/main.rs @@ -47,10 +47,7 @@ fn main() -> Result<()> { metrics_args, } = Args::parse(); let config = serde_yaml::from_reader::<_, Config>(std::fs::File::open(config)?)? - .update_log(log_args)? - .update_http(http_args)? - .update_network(network_args)? - .update_cryptarchia_consensus(cryptarchia_args)?; + .update_from_args(log_args, network_args, http_args, cryptarchia_args)?; let registry = cfg!(feature = "metrics") .then(|| { diff --git a/tests/src/nodes/nomos.rs b/tests/src/nodes/nomos.rs index d2657a87..fe8ec681 100644 --- a/tests/src/nodes/nomos.rs +++ b/tests/src/nodes/nomos.rs @@ -34,7 +34,7 @@ use nomos_mempool::MempoolMetrics; #[cfg(feature = "mixnet")] use nomos_network::backends::libp2p::mixnet::MixnetConfig; use nomos_network::{backends::libp2p::Libp2pConfig, NetworkConfig}; -use nomos_node::{api::AxumBackendSettings, Config, Tx}; +use nomos_node::{api::backend::AxumBackendSettings, Config, Tx}; use nomos_storage::backends::rocksdb::RocksBackendSettings; use once_cell::sync::Lazy; use rand::{thread_rng, Rng};
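
Note on the config refactor in this patch: the chained `Config::update_log(..)?.update_http(..)?` builder methods are replaced by free functions (`update_log`, `update_network`, `update_http`, `update_cryptarchia_consensus`) that each mutate one settings section in place, which is what lets `nomos-executor` reuse the node's updaters while supplying its own `update_http` for `ExecutorApiService`. The sketch below only illustrates that composition pattern; `LogSettings`, `HttpSettings`, `LogArgs`, and `HttpArgs` are simplified placeholders, not the real nomos service settings or clap structs.

    // Illustrative stand-ins only; in the real binaries the settings come from
    // each service's `ServiceData::Settings` and the clap-derived *Args structs.
    type Result<T> = std::result::Result<T, String>;

    #[derive(Debug, Default)]
    struct LogSettings {
        level: String,
    }

    #[derive(Debug, Default)]
    struct HttpSettings {
        address: String,
    }

    struct LogArgs {
        level: Option<String>,
    }

    struct HttpArgs {
        http_addr: Option<String>,
    }

    // Shared updater: reusable by every node flavour.
    fn update_log(log: &mut LogSettings, args: LogArgs) -> Result<()> {
        if let Some(level) = args.level {
            log.level = level;
        }
        Ok(())
    }

    // Node-specific updater: each binary points this at its own API settings type.
    fn update_http(http: &mut HttpSettings, args: HttpArgs) -> Result<()> {
        if let Some(addr) = args.http_addr {
            http.address = addr;
        }
        Ok(())
    }

    #[derive(Debug, Default)]
    struct Config {
        log: LogSettings,
        http: HttpSettings,
    }

    impl Config {
        // Mirrors `update_from_args` in the diff: compose per-section updaters
        // instead of chaining consuming builder methods on `Config`.
        fn update_from_args(mut self, log_args: LogArgs, http_args: HttpArgs) -> Result<Self> {
            update_log(&mut self.log, log_args)?;
            update_http(&mut self.http, http_args)?;
            Ok(self)
        }
    }

    fn main() -> Result<()> {
        let config = Config::default().update_from_args(
            LogArgs { level: Some("DEBUG".into()) },
            HttpArgs { http_addr: Some("127.0.0.1:18080".into()) },
        )?;
        println!("{config:?}");
        Ok(())
    }

In the real `main.rs` hunks above, the equivalent call is the single `update_from_args(log_args, network_args, http_args, cryptarchia_args)` invocation made after deserializing the YAML config.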