1
0
mirror of synced 2025-02-22 12:38:10 +00:00

Testnet: Kzgrs config (#797)

* Remove out of place wait_online test param

* Cfgsync crate for testnet startup

* Config dispersal to node hosts

* Remove etcd dep and obsolete bash scripts

* Kzgrs test params in testnet

* Nodes listen on all interfaces
This commit is contained in:
gusto 2024-10-02 14:26:39 +03:00 committed by GitHub
parent b01c4dd8c7
commit 66fc046091
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
30 changed files with 559 additions and 504 deletions

View File

@ -1,9 +1,2 @@
# Environment variables for compose.yml file config.
DOCKER_COMPOSE_LIBP2P_REPLICAS=1
DOCKER_COMPOSE_LIBP2P_NODE_KEY_MASK=2000000000000000000000000000000000000000000000000000000000000000
DOCKER_COMPOSE_SUPER_MAJORITY_THRESHOLD=1
DOCKER_COMPOSE_ETCDCTL_ENDPOINTS=etcd:2379
DOCKER_COMPOSE_ETCDCTL_API=3
DOCKER_COMPOSE_BOOSTRAP_NET_NODE_KEY=1000000000000000000000000000000000000000000000000000000000000000
DOCKER_COMPOSE_OVERLAY_NODES=$DOCKER_COMPOSE_BOOSTRAP_NET_NODE_KEY
DOCKER_COMPOSE_NET_INITIAL_PEERS=/dns/bootstrap/udp/3000/quic-v1

View File

@ -1,12 +1,2 @@
# Environment variables for compose.yml file config.
DOCKER_COMPOSE_LIBP2P_REPLICAS=3
DOCKER_COMPOSE_LIBP2P_NODE_KEY_MASK=2000000000000000000000000000000000000000000000000000000000000000
DOCKER_COMPOSE_SUPER_MAJORITY_THRESHOLD=1
DOCKER_COMPOSE_ETCDCTL_ENDPOINTS=etcd:2379
DOCKER_COMPOSE_ETCDCTL_API=3
DOCKER_COMPOSE_BOOSTRAP_NET_NODE_KEY=1000000000000000000000000000000000000000000000000000000000000000
DOCKER_COMPOSE_OVERLAY_NODES=1000000000000000000000000000000000000000000000000000000000000000
DOCKER_COMPOSE_NET_INITIAL_PEERS=/dns/bootstrap/udp/3000/quic-v1
GRAYLOG_PASSWORD_SECRET="Jcjw7g22kJw3aSjjnCQ7DiQvlSJJ38WZ2MvuIyZ4RTILUoxGEQb5EsmAAdcp3lnnlwdSKaZTDFcvh4Xq2h4aTsa4HLx3SZxM"
GRAYLOG_ROOT_PASSWORD_SHA2="7092a9ec7c94ba6c452a3937a380b9cfdac8e2d0b342c034ea9e306d41ce6d89"

View File

@ -35,6 +35,7 @@ members = [
"ledger/cryptarchia-ledger",
"cl/cl",
"proof_of_leadership/proof_statements",
"testnet/cfgsync",
"tests",
]
exclude = ["proof_of_leadership/risc0/risc0_proofs"]

View File

@ -1,8 +1,19 @@
version: '3.8'
services:
bootstrap:
container_name: bootstrap
cfgsync:
container_name: cfgsync
build:
context: .
dockerfile: testnet/Dockerfile
image: nomos:latest
volumes:
- ./testnet:/etc/nomos
entrypoint: /etc/nomos/scripts/run_cfgsync.sh
nomos-node-0:
container_name: nomos_node_0
build:
context: .
dockerfile: testnet/Dockerfile
@ -12,110 +23,63 @@ services:
- "18080:18080/tcp"
volumes:
- ./testnet:/etc/nomos
- ./tests/kzgrs/kzgrs_test_params:/kzgrs_test_params:z
depends_on:
- cfgsync
- graylog
environment:
- BOOTSTRAP_NODE_KEY=${DOCKER_COMPOSE_BOOSTRAP_NET_NODE_KEY:-1000000000000000000000000000000000000000000000000000000000000000}
- LIBP2P_NODE_MASK=${DOCKER_COMPOSE_LIBP2P_NODE_KEY_MASK:-2000000000000000000000000000000000000000000000000000000000000000}
- LIBP2P_REPLICAS=3
- OVERLAY_NODES=${DOCKER_COMPOSE_OVERLAY_NODES:-1000000000000000000000000000000000000000000000000000000000000000}
entrypoint: /etc/nomos/scripts/run_bootstrap_node.sh
entrypoint: /etc/nomos/scripts/run_nomos_node.sh
libp2p-node-1:
container_name: libp2p_node_1
nomos-node-1:
container_name: nomos_node_1
build:
context: .
dockerfile: testnet/Dockerfile
image: nomos:latest
volumes:
- ./testnet:/etc/nomos
- ./tests/kzgrs/kzgrs_test_params:/kzgrs_test_params:z
depends_on:
- bootstrap
- etcd
- cfgsync
- graylog
ports:
- "3001:3000/udp"
- "18081:18080/tcp"
environment:
- LIBP2P_REPLICAS=3
- ETCDCTL_ENDPOINTS=${DOCKER_COMPOSE_ETCDCTL_ENDPOINTS:-etcd:2379}
- ETCDCTL_API=${DOCKER_COMPOSE_ETCDCTL_API:-3}
- LIBP2P_NODE_MASK=${DOCKER_COMPOSE_LIBP2P_NODE_KEY_MASK:-2000000000000000000000000000000000000000000000000000000000000000}
- OVERLAY_NODES=${DOCKER_COMPOSE_OVERLAY_NODES:-1000000000000000000000000000000000000000000000000000000000000000}
- OVERLAY_SUPER_MAJORITY_THRESHOLD=${DOCKER_COMPOSE_SUPER_MAJORITY_THRESHOLD:-1}
- NET_INITIAL_PEERS=${DOCKER_COMPOSE_NET_INITIAL_PEERS:-/dns/bootstrap/udp/3000/quic-v1}
entrypoint: /etc/nomos/scripts/run_nomos_node.sh
libp2p-node-2:
container_name: libp2p_node_2
nomos-node-2:
container_name: nomos_node_2
build:
context: .
dockerfile: testnet/Dockerfile
image: nomos:latest
volumes:
- ./testnet:/etc/nomos
- ./tests/kzgrs/kzgrs_test_params:/kzgrs_test_params:z
depends_on:
- bootstrap
- etcd
- cfgsync
- graylog
ports:
- "3002:3000/udp"
- "18082:18080/tcp"
environment:
- LIBP2P_REPLICAS=3
- ETCDCTL_ENDPOINTS=${DOCKER_COMPOSE_ETCDCTL_ENDPOINTS:-etcd:2379}
- ETCDCTL_API=${DOCKER_COMPOSE_ETCDCTL_API:-3}
- LIBP2P_NODE_MASK=${DOCKER_COMPOSE_LIBP2P_NODE_KEY_MASK:-2000000000000000000000000000000000000000000000000000000000000000}
- OVERLAY_NODES=${DOCKER_COMPOSE_OVERLAY_NODES:-1000000000000000000000000000000000000000000000000000000000000000}
- OVERLAY_SUPER_MAJORITY_THRESHOLD=${DOCKER_COMPOSE_SUPER_MAJORITY_THRESHOLD:-1}
- NET_INITIAL_PEERS=${DOCKER_COMPOSE_NET_INITIAL_PEERS:-/dns/bootstrap/udp/3000/quic-v1}
entrypoint: /etc/nomos/scripts/run_nomos_node.sh
libp2p-node-3:
container_name: libp2p_node_3
nomos-node-3:
container_name: nomos_node_3
build:
context: .
dockerfile: testnet/Dockerfile
image: nomos:latest
volumes:
- ./testnet:/etc/nomos
- ./tests/kzgrs/kzgrs_test_params:/kzgrs_test_params:z
depends_on:
- bootstrap
- etcd
- cfgsync
- graylog
ports:
- "3003:3000/udp"
- "18083:18080/tcp"
environment:
- LIBP2P_REPLICAS=3
- ETCDCTL_ENDPOINTS=${DOCKER_COMPOSE_ETCDCTL_ENDPOINTS:-etcd:2379}
- ETCDCTL_API=${DOCKER_COMPOSE_ETCDCTL_API:-3}
- LIBP2P_NODE_MASK=${DOCKER_COMPOSE_LIBP2P_NODE_KEY_MASK:-2000000000000000000000000000000000000000000000000000000000000000}
- OVERLAY_NODES=${DOCKER_COMPOSE_OVERLAY_NODES:-1000000000000000000000000000000000000000000000000000000000000000}
- OVERLAY_SUPER_MAJORITY_THRESHOLD=${DOCKER_COMPOSE_SUPER_MAJORITY_THRESHOLD:-1}
- NET_INITIAL_PEERS=${DOCKER_COMPOSE_NET_INITIAL_PEERS:-/dns/bootstrap/udp/3000/quic-v1}
entrypoint: /etc/nomos/scripts/run_nomos_node.sh
chatbot:
container_name: chatbot
build:
context: .
dockerfile: testnet/Dockerfile
image: nomos:latest
volumes:
- ./testnet:/etc/nomos
entrypoint: /etc/nomos/scripts/run_nomos_bot.sh
etcd:
container_name: etcd
image: quay.io/coreos/etcd:v3.4.15
ports:
- "2379:2379/tcp"
command:
- /usr/local/bin/etcd
- --advertise-client-urls=http://etcd:2379
- --listen-client-urls=http://0.0.0.0:2379
prometheus:
container_name: prometheus
image: prom/prometheus:latest

View File

@ -1,22 +1,16 @@
services:
bootstrap:
cfgsync:
container_name: cfgsync
build:
context: .
dockerfile: testnet/Dockerfile
ports:
- "3000:3000/udp"
- "18080:18080/tcp"
image: nomos:latest
volumes:
- ./testnet:/etc/nomos
environment:
- BOOTSTRAP_NODE_KEY=${DOCKER_COMPOSE_BOOSTRAP_NET_NODE_KEY:-1000000000000000000000000000000000000000000000000000000000000000}
- LIBP2P_NODE_MASK=${DOCKER_COMPOSE_LIBP2P_NODE_KEY_MASK:-2000000000000000000000000000000000000000000000000000000000000000}
- LIBP2P_REPLICAS=${DOCKER_COMPOSE_LIBP2P_REPLICAS:-1}
- OVERLAY_NODES=${DOCKER_COMPOSE_OVERLAY_NODES:-1000000000000000000000000000000000000000000000000000000000000000}
entrypoint: /etc/nomos/scripts/run_bootstrap_node.sh
entrypoint: /etc/nomos/scripts/run_cfgsync.sh
libp2p-node:
nomos-node:
build:
context: .
dockerfile: testnet/Dockerfile
@ -25,23 +19,5 @@ services:
deploy:
replicas: ${DOCKER_COMPOSE_LIBP2P_REPLICAS:-1}
depends_on:
- bootstrap
- etcd
environment:
- LIBP2P_REPLICAS=${DOCKER_COMPOSE_LIBP2P_REPLICAS:-1}
- ETCDCTL_ENDPOINTS=${DOCKER_COMPOSE_ETCDCTL_ENDPOINTS:-etcd:2379}
- ETCDCTL_API=${DOCKER_COMPOSE_ETCDCTL_API:-3}
- LIBP2P_NODE_MASK=${DOCKER_COMPOSE_LIBP2P_NODE_KEY_MASK:-2000000000000000000000000000000000000000000000000000000000000000}
- OVERLAY_NODES=${DOCKER_COMPOSE_OVERLAY_NODES:-1000000000000000000000000000000000000000000000000000000000000000}
- OVERLAY_SUPER_MAJORITY_THRESHOLD=${DOCKER_COMPOSE_SUPER_MAJORITY_THRESHOLD:-1}
- NET_INITIAL_PEERS=${DOCKER_COMPOSE_NET_INITIAL_PEERS:-/dns/bootstrap/udp/3000/quic-v1}
- cfgsync
entrypoint: /etc/nomos/scripts/run_nomos_node.sh
etcd:
image: quay.io/coreos/etcd:v3.4.15
ports:
- "2379:2379/tcp"
command:
- /usr/local/bin/etcd
- --advertise-client-urls=http://etcd:2379
- --listen-client-urls=http://0.0.0.0:2379

View File

@ -122,7 +122,6 @@ pub struct Config {
pub http: <NomosApiService as ServiceData>::Settings,
pub cryptarchia: <crate::Cryptarchia as ServiceData>::Settings,
pub storage: <crate::StorageService<RocksBackend<Wire>> as ServiceData>::Settings,
pub wait_online_secs: u64,
}
impl Config {

View File

@ -1,13 +1,13 @@
# BUILD IMAGE ---------------------------------------------------------
FROM rust:1.80.0-slim-bookworm AS builder
FROM rust:1.81.0-slim-bookworm AS builder
WORKDIR /nomos
COPY . .
# Install dependencies needed for building RocksDB and etcd.
RUN apt-get update && apt-get install -yq \
git gcc g++ clang etcd-client libssl-dev \
git gcc g++ clang libssl-dev \
pkg-config protobuf-compiler
RUN cargo install cargo-binstall
@ -18,18 +18,20 @@ RUN cargo build --release --all --features metrics
# NODE IMAGE ----------------------------------------------------------
FROM bitnami/minideb:latest
FROM bitnami/minideb:bookworm
LABEL maintainer="augustinas@status.im" \
source="https://github.com/logos-co/nomos-node" \
description="Nomos testnet image"
# nomos default ports
EXPOSE 3000 8080 9000 60000
EXPOSE 3000 8080 9000 60000
RUN apt-get update && apt-get install -y libssl3
COPY --from=builder /nomos/target/release/nomos-node /usr/bin/nomos-node
COPY --from=builder /nomos/target/release/nomos-cli /usr/bin/nomos-cli
COPY --from=builder /usr/bin/etcdctl /usr/bin/etcdctl
COPY nodes/nomos-node/config.yaml /etc/nomos/config.yaml
COPY --from=builder /nomos/target/release/cfgsync-server /usr/bin/cfgsync-server
COPY --from=builder /nomos/target/release/cfgsync-client /usr/bin/cfgsync-client
ENTRYPOINT ["/usr/bin/nomos-node"]

View File

@ -2,9 +2,7 @@
The Nomos Docker Compose Testnet contains four distinct service types:
- **Bootstrap Node Service**: A singular Nomos node with its own service and a deterministic DNS address. Other nodes utilize this as their initial peer.
- **Libp2p Node Services**: Multiple dynamically spawned Nomos nodes that announce their existence through etcd.
- **Etcd Service**: A container running an etcd instance.
- **Nomos Node Services**: Multiple dynamically spawned Nomos nodes that synchronize their configuration via the cfgsync utility.
## Building
@ -41,15 +39,15 @@ docker compose up -d
Followed by:
```bash
docker compose logs -f {bootstrap,libp2p-node,etcd}
docker compose logs -f nomos-node
```
## Using testnet
Bootstrap node is accessible from the host via `3000` and `18080` ports. To expose other nomos nodes, please update `libp2p-node` service in the `compose.yml` file with this configuration:
The bootstrap node is accessible from the host via ports `3000` and `18080`. To expose other nomos nodes, please update the `nomos-node` service in the `compose.yml` file with this configuration:
```bash
libp2p-node:
nomos-node-0:
ports:
- "3001-3010:3000" # Use range depending on the number of nomos node replicas.
- "18081-18190:18080"

View File

@ -1,68 +0,0 @@
log:
backend: "Stdout"
format: "Json"
level: "info"
cryptarchia:
config:
epoch_stake_distribution_stabilization: 3
epoch_period_nonce_buffer: 3
epoch_period_nonce_stabilization: 4
consensus_config:
security_param: 10
active_slot_coeff: 0.9
time:
slot_duration:
secs: 5
nanos: 0
chain_start_time: [2024, 115, 6, 45, 44, 159214915, 0, 0, 0]
coins:
genesis_state:
lead_commitments:
- 20345e93cc65057a391893cbd88d86568efd3073156564797e4a912e4ae1c3ab
- 1594ef82f13d0b64284a9134f2f2ed3b30bca26812a69886a3f9ed737f117bd5
- 76721421649fbf175aff27470e40f44ade69bac844abcf27215f5c0d79d2ec46
- 06f7f2078ba6b24af7c5aae6f24889f6c609195ad796fb11b42ad6e0a3f8c10f
spend_commitments:
- 20345e93cc65057a391893cbd88d86568efd3073156564797e4a912e4ae1c3ab
- 1594ef82f13d0b64284a9134f2f2ed3b30bca26812a69886a3f9ed737f117bd5
- 76721421649fbf175aff27470e40f44ade69bac844abcf27215f5c0d79d2ec46
- 06f7f2078ba6b24af7c5aae6f24889f6c609195ad796fb11b42ad6e0a3f8c10f
nullifiers: []
nonce: '0000000000000000000000000000000000000000000000000000000000000000'
slot: 0
next_epoch_state:
epoch: 1
nonce: '0000000000000000000000000000000000000000000000000000000000000000'
commitments: []
total_stake: 4
epoch_state:
epoch: 0
nonce: '0000000000000000000000000000000000000000000000000000000000000000'
commitments: []
total_stake: 4
network:
backend:
host: 0.0.0.0
port: 3000
log_level: "fatal"
node_key: "0000000000000000000000000000000000000000000000000000000000000001"
discV5BootstrapNodes: []
initial_peers: []
relayTopics: []
http:
backend_settings:
address: 0.0.0.0:18080
cors_origins: []
da:
da_protocol:
voter: [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
num_attestations: 1
backend:
max_capacity: 10000
evicting_period:
secs: 3600
nanos: 0

16
testnet/cfgsync.yaml Normal file
View File

@ -0,0 +1,16 @@
port: 4400
n_hosts: 4
timeout: 10
# ConsensusConfig related parameters
security_param: 10
active_slot_coeff: 0.9
# DaConfig related parameters
subnetwork_size: 2
dispersal_factor: 2
num_samples: 1
num_subnets: 2
old_blobs_check_interval_secs: 5
blobs_validity_duration_secs: 60
global_params_path: "/kzgrs_test_params"

View File

@ -0,0 +1,16 @@
[package]
name = "cfgsync"
version = "0.1.0"
edition = "2021"
[dependencies]
axum = { version = "0.6" }
clap = { version = "4", features = ["derive"] }
nomos-libp2p = { path = "../../nomos-libp2p" }
nomos-node = { path = "../../nodes/nomos-node" }
reqwest = { version = "0.11", features = ["json", "rustls-tls"] }
tests = { path = "../../tests" }
tokio = { version = "1.24", features = ["rt-multi-thread"] }
serde = { version = "1", features = ["derive"] }
serde_json = "1"
serde_yaml = "0.9"

View File

@ -0,0 +1,66 @@
// std
use std::{env, fs, net::Ipv4Addr, process};
// crates
use nomos_node::Config as NodeConfig;
use reqwest::Client;
use serde::{de::DeserializeOwned, Serialize};
// internal
/// JSON request body POSTed to the cfgsync server to announce this host's
/// IPv4 address.
#[derive(Serialize)]
struct ClientIp {
    ip: Ipv4Addr,
}
/// Parse a dotted-quad IPv4 string, falling back to loopback on failure.
///
/// When `ip_str` is not a valid IPv4 address, a warning is written to stderr
/// and `127.0.0.1` is returned, so callers always get a usable address.
fn parse_ip(ip_str: String) -> Ipv4Addr {
    match ip_str.parse::<Ipv4Addr>() {
        Ok(addr) => addr,
        Err(_) => {
            eprintln!("Invalid IP format, defaulting to 127.0.0.1");
            Ipv4Addr::LOCALHOST
        }
    }
}
/// Announce this host's IP to the cfgsync server at `url`, then persist the
/// configuration it returns as YAML at `config_file`.
///
/// Every failure (network, non-2xx status, JSON decode, YAML encode, file
/// write) is mapped to a human-readable error string.
async fn get_config<Config: Serialize + DeserializeOwned>(
    ip: Ipv4Addr,
    url: &str,
    config_file: &str,
) -> Result<(), String> {
    // POST our IP; the server holds the request until it has heard from all
    // expected hosts (or times out).
    let response = Client::new()
        .post(url)
        .json(&ClientIp { ip })
        .send()
        .await
        .map_err(|err| format!("Failed to send IP announcement: {}", err))?;

    let status = response.status();
    if !status.is_success() {
        return Err(format!("Server error: {:?}", status));
    }

    // Decode the JSON payload into the caller-chosen config type.
    let config: Config = response
        .json()
        .await
        .map_err(|err| format!("Failed to parse response: {}", err))?;

    // Re-encode as YAML — the format the node binary reads at startup.
    let yaml = serde_yaml::to_string(&config)
        .map_err(|err| format!("Failed to serialize config to YAML: {}", err))?;
    fs::write(config_file, yaml)
        .map_err(|err| format!("Failed to write config to file: {}", err))?;

    println!("Config saved to {}", config_file);
    Ok(())
}
/// Entry point: read connection settings from the environment, fetch this
/// host's node config from the cfgsync server, and write it to disk.
/// Exits with status 1 on any failure.
#[tokio::main]
async fn main() {
    // All settings come from the environment so the same binary runs in any
    // container without flags.
    let config_path = env::var("CFG_FILE_PATH").unwrap_or("config.yaml".to_string());
    let server = env::var("CFG_SERVER_ADDR").unwrap_or("http://127.0.0.1:4400".to_string());
    let host_ip = parse_ip(env::var("CFG_HOST_IP").unwrap_or_else(|_| "127.0.0.1".to_string()));

    let endpoint = format!("{}/node", server);
    match get_config::<NodeConfig>(host_ip, &endpoint, &config_path).await {
        Ok(()) => {}
        Err(err) => {
            eprintln!("Error: {}", err);
            process::exit(1);
        }
    }
}

View File

@ -0,0 +1,125 @@
// std
use std::net::Ipv4Addr;
use std::path::PathBuf;
use std::sync::Arc;
use std::time::Duration;
use std::{fs, process};
// crates
use axum::extract::State;
use axum::Json;
use axum::{http::StatusCode, response::IntoResponse, routing::post, Router};
use cfgsync::config::Host;
use cfgsync::repo::{ConfigRepo, RepoResponse};
use clap::Parser;
use serde::{Deserialize, Serialize};
use tests::{ConsensusConfig, DaConfig};
use tokio::sync::oneshot::channel;
// internal
/// Command-line arguments for the cfgsync server binary.
#[derive(Parser, Debug)]
#[command(about = "CfgSync")]
struct Args {
    // Positional path to the server's YAML configuration file.
    // (Plain comment on purpose: a `///` here would become clap help text.)
    config: PathBuf,
}
/// On-disk YAML schema for the cfgsync server (e.g. `testnet/cfgsync.yaml`).
#[derive(Debug, Deserialize)]
struct CfgSyncConfig {
    // HTTP port the server binds on (0.0.0.0:<port>).
    port: u16,
    // Number of hosts that must announce before configs are generated.
    n_hosts: usize,
    // Seconds to wait for all hosts before replying with a timeout.
    timeout: u64,
    // ConsensusConfig related parameters
    security_param: u32,
    active_slot_coeff: f64,
    // DaConfig related parameters
    subnetwork_size: usize,
    dispersal_factor: usize,
    num_samples: u16,
    num_subnets: u16,
    old_blobs_check_interval_secs: u64,
    blobs_validity_duration_secs: u64,
    global_params_path: String,
}
impl CfgSyncConfig {
    /// Read `file_path` and deserialize it as YAML into a `CfgSyncConfig`,
    /// mapping both the I/O and parse failures to readable error strings.
    fn load_from_file(file_path: &PathBuf) -> Result<Self, String> {
        let raw = fs::read_to_string(file_path)
            .map_err(|err| format!("Failed to read config file: {}", err))?;
        serde_yaml::from_str(&raw)
            .map_err(|err| format!("Failed to parse config file: {}", err))
    }

    /// Project the consensus-related fields into a `ConsensusConfig`; the
    /// expected host count doubles as the participant count.
    fn to_consensus_config(&self) -> ConsensusConfig {
        ConsensusConfig {
            active_slot_coeff: self.active_slot_coeff,
            security_param: self.security_param,
            n_participants: self.n_hosts,
        }
    }

    /// Project the DA-related fields into a `DaConfig`, converting the
    /// seconds-based knobs into `Duration`s.
    fn to_da_config(&self) -> DaConfig {
        DaConfig {
            global_params_path: self.global_params_path.clone(),
            blobs_validity_duration: Duration::from_secs(self.blobs_validity_duration_secs),
            old_blobs_check_interval: Duration::from_secs(self.old_blobs_check_interval_secs),
            num_subnets: self.num_subnets,
            num_samples: self.num_samples,
            dispersal_factor: self.dispersal_factor,
            subnetwork_size: self.subnetwork_size,
        }
    }
}
/// JSON request body received from cfgsync clients: the announcing host's
/// IPv4 address.
#[derive(Serialize, Deserialize)]
struct ClientIp {
    ip: Ipv4Addr,
}
/// Axum handler for `POST /node`: registers the announcing host with the
/// shared `ConfigRepo`, then waits on a oneshot channel until the repo
/// replies with either a generated node config (200) or a timeout (408).
async fn node_config(
    State(config_repo): State<Arc<ConfigRepo>>,
    Json(payload): Json<ClientIp>,
) -> impl IntoResponse {
    let ClientIp { ip } = payload;

    // The repo fires this oneshot once all expected hosts have announced
    // themselves (or the configured timeout elapses).
    let (reply_tx, reply_rx) = channel();
    config_repo.register(Host::default_node_from_ip(ip), reply_tx);

    match reply_rx.await {
        Ok(config_response) => match config_response {
            RepoResponse::Config(config) => (StatusCode::OK, Json(config)).into_response(),
            RepoResponse::Timeout => {
                (StatusCode::REQUEST_TIMEOUT, Json(RepoResponse::Timeout)).into_response()
            }
        },
        // Sender was dropped without replying — the repo task died.
        Err(_) => (StatusCode::INTERNAL_SERVER_ERROR, "Error receiving config").into_response(),
    }
}
/// Entry point: load the cfgsync YAML config given on the command line,
/// start the config repository, and serve the `/node` endpoint on all
/// interfaces at the configured port.
#[tokio::main]
async fn main() {
    let cli = Args::parse();

    // Fail fast with a readable message if the config can't be loaded.
    let config = CfgSyncConfig::load_from_file(&cli.config).unwrap_or_else(|err| {
        eprintln!("{}", err);
        process::exit(1);
    });
    let consensus_config = config.to_consensus_config();
    let da_config = config.to_da_config();

    // `ConfigRepo::new` spawns its own background task that waits for
    // `n_hosts` registrations and then fans the generated configs back out.
    let config_repo = ConfigRepo::new(
        config.n_hosts,
        consensus_config,
        da_config,
        Duration::from_secs(config.timeout),
    );

    let app = Router::new()
        .route("/node", post(node_config))
        .with_state(config_repo.clone());

    println!("Server running on http://0.0.0.0:{}", config.port);
    // axum 0.6-style server; binds 0.0.0.0 so other containers can reach it.
    axum::Server::bind(&format!("0.0.0.0:{}", config.port).parse().unwrap())
        .serve(app.into_make_service())
        .await
        .unwrap();
}

View File

@ -0,0 +1,157 @@
// std
use std::{collections::HashMap, net::Ipv4Addr, str::FromStr};
// crates
use nomos_libp2p::{Multiaddr, PeerId};
use nomos_node::Config as NodeConfig;
use tests::{ConsensusConfig, DaConfig, Node, NomosNode};
// internal
const DEFAULT_NETWORK_PORT: u16 = 3000;
const DEFAULT_DA_NETWORK_PORT: u16 = 3300;
/// Role of a registered host; currently only plain nomos nodes exist.
#[derive(Eq, PartialEq, Hash, Clone)]
pub enum HostKind {
    Nomos,
}
/// A participating machine: its role, IP, and the two ports its node listens
/// on. `Eq + Hash` so it can key the config maps used by the repo.
#[derive(Eq, PartialEq, Hash, Clone)]
pub struct Host {
    pub kind: HostKind,
    pub ip: Ipv4Addr,
    // libp2p network port.
    pub network_port: u16,
    // DA (data availability) network port.
    pub da_network_port: u16,
}
impl Host {
    /// Build a nomos-node host entry for `ip` using the default network
    /// (3000) and DA network (3300) ports.
    pub fn default_node_from_ip(ip: Ipv4Addr) -> Self {
        Self {
            kind: HostKind::Nomos,
            ip,
            network_port: DEFAULT_NETWORK_PORT,
            da_network_port: DEFAULT_DA_NETWORK_PORT,
        }
    }
}
/// Take the per-test node configs produced by `NomosNode::create_node_configs`
/// and rewrite their network/DA addresses so each config targets one of the
/// given `hosts`; the result is keyed by host for later lookup.
pub fn create_node_configs(
    consensus: ConsensusConfig,
    da: DaConfig,
    hosts: Vec<Host>,
) -> HashMap<Host, NodeConfig> {
    let mut configs = NomosNode::create_node_configs(consensus, da);
    let mut configured_hosts = HashMap::new();

    // Rebuild DA address lists.
    // NOTE(review): indexing `configs[0]` panics if no configs were
    // generated; presumably n_participants >= 1 is guaranteed upstream —
    // TODO confirm.
    let peer_addresses = configs[0].da_network.backend.addresses.clone();
    let host_network_init_peers = update_network_init_peers(hosts.clone());
    let host_da_peer_addresses = update_da_peer_addresses(hosts.clone(), peer_addresses);

    // Strip the host IPs; the node configs only carry peer_id -> multiaddr.
    let new_peer_addresses: HashMap<PeerId, Multiaddr> = host_da_peer_addresses
        .clone()
        .into_iter()
        .map(|(peer_id, (multiaddr, _))| (peer_id, multiaddr))
        .collect();

    // Pair configs with hosts positionally; assumes both collections have
    // the same length and a matching order — TODO confirm with the caller
    // (repo.rs collects hosts from a HashMap, whose order is unspecified).
    for (config, host) in configs.iter_mut().zip(hosts.into_iter()) {
        config.da_network.backend.addresses = new_peer_addresses.clone();

        // Libp2p network config: listen on all interfaces at the host's port.
        config.network.backend.inner.host = Ipv4Addr::from_str("0.0.0.0").unwrap();
        config.network.backend.inner.port = host.network_port;
        config.network.backend.initial_peers = host_network_init_peers.clone();

        // DA Libp2p network config.
        config.da_network.backend.listening_address = Multiaddr::from_str(&format!(
            "/ip4/0.0.0.0/udp/{}/quic-v1",
            host.da_network_port,
        ))
        .unwrap();

        configured_hosts.insert(host.clone(), config.clone());
    }

    configured_hosts
}
/// Derive the initial libp2p peer list: one multiaddr per host, built from
/// its IP and network port.
fn update_network_init_peers(hosts: Vec<Host>) -> Vec<Multiaddr> {
    let mut peers = Vec::with_capacity(hosts.len());
    for host in &hosts {
        peers.push(nomos_libp2p::Swarm::multiaddr(host.ip, host.network_port));
    }
    peers
}
/// Re-address each DA peer to one of the provided hosts, producing
/// `peer_id -> (new multiaddr, host ip)`.
///
/// NOTE(review): `peer_addresses` is a `HashMap`, so `into_iter()` yields
/// peers in unspecified order before zipping with `hosts`; the peer<->host
/// pairing is therefore nondeterministic across runs — confirm this is
/// acceptable (it may be, if peer ids are regenerated per run).
fn update_da_peer_addresses(
    hosts: Vec<Host>,
    peer_addresses: HashMap<PeerId, Multiaddr>,
) -> HashMap<PeerId, (Multiaddr, Ipv4Addr)> {
    peer_addresses
        .into_iter()
        .zip(hosts)
        .map(|((peer_id, _), host)| {
            // Replace whatever address the test harness generated with the
            // host's real IP and DA port.
            let new_multiaddr = Multiaddr::from_str(&format!(
                "/ip4/{}/udp/{}/quic-v1",
                host.ip, host.da_network_port,
            ))
            .unwrap();
            (peer_id, (new_multiaddr, host.ip))
        })
        .collect()
}
#[cfg(test)]
mod cfgsync_tests {
    use std::str::FromStr;
    use std::{net::Ipv4Addr, time::Duration};
    use nomos_libp2p::Protocol;
    use tests::{ConsensusConfig, DaConfig};
    use super::{create_node_configs, Host, HostKind};

    // Checks that every generated config carries its host's network port and
    // that the DA listening multiaddr uses the host's DA port.
    #[test]
    fn basic_ip_list() {
        let hosts = (0..10)
            .map(|i| Host {
                kind: HostKind::Nomos,
                ip: Ipv4Addr::from_str(&format!("10.1.1.{i}")).unwrap(),
                network_port: 3000,
                da_network_port: 4044,
            })
            .collect();
        let configs = create_node_configs(
            ConsensusConfig {
                n_participants: 10,
                security_param: 10,
                active_slot_coeff: 0.9,
            },
            DaConfig {
                subnetwork_size: 2,
                dispersal_factor: 1,
                num_samples: 1,
                num_subnets: 2,
                old_blobs_check_interval: Duration::from_secs(5),
                blobs_validity_duration: Duration::from_secs(u64::MAX),
                global_params_path: "".into(),
            },
            hosts,
        );
        for (host, config) in configs.iter() {
            let network_port = config.network.backend.inner.port;
            let da_network_addr = config.da_network.backend.listening_address.clone();
            // Extract the UDP port from the multiaddr protocol stack.
            let da_network_port = da_network_addr
                .iter()
                .find_map(|protocol| match protocol {
                    Protocol::Udp(port) => Some(port),
                    _ => None,
                })
                .unwrap();
            assert_eq!(network_port, host.network_port);
            assert_eq!(da_network_port, host.da_network_port);
        }
    }
}

View File

@ -0,0 +1,2 @@
/// Per-host node config generation (see `create_node_configs`).
pub mod config;
/// `ConfigRepo`: collects host registrations and replies with generated configs.
pub mod repo;

101
testnet/cfgsync/src/repo.rs Normal file
View File

@ -0,0 +1,101 @@
// std
use std::collections::HashMap;
use std::sync::{Arc, Mutex};
use std::time::Duration;
// crates
use nomos_node::Config as NodeConfig;
use serde::{Deserialize, Serialize};
use tests::{ConsensusConfig, DaConfig};
use tokio::sync::oneshot::Sender;
use tokio::time::timeout;
// internal
use crate::config::{create_node_configs, Host};
/// Reply sent back to a waiting HTTP handler: either the host's generated
/// node config, or a marker that the announcement window timed out.
#[derive(Serialize, Deserialize)]
pub enum RepoResponse {
    Config(NodeConfig),
    Timeout,
}
/// Collects host registrations until `n_hosts` have announced, then builds
/// one node config per host and delivers it over each host's oneshot sender.
pub struct ConfigRepo {
    // Hosts that have announced so far, each with the channel used to reply.
    waiting_hosts: Mutex<HashMap<Host, Sender<RepoResponse>>>,
    n_hosts: usize,
    consensus_config: ConsensusConfig,
    da_config: DaConfig,
    // How long to wait for all hosts before replying with Timeout.
    timeout_duration: Duration,
}
impl ConfigRepo {
    /// Create the repo and immediately spawn its coordinator task, which
    /// waits for registrations and fans out configs (or timeouts).
    ///
    /// Must be called from within a Tokio runtime (uses `tokio::spawn`).
    pub fn new(
        n_hosts: usize,
        consensus_config: ConsensusConfig,
        da_config: DaConfig,
        timeout_duration: Duration,
    ) -> Arc<Self> {
        let repo = Arc::new(Self {
            waiting_hosts: Mutex::new(HashMap::new()),
            n_hosts,
            consensus_config,
            da_config,
            timeout_duration,
        });
        // Detached background task; it lives as long as the Arc it captured.
        let repo_clone = repo.clone();
        tokio::spawn(async move {
            repo_clone.run().await;
        });
        repo
    }
    /// Record an announcing host together with the oneshot sender used to
    /// deliver its config. Re-registering the same host replaces the sender.
    pub fn register(&self, host: Host, reply_tx: Sender<RepoResponse>) {
        let mut waiting_hosts = self.waiting_hosts.lock().unwrap();
        waiting_hosts.insert(host, reply_tx);
    }
    /// Coordinator: wait (bounded by `timeout_duration`) until `n_hosts`
    /// hosts have registered, then either send each one its generated config
    /// or notify every waiter of the timeout.
    async fn run(&self) {
        let timeout_duration = self.timeout_duration;
        match timeout(timeout_duration, self.wait_for_hosts()).await {
            Ok(_) => {
                println!("All hosts have announced their IPs");
                let mut waiting_hosts = self.waiting_hosts.lock().unwrap();
                let hosts = waiting_hosts
                    .iter()
                    .map(|(host, _)| host)
                    .cloned()
                    .collect();
                // Build one config per registered host; oneshot `send` is
                // synchronous (no .await), so holding the lock here is fine.
                let configs = create_node_configs(
                    self.consensus_config.clone(),
                    self.da_config.clone(),
                    hosts,
                );
                for (host, sender) in waiting_hosts.drain() {
                    let config = configs.get(&host).expect("host should have a config");
                    let _ = sender.send(RepoResponse::Config(config.to_owned()));
                }
            }
            Err(_) => {
                println!("Timeout: Not all hosts announced within the time limit");
                let mut waiting_hosts = self.waiting_hosts.lock().unwrap();
                for (_, sender) in waiting_hosts.drain() {
                    let _ = sender.send(RepoResponse::Timeout);
                }
            }
        }
    }
    /// Poll the registration map once per second until it holds at least
    /// `n_hosts` entries.
    async fn wait_for_hosts(&self) {
        loop {
            if self.waiting_hosts.lock().unwrap().len() >= self.n_hosts {
                break;
            }
            tokio::time::sleep(tokio::time::Duration::from_secs(1)).await;
        }
    }
}

View File

@ -1,8 +0,0 @@
backend:
host: 0.0.0.0
port: 4007
log_level: "fatal"
node_key: "0000000000000000000000000000000000000000000000000000000000000667"
discV5BootstrapNodes: []
initial_peers: ["/dns/bootstrap/udp/3000/quic-v1"]
relayTopics: []

View File

@ -1,10 +0,0 @@
log:
backend: "Stdout"
format: "Json"
level: "info"
api:
backend_settings:
address: 0.0.0.0:9090
cors_origins: []

View File

@ -1,68 +0,0 @@
log:
backend: "Stdout"
format: "Json"
level: "info"
cryptarchia:
config:
epoch_stake_distribution_stabilization: 3
epoch_period_nonce_buffer: 3
epoch_period_nonce_stabilization: 4
consensus_config:
security_param: 10
active_slot_coeff: 0.9
time:
slot_duration:
secs: 5
nanos: 0
chain_start_time: [2024, 115, 6, 45, 44, 159214915, 0, 0, 0]
coins:
genesis_state:
lead_commitments:
- 20345e93cc65057a391893cbd88d86568efd3073156564797e4a912e4ae1c3ab
- 1594ef82f13d0b64284a9134f2f2ed3b30bca26812a69886a3f9ed737f117bd5
- 76721421649fbf175aff27470e40f44ade69bac844abcf27215f5c0d79d2ec46
- 06f7f2078ba6b24af7c5aae6f24889f6c609195ad796fb11b42ad6e0a3f8c10f
spend_commitments:
- 20345e93cc65057a391893cbd88d86568efd3073156564797e4a912e4ae1c3ab
- 1594ef82f13d0b64284a9134f2f2ed3b30bca26812a69886a3f9ed737f117bd5
- 76721421649fbf175aff27470e40f44ade69bac844abcf27215f5c0d79d2ec46
- 06f7f2078ba6b24af7c5aae6f24889f6c609195ad796fb11b42ad6e0a3f8c10f
nullifiers: []
nonce: '0000000000000000000000000000000000000000000000000000000000000000'
slot: 0
next_epoch_state:
epoch: 1
nonce: '0000000000000000000000000000000000000000000000000000000000000000'
commitments: []
total_stake: 4
epoch_state:
epoch: 0
nonce: '0000000000000000000000000000000000000000000000000000000000000000'
commitments: []
total_stake: 4
network:
backend:
host: 0.0.0.0
port: 3000
log_level: "fatal"
node_key: "0000000000000000000000000000000000000000000000000000000000000001"
discV5BootstrapNodes: []
initial_peers: ["/dns/bootstrap/udp/3000/quic-v1"]
relayTopics: []
http:
backend_settings:
address: 0.0.0.0:18080
cors_origins: []
da:
da_protocol:
voter: [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
num_attestations: 1
backend:
max_capacity: 10000
evicting_period:
secs: 3600
nanos: 0

View File

@ -1,22 +0,0 @@
#!/bin/sh
set -e
# This node id will be used to generate consensus node list.
tmp_node_id=0
# OVERLAY_NODES might be set in compose.yml.
tmp_overlay_nodes=$OVERLAY_NODES
# All spawned nodes should be added to consensus configuration.
for i in $(seq 1 $LIBP2P_REPLICAS); do
tmp_node_id=$((tmp_node_id + 1))
node_key=$(/etc/nomos/scripts/node_key_from_id.sh "$LIBP2P_NODE_MASK" "$tmp_node_id")
if [ -z "$tmp_overlay_nodes" ]; then
tmp_overlay_nodes=$node_key
else
tmp_overlay_nodes="${tmp_overlay_nodes},${node_key}"
fi
done
echo "${tmp_overlay_nodes}"

View File

@ -1,17 +0,0 @@
#!/bin/sh
set -e
if [ -z "$1" ] || [ -z "$2" ]; then
echo "Usage: $0 <libp2p_node_mask> <node_id>"
exit 1
fi
libp2p_node_mask=$1
node_id=$2
node_key_from_id() {
echo "${libp2p_node_mask}" | sed "s/.\{${#node_id}\}$/${node_id}/"
}
node_key_from_id

View File

@ -1,45 +0,0 @@
#!/bin/sh
# LIBP2P_NODE_MASK is set via compose.yml file.
node_key_from_id() {
echo "${LIBP2P_NODE_MASK}" | sed "s/.\{${#NODE_ID}\}$/${NODE_ID}/"
}
END=$LIBP2P_REPLICAS
NODE_ID=1
NODE_IP=$(hostname -i)
NODE_KEY=$(node_key_from_id)
register_node() {
## Conditional transaction to set node config key if it doesn't exist.
## Newlines in EOF block are important, more info here:
## https://github.com/etcd-io/etcd/tree/main/etcdctl#examples-3
etcdctl txn <<EOF
mod("/node/${NODE_ID}") = "0"
put /node/${NODE_ID} "${NODE_ID}"
put /config/node/${NODE_ID}/key "${NODE_KEY}"
put /config/node/${NODE_ID}/ip "${NODE_IP}"
EOF
}
while [ "${NODE_ID}" -le "${END}" ]; do
result=$(register_node)
# Check if the key was registered or already exists
if [ "${result}" != "FAILURE" ]; then
break
else
NODE_ID=$((NODE_ID + 1))
NODE_KEY=$(node_key_from_id)
fi
done
if [ "${NODE_ID}" -gt "${END}" ]; then
echo "Reached the limit without registering a ${NODE_ID}."
return 1
fi
echo "${NODE_KEY}"

View File

@ -1,28 +0,0 @@
#!/bin/sh
set -e
CONSENSUS_CHAIN_START=$(date +%s)
CONSENSUS_COIN_SK=$BOOTSTRAP_NODE_KEY
CONSENSUS_COIN_NONCE=$BOOTSTRAP_NODE_KEY
CONSENSUS_COIN_VALUE=1
DA_VOTER=$BOOTSTRAP_NODE_KEY
NET_NODE_KEY=$BOOTSTRAP_NODE_KEY
OVERLAY_NODES=$(/etc/nomos/scripts/consensus_node_list.sh)
export CONSENSUS_COIN_SK \
CONSENSUS_COIN_NONCE \
CONSENSUS_COIN_VALUE \
CONSENSUS_CHAIN_START \
DA_VOTER \
OVERLAY_NODES \
NET_NODE_KEY
echo "I am a container ${HOSTNAME} node ${NET_NODE_KEY}"
echo "CONSENSUS_COIN_SK: ${CONSENSUS_COIN_SK}"
echo "CONSENSUS_COIN_NONCE: ${CONSENSUS_COIN_NONCE}"
echo "CONSENSUS_COIN_VALUE: ${CONSENSUS_COIN_VALUE}"
echo "DA_VOTER: ${DA_VOTER}"
echo "OVERLAY_NODES: ${OVERLAY_NODES}"
exec /usr/bin/nomos-node /etc/nomos/bootstrap_config.yaml --with-metrics --log-backend gelf --log-addr graylog:12201

5
testnet/scripts/run_cfgsync.sh Executable file
View File

@ -0,0 +1,5 @@
#!/bin/sh
set -e

# Launch the cfgsync HTTP server with the config baked into the image.
exec /usr/bin/cfgsync-server /etc/nomos/cfgsync.yaml

View File

@ -1,9 +0,0 @@
#!/bin/sh
echo "I am a container ${HOSTNAME} bot"
while true
do
/usr/bin/nomos-cli chat --author nomos-ghost --message "$(date +%H:%M:%S) ~ ping" --network-config /etc/nomos/cli_config.yaml --node http://bootstrap:18080
sleep 10
done

View File

@ -2,43 +2,10 @@
set -e
# Set env variables for nomos-node.
NET_NODE_KEY=$(/etc/nomos/scripts/register_node.sh)
CONSENSUS_CHAIN_START=$(date +%s)
CONSENSUS_COIN_SK=$NET_NODE_KEY
CONSENSUS_COIN_NONCE=$NET_NODE_KEY
CONSENSUS_COIN_VALUE=1
DA_VOTER=$NET_NODE_KEY
OVERLAY_NODES=$(/etc/nomos/scripts/consensus_node_list.sh)
export CFG_FILE_PATH="/config.yaml" \
CFG_SERVER_ADDR="http://cfgsync:4400" \
CFG_HOST_IP=$(hostname -i) \
RISC0_DEV_MODE=true
node_ids=$(etcdctl get "/node/" --prefix --keys-only)
for node_id in $node_ids; do
node_key=$(etcdctl get "/config${node_id}/key" --print-value-only)
node_ip=$(etcdctl get "/config${node_id}/ip" --print-value-only)
node_multiaddr="/ip4/${node_ip}/udp/3000/quic-v1"
if [ -z "$NET_INITIAL_PEERS" ]; then
NET_INITIAL_PEERS=$node_multiaddr
else
NET_INITIAL_PEERS="${NET_INITIAL_PEERS},${node_multiaddr}"
fi
done
export CONSENSUS_COIN_SK \
CONSENSUS_COIN_NONCE \
CONSENSUS_COIN_VALUE \
CONSENSUS_CHAIN_START \
DA_VOTER \
OVERLAY_NODES \
NET_NODE_KEY \
NET_INITIAL_PEERS
echo "I am a container ${HOSTNAME} node ${NET_NODE_KEY}"
echo "CONSENSUS_COIN_SK: ${CONSENSUS_COIN_SK}"
echo "CONSENSUS_COIN_NONCE: ${CONSENSUS_COIN_NONCE}"
echo "CONSENSUS_COIN_VALUE: ${CONSENSUS_COIN_VALUE}"
echo "DA_VOTER: ${DA_VOTER}"
echo "OVERLAY_NODES: ${OVERLAY_NODES}"
echo "NET_INITIAL_PEERS: ${NET_INITIAL_PEERS}"
exec /usr/bin/nomos-node /etc/nomos/libp2p_config.yaml --with-metrics --log-backend gelf --log-addr graylog:12201
/usr/bin/cfgsync-client && \
exec /usr/bin/nomos-node /config.yaml --with-metrics --log-backend gelf --log-addr graylog:12201

View File

@ -10,7 +10,7 @@ use std::time::Duration;
use std::{fmt::Debug, sync::Mutex};
//crates
use nomos_libp2p::{Multiaddr, PeerId, Swarm};
use nomos_libp2p::{Multiaddr, Swarm};
use nomos_node::Config;
use rand::{thread_rng, Rng};
@ -59,12 +59,8 @@ pub trait Node: Sized {
}
fn node_configs(config: SpawnConfig) -> Vec<Config> {
match config {
SpawnConfig::Star {
consensus,
da,
test,
} => {
let mut configs = Self::create_node_configs(consensus, da, test);
SpawnConfig::Star { consensus, da } => {
let mut configs = Self::create_node_configs(consensus, da);
let next_leader_config = configs.remove(0);
let first_node_addr = node_address(&next_leader_config);
let mut node_configs = vec![next_leader_config];
@ -78,12 +74,8 @@ pub trait Node: Sized {
}
node_configs
}
SpawnConfig::Chain {
consensus,
da,
test,
} => {
let mut configs = Self::create_node_configs(consensus, da, test);
SpawnConfig::Chain { consensus, da } => {
let mut configs = Self::create_node_configs(consensus, da);
let next_leader_config = configs.remove(0);
let mut prev_node_addr = node_address(&next_leader_config);
let mut node_configs = vec![next_leader_config];
@ -97,11 +89,7 @@ pub trait Node: Sized {
}
}
}
fn create_node_configs(
consensus: ConsensusConfig,
da: DaConfig,
test: TestConfig,
) -> Vec<Config>;
fn create_node_configs(consensus: ConsensusConfig, da: DaConfig) -> Vec<Config>;
async fn consensus_info(&self) -> Self::ConsensusInfo;
fn stop(&mut self);
}
@ -112,19 +100,17 @@ pub enum SpawnConfig {
Star {
consensus: ConsensusConfig,
da: DaConfig,
test: TestConfig,
},
// Chain topology: Every node is chained to the node next to it.
Chain {
consensus: ConsensusConfig,
da: DaConfig,
test: TestConfig,
},
}
impl SpawnConfig {
// Returns a SpawnConfig::Chain with proper configurations for happy-path tests
pub fn chain_happy(n_participants: usize, da: DaConfig, test: TestConfig) -> Self {
pub fn chain_happy(n_participants: usize, da: DaConfig) -> Self {
Self::Chain {
consensus: ConsensusConfig {
n_participants,
@ -136,11 +122,10 @@ impl SpawnConfig {
active_slot_coeff: 0.9,
},
da,
test,
}
}
pub fn star_happy(n_participants: usize, da: DaConfig, test: TestConfig) -> Self {
pub fn star_happy(n_participants: usize, da: DaConfig) -> Self {
Self::Star {
consensus: ConsensusConfig {
n_participants,
@ -152,7 +137,6 @@ impl SpawnConfig {
active_slot_coeff: 0.9,
},
da,
test,
}
}
}
@ -175,7 +159,6 @@ pub struct ConsensusConfig {
pub struct DaConfig {
pub subnetwork_size: usize,
pub dispersal_factor: usize,
pub executor_peer_ids: Vec<PeerId>,
pub num_samples: u16,
pub num_subnets: u16,
pub old_blobs_check_interval: Duration,
@ -188,7 +171,6 @@ impl Default for DaConfig {
Self {
subnetwork_size: 2,
dispersal_factor: 1,
executor_peer_ids: vec![],
num_samples: 1,
num_subnets: 2,
old_blobs_check_interval: Duration::from_secs(5),
@ -197,16 +179,3 @@ impl Default for DaConfig {
}
}
}
#[derive(Clone)]
pub struct TestConfig {
pub wait_online_secs: u64,
}
impl Default for TestConfig {
fn default() -> Self {
Self {
wait_online_secs: 10,
}
}
}

View File

@ -46,7 +46,7 @@ use tempfile::NamedTempFile;
use time::OffsetDateTime;
// internal
use super::{create_tempdir, persist_tempdir, LOGS_PREFIX};
use crate::{adjust_timeout, get_available_port, ConsensusConfig, DaConfig, Node, TestConfig};
use crate::{adjust_timeout, get_available_port, ConsensusConfig, DaConfig, Node};
static CLIENT: Lazy<Client> = Lazy::new(Client::new);
const CRYPTARCHIA_INFO_API: &str = "cryptarchia/info";
@ -86,7 +86,6 @@ impl NomosNode {
let dir = create_tempdir().unwrap();
let mut file = NamedTempFile::new().unwrap();
let config_path = file.path().to_owned();
let wait_online_secs = config.wait_online_secs;
// setup logging so that we can intercept it later in testing
config.log.backend = LoggerBackend::File {
@ -119,10 +118,9 @@ impl NomosNode {
_tempdir: dir,
config,
};
tokio::time::timeout(
adjust_timeout(Duration::from_secs(wait_online_secs)),
async { node.wait_online().await },
)
tokio::time::timeout(adjust_timeout(Duration::from_secs(10)), async {
node.wait_online().await
})
.await
.unwrap();
@ -266,11 +264,7 @@ impl Node for NomosNode {
/// so the leader can receive votes from all other nodes that will be subsequently spawned.
/// If not, the leader will miss votes from nodes spawned before itself.
/// This issue will be resolved by devising the block catch-up mechanism in the future.
fn create_node_configs(
consensus: ConsensusConfig,
da: DaConfig,
test: TestConfig,
) -> Vec<Config> {
fn create_node_configs(consensus: ConsensusConfig, da: DaConfig) -> Vec<Config> {
// we use the same random bytes for:
// * da id
// * coin sk
@ -329,7 +323,6 @@ impl Node for NomosNode {
vec![coin],
time_config.clone(),
da.clone(),
test.wait_online_secs,
#[cfg(feature = "mixnet")]
MixnetConfig {
mixclient: mixclient_config.clone(),
@ -341,8 +334,7 @@ impl Node for NomosNode {
// Build DA memberships and address lists.
let peer_addresses = build_da_peer_list(&configs);
let mut peer_ids = peer_addresses.iter().map(|(p, _)| *p).collect::<Vec<_>>();
peer_ids.extend(da.executor_peer_ids);
let peer_ids = peer_addresses.iter().map(|(p, _)| *p).collect::<Vec<_>>();
for config in &mut configs {
let membership =
@ -459,10 +451,10 @@ fn create_node_config(
notes: Vec<InputWitness>,
time: TimeConfig,
da_config: DaConfig,
wait_online_secs: u64,
#[cfg(feature = "mixnet")] mixnet_config: MixnetConfig,
) -> Config {
let swarm_config: SwarmConfig = Default::default();
let node_key = swarm_config.node_key.clone();
let verifier_sk = SecretKey::key_gen(&id, &[]).unwrap();
let verifier_sk_bytes = verifier_sk.to_bytes();
@ -470,7 +462,7 @@ fn create_node_config(
let mut config = Config {
network: NetworkConfig {
backend: Libp2pConfig {
inner: swarm_config.clone(),
inner: swarm_config,
initial_peers: vec![],
#[cfg(feature = "mixnet")]
mixnet: mixnet_config,
@ -486,7 +478,7 @@ fn create_node_config(
},
da_network: DaNetworkConfig {
backend: DaNetworkBackendSettings {
node_key: swarm_config.node_key,
node_key,
listening_address: Multiaddr::from_str(&format!(
"/ip4/127.0.0.1/udp/{}/quic-v1",
get_available_port(),
@ -538,7 +530,6 @@ fn create_node_config(
read_only: false,
column_family: Some("blocks".into()),
},
wait_online_secs,
};
config.network.backend.inner.port = get_available_port();

View File

@ -122,9 +122,6 @@ async fn disseminate_and_retrieve() {
num_subnets: 2,
..Default::default()
},
tests::TestConfig {
wait_online_secs: 50,
},
))
.await;

View File

@ -52,11 +52,6 @@ async fn happy_test(nodes: &[NomosNode]) {
#[tokio::test]
async fn two_nodes_happy() {
let nodes = NomosNode::spawn_nodes(SpawnConfig::star_happy(
2,
Default::default(),
Default::default(),
))
.await;
let nodes = NomosNode::spawn_nodes(SpawnConfig::star_happy(2, Default::default())).await;
happy_test(&nodes).await;
}