refactor(compose): keep metadata API minimal and test-local restart checks

This commit is contained in:
andrussal 2026-03-06 14:08:56 +01:00
parent 25d5a4859b
commit d9c99322c7
3 changed files with 84 additions and 171 deletions

View File

@ -4,6 +4,7 @@ use anyhow::{Result, anyhow};
use lb_ext::{CoreBuilderExt as _, LbcComposeDeployer, LbcExtEnv, ScenarioBuilder};
use testing_framework_core::scenario::{AttachSource, Deployer as _, Runner};
use testing_framework_runner_compose::{ComposeDeploymentMetadata, ComposeRunnerError};
use tokio::process::Command;
#[tokio::test]
#[ignore = "requires Docker and mutates compose runtime state"]
@ -25,7 +26,7 @@ async fn compose_attach_mode_restart_node_opt_in() -> Result<()> {
.project_name()
.ok_or_else(|| anyhow!("compose deployment metadata has no project name"))?
.to_owned();
let attach_source = AttachSource::compose(vec![]).with_project(project_name);
let attach_source = AttachSource::compose(vec![]).with_project(project_name.clone());
let attached = ScenarioBuilder::deployment_with(|d| d.with_node_count(1))
.enable_node_control()
@ -56,25 +57,94 @@ async fn compose_attach_mode_restart_node_opt_in() -> Result<()> {
}
for service in services {
let pre_restart_started_at = metadata
.service_started_at(&service)
.await
.map_err(|err| anyhow!("{err}"))?;
let pre_restart_started_at = service_started_at(&project_name, &service).await?;
control
.restart_node(&service)
.await
.map_err(|err| anyhow!("attached restart failed for {service}: {err}"))?;
metadata
.wait_until_service_restarted(
&service,
&pre_restart_started_at,
Duration::from_secs(30),
)
.await
.map_err(|err| anyhow!("{err}"))?;
wait_until_service_restarted(
&project_name,
&service,
&pre_restart_started_at,
Duration::from_secs(30),
)
.await?;
}
Ok(())
}
/// Resolves the `{{.State.StartedAt}}` timestamp of the running container
/// backing `service` within the compose project `project`.
///
/// Fails when no matching running container is found or when
/// `docker inspect` yields an empty timestamp.
async fn service_started_at(project: &str, service: &str) -> Result<String> {
    // Locate the container via compose labels; only its ID is printed.
    let project_filter = format!("label=com.docker.compose.project={project}");
    let service_filter = format!("label=com.docker.compose.service={service}");
    let ps_output = run_docker(&[
        "ps",
        "--filter",
        &project_filter,
        "--filter",
        &service_filter,
        "--format",
        "{{.ID}}",
    ])
    .await?;

    // First non-empty line is the container ID we inspect.
    let id = match ps_output.lines().next().map(str::trim) {
        Some(value) if !value.is_empty() => value,
        _ => return Err(anyhow!("no running container found for service '{service}'")),
    };

    let inspected = run_docker(&["inspect", "--format", "{{.State.StartedAt}}", id]).await?;
    let timestamp = inspected.trim();
    if timestamp.is_empty() {
        return Err(anyhow!(
            "docker inspect returned empty StartedAt for service '{service}'"
        ));
    }
    Ok(timestamp.to_owned())
}
/// Polls the service's container until its StartedAt timestamp differs from
/// `previous_started_at`, i.e. the container was restarted.
///
/// Polls every 500ms and errors out once `timeout` has elapsed without
/// observing a change.
async fn wait_until_service_restarted(
    project: &str,
    service: &str,
    previous_started_at: &str,
    timeout: Duration,
) -> Result<()> {
    let give_up_at = std::time::Instant::now() + timeout;
    while service_started_at(project, service).await? == previous_started_at {
        // The deadline is checked only after an unchanged observation, so a
        // restart seen on the final poll still counts as success.
        if std::time::Instant::now() >= give_up_at {
            return Err(anyhow!(
                "timed out waiting for restarted compose service '{service}'"
            ));
        }
        tokio::time::sleep(Duration::from_millis(500)).await;
    }
    Ok(())
}
/// Runs `docker <args…>` and returns its stdout, lossily decoded as UTF-8.
///
/// A non-zero exit status becomes an error carrying the invoked arguments,
/// the exit status, and the trimmed stderr text.
async fn run_docker(args: &[&str]) -> Result<String> {
    let result = Command::new("docker").args(args).output().await?;
    if result.status.success() {
        Ok(String::from_utf8_lossy(&result.stdout).to_string())
    } else {
        Err(anyhow!(
            "docker {} failed with status {}: {}",
            args.join(" "),
            result.status,
            String::from_utf8_lossy(&result.stderr).trim()
        ))
    }
}

View File

@ -103,110 +103,6 @@ async fn scenario_builder_api_port_override() -> Result<()> {
Ok(())
}
/// Manual test: a single-node deployment with `identify.hide_listen_addrs`
/// overridden still produces a scenario whose node answers `consensus_info`.
#[tokio::test]
#[ignore = "run manually with `cargo test -p runner-examples -- --ignored scenario_builder_identify_hide_listen_addrs_override`"]
async fn scenario_builder_identify_hide_listen_addrs_override() -> Result<()> {
    let _ = try_init();
    // Required env vars (set on the command line when running this test):
    // - `POL_PROOF_DEV_MODE=true`
    // - `LOGOS_BLOCKCHAIN_NODE_BIN=...`
    // - `LOGOS_BLOCKCHAIN_CIRCUITS=...`
    // - `RUST_LOG=info` (optional)
    let builder = DeploymentBuilder::new(TopologyConfig::with_node_numbers(1));
    let descriptors = builder.clone().build()?;
    let node = descriptors.nodes().first().expect("node 0 descriptor");

    // Build node 0's run config and flip the identify override on.
    let node_override = descriptors.config().node_config_override(node.index());
    let mut run_config =
        build_node_run_config(&descriptors, node, node_override).expect("build run config");
    run_config
        .user
        .network
        .backend
        .swarm
        .identify
        .hide_listen_addrs = Some(true);

    let deployment = builder.with_node_config_override(0, run_config);
    let mut scenario = ScenarioBuilder::new(Box::new(deployment))
        .with_run_duration(Duration::from_secs(1))
        .build()?;

    // Deploy locally, run the scenario, and make sure a node client responds.
    let deployer = LbcLocalDeployer::default();
    let runner = deployer.deploy(&scenario).await?;
    let handle = runner.run(&mut scenario).await?;
    let client = handle
        .context()
        .random_node_client()
        .ok_or_else(|| anyhow::anyhow!("scenario did not expose any node clients"))?;
    client
        .consensus_info()
        .await
        .expect("consensus_info should succeed");
    Ok(())
}
/// Manual test: both nodes of a two-node deployment get the
/// `identify.hide_listen_addrs` override, and the scenario still exposes a
/// node client that answers `consensus_info`.
#[tokio::test]
#[ignore = "run manually with `cargo test -p runner-examples -- --ignored scenario_builder_identify_hide_listen_addrs_two_nodes`"]
async fn scenario_builder_identify_hide_listen_addrs_two_nodes() -> Result<()> {
    let _ = try_init();
    // Required env vars (set on the command line when running this test):
    // - `POL_PROOF_DEV_MODE=true`
    // - `LOGOS_BLOCKCHAIN_NODE_BIN=...`
    // - `LOGOS_BLOCKCHAIN_CIRCUITS=...`
    // - `RUST_LOG=info` (optional)
    let builder = DeploymentBuilder::new(TopologyConfig::with_node_numbers(2));
    let descriptors = builder.clone().build()?;

    // Apply the identify override to every node, re-threading the builder.
    let mut configured_builder = builder;
    for idx in 0..2 {
        let node = &descriptors.nodes()[idx];
        let node_override = descriptors.config().node_config_override(node.index());
        let mut run_config =
            build_node_run_config(&descriptors, node, node_override).expect("build run config");
        run_config
            .user
            .network
            .backend
            .swarm
            .identify
            .hide_listen_addrs = Some(true);
        configured_builder = configured_builder.with_node_config_override(idx, run_config);
    }

    let mut scenario = ScenarioBuilder::new(Box::new(configured_builder))
        .with_run_duration(Duration::from_secs(3))
        .build()?;

    // Deploy locally, run the scenario, and make sure a node client responds.
    let deployer = LbcLocalDeployer::default();
    let runner = deployer.deploy(&scenario).await?;
    let handle = runner.run(&mut scenario).await?;
    let client = handle
        .context()
        .random_node_client()
        .ok_or_else(|| anyhow::anyhow!("scenario did not expose any node clients"))?;
    client
        .consensus_info()
        .await
        .expect("consensus_info should succeed");
    Ok(())
}
fn random_api_port() -> u16 {
let listener = TcpListener::bind("127.0.0.1:0").expect("bind random API port");
listener.local_addr().expect("read API port").port()

View File

@ -5,14 +5,13 @@ pub mod ports;
pub mod readiness;
pub mod setup;
use std::{marker::PhantomData, time::Duration};
use std::marker::PhantomData;
use async_trait::async_trait;
use testing_framework_core::scenario::{
AttachSource, CleanupGuard, Deployer, DynError, FeedHandle, ObservabilityCapabilityProvider,
RequiresNodeControl, Runner, Scenario,
};
use tokio::time::sleep;
use crate::{env::ComposeDeployEnv, errors::ComposeRunnerError, lifecycle::cleanup::RunnerCleanup};
@ -49,58 +48,6 @@ impl ComposeDeploymentMetadata {
Ok(AttachSource::compose(services).with_project(project_name.to_owned()))
}
/// Returns the current StartedAt timestamp for a compose service container.
///
/// Fails when the metadata carries no project name, when the service's
/// container cannot be discovered, or when `docker inspect` reports an
/// empty timestamp.
pub async fn service_started_at(&self, service: &str) -> Result<String, DynError> {
    let project_name = match self.project_name() {
        Some(name) => name,
        None => return Err("compose metadata has no project name".into()),
    };
    let container_id =
        crate::docker::attached::discover_service_container_id(project_name, service).await?;
    let inspected = crate::docker::attached::run_docker_capture([
        "inspect",
        "--format",
        "{{.State.StartedAt}}",
        &container_id,
    ])
    .await?;
    // An empty (post-trim) timestamp indicates inspect produced no value.
    match inspected.trim() {
        "" => Err(format!(
            "docker inspect returned empty StartedAt for compose service '{service}'"
        )
        .into()),
        timestamp => Ok(timestamp.to_owned()),
    }
}
/// Waits until a service container reports a different StartedAt timestamp.
///
/// Polls every 500ms; returns an error once `timeout` elapses without the
/// timestamp changing from `previous_started_at`.
pub async fn wait_until_service_restarted(
    &self,
    service: &str,
    previous_started_at: &str,
    timeout: Duration,
) -> Result<(), DynError> {
    let give_up_at = std::time::Instant::now() + timeout;
    while self.service_started_at(service).await? == previous_started_at {
        // Deadline is evaluated after each unchanged observation so a restart
        // seen on the final poll still succeeds.
        if std::time::Instant::now() >= give_up_at {
            return Err(
                format!("timed out waiting for restarted compose service '{service}'").into(),
            );
        }
        sleep(Duration::from_millis(500)).await;
    }
    Ok(())
}
}
impl<E: ComposeDeployEnv> Default for ComposeDeployer<E> {