From 6226f515989a7c64678209fc40de41e0f3d4f573 Mon Sep 17 00:00:00 2001
From: andrussal
Date: Fri, 6 Mar 2026 13:25:11 +0100
Subject: [PATCH] feat(compose): expose deploy metadata for attach node-control tests

---
 .../tests/compose_attach_node_control.rs      | 71 +++++--------------
 .../deployers/compose/src/deployer/mod.rs     | 26 +++++++
 .../compose/src/deployer/orchestrator.rs      | 42 ++++++++++-
 3 files changed, 82 insertions(+), 57 deletions(-)

diff --git a/logos/examples/tests/compose_attach_node_control.rs b/logos/examples/tests/compose_attach_node_control.rs
index 4a85422..0248e5a 100644
--- a/logos/examples/tests/compose_attach_node_control.rs
+++ b/logos/examples/tests/compose_attach_node_control.rs
@@ -1,5 +1,4 @@
 use std::{
-    env,
     process::{Command, Stdio},
     thread,
     time::Duration,
@@ -8,20 +7,23 @@ use std::{
 use anyhow::{Result, anyhow};
 use lb_ext::{CoreBuilderExt as _, LbcComposeDeployer, LbcExtEnv, ScenarioBuilder};
 use testing_framework_core::scenario::{AttachSource, Deployer as _, Runner};
+use testing_framework_runner_compose::ComposeRunnerError;
 
 #[tokio::test]
+#[ignore = "requires Docker and mutates compose runtime state"]
 async fn compose_attach_mode_restart_node_opt_in() -> Result<()> {
-    if env::var("TF_RUN_COMPOSE_ATTACH_NODE_CONTROL").is_err() {
-        return Ok(());
-    }
-
     let managed = ScenarioBuilder::deployment_with(|d| d.with_node_count(1))
         .enable_node_control()
         .with_run_duration(Duration::from_secs(5))
         .build()?;
 
     let deployer = LbcComposeDeployer::default();
-    let managed_runner: Runner<LbcExtEnv> = deployer.deploy(&managed).await?;
+    let (managed_runner, metadata): (Runner<LbcExtEnv>, _) =
+        match deployer.deploy_with_metadata(&managed).await {
+            Ok(result) => result,
+            Err(ComposeRunnerError::DockerUnavailable) => return Ok(()),
+            Err(error) => return Err(anyhow::Error::new(error)),
+        };
     let managed_client = managed_runner
         .context()
         .node_clients()
@@ -28,12 +30,14 @@
         .await?
         .into_iter()
         .next()
         .ok_or_else(|| anyhow!("managed compose runner returned no node clients"))?;
-    let api_port = managed_client
+    managed_client
         .base_url()
         .port()
         .ok_or_else(|| anyhow!("managed node base url has no port"))?;
-    let project_name = discover_compose_project_by_published_port(api_port)?;
+    let project_name = metadata
+        .project_name
+        .ok_or_else(|| anyhow!("compose metadata did not include project name"))?;
 
     let attached = ScenarioBuilder::deployment_with(|d| d.with_node_count(1))
         .enable_node_control()
@@ -44,7 +48,11 @@
         .with_(
         )
         .build()?;
-    let attached_runner: Runner<LbcExtEnv> = deployer.deploy(&attached).await?;
+    let attached_runner: Runner<LbcExtEnv> = match deployer.deploy(&attached).await {
+        Ok(runner) => runner,
+        Err(ComposeRunnerError::DockerUnavailable) => return Ok(()),
+        Err(error) => return Err(anyhow::Error::new(error)),
+    };
     let pre_restart_container = discover_compose_service_container(&project_name, "node-0")?;
     let pre_restart_started_at = inspect_container_started_at(&pre_restart_container)?;
     let control = attached_runner
@@ -65,51 +73,6 @@
     thread::sleep(Duration::from_secs(2));
     Ok(())
 }
-
-fn discover_compose_project_by_published_port(port: u16) -> Result<String> {
-    let container_ids = run_docker_capture(["ps", "-q"])?;
-    let host_port_token = format!("\"HostPort\":\"{port}\"");
-    let mut matching_projects = Vec::new();
-
-    for container_id in container_ids
-        .lines()
-        .map(str::trim)
-        .filter(|id| !id.is_empty())
-    {
-        let ports = run_docker_capture([
-            "inspect",
-            "--format",
-            "{{json .NetworkSettings.Ports}}",
-            container_id,
-        ])?;
-        if !ports.contains(&host_port_token) {
-            continue;
-        }
-
-        let project = run_docker_capture([
-            "inspect",
-            "--format",
-            "{{ index .Config.Labels \"com.docker.compose.project\" }}",
-            container_id,
-        ])?;
-        let project = project.trim();
-        if !project.is_empty() {
-            matching_projects.push(project.to_owned());
-        }
-    }
-
-    match matching_projects.as_slice() {
-        [project] => Ok(project.clone()),
-        [] => Err(anyhow!(
-            "no compose project found exposing api host port {port}"
-        )),
-        _ => Err(anyhow!(
-            "multiple compose projects expose api host port {port}: {:?}",
-            matching_projects
-        )),
-    }
-}
-
 fn discover_compose_service_container(project: &str, service: &str) -> Result<String> {
     let container = run_docker_capture([
         "ps",
diff --git a/testing-framework/deployers/compose/src/deployer/mod.rs b/testing-framework/deployers/compose/src/deployer/mod.rs
index 6e45367..4430a7d 100644
--- a/testing-framework/deployers/compose/src/deployer/mod.rs
+++ b/testing-framework/deployers/compose/src/deployer/mod.rs
@@ -22,6 +22,13 @@ pub struct ComposeDeployer {
     _env: PhantomData<E>,
 }
 
+/// Compose deployment metadata returned by compose-specific deployment APIs.
+#[derive(Clone, Debug, Eq, PartialEq)]
+pub struct ComposeDeploymentMetadata {
+    /// Docker Compose project name used for this deployment when available.
+    pub project_name: Option<String>,
+}
+
 impl<E> Default for ComposeDeployer<E> {
     fn default() -> Self {
         Self::new()
     }
 }
@@ -42,6 +49,25 @@ impl ComposeDeployer {
         self.readiness_checks = enabled;
         self
     }
+
+    /// Deploy and return compose-specific metadata alongside the generic
+    /// runner.
+    pub async fn deploy_with_metadata<Caps>(
+        &self,
+        scenario: &Scenario<E, Caps>,
+    ) -> Result<(Runner<E>, ComposeDeploymentMetadata), ComposeRunnerError>
+    where
+        Caps: RequiresNodeControl + ObservabilityCapabilityProvider + Send + Sync,
+    {
+        let deployer = Self {
+            readiness_checks: self.readiness_checks,
+            _env: PhantomData,
+        };
+
+        orchestrator::DeploymentOrchestrator::new(deployer)
+            .deploy_with_metadata(scenario)
+            .await
+    }
 }
 
 #[async_trait]
diff --git a/testing-framework/deployers/compose/src/deployer/orchestrator.rs b/testing-framework/deployers/compose/src/deployer/orchestrator.rs
index 66cb9e6..7889f68 100644
--- a/testing-framework/deployers/compose/src/deployer/orchestrator.rs
+++ b/testing-framework/deployers/compose/src/deployer/orchestrator.rs
@@ -14,7 +14,7 @@ use testing_framework_core::{
 use tracing::info;
 
 use super::{
-    ComposeDeployer,
+    ComposeDeployer, ComposeDeploymentMetadata,
    attach_provider::ComposeAttachProvider,
    clients::ClientBuilder,
    make_cleanup_guard,
@@ -48,6 +48,18 @@
         &self,
         scenario: &Scenario<E, Caps>,
     ) -> Result<Runner<E>, ComposeRunnerError>
+    where
+        Caps: RequiresNodeControl + ObservabilityCapabilityProvider + Send + Sync,
+    {
+        self.deploy_with_metadata(scenario)
+            .await
+            .map(|(runner, _)| runner)
+    }
+
+    pub async fn deploy_with_metadata<Caps>(
+        &self,
+        scenario: &Scenario<E, Caps>,
+    ) -> Result<(Runner<E>, ComposeDeploymentMetadata), ComposeRunnerError>
     where
         Caps: RequiresNodeControl + ObservabilityCapabilityProvider + Send + Sync,
     {
@@ -61,7 +73,8 @@
         if scenario.sources().is_attached() {
             return self
                 .deploy_attached_only::<Caps>(scenario, source_plan)
-                .await;
+                .await
+                .map(|runner| (runner, attached_metadata(scenario)));
         }
 
         let deployment = scenario.deployment();
@@ -103,6 +116,8 @@
             .await
             .map_err(|source| ComposeRunnerError::SourceOrchestration { source })?;
 
+        let project_name = prepared.environment.project_name().to_owned();
+
         let runner = self
             .build_runner::<Caps>(
                 scenario,
@@ -121,7 +136,12 @@
             readiness_enabled,
         );
 
-        Ok(runner)
+        Ok((
+            runner,
+            ComposeDeploymentMetadata {
+                project_name: Some(project_name),
+            },
+        ))
     }
 
     async fn deploy_attached_only<Caps>(
@@ -304,6 +324,22 @@
     }
 }
 
+fn attached_metadata<E, Caps>(scenario: &Scenario<E, Caps>) -> ComposeDeploymentMetadata
+where
+    E: ComposeDeployEnv,
+    Caps: Send + Sync,
+{
+    let project_name = match scenario.sources() {
+        ScenarioSources::Attached {
+            attach: AttachSource::Compose { project, .. },
+            ..
+        } => project.clone(),
+        _ => None,
+    };
+
+    ComposeDeploymentMetadata { project_name }
+}
+
 struct DeployedNodes {
     host_ports: HostPortMapping,
     host: String,