feat(k8s): Attached mode support

This commit is contained in:
Andrus Salumets 2026-03-08 17:27:29 +07:00 committed by GitHub
commit 93161113db
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
8 changed files with 631 additions and 16 deletions

View File

@ -0,0 +1,55 @@
use std::time::Duration;
use anyhow::{Error, Result, anyhow};
use lb_ext::{CoreBuilderExt as _, LbcExtEnv, LbcK8sDeployer, ScenarioBuilder};
use testing_framework_core::scenario::{Deployer as _, Runner};
use testing_framework_runner_k8s::{K8sDeploymentMetadata, K8sRunnerError};
/// End-to-end opt-in check for attached mode: deploy a managed single-node
/// k8s scenario, derive an attach source from its metadata, re-attach a second
/// runner to the same cluster, and verify every discovered node answers an
/// API query.
///
/// Returns Ok early (effectively "skipped") when no kube client can be
/// initialized, so the test is a no-op outside cluster-enabled environments.
#[tokio::test]
#[ignore = "requires k8s cluster access and mutates k8s runtime state"]
async fn k8s_attach_mode_queries_node_api_opt_in() -> Result<()> {
// First deploy a managed cluster; its metadata carries the namespace and
// label selector needed to attach to the same services afterwards.
let managed = ScenarioBuilder::deployment_with(|d| d.with_node_count(1))
.with_run_duration(Duration::from_secs(5))
.build()?;
let managed_deployer = LbcK8sDeployer::default();
let (_managed_runner, metadata): (Runner<LbcExtEnv>, K8sDeploymentMetadata) =
match managed_deployer.deploy_with_metadata(&managed).await {
Ok(result) => result,
// No cluster access available: treat as skipped rather than failed.
Err(K8sRunnerError::ClientInit { .. }) => return Ok(()),
Err(error) => return Err(Error::new(error)),
};
// Second scenario attaches to the already-deployed services instead of
// deploying anything new.
let attach_source = metadata.attach_source().map_err(|err| anyhow!("{err}"))?;
let attached = ScenarioBuilder::deployment_with(|d| d.with_node_count(1))
.with_run_duration(Duration::from_secs(5))
.with_attach_source(attach_source)
.build()?;
let attached_deployer = LbcK8sDeployer::default();
let attached_runner: Runner<LbcExtEnv> = match attached_deployer.deploy(&attached).await {
Ok(runner) => runner,
Err(K8sRunnerError::ClientInit { .. }) => return Ok(()),
Err(error) => return Err(Error::new(error)),
};
// Attached runners defer readiness to their cluster-wait handle.
attached_runner
.wait_network_ready()
.await
.map_err(|err| anyhow!("k8s attached runner readiness failed: {err}"))?;
if attached_runner.context().node_clients().is_empty() {
return Err(anyhow!("k8s attach resolved no node clients"));
}
// Every attached node must actually answer a real API request.
for node_client in attached_runner.context().node_clients().snapshot() {
node_client.consensus_info().await.map_err(|err| {
anyhow!(
"attached node api query failed at {}: {err}",
node_client.base_url()
)
})?;
}
Ok(())
}

View File

@ -31,7 +31,7 @@ use crate::{
// Timeout budget (seconds) for cfgsync operations against the cluster.
const CFGSYNC_K8S_TIMEOUT_SECS: u64 = 300;
// Helm `fullnameOverride` applied to every deployed release.
const K8S_FULLNAME_OVERRIDE: &str = "logos-runner";
/// Default testnet image tag. The stale duplicate definition pointing at the
/// public ECR image was removed: two `const` items with the same name in one
/// scope do not compile (E0428).
const DEFAULT_K8S_TESTNET_IMAGE: &str = "logos-blockchain-testing:local";
/// Paths and image metadata required to deploy the Helm chart.
pub struct K8sAssets {

View File

@ -0,0 +1,333 @@
use std::marker::PhantomData;
use async_trait::async_trait;
use k8s_openapi::api::core::v1::Service;
use kube::{
Api, Client,
api::{ListParams, ObjectList},
};
use testing_framework_core::scenario::{
AttachProvider, AttachProviderError, AttachSource, AttachedNode, ClusterWaitHandle, DynError,
ExternalNodeSource, HttpReadinessRequirement, wait_http_readiness,
};
use url::Url;
use crate::{env::K8sDeployEnv, host::node_host};
/// Failure modes for discovering attachable node services in a k8s cluster.
#[derive(Debug, thiserror::Error)]
enum K8sAttachDiscoveryError {
// Caller supplied a selector that is blank after trimming.
#[error("k8s attach source requires a non-empty label selector")]
EmptyLabelSelector,
// The selector matched nothing usable in the target namespace.
#[error("no services matched label selector '{selector}' in namespace '{namespace}'")]
NoMatchingServices { namespace: String, selector: String },
// Defensive: the API returned a service object without `metadata.name`.
#[error("k8s service has no metadata.name")]
MissingServiceName,
// Service carries no TCP node ports at all.
#[error("service '{service}' has no TCP node ports exposed")]
ServiceHasNoNodePorts { service: String },
// More than one plausible API port and no way to disambiguate.
#[error(
"service '{service}' has multiple candidate API node ports ({ports}); explicit API port required"
)]
ServiceHasMultipleNodePorts { service: String, ports: String },
}
/// Discovers attachable nodes by listing k8s services matching an attach source.
pub(super) struct K8sAttachProvider<E: K8sDeployEnv> {
client: Client,
// Ties the provider to a deploy environment without storing one.
_env: PhantomData<E>,
}
/// Readiness waiter that re-discovers services for an attach source and polls
/// their HTTP readiness endpoints.
pub(super) struct K8sAttachedClusterWait<E: K8sDeployEnv> {
client: Client,
source: AttachSource,
_env: PhantomData<E>,
}
/// Borrowed namespace/selector pair resolved from an `AttachSource::K8s`.
struct K8sAttachRequest<'a> {
namespace: &'a str,
label_selector: &'a str,
}
impl<E: K8sDeployEnv> K8sAttachProvider<E> {
    /// Builds a provider around an already-initialized kube client.
    pub(super) fn new(client: Client) -> Self {
        let _env = PhantomData;
        Self { client, _env }
    }
}
impl<E: K8sDeployEnv> K8sAttachedClusterWait<E> {
    /// Builds a wait handle bound to one kube client and one attach source.
    pub(super) fn new(client: Client, source: AttachSource) -> Self {
        let _env = PhantomData;
        Self {
            client,
            source,
            _env,
        }
    }
}
#[async_trait]
impl<E: K8sDeployEnv> AttachProvider<E> for K8sAttachProvider<E> {
    /// Lists services matching the attach source and converts each one into an
    /// attached node, failing fast on the first service that cannot resolve.
    async fn discover(
        &self,
        source: &AttachSource,
    ) -> Result<Vec<AttachedNode<E>>, AttachProviderError> {
        let request = k8s_attach_request(source)?;
        let services = discover_services(&self.client, request.namespace, request.label_selector)
            .await
            .map_err(to_discovery_error)?;
        let host = node_host();
        // Fallible collect short-circuits on the first conversion error,
        // matching the original early-return loop.
        services
            .items
            .into_iter()
            .map(|service| build_attached_node::<E>(&host, service).map_err(to_discovery_error))
            .collect()
    }
}
/// Wraps any low-level discovery failure into the provider-level error variant.
fn to_discovery_error(source: DynError) -> AttachProviderError {
AttachProviderError::Discovery { source }
}
/// Validates an attach source and borrows its namespace/selector pair.
///
/// Non-k8s sources are rejected as unsupported; an all-whitespace selector is
/// rejected as a discovery error. A missing namespace falls back to "default".
fn k8s_attach_request(source: &AttachSource) -> Result<K8sAttachRequest<'_>, AttachProviderError> {
    match source {
        AttachSource::K8s {
            namespace,
            label_selector,
        } => {
            if label_selector.trim().is_empty() {
                return Err(AttachProviderError::Discovery {
                    source: K8sAttachDiscoveryError::EmptyLabelSelector.into(),
                });
            }
            Ok(K8sAttachRequest {
                namespace: namespace.as_deref().unwrap_or("default"),
                label_selector,
            })
        }
        other => Err(AttachProviderError::UnsupportedSource {
            attach_source: other.clone(),
        }),
    }
}
/// Converts one k8s `Service` into an attached node with an HTTP client.
///
/// The endpoint is composed from the shared node host plus the single API node
/// port extracted from the service; the service name doubles as the identity
/// hint.
fn build_attached_node<E: K8sDeployEnv>(
host: &str,
service: Service,
) -> Result<AttachedNode<E>, DynError> {
let service_name = service
.metadata
.name
.clone()
.ok_or(K8sAttachDiscoveryError::MissingServiceName)?;
let api_port = extract_api_node_port(&service)?;
let endpoint = format!("http://{host}:{api_port}/");
let source = ExternalNodeSource::new(service_name.clone(), endpoint);
let client = E::external_node_client(&source)?;
Ok(AttachedNode {
identity_hint: Some(service_name),
client,
})
}
/// Lists services matching `selector` in `namespace`, keeping only those with
/// at least one TCP node port; errors when nothing usable matches.
pub(super) async fn discover_services(
    client: &Client,
    namespace: &str,
    selector: &str,
) -> Result<ObjectList<Service>, DynError> {
    let api: Api<Service> = Api::namespaced(client.clone(), namespace);
    let listed = api.list(&ListParams::default().labels(selector)).await?;
    let usable = filter_services_with_tcp_node_ports(listed);
    if usable.items.is_empty() {
        Err(K8sAttachDiscoveryError::NoMatchingServices {
            namespace: namespace.to_owned(),
            selector: selector.to_owned(),
        }
        .into())
    } else {
        Ok(usable)
    }
}
/// Drops services that expose no TCP node ports, keeping list metadata intact.
///
/// Uses `Vec::retain` to filter in place instead of destructuring and
/// rebuilding the `ObjectList` (clippy `manual_retain`); the surviving items
/// and their order are identical to the original filter-and-collect version.
fn filter_services_with_tcp_node_ports(mut services: ObjectList<Service>) -> ObjectList<Service> {
    services
        .items
        .retain(|service| !tcp_node_ports(service).is_empty());
    services
}
/// Collects `(port name, node port)` pairs for every TCP port of a service.
///
/// Ports without a `nodePort`, with a `nodePort` outside the u16 range, or
/// with a non-TCP protocol are skipped; a missing protocol defaults to TCP and
/// a missing name becomes the empty string.
fn tcp_node_ports(service: &Service) -> Vec<(String, u16)> {
    let mut pairs = Vec::new();
    let Some(ports) = service.spec.as_ref().and_then(|spec| spec.ports.as_ref()) else {
        return pairs;
    };
    for port in ports {
        if port.protocol.as_deref().unwrap_or("TCP") != "TCP" {
            continue;
        }
        if let Some(node_port) = port.node_port.and_then(|value| u16::try_from(value).ok()) {
            pairs.push((port.name.clone().unwrap_or_default(), node_port));
        }
    }
    pairs
}
/// Resolves the single API node port of a service.
///
/// Errors when the service exposes no TCP node ports, or when more than one
/// candidate remains after `api_port_candidates` filtering (ambiguity must be
/// resolved by an explicitly named API port).
pub(super) fn extract_api_node_port(service: &Service) -> Result<u16, DynError> {
    // "<unknown>" keeps error messages readable for unnamed services.
    let service_name = service
        .metadata
        .name
        .clone()
        .unwrap_or_else(|| "<unknown>".to_owned());
    let candidates = api_port_candidates(tcp_node_ports(service));
    if candidates.is_empty() {
        return Err(K8sAttachDiscoveryError::ServiceHasNoNodePorts {
            service: service_name,
        }
        .into());
    }
    if let [port] = candidates.as_slice() {
        return Ok(*port);
    }
    let ports = candidates
        .iter()
        .map(ToString::to_string)
        .collect::<Vec<_>>()
        .join(", ");
    Err(K8sAttachDiscoveryError::ServiceHasMultipleNodePorts {
        service: service_name,
        ports,
    }
    .into())
}
/// Narrows named TCP node ports down to API-port candidates.
///
/// Preference order: ports explicitly named `http`/`api`; otherwise any port
/// whose name does not contain `testing`; otherwise every port. Input order is
/// preserved within each tier.
fn api_port_candidates(ports: Vec<(String, u16)>) -> Vec<u16> {
    let ports_matching = |keep: fn(&str) -> bool| -> Vec<u16> {
        ports
            .iter()
            .filter(|(name, _)| keep(name))
            .map(|(_, port)| *port)
            .collect()
    };
    let named_api = ports_matching(|name| name == "http" || name == "api");
    if !named_api.is_empty() {
        return named_api;
    }
    let non_testing = ports_matching(|name| !name.contains("testing"));
    if !non_testing.is_empty() {
        return non_testing;
    }
    ports.into_iter().map(|(_, port)| port).collect()
}
#[async_trait]
impl<E: K8sDeployEnv> ClusterWaitHandle<E> for K8sAttachedClusterWait<E> {
/// Re-discovers the attached services and blocks until every node's HTTP
/// readiness endpoint reports ready.
async fn wait_network_ready(&self) -> Result<(), DynError> {
let request = k8s_wait_request(&self.source)?;
let services =
discover_services(&self.client, request.namespace, request.label_selector).await?;
let host = node_host();
let endpoints = collect_readiness_endpoints::<E>(&host, &services.items)?;
// All nodes must be ready; partial readiness is not enough for attach.
wait_http_readiness(&endpoints, HttpReadinessRequirement::AllNodesReady).await?;
Ok(())
}
}
/// Resolves the namespace/selector pair for cluster-wait.
///
/// Mirrors `k8s_attach_request` validation but surfaces plain `DynError`s, as
/// the wait path has no provider-level error type.
fn k8s_wait_request(source: &AttachSource) -> Result<K8sAttachRequest<'_>, DynError> {
    match source {
        AttachSource::K8s {
            namespace,
            label_selector,
        } => {
            if label_selector.trim().is_empty() {
                return Err(K8sAttachDiscoveryError::EmptyLabelSelector.into());
            }
            Ok(K8sAttachRequest {
                namespace: namespace.as_deref().unwrap_or("default"),
                label_selector,
            })
        }
        _ => Err("k8s cluster wait requires a k8s attach source".into()),
    }
}
/// Builds one readiness URL per service: node host + extracted API node port,
/// with the path set to the environment's readiness endpoint.
fn collect_readiness_endpoints<E: K8sDeployEnv>(
    host: &str,
    services: &[Service],
) -> Result<Vec<Url>, DynError> {
    services
        .iter()
        .map(|service| {
            let api_port = extract_api_node_port(service)?;
            let mut endpoint = Url::parse(&format!("http://{host}:{api_port}/"))?;
            endpoint.set_path(E::readiness_path());
            Ok(endpoint)
        })
        .collect()
}
#[cfg(test)]
mod tests {
use k8s_openapi::api::core::v1::{Service, ServicePort, ServiceSpec};
use super::extract_api_node_port;
// A lone node port resolves even when it carries no name.
#[test]
fn extract_api_node_port_returns_single_port() {
let service = Service {
metadata: Default::default(),
spec: Some(ServiceSpec {
ports: Some(vec![ServicePort {
node_port: Some(31234),
..Default::default()
}]),
..Default::default()
}),
..Default::default()
};
let port = extract_api_node_port(&service).expect("single port should resolve");
assert_eq!(port, 31234);
}
// An explicitly "http"-named port wins over a "testing-http" sibling.
#[test]
fn extract_api_node_port_prefers_http_name() {
let service = Service {
metadata: Default::default(),
spec: Some(ServiceSpec {
ports: Some(vec![
ServicePort {
name: Some("testing-http".to_owned()),
node_port: Some(31234),
..Default::default()
},
ServicePort {
name: Some("http".to_owned()),
node_port: Some(31235),
..Default::default()
},
]),
..Default::default()
}),
..Default::default()
};
let port = extract_api_node_port(&service).expect("http-named port should resolve");
assert_eq!(port, 31235);
}
}

View File

@ -1,3 +1,46 @@
mod attach_provider;
mod orchestrator;
pub use orchestrator::{K8sDeployer, K8sRunnerError};
use testing_framework_core::scenario::{AttachSource, DynError};
/// Kubernetes deployment metadata returned by k8s-specific deployment APIs.
///
/// Both fields are optional because attached-mode deployments may lack either
/// value; `attach_source` requires both to be present.
#[derive(Clone, Debug, Eq, PartialEq)]
pub struct K8sDeploymentMetadata {
/// Namespace used for this deployment when available.
pub namespace: Option<String>,
/// Attach selector used to discover node services.
pub label_selector: Option<String>,
}
/// Reasons deployment metadata cannot be turned into an attach source.
#[derive(Debug, thiserror::Error)]
enum K8sMetadataError {
#[error("k8s deployment metadata has no namespace")]
MissingNamespace,
#[error("k8s deployment metadata has no label selector")]
MissingLabelSelector,
}
impl K8sDeploymentMetadata {
/// Returns namespace when deployment is bound to a specific namespace.
#[must_use]
pub fn namespace(&self) -> Option<&str> {
self.namespace.as_deref()
}
/// Returns attach label selector when available.
#[must_use]
pub fn label_selector(&self) -> Option<&str> {
self.label_selector.as_deref()
}
/// Builds an attach source for the same k8s deployment scope.
///
/// # Errors
///
/// Fails when either the namespace or the label selector is absent, since
/// an attach source needs both to scope service discovery.
pub fn attach_source(&self) -> Result<AttachSource, DynError> {
let namespace = self.namespace().ok_or(K8sMetadataError::MissingNamespace)?;
let label_selector = self
.label_selector()
.ok_or(K8sMetadataError::MissingLabelSelector)?;
Ok(AttachSource::k8s(label_selector.to_owned()).with_namespace(namespace.to_owned()))
}
}

View File

@ -1,20 +1,26 @@
use std::{env, fmt::Debug, marker::PhantomData, time::Duration};
use std::{env, fmt::Debug, marker::PhantomData, sync::Arc, time::Duration};
use async_trait::async_trait;
use kube::Client;
use reqwest::Url;
use testing_framework_core::{
scenario::{
Application, CleanupGuard, Deployer, DynError, FeedHandle, FeedRuntime,
HttpReadinessRequirement, Metrics, MetricsError, NodeClients,
ObservabilityCapabilityProvider, ObservabilityInputs, RequiresNodeControl, RunContext,
Runner, Scenario, build_source_orchestration_plan, orchestrate_sources,
Application, ApplicationExternalProvider, AttachSource, CleanupGuard, ClusterWaitHandle,
Deployer, DynError, FeedHandle, FeedRuntime, HttpReadinessRequirement, Metrics,
MetricsError, NodeClients, ObservabilityCapabilityProvider, ObservabilityInputs,
RequiresNodeControl, RunContext, Runner, Scenario, ScenarioSources,
SourceOrchestrationPlan, SourceProviders, StaticManagedProvider,
build_source_orchestration_plan, orchestrate_sources_with_providers,
},
topology::DeploymentDescriptor,
};
use tracing::{error, info};
use crate::{
deployer::{
K8sDeploymentMetadata,
attach_provider::{K8sAttachProvider, K8sAttachedClusterWait},
},
env::K8sDeployEnv,
infrastructure::cluster::{
ClusterEnvironment, ClusterEnvironmentError, NodeClientError, PortSpecs,
@ -56,6 +62,17 @@ impl<E: K8sDeployEnv> K8sDeployer<E> {
self.readiness_checks = enabled;
self
}
/// Deploy and return k8s-specific metadata alongside the generic runner.
///
/// # Errors
///
/// Propagates any `K8sRunnerError` raised while deploying the scenario.
pub async fn deploy_with_metadata<Caps>(
&self,
scenario: &Scenario<E, Caps>,
) -> Result<(Runner<E>, K8sDeploymentMetadata), K8sRunnerError>
where
Caps: RequiresNodeControl + ObservabilityCapabilityProvider + Send + Sync,
{
deploy_with_observability(self, scenario).await
}
}
#[derive(Debug, thiserror::Error)]
@ -115,7 +132,9 @@ where
type Error = K8sRunnerError;
async fn deploy(&self, scenario: &Scenario<E, Caps>) -> Result<Runner<E>, Self::Error> {
deploy_with_observability(self, scenario).await
self.deploy_with_metadata(scenario)
.await
.map(|(runner, _)| runner)
}
}
@ -147,10 +166,10 @@ fn ensure_supported_topology<E: K8sDeployEnv>(
async fn deploy_with_observability<E, Caps>(
deployer: &K8sDeployer<E>,
scenario: &Scenario<E, Caps>,
) -> Result<Runner<E>, K8sRunnerError>
) -> Result<(Runner<E>, K8sDeploymentMetadata), K8sRunnerError>
where
E: K8sDeployEnv,
Caps: ObservabilityCapabilityProvider,
Caps: ObservabilityCapabilityProvider + Send + Sync,
{
// Source planning is currently resolved here before deployer-specific setup.
let source_plan = build_source_orchestration_plan(scenario).map_err(|source| {
@ -160,21 +179,170 @@ where
})?;
let observability = resolve_observability_inputs(scenario.capabilities())?;
if scenario.sources().is_attached() {
let runner = deploy_attached_only::<E, Caps>(scenario, source_plan, observability).await?;
return Ok((runner, attached_metadata(scenario)));
}
let deployment = build_k8s_deployment::<E, Caps>(deployer, scenario, &observability).await?;
let metadata = K8sDeploymentMetadata {
namespace: Some(deployment.cluster.namespace().to_owned()),
label_selector: Some(E::attach_node_service_selector(
deployment.cluster.release(),
)),
};
let mut cluster = Some(deployment.cluster);
let mut runtime = build_runtime_artifacts::<E>(&mut cluster, &observability).await?;
let cluster_wait = managed_cluster_wait::<E>(&cluster, &metadata)?;
// Source orchestration currently runs here after managed clients are prepared.
runtime.node_clients = orchestrate_sources(&source_plan, runtime.node_clients)
.await
.map_err(|source| K8sRunnerError::SourceOrchestration { source })?;
let source_providers = source_providers::<E>(
client_from_cluster(&cluster)?,
runtime.node_clients.snapshot(),
);
let parts = build_runner_parts(scenario, deployment.node_count, runtime);
runtime.node_clients = resolve_node_clients(&source_plan, source_providers).await?;
ensure_non_empty_node_clients(&runtime.node_clients)?;
let parts = build_runner_parts(scenario, deployment.node_count, runtime, cluster_wait);
log_configured_observability(&observability);
maybe_print_endpoints::<E>(&observability, &parts.node_clients);
finalize_runner::<E>(&mut cluster, parts)
let runner = finalize_runner::<E>(&mut cluster, parts)?;
Ok((runner, metadata))
}
/// Builds a runner purely from attached (pre-existing) nodes — no Helm
/// deployment happens on this path.
///
/// Node clients come from source orchestration against the attach providers;
/// readiness checking is deferred to the returned runner's cluster-wait
/// handle.
async fn deploy_attached_only<E, Caps>(
scenario: &Scenario<E, Caps>,
source_plan: SourceOrchestrationPlan,
observability: ObservabilityInputs,
) -> Result<Runner<E>, K8sRunnerError>
where
E: K8sDeployEnv,
Caps: ObservabilityCapabilityProvider + Send + Sync,
{
let client = init_kube_client().await?;
// Attached mode deploys nothing, so the managed provider gets an empty set.
let source_providers = source_providers::<E>(client.clone(), Vec::new());
let node_clients = resolve_node_clients(&source_plan, source_providers).await?;
ensure_non_empty_node_clients(&node_clients)?;
let telemetry = observability.telemetry_handle()?;
let (feed, feed_task) = spawn_block_feed_with::<E>(&node_clients).await?;
let cluster_wait = attached_cluster_wait::<E, Caps>(scenario, client)?;
let context = RunContext::new(
scenario.deployment().clone(),
node_clients,
scenario.duration(),
scenario.expectation_cooldown(),
telemetry,
feed,
None,
)
.with_cluster_wait(cluster_wait);
Ok(Runner::new(context, Some(Box::new(feed_task))))
}
/// Derives deployment metadata from an attached scenario's own k8s attach
/// source; any other source shape yields empty metadata.
fn attached_metadata<E, Caps>(scenario: &Scenario<E, Caps>) -> K8sDeploymentMetadata
where
    E: K8sDeployEnv,
    Caps: Send + Sync,
{
    if let ScenarioSources::Attached {
        attach:
            AttachSource::K8s {
                namespace,
                label_selector,
            },
        ..
    } = scenario.sources()
    {
        return K8sDeploymentMetadata {
            namespace: namespace.clone(),
            label_selector: Some(label_selector.clone()),
        };
    }
    K8sDeploymentMetadata {
        namespace: None,
        label_selector: None,
    }
}
/// Creates the cluster-wait handle for attached mode from the scenario's own
/// attach source.
///
/// Returns an internal-invariant error if called while the scenario is not in
/// attached source mode.
fn attached_cluster_wait<E, Caps>(
scenario: &Scenario<E, Caps>,
client: Client,
) -> Result<Arc<dyn ClusterWaitHandle<E>>, K8sRunnerError>
where
E: K8sDeployEnv,
Caps: Send + Sync,
{
let ScenarioSources::Attached { attach, .. } = scenario.sources() else {
return Err(K8sRunnerError::InternalInvariant {
message: "k8s attached cluster wait requested outside attached source mode".to_owned(),
});
};
Ok(Arc::new(K8sAttachedClusterWait::<E>::new(
client,
attach.clone(),
)))
}
/// Creates a cluster-wait handle for a managed deployment by re-deriving the
/// attach source from the freshly built deployment metadata.
fn managed_cluster_wait<E: K8sDeployEnv>(
cluster: &Option<ClusterEnvironment>,
metadata: &K8sDeploymentMetadata,
) -> Result<Arc<dyn ClusterWaitHandle<E>>, K8sRunnerError> {
let client = client_from_cluster(cluster)?;
// A missing namespace/selector surfaces as a source-orchestration failure.
let attach_source = metadata
.attach_source()
.map_err(|source| K8sRunnerError::SourceOrchestration { source })?;
Ok(Arc::new(K8sAttachedClusterWait::<E>::new(
client,
attach_source,
)))
}
/// Fetches a clone of the kube client from a live cluster environment,
/// reporting an internal invariant violation when the cluster is absent.
fn client_from_cluster(cluster: &Option<ClusterEnvironment>) -> Result<Client, K8sRunnerError> {
    match cluster {
        Some(environment) => Ok(environment.client().clone()),
        None => Err(K8sRunnerError::InternalInvariant {
            message: "cluster must exist while resolving source providers".to_owned(),
        }),
    }
}
/// Assembles the provider set used by source orchestration: static managed
/// clients, k8s attach discovery, and application-level external nodes.
fn source_providers<E: K8sDeployEnv>(
client: Client,
managed_clients: Vec<E::NodeClient>,
) -> SourceProviders<E> {
SourceProviders::default()
.with_managed(Arc::new(StaticManagedProvider::new(managed_clients)))
.with_attach(Arc::new(K8sAttachProvider::<E>::new(client)))
.with_external(Arc::new(ApplicationExternalProvider))
}
/// Runs source orchestration over the providers and maps failures into the
/// runner-level error type.
async fn resolve_node_clients<E: K8sDeployEnv>(
source_plan: &SourceOrchestrationPlan,
source_providers: SourceProviders<E>,
) -> Result<NodeClients<E>, K8sRunnerError> {
orchestrate_sources_with_providers(source_plan, source_providers)
.await
.map_err(|source| K8sRunnerError::SourceOrchestration { source })
}
/// Preflight guard: at least one node client must be resolved before a runner
/// can be constructed.
fn ensure_non_empty_node_clients<E: K8sDeployEnv>(
    node_clients: &NodeClients<E>,
) -> Result<(), K8sRunnerError> {
    if node_clients.is_empty() {
        Err(K8sRunnerError::RuntimePreflight)
    } else {
        Ok(())
    }
}
struct BuiltK8sDeployment {
@ -346,6 +514,7 @@ fn build_runner_parts<E: K8sDeployEnv, Caps>(
scenario: &Scenario<E, Caps>,
node_count: usize,
runtime: RuntimeArtifacts<E>,
cluster_wait: Arc<dyn ClusterWaitHandle<E>>,
) -> K8sRunnerParts<E> {
K8sRunnerParts {
descriptors: scenario.deployment().clone(),
@ -356,6 +525,7 @@ fn build_runner_parts<E: K8sDeployEnv, Caps>(
feed: runtime.feed,
feed_task: runtime.feed_task,
node_count,
cluster_wait,
}
}
@ -451,6 +621,7 @@ struct K8sRunnerParts<E: K8sDeployEnv> {
feed: Feed<E>,
feed_task: FeedHandle,
node_count: usize,
cluster_wait: Arc<dyn ClusterWaitHandle<E>>,
}
fn finalize_runner<E: K8sDeployEnv>(
@ -469,6 +640,7 @@ fn finalize_runner<E: K8sDeployEnv>(
feed,
feed_task,
node_count,
cluster_wait,
} = parts;
let duration_secs = duration.as_secs();
@ -481,6 +653,7 @@ fn finalize_runner<E: K8sDeployEnv>(
expectation_cooldown,
telemetry,
feed,
cluster_wait,
);
info!(
@ -508,6 +681,7 @@ fn build_k8s_run_context<E: K8sDeployEnv>(
expectation_cooldown: Duration,
telemetry: Metrics,
feed: Feed<E>,
cluster_wait: Arc<dyn ClusterWaitHandle<E>>,
) -> RunContext<E> {
RunContext::new(
descriptors,
@ -518,6 +692,7 @@ fn build_k8s_run_context<E: K8sDeployEnv>(
feed,
None,
)
.with_cluster_wait(cluster_wait)
}
fn endpoint_or_disabled(endpoint: Option<&Url>) -> String {

View File

@ -106,6 +106,11 @@ pub trait K8sDeployEnv: Application {
format!("{release}-node-{index}")
}
/// Label selector used to discover managed node services in attached mode.
///
/// Matches the standard Helm/Kubernetes instance label
/// (`app.kubernetes.io/instance`) stamped on a release's objects.
fn attach_node_service_selector(release: &str) -> String {
format!("app.kubernetes.io/instance={release}")
}
/// Wait for HTTP readiness on provided ports for a given host.
async fn wait_for_node_http(
ports: &[u16],

View File

@ -94,6 +94,10 @@ impl ClusterEnvironment {
&self.release
}
/// Shared kube client for this cluster environment.
pub fn client(&self) -> &Client {
&self.client
}
/// Returns the node API ports and node testing ports held by this environment,
/// in that order.
pub fn node_ports(&self) -> (&[u16], &[u16]) {
(&self.node_api_ports, &self.node_testing_ports)
}

View File

@ -7,7 +7,7 @@ pub mod wait {
pub use crate::lifecycle::wait::*;
}
pub use deployer::{K8sDeployer, K8sRunnerError};
pub use deployer::{K8sDeployer, K8sDeploymentMetadata, K8sRunnerError};
pub use env::K8sDeployEnv;
pub use infrastructure::cluster::PortSpecs;
pub use lifecycle::cleanup::RunnerCleanup;