Merge approved framework coherence stack into dev

This commit is contained in:
andrussal 2026-03-25 07:27:56 +01:00
commit 481758e0b9
41 changed files with 1205 additions and 867 deletions

View File

@ -7,7 +7,7 @@ use crate::SnippetResult;
pub fn random_restart_plan() -> SnippetResult<Scenario<NodeControlCapability>> {
ScenarioBuilder::topology_with(|t| t.network_star().nodes(2))
.enable_node_control()
.with_node_control()
.with_workload(RandomRestartWorkload::new(
Duration::from_secs(45), // min delay
Duration::from_secs(75), // max delay

View File

@ -8,7 +8,7 @@ use crate::SnippetResult;
pub fn chaos_plan()
-> SnippetResult<testing_framework_core::scenario::Scenario<NodeControlCapability>> {
ScenarioBuilder::topology_with(|t| t.network_star().nodes(3))
.enable_node_control() // Enable node control capability
.with_node_control() // Enable node control capability
.chaos_with(|c| {
c.restart() // Random restart chaos
.min_delay(Duration::from_secs(30)) // Min time between restarts

View File

@ -7,7 +7,7 @@ use testing_framework_workflows::{ChaosBuilderExt, ScenarioBuilderExt};
pub async fn aggressive_chaos_test() -> Result<()> {
let mut plan = ScenarioBuilder::topology_with(|t| t.network_star().nodes(4))
.enable_node_control()
.with_node_control()
.wallets(50)
.transactions_with(|txs| txs.rate(10).users(20))
.chaos_with(|c| {

View File

@ -7,7 +7,7 @@ use testing_framework_workflows::{ChaosBuilderExt, ScenarioBuilderExt};
pub async fn chaos_resilience() -> Result<()> {
let mut plan = ScenarioBuilder::topology_with(|t| t.network_star().nodes(4))
.enable_node_control()
.with_node_control()
.wallets(20)
.transactions_with(|txs| txs.rate(3).users(10))
.chaos_with(|c| {

View File

@ -16,7 +16,7 @@ pub fn determinism_first() -> SnippetResult<()> {
// Separate: chaos test (introduces randomness)
let _chaos_plan = ScenarioBuilder::topology_with(|t| t.network_star().nodes(3))
.enable_node_control()
.with_node_control()
.chaos_with(|c| {
c.restart()
.min_delay(Duration::from_secs(30))

View File

@ -41,7 +41,7 @@ async fn run_compose_case(nodes: usize, run_duration: Duration) -> Result<()> {
t.with_network_layout(Libp2pNetworkLayout::Star)
.with_node_count(nodes)
})
.enable_node_control()
.with_node_control()
.with_run_duration(run_duration)
.with_deployment_seed(seed)
.initialize_wallet(

View File

@ -37,7 +37,7 @@ async fn run_k8s_case(nodes: usize, run_duration: Duration) -> Result<()> {
t.with_network_layout(Libp2pNetworkLayout::Star)
.with_node_count(nodes)
})
.enable_observability()
.with_observability()
.with_run_duration(run_duration)
.with_deployment_seed(seed)
.initialize_wallet(

View File

@ -20,10 +20,10 @@ async fn compose_attach_mode_queries_node_api_opt_in() -> Result<()> {
Err(error) => return Err(Error::new(error)),
};
let attach_source = metadata.attach_source().map_err(|err| anyhow!("{err}"))?;
let attached = ScenarioBuilder::deployment_with(|d| d.with_node_count(1))
.with_run_duration(Duration::from_secs(5))
.with_attach_source(attach_source)
.with_existing_cluster_from(&metadata)
.map_err(|err| anyhow!("{err}"))?
.build()?;
let attached_deployer = LbcComposeDeployer::default();

View File

@ -115,7 +115,7 @@ async fn dynamic_join_reaches_consensus_liveness() -> Result<()> {
t.with_network_layout(Libp2pNetworkLayout::Star)
.with_node_count(2)
})
.enable_node_control()
.with_node_control()
.with_workload(JoinNodeWorkload::new("joiner"))
.with_expectation(lb_framework::workloads::ConsensusLiveness::<LbcEnv>::default())
.with_run_duration(Duration::from_secs(60))
@ -135,7 +135,7 @@ async fn dynamic_join_with_peers_reaches_consensus_liveness() -> Result<()> {
t.with_network_layout(Libp2pNetworkLayout::Star)
.with_node_count(2)
})
.enable_node_control()
.with_node_control()
.with_workload(JoinNodeWithPeersWorkload::new(
"joiner",
vec!["node-0".to_string()],

View File

@ -145,8 +145,7 @@ async fn scenario_managed_plus_external_sources_are_orchestrated() -> Result<()>
let mut scenario = ScenarioBuilder::new(Box::new(deployment_builder))
.with_run_duration(Duration::from_secs(5))
.with_external_node(seed_cluster.external_sources()[0].clone())
.with_external_node(seed_cluster.external_sources()[1].clone())
.with_external_nodes(seed_cluster.external_sources().to_vec())
.build()?;
let deployer = ProcessDeployer::<LbcExtEnv>::default();

View File

@ -20,10 +20,10 @@ async fn k8s_attach_mode_queries_node_api_opt_in() -> Result<()> {
Err(error) => return Err(Error::new(error)),
};
let attach_source = metadata.attach_source().map_err(|err| anyhow!("{err}"))?;
let attached = ScenarioBuilder::deployment_with(|d| d.with_node_count(1))
.with_run_duration(Duration::from_secs(5))
.with_attach_source(attach_source)
.with_existing_cluster_from(&metadata)
.map_err(|err| anyhow!("{err}"))?
.build()?;
let attached_deployer = LbcK8sDeployer::default();

View File

@ -12,7 +12,7 @@ use tracing_subscriber::fmt::try_init;
async fn local_restart_node() -> Result<()> {
let _ = try_init();
let mut scenario = ScenarioBuilder::deployment_with(|t| t.with_node_count(1))
.enable_node_control()
.with_node_control()
.with_run_duration(Duration::from_secs(1))
.build()?;

View File

@ -44,7 +44,7 @@ impl Application for LbcExtEnv {
type FeedRuntime = <LbcEnv as Application>::FeedRuntime;
fn external_node_client(source: &ExternalNodeSource) -> Result<Self::NodeClient, DynError> {
let base_url = Url::parse(&source.endpoint)?;
let base_url = Url::parse(source.endpoint())?;
Ok(NodeHttpClient::from_urls(base_url, None))
}

View File

@ -63,7 +63,7 @@ impl CoreBuilderExt for ScenarioBuilder {
impl CoreBuilderExt for NodeControlScenarioBuilder<LbcExtEnv> {
fn deployment_with(f: impl FnOnce(DeploymentBuilder) -> DeploymentBuilder) -> Self {
ScenarioBuilder::deployment_with(f).enable_node_control()
ScenarioBuilder::deployment_with(f).with_node_control()
}
fn with_wallet_config(self, wallet: WalletConfig) -> Self {
@ -82,7 +82,7 @@ impl CoreBuilderExt for NodeControlScenarioBuilder<LbcExtEnv> {
impl CoreBuilderExt for ObservabilityScenarioBuilder<LbcExtEnv> {
fn deployment_with(f: impl FnOnce(DeploymentBuilder) -> DeploymentBuilder) -> Self {
ScenarioBuilder::deployment_with(f).enable_observability()
ScenarioBuilder::deployment_with(f).with_observability()
}
fn with_wallet_config(self, wallet: WalletConfig) -> Self {

View File

@ -1,15 +1,11 @@
use async_trait::async_trait;
use crate::scenario::{Application, DynError, NodeControlHandle, StartNodeOptions, StartedNode};
use crate::scenario::{Application, ClusterControlProfile, ClusterWaitHandle, NodeControlHandle};
/// Interface for imperative, deployer-backed manual clusters.
#[async_trait]
pub trait ManualClusterHandle<E: Application>: NodeControlHandle<E> {
async fn start_node_with(
&self,
name: &str,
options: StartNodeOptions<E>,
) -> Result<StartedNode<E>, DynError>;
async fn wait_network_ready(&self) -> Result<(), DynError>;
pub trait ManualClusterHandle<E: Application>: NodeControlHandle<E> + ClusterWaitHandle<E> {
fn cluster_control_profile(&self) -> ClusterControlProfile {
ClusterControlProfile::ManualControlled
}
}

View File

@ -95,7 +95,7 @@ impl<E: Application> ObservabilityBuilderExt for ScenarioBuilder<E> {
type Env = E;
fn with_metrics_query_url(self, url: Url) -> ObservabilityScenarioBuilder<E> {
self.with_observability(single_url_observability(Some(url), None, None))
self.with_observability_capability(single_url_observability(Some(url), None, None))
}
fn with_metrics_query_url_str(self, url: &str) -> ObservabilityScenarioBuilder<E> {
@ -112,7 +112,7 @@ impl<E: Application> ObservabilityBuilderExt for ScenarioBuilder<E> {
}
fn with_metrics_otlp_ingest_url(self, url: Url) -> ObservabilityScenarioBuilder<E> {
self.with_observability(single_url_observability(None, Some(url), None))
self.with_observability_capability(single_url_observability(None, Some(url), None))
}
fn with_metrics_otlp_ingest_url_str(self, url: &str) -> ObservabilityScenarioBuilder<E> {
@ -129,7 +129,7 @@ impl<E: Application> ObservabilityBuilderExt for ScenarioBuilder<E> {
}
fn with_grafana_url(self, url: Url) -> ObservabilityScenarioBuilder<E> {
self.with_observability(single_url_observability(None, None, Some(url)))
self.with_observability_capability(single_url_observability(None, None, Some(url)))
}
fn with_grafana_url_str(self, url: &str) -> ObservabilityScenarioBuilder<E> {

View File

@ -4,15 +4,16 @@ use thiserror::Error;
use tracing::{debug, info};
use super::{
Application, AttachSource, DeploymentPolicy, DynError, ExternalNodeSource,
HttpReadinessRequirement, NodeControlCapability, ObservabilityCapability, ScenarioSources,
SourceReadinessPolicy,
Application, ClusterControlProfile, ClusterMode, DeploymentPolicy, DynError, ExistingCluster,
ExternalNodeSource, HttpReadinessRequirement, IntoExistingCluster, NodeControlCapability,
ObservabilityCapability, RequiresNodeControl,
builder_ops::CoreBuilderAccess,
expectation::Expectation,
runtime::{
context::RunMetrics,
orchestration::{SourceModeName, SourceOrchestrationPlan, SourceOrchestrationPlanError},
orchestration::{SourceOrchestrationPlan, SourceOrchestrationPlanError},
},
sources::ScenarioSources,
workload::Workload,
};
use crate::topology::{DeploymentDescriptor, DeploymentProvider, DeploymentSeed, DynTopologyError};
@ -44,7 +45,6 @@ pub struct Scenario<E: Application, Caps = ()> {
expectation_cooldown: Duration,
deployment_policy: DeploymentPolicy,
sources: ScenarioSources,
source_readiness_policy: SourceReadinessPolicy,
source_orchestration_plan: SourceOrchestrationPlan,
capabilities: Caps,
}
@ -58,7 +58,6 @@ impl<E: Application, Caps> Scenario<E, Caps> {
expectation_cooldown: Duration,
deployment_policy: DeploymentPolicy,
sources: ScenarioSources,
source_readiness_policy: SourceReadinessPolicy,
source_orchestration_plan: SourceOrchestrationPlan,
capabilities: Caps,
) -> Self {
@ -70,7 +69,6 @@ impl<E: Application, Caps> Scenario<E, Caps> {
expectation_cooldown,
deployment_policy,
sources,
source_readiness_policy,
source_orchestration_plan,
capabilities,
}
@ -117,17 +115,34 @@ impl<E: Application, Caps> Scenario<E, Caps> {
}
#[must_use]
/// Selected source readiness policy.
///
/// This is currently reserved for future mixed-source orchestration and
/// does not change runtime behavior yet.
pub const fn source_readiness_policy(&self) -> SourceReadinessPolicy {
self.source_readiness_policy
pub fn existing_cluster(&self) -> Option<&ExistingCluster> {
self.sources.existing_cluster()
}
#[must_use]
pub fn sources(&self) -> &ScenarioSources {
&self.sources
pub const fn cluster_mode(&self) -> ClusterMode {
self.sources.cluster_mode()
}
#[must_use]
pub const fn cluster_control_profile(&self) -> ClusterControlProfile {
self.sources.control_profile()
}
#[must_use]
#[doc(hidden)]
pub fn attached_source(&self) -> Option<&ExistingCluster> {
self.existing_cluster()
}
#[must_use]
pub fn external_nodes(&self) -> &[ExternalNodeSource] {
self.sources.external_nodes()
}
#[must_use]
pub fn has_external_nodes(&self) -> bool {
!self.sources.external_nodes().is_empty()
}
#[must_use]
@ -151,7 +166,6 @@ pub struct Builder<E: Application, Caps = ()> {
expectation_cooldown: Option<Duration>,
deployment_policy: DeploymentPolicy,
sources: ScenarioSources,
source_readiness_policy: SourceReadinessPolicy,
capabilities: Caps,
}
@ -247,8 +261,24 @@ macro_rules! impl_common_builder_methods {
}
#[must_use]
pub fn with_attach_source(self, attach: AttachSource) -> Self {
self.map_core_builder(|builder| builder.with_attach_source(attach))
pub fn with_existing_cluster(self, cluster: ExistingCluster) -> Self {
self.map_core_builder(|builder| builder.with_existing_cluster(cluster))
}
#[must_use]
pub fn with_existing_cluster_from(
self,
cluster: impl IntoExistingCluster,
) -> Result<Self, DynError> {
let cluster = cluster.into_existing_cluster()?;
Ok(self.with_existing_cluster(cluster))
}
#[must_use]
#[doc(hidden)]
pub fn with_attach_source(self, attach: ExistingCluster) -> Self {
self.with_existing_cluster(attach)
}
#[must_use]
@ -257,13 +287,24 @@ macro_rules! impl_common_builder_methods {
}
#[must_use]
pub fn with_source_readiness_policy(self, policy: SourceReadinessPolicy) -> Self {
self.map_core_builder(|builder| builder.with_source_readiness_policy(policy))
pub fn with_external_nodes(
self,
nodes: impl IntoIterator<Item = ExternalNodeSource>,
) -> Self {
self.map_core_builder(|builder| builder.with_external_nodes(nodes))
}
#[must_use]
pub fn with_external_only_sources(self) -> Self {
self.map_core_builder(|builder| builder.with_external_only_sources())
pub fn with_external_only(self) -> Self {
self.map_core_builder(|builder| builder.with_external_only())
}
#[must_use]
pub fn with_external_only_nodes(
self,
nodes: impl IntoIterator<Item = ExternalNodeSource>,
) -> Self {
self.map_core_builder(|builder| builder.with_external_only_nodes(nodes))
}
#[must_use]
@ -350,7 +391,6 @@ impl<E: Application, Caps: Default> Builder<E, Caps> {
expectation_cooldown: None,
deployment_policy: DeploymentPolicy::default(),
sources: ScenarioSources::default(),
source_readiness_policy: SourceReadinessPolicy::default(),
capabilities: Caps::default(),
}
}
@ -365,14 +405,20 @@ impl<E: Application> ScenarioBuilder<E> {
}
#[must_use]
pub fn enable_node_control(self) -> NodeControlScenarioBuilder<E> {
pub fn with_node_control(self) -> NodeControlScenarioBuilder<E> {
NodeControlScenarioBuilder {
inner: self.inner.with_capabilities(NodeControlCapability),
}
}
#[must_use]
pub fn enable_observability(self) -> ObservabilityScenarioBuilder<E> {
#[doc(hidden)]
pub fn enable_node_control(self) -> NodeControlScenarioBuilder<E> {
self.with_node_control()
}
#[must_use]
pub fn with_observability(self) -> ObservabilityScenarioBuilder<E> {
ObservabilityScenarioBuilder {
inner: self
.inner
@ -380,11 +426,17 @@ impl<E: Application> ScenarioBuilder<E> {
}
}
#[must_use]
#[doc(hidden)]
pub fn enable_observability(self) -> ObservabilityScenarioBuilder<E> {
self.with_observability()
}
pub fn build(self) -> Result<Scenario<E>, ScenarioBuildError> {
self.inner.build()
}
pub(crate) fn with_observability(
pub(crate) fn with_observability_capability(
self,
observability: ObservabilityCapability,
) -> ObservabilityScenarioBuilder<E> {
@ -453,7 +505,6 @@ impl<E: Application, Caps> Builder<E, Caps> {
expectation_cooldown,
deployment_policy,
sources,
source_readiness_policy,
..
} = self;
@ -466,7 +517,6 @@ impl<E: Application, Caps> Builder<E, Caps> {
expectation_cooldown,
deployment_policy,
sources,
source_readiness_policy,
capabilities,
}
}
@ -568,33 +618,65 @@ impl<E: Application, Caps> Builder<E, Caps> {
}
#[must_use]
pub fn with_attach_source(mut self, attach: AttachSource) -> Self {
self.sources.set_attach(attach);
pub fn with_existing_cluster(mut self, cluster: ExistingCluster) -> Self {
self.sources = self.sources.with_attach(cluster);
self
}
#[must_use]
pub fn with_existing_cluster_from(
self,
cluster: impl IntoExistingCluster,
) -> Result<Self, DynError> {
let cluster = cluster.into_existing_cluster()?;
Ok(self.with_existing_cluster(cluster))
}
#[must_use]
#[doc(hidden)]
pub fn with_attach_source(self, attach: ExistingCluster) -> Self {
self.with_existing_cluster(attach)
}
#[must_use]
pub fn with_external_node(mut self, node: ExternalNodeSource) -> Self {
self.sources.add_external_node(node);
self.sources = self.sources.with_external_node(node);
self
}
#[must_use]
/// Configure source readiness policy metadata.
///
/// This is currently reserved for future mixed-source orchestration and
/// does not change runtime behavior yet.
pub fn with_source_readiness_policy(mut self, policy: SourceReadinessPolicy) -> Self {
self.source_readiness_policy = policy;
pub fn with_external_nodes(
mut self,
nodes: impl IntoIterator<Item = ExternalNodeSource>,
) -> Self {
for node in nodes {
self.sources = self.sources.with_external_node(node);
}
self
}
#[must_use]
pub fn with_external_only_sources(mut self) -> Self {
self.sources.set_external_only();
pub fn with_external_only(mut self) -> Self {
self.sources = self.sources.into_external_only();
self
}
#[must_use]
pub fn with_external_only_nodes(
self,
nodes: impl IntoIterator<Item = ExternalNodeSource>,
) -> Self {
self.with_external_only().with_external_nodes(nodes)
}
#[must_use]
#[doc(hidden)]
pub fn with_external_only_sources(self) -> Self {
self.with_external_only()
}
fn add_workload(&mut self, workload: Box<dyn Workload<E>>) {
self.expectations.extend(workload.expectations());
self.workloads.push(workload);
@ -607,13 +689,18 @@ impl<E: Application, Caps> Builder<E, Caps> {
#[must_use]
/// Finalize the scenario, computing run metrics and initializing
/// components.
pub fn build(self) -> Result<Scenario<E, Caps>, ScenarioBuildError> {
pub fn build(self) -> Result<Scenario<E, Caps>, ScenarioBuildError>
where
Caps: RequiresNodeControl,
{
let mut parts = BuilderParts::from_builder(self);
let descriptors = parts.resolve_deployment()?;
let run_plan = parts.run_plan();
let run_metrics = RunMetrics::new(run_plan.duration);
let source_orchestration_plan =
build_source_orchestration_plan(parts.sources(), parts.source_readiness_policy)?;
validate_source_contract::<Caps>(parts.sources())?;
let source_orchestration_plan = build_source_orchestration_plan(parts.sources())?;
initialize_components(
&descriptors,
@ -640,7 +727,6 @@ impl<E: Application, Caps> Builder<E, Caps> {
run_plan.expectation_cooldown,
parts.deployment_policy,
parts.sources,
parts.source_readiness_policy,
source_orchestration_plan,
parts.capabilities,
))
@ -661,7 +747,6 @@ struct BuilderParts<E: Application, Caps> {
expectation_cooldown: Option<Duration>,
deployment_policy: DeploymentPolicy,
sources: ScenarioSources,
source_readiness_policy: SourceReadinessPolicy,
capabilities: Caps,
}
@ -676,7 +761,6 @@ impl<E: Application, Caps> BuilderParts<E, Caps> {
expectation_cooldown,
deployment_policy,
sources,
source_readiness_policy,
capabilities,
..
} = builder;
@ -690,7 +774,6 @@ impl<E: Application, Caps> BuilderParts<E, Caps> {
expectation_cooldown,
deployment_policy,
sources,
source_readiness_policy,
capabilities,
}
}
@ -715,38 +798,82 @@ impl<E: Application, Caps> BuilderParts<E, Caps> {
fn build_source_orchestration_plan(
sources: &ScenarioSources,
readiness_policy: SourceReadinessPolicy,
) -> Result<SourceOrchestrationPlan, ScenarioBuildError> {
SourceOrchestrationPlan::try_from_sources(sources, readiness_policy)
.map_err(source_plan_error_to_build_error)
SourceOrchestrationPlan::try_from_sources(sources).map_err(source_plan_error_to_build_error)
}
/// Validates that the configured scenario sources satisfy the contract
/// implied by the selected capability set, before the scenario is built.
fn validate_source_contract<Caps>(sources: &ScenarioSources) -> Result<(), ScenarioBuildError>
where
    Caps: RequiresNodeControl,
{
    // External-only mode is only meaningful with at least one external node;
    // capability sets that require node control additionally need a
    // controllable cluster profile.
    validate_external_only_sources(sources)
        .and_then(|()| validate_node_control_profile::<Caps>(sources))
}
fn source_plan_error_to_build_error(error: SourceOrchestrationPlanError) -> ScenarioBuildError {
match error {
SourceOrchestrationPlanError::SourceModeNotWiredYet { mode } => {
ScenarioBuildError::SourceModeNotWiredYet {
mode: source_mode_name(mode),
}
ScenarioBuildError::SourceModeNotWiredYet { mode }
}
}
}
const fn source_mode_name(mode: SourceModeName) -> &'static str {
match mode {
SourceModeName::Attached => "Attached",
/// Rejects external-only scenarios that declare no external nodes: there
/// would be nothing for the scenario to run against.
fn validate_external_only_sources(sources: &ScenarioSources) -> Result<(), ScenarioBuildError> {
    let external_only = matches!(sources.cluster_mode(), ClusterMode::ExternalOnly);
    let has_nodes = !sources.external_nodes().is_empty();
    if external_only && !has_nodes {
        // This message is part of the observable error contract (pinned by tests).
        Err(ScenarioBuildError::SourceConfiguration {
            message: "external-only scenarios require at least one external node".to_owned(),
        })
    } else {
        Ok(())
    }
}
/// Fails the build when the capability set requires node control but the
/// resolved cluster control profile cannot drive node lifecycle operations.
fn validate_node_control_profile<Caps>(sources: &ScenarioSources) -> Result<(), ScenarioBuildError>
where
    Caps: RequiresNodeControl,
{
    let profile = sources.control_profile();
    // Only the external-uncontrolled profile is incompatible with node control.
    let uncontrolled = matches!(profile, ClusterControlProfile::ExternalUncontrolled);
    if !(Caps::REQUIRED && uncontrolled) {
        return Ok(());
    }
    Err(ScenarioBuildError::SourceConfiguration {
        message: format!(
            "node control is not available for cluster mode '{}' with control profile '{}'",
            sources.cluster_mode().as_str(),
            profile.as_str(),
        ),
    })
}
impl<E: Application> Builder<E, ()> {
#[must_use]
pub fn enable_node_control(self) -> Builder<E, NodeControlCapability> {
pub fn with_node_control(self) -> Builder<E, NodeControlCapability> {
self.with_capabilities(NodeControlCapability)
}
#[must_use]
pub fn enable_observability(self) -> Builder<E, ObservabilityCapability> {
#[doc(hidden)]
pub fn enable_node_control(self) -> Builder<E, NodeControlCapability> {
self.with_node_control()
}
#[must_use]
pub fn with_observability(self) -> Builder<E, ObservabilityCapability> {
self.with_capabilities(ObservabilityCapability::default())
}
#[must_use]
#[doc(hidden)]
pub fn enable_observability(self) -> Builder<E, ObservabilityCapability> {
self.with_observability()
}
}
fn initialize_components<E: Application>(
@ -821,3 +948,59 @@ fn expectation_cooldown_for(override_value: Option<Duration>) -> Duration {
fn min_run_duration() -> Duration {
Duration::from_secs(MIN_RUN_DURATION_SECS)
}
#[cfg(test)]
mod tests {
    use super::{
        ScenarioBuildError, validate_external_only_sources, validate_node_control_profile,
    };
    use crate::scenario::{
        ExistingCluster, ExternalNodeSource, NodeControlCapability, sources::ScenarioSources,
    };

    // An external-only source set with no external nodes is unusable, so
    // validation must reject it with a source-configuration error.
    #[test]
    fn external_only_requires_external_nodes() {
        let error =
            validate_external_only_sources(&ScenarioSources::default().into_external_only())
                .expect_err("external-only without nodes should fail");
        assert!(matches!(
            error,
            ScenarioBuildError::SourceConfiguration { .. }
        ));
        // Pin the rendered message so it cannot drift silently.
        assert_eq!(
            error.to_string(),
            "invalid scenario source configuration: external-only scenarios require at least one external node"
        );
    }

    // External-only sources resolve to the 'external-uncontrolled' profile,
    // which cannot satisfy a capability set that requires node control.
    #[test]
    fn external_only_rejects_node_control_requirement() {
        let sources = ScenarioSources::default()
            .with_external_node(ExternalNodeSource::new(
                "node-0".to_owned(),
                "http://127.0.0.1:1".to_owned(),
            ))
            .into_external_only();
        let error = validate_node_control_profile::<NodeControlCapability>(&sources)
            .expect_err("external-only should reject node control");
        assert!(matches!(
            error,
            ScenarioBuildError::SourceConfiguration { .. }
        ));
        // Message embeds both the cluster mode and the control profile names.
        assert_eq!(
            error.to_string(),
            "invalid scenario source configuration: node control is not available for cluster mode 'external-only' with control profile 'external-uncontrolled'"
        );
    }

    // Attaching to an existing cluster (here a compose project) yields a
    // controllable profile, so a node-control requirement is accepted.
    #[test]
    fn existing_cluster_accepts_node_control_requirement() {
        let sources = ScenarioSources::default()
            .with_attach(ExistingCluster::for_compose_project("project".to_owned()));
        validate_node_control_profile::<NodeControlCapability>(&sources)
            .expect("existing cluster should be considered controllable");
    }
}

View File

@ -36,23 +36,27 @@ pub use definition::{Scenario, ScenarioBuildError, ScenarioBuilder};
pub use deployment_policy::{CleanupPolicy, DeploymentPolicy, RetryPolicy};
pub use expectation::Expectation;
pub use observability::{ObservabilityCapabilityProvider, ObservabilityInputs};
#[doc(hidden)]
pub use runtime::{
ApplicationExternalProvider, AttachProvider, AttachProviderError, AttachedNode, BorrowedNode,
BorrowedOrigin, CleanupGuard, Deployer, Feed, FeedHandle, FeedRuntime,
HttpReadinessRequirement, ManagedNode, ManagedSource, NodeClients, NodeHandle, NodeInventory,
ReadinessError, RunContext, RunHandle, RunMetrics, Runner, ScenarioError,
SourceOrchestrationPlan, SourceProviders, StabilizationConfig, StaticManagedProvider,
build_source_orchestration_plan,
ApplicationExternalProvider, AttachProvider, AttachProviderError, AttachedNode, CleanupGuard,
FeedHandle, ManagedSource, RuntimeAssembly, SourceOrchestrationPlan, SourceProviders,
StaticManagedProvider, build_source_orchestration_plan, orchestrate_sources,
orchestrate_sources_with_providers, resolve_sources,
};
pub use runtime::{
Deployer, Feed, FeedRuntime, HttpReadinessRequirement, NodeClients, ReadinessError, RunContext,
RunHandle, RunMetrics, Runner, ScenarioError, StabilizationConfig,
metrics::{
CONSENSUS_PROCESSED_BLOCKS, CONSENSUS_TRANSACTIONS_TOTAL, Metrics, MetricsError,
PrometheusEndpoint, PrometheusInstantSample,
},
orchestrate_sources, orchestrate_sources_with_providers, resolve_sources, spawn_feed,
wait_for_http_ports, wait_for_http_ports_with_host,
spawn_feed, wait_for_http_ports, wait_for_http_ports_with_host,
wait_for_http_ports_with_host_and_requirement, wait_for_http_ports_with_requirement,
wait_http_readiness, wait_until_stable,
};
pub use sources::{AttachSource, ExternalNodeSource, ScenarioSources, SourceReadinessPolicy};
pub use sources::{
ClusterControlProfile, ClusterMode, ExistingCluster, ExternalNodeSource, IntoExistingCluster,
};
pub use workload::Workload;
pub use crate::env::Application;

View File

@ -2,8 +2,7 @@ use std::{sync::Arc, time::Duration};
use super::{metrics::Metrics, node_clients::ClusterClient};
use crate::scenario::{
Application, BorrowedNode, ClusterWaitHandle, DynError, ManagedNode, NodeClients,
NodeControlHandle,
Application, ClusterControlProfile, ClusterWaitHandle, DynError, NodeClients, NodeControlHandle,
};
#[derive(Debug, thiserror::Error)]
@ -18,6 +17,21 @@ pub struct RunContext<E: Application> {
node_clients: NodeClients<E>,
metrics: RunMetrics,
expectation_cooldown: Duration,
cluster_control_profile: ClusterControlProfile,
telemetry: Metrics,
feed: <E::FeedRuntime as super::FeedRuntime>::Feed,
node_control: Option<Arc<dyn NodeControlHandle<E>>>,
cluster_wait: Option<Arc<dyn ClusterWaitHandle<E>>>,
}
/// Low-level runtime assembly input used by deployers to build a runnable
/// cluster context.
pub struct RuntimeAssembly<E: Application> {
descriptors: E::Deployment,
node_clients: NodeClients<E>,
run_duration: Duration,
expectation_cooldown: Duration,
cluster_control_profile: ClusterControlProfile,
telemetry: Metrics,
feed: <E::FeedRuntime as super::FeedRuntime>::Feed,
node_control: Option<Arc<dyn NodeControlHandle<E>>>,
@ -27,11 +41,12 @@ pub struct RunContext<E: Application> {
impl<E: Application> RunContext<E> {
/// Builds a run context from prepared deployment/runtime artifacts.
#[must_use]
pub fn new(
pub(crate) fn new(
descriptors: E::Deployment,
node_clients: NodeClients<E>,
run_duration: Duration,
expectation_cooldown: Duration,
cluster_control_profile: ClusterControlProfile,
telemetry: Metrics,
feed: <E::FeedRuntime as super::FeedRuntime>::Feed,
node_control: Option<Arc<dyn NodeControlHandle<E>>>,
@ -43,6 +58,7 @@ impl<E: Application> RunContext<E> {
node_clients,
metrics,
expectation_cooldown,
cluster_control_profile,
telemetry,
feed,
node_control,
@ -51,7 +67,7 @@ impl<E: Application> RunContext<E> {
}
#[must_use]
pub fn with_cluster_wait(mut self, cluster_wait: Arc<dyn ClusterWaitHandle<E>>) -> Self {
pub(crate) fn with_cluster_wait(mut self, cluster_wait: Arc<dyn ClusterWaitHandle<E>>) -> Self {
self.cluster_wait = Some(cluster_wait);
self
}
@ -71,26 +87,6 @@ impl<E: Application> RunContext<E> {
self.node_clients.random_client()
}
#[must_use]
pub fn managed_nodes(&self) -> Vec<ManagedNode<E>> {
self.node_clients.managed_nodes()
}
#[must_use]
pub fn borrowed_nodes(&self) -> Vec<BorrowedNode<E>> {
self.node_clients.borrowed_nodes()
}
#[must_use]
pub fn find_managed_node(&self, identity: &str) -> Option<ManagedNode<E>> {
self.node_clients.find_managed(identity)
}
#[must_use]
pub fn find_borrowed_node(&self, identity: &str) -> Option<BorrowedNode<E>> {
self.node_clients.find_borrowed(identity)
}
#[must_use]
pub fn feed(&self) -> <E::FeedRuntime as super::FeedRuntime>::Feed {
self.feed.clone()
@ -107,13 +103,13 @@ impl<E: Application> RunContext<E> {
}
#[must_use]
pub const fn expectation_cooldown(&self) -> Duration {
pub(crate) const fn expectation_cooldown(&self) -> Duration {
self.expectation_cooldown
}
#[must_use]
pub const fn run_metrics(&self) -> RunMetrics {
self.metrics
pub const fn cluster_control_profile(&self) -> ClusterControlProfile {
self.cluster_control_profile
}
#[must_use]
@ -121,22 +117,7 @@ impl<E: Application> RunContext<E> {
self.node_control.clone()
}
#[must_use]
pub fn cluster_wait(&self) -> Option<Arc<dyn ClusterWaitHandle<E>>> {
self.cluster_wait.clone()
}
#[must_use]
pub const fn controls_nodes(&self) -> bool {
self.node_control.is_some()
}
#[must_use]
pub const fn can_wait_network_ready(&self) -> bool {
self.cluster_wait.is_some()
}
pub async fn wait_network_ready(&self) -> Result<(), DynError> {
pub(crate) async fn wait_network_ready(&self) -> Result<(), DynError> {
self.require_cluster_wait()?.wait_network_ready().await
}
@ -146,11 +127,90 @@ impl<E: Application> RunContext<E> {
}
fn require_cluster_wait(&self) -> Result<Arc<dyn ClusterWaitHandle<E>>, DynError> {
self.cluster_wait()
self.cluster_wait
.as_ref()
.map(Arc::clone)
.ok_or_else(|| RunContextCapabilityError::MissingClusterWait.into())
}
}
impl<E: Application> RuntimeAssembly<E> {
    /// Creates an assembly from the prepared deployment/runtime artifacts.
    /// Node-control and cluster-wait handles start unset and are attached
    /// via the `with_*` builder methods below.
    #[must_use]
    pub fn new(
        descriptors: E::Deployment,
        node_clients: NodeClients<E>,
        run_duration: Duration,
        expectation_cooldown: Duration,
        cluster_control_profile: ClusterControlProfile,
        telemetry: Metrics,
        feed: <E::FeedRuntime as super::FeedRuntime>::Feed,
    ) -> Self {
        Self {
            descriptors,
            node_clients,
            run_duration,
            expectation_cooldown,
            cluster_control_profile,
            telemetry,
            feed,
            node_control: None,
            cluster_wait: None,
        }
    }

    /// Attaches a node-control handle to the assembly.
    #[must_use]
    pub fn with_node_control(self, node_control: Arc<dyn NodeControlHandle<E>>) -> Self {
        Self {
            node_control: Some(node_control),
            ..self
        }
    }

    /// Attaches a cluster-wait handle to the assembly.
    #[must_use]
    pub fn with_cluster_wait(self, cluster_wait: Arc<dyn ClusterWaitHandle<E>>) -> Self {
        Self {
            cluster_wait: Some(cluster_wait),
            ..self
        }
    }

    /// Consumes the assembly and produces the runnable cluster context.
    #[must_use]
    pub fn build_context(self) -> RunContext<E> {
        let Self {
            descriptors,
            node_clients,
            run_duration,
            expectation_cooldown,
            cluster_control_profile,
            telemetry,
            feed,
            node_control,
            cluster_wait,
        } = self;
        let context = RunContext::new(
            descriptors,
            node_clients,
            run_duration,
            expectation_cooldown,
            cluster_control_profile,
            telemetry,
            feed,
            node_control,
        );
        // The cluster-wait handle is optional and attached separately.
        if let Some(wait) = cluster_wait {
            context.with_cluster_wait(wait)
        } else {
            context
        }
    }

    /// Convenience: builds the context and wraps it in a runner, optionally
    /// taking ownership of a cleanup guard for teardown.
    #[must_use]
    pub fn build_runner(self, cleanup_guard: Option<Box<dyn CleanupGuard>>) -> super::Runner<E> {
        super::Runner::new(self.build_context(), cleanup_guard)
    }
}
/// Decomposes an already-built `RunContext` back into its assembly form,
/// e.g. so a deployer can adjust handles and rebuild the context.
impl<E: Application> From<RunContext<E>> for RuntimeAssembly<E> {
    fn from(context: RunContext<E>) -> Self {
        Self {
            descriptors: context.descriptors,
            node_clients: context.node_clients,
            // The context stores timing as derived metrics; read the raw
            // run duration back out of them.
            run_duration: context.metrics.run_duration(),
            expectation_cooldown: context.expectation_cooldown,
            cluster_control_profile: context.cluster_control_profile,
            telemetry: context.telemetry,
            feed: context.feed,
            // Optional handles carry over as-is (may be None).
            node_control: context.node_control,
            cluster_wait: context.cluster_wait,
        }
    }
}
/// Handle returned by the runner to control the lifecycle of the run.
pub struct RunHandle<E: Application> {
run_context: Arc<RunContext<E>>,
@ -166,15 +226,6 @@ impl<E: Application> Drop for RunHandle<E> {
}
impl<E: Application> RunHandle<E> {
#[must_use]
/// Build a handle from owned context and optional cleanup guard.
pub fn new(context: RunContext<E>, cleanup_guard: Option<Box<dyn CleanupGuard>>) -> Self {
Self {
run_context: Arc::new(context),
cleanup_guard,
}
}
#[must_use]
/// Build a handle from a shared context reference.
pub(crate) fn from_shared(
@ -192,10 +243,6 @@ impl<E: Application> RunHandle<E> {
pub fn context(&self) -> &RunContext<E> {
&self.run_context
}
pub async fn wait_network_ready(&self) -> Result<(), DynError> {
self.run_context.wait_network_ready().await
}
}
/// Derived metrics about the current run timing.

View File

@ -1,3 +1,3 @@
mod node_inventory;
pub use node_inventory::{BorrowedNode, BorrowedOrigin, ManagedNode, NodeHandle, NodeInventory};
pub(crate) use node_inventory::NodeInventory;

View File

@ -1,91 +1,18 @@
use std::{collections::HashMap, sync::Arc};
use std::sync::Arc;
use parking_lot::RwLock;
use crate::scenario::{Application, DynError, NodeControlHandle, StartNodeOptions, StartedNode};
use crate::scenario::Application;
/// Origin for borrowed (non-managed) nodes in the runtime inventory.
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
pub enum BorrowedOrigin {
/// Node discovered from an attached cluster provider.
Attached,
/// Node provided explicitly as an external endpoint.
External,
}
/// Managed node handle with full lifecycle capabilities.
pub struct ManagedNode<E: Application> {
/// Canonical node identity used for deduplication and lookups.
pub identity: String,
/// Application-specific API client for this node.
pub client: E::NodeClient,
}
/// Borrowed node handle (attached or external), query-only by default.
pub struct BorrowedNode<E: Application> {
/// Canonical node identity used for deduplication and lookups.
pub identity: String,
/// Application-specific API client for this node.
pub client: E::NodeClient,
/// Borrowed source kind used for diagnostics and selection.
pub origin: BorrowedOrigin,
}
/// Unified node handle variant used by runtime inventory snapshots.
pub enum NodeHandle<E: Application> {
/// Managed node variant.
Managed(ManagedNode<E>),
/// Borrowed node variant.
Borrowed(BorrowedNode<E>),
}
impl<E: Application> Clone for ManagedNode<E> {
fn clone(&self) -> Self {
Self {
identity: self.identity.clone(),
client: self.client.clone(),
}
}
}
impl<E: Application> Clone for BorrowedNode<E> {
fn clone(&self) -> Self {
Self {
identity: self.identity.clone(),
client: self.client.clone(),
origin: self.origin,
}
}
}
impl<E: Application> Clone for NodeHandle<E> {
fn clone(&self) -> Self {
match self {
Self::Managed(node) => Self::Managed(node.clone()),
Self::Borrowed(node) => Self::Borrowed(node.clone()),
}
}
}
/// Thread-safe node inventory with identity-based upsert semantics.
pub struct NodeInventory<E: Application> {
inner: Arc<RwLock<NodeInventoryInner<E>>>,
}
struct NodeInventoryInner<E: Application> {
nodes: Vec<NodeHandle<E>>,
indices_by_identity: HashMap<String, usize>,
next_synthetic_id: usize,
/// Thread-safe node client storage used by runtime handles.
pub(crate) struct NodeInventory<E: Application> {
clients: Arc<RwLock<Vec<E::NodeClient>>>,
}
impl<E: Application> Default for NodeInventory<E> {
fn default() -> Self {
Self {
inner: Arc::new(RwLock::new(NodeInventoryInner {
nodes: Vec::new(),
indices_by_identity: HashMap::new(),
next_synthetic_id: 0,
})),
clients: Arc::new(RwLock::new(Vec::new())),
}
}
}
@ -93,243 +20,44 @@ impl<E: Application> Default for NodeInventory<E> {
impl<E: Application> Clone for NodeInventory<E> {
fn clone(&self) -> Self {
Self {
inner: Arc::clone(&self.inner),
clients: Arc::clone(&self.clients),
}
}
}
impl<E: Application> NodeInventory<E> {
#[must_use]
/// Builds an inventory from managed clients.
pub fn from_managed_clients(clients: Vec<E::NodeClient>) -> Self {
let inventory = Self::default();
for client in clients {
inventory.add_managed_node(client, None);
}
inventory
}
#[must_use]
/// Returns a cloned snapshot of all node clients.
pub fn snapshot_clients(&self) -> Vec<E::NodeClient> {
self.inner.read().nodes.iter().map(clone_client).collect()
}
#[must_use]
/// Returns cloned managed node handles from the current inventory.
pub fn managed_nodes(&self) -> Vec<ManagedNode<E>> {
self.inner
.read()
.nodes
.iter()
.filter_map(|handle| match handle {
NodeHandle::Managed(node) => Some(node.clone()),
NodeHandle::Borrowed(_) => None,
})
.collect()
}
#[must_use]
/// Returns cloned borrowed node handles from the current inventory.
pub fn borrowed_nodes(&self) -> Vec<BorrowedNode<E>> {
self.inner
.read()
.nodes
.iter()
.filter_map(|handle| match handle {
NodeHandle::Managed(_) => None,
NodeHandle::Borrowed(node) => Some(node.clone()),
})
.collect()
}
#[must_use]
/// Finds a managed node by canonical identity.
pub fn find_managed(&self, identity: &str) -> Option<ManagedNode<E>> {
let guard = self.inner.read();
match node_by_identity(&guard, identity)? {
NodeHandle::Managed(node) => Some(node.clone()),
NodeHandle::Borrowed(_) => None,
pub(crate) fn from_clients(clients: Vec<E::NodeClient>) -> Self {
Self {
clients: Arc::new(RwLock::new(clients)),
}
}
#[must_use]
/// Finds a borrowed node by canonical identity.
pub fn find_borrowed(&self, identity: &str) -> Option<BorrowedNode<E>> {
let guard = self.inner.read();
match node_by_identity(&guard, identity)? {
NodeHandle::Managed(_) => None,
NodeHandle::Borrowed(node) => Some(node.clone()),
}
pub(crate) fn snapshot_clients(&self) -> Vec<E::NodeClient> {
self.clients.read().clone()
}
#[must_use]
/// Finds any node handle by canonical identity.
pub fn find_node(&self, identity: &str) -> Option<NodeHandle<E>> {
let guard = self.inner.read();
node_by_identity(&guard, identity).cloned()
pub(crate) fn len(&self) -> usize {
self.clients.read().len()
}
#[must_use]
/// Returns current number of nodes in inventory.
pub fn len(&self) -> usize {
self.inner.read().nodes.len()
}
#[must_use]
/// Returns true when no nodes are registered.
pub fn is_empty(&self) -> bool {
pub(crate) fn is_empty(&self) -> bool {
self.len() == 0
}
/// Clears all nodes and identity indexes.
pub fn clear(&self) {
let mut guard = self.inner.write();
guard.nodes.clear();
guard.indices_by_identity.clear();
guard.next_synthetic_id = 0;
pub(crate) fn clear(&self) {
self.clients.write().clear();
}
/// Adds or replaces a managed node entry using canonical identity
/// resolution. Re-adding the same node identity updates the stored handle.
pub fn add_managed_node(&self, client: E::NodeClient, identity_hint: Option<String>) {
let mut guard = self.inner.write();
let identity = canonical_identity::<E>(&client, identity_hint, &mut guard);
let handle = NodeHandle::Managed(ManagedNode {
identity: identity.clone(),
client,
});
upsert_node(&mut guard, identity, handle);
pub(crate) fn add_client(&self, client: E::NodeClient) {
self.clients.write().push(client);
}
/// Adds or replaces an attached node entry.
pub fn add_attached_node(&self, client: E::NodeClient, identity_hint: Option<String>) {
self.add_borrowed_node(client, BorrowedOrigin::Attached, identity_hint);
}
/// Adds or replaces an external static node entry.
pub fn add_external_node(&self, client: E::NodeClient, identity_hint: Option<String>) {
self.add_borrowed_node(client, BorrowedOrigin::External, identity_hint);
}
/// Executes a synchronous read over a cloned client slice.
pub fn with_clients<R>(&self, f: impl FnOnce(&[E::NodeClient]) -> R) -> R {
let guard = self.inner.read();
let clients = guard.nodes.iter().map(clone_client).collect::<Vec<_>>();
pub(crate) fn with_clients<R>(&self, f: impl FnOnce(&[E::NodeClient]) -> R) -> R {
let clients = self.clients.read();
f(&clients)
}
fn add_borrowed_node(
&self,
client: E::NodeClient,
origin: BorrowedOrigin,
identity_hint: Option<String>,
) {
let mut guard = self.inner.write();
let identity = canonical_identity::<E>(&client, identity_hint, &mut guard);
let handle = NodeHandle::Borrowed(BorrowedNode {
identity: identity.clone(),
client,
origin,
});
upsert_node(&mut guard, identity, handle);
}
}
impl<E: Application> ManagedNode<E> {
#[must_use]
/// Returns the node client.
pub const fn client(&self) -> &E::NodeClient {
&self.client
}
/// Delegates restart to the deployer's control surface for this node name.
pub async fn restart(
&self,
control: &dyn NodeControlHandle<E>,
node_name: &str,
) -> Result<(), DynError> {
control.restart_node(node_name).await
}
/// Delegates stop to the deployer's control surface for this node name.
pub async fn stop(
&self,
control: &dyn NodeControlHandle<E>,
node_name: &str,
) -> Result<(), DynError> {
control.stop_node(node_name).await
}
/// Delegates dynamic node start with options to the control surface.
pub async fn start_with(
&self,
control: &dyn NodeControlHandle<E>,
node_name: &str,
options: StartNodeOptions<E>,
) -> Result<StartedNode<E>, DynError> {
control.start_node_with(node_name, options).await
}
#[must_use]
/// Returns process id if the backend can expose it for this node name.
pub fn pid(&self, control: &dyn NodeControlHandle<E>, node_name: &str) -> Option<u32> {
control.node_pid(node_name)
}
}
impl<E: Application> BorrowedNode<E> {
#[must_use]
/// Returns the node client.
pub const fn client(&self) -> &E::NodeClient {
&self.client
}
}
fn upsert_node<E: Application>(
inner: &mut NodeInventoryInner<E>,
identity: String,
handle: NodeHandle<E>,
) {
if let Some(existing_index) = inner.indices_by_identity.get(&identity).copied() {
inner.nodes[existing_index] = handle;
return;
}
let index = inner.nodes.len();
inner.nodes.push(handle);
inner.indices_by_identity.insert(identity, index);
}
fn canonical_identity<E: Application>(
_client: &E::NodeClient,
identity_hint: Option<String>,
inner: &mut NodeInventoryInner<E>,
) -> String {
// Priority: explicit hint -> synthetic.
if let Some(identity) = identity_hint.filter(|value| !value.trim().is_empty()) {
return identity;
}
let synthetic = format!("node:{}", inner.next_synthetic_id);
inner.next_synthetic_id += 1;
synthetic
}
fn clone_client<E: Application>(handle: &NodeHandle<E>) -> E::NodeClient {
match handle {
NodeHandle::Managed(node) => node.client.clone(),
NodeHandle::Borrowed(node) => node.client.clone(),
}
}
fn node_by_identity<'a, E: Application>(
inner: &'a NodeInventoryInner<E>,
identity: &str,
) -> Option<&'a NodeHandle<E>> {
let index = *inner.indices_by_identity.get(identity)?;
inner.nodes.get(index)
}

View File

@ -1,6 +1,6 @@
pub mod context;
mod deployer;
pub mod inventory;
mod inventory;
pub mod metrics;
mod node_clients;
pub mod orchestration;
@ -9,9 +9,8 @@ pub mod readiness;
mod runner;
use async_trait::async_trait;
pub use context::{CleanupGuard, RunContext, RunHandle, RunMetrics};
pub use context::{CleanupGuard, RunContext, RunHandle, RunMetrics, RuntimeAssembly};
pub use deployer::{Deployer, ScenarioError};
pub use inventory::{BorrowedNode, BorrowedOrigin, ManagedNode, NodeHandle, NodeInventory};
pub use node_clients::NodeClients;
#[doc(hidden)]
pub use orchestration::{

View File

@ -1,6 +1,6 @@
use rand::{seq::SliceRandom as _, thread_rng};
use super::inventory::{BorrowedNode, ManagedNode, NodeInventory};
use super::inventory::NodeInventory;
use crate::scenario::{Application, DynError};
/// Collection of API clients for the node set.
@ -29,7 +29,7 @@ impl<E: Application> NodeClients<E> {
/// Build clients from preconstructed vectors.
pub fn new(nodes: Vec<E::NodeClient>) -> Self {
Self {
inventory: NodeInventory::from_managed_clients(nodes),
inventory: NodeInventory::from_clients(nodes),
}
}
@ -72,37 +72,13 @@ impl<E: Application> NodeClients<E> {
}
pub fn add_node(&self, client: E::NodeClient) {
self.inventory.add_managed_node(client, None);
self.inventory.add_client(client);
}
pub fn clear(&self) {
self.inventory.clear();
}
#[must_use]
/// Returns a cloned snapshot of managed node handles.
pub fn managed_nodes(&self) -> Vec<ManagedNode<E>> {
self.inventory.managed_nodes()
}
#[must_use]
/// Returns a cloned snapshot of borrowed node handles.
pub fn borrowed_nodes(&self) -> Vec<BorrowedNode<E>> {
self.inventory.borrowed_nodes()
}
#[must_use]
/// Finds a managed node by canonical identity.
pub fn find_managed(&self, identity: &str) -> Option<ManagedNode<E>> {
self.inventory.find_managed(identity)
}
#[must_use]
/// Finds a borrowed node by canonical identity.
pub fn find_borrowed(&self, identity: &str) -> Option<BorrowedNode<E>> {
self.inventory.find_borrowed(identity)
}
fn shuffled_snapshot(&self) -> Vec<E::NodeClient> {
let mut clients = self.snapshot();
clients.shuffle(&mut thread_rng());

View File

@ -3,9 +3,9 @@ mod source_orchestration_plan;
#[allow(dead_code)]
mod source_resolver;
pub(crate) use source_orchestration_plan::SourceOrchestrationMode;
pub use source_orchestration_plan::{
ManagedSource, SourceModeName, SourceOrchestrationMode, SourceOrchestrationPlan,
SourceOrchestrationPlanError,
ManagedSource, SourceOrchestrationPlan, SourceOrchestrationPlanError,
};
pub use source_resolver::{
build_source_orchestration_plan, orchestrate_sources, orchestrate_sources_with_providers,

View File

@ -1,6 +1,4 @@
use std::fmt;
use crate::scenario::{AttachSource, ExternalNodeSource, ScenarioSources, SourceReadinessPolicy};
use crate::scenario::{ClusterMode, ExistingCluster, ExternalNodeSource, sources::ScenarioSources};
/// Explicit descriptor for managed node sourcing.
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
@ -15,13 +13,13 @@ pub enum ManagedSource {
/// This is scaffolding-only and is intentionally not executed by deployers
/// yet.
#[derive(Clone, Debug, Eq, PartialEq)]
pub enum SourceOrchestrationMode {
pub(crate) enum SourceOrchestrationMode {
Managed {
managed: ManagedSource,
external: Vec<ExternalNodeSource>,
},
Attached {
attach: AttachSource,
attach: ExistingCluster,
external: Vec<ExternalNodeSource>,
},
ExternalOnly {
@ -34,41 +32,28 @@ pub enum SourceOrchestrationMode {
/// This captures only mapping-time source intent and readiness policy.
#[derive(Clone, Debug, Eq, PartialEq)]
pub struct SourceOrchestrationPlan {
pub mode: SourceOrchestrationMode,
pub readiness_policy: SourceReadinessPolicy,
}
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
pub enum SourceModeName {
Attached,
}
impl fmt::Display for SourceModeName {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self {
Self::Attached => f.write_str("Attached"),
}
}
mode: SourceOrchestrationMode,
}
/// Validation failure while building orchestration plan from sources.
#[derive(Debug, thiserror::Error)]
pub enum SourceOrchestrationPlanError {
#[error("source mode '{mode}' is not wired into deployers yet")]
SourceModeNotWiredYet { mode: SourceModeName },
SourceModeNotWiredYet { mode: &'static str },
}
impl SourceOrchestrationPlan {
pub fn try_from_sources(
pub(crate) fn try_from_sources(
sources: &ScenarioSources,
readiness_policy: SourceReadinessPolicy,
) -> Result<Self, SourceOrchestrationPlanError> {
let mode = mode_from_sources(sources);
Ok(Self {
mode,
readiness_policy,
})
Ok(Self { mode })
}
#[must_use]
pub(crate) fn mode(&self) -> &SourceOrchestrationMode {
&self.mode
}
#[must_use]
@ -84,34 +69,104 @@ impl SourceOrchestrationPlan {
#[cfg(test)]
mod tests {
use super::{SourceOrchestrationMode, SourceOrchestrationPlan};
use crate::scenario::{AttachSource, ScenarioSources, SourceReadinessPolicy};
use crate::scenario::{ExistingCluster, ExternalNodeSource, sources::ScenarioSources};
#[test]
fn managed_sources_are_planned() {
let plan = SourceOrchestrationPlan::try_from_sources(&ScenarioSources::default())
.expect("managed sources should build a source orchestration plan");
assert!(matches!(
plan.mode(),
SourceOrchestrationMode::Managed { .. }
));
assert!(plan.external_sources().is_empty());
}
#[test]
fn attached_sources_are_planned() {
let sources = ScenarioSources::attached(AttachSource::compose(vec!["node-0".to_string()]));
let plan =
SourceOrchestrationPlan::try_from_sources(&sources, SourceReadinessPolicy::AllReady)
.expect("attached sources should build a source orchestration plan");
let sources =
ScenarioSources::default().with_attach(ExistingCluster::for_compose_services(
"test-project".to_string(),
vec!["node-0".to_string()],
));
let plan = SourceOrchestrationPlan::try_from_sources(&sources)
.expect("attached sources should build a source orchestration plan");
assert!(matches!(
plan.mode,
plan.mode(),
SourceOrchestrationMode::Attached { .. }
));
}
#[test]
fn attached_sources_keep_external_nodes() {
let sources = ScenarioSources::default()
.with_attach(ExistingCluster::for_compose_project(
"test-project".to_string(),
))
.with_external_node(ExternalNodeSource::new(
"external-0".to_owned(),
"http://127.0.0.1:1".to_owned(),
));
let plan = SourceOrchestrationPlan::try_from_sources(&sources)
.expect("attached sources with external nodes should build");
assert!(matches!(
plan.mode(),
SourceOrchestrationMode::Attached { .. }
));
assert_eq!(plan.external_sources().len(), 1);
assert_eq!(plan.external_sources()[0].label(), "external-0");
}
#[test]
fn external_only_sources_are_planned() {
let sources = ScenarioSources::default()
.with_external_node(ExternalNodeSource::new(
"external-0".to_owned(),
"http://127.0.0.1:1".to_owned(),
))
.into_external_only();
let plan = SourceOrchestrationPlan::try_from_sources(&sources)
.expect("external-only sources should build a source orchestration plan");
assert!(matches!(
plan.mode(),
SourceOrchestrationMode::ExternalOnly { .. }
));
assert_eq!(plan.external_sources().len(), 1);
assert_eq!(plan.external_sources()[0].label(), "external-0");
}
}
fn mode_from_sources(sources: &ScenarioSources) -> SourceOrchestrationMode {
match sources {
ScenarioSources::Managed { external } => SourceOrchestrationMode::Managed {
managed: ManagedSource::DeployerManaged,
external: external.clone(),
match sources.cluster_mode() {
ClusterMode::Managed => match sources {
ScenarioSources::Managed { external } => SourceOrchestrationMode::Managed {
managed: ManagedSource::DeployerManaged,
external: external.clone(),
},
ScenarioSources::Attached { .. } | ScenarioSources::ExternalOnly { .. } => {
unreachable!("cluster mode and source storage must stay aligned")
}
},
ScenarioSources::Attached { attach, external } => SourceOrchestrationMode::Attached {
attach: attach.clone(),
external: external.clone(),
ClusterMode::ExistingCluster => match sources {
ScenarioSources::Attached { attach, external } => SourceOrchestrationMode::Attached {
attach: attach.clone(),
external: external.clone(),
},
ScenarioSources::Managed { .. } | ScenarioSources::ExternalOnly { .. } => {
unreachable!("cluster mode and source storage must stay aligned")
}
},
ScenarioSources::ExternalOnly { external } => SourceOrchestrationMode::ExternalOnly {
external: external.clone(),
ClusterMode::ExternalOnly => match sources {
ScenarioSources::ExternalOnly { external } => SourceOrchestrationMode::ExternalOnly {
external: external.clone(),
},
ScenarioSources::Managed { .. } | ScenarioSources::Attached { .. } => {
unreachable!("cluster mode and source storage must stay aligned")
}
},
}
}

View File

@ -41,10 +41,7 @@ pub enum SourceResolveError {
pub fn build_source_orchestration_plan<E: Application, Caps>(
scenario: &Scenario<E, Caps>,
) -> Result<SourceOrchestrationPlan, SourceOrchestrationPlanError> {
SourceOrchestrationPlan::try_from_sources(
scenario.sources(),
scenario.source_readiness_policy(),
)
Ok(scenario.source_orchestration_plan().clone())
}
/// Resolves runtime source nodes via unified providers from orchestration plan.
@ -52,7 +49,7 @@ pub async fn resolve_sources<E: Application>(
plan: &SourceOrchestrationPlan,
providers: &SourceProviders<E>,
) -> Result<ResolvedSources<E>, SourceResolveError> {
match &plan.mode {
match plan.mode() {
SourceOrchestrationMode::Managed { managed, .. } => {
let managed_nodes = providers.managed.provide(managed).await?;
let external_nodes = providers.external.provide(plan.external_sources()).await?;
@ -115,7 +112,8 @@ pub async fn orchestrate_sources_with_providers<E: Application>(
) -> Result<NodeClients<E>, DynError> {
let resolved = resolve_sources(plan, &providers).await?;
if matches!(plan.mode, SourceOrchestrationMode::Managed { .. }) && resolved.managed.is_empty() {
if matches!(plan.mode(), SourceOrchestrationMode::Managed { .. }) && resolved.managed.is_empty()
{
return Err(SourceResolveError::ManagedNodesMissing.into());
}

View File

@ -1,8 +1,8 @@
use async_trait::async_trait;
use crate::scenario::{Application, AttachSource, DynError};
use crate::scenario::{Application, DynError, ExistingCluster};
/// Attached node discovered from an existing external cluster source.
/// Node discovered from an existing cluster descriptor.
#[derive(Clone, Debug)]
pub struct AttachedNode<E: Application> {
/// Optional stable identity hint used by runtime inventory dedup logic.
@ -14,8 +14,8 @@ pub struct AttachedNode<E: Application> {
/// Errors returned by attach providers while discovering attached nodes.
#[derive(Debug, thiserror::Error)]
pub enum AttachProviderError {
#[error("attach source is not supported by this provider: {attach_source:?}")]
UnsupportedSource { attach_source: AttachSource },
#[error("existing cluster descriptor is not supported by this provider: {attach_source:?}")]
UnsupportedSource { attach_source: ExistingCluster },
#[error("attach discovery failed: {source}")]
Discovery {
#[source]
@ -23,16 +23,16 @@ pub enum AttachProviderError {
},
}
/// Internal adapter interface for discovering pre-existing nodes.
/// Internal adapter interface for discovering nodes in an existing cluster.
///
/// This is scaffolding-only in phase 1 and is intentionally not wired into
/// deployer runtime orchestration yet.
#[async_trait]
pub trait AttachProvider<E: Application>: Send + Sync {
/// Discovers node clients for the requested attach source.
/// Discovers node clients for the requested existing cluster.
async fn discover(
&self,
source: &AttachSource,
source: &ExistingCluster,
) -> Result<Vec<AttachedNode<E>>, AttachProviderError>;
}
@ -44,7 +44,7 @@ pub struct NoopAttachProvider;
impl<E: Application> AttachProvider<E> for NoopAttachProvider {
async fn discover(
&self,
source: &AttachSource,
source: &ExistingCluster,
) -> Result<Vec<AttachedNode<E>>, AttachProviderError> {
Err(AttachProviderError::UnsupportedSource {
attach_source: source.clone(),

View File

@ -71,11 +71,11 @@ impl<E: Application> ExternalProvider<E> for ApplicationExternalProvider {
.map(|source| {
E::external_node_client(source)
.map(|client| ExternalNode {
identity_hint: Some(source.label.clone()),
identity_hint: Some(source.label().to_string()),
client,
})
.map_err(|build_error| ExternalProviderError::Build {
source_label: source.label.clone(),
source_label: source.label().to_string(),
source: build_error,
})
})

View File

@ -34,9 +34,11 @@ impl<E: Application> Drop for Runner<E> {
}
impl<E: Application> Runner<E> {
/// Construct a runner from the run context and optional cleanup guard.
#[must_use]
pub fn new(context: RunContext<E>, cleanup_guard: Option<Box<dyn CleanupGuard>>) -> Self {
pub(crate) fn new(
context: RunContext<E>,
cleanup_guard: Option<Box<dyn CleanupGuard>>,
) -> Self {
Self {
context: Arc::new(context),
cleanup_guard,
@ -45,8 +47,8 @@ impl<E: Application> Runner<E> {
/// Access the underlying run context.
#[must_use]
pub fn context(&self) -> Arc<RunContext<E>> {
Arc::clone(&self.context)
pub fn context(&self) -> &RunContext<E> {
self.context.as_ref()
}
pub async fn wait_network_ready(&self) -> Result<(), DynError> {
@ -71,7 +73,7 @@ impl<E: Application> Runner<E> {
where
Caps: Send + Sync,
{
let context = self.context();
let context = Arc::clone(&self.context);
let run_duration = scenario.duration();
let workloads = scenario.workloads().to_vec();
let expectation_count = scenario.expectations().len();
@ -190,7 +192,7 @@ impl<E: Application> Runner<E> {
}
fn settle_wait_duration(context: &RunContext<E>) -> Option<Duration> {
let has_node_control = context.controls_nodes();
let has_node_control = context.node_control().is_some();
let configured_wait = context.expectation_cooldown();
if configured_wait.is_zero() && !has_node_control {
@ -231,7 +233,7 @@ impl<E: Application> Runner<E> {
fn cooldown_duration(context: &RunContext<E>) -> Option<Duration> {
// Managed environments need a minimum cooldown so feed and expectations
// observe stabilized state.
let needs_stabilization = context.controls_nodes();
let needs_stabilization = context.cluster_control_profile().framework_owns_lifecycle();
let mut wait = context.expectation_cooldown();

View File

@ -1,3 +1,7 @@
mod model;
pub use model::{AttachSource, ExternalNodeSource, ScenarioSources, SourceReadinessPolicy};
pub(crate) use model::ScenarioSources;
#[doc(hidden)]
pub use model::{
ClusterControlProfile, ClusterMode, ExistingCluster, ExternalNodeSource, IntoExistingCluster,
};

View File

@ -1,6 +1,13 @@
/// Typed attach source for existing clusters.
use crate::scenario::DynError;
/// Typed descriptor for an existing cluster.
#[derive(Clone, Debug, Eq, PartialEq)]
pub enum AttachSource {
pub struct ExistingCluster {
kind: ExistingClusterKind,
}
#[derive(Clone, Debug, Eq, PartialEq)]
enum ExistingClusterKind {
K8s {
namespace: Option<String>,
label_selector: String,
@ -11,52 +18,107 @@ pub enum AttachSource {
},
}
impl AttachSource {
impl ExistingCluster {
#[must_use]
pub fn k8s(label_selector: String) -> Self {
Self::K8s {
namespace: None,
label_selector,
pub fn for_k8s_selector(label_selector: String) -> Self {
Self {
kind: ExistingClusterKind::K8s {
namespace: None,
label_selector,
},
}
}
#[must_use]
pub fn with_namespace(self, namespace: String) -> Self {
match self {
Self::K8s { label_selector, .. } => Self::K8s {
pub fn for_k8s_selector_in_namespace(namespace: String, label_selector: String) -> Self {
Self {
kind: ExistingClusterKind::K8s {
namespace: Some(namespace),
label_selector,
},
other => other,
}
}
#[must_use]
pub fn compose(services: Vec<String>) -> Self {
Self::Compose {
project: None,
services,
pub fn for_compose_project(project: String) -> Self {
Self {
kind: ExistingClusterKind::Compose {
project: Some(project),
services: Vec::new(),
},
}
}
#[must_use]
pub fn with_project(self, project: String) -> Self {
match self {
Self::Compose { services, .. } => Self::Compose {
pub fn for_compose_services(project: String, services: Vec<String>) -> Self {
Self {
kind: ExistingClusterKind::Compose {
project: Some(project),
services,
},
other => other,
}
}
#[must_use]
#[doc(hidden)]
pub fn compose_project(&self) -> Option<&str> {
match &self.kind {
ExistingClusterKind::Compose { project, .. } => project.as_deref(),
ExistingClusterKind::K8s { .. } => None,
}
}
#[must_use]
#[doc(hidden)]
pub fn compose_services(&self) -> Option<&[String]> {
match &self.kind {
ExistingClusterKind::Compose { services, .. } => Some(services),
ExistingClusterKind::K8s { .. } => None,
}
}
#[must_use]
#[doc(hidden)]
pub fn k8s_namespace(&self) -> Option<&str> {
match &self.kind {
ExistingClusterKind::K8s { namespace, .. } => namespace.as_deref(),
ExistingClusterKind::Compose { .. } => None,
}
}
#[must_use]
#[doc(hidden)]
pub fn k8s_label_selector(&self) -> Option<&str> {
match &self.kind {
ExistingClusterKind::K8s { label_selector, .. } => Some(label_selector),
ExistingClusterKind::Compose { .. } => None,
}
}
}
/// Converts a value into an existing-cluster descriptor.
pub trait IntoExistingCluster {
fn into_existing_cluster(self) -> Result<ExistingCluster, DynError>;
}
impl IntoExistingCluster for ExistingCluster {
fn into_existing_cluster(self) -> Result<ExistingCluster, DynError> {
Ok(self)
}
}
impl IntoExistingCluster for &ExistingCluster {
fn into_existing_cluster(self) -> Result<ExistingCluster, DynError> {
Ok(self.clone())
}
}
/// Static external node endpoint that should be included in the runtime
/// inventory.
#[derive(Clone, Debug, Eq, PartialEq)]
pub struct ExternalNodeSource {
pub label: String,
pub endpoint: String,
label: String,
endpoint: String,
}
impl ExternalNodeSource {
@ -64,30 +126,72 @@ impl ExternalNodeSource {
pub fn new(label: String, endpoint: String) -> Self {
Self { label, endpoint }
}
#[must_use]
pub fn label(&self) -> &str {
&self.label
}
#[must_use]
pub fn endpoint(&self) -> &str {
&self.endpoint
}
}
/// Planned readiness strategy for mixed managed/attached/external sources.
#[derive(Clone, Copy, Debug, Eq, PartialEq, Default)]
pub enum SourceReadinessPolicy {
/// Phase 1 default: require every known node to pass readiness checks.
#[default]
AllReady,
/// Optional relaxed policy for large/partial environments.
Quorum,
/// Future policy for per-source constraints (for example managed minimum
/// plus overall quorum).
SourceAware,
/// High-level source mode of a scenario cluster.
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
pub enum ClusterMode {
Managed,
ExistingCluster,
ExternalOnly,
}
impl ClusterMode {
#[must_use]
pub const fn as_str(self) -> &'static str {
match self {
Self::Managed => "managed",
Self::ExistingCluster => "existing-cluster",
Self::ExternalOnly => "external-only",
}
}
}
/// High-level control/lifecycle expectation for a cluster surface.
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
pub enum ClusterControlProfile {
FrameworkManaged,
ExistingClusterAttached,
ExternalUncontrolled,
ManualControlled,
}
impl ClusterControlProfile {
#[must_use]
pub const fn as_str(self) -> &'static str {
match self {
Self::FrameworkManaged => "framework-managed",
Self::ExistingClusterAttached => "existing-cluster-attached",
Self::ExternalUncontrolled => "external-uncontrolled",
Self::ManualControlled => "manual-controlled",
}
}
#[must_use]
pub const fn framework_owns_lifecycle(self) -> bool {
matches!(self, Self::FrameworkManaged)
}
}
/// Source model that makes invalid managed+attached combinations
/// unrepresentable by type.
#[derive(Clone, Debug, Eq, PartialEq)]
pub enum ScenarioSources {
pub(crate) enum ScenarioSources {
Managed {
external: Vec<ExternalNodeSource>,
},
Attached {
attach: AttachSource,
attach: ExistingCluster,
external: Vec<ExternalNodeSource>,
},
ExternalOnly {
@ -105,45 +209,40 @@ impl Default for ScenarioSources {
impl ScenarioSources {
#[must_use]
pub const fn managed() -> Self {
Self::Managed {
external: Vec::new(),
}
}
#[must_use]
pub fn attached(attach: AttachSource) -> Self {
Self::Attached {
attach,
external: Vec::new(),
}
}
#[must_use]
pub fn external_only(external: Vec<ExternalNodeSource>) -> Self {
Self::ExternalOnly { external }
}
pub fn add_external_node(&mut self, node: ExternalNodeSource) {
match self {
pub(crate) fn with_external_node(mut self, node: ExternalNodeSource) -> Self {
match &mut self {
Self::Managed { external }
| Self::Attached { external, .. }
| Self::ExternalOnly { external } => external.push(node),
}
}
pub fn set_attach(&mut self, attach: AttachSource) {
let external = self.external_nodes().to_vec();
*self = Self::Attached { attach, external };
}
pub fn set_external_only(&mut self) {
let external = self.external_nodes().to_vec();
*self = Self::ExternalOnly { external };
self
}
#[must_use]
pub fn external_nodes(&self) -> &[ExternalNodeSource] {
pub(crate) fn with_attach(self, attach: ExistingCluster) -> Self {
let external = self.external_nodes().to_vec();
Self::Attached { attach, external }
}
#[must_use]
pub(crate) fn into_external_only(self) -> Self {
let external = self.external_nodes().to_vec();
Self::ExternalOnly { external }
}
#[must_use]
pub(crate) fn existing_cluster(&self) -> Option<&ExistingCluster> {
match self {
Self::Attached { attach, .. } => Some(attach),
Self::Managed { .. } | Self::ExternalOnly { .. } => None,
}
}
#[must_use]
pub(crate) fn external_nodes(&self) -> &[ExternalNodeSource] {
match self {
Self::Managed { external }
| Self::Attached { external, .. }
@ -152,17 +251,59 @@ impl ScenarioSources {
}
#[must_use]
pub const fn is_managed(&self) -> bool {
matches!(self, Self::Managed { .. })
pub(crate) const fn cluster_mode(&self) -> ClusterMode {
match self {
Self::Managed { .. } => ClusterMode::Managed,
Self::Attached { .. } => ClusterMode::ExistingCluster,
Self::ExternalOnly { .. } => ClusterMode::ExternalOnly,
}
}
#[must_use]
pub const fn is_attached(&self) -> bool {
matches!(self, Self::Attached { .. })
}
#[must_use]
pub const fn is_external_only(&self) -> bool {
matches!(self, Self::ExternalOnly { .. })
pub(crate) const fn control_profile(&self) -> ClusterControlProfile {
match self.cluster_mode() {
ClusterMode::Managed => ClusterControlProfile::FrameworkManaged,
ClusterMode::ExistingCluster => ClusterControlProfile::ExistingClusterAttached,
ClusterMode::ExternalOnly => ClusterControlProfile::ExternalUncontrolled,
}
}
}
#[cfg(test)]
mod tests {
use super::{ClusterControlProfile, ExistingCluster, ExternalNodeSource, ScenarioSources};
#[test]
fn managed_sources_map_to_framework_managed_control() {
assert_eq!(
ScenarioSources::default().control_profile(),
ClusterControlProfile::FrameworkManaged,
);
}
#[test]
fn attached_sources_map_to_existing_cluster_control() {
let sources = ScenarioSources::default()
.with_attach(ExistingCluster::for_compose_project("project".to_owned()));
assert_eq!(
sources.control_profile(),
ClusterControlProfile::ExistingClusterAttached,
);
}
#[test]
fn external_only_sources_map_to_uncontrolled_profile() {
let sources = ScenarioSources::default()
.with_external_node(ExternalNodeSource::new(
"node".to_owned(),
"http://node".to_owned(),
))
.into_external_only();
assert_eq!(
sources.control_profile(),
ClusterControlProfile::ExternalUncontrolled,
);
}
}

View File

@ -2,8 +2,8 @@ use std::marker::PhantomData;
use async_trait::async_trait;
use testing_framework_core::scenario::{
AttachProvider, AttachProviderError, AttachSource, AttachedNode, ClusterWaitHandle, DynError,
ExternalNodeSource, HttpReadinessRequirement, wait_http_readiness,
AttachProvider, AttachProviderError, AttachedNode, ClusterWaitHandle, DynError,
ExistingCluster, ExternalNodeSource, HttpReadinessRequirement, wait_http_readiness,
};
use url::Url;
@ -22,7 +22,7 @@ pub(super) struct ComposeAttachProvider<E: ComposeDeployEnv> {
pub(super) struct ComposeAttachedClusterWait<E: ComposeDeployEnv> {
host: String,
source: AttachSource,
source: ExistingCluster,
_env: PhantomData<E>,
}
@ -42,12 +42,14 @@ impl<E: ComposeDeployEnv> ComposeAttachProvider<E> {
}
impl<E: ComposeDeployEnv> ComposeAttachedClusterWait<E> {
pub(super) fn new(host: String, source: AttachSource) -> Self {
Self {
pub(super) fn try_new(host: String, source: &ExistingCluster) -> Result<Self, DynError> {
let _ = compose_wait_request(source)?;
Ok(Self {
host,
source,
source: source.clone(),
_env: PhantomData,
}
})
}
}
@ -60,7 +62,7 @@ struct ComposeAttachRequest<'a> {
impl<E: ComposeDeployEnv> AttachProvider<E> for ComposeAttachProvider<E> {
async fn discover(
&self,
source: &AttachSource,
source: &ExistingCluster,
) -> Result<Vec<AttachedNode<E>>, AttachProviderError> {
let request = compose_attach_request(source)?;
let services = resolve_services(request.project, request.services)
@ -85,16 +87,17 @@ fn to_discovery_error(source: DynError) -> AttachProviderError {
}
fn compose_attach_request(
source: &AttachSource,
source: &ExistingCluster,
) -> Result<ComposeAttachRequest<'_>, AttachProviderError> {
let AttachSource::Compose { project, services } = source else {
return Err(AttachProviderError::UnsupportedSource {
attach_source: source.clone(),
});
};
let services =
source
.compose_services()
.ok_or_else(|| AttachProviderError::UnsupportedSource {
attach_source: source.clone(),
})?;
let project = project
.as_deref()
let project = source
.compose_project()
.ok_or_else(|| AttachProviderError::Discovery {
source: ComposeAttachDiscoveryError::MissingProjectName.into(),
})?;
@ -172,14 +175,13 @@ impl<E: ComposeDeployEnv> ClusterWaitHandle<E> for ComposeAttachedClusterWait<E>
}
}
fn compose_wait_request(source: &AttachSource) -> Result<ComposeAttachRequest<'_>, DynError> {
let AttachSource::Compose { project, services } = source else {
return Err("compose cluster wait requires a compose attach source".into());
};
let project = project
.as_deref()
.ok_or(ComposeAttachDiscoveryError::MissingProjectName)?;
/// Extracts the compose project and service list needed to poll cluster
/// readiness; fails when the descriptor is not compose-shaped.
fn compose_wait_request(source: &ExistingCluster) -> Result<ComposeAttachRequest<'_>, DynError> {
    // Both pieces must be present; either one missing means the descriptor
    // does not describe a compose cluster.
    let (Some(project), Some(services)) = (source.compose_project(), source.compose_services())
    else {
        return Err(DynError::from(
            "compose cluster wait requires a compose existing-cluster descriptor",
        ));
    };
    Ok(ComposeAttachRequest { project, services })
}

View File

@ -9,8 +9,8 @@ use std::marker::PhantomData;
use async_trait::async_trait;
use testing_framework_core::scenario::{
AttachSource, CleanupGuard, Deployer, DynError, FeedHandle, ObservabilityCapabilityProvider,
RequiresNodeControl, Runner, Scenario,
CleanupGuard, Deployer, DynError, ExistingCluster, FeedHandle, IntoExistingCluster,
ObservabilityCapabilityProvider, RequiresNodeControl, Runner, Scenario,
};
use crate::{env::ComposeDeployEnv, errors::ComposeRunnerError, lifecycle::cleanup::RunnerCleanup};
@ -36,6 +36,22 @@ enum ComposeMetadataError {
}
impl ComposeDeploymentMetadata {
/// Builds metadata bound to the given compose project name.
#[must_use]
pub fn for_project(project_name: String) -> Self {
Self {
project_name: Some(project_name),
}
}
/// Builds metadata from an optional existing-cluster descriptor, carrying
/// over the compose project name when the descriptor provides one.
#[must_use]
pub fn from_existing_cluster(cluster: Option<&ExistingCluster>) -> Self {
    let project_name =
        cluster.and_then(|existing| existing.compose_project().map(str::to_owned));
    Self { project_name }
}
/// Returns project name when deployment is bound to a specific compose
/// project.
#[must_use]
@ -43,26 +59,56 @@ impl ComposeDeploymentMetadata {
self.project_name.as_deref()
}
/// Builds an attach source for the same compose project using deployer
/// discovery to resolve services.
pub fn attach_source(&self) -> Result<AttachSource, DynError> {
/// Builds an existing-cluster descriptor for the same compose project
/// using deployer discovery to resolve services.
pub fn existing_cluster(&self) -> Result<ExistingCluster, DynError> {
let project_name = self
.project_name()
.ok_or(ComposeMetadataError::MissingProjectName)?;
Ok(AttachSource::compose(Vec::new()).with_project(project_name.to_owned()))
Ok(ExistingCluster::for_compose_project(
project_name.to_owned(),
))
}
/// Builds an attach source for the same compose project.
/// Builds an existing-cluster descriptor for the same compose project,
/// restricted to the given service names.
///
/// # Errors
/// Fails when this metadata carries no project name.
pub fn existing_cluster_for_services(
    &self,
    services: Vec<String>,
) -> Result<ExistingCluster, DynError> {
    // A descriptor cannot be built without knowing which project to target.
    let project_name = self
        .project_name()
        .ok_or(ComposeMetadataError::MissingProjectName)?;
    let descriptor = ExistingCluster::for_compose_services(project_name.to_owned(), services);
    Ok(descriptor)
}
/// Hidden alias for [`Self::existing_cluster`]; presumably retained for
/// callers of the old attach-source API — confirm before removing.
#[doc(hidden)]
pub fn attach_source(&self) -> Result<ExistingCluster, DynError> {
self.existing_cluster()
}
#[doc(hidden)]
pub fn attach_source_for_services(
&self,
services: Vec<String>,
) -> Result<AttachSource, DynError> {
let project_name = self
.project_name()
.ok_or(ComposeMetadataError::MissingProjectName)?;
) -> Result<ExistingCluster, DynError> {
self.existing_cluster_for_services(services)
}
}
Ok(AttachSource::compose(services).with_project(project_name.to_owned()))
// Lets owned compose metadata be consumed anywhere an existing-cluster
// descriptor source is expected; delegates to `existing_cluster`.
impl IntoExistingCluster for ComposeDeploymentMetadata {
fn into_existing_cluster(self) -> Result<ExistingCluster, DynError> {
self.existing_cluster()
}
}
// Borrowed variant: allows conversion without giving up ownership of the
// metadata; behavior matches the owned impl.
impl IntoExistingCluster for &ComposeDeploymentMetadata {
fn into_existing_cluster(self) -> Result<ExistingCluster, DynError> {
self.existing_cluster()
}
}

View File

@ -3,11 +3,11 @@ use std::{env, sync::Arc, time::Duration};
use reqwest::Url;
use testing_framework_core::{
scenario::{
ApplicationExternalProvider, AttachSource, CleanupGuard, ClusterWaitHandle,
DeploymentPolicy, FeedHandle, FeedRuntime, HttpReadinessRequirement, Metrics, NodeClients,
NodeControlHandle, ObservabilityCapabilityProvider, ObservabilityInputs,
RequiresNodeControl, RunContext, Runner, Scenario, ScenarioSources,
SourceOrchestrationPlan, SourceProviders, StaticManagedProvider,
Application, ApplicationExternalProvider, CleanupGuard, ClusterControlProfile, ClusterMode,
ClusterWaitHandle, DeploymentPolicy, DynError, ExistingCluster, FeedHandle, FeedRuntime,
HttpReadinessRequirement, Metrics, NodeClients, NodeControlHandle,
ObservabilityCapabilityProvider, ObservabilityInputs, RequiresNodeControl, Runner,
RuntimeAssembly, Scenario, SourceOrchestrationPlan, SourceProviders, StaticManagedProvider,
build_source_orchestration_plan, orchestrate_sources_with_providers,
},
topology::DeploymentDescriptor,
@ -64,6 +64,12 @@ impl<E: ComposeDeployEnv> DeploymentOrchestrator<E> {
where
Caps: RequiresNodeControl + ObservabilityCapabilityProvider + Send + Sync,
{
validate_supported_cluster_mode(scenario).map_err(|source| {
ComposeRunnerError::SourceOrchestration {
source: source.into(),
}
})?;
// Source planning is currently resolved here before deployer-specific setup.
let source_plan = build_source_orchestration_plan(scenario).map_err(|source| {
ComposeRunnerError::SourceOrchestration {
@ -71,11 +77,11 @@ impl<E: ComposeDeployEnv> DeploymentOrchestrator<E> {
}
})?;
if scenario.sources().is_attached() {
if matches!(scenario.cluster_mode(), ClusterMode::ExistingCluster) {
return self
.deploy_attached_only::<Caps>(scenario, source_plan)
.deploy_existing_cluster::<Caps>(scenario, source_plan)
.await
.map(|runner| (runner, attached_metadata(scenario)));
.map(|runner| (runner, existing_cluster_metadata(scenario)));
}
let deployment = scenario.deployment();
@ -138,7 +144,7 @@ impl<E: ComposeDeployEnv> DeploymentOrchestrator<E> {
))
}
async fn deploy_attached_only<Caps>(
async fn deploy_existing_cluster<Caps>(
&self,
scenario: &Scenario<E, Caps>,
source_plan: SourceOrchestrationPlan,
@ -158,11 +164,12 @@ impl<E: ComposeDeployEnv> DeploymentOrchestrator<E> {
let node_control = self.attached_node_control::<Caps>(scenario)?;
let cluster_wait = self.attached_cluster_wait(scenario)?;
let (feed, feed_task) = spawn_block_feed_with_retry::<E>(&node_clients).await?;
let context = build_run_context(
let assembly = build_runtime_assembly(
scenario.deployment().clone(),
node_clients,
scenario.duration(),
scenario.expectation_cooldown(),
scenario.cluster_control_profile(),
observability.telemetry_handle()?,
feed,
node_control,
@ -170,7 +177,7 @@ impl<E: ComposeDeployEnv> DeploymentOrchestrator<E> {
);
let cleanup_guard: Box<dyn CleanupGuard> = Box::new(feed_task);
Ok(Runner::new(context, Some(cleanup_guard)))
Ok(assembly.build_runner(Some(cleanup_guard)))
}
fn source_providers(&self, managed_clients: Vec<E::NodeClient>) -> SourceProviders<E> {
@ -214,31 +221,15 @@ impl<E: ComposeDeployEnv> DeploymentOrchestrator<E> {
return Ok(None);
}
let ScenarioSources::Attached { attach, .. } = scenario.sources() else {
return Err(ComposeRunnerError::InternalInvariant {
message: "attached node control requested outside attached source mode",
});
};
let attach = scenario
.existing_cluster()
.ok_or(ComposeRunnerError::InternalInvariant {
message: "existing-cluster node control requested outside existing-cluster mode",
})?;
let node_control = ComposeAttachedNodeControl::try_from_existing_cluster(attach)
.map_err(|source| ComposeRunnerError::SourceOrchestration { source })?;
let AttachSource::Compose { project, .. } = attach else {
return Err(ComposeRunnerError::InternalInvariant {
message: "compose deployer requires compose attach source for node control",
});
};
let Some(project_name) = project
.as_ref()
.map(|value| value.trim())
.filter(|value| !value.is_empty())
else {
return Err(ComposeRunnerError::InternalInvariant {
message: "attached compose mode requires explicit project name for node control",
});
};
Ok(Some(Arc::new(ComposeAttachedNodeControl {
project_name: project_name.to_owned(),
}) as Arc<dyn NodeControlHandle<E>>))
Ok(Some(Arc::new(node_control) as Arc<dyn NodeControlHandle<E>>))
}
fn attached_cluster_wait<Caps>(
@ -248,16 +239,15 @@ impl<E: ComposeDeployEnv> DeploymentOrchestrator<E> {
where
Caps: Send + Sync,
{
let ScenarioSources::Attached { attach, .. } = scenario.sources() else {
return Err(ComposeRunnerError::InternalInvariant {
message: "compose attached cluster wait requested outside attached source mode",
});
};
let attach = scenario
.existing_cluster()
.ok_or(ComposeRunnerError::InternalInvariant {
message: "compose cluster wait requested outside existing-cluster mode",
})?;
let cluster_wait = ComposeAttachedClusterWait::<E>::try_new(compose_runner_host(), attach)
.map_err(|source| ComposeRunnerError::SourceOrchestration { source })?;
Ok(Arc::new(ComposeAttachedClusterWait::<E>::new(
compose_runner_host(),
attach.clone(),
)))
Ok(Arc::new(cluster_wait))
}
async fn build_runner<Caps>(
@ -274,7 +264,8 @@ impl<E: ComposeDeployEnv> DeploymentOrchestrator<E> {
{
let telemetry = observability.telemetry_handle()?;
let node_control = self.maybe_node_control::<Caps>(&prepared.environment);
let cluster_wait = self.managed_cluster_wait(project_name);
let cluster_wait =
self.managed_cluster_wait(ComposeDeploymentMetadata::for_project(project_name))?;
log_observability_endpoints(&observability);
log_profiling_urls(&deployed.host, &deployed.host_ports);
@ -285,6 +276,7 @@ impl<E: ComposeDeployEnv> DeploymentOrchestrator<E> {
descriptors: prepared.descriptors.clone(),
duration: scenario.duration(),
expectation_cooldown: scenario.expectation_cooldown(),
cluster_control_profile: scenario.cluster_control_profile(),
telemetry,
environment: &mut prepared.environment,
node_control,
@ -300,7 +292,7 @@ impl<E: ComposeDeployEnv> DeploymentOrchestrator<E> {
"compose runtime prepared"
);
Ok(Runner::new(runtime.context, Some(cleanup_guard)))
Ok(runtime.assembly.build_runner(Some(cleanup_guard)))
}
fn maybe_node_control<Caps>(
@ -318,11 +310,18 @@ impl<E: ComposeDeployEnv> DeploymentOrchestrator<E> {
})
}
fn managed_cluster_wait(&self, project_name: String) -> Arc<dyn ClusterWaitHandle<E>> {
Arc::new(ComposeAttachedClusterWait::<E>::new(
compose_runner_host(),
AttachSource::compose(Vec::new()).with_project(project_name),
))
/// Builds the cluster wait handle for a framework-managed compose
/// deployment by deriving an existing-cluster descriptor from the
/// deployment metadata.
///
/// # Errors
/// Fails when the metadata lacks a project name or the descriptor cannot
/// back a compose wait handle.
fn managed_cluster_wait(
    &self,
    metadata: ComposeDeploymentMetadata,
) -> Result<Arc<dyn ClusterWaitHandle<E>>, ComposeRunnerError> {
    // Both fallible steps surface as source-orchestration errors.
    let wrap = |source| ComposeRunnerError::SourceOrchestration { source };
    let existing_cluster = metadata.existing_cluster().map_err(wrap)?;
    let cluster_wait =
        ComposeAttachedClusterWait::<E>::try_new(compose_runner_host(), &existing_cluster)
            .map_err(wrap)?;
    Ok(Arc::new(cluster_wait))
}
fn log_deploy_start<Caps>(
@ -373,20 +372,61 @@ impl<E: ComposeDeployEnv> DeploymentOrchestrator<E> {
}
}
fn attached_metadata<E, Caps>(scenario: &Scenario<E, Caps>) -> ComposeDeploymentMetadata
/// Rejects existing-cluster scenarios whose descriptor the compose
/// deployer cannot serve; every other cluster mode passes through.
fn validate_supported_cluster_mode<E: Application, Caps>(
    scenario: &Scenario<E, Caps>,
) -> Result<(), DynError> {
    match scenario.cluster_mode() {
        ClusterMode::ExistingCluster => match scenario.existing_cluster() {
            Some(cluster) => ensure_compose_existing_cluster(cluster),
            None => Err("existing-cluster mode requires an existing cluster".into()),
        },
        // Managed and external-only modes need no compose descriptor.
        _ => Ok(()),
    }
}
/// Accepts only descriptors that carry both a compose project name and a
/// compose service list.
fn ensure_compose_existing_cluster(cluster: &ExistingCluster) -> Result<(), DynError> {
    let is_compose_descriptor =
        cluster.compose_project().is_some() && cluster.compose_services().is_some();
    if is_compose_descriptor {
        Ok(())
    } else {
        Err("compose deployer requires a compose existing-cluster descriptor".into())
    }
}
#[cfg(test)]
mod tests {
    use testing_framework_core::scenario::ExistingCluster;

    use super::ensure_compose_existing_cluster;

    #[test]
    fn compose_cluster_validator_accepts_compose_descriptor() {
        let descriptor = ExistingCluster::for_compose_project("project".to_owned());
        ensure_compose_existing_cluster(&descriptor)
            .expect("compose descriptor should be accepted");
    }

    #[test]
    fn compose_cluster_validator_rejects_k8s_descriptor() {
        let descriptor = ExistingCluster::for_k8s_selector("app=node".to_owned());
        let error = ensure_compose_existing_cluster(&descriptor)
            .expect_err("k8s descriptor should be rejected");
        assert_eq!(
            error.to_string(),
            "compose deployer requires a compose existing-cluster descriptor"
        );
    }
}
fn existing_cluster_metadata<E, Caps>(scenario: &Scenario<E, Caps>) -> ComposeDeploymentMetadata
where
E: ComposeDeployEnv,
Caps: Send + Sync,
{
let project_name = match scenario.sources() {
ScenarioSources::Attached {
attach: AttachSource::Compose { project, .. },
..
} => project.clone(),
_ => None,
};
ComposeDeploymentMetadata { project_name }
ComposeDeploymentMetadata::from_existing_cluster(scenario.existing_cluster())
}
struct DeployedNodes<E: ComposeDeployEnv> {
@ -397,7 +437,7 @@ struct DeployedNodes<E: ComposeDeployEnv> {
}
struct ComposeRuntime<E: ComposeDeployEnv> {
context: RunContext<E>,
assembly: RuntimeAssembly<E>,
feed_task: FeedHandle,
}
@ -406,6 +446,7 @@ struct RuntimeBuildInput<'a, E: ComposeDeployEnv> {
descriptors: E::Deployment,
duration: Duration,
expectation_cooldown: Duration,
cluster_control_profile: ClusterControlProfile,
telemetry: Metrics,
environment: &'a mut StackEnvironment,
node_control: Option<Arc<dyn NodeControlHandle<E>>>,
@ -426,18 +467,22 @@ async fn build_compose_runtime<E: ComposeDeployEnv>(
.start_block_feed(&node_clients, input.environment)
.await?;
let context = build_run_context(
let assembly = build_runtime_assembly(
input.descriptors,
node_clients,
input.duration,
input.expectation_cooldown,
input.cluster_control_profile,
input.telemetry,
feed,
input.node_control,
input.cluster_wait,
);
Ok(ComposeRuntime { context, feed_task })
Ok(ComposeRuntime {
assembly,
feed_task,
})
}
async fn deploy_nodes<E: ComposeDeployEnv>(
@ -470,26 +515,33 @@ async fn deploy_nodes<E: ComposeDeployEnv>(
})
}
fn build_run_context<E: ComposeDeployEnv>(
fn build_runtime_assembly<E: ComposeDeployEnv>(
descriptors: E::Deployment,
node_clients: NodeClients<E>,
run_duration: Duration,
expectation_cooldown: Duration,
cluster_control_profile: ClusterControlProfile,
telemetry: Metrics,
feed: <E::FeedRuntime as FeedRuntime>::Feed,
node_control: Option<Arc<dyn NodeControlHandle<E>>>,
cluster_wait: Arc<dyn ClusterWaitHandle<E>>,
) -> RunContext<E> {
RunContext::new(
) -> RuntimeAssembly<E> {
let mut assembly = RuntimeAssembly::new(
descriptors,
node_clients,
run_duration,
expectation_cooldown,
cluster_control_profile,
telemetry,
feed,
node_control,
)
.with_cluster_wait(cluster_wait)
.with_cluster_wait(cluster_wait);
if let Some(node_control) = node_control {
assembly = assembly.with_node_control(node_control);
}
assembly
}
fn resolve_observability_inputs<E, Caps>(

View File

@ -5,7 +5,7 @@ use std::{
use testing_framework_core::{
adjust_timeout,
scenario::{Application, DynError, NodeControlHandle},
scenario::{Application, DynError, ExistingCluster, NodeControlHandle},
};
use tokio::{process::Command, time::timeout};
use tracing::info;
@ -155,11 +155,27 @@ impl<E: Application> NodeControlHandle<E> for ComposeNodeControl {
}
}
/// Node control handle for compose attached mode.
/// Node control handle for compose existing-cluster mode.
pub struct ComposeAttachedNodeControl {
pub(crate) project_name: String,
}
impl ComposeAttachedNodeControl {
pub fn try_from_existing_cluster(source: &ExistingCluster) -> Result<Self, DynError> {
let Some(project_name) = source
.compose_project()
.map(str::trim)
.filter(|value| !value.is_empty())
else {
return Err("attached compose node control requires explicit project name".into());
};
Ok(Self {
project_name: project_name.to_owned(),
})
}
}
#[async_trait::async_trait]
impl<E: Application> NodeControlHandle<E> for ComposeAttachedNodeControl {
async fn restart_node(&self, name: &str) -> Result<(), DynError> {

View File

@ -7,8 +7,8 @@ use kube::{
api::{ListParams, ObjectList},
};
use testing_framework_core::scenario::{
AttachProvider, AttachProviderError, AttachSource, AttachedNode, ClusterWaitHandle, DynError,
ExternalNodeSource, HttpReadinessRequirement, wait_http_readiness,
AttachProvider, AttachProviderError, AttachedNode, ClusterWaitHandle, DynError,
ExistingCluster, ExternalNodeSource, HttpReadinessRequirement, wait_http_readiness,
};
use url::Url;
@ -37,7 +37,7 @@ pub(super) struct K8sAttachProvider<E: K8sDeployEnv> {
pub(super) struct K8sAttachedClusterWait<E: K8sDeployEnv> {
client: Client,
source: AttachSource,
source: ExistingCluster,
_env: PhantomData<E>,
}
@ -56,12 +56,14 @@ impl<E: K8sDeployEnv> K8sAttachProvider<E> {
}
impl<E: K8sDeployEnv> K8sAttachedClusterWait<E> {
pub(super) fn new(client: Client, source: AttachSource) -> Self {
Self {
pub(super) fn try_new(client: Client, source: &ExistingCluster) -> Result<Self, DynError> {
let _ = k8s_wait_request(source)?;
Ok(Self {
client,
source,
source: source.clone(),
_env: PhantomData,
}
})
}
}
@ -69,7 +71,7 @@ impl<E: K8sDeployEnv> K8sAttachedClusterWait<E> {
impl<E: K8sDeployEnv> AttachProvider<E> for K8sAttachProvider<E> {
async fn discover(
&self,
source: &AttachSource,
source: &ExistingCluster,
) -> Result<Vec<AttachedNode<E>>, AttachProviderError> {
let request = k8s_attach_request(source)?;
let services = discover_services(&self.client, request.namespace, request.label_selector)
@ -90,12 +92,10 @@ fn to_discovery_error(source: DynError) -> AttachProviderError {
AttachProviderError::Discovery { source }
}
fn k8s_attach_request(source: &AttachSource) -> Result<K8sAttachRequest<'_>, AttachProviderError> {
let AttachSource::K8s {
namespace,
label_selector,
} = source
else {
fn k8s_attach_request(
source: &ExistingCluster,
) -> Result<K8sAttachRequest<'_>, AttachProviderError> {
let Some(label_selector) = source.k8s_label_selector() else {
return Err(AttachProviderError::UnsupportedSource {
attach_source: source.clone(),
});
@ -108,7 +108,7 @@ fn k8s_attach_request(source: &AttachSource) -> Result<K8sAttachRequest<'_>, Att
}
Ok(K8sAttachRequest {
namespace: namespace.as_deref().unwrap_or("default"),
namespace: source.k8s_namespace().unwrap_or("default"),
label_selector,
})
}
@ -246,21 +246,17 @@ impl<E: K8sDeployEnv> ClusterWaitHandle<E> for K8sAttachedClusterWait<E> {
}
}
fn k8s_wait_request(source: &AttachSource) -> Result<K8sAttachRequest<'_>, DynError> {
let AttachSource::K8s {
namespace,
label_selector,
} = source
else {
return Err("k8s cluster wait requires a k8s attach source".into());
};
fn k8s_wait_request(source: &ExistingCluster) -> Result<K8sAttachRequest<'_>, DynError> {
let label_selector = source.k8s_label_selector().ok_or_else(|| {
DynError::from("k8s cluster wait requires a k8s existing-cluster descriptor")
})?;
if label_selector.trim().is_empty() {
return Err(K8sAttachDiscoveryError::EmptyLabelSelector.into());
}
Ok(K8sAttachRequest {
namespace: namespace.as_deref().unwrap_or("default"),
namespace: source.k8s_namespace().unwrap_or("default"),
label_selector,
})
}

View File

@ -2,7 +2,7 @@ mod attach_provider;
mod orchestrator;
pub use orchestrator::{K8sDeployer, K8sRunnerError};
use testing_framework_core::scenario::{AttachSource, DynError};
use testing_framework_core::scenario::{DynError, ExistingCluster, IntoExistingCluster};
/// Kubernetes deployment metadata returned by k8s-specific deployment APIs.
#[derive(Clone, Debug, Eq, PartialEq)]
@ -22,6 +22,18 @@ enum K8sMetadataError {
}
impl K8sDeploymentMetadata {
/// Builds metadata from an optional existing-cluster descriptor, copying
/// over the k8s namespace and label selector when present.
#[must_use]
pub fn from_existing_cluster(cluster: Option<&ExistingCluster>) -> Self {
    let owned = |value: &str| value.to_owned();
    Self {
        namespace: cluster.and_then(|c| c.k8s_namespace()).map(owned),
        label_selector: cluster.and_then(|c| c.k8s_label_selector()).map(owned),
    }
}
/// Returns namespace when deployment is bound to a specific namespace.
#[must_use]
pub fn namespace(&self) -> Option<&str> {
@ -34,13 +46,33 @@ impl K8sDeploymentMetadata {
self.label_selector.as_deref()
}
/// Builds an attach source for the same k8s deployment scope.
pub fn attach_source(&self) -> Result<AttachSource, DynError> {
/// Builds an existing-cluster descriptor for the same k8s deployment scope.
pub fn existing_cluster(&self) -> Result<ExistingCluster, DynError> {
let namespace = self.namespace().ok_or(K8sMetadataError::MissingNamespace)?;
let label_selector = self
.label_selector()
.ok_or(K8sMetadataError::MissingLabelSelector)?;
Ok(AttachSource::k8s(label_selector.to_owned()).with_namespace(namespace.to_owned()))
Ok(ExistingCluster::for_k8s_selector_in_namespace(
namespace.to_owned(),
label_selector.to_owned(),
))
}
/// Hidden alias for [`Self::existing_cluster`]; presumably retained for
/// callers of the old attach-source API — confirm before removing.
#[doc(hidden)]
pub fn attach_source(&self) -> Result<ExistingCluster, DynError> {
self.existing_cluster()
}
}
// Lets owned k8s metadata be consumed anywhere an existing-cluster
// descriptor source is expected; delegates to `existing_cluster`.
impl IntoExistingCluster for K8sDeploymentMetadata {
fn into_existing_cluster(self) -> Result<ExistingCluster, DynError> {
self.existing_cluster()
}
}
// Borrowed variant: allows conversion without giving up ownership of the
// metadata; behavior matches the owned impl.
impl IntoExistingCluster for &K8sDeploymentMetadata {
fn into_existing_cluster(self) -> Result<ExistingCluster, DynError> {
self.existing_cluster()
}
}

View File

@ -5,11 +5,11 @@ use kube::Client;
use reqwest::Url;
use testing_framework_core::{
scenario::{
Application, ApplicationExternalProvider, AttachSource, CleanupGuard, ClusterWaitHandle,
Deployer, DynError, FeedHandle, FeedRuntime, HttpReadinessRequirement, Metrics,
MetricsError, NodeClients, ObservabilityCapabilityProvider, ObservabilityInputs,
RequiresNodeControl, RunContext, Runner, Scenario, ScenarioSources,
SourceOrchestrationPlan, SourceProviders, StaticManagedProvider,
Application, ApplicationExternalProvider, CleanupGuard, ClusterControlProfile, ClusterMode,
ClusterWaitHandle, Deployer, DynError, ExistingCluster, FeedHandle, FeedRuntime,
HttpReadinessRequirement, Metrics, MetricsError, NodeClients,
ObservabilityCapabilityProvider, ObservabilityInputs, RequiresNodeControl, Runner,
RuntimeAssembly, Scenario, SourceOrchestrationPlan, SourceProviders, StaticManagedProvider,
build_source_orchestration_plan, orchestrate_sources_with_providers,
},
topology::DeploymentDescriptor,
@ -171,6 +171,9 @@ where
E: K8sDeployEnv,
Caps: ObservabilityCapabilityProvider + Send + Sync,
{
validate_supported_cluster_mode(scenario)
.map_err(|source| K8sRunnerError::SourceOrchestration { source })?;
// Source planning is currently resolved here before deployer-specific setup.
let source_plan = build_source_orchestration_plan(scenario).map_err(|source| {
K8sRunnerError::SourceOrchestration {
@ -180,9 +183,10 @@ where
let observability = resolve_observability_inputs(scenario.capabilities())?;
if scenario.sources().is_attached() {
let runner = deploy_attached_only::<E, Caps>(scenario, source_plan, observability).await?;
return Ok((runner, attached_metadata(scenario)));
if matches!(scenario.cluster_mode(), ClusterMode::ExistingCluster) {
let runner =
deploy_existing_cluster::<E, Caps>(scenario, source_plan, observability).await?;
return Ok((runner, existing_cluster_metadata(scenario)));
}
let deployment = build_k8s_deployment::<E, Caps>(deployer, scenario, &observability).await?;
@ -205,15 +209,15 @@ where
runtime.node_clients = resolve_node_clients(&source_plan, source_providers).await?;
ensure_non_empty_node_clients(&runtime.node_clients)?;
let parts = build_runner_parts(scenario, deployment.node_count, runtime, cluster_wait);
log_configured_observability(&observability);
maybe_print_endpoints::<E>(&observability, &parts.node_clients);
maybe_print_endpoints::<E>(&observability, &runtime.node_clients);
let parts = build_runner_parts(scenario, deployment.node_count, runtime, cluster_wait);
let runner = finalize_runner::<E>(&mut cluster, parts)?;
Ok((runner, metadata))
}
async fn deploy_attached_only<E, Caps>(
async fn deploy_existing_cluster<E, Caps>(
scenario: &Scenario<E, Caps>,
source_plan: SourceOrchestrationPlan,
observability: ObservabilityInputs,
@ -231,42 +235,26 @@ where
let telemetry = observability.telemetry_handle()?;
let (feed, feed_task) = spawn_block_feed_with::<E>(&node_clients).await?;
let cluster_wait = attached_cluster_wait::<E, Caps>(scenario, client)?;
let context = RunContext::new(
let context = RuntimeAssembly::new(
scenario.deployment().clone(),
node_clients,
scenario.duration(),
scenario.expectation_cooldown(),
scenario.cluster_control_profile(),
telemetry,
feed,
None,
)
.with_cluster_wait(cluster_wait);
Ok(Runner::new(context, Some(Box::new(feed_task))))
Ok(context.build_runner(Some(Box::new(feed_task))))
}
fn attached_metadata<E, Caps>(scenario: &Scenario<E, Caps>) -> K8sDeploymentMetadata
fn existing_cluster_metadata<E, Caps>(scenario: &Scenario<E, Caps>) -> K8sDeploymentMetadata
where
E: K8sDeployEnv,
Caps: Send + Sync,
{
match scenario.sources() {
ScenarioSources::Attached {
attach:
AttachSource::K8s {
namespace,
label_selector,
},
..
} => K8sDeploymentMetadata {
namespace: namespace.clone(),
label_selector: Some(label_selector.clone()),
},
_ => K8sDeploymentMetadata {
namespace: None,
label_selector: None,
},
}
K8sDeploymentMetadata::from_existing_cluster(scenario.existing_cluster())
}
fn attached_cluster_wait<E, Caps>(
@ -277,16 +265,63 @@ where
E: K8sDeployEnv,
Caps: Send + Sync,
{
let ScenarioSources::Attached { attach, .. } = scenario.sources() else {
return Err(K8sRunnerError::InternalInvariant {
message: "k8s attached cluster wait requested outside attached source mode".to_owned(),
});
};
let attach = scenario
.existing_cluster()
.ok_or_else(|| K8sRunnerError::InternalInvariant {
message: "k8s cluster wait requested outside existing-cluster mode".to_owned(),
})?;
let cluster_wait = K8sAttachedClusterWait::<E>::try_new(client, attach)
.map_err(|source| K8sRunnerError::SourceOrchestration { source })?;
Ok(Arc::new(K8sAttachedClusterWait::<E>::new(
client,
attach.clone(),
)))
Ok(Arc::new(cluster_wait))
}
/// Rejects existing-cluster scenarios whose descriptor the k8s deployer
/// cannot serve; every other cluster mode passes through.
fn validate_supported_cluster_mode<E: Application, Caps>(
    scenario: &Scenario<E, Caps>,
) -> Result<(), DynError> {
    match scenario.cluster_mode() {
        ClusterMode::ExistingCluster => match scenario.existing_cluster() {
            Some(cluster) => ensure_k8s_existing_cluster(cluster),
            None => Err("existing-cluster mode requires an existing cluster".into()),
        },
        // Managed and external-only modes need no k8s descriptor.
        _ => Ok(()),
    }
}
/// Accepts only descriptors that carry a k8s label selector.
fn ensure_k8s_existing_cluster(cluster: &ExistingCluster) -> Result<(), DynError> {
    match cluster.k8s_label_selector() {
        Some(_) => Ok(()),
        None => Err("k8s deployer requires a k8s existing-cluster descriptor".into()),
    }
}
#[cfg(test)]
mod tests {
    use testing_framework_core::scenario::ExistingCluster;

    use super::ensure_k8s_existing_cluster;

    #[test]
    fn k8s_cluster_validator_accepts_k8s_descriptor() {
        let descriptor = ExistingCluster::for_k8s_selector("app=node".to_owned());
        ensure_k8s_existing_cluster(&descriptor)
            .expect("k8s descriptor should be accepted");
    }

    #[test]
    fn k8s_cluster_validator_rejects_compose_descriptor() {
        let descriptor = ExistingCluster::for_compose_project("project".to_owned());
        let error = ensure_k8s_existing_cluster(&descriptor)
            .expect_err("compose descriptor should be rejected");
        assert_eq!(
            error.to_string(),
            "k8s deployer requires a k8s existing-cluster descriptor"
        );
    }
}
fn managed_cluster_wait<E: K8sDeployEnv>(
@ -295,13 +330,12 @@ fn managed_cluster_wait<E: K8sDeployEnv>(
) -> Result<Arc<dyn ClusterWaitHandle<E>>, K8sRunnerError> {
let client = client_from_cluster(cluster)?;
let attach_source = metadata
.attach_source()
.existing_cluster()
.map_err(|source| K8sRunnerError::SourceOrchestration { source })?;
let cluster_wait = K8sAttachedClusterWait::<E>::try_new(client, &attach_source)
.map_err(|source| K8sRunnerError::SourceOrchestration { source })?;
Ok(Arc::new(K8sAttachedClusterWait::<E>::new(
client,
attach_source,
)))
Ok(Arc::new(cluster_wait))
}
fn client_from_cluster(cluster: &Option<ClusterEnvironment>) -> Result<Client, K8sRunnerError> {
@ -517,15 +551,19 @@ fn build_runner_parts<E: K8sDeployEnv, Caps>(
cluster_wait: Arc<dyn ClusterWaitHandle<E>>,
) -> K8sRunnerParts<E> {
K8sRunnerParts {
descriptors: scenario.deployment().clone(),
node_clients: runtime.node_clients,
duration: scenario.duration(),
expectation_cooldown: scenario.expectation_cooldown(),
telemetry: runtime.telemetry,
feed: runtime.feed,
assembly: build_k8s_runtime_assembly(
scenario.deployment().clone(),
runtime.node_clients,
scenario.duration(),
scenario.expectation_cooldown(),
scenario.cluster_control_profile(),
runtime.telemetry,
runtime.feed,
cluster_wait,
),
feed_task: runtime.feed_task,
node_count,
cluster_wait,
duration_secs: scenario.duration().as_secs(),
}
}
@ -613,15 +651,10 @@ fn maybe_print_endpoints<E: K8sDeployEnv>(
}
struct K8sRunnerParts<E: K8sDeployEnv> {
descriptors: E::Deployment,
node_clients: NodeClients<E>,
duration: Duration,
expectation_cooldown: Duration,
telemetry: Metrics,
feed: Feed<E>,
assembly: RuntimeAssembly<E>,
feed_task: FeedHandle,
node_count: usize,
cluster_wait: Arc<dyn ClusterWaitHandle<E>>,
duration_secs: u64,
}
fn finalize_runner<E: K8sDeployEnv>(
@ -632,36 +665,21 @@ fn finalize_runner<E: K8sDeployEnv>(
let (cleanup, port_forwards) = environment.into_cleanup()?;
let K8sRunnerParts {
descriptors,
node_clients,
duration,
expectation_cooldown,
telemetry,
feed,
assembly,
feed_task,
node_count,
cluster_wait,
duration_secs,
} = parts;
let duration_secs = duration.as_secs();
let cleanup_guard: Box<dyn CleanupGuard> =
Box::new(K8sCleanupGuard::new(cleanup, feed_task, port_forwards));
let context = build_k8s_run_context(
descriptors,
node_clients,
duration,
expectation_cooldown,
telemetry,
feed,
cluster_wait,
);
info!(
nodes = node_count,
duration_secs, "k8s deployment ready; handing control to scenario runner"
);
Ok(Runner::new(context, Some(cleanup_guard)))
Ok(assembly.build_runner(Some(cleanup_guard)))
}
fn take_ready_cluster(
@ -674,23 +692,24 @@ fn take_ready_cluster(
})
}
fn build_k8s_run_context<E: K8sDeployEnv>(
fn build_k8s_runtime_assembly<E: K8sDeployEnv>(
descriptors: E::Deployment,
node_clients: NodeClients<E>,
duration: Duration,
expectation_cooldown: Duration,
cluster_control_profile: ClusterControlProfile,
telemetry: Metrics,
feed: Feed<E>,
cluster_wait: Arc<dyn ClusterWaitHandle<E>>,
) -> RunContext<E> {
RunContext::new(
) -> RuntimeAssembly<E> {
RuntimeAssembly::new(
descriptors,
node_clients,
duration,
expectation_cooldown,
cluster_control_profile,
telemetry,
feed,
None,
)
.with_cluster_wait(cluster_wait)
}

View File

@ -10,10 +10,10 @@ use std::{
use async_trait::async_trait;
use testing_framework_core::{
scenario::{
Application, CleanupGuard, Deployer, DeploymentPolicy, DynError, FeedHandle, FeedRuntime,
HttpReadinessRequirement, Metrics, NodeClients, NodeControlCapability, NodeControlHandle,
RetryPolicy, RunContext, Runner, Scenario, ScenarioError, SourceOrchestrationPlan,
build_source_orchestration_plan, spawn_feed,
Application, CleanupGuard, ClusterControlProfile, ClusterMode, Deployer, DeploymentPolicy,
DynError, FeedHandle, FeedRuntime, HttpReadinessRequirement, Metrics, NodeClients,
NodeControlCapability, NodeControlHandle, RetryPolicy, Runner, RuntimeAssembly, Scenario,
ScenarioError, SourceOrchestrationPlan, build_source_orchestration_plan, spawn_feed,
},
topology::DeploymentDescriptor,
};
@ -187,6 +187,8 @@ impl<E: LocalDeployerEnv> ProcessDeployer<E> {
&self,
scenario: &Scenario<E, ()>,
) -> Result<Runner<E>, ProcessDeployerError> {
validate_supported_cluster_mode(scenario)?;
// Source planning is currently resolved here before node spawn/runtime setup.
let source_plan = build_source_orchestration_plan(scenario).map_err(|source| {
ProcessDeployerError::SourceOrchestration {
@ -211,6 +213,7 @@ impl<E: LocalDeployerEnv> ProcessDeployer<E> {
node_clients,
scenario.duration(),
scenario.expectation_cooldown(),
scenario.cluster_control_profile(),
None,
)
.await?;
@ -218,13 +221,15 @@ impl<E: LocalDeployerEnv> ProcessDeployer<E> {
let cleanup_guard: Box<dyn CleanupGuard> =
Box::new(LocalProcessGuard::<E>::new(nodes, runtime.feed_task));
Ok(Runner::new(runtime.context, Some(cleanup_guard)))
Ok(runtime.assembly.build_runner(Some(cleanup_guard)))
}
async fn deploy_with_node_control(
&self,
scenario: &Scenario<E, NodeControlCapability>,
) -> Result<Runner<E>, ProcessDeployerError> {
validate_supported_cluster_mode(scenario)?;
// Source planning is currently resolved here before node spawn/runtime setup.
let source_plan = build_source_orchestration_plan(scenario).map_err(|source| {
ProcessDeployerError::SourceOrchestration {
@ -248,14 +253,14 @@ impl<E: LocalDeployerEnv> ProcessDeployer<E> {
node_clients,
scenario.duration(),
scenario.expectation_cooldown(),
scenario.cluster_control_profile(),
Some(node_control),
)
.await?;
Ok(Runner::new(
runtime.context,
Some(Box::new(runtime.feed_task)),
))
Ok(runtime
.assembly
.build_runner(Some(Box::new(runtime.feed_task))))
}
fn node_control_from(
@ -312,6 +317,22 @@ impl<E: LocalDeployerEnv> ProcessDeployer<E> {
}
}
fn validate_supported_cluster_mode<E: Application, Caps>(
scenario: &Scenario<E, Caps>,
) -> Result<(), ProcessDeployerError> {
ensure_local_cluster_mode(scenario.cluster_mode())
}
fn ensure_local_cluster_mode(mode: ClusterMode) -> Result<(), ProcessDeployerError> {
if matches!(mode, ClusterMode::ExistingCluster) {
return Err(ProcessDeployerError::SourceOrchestration {
source: DynError::from("local deployer does not support existing-cluster mode"),
});
}
Ok(())
}
fn merge_source_clients_for_local<E: LocalDeployerEnv>(
source_plan: &SourceOrchestrationPlan,
node_clients: NodeClients<E>,
@ -339,6 +360,29 @@ fn build_retry_execution_config(
(retry_policy, execution)
}
#[cfg(test)]
mod tests {
use testing_framework_core::scenario::ClusterMode;
use super::ensure_local_cluster_mode;
#[test]
fn local_cluster_validator_accepts_managed_mode() {
ensure_local_cluster_mode(ClusterMode::Managed).expect("managed mode should be accepted");
}
#[test]
fn local_cluster_validator_rejects_existing_cluster_mode() {
let error = ensure_local_cluster_mode(ClusterMode::ExistingCluster)
.expect_err("existing-cluster mode should be rejected");
assert_eq!(
error.to_string(),
"source orchestration failed: local deployer does not support existing-cluster mode"
);
}
}
async fn run_retry_attempt<E: LocalDeployerEnv>(
descriptors: &E::Deployment,
execution: RetryExecutionConfig,
@ -475,7 +519,7 @@ fn log_local_deploy_start(node_count: usize, policy: DeploymentPolicy, has_node_
}
struct RuntimeContext<E: Application> {
context: RunContext<E>,
assembly: RuntimeAssembly<E>,
feed_task: FeedHandle,
}
@ -484,6 +528,7 @@ async fn run_context_for<E: Application>(
node_clients: NodeClients<E>,
duration: Duration,
expectation_cooldown: Duration,
cluster_control_profile: ClusterControlProfile,
node_control: Option<Arc<dyn NodeControlHandle<E>>>,
) -> Result<RuntimeContext<E>, ProcessDeployerError> {
if node_clients.is_empty() {
@ -491,15 +536,21 @@ async fn run_context_for<E: Application>(
}
let (feed, feed_task) = spawn_feed_with::<E>(&node_clients).await?;
let context = RunContext::new(
let mut assembly = RuntimeAssembly::new(
descriptors,
node_clients,
duration,
expectation_cooldown,
cluster_control_profile,
Metrics::empty(),
feed,
node_control,
);
if let Some(node_control) = node_control {
assembly = assembly.with_node_control(node_control);
}
Ok(RuntimeContext { context, feed_task })
Ok(RuntimeContext {
assembly,
feed_task,
})
}

View File

@ -35,8 +35,8 @@ pub fn build_external_client<E: LocalDeployerEnv>(
}
fn resolve_api_socket(source: &ExternalNodeSource) -> Result<std::net::SocketAddr, DynError> {
let source_label = source.label.clone();
let endpoint = source.endpoint.trim();
let source_label = source.label().to_string();
let endpoint = source.endpoint().trim();
if endpoint.is_empty() {
return Err(ExternalClientBuildError::EmptyEndpoint {
label: source_label,

View File

@ -1,8 +1,8 @@
use testing_framework_core::{
manual::ManualClusterHandle,
scenario::{
DynError, ExternalNodeSource, NodeClients, NodeControlHandle, ReadinessError,
StartNodeOptions, StartedNode,
ClusterWaitHandle, DynError, ExternalNodeSource, NodeClients, NodeControlHandle,
ReadinessError, StartNodeOptions, StartedNode,
},
};
use thiserror::Error;
@ -157,19 +157,11 @@ impl<E: LocalDeployerEnv> NodeControlHandle<E> for ManualCluster<E> {
}
#[async_trait::async_trait]
impl<E: LocalDeployerEnv> ManualClusterHandle<E> for ManualCluster<E> {
async fn start_node_with(
&self,
name: &str,
options: StartNodeOptions<E>,
) -> Result<StartedNode<E>, DynError> {
self.nodes
.start_node_with(name, options)
.await
.map_err(|err| err.into())
}
impl<E: LocalDeployerEnv> ClusterWaitHandle<E> for ManualCluster<E> {
async fn wait_network_ready(&self) -> Result<(), DynError> {
self.wait_network_ready().await.map_err(|err| err.into())
}
}
#[async_trait::async_trait]
impl<E: LocalDeployerEnv> ManualClusterHandle<E> for ManualCluster<E> {}