// Mirror of https://github.com/logos-blockchain/logos-blockchain-testing.git
// Synced 2026-03-31 16:23:08 +00:00
mod attach_provider;
|
|
pub mod clients;
|
|
pub mod orchestrator;
|
|
pub mod ports;
|
|
pub mod readiness;
|
|
pub mod setup;
|
|
|
|
use std::marker::PhantomData;
|
|
|
|
use async_trait::async_trait;
|
|
use testing_framework_core::scenario::{
|
|
CleanupGuard, Deployer, DynError, ExistingCluster, FeedHandle, ObservabilityCapabilityProvider,
|
|
RequiresNodeControl, Runner, Scenario,
|
|
};
|
|
|
|
use crate::{env::ComposeDeployEnv, errors::ComposeRunnerError, lifecycle::cleanup::RunnerCleanup};
|
|
|
|
/// Docker Compose-based deployer for test scenarios.
|
|
#[derive(Clone, Copy)]
|
|
pub struct ComposeDeployer<E: ComposeDeployEnv> {
|
|
readiness_checks: bool,
|
|
_env: PhantomData<E>,
|
|
}
|
|
|
|
/// Compose deployment metadata returned by compose-specific deployment APIs.
///
/// Records which Docker Compose project, if any, the deployment was bound to.
#[derive(Clone, Debug, PartialEq, Eq)]
pub struct ComposeDeploymentMetadata {
    /// Docker Compose project name used for this deployment when available.
    pub project_name: Option<String>,
}
|
|
|
|
/// Internal error raised when compose metadata is not bound to a project.
#[derive(Debug)]
enum ComposeMetadataError {
    /// The metadata carries no compose project name.
    MissingProjectName,
}

// Hand-written `Display`/`Error` impls, equivalent to the previous
// `thiserror` derive; the user-facing message is unchanged.
impl std::fmt::Display for ComposeMetadataError {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        match self {
            Self::MissingProjectName => {
                f.write_str("compose deployment metadata has no project name")
            }
        }
    }
}

impl std::error::Error for ComposeMetadataError {}
|
|
|
|
impl ComposeDeploymentMetadata {
|
|
#[must_use]
|
|
pub fn for_project(project_name: String) -> Self {
|
|
Self {
|
|
project_name: Some(project_name),
|
|
}
|
|
}
|
|
|
|
#[must_use]
|
|
pub fn from_existing_cluster(cluster: Option<&ExistingCluster>) -> Self {
|
|
Self {
|
|
project_name: cluster
|
|
.and_then(ExistingCluster::compose_project)
|
|
.map(ToOwned::to_owned),
|
|
}
|
|
}
|
|
|
|
/// Returns project name when deployment is bound to a specific compose
|
|
/// project.
|
|
#[must_use]
|
|
pub fn project_name(&self) -> Option<&str> {
|
|
self.project_name.as_deref()
|
|
}
|
|
|
|
/// Builds an existing-cluster descriptor for the same compose project
|
|
/// using deployer discovery to resolve services.
|
|
pub fn existing_cluster(&self) -> Result<ExistingCluster, DynError> {
|
|
let project_name = self
|
|
.project_name()
|
|
.ok_or(ComposeMetadataError::MissingProjectName)?;
|
|
|
|
Ok(ExistingCluster::compose_in_project(
|
|
Vec::new(),
|
|
project_name.to_owned(),
|
|
))
|
|
}
|
|
|
|
/// Builds an existing-cluster descriptor for the same compose project.
|
|
pub fn existing_cluster_for_services(
|
|
&self,
|
|
services: Vec<String>,
|
|
) -> Result<ExistingCluster, DynError> {
|
|
let project_name = self
|
|
.project_name()
|
|
.ok_or(ComposeMetadataError::MissingProjectName)?;
|
|
|
|
Ok(ExistingCluster::compose_in_project(
|
|
services,
|
|
project_name.to_owned(),
|
|
))
|
|
}
|
|
|
|
#[doc(hidden)]
|
|
pub fn attach_source(&self) -> Result<ExistingCluster, DynError> {
|
|
self.existing_cluster()
|
|
}
|
|
|
|
#[doc(hidden)]
|
|
pub fn attach_source_for_services(
|
|
&self,
|
|
services: Vec<String>,
|
|
) -> Result<ExistingCluster, DynError> {
|
|
self.existing_cluster_for_services(services)
|
|
}
|
|
}
|
|
|
|
impl<E: ComposeDeployEnv> Default for ComposeDeployer<E> {
|
|
fn default() -> Self {
|
|
Self::new()
|
|
}
|
|
}
|
|
|
|
impl<E: ComposeDeployEnv> ComposeDeployer<E> {
|
|
#[must_use]
|
|
pub const fn new() -> Self {
|
|
Self {
|
|
readiness_checks: true,
|
|
_env: PhantomData,
|
|
}
|
|
}
|
|
|
|
#[must_use]
|
|
pub const fn with_readiness(mut self, enabled: bool) -> Self {
|
|
self.readiness_checks = enabled;
|
|
self
|
|
}
|
|
|
|
/// Deploy and return compose-specific metadata alongside the generic
|
|
/// runner.
|
|
pub async fn deploy_with_metadata<Caps>(
|
|
&self,
|
|
scenario: &Scenario<E, Caps>,
|
|
) -> Result<(Runner<E>, ComposeDeploymentMetadata), ComposeRunnerError>
|
|
where
|
|
Caps: RequiresNodeControl + ObservabilityCapabilityProvider + Send + Sync,
|
|
{
|
|
let deployer = Self {
|
|
readiness_checks: self.readiness_checks,
|
|
_env: PhantomData,
|
|
};
|
|
|
|
orchestrator::DeploymentOrchestrator::new(deployer)
|
|
.deploy_with_metadata(scenario)
|
|
.await
|
|
}
|
|
}
|
|
|
|
#[async_trait]
|
|
impl<E, Caps> Deployer<E, Caps> for ComposeDeployer<E>
|
|
where
|
|
Caps: RequiresNodeControl + ObservabilityCapabilityProvider + Send + Sync,
|
|
E: ComposeDeployEnv,
|
|
{
|
|
type Error = ComposeRunnerError;
|
|
|
|
async fn deploy(&self, scenario: &Scenario<E, Caps>) -> Result<Runner<E>, Self::Error> {
|
|
let deployer = Self {
|
|
readiness_checks: self.readiness_checks,
|
|
_env: PhantomData,
|
|
};
|
|
orchestrator::DeploymentOrchestrator::new(deployer)
|
|
.deploy(scenario)
|
|
.await
|
|
}
|
|
}
|
|
|
|
/// Cleanup guard that tears down the block feed and then the compose
/// environment (see the `CleanupGuard` impl for the ordering).
pub(super) struct ComposeCleanupGuard {
    // Environment teardown handle; cleaned up last.
    environment: RunnerCleanup,
    // `Option` so `cleanup` can move the handle out of `self` with `take()`.
    block_feed: Option<FeedHandle>,
}
|
|
|
|
impl ComposeCleanupGuard {
|
|
const fn new(environment: RunnerCleanup, block_feed: FeedHandle) -> Self {
|
|
Self {
|
|
environment,
|
|
block_feed: Some(block_feed),
|
|
}
|
|
}
|
|
}
|
|
|
|
impl CleanupGuard for ComposeCleanupGuard {
|
|
fn cleanup(mut self: Box<Self>) {
|
|
if let Some(block_feed) = self.block_feed.take() {
|
|
CleanupGuard::cleanup(Box::new(block_feed));
|
|
}
|
|
CleanupGuard::cleanup(Box::new(self.environment));
|
|
}
|
|
}
|
|
|
|
pub(super) fn make_cleanup_guard(
|
|
environment: RunnerCleanup,
|
|
block_feed: FeedHandle,
|
|
) -> Box<dyn CleanupGuard> {
|
|
Box::new(ComposeCleanupGuard::new(environment, block_feed))
|
|
}
|
|
|
|
// NOTE(review): test module is currently empty; coverage presumably lives in
// integration tests elsewhere in the workspace — TODO confirm.
#[cfg(test)]
mod tests {}
|