From 83c13b771c0002dc5d2910941fe3fbc1ded9d315 Mon Sep 17 00:00:00 2001
From: andrussal
Date: Sun, 7 Dec 2025 07:33:23 +0100
Subject: [PATCH] Fix k8s runner keeping scenario alive during checks

---
 examples/src/bin/k8s_runner.rs | 8 ++++++--
 1 file changed, 6 insertions(+), 2 deletions(-)

diff --git a/examples/src/bin/k8s_runner.rs b/examples/src/bin/k8s_runner.rs
index 2a0c2a5..de4d18e 100644
--- a/examples/src/bin/k8s_runner.rs
+++ b/examples/src/bin/k8s_runner.rs
@@ -75,10 +75,11 @@ async fn run_k8s_case(
     let validator_clients = runner.context().node_clients().validator_clients().to_vec();
 
     info!("running scenario");
-    let _handle = runner
+    // Keep the handle alive until after we query consensus info, so port-forwards
+    // and services stay up while we inspect nodes.
+    let handle = runner
         .run(&mut plan)
         .await
-        .map(|_| ())
         .map_err(|err| format!("k8s scenario failed: {err}"))?;
 
     for (idx, client) in validator_clients.iter().enumerate() {
@@ -95,6 +96,9 @@ async fn run_k8s_case(
         }
     }
 
+    // Explicitly drop after checks, allowing cleanup to proceed.
+    drop(handle);
+
     Ok(())
 }
 
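Note (not part of the patch): below is a minimal, self-contained sketch of the drop-based cleanup pattern this change relies on. The runner's real handle type is not shown in the patch, so the `ScenarioHandle` and `run_scenario` names here are hypothetical; the sketch only illustrates why discarding the run result with `.map(|_| ())` tore the scenario down before the node checks, while binding the handle and dropping it afterwards keeps the scenario (port-forwards, services) up during the checks.

// Hypothetical stand-in for the runner's scenario handle; in the real runner
// its Drop impl is where port-forwards and services get shut down.
struct ScenarioHandle {
    name: String,
}

impl Drop for ScenarioHandle {
    fn drop(&mut self) {
        println!("tearing down scenario `{}`", self.name);
    }
}

// Hypothetical stand-in for `runner.run(&mut plan)`: starts the scenario and
// returns a handle whose lifetime controls the cleanup.
fn run_scenario(name: &str) -> ScenarioHandle {
    println!("starting scenario `{name}`");
    ScenarioHandle {
        name: name.to_string(),
    }
}

fn main() {
    // Before the fix: `.map(|_| ())` consumed the handle inside the closure,
    // so Drop ran immediately and the checks hit an already-torn-down scenario.
    // The line below reproduces that effect:
    // let _ = run_scenario("k8s-case"); // dropped right away

    // After the fix: bind the handle so the scenario stays alive while the
    // node checks run, then drop it explicitly once they are done.
    let handle = run_scenario("k8s-case");
    println!("querying consensus info while the scenario is still up");
    drop(handle); // explicit teardown after the checks
}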