Merge branch 'master' into feature/public-testnet-deploying

This commit is contained in:
benbierens 2023-12-11 08:30:25 +01:00
commit 5dc918287c
No known key found for this signature in database
GPG Key ID: FE44815D96D0A1AA
26 changed files with 289 additions and 43 deletions

View File

@ -12,6 +12,10 @@ on:
description: Branch with tests (master)
required: false
type: string
codexdockerimage:
description: Codex Docker image (codexstorage/nim-codex:latest-dist-tests)
required: false
type: string
nameprefix:
description: Resources prefix (c-tests)
required: false
@ -26,6 +30,7 @@ on:
type: string
tests_cleanup:
description: Runner tests cleanup
required: false
type: boolean
default: true
workflow_call:
@ -38,6 +43,10 @@ on:
description: Branch with tests (master)
required: false
type: string
codexdockerimage:
description: Codex Docker image (codexstorage/nim-codex:latest-dist-tests)
required: false
type: string
nameprefix:
description: Resources prefix (c-tests)
required: false
@ -54,11 +63,18 @@ on:
description: Runner tests cleanup
required: false
type: boolean
default: true
workflow_source:
description: Workflow source
required: false
type: string
default: ''
env:
SOURCE: ${{ format('{0}/{1}', github.server_url, github.repository) }}
BRANCH: ${{ github.ref_name }}
CODEXDOCKERIMAGE: codexstorage/nim-codex:latest-dist-tests
NAMEPREFIX: c-tests
NAMESPACE: default
TESTS_TARGET_DURATION: 2d
@ -71,11 +87,13 @@ env:
jobs:
run_tests:
name: Run Tests
name: Run Continuous Tests ${{ inputs.tests_filter }}
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v3
with:
repository: ${{ inputs.workflow_source }}
- name: Variables
run: |
@ -84,8 +102,10 @@ jobs:
echo "TESTID=$(git rev-parse --short HEAD)" >> $GITHUB_ENV
[[ -n "${{ inputs.source }}" ]] && echo "SOURCE=${{ inputs.source }}" >>"$GITHUB_ENV" || echo "SOURCE=${{ env.SOURCE }}" >>"$GITHUB_ENV"
[[ -n "${{ inputs.branch }}" ]] && echo "BRANCH=${{ inputs.branch }}" >>"$GITHUB_ENV" || echo "BRANCH=${{ env.BRANCH }}" >>"$GITHUB_ENV"
[[ -n "${{ inputs.nameprefix }}" ]] && echo "NAMEPREFIX=${{ inputs.nameprefix }}-${RUNID}" >>"$GITHUB_ENV" || echo "NAMEPREFIX=${{ env.NAMEPREFIX }}-${RUNID}" >>"$GITHUB_ENV"
[[ -n "${{ inputs.nameprefix }}" ]] && echo "DEPLOYMENT_NAMESPACE=${{ inputs.nameprefix }}-${RUNID}" >>"$GITHUB_ENV" || echo "DEPLOYMENT_NAMESPACE=${{ env.NAMEPREFIX }}-${RUNID}" >>"$GITHUB_ENV"
[[ -n "${{ inputs.codexdockerimage }}" ]] && echo "CODEXDOCKERIMAGE=${{ inputs.codexdockerimage }}" >>"$GITHUB_ENV" || echo "CODEXDOCKERIMAGE=${{ env.CODEXDOCKERIMAGE }}" >>"$GITHUB_ENV"
[[ -n "${{ inputs.nameprefix }}" ]] && NAMEPREFIX="`awk '{ print tolower($0) }' <<< ${{ inputs.nameprefix }}`" || NAMEPREFIX="`awk '{ print tolower($0) }' <<< ${{ env.NAMEPREFIX }}`"
echo "NAMEPREFIX=${NAMEPREFIX}-${RUNID}" >>"$GITHUB_ENV"
echo "DEPLOYMENT_NAMESPACE=${NAMEPREFIX}-${RUNID}" >>"$GITHUB_ENV"
[[ -n "${{ inputs.namespace }}" ]] && echo "NAMESPACE=${{ inputs.namespace }}" >>"$GITHUB_ENV" || echo "NAMESPACE=${{ env.NAMESPACE }}" >>"$GITHUB_ENV"
[[ -n "${{ inputs.tests_target_duration }}" ]] && echo "TESTS_TARGET_DURATION=${{ inputs.tests_target_duration }}" >>"$GITHUB_ENV" || echo "TESTS_TARGET_DURATION=${{ env.TESTS_TARGET_DURATION }}" >>"$GITHUB_ENV"
[[ -n "${{ inputs.tests_filter }}" ]] && echo "TESTS_FILTER=${{ inputs.tests_filter }}" >>"$GITHUB_ENV" || echo "TESTS_FILTERS=${{ env.TESTS_FILTERS }}" >>"$GITHUB_ENV"
@ -115,6 +135,7 @@ jobs:
echo "Runner namespace: ${{ env.NAMESPACE }}"
echo "----"
echo "Tests runid: ${{ env.RUNID }}"
echo "Tests codexdockerimage: ${{ env.CODEXDOCKERIMAGE }}"
echo "Tests namespace: ${{ env.DEPLOYMENT_NAMESPACE }}"
echo "Tests duration: ${{ env.TESTS_TARGET_DURATION }}"
echo "Tests filter: ${{ env.TESTS_FILTER }}"

View File

@ -3,7 +3,7 @@ name: Report - HoldMyBeerTest
on:
schedule:
- cron: '30 */49 * * *'
- cron: '30 * */2 * *'
workflow_dispatch:
jobs:

View File

@ -3,7 +3,7 @@ name: Report - PeersTest
on:
schedule:
- cron: '30 */49 * * *'
- cron: '30 * */2 * *'
workflow_dispatch:
jobs:

View File

@ -37,6 +37,11 @@ namespace FileUtils
return $"'{Filename}'{sizePostfix}";
}
// Current size of the tracked file, wrapped in the ByteSize value type.
// Delegates to GetFileSize(), defined elsewhere in this class — TODO confirm it reads the file on disk.
public ByteSize GetFilesize() => new ByteSize(GetFileSize());
private void AssertEqual(TrackedFile? actual)
{
if (actual == null) FrameworkAssert.Fail("TestFile is null.");

View File

@ -360,6 +360,7 @@ namespace KubernetesWorkflow
},
Spec = new V1PodSpec
{
PriorityClassName = GetPriorityClassName(containerRecipes),
Affinity = CreatePodAffinity(containerRecipes),
NodeSelector = CreateNodeSelector(location),
Containers = CreateDeploymentContainers(containerRecipes),
@ -417,7 +418,7 @@ namespace KubernetesWorkflow
{
new V1NodeSelectorRequirement
{
Key = "workload-type",
Key = "allow-tests-pods",
OperatorProperty = "NotIn",
Values = notIns
}
@ -435,6 +436,15 @@ namespace KubernetesWorkflow
return l.NodeLabel;
}
// Resolves the pod priority class for a set of container recipes.
// When any recipe is flagged critical, the pod runs as "system-node-critical"
// (evicted last by k8s); otherwise no priority class is set (null).
private string GetPriorityClassName(ContainerRecipe[] containerRecipes)
{
    var anyCritical = containerRecipes.Any(r => r.SetCriticalPriority);
    return anyCritical ? "system-node-critical" : null!;
}
private IDictionary<string, string> GetSelector(ContainerRecipe[] containerRecipes)
{
return containerRecipes.First().PodLabels.GetLabels();
@ -689,7 +699,8 @@ namespace KubernetesWorkflow
// Finds the pod that backs the given deployment, retrying the lookup.
// k8s may be rescheduling pods, so we allow up to ~1 minute
// (6 attempts, 10 seconds apart) for the pod to become findable.
private V1Pod GetPodForDeployment(RunningDeployment deployment)
{
    var attemptInterval = TimeSpan.FromSeconds(10);
    return Time.Retry(
        () => GetPodForDeplomentInternal(deployment),
        maxRetries: 6,
        retryTime: attemptInterval,
        description: "Find pod by label for deployment.");
}

View File

@ -0,0 +1,7 @@
namespace KubernetesWorkflow
{
public static class PublicIpService
{
public static string Address { get; } = "ip.codex.storage";
}
}

View File

@ -2,13 +2,14 @@
{
public class ContainerRecipe
{
public ContainerRecipe(int number, string? nameOverride, string image, ContainerResources resources, SchedulingAffinity schedulingAffinity, Port[] exposedPorts, Port[] internalPorts, EnvVar[] envVars, PodLabels podLabels, PodAnnotations podAnnotations, VolumeMount[] volumes, ContainerAdditionals additionals)
public ContainerRecipe(int number, string? nameOverride, string image, ContainerResources resources, SchedulingAffinity schedulingAffinity, bool setCriticalPriority, Port[] exposedPorts, Port[] internalPorts, EnvVar[] envVars, PodLabels podLabels, PodAnnotations podAnnotations, VolumeMount[] volumes, ContainerAdditionals additionals)
{
Number = number;
NameOverride = nameOverride;
Image = image;
Resources = resources;
SchedulingAffinity = schedulingAffinity;
SetCriticalPriority = setCriticalPriority;
ExposedPorts = exposedPorts;
InternalPorts = internalPorts;
EnvVars = envVars;
@ -34,6 +35,7 @@
public string? NameOverride { get; }
public ContainerResources Resources { get; }
public SchedulingAffinity SchedulingAffinity { get; }
public bool SetCriticalPriority { get; }
public string Image { get; }
public Port[] ExposedPorts { get; }
public Port[] InternalPorts { get; }

View File

@ -14,6 +14,7 @@ namespace KubernetesWorkflow.Recipe
private RecipeComponentFactory factory = null!;
private ContainerResources resources = new ContainerResources();
private SchedulingAffinity schedulingAffinity = new SchedulingAffinity();
private bool setCriticalPriority;
public ContainerRecipe CreateRecipe(int index, int containerNumber, RecipeComponentFactory factory, StartupConfig config)
{
@ -23,7 +24,7 @@ namespace KubernetesWorkflow.Recipe
Initialize(config);
var recipe = new ContainerRecipe(containerNumber, config.NameOverride, Image, resources, schedulingAffinity,
var recipe = new ContainerRecipe(containerNumber, config.NameOverride, Image, resources, schedulingAffinity, setCriticalPriority,
exposedPorts.ToArray(),
internalPorts.ToArray(),
envVars.ToArray(),
@ -42,6 +43,7 @@ namespace KubernetesWorkflow.Recipe
this.factory = null!;
resources = new ContainerResources();
schedulingAffinity = new SchedulingAffinity();
setCriticalPriority = false;
return recipe;
}
@ -128,6 +130,11 @@ namespace KubernetesWorkflow.Recipe
schedulingAffinity = new SchedulingAffinity(notIn);
}
protected void SetSystemCriticalPriority()
{
setCriticalPriority = true;
}
// Disabled following a possible bug in the k8s cluster that will throttle containers much more than is
// called for if they have resource limits defined.
//protected void SetResourceLimits(int milliCPUs, ByteSize memory)

View File

@ -16,19 +16,19 @@ namespace Logging
this.debug = debug;
}
public static void Measure(ILog log, string name, Action action, bool debug = false)
public static TimeSpan Measure(ILog log, string name, Action action, bool debug = false)
{
var sw = Begin(log, name, debug);
action();
sw.End();
return sw.End();
}
public static T Measure<T>(ILog log, string name, Func<T> action, bool debug = false)
public static StopwatchResult<T> Measure<T>(ILog log, string name, Func<T> action, bool debug = false)
{
var sw = Begin(log, name, debug);
var result = action();
sw.End();
return result;
var duration = sw.End();
return new StopwatchResult<T>(result, duration);
}
public static Stopwatch Begin(ILog log)
@ -68,4 +68,16 @@ namespace Logging
return duration;
}
}
/// <summary>
/// Pairs the value produced by a measured operation with the time the operation took.
/// Immutable after construction.
/// </summary>
public class StopwatchResult<T>
{
    private readonly T value;
    private readonly TimeSpan duration;

    public StopwatchResult(T value, TimeSpan duration)
    {
        this.value = value;
        this.duration = duration;
    }

    /// <summary>The result returned by the measured operation.</summary>
    public T Value => value;

    /// <summary>How long the measured operation ran.</summary>
    public TimeSpan Duration => duration;
}
}

View File

@ -46,6 +46,18 @@
}
}
/// <summary>
/// A ByteSize interpreted as a transfer rate (bytes per second).
/// Formatting matches ByteSize with a "/s" suffix appended.
/// </summary>
public class BytesPerSecond : ByteSize
{
    public BytesPerSecond(long sizeInBytes)
        : base(sizeInBytes)
    {
    }

    public override string ToString() => $"{base.ToString()}/s";
}
public static class ByteSizeIntExtensions
{
private const long Kilo = 1024;

View File

@ -7,7 +7,7 @@ namespace CodexContractsPlugin
{
public class CodexContractsContainerRecipe : ContainerRecipeFactory
{
public static string DockerImage { get; } = "codexstorage/codex-contracts-eth:sha-1854dfb-dist-tests";
public static string DockerImage { get; } = "codexstorage/codex-contracts-eth:latest-dist-tests";
public const string MarketplaceAddressFilename = "/hardhat/deployments/codexdisttestnetwork/Marketplace.json";
public const string MarketplaceArtifactFilename = "/hardhat/artifacts/contracts/Marketplace.sol/Marketplace.json";
@ -21,7 +21,7 @@ namespace CodexContractsPlugin
var address = config.GethNode.StartResult.Container.GetAddress(new NullLog(), GethContainerRecipe.HttpPortTag);
SetSchedulingAffinity(notIn: "tests-runners");
SetSchedulingAffinity(notIn: "false");
AddEnvVar("DISTTEST_NETWORK_URL", address.ToString());
AddEnvVar("HARDHAT_NETWORK", "codexdisttestnetwork");

View File

@ -13,7 +13,7 @@ namespace CodexDiscordBotPlugin
{
var config = startupConfig.Get<DiscordBotStartupConfig>();
SetSchedulingAffinity(notIn: "tests-runners");
SetSchedulingAffinity(notIn: "false");
AddEnvVar("TOKEN", config.Token);
AddEnvVar("SERVERNAME", config.ServerName);

View File

@ -68,12 +68,12 @@ namespace CodexPlugin
public Stream DownloadFile(string contentId)
{
return Http().HttpGetStream("data/" + contentId);
return Http().HttpGetStream("data/" + contentId + "/network");
}
public CodexLocalDataResponse[] LocalFiles()
{
return Http().HttpGetJson<CodexLocalDataResponse[]>("local");
return Http().HttpGetJson<CodexLocalDataResponse[]>("data");
}
public CodexSalesAvailabilityResponse SalesAvailability(CodexSalesAvailabilityRequest request)

View File

@ -29,7 +29,8 @@ namespace CodexPlugin
SetResourcesRequest(milliCPUs: 100, memory: 100.MB());
//SetResourceLimits(milliCPUs: 4000, memory: 12.GB());
SetSchedulingAffinity(notIn: "tests-runners");
SetSchedulingAffinity(notIn: "false");
SetSystemCriticalPriority();
var config = startupConfig.Get<CodexStartupConfig>();

View File

@ -25,6 +25,7 @@ namespace CodexPlugin
IMarketplaceAccess Marketplace { get; }
CrashWatcher CrashWatcher { get; }
PodInfo GetPodInfo();
ITransferSpeeds TransferSpeeds { get; }
void Stop();
}
@ -34,6 +35,7 @@ namespace CodexPlugin
private const string UploadFailedMessage = "Unable to store block";
private readonly IPluginTools tools;
private readonly EthAddress? ethAddress;
private readonly TransferSpeeds transferSpeeds;
public CodexNode(IPluginTools tools, CodexAccess codexAccess, CodexNodeGroup group, IMarketplaceAccess marketplaceAccess, EthAddress? ethAddress)
{
@ -43,6 +45,7 @@ namespace CodexPlugin
Group = group;
Marketplace = marketplaceAccess;
Version = new CodexDebugVersionResponse();
transferSpeeds = new TransferSpeeds();
}
public RunningContainer Container { get { return CodexAccess.Container; } }
@ -51,6 +54,7 @@ namespace CodexPlugin
public CodexNodeGroup Group { get; }
public IMarketplaceAccess Marketplace { get; }
public CodexDebugVersionResponse Version { get; private set; }
public ITransferSpeeds TransferSpeeds { get => transferSpeeds; }
public IMetricsScrapeTarget MetricsScrapeTarget
{
get
@ -101,11 +105,14 @@ namespace CodexPlugin
var logMessage = $"Uploading file {file.Describe()}...";
Log(logMessage);
var response = Stopwatch.Measure(tools.GetLog(), logMessage, () =>
var measurement = Stopwatch.Measure(tools.GetLog(), logMessage, () =>
{
return CodexAccess.UploadFile(fileStream);
});
var response = measurement.Value;
transferSpeeds.AddUploadSample(file.GetFilesize(), measurement.Duration);
if (string.IsNullOrEmpty(response)) FrameworkAssert.Fail("Received empty response.");
if (response.StartsWith(UploadFailedMessage)) FrameworkAssert.Fail("Node failed to store block.");
@ -118,7 +125,8 @@ namespace CodexPlugin
var logMessage = $"Downloading for contentId: '{contentId.Id}'...";
Log(logMessage);
var file = tools.GetFileManager().CreateEmptyFile(fileLabel);
Stopwatch.Measure(tools.GetLog(), logMessage, () => DownloadToFile(contentId.Id, file));
var measurement = Stopwatch.Measure(tools.GetLog(), logMessage, () => DownloadToFile(contentId.Id, file));
transferSpeeds.AddDownloadSample(file.GetFilesize(), measurement);
Log($"Downloaded file {file.Describe()} to '{file.Filename}'.");
return file;
}

View File

@ -10,6 +10,7 @@ namespace CodexPlugin
{
string MakeStorageAvailable(ByteSize size, TestToken minPriceForTotalSpace, TestToken maxCollateral, TimeSpan maxDuration);
StoragePurchaseContract RequestStorage(ContentId contentId, TestToken pricePerSlotPerSecond, TestToken requiredCollateral, uint minRequiredNumberOfNodes, int proofProbability, TimeSpan duration);
StoragePurchaseContract RequestStorage(ContentId contentId, TestToken pricePerSlotPerSecond, TestToken requiredCollateral, uint minRequiredNumberOfNodes, int proofProbability, TimeSpan duration, TimeSpan expiry);
}
public class MarketplaceAccess : IMarketplaceAccess
@ -25,13 +26,20 @@ namespace CodexPlugin
/// <summary>
/// Convenience overload: requests storage with the expiry defaulted to half the requested duration,
/// keeping the expiry safely inside the contract duration.
/// </summary>
public StoragePurchaseContract RequestStorage(ContentId contentId, TestToken pricePerSlotPerSecond, TestToken requiredCollateral, uint minRequiredNumberOfNodes, int proofProbability, TimeSpan duration)
{
return RequestStorage(contentId, pricePerSlotPerSecond, requiredCollateral, minRequiredNumberOfNodes, proofProbability, duration, duration / 2);
}
public StoragePurchaseContract RequestStorage(ContentId contentId, TestToken pricePerSlotPerSecond, TestToken requiredCollateral, uint minRequiredNumberOfNodes, int proofProbability, TimeSpan duration, TimeSpan expiry)
{
var expireUtc = DateTimeOffset.UtcNow.ToUnixTimeSeconds() + expiry.TotalSeconds;
var request = new CodexSalesRequestStorageRequest
{
duration = ToDecInt(duration.TotalSeconds),
proofProbability = ToDecInt(proofProbability),
reward = ToDecInt(pricePerSlotPerSecond),
collateral = ToDecInt(requiredCollateral),
expiry = null,
expiry = ToDecInt(expireUtc),
nodes = minRequiredNumberOfNodes,
tolerance = null,
};
@ -41,11 +49,14 @@ namespace CodexPlugin
$"requiredCollateral: {requiredCollateral}, " +
$"minRequiredNumberOfNodes: {minRequiredNumberOfNodes}, " +
$"proofProbability: {proofProbability}, " +
$"expiry: {Time.FormatDuration(expiry)}, " +
$"duration: {Time.FormatDuration(duration)})");
var response = codexAccess.RequestStorage(request, contentId.Id);
if (response == "Purchasing not available")
if (response == "Purchasing not available" ||
response == "Expiry required" ||
response == "Expiry needs to be in future")
{
throw new InvalidOperationException(response);
}
@ -104,6 +115,12 @@ namespace CodexPlugin
return null!;
}
/// <summary>
/// Marketplace is not available in this configuration; this overload never succeeds.
/// Calls Unavailable() (behavior defined elsewhere in this class — presumably logs/fails) and then throws.
/// </summary>
/// <exception cref="NotImplementedException">Always thrown if Unavailable() returns.</exception>
public StoragePurchaseContract RequestStorage(ContentId contentId, TestToken pricePerSlotPerSecond, TestToken requiredCollateral, uint minRequiredNumberOfNodes, int proofProbability, TimeSpan duration, TimeSpan expiry)
{
Unavailable();
throw new NotImplementedException();
}
public string MakeStorageAvailable(ByteSize size, TestToken minPricePerBytePerSecond, TestToken maxCollateral, TimeSpan duration)
{
Unavailable();

View File

@ -0,0 +1,67 @@
using Utils;
namespace CodexPlugin
{
    /// <summary>
    /// Read-only view of the average transfer speeds observed for a node.
    /// </summary>
    public interface ITransferSpeeds
    {
        /// <summary>Average upload speed, or null when no upload samples were recorded.</summary>
        BytesPerSecond? GetUploadSpeed();

        /// <summary>Average download speed, or null when no download samples were recorded.</summary>
        BytesPerSecond? GetDownloadSpeed();
    }

    /// <summary>
    /// Collects upload/download samples and reports their average speeds.
    /// NOTE(review): not thread-safe; assumes samples are added from a single thread — TODO confirm.
    /// </summary>
    public class TransferSpeeds : ITransferSpeeds
    {
        private readonly List<BytesPerSecond> uploads = new List<BytesPerSecond>();
        private readonly List<BytesPerSecond> downloads = new List<BytesPerSecond>();

        /// <summary>Records one completed upload of 'bytes' that took 'duration'.</summary>
        public void AddUploadSample(ByteSize bytes, TimeSpan duration)
        {
            uploads.Add(Convert(bytes, duration));
        }

        /// <summary>Records one completed download of 'bytes' that took 'duration'.</summary>
        public void AddDownloadSample(ByteSize bytes, TimeSpan duration)
        {
            downloads.Add(Convert(bytes, duration));
        }

        public BytesPerSecond? GetUploadSpeed()
        {
            if (!uploads.Any()) return null;
            return uploads.Average();
        }

        public BytesPerSecond? GetDownloadSpeed()
        {
            if (!downloads.Any()) return null;
            return downloads.Average();
        }

        private static BytesPerSecond Convert(ByteSize size, TimeSpan duration)
        {
            double bytes = size.SizeInBytes;
            double seconds = duration.TotalSeconds;
            // Guard: a zero (or negative) duration would make bytes/seconds Infinity,
            // and System.Convert.ToInt64(Infinity) throws OverflowException.
            // Treat an instantaneous transfer as "all bytes in one second".
            if (seconds <= 0.0) return new BytesPerSecond(size.SizeInBytes);
            return new BytesPerSecond(System.Convert.ToInt64(Math.Round(bytes / seconds)));
        }
    }

    public static class ListExtensions
    {
        /// <summary>Average of a non-empty list of speeds, rounded to the nearest byte/s.</summary>
        public static BytesPerSecond Average(this List<BytesPerSecond> list)
        {
            double sum = list.Sum(i => i.SizeInBytes);
            double num = list.Count;
            return new BytesPerSecond(Convert.ToInt64(Math.Round(sum / num)));
        }

        /// <summary>
        /// Average over a list that may be null, empty, or contain nulls.
        /// Returns null when there are no non-null samples.
        /// </summary>
        public static BytesPerSecond? OptionalAverage(this List<BytesPerSecond?>? list)
        {
            if (list == null) return null;
            // Reuse Average() on the non-null samples instead of duplicating the math.
            var values = list.Where(i => i != null).Cast<BytesPerSecond>().ToList();
            if (!values.Any()) return null;
            return values.Average();
        }
    }
}

View File

@ -24,7 +24,8 @@ namespace GethPlugin
var args = CreateArgs(config);
SetSchedulingAffinity(notIn: "tests-runners");
SetSchedulingAffinity(notIn: "false");
SetSystemCriticalPriority();
AddEnvVar("GETH_ARGS", args);
}

View File

@ -14,7 +14,7 @@ namespace MetricsPlugin
{
var config = startupConfig.Get<PrometheusStartupConfig>();
SetSchedulingAffinity(notIn: "tests-runners");
SetSchedulingAffinity(notIn: "false");
AddExposedPortAndVar("PROM_PORT", PortTag);
AddEnvVar("PROM_CONFIG", config.PrometheusConfigBase64);

View File

@ -197,6 +197,11 @@ namespace ContinuousTests
if (error.Contains(":")) error = error.Substring(1 + error.LastIndexOf(":"));
result.Add("error", error);
var upload = nodes.Select(n => n.TransferSpeeds.GetUploadSpeed()).ToList()!.OptionalAverage();
var download = nodes.Select(n => n.TransferSpeeds.GetDownloadSpeed()).ToList()!.OptionalAverage();
if (upload != null) result.Add("avgupload", upload.ToString());
if (download != null) result.Add("avgdownload", download.ToString());
return result;
}

View File

@ -0,0 +1,41 @@
# Codex Continuous Test-net Report
Date: 05-12-2023
Report for: 11-2023
## Continuous test-net Status
Continuous test runs (which can take many hours or days) can easily be started by a team member from the GitHub Actions UI. Results are collected and displayed in Grafana. For the time being, we're suspending the effort to have a network of Codex nodes "always online" and continuously being tested, until overall reliability improves.
## Deployment Configuration
Continuous Test-net is deployed to the Kubernetes cluster with the following configuration:
5x Codex Nodes:
- Log-level: Trace
- Storage quota: 20480 MB
- Storage sell: 1024 MB
- Min price: 1024
- Max collateral: 1024
- Max duration: 3600000 seconds
- Block-TTL*: 99999999 seconds
- Block-MI*: 99999999 seconds
- Block-MN*: 100 blocks
3 of these 5 nodes have:
- Validator: true
## Test Overview
| Changes | Test | Description | Status | Results |
|---------|------------------|--------------------------------|---------|----------------------|
| todo | Two-client test | See report for July 2023. | Faulted | Test reliably fails. |
| todo | Two-client test* | See report for September 2023. | Faulted | Test reliably fails. |
| todo | HoldMyBeer test | See report for August 2023. | todo | todo |
| todo | Peers test | See report for August 2023. | todo | todo |
## Resulting changes
As a result of the testing efforts in 11-2023, these changes were made:
1. todo
## Action Points
- Debugging efforts continue.
-

View File

@ -6,14 +6,13 @@ using Core;
using DistTestCore;
using DistTestCore.Helpers;
using DistTestCore.Logs;
using NUnit.Framework;
using NUnit.Framework.Constraints;
namespace CodexTests
{
public class CodexDistTest : DistTest
{
private readonly List<ICodexNode> onlineCodexNodes = new List<ICodexNode>();
private readonly Dictionary<TestLifecycle, List<ICodexNode>> onlineCodexNodes = new Dictionary<TestLifecycle, List<ICodexNode>>();
public CodexDistTest()
{
@ -23,12 +22,6 @@ namespace CodexTests
ProjectPlugin.Load<MetricsPlugin.MetricsPlugin>();
}
[TearDown]
public void TearDownCodexFixture()
{
onlineCodexNodes.Clear();
}
protected override void Initialize(FixtureLog fixtureLog)
{
var localBuilder = new LocalCodexBuilder(fixtureLog);
@ -36,6 +29,16 @@ namespace CodexTests
localBuilder.Build();
}
// Called by the base DistTest when a test's lifecycle is created:
// begin tracking the codex nodes started during this test run.
protected override void LifecycleStart(TestLifecycle lifecycle)
{
onlineCodexNodes.Add(lifecycle, new List<ICodexNode>());
}
protected override void LifecycleStop(TestLifecycle lifecycle)
{
onlineCodexNodes.Remove(lifecycle);
}
public ICodexNode AddCodex()
{
return AddCodex(s => { });
@ -58,7 +61,7 @@ namespace CodexTests
setup(s);
OnCodexSetup(s);
});
onlineCodexNodes.AddRange(group);
onlineCodexNodes[Get()].AddRange(group);
return group;
}
@ -74,7 +77,7 @@ namespace CodexTests
public IEnumerable<ICodexNode> GetAllOnlineCodexNodes()
{
return onlineCodexNodes;
return onlineCodexNodes[Get()];
}
public void AssertBalance(ICodexContracts contracts, ICodexNode codexNode, Constraint constraint, string msg = "")
@ -85,5 +88,14 @@ namespace CodexTests
protected virtual void OnCodexSetup(ICodexSetup setup)
{
}
// Adds average upload/download speeds across this lifecycle's codex nodes
// to the status-log metadata. OptionalAverage returns null when no samples
// exist, in which case the corresponding entry is omitted.
// NOTE(review): indexing assumes LifecycleStart ran for this lifecycle — TODO confirm.
protected override void CollectStatusLogData(TestLifecycle lifecycle, Dictionary<string, string> data)
{
var nodes = onlineCodexNodes[lifecycle];
var upload = nodes.Select(n => n.TransferSpeeds.GetUploadSpeed()).ToList()!.OptionalAverage();
var download = nodes.Select(n => n.TransferSpeeds.GetDownloadSpeed()).ToList()!.OptionalAverage();
if (upload != null) data.Add("avgupload", upload.ToString());
if (download != null) data.Add("avgdownload", download.ToString());
}
}
}

View File

@ -9,7 +9,6 @@ namespace CodexTests.Helpers
public class PeerDownloadTestHelpers : IFullConnectivityImplementation
{
private readonly FullConnectivityHelper helper;
private readonly ILog log;
private readonly IFileManager fileManager;
private ByteSize testFileSize;
@ -17,7 +16,6 @@ namespace CodexTests.Helpers
{
helper = new FullConnectivityHelper(log, this);
testFileSize = 1.MB();
this.log = log;
this.fileManager = fileManager;
}
@ -45,11 +43,11 @@ namespace CodexTests.Helpers
private PeerConnectionState CheckConnectivity(Entry from, Entry to)
{
var expectedFile = GenerateTestFile(from.Node, to.Node);
var contentId = Stopwatch.Measure(log, "Upload", () => from.Node.UploadFile(expectedFile));
var contentId = from.Node.UploadFile(expectedFile);
try
{
var downloadedFile = Stopwatch.Measure(log, "Download", () => DownloadFile(to.Node, contentId, expectedFile.Label + "_downloaded"));
var downloadedFile = DownloadFile(to.Node, contentId, expectedFile.Label + "_downloaded");
expectedFile.AssertIsEqual(downloadedFile);
return PeerConnectionState.Connection;
}

View File

@ -147,6 +147,18 @@ namespace DistTestCore
{
}
/// <summary>
/// Hook invoked right after a test's lifecycle is created. Default: no-op.
/// </summary>
protected virtual void LifecycleStart(TestLifecycle lifecycle)
{
}
/// <summary>
/// Hook invoked during test teardown, before resources are deleted. Default: no-op.
/// </summary>
protected virtual void LifecycleStop(TestLifecycle lifecycle)
{
}
/// <summary>
/// Hook allowing derived tests to add entries to the status-log metadata
/// before the test is concluded. Default: no-op.
/// </summary>
protected virtual void CollectStatusLogData(TestLifecycle lifecycle, Dictionary<string, string> data)
{
}
protected TestLifecycle Get()
{
lock (lifecycleLock)
@ -166,6 +178,7 @@ namespace DistTestCore
var testNamespace = TestNamespacePrefix + Guid.NewGuid().ToString();
var lifecycle = new TestLifecycle(fixtureLog.CreateTestLog(), configuration, GetTimeSet(), testNamespace);
lifecycles.Add(testName, lifecycle);
LifecycleStart(lifecycle);
}
});
}
@ -175,13 +188,16 @@ namespace DistTestCore
var lifecycle = Get();
var testResult = GetTestResult();
var testDuration = lifecycle.GetTestDuration();
var data = lifecycle.GetPluginMetadata();
CollectStatusLogData(lifecycle, data);
fixtureLog.Log($"{GetCurrentTestName()} = {testResult} ({testDuration})");
statusLog.ConcludeTest(testResult, testDuration, lifecycle.GetPluginMetadata());
statusLog.ConcludeTest(testResult, testDuration, data);
Stopwatch.Measure(fixtureLog, $"Teardown for {GetCurrentTestName()}", () =>
{
WriteEndTestLog(lifecycle.Log);
IncludeLogsOnTestFailure(lifecycle);
LifecycleStop(lifecycle);
lifecycle.DeleteAllResources();
lifecycle = null!;
});

View File

@ -7,6 +7,7 @@ metadata:
name: ${NAMEPREFIX}
runid: ${RUNID}
spec:
ttlSecondsAfterFinished: 86400
backoffLimit: 0
template:
metadata:
@ -18,7 +19,7 @@ spec:
spec:
priorityClassName: system-node-critical
nodeSelector:
workload-type: "tests-runners"
workload-type: "tests-runners-ci"
containers:
- name: ${NAMEPREFIX}-runner
image: codexstorage/cs-codex-dist-tests:latest
@ -39,6 +40,8 @@ spec:
value: "${SOURCE}"
- name: RUNID
value: "${RUNID}"
- name: CODEXDOCKERIMAGE
value: "${CODEXDOCKERIMAGE}"
- name: TESTID
value: "${TESTID}"
- name: DEPLOYMENT_NAMESPACE

View File

@ -18,7 +18,7 @@ spec:
spec:
priorityClassName: system-node-critical
nodeSelector:
workload-type: "tests-runners"
workload-type: "tests-runners-ci"
containers:
- name: ${NAMEPREFIX}-runner
image: codexstorage/cs-codex-dist-tests:latest