diff --git a/.github/workflows/continuous-tests.yaml b/.github/workflows/continuous-tests.yaml index 6eab3279..74334e89 100644 --- a/.github/workflows/continuous-tests.yaml +++ b/.github/workflows/continuous-tests.yaml @@ -12,6 +12,10 @@ on: description: Branch with tests (master) required: false type: string + codexdockerimage: + description: Codex Docker image (codexstorage/nim-codex:latest-dist-tests) + required: false + type: string nameprefix: description: Resources prefix (c-tests) required: false @@ -26,6 +30,7 @@ on: type: string tests_cleanup: description: Runner tests cleanup + required: false type: boolean default: true workflow_call: @@ -38,6 +43,10 @@ on: description: Branch with tests (master) required: false type: string + codexdockerimage: + description: Codex Docker image (codexstorage/nim-codex:latest-dist-tests) + required: false + type: string nameprefix: description: Resources prefix (c-tests) required: false @@ -54,11 +63,18 @@ on: description: Runner tests cleanup required: false type: boolean + default: true + workflow_source: + description: Workflow source + required: false + type: string + default: '' env: SOURCE: ${{ format('{0}/{1}', github.server_url, github.repository) }} BRANCH: ${{ github.ref_name }} + CODEXDOCKERIMAGE: codexstorage/nim-codex:latest-dist-tests NAMEPREFIX: c-tests NAMESPACE: default TESTS_TARGET_DURATION: 2d @@ -71,11 +87,13 @@ env: jobs: run_tests: - name: Run Tests + name: Run Continuous Tests ${{ inputs.tests_filter }} runs-on: ubuntu-latest steps: - name: Checkout uses: actions/checkout@v3 + with: + repository: ${{ inputs.workflow_source }} - name: Variables run: | @@ -84,8 +102,10 @@ jobs: echo "TESTID=$(git rev-parse --short HEAD)" >> $GITHUB_ENV [[ -n "${{ inputs.source }}" ]] && echo "SOURCE=${{ inputs.source }}" >>"$GITHUB_ENV" || echo "SOURCE=${{ env.SOURCE }}" >>"$GITHUB_ENV" [[ -n "${{ inputs.branch }}" ]] && echo "BRANCH=${{ inputs.branch }}" >>"$GITHUB_ENV" || echo "BRANCH=${{ env.BRANCH }}" 
>>"$GITHUB_ENV" - [[ -n "${{ inputs.nameprefix }}" ]] && echo "NAMEPREFIX=${{ inputs.nameprefix }}-${RUNID}" >>"$GITHUB_ENV" || echo "NAMEPREFIX=${{ env.NAMEPREFIX }}-${RUNID}" >>"$GITHUB_ENV" - [[ -n "${{ inputs.nameprefix }}" ]] && echo "DEPLOYMENT_NAMESPACE=${{ inputs.nameprefix }}-${RUNID}" >>"$GITHUB_ENV" || echo "DEPLOYMENT_NAMESPACE=${{ env.NAMEPREFIX }}-${RUNID}" >>"$GITHUB_ENV" + [[ -n "${{ inputs.codexdockerimage }}" ]] && echo "CODEXDOCKERIMAGE=${{ inputs.codexdockerimage }}" >>"$GITHUB_ENV" || echo "CODEXDOCKERIMAGE=${{ env.CODEXDOCKERIMAGE }}" >>"$GITHUB_ENV" + [[ -n "${{ inputs.nameprefix }}" ]] && NAMEPREFIX="`awk '{ print tolower($0) }' <<< ${{ inputs.nameprefix }}`" || NAMEPREFIX="`awk '{ print tolower($0) }' <<< ${{ env.NAMEPREFIX }}`" + echo "NAMEPREFIX=${NAMEPREFIX}-${RUNID}" >>"$GITHUB_ENV" + echo "DEPLOYMENT_NAMESPACE=${NAMEPREFIX}-${RUNID}" >>"$GITHUB_ENV" [[ -n "${{ inputs.namespace }}" ]] && echo "NAMESPACE=${{ inputs.namespace }}" >>"$GITHUB_ENV" || echo "NAMESPACE=${{ env.NAMESPACE }}" >>"$GITHUB_ENV" [[ -n "${{ inputs.tests_target_duration }}" ]] && echo "TESTS_TARGET_DURATION=${{ inputs.tests_target_duration }}" >>"$GITHUB_ENV" || echo "TESTS_TARGET_DURATION=${{ env.TESTS_TARGET_DURATION }}" >>"$GITHUB_ENV" [[ -n "${{ inputs.tests_filter }}" ]] && echo "TESTS_FILTER=${{ inputs.tests_filter }}" >>"$GITHUB_ENV" || echo "TESTS_FILTERS=${{ env.TESTS_FILTERS }}" >>"$GITHUB_ENV" @@ -115,6 +135,7 @@ jobs: echo "Runner namespace: ${{ env.NAMESPACE }}" echo "----" echo "Tests runid: ${{ env.RUNID }}" + echo "Tests codexdockerimage: ${{ env.CODEXDOCKERIMAGE }}" echo "Tests namespace: ${{ env.DEPLOYMENT_NAMESPACE }}" echo "Tests duration: ${{ env.TESTS_TARGET_DURATION }}" echo "Tests filter: ${{ env.TESTS_FILTER }}" diff --git a/.github/workflows/report-HoldMyBeerTest.yaml b/.github/workflows/report-HoldMyBeerTest.yaml index 7bd78904..c4ae2608 100644 --- a/.github/workflows/report-HoldMyBeerTest.yaml +++ 
b/.github/workflows/report-HoldMyBeerTest.yaml @@ -3,7 +3,7 @@ name: Report - HoldMyBeerTest on: schedule: - - cron: '30 */49 * * *' + - cron: '30 * */2 * *' workflow_dispatch: jobs: diff --git a/.github/workflows/report-PeersTest.yaml b/.github/workflows/report-PeersTest.yaml index 0f1166bc..b063a02c 100644 --- a/.github/workflows/report-PeersTest.yaml +++ b/.github/workflows/report-PeersTest.yaml @@ -3,7 +3,7 @@ name: Report - PeersTest on: schedule: - - cron: '30 */49 * * *' + - cron: '30 * */2 * *' workflow_dispatch: jobs: diff --git a/Framework/FileUtils/TrackedFile.cs b/Framework/FileUtils/TrackedFile.cs index 5f9b04a8..694408b9 100644 --- a/Framework/FileUtils/TrackedFile.cs +++ b/Framework/FileUtils/TrackedFile.cs @@ -37,6 +37,11 @@ namespace FileUtils return $"'{Filename}'{sizePostfix}"; } + public ByteSize GetFilesize() + { + return new ByteSize(GetFileSize()); + } + private void AssertEqual(TrackedFile? actual) { if (actual == null) FrameworkAssert.Fail("TestFile is null."); diff --git a/Framework/KubernetesWorkflow/K8sController.cs b/Framework/KubernetesWorkflow/K8sController.cs index 27f477a1..bfea181b 100644 --- a/Framework/KubernetesWorkflow/K8sController.cs +++ b/Framework/KubernetesWorkflow/K8sController.cs @@ -360,6 +360,7 @@ namespace KubernetesWorkflow }, Spec = new V1PodSpec { + PriorityClassName = GetPriorityClassName(containerRecipes), Affinity = CreatePodAffinity(containerRecipes), NodeSelector = CreateNodeSelector(location), Containers = CreateDeploymentContainers(containerRecipes), @@ -417,7 +418,7 @@ namespace KubernetesWorkflow { new V1NodeSelectorRequirement { - Key = "workload-type", + Key = "allow-tests-pods", OperatorProperty = "NotIn", Values = notIns } @@ -435,6 +436,15 @@ namespace KubernetesWorkflow return l.NodeLabel; } + private string GetPriorityClassName(ContainerRecipe[] containerRecipes) + { + if (containerRecipes.Any(c => c.SetCriticalPriority)) + { + return "system-node-critical"; + } + return null!; + } + private 
IDictionary GetSelector(ContainerRecipe[] containerRecipes) { return containerRecipes.First().PodLabels.GetLabels(); @@ -689,7 +699,8 @@ namespace KubernetesWorkflow private V1Pod GetPodForDeployment(RunningDeployment deployment) { return Time.Retry(() => GetPodForDeplomentInternal(deployment), - maxRetries: 2, + // We will wait up to 1 minute, k8s might be moving pods around. + maxRetries: 6, retryTime: TimeSpan.FromSeconds(10), description: "Find pod by label for deployment."); } diff --git a/Framework/KubernetesWorkflow/PublicIpService.cs b/Framework/KubernetesWorkflow/PublicIpService.cs new file mode 100644 index 00000000..6e7f4143 --- /dev/null +++ b/Framework/KubernetesWorkflow/PublicIpService.cs @@ -0,0 +1,7 @@ +namespace KubernetesWorkflow +{ + public static class PublicIpService + { + public static string Address { get; } = "ip.codex.storage"; + } +} diff --git a/Framework/KubernetesWorkflow/Recipe/ContainerRecipe.cs b/Framework/KubernetesWorkflow/Recipe/ContainerRecipe.cs index 1865dc75..fb7c8a43 100644 --- a/Framework/KubernetesWorkflow/Recipe/ContainerRecipe.cs +++ b/Framework/KubernetesWorkflow/Recipe/ContainerRecipe.cs @@ -2,13 +2,14 @@ { public class ContainerRecipe { - public ContainerRecipe(int number, string? nameOverride, string image, ContainerResources resources, SchedulingAffinity schedulingAffinity, Port[] exposedPorts, Port[] internalPorts, EnvVar[] envVars, PodLabels podLabels, PodAnnotations podAnnotations, VolumeMount[] volumes, ContainerAdditionals additionals) + public ContainerRecipe(int number, string? 
nameOverride, string image, ContainerResources resources, SchedulingAffinity schedulingAffinity, bool setCriticalPriority, Port[] exposedPorts, Port[] internalPorts, EnvVar[] envVars, PodLabels podLabels, PodAnnotations podAnnotations, VolumeMount[] volumes, ContainerAdditionals additionals) { Number = number; NameOverride = nameOverride; Image = image; Resources = resources; SchedulingAffinity = schedulingAffinity; + SetCriticalPriority = setCriticalPriority; ExposedPorts = exposedPorts; InternalPorts = internalPorts; EnvVars = envVars; @@ -34,6 +35,7 @@ public string? NameOverride { get; } public ContainerResources Resources { get; } public SchedulingAffinity SchedulingAffinity { get; } + public bool SetCriticalPriority { get; } public string Image { get; } public Port[] ExposedPorts { get; } public Port[] InternalPorts { get; } diff --git a/Framework/KubernetesWorkflow/Recipe/ContainerRecipeFactory.cs b/Framework/KubernetesWorkflow/Recipe/ContainerRecipeFactory.cs index 7c734e80..931013dc 100644 --- a/Framework/KubernetesWorkflow/Recipe/ContainerRecipeFactory.cs +++ b/Framework/KubernetesWorkflow/Recipe/ContainerRecipeFactory.cs @@ -14,6 +14,7 @@ namespace KubernetesWorkflow.Recipe private RecipeComponentFactory factory = null!; private ContainerResources resources = new ContainerResources(); private SchedulingAffinity schedulingAffinity = new SchedulingAffinity(); + private bool setCriticalPriority; public ContainerRecipe CreateRecipe(int index, int containerNumber, RecipeComponentFactory factory, StartupConfig config) { @@ -23,7 +24,7 @@ namespace KubernetesWorkflow.Recipe Initialize(config); - var recipe = new ContainerRecipe(containerNumber, config.NameOverride, Image, resources, schedulingAffinity, + var recipe = new ContainerRecipe(containerNumber, config.NameOverride, Image, resources, schedulingAffinity, setCriticalPriority, exposedPorts.ToArray(), internalPorts.ToArray(), envVars.ToArray(), @@ -42,6 +43,7 @@ namespace KubernetesWorkflow.Recipe 
this.factory = null!; resources = new ContainerResources(); schedulingAffinity = new SchedulingAffinity(); + setCriticalPriority = false; return recipe; } @@ -128,6 +130,11 @@ namespace KubernetesWorkflow.Recipe schedulingAffinity = new SchedulingAffinity(notIn); } + protected void SetSystemCriticalPriority() + { + setCriticalPriority = true; + } + // Disabled following a possible bug in the k8s cluster that will throttle containers much more than is // called for if they have resource limits defined. //protected void SetResourceLimits(int milliCPUs, ByteSize memory) diff --git a/Framework/Logging/Stopwatch.cs b/Framework/Logging/Stopwatch.cs index fddd8b55..d2f13b18 100644 --- a/Framework/Logging/Stopwatch.cs +++ b/Framework/Logging/Stopwatch.cs @@ -16,19 +16,19 @@ namespace Logging this.debug = debug; } - public static void Measure(ILog log, string name, Action action, bool debug = false) + public static TimeSpan Measure(ILog log, string name, Action action, bool debug = false) { var sw = Begin(log, name, debug); action(); - sw.End(); + return sw.End(); } - public static T Measure(ILog log, string name, Func action, bool debug = false) + public static StopwatchResult Measure(ILog log, string name, Func action, bool debug = false) { var sw = Begin(log, name, debug); var result = action(); - sw.End(); - return result; + var duration = sw.End(); + return new StopwatchResult(result, duration); } public static Stopwatch Begin(ILog log) @@ -68,4 +68,16 @@ namespace Logging return duration; } } + + public class StopwatchResult + { + public StopwatchResult(T value, TimeSpan duration) + { + Value = value; + Duration = duration; + } + + public T Value { get; } + public TimeSpan Duration { get; } + } } diff --git a/Framework/Utils/ByteSize.cs b/Framework/Utils/ByteSize.cs index 170aaf7f..a5204c71 100644 --- a/Framework/Utils/ByteSize.cs +++ b/Framework/Utils/ByteSize.cs @@ -46,6 +46,18 @@ } } + public class BytesPerSecond : ByteSize + { + public BytesPerSecond(long 
sizeInBytes) : base(sizeInBytes) + { + } + + public override string ToString() + { + return base.ToString() + "/s"; + } + } + public static class ByteSizeIntExtensions { private const long Kilo = 1024; diff --git a/ProjectPlugins/CodexContractsPlugin/CodexContractsContainerRecipe.cs b/ProjectPlugins/CodexContractsPlugin/CodexContractsContainerRecipe.cs index 3e5faa19..e3814142 100644 --- a/ProjectPlugins/CodexContractsPlugin/CodexContractsContainerRecipe.cs +++ b/ProjectPlugins/CodexContractsPlugin/CodexContractsContainerRecipe.cs @@ -7,7 +7,7 @@ namespace CodexContractsPlugin { public class CodexContractsContainerRecipe : ContainerRecipeFactory { - public static string DockerImage { get; } = "codexstorage/codex-contracts-eth:sha-1854dfb-dist-tests"; + public static string DockerImage { get; } = "codexstorage/codex-contracts-eth:latest-dist-tests"; public const string MarketplaceAddressFilename = "/hardhat/deployments/codexdisttestnetwork/Marketplace.json"; public const string MarketplaceArtifactFilename = "/hardhat/artifacts/contracts/Marketplace.sol/Marketplace.json"; @@ -21,7 +21,7 @@ namespace CodexContractsPlugin var address = config.GethNode.StartResult.Container.GetAddress(new NullLog(), GethContainerRecipe.HttpPortTag); - SetSchedulingAffinity(notIn: "tests-runners"); + SetSchedulingAffinity(notIn: "false"); AddEnvVar("DISTTEST_NETWORK_URL", address.ToString()); AddEnvVar("HARDHAT_NETWORK", "codexdisttestnetwork"); diff --git a/ProjectPlugins/CodexDiscordBotPlugin/DiscordBotContainerRecipe.cs b/ProjectPlugins/CodexDiscordBotPlugin/DiscordBotContainerRecipe.cs index c193e028..8a5dcd37 100644 --- a/ProjectPlugins/CodexDiscordBotPlugin/DiscordBotContainerRecipe.cs +++ b/ProjectPlugins/CodexDiscordBotPlugin/DiscordBotContainerRecipe.cs @@ -13,7 +13,7 @@ namespace CodexDiscordBotPlugin { var config = startupConfig.Get(); - SetSchedulingAffinity(notIn: "tests-runners"); + SetSchedulingAffinity(notIn: "false"); AddEnvVar("TOKEN", config.Token); 
AddEnvVar("SERVERNAME", config.ServerName); diff --git a/ProjectPlugins/CodexPlugin/CodexAccess.cs b/ProjectPlugins/CodexPlugin/CodexAccess.cs index 9fa1683a..a305d76b 100644 --- a/ProjectPlugins/CodexPlugin/CodexAccess.cs +++ b/ProjectPlugins/CodexPlugin/CodexAccess.cs @@ -68,12 +68,12 @@ namespace CodexPlugin public Stream DownloadFile(string contentId) { - return Http().HttpGetStream("data/" + contentId); + return Http().HttpGetStream("data/" + contentId + "/network"); } public CodexLocalDataResponse[] LocalFiles() { - return Http().HttpGetJson("local"); + return Http().HttpGetJson("data"); } public CodexSalesAvailabilityResponse SalesAvailability(CodexSalesAvailabilityRequest request) diff --git a/ProjectPlugins/CodexPlugin/CodexContainerRecipe.cs b/ProjectPlugins/CodexPlugin/CodexContainerRecipe.cs index 5907f968..2e7e84a7 100644 --- a/ProjectPlugins/CodexPlugin/CodexContainerRecipe.cs +++ b/ProjectPlugins/CodexPlugin/CodexContainerRecipe.cs @@ -29,7 +29,8 @@ namespace CodexPlugin SetResourcesRequest(milliCPUs: 100, memory: 100.MB()); //SetResourceLimits(milliCPUs: 4000, memory: 12.GB()); - SetSchedulingAffinity(notIn: "tests-runners"); + SetSchedulingAffinity(notIn: "false"); + SetSystemCriticalPriority(); var config = startupConfig.Get(); diff --git a/ProjectPlugins/CodexPlugin/CodexNode.cs b/ProjectPlugins/CodexPlugin/CodexNode.cs index d0a06360..2c79459e 100644 --- a/ProjectPlugins/CodexPlugin/CodexNode.cs +++ b/ProjectPlugins/CodexPlugin/CodexNode.cs @@ -25,6 +25,7 @@ namespace CodexPlugin IMarketplaceAccess Marketplace { get; } CrashWatcher CrashWatcher { get; } PodInfo GetPodInfo(); + ITransferSpeeds TransferSpeeds { get; } void Stop(); } @@ -34,6 +35,7 @@ namespace CodexPlugin private const string UploadFailedMessage = "Unable to store block"; private readonly IPluginTools tools; private readonly EthAddress? 
ethAddress; + private readonly TransferSpeeds transferSpeeds; public CodexNode(IPluginTools tools, CodexAccess codexAccess, CodexNodeGroup group, IMarketplaceAccess marketplaceAccess, EthAddress? ethAddress) { @@ -43,6 +45,7 @@ namespace CodexPlugin Group = group; Marketplace = marketplaceAccess; Version = new CodexDebugVersionResponse(); + transferSpeeds = new TransferSpeeds(); } public RunningContainer Container { get { return CodexAccess.Container; } } @@ -51,6 +54,7 @@ namespace CodexPlugin public CodexNodeGroup Group { get; } public IMarketplaceAccess Marketplace { get; } public CodexDebugVersionResponse Version { get; private set; } + public ITransferSpeeds TransferSpeeds { get => transferSpeeds; } public IMetricsScrapeTarget MetricsScrapeTarget { get @@ -101,11 +105,14 @@ namespace CodexPlugin var logMessage = $"Uploading file {file.Describe()}..."; Log(logMessage); - var response = Stopwatch.Measure(tools.GetLog(), logMessage, () => + var measurement = Stopwatch.Measure(tools.GetLog(), logMessage, () => { return CodexAccess.UploadFile(fileStream); }); + var response = measurement.Value; + transferSpeeds.AddUploadSample(file.GetFilesize(), measurement.Duration); + if (string.IsNullOrEmpty(response)) FrameworkAssert.Fail("Received empty response."); if (response.StartsWith(UploadFailedMessage)) FrameworkAssert.Fail("Node failed to store block."); @@ -118,7 +125,8 @@ namespace CodexPlugin var logMessage = $"Downloading for contentId: '{contentId.Id}'..."; Log(logMessage); var file = tools.GetFileManager().CreateEmptyFile(fileLabel); - Stopwatch.Measure(tools.GetLog(), logMessage, () => DownloadToFile(contentId.Id, file)); + var measurement = Stopwatch.Measure(tools.GetLog(), logMessage, () => DownloadToFile(contentId.Id, file)); + transferSpeeds.AddDownloadSample(file.GetFilesize(), measurement); Log($"Downloaded file {file.Describe()} to '{file.Filename}'."); return file; } diff --git a/ProjectPlugins/CodexPlugin/MarketplaceAccess.cs 
b/ProjectPlugins/CodexPlugin/MarketplaceAccess.cs index 9edea6e4..cde4eb6e 100644 --- a/ProjectPlugins/CodexPlugin/MarketplaceAccess.cs +++ b/ProjectPlugins/CodexPlugin/MarketplaceAccess.cs @@ -10,6 +10,7 @@ namespace CodexPlugin { string MakeStorageAvailable(ByteSize size, TestToken minPriceForTotalSpace, TestToken maxCollateral, TimeSpan maxDuration); StoragePurchaseContract RequestStorage(ContentId contentId, TestToken pricePerSlotPerSecond, TestToken requiredCollateral, uint minRequiredNumberOfNodes, int proofProbability, TimeSpan duration); + StoragePurchaseContract RequestStorage(ContentId contentId, TestToken pricePerSlotPerSecond, TestToken requiredCollateral, uint minRequiredNumberOfNodes, int proofProbability, TimeSpan duration, TimeSpan expiry); } public class MarketplaceAccess : IMarketplaceAccess @@ -25,13 +26,20 @@ namespace CodexPlugin public StoragePurchaseContract RequestStorage(ContentId contentId, TestToken pricePerSlotPerSecond, TestToken requiredCollateral, uint minRequiredNumberOfNodes, int proofProbability, TimeSpan duration) { + return RequestStorage(contentId, pricePerSlotPerSecond, requiredCollateral, minRequiredNumberOfNodes, proofProbability, duration, duration / 2); + } + + public StoragePurchaseContract RequestStorage(ContentId contentId, TestToken pricePerSlotPerSecond, TestToken requiredCollateral, uint minRequiredNumberOfNodes, int proofProbability, TimeSpan duration, TimeSpan expiry) + { + var expireUtc = DateTimeOffset.UtcNow.ToUnixTimeSeconds() + expiry.TotalSeconds; + var request = new CodexSalesRequestStorageRequest { duration = ToDecInt(duration.TotalSeconds), proofProbability = ToDecInt(proofProbability), reward = ToDecInt(pricePerSlotPerSecond), collateral = ToDecInt(requiredCollateral), - expiry = null, + expiry = ToDecInt(expireUtc), nodes = minRequiredNumberOfNodes, tolerance = null, }; @@ -41,11 +49,14 @@ namespace CodexPlugin $"requiredCollateral: {requiredCollateral}, " + $"minRequiredNumberOfNodes: 
{minRequiredNumberOfNodes}, " + $"proofProbability: {proofProbability}, " + + $"expiry: {Time.FormatDuration(expiry)}, " + $"duration: {Time.FormatDuration(duration)})"); var response = codexAccess.RequestStorage(request, contentId.Id); - if (response == "Purchasing not available") + if (response == "Purchasing not available" || + response == "Expiry required" || + response == "Expiry needs to be in future") { throw new InvalidOperationException(response); } @@ -104,6 +115,12 @@ namespace CodexPlugin return null!; } + public StoragePurchaseContract RequestStorage(ContentId contentId, TestToken pricePerSlotPerSecond, TestToken requiredCollateral, uint minRequiredNumberOfNodes, int proofProbability, TimeSpan duration, TimeSpan expiry) + { + Unavailable(); + throw new NotImplementedException(); + } + public string MakeStorageAvailable(ByteSize size, TestToken minPricePerBytePerSecond, TestToken maxCollateral, TimeSpan duration) { Unavailable(); diff --git a/ProjectPlugins/CodexPlugin/TransferSpeeds.cs b/ProjectPlugins/CodexPlugin/TransferSpeeds.cs new file mode 100644 index 00000000..b745f644 --- /dev/null +++ b/ProjectPlugins/CodexPlugin/TransferSpeeds.cs @@ -0,0 +1,67 @@ +using Utils; + +namespace CodexPlugin +{ + public interface ITransferSpeeds + { + BytesPerSecond? GetUploadSpeed(); + BytesPerSecond? GetDownloadSpeed(); + } + + public class TransferSpeeds : ITransferSpeeds + { + private readonly List uploads = new List(); + private readonly List downloads = new List(); + + public void AddUploadSample(ByteSize bytes, TimeSpan duration) + { + uploads.Add(Convert(bytes, duration)); + } + + public void AddDownloadSample(ByteSize bytes, TimeSpan duration) + { + downloads.Add(Convert(bytes, duration)); + } + + public BytesPerSecond? GetUploadSpeed() + { + if (!uploads.Any()) return null; + return uploads.Average(); + } + + public BytesPerSecond? 
GetDownloadSpeed() + { + if (!downloads.Any()) return null; + return downloads.Average(); + } + + private static BytesPerSecond Convert(ByteSize size, TimeSpan duration) + { + double bytes = size.SizeInBytes; + double seconds = duration.TotalSeconds; + + return new BytesPerSecond(System.Convert.ToInt64(Math.Round(bytes / seconds))); + } + } + + public static class ListExtensions + { + public static BytesPerSecond Average(this List list) + { + double sum = list.Sum(i => i.SizeInBytes); + double num = list.Count; + + return new BytesPerSecond(Convert.ToInt64(Math.Round(sum / num))); + } + + public static BytesPerSecond? OptionalAverage(this List? list) + { + if (list == null || !list.Any() || !list.Any(i => i != null)) return null; + var values = list.Where(i => i != null).Cast().ToArray(); + double sum = values.Sum(i => i.SizeInBytes); + double num = values.Length; + + return new BytesPerSecond(Convert.ToInt64(Math.Round(sum / num))); + } + } +} diff --git a/ProjectPlugins/GethPlugin/GethContainerRecipe.cs b/ProjectPlugins/GethPlugin/GethContainerRecipe.cs index 5af9371b..564844f5 100644 --- a/ProjectPlugins/GethPlugin/GethContainerRecipe.cs +++ b/ProjectPlugins/GethPlugin/GethContainerRecipe.cs @@ -24,7 +24,8 @@ namespace GethPlugin var args = CreateArgs(config); - SetSchedulingAffinity(notIn: "tests-runners"); + SetSchedulingAffinity(notIn: "false"); + SetSystemCriticalPriority(); AddEnvVar("GETH_ARGS", args); } diff --git a/ProjectPlugins/MetricsPlugin/PrometheusContainerRecipe.cs b/ProjectPlugins/MetricsPlugin/PrometheusContainerRecipe.cs index e132a8fd..23ebf3a2 100644 --- a/ProjectPlugins/MetricsPlugin/PrometheusContainerRecipe.cs +++ b/ProjectPlugins/MetricsPlugin/PrometheusContainerRecipe.cs @@ -14,7 +14,7 @@ namespace MetricsPlugin { var config = startupConfig.Get(); - SetSchedulingAffinity(notIn: "tests-runners"); + SetSchedulingAffinity(notIn: "false"); AddExposedPortAndVar("PROM_PORT", PortTag); AddEnvVar("PROM_CONFIG", config.PrometheusConfigBase64); 
diff --git a/Tests/CodexContinuousTests/SingleTestRun.cs b/Tests/CodexContinuousTests/SingleTestRun.cs index 2ab5a338..39b45e51 100644 --- a/Tests/CodexContinuousTests/SingleTestRun.cs +++ b/Tests/CodexContinuousTests/SingleTestRun.cs @@ -197,6 +197,11 @@ namespace ContinuousTests if (error.Contains(":")) error = error.Substring(1 + error.LastIndexOf(":")); result.Add("error", error); + var upload = nodes.Select(n => n.TransferSpeeds.GetUploadSpeed()).ToList()!.OptionalAverage(); + var download = nodes.Select(n => n.TransferSpeeds.GetDownloadSpeed()).ToList()!.OptionalAverage(); + if (upload != null) result.Add("avgupload", upload.ToString()); + if (download != null) result.Add("avgdownload", download.ToString()); + return result; } diff --git a/Tests/CodexContinuousTests/reports/CodexTestNetReport-November2023.md b/Tests/CodexContinuousTests/reports/CodexTestNetReport-November2023.md new file mode 100644 index 00000000..ebe4b827 --- /dev/null +++ b/Tests/CodexContinuousTests/reports/CodexTestNetReport-November2023.md @@ -0,0 +1,41 @@ +# Codex Continuous Test-net Report +Date: 05-12-2023 + +Report for: 11-2023 + + +## Continuous test-net Status +Continuous test runs (which can take many hours or days) can easily be started by a team member from the GitHub Actions UI. Results are collected and displayed in Grafana. For the time being, we're suspending the effort to have a network of Codex nodes "always online" and continuously being tested, until overall reliability improves. 
+ +## Deployment Configuration +Continuous Test-net is deployed to the Kubernetes cluster with the following configuration: + +5x Codex Nodes: +- Log-level: Trace +- Storage quota: 20480 MB +- Storage sell: 1024 MB +- Min price: 1024 +- Max collateral: 1024 +- Max duration: 3600000 seconds +- Block-TTL*: 99999999 seconds +- Block-MI*: 99999999 seconds +- Block-MN*: 100 blocks +3 of these 5 nodes have: +- Validator: true + +## Test Overview +| Changes | Test | Description | Status | Results | +|---------|------------------|--------------------------------|---------|----------------------| +| todo | Two-client test | See report for July 2023. | Faulted | Test reliably fails. | +| todo | Two-client test* | See report for September 2023. | Faulted | Test reliably fails. | +| todo | HoldMyBeer test | See report for August 2023. | todo | todo | +| todo | Peers test | See report for August 2023. | todo | todo | + +## Resulting changes +As a result of the testing efforts in 11-2023, these changes were made: +1. 
todo + +## Action Points +- Debugging efforts continue +- + diff --git a/Tests/CodexTests/CodexDistTest.cs b/Tests/CodexTests/CodexDistTest.cs index 4d6dab23..a8b406ce 100644 --- a/Tests/CodexTests/CodexDistTest.cs +++ b/Tests/CodexTests/CodexDistTest.cs @@ -6,14 +6,13 @@ using Core; using DistTestCore; using DistTestCore.Helpers; using DistTestCore.Logs; -using NUnit.Framework; using NUnit.Framework.Constraints; namespace CodexTests { public class CodexDistTest : DistTest { - private readonly List onlineCodexNodes = new List(); + private readonly Dictionary> onlineCodexNodes = new Dictionary>(); public CodexDistTest() { @@ -23,12 +22,6 @@ namespace CodexTests ProjectPlugin.Load(); } - [TearDown] - public void TearDownCodexFixture() - { - onlineCodexNodes.Clear(); - } - protected override void Initialize(FixtureLog fixtureLog) { var localBuilder = new LocalCodexBuilder(fixtureLog); @@ -36,6 +29,16 @@ namespace CodexTests localBuilder.Build(); } + protected override void LifecycleStart(TestLifecycle lifecycle) + { + onlineCodexNodes.Add(lifecycle, new List()); + } + + protected override void LifecycleStop(TestLifecycle lifecycle) + { + onlineCodexNodes.Remove(lifecycle); + } + public ICodexNode AddCodex() { return AddCodex(s => { }); @@ -58,7 +61,7 @@ namespace CodexTests setup(s); OnCodexSetup(s); }); - onlineCodexNodes.AddRange(group); + onlineCodexNodes[Get()].AddRange(group); return group; } @@ -74,7 +77,7 @@ namespace CodexTests public IEnumerable GetAllOnlineCodexNodes() { - return onlineCodexNodes; + return onlineCodexNodes[Get()]; } public void AssertBalance(ICodexContracts contracts, ICodexNode codexNode, Constraint constraint, string msg = "") @@ -85,5 +88,14 @@ namespace CodexTests protected virtual void OnCodexSetup(ICodexSetup setup) { } + + protected override void CollectStatusLogData(TestLifecycle lifecycle, Dictionary data) + { + var nodes = onlineCodexNodes[lifecycle]; + var upload = nodes.Select(n => 
n.TransferSpeeds.GetUploadSpeed()).ToList()!.OptionalAverage(); + var download = nodes.Select(n => n.TransferSpeeds.GetDownloadSpeed()).ToList()!.OptionalAverage(); + if (upload != null) data.Add("avgupload", upload.ToString()); + if (download != null) data.Add("avgdownload", download.ToString()); + } } } diff --git a/Tests/CodexTests/Helpers/PeerDownloadTestHelpers.cs b/Tests/CodexTests/Helpers/PeerDownloadTestHelpers.cs index fab2a23c..6dab12a1 100644 --- a/Tests/CodexTests/Helpers/PeerDownloadTestHelpers.cs +++ b/Tests/CodexTests/Helpers/PeerDownloadTestHelpers.cs @@ -9,7 +9,6 @@ namespace CodexTests.Helpers public class PeerDownloadTestHelpers : IFullConnectivityImplementation { private readonly FullConnectivityHelper helper; - private readonly ILog log; private readonly IFileManager fileManager; private ByteSize testFileSize; @@ -17,7 +16,6 @@ namespace CodexTests.Helpers { helper = new FullConnectivityHelper(log, this); testFileSize = 1.MB(); - this.log = log; this.fileManager = fileManager; } @@ -45,11 +43,11 @@ namespace CodexTests.Helpers private PeerConnectionState CheckConnectivity(Entry from, Entry to) { var expectedFile = GenerateTestFile(from.Node, to.Node); - var contentId = Stopwatch.Measure(log, "Upload", () => from.Node.UploadFile(expectedFile)); + var contentId = from.Node.UploadFile(expectedFile); try { - var downloadedFile = Stopwatch.Measure(log, "Download", () => DownloadFile(to.Node, contentId, expectedFile.Label + "_downloaded")); + var downloadedFile = DownloadFile(to.Node, contentId, expectedFile.Label + "_downloaded"); expectedFile.AssertIsEqual(downloadedFile); return PeerConnectionState.Connection; } diff --git a/Tests/DistTestCore/DistTest.cs b/Tests/DistTestCore/DistTest.cs index 74898d5f..06014e0c 100644 --- a/Tests/DistTestCore/DistTest.cs +++ b/Tests/DistTestCore/DistTest.cs @@ -147,6 +147,18 @@ namespace DistTestCore { } + protected virtual void LifecycleStart(TestLifecycle lifecycle) + { + } + + protected virtual void 
LifecycleStop(TestLifecycle lifecycle) + { + } + + protected virtual void CollectStatusLogData(TestLifecycle lifecycle, Dictionary data) + { + } + protected TestLifecycle Get() { lock (lifecycleLock) @@ -166,6 +178,7 @@ namespace DistTestCore var testNamespace = TestNamespacePrefix + Guid.NewGuid().ToString(); var lifecycle = new TestLifecycle(fixtureLog.CreateTestLog(), configuration, GetTimeSet(), testNamespace); lifecycles.Add(testName, lifecycle); + LifecycleStart(lifecycle); } }); } @@ -175,13 +188,16 @@ namespace DistTestCore var lifecycle = Get(); var testResult = GetTestResult(); var testDuration = lifecycle.GetTestDuration(); + var data = lifecycle.GetPluginMetadata(); + CollectStatusLogData(lifecycle, data); fixtureLog.Log($"{GetCurrentTestName()} = {testResult} ({testDuration})"); - statusLog.ConcludeTest(testResult, testDuration, lifecycle.GetPluginMetadata()); + statusLog.ConcludeTest(testResult, testDuration, data); Stopwatch.Measure(fixtureLog, $"Teardown for {GetCurrentTestName()}", () => { WriteEndTestLog(lifecycle.Log); IncludeLogsOnTestFailure(lifecycle); + LifecycleStop(lifecycle); lifecycle.DeleteAllResources(); lifecycle = null!; }); diff --git a/docker/continuous-tests-job.yaml b/docker/continuous-tests-job.yaml index f4399812..ecf8ea8b 100644 --- a/docker/continuous-tests-job.yaml +++ b/docker/continuous-tests-job.yaml @@ -7,6 +7,7 @@ metadata: name: ${NAMEPREFIX} runid: ${RUNID} spec: + ttlSecondsAfterFinished: 86400 backoffLimit: 0 template: metadata: @@ -18,7 +19,7 @@ spec: spec: priorityClassName: system-node-critical nodeSelector: - workload-type: "tests-runners" + workload-type: "tests-runners-ci" containers: - name: ${NAMEPREFIX}-runner image: codexstorage/cs-codex-dist-tests:latest @@ -39,6 +40,8 @@ spec: value: "${SOURCE}" - name: RUNID value: "${RUNID}" + - name: CODEXDOCKERIMAGE + value: "${CODEXDOCKERIMAGE}" - name: TESTID value: "${TESTID}" - name: DEPLOYMENT_NAMESPACE diff --git a/docker/dist-tests-job.yaml 
b/docker/dist-tests-job.yaml index 2eaed221..b6fdb728 100644 --- a/docker/dist-tests-job.yaml +++ b/docker/dist-tests-job.yaml @@ -18,7 +18,7 @@ spec: spec: priorityClassName: system-node-critical nodeSelector: - workload-type: "tests-runners" + workload-type: "tests-runners-ci" containers: - name: ${NAMEPREFIX}-runner image: codexstorage/cs-codex-dist-tests:latest