Clean up codex long tests

commit 8ea4c4ee37
parent 725dfc23a6
Ben 2024-05-07 10:15:02 +02:00
8 changed files with 57 additions and 168 deletions

View File

@@ -7,7 +7,8 @@ namespace CodexPlugin
 {
     public class CodexContainerRecipe : ContainerRecipeFactory
     {
-        private const string DefaultDockerImage = "codexstorage/nim-codex:sha-c58d4d7-dist-tests";
+        private const string DefaultDockerImage = "codexstorage/nim-codex:sha-ea6d681-dist-tests";
+
         public const string ApiPortTag = "codex_api_port";
         public const string ListenPortTag = "codex_listen_port";
         public const string MetricsPortTag = "codex_metrics_port";

View File

@@ -1,3 +1,4 @@
+using CodexPlugin;
 using CodexTests;
 using DistTestCore;
 using FileUtils;
@@ -7,35 +8,45 @@ using Utils;
 namespace CodexLongTests.BasicTests
 {
     [TestFixture]
-    public class DownloadTests : CodexDistTest
+    public class DownloadTests : AutoBootstrapDistTest
     {
-        [TestCase(3, 500)]
-        [TestCase(5, 100)]
-        [TestCase(10, 256)]
+        [Test]
+        [Combinatorial]
         [UseLongTimeouts]
-        public void ParallelDownload(int numberOfNodes, int filesizeMb)
+        public void ParallelDownload(
+            [Values(1, 3, 5)] int numberOfFiles,
+            [Values(10, 50, 100)] int filesizeMb)
         {
-            var group = AddCodex(numberOfNodes);
             var host = AddCodex();
+            var client = AddCodex();

-            foreach (var node in group)
+            var testfiles = new List<TrackedFile>();
+            var contentIds = new List<ContentId>();
+            var downloadedFiles = new List<TrackedFile?>();
+            for (int i = 0; i < numberOfFiles; i++)
             {
-                host.ConnectToPeer(node);
+                testfiles.Add(GenerateTestFile(filesizeMb.MB()));
+                contentIds.Add(new ContentId());
+                downloadedFiles.Add(null);
             }

-            var testFile = GenerateTestFile(filesizeMb.MB());
-            var contentId = host.UploadFile(testFile);
-            var list = new List<Task<TrackedFile?>>();
-            foreach (var node in group)
+            for (int i = 0; i < numberOfFiles; i++)
             {
-                list.Add(Task.Run(() => { return node.DownloadContent(contentId); }));
+                contentIds[i] = host.UploadFile(testfiles[i]);
             }

-            Task.WaitAll(list.ToArray());
-            foreach (var task in list)
+            var downloadTasks = new List<Task>();
+            for (int i = 0; i < numberOfFiles; i++)
             {
-                testFile.AssertIsEqual(task.Result);
+                downloadTasks.Add(Task.Run(() => { downloadedFiles[i] = client.DownloadContent(contentIds[i]); }));
             }
+
+            Task.WaitAll(downloadTasks.ToArray());
+            for (int i = 0; i < numberOfFiles; i++)
+            {
+                testfiles[i].AssertIsEqual(downloadedFiles[i]);
+            }
         }
     }

View File

@@ -6,7 +6,9 @@ namespace CodexLongTests.BasicTests
 {
     public class TestInfraTests : CodexDistTest
     {
-        [Test, UseLongTimeouts]
+        [Test]
+        [UseLongTimeouts]
+        [Ignore("Not supported atm")]
        public void TestInfraShouldHave1000AddressSpacesPerPod()
        {
            var group = AddCodex(1000, s => s.EnableMetrics());
@@ -17,7 +19,9 @@ namespace CodexLongTests.BasicTests
                 "Not all created nodes provided a unique id.");
         }

-        [Test, UseLongTimeouts]
+        [Test]
+        [UseLongTimeouts]
+        [Ignore("Not supported atm")]
         public void TestInfraSupportsManyConcurrentPods()
         {
             for (var i = 0; i < 20; i++)

View File

@@ -8,41 +8,39 @@ using Utils;
 namespace CodexLongTests.BasicTests
 {
     [TestFixture]
-    public class UploadTests : CodexDistTest
+    public class UploadTests : AutoBootstrapDistTest
     {
-        [TestCase(3, 50)]
-        [TestCase(5, 75)]
-        [TestCase(10, 25)]
+        [Test]
+        [Combinatorial]
         [UseLongTimeouts]
-        public void ParallelUpload(int numberOfNodes, int filesizeMb)
+        public void ParallelUpload(
+            [Values(1, 3, 5)] int numberOfFiles,
+            [Values(10, 50, 100)] int filesizeMb)
         {
-            var group = AddCodex(numberOfNodes);
             var host = AddCodex();
-            foreach (var node in group)
-            {
-                host.ConnectToPeer(node);
-            }
+            var client = AddCodex();

             var testfiles = new List<TrackedFile>();
-            var contentIds = new List<Task<ContentId>>();
+            var contentIds = new List<ContentId>();

-            for (int i = 0; i < group.Count(); i++)
+            for (int i = 0; i < numberOfFiles; i++)
             {
                 testfiles.Add(GenerateTestFile(filesizeMb.MB()));
-                var n = i;
-                contentIds.Add(Task.Run(() => { return host.UploadFile(testfiles[n]); }));
+                contentIds.Add(new ContentId());
             }

-            var downloads = new List<Task<TrackedFile?>>();
-            for (int i = 0; i < group.Count(); i++)
+            var uploadTasks = new List<Task>();
+            for (int i = 0; i < numberOfFiles; i++)
             {
-                var n = i;
-                downloads.Add(Task.Run(() => { return group[n].DownloadContent(contentIds[n].Result); }));
+                uploadTasks.Add(Task.Run(() => { contentIds[i] = host.UploadFile(testfiles[i]); }));
             }

-            Task.WaitAll(downloads.ToArray());
-            for (int i = 0; i < group.Count(); i++)
+            Task.WaitAll(uploadTasks.ToArray());
+            for (int i = 0; i < numberOfFiles; i++)
             {
-                testfiles[i].AssertIsEqual(downloads[i].Result);
+                var downloaded = client.DownloadContent(contentIds[i]);
+                testfiles[i].AssertIsEqual(downloaded);
             }
         }
     }

View File

@@ -1,126 +0,0 @@
-using CodexPlugin;
-using DistTestCore;
-using Logging;
-using NUnit.Framework;
-using Utils;
-
-namespace CodexTests.ScalabilityTests
-{
-    [TestFixture]
-    public class OneClientLargeFileTests : CodexDistTest
-    {
-        [Test]
-        [Combinatorial]
-        [UseLongTimeouts]
-        public void OneClientLargeFile([Values(
-            256,
-            512,
-            1024, // 1 GB
-            2048,
-            4096,
-            8192,
-            16384,
-            32768,
-            65536,
-            131072
-        )] int sizeMb)
-        {
-            var testFile = GenerateTestFile(sizeMb.MB());
-
-            var node = AddCodex(s => s
-                .WithLogLevel(CodexLogLevel.Warn)
-                .WithStorageQuota((sizeMb + 10).MB())
-            );
-
-            var contentId = node.UploadFile(testFile);
-            var downloadedFile = node.DownloadContent(contentId);
-
-            testFile.AssertIsEqual(downloadedFile);
-        }
-
-        [Test]
-        public void ManyFiles()
-        {
-            // I suspect that upload speed is linked to the total number of
-            // blocks already in the node, and that the metadata store is the
-            // cause of any slow-down. This test is used to detect and
-            // quantify the slow-down.
-            var node = AddCodex(s => s
-                .WithLogLevel(CodexLogLevel.Trace)
-                .WithStorageQuota(20.GB())
-            );
-
-            var startUtc = DateTime.UtcNow;
-            var endUtc = DateTime.UtcNow;
-
-            var fastMap = new Dictionary<string, int>();
-            var slowMap = new Dictionary<string, int>();
-
-            var times = new List<TimeSpan>();
-            for (var i = 0; i < 100; i++)
-            {
-                Thread.Sleep(1000);
-                var file = GenerateTestFile(100.MB());
-                startUtc = DateTime.UtcNow;
-                var duration = Stopwatch.Measure(GetTestLog(), "Upload_" + i, () =>
-                {
-                    node.UploadFile(file);
-                });
-                times.Add(duration);
-                endUtc = DateTime.UtcNow;
-
-                // We collect the node's log during the upload and count the
-                // occurrences of each line.
-                // If the upload was fast, we add the counts to the fast-map.
-                // If it was slow, we add them to the slow-map.
-                // After the test, we can compare the two maps and hopefully
-                // see what the node was doing during the slow uploads that it
-                // wasn't doing during the fast ones.
-                if (duration.TotalSeconds < 12)
-                {
-                    AddToLogMap(fastMap, node, startUtc, endUtc);
-                }
-                else if (duration.TotalSeconds > 25)
-                {
-                    AddToLogMap(slowMap, node, startUtc, endUtc);
-                }
-            }
-
-            Log("Upload times:");
-            foreach (var t in times)
-            {
-                Log(Time.FormatDuration(t));
-            }
-
-            Log("Fast map:");
-            foreach (var entry in fastMap.OrderByDescending(p => p.Value))
-            {
-                if (entry.Value > 9)
-                {
-                    Log($"'{entry.Key}' = {entry.Value}");
-                }
-            }
-
-            Log("Slow map:");
-            foreach (var entry in slowMap.OrderByDescending(p => p.Value))
-            {
-                if (entry.Value > 9)
-                {
-                    Log($"'{entry.Key}' = {entry.Value}");
-                }
-            }
-        }
-
-        private void AddToLogMap(Dictionary<string, int> map, ICodexNode node, DateTime startUtc, DateTime endUtc)
-        {
-            var log = Ci.DownloadLog(node, 1000000);
-            log.IterateLines(line =>
-            {
-                var logLine = CodexLogLine.Parse(line);
-                if (logLine == null) return;
-                if (logLine.TimestampUtc < startUtc) return;
-                if (logLine.TimestampUtc > endUtc) return;
-
-                if (map.ContainsKey(logLine.Message)) map[logLine.Message] += 1;
-                else map.Add(logLine.Message, 1);
-            });
-        }
-    }
-}

View File

@@ -12,6 +12,7 @@ namespace CodexTests.ScalabilityTests
         [Test]
         [Combinatorial]
+        [Ignore("Used to measure disc io speeds in cluster.")]
         public void DiscSpeedTest(
             [Values(1, 10, 100, 1024, 1024 * 10, 1024 * 100, 1024 * 1024)] int bufferSizeKb
         )