Modify tests to fix CI issue

This commit is contained in:
aya 2025-03-25 13:09:41 +02:00
parent 8621e63beb
commit f7cdae09c7
2 changed files with 71 additions and 66 deletions

View File

@ -33,9 +33,12 @@ jobs:
- name: Build nwaku dependencies
run: make -C waku
- name: Increase ulimit
run: sudo sh -c "ulimit -n 8192"
- name: Run Endurance Test
run: |
go test -v ./waku -count=1 -timeout=360m -run '^TestStress' | tee testlogs.log
go test -p=1 -v ./waku -count=1 -timeout=360m -run '^TestStress' | tee testlogs.log
- name: Upload Test Logs
uses: actions/upload-artifact@v4

View File

@ -72,73 +72,10 @@ func TestStressMemoryUsageForThreeNodes(t *testing.T) {
Debug("[%s] Test completed successfully", testName)
}
// TestStress2Nodes500IterationTearDown repeatedly builds, exercises, and tears
// down a two-node relay network to verify that memory is released between
// iterations. Heap usage (Go runtime) and OS-level RSS are sampled at the
// start, at iterations 250 and 500, and at the end; the mid-run heap checks
// fail if HeapAlloc exceeds 3x the starting value. The RSS and final-heap
// thresholds are intentionally disabled (too noisy in CI — see comments).
func TestStress2Nodes500IterationTearDown(t *testing.T) {
	var memStats runtime.MemStats
	runtime.ReadMemStats(&memStats)
	initialMem := memStats.HeapAlloc
	Debug("[%s] Memory usage at test START: %d KB", t.Name(), initialMem/1024)

	initialRSS, err := utils.GetRSSKB()
	require.NoError(t, err)
	Debug("[%s] OS-level RSS at test START: %d KB", t.Name(), initialRSS)

	totalIterations := 500
	for i := 1; i <= totalIterations; i++ {
		// Spin up a fresh pair of relay-only nodes on free ports each iteration.
		var nodes []*WakuNode
		for n := 1; n <= 2; n++ {
			cfg := DefaultWakuConfig
			cfg.Relay = true
			cfg.Discv5Discovery = false
			cfg.TcpPort, cfg.Discv5UdpPort, err = GetFreePortIfNeeded(0, 0)
			require.NoError(t, err, "Failed to get free ports for node%d", n)

			node, err := NewWakuNode(&cfg, fmt.Sprintf("node%d", n))
			require.NoError(t, err, "Failed to create node%d", n)
			err = node.Start()
			require.NoError(t, err, "Failed to start node%d", n)
			nodes = append(nodes, node)
		}

		err = ConnectAllPeers(nodes)
		require.NoError(t, err)

		// Publish from node1 and verify delivery on node2.
		message := nodes[0].CreateMessage()
		msgHash, err := nodes[0].RelayPublishNoCTX(DefaultPubsubTopic, message)
		require.NoError(t, err)
		time.Sleep(500 * time.Millisecond)
		err = nodes[1].VerifyMessageReceived(message, msgHash, 500*time.Millisecond)
		// Fixed: message previously said "Node1 did not receive message from node1";
		// the receiver under test is node2.
		require.NoError(t, err, "node2 did not receive message from node1")

		// Full teardown, then a double GC with a pause so finalizers can run
		// before the heap is sampled.
		for _, node := range nodes {
			node.StopAndDestroy()
		}
		runtime.GC()
		time.Sleep(250 * time.Millisecond)
		runtime.GC()

		if i == 250 || i == 500 {
			runtime.ReadMemStats(&memStats)
			Debug("Iteration %d, usage after teardown: %d KB", i, memStats.HeapAlloc/1024)
			require.LessOrEqual(t, memStats.HeapAlloc, initialMem*3, "Memory usage soared above threshold after iteration %d", i)

			rssNow, err := utils.GetRSSKB()
			require.NoError(t, err)
			Debug("Iteration %d, OS-level RSS after teardown: %d KB", i, rssNow)
			// RSS threshold disabled: OS-level RSS is too noisy for a hard assert.
			//require.LessOrEqual(t, rssNow, initialRSS*10, "OS-level RSS soared above threshold after iteration %d", i)
		}
		Debug("Iteration number %d", i)
	}

	// Final sample after the last teardown.
	runtime.GC()
	time.Sleep(500 * time.Millisecond)
	runtime.GC()
	runtime.ReadMemStats(&memStats)
	finalMem := memStats.HeapAlloc
	Debug("[%s] Memory usage at test END: %d KB", t.Name(), finalMem/1024)
	// require.LessOrEqual(t, finalMem, initialMem*3, "Memory usage soared above threshold after %d cycles", totalIterations)

	finalRSS, err := utils.GetRSSKB()
	require.NoError(t, err)
	Debug("[%s] OS-level RSS at test END: %d KB", t.Name(), finalRSS)
	//require.LessOrEqual(t, finalRSS, initialRSS*3, "OS-level RSS soared above threshold after %d cycles", totalIterations)
}
func TestStressStoreQuery5kMessagesWithPagination(t *testing.T) {
Debug("Starting test")
runtime.GC()
time.Sleep(5 * time.Second)
nodeConfig := DefaultWakuConfig
nodeConfig.Relay = true
nodeConfig.Store = true
@ -366,3 +303,68 @@ func TestStressLargePayloadEphemeralMessagesEndurance(t *testing.T) {
require.NoError(t, err)
Debug("After endurance test: HeapAlloc = %d KB, RSS = %d KB", endHeapKB, endRSSKB)
}
// TestStress2Nodes500IterationTearDown repeatedly builds, exercises, and tears
// down a two-node relay network to verify that memory is released between
// iterations. Heap usage (Go runtime) and OS-level RSS are sampled at the
// start, at iterations 250 and 500, and at the end; the mid-run heap checks
// fail if HeapAlloc exceeds 3x the starting value. The RSS and final-heap
// thresholds are intentionally disabled (too noisy in CI — see comments).
func TestStress2Nodes500IterationTearDown(t *testing.T) {
	var memStats runtime.MemStats
	runtime.ReadMemStats(&memStats)
	initialMem := memStats.HeapAlloc
	Debug("[%s] Memory usage at test START: %d KB", t.Name(), initialMem/1024)

	initialRSS, err := utils.GetRSSKB()
	require.NoError(t, err)
	Debug("[%s] OS-level RSS at test START: %d KB", t.Name(), initialRSS)

	totalIterations := 500
	for i := 1; i <= totalIterations; i++ {
		// Spin up a fresh pair of relay-only nodes on free ports each iteration.
		var nodes []*WakuNode
		for n := 1; n <= 2; n++ {
			cfg := DefaultWakuConfig
			cfg.Relay = true
			cfg.Discv5Discovery = false
			cfg.TcpPort, cfg.Discv5UdpPort, err = GetFreePortIfNeeded(0, 0)
			require.NoError(t, err, "Failed to get free ports for node%d", n)

			node, err := NewWakuNode(&cfg, fmt.Sprintf("node%d", n))
			require.NoError(t, err, "Failed to create node%d", n)
			err = node.Start()
			require.NoError(t, err, "Failed to start node%d", n)
			nodes = append(nodes, node)
		}

		err = ConnectAllPeers(nodes)
		require.NoError(t, err)

		// Publish from node1 and verify delivery on node2.
		message := nodes[0].CreateMessage()
		msgHash, err := nodes[0].RelayPublishNoCTX(DefaultPubsubTopic, message)
		require.NoError(t, err)
		time.Sleep(500 * time.Millisecond)
		err = nodes[1].VerifyMessageReceived(message, msgHash, 500*time.Millisecond)
		// Fixed: message previously said "Node1 did not receive message from node1";
		// the receiver under test is node2.
		require.NoError(t, err, "node2 did not receive message from node1")

		// Tear everything down; the short sleep gives each node time to
		// release its resources before the next one is destroyed.
		for _, node := range nodes {
			node.StopAndDestroy()
			time.Sleep(50 * time.Millisecond)
		}

		// Double GC with a pause in between so finalizers can run before
		// the heap is sampled.
		runtime.GC()
		time.Sleep(250 * time.Millisecond)
		runtime.GC()

		if i == 250 || i == 500 {
			runtime.ReadMemStats(&memStats)
			Debug("Iteration %d, usage after teardown: %d KB", i, memStats.HeapAlloc/1024)
			require.LessOrEqual(t, memStats.HeapAlloc, initialMem*3, "Memory usage soared above threshold after iteration %d", i)

			rssNow, err := utils.GetRSSKB()
			require.NoError(t, err)
			Debug("Iteration %d, OS-level RSS after teardown: %d KB", i, rssNow)
			// RSS threshold disabled: OS-level RSS is too noisy for a hard assert.
			//require.LessOrEqual(t, rssNow, initialRSS*10, "OS-level RSS soared above threshold after iteration %d", i)
		}
		Debug("Iteration number %d", i)
	}

	// Final sample after the last teardown.
	runtime.GC()
	time.Sleep(500 * time.Millisecond)
	runtime.GC()
	runtime.ReadMemStats(&memStats)
	finalMem := memStats.HeapAlloc
	Debug("[%s] Memory usage at test END: %d KB", t.Name(), finalMem/1024)
	// require.LessOrEqual(t, finalMem, initialMem*3, "Memory usage soared above threshold after %d cycles", totalIterations)

	finalRSS, err := utils.GetRSSKB()
	require.NoError(t, err)
	Debug("[%s] OS-level RSS at test END: %d KB", t.Name(), finalRSS)
	//require.LessOrEqual(t, finalRSS, initialRSS*3, "OS-level RSS soared above threshold after %d cycles", totalIterations)
}