mirror of https://github.com/logos-messaging/logos-messaging-go-bindings.git
synced 2026-01-08 00:43:09 +00:00

Merge pull request #62 from waku-org/Maintenance_Tests
Add modifications for failed tests

Commit b95c71a08b

.github/workflows/CI_endurance.yml (vendored): 21 lines changed
@@ -36,8 +36,10 @@ jobs:
       run: sudo sh -c "ulimit -n 8192"

     - name: Run Endurance Test (Group 1)
+      shell: bash
       run: |
-        go test -p=1 -v ./waku -count=1 -timeout=360m -run '^(TestStressMemoryUsageForThreeNodes|TestStressStoreQuery5kMessagesWithPagination|TestStressHighThroughput10kPublish|TestStressConnectDisconnect500Iteration)$' | tee testlogs1.log
+        set -euo pipefail
+        go test -p=1 -v ./waku -count=1 -timeout=360m -run '^(TestStressMemoryUsageForThreeNodes|TestStressStoreQuery5kMessagesWithPagination|TestStressConnectDisconnect500Iteration|TestStressHighThroughput10kPublish)$' | tee testlogs1.log

     - name: Upload Test Logs (Group 1)
       uses: actions/upload-artifact@v4
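The two additions above are the substantive fix: running the step explicitly under bash and prefixing the pipeline with `set -euo pipefail`. Without `pipefail`, the exit status of `go test ... | tee testlogs1.log` is tee's exit status (almost always 0), so a failing test run would leave the job green; with it, the go test failure propagates and fails the step. The change to the `-run` regex only reorders the test names.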
@@ -45,6 +47,12 @@ jobs:
         name: endurance-logs-group1
         path: testlogs1.log

+    - name: Upload Memory Metrics (Group 1)
+      uses: actions/upload-artifact@v4
+      with:
+        name: memory-metrics-group1
+        path: waku/px_load_metrics.csv
+
   endurance2:
     runs-on: ubuntu-latest
@@ -77,11 +85,20 @@ jobs:
       run: sudo sh -c "ulimit -n 8192"

     - name: Run Endurance Test (Group 2)
+      shell: bash
       run: |
-        go test -p=1 -v ./waku -count=1 -timeout=360m -run '^(TestStressRandomNodesInMesh|TestStressLargePayloadEphemeralMessagesEndurance|TestStress2Nodes500IterationTearDown|TestPeerExchangePXLoad)$' | tee testlogs2.log
+        set -euo pipefail
+        go test -p=1 -v ./waku -count=1 -timeout=360m -run '^(TestStressRandomNodesInMesh|TestStress2Nodes2kIterationTearDown|TestPeerExchangePXLoad)$' | tee testlogs2.log

     - name: Upload Test Logs (Group 2)
       uses: actions/upload-artifact@v4
       with:
         name: endurance-logs-group2
         path: testlogs2.log

+    - name: Upload Memory Metrics (Group 2)
+      uses: actions/upload-artifact@v4
+      with:
+        name: memory-metrics-group2
+        path: waku/px_load_metrics.csv
(second workflow file, repeated tests; file name not shown in this view)

@@ -5,7 +5,8 @@ on:

 jobs:
   repeated-tests:
-    runs-on: ubuntu-latest
+    runs-on: [self-hosted, ubuntu-22.04]
+    timeout-minutes: 900
     steps:
       - name: Check out repository
         uses: actions/checkout@v3
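Moving repeated-tests to a self-hosted Ubuntu 22.04 runner and setting timeout-minutes: 900 go together: GitHub-hosted runners cap each job at 6 hours (360 minutes), so a run budgeted for 15 hours only fits on self-hosted hardware. Presumably the default 360-minute limit was killing the long repeated runs this workflow exists to perform.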
@@ -41,7 +42,7 @@ jobs:
     - name: Repeated test runs
       run: |
         set +e
-        for i in {1..100}; do
+        for i in {1..80}; do
          echo "Iteration $i: measuring memory BEFORE the tests..."
          go run tools/memory_record.go --iteration $i --phase start
          echo "Running tests (iteration $i)..."
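Dropping the loop from 100 to 80 iterations shortens the run. The surrounding `set +e` matters here: it lets the loop continue past a failing iteration, so tools/memory_record.go still captures memory for every iteration instead of the job aborting on the first test failure.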
(Go utils package; file name not shown in this view)

@@ -75,3 +75,4 @@ func GetRSSKB() (uint64, error) {
 	pageSize := os.Getpagesize()
 	return (rssPages * uint64(pageSize)) / 1024, nil
 }
+
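Only the tail of GetRSSKB is visible here: it converts a resident-page count into kilobytes, so with the usual 4096-byte page size, 2560 resident pages come out as exactly 10240 KB. A minimal sketch of how such a helper can be implemented on Linux, assuming rssPages is read from /proc/self/statm (the reading code is not part of this diff and is an assumption, not the repository's code):

    package utils

    import (
    	"fmt"
    	"os"
    	"strconv"
    	"strings"
    )

    // GetRSSKB returns this process's resident set size in kilobytes.
    func GetRSSKB() (uint64, error) {
    	// Hypothetical source of rssPages: /proc/self/statm reports
    	// "size resident shared text lib data dt", all counted in pages.
    	data, err := os.ReadFile("/proc/self/statm")
    	if err != nil {
    		return 0, err
    	}
    	fields := strings.Fields(string(data))
    	if len(fields) < 2 {
    		return 0, fmt.Errorf("unexpected /proc/self/statm format")
    	}
    	rssPages, err := strconv.ParseUint(fields[1], 10, 64)
    	if err != nil {
    		return 0, err
    	}
    	// These two lines are the ones visible in the diff above.
    	pageSize := os.Getpagesize()
    	return (rssPages * uint64(pageSize)) / 1024, nil
    }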
(waku/common store query types; file name not shown in this view)

@@ -1,15 +1,15 @@
 package common

 type StoreQueryRequest struct {
-	RequestId         string         `json:"requestId"`
-	IncludeData       bool           `json:"includeData"`
+	RequestId         string         `json:"request_id"`
+	IncludeData       bool           `json:"include_data"`
 	PubsubTopic       string         `json:"pubsubTopic,omitempty"`
 	ContentTopics     *[]string      `json:"contentTopics,omitempty"`
 	TimeStart         *int64         `json:"timeStart,omitempty"`
 	TimeEnd           *int64         `json:"timeEnd,omitempty"`
 	MessageHashes     *[]MessageHash `json:"messageHashes,omitempty"`
 	PaginationCursor  *MessageHash   `json:"paginationCursor,omitempty"`
-	PaginationForward bool           `json:"paginationForward"`
+	PaginationForward bool           `json:"pagination_forward"`
 	PaginationLimit   *uint64        `json:"paginationLimit,omitempty"`
 }
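Three fields move from camelCase to snake_case on the wire while the rest keep their camelCase tags. A minimal sketch of the resulting serialization (the values are illustrative, not from the repository):

    package main

    import (
    	"encoding/json"
    	"fmt"

    	"github.com/waku-org/waku-go-bindings/waku/common"
    )

    func main() {
    	req := common.StoreQueryRequest{
    		RequestId:         "query-1",
    		IncludeData:       true,
    		PaginationForward: false, // no omitempty tag, so always serialized
    	}
    	b, _ := json.Marshal(req)
    	fmt.Println(string(b))
    	// All omitempty fields are unset here, so only the retagged ones appear:
    	// {"request_id":"query-1","include_data":true,"pagination_forward":false}
    }

Note the struct now mixes conventions; only RequestId, IncludeData, and PaginationForward changed, which suggests the store service expects snake_case for exactly these fields.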
(waku package helper file; file name not shown in this view)

@@ -9,6 +9,7 @@ import (
 	"io"
 	"net/http"
 	"os"
+	"runtime"
 	"strconv"
 	"sync"
 	"time"
@@ -16,6 +17,7 @@ import (
 	"github.com/cenkalti/backoff/v3"
 	"github.com/libp2p/go-libp2p/core/peer"
 	"github.com/waku-org/go-waku/waku/v2/protocol/pb"
+	"github.com/waku-org/waku-go-bindings/utils"
 	"github.com/waku-org/waku-go-bindings/waku/common"
 	"google.golang.org/protobuf/proto"
 )
@@ -288,3 +290,15 @@ func recordMemoryMetricsPX(testName, phase string, heapAllocKB, rssKB uint64) er
 	}
 	return writer.Write(row)
 }
+
+func captureMemory(testName, phase string) {
+	var ms runtime.MemStats
+	runtime.ReadMemStats(&ms)
+
+	heapKB := ms.HeapAlloc / 1024
+	rssKB, _ := utils.GetRSSKB()
+
+	Debug("[%s] Memory usage (%s): %d KB (RSS %d KB)", testName, phase, heapKB, rssKB)
+
+	_ = recordMemoryMetricsPX(testName, phase, heapKB, rssKB)
+}
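captureMemory folds the per-test bookkeeping (one runtime.ReadMemStats plus utils.GetRSSKB, a Debug line, and a CSV row via recordMemoryMetricsPX) into a single call; the CSV it appends to, waku/px_load_metrics.csv, is what the new Upload Memory Metrics workflow steps publish as artifacts. Note the RSS read error is deliberately ignored (rssKB, _), so a metrics hiccup can no longer fail a test. Typical call sites, as the tests below now use:

    captureMemory(t.Name(), "at start")
    // ... run the workload under test ...
    captureMemory(t.Name(), "at end")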
(stress test file; file name not shown in this view)

@@ -11,7 +11,6 @@ import (
 	"time"

 	"github.com/stretchr/testify/require"
-	"github.com/waku-org/waku-go-bindings/utils"
 	"github.com/waku-org/waku-go-bindings/waku/common"

 	// "go.uber.org/zap/zapcore"
@@ -21,9 +20,7 @@ import (
 func TestStressMemoryUsageForThreeNodes(t *testing.T) {
 	testName := t.Name()
 	var err error
-	var memStats runtime.MemStats
-	runtime.ReadMemStats(&memStats)
-	Debug("[%s] Memory usage BEFORE creating nodes: %d KB", testName, memStats.HeapAlloc/1024)
+	captureMemory(t.Name(), "start")
 	node1Cfg := DefaultWakuConfig
 	node1Cfg.TcpPort, node1Cfg.Discv5UdpPort, err = GetFreePortIfNeeded(0, 0)
 	require.NoError(t, err)
@@ -41,8 +38,7 @@ func TestStressMemoryUsageForThreeNodes(t *testing.T) {
 	node3, err := NewWakuNode(&node3Cfg, "node3")
 	require.NoError(t, err)

-	runtime.ReadMemStats(&memStats)
-	Debug("[%s] Memory usage AFTER creating nodes: %d KB", testName, memStats.HeapAlloc/1024)
+	captureMemory(t.Name(), "before nodes start")

 	err = node1.Start()
 	require.NoError(t, err)
@@ -51,8 +47,7 @@ func TestStressMemoryUsageForThreeNodes(t *testing.T) {
 	err = node3.Start()
 	require.NoError(t, err)

-	runtime.ReadMemStats(&memStats)
-	Debug("[%s] Memory usage AFTER starting nodes: %d KB", testName, memStats.HeapAlloc/1024)
+	captureMemory(t.Name(), "after nodes run")

 	time.Sleep(2 * time.Second)
@@ -64,8 +59,7 @@ func TestStressMemoryUsageForThreeNodes(t *testing.T) {
 	time.Sleep(1 * time.Second)
 	runtime.GC()

-	runtime.ReadMemStats(&memStats)
-	Debug("[%s] Memory usage AFTER destroying nodes: %d KB", testName, memStats.HeapAlloc/1024)
+	captureMemory(t.Name(), "at end")

 	Debug("[%s] Test completed successfully", testName)
 }
@@ -91,12 +85,10 @@ func TestStressStoreQuery5kMessagesWithPagination(t *testing.T) {
 		node2.StopAndDestroy()
 	}()

-	var memStats runtime.MemStats
-	iterations := 50
-
-	runtime.ReadMemStats(&memStats)
-	initialHeapAlloc := memStats.HeapAlloc
-	Debug("Initial memory usage check before publishing %d MB", initialHeapAlloc/1024/1024)
+	iterations := 4000
+	captureMemory(t.Name(), "at start")

 	queryTimestamp := proto.Int64(time.Now().UnixNano())
@@ -107,14 +99,12 @@ func TestStressStoreQuery5kMessagesWithPagination(t *testing.T) {
 		require.NoError(t, err, "Failed to publish message")

 		if i%10 == 0 {
-			runtime.ReadMemStats(&memStats)
-			Debug("Memory usage at iteration %d: HeapAlloc=%v MB, NumGC=%v",
-				i, memStats.HeapAlloc/1024/1024, memStats.NumGC)

 			storeQueryRequest := &common.StoreQueryRequest{
 				TimeStart:         queryTimestamp,
 				IncludeData:       true,
 				PaginationLimit:   proto.Uint64(50),
+				PaginationForward: false,
 			}

 			storedmsgs, err := wakuNode.GetStoredMessages(node2, storeQueryRequest)
@@ -123,11 +113,7 @@ func TestStressStoreQuery5kMessagesWithPagination(t *testing.T) {
 		}
 	}

-	runtime.ReadMemStats(&memStats)
-	finalHeapAlloc := memStats.HeapAlloc
-	Debug("Memory before test: %v KB, Memory after test: %v KB", initialHeapAlloc/1024, finalHeapAlloc/1024)
-
-	//require.LessOrEqual(t, finalHeapAlloc, initialHeapAlloc*2, "Memory usage has grown too much")
+	captureMemory(t.Name(), "at end")

 	Debug("[%s] Test completed successfully", t.Name())
 }
@@ -149,15 +135,9 @@ func TestStressHighThroughput10kPublish(t *testing.T) {
 	err = node1.ConnectPeer(node2)
 	require.NoError(t, err, "Failed to connect node1 to node2")

-	var memStats runtime.MemStats
-	runtime.ReadMemStats(&memStats)
-	startHeapKB := memStats.HeapAlloc / 1024
-	startRSSKB, err := utils.GetRSSKB()
-	require.NoError(t, err, "Failed to read initial RSS")
-
-	Debug("Memory usage BEFORE sending => HeapAlloc: %d KB, RSS: %d KB", startHeapKB, startRSSKB)
-
-	totalMessages := 5000
+	captureMemory(t.Name(), "at start")
+	totalMessages := 2000
 	pubsubTopic := DefaultPubsubTopic

 	for i := 0; i < totalMessages; i++ {
@@ -170,21 +150,11 @@ func TestStressHighThroughput10kPublish(t *testing.T) {
 		Debug("###Iteration number#%d", i)
 	}

-	runtime.ReadMemStats(&memStats)
-	endHeapKB := memStats.HeapAlloc / 1024
-	endRSSKB, err := utils.GetRSSKB()
-	require.NoError(t, err, "Failed to read final RSS")
-
-	Debug("Memory usage AFTER sending => HeapAlloc: %d KB, RSS: %d KB", endHeapKB, endRSSKB)
+	captureMemory(t.Name(), "at end")
 }

-func TestStressConnectDisconnect500Iteration(t *testing.T) {
-	var memStats runtime.MemStats
-	runtime.ReadMemStats(&memStats)
-	startHeapKB := memStats.HeapAlloc / 1024
-	startRSSKB, err := utils.GetRSSKB()
-	require.NoError(t, err)
-	Debug("Before test: HeapAlloc = %d KB, RSS = %d KB", startHeapKB, startRSSKB)
+func TestStressConnectDisconnect1kIteration(t *testing.T) {
+	captureMemory(t.Name(), "at start")

 	node0Cfg := DefaultWakuConfig
 	node0Cfg.Relay = true
@@ -199,7 +169,7 @@ func TestStressConnectDisconnect500Iteration(t *testing.T) {
 		node1.StopAndDestroy()
 	}()

-	iterations := 200
+	iterations := 2000
 	for i := 1; i <= iterations; i++ {
 		err := node0.ConnectPeer(node1)
 		require.NoError(t, err, "Iteration %d: node0 failed to connect to node1", i)
@@ -219,18 +189,14 @@ func TestStressConnectDisconnect500Iteration(t *testing.T) {
 		Debug("Iteration %d: node0 disconnected from node1", i)
 		time.Sleep(2 * time.Second)
 	}
-	runtime.ReadMemStats(&memStats)
-	endHeapKB := memStats.HeapAlloc / 1024
-	endRSSKB, err := utils.GetRSSKB()
-	require.NoError(t, err)
-	Debug("After test: HeapAlloc = %d KB, RSS = %d KB", endHeapKB, endRSSKB)
+	captureMemory(t.Name(), "at end")
 }

 func TestStressRandomNodesInMesh(t *testing.T) {
 	r := rand.New(rand.NewSource(time.Now().UnixNano()))

 	minNodes := 5
-	maxNodes := 10
+	maxNodes := 20
 	nodes := make([]*WakuNode, 0, maxNodes)

 	for i := 0; i < minNodes; i++ {
|
|||||||
time.Sleep(1 * time.Second)
|
time.Sleep(1 * time.Second)
|
||||||
require.NoError(t, err, "Failed to connect initial nodes with ConnectAllPeers")
|
require.NoError(t, err, "Failed to connect initial nodes with ConnectAllPeers")
|
||||||
|
|
||||||
var memStats runtime.MemStats
|
captureMemory(t.Name(), "at start")
|
||||||
runtime.ReadMemStats(&memStats)
|
|
||||||
startHeapKB := memStats.HeapAlloc / 1024
|
|
||||||
startRSSKB, err2 := utils.GetRSSKB()
|
|
||||||
require.NoError(t, err2, "Failed to read initial RSS")
|
|
||||||
Debug("Memory at start of test: HeapAlloc=%d KB, RSS=%d KB", startHeapKB, startRSSKB)
|
|
||||||
|
|
||||||
testDuration := 30 * time.Minute
|
testDuration := 30 * time.Minute
|
||||||
endTime := time.Now().Add(testDuration)
|
endTime := time.Now().Add(testDuration)
|
||||||
@ -308,11 +269,7 @@ func TestStressRandomNodesInMesh(t *testing.T) {
|
|||||||
n.StopAndDestroy()
|
n.StopAndDestroy()
|
||||||
}
|
}
|
||||||
|
|
||||||
runtime.ReadMemStats(&memStats)
|
captureMemory(t.Name(), "at end")
|
||||||
endHeapKB := memStats.HeapAlloc / 1024
|
|
||||||
endRSSKB, err3 := utils.GetRSSKB()
|
|
||||||
require.NoError(t, err3, "Failed to read final RSS")
|
|
||||||
Debug("Memory at end of test: HeapAlloc=%d KB, RSS=%d KB", endHeapKB, endRSSKB)
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestStressLargePayloadEphemeralMessagesEndurance(t *testing.T) {
|
func TestStressLargePayloadEphemeralMessagesEndurance(t *testing.T) {
|
||||||
@@ -320,30 +277,29 @@ func TestStressLargePayloadEphemeralMessagesEndurance(t *testing.T) {
 	nodePubCfg.Relay = true
 	publisher, err := StartWakuNode("publisher", &nodePubCfg)
 	require.NoError(t, err)
-	defer publisher.StopAndDestroy()

 	nodeRecvCfg := DefaultWakuConfig
 	nodeRecvCfg.Relay = true
 	receiver, err := StartWakuNode("receiver", &nodeRecvCfg)
 	require.NoError(t, err)
-	defer receiver.StopAndDestroy()

 	err = receiver.RelaySubscribe(DefaultPubsubTopic)
 	require.NoError(t, err)

+	defer func() {
+		publisher.StopAndDestroy()
+		time.Sleep(30 * time.Second)
+		receiver.StopAndDestroy()
+
+	}()
 	err = publisher.ConnectPeer(receiver)
 	require.NoError(t, err)

 	time.Sleep(2 * time.Second)

-	var memStats runtime.MemStats
-	runtime.ReadMemStats(&memStats)
-	startHeapKB := memStats.HeapAlloc / 1024
-	startRSSKB, err := utils.GetRSSKB()
-	require.NoError(t, err)
-	Debug("Before endurance test: HeapAlloc = %d KB, RSS = %d KB", startHeapKB, startRSSKB)
+	captureMemory(t.Name(), "at start")

-	maxIterations := 2000
+	maxIterations := 5000
 	payloadSize := 100 * 1024
 	largePayload := make([]byte, payloadSize)
 	for i := range largePayload {
@@ -368,25 +324,15 @@ func TestStressLargePayloadEphemeralMessagesEndurance(t *testing.T) {
 		Debug("###Iteration number %d", i+1)
 	}

-	runtime.ReadMemStats(&memStats)
-	endHeapKB := memStats.HeapAlloc / 1024
-	endRSSKB, err := utils.GetRSSKB()
-	require.NoError(t, err)
-	Debug("After endurance test: HeapAlloc = %d KB, RSS = %d KB", endHeapKB, endRSSKB)
+	captureMemory(t.Name(), "at end")
 }

-func TestStress2Nodes500IterationTearDown(t *testing.T) {
+func TestStress2Nodes2kIterationTearDown(t *testing.T) {

-	var memStats runtime.MemStats
-	runtime.ReadMemStats(&memStats)
-	initialMem := memStats.HeapAlloc
-	Debug("[%s] Memory usage at test START: %d KB", t.Name(), initialMem/1024)
-
-	initialRSS, err := utils.GetRSSKB()
-	require.NoError(t, err)
-	Debug("[%s] OS-level RSS at test START: %d KB", t.Name(), initialRSS)
-
-	totalIterations := 500
+	captureMemory(t.Name(), "at start")
+	var err error
+	totalIterations := 2000
 	for i := 1; i <= totalIterations; i++ {
 		var nodes []*WakuNode
 		for n := 1; n <= 2; n++ {
|
|||||||
runtime.GC()
|
runtime.GC()
|
||||||
time.Sleep(250 * time.Millisecond)
|
time.Sleep(250 * time.Millisecond)
|
||||||
runtime.GC()
|
runtime.GC()
|
||||||
if i == 250 || i == 500 {
|
|
||||||
runtime.ReadMemStats(&memStats)
|
|
||||||
Debug("Iteration %d, usage after teardown: %d KB", i, memStats.HeapAlloc/1024)
|
|
||||||
require.LessOrEqual(t, memStats.HeapAlloc, initialMem*3, "Memory usage soared above threshold after iteration %d", i)
|
|
||||||
rssNow, err := utils.GetRSSKB()
|
|
||||||
require.NoError(t, err)
|
|
||||||
Debug("Iteration %d, OS-level RSS after teardown: %d KB", i, rssNow)
|
|
||||||
//require.LessOrEqual(t, rssNow, initialRSS*10, "OS-level RSS soared above threshold after iteration %d", i)
|
|
||||||
}
|
|
||||||
Debug("Iteration numberrrrrr %d", i)
|
Debug("Iteration numberrrrrr %d", i)
|
||||||
}
|
}
|
||||||
runtime.GC()
|
runtime.GC()
|
||||||
time.Sleep(500 * time.Millisecond)
|
time.Sleep(500 * time.Millisecond)
|
||||||
runtime.GC()
|
runtime.GC()
|
||||||
runtime.ReadMemStats(&memStats)
|
captureMemory(t.Name(), "at end")
|
||||||
finalMem := memStats.HeapAlloc
|
|
||||||
Debug("[%s] Memory usage at test END: %d KB", t.Name(), finalMem/1024)
|
|
||||||
// require.LessOrEqual(t, finalMem, initialMem*3, "Memory usage soared above threshold after %d cycles", totalIterations)
|
|
||||||
finalRSS, err := utils.GetRSSKB()
|
|
||||||
require.NoError(t, err)
|
|
||||||
Debug("[%s] OS-level RSS at test END: %d KB", t.Name(), finalRSS)
|
|
||||||
//require.LessOrEqual(t, finalRSS, initialRSS*3, "OS-level RSS soared above threshold after %d cycles", totalIterations)
|
//require.LessOrEqual(t, finalRSS, initialRSS*3, "OS-level RSS soared above threshold after %d cycles", totalIterations)
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestPeerExchangePXLoad(t *testing.T) {
|
func TestPeerExchangePXLoad(t *testing.T) {
|
||||||
testName := "PeerExchangePXLoad"
|
|
||||||
pxServerCfg := DefaultWakuConfig
|
pxServerCfg := DefaultWakuConfig
|
||||||
pxServerCfg.PeerExchange = true
|
pxServerCfg.PeerExchange = true
|
||||||
pxServerCfg.Relay = true
|
pxServerCfg.Relay = true
|
||||||
@ -464,16 +394,7 @@ func TestPeerExchangePXLoad(t *testing.T) {
|
|||||||
|
|
||||||
time.Sleep(2 * time.Second)
|
time.Sleep(2 * time.Second)
|
||||||
|
|
||||||
var memStats runtime.MemStats
|
captureMemory(t.Name(), "at start")
|
||||||
runtime.ReadMemStats(&memStats)
|
|
||||||
startHeapKB := memStats.HeapAlloc / 1024
|
|
||||||
startRSSKB, err := utils.GetRSSKB()
|
|
||||||
require.NoError(t, err, "Failed to get initial RSS")
|
|
||||||
Debug("%s: Before test: HeapAlloc=%d KB, RSS=%d KB", testName, startHeapKB, startRSSKB)
|
|
||||||
|
|
||||||
// Save the initial memory reading to CSV
|
|
||||||
err = recordMemoryMetricsPX(testName, "start", startHeapKB, startRSSKB)
|
|
||||||
require.NoError(t, err, "Failed to record start metrics")
|
|
||||||
|
|
||||||
testDuration := 30 * time.Minute
|
testDuration := 30 * time.Minute
|
||||||
endTime := time.Now().Add(testDuration)
|
endTime := time.Now().Add(testDuration)
|
||||||
@ -509,13 +430,5 @@ func TestPeerExchangePXLoad(t *testing.T) {
|
|||||||
time.Sleep(1 * time.Second)
|
time.Sleep(1 * time.Second)
|
||||||
}
|
}
|
||||||
|
|
||||||
runtime.ReadMemStats(&memStats)
|
captureMemory(t.Name(), "at end")
|
||||||
endHeapKB := memStats.HeapAlloc / 1024
|
|
||||||
endRSSKB, err := utils.GetRSSKB()
|
|
||||||
require.NoError(t, err, "Failed to get final RSS")
|
|
||||||
Debug("Memory %s: After test: HeapAlloc=%d KB, RSS=%d KB", testName, endHeapKB, endRSSKB)
|
|
||||||
|
|
||||||
// Save the final memory reading to CSV
|
|
||||||
err = recordMemoryMetricsPX(testName, "end", endHeapKB, endRSSKB)
|
|
||||||
require.NoError(t, err, "Failed to record end metrics")
|
|
||||||
}
|
}
|
||||||
|
|||||||