chore(test_stats)_: better stats, print skipped tests (#5781)

* chore(test_stats)_: better stats, list skipped tests

* chore_: add skip reasons to some tests
Igor Sirotin 2024-08-29 21:09:26 +01:00 committed by GitHub
parent dc7ca3ddb4
commit 28e7a364c7
3 changed files with 30 additions and 5 deletions


@@ -3,8 +3,10 @@
 import glob
 import xml.etree.ElementTree as ET
 from collections import defaultdict
+import re
 
 test_stats = defaultdict(lambda: defaultdict(int))
+skipped_tests = {}  # Use a dictionary to store test names and their skip reasons
 
 for file in glob.glob("report.xml", recursive=True):
     tree = ET.parse(file)
@@ -19,6 +21,21 @@ for file in glob.glob("report.xml", recursive=True):
         elif testcase.find("error") is not None:
             test_stats[test_name]["failed_runs"] += 1
+
+        # Check for skipped tests
+        skipped_element = testcase.find("skipped")
+        if skipped_element is not None:
+            message = skipped_element.attrib.get("message", "")
+            # Extract the real reason from the message
+            match = re.search(r': (.*?)\s*--- SKIP', message)
+            skip_reason = match.group(1).strip() if match else "unknown reason"
+            skipped_tests[test_name] = skip_reason  # Store test name and skip reason
 
+# Filter out root test cases if they have subtests
+filtered_test_stats = {
+    name: stats for name, stats in test_stats.items()
+    if not any(subtest.startswith(name + "/") for subtest in test_stats)
+}
+
 failing_test_stats = [
     {
         "name": name,
@@ -26,20 +43,28 @@ failing_test_stats = [
         "failed_runs": stats["failed_runs"],
         "total_runs": stats["total_runs"]
     }
-    for name, stats in test_stats.items() if stats["failed_runs"] != 0
+    for name, stats in filtered_test_stats.items() if stats["failed_runs"] != 0
 ]
 
 sorted_failing_test_stats = sorted(failing_test_stats,
                                    key=lambda x: x["failure_rate"],
                                    reverse=True)
 
+flaky_skipped_count = sum(1 for reason in skipped_tests.values() if reason == "flaky test")
+
 print("---")
-print("Failing tests stats")
+print(f"Failing tests stats (total: {len(failing_test_stats)})")
 print("---")
 for test_stat in sorted_failing_test_stats:
-    print("{}: {}% ({} of {} failed)".format(
+    print("{}: {:.1f}% ({} of {} failed)".format(
         test_stat['name'],
         test_stat['failure_rate'] * 100,
         test_stat['failed_runs'],
         test_stat['total_runs']
     ))
+
+print("---")
+print(f"Skipped tests (total: {len(skipped_tests)}, skipped as flaky: {flaky_skipped_count})")
+print("---")
+for test_name, skip_reason in skipped_tests.items():
+    print(f"{test_name}: {skip_reason}")


@@ -21,7 +21,7 @@ import (
 )
 
 func TestMessengerPeersyncingSuite(t *testing.T) {
-    t.SkipNow() // FIXME
+    t.Skip("broken test") // FIXME
     suite.Run(t, new(MessengerPeersyncingSuite))
 }


@@ -82,7 +82,7 @@ func TestGetFilterChangesResetsTimer(t *testing.T) {
 }
 
 func TestGetFilterLogs(t *testing.T) {
-    t.Skip("Skipping due to flakiness: https://github.com/status-im/status-go/issues/1281")
+    t.Skip("flaky test")
     tracker := new(callTracker)
     api := &PublicAPI{