feat: detect flaky tests

This reverts commit f12803ec95.
Anton Iakimov 2023-11-21 16:39:11 +01:00
parent f12803ec95
commit d1a9ad599c
9 changed files with 112 additions and 6 deletions
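The change wires two knobs through the Jenkinsfiles, the Makefile, and the unit-test script: UNIT_TEST_COUNT (how many times each package is run) and UNIT_TEST_FAILFAST (whether to stop at the first failing package). As a rough local sketch only, assuming the test-unit target forwards these environment variables to the unit-test script shown below, a flakiness hunt might look like:

# Illustrative values, not part of this commit: run each package 5 times
# and keep going past failures instead of stopping at the first one.
UNIT_TEST_COUNT=5 UNIT_TEST_FAILFAST=false make test-unit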

View File

@@ -314,6 +314,7 @@ docker-test: ##@tests Run tests in a docker container with golang.
 test: test-unit ##@tests Run basic, short tests during development
 test-unit: export BUILD_TAGS ?=
+test-unit: export UNIT_TEST_FAILFAST ?= true
 # Ensure 'waku' and 'wakuv2' tests are executed first to reduce the impact of flaky tests.
 # Otherwise, the entire target might fail at the end, making re-runs time-consuming.
 test-unit: export UNIT_TEST_PACKAGES ?= $(shell go list ./... | \

View File

@@ -1,3 +1,4 @@
+#!/usr/bin/env groovy
 library 'status-jenkins-lib@v1.7.0'
 pipeline {

View File

@@ -1,3 +1,4 @@
+#!/usr/bin/env groovy
 library 'status-jenkins-lib@v1.7.0'
 pipeline {

View File

@@ -1,3 +1,4 @@
+#!/usr/bin/env groovy
 library 'status-jenkins-lib@v1.7.0'
 pipeline {

View File

@@ -1,3 +1,4 @@
+#!/usr/bin/env groovy
 library 'status-jenkins-lib@v1.7.0'
 pipeline {

View File

@@ -1,3 +1,4 @@
+#!/usr/bin/env groovy
 library 'status-jenkins-lib@v1.7.0'
 pipeline {

View File

@@ -1,3 +1,4 @@
+#!/usr/bin/env groovy
 library 'status-jenkins-lib@v1.7.0'
 pipeline {
@@ -9,12 +10,22 @@ pipeline {
       defaultValue: 'develop',
       description: 'Name of branch to build.'
     )
+    string(
+      name: 'UNIT_TEST_COUNT',
+      defaultValue: getDefaultUnitTestCount(),
+      description: 'How many times to run tests?'
+    )
+    booleanParam(
+      name: 'UNIT_TEST_FAILFAST',
+      defaultValue: getDefaultUnitTestFailfast(),
+      description: 'Should the job fail fast on first test failure?'
+    )
   }
   options {
     timestamps()
     /* Prevent Jenkins jobs from running forever */
-    timeout(time: 40, unit: 'MINUTES')
+    timeout(time: getDefaultTimeout(), unit: 'MINUTES')
     disableConcurrentBuilds()
     /* manage how many builds we keep */
     buildDiscarder(logRotator(
@@ -99,3 +110,24 @@ pipeline {
     cleanup { dir(env.TMPDIR) { deleteDir() } }
   } // post
 } // pipeline
+
+def getDefaultUnitTestCount() {
+  if (env.JOB_BASE_NAME == 'tests-nightly') {
+    return '10'
+  }
+  return '1'
+}
+
+def getDefaultUnitTestFailfast() {
+  if (env.JOB_BASE_NAME == 'tests-nightly') {
+    return false
+  }
+  return true
+}
+
+def getDefaultTimeout() {
+  if (env.JOB_BASE_NAME == 'tests-nightly') {
+    return 8*60
+  }
+  return 40
+}

View File

@@ -5,21 +5,53 @@ GIT_ROOT=$(cd "${BASH_SOURCE%/*}" && git rev-parse --show-toplevel)
 source "${GIT_ROOT}/_assets/scripts/colors.sh"
+
+if [[ $UNIT_TEST_FAILFAST == 'true' ]]; then
+  GOTEST_EXTRAFLAGS="${GOTEST_EXTRAFLAGS} --failfast"
+fi
+
+if [[ -z "${UNIT_TEST_COUNT}" ]]; then
+  UNIT_TEST_COUNT=1
+fi
+
+redirect_stdout() {
+  output_file=$1
+
+  if [[ "${CI}" == 'true' ]];
+  then
+    cat > "${output_file}";
+  else
+    tee "${output_file}";
+  fi
+}
+
+last_failing_exit_code=0
+
 for package in ${UNIT_TEST_PACKAGES}; do
   echo -e "${GRN}Testing:${RST} ${package}"
   package_dir=$(go list -f "{{.Dir}}" "${package}")
   output_file=${package_dir}/test.log
-  go test -tags "${BUILD_TAGS}" -timeout 30m -v -failfast "${package}" ${GOTEST_EXTRAFLAGS} | \
-    if [ "${CI}" = "true" ]; then cat > "${output_file}"; else tee "${output_file}"; fi
+  go test -timeout 30m -count="${UNIT_TEST_COUNT}" -tags "${BUILD_TAGS}" -v "${package}" ${GOTEST_EXTRAFLAGS} | \
+    redirect_stdout "${output_file}"
   go_test_exit=$?
-  if [ "${CI}" = "true" ]; then
+  if [[ "${CI}" == 'true' ]]; then
     go-junit-report -in "${output_file}" -out "${package_dir}"/report.xml
   fi
-  if [ ${go_test_exit} -ne 0 ]; then
+  if [[ "${go_test_exit}" -ne 0 ]]; then
     echo -e "${YLW}Failed, see the log:${RST} ${BLD}${output_file}${RST}"
-    exit "${go_test_exit}"
+    if [[ "$UNIT_TEST_FAILFAST" == 'true' ]]; then
+      exit "${go_test_exit}"
+    fi
+    last_failing_exit_code="${go_test_exit}"
   fi
 done
+
+if [[ "${last_failing_exit_code}" -ne 0 ]]; then
+  if [[ "${UNIT_TEST_COUNT}" -gt 1 ]]; then
+    "${GIT_ROOT}/_assets/scripts/test_stats.py"
+  fi
+  exit "${last_failing_exit_code}"
+fi

_assets/scripts/test_stats.py (new executable file, 36 lines added)
View File

@@ -0,0 +1,36 @@
+#!/usr/bin/env python
+import glob
+import xml.etree.ElementTree as ET
+from collections import defaultdict
+
+test_stats = defaultdict(lambda: defaultdict(int))
+
+for file in glob.glob("**/report.xml", recursive=True):
+    tree = ET.parse(file)
+    root = tree.getroot()
+    for testcase in root.iter("testcase"):
+        test_name = testcase.attrib["name"]
+        test_stats[test_name]["total"] += 1
+        if testcase.find("failure") is not None:
+            test_stats[test_name]["failed"] += 1
+        elif testcase.find("error") is not None:
+            test_stats[test_name]["failed"] += 1
+
+failing_test_stats = [
+    {"name": name, "failure_rate": stats["failed"] / stats["total"]}
+    for name, stats in test_stats.items() if stats["failed"] != 0
+]
+
+sorted_failing_test_stats = sorted(failing_test_stats,
+                                   key=lambda x: x["failure_rate"],
+                                   reverse=True)
+
+print("---")
+print("Failing tests stats")
+print("(test name: failure rate)")
+print("---")
+
+for test_stat in sorted_failing_test_stats:
+    print(f"{test_stat['name']}: {test_stat['failure_rate'] * 100}%")