fix_: nightly test runs (#5791)

* fix_: run nightly protocol tests as separate processes

* fix_: calculate timeout

* fix_: TEST_WITH_COVERAGE_REPORTS_DIR

* fix_: proper filter protocol package

* chore_: log run_unit_tests stages

* fix_: coverage reports merging

* chore_: more logs

* chore_: fix typo

* chore_: increase test timeouts

* fix_: properly filter packages

* feat_: UNIT_TEST_DRY_RUN flag for test runs

* fix_: UNIT_TEST_PACKAGES_FILTERED calculation

* fix_: stop force-running waku tests first

* fix_: delete unused file
Igor Sirotin 2024-09-03 12:50:09 +01:00 committed by GitHub
parent beaca5faf7
commit 7623f68679
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
5 changed files with 97 additions and 36 deletions

View File

@@ -361,19 +361,17 @@ docker-test: ##@tests Run tests in a docker container with golang.
test: test-unit ##@tests Run basic, short tests during development
test-unit: export BUILD_TAGS ?=
test-unit: export UNIT_TEST_DRY_RUN ?= false
test-unit: export UNIT_TEST_COUNT ?= 1
test-unit: export UNIT_TEST_FAILFAST ?= true
test-unit: export UNIT_TEST_RERUN_FAILS ?= true
test-unit: export UNIT_TEST_USE_DEVELOPMENT_LOGGER ?= true
test-unit: export UNIT_TEST_REPORT_CODECLIMATE ?= false
test-unit: export UNIT_TEST_PACKAGES ?= $(call sh, go list ./... | grep -E '/waku(/.*|$$)|/wakuv2(/.*|$$)') \
$(call sh, go list ./... | \
test-unit: export UNIT_TEST_PACKAGES ?= $(call sh, go list ./... | \
grep -v /vendor | \
grep -v /t/e2e | \
grep -v /t/benchmarks | \
grep -v /transactions/fake | \
grep -E -v '/waku(/.*|$$)' | \
grep -E -v '/wakuv2(/.*|$$)')
grep -v /transactions/fake)
test-unit: ##@tests Run unit and integration tests
./_assets/scripts/run_unit_tests.sh
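
Note: make's '?=' assignments are overridable from the environment, so the new dry-run mode can be exercised locally with something like this (a sketch, not part of the diff):

UNIT_TEST_DRY_RUN=true UNIT_TEST_COUNT=3 make test-unit

This prints the planned packages, iteration counts, and timeouts (see run_unit_tests.sh below) without executing any tests.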

View File

@@ -35,6 +35,11 @@ pipeline {
defaultValue: true,
description: 'Should the job report test coverage to CodeClimate?'
)
booleanParam(
name: 'UNIT_TEST_DRY_RUN',
defaultValue: false,
description: 'Should the job skip the actual test run and just print the test plan?'
)
}
options {
@@ -72,6 +77,7 @@ pipeline {
UNIT_TEST_RERUN_FAILS = "${params.UNIT_TEST_RERUN_FAILS}"
UNIT_TEST_USE_DEVELOPMENT_LOGGER = "${params.UNIT_TEST_USE_DEVELOPMENT_LOGGER}"
UNIT_TEST_REPORT_CODECLIMATE = "${params.UNIT_TEST_REPORT_CODECLIMATE}"
UNIT_TEST_DRY_RUN = "${params.UNIT_TEST_DRY_RUN}"
}
stages {
@@ -164,7 +170,7 @@ pipeline {
nix.shell('make test-unit V=1', pure: false)
}
sh "mv c.out test-coverage.out"
archiveArtifacts('test-coverage.out, coverage/codeclimate.json, test-coverage.html')
archiveArtifacts('test-coverage.out, coverage/codeclimate.json, test-coverage.html, coverage_merged.out')
}
}
} }
@@ -181,16 +187,16 @@ pipeline {
env.PKG_URL = "${currentBuild.absoluteUrl}/consoleText"
if (isTestNightlyJob()) {
archiveArtifacts('report.xml, test.log')
archiveArtifacts('report_*.xml, test_*.log, test-coverage.html, test-coverage.out, coverage/codeclimate.json')
}
if (params.UNIT_TEST_RERUN_FAILS) {
def rerunReports = findFiles(glob: 'report_rerun_fails.txt')
def rerunReports = findFiles(glob: 'report_rerun_fails_*.txt')
if (rerunReports.length > 0) {
archiveArtifacts('report_rerun_fails.txt')
archiveArtifacts('report_rerun_fails_*.txt')
}
}
junit(
testResults: 'report.xml',
testResults: 'report_*.xml',
skipOldReports: true,
skipPublishingChecks: true,
skipMarkingBuildUnstable: true
@@ -210,7 +216,7 @@ pipeline {
failure {
script {
github.notifyPR(false)
archiveArtifacts('test.log')
archiveArtifacts('**/test_*.log')
}
}
cleanup {

View File

@@ -34,37 +34,55 @@ redirect_stdout() {
}
run_test_for_packages() {
local output_file="test.log"
local coverage_file="test.coverage.out"
local report_file="report.xml"
local rerun_report_file="report_rerun_fails.txt"
local exit_code_file="exit_code.txt"
local packages="$1"
local iteration="$2"
local count="$3"
local single_timeout="$4"
local log_message="$5"
echo -e "${GRN}Testing:${RST} All packages. Single iteration. -test.count=${UNIT_TEST_COUNT}"
local output_file="test_${iteration}.log"
local coverage_file="test_${iteration}.coverage.out"
local report_file="report_${iteration}.xml"
local rerun_report_file="report_rerun_fails_${iteration}.txt"
local exit_code_file="exit_code_${iteration}.txt"
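# go test's -timeout bounds the entire test binary run, and -test.count repeats the suite within that run, so the timeout scales with count (e.g. 5m * 3 iterations = 15m)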
local timeout="$(( single_timeout * count ))m"
if [[ "${UNIT_TEST_DRY_RUN}" == 'true' ]]; then
echo -e "${GRN}Dry run ${iteration}. message:${RST} ${log_message}\n"\
"${YLW}Dry run ${iteration}. packages:${RST} ${packages}\n"\
"${YLW}Dry run ${iteration}. count:${RST} ${count}\n"\
"${YLW}Dry run ${iteration}. timeout:${RST} ${timeout}"
return 0
fi
echo -e "${GRN}Testing:${RST} ${log_message}. Iteration ${iteration}. -test.count=${count}. Timeout: ${timeout}"
gotestsum_flags="${GOTESTSUM_EXTRAFLAGS}"
if [[ "${CI}" == 'true' ]]; then
gotestsum_flags="${gotestsum_flags} --junitfile=${report_file} --rerun-fails-report=${rerun_report_file}"
fi
# Prepare env variables for `test-with-coverage.sh`
export TEST_WITH_COVERAGE_PACKAGES="${packages}"
export TEST_WITH_COVERAGE_COUNT="${count}"
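# Each process gets its own temp dir so parallel runs don't clobber each other's coverage.out.rerun.* files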
export TEST_WITH_COVERAGE_REPORTS_DIR="$(mktemp -d)"
# Cleanup previous coverage reports
rm -f coverage.out.rerun.*
rm -f "${TEST_WITH_COVERAGE_REPORTS_DIR}/coverage.out.rerun.*"
# Run tests
gotestsum --packages="${UNIT_TEST_PACKAGES}" ${gotestsum_flags} --raw-command -- \
gotestsum --packages="${packages}" ${gotestsum_flags} --raw-command -- \
./_assets/scripts/test-with-coverage.sh \
-v ${GOTEST_EXTRAFLAGS} \
-timeout 45m \
${GOTEST_EXTRAFLAGS} \
-timeout "${timeout}" \
-tags "${BUILD_TAGS}" | \
redirect_stdout "${output_file}"
local go_test_exit=$?
# Merge package coverage results
go run ./cmd/test-coverage-utils/gocovmerge.go coverage.out.rerun.* > ${coverage_file}
# Cleanup coverage reports
rm -f coverage.out.rerun.*
go run ./cmd/test-coverage-utils/gocovmerge.go ${TEST_WITH_COVERAGE_REPORTS_DIR}/coverage.out.rerun.* > ${coverage_file}
rm -f "${COVERAGE_REPORTS_DIR}/coverage.out.rerun.*"
echo "${go_test_exit}" > "${exit_code_file}"
if [[ "${go_test_exit}" -ne 0 ]]; then
@@ -83,33 +101,70 @@ fi
rm -rf ./**/*.coverage.out
echo -e "${GRN}Testing HEAD:${RST} $(git rev-parse HEAD)"
run_test_for_packages
DEFAULT_TIMEOUT_MINUTES=5
PROTOCOL_TIMEOUT_MINUTES=45
HAS_PROTOCOL_PACKAGE=true
# Detect the package by its exact '/protocol' suffix, consistent with the filter used below
if ! echo "${UNIT_TEST_PACKAGES}" | tr ' ' '\n' | grep -q '/protocol$'; then
HAS_PROTOCOL_PACKAGE=false
fi
if [[ $HAS_PROTOCOL_PACKAGE == 'false' ]]; then
# This is the default single-line flow for testing all packages
# The `else` branch is temporary and will be removed once the `protocol` package runtime is optimized.
run_test_for_packages "${UNIT_TEST_PACKAGES}" "0" "${UNIT_TEST_COUNT}" "${DEFAULT_TIMEOUT_MINUTES}" "All packages"
else
# Spawn a process to test all packages except `protocol`
UNIT_TEST_PACKAGES_FILTERED=$(echo "${UNIT_TEST_PACKAGES}" | tr ' ' '\n' | grep -v '/protocol$' | tr '\n' ' ')
run_test_for_packages "${UNIT_TEST_PACKAGES_FILTERED}" "0" "${UNIT_TEST_COUNT}" "${DEFAULT_TIMEOUT_MINUTES}" "All packages except 'protocol'" &
# Spawn separate processes to run `protocol` package
for ((i=1; i<=UNIT_TEST_COUNT; i++)); do
run_test_for_packages github.com/status-im/status-go/protocol "${i}" 1 "${PROTOCOL_TIMEOUT_MINUTES}" "Only 'protocol' package" &
done
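# A bare 'wait' returns 0 regardless of the children's exit codes; failures are surfaced below via the exit_code_*.txt files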
wait
fi
# Gather test coverage results
rm -f c.out c-full.out
go run ./cmd/test-coverage-utils/gocovmerge.go $(find -iname "*.coverage.out") >> c-full.out
merged_coverage_report="coverage_merged.out"
final_coverage_report="c.out" # Name expected by cc-test-reporter
coverage_reports=$(find . -iname "*.coverage.out")
rm -f ${final_coverage_report} ${merged_coverage_report}
echo -e "${GRN}Gathering test coverage results: ${RST} output: ${merged_coverage_report}, input: ${coverage_reports}"
echo $coverage_reports | xargs go run ./cmd/test-coverage-utils/gocovmerge.go > ${merged_coverage_report}
# Filter out test coverage for packages in ./cmd
grep -v '^github.com/status-im/status-go/cmd/' c-full.out > c.out
echo -e "${GRN}Filtering test coverage packages:${RST} ./cmd"
grep -v '^github.com/status-im/status-go/cmd/' ${merged_coverage_report} > ${final_coverage_report}
# Generate HTML coverage report
go tool cover -html c.out -o test-coverage.html
echo -e "${GRN}Generating HTML coverage report${RST}"
go tool cover -html ${final_coverage_report} -o test-coverage.html
# Upload coverage report to CodeClimate
if [[ $UNIT_TEST_REPORT_CODECLIMATE == 'true' ]]; then
echo -e "${GRN}Uploading coverage report to CodeClimate${RST}"
# https://docs.codeclimate.com/docs/jenkins#jenkins-ci-builds
GIT_COMMIT=$(git log | grep -m1 -oE '[^ ]+$')
cc-test-reporter format-coverage --prefix=github.com/status-im/status-go # To generate 'coverage/codeclimate.json'
cc-test-reporter after-build --prefix=github.com/status-im/status-go
fi
# Generate report with test stats
shopt -s globstar nullglob # Enable recursive globbing
if [[ "${UNIT_TEST_COUNT}" -gt 1 ]]; then
for exit_code_file in "${GIT_ROOT}"/**/exit_code.txt; do
for exit_code_file in "${GIT_ROOT}"/**/exit_code_*.txt; do
read exit_code < "${exit_code_file}"
if [[ "${exit_code}" -ne 0 ]]; then
echo -e "${GRN}Generating test stats${RST}, exit code: ${exit_code}"
mkdir -p "${GIT_ROOT}/reports"
"${GIT_ROOT}/_assets/scripts/test_stats.py" | redirect_stdout "${GIT_ROOT}/reports/test_stats.txt"
"${GIT_ROOT}/_assets/scripts/test_stats.py" | tee "${GIT_ROOT}/reports/test_stats.txt"
exit ${exit_code}
fi
done
fi
echo -e "${GRN}Testing finished${RST}"

View File

@@ -2,15 +2,15 @@
set -eu
packages=""
coverage_file_path="$(mktemp coverage.out.rerun.XXXXXXXXXX)"
coverage_file_path="$(mktemp coverage.out.rerun.XXXXXXXXXX --tmpdir="${TEST_WITH_COVERAGE_REPORTS_DIR}")"
count=1
# This is a hack to workaround gotestsum behaviour. When using a --raw-command,
# gotestsum will only pass the package when rerunning a test. Otherwise we should pass the package ourselves.
# https://github.com/gotestyourself/gotestsum/blob/03568ab6d48faabdb632013632ac42687b5f17d1/cmd/main.go#L331-L336
if [[ "$*" != *"-test.run"* ]]; then
packages="${UNIT_TEST_PACKAGES}"
count=${UNIT_TEST_COUNT}
packages="${TEST_WITH_COVERAGE_PACKAGES}"
count=${TEST_WITH_COVERAGE_COUNT}
fi
go test -json \

View File

@@ -8,7 +8,9 @@ import re
test_stats = defaultdict(lambda: defaultdict(int))
skipped_tests = {} # Use a dictionary to store test names and their skip reasons
for file in glob.glob("report.xml", recursive=True):
file_path = "**/report_*.xml"
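# '**' with recursive=True matches report_*.xml in the repo root as well as in subdirectories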
for file in glob.glob(file_path, recursive=True):
tree = ET.parse(file)
root = tree.getroot()
for testcase in root.iter("testcase"):