chore: add reporting of nightly results to test rail (#228)

* chore: Rerun failed test added

* chore: Create Test Run in TestRail if it does not exist

#227
Vladimir Druzhinin, 2023-11-02 17:08:22 +01:00 (committed via GitHub)
parent 950b9dfa8a
commit dbf485f669
5 changed files with 90 additions and 36 deletions


@@ -36,7 +36,7 @@ pipeline {
         defaultValue: ''
       )
       string(
-        name: 'TESTRAIL_RUN_ID',
+        name: 'TESTRAIL_RUN_NAME',
         description: 'Test run ID in Test Rail.',
         defaultValue: ''
       )
@@ -70,6 +70,7 @@ pipeline {
     LD_LIBRARY_PATH = "${SQUISH_DIR}/lib:${SQUISH_DIR}/python3/lib:${LD_LIBRARY_PATH}"
     TESTRAIL_URL = 'https://ethstatus.testrail.net'
+    TESTRAIL_PROJECT_ID = 17
   }
   stages {
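
Both values surface to the test process as ordinary environment variables, which the configs.testrail module (changed below) reads back, along these lines:

    import os

    project_id = os.getenv('TESTRAIL_PROJECT_ID', '')  # '17' when launched from this pipeline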


@@ -1,7 +1,7 @@
 import logging
 import os

-LOG_LEVEL = logging.DEBUG
+LOG_LEVEL = logging.INFO
 UPDATE_VP_ON_FAIL = False
 DEV_BUILD = False
 APP_DIR = os.getenv('APP_DIR')


@@ -1,6 +1,11 @@
 import os
+from datetime import datetime

-TESTRAIL_RUN_ID = os.getenv('TESTRAIL_RUN_ID', '').strip()
-TESTRAIL_URL = os.getenv('TESTRAIL_URL', None)
-TESTRAIL_USR = os.getenv('TESTRAIL_USR', None)
-TESTRAIL_PSW = os.getenv('TESTRAIL_PSW', None)
+CI_BUILD_URL = os.getenv('BUILD_URL', '')
+CI_NIGHTLY = True if 'nightly' in CI_BUILD_URL else False
+
+RUN_NAME = os.getenv('TESTRAIL_RUN_NAME', f'Nightly regression {datetime.now():%d.%m.%Y}' if CI_NIGHTLY else '')
+PROJECT_ID = os.getenv('TESTRAIL_PROJECT_ID', '')
+URL = os.getenv('TESTRAIL_URL', '')
+USR = os.getenv('TESTRAIL_USR', '')
+PSW = os.getenv('TESTRAIL_PSW', '')
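
The default run name above is derived from the Jenkins BUILD_URL; a minimal sketch of how it resolves (the URL here is hypothetical):

    import os
    from datetime import datetime

    os.environ['BUILD_URL'] = 'https://ci.example/job/nightly/42/'  # hypothetical
    CI_NIGHTLY = 'nightly' in os.environ['BUILD_URL']
    # With TESTRAIL_RUN_NAME unset on a nightly build this prints e.g.
    # 'Nightly regression 02.11.2023' (day.month.year); on other builds RUN_NAME
    # stays '' and the fixtures below skip TestRail reporting entirely.
    print(f'Nightly regression {datetime.now():%d.%m.%Y}' if CI_NIGHTLY else '')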


@@ -1,5 +1,6 @@
 import logging
 import typing
+from collections import namedtuple

 import pytest
 from testrail_api import TestRailAPI
@@ -9,29 +10,47 @@ import configs
 _logger = logging.getLogger(__name__)

 testrail_api = None
+test_run_id = None

 PASS = 1
 FAIL = 5
-RETEST = 4
+SKIPPED = 11
+UNTESTED = 12
+
+test_case = namedtuple('TestCase', ['id', 'skipped'])


 @pytest.fixture(scope='session')
 def init_testrail_api(request):
-    global testrail_api
-    if configs.testrail.TESTRAIL_RUN_ID:
+    if configs.testrail.RUN_NAME:
         _logger.info('TestRail API initializing')
+        global testrail_api
         testrail_api = TestRailAPI(
-            configs.testrail.TESTRAIL_URL,
-            configs.testrail.TESTRAIL_USR,
-            configs.testrail.TESTRAIL_PSW
+            configs.testrail.URL,
+            configs.testrail.USR,
+            configs.testrail.PSW
         )
-        test_case_ids = get_test_ids_in_session(request)
-        for test_case_id in test_case_ids:
-            if is_test_case_in_run(test_case_id):
-                _update_result(test_case_id, RETEST)
-                _logger.info(f'Test: "{test_case_id}" marked as "Retest"')
+        test_cases = get_test_cases_in_session(request)
+        test_run = get_test_run(configs.testrail.RUN_NAME)
+        if not test_run:
+            test_case_ids = list(set([tc_id.id for tc_id in test_cases]))
+            test_run = create_test_run(configs.testrail.RUN_NAME, test_case_ids)
+        global test_run_id
+        test_run_id = test_run['id']
+        for test_case in test_cases:
+            if is_test_case_in_run(test_case.id):
+                if test_case.skipped:
+                    _update_result(test_case.id, SKIPPED, test_case.skipped)
+                    _logger.info(f'Test: "{test_case.id}" marked as "Skipped"')
+                else:
+                    if _get_test_case_status(test_case.id) != UNTESTED:
+                        _update_result(test_case.id, UNTESTED)
+                        _logger.info(f'Test: "{test_case.id}" marked as "Untested"')
             else:
-                _logger.info(f'Report result for test case: {test_case_id} skipped, not in test run')
+                _logger.info(
+                    f'Report result for test case: {test_case.id} skipped, not in test run: {configs.testrail.RUN_NAME}')
     else:
         _logger.info('TestRail report skipped')
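
A note on the status IDs: PASS = 1 and FAIL = 5 are TestRail's built-in status IDs, while SKIPPED = 11 and UNTESTED = 12 are evidently custom statuses of this TestRail instance (built-in IDs only run 1 through 5; custom ones are assigned from 6 upward).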
@@ -39,33 +58,33 @@ def init_testrail_api(request):
 @pytest.fixture
 def check_result(request):
     yield
-    if configs.testrail.TESTRAIL_RUN_ID:
+    if configs.testrail.RUN_NAME:
         item = request.node
         test_case_ids = _find_test_case_id_markers(request)
         for test_case_id in test_case_ids:
             if is_test_case_in_run(test_case_id):
                 current_test_status = _get_test_case_status(test_case_id)
                 if item.rep_call.failed:
-                    if current_test_status != FAIL:
-                        _update_result(test_case_id, FAIL)
-                    _update_comment(test_case_id, f"{request.node.name} FAILED")
+                    _update_result(test_case_id, FAIL, item.rep_call.longreprtext)
                 else:
                     if current_test_status != FAIL:
-                        _update_result(test_case_id, PASS)
-                        _update_comment(test_case_id, f"{request.node.name} SUCCESS")
+                        _update_result(test_case_id, PASS, f"{request.node.name} SUCCESS")
+                    else:
+                        _update_comment(test_case_id, f"{request.node.name} SUCCESS")


-def _update_result(test_case_id: int, result: int):
+def _update_result(test_case_id: int, result: int, comment: str = None):
     testrail_api.results.add_result_for_case(
-        run_id=configs.testrail.TESTRAIL_RUN_ID,
+        run_id=test_run_id,
         case_id=test_case_id,
         status_id=result,
+        comment=comment or ""
     )


 def _update_comment(test_case_id: int, comment: str):
     testrail_api.results.add_result_for_case(
-        run_id=configs.testrail.TESTRAIL_RUN_ID,
+        run_id=test_run_id,
         case_id=test_case_id,
         comment=comment
     )
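
check_result relies on item.rep_call, which pytest does not provide out of the box; it is conventionally attached in conftest.py with the standard makereport hookwrapper. A minimal sketch of that pattern (assumed, not part of this diff):

    import pytest

    @pytest.hookimpl(tryfirst=True, hookwrapper=True)
    def pytest_runtest_makereport(item, call):
        # Expose each phase's report on the item so fixtures can check
        # item.rep_setup / item.rep_call / item.rep_teardown after their yield.
        outcome = yield
        report = outcome.get_result()
        setattr(item, f'rep_{report.when}', report)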
@@ -80,7 +99,7 @@ def _find_test_case_id_markers(request) -> typing.List[int]:
 def _get_test_case_status(test_case_id: int) -> int:
-    test_case_results = testrail_api.results.get_results_for_case(configs.testrail.TESTRAIL_RUN_ID, test_case_id)
+    test_case_results = testrail_api.results.get_results_for_case(test_run_id, test_case_id)
     try:
         result = 0
         while True:
@@ -90,12 +109,12 @@ def _get_test_case_status(test_case_id: int) -> int:
         else:
             return last_test_case_status
     except:
-        return RETEST
+        return SKIPPED


 def is_test_case_in_run(test_case_id: int) -> bool:
     try:
-        testrail_api.results.get_results_for_case(configs.testrail.TESTRAIL_RUN_ID, test_case_id)
+        testrail_api.results.get_results_for_case(test_run_id, test_case_id)
     except Exception as err:
         return False
     else:
@@ -107,7 +126,7 @@ def _get_test_cases():
     limit = 250
     chunk = 0
     while True:
-        tests = testrail_api.tests.get_tests(configs.testrail.TESTRAIL_RUN_ID, offset=chunk)['tests']
+        tests = testrail_api.tests.get_tests(test_run_id, offset=chunk)['tests']
         results.extend(tests)
         if len(tests) == limit:
             chunk += limit
@@ -115,11 +134,39 @@ def _get_test_cases():
     return results


-def get_test_ids_in_session(request):
+def get_test_cases_in_session(request) -> typing.List[test_case]:
     tests = request.session.items
-    ids = []
+    test_cases = []
     for test in tests:
+        tc_ids = []
+        skipped = ''
         for marker in getattr(test, 'own_markers', []):
-            if getattr(marker, 'name', '') == 'case':
-                ids.extend(list(marker.args))
-    return set(ids)
+            match getattr(marker, 'name', ''):
+                case 'case':
+                    tc_ids = list(marker.args)
+                case 'skip':
+                    skipped = f'Reason: {marker.kwargs.get("reason", "")}'
+        for tc_id in tc_ids:
+            test_cases.append(test_case(tc_id, skipped))
+    return test_cases
+
+
+def create_test_run(name: str, ids: list) -> dict:
+    test_run = testrail_api.runs.add_run(
+        project_id=configs.testrail.PROJECT_ID,
+        name=name,
+        description=f'Jenkins: {configs.testrail.CI_BUILD_URL}',
+        include_all=False if ids else True,
+        case_ids=ids or None
+    )
+    return test_run
+
+
+def get_test_run(name: str) -> typing.Optional[dict]:
+    test_runs = testrail_api.runs.get_runs(
+        project_id=configs.testrail.PROJECT_ID,
+        is_completed=False
+    )
+    for test_run in test_runs['runs']:
+        if test_run['name'] == name:
+            return test_run
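
Two remarks on the collector above: the match statement requires Python 3.10 or newer, and the 'case'/'skip' marker names imply tests annotated roughly like this (the case ID and skip reason are hypothetical, and the custom 'case' marker is assumed to be registered in the project's pytest config):

    import pytest

    @pytest.mark.case(703005)           # hypothetical TestRail case ID
    @pytest.mark.skip(reason='flaky')   # optional; reported as SKIPPED with this reason
    def test_example():
        ...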


@@ -8,3 +8,4 @@ atomacos==3.3.0; platform_system == "Darwin"
 allure-pytest==2.13.2
 testrail-api==1.12.0
 pyperclip==1.8.2
+pytest-rerunfailures==11.1.2
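
pytest-rerunfailures backs the "Rerun failed test" part of this commit. The plugin is typically driven by CLI flags; the exact invocation lives in the Jenkins pipeline rather than in this diff, but it would look something like:

    pytest --reruns 2 --reruns-delay 5   # rerun each failed test up to 2 times, 5 s apart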