chore: add reporting of nightly results to test rail (#228)
* chore: Rerun of failed tests added
* chore: Create Test Run in TestRail if it does not exist (#227)
This commit is contained in:
parent 950b9dfa8a
commit dbf485f669
@@ -36,7 +36,7 @@ pipeline {
       defaultValue: ''
     )
     string(
-      name: 'TESTRAIL_RUN_ID',
+      name: 'TESTRAIL_RUN_NAME',
       description: 'Test run ID in Test Rail.',
       defaultValue: ''
     )
@@ -70,6 +70,7 @@ pipeline {
     LD_LIBRARY_PATH = "${SQUISH_DIR}/lib:${SQUISH_DIR}/python3/lib:${LD_LIBRARY_PATH}"

     TESTRAIL_URL = 'https://ethstatus.testrail.net'
+    TESTRAIL_PROJECT_ID = 17
   }

   stages {
@@ -1,7 +1,7 @@
 import logging
 import os

-LOG_LEVEL = logging.DEBUG
+LOG_LEVEL = logging.INFO
 UPDATE_VP_ON_FAIL = False
 DEV_BUILD = False
 APP_DIR = os.getenv('APP_DIR')
@@ -1,6 +1,11 @@
 import os
+from datetime import datetime

-TESTRAIL_RUN_ID = os.getenv('TESTRAIL_RUN_ID', '').strip()
-TESTRAIL_URL = os.getenv('TESTRAIL_URL', None)
-TESTRAIL_USR = os.getenv('TESTRAIL_USR', None)
-TESTRAIL_PSW = os.getenv('TESTRAIL_PSW', None)
+CI_BUILD_URL = os.getenv('BUILD_URL', '')
+CI_NIGHTLY = True if 'nightly' in CI_BUILD_URL else False
+
+RUN_NAME = os.getenv('TESTRAIL_RUN_NAME', f'Nightly regression {datetime.now():%d.%m.%Y}' if CI_NIGHTLY else '')
+PROJECT_ID = os.getenv('TESTRAIL_PROJECT_ID', '')
+URL = os.getenv('TESTRAIL_URL', '')
+USR = os.getenv('TESTRAIL_USR', '')
+PSW = os.getenv('TESTRAIL_PSW', '')
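For illustration (not part of the commit): with CI_NIGHTLY detected and no TESTRAIL_RUN_NAME override, the default run name comes from the f-string above, e.g.:

    from datetime import datetime

    # Prints something like 'Nightly regression 24.05.2024' (day.month.year)
    print(f'Nightly regression {datetime.now():%d.%m.%Y}')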
@@ -1,5 +1,6 @@
 import logging
 import typing
+from collections import namedtuple

 import pytest
 from testrail_api import TestRailAPI
@@ -9,29 +10,47 @@ import configs
 _logger = logging.getLogger(__name__)

 testrail_api = None
+test_run_id = None

 PASS = 1
 FAIL = 5
 RETEST = 4
+SKIPPED = 11
+UNTESTED = 12
+
+test_case = namedtuple('TestCase', ['id', 'skipped'])


 @pytest.fixture(scope='session')
 def init_testrail_api(request):
-    global testrail_api
-    if configs.testrail.TESTRAIL_RUN_ID:
+    if configs.testrail.RUN_NAME:
         _logger.info('TestRail API initializing')
+        global testrail_api
         testrail_api = TestRailAPI(
-            configs.testrail.TESTRAIL_URL,
-            configs.testrail.TESTRAIL_USR,
-            configs.testrail.TESTRAIL_PSW
+            configs.testrail.URL,
+            configs.testrail.USR,
+            configs.testrail.PSW
         )
-        test_case_ids = get_test_ids_in_session(request)
-        for test_case_id in test_case_ids:
-            if is_test_case_in_run(test_case_id):
-                _update_result(test_case_id, RETEST)
-                _logger.info(f'Test: "{test_case_id}" marked as "Retest"')
+        test_cases = get_test_cases_in_session(request)
+        test_run = get_test_run(configs.testrail.RUN_NAME)
+        if not test_run:
+            test_case_ids = list(set([tc_id.id for tc_id in test_cases]))
+            test_run = create_test_run(configs.testrail.RUN_NAME, test_case_ids)
+
+        global test_run_id
+        test_run_id = test_run['id']
+
+        for test_case in test_cases:
+            if is_test_case_in_run(test_case.id):
+                if test_case.skipped:
+                    _update_result(test_case.id, SKIPPED, test_case.skipped)
+                    _logger.info(f'Test: "{test_case.id}" marked as "Skipped"')
+                else:
+                    if _get_test_case_status(test_case.id) != UNTESTED:
+                        _update_result(test_case.id, UNTESTED)
+                        _logger.info(f'Test: "{test_case.id}" marked as "Untested"')
             else:
-                _logger.info(f'Report result for test case: {test_case_id} skipped, not in test run')
+                _logger.info(
+                    f'Report result for test case: {test_case.id} skipped, not in test run: {configs.testrail.RUN_NAME}')
     else:
         _logger.info('TestRail report skipped')
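The fixture collects cases via get_test_cases_in_session() (last hunk below), which reads the pytest markers 'case' (TestRail case IDs in marker.args) and 'skip' (reason reported with the SKIPPED status). A test wired up for reporting therefore presumably looks like this sketch; the case ID is made up:

    import pytest

    @pytest.mark.case(123456)  # hypothetical TestRail case ID
    @pytest.mark.skip(reason='blocked by driver crash')  # surfaces as the SKIPPED comment
    def test_example():
        ...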
@@ -39,33 +58,33 @@ def init_testrail_api(request):
 @pytest.fixture
 def check_result(request):
     yield
-    if configs.testrail.TESTRAIL_RUN_ID:
+    if configs.testrail.RUN_NAME:
         item = request.node
         test_case_ids = _find_test_case_id_markers(request)
         for test_case_id in test_case_ids:
             if is_test_case_in_run(test_case_id):
                 current_test_status = _get_test_case_status(test_case_id)
                 if item.rep_call.failed:
                     if current_test_status != FAIL:
-                        _update_result(test_case_id, FAIL)
-                        _update_comment(test_case_id, f"{request.node.name} FAILED")
+                        _update_result(test_case_id, FAIL, item.rep_call.longreprtext)
                 else:
                     if current_test_status != FAIL:
-                        _update_result(test_case_id, PASS)
+                        _update_result(test_case_id, PASS, f"{request.node.name} SUCCESS")
                     else:
                         _update_comment(test_case_id, f"{request.node.name} SUCCESS")


-def _update_result(test_case_id: int, result: int):
+def _update_result(test_case_id: int, result: int, comment: str = None):
     testrail_api.results.add_result_for_case(
-        run_id=configs.testrail.TESTRAIL_RUN_ID,
+        run_id=test_run_id,
         case_id=test_case_id,
         status_id=result,
+        comment=comment or ""
     )


 def _update_comment(test_case_id: int, comment: str):
     testrail_api.results.add_result_for_case(
-        run_id=configs.testrail.TESTRAIL_RUN_ID,
+        run_id=test_run_id,
         case_id=test_case_id,
         comment=comment
     )
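check_result reads item.rep_call, which pytest does not attach by itself; a conftest.py hookwrapper along these standard lines (not shown in this diff, so assumed to exist in the repo) is needed to populate it:

    import pytest

    @pytest.hookimpl(tryfirst=True, hookwrapper=True)
    def pytest_runtest_makereport(item, call):
        outcome = yield
        report = outcome.get_result()
        # Expose the report as item.rep_setup / item.rep_call / item.rep_teardown
        setattr(item, 'rep_' + report.when, report)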
@@ -80,7 +99,7 @@ def _find_test_case_id_markers(request) -> typing.List[int]:


 def _get_test_case_status(test_case_id: int) -> int:
-    test_case_results = testrail_api.results.get_results_for_case(configs.testrail.TESTRAIL_RUN_ID, test_case_id)
+    test_case_results = testrail_api.results.get_results_for_case(test_run_id, test_case_id)
     try:
         result = 0
         while True:
@@ -90,12 +109,12 @@ def _get_test_case_status(test_case_id: int) -> int:
         else:
             return last_test_case_status
     except:
-        return RETEST
+        return SKIPPED


 def is_test_case_in_run(test_case_id: int) -> bool:
     try:
-        testrail_api.results.get_results_for_case(configs.testrail.TESTRAIL_RUN_ID, test_case_id)
+        testrail_api.results.get_results_for_case(test_run_id, test_case_id)
     except Exception as err:
         return False
     else:
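Status IDs 1 (Passed), 4 (Retest) and 5 (Failed) are TestRail built-ins; SKIPPED = 11 and UNTESTED = 12 appear to be custom statuses of this TestRail instance (an assumption). Reporting one result is a single call, e.g. with a made-up case ID:

    testrail_api.results.add_result_for_case(
        run_id=test_run_id,
        case_id=123456,  # hypothetical case ID
        status_id=SKIPPED,
        comment='Reason: blocked by driver crash'
    )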
@@ -107,7 +126,7 @@ def _get_test_cases():
     limit = 250
     chunk = 0
     while True:
-        tests = testrail_api.tests.get_tests(configs.testrail.TESTRAIL_RUN_ID, offset=chunk)['tests']
+        tests = testrail_api.tests.get_tests(test_run_id, offset=chunk)['tests']
         results.extend(tests)
         if len(tests) == limit:
             chunk += limit
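The hunk shows only the line that swaps in test_run_id; the surrounding loop pages through results in chunks of 250 and presumably ends with an else/break outside the hunk, roughly:

    def _get_test_cases():
        results = []
        limit = 250
        chunk = 0
        while True:
            tests = testrail_api.tests.get_tests(test_run_id, offset=chunk)['tests']
            results.extend(tests)
            if len(tests) == limit:
                chunk += limit  # full page: fetch the next offset
            else:
                break  # short page: no more tests
        return results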
@@ -115,11 +134,39 @@ def _get_test_cases():
     return results


-def get_test_ids_in_session(request):
+def get_test_cases_in_session(request) -> typing.List[test_case]:
     tests = request.session.items
-    ids = []
+    test_cases = []
     for test in tests:
+        tc_ids = []
+        skipped = ''
         for marker in getattr(test, 'own_markers', []):
-            if getattr(marker, 'name', '') == 'case':
-                ids.extend(list(marker.args))
-    return set(ids)
+            match getattr(marker, 'name', ''):
+                case 'case':
+                    tc_ids = list(marker.args)
+                case 'skip':
+                    skipped = f'Reason: {marker.kwargs.get("reason", "")}'
+        for tc_id in tc_ids:
+            test_cases.append(test_case(tc_id, skipped))
+    return test_cases
+
+
+def create_test_run(name: str, ids: list) -> dict:
+    test_run = testrail_api.runs.add_run(
+        project_id=configs.testrail.PROJECT_ID,
+        name=name,
+        description=f'Jenkins: {configs.testrail.CI_BUILD_URL}',
+        include_all=False if ids else True,
+        case_ids=ids or None
+    )
+    return test_run
+
+
+def get_test_run(name: str) -> typing.Optional[dict]:
+    test_runs = testrail_api.runs.get_runs(
+        project_id=configs.testrail.PROJECT_ID,
+        is_completed=False
+    )
+    for test_run in test_runs['runs']:
+        if test_run['name'] == name:
+            return test_run
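Note: the match statement in get_test_cases_in_session() requires Python 3.10+. On older interpreters the equivalent marker dispatch would be:

    name = getattr(marker, 'name', '')
    if name == 'case':
        tc_ids = list(marker.args)
    elif name == 'skip':
        skipped = f'Reason: {marker.kwargs.get("reason", "")}'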
@ -8,3 +8,4 @@ atomacos==3.3.0; platform_system == "Darwin"
|
|||
allure-pytest==2.13.2
|
||||
testrail-api==1.12.0
|
||||
pyperclip==1.8.2
|
||||
pytest-rerunfailures==11.1.2
|
||||
|
|
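The "rerun failed test" half of the commit relies on the pytest-rerunfailures plugin added above, which is driven by CLI flags; the exact invocation is not in this diff, so the counts here are illustrative:

    pytest --reruns 1 --reruns-delay 5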