e2e: mark skipped tests in TR

parent 1d7eb49c57
commit e67f6c1cd3
@@ -74,7 +74,8 @@ class BaseTestReport:
                                             jobs=testrun_data['jobs'],
                                             error=testrun_data['error'],
                                             first_commands=testrun_data['first_commands'],
-                                            xfail=testrun_data['xfail']))
+                                            xfail=testrun_data['xfail'],
+                                            run=testrun_data['run']))
             tests.append(SingleTestData(name=test_data['name'],
                                         logs_paths=test_data['logs_paths'],
                                         testruns=testruns,
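For orientation, a minimal sketch of one serialized testrun entry this loader now expects. Only the keys visible in the hunk are confirmed by the diff; the 'steps' key is an assumption inferred from TestRunData's constructor, which sits outside the hunk:

    # Hypothetical example of one testrun_data entry (sketch, not from the commit).
    testrun_data = {
        'steps': [],            # assumed: consumed above this hunk
        'jobs': {},
        'error': None,
        'first_commands': {},
        'xfail': '',
        'run': True,            # new field: False when the test was deliberately not run
    }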
@@ -11,15 +11,18 @@ class SingleTestData(object):
         self.secured = secured

     class TestRunData(object):
-        def __init__(self, steps, jobs, error, first_commands: Dict[str, int], xfail):
+        def __init__(self, steps, jobs, error, first_commands: Dict[str, int], xfail, run):
             self.steps = steps
             self.jobs = jobs
             self.error = error
             self.first_commands = first_commands
             self.xfail = xfail
+            self.run = run

     def create_new_testrun(self):
-        self.testruns.append(SingleTestData.TestRunData(list(), dict(), None, dict(), xfail=''))
+        self.testruns.append(SingleTestData.TestRunData(
+            steps=list(), jobs=dict(), error=None, first_commands=dict(), xfail='', run=True
+        ))


 class TestSuiteData(object):
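To illustrate the lifecycle of the new flag (a sketch, not part of the commit): every testrun starts with run=True via create_new_testrun(), and the pytest hook changes below flip it to False when the xfail reason carries pytest's '[NOTRUN]' token:

    # Sketch: a fresh testrun vs. one marked as deliberately not run.
    testrun = SingleTestData.TestRunData(
        steps=list(), jobs=dict(), error=None, first_commands=dict(), xfail='', run=True)
    assert testrun.run is True

    testrun.xfail = '[NOTRUN] e2e blocker'  # copied from report.wasxfail in the hook
    testrun.run = False                     # makes the report mark this run as skipped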
@@ -28,7 +28,8 @@ class TestrailReport(BaseTestReport):

         self.outcomes = {
             'passed': 1,
-            'undefined_fail': 10}
+            'undefined_fail': 10,
+            'skipped': 11}

         self.headers = dict()
         self.headers['Authorization'] = 'Basic %s' % str(
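TestRail's built-in status IDs only run 1-5 (1 = Passed, 5 = Failed), so 'undefined_fail' (10) and the new 'skipped' (11) are custom statuses that must exist in the target TestRail instance. A defensive sketch, assuming a self.get() helper symmetric to the self.post() used below (not part of the commit):

    # Sketch: fail fast if any configured outcome id is missing in TestRail.
    # 'get_statuses' is a real TestRail API v2 endpoint; self.get() is a
    # hypothetical wrapper assumed to mirror self.post().
    statuses = self.get('get_statuses')
    known_ids = {status['id'] for status in statuses}
    missing = set(self.outcomes.values()) - known_ids
    assert not missing, 'unknown TestRail status ids: %s' % missing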
@@ -185,9 +186,16 @@ class TestrailReport(BaseTestReport):
                     comment += '%s' % ('# Error: \n %s \n' % emoji.demojize(error)) + devices + test_steps
                 else:
                     comment += devices + test_steps
+
+                if last_testrun.xfail and not last_testrun.run:
+                    status_id = self.outcomes['skipped']
+                elif last_testrun.error:
+                    status_id = self.outcomes['undefined_fail']
+                else:
+                    status_id = self.outcomes['passed']
                 data.append(
                     {'case_id': test.testrail_case_id,
-                     'status_id': self.outcomes['undefined_fail'] if last_testrun.error else self.outcomes['passed'],
+                     'status_id': status_id,
                      'comment': comment})

         results = self.post('add_results_for_cases/%s' % self.run_id, data={"results": data})
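The replacement gives the status resolution an explicit precedence: a not-run xfail beats an error, which beats passed. The same logic as a standalone function (names are illustrative, not from the commit):

    def resolve_status_id(outcomes, testrun):
        # Precedence mirrors the hunk above: skipped > undefined_fail > passed.
        if testrun.xfail and not testrun.run:
            return outcomes['skipped']
        if testrun.error:
            return outcomes['undefined_fail']
        return outcomes['passed']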
@@ -353,6 +353,7 @@ def pytest_runtest_makereport(item, call):
             if is_group:
                 test_suite_data.current_test.group_name = item.instance.__class__.__name__
             test_suite_data.current_test.testruns[-1].xfail = report.wasxfail
+            test_suite_data.current_test.testruns[-1].run = False
             error_intro, error = 'Test is not run, e2e blocker ', report.wasxfail
             final_error = "%s [[%s]]" % (error_intro, error)
         else:
@@ -381,6 +382,8 @@ def pytest_runtest_makereport(item, call):
     if (hasattr(report, 'wasxfail') and not case_ids_set) or (hasattr(report, 'wasxfail') and (
             str([mark.args[0] for mark in item.iter_markers(name='testrail_id')][0]) in str(case_ids_set))):
         current_test.testruns[-1].xfail = report.wasxfail
+        if '[NOTRUN]' in report.wasxfail:
+            current_test.testruns[-1].run = False
         if error:
             current_test.testruns[-1].error = '%s [[%s]]' % (error, report.wasxfail)
         if is_sauce_env:
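End to end, a test marked with xfail(run=False) now lands in TestRail as skipped rather than passed or failed: pytest itself prefixes report.wasxfail with '[NOTRUN]' when the mark sets run=False, which is the token the hook checks for. A usage sketch (test name and reason are illustrative):

    import pytest

    @pytest.mark.xfail(reason="e2e blocker: depends on unreleased backend", run=False)
    def test_wallet_send():
        ...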