Report xfail-marked e2e tests that never ran ([NOTRUN]) to TestRail as
'skipped' (custom status 11) instead of pass/fail. Adds a `run` flag to
TestRunData, sets it to False in conftest for NOTRUN xfails, and maps the
flag to the new 'skipped' outcome when posting results.

diff --git a/test/appium/support/base_test_report.py b/test/appium/support/base_test_report.py
index 2411027554..1ab6337022 100644
--- a/test/appium/support/base_test_report.py
+++ b/test/appium/support/base_test_report.py
@@ -74,7 +74,8 @@ class BaseTestReport:
                     jobs=testrun_data['jobs'],
                     error=testrun_data['error'],
                     first_commands=testrun_data['first_commands'],
-                    xfail=testrun_data['xfail']))
+                    xfail=testrun_data['xfail'],
+                    run=testrun_data.get('run', True)))  # .get(): tolerate reports saved before 'run' existed
             tests.append(SingleTestData(name=test_data['name'],
                                         logs_paths=test_data['logs_paths'],
                                         testruns=testruns,
diff --git a/test/appium/support/test_data.py b/test/appium/support/test_data.py
index 660b975ae8..80aefd38a4 100644
--- a/test/appium/support/test_data.py
+++ b/test/appium/support/test_data.py
@@ -11,15 +11,18 @@ class SingleTestData(object):
         self.secured = secured
 
     class TestRunData(object):
-        def __init__(self, steps, jobs, error, first_commands: Dict[str, int], xfail):
+        def __init__(self, steps, jobs, error, first_commands: Dict[str, int], xfail, run):
             self.steps = steps
             self.jobs = jobs
             self.error = error
             self.first_commands = first_commands
             self.xfail = xfail
+            self.run = run
 
     def create_new_testrun(self):
-        self.testruns.append(SingleTestData.TestRunData(list(), dict(), None, dict(), xfail=''))
+        self.testruns.append(SingleTestData.TestRunData(
+            steps=list(), jobs=dict(), error=None, first_commands=dict(), xfail='', run=True
+        ))
 
 
 class TestSuiteData(object):
diff --git a/test/appium/support/testrail_report.py b/test/appium/support/testrail_report.py
index 704f9bb728..ff944e088a 100644
--- a/test/appium/support/testrail_report.py
+++ b/test/appium/support/testrail_report.py
@@ -28,7 +28,8 @@ class TestrailReport(BaseTestReport):
 
         self.outcomes = {
             'passed': 1,
-            'undefined_fail': 10}
+            'undefined_fail': 10,
+            'skipped': 11}
 
         self.headers = dict()
         self.headers['Authorization'] = 'Basic %s' % str(
@@ -185,9 +186,18 @@ class TestrailReport(BaseTestReport):
                     comment += '%s' % ('# Error: \n %s \n' % emoji.demojize(error)) + devices + test_steps
                 else:
                     comment += devices + test_steps
+
+                # Map the run outcome: an xfail'ed test that never executed
+                # ([NOTRUN]) is 'skipped'; otherwise fall back to fail/pass.
+                if last_testrun.xfail and not last_testrun.run:
+                    status_id = self.outcomes['skipped']
+                elif last_testrun.error:
+                    status_id = self.outcomes['undefined_fail']
+                else:
+                    status_id = self.outcomes['passed']
                 data.append(
                     {'case_id': test.testrail_case_id,
-                     'status_id': self.outcomes['undefined_fail'] if last_testrun.error else self.outcomes['passed'],
+                     'status_id': status_id,
                      'comment': comment})
 
             results = self.post('add_results_for_cases/%s' % self.run_id, data={"results": data})
diff --git a/test/appium/tests/conftest.py b/test/appium/tests/conftest.py
index fd6d0c1830..a100d43044 100644
--- a/test/appium/tests/conftest.py
+++ b/test/appium/tests/conftest.py
@@ -353,6 +353,7 @@ def pytest_runtest_makereport(item, call):
         if is_group:
             test_suite_data.current_test.group_name = item.instance.__class__.__name__
         test_suite_data.current_test.testruns[-1].xfail = report.wasxfail
+        test_suite_data.current_test.testruns[-1].run = False
         error_intro, error = 'Test is not run, e2e blocker ', report.wasxfail
         final_error = "%s [[%s]]" % (error_intro, error)
     else:
@@ -381,6 +382,8 @@ def pytest_runtest_makereport(item, call):
             if (hasattr(report, 'wasxfail') and not case_ids_set) or (hasattr(report, 'wasxfail') and (
                     str([mark.args[0] for mark in item.iter_markers(name='testrail_id')][0]) in str(case_ids_set))):
                 current_test.testruns[-1].xfail = report.wasxfail
+                if '[NOTRUN]' in report.wasxfail:
+                    current_test.testruns[-1].run = False
                 if error:
                     current_test.testruns[-1].error = '%s [[%s]]' % (error, report.wasxfail)
                 if is_sauce_env: