Rerun Appium tests that failed due to infrastructure reasons

rerun_count=0 by default

commit e32ce7af68
parent c1b217e032

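Reruns are opt-in: with the default rerun_count of 0, the new pytest_runtest_protocol hook (last hunk below) returns None and pytest keeps its standard single-run protocol. A minimal sketch of enabling reruns from the command line (the test path is illustrative):

    python -m pytest tests/ --rerun_count=2
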
@@ -15,7 +15,7 @@ namedlist==1.7
 py==1.4.34
 pytest==3.2.1
 pytest-forked==0.2
-pytest-xdist==1.20.0
+pytest-xdist==1.22.2
 requests==2.18.3
 sauceclient==1.0.0
 selenium==3.8.1

@@ -2,7 +2,7 @@ import json
 import hmac
 import os
 from hashlib import md5
-from tests import SingleTestData
+from support.test_data import SingleTestData


 class BaseTestReport:

@@ -28,24 +28,34 @@ class BaseTestReport:

     def save_test(self, test):
         file_path = self.get_test_report_file_path(test.name)
-        json.dump(test.__dict__, open(file_path, 'w'))
+        test_dict = {
+            'testrail_case_id': test.testrail_case_id,
+            'name': test.name,
+            'testruns': list()
+        }
+        for testrun in test.testruns:
+            test_dict['testruns'].append(testrun.__dict__)
+        json.dump(test_dict, open(file_path, 'w'))

     def get_all_tests(self):
         tests = list()
         file_list = [f for f in os.listdir(self.TEST_REPORT_DIR)]
         for file_name in file_list:
             file_path = os.path.join(self.TEST_REPORT_DIR, file_name)
-            test_dict = json.load(open(file_path))
-            tests.append(SingleTestData(name=test_dict['name'], steps=test_dict['steps'],
-                                        jobs=test_dict['jobs'], error=test_dict['error'],
-                                        testrail_case_id=test_dict['testrail_case_id']))
+            test_data = json.load(open(file_path))
+            testruns = list()
+            for testrun_data in test_data['testruns']:
+                testruns.append(SingleTestData.TestRunData(
+                    steps=testrun_data['steps'], jobs=testrun_data['jobs'], error=testrun_data['error']))
+            tests.append(SingleTestData(name=test_data['name'], testruns=testruns,
+                                        testrail_case_id=test_data['testrail_case_id']))
         return tests

     def get_failed_tests(self):
         tests = self.get_all_tests()
         failed = list()
         for test in tests:
-            if test.error is not None:
+            if not self.is_test_successful(test):
                 failed.append(test)
         return failed

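Note: the report format changes from one flat dict per test to a dict carrying a list of testruns, so every rerun is preserved on disk. A hedged sketch of what one report file holds after json.load (field names come from the diff above; all values are invented):

# Illustrative structure of one saved report:
test_data = {
    'testrail_case_id': 5300,
    'name': 'test_send_transaction',
    'testruns': [
        {'steps': ['Sign in'], 'jobs': ['a1b2c3'], 'error': 'ESOCKETTIMEDOUT'},  # first run failed
        {'steps': ['Sign in'], 'jobs': ['d4e5f6'], 'error': None},               # rerun passed
    ],
}

Because the is_test_successful helper added below inspects only testruns[-1], this example counts as passed.
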
@@ -53,7 +63,7 @@ class BaseTestReport:
         tests = self.get_all_tests()
         passed = list()
         for test in tests:
-            if test.error is None:
+            if self.is_test_successful(test):
                 passed.append(test)
         return passed

@@ -61,3 +71,8 @@ class BaseTestReport:
         token = hmac.new(bytes(self.sauce_username + ":" + self.sauce_access_key, 'latin-1'),
                          bytes(job_id, 'latin-1'), md5).hexdigest()
         return "https://saucelabs.com/jobs/%s?auth=%s" % (job_id, token)
+
+    @staticmethod
+    def is_test_successful(test):
+        # Test passed if last testrun has passed
+        return test.testruns[-1].error is None

@@ -56,9 +56,10 @@ class GithubHtmlReport(BaseTestReport):
         html = "<tr><td><b>%d. %s</b></td></tr>" % (index+1, test.name)
         html += "<tr><td>"
         test_steps_html = list()
-        for step in test.steps:
+        last_testrun = test.testruns[-1]
+        for step in last_testrun.steps:
             test_steps_html.append("<div>%s</div>" % step)
-        if test.error:
+        if last_testrun.error:
             if test_steps_html:
                 html += "<p>"
                 html += "<blockquote>"

@@ -66,10 +67,10 @@ class GithubHtmlReport(BaseTestReport):
                 html += "%s" % ''.join(test_steps_html[-2:])
                 html += "</blockquote>"
                 html += "</p>"
-            html += "<code>%s</code>" % test.error
+            html += "<code>%s</code>" % last_testrun.error
             html += "<br/><br/>"
-        if test.jobs:
-            html += self.build_device_sessions_html(test.jobs)
+        if last_testrun.jobs:
+            html += self.build_device_sessions_html(last_testrun.jobs)
         html += "</td></tr>"
         return html

@@ -80,3 +81,4 @@ class GithubHtmlReport(BaseTestReport):
             html += "<li><a href=\"%s\">Device %d</a></li>" % (self.get_sauce_job_url(job_id), i+1)
         html += "</ul></p>"
         return html
+

support/test_data.py (new file):

@@ -0,0 +1,30 @@
+class SingleTestData(object):
+    def __init__(self, name, testruns, testrail_case_id):
+        self.testrail_case_id = testrail_case_id
+        self.name = name
+        self.testruns = testruns
+
+    class TestRunData(object):
+        def __init__(self, steps, jobs, error):
+            self.steps = steps
+            self.jobs = jobs
+            self.error = error
+
+    def create_new_testrun(self):
+        self.testruns.append(SingleTestData.TestRunData(list(), list(), None))
+
+
+class TestSuiteData(object):
+    def __init__(self):
+        self.apk_name = None
+        self.current_test = None
+        self.tests = list()
+
+    def set_current_test(self, test_name, testrail_case_id):
+        existing_test = next((test for test in self.tests if test.name == test_name), None)
+        if existing_test:
+            self.current_test = existing_test
+        else:
+            test = SingleTestData(test_name, list(), testrail_case_id)
+            self.tests.append(test)
+            self.current_test = test

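Note: a brief usage sketch of how these classes cooperate across a rerun (the hook wiring lives in conftest.py below; the test name and case id are hypothetical):

# Hypothetical lifecycle of one test that fails once and is rerun:
from support.test_data import TestSuiteData

suite = TestSuiteData()
suite.set_current_test('test_wallet', testrail_case_id=5300)   # creates the entry
suite.current_test.create_new_testrun()                        # run 1
suite.current_test.testruns[-1].error = 'socket hang up'       # run 1 fails

suite.set_current_test('test_wallet', testrail_case_id=5300)   # reuses the same entry
suite.current_test.create_new_testrun()                        # run 2 (the rerun)
assert len(suite.current_test.testruns) == 2
assert suite.current_test.testruns[-1].error is None           # rerun passed so far
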
support/test_rerun.py (new file):

@@ -0,0 +1,13 @@
+RERUN_ERRORS = [
+    'Original error: Error: ESOCKETTIMEDOUT',
+    "The server didn't respond in time.",
+    'An unknown server-side error occurred while processing the command.',
+    'Could not proxy command to remote server. Original error: Error: socket hang up'
+]
+
+
+def should_rerun_test(test_error):
+    for rerun_error in RERUN_ERRORS:
+        if rerun_error in test_error:
+            return True
+    return False

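Note: should_rerun_test is a plain substring match against the failure text, so only the known infrastructure errors above trigger a rerun; genuine test failures do not. For example (error strings invented):

from support.test_rerun import should_rerun_test

assert should_rerun_test('WebDriverException ... Original error: Error: ESOCKETTIMEDOUT')
assert not should_rerun_test("AssertionError: 'Send' button is not displayed")
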
@@ -62,11 +62,12 @@ class TestrailReport(BaseTestReport):
         test_steps = "# Steps: \n"
         devices = str()
         method = 'add_result_for_case/%s/%s' % (self.run_id, test.testrail_case_id)
-        for step in test.steps:
+        last_testrun = test.testruns[-1]
+        for step in last_testrun.steps:
             test_steps += step + "\n"
-        for i, device in enumerate(test.jobs):
+        for i, device in enumerate(last_testrun.jobs):
             devices += "# [Device %d](%s) \n" % (i + 1, self.get_sauce_job_url(device))
-        data = {'status_id': self.outcomes['undefined_fail'] if test.error else self.outcomes['passed'],
-                'comment': '%s' % ('# Error: \n %s \n' % test.error) + devices + test_steps if test.error
+        data = {'status_id': self.outcomes['undefined_fail'] if last_testrun.error else self.outcomes['passed'],
+                'comment': '%s' % ('# Error: \n %s \n' % last_testrun.error) + devices + test_steps if last_testrun.error
                 else devices + test_steps}
         self.post(method, data=data)

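Note: only the last testrun is reported to TestRail. A hedged sketch of the markdown comment posted for a failed last run (error text, job id, and step are invented):

# Illustrative 'comment' value: error first, then device links, then steps.
comment = ('# Error: \n ESOCKETTIMEDOUT \n'
           '# [Device 1](https://saucelabs.com/jobs/a1b2c3?auth=...) \n'
           '# Steps: \nSign in\n')
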
@@ -2,6 +2,8 @@ import asyncio
 import logging
 from datetime import datetime
+
+from support.test_data import TestSuiteData


 @asyncio.coroutine
 def start_threads(quantity: int, func: type, returns: dict, *args):

@@ -20,27 +22,11 @@ def get_current_time():
 def info(text: str):
     if "Base" not in text:
         logging.info(text)
-        test_suite_data.current_test.steps.append(text)
+        test_suite_data.current_test.testruns[-1].steps.append(text)


-class SingleTestData(object):
-    def __init__(self, name, steps=list(), jobs=list(), error=None, testrail_case_id=None):
-        self.testrail_case_id = testrail_case_id
-        self.name = name
-        self.steps = steps
-        self.jobs = jobs
-        self.error = error
-
-
-class TestSuiteData(object):
-    def __init__(self):
-        self.apk_name = None
-        self.current_test = None
-        self.tests = list()
-
-    def add_test(self, test):
-        self.tests.append(test)
-        self.current_test = test
-
-
 def debug(text: str):
     logging.debug(text)


 test_suite_data = TestSuiteData()

@@ -121,7 +121,7 @@ class SingleDeviceTestCase(AbstractTestCase):
                                                capabilities[self.environment]['capabilities'])
                 self.driver.implicitly_wait(self.implicitly_wait)
                 BaseView(self.driver).accept_agreements()
-                test_suite_data.current_test.jobs.append(self.driver.session_id)
+                test_suite_data.current_test.testruns[-1].jobs.append(self.driver.session_id)
                 break
             except WebDriverException:
                 counter += 1

@@ -146,7 +146,7 @@ class LocalMultipleDeviceTestCase(AbstractTestCase):
             self.drivers[driver] = webdriver.Remote(self.executor_local, capabilities[driver])
             self.drivers[driver].implicitly_wait(self.implicitly_wait)
             BaseView(self.drivers[driver]).accept_agreements()
-            test_suite_data.current_test.jobs.append(self.drivers[driver].session_id)
+            test_suite_data.current_test.testruns[-1].jobs.append(self.drivers[driver].session_id)

     def teardown_method(self, method):
         for driver in self.drivers:

@@ -174,7 +174,7 @@ class SauceMultipleDeviceTestCase(AbstractTestCase):
         for driver in range(quantity):
             self.drivers[driver].implicitly_wait(self.implicitly_wait)
             BaseView(self.drivers[driver]).accept_agreements()
-            test_suite_data.current_test.jobs.append(self.drivers[driver].session_id)
+            test_suite_data.current_test.testruns[-1].jobs.append(self.drivers[driver].session_id)

     def teardown_method(self, method):
         for driver in self.drivers:

@@ -1,5 +1,9 @@
-from tests import test_suite_data, SingleTestData
+from _pytest.runner import runtestprotocol
+
+from support.test_rerun import should_rerun_test
+from tests import test_suite_data, debug
 import requests
 import re
 import pytest
 from datetime import datetime
 from os import environ

@@ -42,6 +46,14 @@ def pytest_addoption(parser):
                      action='store',
                      default=False,
                      help='boolean; For running extended test suite against nightly build')
+    parser.addoption('--rerun_count',
+                     action='store',
+                     default=0,
+                     help='How many times tests should be re-run if failed')
+
+
+def get_rerun_count():
+    return int(pytest.config.getoption('rerun_count'))


 def is_master(config):

@@ -95,13 +107,13 @@ def pytest_unconfigure(config):
 def pytest_runtest_makereport(item, call):
     outcome = yield
     report = outcome.get_result()
-    is_sauce_env = pytest.config.getoption('env') == 'sauce'
-    current_test = test_suite_data.current_test
     if report.when == 'call':
+        is_sauce_env = pytest.config.getoption('env') == 'sauce'
+        current_test = test_suite_data.current_test
         if report.failed:
-            current_test.error = report.longreprtext
+            current_test.testruns[-1].error = report.longreprtext
         if is_sauce_env:
-            update_sauce_jobs(current_test.name, current_test.jobs, report.passed)
+            update_sauce_jobs(current_test.name, current_test.testruns[-1].jobs, report.passed)
         github_report.save_test(current_test)

@@ -116,4 +128,15 @@ def get_testrail_case_id(obj):


 def pytest_runtest_setup(item):
-    test_suite_data.add_test(SingleTestData(item.name, testrail_case_id=get_testrail_case_id(item)))
+    test_suite_data.set_current_test(item.name, testrail_case_id=get_testrail_case_id(item))
+    test_suite_data.current_test.create_new_testrun()
+
+
+def pytest_runtest_protocol(item, nextitem):
+    for i in range(get_rerun_count()):
+        reports = runtestprotocol(item, nextitem=nextitem)
+        for report in reports:
+            if report.failed and should_rerun_test(report.longreprtext):
+                break  # rerun
+        else:
+            return True  # no need to rerun

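Note: pytest_runtest_protocol is a firstresult hook, so returning True marks the item as handled, while falling through returns None and pytest's default protocol runs the test once more. With rerun_count=0 the loop body never executes, which keeps today's single-run behavior. An illustrative trace (the failure sequence is invented):

# python -m pytest --rerun_count=2, test fails once with infrastructure noise:
#   run 1: fails with 'socket hang up'  -> should_rerun_test is True -> break (rerun)
#   run 2: passes                       -> inner for/else -> return True (done)
# If every allowed run hits a rerun error, the hook returns None and the
# default protocol executes the test one final time.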