# Set environment variable to testing before loading.
# IMPORTANT - Environment must be loaded before app, models, etc....
import os

os.environ["TESTING"] = "true"

import json
import unittest
import urllib.parse
import datetime
import shutil

from flask import g

from crc import app, db, session
from crc.models.api_models import WorkflowApiSchema, MultiInstanceType
from crc.models.task_event import TaskEventModel, TaskAction
from crc.models.study import StudyModel, StudyStatus, ProgressStatus
from crc.models.user import UserModel
from crc.models.workflow import WorkflowSpecCategory
from crc.services.ldap_service import LdapService
from crc.services.reference_file_service import ReferenceFileService
from crc.services.spec_file_service import SpecFileService
from crc.services.study_service import StudyService
from crc.services.user_service import UserService
from crc.services.document_service import DocumentService
from example_data import ExampleDataLoader
from crc.services.workflow_spec_service import WorkflowSpecService
from crc.services.workflow_service import WorkflowService
from crc.services.workflow_processor import WorkflowProcessor

# UNCOMMENT THIS FOR DEBUGGING SQL ALCHEMY QUERIES
import logging

logging.basicConfig()


class BaseTest(unittest.TestCase):
    """Base class to inherit from in tests, as it sets up and tears down
    state efficiently when we have a database in place."""

    workflow_spec_service = WorkflowSpecService()

    if not app.config['TESTING']:
        raise Exception("INVALID TEST CONFIGURATION. This is almost always an import order issue. "
                        "The first import in each test should be the base_test.py file.")

    auths = {}
    test_uid = "dhf8r"

    # These users correspond to the ldap details available in our mock ldap service.
    users = [
        {
            'uid': 'dhf8r',
        },
        {
            'uid': 'lb3dp',
        }
    ]

    studies = [
        {
            'id': 0,
            'title': 'The impact of fried pickles on beer consumption in bipedal software developers.',
            'last_updated': datetime.datetime.utcnow(),
            'status': StudyStatus.in_progress,
            'progress_status': ProgressStatus.in_progress,
            'sponsor': 'Sartography Pharmaceuticals',
            'ind_number': '1234',
            'user_uid': 'dhf8r',
            'review_type': 2
        },
        {
            'id': 1,
            'title': 'Requirement of hippocampal neurogenesis for the behavioral effects of soft pretzels',
            'last_updated': datetime.datetime.utcnow(),
            'status': StudyStatus.in_progress,
            'progress_status': ProgressStatus.in_progress,
            'sponsor': 'Makerspace & Co.',
            'ind_number': '5678',
            'user_uid': 'dhf8r',
            'review_type': 2
        }
    ]

    @classmethod
    def setUpClass(cls):
        cls.clear_test_sync_files()
        app.config.from_object('config.testing')
        cls.ctx = app.test_request_context()
        cls.app = app.test_client()
        cls.ctx.push()
        db.create_all()

    @classmethod
    def tearDownClass(cls):
        db.session.commit()
        db.drop_all()
        cls.ctx.pop()

    def setUp(self):
        pass

    def tearDown(self):
        ExampleDataLoader.clean_db()
        self.logout()
        self.auths = {}
        self.clear_test_sync_files()

    @staticmethod
    def copy_files_to_file_system(import_spec_path, spec_path):
        """Some tests rely on a well-populated file system."""
        shutil.copytree(import_spec_path, spec_path)

    @staticmethod
    def clear_test_sync_files():
        sync_file_root = SpecFileService().root_path()
        if os.path.exists(sync_file_root):
            shutil.rmtree(sync_file_root)

    def logged_in_headers(self, user=None, redirect_url='http://some/frontend/url'):
        if user is None:
            uid = self.test_uid
            user_info = {'uid': self.test_uid}
        else:
            uid = user.uid
            user_info = {'uid': user.uid}

        query_string = self.user_info_to_query_string(user_info, redirect_url)
        rv = self.app.get("/v1.0/login%s" % query_string, follow_redirects=False)
        self.assertEqual(302, rv.status_code)
        self.assertTrue(rv.location.startswith(redirect_url))

        user_model = session.query(UserModel).filter_by(uid=uid).first()
        self.assertIsNotNone(user_model.ldap_info.display_name)
        self.assertEqual(user_model.uid, uid)
        self.assertTrue('user' in g, 'User should be in Flask globals')
        user = UserService.current_user(allow_admin_impersonate=True)
        self.assertEqual(uid, user.uid, 'Logged in user should match given user uid')

        return dict(Authorization='Bearer ' + user_model.encode_auth_token())
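
    # Illustrative usage sketch (the '/v1.0/study' endpoint is an assumption,
    # not defined in this file):
    #     rv = self.app.get('/v1.0/study', headers=self.logged_in_headers())
    #     self.assert_success(rv)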

    def delete_example_data(self, use_crc_data=False, use_rrt_data=False):
        """Delete everything that matters in the local database - this is
        used to test from a ground-zero copy of the workflow specs."""
        ExampleDataLoader.clean_db()

    def add_users(self):
        for user_json in self.users:
            ldap_info = LdapService.user_info(user_json['uid'])
            session.add(UserModel(uid=user_json['uid'], ldap_info=ldap_info))
        session.commit()

    def add_studies(self):
        self.add_users()
        for study_json in self.studies:
            study_model = StudyModel(**study_json)
            session.add(study_model)
            # Keep the id sequence ahead of the explicitly assigned ids above.
            update_seq = "ALTER SEQUENCE %s RESTART WITH %s" % (StudyModel.__tablename__ + '_id_seq', study_model.id + 1)
            session.execute(update_seq)
        session.commit()
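
    # The ALTER SEQUENCE above assumes a PostgreSQL-style sequence named
    # "<tablename>_id_seq"; restarting it keeps later auto-assigned ids from
    # colliding with the explicit ids (0 and 1) in the seed data, e.g.:
    #     self.add_studies()
    #     study = self.create_study(title='A third study')  # safely gets id 2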

    def assure_category_name_exists(self, name):
        category = self.workflow_spec_service.get_category(name)
        if category is None:
            category = WorkflowSpecCategory(id=name, display_name=name, admin=False, display_order=0)
            self.workflow_spec_service.add_category(category)
        return category

    def assure_category_exists(self, category_id=None, display_name="Test Workflows", admin=False):
        category = None
        if category_id is not None:
            category = self.workflow_spec_service.get_category(category_id)
        if category is None:
            if category_id is None:
                category_id = 'test_category'
            category = WorkflowSpecCategory(id=category_id, display_name=display_name, admin=admin, display_order=0)
            self.workflow_spec_service.add_category(category)
        return category

    def load_test_spec(self, dir_name, display_name=None, master_spec=False, category_id=None, library=False):
        """Loads a spec into the database based on a directory in /tests/data"""
        category = None
        if not master_spec and not library:
            category = self.assure_category_exists(category_id)
            category_id = category.id
        workflow_spec = self.workflow_spec_service.get_spec(dir_name)
        if workflow_spec:
            return workflow_spec
        else:
            filepath = os.path.join(app.root_path, '..', 'tests', 'data', dir_name, "*")
            if display_name is None:
                display_name = dir_name
            spec = ExampleDataLoader().create_spec(id=dir_name, filepath=filepath, master_spec=master_spec,
                                                   display_name=display_name, category_id=category_id, library=library)
            return spec
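
    # Illustrative usage, assuming a hypothetical tests/data/hello_world
    # directory holding a BPMN file:
    #     spec = self.load_test_spec('hello_world')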

    @staticmethod
    def protocol_builder_response(file_name):
        filepath = os.path.join(app.root_path, '..', 'tests', 'data', 'pb_responses', file_name)
        with open(filepath, 'r') as myfile:
            data = myfile.read()
        return data

    @staticmethod
    def workflow_sync_response(file_name):
        filepath = os.path.join(app.root_path, '..', 'tests', 'data', 'workflow_sync_responses', file_name)
        with open(filepath, 'rb') as myfile:
            data = myfile.read()
        return data

    def assert_success(self, rv, msg=""):
        try:
            data = json.loads(rv.get_data(as_text=True))
            error_message = ""
            if 'message' in data:
                error_message = data['message']
            self.assertTrue(200 <= rv.status_code < 300,
                            "BAD Response: %i. \n %s" %
                            (rv.status_code, error_message + ". " + msg + ". "))
        except Exception:
            # The response body was not JSON; fall back to the status code alone.
            self.assertTrue(200 <= rv.status_code < 300,
                            "BAD Response: %i." % rv.status_code + ". " + msg)

    def assert_failure(self, rv, status_code=0, error_code=""):
        self.assertFalse(200 <= rv.status_code < 300,
                         "Incorrect Valid Response:" + rv.status + ".")
        if status_code != 0:
            self.assertEqual(status_code, rv.status_code)
        if error_code != "":
            data = json.loads(rv.get_data(as_text=True))
            self.assertEqual(error_code, data["code"])

    def assert_dict_contains_subset(self, container, subset):
        def extract_dict_a_from_b(a, b):
            return {k: b[k] for k in a.keys() if k in b.keys()}

        extract = extract_dict_a_from_b(subset, container)
        self.assertEqual(subset, extract)
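
    # For example, this passes because every key of the subset appears in the
    # container with an equal value:
    #     self.assert_dict_contains_subset({'uid': 'dhf8r', 'extra': 1}, {'uid': 'dhf8r'})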

    @staticmethod
    def user_info_to_query_string(user_info, redirect_url):
        query_string_list = []
        items = user_info.items()
        for key, value in items:
            query_string_list.append('%s=%s' % (key, urllib.parse.quote(value)))

        query_string_list.append('redirect_url=%s' % redirect_url)

        return '?%s' % '&'.join(query_string_list)
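
    # For example, user_info_to_query_string({'uid': 'dhf8r'}, 'http://some/frontend/url')
    # returns '?uid=dhf8r&redirect_url=http://some/frontend/url'.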

    def replace_file(self, spec, name, file_path):
        """Replaces a stored file with the given name with the contents of the file at the given path."""
        with open(file_path, "rb") as file:
            data = file.read()
        SpecFileService().update_file(spec, name, data)

    def create_user(self, uid="dhf8r", email="daniel.h.funk@gmail.com", display_name="Hoopy Frood"):
        user = session.query(UserModel).filter(UserModel.uid == uid).first()
        if user is None:
            ldap_user = LdapService.user_info(uid)
            user = UserModel(uid=uid, ldap_info=ldap_user)
            session.add(user)
            session.commit()
        return user

    def create_study(self, uid="dhf8r", title="Beer consumption in the bipedal software engineer"):
        study = session.query(StudyModel).filter_by(user_uid=uid).filter_by(title=title).first()
        if study is None:
            user = self.create_user(uid=uid)
            study = StudyModel(title=title, status=StudyStatus.in_progress,
                               user_uid=user.uid, review_type=2)
            session.add(study)
            session.commit()
        return study

    def create_workflow(self, dir_name, display_name=None, study=None, category_id=None, as_user="dhf8r"):
        session.flush()
        spec = self.workflow_spec_service.get_spec(dir_name)
        if spec is None:
            if display_name is None:
                display_name = dir_name
            spec = self.load_test_spec(dir_name, display_name, category_id=category_id)
        if study is None:
            study = self.create_study(uid=as_user)
        workflow_model = StudyService._create_workflow_model(study, spec)
        return workflow_model
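
    # Illustrative usage, assuming a hypothetical tests/data/random_fact spec
    # directory; a typical test builds the workflow, then drives it over the API:
    #     workflow = self.create_workflow('random_fact')
    #     workflow_api = self.get_workflow_api(workflow)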

    def create_reference_document(self):
        file_path = os.path.join(app.root_path, 'static', 'reference', 'documents.xlsx')
        with open(file_path, "rb") as file:
            ReferenceFileService.add_reference_file(DocumentService.DOCUMENT_LIST,
                                                    file.read())
        file_path = os.path.join(app.root_path, 'static', 'reference', 'investigators.xlsx')
        with open(file_path, "rb") as file:
            ReferenceFileService.add_reference_file('investigators.xlsx',
                                                    file.read())

    def get_workflow_common(self, url, user):
        rv = self.app.get(url,
                          headers=self.logged_in_headers(user),
                          content_type="application/json")
        self.assert_success(rv)
        json_data = json.loads(rv.get_data(as_text=True))
        workflow_api = WorkflowApiSchema().load(json_data)
        return workflow_api

    def get_workflow_api(self, workflow, do_engine_steps=True, user_uid="dhf8r"):
        user = session.query(UserModel).filter_by(uid=user_uid).first()
        self.assertIsNotNone(user)
        url = (f'/v1.0/workflow/{workflow.id}'
               f'?do_engine_steps={str(do_engine_steps)}')
        workflow_api = self.get_workflow_common(url, user)
        self.assertEqual(workflow.workflow_spec_id, workflow_api.workflow_spec_id)
        return workflow_api

    def restart_workflow_api(self, workflow, clear_data=False, delete_files=False, user_uid="dhf8r"):
        user = session.query(UserModel).filter_by(uid=user_uid).first()
        self.assertIsNotNone(user)
        url = (f'/v1.0/workflow/{workflow.id}/restart'
               f'?clear_data={str(clear_data)}'
               f'&delete_files={str(delete_files)}')
        workflow_api = self.get_workflow_common(url, user)
        self.assertEqual(workflow.workflow_spec_id, workflow_api.workflow_spec_id)
        return workflow_api

    def complete_form(self, workflow_in, task_in, dict_data, update_all=False, error_code=None, terminate_loop=None,
                      user_uid="dhf8r"):
        # workflow_in should be a workflow, not a workflow_api
        # we were passing in workflow_api in many of our tests, and
        # this caused problems testing standalone workflows
        spec = self.workflow_spec_service.get_spec(workflow_in.workflow_spec_id)
        standalone = getattr(spec, 'standalone', False)
        prev_completed_task_count = workflow_in.completed_tasks
        if isinstance(task_in, dict):
            task_id = task_in["id"]
        else:
            task_id = task_in.id

        user = session.query(UserModel).filter_by(uid=user_uid).first()
        self.assertIsNotNone(user)
        args = ""
        if terminate_loop:
            args += "?terminate_loop=true"
        if update_all:
            # Use '&' if a query string was already started above.
            args += ("&" if args else "?") + "update_all=true"

        rv = self.app.put('/v1.0/workflow/%i/task/%s/data%s' % (workflow_in.id, task_id, args),
                          headers=self.logged_in_headers(user=user),
                          content_type="application/json",
                          data=json.dumps(dict_data))
        if error_code:
            self.assert_failure(rv, error_code=error_code)
            return

        self.assert_success(rv)
        json_data = json.loads(rv.get_data(as_text=True))

        # Assure task events are updated on the model
        workflow = WorkflowApiSchema().load(json_data)

        # Assure a record exists in the Task Events
        task_events = session.query(TaskEventModel) \
            .filter_by(workflow_id=workflow.id) \
            .filter_by(task_id=task_id) \
            .filter_by(action=TaskAction.COMPLETE.value) \
            .order_by(TaskEventModel.date.desc()).all()
        self.assertGreater(len(task_events), 0)
        event = task_events[0]
        if not standalone:
            self.assertIsNotNone(event.study_id)
        self.assertEqual(user_uid, event.user_uid)
        self.assertEqual(workflow.id, event.workflow_id)
        self.assertEqual(workflow.workflow_spec_id, event.workflow_spec_id)
        self.assertEqual(TaskAction.COMPLETE.value, event.action)
        self.assertEqual(task_in.id, task_id)
        self.assertEqual(task_in.name, event.task_name)
        self.assertEqual(task_in.title, event.task_title)
        self.assertEqual(task_in.type, event.task_type)
        if task_in.multi_instance_type != 'looping':
            self.assertEqual("COMPLETED", event.task_state)

        # Not sure what voodoo is happening inside of marshmallow to get me in this state.
        if isinstance(task_in.multi_instance_type, MultiInstanceType):
            self.assertEqual(task_in.multi_instance_type.value, event.mi_type)
        else:
            self.assertEqual(task_in.multi_instance_type, event.mi_type)

        self.assertEqual(task_in.multi_instance_count, event.mi_count)
        if task_in.multi_instance_type == 'looping' and not terminate_loop:
            self.assertEqual(task_in.multi_instance_index + 1, event.mi_index)
        else:
            self.assertEqual(task_in.multi_instance_index, event.mi_index)
        self.assertEqual(task_in.process_name, event.process_name)
        self.assertIsNotNone(event.date)

        return workflow
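
    # A sketch of typical usage (assuming the deserialized workflow API object
    # exposes a next_task, and a hypothetical form field named 'favorite_color'):
    #     workflow = self.create_workflow('hypothetical_form_spec')
    #     workflow_api = self.get_workflow_api(workflow)
    #     workflow_api = self.complete_form(workflow, workflow_api.next_task,
    #                                       {'favorite_color': 'blue'})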

    def logout(self):
        if 'user' in g:
            del g.user

        if 'impersonate_user' in g:
            del g.impersonate_user

    @staticmethod
    def minimal_bpmn(content):
        """Returns a bytes object of a minimal, well-formed BPMN-like XML file
        with some string content of your choosing."""
        minimal_bpmn_template = "<x><process id='1' isExecutable='false'><startEvent id='a'/></process>%s</x>"
        return (minimal_bpmn_template % content).encode()
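
    # For example, minimal_bpmn("<!-- note -->") returns the encoded bytes of a
    # tiny parseable XML document containing one empty, non-executable process.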

    @staticmethod
    def run_master_spec(study_model):
        spec_service = WorkflowSpecService()
        master_spec = spec_service.master_spec
        master_workflow_results = WorkflowProcessor.run_master_spec(master_spec, study_model)
        WorkflowService().update_workflow_state_from_master_workflow(study_model.id, master_workflow_results)
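
    # Illustrative usage, assuming a master spec has already been loaded
    # (e.g., via load_test_spec(..., master_spec=True)):
    #     study = self.create_study()
    #     BaseTest.run_master_spec(study)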