# Set environment variable to testing before loading.
# IMPORTANT - Environment must be loaded before app, models, etc.
import json
import os
import unittest
import urllib.parse
import datetime

os.environ["TESTING"] = "true"

from crc.models.protocol_builder import ProtocolBuilderStatus
from crc.models.study import StudyModel
from crc.models.file import FileModel, FileDataModel, CONTENT_TYPES
from crc.models.workflow import WorkflowSpecModel, WorkflowSpecModelSchema, WorkflowModel
from crc.models.user import UserModel
from crc.services.file_service import FileService
from crc.services.study_service import StudyService
from crc.services.workflow_processor import WorkflowProcessor
from crc import app, db, session
from example_data import ExampleDataLoader

# UNCOMMENT THIS FOR DEBUGGING SQL ALCHEMY QUERIES
# import logging
# logging.basicConfig()
# logging.getLogger('sqlalchemy.engine').setLevel(logging.INFO)


class BaseTest(unittest.TestCase):
    """Great class to inherit from, as it sets up and tears down the test
    database efficiently when we have a database in place.
    """

    auths = {}
    test_uid = "dhf8r"

    users = [
        {
            'uid': 'dhf8r',
            'email_address': 'dhf8r@virginia.EDU',
            'display_name': 'Daniel Harold Funk',
            'affiliation': 'staff@virginia.edu;member@virginia.edu',
            'eppn': 'dhf8r@virginia.edu',
            'first_name': 'Daniel',
            'last_name': 'Funk',
            'title': 'SOFTWARE ENGINEER V'
        }
    ]

    studies = [
        {
            'id': 0,
            'title': 'The impact of fried pickles on beer consumption in bipedal software developers.',
            'last_updated': datetime.datetime.now(),
            'protocol_builder_status': ProtocolBuilderStatus.IN_PROCESS,
            'primary_investigator_id': 'dhf8r',
            'sponsor': 'Sartography Pharmaceuticals',
            'ind_number': '1234',
            'user_uid': 'dhf8r'
        },
        {
            'id': 1,
            'title': 'Requirement of hippocampal neurogenesis for the behavioral effects of soft pretzels',
            'last_updated': datetime.datetime.now(),
            'protocol_builder_status': ProtocolBuilderStatus.IN_PROCESS,
            'primary_investigator_id': 'dhf8r',
            'sponsor': 'Makerspace & Co.',
            'ind_number': '5678',
            'user_uid': 'dhf8r'
        }
    ]

    @classmethod
    def setUpClass(cls):
        app.config.from_object('config.testing')
        cls.ctx = app.test_request_context()
        cls.app = app.test_client()
        db.create_all()

    @classmethod
    def tearDownClass(cls):
        db.drop_all()
        session.remove()

    def setUp(self):
        self.ctx.push()

    def tearDown(self):
        ExampleDataLoader.clean_db()  # This does not seem to work; some collision of sessions.
        self.ctx.pop()
        self.auths = {}

    def logged_in_headers(self, user=None, redirect_url='http://some/frontend/url'):
        """Logs the given user (or the default test user) in through the
        sso_backdoor endpoint and returns an Authorization header for them."""
        if user is None:
            uid = self.test_uid
            user_info = {'uid': self.test_uid, 'first_name': 'Daniel', 'last_name': 'Funk',
                         'email_address': 'dhf8r@virginia.edu'}
        else:
            uid = user.uid
            user_info = {'uid': user.uid, 'first_name': user.first_name, 'last_name': user.last_name,
                         'email_address': user.email_address}

        query_string = self.user_info_to_query_string(user_info, redirect_url)
        rv = self.app.get("/v1.0/sso_backdoor%s" % query_string, follow_redirects=False)
        self.assertTrue(rv.status_code == 302)
        self.assertTrue(str.startswith(rv.location, redirect_url))

        user_model = session.query(UserModel).filter_by(uid=uid).first()
        self.assertIsNotNone(user_model.display_name)
        return dict(Authorization='Bearer ' + user_model.encode_auth_token().decode())
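
    # Typical use (an illustrative sketch, not exercised here; '/v1.0/study'
    # stands in for any endpoint in this API that requires authentication):
    #
    #   rv = self.app.get('/v1.0/study', headers=self.logged_in_headers())
    #   self.assert_success(rv)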

    def load_example_data(self):
        """Loads the example workflow specs, then creates the test users and
        studies defined above, attaching all specs to each study."""
        from example_data import ExampleDataLoader
        ExampleDataLoader.clean_db()
        ExampleDataLoader().load_all()

        for user_json in self.users:
            db.session.add(UserModel(**user_json))
        db.session.commit()
        for study_json in self.studies:
            study_model = StudyModel(**study_json)
            db.session.add(study_model)
            StudyService._add_all_workflow_specs_to_study(study_model)
            db.session.commit()
            db.session.flush()

        specs = session.query(WorkflowSpecModel).all()
        self.assertIsNotNone(specs)

        for spec in specs:
            files = session.query(FileModel).filter_by(workflow_spec_id=spec.id).all()
            self.assertIsNotNone(files)
            self.assertGreater(len(files), 0)
            for file in files:
                file_data = session.query(FileDataModel).filter_by(file_model_id=file.id).all()
                self.assertIsNotNone(file_data)
                self.assertGreater(len(file_data), 0)

    @staticmethod
    def load_test_spec(dir_name, master_spec=False, category_id=None):
        """Loads a spec into the database based on a directory in /tests/data."""
        if session.query(WorkflowSpecModel).filter_by(id=dir_name).count() > 0:
            return
        filepath = os.path.join(app.root_path, '..', 'tests', 'data', dir_name, "*")
        return ExampleDataLoader().create_spec(id=dir_name, name=dir_name, filepath=filepath,
                                               master_spec=master_spec, category_id=category_id)
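
    # For example (illustrative; assumes a /tests/data/random_fact directory
    # holding a BPMN spec): BaseTest.load_test_spec('random_fact') creates a
    # WorkflowSpecModel with id 'random_fact' from the files in that directory.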

    @staticmethod
    def protocol_builder_response(file_name):
        """Returns the canned Protocol Builder response stored in
        /tests/data/pb_responses under the given file name."""
        filepath = os.path.join(app.root_path, '..', 'tests', 'data', 'pb_responses', file_name)
        with open(filepath, 'r') as myfile:
            data = myfile.read()
        return data

    def assert_success(self, rv, msg=""):
        try:
            data = json.loads(rv.get_data(as_text=True))
        except Exception:
            # The body is not JSON; fall back to a message with only the status code.
            self.assertTrue(200 <= rv.status_code < 300,
                            "BAD Response: %i." % rv.status_code + ". " + msg)
            return
        self.assertTrue(200 <= rv.status_code < 300,
                        "BAD Response: %i. \n %s" %
                        (rv.status_code, json.dumps(data)) + ". " + msg)

    def assert_failure(self, rv, status_code=0, error_code=""):
        self.assertFalse(200 <= rv.status_code < 300,
                         "Expected an error response, but got a valid one: " + rv.status + ".")
        if status_code != 0:
            self.assertEqual(status_code, rv.status_code)
        if error_code != "":
            data = json.loads(rv.get_data(as_text=True))
            self.assertEqual(error_code, data["code"])

    @staticmethod
    def user_info_to_query_string(user_info, redirect_url):
        query_string_list = []
        for key, value in user_info.items():
            query_string_list.append('%s=%s' % (key, urllib.parse.quote(value)))

        query_string_list.append('redirect_url=%s' % redirect_url)

        return '?%s' % '&'.join(query_string_list)
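
    # For example (illustrative):
    #   user_info_to_query_string({'uid': 'dhf8r'}, 'http://localhost:4200')
    # returns '?uid=dhf8r&redirect_url=http://localhost:4200'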

    def replace_file(self, name, file_path):
        """Replaces a stored file with the given name with the contents of the file at the given path."""
        file_service = FileService()
        with open(file_path, "rb") as file:
            data = file.read()

        file_model = db.session.query(FileModel).filter(FileModel.name == name).first()
        noise, file_extension = os.path.splitext(file_path)
        content_type = CONTENT_TYPES[file_extension[1:]]
        file_service.update_file(file_model, data, content_type)

    def create_workflow(self, workflow_name, study=None, category_id=None):
        if study is None:
            study = session.query(StudyModel).first()
        spec = self.load_test_spec(workflow_name, category_id=category_id)
        workflow_model = StudyService._create_workflow_model(study, spec)
        #processor = WorkflowProcessor(workflow_model)
        #workflow = session.query(WorkflowModel).filter_by(study_id=study.id, workflow_spec_id=workflow_name).first()
        return workflow_model

    def create_reference_document(self):
        file_path = os.path.join(app.root_path, '..', 'tests', 'data', 'reference', 'irb_documents.xlsx')
        with open(file_path, "rb") as file:
            FileService.add_reference_file(FileService.IRB_PRO_CATEGORIES_FILE,
                                           binary_data=file.read(),
                                           content_type=CONTENT_TYPES['xls'])
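

# A minimal sketch of how a test can build on this class (illustrative only;
# 'random_fact' is an assumed spec directory under /tests/data):
#
# class TestWorkflowCreation(BaseTest):
#
#     def test_workflow_is_created_for_study(self):
#         self.load_example_data()
#         workflow_model = self.create_workflow('random_fact')
#         self.assertIsNotNone(workflow_model)
#         self.assertEqual('random_fact', workflow_model.workflow_spec_id)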