import datetime
import glob
import os
import xml.etree.ElementTree as ElementTree
from crc import app, db, session
from crc.models.file import FileType, FileModel, FileDataModel, CONTENT_TYPES
from crc.models.study import StudyModel
from crc.models.user import UserModel
from crc.models.workflow import WorkflowSpecModel, WorkflowSpecCategoryModel
from crc.services.file_service import FileService
from crc.services.workflow_processor import WorkflowProcessor
from crc.models.protocol_builder import ProtocolBuilderStatus


class ExampleDataLoader:
    @staticmethod
    def clean_db():
        session.flush()  # Clear out any transactions before deleting it all to avoid spurious errors.
        # Delete in reverse dependency order so foreign-key constraints are not violated.
        for table in reversed(db.metadata.sorted_tables):
            session.execute(table.delete())
        session.flush()

    def load_all(self):
        self.load_reference_documents()

        categories = [
            WorkflowSpecCategoryModel(
                id=0,
                name='irb_review',
                display_name='Pass IRB Review',
                display_order=0
            ),
            WorkflowSpecCategoryModel(
                id=1,
                name='core_info',
                display_name='Enter Core Info',
                display_order=1
            ),
            WorkflowSpecCategoryModel(
                id=2,
                name='approvals',
                display_name='Obtain Approvals',
                display_order=2
            ),
            WorkflowSpecCategoryModel(
                id=3,
                name='data_security_plan',
                display_name='Enter Data Security Plan',
                display_order=3
            ),
            WorkflowSpecCategoryModel(
                id=4,
                name='finance',
                display_name='Enter Finance Data',
                display_order=4
            ),
            WorkflowSpecCategoryModel(
                id=5,
                name='notifications',
                display_name='View and Send Notifications',
                display_order=5
            ),
        ]
        db.session.add_all(categories)
        db.session.commit()
        self.create_spec(id="top_level_workflow",
                         name="top_level_workflow",
                         display_name="Top Level Workflow",
                         description="Determines the status of other workflows in a study",
                         category_id=0,
                         master_spec=True)
        self.create_spec(id="irb_api_details",
                         name="irb_api_details",
                         display_name="IRB API Details",
                         description="TBD",
                         category_id=0)
        self.create_spec(id="irb_api_personnel",
                         name="irb_api_personnel",
                         display_name="IRB API Personnel",
                         description="TBD",
                         category_id=0)
        # self.create_spec(id="irb_api_required_docs",
        #                  name="irb_api_required_docs",
        #                  display_name="IRB API Required Documents",
        #                  description="TBD",
        #                  category_id=0)
        self.create_spec(id="core_info",
                         name="core_info",
                         display_name="Core Data",
                         description="TBD",
                         category_id=1)
        self.create_spec(id="ids",
                         name="ids",
                         display_name="Investigative Drug Services (IDS)",
                         description="TBD",
                         category_id=2)
        self.create_spec(id="data_security_plan",
                         name="data_security_plan",
                         display_name="Data Security Plan",
                         description="TBD",
                         category_id=3)
        self.create_spec(id="sponsor_funding_source",
                         name="sponsor_funding_source",
                         display_name="Sponsor Funding Source",
                         description="TBD",
                         category_id=4)
        self.create_spec(id="finance",
                         name="finance",
                         display_name="Finance",
                         description="TBD",
                         category_id=4)
    def create_spec(self, id, name, display_name="", description="", filepath=None, master_spec=False, category_id=None):
        """Assumes that a directory exists in static/bpmn with the same name as the given id.
        Further assumes that [id].bpmn is the primary file for the workflow.
        Returns the spec model that was added to the database."""
        file_service = FileService()
        spec = WorkflowSpecModel(id=id,
                                 name=name,
                                 display_name=display_name,
                                 description=description,
                                 is_master_spec=master_spec,
                                 category_id=category_id)
        db.session.add(spec)
        db.session.commit()
        if not filepath:
            filepath = os.path.join(app.root_path, 'static', 'bpmn', id, "*")
        files = glob.glob(filepath)
        for file_path in files:
            noise, file_extension = os.path.splitext(file_path)
            filename = os.path.basename(file_path)
            is_status = filename.lower() == 'status.bpmn'
            is_primary = filename.lower() == id + '.bpmn'
            file = None  # Initialize so the finally block is safe even if open() raises.
            try:
                file = open(file_path, 'rb')
                data = file.read()
                content_type = CONTENT_TYPES[file_extension[1:]]
                file_service.add_workflow_spec_file(workflow_spec=spec, name=filename, content_type=content_type,
                                                    binary_data=data, primary=is_primary, is_status=is_status)
            except IsADirectoryError:
                # Ignore sub directories
                pass
            finally:
                if file:
                    file.close()
        return spec
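
    # A hypothetical example of the directory convention create_spec() assumes
    # (approval_rules.dmn is a made-up file name for illustration):
    #
    #   static/bpmn/finance/
    #       finance.bpmn        <- primary file; its name matches the spec id
    #       approval_rules.dmn  <- any other files are attached as supporting files
    #
    # With that layout, create_spec(id="finance", name="finance", category_id=4)
    # would load both files and mark finance.bpmn as the primary one.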
    def load_reference_documents(self):
        file_path = os.path.join(app.root_path, 'static', 'reference', 'irb_documents.xlsx')
        # Use a context manager so the file handle is closed even if the upload fails.
        with open(file_path, "rb") as file:
            FileService.add_reference_file(FileService.IRB_PRO_CATEGORIES_FILE,
                                           binary_data=file.read(),
                                           content_type=CONTENT_TYPES['xls'])
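

# A minimal usage sketch, assuming this module is executed directly against a
# configured CRC application (the project may instead drive the loader from
# tests or a CLI command):
if __name__ == '__main__':
    with app.app_context():
        ExampleDataLoader.clean_db()    # wipe every table first
        ExampleDataLoader().load_all()  # then load categories, specs, and reference documents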