diff --git a/Pipfile.lock b/Pipfile.lock
index d9c2bfab..ce620efc 100644
--- a/Pipfile.lock
+++ b/Pipfile.lock
@@ -428,10 +428,10 @@
},
"mako": {
"hashes": [
- "sha256:3139c5d64aa5d175dbafb95027057128b5fbd05a40c53999f3905ceb53366d9d",
- "sha256:8e8b53c71c7e59f3de716b6832c4e401d903af574f6962edbbbf6ecc2a5fe6c9"
+ "sha256:8195c8c1400ceb53496064314c6736719c6f25e7479cd24c77be3d9361cddc27",
+ "sha256:93729a258e4ff0747c876bd9e20df1b9758028946e976324ccd2d68245c7b6a9"
],
- "version": "==1.1.2"
+ "version": "==1.1.3"
},
"markupsafe": {
"hashes": [
@@ -489,11 +489,11 @@
},
"marshmallow-sqlalchemy": {
"hashes": [
- "sha256:3247e41e424146340b03a369f2b7c6f0364477ccedc4e2481e84d5f3a8d3c67f",
- "sha256:dbbe51d28bb28e7ee2782e51310477f7a2c5a111a301f6dd8e264e11ab820427"
+ "sha256:03a555b610bb307689b821b64e2416593ec21a85925c8c436c2cd08ebc6bb85e",
+ "sha256:0ef59c8da8da2e18e808e3880158049e9d72f3031c84cc804b6c533a0eb668a9"
],
"index": "pypi",
- "version": "==0.23.0"
+ "version": "==0.23.1"
},
"numpy": {
"hashes": [
@@ -778,7 +778,7 @@
"spiffworkflow": {
"editable": true,
"git": "https://github.com/sartography/SpiffWorkflow.git",
- "ref": "c8d87826d496af825a184bdc3f0a751e603cfe44"
+ "ref": "b8a064a0bb76c705a1be04ee9bb8ac7beee56eb0"
},
"sqlalchemy": {
"hashes": [
@@ -876,11 +876,11 @@
},
"xlsxwriter": {
"hashes": [
- "sha256:488e1988ab16ff3a9cd58c7656d0a58f8abe46ee58b98eecea78c022db28656b",
- "sha256:97ab487b81534415c5313154203f3e8a637d792b1e6a8201e8f7f71da0203c2a"
+ "sha256:828b3285fc95105f5b1946a6a015b31cf388bd5378fdc6604e4d1b7839df2e77",
+ "sha256:82a3b0e73e3913483da23791d1a25e4d2dbb3837d1be4129473526b9a270a5cc"
],
"index": "pypi",
- "version": "==1.2.8"
+ "version": "==1.2.9"
},
"zipp": {
"hashes": [
diff --git a/crc/api.yml b/crc/api.yml
index edc3861b..758169b7 100644
--- a/crc/api.yml
+++ b/crc/api.yml
@@ -173,6 +173,30 @@ paths:
application/json:
schema:
$ref: "#/components/schemas/Study"
+ /study/{study_id}/approvals:
+ parameters:
+ - name: study_id
+ in: path
+ required: true
+          description: The id of the study for which approvals should be returned.
+ schema:
+ type: integer
+ format: int32
+ get:
+ operationId: crc.api.approval.get_approvals_for_study
+ summary: Returns approvals for a single study
+ tags:
+ - Studies
+ - Approvals
+ responses:
+ '200':
+ description: An array of approvals
+ content:
+ application/json:
+ schema:
+ type: array
+ items:
+ $ref: "#/components/schemas/Approval"
/workflow-specification:
get:
operationId: crc.api.workflow.all_specifications
diff --git a/crc/api/approval.py b/crc/api/approval.py
index 739773c1..32238cf0 100644
--- a/crc/api/approval.py
+++ b/crc/api/approval.py
@@ -5,15 +5,24 @@ from crc.models.approval import Approval, ApprovalModel, ApprovalSchema
from crc.services.approval_service import ApprovalService
-def get_approvals(approver_uid = None):
+def get_approvals(approver_uid=None):
if not approver_uid:
db_approvals = ApprovalService.get_all_approvals()
else:
db_approvals = ApprovalService.get_approvals_per_user(approver_uid)
approvals = [Approval.from_model(approval_model) for approval_model in db_approvals]
+
results = ApprovalSchema(many=True).dump(approvals)
return results
+
+def get_approvals_for_study(study_id=None):
+ db_approvals = ApprovalService.get_approvals_for_study(study_id)
+ approvals = [Approval.from_model(approval_model) for approval_model in db_approvals]
+ results = ApprovalSchema(many=True).dump(approvals)
+ return results
+
+
def update_approval(approval_id, body):
if approval_id is None:
raise ApiError('unknown_approval', 'Please provide a valid Approval ID.')
diff --git a/crc/api/common.py b/crc/api/common.py
index 2cd09522..f8673a5b 100644
--- a/crc/api/common.py
+++ b/crc/api/common.py
@@ -1,9 +1,12 @@
+from SpiffWorkflow import WorkflowException
+from SpiffWorkflow.exceptions import WorkflowTaskExecException
+
from crc import ma, app
class ApiError(Exception):
def __init__(self, code, message, status_code=400,
- file_name="", task_id="", task_name="", tag=""):
+ file_name="", task_id="", task_name="", tag="", task_data = {}):
self.status_code = status_code
self.code = code # a short consistent string describing the error.
self.message = message # A detailed message that provides more information.
@@ -11,6 +14,7 @@ class ApiError(Exception):
self.task_name = task_name or "" # OPTIONAL: The name of the task in the BPMN Diagram.
self.file_name = file_name or "" # OPTIONAL: The file that caused the error.
self.tag = tag or "" # OPTIONAL: The XML Tag that caused the issue.
+        self.task_data = task_data or ""  # OPTIONAL: A snapshot of data connected to the task when error occurred.
Exception.__init__(self, self.message)
@classmethod
@@ -20,6 +24,7 @@ class ApiError(Exception):
instance.task_id = task.task_spec.name or ""
instance.task_name = task.task_spec.description or ""
instance.file_name = task.workflow.spec.file or ""
+ instance.task_data = task.data
return instance
@classmethod
@@ -32,10 +37,21 @@ class ApiError(Exception):
instance.file_name = task_spec._wf_spec.file
return instance
+ @classmethod
+ def from_workflow_exception(cls, code, message, exp: WorkflowException):
+ """We catch a lot of workflow exception errors,
+ so consolidating the code, and doing the best things
+ we can with the data we have."""
+ if isinstance(exp, WorkflowTaskExecException):
+ return ApiError.from_task(code, message, exp.task)
+ else:
+ return ApiError.from_task_spec(code, message, exp.sender)
+
class ApiErrorSchema(ma.Schema):
class Meta:
- fields = ("code", "message", "workflow_name", "file_name", "task_name", "task_id")
+ fields = ("code", "message", "workflow_name", "file_name", "task_name", "task_id",
+ "task_data")
@app.errorhandler(ApiError)
diff --git a/crc/api/file.py b/crc/api/file.py
index 07ced388..a537cfe5 100644
--- a/crc/api/file.py
+++ b/crc/api/file.py
@@ -12,8 +12,9 @@ from crc.services.file_service import FileService
def to_file_api(file_model):
- """Converts a FileModel object to something we can return via the aip"""
- return File.from_models(file_model, FileService.get_file_data(file_model.id))
+ """Converts a FileModel object to something we can return via the api"""
+ return File.from_models(file_model, FileService.get_file_data(file_model.id),
+ FileService.get_doc_dictionary())
def get_files(workflow_spec_id=None, workflow_id=None, form_field_key=None):
diff --git a/crc/api/study.py b/crc/api/study.py
index 423f6fe2..e9a251f8 100644
--- a/crc/api/study.py
+++ b/crc/api/study.py
@@ -48,12 +48,10 @@ def update_study(study_id, body):
def get_study(study_id):
- study_service = StudyService()
- study = study_service.get_study(study_id)
+ study = StudyService.get_study(study_id)
if (study is None):
raise ApiError("Study not found", status_code=404)
- schema = StudySchema()
- return schema.dump(study)
+ return StudySchema().dump(study)
def delete_study(study_id):
diff --git a/crc/api/workflow.py b/crc/api/workflow.py
index efcccc26..81252056 100644
--- a/crc/api/workflow.py
+++ b/crc/api/workflow.py
@@ -44,6 +44,13 @@ def validate_workflow_specification(spec_id):
try:
WorkflowService.test_spec(spec_id)
except ApiError as ae:
+ ae.message = "When populating all fields ... " + ae.message
+ errors.append(ae)
+ try:
+ # Run the validation twice, the second time, just populate the required fields.
+ WorkflowService.test_spec(spec_id, required_only=True)
+ except ApiError as ae:
+ ae.message = "When populating only required fields ... " + ae.message
errors.append(ae)
return ApiErrorSchema(many=True).dump(errors)
@@ -112,6 +119,8 @@ def __get_workflow_api_model(processor: WorkflowProcessor, next_task = None):
navigation.append(NavigationItem(**nav_item))
NavigationItemSchema().dump(nav_item)
+
+ spec = session.query(WorkflowSpecModel).filter_by(id=processor.workflow_spec_id).first()
workflow_api = WorkflowApi(
id=processor.get_workflow_id(),
status=processor.get_status(),
@@ -122,7 +131,8 @@ def __get_workflow_api_model(processor: WorkflowProcessor, next_task = None):
is_latest_spec=processor.is_latest_spec,
total_tasks=processor.workflow_model.total_tasks,
completed_tasks=processor.workflow_model.completed_tasks,
- last_updated=processor.workflow_model.last_updated
+ last_updated=processor.workflow_model.last_updated,
+ title=spec.display_name
)
if not next_task: # The Next Task can be requested to be a certain task, useful for parallel tasks.
# This may or may not work, sometimes there is no next task to complete.
@@ -228,4 +238,4 @@ def lookup(workflow_id, field_id, query, limit):
"""
workflow = session.query(WorkflowModel).filter(WorkflowModel.id == workflow_id).first()
lookup_data = LookupService.lookup(workflow, field_id, query, limit)
- return LookupDataSchema(many=True).dump(lookup_data)
\ No newline at end of file
+ return LookupDataSchema(many=True).dump(lookup_data)
diff --git a/crc/models/api_models.py b/crc/models/api_models.py
index 4b279965..b8b535a7 100644
--- a/crc/models/api_models.py
+++ b/crc/models/api_models.py
@@ -31,10 +31,12 @@ class NavigationItem(object):
class Task(object):
+ PROP_OPTIONS_REPEAT = "repeat"
PROP_OPTIONS_FILE = "spreadsheet.name"
PROP_OPTIONS_VALUE_COLUMN = "spreadsheet.value.column"
PROP_OPTIONS_LABEL_COL = "spreadsheet.label.column"
PROP_LDAP_LOOKUP = "ldap.lookup"
+ VALIDATION_REQUIRED = "required"
FIELD_TYPE_AUTO_COMPLETE = "autocomplete"
@@ -117,7 +119,7 @@ class NavigationItemSchema(ma.Schema):
class WorkflowApi(object):
def __init__(self, id, status, next_task, navigation,
- spec_version, is_latest_spec, workflow_spec_id, total_tasks, completed_tasks, last_updated):
+ spec_version, is_latest_spec, workflow_spec_id, total_tasks, completed_tasks, last_updated, title):
self.id = id
self.status = status
self.next_task = next_task # The next task that requires user input.
@@ -128,13 +130,14 @@ class WorkflowApi(object):
self.total_tasks = total_tasks
self.completed_tasks = completed_tasks
self.last_updated = last_updated
+ self.title = title
class WorkflowApiSchema(ma.Schema):
class Meta:
model = WorkflowApi
fields = ["id", "status", "next_task", "navigation",
"workflow_spec_id", "spec_version", "is_latest_spec", "total_tasks", "completed_tasks",
- "last_updated"]
+ "last_updated", "title"]
unknown = INCLUDE
status = EnumField(WorkflowStatus)
@@ -145,7 +148,7 @@ class WorkflowApiSchema(ma.Schema):
def make_workflow(self, data, **kwargs):
keys = ['id', 'status', 'next_task', 'navigation',
'workflow_spec_id', 'spec_version', 'is_latest_spec', "total_tasks", "completed_tasks",
- "last_updated"]
+ "last_updated", "title"]
filtered_fields = {key: data[key] for key in keys}
filtered_fields['next_task'] = TaskSchema().make_task(data['next_task'])
return WorkflowApi(**filtered_fields)
diff --git a/crc/models/approval.py b/crc/models/approval.py
index f7aa2e06..1f7eed38 100644
--- a/crc/models/approval.py
+++ b/crc/models/approval.py
@@ -11,10 +11,11 @@ from crc.models.file import FileDataModel
from crc.models.study import StudyModel
from crc.models.workflow import WorkflowModel
from crc.services.ldap_service import LdapService
+from crc.services.file_service import FileService
class ApprovalStatus(enum.Enum):
- WAITING = "WAITING" # no one has done jack.
+ PENDING = "PENDING" # no one has done jack.
APPROVED = "APPROVED" # approved by the reviewer
DECLINED = "DECLINED" # rejected by the reviewer
CANCELED = "CANCELED" # The document was replaced with a new version and this review is no longer needed.
@@ -67,10 +68,10 @@ class Approval(object):
if model.study:
instance.title = model.study.title
+ principal_investigator_id = model.study.primary_investigator_id
instance.approver = {}
try:
ldap_service = LdapService()
- principal_investigator_id = model.study.primary_investigator_id
user_info = ldap_service.user_info(principal_investigator_id)
except (ApiError, LDAPSocketOpenError) as exception:
user_info = None
@@ -84,11 +85,25 @@ class Approval(object):
instance.approver['title'] = user_info.title
instance.approver['department'] = user_info.department
+ # TODO: Organize it properly, move it to services
+ doc_dictionary = FileService.get_reference_data(FileService.DOCUMENT_LIST, 'code', ['id'])
+
instance.associated_files = []
for approval_file in model.approval_files:
+ try:
+ extra_info = doc_dictionary[approval_file.file_data.file_model.irb_doc_code]
+ except:
+ extra_info = None
associated_file = {}
associated_file['id'] = approval_file.file_data.file_model.id
- associated_file['name'] = approval_file.file_data.file_model.name
+ if extra_info:
+ irb_doc_code = approval_file.file_data.file_model.irb_doc_code
+ associated_file['name'] = '_'.join((irb_doc_code, approval_file.file_data.file_model.name))
+ associated_file['description'] = extra_info['description']
+ else:
+ associated_file['name'] = approval_file.file_data.file_model.name
+ associated_file['description'] = 'No description available'
+ associated_file['name'] = '(' + principal_investigator_id + ')' + associated_file['name']
associated_file['content_type'] = approval_file.file_data.file_model.content_type
instance.associated_files.append(associated_file)
diff --git a/crc/models/file.py b/crc/models/file.py
index 184979e6..9cbfb7fc 100644
--- a/crc/models/file.py
+++ b/crc/models/file.py
@@ -86,7 +86,7 @@ class FileModel(db.Model):
class File(object):
@classmethod
- def from_models(cls, model: FileModel, data_model: FileDataModel):
+ def from_models(cls, model: FileModel, data_model: FileDataModel, doc_dictionary):
instance = cls()
instance.id = model.id
instance.name = model.name
@@ -99,6 +99,15 @@ class File(object):
instance.workflow_id = model.workflow_id
instance.irb_doc_code = model.irb_doc_code
instance.type = model.type
+ if model.irb_doc_code and model.irb_doc_code in doc_dictionary:
+ instance.category = "/".join(filter(None, [doc_dictionary[model.irb_doc_code]['category1'],
+ doc_dictionary[model.irb_doc_code]['category2'],
+ doc_dictionary[model.irb_doc_code]['category3']]))
+ instance.description = doc_dictionary[model.irb_doc_code]['description']
+ instance.download_name = ".".join([instance.category, model.type.value])
+ else:
+ instance.category = ""
+ instance.description = ""
if data_model:
instance.last_modified = data_model.date_created
instance.latest_version = data_model.version
@@ -122,7 +131,8 @@ class FileSchema(ma.Schema):
model = File
fields = ["id", "name", "is_status", "is_reference", "content_type",
"primary", "primary_process_id", "workflow_spec_id", "workflow_id",
- "irb_doc_code", "last_modified", "latest_version", "type"]
+ "irb_doc_code", "last_modified", "latest_version", "type", "categories",
+ "description", "category", "description", "download_name"]
unknown = INCLUDE
type = EnumField(FileType)
diff --git a/crc/models/study.py b/crc/models/study.py
index 38bd2f3b..540ee018 100644
--- a/crc/models/study.py
+++ b/crc/models/study.py
@@ -5,7 +5,7 @@ from sqlalchemy import func
from crc import db, ma
from crc.api.common import ApiErrorSchema
-from crc.models.file import FileModel, SimpleFileSchema
+from crc.models.file import FileModel, SimpleFileSchema, FileSchema
from crc.models.protocol_builder import ProtocolBuilderStatus, ProtocolBuilderStudy
from crc.models.workflow import WorkflowSpecCategoryModel, WorkflowState, WorkflowStatus, WorkflowSpecModel, \
WorkflowModel
@@ -106,7 +106,8 @@ class Study(object):
def __init__(self, title, last_updated, primary_investigator_id, user_uid,
id=None,
protocol_builder_status=None,
- sponsor="", hsr_number="", ind_number="", categories=[], **argsv):
+ sponsor="", hsr_number="", ind_number="", categories=[],
+ files=[], approvals=[], **argsv):
self.id = id
self.user_uid = user_uid
self.title = title
@@ -117,8 +118,9 @@ class Study(object):
self.hsr_number = hsr_number
self.ind_number = ind_number
self.categories = categories
+ self.approvals = approvals
self.warnings = []
- self.files = []
+ self.files = files
@classmethod
def from_model(cls, study_model: StudyModel):
@@ -149,12 +151,13 @@ class StudySchema(ma.Schema):
hsr_number = fields.String(allow_none=True)
sponsor = fields.String(allow_none=True)
ind_number = fields.String(allow_none=True)
- files = fields.List(fields.Nested(SimpleFileSchema), dump_only=True)
+ files = fields.List(fields.Nested(FileSchema), dump_only=True)
+ approvals = fields.List(fields.Nested('ApprovalSchema'), dump_only=True)
class Meta:
model = Study
additional = ["id", "title", "last_updated", "primary_investigator_id", "user_uid",
- "sponsor", "ind_number"]
+ "sponsor", "ind_number", "approvals", "files"]
unknown = INCLUDE
@marshmallow.post_load
diff --git a/crc/services/approval_service.py b/crc/services/approval_service.py
index 8a13e6c2..39886d62 100644
--- a/crc/services/approval_service.py
+++ b/crc/services/approval_service.py
@@ -19,6 +19,12 @@ class ApprovalService(object):
db_approvals = session.query(ApprovalModel).filter_by(approver_uid=approver_uid).all()
return db_approvals
+ @staticmethod
+ def get_approvals_for_study(study_id):
+ """Returns a list of all approvals for the given study"""
+ db_approvals = session.query(ApprovalModel).filter_by(study_id=study_id).all()
+ return db_approvals
+
@staticmethod
def get_all_approvals():
"""Returns a list of all approvlas"""
@@ -78,7 +84,7 @@ class ApprovalService(object):
version = 1
model = ApprovalModel(study_id=study_id, workflow_id=workflow_id,
- approver_uid=approver_uid, status=ApprovalStatus.WAITING.value,
+ approver_uid=approver_uid, status=ApprovalStatus.PENDING.value,
message="", date_created=datetime.now(),
version=version)
approval_files = ApprovalService._create_approval_files(workflow_data_files, model)
diff --git a/crc/services/file_service.py b/crc/services/file_service.py
index beb22831..9142a7c3 100644
--- a/crc/services/file_service.py
+++ b/crc/services/file_service.py
@@ -5,11 +5,13 @@ from datetime import datetime
from uuid import UUID
from xml.etree import ElementTree
+import flask
from SpiffWorkflow.bpmn.parser.ValidationException import ValidationException
from pandas import ExcelFile
from sqlalchemy import desc
+from sqlalchemy.exc import IntegrityError
-from crc import session
+from crc import session, app
from crc.api.common import ApiError
from crc.models.file import FileType, FileDataModel, FileModel, LookupFileModel, LookupDataModel
from crc.models.workflow import WorkflowSpecModel, WorkflowModel, WorkflowSpecDependencyFile
@@ -20,6 +22,14 @@ class FileService(object):
DOCUMENT_LIST = "irb_documents.xlsx"
INVESTIGATOR_LIST = "investigators.xlsx"
+ __doc_dictionary = None
+
+ @staticmethod
+ def get_doc_dictionary():
+ if not FileService.__doc_dictionary:
+ FileService.__doc_dictionary = FileService.get_reference_data(FileService.DOCUMENT_LIST, 'code', ['id'])
+ return FileService.__doc_dictionary
+
@staticmethod
def add_workflow_spec_file(workflow_spec: WorkflowSpecModel,
name, content_type, binary_data, primary=False, is_status=False):
@@ -295,12 +305,17 @@ class FileService(object):
@staticmethod
def delete_file(file_id):
- data_models = session.query(FileDataModel).filter_by(file_model_id=file_id).all()
- for dm in data_models:
- lookup_files = session.query(LookupFileModel).filter_by(file_data_model_id=dm.id).all()
- for lf in lookup_files:
- session.query(LookupDataModel).filter_by(lookup_file_model_id=lf.id).delete()
- session.query(LookupFileModel).filter_by(id=lf.id).delete()
- session.query(FileDataModel).filter_by(file_model_id=file_id).delete()
- session.query(FileModel).filter_by(id=file_id).delete()
- session.commit()
+ try:
+ data_models = session.query(FileDataModel).filter_by(file_model_id=file_id).all()
+ for dm in data_models:
+ lookup_files = session.query(LookupFileModel).filter_by(file_data_model_id=dm.id).all()
+ for lf in lookup_files:
+ session.query(LookupDataModel).filter_by(lookup_file_model_id=lf.id).delete()
+ session.query(LookupFileModel).filter_by(id=lf.id).delete()
+ session.query(FileDataModel).filter_by(file_model_id=file_id).delete()
+ session.query(FileModel).filter_by(id=file_id).delete()
+ session.commit()
+ except IntegrityError as ie:
+ app.logger.error("Failed to delete file: %i, due to %s" % (file_id, str(ie)))
+ raise ApiError('file_integrity_error', "You are attempting to delete a file that is "
+ "required by other records in the system.")
\ No newline at end of file
diff --git a/crc/services/study_service.py b/crc/services/study_service.py
index 98a8d15a..e6ef5291 100644
--- a/crc/services/study_service.py
+++ b/crc/services/study_service.py
@@ -4,11 +4,12 @@ from typing import List
import requests
from SpiffWorkflow import WorkflowException
+from SpiffWorkflow.exceptions import WorkflowTaskExecException
from ldap3.core.exceptions import LDAPSocketOpenError
from crc import db, session, app
from crc.api.common import ApiError
-from crc.models.file import FileModel, FileModelSchema
+from crc.models.file import FileModel, FileModelSchema, File
from crc.models.protocol_builder import ProtocolBuilderStudy, ProtocolBuilderStatus
from crc.models.stats import TaskEventModel
from crc.models.study import StudyModel, Study, Category, WorkflowMetadata
@@ -18,6 +19,8 @@ from crc.services.file_service import FileService
from crc.services.ldap_service import LdapService
from crc.services.protocol_builder import ProtocolBuilderService
from crc.services.workflow_processor import WorkflowProcessor
+from crc.services.approval_service import ApprovalService
+from crc.models.approval import Approval
class StudyService(object):
@@ -53,7 +56,13 @@ class StudyService(object):
study = Study.from_model(study_model)
study.categories = StudyService.get_categories()
workflow_metas = StudyService.__get_workflow_metas(study_id)
- study.files = FileService.get_files_for_study(study.id)
+ approvals = ApprovalService.get_approvals_for_study(study.id)
+ study.approvals = [Approval.from_model(approval_model) for approval_model in approvals]
+
+ files = FileService.get_files_for_study(study.id)
+ files = (File.from_models(model, FileService.get_file_data(model.id),
+ FileService.get_doc_dictionary()) for model in files)
+ study.files = list(files)
# Calling this line repeatedly is very very slow. It creates the
# master spec and runs it.
@@ -174,6 +183,7 @@ class StudyService(object):
return documents
+
@staticmethod
def get_investigators(study_id):
@@ -309,6 +319,8 @@ class StudyService(object):
for workflow_spec in new_specs:
try:
StudyService._create_workflow_model(study_model, workflow_spec)
+ except WorkflowTaskExecException as wtee:
+ errors.append(ApiError.from_task("workflow_execution_exception", str(wtee), wtee.task))
except WorkflowException as we:
errors.append(ApiError.from_task_spec("workflow_execution_exception", str(we), we.sender))
return errors
diff --git a/crc/services/workflow_processor.py b/crc/services/workflow_processor.py
index d032b94a..93590d94 100644
--- a/crc/services/workflow_processor.py
+++ b/crc/services/workflow_processor.py
@@ -299,21 +299,27 @@ class WorkflowProcessor(object):
return WorkflowStatus.waiting
def hard_reset(self):
- """Recreate this workflow, but keep the data from the last completed task and add it back into the first task.
- This may be useful when a workflow specification changes, and users need to review all the
- prior steps, but don't need to reenter all the previous data.
+ """Recreate this workflow, but keep the data from the last completed task and add
+ it back into the first task. This may be useful when a workflow specification changes,
+ and users need to review all the prior steps, but they don't need to reenter all the previous data.
Returns the new version.
"""
+
+ # Create a new workflow based on the latest specs.
self.spec_data_files = FileService.get_spec_data_files(workflow_spec_id=self.workflow_spec_id)
- spec = WorkflowProcessor.get_spec(self.spec_data_files, self.workflow_spec_id)
- # spec = WorkflowProcessor.get_spec(self.workflow_spec_id, version)
- bpmn_workflow = BpmnWorkflow(spec, script_engine=self._script_engine)
- bpmn_workflow.data = self.bpmn_workflow.data
- for task in bpmn_workflow.get_tasks(SpiffTask.READY):
- task.data = self.bpmn_workflow.last_task.data
- bpmn_workflow.do_engine_steps()
- self.bpmn_workflow = bpmn_workflow
+ new_spec = WorkflowProcessor.get_spec(self.spec_data_files, self.workflow_spec_id)
+ new_bpmn_workflow = BpmnWorkflow(new_spec, script_engine=self._script_engine)
+ new_bpmn_workflow.data = self.bpmn_workflow.data
+
+ # Reset the current workflow to the beginning - which we will consider to be the first task after the root
+ # element. This feels a little sketchy, but I think it is safe to assume root will have one child.
+ first_task = self.bpmn_workflow.task_tree.children[0]
+ first_task.reset_token(reset_data=False)
+ for task in new_bpmn_workflow.get_tasks(SpiffTask.READY):
+ task.data = first_task.data
+ new_bpmn_workflow.do_engine_steps()
+ self.bpmn_workflow = new_bpmn_workflow
def get_status(self):
return self.status_of(self.bpmn_workflow)
diff --git a/crc/services/workflow_service.py b/crc/services/workflow_service.py
index c6cb8638..03a23aac 100644
--- a/crc/services/workflow_service.py
+++ b/crc/services/workflow_service.py
@@ -7,7 +7,6 @@ from SpiffWorkflow import Task as SpiffTask, WorkflowException
from SpiffWorkflow.bpmn.specs.ManualTask import ManualTask
from SpiffWorkflow.bpmn.specs.ScriptTask import ScriptTask
from SpiffWorkflow.bpmn.specs.UserTask import UserTask
-from SpiffWorkflow.bpmn.workflow import BpmnWorkflow
from SpiffWorkflow.dmn.specs.BusinessRuleTask import BusinessRuleTask
from SpiffWorkflow.specs import CancelTask, StartTask
from flask import g
@@ -17,7 +16,6 @@ from crc import db, app
from crc.api.common import ApiError
from crc.models.api_models import Task, MultiInstanceType
from crc.models.file import LookupDataModel
-from crc.models.protocol_builder import ProtocolBuilderStatus
from crc.models.stats import TaskEventModel
from crc.models.study import StudyModel
from crc.models.user import UserModel
@@ -39,7 +37,9 @@ class WorkflowService(object):
the workflow Processor should be hidden behind this service.
This will help maintain a structure that avoids circular dependencies.
But for now, this contains tools for converting spiff-workflow models into our
- own API models with additional information and capabilities."""
+ own API models with additional information and capabilities and
+ handles the testing of a workflow specification by completing it with
+ random selections, attempting to mimic a front end as much as possible. """
@staticmethod
def make_test_workflow(spec_id):
@@ -61,12 +61,20 @@ class WorkflowService(object):
for study in db.session.query(StudyModel).filter(StudyModel.user_uid=="test"):
StudyService.delete_study(study.id)
db.session.commit()
- db.session.query(UserModel).filter_by(uid="test").delete()
+
+ user = db.session.query(UserModel).filter_by(uid="test").first()
+ if user:
+ db.session.delete(user)
@staticmethod
- def test_spec(spec_id):
- """Runs a spec through it's paces to see if it results in any errors. Not fool-proof, but a good
- sanity check."""
+ def test_spec(spec_id, required_only=False):
+ """Runs a spec through it's paces to see if it results in any errors.
+ Not fool-proof, but a good sanity check. Returns the final data
+ output form the last task if successful.
+
+ required_only can be set to true, in which case this will run the
+ spec, only completing the required fields, rather than everything.
+ """
workflow_model = WorkflowService.make_test_workflow(spec_id)
@@ -74,8 +82,7 @@ class WorkflowService(object):
processor = WorkflowProcessor(workflow_model, validate_only=True)
except WorkflowException as we:
WorkflowService.delete_test_data()
- raise ApiError.from_task_spec("workflow_execution_exception", str(we),
- we.sender)
+ raise ApiError.from_workflow_exception("workflow_execution_exception", str(we), we)
while not processor.bpmn_workflow.is_completed():
try:
@@ -85,38 +92,55 @@ class WorkflowService(object):
task_api = WorkflowService.spiff_task_to_api_task(
task,
add_docs_and_forms=True) # Assure we try to process the documenation, and raise those errors.
- WorkflowService.populate_form_with_random_data(task, task_api)
+ WorkflowService.populate_form_with_random_data(task, task_api, required_only)
task.complete()
except WorkflowException as we:
WorkflowService.delete_test_data()
- raise ApiError.from_task_spec("workflow_execution_exception", str(we),
- we.sender)
+ raise ApiError.from_workflow_exception("workflow_execution_exception", str(we), we)
+
WorkflowService.delete_test_data()
+ return processor.bpmn_workflow.last_task.data
@staticmethod
- def populate_form_with_random_data(task, task_api):
+ def populate_form_with_random_data(task, task_api, required_only):
"""populates a task with random data - useful for testing a spec."""
if not hasattr(task.task_spec, 'form'): return
form_data = {}
for field in task_api.form.fields:
- if field.type == "enum":
- if len(field.options) > 0:
- random_choice = random.choice(field.options)
- if isinstance(random_choice, dict):
- form_data[field.id] = random.choice(field.options)['id']
- else:
- # fixme: why it is sometimes an EnumFormFieldOption, and other times not?
- form_data[field.id] = random_choice.id ## Assume it is an EnumFormFieldOption
+ if required_only and (not field.has_validation(Task.VALIDATION_REQUIRED) or
+ field.get_validation(Task.VALIDATION_REQUIRED).lower().strip() != "true"):
+ continue # Don't include any fields that aren't specifically marked as required.
+ if field.has_property(Task.PROP_OPTIONS_REPEAT):
+ group = field.get_property(Task.PROP_OPTIONS_REPEAT)
+ if group not in form_data:
+ form_data[group] = [{},{},{}]
+ for i in range(3):
+ form_data[group][i][field.id] = WorkflowService.get_random_data_for_field(field, task)
+ else:
+ form_data[field.id] = WorkflowService.get_random_data_for_field(field, task)
+ if task.data is None:
+ task.data = {}
+ task.data.update(form_data)
+
+ @staticmethod
+ def get_random_data_for_field(field, task):
+ if field.type == "enum":
+ if len(field.options) > 0:
+ random_choice = random.choice(field.options)
+ if isinstance(random_choice, dict):
+ return random.choice(field.options)['id']
else:
- raise ApiError.from_task("invalid_enum", "You specified an enumeration field (%s),"
- " with no options" % field.id,
- task)
- elif field.type == "autocomplete":
- lookup_model = LookupService.get_lookup_model(task, field)
- if field.has_property(Task.PROP_LDAP_LOOKUP):
- form_data[field.id] = {
+ # fixme: why it is sometimes an EnumFormFieldOption, and other times not?
+ return random_choice.id ## Assume it is an EnumFormFieldOption
+ else:
+ raise ApiError.from_task("invalid_enum", "You specified an enumeration field (%s),"
+ " with no options" % field.id, task)
+ elif field.type == "autocomplete":
+ lookup_model = LookupService.get_lookup_model(task, field)
+ if field.has_property(Task.PROP_LDAP_LOOKUP): # All ldap records get the same person.
+ return {
"label": "dhf8r",
"value": "Dan Funk",
"data": {
@@ -126,32 +150,30 @@ class WorkflowService(object):
"email_address": "dhf8r@virginia.edu",
"department": "Depertment of Psychocosmographictology",
"affiliation": "Rousabout",
- "sponsor_type": "Staff"
+ "sponsor_type": "Staff"}
}
- }
- elif lookup_model:
- data = db.session.query(LookupDataModel).filter(
- LookupDataModel.lookup_file_model == lookup_model).limit(10).all()
- options = []
- for d in data:
- options.append({"id": d.value, "name": d.label})
- form_data[field.id] = random.choice(options)
- else:
- raise ApiError.from_task("invalid_autocomplete", "The settings for this auto complete field "
- "are incorrect: %s " % field.id, task)
- elif field.type == "long":
- form_data[field.id] = random.randint(1, 1000)
- elif field.type == 'boolean':
- form_data[field.id] = random.choice([True, False])
- elif field.type == 'file':
- form_data[field.id] = random.randint(1, 100)
- elif field.type == 'files':
- form_data[field.id] = random.randrange(1, 100)
+ elif lookup_model:
+ data = db.session.query(LookupDataModel).filter(
+ LookupDataModel.lookup_file_model == lookup_model).limit(10).all()
+ options = []
+ for d in data:
+ options.append({"id": d.value, "name": d.label})
+ return random.choice(options)
else:
- form_data[field.id] = WorkflowService._random_string()
- if task.data is None:
- task.data = {}
- task.data.update(form_data)
+ raise ApiError.from_task("invalid_autocomplete", "The settings for this auto complete field "
+ "are incorrect: %s " % field.id, task)
+ elif field.type == "long":
+ return random.randint(1, 1000)
+ elif field.type == 'boolean':
+ return random.choice([True, False])
+ elif field.type == 'file':
+            # fixme: produce something sensible for files.
+ return random.randint(1, 100)
+        # fixme: produce something sensible for files.
+ elif field.type == 'files':
+ return random.randrange(1, 100)
+ else:
+ return WorkflowService._random_string()
def __get_options(self):
pass
@@ -272,10 +294,11 @@ class WorkflowService(object):
template = Template(raw_doc)
return template.render(**spiff_task.data)
except jinja2.exceptions.TemplateError as ue:
-
- # return "Error processing template. %s" % ue.message
raise ApiError(code="template_error", message="Error processing template for task %s: %s" %
(spiff_task.task_spec.name, str(ue)), status_code=500)
+ except TypeError as te:
+ raise ApiError(code="template_error", message="Error processing template for task %s: %s" %
+ (spiff_task.task_spec.name, str(te)), status_code=500)
# TODO: Catch additional errors and report back.
@staticmethod
diff --git a/crc/static/bpmn/research_rampup/ResearchRampUpPlan.docx b/crc/static/bpmn/research_rampup/ResearchRampUpPlan.docx
index 0c555fdb..2ff0ed80 100644
Binary files a/crc/static/bpmn/research_rampup/ResearchRampUpPlan.docx and b/crc/static/bpmn/research_rampup/ResearchRampUpPlan.docx differ
diff --git a/crc/static/bpmn/research_rampup/exclusive_area_monitors.dmn b/crc/static/bpmn/research_rampup/exclusive_area_monitors.dmn
new file mode 100644
index 00000000..7a2251de
--- /dev/null
+++ b/crc/static/bpmn/research_rampup/exclusive_area_monitors.dmn
@@ -0,0 +1,54 @@
+
+
+
+
+
+
+ 'exclusive' in locals() and len(exclusive)
+
+
+
+
+ sum([1 for x in exclusive if x.get('ExclusiveSpaceAMComputingID',None) == None])
+
+
+
+
+ No exclusive spaces without Area Monitor
+
+ >0
+
+
+ 0
+
+
+ true
+
+
+
+ One or more exclusive space without an Area Monitor
+
+ >0
+
+
+ > 0
+
+
+ false
+
+
+
+ No exclusive spaces entered
+
+ 0
+
+
+
+
+
+ true
+
+
+
+
+
diff --git a/crc/static/bpmn/research_rampup/research_rampup.bpmn b/crc/static/bpmn/research_rampup/research_rampup.bpmn
index d3438d69..3f3b3f92 100644
--- a/crc/static/bpmn/research_rampup/research_rampup.bpmn
+++ b/crc/static/bpmn/research_rampup/research_rampup.bpmn
@@ -34,7 +34,7 @@ Schools are developing a process for the approval of ramp up requests and enforc
1. The Research Ramp-up Plan allows for one request to be entered for a single Principle Investigator. In the form that follows enter the Primary Investigator this request is for and other identifying information. The PI's School and Supervisor will be used as needed for approval routing.
2. Provide all available information in the forms that follow to provide an overview of where the research will resume, who will be involved, what supporting resources will be needed and what steps have been taken to assure compliance with [Research Ramp-up Guidance](https://research.virginia.edu/research-ramp-guidance).
3. After all forms have been completed, you will be presented with the option to create your Research Recovery Plan in Word format. Download the document and review it. If you see any corrections that need to be made, return to the corresponding form and make the correction.
-4. Once the generated Research Recovery Plan is finalize, proceed to the Plan Submission step to submit your plan for approval.
+4. Once the generated Research Recovery Plan is finalized, proceed to the Plan Submission step to submit your plan for approval.
SequenceFlow_05ja25wSequenceFlow_0h50bp3
@@ -47,6 +47,7 @@ Enter the following information for the PI submitting this request
+
@@ -60,6 +61,9 @@ Enter the following information for the PI submitting this request
+
+
+
@@ -68,6 +72,9 @@ Enter the following information for the PI submitting this request
+
+
+
@@ -77,6 +84,9 @@ Enter the following information for the PI submitting this request
+
+
+
@@ -85,6 +95,9 @@ Enter the following information for the PI submitting this request
+
+
+
@@ -93,6 +106,9 @@ Enter the following information for the PI submitting this request
+
+
+
@@ -101,6 +117,9 @@ Enter the following information for the PI submitting this request
+
+
+
@@ -109,21 +128,28 @@ Enter the following information for the PI submitting this request
+
+
+
+
+
+
+
-
+
@@ -133,34 +159,35 @@ Enter the following information for the PI submitting this request
-
- #### People for whom you are requesting access
-Provide information on all researchers you are requesting approval for reentry into the previously entered lab/research and/or office space(s) for conducting research on-Grounds. (If there are personnel already working in the space, include them).
+
+ #### Personnel for whom you are requesting access
+Provide information on all personnel you are requesting approval for reentry into the previously entered lab, workspace and/or office space(s) for conducting research on-Grounds. (If there are personnel already working in the space, include them).
**Note: no undergraduates will be allowed to work on-Grounds during Phase I.**
#### Exclusive Space previously entered
-{% for es in exclusive %}
-{{ es.ExclusiveSpaceRoomID + " " + es.ExclusiveSpaceBuilding.label }}
-{% else %}
-No exclusive space entered
-{% endfor %}
-
+{%+ for es in exclusive %}{{ es.ExclusiveSpaceRoomID + " " + es.ExclusiveSpaceBuilding.label }}{% if loop.last %}{% else %}, {% endif %}{% else %}No exclusive space entered{% endfor %}
#### Shared Space previously entered
-{% for ss in shared %}
-{{ ss.SharedSpaceRoomID + " " + ss.SharedSpaceBuilding.label }}
-{% else %}
-No shared space entered
-{% endfor %}
+{%+ for ss in shared %}{{ ss.SharedSpaceRoomID + " " + ss.SharedSpaceBuilding.label }}{% if loop.last %}{% else %}, {% endif %}{% else %}No shared space entered.{% endfor %}
+
+
+
+
+
+
+
+
+
+
@@ -179,9 +206,7 @@ No shared space entered
-
-
-
+
@@ -199,18 +224,13 @@ No shared space entered
-
-
-
-
-
- Flow_1eiud85
- Flow_1nbjr72
+ Flow_0hc1r8a
+ Flow_1yxaewj
- #### If applicable, provide a list of any [Core Resources](https://research.virginia.edu/research-core-resources) you will utilize space or instruments in and name/email of contact person in the core you have coordinated your plan with. (Core facility managers are responsible for developing a plan for their space)
+ If applicable, provide a list of any [Core Resources](https://research.virginia.edu/research-core-resources) utilization of space and/or instruments along with the name(s) and email(s) of contact person(s) in the core with whom you have coordinated your plan. (Core facility managers are responsible for developing a plan for their space)
@@ -224,16 +244,17 @@ No shared space entered
+
- Flow_15zy1q7
- Flow_12ie6w0
+ Flow_1n69wsr
+ Flow_13pusfu
- #### End of Workflow
-Place instruction here,
+ #### End of Research Ramp-up Plan Workflow
+Thank you for participating,Flow_05w8yd6
@@ -250,11 +271,12 @@ When your Research Ramp-up Plan is complete and ready to submit for review and a
-
+
+
@@ -266,6 +288,9 @@ When your Research Ramp-up Plan is complete and ready to submit for review and a
+
+
+
@@ -277,8 +302,9 @@ When your Research Ramp-up Plan is complete and ready to submit for review and a
-
+
+
@@ -304,6 +330,7 @@ When your Research Ramp-up Plan is complete and ready to submit for review and a
+
@@ -318,40 +345,24 @@ When your Research Ramp-up Plan is complete and ready to submit for review and a
- Flow_19xeq76
- Flow_16342pm
+ Flow_0o4tg9g
+ Flow_1n69wsr
-
- Flow_1v7r1tg
- Flow_19xeq76
- Flow_0qf2y84
- Flow_15zy1q7
- Flow_0ya8hw8
-
-
- Flow_0tk64b6
- Flow_12ie6w0
- Flow_0zz2hbq
- Flow_16342pm
- Flow_1eiud85
-
-
-
-
- #### Space managed exclusively by {{ PIComputingID.label }}
-Submit one entry for each space the PI is the exclusive investigator. If all space is shared with one or more other investigators, Click Save to skip this section and proceed to the Shared Space section.
+
+Submit one entry for each space the PI is the exclusive investigator. If all space is shared with one or more other investigators, click Save to skip this section and proceed to the Shared Space section.
-
+
-
+
+
@@ -363,10 +374,13 @@ Submit one entry for each space the PI is the exclusive investigator. If all sp
+
+
+
-
+
@@ -374,19 +388,21 @@ Submit one entry for each space the PI is the exclusive investigator. If all sp
+
-
+
-
+
+
@@ -410,21 +426,19 @@ Submit one entry for each space the PI is the exclusive investigator. If all sp
-
+
+
- Flow_0qf2y84
- Flow_0tk64b6
+ Flow_0uc4o6c
+ Flow_0o4tg9g
-
-
-
@@ -447,12 +461,9 @@ Submit one entry for each space the PI is the exclusive investigator. If all sp
- Flow_0ya8hw8
- Flow_0zz2hbq
+ Flow_13pusfu
+ Flow_0hc1r8a
-
-
- #### Distancing requirements:
Maintain social distancing by designing space between people to be at least 9 feet during prolonged work which will be accomplished by restricting the number of people in the lab to a density of ~250 sq. ft. /person in lab areas. When moving around, a minimum of 6 feet social distancing is required. Ideally only one person per lab bench and not more than one person can work at the same time in the same bay.
@@ -470,32 +481,23 @@ Maintain social distancing by designing space between people to be at least 9 fe
- Flow_0p2r1bo
- Flow_0tz5c2v
+ Flow_1itd8db
+ Flow_1lo964l
-
-
- Flow_1nbjr72
- Flow_0p2r1bo
- Flow_0mkh1wn
- Flow_1yqkpgu
- Flow_1c6m5wv
-
-
- Describe physical work arrangements for each lab. Show schematic of the lab and space organization to meet the distancing guidelines (see key safety expectations for ramp-up).
+ Describe physical work arrangements for each lab, workspace and/or office space previously entered. Show schematic of the space organization to meet the distancing guidelines (see key safety expectations for ramp-up).
- Show gross dimensions, location of desks, and equipment in blocks (not details) that show available space for work and foot traffic.
- Indicate total square footage for every lab/space that you are requesting adding personnel to in this application. If you would like help obtaining a floor plan for your lab, your department or deans office can help. You can also create a hand drawing/block diagram of your space and the location of objects on a graph paper.
- Upload your physical layout and workspace organization in the form of a jpg image or a pdf file. This can be hand-drawn or actual floor plans.
- Show and/or describe designated work location for each member (during their shift) in the lab when multiple members are present at a time to meet the distancing guidelines.
-- Provide a foot traffic plan (on the schematic) to indicate how people can move around while maintaining distancing requirements. This can be a freeform sketch on your floor plan showing where foot traffic can occur in your lab, and conditions, if any, to ensure distancing at all times. (e.g., direction to walk around a lab bench, rules for using shared equipment located in the lab, certain areas of lab prohibited from access, etc.).
-- Provide your initial weekly laboratory schedule (see excel template) for all members that you are requesting access for, indicating all shifts as necessary. If schedule changes, please submit your revised schedule through the web portal.
+- Provide a foot traffic plan (on the schematic) to indicate how people can move around while maintaining distancing requirements. This can be a freeform sketch on your floor plan showing where foot traffic can occur in your lab, and conditions, if any, to ensure distancing at all times. (e.g., direction to walk around a lab bench, rules for using shared equipment located in the lab, certain areas of lab prohibited from access, etc.).
+
@@ -504,11 +506,10 @@ Maintain social distancing by designing space between people to be at least 9 fe
- Flow_0mkh1wn
- Flow_0zrsh65
+ Flow_1lo964l
+ Flow_0wgdxa6
-
-
+ #### Health Safety Requirements:
Use the EHS [Lab Safety Plan During COVID 19 template](https://www.google.com/url?q=http://ehs.virginia.edu/files/Lab-Safety-Plan-During-COVID-19.docx&source=gmail&ust=1590687968958000&usg=AFQjCNE83uGDFtxGkKaxjuXGhTocu-FDmw) to create and upload a copy of your laboratory policy statement to all members which includes at a minimum the following details:
- Laboratory face covering rules, use of other PPE use as required
@@ -519,13 +520,12 @@ Use the EHS [Lab Safety Plan During COVID 19 template](https://www.google.com/ur
- Where and how to obtain PPE including face covering
-
+
- Flow_1yqkpgu
- Flow_1ox5nv6
+ Flow_0wgdxa6
+ Flow_0judgmp
-
@@ -569,42 +569,39 @@ Use the EHS [Lab Safety Plan During COVID 19 template](https://www.google.com/ur
- Flow_1c6m5wv
- Flow_0qbi47d
+ Flow_0judgmp
+ Flow_11uqavk#### By submitting this request, you understand that every member listed in this form for on Grounds laboratory access will:
-- Complete online COVID awareness & precaution training module (link forthcoming-May 25)
+- Complete [online COVID awareness & precaution training module](https://researchcompliance.web.virginia.edu/training_html5/module_content/154/index.cfm)
- Complete daily health acknowledgement form signed (electronically) –email generated daily to those listed on your plan for access to on Grounds lab/research space
- Fill out daily work attendance log for all lab members following your school process to check-in and out of work each day.Flow_08njvviFlow_0j4rs82
-
-
-
-
-
-
- Flow_0zrsh65
- Flow_0tz5c2v
- Flow_1ox5nv6
- Flow_0qbi47d
- Flow_06873ag
-
-
- Flow_06873ag
+
+ #### Script Task
+
+
+This step is internal to the system and does not require any user interaction
+ Flow_11uqavkFlow_0aqgwvuCompleteTemplate ResearchRampUpPlan.docx RESEARCH_RAMPUP
-
+
-
-
+ #### Approval Process
The Research Ramp-up Plan and associated documents will be reviewed by{{ " " + ApprvlApprvrName1 }}{{ '.' if ApprvlApprvrName2 == 'n/a' else ' and ' + ApprvlApprvrName2 + '.' }} While waiting for approval, be sure that all required training has been completed and supplies secured. When the approval email notification is received, confirming the three questions below will allow you to proceed.
+{%+ set ns = namespace() %}{% set ns.exclusive = 0 %}{% set ns.shared = 0 %}{% for es in exclusive %}{% if es.ExclusiveSpaceAMComputingID is none %}{% set ns.exclusive = ns.exclusive + 1 %}{% endif %}{% endfor %}{% for ss in shared %}{% if ss.SharedSpaceAMComputingID is none %}{% set ns.shared = ns.shared + 1 %}{% endif %}{% endfor %}
+
+
+#### Test
+Missing Exclusive: {{ ns.exclusive }}
+Missing Shared: {{ ns.shared }}
If a rejection notification is received, go back to the first step that needs to be addressed and step through each subsequent form from that point.
@@ -613,28 +610,102 @@ If a rejection notification is received, go back to the first step that needs to
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
- Flow_07ge8uf
- Flow_1ufh44h
+ SequenceFlow_0qc39tw
+ #### Business Rule Task
+
+
+This step is internal to the system and does not require any user interactionFlow_1e2qi9sFlow_08njvvi
@@ -653,254 +724,265 @@ If notification is received that the Research Ramp-up Plan approval process is n
Notify the Area Monitor for
-#### Exclusive Space Area Monitors
-{% for es in exclusive %}
-{{ es.ExclusiveSpaceAMComputingID.data.display_name }}
-{% else %}
-No exclusive space entered
-{% endfor %}
+#### Exclusive Space previously entered
+{%+ for es in exclusive %}{{ es.ExclusiveSpaceRoomID + " " + es.ExclusiveSpaceBuilding.label + " - " }}{% if es.ExclusiveSpaceAMComputingID is none %}No Area Monitor entered{% else %}{{ es.ExclusiveSpaceAMComputingID.label }}{% endif %}{% if loop.last %}{% else %}, {% endif %}{% else %}No exclusive space entered{% endfor %}
-#### Shared Space Area Monitors
-{% for ss in shared %}
-{{ ss.SharedSpaceAMComputingID.data.display_name }}
-{% else %}
-No shared space entered
-{% endfor %}
- Flow_1ufh44h
+
+
+#### Shared Space previously entered
+{%+ for ss in shared %}{{ ss.SharedSpaceRoomID + " " + ss.SharedSpaceBuilding.label }}{% if ss.SharedSpaceAMComputingID is none %}No Area Monitor entered{% else %}{{ ss.SharedSpaceAMComputingID.label }}{% endif %}{% if loop.last %}{% else %}, {% endif %}{% else %}No shared space entered.{% endfor %}
+ SequenceFlow_0qc39twFlow_0cpmvcw
+ #### Script Task
+
+
+This step is internal to the system and does not require any user interactionFlow_0j4rs82Flow_07ge8ufRequestApproval ApprvlApprvr1 ApprvlApprvr2
-
+ #### Script Task
+
+
+This step is internal to the system and does not require any user interactionFlow_16y8glw
- Flow_1v7r1tg
+ Flow_0uc4o6cUpdateStudy title:PIComputingID.label pi:PIComputingID.value
+
+ #### Weekly Personnel Schedule(s)
+Provide initial weekly schedule(s) for the PI and all personnel for whom access has been requested, indicating each space they will be working in and all shifts, if applicable.
+
+##### Personnel and spaces they will work in previously entered
+{%+ for p in personnel %}{{ p.PersonnelComputingID.label + " - " + p.PersonnelSpace }}{% if loop.last %}{% else %}; {% endif %}{% endfor %}
+
+**Note:** If any schedule changes after approval, please re-submit revised schedule(s) here for re-approval.
+
+
+
+
+
+
+
+
+
+
+
+
+ Flow_1yxaewj
+ Flow_1itd8db
+
+
+
+
+
+
+
+
+
+
+
+
+
+ #### Business Rule Task
+
+
+
+
+This step is internal to the system and does not require any user interaction
+ Flow_07ge8uf
+ Flow_0peeyne
+
+
+
+ #### Business Rule Task
+
+
+
+
+This step is internal to the system and does not require any user interaction
+ Flow_0peeyne
+ Flow_0tqna2m
+
+
+
-
-
-
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
-
-
+
+
-
-
+
+
-
-
-
-
-
-
+
+
-
-
+
+
-
-
+
+
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+
+
-
-
-
-
-
-
-
-
-
-
-
-
+
+
-
-
+
+
-
-
+
+
-
-
+
+
-
+
-
+
-
+
-
-
+
+
-
+
-
+
-
+
-
-
-
-
-
-
-
+
-
+
-
+
-
-
-
-
+
-
+
-
+
-
+
-
+
-
-
-
-
-
+
+
-
+
-
+
-
+
-
+
-
+
-
+
+
+
+
+
+
+
+
+
+
diff --git a/crc/static/bpmn/research_rampup/shared_area_monitors.dmn b/crc/static/bpmn/research_rampup/shared_area_monitors.dmn
new file mode 100644
index 00000000..f0746f42
--- /dev/null
+++ b/crc/static/bpmn/research_rampup/shared_area_monitors.dmn
@@ -0,0 +1,54 @@
+
+
+
+
+
+
+ 'shared' in locals() and len(shared)
+
+
+
+
+ sum([1 for x in shared if x.get('SharedSpaceAMComputingID',None) == None])
+
+
+
+
+ No shared spaces without Area Monitor
+
+ >0
+
+
+ 0
+
+
+ true
+
+
+
+ One or more shared space without an Area Monitor
+
+ >0
+
+
+ > 0
+
+
+ false
+
+
+
+ No shared spaces entered
+
+ 0
+
+
+
+
+
+ true
+
+
+
+
+
diff --git a/crc/static/reference/rrt_documents.xlsx b/crc/static/reference/rrt_documents.xlsx
index 4e1663b2..cb09fd0f 100644
Binary files a/crc/static/reference/rrt_documents.xlsx and b/crc/static/reference/rrt_documents.xlsx differ
diff --git a/tests/base_test.py b/tests/base_test.py
index f8ffd1ca..f0418343 100644
--- a/tests/base_test.py
+++ b/tests/base_test.py
@@ -115,15 +115,17 @@ class BaseTest(unittest.TestCase):
self.assertIsNotNone(user_model.display_name)
return dict(Authorization='Bearer ' + user_model.encode_auth_token().decode())
- def load_example_data(self, use_crc_data=False):
+ def load_example_data(self, use_crc_data=False, use_rrt_data=False):
"""use_crc_data will cause this to load the mammoth collection of documents
- we built up developing crc, otherwise it depends on a small setup for
- running tests."""
+ we built up developing crc, use_rrt_data will do the same for the rrt project,
+ otherwise it depends on a small setup for running tests."""
from example_data import ExampleDataLoader
ExampleDataLoader.clean_db()
- if(use_crc_data):
+ if use_crc_data:
ExampleDataLoader().load_all()
+ elif use_rrt_data:
+ ExampleDataLoader().load_rrt()
else:
ExampleDataLoader().load_test_data()
diff --git a/tests/data/decision_table/decision_table.bpmn b/tests/data/decision_table/decision_table.bpmn
index 796233e5..82bcb385 100644
--- a/tests/data/decision_table/decision_table.bpmn
+++ b/tests/data/decision_table/decision_table.bpmn
@@ -1,5 +1,5 @@
-
+SequenceFlow_1ma1wxb
@@ -8,7 +8,11 @@
-
+
+
+
+
+ SequenceFlow_1ma1wxb
@@ -26,38 +30,37 @@ Based on the information you provided (Ginger left {{num_presents}}, we recommen
## {{message}}
-We hope you both have an excellent day!
-
+We hope you both have an excellent day!
SequenceFlow_0grui6f
-
-
-
-
-
-
+
+
+
-
-
-
+
+
+
+
+
+
+
+
+
+
-
-
-
-
diff --git a/tests/data/exclusive_gateway/exclusive_gateway.bpmn b/tests/data/exclusive_gateway/exclusive_gateway.bpmn
index 1c7e55fe..8467c954 100644
--- a/tests/data/exclusive_gateway/exclusive_gateway.bpmn
+++ b/tests/data/exclusive_gateway/exclusive_gateway.bpmn
@@ -8,7 +8,11 @@
-
+
+
+
+
+ SequenceFlow_1pnq3kg
diff --git a/tests/data/random_fact/random_fact.bpmn b/tests/data/random_fact/random_fact.bpmn
index 81f355c3..628f1bd4 100644
--- a/tests/data/random_fact/random_fact.bpmn
+++ b/tests/data/random_fact/random_fact.bpmn
@@ -1,5 +1,5 @@
-
+SequenceFlow_0c7wlth
@@ -108,6 +108,9 @@ Autoconverted link https://github.com/nodeca/pica (enable linkify to see)
+
+
+
@@ -121,8 +124,7 @@ Autoconverted link https://github.com/nodeca/pica (enable linkify to see)
SequenceFlow_0641sh6
-
-
+
@@ -155,6 +157,18 @@ Your random fact is:
+
+
+
+
+
+
+
+
+
+
+
+
@@ -164,35 +178,23 @@ Your random fact is:
+
+
+
+
+
+
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
diff --git a/tests/data/repeat_form/repeat_form.bpmn b/tests/data/repeat_form/repeat_form.bpmn
new file mode 100644
index 00000000..f0e3f922
--- /dev/null
+++ b/tests/data/repeat_form/repeat_form.bpmn
@@ -0,0 +1,47 @@
+
+
+
+
+ SequenceFlow_0lvudp8
+
+
+
+ SequenceFlow_02vev7n
+
+
+
+
+
+
+
+
+
+
+
+
+ SequenceFlow_0lvudp8
+ SequenceFlow_02vev7n
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/tests/data/required_fields/required_fields.bpmn b/tests/data/required_fields/required_fields.bpmn
new file mode 100644
index 00000000..7612f69b
--- /dev/null
+++ b/tests/data/required_fields/required_fields.bpmn
@@ -0,0 +1,48 @@
+
+
+
+
+ SequenceFlow_0lvudp8
+
+
+
+ SequenceFlow_02vev7n
+
+
+
+
+
+
+
+
+
+
+
+
+
+ SequenceFlow_0lvudp8
+ SequenceFlow_02vev7n
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/tests/test_approvals_api.py b/tests/test_approvals_api.py
index 393831e7..b9b6d226 100644
--- a/tests/test_approvals_api.py
+++ b/tests/test_approvals_api.py
@@ -45,7 +45,7 @@ class TestApprovals(BaseTest):
study=self.study,
workflow=self.workflow,
approver_uid='arc93',
- status=ApprovalStatus.WAITING.value,
+ status=ApprovalStatus.PENDING.value,
version=1
)
session.add(self.approval)
@@ -54,7 +54,7 @@ class TestApprovals(BaseTest):
study=self.study,
workflow=self.workflow,
approver_uid='dhf8r',
- status=ApprovalStatus.WAITING.value,
+ status=ApprovalStatus.PENDING.value,
version=1
)
session.add(self.approval_2)
@@ -98,7 +98,7 @@ class TestApprovals(BaseTest):
data = dict(APPROVAL_PAYLOAD)
data['id'] = approval_id
- self.assertEqual(self.approval.status, ApprovalStatus.WAITING.value)
+ self.assertEqual(self.approval.status, ApprovalStatus.PENDING.value)
rv = self.app.put(f'/v1.0/approval/{approval_id}',
content_type="application/json",
diff --git a/tests/test_study_api.py b/tests/test_study_api.py
index 7282ac10..61e42543 100644
--- a/tests/test_study_api.py
+++ b/tests/test_study_api.py
@@ -1,5 +1,6 @@
import json
from tests.base_test import BaseTest
+
from datetime import datetime, timezone
from unittest.mock import patch
@@ -8,8 +9,9 @@ from crc.models.protocol_builder import ProtocolBuilderStatus, \
ProtocolBuilderStudySchema
from crc.models.stats import TaskEventModel
from crc.models.study import StudyModel, StudySchema
-from crc.models.workflow import WorkflowSpecModel, WorkflowModel, WorkflowSpecCategoryModel
-from crc.services.protocol_builder import ProtocolBuilderService
+from crc.models.workflow import WorkflowSpecModel, WorkflowModel
+from crc.services.file_service import FileService
+from crc.services.workflow_processor import WorkflowProcessor
class TestStudyApi(BaseTest):
@@ -68,6 +70,34 @@ class TestStudyApi(BaseTest):
self.assertEqual(0, workflow["total_tasks"])
self.assertEqual(0, workflow["completed_tasks"])
+ def test_get_study_has_details_about_files(self):
+
+ # Set up the study and attach a file to it.
+ self.load_example_data()
+ self.create_reference_document()
+ workflow = self.create_workflow('file_upload_form')
+ processor = WorkflowProcessor(workflow)
+ task = processor.next_task()
+ irb_code = "UVACompl_PRCAppr" # The first file referenced in pb required docs.
+ FileService.add_workflow_file(workflow_id=workflow.id,
+ name="anything.png", content_type="png",
+ binary_data=b'1234', irb_doc_code=irb_code)
+
+ api_response = self.app.get('/v1.0/study/%i' % workflow.study_id,
+ headers=self.logged_in_headers(), content_type="application/json")
+ self.assert_success(api_response)
+ study = StudySchema().loads(api_response.get_data(as_text=True))
+ self.assertEquals(1, len(study.files))
+ self.assertEquals("UVA Compliance/PRC Approval", study.files[0]["category"])
+ self.assertEquals("Cancer Center's PRC Approval Form", study.files[0]["description"])
+ self.assertEquals("UVA Compliance/PRC Approval.png", study.files[0]["download_name"])
+
+ # TODO: WRITE A TEST FOR STUDY FILES
+
+ def test_get_study_has_details_about_approvals(self):
+ # TODO: WRITE A TEST FOR STUDY APPROVALS
+ pass
+
def test_add_study(self):
self.load_example_data()
study = self.add_test_study()
diff --git a/tests/test_workflow_processor.py b/tests/test_workflow_processor.py
index 36d23755..b3f6c374 100644
--- a/tests/test_workflow_processor.py
+++ b/tests/test_workflow_processor.py
@@ -25,7 +25,7 @@ class TestWorkflowProcessor(BaseTest):
def _populate_form_with_random_data(self, task):
api_task = WorkflowService.spiff_task_to_api_task(task, add_docs_and_forms=True)
- WorkflowService.populate_form_with_random_data(task, api_task)
+ WorkflowService.populate_form_with_random_data(task, api_task, required_only=False)
def get_processor(self, study_model, spec_model):
workflow_model = StudyService._create_workflow_model(study_model, spec_model)
diff --git a/tests/test_workflow_service.py b/tests/test_workflow_service.py
index 281d1756..f509f642 100644
--- a/tests/test_workflow_service.py
+++ b/tests/test_workflow_service.py
@@ -77,5 +77,5 @@ class TestWorkflowService(BaseTest):
processor.do_engine_steps()
task = processor.next_task()
task_api = WorkflowService.spiff_task_to_api_task(task, add_docs_and_forms=True)
- WorkflowService.populate_form_with_random_data(task, task_api)
+ WorkflowService.populate_form_with_random_data(task, task_api, required_only=False)
self.assertTrue(isinstance(task.data["sponsor"], dict))
\ No newline at end of file
diff --git a/tests/test_workflow_spec_validation_api.py b/tests/test_workflow_spec_validation_api.py
index 9e581874..e2f652d9 100644
--- a/tests/test_workflow_spec_validation_api.py
+++ b/tests/test_workflow_spec_validation_api.py
@@ -3,17 +3,16 @@ from unittest.mock import patch
from tests.base_test import BaseTest
-from crc.services.protocol_builder import ProtocolBuilderService
from crc import session, app
from crc.api.common import ApiErrorSchema
from crc.models.protocol_builder import ProtocolBuilderStudySchema
from crc.models.workflow import WorkflowSpecModel
+from crc.services.workflow_service import WorkflowService
class TestWorkflowSpecValidation(BaseTest):
def validate_workflow(self, workflow_name):
- self.load_example_data()
spec_model = self.load_test_spec(workflow_name)
rv = self.app.get('/v1.0/workflow-specification/%s/validate' % spec_model.id, headers=self.logged_in_headers())
self.assert_success(rv)
@@ -22,6 +21,7 @@ class TestWorkflowSpecValidation(BaseTest):
def test_successful_validation_of_test_workflows(self):
app.config['PB_ENABLED'] = False # Assure this is disabled.
+ self.load_example_data()
self.assertEqual(0, len(self.validate_workflow("parallel_tasks")))
self.assertEqual(0, len(self.validate_workflow("decision_table")))
self.assertEqual(0, len(self.validate_workflow("docx")))
@@ -49,6 +49,13 @@ class TestWorkflowSpecValidation(BaseTest):
self.load_example_data(use_crc_data=True)
app.config['PB_ENABLED'] = True
+ self.validate_all_loaded_workflows()
+
+ def test_successful_validation_of_rrt_workflows(self):
+ self.load_example_data(use_rrt_data=True)
+ self.validate_all_loaded_workflows()
+
+ def validate_all_loaded_workflows(self):
workflows = session.query(WorkflowSpecModel).all()
errors = []
for w in workflows:
@@ -59,28 +66,54 @@ class TestWorkflowSpecValidation(BaseTest):
errors.extend(ApiErrorSchema(many=True).load(json_data))
self.assertEqual(0, len(errors), json.dumps(errors))
+
def test_invalid_expression(self):
+ self.load_example_data()
errors = self.validate_workflow("invalid_expression")
- self.assertEqual(1, len(errors))
+ self.assertEqual(2, len(errors))
self.assertEqual("workflow_execution_exception", errors[0]['code'])
self.assertEqual("ExclusiveGateway_003amsm", errors[0]['task_id'])
self.assertEqual("Has Bananas Gateway", errors[0]['task_name'])
self.assertEqual("invalid_expression.bpmn", errors[0]['file_name'])
- self.assertEqual('ExclusiveGateway_003amsm: Error evaluating expression \'this_value_does_not_exist==true\', '
+ self.assertEqual('When populating all fields ... ExclusiveGateway_003amsm: Error evaluating expression \'this_value_does_not_exist==true\', '
'name \'this_value_does_not_exist\' is not defined', errors[0]["message"])
+ self.assertIsNotNone(errors[0]['task_data'])
+ self.assertIn("has_bananas", errors[0]['task_data'])
def test_validation_error(self):
+ self.load_example_data()
errors = self.validate_workflow("invalid_spec")
- self.assertEqual(1, len(errors))
+ self.assertEqual(2, len(errors))
self.assertEqual("workflow_validation_error", errors[0]['code'])
self.assertEqual("StartEvent_1", errors[0]['task_id'])
self.assertEqual("invalid_spec.bpmn", errors[0]['file_name'])
def test_invalid_script(self):
+ self.load_example_data()
errors = self.validate_workflow("invalid_script")
- self.assertEqual(1, len(errors))
+ self.assertEqual(2, len(errors))
self.assertEqual("workflow_execution_exception", errors[0]['code'])
self.assertTrue("NoSuchScript" in errors[0]['message'])
self.assertEqual("Invalid_Script_Task", errors[0]['task_id'])
self.assertEqual("An Invalid Script Reference", errors[0]['task_name'])
self.assertEqual("invalid_script.bpmn", errors[0]['file_name'])
+
+ def test_repeating_sections_correctly_populated(self):
+ self.load_example_data()
+ spec_model = self.load_test_spec('repeat_form')
+ final_data = WorkflowService.test_spec(spec_model.id)
+ self.assertIsNotNone(final_data)
+ self.assertIn('cats', final_data)
+
+ def test_required_fields(self):
+ self.load_example_data()
+ spec_model = self.load_test_spec('required_fields')
+ final_data = WorkflowService.test_spec(spec_model.id)
+ self.assertIsNotNone(final_data)
+ self.assertIn('string_required', final_data)
+ self.assertIn('string_not_required', final_data)
+
+ final_data = WorkflowService.test_spec(spec_model.id, required_only=True)
+ self.assertIsNotNone(final_data)
+ self.assertIn('string_required', final_data)
+ self.assertNotIn('string_not_required', final_data)