Merge branch 'dev' into 367-Call-Activity

# Conflicts:
#	Pipfile.lock
This commit is contained in:
Kelly McDonald 2021-07-29 09:28:21 -04:00
commit 41ad7935f8
68 changed files with 2489 additions and 418 deletions

View File

@ -42,7 +42,6 @@ werkzeug = "*"
xlrd = "*"
xlsxwriter = "*"
pygithub = "*"
python-levenshtein = "*"
apscheduler = "*"
[requires]

1226
Pipfile.lock generated Normal file

File diff suppressed because it is too large Load Diff

View File

@ -15,6 +15,7 @@ JSON_SORT_KEYS = False # CRITICAL. Do not sort the data when returning values
API_TOKEN = environ.get('API_TOKEN', default = 'af95596f327c9ecc007b60414fc84b61')
NAME = "CR Connect Workflow"
SERVER_NAME = environ.get('SERVER_NAME', default="localhost:5000")
DEFAULT_PORT = "5000"
FLASK_PORT = environ.get('PORT0') or environ.get('FLASK_PORT', default=DEFAULT_PORT)
FRONTEND = environ.get('FRONTEND', default="localhost:4200")
@ -63,6 +64,7 @@ PB_REQUIRED_DOCS_URL = environ.get('PB_REQUIRED_DOCS_URL', default=PB_BASE_URL +
PB_STUDY_DETAILS_URL = environ.get('PB_STUDY_DETAILS_URL', default=PB_BASE_URL + "study?studyid=%i")
PB_SPONSORS_URL = environ.get('PB_SPONSORS_URL', default=PB_BASE_URL + "sponsors?studyid=%i")
PB_IRB_INFO_URL = environ.get('PB_IRB_INFO_URL', default=PB_BASE_URL + "current_irb_info/%i")
PB_CHECK_STUDY_URL = environ.get('PB_CHECK_STUDY_URL', default=PB_BASE_URL + "check_study/%i")
# Ldap Configuration
LDAP_URL = environ.get('LDAP_URL', default="ldap.virginia.edu").strip('/') # No trailing slash or http://

View File

@ -30,6 +30,3 @@ print('TESTING = ', TESTING)
#Use the mock ldap.
LDAP_URL = 'mock'
from config.default import DEFAULT_PORT
SERVER_NAME = f'localhost:{DEFAULT_PORT}'

View File

@ -55,7 +55,7 @@ def process_waiting_tasks():
with app.app_context():
WorkflowService.do_waiting()
scheduler.add_job(process_waiting_tasks,'interval',minutes=5)
scheduler.add_job(process_waiting_tasks,'interval',minutes=1)
scheduler.start()

View File

@ -82,7 +82,7 @@ paths:
schema :
type : integer
get:
operationId: crc.api.file.get_document_directory
operationId: crc.api.document.get_document_directory
summary: Returns a directory of all files for study in a nested structure
tags:
- Document Categories
@ -602,7 +602,7 @@ paths:
description: The unique id of an existing workflow specification to validate.
schema:
type: string
- name: validate_study_id
- name: study_id
in: query
required: false
description: Optional id of study to test under different scenarios

View File

@ -10,7 +10,9 @@ import sentry_sdk
class ApiError(Exception):
def __init__(self, code, message, status_code=400,
file_name="", task_id="", task_name="", tag="", task_data = {}):
file_name="", task_id="", task_name="", tag="", task_data=None, error_type="", line_number=0, offset=0):
if task_data is None:
task_data = {}
self.status_code = status_code
self.code = code # a short consistent string describing the error.
self.message = message # A detailed message that provides more information.
@ -18,8 +20,11 @@ class ApiError(Exception):
self.task_name = task_name or "" # OPTIONAL: The name of the task in the BPMN Diagram.
self.file_name = file_name or "" # OPTIONAL: The file that caused the error.
self.tag = tag or "" # OPTIONAL: The XML Tag that caused the issue.
self.task_data = task_data or "" # OPTIONAL: A snapshot of data connected to the task when error ocurred.
if hasattr(g,'user'):
self.task_data = task_data or "" # OPTIONAL: A snapshot of data connected to the task when error occurred.
self.line_number = line_number
self.offset = offset
self.error_type = error_type
if hasattr(g, 'user'):
user = g.user.uid
else:
user = 'Unknown'
@ -29,12 +34,16 @@ class ApiError(Exception):
Exception.__init__(self, self.message)
@classmethod
def from_task(cls, code, message, task, status_code=400):
def from_task(cls, code, message, task, status_code=400, line_number=0, offset=0, error_type="", error_line=""):
"""Constructs an API Error with details pulled from the current task."""
instance = cls(code, message, status_code=status_code)
instance.task_id = task.task_spec.name or ""
instance.task_name = task.task_spec.description or ""
instance.file_name = task.workflow.spec.file or ""
instance.line_number = line_number
instance.offset = offset
instance.error_type = error_type
instance.error_line = error_line
# Fixme: spiffworkflow is doing something weird where task ends up referenced in the data in some cases.
if "task" in task.data:
@ -61,7 +70,11 @@ class ApiError(Exception):
so consolidating the code, and doing the best things
we can with the data we have."""
if isinstance(exp, WorkflowTaskExecException):
return ApiError.from_task(code, message, exp.task)
return ApiError.from_task(code, message, exp.task, line_number=exp.line_number,
offset=exp.offset,
error_type=exp.exception.__class__.__name__,
error_line=exp.error_line)
else:
return ApiError.from_task_spec(code, message, exp.sender)
@ -69,7 +82,7 @@ class ApiError(Exception):
class ApiErrorSchema(ma.Schema):
class Meta:
fields = ("code", "message", "workflow_name", "file_name", "task_name", "task_id",
"task_data", "task_user", "hint")
"task_data", "task_user", "hint", "line_number", "offset", "error_type", "error_line")
@app.errorhandler(ApiError)

18
crc/api/document.py Normal file
View File

@ -0,0 +1,18 @@
from crc.models.api_models import DocumentDirectorySchema
from crc.models.file import File
from crc.services.document_service import DocumentService
from crc.services.file_service import FileService
from crc.services.lookup_service import LookupService
def get_document_directory(study_id, workflow_id=None):
    """Return the nested document directory for a study.

    Files uploaded to the study are arranged hierarchically according to the
    category columns defined in the document dictionary spreadsheet.

    :param study_id: id of the study whose files should be listed.
    :param workflow_id: optional workflow id; directories containing files
        from this workflow are marked expanded by DocumentService.
    :return: serialized list of DocumentDirectory entries.
    """
    doc_dict = DocumentService.get_dictionary()
    models = FileService.get_files_for_study(study_id=study_id)
    # Lazily convert each stored model into its API-facing File object.
    api_files = (
        File.from_models(m, FileService.get_file_data(m.id), doc_dict)
        for m in models
    )
    tree = DocumentService.get_directory(doc_dict, api_files, workflow_id)
    return DocumentDirectorySchema(many=True).dump(tree)

View File

@ -7,71 +7,15 @@ from flask import send_file
from crc import session
from crc.api.common import ApiError
from crc.api.user import verify_token
from crc.models.api_models import DocumentDirectory, DocumentDirectorySchema
from crc.models.file import FileSchema, FileModel, File, FileModelSchema, FileDataModel, FileType
from crc.models.workflow import WorkflowSpecModel
from crc.services.document_service import DocumentService
from crc.services.file_service import FileService
def ensure_exists(output, categories, expanded):
"""
This is a recursive function, it expects a list of
levels with a file object at the end (kinda like duck,duck,duck,goose)
for each level, it makes sure that level is already in the structure and if it is not
it will add it
function terminates upon getting an entry that is a file object ( or really anything but string)
"""
current_item = categories[0]
found = False
if isinstance(current_item, str):
for item in output:
if item.level == current_item:
found = True
item.filecount = item.filecount + 1
item.expanded = expanded | item.expanded
ensure_exists(item.children, categories[1:], expanded)
if not found:
new_level = DocumentDirectory(level=current_item)
new_level.filecount = 1
new_level.expanded = expanded
output.append(new_level)
ensure_exists(new_level.children, categories[1:], expanded)
else:
new_level = DocumentDirectory(file=current_item)
new_level.expanded = expanded
output.append(new_level)
def get_document_directory(study_id, workflow_id=None):
"""
return a nested list of files arranged according to the category hirearchy
defined in the doc dictionary
"""
output = []
doc_dict = FileService.get_doc_dictionary()
file_models = FileService.get_files_for_study(study_id=study_id)
files = (to_file_api(model) for model in file_models)
for file in files:
if file.irb_doc_code in doc_dict:
doc_code = doc_dict[file.irb_doc_code]
else:
doc_code = {'category1': "Unknown", 'category2': '', 'category3': ''}
if workflow_id:
expand = file.workflow_id == int(workflow_id)
else:
expand = False
print(expand)
categories = [x for x in [doc_code['category1'],doc_code['category2'],doc_code['category3'],file] if x != '']
ensure_exists(output, categories, expanded=expand)
return DocumentDirectorySchema(many=True).dump(output)
def to_file_api(file_model):
"""Converts a FileModel object to something we can return via the api"""
return File.from_models(file_model, FileService.get_file_data(file_model.id),
FileService.get_doc_dictionary())
DocumentService.get_dictionary())
def get_files(workflow_spec_id=None, workflow_id=None, form_field_key=None,study_id=None):

View File

@ -94,7 +94,7 @@ def user_studies():
"""Returns all the studies associated with the current user. """
user = UserService.current_user(allow_admin_impersonate=True)
StudyService.synch_with_protocol_builder_if_enabled(user)
studies = StudyService.get_studies_for_user(user)
studies = StudyService().get_studies_for_user(user)
results = StudySchema(many=True).dump(studies)
return results

View File

@ -103,22 +103,15 @@ def drop_workflow_spec_library(spec_id,library_id):
libraries: WorkflowLibraryModel = session.query(WorkflowLibraryModel).filter_by(workflow_spec_id=spec_id).all()
return WorkflowLibraryModelSchema(many=True).dump(libraries)
def validate_workflow_specification(spec_id, validate_study_id=None, test_until=None):
errors = {}
def validate_workflow_specification(spec_id, study_id=None, test_until=None):
try:
WorkflowService.test_spec(spec_id, validate_study_id, test_until)
WorkflowService.test_spec(spec_id, study_id, test_until)
WorkflowService.test_spec(spec_id, study_id, test_until, required_only=True)
except ApiError as ae:
ae.message = "When populating all fields ... \n" + ae.message
errors['all'] = ae
try:
# Run the validation twice, the second time, just populate the required fields.
WorkflowService.test_spec(spec_id, validate_study_id, test_until, required_only=True)
except ApiError as ae:
ae.message = "When populating only required fields ... \n" + ae.message
errors['required'] = ae
interpreted_errors = ValidationErrorService.interpret_validation_errors(errors)
return ApiErrorSchema(many=True).dump(interpreted_errors)
error = ae
error = ValidationErrorService.interpret_validation_error(error)
return ApiErrorSchema(many=True).dump([error])
return []
def update_workflow_specification(spec_id, body):
if spec_id is None:

View File

@ -1,15 +1,14 @@
import enum
from typing import cast
from marshmallow import INCLUDE, EXCLUDE, fields, Schema
from marshmallow import INCLUDE, EXCLUDE, Schema
from marshmallow_enum import EnumField
from marshmallow_sqlalchemy import SQLAlchemyAutoSchema
from sqlalchemy import func, Index
from sqlalchemy.dialects.postgresql import UUID
from sqlalchemy.orm import deferred, relationship
from crc.models.data_store import DataStoreModel # this is needed by the relationship
from crc import db, ma
from crc.models.data_store import DataStoreModel
class FileType(enum.Enum):
@ -43,7 +42,7 @@ CONTENT_TYPES = {
"docx": "application/vnd.openxmlformats-officedocument.wordprocessingml.document",
"gif": "image/gif",
"jpg": "image/jpeg",
"md" : "text/plain",
"md": "text/plain",
"pdf": "application/pdf",
"png": "image/png",
"ppt": "application/vnd.ms-powerpoint",
@ -69,7 +68,7 @@ class FileDataModel(db.Model):
date_created = db.Column(db.DateTime(timezone=True), server_default=func.now())
file_model_id = db.Column(db.Integer, db.ForeignKey('file.id'))
file_model = db.relationship("FileModel", foreign_keys=[file_model_id])
user_uid = db.Column(db.String, db.ForeignKey('user.uid'), nullable=True)
class FileModel(db.Model):
@ -79,18 +78,19 @@ class FileModel(db.Model):
type = db.Column(db.Enum(FileType))
is_status = db.Column(db.Boolean)
content_type = db.Column(db.String)
is_reference = db.Column(db.Boolean, nullable=False, default=False) # A global reference file.
primary = db.Column(db.Boolean, nullable=False, default=False) # Is this the primary BPMN in a workflow?
primary_process_id = db.Column(db.String, nullable=True) # An id in the xml of BPMN documents, critical for primary BPMN.
is_reference = db.Column(db.Boolean, nullable=False, default=False) # A global reference file.
primary = db.Column(db.Boolean, nullable=False, default=False) # Is this the primary BPMN in a workflow?
primary_process_id = db.Column(db.String, nullable=True) # An id in the xml of BPMN documents, for primary BPMN.
workflow_spec_id = db.Column(db.String, db.ForeignKey('workflow_spec.id'), nullable=True)
workflow_id = db.Column(db.Integer, db.ForeignKey('workflow.id'), nullable=True)
irb_doc_code = db.Column(db.String, nullable=True) # Code reference to the irb_documents.xlsx reference file.
irb_doc_code = db.Column(db.String, nullable=True) # Code reference to the irb_documents.xlsx reference file.
# A request was made to delete the file, but we can't because there are
# active approvals or running workflows that depend on it. So we archive
# it instead, hide it in the interface.
is_review = db.Column(db.Boolean, default=False, nullable=True)
archived = db.Column(db.Boolean, default=False, nullable=False)
data_stores = relationship("DataStoreModel", cascade="all,delete", backref="file")
data_stores = relationship(DataStoreModel, cascade="all,delete", backref="file")
class File(object):
@classmethod
@ -107,7 +107,7 @@ class File(object):
instance.workflow_id = model.workflow_id
instance.irb_doc_code = model.irb_doc_code
instance.type = model.type
if model.irb_doc_code and model.irb_doc_code in doc_dictionary:
if model.irb_doc_code and model.irb_doc_code in doc_dictionary:
instance.document = doc_dictionary[model.irb_doc_code]
else:
instance.document = {}
@ -115,6 +115,7 @@ class File(object):
instance.last_modified = data_model.date_created
instance.latest_version = data_model.version
instance.size = data_model.size
instance.user_uid = data_model.user_uid
else:
instance.last_modified = None
instance.latest_version = None
@ -142,12 +143,11 @@ class FileSchema(Schema):
fields = ["id", "name", "is_status", "is_reference", "content_type",
"primary", "primary_process_id", "workflow_spec_id", "workflow_id",
"irb_doc_code", "last_modified", "latest_version", "type", "size", "data_store",
"document"]
"document", "user_uid"]
unknown = INCLUDE
type = EnumField(FileType)
class LookupFileModel(db.Model):
"""Gives us a quick way to tell what kind of lookup is set on a form field.
Connected to the file data model, so that if a new version of the same file is
@ -159,7 +159,8 @@ class LookupFileModel(db.Model):
field_id = db.Column(db.String)
is_ldap = db.Column(db.Boolean) # Allows us to run an ldap query instead of a db lookup.
file_data_model_id = db.Column(db.Integer, db.ForeignKey('file_data.id'))
dependencies = db.relationship("LookupDataModel", lazy="select", backref="lookup_file_model", cascade="all, delete, delete-orphan")
dependencies = db.relationship("LookupDataModel", lazy="select", backref="lookup_file_model",
cascade="all, delete, delete-orphan")
class LookupDataModel(db.Model):
@ -169,7 +170,7 @@ class LookupDataModel(db.Model):
value = db.Column(db.String)
label = db.Column(db.String)
# In the future, we might allow adding an additional "search" column if we want to search things not in label.
data = db.Column(db.JSON) # all data for the row is stored in a json structure here, but not searched presently.
data = db.Column(db.JSON) # all data for the row is stored in a json structure here, but not searched presently.
# Assure there is a searchable index on the label column, so we can get fast results back.
# query with:
@ -192,7 +193,7 @@ class LookupDataSchema(SQLAlchemyAutoSchema):
load_instance = True
include_relationships = False
include_fk = False # Includes foreign keys
exclude = ['id'] # Do not include the id field, it should never be used via the API.
exclude = ['id'] # Do not include the id field, it should never be used via the API.
class SimpleFileSchema(ma.Schema):

View File

@ -0,0 +1,30 @@
from crc.scripts.script import Script
from crc.api.common import ApiError
from crc.services.protocol_builder import ProtocolBuilderService
from crc.services.study_service import StudyService
class CheckStudy(Script):
    """Workflow script exposing Protocol Builder's check_study endpoint."""

    # Shared client instance; check_study() issues the actual PB request.
    pb = ProtocolBuilderService()

    def get_description(self):
        return """Returns the Check Study data for a Study"""

    def do_task_validate_only(self, task, study_id, workflow_id, *args, **kwargs):
        """During validation, only verify the study exists; do not call PB."""
        if not StudyService.get_study(study_id):
            raise ApiError.from_task(code='bad_study',
                                     message=f'No study for study_id {study_id}',
                                     task=task)
        return {"DETAIL": "Passed validation.", "STATUS": "No Error"}

    def do_task(self, task, study_id, workflow_id, *args, **kwargs):
        """Fetch check_study data from Protocol Builder, raising on failure."""
        result = self.pb.check_study(study_id)
        if not result:
            raise ApiError.from_task(code='missing_check_study',
                                     message='There was a problem checking information for this study.',
                                     task=task)
        return result

View File

@ -2,6 +2,7 @@ from crc import session
from crc.api.common import ApiError
from crc.models.file import FileModel
from crc.scripts.script import Script
from crc.services.document_service import DocumentService
from crc.services.file_service import FileService
@ -9,7 +10,7 @@ class DeleteFile(Script):
@staticmethod
def process_document_deletion(doc_code, workflow_id, task):
if FileService.is_allowed_document(doc_code):
if DocumentService.is_allowed_document(doc_code):
result = session.query(FileModel).filter(
FileModel.workflow_id == workflow_id, FileModel.irb_doc_code == doc_code).all()
if isinstance(result, list) and len(result) > 0 and isinstance(result[0], FileModel):

View File

@ -1,3 +1,6 @@
import sys
import traceback
from crc import app
from crc.api.common import ApiError
from crc.scripts.script import Script
@ -44,16 +47,24 @@ email (subject="My Subject", recipients=["dhf8r@virginia.edu", pi.email], cc='as
if recipients:
message = task.task_spec.documentation
data = task.data
content, content_html = EmailService().get_rendered_content(message, data)
EmailService.add_email(
subject=subject,
sender=app.config['DEFAULT_SENDER'],
recipients=recipients,
content=content,
content_html=content_html,
cc=cc,
study_id=study_id
)
try:
content, content_html = EmailService().get_rendered_content(message, data)
EmailService.add_email(
subject=subject,
sender=app.config['DEFAULT_SENDER'],
recipients=recipients,
content=content,
content_html=content_html,
cc=cc,
study_id=study_id
)
except Exception as e:
exc_type, exc_value, exc_traceback = sys.exc_info()
print("*** format_exception:")
# exc_type below is ignored on 3.5 and later
print(repr(traceback.format_exception(exc_type, exc_value,
exc_traceback)))
raise e
def get_email_addresses(self, users, study_id):
emails = []

View File

@ -3,6 +3,7 @@ from flask import g
from crc.api.common import ApiError
from crc.services.data_store_service import DataStoreBase
from crc.scripts.script import Script
from crc.services.document_service import DocumentService
from crc.services.file_service import FileService
@ -17,17 +18,22 @@ class FileDataSet(Script, DataStoreBase):
del(kwargs['file_id'])
return True
def validate_kw_args(self,**kwargs):
if kwargs.get('key',None) is None:
def validate_kw_args(self, **kwargs):
if kwargs.get('key', None) is None:
raise ApiError(code="missing_argument",
message=f"The 'file_data_get' script requires a keyword argument of 'key'")
message=f"The 'file_data_get' script requires a keyword argument of 'key'")
if kwargs.get('file_id', None) is None:
raise ApiError(code="missing_argument",
message=f"The 'file_data_get' script requires a keyword argument of 'file_id'")
if kwargs.get('value', None) is None:
raise ApiError(code="missing_argument",
message=f"The 'file_data_get' script requires a keyword argument of 'value'")
if kwargs.get('file_id',None) is None:
raise ApiError(code="missing_argument",
message=f"The 'file_data_get' script requires a keyword argument of 'file_id'")
if kwargs.get('value',None) is None:
raise ApiError(code="missing_argument",
message=f"The 'file_data_get' script requires a keyword argument of 'value'")
if kwargs['key'] == 'irb_code' and not DocumentService.is_allowed_document(kwargs.get('value')):
raise ApiError("invalid_form_field_key",
"When setting an irb_code, the form field id must match a known document in the "
"irb_docunents.xslx reference file. This code is not found in that file '%s'" %
kwargs.get('value'))
return True

View File

@ -9,7 +9,7 @@ class GetStudyAssociates(Script):
def get_description(self):
return """
Returns person assocated with study or an error if one is not associated.
Returns people associated with a study or an error if one is not associated.
example : get_study_associate('sbp3ey') => {'uid':'sbp3ey','role':'Unicorn Herder', 'send_email': False,
'access':True}

View File

@ -48,21 +48,29 @@ supervisor_info = ldap(supervisor_uid) // Sets the supervisor information to l
"UID for the person we want to look up.")
if len(args) < 1:
if UserService.has_user():
uid = UserService.current_user().uid
uid = UserService.current_user().uid
else:
uid = args[0]
user_info = LdapService.user_info(uid)
user_info_dict = {
"display_name": user_info.display_name,
"given_name": user_info.given_name,
"email_address": user_info.email_address,
"telephone_number": user_info.telephone_number,
"title": user_info.title,
"department": user_info.department,
"affiliation": user_info.affiliation,
"sponsor_type": user_info.sponsor_type,
"uid": user_info.uid,
"proper_name": user_info.proper_name()
}
return user_info_dict
try:
user_info = LdapService.user_info(uid)
except ApiError as ae:
app.logger.info(ae)
return {}
except Exception as e:
app.logger.info(e)
return {}
else:
user_info_dict = {
"display_name": user_info.display_name,
"given_name": user_info.given_name,
"email_address": user_info.email_address,
"telephone_number": user_info.telephone_number,
"title": user_info.title,
"department": user_info.department,
"affiliation": user_info.affiliation,
"sponsor_type": user_info.sponsor_type,
"uid": user_info.uid,
"proper_name": user_info.proper_name()
}
return user_info_dict

View File

@ -10,6 +10,7 @@ from crc.models.protocol_builder import ProtocolBuilderInvestigatorType
from crc.models.study import StudyModel, StudySchema
from crc.api import workflow as workflow_api
from crc.scripts.script import Script
from crc.services.document_service import DocumentService
from crc.services.file_service import FileService
from crc.services.protocol_builder import ProtocolBuilderService
from crc.services.study_service import StudyService
@ -168,8 +169,8 @@ Please note this is just a few examples, ALL known document types are returned i
"""For validation only, pretend no results come back from pb"""
self.check_args(args, 2)
# Assure the reference file exists (a bit hacky, but we want to raise this error early, and cleanly.)
FileService.get_reference_file_data(FileService.DOCUMENT_LIST)
FileService.get_reference_file_data(FileService.INVESTIGATOR_LIST)
FileService.get_reference_file_data(DocumentService.DOCUMENT_LIST)
FileService.get_reference_file_data(StudyService.INVESTIGATOR_LIST)
# we call the real do_task so we can
# seed workflow validations with settings from studies in PB Mock
# in order to test multiple paths thru the workflow

View File

@ -0,0 +1,98 @@
from crc.api.common import ApiError
from crc.models.api_models import DocumentDirectory
from crc.services.file_service import FileService
from crc.services.lookup_service import LookupService
class DocumentService(object):
    """The document service provides details about the types of documents that can be uploaded to a workflow.
    This metadata about different document types is managed in an Excel spreadsheet, which can be uploaded at any
    time to change which documents are accepted, and it allows us to categorize these documents.  At a minimum,
    the spreadsheet should contain the columns 'code', 'category1', 'category2', 'category3', 'description' and 'id',
    code is required for all rows in the table, the other fields are optional. """

    DOCUMENT_LIST = "documents.xlsx"

    @staticmethod
    def is_allowed_document(code):
        """Return True if the given irb doc code appears in the document dictionary."""
        doc_dict = DocumentService.get_dictionary()
        return code in doc_dict

    @staticmethod
    def verify_doc_dictionary(dd):
        """
        We are currently getting structured information from an XLS file, if someone accidentally
        changes a header we will have problems later, so we will verify we have the headers we need
        here
        """
        required_fields = ['category1', 'category2', 'category3', 'description']

        # we only need to check the first item, as all of the keys should be the same
        key = list(dd.keys())[0]
        for field in required_fields:
            if field not in dd[key].keys():
                raise ApiError(code="Invalid document list %s" % DocumentService.DOCUMENT_LIST,
                               message='Please check the headers in %s' % DocumentService.DOCUMENT_LIST)

    @staticmethod
    def get_dictionary():
        """Returns a dictionary of document details keyed on the doc_code."""
        file_data = FileService.get_reference_file_data(DocumentService.DOCUMENT_LIST)
        lookup_model = LookupService.get_lookup_model_for_file_data(file_data, 'code', 'description')
        doc_dict = {}
        for lookup_data in lookup_model.dependencies:
            doc_dict[lookup_data.value] = lookup_data.data
        return doc_dict

    @staticmethod
    def get_directory(doc_dict, files, workflow_id):
        """Returns a list of directories, hierarchically nested by category, with files at the deepest level.
        Empty directories are not included."""
        directory = []
        if files:
            for file in files:
                if file.irb_doc_code in doc_dict:
                    doc_code = doc_dict[file.irb_doc_code]
                else:
                    doc_code = {'category1': "Unknown", 'category2': None, 'category3': None}
                # Only files uploaded by the requested workflow start expanded in the UI.
                if workflow_id:
                    expand = file.workflow_id == int(workflow_id)
                else:
                    expand = False
                # Build the path of non-empty category levels, terminated by the file itself.
                categories = [x for x in [doc_code['category1'], doc_code['category2'], doc_code['category3'], file] if x]
                DocumentService.ensure_exists(directory, categories, expanded=expand)
        return directory

    @staticmethod
    def ensure_exists(output, categories, expanded):
        """
        This is a recursive function, it expects a list of
        levels with a file object at the end (kinda like duck,duck,duck,goose)

        for each level, it makes sure that level is already in the structure and if it is not
        it will add it

        function terminates upon getting an entry that is a file object ( or really anything but string)
        """
        current_item = categories[0]
        found = False
        if isinstance(current_item, str):
            # Category level: merge into an existing node or create a new one.
            for item in output:
                if item.level == current_item:
                    found = True
                    item.filecount = item.filecount + 1
                    # A directory stays expanded if any file along this path expands it.
                    item.expanded = expanded | item.expanded
                    DocumentService.ensure_exists(item.children, categories[1:], expanded)
            if not found:
                new_level = DocumentDirectory(level=current_item)
                new_level.filecount = 1
                new_level.expanded = expanded
                output.append(new_level)
                DocumentService.ensure_exists(new_level.children, categories[1:], expanded)
        else:
            # Leaf: anything that is not a string is treated as the file entry.
            new_level = DocumentDirectory(file=current_item)
            new_level.expanded = expanded
            output.append(new_level)

View File

@ -68,4 +68,5 @@ class EmailService(object):
@staticmethod
def get_cr_connect_wrapper(email_body):
return render_template('mail_content_template.html', email_body=email_body, base_url=request.base_url)
base_url = app.config['FRONTEND'] # The frontend url
return render_template('mail_content_template.html', email_body=email_body, base_url=base_url)

View File

@ -1,6 +1,5 @@
import re
generic_message = """Workflow validation failed. For more information about the error, see below."""
# known_errors is a dictionary of errors from validation that we want to give users a hint for solving their problem.
# The key is the known error, or part of the known error. It is a string.
@ -14,7 +13,7 @@ generic_message = """Workflow validation failed. For more information about the
# I know this explanation is confusing. If you have ideas for clarification, pull request welcome.
known_errors = {'Error is Non-default exclusive outgoing sequence flow without condition':
known_errors = {'Non-default exclusive outgoing sequence flow without condition':
{'hint': 'Add a Condition Type to your gateway path.'},
'Could not set task title on task .*':
@ -29,37 +28,16 @@ class ValidationErrorService(object):
Validation is run twice,
once where we try to fill in all form fields
and a second time where we only fill in the required fields.
We get a list that contains possible errors from the validation."""
@staticmethod
def interpret_validation_errors(errors):
if len(errors) == 0:
return ()
interpreted_errors = []
for error_type in ['all', 'required']:
if error_type in errors:
hint = generic_message
for known_key in known_errors:
regex = re.compile(known_key)
result = regex.search(errors[error_type].message)
if result is not None:
if 'hint' in known_errors[known_key]:
if 'groups' in known_errors[known_key]:
caught = {}
for group in known_errors[known_key]['groups']:
group_id = known_errors[known_key]['groups'][group]
group_value = result.groups()[group_id]
caught[group] = group_value
hint = known_errors[known_key]['hint'].format(**caught)
else:
hint = known_errors[known_key]['hint']
errors[error_type].hint = hint
interpreted_errors.append(errors[error_type])
return interpreted_errors
def interpret_validation_error(error):
if error is None:
return
for known_key in known_errors:
regex = re.compile(known_key)
result = regex.search(error.message)
if result is not None:
if 'hint' in known_errors[known_key]:
error.hint = known_errors[known_key]['hint']
return error

View File

@ -10,8 +10,6 @@ from lxml import etree
from SpiffWorkflow.bpmn.parser.ValidationException import ValidationException
from lxml.etree import XMLSyntaxError
from pandas import ExcelFile
from pandas._libs.missing import NA
from sqlalchemy import desc
from sqlalchemy.exc import IntegrityError
@ -21,6 +19,7 @@ from crc.models.data_store import DataStoreModel
from crc.models.file import FileType, FileDataModel, FileModel, LookupFileModel, LookupDataModel
from crc.models.workflow import WorkflowSpecModel, WorkflowModel, WorkflowSpecDependencyFile
from crc.services.cache_service import cache
from crc.services.user_service import UserService
import re
@ -38,34 +37,6 @@ def camel_to_snake(camel):
class FileService(object):
"""Provides consistent management and rules for storing, retrieving and processing files."""
DOCUMENT_LIST = "irb_documents.xlsx"
INVESTIGATOR_LIST = "investigators.xlsx"
__doc_dictionary = None
@staticmethod
def verify_doc_dictionary(dd):
"""
We are currently getting structured information from an XLS file, if someone accidentally
changes a header we will have problems later, so we will verify we have the headers we need
here
"""
required_fields = ['category1','category2','category3','description']
# we only need to check the first item, as all of the keys should be the same
key = list(dd.keys())[0]
for field in required_fields:
if field not in dd[key].keys():
raise ApiError(code="Invalid document list %s"%FileService.DOCUMENT_LIST,
message='Please check the headers in %s'%FileService.DOCUMENT_LIST)
@staticmethod
def get_doc_dictionary():
if not FileService.__doc_dictionary:
FileService.__doc_dictionary = FileService.get_reference_data(FileService.DOCUMENT_LIST, 'code', ['id'])
FileService.verify_doc_dictionary(FileService.__doc_dictionary)
return FileService.__doc_dictionary
@staticmethod
def add_workflow_spec_file(workflow_spec: WorkflowSpecModel,
@ -88,10 +59,7 @@ class FileService(object):
return FileService.update_file(file_model, binary_data, content_type)
@staticmethod
def is_allowed_document(code):
doc_dict = FileService.get_doc_dictionary()
return code in doc_dict
@staticmethod
@cache
@ -104,12 +72,6 @@ class FileService(object):
def update_irb_code(file_id, irb_doc_code):
"""Create a new file and associate it with the workflow
Please note that the irb_doc_code MUST be a known file in the irb_documents.xslx reference document."""
if not FileService.is_allowed_document(irb_doc_code):
raise ApiError("invalid_form_field_key",
"When uploading files, the form field id must match a known document in the "
"irb_docunents.xslx reference file. This code is not found in that file '%s'" % irb_doc_code)
""" """
file_model = session.query(FileModel)\
.filter(FileModel.id == file_id).first()
if file_model is None:
@ -137,28 +99,6 @@ class FileService(object):
)
return FileService.update_file(file_model, binary_data, content_type)
@staticmethod
def get_reference_data(reference_file_name, index_column, int_columns=None):
    """Open a reference xlsx file and return its rows as a dictionary.

    Each row is keyed on the value found in *index_column*.  Columns named
    in *int_columns* are coerced to integers first, lest you get '1.0'
    rather than '1' (all values are ultimately stringified).

    :param reference_file_name: name of the stored reference file to parse.
    :param index_column: column whose values become the dictionary keys.
    :param int_columns: optional list of column names to treat as integers.
    :return: dict of row-dicts keyed on *index_column*.

    fixme: This is stupid stupid slow. Place it in the database and just
    check if it is up to date.
    """
    # Fix: a mutable default argument ([]) is shared across calls; use None.
    if int_columns is None:
        int_columns = []
    data_model = FileService.get_reference_file_data(reference_file_name)
    xls = ExcelFile(data_model.data, engine='openpyxl')
    df = xls.parse(xls.sheet_names[0])
    df = df.convert_dtypes()
    df = pd.DataFrame(df).dropna(how='all')  # Drop fully-empty rows.
    df = pd.DataFrame(df).replace({NA: None})  # Replace pandas NA with None.
    for c in int_columns:
        df[c] = df[c].fillna(0)
        df = df.astype({c: 'Int64'})  # Nullable int avoids the '1.0' artifact.
    df = df.fillna('')
    df = df.applymap(str)  # Every cell is returned as a string.
    df = df.set_index(index_column)
    return json.loads(df.to_json(orient='index'))
@staticmethod
def get_workflow_files(workflow_id):
"""Returns all the file models associated with a running workflow."""
@ -229,10 +169,14 @@ class FileService(object):
except XMLSyntaxError as xse:
raise ApiError("invalid_xml", "Failed to parse xml: " + str(xse), file_name=file_model.name)
try:
user_uid = UserService.current_user().uid
except ApiError as ae:
user_uid = None
new_file_data_model = FileDataModel(
data=binary_data, file_model_id=file_model.id, file_model=file_model,
version=version, md5_hash=md5_checksum, date_created=datetime.utcnow(),
size=size
size=size, user_uid=user_uid
)
session.add_all([file_model, new_file_data_model])
session.commit()

View File

@ -58,6 +58,7 @@ class LdapService(object):
@staticmethod
def user_info(uva_uid):
uva_uid = uva_uid.lower()
user_info = db.session.query(LdapModel).filter(LdapModel.uid == uva_uid).first()
if not user_info:
app.logger.info("No cache for " + uva_uid)

View File

@ -12,7 +12,7 @@ from sqlalchemy.sql.functions import GenericFunction
from crc import db
from crc.api.common import ApiError
from crc.models.api_models import Task
from crc.models.file import FileDataModel, LookupFileModel, LookupDataModel
from crc.models.file import FileModel, FileDataModel, LookupFileModel, LookupDataModel
from crc.models.workflow import WorkflowModel, WorkflowSpecDependencyFile
from crc.services.file_service import FileService
from crc.services.ldap_service import LdapService
@ -25,11 +25,14 @@ class TSRank(GenericFunction):
class LookupService(object):
"""Provides tools for doing lookups for auto-complete fields.
This can currently take two forms:
"""Provides tools for doing lookups for auto-complete fields, and rapid access to any
uploaded spreadsheets.
This can currently take three forms:
1) Lookup from spreadsheet data associated with a workflow specification.
in which case we store the spreadsheet data in a lookup table with full
text indexing enabled, and run searches against that table.
2) Lookup from spreadsheet data associated with a specific file. This allows us
to get a lookup model for a specific file object, such as a reference file.
2) Lookup from LDAP records. In which case we call out to an external service
to pull back detailed records and return them.
@ -44,6 +47,14 @@ class LookupService(object):
workflow = db.session.query(WorkflowModel).filter(WorkflowModel.id == workflow_id).first()
return LookupService.__get_lookup_model(workflow, spiff_task.task_spec.name, field.id)
@staticmethod
def get_lookup_model_for_file_data(file_data: FileDataModel, value_column, label_column):
    """Return the lookup model for a specific file-data record, building it when absent.

    An existing model is preferred because building the lookup table is
    very expensive; a warning is logged whenever a rebuild is forced.
    """
    existing = (db.session.query(LookupFileModel)
                .filter(LookupFileModel.file_data_model_id == file_data.id)
                .first())
    if existing:
        return existing
    logging.warning("!!!! Making a very expensive call to update the lookup model.")
    return LookupService.build_lookup_table(file_data, value_column, label_column)
@staticmethod
def __get_lookup_model(workflow, task_spec_id, field_id):
lookup_model = db.session.query(LookupFileModel) \
@ -139,7 +150,8 @@ class LookupService(object):
return lookup_model
@staticmethod
def build_lookup_table(data_model: FileDataModel, value_column, label_column, workflow_spec_id, task_spec_id, field_id):
def build_lookup_table(data_model: FileDataModel, value_column, label_column,
workflow_spec_id=None, task_spec_id=None, field_id=None):
""" In some cases the lookup table can be very large. This method will add all values to the database
in a way that can be searched and returned via an api call - rather than sending the full set of
options along with the form. It will only open the file and process the options if something has
@ -147,8 +159,9 @@ class LookupService(object):
xls = ExcelFile(data_model.data, engine='openpyxl')
df = xls.parse(xls.sheet_names[0]) # Currently we only look at the fist sheet.
df = df.convert_dtypes()
df = df.loc[:, ~df.columns.str.contains('^Unnamed')] # Drop unnamed columns.
df = pd.DataFrame(df).dropna(how='all') # Drop null rows
df = pd.DataFrame(df).replace({NA: None})
df = pd.DataFrame(df).replace({NA: ''})
if value_column not in df:
raise ApiError("invalid_enum",

View File

@ -15,6 +15,7 @@ class ProtocolBuilderService(object):
STUDY_DETAILS_URL = app.config['PB_STUDY_DETAILS_URL']
SPONSORS_URL = app.config['PB_SPONSORS_URL']
IRB_INFO_URL = app.config['PB_IRB_INFO_URL']
CHECK_STUDY_URL = app.config['PB_CHECK_STUDY_URL']
@staticmethod
def is_enabled():
@ -64,6 +65,10 @@ class ProtocolBuilderService(object):
def get_sponsors(study_id) -> {}:
return ProtocolBuilderService.__make_request(study_id, ProtocolBuilderService.SPONSORS_URL)
@staticmethod
def check_study(study_id) -> {}:
    """Fetch the Protocol Builder 'check study' payload for the given study id."""
    url = ProtocolBuilderService.CHECK_STUDY_URL
    return ProtocolBuilderService.__make_request(study_id, url)
@staticmethod
def __enabled_or_raise():
if not ProtocolBuilderService.is_enabled():

View File

@ -14,24 +14,34 @@ from crc import db, session, app
from crc.api.common import ApiError
from crc.models.data_store import DataStoreModel
from crc.models.email import EmailModel
from crc.models.file import FileDataModel, FileModel, FileModelSchema, File, LookupFileModel, LookupDataModel
from crc.models.file import FileModel, File
from crc.models.ldap import LdapSchema
from crc.models.protocol_builder import ProtocolBuilderStudy, ProtocolBuilderStatus
from crc.models.study import StudyModel, Study, StudyStatus, Category, WorkflowMetadata, StudyEventType, StudyEvent, \
IrbStatus, StudyAssociated
from crc.models.task_event import TaskEventModel, TaskEvent
from crc.models.protocol_builder import ProtocolBuilderStudy
from crc.models.study import StudyModel, Study, StudyStatus, Category, \
WorkflowMetadata, StudyEventType, StudyEvent, IrbStatus, StudyAssociated
from crc.models.task_event import TaskEventModel
from crc.models.workflow import WorkflowSpecCategoryModel, WorkflowModel, WorkflowSpecModel, WorkflowState, \
WorkflowStatus, WorkflowSpecDependencyFile
from crc.services.document_service import DocumentService
from crc.services.file_service import FileService
from crc.services.ldap_service import LdapService
from crc.services.lookup_service import LookupService
from crc.services.protocol_builder import ProtocolBuilderService
from crc.services.workflow_processor import WorkflowProcessor
class StudyService(object):
"""Provides common tools for working with a Study"""
INVESTIGATOR_LIST = "investigators.xlsx" # A reference document containing details about what investigators to show, and when.
@staticmethod
def get_studies_for_user(user):
def _is_valid_study(study_id):
    """Return True when the study's Protocol Builder review type is one we handle.

    Only review types 2, 3, 23 and 24 are shown in this system; anything
    else (including a missing REVIEW_TYPE) is filtered out.
    """
    details = ProtocolBuilderService().get_study_details(study_id)
    return details.get('REVIEW_TYPE') in (2, 3, 23, 24)
def get_studies_for_user(self, user):
"""Returns a list of all studies for the given user."""
associated = session.query(StudyAssociated).filter_by(uid=user.uid,access=True).all()
associated_studies = [x.study_id for x in associated]
@ -40,7 +50,8 @@ class StudyService(object):
studies = []
for study_model in db_studies:
studies.append(StudyService.get_study(study_model.id, study_model,do_status=False))
if self._is_valid_study(study_model.id):
studies.append(StudyService.get_study(study_model.id, study_model,do_status=False))
return studies
@staticmethod
@ -77,7 +88,7 @@ class StudyService(object):
workflow_metas = StudyService._get_workflow_metas(study_id)
files = FileService.get_files_for_study(study.id)
files = (File.from_models(model, FileService.get_file_data(model.id),
FileService.get_doc_dictionary()) for model in files)
DocumentService.get_dictionary()) for model in files)
study.files = list(files)
# Calling this line repeatedly is very very slow. It creates the
# master spec and runs it. Don't execute this for Abandoned studies, as
@ -265,14 +276,14 @@ class StudyService(object):
# Loop through all known document types, get the counts for those files,
# and use pb_docs to mark those as required.
doc_dictionary = FileService.get_reference_data(FileService.DOCUMENT_LIST, 'code', ['id'])
doc_dictionary = DocumentService.get_dictionary()
documents = {}
for code, doc in doc_dictionary.items():
if ProtocolBuilderService.is_enabled():
doc['required'] = False
if ProtocolBuilderService.is_enabled() and doc['id']:
pb_data = next((item for item in pb_docs if int(item['AUXDOCID']) == int(doc['id'])), None)
doc['required'] = False
if pb_data:
doc['required'] = True
@ -282,7 +293,7 @@ class StudyService(object):
# Make a display name out of categories
name_list = []
for cat_key in ['category1', 'category2', 'category3']:
if doc[cat_key] not in ['', 'NULL']:
if doc[cat_key] not in ['', 'NULL', None]:
name_list.append(doc[cat_key])
doc['display_name'] = ' / '.join(name_list)
@ -319,12 +330,22 @@ class StudyService(object):
documents[code] = doc
return Box(documents)
@staticmethod
def get_investigator_dictionary():
    """Return investigator reference details keyed on the investigator code.

    Reads the investigators reference spreadsheet through the lookup
    service, so the parsed data is cached rather than re-read each call.
    """
    file_data = FileService.get_reference_file_data(StudyService.INVESTIGATOR_LIST)
    lookup_model = LookupService.get_lookup_model_for_file_data(file_data, 'code', 'label')
    return {entry.value: entry.data for entry in lookup_model.dependencies}
@staticmethod
def get_investigators(study_id, all=False):
"""Convert array of investigators from protocol builder into a dictionary keyed on the type. """
# Loop through all known investigator types as set in the reference file
inv_dictionary = FileService.get_reference_data(FileService.INVESTIGATOR_LIST, 'code')
inv_dictionary = StudyService.get_investigator_dictionary()
# Get PB required docs
pb_investigators = ProtocolBuilderService.get_investigators(study_id=study_id)

View File

@ -7,7 +7,7 @@ import shlex
from datetime import datetime
from typing import List
from SpiffWorkflow import Task as SpiffTask, WorkflowException
from SpiffWorkflow import Task as SpiffTask, WorkflowException, Task
from SpiffWorkflow.bpmn.BpmnScriptEngine import BpmnScriptEngine
from SpiffWorkflow.bpmn.parser.ValidationException import ValidationException
from SpiffWorkflow.bpmn.serializer.BpmnSerializer import BpmnSerializer
@ -30,6 +30,8 @@ from crc.services.file_service import FileService
from crc import app
from crc.services.user_service import UserService
from difflib import SequenceMatcher
class CustomBpmnScriptEngine(BpmnScriptEngine):
"""This is a custom script processor that can be easily injected into Spiff Workflow.
It will execute python code read in from the bpmn. It will also make any scripts in the
@ -50,24 +52,16 @@ class CustomBpmnScriptEngine(BpmnScriptEngine):
workflow_id = task.workflow.data[WorkflowProcessor.WORKFLOW_ID_KEY]
else:
workflow_id = None
try:
if task.workflow.data[WorkflowProcessor.VALIDATION_PROCESS_KEY]:
augmentMethods = Script.generate_augmented_validate_list(task, study_id, workflow_id)
augment_methods = Script.generate_augmented_validate_list(task, study_id, workflow_id)
else:
augmentMethods = Script.generate_augmented_list(task, study_id, workflow_id)
super().execute(task, script, data, externalMethods=augmentMethods)
except SyntaxError as e:
raise ApiError('syntax_error',
f'Something is wrong with your python script '
f'please correct the following:'
f' {script}, {e.msg}')
except NameError as e:
raise ApiError('name_error',
f'something you are referencing does not exist:'
f' {script}, {e}')
augment_methods = Script.generate_augmented_list(task, study_id, workflow_id)
super().execute(task, script, data, external_methods=augment_methods)
except WorkflowException as e:
raise e
except Exception as e:
raise WorkflowTaskExecException(task, f' {script}, {e}', e)
def evaluate_expression(self, task, expression):
"""
@ -86,7 +80,7 @@ class CustomBpmnScriptEngine(BpmnScriptEngine):
else:
augmentMethods = Script.generate_augmented_list(task, study_id, workflow_id)
exp, valid = self.validateExpression(expression)
return self._eval(exp, externalMethods=augmentMethods, **task.data)
return self._eval(exp, external_methods=augmentMethods, **task.data)
except Exception as e:
raise WorkflowTaskExecException(task,
@ -331,8 +325,8 @@ class WorkflowProcessor(object):
spec = parser.get_spec(process_id)
except ValidationException as ve:
raise ApiError(code="workflow_validation_error",
message="Failed to parse Workflow Specification '%s'. \n" % workflow_spec_id +
"Error is %s. \n" % str(ve),
message="Failed to parse the Workflow Specification. " +
"Error is '%s.'" % str(ve),
file_name=ve.filename,
task_id=ve.id,
tag=ve.tag)
@ -343,6 +337,9 @@ class WorkflowProcessor(object):
if bpmn_workflow.is_completed():
return WorkflowStatus.complete
user_tasks = bpmn_workflow.get_ready_user_tasks()
waiting_tasks = bpmn_workflow.get_tasks(Task.WAITING)
if len(waiting_tasks) > 0:
return WorkflowStatus.waiting
if len(user_tasks) > 0:
return WorkflowStatus.user_input_required
else:
@ -392,10 +389,15 @@ class WorkflowProcessor(object):
"""
# If the whole blessed mess is done, return the end_event task in the tree
# This was failing in the case of a call activity where we have an intermediate EndEvent
# what we really want is the LAST EndEvent
endtasks = []
if self.bpmn_workflow.is_completed():
for task in SpiffTask.Iterator(self.bpmn_workflow.task_tree, SpiffTask.ANY_MASK):
if isinstance(task.task_spec, EndEvent):
return task
endtasks.append(task)
return endtasks[-1]
# If there are ready tasks to complete, return the next ready task, but return the one
# in the active parallel path if possible. In some cases the active parallel path may itself be

View File

@ -30,6 +30,9 @@ from crc.models.study import StudyModel
from crc.models.task_event import TaskEventModel
from crc.models.user import UserModel, UserModelSchema
from crc.models.workflow import WorkflowModel, WorkflowStatus, WorkflowSpecModel
from crc.services.data_store_service import DataStoreBase
from crc.services.document_service import DocumentService
from crc.services.file_service import FileService
from crc.services.lookup_service import LookupService
from crc.services.study_service import StudyService
@ -97,12 +100,18 @@ class WorkflowService(object):
def do_waiting():
records = db.session.query(WorkflowModel).filter(WorkflowModel.status==WorkflowStatus.waiting).all()
for workflow_model in records:
print('processing workflow %d'%workflow_model.id)
processor = WorkflowProcessor(workflow_model)
processor.bpmn_workflow.refresh_waiting_tasks()
processor.bpmn_workflow.do_engine_steps()
processor.save()
try:
app.logger.info('Processing workflow %s' % workflow_model.id)
processor = WorkflowProcessor(workflow_model)
processor.bpmn_workflow.refresh_waiting_tasks()
processor.bpmn_workflow.do_engine_steps()
processor.save()
except Exception as e:
app.logger.error(f"Error running waiting task for workflow #%i (%s) for study #%i. %s" %
(workflow_model.id,
workflow_model.workflow_spec.name,
workflow_model.study_id,
str(e)))
@staticmethod
@timeit
@ -305,8 +314,11 @@ class WorkflowService(object):
field.get_property(Task.FIELD_PROP_FILE_DATA) in data and \
field.id in data:
file_id = data[field.get_property(Task.FIELD_PROP_FILE_DATA)]["id"]
data_store = DataStoreModel(file_id=file_id, key=field.id, value=data[field.id])
db.session.add(data_store)
if field.type == 'enum':
data_args = (field.id, data[field.id]['label'])
else:
data_args = (field.id, data[field.id])
DataStoreBase().set_data_common(task.id, None, None, None, None, None, file_id, *data_args)
@staticmethod
def evaluate_property(property_name, field, task):
@ -434,7 +446,7 @@ class WorkflowService(object):
doc_code = WorkflowService.evaluate_property('doc_code', field, task)
file_model = FileModel(name="test.png",
irb_doc_code = field.id)
doc_dict = FileService.get_doc_dictionary()
doc_dict = DocumentService.get_dictionary()
file = File.from_models(file_model, None, doc_dict)
return FileSchema().dump(file)
elif field.type == 'files':
@ -803,7 +815,7 @@ class WorkflowService(object):
mi_count=task.multi_instance_count, # This is the number of times the task could repeat.
mi_index=task.multi_instance_index, # And the index of the currently repeating task.
process_name=task.process_name,
date=datetime.utcnow(),
# date=datetime.utcnow(), <=== For future reference, NEVER do this. Let the database set the time.
)
db.session.add(task_event)
db.session.commit()

View File

@ -61,7 +61,6 @@ python-box==5.2.0
python-dateutil==2.8.1
python-docx==0.8.10
python-editor==1.0.4
python-levenshtein==0.12.0
pytz==2020.4
pyyaml==5.4
recommonmark==0.6.0

View File

@ -13,10 +13,14 @@ if [ "$UPGRADE_DB" = "true" ]; then
pipenv run flask db upgrade
fi
if [ "$RESET_DB" = "true" ]; then
echo 'Resetting database and seeding it with example CR Connect data...'
pipenv run flask load-example-data
fi
# This is commented to keep humans from clearing the db accidentally.
# If you need to reset the db, you can uncomment,
# then set RESET_DB to true in config, push code, and pray
#if [ "$RESET_DB" = "true" ]; then
# echo 'Resetting database and seeding it with example CR Connect data...'
# pipenv run flask load-example-data
#fi
if [ "$RESET_DB_RRT" = "true" ]; then
echo 'Resetting database and seeding it with example RRT data...'

View File

@ -7,7 +7,9 @@ from crc.models.file import CONTENT_TYPES
from crc.models.ldap import LdapModel
from crc.models.user import UserModel
from crc.models.workflow import WorkflowSpecModel, WorkflowSpecCategoryModel
from crc.services.document_service import DocumentService
from crc.services.file_service import FileService
from crc.services.study_service import StudyService
class ExampleDataLoader:
@ -315,14 +317,14 @@ class ExampleDataLoader:
def load_reference_documents(self):
file_path = os.path.join(app.root_path, 'static', 'reference', 'irb_documents.xlsx')
file = open(file_path, "rb")
FileService.add_reference_file(FileService.DOCUMENT_LIST,
FileService.add_reference_file(DocumentService.DOCUMENT_LIST,
binary_data=file.read(),
content_type=CONTENT_TYPES['xls'])
file.close()
file_path = os.path.join(app.root_path, 'static', 'reference', 'investigators.xlsx')
file = open(file_path, "rb")
FileService.add_reference_file(FileService.INVESTIGATOR_LIST,
FileService.add_reference_file(StudyService.INVESTIGATOR_LIST,
binary_data=file.read(),
content_type=CONTENT_TYPES['xls'])
file.close()

View File

@ -0,0 +1,27 @@
"""add user_uid column to file_data table
Revision ID: 30e017a03948
Revises: bbf064082623
Create Date: 2021-07-06 10:39:04.661704
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '30e017a03948'
down_revision = 'bbf064082623'
branch_labels = None
depends_on = None
def upgrade():
    # Track which user uploaded each version of a file: add a nullable
    # user_uid column to file_data, backed by a foreign key to user.uid.
    op.add_column('file_data', sa.Column('user_uid', sa.String(), nullable=True))
    op.create_foreign_key(None, 'file_data', 'user', ['user_uid'], ['uid'])
def downgrade():
    # NOTE(review): the explicit constraint drop and data scrub are commented
    # out — presumably dropping the column removes the FK implicitly; confirm
    # before relying on a clean downgrade of this revision.
    # op.drop_constraint('file_data_user_uid_fkey', 'file_data', type_='foreignkey')
    # op.execute("update file_data set user_uid = NULL WHERE user_uid IS NOT NULL")
    op.drop_column('file_data', 'user_uid')

View File

@ -0,0 +1,25 @@
"""change irb_documents to documents
Revision ID: c16d3047abbe
Revises: bbf064082623
Create Date: 2021-07-07 13:07:53.966102
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'c16d3047abbe'
down_revision = 'bbf064082623'
branch_labels = None
depends_on = None
def upgrade():
    # Fix: removed a stray dead `pass` statement that preceded the real work.
    # Rename the irb_documents reference file to the generic documents.xlsx.
    op.execute("update file set name = 'documents.xlsx' where name='irb_documents.xlsx'")
def downgrade():
    # Restore the original reference-file name.
    op.execute("update file set name = 'irb_documents.xlsx' where name='documents.xlsx'")

View File

@ -0,0 +1,24 @@
"""merge 30e017a03948 and c16d3047abbe
Revision ID: dc30b8f6571c
Revises: 30e017a03948, c16d3047abbe
Create Date: 2021-07-12 11:11:47.410647
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'dc30b8f6571c'
down_revision = ('30e017a03948', 'c16d3047abbe')
branch_labels = None
depends_on = None
def upgrade():
    # Merge-point revision (joins 30e017a03948 and c16d3047abbe):
    # no schema changes of its own.
    pass
def downgrade():
    # Nothing to undo for a merge-point revision.
    pass

View File

@ -2,6 +2,7 @@
# IMPORTANT - Environment must be loaded before app, models, etc....
import os
os.environ["TESTING"] = "true"
import json
@ -23,6 +24,7 @@ from crc.services.file_service import FileService
from crc.services.study_service import StudyService
from crc.services.user_service import UserService
from crc.services.workflow_service import WorkflowService
from crc.services.document_service import DocumentService
from example_data import ExampleDataLoader
# UNCOMMENT THIS FOR DEBUGGING SQL ALCHEMY QUERIES
@ -138,8 +140,7 @@ class BaseTest(unittest.TestCase):
delete everything that matters in the local database - this is used to
test ground zero copy of workflow specs.
"""
session.execute("delete from workflow; delete from file_data; delete from file; delete from workflow_spec;")
session.commit()
ExampleDataLoader.clean_db()
def load_example_data(self, use_crc_data=False, use_rrt_data=False):
"""use_crc_data will cause this to load the mammoth collection of documents
@ -147,13 +148,6 @@ class BaseTest(unittest.TestCase):
otherwise it depends on a small setup for running tests."""
from example_data import ExampleDataLoader
ExampleDataLoader.clean_db()
if use_crc_data:
ExampleDataLoader().load_all()
elif use_rrt_data:
ExampleDataLoader().load_rrt()
else:
ExampleDataLoader().load_test_data()
# If in production mode, only add the first user.
if app.config['PRODUCTION']:
session.add(UserModel(**self.users[0]))
@ -161,6 +155,13 @@ class BaseTest(unittest.TestCase):
for user_json in self.users:
session.add(UserModel(**user_json))
if use_crc_data:
ExampleDataLoader().load_all()
elif use_rrt_data:
ExampleDataLoader().load_rrt()
else:
ExampleDataLoader().load_test_data()
session.commit()
for study_json in self.studies:
study_model = StudyModel(**study_json)
@ -282,28 +283,6 @@ class BaseTest(unittest.TestCase):
session.commit()
return study
def _create_study_workflow_approvals(self, user_uid, title, primary_investigator_id, approver_uids, statuses,
workflow_spec_name="random_fact"):
study = self.create_study(uid=user_uid, title=title, primary_investigator_id=primary_investigator_id)
workflow = self.create_workflow(workflow_name=workflow_spec_name, study=study)
approvals = []
for i in range(len(approver_uids)):
approvals.append(self.create_approval(
study=study,
workflow=workflow,
approver_uid=approver_uids[i],
status=statuses[i],
version=1
))
full_study = {
'study': study,
'workflow': workflow,
'approvals': approvals,
}
return full_study
def create_workflow(self, workflow_name, display_name=None, study=None, category_id=None, as_user="dhf8r"):
session.flush()
@ -320,30 +299,11 @@ class BaseTest(unittest.TestCase):
def create_reference_document(self):
file_path = os.path.join(app.root_path, 'static', 'reference', 'irb_documents.xlsx')
file = open(file_path, "rb")
FileService.add_reference_file(FileService.DOCUMENT_LIST,
FileService.add_reference_file(DocumentService.DOCUMENT_LIST,
binary_data=file.read(),
content_type=CONTENT_TYPES['xls'])
content_type=CONTENT_TYPES['xlsx'])
file.close()
def create_approval(
self,
study=None,
workflow=None,
approver_uid=None,
status=None,
version=None,
):
study = study or self.create_study()
workflow = workflow or self.create_workflow()
approver_uid = approver_uid or self.test_uid
status = status or ApprovalStatus.PENDING.value
version = version or 1
approval = ApprovalModel(study=study, workflow=workflow, approver_uid=approver_uid, status=status,
version=version)
session.add(approval)
session.commit()
return approval
def get_workflow_common(self, url, user):
rv = self.app.get(url,
headers=self.logged_in_headers(user),

View File

@ -0,0 +1,42 @@
<?xml version="1.0" encoding="UTF-8"?>
<bpmn:definitions xmlns:bpmn="http://www.omg.org/spec/BPMN/20100524/MODEL" xmlns:bpmndi="http://www.omg.org/spec/BPMN/20100524/DI" xmlns:dc="http://www.omg.org/spec/DD/20100524/DC" xmlns:di="http://www.omg.org/spec/DD/20100524/DI" id="Definitions_34b94b6" targetNamespace="http://bpmn.io/schema/bpmn" exporter="Camunda Modeler" exporterVersion="3.0.0-dev">
<bpmn:process id="Call_Activity_Get_Data" name="Call Activity Get Data" isExecutable="true">
<bpmn:startEvent id="StartEvent_1">
<bpmn:outgoing>Flow_07uhaa7</bpmn:outgoing>
</bpmn:startEvent>
<bpmn:sequenceFlow id="Flow_07uhaa7" sourceRef="StartEvent_1" targetRef="Activity_1mb2mnf" />
<bpmn:endEvent id="Event_1rokcus">
<bpmn:documentation># Call Event
&lt;div&gt;&lt;span&gt;Hello {{my_var}}&lt;/span&gt;&lt;/div&gt;</bpmn:documentation>
<bpmn:incoming>Flow_0apfnjq</bpmn:incoming>
</bpmn:endEvent>
<bpmn:sequenceFlow id="Flow_0apfnjq" sourceRef="Activity_1mb2mnf" targetRef="Event_1rokcus" />
<bpmn:scriptTask id="Activity_1mb2mnf" name="Create Data">
<bpmn:incoming>Flow_07uhaa7</bpmn:incoming>
<bpmn:outgoing>Flow_0apfnjq</bpmn:outgoing>
<bpmn:script>my_var = 'World'
my_other_var = 'Mike'</bpmn:script>
</bpmn:scriptTask>
</bpmn:process>
<bpmndi:BPMNDiagram id="BPMNDiagram_1">
<bpmndi:BPMNPlane id="BPMNPlane_1" bpmnElement="Call_Activity_Get_Data">
<bpmndi:BPMNEdge id="Flow_07uhaa7_di" bpmnElement="Flow_07uhaa7">
<di:waypoint x="215" y="177" />
<di:waypoint x="270" y="177" />
</bpmndi:BPMNEdge>
<bpmndi:BPMNEdge id="Flow_0apfnjq_di" bpmnElement="Flow_0apfnjq">
<di:waypoint x="370" y="177" />
<di:waypoint x="432" y="177" />
</bpmndi:BPMNEdge>
<bpmndi:BPMNShape id="_BPMNShape_StartEvent_2" bpmnElement="StartEvent_1">
<dc:Bounds x="179" y="159" width="36" height="36" />
</bpmndi:BPMNShape>
<bpmndi:BPMNShape id="Event_1rokcus_di" bpmnElement="Event_1rokcus">
<dc:Bounds x="432" y="159" width="36" height="36" />
</bpmndi:BPMNShape>
<bpmndi:BPMNShape id="Activity_0esr09m_di" bpmnElement="Activity_1mb2mnf">
<dc:Bounds x="270" y="137" width="100" height="80" />
</bpmndi:BPMNShape>
</bpmndi:BPMNPlane>
</bpmndi:BPMNDiagram>
</bpmn:definitions>

View File

@ -0,0 +1,70 @@
<?xml version="1.0" encoding="UTF-8"?>
<bpmn:definitions xmlns:bpmn="http://www.omg.org/spec/BPMN/20100524/MODEL" xmlns:bpmndi="http://www.omg.org/spec/BPMN/20100524/DI" xmlns:dc="http://www.omg.org/spec/DD/20100524/DC" xmlns:di="http://www.omg.org/spec/DD/20100524/DI" id="Definitions_f07329e" targetNamespace="http://bpmn.io/schema/bpmn" exporter="Camunda Modeler" exporterVersion="3.0.0-dev">
<bpmn:process id="Process_8200379" isExecutable="true">
<bpmn:startEvent id="StartEvent_1">
<bpmn:outgoing>Flow_1g3dpd7</bpmn:outgoing>
</bpmn:startEvent>
<bpmn:sequenceFlow id="Flow_1g3dpd7" sourceRef="StartEvent_1" targetRef="Activity_0wppf2v" />
<bpmn:sequenceFlow id="Flow_0ovgj6c" sourceRef="Activity_0wppf2v" targetRef="Activity_12zat0d" />
<bpmn:callActivity id="Activity_12zat0d" name="Get Data Call Activity" calledElement="Call_Activity_Get_Data">
<bpmn:incoming>Flow_0ovgj6c</bpmn:incoming>
<bpmn:outgoing>Flow_0qdgvah</bpmn:outgoing>
</bpmn:callActivity>
<bpmn:sequenceFlow id="Flow_0qdgvah" sourceRef="Activity_12zat0d" targetRef="Activity_1ta6769" />
<bpmn:endEvent id="Event_18dla68">
<bpmn:documentation># Main Workflow
Hello {{my_other_var}}
</bpmn:documentation>
<bpmn:incoming>Flow_0izaz4f</bpmn:incoming>
</bpmn:endEvent>
<bpmn:sequenceFlow id="Flow_0izaz4f" sourceRef="Activity_1ta6769" targetRef="Event_18dla68" />
<bpmn:scriptTask id="Activity_1ta6769" name="Print Data">
<bpmn:incoming>Flow_0qdgvah</bpmn:incoming>
<bpmn:outgoing>Flow_0izaz4f</bpmn:outgoing>
<bpmn:script>print(pre_var)
print(my_var)
print(my_other_var)</bpmn:script>
</bpmn:scriptTask>
<bpmn:scriptTask id="Activity_0wppf2v" name="Pre Data">
<bpmn:incoming>Flow_1g3dpd7</bpmn:incoming>
<bpmn:outgoing>Flow_0ovgj6c</bpmn:outgoing>
<bpmn:script>pre_var = 'some string'</bpmn:script>
</bpmn:scriptTask>
</bpmn:process>
<bpmndi:BPMNDiagram id="BPMNDiagram_1">
<bpmndi:BPMNPlane id="BPMNPlane_1" bpmnElement="Process_8200379">
<bpmndi:BPMNEdge id="Flow_0izaz4f_di" bpmnElement="Flow_0izaz4f">
<di:waypoint x="690" y="177" />
<di:waypoint x="752" y="177" />
</bpmndi:BPMNEdge>
<bpmndi:BPMNEdge id="Flow_0qdgvah_di" bpmnElement="Flow_0qdgvah">
<di:waypoint x="530" y="177" />
<di:waypoint x="590" y="177" />
</bpmndi:BPMNEdge>
<bpmndi:BPMNEdge id="Flow_0ovgj6c_di" bpmnElement="Flow_0ovgj6c">
<di:waypoint x="370" y="177" />
<di:waypoint x="430" y="177" />
</bpmndi:BPMNEdge>
<bpmndi:BPMNEdge id="Flow_1g3dpd7_di" bpmnElement="Flow_1g3dpd7">
<di:waypoint x="215" y="177" />
<di:waypoint x="270" y="177" />
</bpmndi:BPMNEdge>
<bpmndi:BPMNShape id="_BPMNShape_StartEvent_2" bpmnElement="StartEvent_1">
<dc:Bounds x="179" y="159" width="36" height="36" />
</bpmndi:BPMNShape>
<bpmndi:BPMNShape id="Activity_0mcej1g_di" bpmnElement="Activity_12zat0d">
<dc:Bounds x="430" y="137" width="100" height="80" />
</bpmndi:BPMNShape>
<bpmndi:BPMNShape id="Event_18dla68_di" bpmnElement="Event_18dla68">
<dc:Bounds x="752" y="159" width="36" height="36" />
</bpmndi:BPMNShape>
<bpmndi:BPMNShape id="Activity_1v8hse1_di" bpmnElement="Activity_1ta6769">
<dc:Bounds x="590" y="137" width="100" height="80" />
</bpmndi:BPMNShape>
<bpmndi:BPMNShape id="Activity_1mhwjko_di" bpmnElement="Activity_0wppf2v">
<dc:Bounds x="270" y="137" width="100" height="80" />
</bpmndi:BPMNShape>
</bpmndi:BPMNPlane>
</bpmndi:BPMNDiagram>
</bpmn:definitions>

View File

@ -0,0 +1,53 @@
<?xml version="1.0" encoding="UTF-8"?>
<bpmn:definitions xmlns:bpmn="http://www.omg.org/spec/BPMN/20100524/MODEL" xmlns:bpmndi="http://www.omg.org/spec/BPMN/20100524/DI" xmlns:dc="http://www.omg.org/spec/DD/20100524/DC" xmlns:di="http://www.omg.org/spec/DD/20100524/DI" id="Definitions_3fd9241" targetNamespace="http://bpmn.io/schema/bpmn" exporter="Camunda Modeler" exporterVersion="3.0.0-dev">
<bpmn:process id="Process_9d7b2c2" isExecutable="true">
<bpmn:startEvent id="StartEvent_1">
<bpmn:outgoing>Flow_17nzcku</bpmn:outgoing>
</bpmn:startEvent>
<bpmn:sequenceFlow id="Flow_17nzcku" sourceRef="StartEvent_1" targetRef="Activity_GetCheckStudy" />
<bpmn:scriptTask id="Activity_GetCheckStudy" name="Get Check Study">
<bpmn:incoming>Flow_17nzcku</bpmn:incoming>
<bpmn:outgoing>Flow_0oozrfg</bpmn:outgoing>
<bpmn:script>check_study = check_study()</bpmn:script>
</bpmn:scriptTask>
<bpmn:sequenceFlow id="Flow_0oozrfg" sourceRef="Activity_GetCheckStudy" targetRef="Activity_DisplayCheckStudy" />
<bpmn:manualTask id="Activity_DisplayCheckStudy" name="Display Check Study">
<bpmn:documentation># Check Study
&lt;div&gt;&lt;span&gt;{{check_study}}&lt;/span&gt;&lt;/div&gt;</bpmn:documentation>
<bpmn:incoming>Flow_0oozrfg</bpmn:incoming>
<bpmn:outgoing>Flow_10sc31i</bpmn:outgoing>
</bpmn:manualTask>
<bpmn:endEvent id="Event_0embsc7">
<bpmn:incoming>Flow_10sc31i</bpmn:incoming>
</bpmn:endEvent>
<bpmn:sequenceFlow id="Flow_10sc31i" sourceRef="Activity_DisplayCheckStudy" targetRef="Event_0embsc7" />
</bpmn:process>
<bpmndi:BPMNDiagram id="BPMNDiagram_1">
<bpmndi:BPMNPlane id="BPMNPlane_1" bpmnElement="Process_9d7b2c2">
<bpmndi:BPMNEdge id="Flow_10sc31i_di" bpmnElement="Flow_10sc31i">
<di:waypoint x="530" y="177" />
<di:waypoint x="592" y="177" />
</bpmndi:BPMNEdge>
<bpmndi:BPMNEdge id="Flow_0oozrfg_di" bpmnElement="Flow_0oozrfg">
<di:waypoint x="370" y="177" />
<di:waypoint x="430" y="177" />
</bpmndi:BPMNEdge>
<bpmndi:BPMNEdge id="Flow_17nzcku_di" bpmnElement="Flow_17nzcku">
<di:waypoint x="215" y="177" />
<di:waypoint x="270" y="177" />
</bpmndi:BPMNEdge>
<bpmndi:BPMNShape id="_BPMNShape_StartEvent_2" bpmnElement="StartEvent_1">
<dc:Bounds x="179" y="159" width="36" height="36" />
</bpmndi:BPMNShape>
<bpmndi:BPMNShape id="Activity_1f9d5ew_di" bpmnElement="Activity_GetCheckStudy">
<dc:Bounds x="270" y="137" width="100" height="80" />
</bpmndi:BPMNShape>
<bpmndi:BPMNShape id="Activity_01vscea_di" bpmnElement="Activity_DisplayCheckStudy">
<dc:Bounds x="430" y="137" width="100" height="80" />
</bpmndi:BPMNShape>
<bpmndi:BPMNShape id="Event_0embsc7_di" bpmnElement="Event_0embsc7">
<dc:Bounds x="592" y="159" width="36" height="36" />
</bpmndi:BPMNShape>
</bpmndi:BPMNPlane>
</bpmndi:BPMNDiagram>
</bpmn:definitions>

View File

@ -0,0 +1,88 @@
<?xml version="1.0" encoding="UTF-8"?>
<bpmn:definitions xmlns:bpmn="http://www.omg.org/spec/BPMN/20100524/MODEL" xmlns:bpmndi="http://www.omg.org/spec/BPMN/20100524/DI" xmlns:dc="http://www.omg.org/spec/DD/20100524/DC" xmlns:camunda="http://camunda.org/schema/1.0/bpmn" xmlns:di="http://www.omg.org/spec/DD/20100524/DI" id="Definitions_0b469f0" targetNamespace="http://bpmn.io/schema/bpmn" exporter="Camunda Modeler" exporterVersion="3.5.0">
<bpmn:process id="Process_4b7fa29" isExecutable="true">
<bpmn:startEvent id="StartEvent_1">
<bpmn:outgoing>Flow_1kvuzs1</bpmn:outgoing>
</bpmn:startEvent>
<bpmn:sequenceFlow id="Flow_1kvuzs1" sourceRef="StartEvent_1" targetRef="Activity_0gtrm5e" />
<bpmn:userTask id="Activity_0gtrm5e" name="Select Enum" camunda:formKey="Upload Application">
<bpmn:extensionElements>
<camunda:formData>
<camunda:formField id="IRB_HSR_Application_Type" label="IRB-HSR Application Type" type="enum">
<camunda:properties>
<camunda:property id="file_data" value="Study_App_Doc" />
<camunda:property id="spreadsheet.name" value="IRB_HSR_Application_Type.xlsx" />
<camunda:property id="spreadsheet.label.column" value="Label" />
<camunda:property id="spreadsheet.value.column" value="Value" />
<camunda:property id="group" value="Application" />
</camunda:properties>
<camunda:validation>
<camunda:constraint name="required" config="True" />
</camunda:validation>
</camunda:formField>
<camunda:formField id="Study_App_Doc" label="IRB-HSR Application" type="file" />
<camunda:formField id="my_test_field" label="Nickname" type="string">
<camunda:properties>
<camunda:property id="file_data" value="Study_App_Doc" />
</camunda:properties>
</camunda:formField>
<camunda:formField id="some_date" label="Date" type="date">
<camunda:properties>
<camunda:property id="file_data" value="Study_App_Doc" />
</camunda:properties>
</camunda:formField>
<camunda:formField id="a_boolean" label="A Boolean" type="boolean">
<camunda:properties>
<camunda:property id="file_data" value="Study_App_Doc" />
</camunda:properties>
</camunda:formField>
<camunda:formField id="the_number" label="The Number" type="long">
<camunda:properties>
<camunda:property id="file_data" value="Study_App_Doc" />
</camunda:properties>
</camunda:formField>
</camunda:formData>
</bpmn:extensionElements>
<bpmn:incoming>Flow_1kvuzs1</bpmn:incoming>
<bpmn:outgoing>Flow_0han7ki</bpmn:outgoing>
</bpmn:userTask>
<bpmn:sequenceFlow id="Flow_0han7ki" sourceRef="Activity_0gtrm5e" targetRef="Activity_0gpetln" />
<bpmn:manualTask id="Activity_0gpetln" name="Print Enum">
<bpmn:documentation># Enum</bpmn:documentation>
<bpmn:incoming>Flow_0han7ki</bpmn:incoming>
<bpmn:outgoing>Flow_0h5cdg9</bpmn:outgoing>
</bpmn:manualTask>
<bpmn:endEvent id="Event_124aupd">
<bpmn:incoming>Flow_0h5cdg9</bpmn:incoming>
</bpmn:endEvent>
<bpmn:sequenceFlow id="Flow_0h5cdg9" sourceRef="Activity_0gpetln" targetRef="Event_124aupd" />
</bpmn:process>
<bpmndi:BPMNDiagram id="BPMNDiagram_1">
<bpmndi:BPMNPlane id="BPMNPlane_1" bpmnElement="Process_4b7fa29">
<bpmndi:BPMNEdge id="Flow_0h5cdg9_di" bpmnElement="Flow_0h5cdg9">
<di:waypoint x="530" y="117" />
<di:waypoint x="592" y="117" />
</bpmndi:BPMNEdge>
<bpmndi:BPMNEdge id="Flow_0han7ki_di" bpmnElement="Flow_0han7ki">
<di:waypoint x="370" y="117" />
<di:waypoint x="430" y="117" />
</bpmndi:BPMNEdge>
<bpmndi:BPMNEdge id="Flow_1kvuzs1_di" bpmnElement="Flow_1kvuzs1">
<di:waypoint x="215" y="117" />
<di:waypoint x="270" y="117" />
</bpmndi:BPMNEdge>
<bpmndi:BPMNShape id="_BPMNShape_StartEvent_2" bpmnElement="StartEvent_1">
<dc:Bounds x="179" y="99" width="36" height="36" />
</bpmndi:BPMNShape>
<bpmndi:BPMNShape id="Activity_11t0hqf_di" bpmnElement="Activity_0gtrm5e">
<dc:Bounds x="270" y="77" width="100" height="80" />
</bpmndi:BPMNShape>
<bpmndi:BPMNShape id="Activity_0fax1lv_di" bpmnElement="Activity_0gpetln">
<dc:Bounds x="430" y="77" width="100" height="80" />
</bpmndi:BPMNShape>
<bpmndi:BPMNShape id="Event_124aupd_di" bpmnElement="Event_124aupd">
<dc:Bounds x="592" y="99" width="36" height="36" />
</bpmndi:BPMNShape>
</bpmndi:BPMNPlane>
</bpmndi:BPMNDiagram>
</bpmn:definitions>

View File

@ -16,6 +16,12 @@
OGC will upload the Non-Funded Executed Agreement after it has been negotiated by OSP contract negotiator.</bpmn:documentation>
<bpmn:extensionElements>
<camunda:formData>
<camunda:formField id="Date" label="Version Date" type="date">
<camunda:properties>
<camunda:property id="group" value="PCRApproval" />
<camunda:property id="file_data" value="Some_File" />
</camunda:properties>
</camunda:formField>
<camunda:formField id="file_type" type="enum" defaultValue="AD_CoCApp">
<camunda:value id="AD_CoCApp" name="Ancillary Documents / Case Report Form" />
<camunda:value id="AD_CoCAppr" name="Ancillary Documents / CoC Approval" />
@ -32,12 +38,6 @@ OGC will upload the Non-Funded Executed Agreement after it has been negotiated b
<camunda:property id="file_data" value="Some_File" />
</camunda:properties>
</camunda:formField>
<camunda:formField id="Date" label="Version Date" type="date">
<camunda:properties>
<camunda:property id="group" value="PCRApproval" />
<camunda:property id="file_data" value="Some_File" />
</camunda:properties>
</camunda:formField>
</camunda:formData>
</bpmn:extensionElements>
<bpmn:incoming>SequenceFlow_0ea9hvd</bpmn:incoming>
@ -67,4 +67,4 @@ OGC will upload the Non-Funded Executed Agreement after it has been negotiated b
</bpmndi:BPMNShape>
</bpmndi:BPMNPlane>
</bpmndi:BPMNDiagram>
</bpmn:definitions>
</bpmn:definitions>

View File

@ -1,5 +1,5 @@
<?xml version="1.0" encoding="UTF-8"?>
<bpmn:definitions xmlns:bpmn="http://www.omg.org/spec/BPMN/20100524/MODEL" xmlns:bpmndi="http://www.omg.org/spec/BPMN/20100524/DI" xmlns:dc="http://www.omg.org/spec/DD/20100524/DC" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns:di="http://www.omg.org/spec/DD/20100524/DI" id="Definitions_1j7idla" targetNamespace="http://bpmn.io/schema/bpmn" exporter="Camunda Modeler" exporterVersion="3.7.0">
<bpmn:definitions xmlns:bpmn="http://www.omg.org/spec/BPMN/20100524/MODEL" xmlns:bpmndi="http://www.omg.org/spec/BPMN/20100524/DI" xmlns:dc="http://www.omg.org/spec/DD/20100524/DC" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns:di="http://www.omg.org/spec/DD/20100524/DI" id="Definitions_1j7idla" targetNamespace="http://bpmn.io/schema/bpmn" exporter="Camunda Modeler" exporterVersion="3.7.3">
<bpmn:process id="Process_18biih5" isExecutable="true">
<bpmn:startEvent id="StartEvent_1">
<bpmn:outgoing>SequenceFlow_1pnq3kg</bpmn:outgoing>
@ -8,32 +8,34 @@
<bpmn:endEvent id="EndEvent_063bpg6">
<bpmn:incoming>SequenceFlow_12pf6um</bpmn:incoming>
</bpmn:endEvent>
<bpmn:scriptTask id="Invalid_Script_Task" name="An Invalid Script Reference">
<bpmn:scriptTask id="Invalid_Script_Task" name="A Syntax Error">
<bpmn:incoming>SequenceFlow_1pnq3kg</bpmn:incoming>
<bpmn:outgoing>SequenceFlow_12pf6um</bpmn:outgoing>
<bpmn:script>a really bad error that should fail</bpmn:script>
<bpmn:script>x = 1
y = 2
x + y === a</bpmn:script>
</bpmn:scriptTask>
<bpmn:sequenceFlow id="SequenceFlow_12pf6um" sourceRef="Invalid_Script_Task" targetRef="EndEvent_063bpg6" />
</bpmn:process>
<bpmndi:BPMNDiagram id="BPMNDiagram_1">
<bpmndi:BPMNPlane id="BPMNPlane_1" bpmnElement="Process_18biih5">
<bpmndi:BPMNShape id="_BPMNShape_StartEvent_2" bpmnElement="StartEvent_1">
<dc:Bounds x="179" y="99" width="36" height="36" />
</bpmndi:BPMNShape>
<bpmndi:BPMNEdge id="SequenceFlow_12pf6um_di" bpmnElement="SequenceFlow_12pf6um">
<di:waypoint x="390" y="117" />
<di:waypoint x="442" y="117" />
</bpmndi:BPMNEdge>
<bpmndi:BPMNEdge id="SequenceFlow_1pnq3kg_di" bpmnElement="SequenceFlow_1pnq3kg">
<di:waypoint x="215" y="117" />
<di:waypoint x="290" y="117" />
</bpmndi:BPMNEdge>
<bpmndi:BPMNShape id="_BPMNShape_StartEvent_2" bpmnElement="StartEvent_1">
<dc:Bounds x="179" y="99" width="36" height="36" />
</bpmndi:BPMNShape>
<bpmndi:BPMNShape id="EndEvent_063bpg6_di" bpmnElement="EndEvent_063bpg6">
<dc:Bounds x="442" y="99" width="36" height="36" />
</bpmndi:BPMNShape>
<bpmndi:BPMNShape id="ScriptTask_1imeym0_di" bpmnElement="Invalid_Script_Task">
<dc:Bounds x="290" y="77" width="100" height="80" />
</bpmndi:BPMNShape>
<bpmndi:BPMNEdge id="SequenceFlow_12pf6um_di" bpmnElement="SequenceFlow_12pf6um">
<di:waypoint x="390" y="117" />
<di:waypoint x="442" y="117" />
</bpmndi:BPMNEdge>
</bpmndi:BPMNPlane>
</bpmndi:BPMNDiagram>
</bpmn:definitions>

View File

@ -0,0 +1,41 @@
<?xml version="1.0" encoding="UTF-8"?>
<bpmn:definitions xmlns:bpmn="http://www.omg.org/spec/BPMN/20100524/MODEL" xmlns:bpmndi="http://www.omg.org/spec/BPMN/20100524/DI" xmlns:dc="http://www.omg.org/spec/DD/20100524/DC" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns:di="http://www.omg.org/spec/DD/20100524/DI" id="Definitions_1j7idla" targetNamespace="http://bpmn.io/schema/bpmn" exporter="Camunda Modeler" exporterVersion="3.7.3">
<bpmn:process id="Process_18biih5" isExecutable="true">
<bpmn:startEvent id="StartEvent_1">
<bpmn:outgoing>SequenceFlow_1pnq3kg</bpmn:outgoing>
</bpmn:startEvent>
<bpmn:sequenceFlow id="SequenceFlow_1pnq3kg" sourceRef="StartEvent_1" targetRef="Invalid_Script_Task" />
<bpmn:endEvent id="EndEvent_063bpg6">
<bpmn:incoming>SequenceFlow_12pf6um</bpmn:incoming>
</bpmn:endEvent>
<bpmn:scriptTask id="Invalid_Script_Task" name="An Invalid Variable">
<bpmn:incoming>SequenceFlow_1pnq3kg</bpmn:incoming>
<bpmn:outgoing>SequenceFlow_12pf6um</bpmn:outgoing>
<bpmn:script>x = 1
y = 2
x + a == 3</bpmn:script>
</bpmn:scriptTask>
<bpmn:sequenceFlow id="SequenceFlow_12pf6um" sourceRef="Invalid_Script_Task" targetRef="EndEvent_063bpg6" />
</bpmn:process>
<bpmndi:BPMNDiagram id="BPMNDiagram_1">
<bpmndi:BPMNPlane id="BPMNPlane_1" bpmnElement="Process_18biih5">
<bpmndi:BPMNEdge id="SequenceFlow_12pf6um_di" bpmnElement="SequenceFlow_12pf6um">
<di:waypoint x="390" y="117" />
<di:waypoint x="442" y="117" />
</bpmndi:BPMNEdge>
<bpmndi:BPMNEdge id="SequenceFlow_1pnq3kg_di" bpmnElement="SequenceFlow_1pnq3kg">
<di:waypoint x="215" y="117" />
<di:waypoint x="290" y="117" />
</bpmndi:BPMNEdge>
<bpmndi:BPMNShape id="_BPMNShape_StartEvent_2" bpmnElement="StartEvent_1">
<dc:Bounds x="179" y="99" width="36" height="36" />
</bpmndi:BPMNShape>
<bpmndi:BPMNShape id="EndEvent_063bpg6_di" bpmnElement="EndEvent_063bpg6">
<dc:Bounds x="442" y="99" width="36" height="36" />
</bpmndi:BPMNShape>
<bpmndi:BPMNShape id="ScriptTask_1imeym0_di" bpmnElement="Invalid_Script_Task">
<dc:Bounds x="290" y="77" width="100" height="80" />
</bpmndi:BPMNShape>
</bpmndi:BPMNPlane>
</bpmndi:BPMNDiagram>
</bpmn:definitions>

View File

@ -0,0 +1,3 @@
[
{"DETAIL": "Passed validation.", "STATUS": "No Error"}
]

View File

@ -62,5 +62,6 @@
"OTHER_VULNERABLE_DESC": null,
"PRC_NUMBER": null,
"SPONSORS_PROTOCOL_REVISION_DATE": "2021-04-20",
"UPLOAD_COMPLETE": null
"UPLOAD_COMPLETE": null,
"REVIEW_TYPE": 2
}

View File

@ -0,0 +1,67 @@
{
"DSMB": 1,
"DSMB_FREQUENCY": 2,
"GCRC_NUMBER": "9",
"IBC_NUMBER": "7",
"IDE": "12345",
"IND_1": "1234",
"IND_2": "2345",
"IND_3": "3456",
"IRBREVIEWERADMIN": null,
"IS_ADULT_PARTICIPANT": null,
"IS_APPROVED_DEVICE": null,
"IS_AUX": null,
"IS_BIOMEDICAL": null,
"IS_CANCER_PATIENT": null,
"IS_CENTRAL_REG_DB": null,
"IS_CHART_REVIEW": null,
"IS_COMMITTEE_CONFLICT": null,
"IS_CONSENT_WAIVER": null,
"IS_DB": null,
"IS_ELDERLY_POP": null,
"IS_ENGAGED_RESEARCH": null,
"IS_FETUS_POP": null,
"IS_FINANCIAL_CONFLICT": null,
"IS_FOR_CANCER_CENTER": null,
"IS_FUNDING_SOURCE": null,
"IS_GCRC": null,
"IS_GENE_TRANSFER": null,
"IS_GRANT": null,
"IS_HGT": null,
"IS_IBC": null,
"IS_IDE": null,
"IS_IND": null,
"IS_MENTAL_IMPAIRMENT_POP": null,
"IS_MINOR": null,
"IS_MINOR_PARTICIPANT": null,
"IS_MULTI_SITE": null,
"IS_NOT_CONSENT_WAIVER": null,
"IS_NOT_PRC_WAIVER": null,
"IS_OTHER_VULNERABLE_POP": null,
"IS_OUTSIDE_CONTRACT": null,
"IS_PI_INITIATED": null,
"IS_PI_SCHOOL": null,
"IS_PRC": null,
"IS_PRC_DSMP": null,
"IS_PREGNANT_POP": null,
"IS_PRISONERS_POP": null,
"IS_QUALITATIVE": null,
"IS_RADIATION": null,
"IS_REVIEW_BY_CENTRAL_IRB": null,
"IS_SPONSOR": null,
"IS_SPONSOR_MONITORING": null,
"IS_SURROGATE_CONSENT": null,
"IS_TISSUE_BANKING": null,
"IS_UVA_DB": null,
"IS_UVA_IDE": null,
"IS_UVA_IND": null,
"IS_UVA_LOCATION": null,
"IS_UVA_PI_MULTI": null,
"MULTI_SITE_LOCATIONS": null,
"NON_UVA_LOCATION": null,
"OTHER_VULNERABLE_DESC": null,
"PRC_NUMBER": null,
"SPONSORS_PROTOCOL_REVISION_DATE": "2021-04-20",
"UPLOAD_COMPLETE": null,
"REVIEW_TYPE": 99
}

View File

@ -0,0 +1,57 @@
<?xml version="1.0" encoding="UTF-8"?>
<bpmn:definitions xmlns:bpmn="http://www.omg.org/spec/BPMN/20100524/MODEL" xmlns:bpmndi="http://www.omg.org/spec/BPMN/20100524/DI" xmlns:dc="http://www.omg.org/spec/DD/20100524/DC" xmlns:di="http://www.omg.org/spec/DD/20100524/DI" xmlns:camunda="http://camunda.org/schema/1.0/bpmn" id="Definitions_8983dae" targetNamespace="http://bpmn.io/schema/bpmn" exporter="Camunda Modeler" exporterVersion="3.0.0-dev">
<bpmn:process id="Process_2a4c7a5" isExecutable="true">
<bpmn:startEvent id="StartEvent_1">
<bpmn:outgoing>Flow_13jyds8</bpmn:outgoing>
</bpmn:startEvent>
<bpmn:sequenceFlow id="Flow_13jyds8" sourceRef="StartEvent_1" targetRef="Activity_GetData" />
<bpmn:endEvent id="Event_03x966p">
<bpmn:incoming>Flow_18kybym</bpmn:incoming>
</bpmn:endEvent>
<bpmn:sequenceFlow id="Flow_18kybym" sourceRef="Activity_RunScript" targetRef="Event_03x966p" />
<bpmn:scriptTask id="Activity_RunScript" name="Run Script">
<bpmn:incoming>Flow_1jqzan6</bpmn:incoming>
<bpmn:outgoing>Flow_18kybym</bpmn:outgoing>
<bpmn:script>print(ham)</bpmn:script>
</bpmn:scriptTask>
<bpmn:sequenceFlow id="Flow_1jqzan6" sourceRef="Activity_GetData" targetRef="Activity_RunScript" />
<bpmn:userTask id="Activity_GetData" name="Get Data" camunda:formKey="DataForm">
<bpmn:extensionElements>
<camunda:formData>
<camunda:formField id="user" label="User" type="string" defaultValue="World" />
<camunda:formField id="spam" label="Spam" type="boolean" defaultValue="False" />
</camunda:formData>
</bpmn:extensionElements>
<bpmn:incoming>Flow_13jyds8</bpmn:incoming>
<bpmn:outgoing>Flow_1jqzan6</bpmn:outgoing>
</bpmn:userTask>
</bpmn:process>
<bpmndi:BPMNDiagram id="BPMNDiagram_1">
<bpmndi:BPMNPlane id="BPMNPlane_1" bpmnElement="Process_2a4c7a5">
<bpmndi:BPMNEdge id="Flow_18kybym_di" bpmnElement="Flow_18kybym">
<di:waypoint x="370" y="177" />
<di:waypoint x="432" y="177" />
</bpmndi:BPMNEdge>
<bpmndi:BPMNEdge id="Flow_13jyds8_di" bpmnElement="Flow_13jyds8">
<di:waypoint x="48" y="177" />
<di:waypoint x="90" y="177" />
</bpmndi:BPMNEdge>
<bpmndi:BPMNEdge id="Flow_1jqzan6_di" bpmnElement="Flow_1jqzan6">
<di:waypoint x="190" y="177" />
<di:waypoint x="270" y="177" />
</bpmndi:BPMNEdge>
<bpmndi:BPMNShape id="Event_03x966p_di" bpmnElement="Event_03x966p">
<dc:Bounds x="432" y="159" width="36" height="36" />
</bpmndi:BPMNShape>
<bpmndi:BPMNShape id="Activity_1d9d2u8_di" bpmnElement="Activity_RunScript">
<dc:Bounds x="270" y="137" width="100" height="80" />
</bpmndi:BPMNShape>
<bpmndi:BPMNShape id="_BPMNShape_StartEvent_2" bpmnElement="StartEvent_1">
<dc:Bounds x="12" y="159" width="36" height="36" />
</bpmndi:BPMNShape>
<bpmndi:BPMNShape id="Activity_10ypwag_di" bpmnElement="Activity_GetData">
<dc:Bounds x="90" y="137" width="100" height="80" />
</bpmndi:BPMNShape>
</bpmndi:BPMNPlane>
</bpmndi:BPMNDiagram>
</bpmn:definitions>

View File

@ -0,0 +1,76 @@
<?xml version="1.0" encoding="UTF-8"?>
<bpmn:definitions xmlns:bpmn="http://www.omg.org/spec/BPMN/20100524/MODEL" xmlns:bpmndi="http://www.omg.org/spec/BPMN/20100524/DI" xmlns:dc="http://www.omg.org/spec/DD/20100524/DC" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns:di="http://www.omg.org/spec/DD/20100524/DI" id="Definitions_0ilr8m3" targetNamespace="http://bpmn.io/schema/bpmn" exporter="Camunda Modeler" exporterVersion="3.7.3">
<bpmn:process id="timer" isExecutable="true">
<bpmn:startEvent id="StartEvent_1">
<bpmn:outgoing>Flow_1pahvlr</bpmn:outgoing>
</bpmn:startEvent>
<bpmn:manualTask id="get_coffee" name="Eat Spam">
<bpmn:incoming>Flow_1pahvlr</bpmn:incoming>
<bpmn:outgoing>Flow_1pvkgnu</bpmn:outgoing>
</bpmn:manualTask>
<bpmn:manualTask id="back_to_work" name="Get Back To Work">
<bpmn:incoming>Flow_1pvkgnu</bpmn:incoming>
<bpmn:outgoing>Flow_1ekgt3x</bpmn:outgoing>
</bpmn:manualTask>
<bpmn:endEvent id="Event_03w65sk">
<bpmn:incoming>Flow_1ekgt3x</bpmn:incoming>
</bpmn:endEvent>
<bpmn:sequenceFlow id="Flow_1ekgt3x" sourceRef="back_to_work" targetRef="Event_03w65sk" />
<bpmn:sequenceFlow id="Flow_1pvkgnu" sourceRef="get_coffee" targetRef="back_to_work" />
<bpmn:sequenceFlow id="Flow_1pahvlr" sourceRef="StartEvent_1" targetRef="get_coffee" />
<bpmn:sequenceFlow id="Flow_05lcpdf" sourceRef="Event_1txv76c" targetRef="Activity_0onlql2" />
<bpmn:scriptTask id="Activity_0onlql2" name="Do Bad Burp">
<bpmn:incoming>Flow_05lcpdf</bpmn:incoming>
    <bpmn:script># Tries to burp, fails.
my_burp = non_existent_burp_variable</bpmn:script>
</bpmn:scriptTask>
<bpmn:boundaryEvent id="Event_1txv76c" name="burp?" attachedToRef="get_coffee">
<bpmn:outgoing>Flow_05lcpdf</bpmn:outgoing>
<bpmn:timerEventDefinition id="TimerEventDefinition_1rvot87">
<bpmn:timeDuration xsi:type="bpmn:tFormalExpression">timedelta(seconds=.25)</bpmn:timeDuration>
</bpmn:timerEventDefinition>
</bpmn:boundaryEvent>
</bpmn:process>
<bpmndi:BPMNDiagram id="BPMNDiagram_1">
<bpmndi:BPMNPlane id="BPMNPlane_1" bpmnElement="timer">
<bpmndi:BPMNEdge id="Flow_1pahvlr_di" bpmnElement="Flow_1pahvlr">
<di:waypoint x="215" y="117" />
<di:waypoint x="260" y="117" />
</bpmndi:BPMNEdge>
<bpmndi:BPMNEdge id="Flow_1pvkgnu_di" bpmnElement="Flow_1pvkgnu">
<di:waypoint x="360" y="117" />
<di:waypoint x="533" y="117" />
</bpmndi:BPMNEdge>
<bpmndi:BPMNEdge id="Flow_1ekgt3x_di" bpmnElement="Flow_1ekgt3x">
<di:waypoint x="633" y="117" />
<di:waypoint x="682" y="117" />
</bpmndi:BPMNEdge>
<bpmndi:BPMNEdge id="Flow_05lcpdf_di" bpmnElement="Flow_05lcpdf">
<di:waypoint x="300" y="175" />
<di:waypoint x="300" y="180" />
<di:waypoint x="380" y="180" />
</bpmndi:BPMNEdge>
<bpmndi:BPMNShape id="_BPMNShape_StartEvent_2" bpmnElement="StartEvent_1">
<dc:Bounds x="179" y="99" width="36" height="36" />
</bpmndi:BPMNShape>
<bpmndi:BPMNShape id="Activity_0tjl9dd_di" bpmnElement="get_coffee">
<dc:Bounds x="260" y="77" width="100" height="80" />
</bpmndi:BPMNShape>
<bpmndi:BPMNShape id="Activity_15zi5m4_di" bpmnElement="back_to_work">
<dc:Bounds x="533" y="77" width="100" height="80" />
</bpmndi:BPMNShape>
<bpmndi:BPMNShape id="Event_03w65sk_di" bpmnElement="Event_03w65sk">
<dc:Bounds x="682" y="99" width="36" height="36" />
</bpmndi:BPMNShape>
<bpmndi:BPMNShape id="Activity_05taua6_di" bpmnElement="Activity_0onlql2">
<dc:Bounds x="380" y="140" width="100" height="80" />
</bpmndi:BPMNShape>
<bpmndi:BPMNShape id="Event_0xco4d9_di" bpmnElement="Event_1txv76c">
<dc:Bounds x="282" y="139" width="36" height="36" />
<bpmndi:BPMNLabel>
<dc:Bounds x="286" y="182" width="29" height="14" />
</bpmndi:BPMNLabel>
</bpmndi:BPMNShape>
</bpmndi:BPMNPlane>
</bpmndi:BPMNDiagram>
</bpmn:definitions>

View File

@ -1,14 +1,16 @@
import io
import json
import os
from tests.base_test import BaseTest
from crc import session, db
from crc import session, db, app
from crc.models.file import FileModel, FileType, FileSchema, FileModelSchema
from crc.models.workflow import WorkflowSpecModel
from crc.services.file_service import FileService
from crc.services.workflow_processor import WorkflowProcessor
from crc.models.data_store import DataStoreModel
from crc.services.document_service import DocumentService
from example_data import ExampleDataLoader
@ -110,20 +112,24 @@ class TestFilesApi(BaseTest):
self.assertEqual(0, len(json.loads(rv.get_data(as_text=True))))
def test_set_reference_file(self):
file_name = "irb_document_types.xls"
data = {'file': (io.BytesIO(b"abcdef"), "does_not_matter.xls")}
file_name = "documents.xlsx"
filepath = os.path.join(app.root_path, 'static', 'reference', 'irb_documents.xlsx')
with open(filepath, 'rb') as myfile:
file_data = myfile.read()
data = {'file': (io.BytesIO(file_data), file_name)}
rv = self.app.put('/v1.0/reference_file/%s' % file_name, data=data, follow_redirects=True,
content_type='multipart/form-data', headers=self.logged_in_headers())
self.assert_success(rv)
self.assertIsNotNone(rv.get_data())
json_data = json.loads(rv.get_data(as_text=True))
file = FileModelSchema().load(json_data, session=session)
self.assertEqual(FileType.xls, file.type)
self.assertEqual(FileType.xlsx, file.type)
self.assertTrue(file.is_reference)
self.assertEqual("application/vnd.ms-excel", file.content_type)
self.assertEqual("application/vnd.openxmlformats-officedocument.spreadsheetml.sheet", file.content_type)
self.assertEqual('dhf8r', json_data['user_uid'])
def test_set_reference_file_bad_extension(self):
file_name = FileService.DOCUMENT_LIST
file_name = DocumentService.DOCUMENT_LIST
data = {'file': (io.BytesIO(b"abcdef"), "does_not_matter.ppt")}
rv = self.app.put('/v1.0/reference_file/%s' % file_name, data=data, follow_redirects=True,
content_type='multipart/form-data', headers=self.logged_in_headers())
@ -131,22 +137,28 @@ class TestFilesApi(BaseTest):
def test_get_reference_file(self):
file_name = "irb_document_types.xls"
data = {'file': (io.BytesIO(b"abcdef"), "some crazy thing do not care.xls")}
filepath = os.path.join(app.root_path, 'static', 'reference', 'irb_documents.xlsx')
with open(filepath, 'rb') as myfile:
file_data = myfile.read()
data = {'file': (io.BytesIO(file_data), file_name)}
rv = self.app.put('/v1.0/reference_file/%s' % file_name, data=data, follow_redirects=True,
content_type='multipart/form-data', headers=self.logged_in_headers())
rv = self.app.get('/v1.0/reference_file/%s' % file_name, headers=self.logged_in_headers())
self.assert_success(rv)
data_out = rv.get_data()
self.assertEqual(b"abcdef", data_out)
self.assertEqual(file_data, data_out)
def test_list_reference_files(self):
ExampleDataLoader.clean_db()
file_name = FileService.DOCUMENT_LIST
data = {'file': (io.BytesIO(b"abcdef"), file_name)}
file_name = DocumentService.DOCUMENT_LIST
filepath = os.path.join(app.root_path, 'static', 'reference', 'irb_documents.xlsx')
with open(filepath, 'rb') as myfile:
file_data = myfile.read()
data = {'file': (io.BytesIO(file_data), file_name)}
rv = self.app.put('/v1.0/reference_file/%s' % file_name, data=data, follow_redirects=True,
content_type='multipart/form-data', headers=self.logged_in_headers())
self.assert_success(rv)
rv = self.app.get('/v1.0/reference_file',
follow_redirects=True,
content_type="application/json", headers=self.logged_in_headers())
@ -159,7 +171,8 @@ class TestFilesApi(BaseTest):
def test_update_file_info(self):
self.load_example_data()
file: FileModel = session.query(FileModel).first()
self.create_reference_document()
file: FileModel = session.query(FileModel).filter(FileModel.is_reference==False).first()
file.name = "silly_new_name.bpmn"
rv = self.app.put('/v1.0/file/%i' % file.id,

View File

@ -45,8 +45,8 @@ class TestLdapLookupScript(BaseTest):
}
script = Ldap()
with(self.assertRaises(ApiError)):
user_details = script.do_task(task, workflow.study_id, workflow.id, "PIComputingID")
user_details = script.do_task(task, workflow.study_id, workflow.id, "PIComputingID")
self.assertEqual({}, user_details)
def test_get_current_user_details(self):
self.load_example_data()

View File

@ -0,0 +1,25 @@
from tests.base_test import BaseTest
from crc import app
from unittest.mock import patch
class TestCheckStudy(BaseTest):
def test_check_study_script_validation(self):
self.load_example_data()
spec_model = self.load_test_spec('check_study_script')
rv = self.app.get('/v1.0/workflow-specification/%s/validate' % spec_model.id, headers=self.logged_in_headers())
self.assertEqual([], rv.json)
@patch('crc.services.protocol_builder.requests.get')
def test_check_study(self, mock_get):
        app.config['PB_ENABLED'] = True
mock_get.return_value.ok = True
mock_get.return_value.text = self.protocol_builder_response('check_study.json')
workflow = self.create_workflow('check_study_script')
workflow_api = self.get_workflow_api(workflow)
task = workflow_api.next_task
self.assertIn('DETAIL', task.documentation)
self.assertIn('STATUS', task.documentation)

View File

@ -2,6 +2,7 @@ from tests.base_test import BaseTest
from crc import session
from crc.models.study import StudyModel, StudyStatus, StudySchema
import json
from unittest.mock import patch
class TestStudyActionsStatus(BaseTest):
@ -20,8 +21,11 @@ class TestStudyActionsStatus(BaseTest):
study_result = session.query(StudyModel).filter(StudyModel.id == study.id).first()
return study_result
def test_hold_study(self):
@patch('crc.services.protocol_builder.ProtocolBuilderService.get_study_details') # mock_details
def test_hold_study(self, mock_details):
self.load_example_data()
details_response = self.protocol_builder_response('study_details.json')
mock_details.return_value = json.loads(details_response)
study = session.query(StudyModel).first()
self.assertEqual(study.status, StudyStatus.in_progress)
@ -33,8 +37,11 @@ class TestStudyActionsStatus(BaseTest):
study_result = self.update_study_status(study, study_schema)
self.assertEqual(StudyStatus.hold, study_result.status)
def test_abandon_study(self):
@patch('crc.services.protocol_builder.ProtocolBuilderService.get_study_details') # mock_details
def test_abandon_study(self, mock_details):
self.load_example_data()
details_response = self.protocol_builder_response('study_details.json')
mock_details.return_value = json.loads(details_response)
study = session.query(StudyModel).first()
self.assertEqual(study.status, StudyStatus.in_progress)
@ -46,8 +53,11 @@ class TestStudyActionsStatus(BaseTest):
study_result = self.update_study_status(study, study_schema)
self.assertEqual(StudyStatus.abandoned, study_result.status)
def test_open_enrollment_study(self):
@patch('crc.services.protocol_builder.ProtocolBuilderService.get_study_details') # mock_details
def test_open_enrollment_study(self, mock_details):
self.load_example_data()
details_response = self.protocol_builder_response('study_details.json')
mock_details.return_value = json.loads(details_response)
study = session.query(StudyModel).first()
self.assertEqual(study.status, StudyStatus.in_progress)

View File

@ -138,10 +138,13 @@ class TestSudySponsorsScript(BaseTest):
# who is allowed access
@patch('crc.services.protocol_builder.ProtocolBuilderService.get_study_details') # mock_details
@patch('crc.services.protocol_builder.requests.get')
def test_study_sponsors_script_ensure_access(self, mock_get):
def test_study_sponsors_script_ensure_access(self, mock_get, mock_details):
mock_get.return_value.ok = True
mock_get.return_value.text = self.protocol_builder_response('sponsors.json')
details_response = self.protocol_builder_response('study_details.json')
mock_details.return_value = json.loads(details_response)
flask.g.user = UserModel(uid='dhf8r')
app.config['PB_ENABLED'] = True

View File

@ -5,6 +5,7 @@ from crc.models.study import StudyModel, StudySchema
from crc.models.workflow import WorkflowModel, WorkflowSpecModel
import json
from unittest.mock import patch
class TestStudyCancellations(BaseTest):
@ -60,7 +61,10 @@ class TestStudyCancellations(BaseTest):
self.assertEqual('Activity_Modify', third_task.name)
return workflow_api, third_task
def test_before_cancel(self):
@patch('crc.services.protocol_builder.ProtocolBuilderService.get_study_details') # mock_details
def test_before_cancel(self, mock_details):
details_response = self.protocol_builder_response('study_details.json')
mock_details.return_value = json.loads(details_response)
workflow, study_id = self.load_workflow()
self.get_first_task(workflow)
@ -68,7 +72,10 @@ class TestStudyCancellations(BaseTest):
study_result = self.put_study_on_hold(study_id)
self.assertEqual('Beer consumption in the bipedal software engineer', study_result.title)
def test_first_cancel(self):
@patch('crc.services.protocol_builder.ProtocolBuilderService.get_study_details') # mock_details
def test_first_cancel(self, mock_details):
details_response = self.protocol_builder_response('study_details.json')
mock_details.return_value = json.loads(details_response)
workflow, study_id = self.load_workflow()
workflow_api, first_task = self.get_first_task(workflow)
@ -77,7 +84,10 @@ class TestStudyCancellations(BaseTest):
study_result = self.put_study_on_hold(study_id)
self.assertEqual('New Title', study_result.title)
def test_second_cancel(self):
@patch('crc.services.protocol_builder.ProtocolBuilderService.get_study_details') # mock_details
def test_second_cancel(self, mock_details):
details_response = self.protocol_builder_response('study_details.json')
mock_details.return_value = json.loads(details_response)
workflow, study_id = self.load_workflow()
workflow_api, first_task = self.get_first_task(workflow)
@ -90,7 +100,10 @@ class TestStudyCancellations(BaseTest):
study_result = self.put_study_on_hold(study_id)
self.assertEqual('Second Title', study_result.title)
def test_after_cancel(self):
@patch('crc.services.protocol_builder.ProtocolBuilderService.get_study_details') # mock_details
def test_after_cancel(self, mock_details):
details_response = self.protocol_builder_response('study_details.json')
mock_details.return_value = json.loads(details_response)
workflow, study_id = self.load_workflow()
workflow_api, first_task = self.get_first_task(workflow)

View File

@ -1,4 +1,3 @@
import json
from SpiffWorkflow.bpmn.PythonScriptEngine import Box
@ -15,6 +14,7 @@ from crc.services.file_service import FileService
from crc.services.study_service import StudyService
from crc.services.workflow_processor import WorkflowProcessor
from crc.scripts.file_data_set import FileDataSet
from crc.services.document_service import DocumentService
class TestStudyDetailsDocumentsScript(BaseTest):
@ -43,8 +43,8 @@ class TestStudyDetailsDocumentsScript(BaseTest):
# Remove the reference file.
file_model = db.session.query(FileModel). \
filter(FileModel.is_reference == True). \
filter(FileModel.name == FileService.DOCUMENT_LIST).first()
filter(FileModel.is_reference is True). \
filter(FileModel.name == DocumentService.DOCUMENT_LIST).first()
if file_model:
db.session.query(FileDataModel).filter(FileDataModel.file_model_id == file_model.id).delete()
db.session.query(FileModel).filter(FileModel.id == file_model.id).delete()
@ -71,7 +71,7 @@ class TestStudyDetailsDocumentsScript(BaseTest):
def test_load_lookup_data(self):
self.create_reference_document()
dict = FileService.get_reference_data(FileService.DOCUMENT_LIST, 'code', ['id'])
dict = DocumentService.get_dictionary()
self.assertIsNotNone(dict)
def get_required_docs(self):

View File

@ -54,17 +54,20 @@ class TestStudyService(BaseTest):
ExampleDataLoader().load_reference_documents()
return user
@patch('crc.services.protocol_builder.ProtocolBuilderService.get_study_details') # mock_details
@patch('crc.services.protocol_builder.ProtocolBuilderService.get_required_docs') # mock_docs
def test_total_tasks_updated(self, mock_docs):
def test_total_tasks_updated(self, mock_docs, mock_details):
"""Assure that as a users progress is available when getting a list of studies for that user."""
app.config['PB_ENABLED'] = True
docs_response = self.protocol_builder_response('required_docs.json')
mock_docs.return_value = json.loads(docs_response)
details_response = self.protocol_builder_response('study_details.json')
mock_details.return_value = json.loads(details_response)
user = self.create_user_with_study_and_workflow()
# The load example data script should set us up a user and at least one study, one category, and one workflow.
studies = StudyService.get_studies_for_user(user)
studies = StudyService().get_studies_for_user(user)
self.assertTrue(len(studies) == 1)
self.assertTrue(len(studies[0].categories) == 1)
self.assertTrue(len(studies[0].categories[0].workflows) == 1)
@ -82,7 +85,7 @@ class TestStudyService(BaseTest):
processor.do_engine_steps()
# Assure the workflow is now started, and knows the total and completed tasks.
studies = StudyService.get_studies_for_user(user)
studies = StudyService().get_studies_for_user(user)
workflow = next(iter(studies[0].categories[0].workflows)) # Workflows is a set.
# self.assertEqual(WorkflowStatus.user_input_required, workflow.status)
self.assertTrue(workflow.total_tasks > 0)
@ -95,21 +98,24 @@ class TestStudyService(BaseTest):
processor.save()
# Assure the workflow has moved on to the next task.
studies = StudyService.get_studies_for_user(user)
studies = StudyService().get_studies_for_user(user)
workflow = next(iter(studies[0].categories[0].workflows)) # Workflows is a set.
self.assertEqual(1, workflow.completed_tasks)
# Get approvals
@patch('crc.services.protocol_builder.ProtocolBuilderService.get_study_details') # mock_details
@patch('crc.services.protocol_builder.ProtocolBuilderService.get_required_docs') # mock_docs
def test_get_required_docs(self, mock_docs):
def test_get_required_docs(self, mock_docs, mock_details):
app.config['PB_ENABLED'] = True
# mock out the protocol builder
docs_response = self.protocol_builder_response('required_docs.json')
mock_docs.return_value = json.loads(docs_response)
details_response = self.protocol_builder_response('study_details.json')
mock_details.return_value = json.loads(details_response)
user = self.create_user_with_study_and_workflow()
studies = StudyService.get_studies_for_user(user)
studies = StudyService().get_studies_for_user(user)
study = studies[0]
@ -126,7 +132,7 @@ class TestStudyService(BaseTest):
self.assertEqual("CRC", documents["UVACompl_PRCAppr"]['Who Uploads?'])
self.assertEqual(0, documents["UVACompl_PRCAppr"]['count'])
self.assertEqual(True, documents["UVACompl_PRCAppr"]['required'])
self.assertEqual('6', documents["UVACompl_PRCAppr"]['id'])
self.assertEqual(6, documents["UVACompl_PRCAppr"]['id'])
@patch('crc.services.protocol_builder.ProtocolBuilderService.get_required_docs') # mock_docs
def test_get_documents_has_file_details(self, mock_docs):
@ -227,3 +233,23 @@ class TestStudyService(BaseTest):
# Both Alex and Aaron are SI, and both should be returned.
self.assertEqual("ajl2j", investigators['SI']['user_id'])
self.assertEqual("cah3us", investigators['SI_2']['user_id'])
@patch('crc.services.protocol_builder.ProtocolBuilderService.get_study_details')  # mock_details
def test_get_user_studies(self, mock_details):
    """A study whose details carry a valid REVIEW_TYPE appears in the user's study list."""
    mock_details.return_value = json.loads(self.protocol_builder_response('study_details.json'))
    user = self.create_user_with_study_and_workflow()
    user_studies = StudyService().get_studies_for_user(user)
    # study_details.json has a valid REVIEW_TYPE, so exactly one study comes back.
    self.assertEqual(1, len(user_studies))
@patch('crc.services.protocol_builder.ProtocolBuilderService.get_study_details')  # mock_details
def test_get_user_studies_bad_review_type(self, mock_details):
    """A study whose details carry an invalid REVIEW_TYPE is filtered out of the list."""
    mock_details.return_value = json.loads(self.protocol_builder_response('study_details_bad_review_type.json'))
    user = self.create_user_with_study_and_workflow()
    user_studies = StudyService().get_studies_for_user(user)
    # The fixture's REVIEW_TYPE is invalid, so no studies come back.
    self.assertEqual(0, len(user_studies))

View File

@ -11,6 +11,8 @@ from crc.models.protocol_builder import ProtocolBuilderStatus
from crc.models.study import StudySchema, StudyModel, StudyStatus
from crc.models.user import UserModel
from unittest.mock import patch
class TestAuthentication(BaseTest):
admin_uid = 'dhf8r'
@ -204,7 +206,10 @@ class TestAuthentication(BaseTest):
user_data = json.loads(rv.get_data(as_text=True))
self.assertEqual(len(user_data), len(all_users))
def test_admin_can_impersonate_another_user(self):
@patch('crc.services.protocol_builder.ProtocolBuilderService.get_study_details') # mock_details
def test_admin_can_impersonate_another_user(self, mock_details):
details_response = self.protocol_builder_response('study_details.json')
mock_details.return_value = json.loads(details_response)
# Switch production mode on
app.config['PRODUCTION'] = True

View File

@ -3,9 +3,6 @@ from tests.base_test import BaseTest
from crc.services.file_service import FileService
class TestDocumentDirectories(BaseTest):
def test_directory_list(self):

View File

@ -1,18 +1,17 @@
import json
from tests.base_test import BaseTest
from crc import db
from crc.models.data_store import DataStoreModel
from crc.services.file_service import FileService
from crc.services.workflow_processor import WorkflowProcessor
from tests.base_test import BaseTest
from crc.models.workflow import WorkflowStatus
from crc import db
from crc.api.common import ApiError
from crc.models.task_event import TaskEventModel, TaskEventSchema
from crc.services.workflow_service import WorkflowService
from io import BytesIO
import json
class TestFileDatastore(BaseTest):
def test_file_datastore_workflow(self):
self.load_example_data()
self.create_reference_document()
@ -32,3 +31,41 @@ class TestFileDatastore(BaseTest):
self.assertEqual(task_data['output'], 'me')
self.assertEqual(task_data['output2'], 'nope')
def test_file_data_store_file_data_property(self):
    """Completing the form should copy each form value onto the uploaded file's data store."""
    self.load_example_data()
    workflow = self.create_workflow('enum_file_data')
    workflow_api = self.get_workflow_api(workflow)
    task = workflow_api.next_task

    # Upload a file through the API, attached to the second form field.
    field_id = task.form['fields'][1]['id']
    upload = {'file': (BytesIO(b"abcdef"), 'test_file.txt')}
    rv = self.app.post('/v1.0/file?study_id=%i&workflow_id=%s&task_id=%s&form_field_key=%s' %
                       (workflow.study_id, workflow.id, task.id, field_id),
                       data=upload, follow_redirects=True,
                       content_type='multipart/form-data', headers=self.logged_in_headers())
    self.assert_success(rv)
    file_id = json.loads(rv.get_data())['id']

    # Submit the form that sets the datastore values.
    self.complete_form(workflow, task, {'Study_App_Doc': {'id': file_id},
                                        'IRB_HSR_Application_Type': {'label': 'Expedited Application'},
                                        'my_test_field': 'some string',
                                        'the_number': 8,
                                        'a_boolean': True,
                                        'some_date': '2021-07-23'})

    # Every stored key must be one we expect; values come back string-serialized.
    expected_values = {'IRB_HSR_Application_Type': 'Expedited Application',
                       'my_test_field': 'some string',
                       'the_number': '8',
                       'a_boolean': 'true',
                       'some_date': '2021-07-23'}
    stored_items = db.session.query(DataStoreModel).filter(DataStoreModel.file_id == file_id).all()
    for item in stored_items:
        self.assertIn(item.key, expected_values)
        self.assertEqual(expected_values[item.key], item.value)

View File

@ -31,3 +31,8 @@ class TestLdapService(BaseTest):
self.assertFalse(True, "An API error should be raised.")
except ApiError as ae:
self.assertEqual("missing_ldap_record", ae.code)
def test_get_user_with_caps(self):
    """An upper-case uid lookup still resolves; the record's uid is lower-case."""
    info = LdapService.user_info("LB3DP")
    self.assertIsNotNone(info)
    self.assertEqual("lb3dp", info.uid)

View File

@ -46,7 +46,7 @@ class TestProtocolBuilder(BaseTest):
mock_get.return_value.text = self.protocol_builder_response('study_details.json')
response = ProtocolBuilderService.get_study_details(self.test_study_id)
self.assertIsNotNone(response)
self.assertEqual(64, len(response))
self.assertEqual(65, len(response))
self.assertEqual('1234', response['IND_1'])
@patch('crc.services.protocol_builder.requests.get')
@ -72,3 +72,13 @@ class TestProtocolBuilder(BaseTest):
self.assertEqual('IRB Event 1', response[0]["IRBEVENT"])
self.assertEqual('IRB Event 2', response[1]["IRBEVENT"])
self.assertEqual('IRB Event 3', response[2]["IRBEVENT"])
@patch('crc.services.protocol_builder.requests.get')
def test_check_study(self, mock_get):
    """check_study() parses the PB response; each entry carries DETAIL and STATUS keys."""
    app.config['PB_ENABLED'] = True
    mock_get.return_value.ok = True
    mock_get.return_value.text = self.protocol_builder_response('check_study.json')
    result = ProtocolBuilderService.check_study(self.test_study_id)
    self.assertIsNotNone(result)
    for required_key in ('DETAIL', 'STATUS'):
        self.assertIn(required_key, result[0].keys())

View File

@ -1,13 +1,13 @@
import json
import time
from crc.services.workflow_processor import WorkflowProcessor
from tests.base_test import BaseTest
from crc.models.workflow import WorkflowStatus, WorkflowModel
from crc import db
from crc.api.common import ApiError
from crc.models.task_event import TaskEventModel, TaskEventSchema
from crc.services.workflow_service import WorkflowService
from crc.services.workflow_processor import WorkflowProcessor
class TestTimerEvent(BaseTest):
@ -28,3 +28,19 @@ class TestTimerEvent(BaseTest):
wf = db.session.query(WorkflowModel).filter(WorkflowModel.id == workflow.id).first()
self.assertTrue(wf.status != WorkflowStatus.waiting)
def test_waiting_event_error(self):
    """A timer that raises during do_waiting() logs one ERROR naming the
    workflow and study, and the workflow remains in the waiting state.
    """
    workflow = self.create_workflow('timer_event_error')
    processor = WorkflowProcessor(workflow)
    processor.do_engine_steps()
    processor.save()
    time.sleep(.3)  # our timer is at .25 sec so we have to wait for it
    # The timer has fired, but the workflow should still be waiting.
    wf = db.session.query(WorkflowModel).filter(WorkflowModel.id == workflow.id).first()
    self.assertEqual(WorkflowStatus.waiting, wf.status)
    with self.assertLogs('crc', level='ERROR') as cm:
        WorkflowService.do_waiting()
    self.assertEqual(1, len(cm.output))
    # assertRegexpMatches is a deprecated alias (removed in Python 3.12); use assertRegex.
    # The originals also mixed an f-string prefix with %-formatting; plain f-strings
    # produce the identical patterns.
    self.assertRegex(cm.output[0], f"workflow #{workflow.id}")
    self.assertRegex(cm.output[0], f"study #{workflow.study_id}")
    # The error must not flip the workflow out of the waiting state.
    self.assertEqual(WorkflowStatus.waiting, wf.status)

View File

@ -0,0 +1,23 @@
from tests.base_test import BaseTest
class TestCallActivityEndEvent(BaseTest):

    def test_call_activity_end_event(self):
        """The parent workflow's end-event documentation wins over the call activity's.

        The main workflow's end event documents 'Main Workflow'; the call
        activity's end event documents 'Call Event'.  The task must surface
        the former, never the latter.
        """
        workflow = self.create_workflow('call_activity_end_event')
        api = self.get_workflow_api(workflow)
        documentation = api.next_task.documentation

        # Documentation from the call activity's end event must be absent ...
        with self.assertRaises(AssertionError):
            self.assertIn('Call Event', documentation)
        # ... while the main workflow's end-event documentation is present.
        self.assertIn('Main Workflow', documentation)

View File

@ -10,7 +10,7 @@ class TestFormFieldName(BaseTest):
json_data = json.loads(rv.get_data(as_text=True))
self.assertEqual(json_data[0]['message'],
'When populating all fields ... \nInvalid Field name: "user-title". A field ID must begin '
'Invalid Field name: "user-title". A field ID must begin '
'with a letter, and can only contain letters, numbers, and "_"')
def test_form_field_name_with_period(self):

View File

@ -10,5 +10,5 @@ class TestFormFieldType(BaseTest):
json_data = json.loads(rv.get_data(as_text=True))
self.assertEqual(json_data[0]['message'],
'When populating all fields ... \nType is missing for field "name". A field type must be provided.')
'Type is missing for field "name". A field type must be provided.')
# print('TestFormFieldType: Good Form')

View File

@ -0,0 +1,12 @@
from tests.base_test import BaseTest
import json
class TestNameErrorHint(BaseTest):

    def test_name_error_hint(self):
        """Validating a spec containing a NameError yields a did-you-mean suggestion."""
        self.load_example_data()
        spec_model = self.load_test_spec('script_with_name_error')
        rv = self.app.get('/v1.0/workflow-specification/%s/validate' % spec_model.id,
                          headers=self.logged_in_headers())
        response_json = json.loads(rv.get_data(as_text=True))
        self.assertIn('Did you mean \'[\'spam\'', response_json[0]['message'])

View File

@ -59,7 +59,6 @@ class TestWorkflowSpecValidation(BaseTest):
app.config['PB_ENABLED'] = True
self.validate_all_loaded_workflows()
def validate_all_loaded_workflows(self):
workflows = session.query(WorkflowSpecModel).all()
errors = []
@ -71,12 +70,12 @@ class TestWorkflowSpecValidation(BaseTest):
def test_invalid_expression(self):
self.load_example_data()
errors = self.validate_workflow("invalid_expression")
self.assertEqual(2, len(errors))
self.assertEqual(1, len(errors))
self.assertEqual("workflow_validation_exception", errors[0]['code'])
self.assertEqual("ExclusiveGateway_003amsm", errors[0]['task_id'])
self.assertEqual("Has Bananas Gateway", errors[0]['task_name'])
self.assertEqual("invalid_expression.bpmn", errors[0]['file_name'])
self.assertEqual('When populating all fields ... \nExclusiveGateway_003amsm: Error evaluating expression \'this_value_does_not_exist==true\', '
self.assertEqual('ExclusiveGateway_003amsm: Error evaluating expression \'this_value_does_not_exist==true\', '
'name \'this_value_does_not_exist\' is not defined', errors[0]["message"])
self.assertIsNotNone(errors[0]['task_data'])
self.assertIn("has_bananas", errors[0]['task_data'])
@ -84,7 +83,7 @@ class TestWorkflowSpecValidation(BaseTest):
def test_validation_error(self):
self.load_example_data()
errors = self.validate_workflow("invalid_spec")
self.assertEqual(2, len(errors))
self.assertEqual(1, len(errors))
self.assertEqual("workflow_validation_error", errors[0]['code'])
self.assertEqual("StartEvent_1", errors[0]['task_id'])
self.assertEqual("invalid_spec.bpmn", errors[0]['file_name'])
@ -93,7 +92,7 @@ class TestWorkflowSpecValidation(BaseTest):
def test_invalid_script(self):
self.load_example_data()
errors = self.validate_workflow("invalid_script")
self.assertEqual(2, len(errors))
self.assertEqual(1, len(errors))
self.assertEqual("workflow_validation_exception", errors[0]['code'])
#self.assertTrue("NoSuchScript" in errors[0]['message'])
self.assertEqual("Invalid_Script_Task", errors[0]['task_id'])
@ -103,12 +102,23 @@ class TestWorkflowSpecValidation(BaseTest):
def test_invalid_script2(self):
self.load_example_data()
errors = self.validate_workflow("invalid_script2")
self.assertEqual(2, len(errors))
self.assertEqual(1, len(errors))
self.assertEqual("workflow_validation_exception", errors[0]['code'])
self.assertEqual("Invalid_Script_Task", errors[0]['task_id'])
self.assertEqual("An Invalid Script Reference", errors[0]['task_name'])
self.assertEqual(3, errors[0]['line_number'])
self.assertEqual(9, errors[0]['offset'])
self.assertEqual("SyntaxError", errors[0]['error_type'])
self.assertEqual("A Syntax Error", errors[0]['task_name'])
self.assertEqual("invalid_script2.bpmn", errors[0]['file_name'])
def test_invalid_script3(self):
    """A NameError in a script task is reported with its task id and line number."""
    self.load_example_data()
    validation_errors = self.validate_workflow("invalid_script3")
    self.assertEqual(1, len(validation_errors))
    first_error = validation_errors[0]
    self.assertEqual("Invalid_Script_Task", first_error['task_id'])
    self.assertEqual(3, first_error['line_number'])
    self.assertEqual("NameError", first_error['error_type'])
def test_repeating_sections_correctly_populated(self):
self.load_example_data()
spec_model = self.load_test_spec('repeat_form')