Created a "StudyService" and moved all complex logic around study manipulation out of the study api, and this service, as things were getting complicated. The Workflow Processor no longer creates the WorkflowModel, the study object handles that, and only passes the model into the workflow processor when it is ready to start the workflow.
Created a Study object (separate from the StudyModel) that can be constructed on request and contains a different data structure than we store in the DB. This allows us to return the underlying Categories and Workflows in a clean way. Added a new workflow status, "not_started", meaning we have not yet instantiated a processor or created a BPMN workflow; such workflows have no version and no stored data, just the possibility of being started. The Top Level Workflow, or "Master" workflow, is now part of the sample data and is loaded at all times. Removed the ability to "add a workflow to a study" and "remove a workflow from a study"; a study contains all possible workflows by definition. Example data no longer creates users or studies; it just creates the specs.
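A minimal sketch of the new flow described above (start_workflow itself is a hypothetical helper, not part of this commit; the imports and methods it calls all appear in the diff below):

    # Hypothetical helper illustrating the flow this commit introduces.
    from crc.models.workflow import WorkflowStatus
    from crc.services.study_service import StudyService
    from crc.services.workflow_processor import WorkflowProcessor

    def start_workflow(study, spec):
        # StudyService now owns WorkflowModel creation; new records begin as
        # "not_started", with no spec version and no serialized BPMN data.
        workflow_model = StudyService._create_workflow_model(study, spec)
        assert workflow_model.status == WorkflowStatus.not_started

        # The model is handed to the WorkflowProcessor only when the workflow
        # actually starts; the processor builds (or deserializes) the BPMN
        # workflow from the model rather than creating the model itself.
        processor = WorkflowProcessor(workflow_model)
        processor.do_engine_steps()
        return processor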
parent c9900d787e
commit 4a916c1ee3
crc/api.yml (29 lines changed)
@@ -198,33 +198,6 @@ paths:
                type: array
                items:
                  $ref: "#/components/schemas/Workflow"
    post:
      operationId: crc.api.study.add_workflow_to_study
      summary: Starts a new workflow for the given study using the provided spec. This is atypical, and should be left to the protocol builder.
      tags:
        - Studies
      parameters:
        - name: study_id
          in: path
          required: true
          description: The id of the study for which a workflow should start
          schema:
            type: integer
            format: int32
      requestBody:
        content:
          application/json:
            schema:
              $ref: '#/components/schemas/WorkflowSpec'
      responses:
        '200':
          description: An array of workflows
          content:
            application/json:
              schema:
                type: array
                items:
                  $ref: "#/components/schemas/Workflow"
  /workflow-specification:
    get:
      operationId: crc.api.workflow.all_specifications
@@ -898,7 +871,7 @@ components:
        primary_process_id:
          type: string
          nullable: true
        workflow_spec_category_id:
        category_id:
          type: integer
          nullable: true
        workflow_spec_category:

@@ -1,6 +1,5 @@
from typing import List

from SpiffWorkflow.exceptions import WorkflowException
from connexion import NoContent
from flask import g
from sqlalchemy.exc import IntegrityError
@@ -10,18 +9,22 @@ from crc.api.common import ApiError, ApiErrorSchema
from crc.api.workflow import __get_workflow_api_model
from crc.models.api_models import WorkflowApiSchema
from crc.models.protocol_builder import ProtocolBuilderStatus, ProtocolBuilderStudy
from crc.models.study import StudyModelSchema, StudyModel
from crc.models.study import StudySchema, StudyModel, Study
from crc.models.workflow import WorkflowModel, WorkflowSpecModel
from crc.services.protocol_builder import ProtocolBuilderService
from crc.services.study_service import StudyService
from crc.services.workflow_processor import WorkflowProcessor


def add_study(body):
    study: StudyModel = StudyModelSchema().load(body, session=session)
    session.add(study)
    errors = add_all_workflow_specs_to_study(study)
    """This should never get called, and is subject to deprecation. Studies
    should be added through the protocol builder only."""
    study: Study = StudySchema().load(body)
    study_model = StudyModel(**study.model_args())
    session.add(study_model)
    errors = StudyService._add_all_workflow_specs_to_study(study)
    session.commit()
    study_data = StudyModelSchema().dump(study)
    study_data = StudySchema().dump(study)
    study_data["errors"] = ApiErrorSchema(many=True).dump(errors)
    return study_data

@@ -30,68 +33,39 @@ def update_study(study_id, body):
    if study_id is None:
        raise ApiError('unknown_study', 'Please provide a valid Study ID.')

    study = session.query(StudyModel).filter_by(id=study_id).first()

    if study is None:
    study_model = session.query(StudyModel).filter_by(id=study_id).first()
    if study_model is None:
        raise ApiError('unknown_study', 'The study "' + study_id + '" is not recognized.')

    schema = StudyModelSchema()
    study = schema.load(body, session=session, instance=study, partial=True)
    session.add(study)
    study: Study = StudySchema().load(body)
    study.update_model(study_model)
    session.add(study_model)
    session.commit()
    return schema.dump(study)
    return StudySchema().dump(study)


def get_study(study_id):
    study = session.query(StudyModel).filter_by(id=study_id).first()
    schema = StudyModelSchema()
    if study is None:
        return NoContent, 404
    study_service = StudyService()
    study = study_service.get_study(study_id)
    schema = StudySchema()
    return schema.dump(study)


def delete_study(study_id):
    try:
        session.query(StudyModel).filter_by(id=study_id).delete()
        StudyService.delete_study(study_id)
    except IntegrityError as ie:
        session.rollback()
        app.logger.error("Failed to delete Study #%i due to an Integrity Error: %s" % (study_id, str(ie)))
        raise ApiError(code="study_integrity_error", message="This study contains running workflows that is "
                                                             "preventing deletion. Please delete the workflows " +
                                                             "before proceeding.")
        message = "Failed to delete Study #%i due to an Integrity Error: %s" % (study_id, str(ie))
        raise ApiError(code="study_integrity_error", message=message)


def all_studies():
    """Returns all the studies associated with the current user. Assures we are
    in sync with values read in from the protocol builder. """

    """:type: crc.models.user.UserModel"""

    # Get studies matching this user from Protocol Builder
    pb_studies: List[ProtocolBuilderStudy] = ProtocolBuilderService.get_studies(g.user.uid)

    # Get studies from the database
    db_studies = session.query(StudyModel).filter_by(user_uid=g.user.uid).all()

    # Update all studies from the protocol builder, create new studies as needed.
    for pb_study in pb_studies:
        db_study = next((s for s in db_studies if s.id == pb_study.STUDYID), None)
        if not db_study:
            db_study = StudyModel(id=pb_study.STUDYID)
            session.add(db_study)
            db_studies.append(db_study)
        db_study.update_from_protocol_builder(pb_study)

    # Mark studies as inactive that are no longer in Protocol Builder
    for study in db_studies:
        pb_study = next((pbs for pbs in pb_studies if pbs.STUDYID == study.id), None)
        if not pb_study:
            study.inactive = True
            study.protocol_builder_status = ProtocolBuilderStatus.INACTIVE

    session.commit()
    # Return updated studies
    results = StudyModelSchema(many=True).dump(db_studies)
    StudyService.synch_all_studies_with_protocol_builder(g.user)
    studies = StudyService.get_studies_for_user(g.user)
    results = StudySchema(many=True).dump(studies)
    return results

@@ -124,24 +98,3 @@ def get_study_workflows(study_id):
    return schema.dump(api_models)


def add_all_workflow_specs_to_study(study):
    existing_models = session.query(WorkflowModel).filter(WorkflowModel.study_id == study.id).all()
    existing_specs = list(m.workflow_spec_id for m in existing_models)
    new_specs = session.query(WorkflowSpecModel). \
        filter(WorkflowSpecModel.is_master_spec == False). \
        filter(WorkflowSpecModel.id.notin_(existing_specs)). \
        all()
    errors = []
    for workflow_spec in new_specs:
        try:
            WorkflowProcessor.create(study.id, workflow_spec.id)
        except WorkflowException as we:
            errors.append(ApiError.from_task_spec("workflow_execution_exception", str(we), we.sender))
    return errors

def add_workflow_to_study(study_id, body):
    workflow_spec_model: WorkflowSpecModel = session.query(WorkflowSpecModel).filter_by(id=body["id"]).first()
    if workflow_spec_model is None:
        raise ApiError('unknown_spec', 'The specification "' + body['id'] + '" is not recognized.')
    processor = WorkflowProcessor.create(study_id, workflow_spec_model.id)
    return WorkflowApiSchema().dump(__get_workflow_api_model(processor))

@@ -1,9 +1,13 @@
import marshmallow
from marshmallow import INCLUDE, fields
from marshmallow_enum import EnumField
from marshmallow_sqlalchemy import SQLAlchemyAutoSchema
from sqlalchemy import func

from crc import db
from crc import db, ma
from crc.api.common import ApiErrorSchema
from crc.models.protocol_builder import ProtocolBuilderStatus, ProtocolBuilderStudy
from crc.models.workflow import WorkflowSpecCategoryModel, WorkflowState, WorkflowStatus, WorkflowSpecModel, \
    WorkflowModel


class StudyModel(db.Model):
@@ -34,23 +38,114 @@ class StudyModel(db.Model):
        elif pbs.Q_COMPLETE:
            self.protocol_builder_status = ProtocolBuilderStatus.IN_PROCESS

class Study():
    def __init__(model: StudyModel, status, stats):
        self.id = id


class WorkflowMetadata(object):
    def __init__(self, name, display_name, description, category_id, state: WorkflowState, status: WorkflowStatus,
                 total_tasks, completed_tasks):
        self.name = name
        self.display_name = display_name
        self.description = description
        self.category_id = category_id
        self.state = state
        self.status = status
        self.user_tasks = user_tasks
        self.last_task = last_task
        self.next_task = next_task
        self.spec_version = spec_version
        self.is_latest_spec = is_latest_spec
        self.total_tasks = total_tasks
        self.completed_tasks = completed_tasks

    @classmethod
    def from_workflow(cls, workflow: WorkflowModel):
        instance = cls(
            name=workflow.workflow_spec.name,
            display_name=workflow.workflow_spec.display_name,
            description=workflow.workflow_spec.description,
            category_id=workflow.workflow_spec.category_id,
            state=WorkflowState.optional,
            status=workflow.status,
            total_tasks=workflow.total_tasks,
            completed_tasks=workflow.completed_tasks)
        return instance

class StudyModelSchema(SQLAlchemyAutoSchema):

class WorkflowMetadataSchema(ma.Schema):
    state = EnumField(WorkflowState)
    status = EnumField(WorkflowStatus)
    class Meta:
        model = StudyModel
        load_instance = True
        include_relationships = True
        include_fk = True  # Includes foreign keys
        model = WorkflowMetadata
        additional = ["name", "display_name", "description",
                      "total_tasks", "completed_tasks"]
        unknown = INCLUDE


class Category(object):
    def __init__(self, model: WorkflowSpecCategoryModel):
        self.id = model.id
        self.name = model.name
        self.display_name = model.display_name
        self.display_order = model.display_order


class CategorySchema(ma.Schema):
    workflows = fields.List(fields.Nested(WorkflowMetadataSchema), dump_only=True)
    class Meta:
        model = Category
        additional = ["id", "name", "display_name", "display_order"]
        unknown = INCLUDE


class Study(object):

    def __init__(self, id, title, last_updated, primary_investigator_id, user_uid,
                 protocol_builder_status=None,
                 sponsor="", hsr_number="", ind_number="", inactive=False, categories=[], **argsv):
        self.id = id
        self.user_uid = user_uid
        self.title = title
        self.last_updated = last_updated
        self.protocol_builder_status = protocol_builder_status
        self.primary_investigator_id = primary_investigator_id
        self.sponsor = sponsor
        self.hsr_number = hsr_number
        self.ind_number = ind_number
        self.inactive = inactive
        self.categories = categories
        self.warnings = []

    @classmethod
    def from_model(cls, study_model: StudyModel):
        args = {k: v for k, v in study_model.__dict__.items() if not k.startswith('_')}
        instance = cls(**args)
        return instance

    def update_model(self, study_model: StudyModel):
        for k,v in self.__dict__.items():
            if not k.startswith('_'):
                study_model.__dict__[k] = v

    def model_args(self):
        """Arguments that can be passed into the Study Model to update it."""
        self_dict = self.__dict__.copy()
        del self_dict["categories"]
        del self_dict["warnings"]
        return self_dict


class StudySchema(ma.Schema):

    categories = fields.List(fields.Nested(CategorySchema), dump_only=True)
    warnings = fields.List(fields.Nested(ApiErrorSchema), dump_only=True)
    protocol_builder_status = EnumField(ProtocolBuilderStatus)
    hsr_number = fields.String(allow_none=True)

    class Meta:
        model = Study
        additional = ["id", "title", "last_updated", "primary_investigator_id", "user_uid",
                      "sponsor", "ind_number", "inactive"]
        unknown = INCLUDE

    @marshmallow.post_load
    def make_study(self, data, **kwargs):
        """Can load the basic study data for updates to the database, but categories are write only"""
        return Study(**data)

@@ -29,8 +29,8 @@ class WorkflowSpecModel(db.Model):
    display_name = db.Column(db.String)
    description = db.Column(db.Text)
    primary_process_id = db.Column(db.String)
    workflow_spec_category_id = db.Column(db.Integer, db.ForeignKey('workflow_spec_category.id'), nullable=True)
    workflow_spec_category = db.relationship("WorkflowSpecCategoryModel")
    category_id = db.Column(db.Integer, db.ForeignKey('workflow_spec_category.id'), nullable=True)
    category = db.relationship("WorkflowSpecCategoryModel")
    is_master_spec = db.Column(db.Boolean, default=False)

@@ -44,19 +44,28 @@ class WorkflowSpecModelSchema(SQLAlchemyAutoSchema):

    workflow_spec_category = marshmallow.fields.Nested(WorkflowSpecCategoryModelSchema, dump_only=True)


class WorkflowState(enum.Enum):
    hidden = "hidden"
    disabled = "disabled"
    required = "required"
    optional = "optional"

    @classmethod
    def has_value(cls, value):
        return value in cls._value2member_map_

    @staticmethod
    def list():
        return list(map(lambda c: c.value, WorkflowState))

class WorkflowStatus(enum.Enum):
    new = "new"
    not_started = "not_started"
    user_input_required = "user_input_required"
    waiting = "waiting"
    complete = "complete"


class WorkflowModel(db.Model):
    __tablename__ = 'workflow'
    id = db.Column(db.Integer, primary_key=True)

@@ -64,4 +73,7 @@ class WorkflowModel(db.Model):
    status = db.Column(db.Enum(WorkflowStatus))
    study_id = db.Column(db.Integer, db.ForeignKey('study.id'))
    workflow_spec_id = db.Column(db.String, db.ForeignKey('workflow_spec.id'))
    workflow_spec = db.relationship("WorkflowSpecModel")
    spec_version = db.Column(db.String)
    total_tasks = db.Column(db.Integer, default=0)
    completed_tasks = db.Column(db.Integer, default=0)

@@ -1,6 +1,6 @@
from crc import session
from crc.api.common import ApiError
from crc.models.study import StudyModel, StudyModelSchema
from crc.models.study import StudyModel, StudySchema
from crc.scripts.script import Script
from crc.services.protocol_builder import ProtocolBuilderService
from crc.services.workflow_processor import WorkflowProcessor

@@ -36,7 +36,7 @@ class StudyInfo(Script):

        if cmd == 'info':
            study = session.query(StudyModel).filter_by(id=study_id).first()
            schema = StudyModelSchema()
            schema = StudySchema()
            study_info["info"] = schema.dump(study)
        if cmd == 'investigators':
            study_info["investigators"] = self.pb.get_investigators(study_id, as_json=True)

@@ -0,0 +1,163 @@
from typing import List

from SpiffWorkflow import WorkflowException

from crc import db, session
from crc.api.common import ApiError
from crc.models.protocol_builder import ProtocolBuilderStudy, ProtocolBuilderStatus
from crc.models.study import StudyModel, Study, Category, WorkflowMetadata
from crc.models.workflow import WorkflowSpecCategoryModel, WorkflowModel, WorkflowSpecModel, WorkflowState, \
    WorkflowStatus
from crc.services.protocol_builder import ProtocolBuilderService
from crc.services.workflow_processor import WorkflowProcessor


class StudyService(object):
    """Provides common tools for working with a Study"""

    @staticmethod
    def get_studies_for_user(user):
        """Returns a list of all studies for the given user."""
        db_studies = session.query(StudyModel).filter_by(user_uid=user.uid).all()
        studies = []
        for study_model in db_studies:
            studies.append(StudyService.get_study(study_model.id, study_model))
        return studies

    @staticmethod
    def get_study(study_id, study_model: StudyModel = None):
        """Returns a study model that contains all the workflows organized by category.
        IMPORTANT: This is intended to be a lightweight call, it should never involve
        loading up and executing all the workflows in a study to calculate information."""
        if not study_model:
            study_model = session.query(StudyModel).filter_by(id=study_id).first()
        study = Study.from_model(study_model)
        study.categories = StudyService.get_categories()
        workflow_metas = StudyService.__get_workflow_metas(study_id)
        status = StudyService.__get_study_status(study_model)
        study.warnings = StudyService.__update_status_of_workflow_meta(workflow_metas, status)

        # Group the workflows into their categories.
        for category in study.categories:
            category.workflows = {w for w in workflow_metas if w.category_id == category.id}

        return study

    @staticmethod
    def delete_study(study_id):
        session.query(WorkflowModel).filter_by(study_id=study_id).delete()
        session.query(StudyModel).filter_by(id=study_id).delete()

    @staticmethod
    def get_categories():
        """Returns a list of category objects, in the correct order."""
        cat_models = db.session.query(WorkflowSpecCategoryModel) \
            .order_by(WorkflowSpecCategoryModel.display_order).all()
        categories = []
        for cat_model in cat_models:
            categories.append(Category(cat_model))
        return categories

    @staticmethod
    def synch_all_studies_with_protocol_builder(user):
        """Assures that the studies we have locally for the given user are
        in sync with the studies available in protocol builder. """
        # Get studies matching this user from Protocol Builder
        pb_studies: List[ProtocolBuilderStudy] = ProtocolBuilderService.get_studies(user.uid)

        # Get studies from the database
        db_studies = session.query(StudyModel).filter_by(user_uid=user.uid).all()

        # Update all studies from the protocol builder, create new studies as needed.
        # Further assures that every active study (that does exist in the protocol builder)
        # has a reference to every available workflow (though some may not have started yet)
        for pb_study in pb_studies:
            db_study = next((s for s in db_studies if s.id == pb_study.STUDYID), None)
            if not db_study:
                db_study = StudyModel(id=pb_study.STUDYID)
                session.add(db_study)
                db_studies.append(db_study)
            db_study.update_from_protocol_builder(pb_study)
            StudyService._add_all_workflow_specs_to_study(db_study)

        # Mark studies as inactive that are no longer in Protocol Builder
        for study in db_studies:
            pb_study = next((pbs for pbs in pb_studies if pbs.STUDYID == study.id), None)
            if not pb_study:
                study.inactive = True
                study.protocol_builder_status = ProtocolBuilderStatus.INACTIVE

        db.session.commit()

    @staticmethod
    def __update_status_of_workflow_meta(workflow_metas, status):
        # Update the status on each workflow
        warnings = []
        for wfm in workflow_metas:
            if wfm.name in status.keys():
                if not WorkflowState.has_value(status[wfm.name]):
                    warnings.append(ApiError("invalid_status",
                                             "Workflow '%s' can not be set to '%s', should be one of %s" % (
                                                 wfm.name, status[wfm.name], ",".join(WorkflowState.list())
                                             )))
                else:
                    wfm.state = WorkflowState[status[wfm.name]]
            else:
                warnings.append(ApiError("missing_status", "No status specified for workflow %s" % wfm.name))
        return warnings

    @staticmethod
    def __get_workflow_metas(study_id):
        # Add in the Workflows for each category
        workflow_models = db.session.query(WorkflowModel).filter_by(study_id=study_id).all()
        workflow_metas = []
        for workflow in workflow_models:
            workflow_metas.append(WorkflowMetadata.from_workflow(workflow))
        return workflow_metas

    @staticmethod
    def __get_study_status(study_model):
        """Uses the Top Level Workflow to calculate the status of the study, and its
        workflow models."""
        master_specs = db.session.query(WorkflowSpecModel). \
            filter_by(is_master_spec=True).all()
        if len(master_specs) < 1:
            raise ApiError("missing_master_spec", "No specifications are currently marked as the master spec.")
        if len(master_specs) > 1:
            raise ApiError("multiple_master_specs",
                           "There is more than one master specification, and I don't know what to do.")

        master_spec = master_specs[0]
        master_workflow = StudyService._create_workflow_model(study_model, master_spec)
        processor = WorkflowProcessor(master_workflow)
        processor.do_engine_steps()
        if not processor.bpmn_workflow.is_completed():
            raise ApiError("master_spec_not_automatic",
                           "The master spec should only contain fully automated tasks, it failed to complete.")

        return processor.bpmn_workflow.last_task.data

    @staticmethod
    def _add_all_workflow_specs_to_study(study):
        existing_models = session.query(WorkflowModel).filter(WorkflowModel.study_id == study.id).all()
        existing_specs = list(m.workflow_spec_id for m in existing_models)
        new_specs = session.query(WorkflowSpecModel). \
            filter(WorkflowSpecModel.is_master_spec == False). \
            filter(WorkflowSpecModel.id.notin_(existing_specs)). \
            all()
        errors = []
        for workflow_spec in new_specs:
            try:
                StudyService._create_workflow_model(study, workflow_spec)
            except WorkflowException as we:
                errors.append(ApiError.from_task_spec("workflow_execution_exception", str(we), we.sender))
        return errors

    @staticmethod
    def _create_workflow_model(study, spec):
        workflow_model = WorkflowModel(status=WorkflowStatus.not_started,
                                       study_id=study.id,
                                       workflow_spec_id=spec.id)
        session.add(workflow_model)
        session.commit()
        return workflow_model

@@ -1,4 +1,3 @@
import re
import random
import re
import string

@@ -14,6 +13,7 @@ from SpiffWorkflow.camunda.parser.CamundaParser import CamundaParser
from SpiffWorkflow.dmn.parser.BpmnDmnParser import BpmnDmnParser
from SpiffWorkflow.exceptions import WorkflowException
from SpiffWorkflow.operators import Operator
from SpiffWorkflow.specs import WorkflowSpec

from crc import session
from crc.api.common import ApiError

@@ -27,7 +27,7 @@ class CustomBpmnScriptEngine(BpmnScriptEngine):
    Rather than execute arbitrary code, this assumes the script references a fully qualified python class
    such as myapp.RandomFact. """

    def execute(self, task:SpiffTask, script, **kwargs):
    def execute(self, task: SpiffTask, script, **kwargs):
        """
        Assume that the script read in from the BPMN file is a fully qualified python class. Instantiate
        that class, pass in any data available to the current task so that it might act on it.

@@ -49,20 +49,20 @@ class CustomBpmnScriptEngine(BpmnScriptEngine):
            study_id = task.workflow.data[WorkflowProcessor.STUDY_ID_KEY]
            if not isinstance(klass(), Script):
                raise ApiError.from_task("invalid_script",
                                         "This is an internal error. The script '%s:%s' you called "
                                         "does not properly implement the CRC Script class." %
                                         (module_name, class_name),
                                         task=task)
                                         "This is an internal error. The script '%s:%s' you called " %
                                         (module_name, class_name) +
                                         "does not properly implement the CRC Script class.",
                                         task=task)
            if task.workflow.data[WorkflowProcessor.VALIDATION_PROCESS_KEY]:
                """If this is running a validation, and not a normal process, then we want to
                mimic running the script, but not make any external calls or database changes."""
                klass().do_task_validate_only(task, study_id, *commands[1:])
            else:
                klass().do_task(task, study_id, *commands[1:])
        except ModuleNotFoundError as mnfe:
        except ModuleNotFoundError:
            raise ApiError.from_task("invalid_script",
                                     "Unable to locate Script: '%s:%s'" % (module_name, class_name),
                                     task=task)
                                     "Unable to locate Script: '%s:%s'" % (module_name, class_name),
                                     task=task)

    @staticmethod
    def camel_to_snake(camel):

@@ -85,9 +85,8 @@ class CustomBpmnScriptEngine(BpmnScriptEngine):
            return eval(expression)
        except NameError as ne:
            raise ApiError.from_task('invalid_expression',
                                     'The expression you provided does not exist:' + expression,
                                     task=task)

                                     'The expression you provided does not exist:' + expression,
                                     task=task)

class MyCustomParser(BpmnDmnParser):
    """

@@ -111,31 +110,54 @@ class WorkflowProcessor(object):
        completed task in the previous workflow.
        If neither flag is set, it will use the same version of the specification that was used to originally
        create the workflow model. """
        self.workflow_model = workflow_model
        orig_version = workflow_model.spec_version
        if soft_reset:
            spec = self.get_spec(workflow_model.workflow_spec_id)
            workflow_model.spec_version = spec.description
        else:
            spec = self.get_spec(workflow_model.workflow_spec_id, workflow_model.spec_version)

        self.workflow_spec_id = workflow_model.workflow_spec_id
        try:
            self.bpmn_workflow = self._serializer.deserialize_workflow(workflow_model.bpmn_workflow_json, workflow_spec=spec)
            self.bpmn_workflow = self.__get_bpmn_workflow(workflow_model, spec)
        except KeyError as ke:
            if soft_reset:
                # Undo the soft-reset.
                workflow_model.spec_version = orig_version
                orig_version = workflow_model.spec_version
            raise ApiError(code="unexpected_workflow_structure",
                           message="Failed to deserialize workflow '%s' version %s, due to a mis-placed or missing task '%s'" %
                           message="Failed to deserialize workflow"
                                   " '%s' version %s, due to a mis-placed or missing task '%s'" %
                                   (self.workflow_spec_id, workflow_model.spec_version, str(ke)) +
                                   " This is very likely due to a soft reset where there was a structural change.")
        self.bpmn_workflow.script_engine = self._script_engine

        if hard_reset:
            # Now that the spec is loaded, get the data and rebuild the bpmn with the new details
            workflow_model.spec_version = self.hard_reset()

    def __get_bpmn_workflow(self, workflow_model: WorkflowModel, spec: WorkflowSpec):

        workflow_model.spec_version = spec.description  # Very naughty. But we keep the version in the spec desc.

        if workflow_model.bpmn_workflow_json:
            bpmn_workflow = self._serializer.deserialize_workflow(workflow_model.bpmn_workflow_json, workflow_spec=spec)
        else:
            bpmn_workflow = BpmnWorkflow(spec, script_engine=self._script_engine)
            bpmn_workflow.data[WorkflowProcessor.STUDY_ID_KEY] = workflow_model.study_id
            bpmn_workflow.data[WorkflowProcessor.VALIDATION_PROCESS_KEY] = False
            bpmn_workflow.do_engine_steps()
            session.add(workflow_model)
            session.commit()

            # Need to commit twice, first to get a unique id for the workflow model, and
            # a second time to store the serialization so we can maintain this link within
            # the spiff-workflow process.
            bpmn_workflow.data[WorkflowProcessor.WORKFLOW_ID_KEY] = workflow_model.id
            workflow_model.bpmn_workflow_json = WorkflowProcessor._serializer.serialize_workflow(bpmn_workflow)
            session.add(workflow_model)

        # Assure the correct script engine is in use.
        bpmn_workflow.script_engine = self._script_engine
        return bpmn_workflow

    @staticmethod
    def get_parser():
        parser = MyCustomParser()

@@ -164,7 +186,7 @@ class WorkflowProcessor(object):
                major_version = file_data.version
            else:
                minor_version.append(file_data.version)
        minor_version.insert(0, major_version)  # Add major version to beginning.
        minor_version.insert(0, major_version)  # Add major version to beginning.
        version = ".".join(str(x) for x in minor_version)
        files = ".".join(str(x) for x in file_ids)
        full_version = "v%s (%s)" % (version, files)

@@ -180,8 +202,9 @@ class WorkflowProcessor(object):
            .filter(FileDataModel.id.in_(file_ids)).all()
        if len(files) != len(file_ids):
            raise ApiError("invalid_version",
                           "The version '%s' of workflow specification '%s' is invalid. Unable to locate the correct files to recreate it." %
                           (version, workflow_spec_id))
                           "The version '%s' of workflow specification '%s' is invalid. " %
                           (version, workflow_spec_id) +
                           " Unable to locate the correct files to recreate it.")
        return files

    @staticmethod

@@ -194,7 +217,6 @@ class WorkflowProcessor(object):
            .order_by(FileModel.id)\
            .all()


    @staticmethod
    def get_spec(workflow_spec_id, version=None):
        """Returns the requested version of the specification,

@@ -248,8 +270,7 @@ class WorkflowProcessor(object):
                task.complete()
        except WorkflowException as we:
            raise ApiError.from_task_spec("workflow_execution_exception", str(we),
                                          we.sender)

                                          we.sender)

    @staticmethod
    def populate_form_with_random_data(task):

@@ -260,18 +281,18 @@ class WorkflowProcessor(object):
                if field.type == "enum":
                    form_data[field.id] = random.choice(field.options)
                elif field.type == "long":
                    form_data[field.id] = random.randint(1,1000)
                    form_data[field.id] = random.randint(1, 1000)
                else:
                    form_data[field.id] = WorkflowProcessor._randomString()
                    form_data[field.id] = WorkflowProcessor._random_string()
        if task.data is None:
            task.data = {}
        task.data.update(form_data)

    @staticmethod
    def _randomString(stringLength=10):
    def _random_string(string_length=10):
        """Generate a random string of fixed length """
        letters = string.ascii_lowercase
        return ''.join(random.choice(letters) for i in range(stringLength))
        return ''.join(random.choice(letters) for i in range(string_length))

    @staticmethod
    def status_of(bpmn_workflow):

@@ -283,30 +304,6 @@ class WorkflowProcessor(object):
        else:
            return WorkflowStatus.waiting

    @classmethod
    def create(cls, study_id, workflow_spec_id):
        spec = WorkflowProcessor.get_spec(workflow_spec_id)
        bpmn_workflow = BpmnWorkflow(spec, script_engine=cls._script_engine)
        bpmn_workflow.data[WorkflowProcessor.STUDY_ID_KEY] = study_id
        bpmn_workflow.data[WorkflowProcessor.VALIDATION_PROCESS_KEY] = False
        bpmn_workflow.do_engine_steps()
        workflow_model = WorkflowModel(status=WorkflowProcessor.status_of(bpmn_workflow),
                                       study_id=study_id,
                                       workflow_spec_id=workflow_spec_id,
                                       spec_version=spec.description)
        session.add(workflow_model)
        session.commit()
        # Need to commit twice, first to get a unique id for the workflow model, and
        # a second time to store the serialization so we can maintain this link within
        # the spiff-workflow process.
        bpmn_workflow.data[WorkflowProcessor.WORKFLOW_ID_KEY] = workflow_model.id

        workflow_model.bpmn_workflow_json = WorkflowProcessor._serializer.serialize_workflow(bpmn_workflow)
        session.add(workflow_model)
        session.commit()
        processor = cls(workflow_model)
        return processor

    def hard_reset(self):
        """Recreate this workflow, but keep the data from the last completed task and add it back into the first task.
        This may be useful when a workflow specification changes, and users need to review all the

@@ -349,7 +346,6 @@ class WorkflowProcessor(object):

        # If the whole blessed mess is done, return the end_event task in the tree
        if self.bpmn_workflow.is_completed():
            last_task = None
            for task in SpiffTask.Iterator(self.bpmn_workflow.task_tree, SpiffTask.ANY_MASK):
                if isinstance(task.task_spec, EndEvent):
                    return task

@@ -412,3 +408,6 @@ class WorkflowProcessor(object):
            raise ValidationException('No start event found in %s' % et_root.attrib['id'])

        return process_elements[0].attrib['id']

@@ -0,0 +1,25 @@
<?xml version="1.0" encoding="UTF-8"?>
<definitions xmlns="http://www.omg.org/spec/DMN/20151101/dmn.xsd" xmlns:biodi="http://bpmn.io/schema/dmn/biodi/1.0" id="Definitions_1p34ouw" name="DRD" namespace="http://camunda.org/schema/1.0/dmn" exporter="Camunda Modeler" exporterVersion="3.4.1">
  <decision id="data_security_plan" name="Data Security Plan">
    <extensionElements>
      <biodi:bounds x="190" y="80" width="180" height="80" />
    </extensionElements>
    <decisionTable id="DecisionTable_1mjqwlv">
      <input id="InputClause_18pwfqu" label="Required Doc Keys">
        <inputExpression id="LiteralExpression_1y84stb" typeRef="string" expressionLanguage="feel">
          <text>required_docs.keys()</text>
        </inputExpression>
      </input>
      <output id="OutputClause_05y0j7c" label="data_security_plan" name="data_security_plan" typeRef="string" />
      <rule id="DecisionRule_17xsr74">
        <description></description>
        <inputEntry id="UnaryTests_05ldcq4">
          <text>contains(6)</text>
        </inputEntry>
        <outputEntry id="LiteralExpression_09oao3s">
          <text>"required"</text>
        </outputEntry>
      </rule>
    </decisionTable>
  </decision>
</definitions>

@@ -0,0 +1,25 @@
<?xml version="1.0" encoding="UTF-8"?>
<definitions xmlns="http://www.omg.org/spec/DMN/20151101/dmn.xsd" xmlns:biodi="http://bpmn.io/schema/dmn/biodi/1.0" id="Definitions_1p34ouw" name="DRD" namespace="http://camunda.org/schema/1.0/dmn" exporter="Camunda Modeler" exporterVersion="3.4.1">
  <decision id="enter_core_info" name="Enter Core Info">
    <extensionElements>
      <biodi:bounds x="170" y="60" width="180" height="80" />
    </extensionElements>
    <decisionTable id="decisionTable_1">
      <input id="InputClause_1ki80j6" label="required doc ids">
        <inputExpression id="LiteralExpression_10mfcy7" typeRef="string" expressionLanguage="Python">
          <text>required_docs.keys()</text>
        </inputExpression>
      </input>
      <output id="output_1" label="enter_core_info" name="enter_core_info" typeRef="string" />
      <rule id="DecisionRule_10oo3ms">
        <description>Core information is always required.</description>
        <inputEntry id="UnaryTests_1dtfw0r">
          <text></text>
        </inputEntry>
        <outputEntry id="LiteralExpression_1d9565g">
          <text>"required"</text>
        </outputEntry>
      </rule>
    </decisionTable>
  </decision>
</definitions>

@@ -0,0 +1,40 @@
<?xml version="1.0" encoding="UTF-8"?>
<definitions xmlns="http://www.omg.org/spec/DMN/20151101/dmn.xsd" xmlns:biodi="http://bpmn.io/schema/dmn/biodi/1.0" id="Definitions_1p34ouw" name="DRD" namespace="http://camunda.org/schema/1.0/dmn" exporter="Camunda Modeler" exporterVersion="3.4.1">
  <decision id="sponsor_funding_source" name="Sponsor Funding Source">
    <extensionElements>
      <biodi:bounds x="190" y="70" width="180" height="80" />
    </extensionElements>
    <decisionTable id="DecisionTable_00zdxg0">
      <input id="InputClause_02n3ccs" label="Required Doc Ids">
        <inputExpression id="LiteralExpression_1ju4o1o" typeRef="string" expressionLanguage="feel">
          <text>required_docs.keys()</text>
        </inputExpression>
      </input>
      <output id="OutputClause_1ybi1ud" label="sponsor_funding_source" name="eat_my_shorts" typeRef="string" />
      <rule id="DecisionRule_1t97mw4">
        <inputEntry id="UnaryTests_0ym4ln2">
          <text>contains(12)</text>
        </inputEntry>
        <outputEntry id="LiteralExpression_1pweuqc">
          <text>"required"</text>
        </outputEntry>
      </rule>
      <rule id="DecisionRule_1q965wz">
        <inputEntry id="UnaryTests_1mlhh3t">
          <text>not contains(12)</text>
        </inputEntry>
        <outputEntry id="LiteralExpression_073vd6i">
          <text>"disabled"</text>
        </outputEntry>
      </rule>
      <rule id="DecisionRule_0zh6o60">
        <inputEntry id="UnaryTests_059mk90">
          <text></text>
        </inputEntry>
        <outputEntry id="LiteralExpression_18pfm2o">
          <text>"hidden"</text>
        </outputEntry>
      </rule>
    </decisionTable>
  </decision>
</definitions>

@@ -0,0 +1,150 @@
<?xml version="1.0" encoding="UTF-8"?>
<bpmn:definitions xmlns:bpmn="http://www.omg.org/spec/BPMN/20100524/MODEL" xmlns:bpmndi="http://www.omg.org/spec/BPMN/20100524/DI" xmlns:dc="http://www.omg.org/spec/DD/20100524/DC" xmlns:camunda="http://camunda.org/schema/1.0/bpmn" xmlns:di="http://www.omg.org/spec/DD/20100524/DI" id="Definitions_1kudwnk" targetNamespace="http://bpmn.io/schema/bpmn" exporter="Camunda Modeler" exporterVersion="3.4.1">
  <bpmn:process id="Process_0jhpidf" isExecutable="true">
    <bpmn:startEvent id="StartEvent_1">
      <bpmn:outgoing>SequenceFlow_1ees8ka</bpmn:outgoing>
    </bpmn:startEvent>
    <bpmn:sequenceFlow id="SequenceFlow_1ees8ka" sourceRef="StartEvent_1" targetRef="Task_Load_Requirements" />
    <bpmn:endEvent id="Event_135x8jg">
      <bpmn:incoming>Flow_0pwtiqm</bpmn:incoming>
    </bpmn:endEvent>
    <bpmn:scriptTask id="Task_Load_Requirements" name="Load Required Documents From PM">
      <bpmn:incoming>SequenceFlow_1ees8ka</bpmn:incoming>
      <bpmn:outgoing>SequenceFlow_17ct47v</bpmn:outgoing>
      <bpmn:script>RequiredDocs</bpmn:script>
    </bpmn:scriptTask>
    <bpmn:businessRuleTask id="Activity_1yqy50i" name="Enter Core Info " camunda:decisionRef="enter_core_info">
      <bpmn:incoming>Flow_1m8285h</bpmn:incoming>
      <bpmn:outgoing>Flow_1sggkit</bpmn:outgoing>
    </bpmn:businessRuleTask>
    <bpmn:sequenceFlow id="Flow_1sggkit" sourceRef="Activity_1yqy50i" targetRef="Gateway_12tpgcy" />
    <bpmn:parallelGateway id="Gateway_12tpgcy">
      <bpmn:incoming>Flow_1sggkit</bpmn:incoming>
      <bpmn:incoming>Flow_1txrak2</bpmn:incoming>
      <bpmn:incoming>Flow_0x9580l</bpmn:incoming>
      <bpmn:outgoing>Flow_0pwtiqm</bpmn:outgoing>
    </bpmn:parallelGateway>
    <bpmn:sequenceFlow id="Flow_0pwtiqm" sourceRef="Gateway_12tpgcy" targetRef="Event_135x8jg" />
    <bpmn:parallelGateway id="Gateway_1nta7st">
      <bpmn:incoming>SequenceFlow_17ct47v</bpmn:incoming>
      <bpmn:outgoing>Flow_1m8285h</bpmn:outgoing>
      <bpmn:outgoing>Flow_18pl92p</bpmn:outgoing>
      <bpmn:outgoing>Flow_1nimppb</bpmn:outgoing>
    </bpmn:parallelGateway>
    <bpmn:sequenceFlow id="Flow_1m8285h" sourceRef="Gateway_1nta7st" targetRef="Activity_1yqy50i" />
    <bpmn:sequenceFlow id="SequenceFlow_17ct47v" sourceRef="Task_Load_Requirements" targetRef="Gateway_1nta7st" />
    <bpmn:sequenceFlow id="Flow_18pl92p" sourceRef="Gateway_1nta7st" targetRef="Activity_16cm213" />
    <bpmn:sequenceFlow id="Flow_1nimppb" sourceRef="Gateway_1nta7st" targetRef="Activity_1k5eeun" />
    <bpmn:businessRuleTask id="Activity_1k5eeun" name="Data Security Plan" camunda:decisionRef="data_security_plan">
      <bpmn:incoming>Flow_1nimppb</bpmn:incoming>
      <bpmn:outgoing>Flow_1txrak2</bpmn:outgoing>
    </bpmn:businessRuleTask>
    <bpmn:sequenceFlow id="Flow_1txrak2" sourceRef="Activity_1k5eeun" targetRef="Gateway_12tpgcy" />
    <bpmn:businessRuleTask id="Activity_16cm213" name="Sponsor Funding Source" camunda:decisionRef="sponsor_funding_source">
      <bpmn:incoming>Flow_18pl92p</bpmn:incoming>
      <bpmn:outgoing>Flow_0x9580l</bpmn:outgoing>
    </bpmn:businessRuleTask>
    <bpmn:sequenceFlow id="Flow_0x9580l" sourceRef="Activity_16cm213" targetRef="Gateway_12tpgcy" />
    <bpmn:textAnnotation id="TextAnnotation_1pv8ygy">
      <bpmn:text>Loads information from the Protocol Builder</bpmn:text>
    </bpmn:textAnnotation>
    <bpmn:association id="Association_0w69z3w" sourceRef="Task_Load_Requirements" targetRef="TextAnnotation_1pv8ygy" />
    <bpmn:textAnnotation id="TextAnnotation_0ydnva4">
      <bpmn:text>Include only automatic tasks, no user input is accepted for the Master workflow</bpmn:text>
    </bpmn:textAnnotation>
    <bpmn:association id="Association_0a41ixa" sourceRef="StartEvent_1" targetRef="TextAnnotation_0ydnva4" />
    <bpmn:textAnnotation id="TextAnnotation_1f52jro">
      <bpmn:text>All workflows available in the system are considered "optional" by default. Use decision tables here to alter that state if needed. Alternate values include: "hidden" (do not show them initially), "required" (must be completed), "disabled" (visible, but can not be started yet)</bpmn:text>
    </bpmn:textAnnotation>
    <bpmn:association id="Association_1mzqzwj" sourceRef="Gateway_1nta7st" targetRef="TextAnnotation_1f52jro" />
  </bpmn:process>
  <bpmndi:BPMNDiagram id="BPMNDiagram_1">
    <bpmndi:BPMNPlane id="BPMNPlane_1" bpmnElement="Process_0jhpidf">
      <bpmndi:BPMNShape id="_BPMNShape_StartEvent_2" bpmnElement="StartEvent_1">
        <dc:Bounds x="192" y="421" width="36" height="36" />
      </bpmndi:BPMNShape>
      <bpmndi:BPMNEdge id="SequenceFlow_1ees8ka_di" bpmnElement="SequenceFlow_1ees8ka">
        <di:waypoint x="228" y="439" />
        <di:waypoint x="300" y="439" />
      </bpmndi:BPMNEdge>
      <bpmndi:BPMNShape id="Event_135x8jg_di" bpmnElement="Event_135x8jg">
        <dc:Bounds x="862" y="421" width="36" height="36" />
      </bpmndi:BPMNShape>
      <bpmndi:BPMNShape id="ScriptTask_0x4a3pe_di" bpmnElement="Task_Load_Requirements">
        <dc:Bounds x="300" y="399" width="100" height="80" />
      </bpmndi:BPMNShape>
      <bpmndi:BPMNShape id="Activity_1yqy50i_di" bpmnElement="Activity_1yqy50i">
        <dc:Bounds x="640" y="290" width="100" height="80" />
      </bpmndi:BPMNShape>
      <bpmndi:BPMNShape id="TextAnnotation_1pv8ygy_di" bpmnElement="TextAnnotation_1pv8ygy">
        <dc:Bounds x="300" y="247" width="100" height="68" />
      </bpmndi:BPMNShape>
      <bpmndi:BPMNEdge id="Association_0w69z3w_di" bpmnElement="Association_0w69z3w">
        <di:waypoint x="350" y="399" />
        <di:waypoint x="350" y="315" />
      </bpmndi:BPMNEdge>
      <bpmndi:BPMNShape id="TextAnnotation_0ydnva4_di" bpmnElement="TextAnnotation_0ydnva4">
        <dc:Bounds x="155" y="220" width="110" height="82" />
      </bpmndi:BPMNShape>
      <bpmndi:BPMNEdge id="Association_0a41ixa_di" bpmnElement="Association_0a41ixa">
        <di:waypoint x="210" y="421" />
        <di:waypoint x="210" y="302" />
      </bpmndi:BPMNEdge>
      <bpmndi:BPMNShape id="TextAnnotation_1f52jro_di" bpmnElement="TextAnnotation_1f52jro">
        <dc:Bounds x="461" y="80" width="243" height="124" />
      </bpmndi:BPMNShape>
      <bpmndi:BPMNEdge id="Flow_1sggkit_di" bpmnElement="Flow_1sggkit">
        <di:waypoint x="740" y="330" />
        <di:waypoint x="800" y="330" />
        <di:waypoint x="800" y="414" />
      </bpmndi:BPMNEdge>
      <bpmndi:BPMNShape id="Gateway_1kk6x70_di" bpmnElement="Gateway_12tpgcy">
        <dc:Bounds x="775" y="414" width="50" height="50" />
      </bpmndi:BPMNShape>
      <bpmndi:BPMNEdge id="Flow_0pwtiqm_di" bpmnElement="Flow_0pwtiqm">
        <di:waypoint x="825" y="439" />
        <di:waypoint x="862" y="439" />
      </bpmndi:BPMNEdge>
      <bpmndi:BPMNShape id="Gateway_1m22g4p_di" bpmnElement="Gateway_1nta7st">
        <dc:Bounds x="558" y="414" width="50" height="50" />
      </bpmndi:BPMNShape>
      <bpmndi:BPMNEdge id="Association_1mzqzwj_di" bpmnElement="Association_1mzqzwj">
        <di:waypoint x="583" y="414" />
        <di:waypoint x="583" y="204" />
      </bpmndi:BPMNEdge>
      <bpmndi:BPMNEdge id="Flow_1m8285h_di" bpmnElement="Flow_1m8285h">
        <di:waypoint x="583" y="414" />
        <di:waypoint x="583" y="330" />
        <di:waypoint x="640" y="330" />
      </bpmndi:BPMNEdge>
      <bpmndi:BPMNEdge id="SequenceFlow_17ct47v_di" bpmnElement="SequenceFlow_17ct47v">
        <di:waypoint x="400" y="439" />
        <di:waypoint x="558" y="439" />
      </bpmndi:BPMNEdge>
      <bpmndi:BPMNEdge id="Flow_18pl92p_di" bpmnElement="Flow_18pl92p">
        <di:waypoint x="583" y="464" />
        <di:waypoint x="583" y="550" />
        <di:waypoint x="640" y="550" />
      </bpmndi:BPMNEdge>
      <bpmndi:BPMNEdge id="Flow_1nimppb_di" bpmnElement="Flow_1nimppb">
        <di:waypoint x="608" y="439" />
        <di:waypoint x="640" y="439" />
      </bpmndi:BPMNEdge>
      <bpmndi:BPMNShape id="Activity_1k5eeun_di" bpmnElement="Activity_1k5eeun">
        <dc:Bounds x="640" y="399" width="100" height="80" />
      </bpmndi:BPMNShape>
      <bpmndi:BPMNEdge id="Flow_1txrak2_di" bpmnElement="Flow_1txrak2">
        <di:waypoint x="740" y="439" />
        <di:waypoint x="775" y="439" />
      </bpmndi:BPMNEdge>
      <bpmndi:BPMNShape id="Activity_16cm213_di" bpmnElement="Activity_16cm213">
        <dc:Bounds x="640" y="510" width="100" height="80" />
      </bpmndi:BPMNShape>
      <bpmndi:BPMNEdge id="Flow_0x9580l_di" bpmnElement="Flow_0x9580l">
        <di:waypoint x="740" y="550" />
        <di:waypoint x="800" y="550" />
        <di:waypoint x="800" y="464" />
      </bpmndi:BPMNEdge>
    </bpmndi:BPMNPlane>
  </bpmndi:BPMNDiagram>
</bpmn:definitions>

@@ -25,46 +25,6 @@ class ExampleDataLoader:

        self.load_reference_documents()

        users = [
            UserModel(
                uid='dhf8r',
                email_address='dhf8r@virginia.EDU',
                display_name='Daniel Harold Funk',
                affiliation='staff@virginia.edu;member@virginia.edu',
                eppn='dhf8r@virginia.edu',
                first_name='Daniel',
                last_name='Funk',
                title='SOFTWARE ENGINEER V'
            )
        ]
        db.session.add_all(users)
        db.session.commit()

        studies = [
            StudyModel(
                id=0,
                title='The impact of fried pickles on beer consumption in bipedal software developers.',
                last_updated=datetime.datetime.now(),
                protocol_builder_status=ProtocolBuilderStatus.IN_PROCESS,
                primary_investigator_id='dhf8r',
                sponsor='Sartography Pharmaceuticals',
                ind_number='1234',
                user_uid='dhf8r'
            ),
            StudyModel(
                id=1,
                title='Requirement of hippocampal neurogenesis for the behavioral effects of soft pretzels',
                last_updated=datetime.datetime.now(),
                protocol_builder_status=ProtocolBuilderStatus.IN_PROCESS,
                primary_investigator_id='dhf8r',
                sponsor='Makerspace & Co.',
                ind_number='5678',
                user_uid='dhf8r'
            ),
        ]
        db.session.add_all(studies)
        db.session.commit()

        categories = [
            WorkflowSpecCategoryModel(
                id=0,

@@ -105,7 +65,13 @@ class ExampleDataLoader:
        ]
        db.session.add_all(categories)
        db.session.commit()

        self.create_spec(id="top_level_workflow",
                         name="top_level_workflow",
                         display_name="Top Level Workflow",
                         description="Determines the status of other workflows in a study",
                         category_id=0,
                         master_spec=True
                         )
        self.create_spec(id="irb_api_details",
                         name="irb_api_details",
                         display_name="IRB API Details",

@@ -159,7 +125,7 @@ class ExampleDataLoader:
                         display_name=display_name,
                         description=description,
                         is_master_spec=master_spec,
                         workflow_spec_category_id=category_id)
                         category_id=category_id)
        db.session.add(spec)
        db.session.commit()
        if not filepath:

@@ -4,9 +4,12 @@ import json
import os
import unittest
import urllib.parse
import datetime

from crc.models.protocol_builder import ProtocolBuilderStatus
from crc.models.study import StudyModel
from crc.services.file_service import FileService
from crc.services.study_service import StudyService
from crc.services.workflow_processor import WorkflowProcessor

os.environ["TESTING"] = "true"

@@ -32,6 +35,43 @@ class BaseTest(unittest.TestCase):
    auths = {}
    test_uid = "dhf8r"

    users = [
        {
            'uid':'dhf8r',
            'email_address':'dhf8r@virginia.EDU',
            'display_name':'Daniel Harold Funk',
            'affiliation':'staff@virginia.edu;member@virginia.edu',
            'eppn':'dhf8r@virginia.edu',
            'first_name':'Daniel',
            'last_name':'Funk',
            'title':'SOFTWARE ENGINEER V'
        }
    ]

    studies = [
        {
            'id':0,
            'title':'The impact of fried pickles on beer consumption in bipedal software developers.',
            'last_updated':datetime.datetime.now(),
            'protocol_builder_status':ProtocolBuilderStatus.IN_PROCESS,
            'primary_investigator_id':'dhf8r',
            'sponsor':'Sartography Pharmaceuticals',
            'ind_number':'1234',
            'user_uid':'dhf8r'
        },
        {
            'id':1,
            'title':'Requirement of hippocampal neurogenesis for the behavioral effects of soft pretzels',
            'last_updated':datetime.datetime.now(),
            'protocol_builder_status':ProtocolBuilderStatus.IN_PROCESS,
            'primary_investigator_id':'dhf8r',
            'sponsor':'Makerspace & Co.',
            'ind_number':'5678',
            'user_uid':'dhf8r'
        }
    ]


    @classmethod
    def setUpClass(cls):
        app.config.from_object('config.testing')

@@ -77,6 +117,16 @@ class BaseTest(unittest.TestCase):
        ExampleDataLoader.clean_db()
        ExampleDataLoader().load_all()

        for user_json in self.users:
            db.session.add(UserModel(**user_json))
        db.session.commit()
        for study_json in self.studies:
            study_model = StudyModel(**study_json)
            db.session.add(study_model)
            StudyService._add_all_workflow_specs_to_study(study_model)
            db.session.commit()
            db.session.flush()

        specs = session.query(WorkflowSpecModel).all()
        self.assertIsNotNone(specs)

@@ -85,18 +135,23 @@ class BaseTest(unittest.TestCase):
        self.assertIsNotNone(files)
        self.assertGreater(len(files), 0)

        for spec in specs:
            files = session.query(FileModel).filter_by(workflow_spec_id=spec.id).all()
            self.assertIsNotNone(files)
            self.assertGreater(len(files), 0)
            for file in files:
                file_data = session.query(FileDataModel).filter_by(file_model_id=file.id).all()
                self.assertIsNotNone(file_data)
                self.assertGreater(len(file_data), 0)

    @staticmethod
    def load_test_spec(dir_name, master_spec=False):
    def load_test_spec(dir_name, master_spec=False, category_id=None):
        """Loads a spec into the database based on a directory in /tests/data"""
        if session.query(WorkflowSpecModel).filter_by(id=dir_name).count() > 0:
            return
        filepath = os.path.join(app.root_path, '..', 'tests', 'data', dir_name, "*")
        return ExampleDataLoader().create_spec(id=dir_name, name=dir_name, filepath=filepath, master_spec=master_spec)
        return ExampleDataLoader().create_spec(id=dir_name, name=dir_name, filepath=filepath, master_spec=master_spec,
                                               category_id=category_id)

    @staticmethod
    def protocol_builder_response(file_name):

@@ -147,16 +202,12 @@ class BaseTest(unittest.TestCase):
        content_type = CONTENT_TYPES[file_extension[1:]]
        file_service.update_file(file_model, data, content_type)

    def create_workflow(self, workflow_name):
        study = session.query(StudyModel).first()
        spec = self.load_test_spec(workflow_name)
        processor = WorkflowProcessor.create(study.id, spec.id)
        rv = self.app.post(
            '/v1.0/study/%i/workflows' % study.id,
            headers=self.logged_in_headers(),
            content_type="application/json",
            data=json.dumps(WorkflowSpecModelSchema().dump(spec)))
        self.assert_success(rv)
    def create_workflow(self, workflow_name, study=None, category_id=None):
        if study == None:
            study = session.query(StudyModel).first()
        spec = self.load_test_spec(workflow_name, category_id=category_id)
        workflow_model = StudyService._create_workflow_model(study, spec)
        processor = WorkflowProcessor(workflow_model)
        workflow = session.query(WorkflowModel).filter_by(study_id=study.id, workflow_spec_id=workflow_name).first()
        return workflow

@@ -26,7 +26,7 @@ class TestFilesApi(BaseTest):

    def test_list_multiple_files_for_workflow_spec(self):
        self.load_example_data()
        spec = session.query(WorkflowSpecModel).first()
        spec = self.load_test_spec("random_fact")
        svgFile = FileModel(name="test.svg", type=FileType.svg,
                            primary=False, workflow_spec_id=spec.id)
        session.add(svgFile)

@@ -3,45 +3,86 @@ from datetime import datetime, timezone
 from unittest.mock import patch

 from crc import session
-from crc.models.api_models import WorkflowApiSchema, WorkflowApi
+from crc.models.api_models import WorkflowApiSchema
 from crc.models.protocol_builder import ProtocolBuilderStatus, ProtocolBuilderStudyDetailsSchema, \
-    ProtocolBuilderStudySchema, ProtocolBuilderInvestigatorSchema, ProtocolBuilderRequiredDocumentSchema
-from crc.models.study import StudyModel, StudyModelSchema
-from crc.models.workflow import WorkflowSpecModel, WorkflowSpecModelSchema, WorkflowModel, WorkflowStatus
+    ProtocolBuilderStudySchema
+from crc.models.study import StudyModel, StudySchema
+from crc.models.workflow import WorkflowSpecModel, WorkflowSpecModelSchema, WorkflowModel, WorkflowStatus, \
+    WorkflowSpecCategoryModel
 from tests.base_test import BaseTest


 class TestStudyApi(BaseTest):

+    TEST_STUDY = {
+        "id": 12345,
+        "title": "Phase III Trial of Genuine People Personalities (GPP) Autonomous Intelligent Emotional Agents "
+                 "for Interstellar Spacecraft",
+        "last_updated": datetime.now(tz=timezone.utc),
+        "protocol_builder_status": ProtocolBuilderStatus.IN_PROCESS,
+        "primary_investigator_id": "tricia.marie.mcmillan@heartofgold.edu",
+        "sponsor": "Sirius Cybernetics Corporation",
+        "ind_number": "567890",
+        "user_uid": "dhf8r",
+    }
+
+    def add_test_study(self):
+        rv = self.app.post('/v1.0/study',
+                           content_type="application/json",
+                           headers=self.logged_in_headers(),
+                           data=json.dumps(StudySchema().dump(self.TEST_STUDY)))
+        self.assert_success(rv)
+        return json.loads(rv.get_data(as_text=True))
+
     def test_study_basics(self):
         self.load_example_data()
         study = session.query(StudyModel).first()
         self.assertIsNotNone(study)

+    def test_get_study(self):
+        """Generic test, but pretty detailed, in that the study should return a categorized list of workflows.
+        This starts without loading the example data, to show that all the bases are covered from ground 0."""
+        new_study = self.add_test_study()
+        new_study = session.query(StudyModel).filter_by(id=new_study["id"]).first()
+        # Add a category
+        new_category = WorkflowSpecCategoryModel(id=21, name="test_cat", display_name="Test Category", display_order=0)
+        session.add(new_category)
+        session.commit()
+        # Create a workflow specification
+        self.create_workflow("random_fact", study=new_study, category_id=new_category.id)
+        # Assure there is a master specification, and it has the lookup files it needs.
+        spec = self.load_test_spec("top_level_workflow", master_spec=True)
+        self.create_reference_document()
+
+        api_response = self.app.get('/v1.0/study/%i' % new_study.id,
+                                    headers=self.logged_in_headers(), content_type="application/json")
+        self.assert_success(api_response)
+        study = StudySchema().loads(api_response.get_data(as_text=True))
+
+        self.assertEqual(study.title, self.TEST_STUDY['title'])
+        self.assertEqual(study.primary_investigator_id, self.TEST_STUDY['primary_investigator_id'])
+        self.assertEqual(study.sponsor, self.TEST_STUDY['sponsor'])
+        self.assertEqual(study.ind_number, self.TEST_STUDY['ind_number'])
+        self.assertEqual(study.user_uid, self.TEST_STUDY['user_uid'])
+
+        # Categories are read only, so switching to sub-scripting here.
+        category = [c for c in study.categories if c['name'] == "test_cat"][0]
+        self.assertEqual("test_cat", category['name'])
+        self.assertEqual("Test Category", category['display_name'])
+        self.assertEqual(1, len(category["workflows"]))
+        workflow = category["workflows"][0]
+        self.assertEqual("random_fact", workflow["name"])
+        self.assertEqual("optional", workflow["state"])
+        self.assertEqual("not_started", workflow["status"])
+        self.assertEqual(0, workflow["total_tasks"])
+        self.assertEqual(0, workflow["completed_tasks"])
+
     def test_add_study(self):
         self.load_example_data()
-        study = {
-            "id": 12345,
-            "title": "Phase III Trial of Genuine People Personalities (GPP) Autonomous Intelligent Emotional Agents "
-                     "for Interstellar Spacecraft",
-            "last_updated": datetime.now(tz=timezone.utc),
-            "protocol_builder_status": ProtocolBuilderStatus.IN_PROCESS,
-            "primary_investigator_id": "tricia.marie.mcmillan@heartofgold.edu",
-            "sponsor": "Sirius Cybernetics Corporation",
-            "ind_number": "567890",
-            "user_uid": "dhf8r",
-        }
-        rv = self.app.post('/v1.0/study',
-                           content_type="application/json",
-                           headers=self.logged_in_headers(),
-                           data=json.dumps(StudyModelSchema().dump(study)))
-        self.assert_success(rv)
-        study = json.loads(rv.get_data(as_text=True))
+        study = self.add_test_study()
         db_study = session.query(StudyModel).filter_by(id=12345).first()
         self.assertIsNotNone(db_study)
         self.assertEqual(study["title"], db_study.title)
         #self.assertAlmostEqual(study["last_updated"], db_study.last_updated)
         #self.assertEqual(study["protocol_builder_status"], db_study.protocol_builder_status)
         self.assertEqual(study["primary_investigator_id"], db_study.primary_investigator_id)
         self.assertEqual(study["sponsor"], db_study.sponsor)
         self.assertEqual(study["ind_number"], db_study.ind_number)

@@ -52,7 +93,6 @@ class TestStudyApi(BaseTest):
         error_count = len(study["errors"])
         self.assertEquals(workflow_spec_count, workflow_count + error_count)

-
     def test_update_study(self):
         self.load_example_data()
         study: StudyModel = session.query(StudyModel).first()

@@ -61,12 +101,11 @@ class TestStudyApi(BaseTest):
         rv = self.app.put('/v1.0/study/%i' % study.id,
                           content_type="application/json",
                           headers=self.logged_in_headers(),
-                          data=json.dumps(StudyModelSchema().dump(study)))
+                          data=json.dumps(StudySchema().dump(study)))
         self.assert_success(rv)
         db_study = session.query(StudyModel).filter_by(id=study.id).first()
         self.assertIsNotNone(db_study)
         self.assertEqual(study.title, db_study.title)
-        self.assertEqual(study.protocol_builder_status, db_study.protocol_builder_status)
         json_data = json.loads(rv.get_data(as_text=True))
         self.assertEqual(study.title, json_data['title'])
         self.assertEqual(study.protocol_builder_status.name, json_data['protocol_builder_status'])

     @patch('crc.services.protocol_builder.ProtocolBuilderService.get_study_details')  # mock_details
     @patch('crc.services.protocol_builder.ProtocolBuilderService.get_studies')  # mock_studies

@@ -92,13 +131,12 @@ class TestStudyApi(BaseTest):
         api_response = self.app.get('/v1.0/study', headers=self.logged_in_headers(), content_type="application/json")
         self.assert_success(api_response)
         json_data = json.loads(api_response.get_data(as_text=True))
-        api_studies = StudyModelSchema(many=True).load(json_data, session=session)

         num_inactive = 0
         num_active = 0

-        for study in api_studies:
-            if study.inactive:
+        for study in json_data:
+            if study['inactive']:
                 num_inactive += 1
             else:
                 num_active += 1

@@ -108,14 +146,14 @@ class TestStudyApi(BaseTest):
         self.assertGreater(num_db_studies_after, num_db_studies_before)
         self.assertGreater(num_inactive, 0)
         self.assertGreater(num_active, 0)
-        self.assertEqual(len(api_studies), num_db_studies_after)
+        self.assertEqual(len(json_data), num_db_studies_after)
         self.assertEqual(num_active + num_inactive, num_db_studies_after)

         # Assure that the existing study is properly updated.
         test_study = session.query(StudyModel).filter_by(id=54321).first()
         self.assertFalse(test_study.inactive)

-    def test_study_api_get_single_study(self):
+    def test_get_single_study(self):
         self.load_example_data()
         study = session.query(StudyModel).first()
         rv = self.app.get('/v1.0/study/%i' % study.id,

@@ -124,36 +162,12 @@ class TestStudyApi(BaseTest):
                           content_type="application/json")
         self.assert_success(rv)
         json_data = json.loads(rv.get_data(as_text=True))
-        study2 = StudyModelSchema().load(json_data, session=session)
-        self.assertEqual(study, study2)
-        self.assertEqual(study.id, study2.id)
-        self.assertEqual(study.title, study2.title)
-        self.assertEqual(study.last_updated, study2.last_updated)
-        self.assertEqual(study.protocol_builder_status, study2.protocol_builder_status)
-        self.assertEqual(study.primary_investigator_id, study2.primary_investigator_id)
-        self.assertEqual(study.sponsor, study2.sponsor)
-        self.assertEqual(study.ind_number, study2.ind_number)
-
-    def test_add_workflow_to_study(self):
-        self.load_example_data()
-        study = session.query(StudyModel).first()
-        self.assertEqual(0, session.query(WorkflowModel).count())
-        spec = session.query(WorkflowSpecModel).first()
-        rv = self.app.post('/v1.0/study/%i/workflows' % study.id,
-                           content_type="application/json",
-                           headers=self.logged_in_headers(),
-                           data=json.dumps(WorkflowSpecModelSchema().dump(spec)))
-        self.assert_success(rv)
-        self.assertEqual(1, session.query(WorkflowModel).count())
-        workflow_model = session.query(WorkflowModel).first()
-        self.assertEqual(study.id, workflow_model.study_id)
-        self.assertEqual(WorkflowStatus.user_input_required, workflow_model.status)
-        self.assertIsNotNone(workflow_model.bpmn_workflow_json)
-        self.assertEqual(spec.id, workflow_model.workflow_spec_id)
-
-        json_data = json.loads(rv.get_data(as_text=True))
-        workflow2 = WorkflowApiSchema().load(json_data)
-        self.assertEqual(workflow_model.id, workflow2.id)
+        self.assertEqual(study.id, json_data['id'])
+        self.assertEqual(study.title, json_data['title'])
+        self.assertEqual(study.protocol_builder_status.name, json_data['protocol_builder_status'])
+        self.assertEqual(study.primary_investigator_id, json_data['primary_investigator_id'])
+        self.assertEqual(study.sponsor, json_data['sponsor'])
+        self.assertEqual(study.ind_number, json_data['ind_number'])

     def test_delete_study(self):
         self.load_example_data()

@@ -161,64 +175,6 @@ class TestStudyApi(BaseTest):
         rv = self.app.delete('/v1.0/study/%i' % study.id, headers=self.logged_in_headers())
         self.assert_success(rv)

-    def test_delete_study_with_workflow(self):
-        self.load_example_data()
-        study = session.query(StudyModel).first()
-
-        spec = session.query(WorkflowSpecModel).first()
-        rv = self.app.post('/v1.0/study/%i/workflows' % study.id,
-                           content_type="application/json",
-                           headers=self.logged_in_headers(),
-                           data=json.dumps(WorkflowSpecModelSchema().dump(spec)))
-
-        rv = self.app.delete('/v1.0/study/%i' % study.id, headers=self.logged_in_headers())
-        self.assert_failure(rv, error_code="study_integrity_error")
-
-    def test_delete_workflow(self):
-        self.load_example_data()
-        study = session.query(StudyModel).first()
-        spec = session.query(WorkflowSpecModel).first()
-        rv = self.app.post('/v1.0/study/%i/workflows' % study.id,
-                           content_type="application/json",
-                           headers=self.logged_in_headers(),
-                           data=json.dumps(WorkflowSpecModelSchema().dump(spec)))
-        self.assertEqual(1, session.query(WorkflowModel).count())
-        json_data = json.loads(rv.get_data(as_text=True))
-        workflow = WorkflowApiSchema().load(json_data)
-        rv = self.app.delete('/v1.0/workflow/%i' % workflow.id, headers=self.logged_in_headers())
-        self.assert_success(rv)
-        self.assertEqual(0, session.query(WorkflowModel).count())
-
-    def test_get_study_workflows(self):
-        self.load_example_data()
-
-        # Should have no workflows to start
-        study = session.query(StudyModel).first()
-        response_before = self.app.get('/v1.0/study/%i/workflows' % study.id,
-                                       content_type="application/json",
-                                       headers=self.logged_in_headers())
-        self.assert_success(response_before)
-        json_data_before = json.loads(response_before.get_data(as_text=True))
-        workflows_before = WorkflowApiSchema(many=True).load(json_data_before)
-        self.assertEqual(0, len(workflows_before))
-
-        # Add a workflow
-        spec = session.query(WorkflowSpecModel).first()
-        add_response = self.app.post('/v1.0/study/%i/workflows' % study.id,
-                                     content_type="application/json",
-                                     headers=self.logged_in_headers(),
-                                     data=json.dumps(WorkflowSpecModelSchema().dump(spec)))
-        self.assert_success(add_response)
-
-        # Should have one workflow now
-        response_after = self.app.get('/v1.0/study/%i/workflows' % study.id,
-                                      content_type="application/json",
-                                      headers=self.logged_in_headers())
-        self.assert_success(response_after)
-        json_data_after = json.loads(response_after.get_data(as_text=True))
-        workflows_after = WorkflowApiSchema(many=True).load(json_data_after)
-        self.assertEqual(1, len(workflows_after))
-
     # """
     # Workflow Specs that have been made available (or not) to a particular study via the status.bpmn should be flagged
     # as available (or not) when the list of a study's workflows is retrieved.

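Reviewer note: test_get_study above effectively pins down the shape of the new categorized study payload. Reassembled from its assertions (a sketch only, not a verbatim API response; field order and any omitted fields are guesses):

    study = {
        "id": 12345,
        "title": "Phase III Trial of ...",
        "categories": [{
            "name": "test_cat",
            "display_name": "Test Category",
            "workflows": [{
                "name": "random_fact",
                "state": "optional",
                "status": "not_started",  # new status: nothing instantiated for this workflow yet
                "total_tasks": 0,
                "completed_tasks": 0,
            }],
        }],
    }
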
@@ -12,22 +12,25 @@ from crc.models.file import FileModel, FileDataModel, CONTENT_TYPES
 from crc.models.study import StudyModel
 from crc.models.workflow import WorkflowSpecModel, WorkflowStatus, WorkflowModel
 from crc.services.file_service import FileService
+from crc.services.study_service import StudyService
 from tests.base_test import BaseTest
 from crc.services.workflow_processor import WorkflowProcessor


 class TestWorkflowProcessor(BaseTest):
-
-
     def _populate_form_with_random_data(self, task):
         WorkflowProcessor.populate_form_with_random_data(task)

+    def get_processor(self, study_model, spec_model):
+        workflow_model = StudyService._create_workflow_model(study_model, spec_model)
+        return WorkflowProcessor(workflow_model)
+
     def test_create_and_complete_workflow(self):
         self.load_example_data()
         workflow_spec_model = self.load_test_spec("random_fact")
         study = session.query(StudyModel).first()
-        processor = WorkflowProcessor.create(study.id, workflow_spec_model.id)
+        processor = self.get_processor(study, workflow_spec_model)
         self.assertEqual(study.id, processor.bpmn_workflow.data[WorkflowProcessor.STUDY_ID_KEY])
         self.assertIsNotNone(processor)
         self.assertEqual(WorkflowStatus.user_input_required, processor.get_status())

@@ -53,7 +56,7 @@ class TestWorkflowProcessor(BaseTest):
         workflow_spec_model = self.load_test_spec("decision_table")
         files = session.query(FileModel).filter_by(workflow_spec_id='decision_table').all()
         self.assertEqual(2, len(files))
-        processor = WorkflowProcessor.create(study.id, workflow_spec_model.id)
+        processor = self.get_processor(study, workflow_spec_model)
         self.assertEqual(WorkflowStatus.user_input_required, processor.get_status())
         next_user_tasks = processor.next_user_tasks()
         self.assertEqual(1, len(next_user_tasks))

@@ -77,7 +80,7 @@ class TestWorkflowProcessor(BaseTest):
         self.load_example_data()
         workflow_spec_model = self.load_test_spec("parallel_tasks")
         study = session.query(StudyModel).first()
-        processor = WorkflowProcessor.create(study.id, workflow_spec_model.id)
+        processor = self.get_processor(study, workflow_spec_model)
         self.assertEqual(WorkflowStatus.user_input_required, processor.get_status())

         # Complete the first steps of the 4 parallel tasks

@@ -118,7 +121,7 @@ class TestWorkflowProcessor(BaseTest):
         self.load_example_data()
         study = session.query(StudyModel).first()
         workflow_spec_model = self.load_test_spec("parallel_tasks")
-        processor = WorkflowProcessor.create(study.id, workflow_spec_model.id)
+        processor = self.get_processor(study, workflow_spec_model)
         self.assertEqual(WorkflowStatus.user_input_required, processor.get_status())
         next_user_tasks = processor.next_user_tasks()
         self.assertEqual(4, len(next_user_tasks))

@@ -139,7 +142,7 @@ class TestWorkflowProcessor(BaseTest):
         self.load_example_data()
         workflow_spec_model = self.load_test_spec("random_fact")
         study = session.query(StudyModel).first()
-        processor = WorkflowProcessor.create(study.id, workflow_spec_model.id)
+        processor = self.get_processor(study, workflow_spec_model)
         processor.do_engine_steps()
         task = processor.next_task()
         task.data = {"type": "buzzword"}

@@ -157,7 +160,7 @@ class TestWorkflowProcessor(BaseTest):
         workflow_spec_model = self.load_test_spec("invalid_spec")
         study = session.query(StudyModel).first()
         with self.assertRaises(ApiError) as context:
-            WorkflowProcessor.create(study.id, workflow_spec_model.id)
+            self.get_processor(study, workflow_spec_model)
         self.assertEqual("workflow_validation_error", context.exception.code)
         self.assertTrue("bpmn:startEvent" in context.exception.message)

@@ -169,9 +172,8 @@ class TestWorkflowProcessor(BaseTest):
         self.load_example_data()
         study = session.query(StudyModel).first()
         workflow_spec_model = self.load_test_spec("two_forms")
-        processor = WorkflowProcessor.create(study.id, workflow_spec_model.id)
-        workflow_model = db.session.query(WorkflowModel).filter(WorkflowModel.study_id == study.id).first()
-        self.assertEqual(workflow_model.workflow_spec_id, workflow_spec_model.id)
+        processor = self.get_processor(study, workflow_spec_model)
+        self.assertEqual(processor.workflow_model.workflow_spec_id, workflow_spec_model.id)
         task = processor.next_task()
         task.data = {"color": "blue"}
         processor.complete_task(task)

@@ -182,7 +184,7 @@ class TestWorkflowProcessor(BaseTest):

         # Attempting a soft update on a structural change should raise a sensible error.
         with self.assertRaises(ApiError) as context:
-            processor3 = WorkflowProcessor(workflow_model, soft_reset=True)
+            processor3 = WorkflowProcessor(processor.workflow_model, soft_reset=True)
         self.assertEqual("unexpected_workflow_structure", context.exception.code)

     def test_workflow_with_bad_expression_raises_sensible_error(self):

@@ -190,7 +192,7 @@ class TestWorkflowProcessor(BaseTest):

         workflow_spec_model = self.load_test_spec("invalid_expression")
         study = session.query(StudyModel).first()
-        processor = WorkflowProcessor.create(study.id, workflow_spec_model.id)
+        processor = self.get_processor(study, workflow_spec_model)
         processor.do_engine_steps()
         next_user_tasks = processor.next_user_tasks()
         self.assertEqual(1, len(next_user_tasks))

@@ -207,7 +209,7 @@ class TestWorkflowProcessor(BaseTest):
         files = session.query(FileModel).filter_by(workflow_spec_id='docx').all()
         self.assertEqual(2, len(files))
         workflow_spec_model = session.query(WorkflowSpecModel).filter_by(id="docx").first()
-        processor = WorkflowProcessor.create(study.id, workflow_spec_model.id)
+        processor = self.get_processor(study, workflow_spec_model)
         self.assertEqual(WorkflowStatus.user_input_required, processor.get_status())
         next_user_tasks = processor.next_user_tasks()
         self.assertEqual(1, len(next_user_tasks))

@@ -232,7 +234,7 @@ class TestWorkflowProcessor(BaseTest):
         self.load_example_data()
         study = session.query(StudyModel).first()
         workflow_spec_model = self.load_test_spec("study_details")
-        processor = WorkflowProcessor.create(study.id, workflow_spec_model.id)
+        processor = self.get_processor(study, workflow_spec_model)
         processor.do_engine_steps()
         task = processor.bpmn_workflow.last_task
         self.assertIsNotNone(task.data)

@@ -246,12 +248,12 @@ class TestWorkflowProcessor(BaseTest):
         self.load_example_data()
         study = session.query(StudyModel).first()
         workflow_spec_model = self.load_test_spec("decision_table")
-        processor = WorkflowProcessor.create(study.id, workflow_spec_model.id)
+        processor = self.get_processor(study, workflow_spec_model)
         self.assertTrue(processor.get_spec_version().startswith('v1.1'))
         file_service = FileService()

         file_service.add_workflow_spec_file(workflow_spec_model, "new_file.txt", "txt", b'blahblah')
-        processor = WorkflowProcessor.create(study.id, workflow_spec_model.id)
+        processor = self.get_processor(study, workflow_spec_model)
         self.assertTrue(processor.get_spec_version().startswith('v1.1.1'))

         file_path = os.path.join(app.root_path, '..', 'tests', 'data', 'docx', 'docx.bpmn')

@@ -260,16 +262,15 @@ class TestWorkflowProcessor(BaseTest):

         file_model = db.session.query(FileModel).filter(FileModel.name == "decision_table.bpmn").first()
         file_service.update_file(file_model, data, "txt")
-        processor = WorkflowProcessor.create(study.id, workflow_spec_model.id)
+        processor = self.get_processor(study, workflow_spec_model)
         self.assertTrue(processor.get_spec_version().startswith('v2.1.1'))

     def test_restart_workflow(self):
         self.load_example_data()
         study = session.query(StudyModel).first()
         workflow_spec_model = self.load_test_spec("two_forms")
-        processor = WorkflowProcessor.create(study.id, workflow_spec_model.id)
-        workflow_model = db.session.query(WorkflowModel).filter(WorkflowModel.study_id == study.id).first()
-        self.assertEqual(workflow_model.workflow_spec_id, workflow_spec_model.id)
+        processor = self.get_processor(study, workflow_spec_model)
+        self.assertEqual(processor.workflow_model.workflow_spec_id, workflow_spec_model.id)
         task = processor.next_task()
         task.data = {"key": "Value"}
         processor.complete_task(task)

@@ -287,9 +288,8 @@ class TestWorkflowProcessor(BaseTest):
         # Start the two_forms workflow, and enter some data in the first form.
         study = session.query(StudyModel).first()
         workflow_spec_model = self.load_test_spec("two_forms")
-        processor = WorkflowProcessor.create(study.id, workflow_spec_model.id)
-        workflow_model = db.session.query(WorkflowModel).filter(WorkflowModel.study_id == study.id).first()
-        self.assertEqual(workflow_model.workflow_spec_id, workflow_spec_model.id)
+        processor = self.get_processor(study, workflow_spec_model)
+        self.assertEqual(processor.workflow_model.workflow_spec_id, workflow_spec_model.id)
         task = processor.next_task()
         task.data = {"color": "blue"}
         processor.complete_task(task)

@@ -299,14 +299,14 @@ class TestWorkflowProcessor(BaseTest):
         self.replace_file("two_forms.bpmn", file_path)

         # Setting up another processor should not error out, but doesn't pick up the update.
-        workflow_model.bpmn_workflow_json = processor.serialize()
-        processor2 = WorkflowProcessor(workflow_model)
+        processor.workflow_model.bpmn_workflow_json = processor.serialize()
+        processor2 = WorkflowProcessor(processor.workflow_model)
         self.assertEqual("Step 1", processor2.bpmn_workflow.last_task.task_spec.description)
         self.assertNotEqual("# This is some documentation I wanted to add.",
                             processor2.bpmn_workflow.last_task.task_spec.documentation)

         # You can do a soft update and get the right response.
-        processor3 = WorkflowProcessor(workflow_model, soft_reset=True)
+        processor3 = WorkflowProcessor(processor.workflow_model, soft_reset=True)
         self.assertEqual("Step 1", processor3.bpmn_workflow.last_task.task_spec.description)
         self.assertEqual("# This is some documentation I wanted to add.",
                          processor3.bpmn_workflow.last_task.task_spec.documentation)

@@ -319,9 +319,8 @@ class TestWorkflowProcessor(BaseTest):
         # Start the two_forms workflow, and enter some data in the first form.
         study = session.query(StudyModel).first()
         workflow_spec_model = self.load_test_spec("two_forms")
-        processor = WorkflowProcessor.create(study.id, workflow_spec_model.id)
-        workflow_model = db.session.query(WorkflowModel).filter(WorkflowModel.study_id == study.id).first()
-        self.assertEqual(workflow_model.workflow_spec_id, workflow_spec_model.id)
+        processor = self.get_processor(study, workflow_spec_model)
+        self.assertEqual(processor.workflow_model.workflow_spec_id, workflow_spec_model.id)
         task = processor.next_task()
         task.data = {"color": "blue"}
         processor.complete_task(task)

@@ -333,12 +332,12 @@ class TestWorkflowProcessor(BaseTest):
         self.replace_file("two_forms.bpmn", file_path)

         # Assure that creating a new processor doesn't cause any issues, and maintains the spec version.
-        workflow_model.bpmn_workflow_json = processor.serialize()
-        processor2 = WorkflowProcessor(workflow_model)
+        processor.workflow_model.bpmn_workflow_json = processor.serialize()
+        processor2 = WorkflowProcessor(processor.workflow_model)
         self.assertTrue(processor2.get_spec_version().startswith("v1 "))  # Still at version 1.

         # Do a hard reset, which should bring us back to the beginning, but retain the data.
-        processor3 = WorkflowProcessor(workflow_model, hard_reset=True)
+        processor3 = WorkflowProcessor(processor.workflow_model, hard_reset=True)
         self.assertEqual("Step 1", processor3.next_task().task_spec.description)
         self.assertEqual({"color": "blue"}, processor3.next_task().data)
         processor3.complete_task(processor3.next_task())

@@ -357,9 +356,10 @@ class TestWorkflowProcessor(BaseTest):
         self.load_example_data()

         study = session.query(StudyModel).first()
-        workflow_spec_model = self.load_test_spec("top_level_workflow", )
+        workflow_spec_model = db.session.query(WorkflowSpecModel).\
+            filter(WorkflowSpecModel.name=="top_level_workflow").first()

-        processor = WorkflowProcessor.create(study.id, workflow_spec_model.id)
+        processor = self.get_processor(study, workflow_spec_model)
         processor.do_engine_steps()
         self.assertTrue("Top level process is fully automatic.", processor.bpmn_workflow.is_completed())
         data = processor.bpmn_workflow.last_task.data

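Reviewer note: the reset flags exercised in these tests carry distinct semantics worth keeping in mind (a sketch; workflow_model carries the serialized BPMN state, as in the tests above):

    # Plain construction: resumes the stored state and keeps the old spec
    # version, even if the spec files have since changed on disk.
    processor = WorkflowProcessor(workflow_model)

    # soft_reset: adopt the updated spec in place; a structural change raises
    # ApiError with code "unexpected_workflow_structure".
    processor = WorkflowProcessor(workflow_model, soft_reset=True)

    # hard_reset: restart at the first task on the latest spec, retaining task data.
    processor = WorkflowProcessor(workflow_model, hard_reset=True)
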
@@ -57,9 +57,9 @@ class TestWorkflowSpec(BaseTest):

         db_spec_before: WorkflowSpecModel = session.query(WorkflowSpecModel).first()
         spec_id = db_spec_before.id
-        self.assertNotEqual(db_spec_before.workflow_spec_category_id, category_id)
+        self.assertNotEqual(db_spec_before.category_id, category_id)

-        db_spec_before.workflow_spec_category_id = category_id
+        db_spec_before.category_id = category_id
         rv = self.app.put('/v1.0/workflow-specification/%s' % spec_id,
                           content_type="application/json",
                           headers=self.logged_in_headers(),

@@ -70,10 +70,10 @@ class TestWorkflowSpec(BaseTest):
         self.assertEqual(db_spec_before, api_spec)

         db_spec_after: WorkflowSpecModel = session.query(WorkflowSpecModel).filter_by(id=spec_id).first()
-        self.assertIsNotNone(db_spec_after.workflow_spec_category_id)
-        self.assertIsNotNone(db_spec_after.workflow_spec_category)
-        self.assertEqual(db_spec_after.workflow_spec_category.display_name, category.display_name)
-        self.assertEqual(db_spec_after.workflow_spec_category.display_order, category.display_order)
+        self.assertIsNotNone(db_spec_after.category_id)
+        self.assertIsNotNone(db_spec_after.category)
+        self.assertEqual(db_spec_after.category.display_name, category.display_name)
+        self.assertEqual(db_spec_after.category.display_order, category.display_order)

     def test_delete_workflow_specification(self):
         self.load_example_data()

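Reviewer note: these tests reflect the rename of the spec's category foreign key from workflow_spec_category_id to category_id, with the relationship now exposed as category. A minimal usage sketch under that assumption:

    spec = session.query(WorkflowSpecModel).first()
    spec.category_id = category.id         # formerly spec.workflow_spec_category_id
    session.commit()
    print(spec.category.display_name)      # formerly spec.workflow_spec_category
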
@@ -26,7 +26,6 @@ class TestWorkflowSpecValidation(BaseTest):
         self.assertEqual(0, len(self.validate_workflow("file_upload_form")))
         self.assertEqual(0, len(self.validate_workflow("random_fact")))
         self.assertEqual(0, len(self.validate_workflow("study_details")))
-        self.assertEqual(0, len(self.validate_workflow("top_level_workflow")))
         self.assertEqual(0, len(self.validate_workflow("two_forms")))

     @unittest.skip("There is one workflow that is failing right now, and I want that visible after deployment.")