diff --git a/crc/__init__.py b/crc/__init__.py
index cab9ccde..11c0c4d6 100644
--- a/crc/__init__.py
+++ b/crc/__init__.py
@@ -15,6 +15,7 @@ connexion_app = connexion.FlaskApp(__name__)
app = connexion_app.app
app.config.from_object('config.default')
+app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
auth = HTTPTokenAuth('Bearer')
if "TESTING" in os.environ and os.environ["TESTING"] == "true":
diff --git a/crc/api.yml b/crc/api.yml
index b535310f..ddc5938f 100644
--- a/crc/api.yml
+++ b/crc/api.yml
@@ -443,11 +443,14 @@ paths:
content:
multipart/form-data:
schema:
+ x-body-name: file
type: object
properties:
file:
type: string
format: binary
+ required:
+ - file
responses:
'200':
description: Returns the actual file
@@ -561,6 +564,27 @@ paths:
responses:
'204':
description: The workflow was removed
+ /workflow/{workflow_id}/stats:
+ parameters:
+ - name: workflow_id
+ in: path
+ required: true
+ description: The id of the workflow
+ schema:
+ type: integer
+ format: int32
+ get:
+ operationId: crc.api.stats.get_workflow_stats
+ summary: Stats about a given workflow
+ tags:
+ - Workflows and Tasks
+ responses:
+ '200':
+          description: Returns counts of complete, incomplete, and total tasks for the workflow
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/Workflow"
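+      # Note: the handler dumps a WorkflowStats record; it overlaps the Workflow schema via the num_tasks_* fields added below.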
# /v1.0/workflow/0/task/0
/workflow/{workflow_id}/task/{task_id}:
parameters:
@@ -756,10 +780,31 @@ components:
properties:
id:
type: string
+ name:
+ type: string
display_name:
type: string
description:
type: string
+ primary_process_id:
+ type: string
+ nullable: true
+ workflow_spec_category_id:
+ type: integer
+ nullable: true
+ workflow_spec_category:
+ $ref: "#/components/schemas/WorkflowSpecCategory"
+ is_status:
+ type: boolean
+ nullable: true
+ WorkflowSpecCategory:
+ properties:
+ id:
+ type: integer
+ name:
+ type: string
+ display_name:
+ type: string
File:
properties:
id:
@@ -810,6 +855,12 @@ components:
type: string
is_latest_spec:
type: boolean
+ num_tasks_total:
+ type: integer
+ num_tasks_complete:
+ type: integer
+ num_tasks_incomplete:
+ type: integer
example:
id: 291234
diff --git a/crc/api/stats.py b/crc/api/stats.py
new file mode 100644
index 00000000..66d88455
--- /dev/null
+++ b/crc/api/stats.py
@@ -0,0 +1,59 @@
+from datetime import datetime
+
+from flask import g
+
+from crc import session, auth
+from crc.models.stats import WorkflowStatsModel, WorkflowStatsModelSchema, TaskEventModel, TaskEventModelSchema
+
+
+@auth.login_required
+def get_workflow_stats(workflow_id):
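+    """Returns the stored stats record for the given workflow, if one exists."""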
+ workflow_model = session.query(WorkflowStatsModel).filter_by(workflow_id=workflow_id).first()
+ return WorkflowStatsModelSchema().dump(workflow_model)
+
+
+@auth.login_required
+def update_workflow_stats(workflow_model, workflow_api_model):
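+    """Creates or updates the stats record for this workflow version, recounting its complete and incomplete tasks."""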
+ stats = session.query(WorkflowStatsModel) \
+ .filter_by(study_id=workflow_model.study_id) \
+ .filter_by(workflow_id=workflow_model.id) \
+ .filter_by(workflow_spec_id=workflow_model.workflow_spec_id) \
+ .filter_by(spec_version=workflow_model.spec_version) \
+ .first()
+
+ if stats is None:
+ stats = WorkflowStatsModel(
+ study_id=workflow_model.study_id,
+ workflow_id=workflow_model.id,
+ workflow_spec_id=workflow_model.workflow_spec_id,
+ spec_version=workflow_model.spec_version,
+ )
+
+ complete_states = ['CANCELLED', 'COMPLETED']
+ incomplete_states = ['MAYBE', 'LIKELY', 'FUTURE', 'WAITING', 'READY']
+ tasks = list(workflow_api_model.user_tasks)
+ stats.num_tasks_total = len(tasks)
+ stats.num_tasks_complete = sum(1 for t in tasks if t.state in complete_states)
+ stats.num_tasks_incomplete = sum(1 for t in tasks if t.state in incomplete_states)
+ stats.last_updated = datetime.now()
+
+ session.add(stats)
+ session.commit()
+ return WorkflowStatsModelSchema().dump(stats)
+
+
+@auth.login_required
+def log_task_complete(workflow_model, task_id):
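+    """Records a task-completion event for the current user and returns the logged event."""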
+ task_event = TaskEventModel(
+ study_id=workflow_model.study_id,
+ user_uid=g.user.uid,
+ workflow_id=workflow_model.id,
+ workflow_spec_id=workflow_model.workflow_spec_id,
+ spec_version=workflow_model.spec_version,
+ task_id=task_id,
+ task_state='COMPLETE',
+ date=datetime.now(),
+ )
+ session.add(task_event)
+ session.commit()
+ return TaskEventModelSchema().dump(task_event)
diff --git a/crc/api/study.py b/crc/api/study.py
index bfc01dc8..fbdc96c1 100644
--- a/crc/api/study.py
+++ b/crc/api/study.py
@@ -23,16 +23,45 @@ def all_studies():
@auth.login_required
def add_study(body):
- study = StudyModelSchema().load(body, session=session)
+ study: StudyModel = StudyModelSchema().load(body, session=session)
+ status_spec = __get_status_spec(study.status_spec_id)
+
+ # Get latest status spec version
+ if status_spec is not None:
+ study.status_spec_id = status_spec.id
+ study.status_spec_version = WorkflowProcessor.get_latest_version_string(status_spec.id)
+
session.add(study)
session.commit()
- # FIXME: We need to ask the protocol builder what workflows to add to the study, not just add them all.
- for spec in session.query(WorkflowSpecModel).all():
- WorkflowProcessor.create(study.id, spec.id)
+ __add_study_workflows_from_status(study.id, status_spec)
return StudyModelSchema().dump(study)
+def __get_status_spec(status_spec_id):
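+    """Returns the spec with the given id, or the first spec flagged is_status when no id is given."""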
+ if status_spec_id is None:
+ return session.query(WorkflowSpecModel).filter_by(is_status=True).first()
+ else:
+ return session.query(WorkflowSpecModel).filter_by(id=status_spec_id).first()
+
+
+def __add_study_workflows_from_status(study_id, status_spec):
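+    """Creates workflows for the study; if a status spec exists, only the specs it marks active are added."""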
+ all_specs = session.query(WorkflowSpecModel).all()
+ if status_spec is not None:
+ # Run status spec to get list of workflow specs applicable to this study
+ status_processor = WorkflowProcessor.create(study_id, status_spec)
+ status_processor.do_engine_steps()
+ status_data = status_processor.next_task().data
+
+ # Only add workflow specs listed in status spec
+ for spec in all_specs:
+ if spec.id in status_data and status_data[spec.id]:
+ WorkflowProcessor.create(study_id, spec.id)
+ else:
+ # No status spec. Just add all workflows.
+ for spec in all_specs:
+ WorkflowProcessor.create(study_id, spec.id)
+
@auth.login_required
def update_study(study_id, body):
@@ -131,23 +160,63 @@ def post_update_study_from_protocol_builder(study_id):
@auth.login_required
def get_study_workflows(study_id):
- workflow_models = session.query(WorkflowModel).filter_by(study_id=study_id).all()
+
+ # Get study
+ study: StudyModel = session.query(StudyModel).filter_by(id=study_id).first()
+
+ # Get study status spec
+ status_spec: WorkflowSpecModel = session.query(WorkflowSpecModel)\
+ .filter_by(is_status=True).first()
+
+ status_data = None
+
+ if status_spec is not None:
+ # Run status spec
+ status_workflow_model: WorkflowModel = session.query(WorkflowModel)\
+ .filter_by(study_id=study.id)\
+ .filter_by(workflow_spec_id=status_spec.id)\
+ .first()
+ status_processor = WorkflowProcessor(status_workflow_model)
+
+ # Get list of active workflow specs for study
+ status_processor.do_engine_steps()
+ status_data = status_processor.bpmn_workflow.last_task.data
+
+ # Get study workflows
+ workflow_models = session.query(WorkflowModel)\
+ .filter_by(study_id=study_id)\
+ .filter(WorkflowModel.workflow_spec_id != status_spec.id)\
+ .all()
+ else:
+ # Get study workflows
+ workflow_models = session.query(WorkflowModel)\
+ .filter_by(study_id=study_id)\
+ .all()
api_models = []
for workflow_model in workflow_models:
processor = WorkflowProcessor(workflow_model,
workflow_model.bpmn_workflow_json)
- api_models.append(__get_workflow_api_model(processor))
+ api_models.append(__get_workflow_api_model(processor, status_data))
schema = WorkflowApiSchema(many=True)
return schema.dump(api_models)
@auth.login_required
def add_workflow_to_study(study_id, body):
- workflow_spec_model = session.query(WorkflowSpecModel).filter_by(id=body["id"]).first()
+ workflow_spec_model: WorkflowSpecModel = session.query(WorkflowSpecModel).filter_by(id=body["id"]).first()
if workflow_spec_model is None:
error = ApiError('unknown_spec', 'The specification "' + body['id'] + '" is not recognized.')
return ApiErrorSchema.dump(error), 404
processor = WorkflowProcessor.create(study_id, workflow_spec_model.id)
+
+ # If workflow spec is a status spec, update the study status spec
+ if workflow_spec_model.is_status:
+ study = session.query(StudyModel).filter_by(id=study_id).first()
+ study.status_spec_id = workflow_spec_model.id
+ study.status_spec_version = processor.get_spec_version()
+ session.add(study)
+ session.commit()
+
return WorkflowApiSchema().dump(__get_workflow_api_model(processor))
@@ -194,3 +263,4 @@ def map_pb_study_to_study(pb_study):
study_info['inactive'] = False
return study_info
+
diff --git a/crc/api/workflow.py b/crc/api/workflow.py
index e14023ea..25536423 100644
--- a/crc/api/workflow.py
+++ b/crc/api/workflow.py
@@ -1,12 +1,13 @@
import uuid
-from crc.api.file import delete_file
-from crc import session
+from crc.api.stats import update_workflow_stats, log_task_complete
+from crc import session, auth
from crc.api.common import ApiError, ApiErrorSchema
+from crc.api.file import delete_file
from crc.models.api_models import Task, WorkflowApi, WorkflowApiSchema
+from crc.models.file import FileModel
from crc.models.workflow import WorkflowModel, WorkflowSpecModelSchema, WorkflowSpecModel
from crc.services.workflow_processor import WorkflowProcessor
-from crc.models.file import FileModel
def all_specifications():
@@ -14,13 +15,16 @@ def all_specifications():
return schema.dump(session.query(WorkflowSpecModel).all())
+@auth.login_required
def add_workflow_specification(body):
- new_spec = WorkflowSpecModelSchema().load(body, session=session)
+ new_spec: WorkflowSpecModel = WorkflowSpecModelSchema().load(body, session=session)
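+    # By convention, a spec whose id is 'status' becomes the status spec.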
+ new_spec.is_status = new_spec.id == 'status'
session.add(new_spec)
session.commit()
return WorkflowSpecModelSchema().dump(new_spec)
+@auth.login_required
def get_workflow_specification(spec_id):
if spec_id is None:
error = ApiError('unknown_spec', 'Please provide a valid Workflow Specification ID.')
@@ -35,6 +39,7 @@ def get_workflow_specification(spec_id):
return WorkflowSpecModelSchema().dump(spec)
+@auth.login_required
def update_workflow_specification(spec_id, body):
spec = WorkflowSpecModelSchema().load(body, session=session)
spec.id = spec_id
@@ -43,6 +48,7 @@ def update_workflow_specification(spec_id, body):
return WorkflowSpecModelSchema().dump(spec)
+@auth.login_required
def delete_workflow_specification(spec_id):
if spec_id is None:
error = ApiError('unknown_spec', 'Please provide a valid Workflow Specification ID.')
@@ -61,13 +67,17 @@ def delete_workflow_specification(spec_id):
session.query(WorkflowModel).filter_by(workflow_spec_id=spec_id).delete()
session.query(WorkflowSpecModel).filter_by(id=spec_id).delete()
-
session.commit()
-def __get_workflow_api_model(processor: WorkflowProcessor):
+def __get_workflow_api_model(processor: WorkflowProcessor, status_data=None):
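+    """Builds the WorkflowApi model for a processor; status_data, when given, drives the is_active flag."""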
spiff_tasks = processor.get_all_user_tasks()
- user_tasks = map(Task.from_spiff, spiff_tasks)
+ user_tasks = list(map(Task.from_spiff, spiff_tasks))
+ is_active = True
+
+ if status_data is not None and processor.workflow_spec_id in status_data:
+ is_active = status_data[processor.workflow_spec_id]
+
workflow_api = WorkflowApi(
id=processor.get_workflow_id(),
status=processor.get_status(),
@@ -76,29 +86,36 @@ def __get_workflow_api_model(processor: WorkflowProcessor):
user_tasks=user_tasks,
workflow_spec_id=processor.workflow_spec_id,
spec_version=processor.get_spec_version(),
- is_latest_spec=processor.get_spec_version() == processor.get_latest_version_string(processor.workflow_spec_id)
+ is_latest_spec=processor.get_spec_version() == processor.get_latest_version_string(processor.workflow_spec_id),
+ is_active=is_active
)
if processor.next_task():
workflow_api.next_task = Task.from_spiff(processor.next_task())
return workflow_api
+@auth.login_required
def get_workflow(workflow_id, soft_reset=False, hard_reset=False):
- schema = WorkflowApiSchema()
- workflow_model = session.query(WorkflowModel).filter_by(id=workflow_id).first()
+ workflow_model: WorkflowModel = session.query(WorkflowModel).filter_by(id=workflow_id).first()
processor = WorkflowProcessor(workflow_model, soft_reset=soft_reset, hard_reset=hard_reset)
- return schema.dump(__get_workflow_api_model(processor))
+ workflow_api_model = __get_workflow_api_model(processor)
+ update_workflow_stats(workflow_model, workflow_api_model)
+ return WorkflowApiSchema().dump(workflow_api_model)
+@auth.login_required
def delete(workflow_id):
session.query(WorkflowModel).filter_by(id=workflow_id).delete()
session.commit()
+
+@auth.login_required
def get_task(workflow_id, task_id):
workflow = session.query(WorkflowModel).filter_by(id=workflow_id).first()
return workflow.bpmn_workflow().get_task(task_id)
+@auth.login_required
def update_task(workflow_id, task_id, body):
workflow_model = session.query(WorkflowModel).filter_by(id=workflow_id).first()
processor = WorkflowProcessor(workflow_model)
@@ -111,5 +128,8 @@ def update_task(workflow_id, task_id, body):
workflow_model.bpmn_workflow_json = processor.serialize()
session.add(workflow_model)
session.commit()
- return WorkflowApiSchema().dump(__get_workflow_api_model(processor)
- )
+
+ workflow_api_model = __get_workflow_api_model(processor)
+ update_workflow_stats(workflow_model, workflow_api_model)
+ log_task_complete(workflow_model, task_id)
+ return WorkflowApiSchema().dump(workflow_api_model)
diff --git a/crc/models/api_models.py b/crc/models/api_models.py
index 1d7a4f3a..aa8fd0fe 100644
--- a/crc/models/api_models.py
+++ b/crc/models/api_models.py
@@ -95,7 +95,8 @@ class TaskSchema(ma.Schema):
class WorkflowApi(object):
- def __init__(self, id, status, user_tasks, last_task, next_task, workflow_spec_id, spec_version, is_latest_spec):
+ def __init__(self, id, status, user_tasks, last_task, next_task, workflow_spec_id, spec_version,
+ is_latest_spec, is_active):
self.id = id
self.status = status
self.user_tasks = user_tasks
@@ -104,15 +105,14 @@ class WorkflowApi(object):
self.workflow_spec_id = workflow_spec_id
self.spec_version = spec_version
self.is_latest_spec = is_latest_spec
+ self.is_active = is_active
class WorkflowApiSchema(ma.Schema):
class Meta:
model = WorkflowApi
- fields = ["id", "status",
- "user_tasks", "last_task", "next_task",
- "workflow_spec_id",
- "spec_version", "is_latest_spec"]
+ fields = ["id", "status", "user_tasks", "last_task", "next_task",
+ "workflow_spec_id", "spec_version", "is_latest_spec", "is_active"]
unknown = INCLUDE
status = EnumField(WorkflowStatus)
@@ -122,4 +122,7 @@ class WorkflowApiSchema(ma.Schema):
@marshmallow.post_load
def make_workflow(self, data, **kwargs):
- return WorkflowApi(**data)
+ keys = ['id', 'status', 'user_tasks', 'last_task', 'next_task',
+            'workflow_spec_id', 'spec_version', 'is_latest_spec', 'is_active']
+ filtered_fields = {key: data[key] for key in keys}
+ return WorkflowApi(**filtered_fields)
diff --git a/crc/models/file.py b/crc/models/file.py
index 932b197b..77e895a6 100644
--- a/crc/models/file.py
+++ b/crc/models/file.py
@@ -1,7 +1,7 @@
import enum
from marshmallow_enum import EnumField
-from marshmallow_sqlalchemy import ModelSchema
+from marshmallow_sqlalchemy import SQLAlchemyAutoSchema
from sqlalchemy import func
from sqlalchemy.dialects.postgresql import UUID
@@ -76,6 +76,8 @@ class FileModel(db.Model):
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String)
type = db.Column(db.Enum(FileType))
+    is_status = db.Column(db.Boolean)  # Is this the status workflow's BPMN?
content_type = db.Column(db.String)
is_reference = db.Column(db.Boolean, nullable=False, default=False) # A global reference file.
primary = db.Column(db.Boolean) # Is this the primary BPMN in a workflow?
@@ -87,8 +89,10 @@ class FileModel(db.Model):
latest_version = db.Column(db.Integer, default=0)
-class FileModelSchema(ModelSchema):
+class FileModelSchema(SQLAlchemyAutoSchema):
class Meta:
model = FileModel
+ load_instance = True
+ include_relationships = True
include_fk = True # Includes foreign keys
type = EnumField(FileType)
diff --git a/crc/models/stats.py b/crc/models/stats.py
new file mode 100644
index 00000000..1bc1935d
--- /dev/null
+++ b/crc/models/stats.py
@@ -0,0 +1,45 @@
+from marshmallow_sqlalchemy import SQLAlchemyAutoSchema
+
+from crc import db
+
+
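+# Aggregate task counts for one version of one workflow within a study.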
+class WorkflowStatsModel(db.Model):
+ __tablename__ = 'workflow_stats'
+ id = db.Column(db.Integer, primary_key=True)
+ study_id = db.Column(db.Integer, db.ForeignKey('study.id'), nullable=False)
+ workflow_id = db.Column(db.Integer, db.ForeignKey('workflow.id'), nullable=False)
+ workflow_spec_id = db.Column(db.String, db.ForeignKey('workflow_spec.id'))
+ spec_version = db.Column(db.String)
+ num_tasks_total = db.Column(db.Integer)
+ num_tasks_complete = db.Column(db.Integer)
+ num_tasks_incomplete = db.Column(db.Integer)
+ last_updated = db.Column(db.DateTime)
+
+
+class WorkflowStatsModelSchema(SQLAlchemyAutoSchema):
+ class Meta:
+ model = WorkflowStatsModel
+ load_instance = True
+ include_relationships = True
+ include_fk = True # Includes foreign keys
+
+
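+# One row per completed task: who completed it, in which workflow, and when.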
+class TaskEventModel(db.Model):
+ __tablename__ = 'task_event'
+ id = db.Column(db.Integer, primary_key=True)
+ study_id = db.Column(db.Integer, db.ForeignKey('study.id'), nullable=False)
+ user_uid = db.Column(db.String, db.ForeignKey('user.uid'), nullable=False)
+ workflow_id = db.Column(db.Integer, db.ForeignKey('workflow.id'), nullable=False)
+ workflow_spec_id = db.Column(db.String, db.ForeignKey('workflow_spec.id'))
+ spec_version = db.Column(db.String)
+ task_id = db.Column(db.String)
+ task_state = db.Column(db.String)
+ date = db.Column(db.DateTime)
+
+
+class TaskEventModelSchema(SQLAlchemyAutoSchema):
+ class Meta:
+ model = TaskEventModel
+ load_instance = True
+ include_relationships = True
+ include_fk = True # Includes foreign keys
diff --git a/crc/models/study.py b/crc/models/study.py
index 815acb49..7e0d6dda 100644
--- a/crc/models/study.py
+++ b/crc/models/study.py
@@ -1,5 +1,5 @@
from marshmallow_enum import EnumField
-from marshmallow_sqlalchemy import ModelSchema
+from marshmallow_sqlalchemy import SQLAlchemyAutoSchema
from sqlalchemy import func
from crc import db
@@ -20,13 +20,15 @@ class StudyModel(db.Model):
investigator_uids = db.Column(db.ARRAY(db.String), nullable=True)
inactive = db.Column(db.Boolean, default=False)
requirements = db.Column(db.ARRAY(db.Integer), nullable=True)
+ status_spec_id = db.Column(db.String, db.ForeignKey('workflow_spec.id'))
+ status_spec_version = db.Column(db.String)
-class StudyModelSchema(ModelSchema):
+class StudyModelSchema(SQLAlchemyAutoSchema):
class Meta:
model = StudyModel
+ load_instance = True
+ include_relationships = True
include_fk = True # Includes foreign keys
protocol_builder_status = EnumField(ProtocolBuilderStatus)
-
-
diff --git a/crc/models/user.py b/crc/models/user.py
index 16a528c8..65b46de4 100644
--- a/crc/models/user.py
+++ b/crc/models/user.py
@@ -1,7 +1,7 @@
import datetime
import jwt
-from marshmallow_sqlalchemy import ModelSchema
+from marshmallow_sqlalchemy import SQLAlchemyAutoSchema
from crc import db, app
from crc.api.common import ApiError
@@ -52,7 +52,9 @@ class UserModel(db.Model):
raise ApiError('token_invalid', 'The Authentication token you provided. You need a new token. ')
-class UserModelSchema(ModelSchema):
+class UserModelSchema(SQLAlchemyAutoSchema):
class Meta:
model = UserModel
+ load_instance = True
+ include_relationships = True
diff --git a/crc/models/workflow.py b/crc/models/workflow.py
index 92e74137..438daab7 100644
--- a/crc/models/workflow.py
+++ b/crc/models/workflow.py
@@ -1,9 +1,22 @@
import enum
-from marshmallow_sqlalchemy import ModelSchema
+from marshmallow_sqlalchemy import SQLAlchemyAutoSchema
from crc import db
+class WorkflowSpecCategoryModel(db.Model):
+ __tablename__ = 'workflow_spec_category'
+ id = db.Column(db.Integer, primary_key=True)
+ name = db.Column(db.String)
+ display_name = db.Column(db.String)
+
+
+class WorkflowSpecCategoryModelSchema(SQLAlchemyAutoSchema):
+ class Meta:
+ model = WorkflowSpecCategoryModel
+ load_instance = True
+ include_relationships = True
+
class WorkflowSpecModel(db.Model):
__tablename__ = 'workflow_spec'
@@ -12,11 +25,17 @@ class WorkflowSpecModel(db.Model):
display_name = db.Column(db.String)
description = db.Column(db.Text)
primary_process_id = db.Column(db.String)
+ workflow_spec_category_id = db.Column(db.Integer, db.ForeignKey('workflow_spec_category.id'), nullable=True)
+ workflow_spec_category = db.relationship("WorkflowSpecCategoryModel")
+ is_status = db.Column(db.Boolean, default=False)
-class WorkflowSpecModelSchema(ModelSchema):
+class WorkflowSpecModelSchema(SQLAlchemyAutoSchema):
class Meta:
model = WorkflowSpecModel
+ load_instance = True
+ include_relationships = True
+ include_fk = True # Includes foreign keys
class WorkflowStatus(enum.Enum):
@@ -34,4 +53,3 @@ class WorkflowModel(db.Model):
study_id = db.Column(db.Integer, db.ForeignKey('study.id'))
workflow_spec_id = db.Column(db.String, db.ForeignKey('workflow_spec.id'))
spec_version = db.Column(db.String)
-
diff --git a/crc/scripts/complete_template.py b/crc/scripts/complete_template.py
index ee25d48f..8b190038 100644
--- a/crc/scripts/complete_template.py
+++ b/crc/scripts/complete_template.py
@@ -28,6 +28,11 @@ class CompleteTemplate(Script):
"the name of the docx template to use.")
file_name = args[0]
workflow_spec_model = self.find_spec_model_in_db(task.workflow)
+ task_study_id = task.workflow.data[WorkflowProcessor.STUDY_ID_KEY]
+
+ if task_study_id != study_id:
+ raise ApiError(code="invalid_argument",
+ message="The given task does not match the given study.")
if workflow_spec_model is None:
raise ApiError(code="workflow_model_error",
@@ -44,7 +49,6 @@ class CompleteTemplate(Script):
"within workflow specification '%s'") % (args[0], workflow_spec_model.id)
final_document_stream = self.make_template(BytesIO(file_data_model.data), task.data)
- study_id = task.workflow.data[WorkflowProcessor.STUDY_ID_KEY]
workflow_id = task.workflow.data[WorkflowProcessor.WORKFLOW_ID_KEY]
FileService.add_task_file(study_id=study_id, workflow_id=workflow_id, task_id=task.id,
name=file_name,
diff --git a/crc/scripts/study_info.py b/crc/scripts/study_info.py
index 78861bcc..6d168af5 100644
--- a/crc/scripts/study_info.py
+++ b/crc/scripts/study_info.py
@@ -44,7 +44,8 @@ class StudyInfo(Script):
def get_required_docs(self, study_id):
- required_docs = self.pb.get_required_docs(study_id)
+ required_docs = self.pb.get_required_docs(study_id)
+ return required_docs
diff --git a/crc/services/file_service.py b/crc/services/file_service.py
index 929f1e6b..3042c12e 100644
--- a/crc/services/file_service.py
+++ b/crc/services/file_service.py
@@ -15,12 +15,13 @@ class FileService(object):
@staticmethod
def add_workflow_spec_file(workflow_spec: WorkflowSpecModel,
- name, content_type, binary_data, primary=False):
+ name, content_type, binary_data, primary=False, is_status=False):
"""Create a new file and associate it with a workflow spec."""
file_model = FileModel(
workflow_spec_id=workflow_spec.id,
name=name,
- primary=primary
+ primary=primary,
+ is_status=is_status
)
if primary:
bpmn: ElementTree.Element = ElementTree.fromstring(binary_data)
diff --git a/crc/services/workflow_processor.py b/crc/services/workflow_processor.py
index 9f9535ad..b15f048f 100644
--- a/crc/services/workflow_processor.py
+++ b/crc/services/workflow_processor.py
@@ -204,7 +204,8 @@ class WorkflowProcessor(object):
dmn: ElementTree.Element = ElementTree.fromstring(file_data.data)
parser.add_dmn_xml(dmn, filename=file_data.file_model.name)
if process_id is None:
- raise(Exception("There is no primary BPMN model defined for workflow %s" % workflow_spec_id))
+            raise ApiError(code="no_primary_bpmn_error",
+                           message="There is no primary BPMN model defined for workflow %s" % workflow_spec_id)
try:
spec = parser.get_spec(process_id)
except ValidationException as ve:
@@ -237,7 +238,7 @@ class WorkflowProcessor(object):
session.add(workflow_model)
session.commit()
# Need to commit twice, first to get a unique id for the workflow model, and
- # a second time to store the serilaization so we can maintain this link within
+ # a second time to store the serialization so we can maintain this link within
# the spiff-workflow process.
bpmn_workflow.data[WorkflowProcessor.WORKFLOW_ID_KEY] = workflow_model.id
@@ -337,7 +338,7 @@ class WorkflowProcessor(object):
process_elements.append(child)
if len(process_elements) == 0:
- raise Exception('No executable process tag found')
+ raise ValidationException('No executable process tag found')
# There are multiple root elements
if len(process_elements) > 1:
@@ -349,6 +350,6 @@ class WorkflowProcessor(object):
if child_element.tag.endswith('startEvent'):
return this_element.attrib['id']
- raise Exception('No start event found in %s' % et_root.attrib['id'])
+ raise ValidationException('No start event found in %s' % et_root.attrib['id'])
return process_elements[0].attrib['id']
diff --git a/example_data.py b/example_data.py
index ddbcd9b0..dc1d1869 100644
--- a/example_data.py
+++ b/example_data.py
@@ -82,11 +82,11 @@ class ExampleDataLoader:
returns an array of data models to be added to the database."""
global file
file_service = FileService()
-
spec = WorkflowSpecModel(id=id,
name=name,
display_name=display_name,
- description=description)
+ description=description,
+ is_status=id == 'status')
db.session.add(spec)
db.session.commit()
if not filepath:
@@ -95,13 +95,15 @@ class ExampleDataLoader:
for file_path in files:
noise, file_extension = os.path.splitext(file_path)
filename = os.path.basename(file_path)
- is_primary = filename.lower() == id + ".bpmn"
+
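+            # By convention, status.bpmn is the model that determines workflow availability.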
+ is_status = filename.lower() == 'status.bpmn'
+ is_primary = filename.lower() == id + '.bpmn'
try:
- file = open(file_path, "rb")
+ file = open(file_path, 'rb')
data = file.read()
content_type = CONTENT_TYPES[file_extension[1:]]
file_service.add_workflow_spec_file(workflow_spec=spec, name=filename, content_type=content_type,
- binary_data=data, primary=is_primary)
+ binary_data=data, primary=is_primary, is_status=is_status)
except IsADirectoryError as de:
# Ignore sub directories
pass
diff --git a/migrations/versions/5f06108116ae_.py b/migrations/versions/5f06108116ae_.py
new file mode 100644
index 00000000..9b651a70
--- /dev/null
+++ b/migrations/versions/5f06108116ae_.py
@@ -0,0 +1,41 @@
+"""empty message
+
+Revision ID: 5f06108116ae
+Revises: 90dd63672e0a
+Create Date: 2020-03-13 14:55:32.214380
+
+"""
+from alembic import op
+import sqlalchemy as sa
+
+
+# revision identifiers, used by Alembic.
+revision = '5f06108116ae'
+down_revision = '90dd63672e0a'
+branch_labels = None
+depends_on = None
+
+
+def upgrade():
+ # ### commands auto generated by Alembic - please adjust! ###
+ op.create_table('workflow_spec_category',
+ sa.Column('id', sa.Integer(), nullable=False),
+ sa.Column('name', sa.String(), nullable=True),
+ sa.Column('display_name', sa.String(), nullable=True),
+ sa.PrimaryKeyConstraint('id')
+ )
+ op.add_column('file', sa.Column('is_status', sa.Boolean(), nullable=True))
+ op.add_column('workflow_spec', sa.Column('is_status', sa.Boolean(), nullable=True))
+ op.add_column('workflow_spec', sa.Column('workflow_spec_category_id', sa.Integer(), nullable=True))
+ op.create_foreign_key(None, 'workflow_spec', 'workflow_spec_category', ['workflow_spec_category_id'], ['id'])
+ # ### end Alembic commands ###
+
+
+def downgrade():
+ # ### commands auto generated by Alembic - please adjust! ###
+ op.drop_constraint(None, 'workflow_spec', type_='foreignkey')
+ op.drop_column('workflow_spec', 'workflow_spec_category_id')
+ op.drop_column('workflow_spec', 'is_status')
+ op.drop_column('file', 'is_status')
+ op.drop_table('workflow_spec_category')
+ # ### end Alembic commands ###
diff --git a/migrations/versions/65f3fce6031a_.py b/migrations/versions/65f3fce6031a_.py
new file mode 100644
index 00000000..5fa2ce0b
--- /dev/null
+++ b/migrations/versions/65f3fce6031a_.py
@@ -0,0 +1,32 @@
+"""empty message
+
+Revision ID: 65f3fce6031a
+Revises: 5f06108116ae
+Create Date: 2020-03-15 12:40:42.314190
+
+"""
+from alembic import op
+import sqlalchemy as sa
+
+
+# revision identifiers, used by Alembic.
+revision = '65f3fce6031a'
+down_revision = '5f06108116ae'
+branch_labels = None
+depends_on = None
+
+
+def upgrade():
+ # ### commands auto generated by Alembic - please adjust! ###
+ op.add_column('study', sa.Column('status_spec_id', sa.String(), nullable=True))
+ op.add_column('study', sa.Column('status_spec_version', sa.String(), nullable=True))
+ op.create_foreign_key(None, 'study', 'workflow_spec', ['status_spec_id'], ['id'])
+ # ### end Alembic commands ###
+
+
+def downgrade():
+ # ### commands auto generated by Alembic - please adjust! ###
+ op.drop_constraint(None, 'study', type_='foreignkey')
+ op.drop_column('study', 'status_spec_version')
+ op.drop_column('study', 'status_spec_id')
+ # ### end Alembic commands ###
diff --git a/migrations/versions/90dd63672e0a_.py b/migrations/versions/90dd63672e0a_.py
new file mode 100644
index 00000000..819e4234
--- /dev/null
+++ b/migrations/versions/90dd63672e0a_.py
@@ -0,0 +1,59 @@
+"""empty message
+
+Revision ID: 90dd63672e0a
+Revises: 8856126b6658
+Create Date: 2020-03-10 21:16:38.827156
+
+"""
+from alembic import op
+import sqlalchemy as sa
+
+
+# revision identifiers, used by Alembic.
+revision = '90dd63672e0a'
+down_revision = '8856126b6658'
+branch_labels = None
+depends_on = None
+
+
+def upgrade():
+ # ### commands auto generated by Alembic - please adjust! ###
+ op.create_table('task_event',
+ sa.Column('id', sa.Integer(), nullable=False),
+ sa.Column('study_id', sa.Integer(), nullable=False),
+ sa.Column('user_uid', sa.String(), nullable=False),
+ sa.Column('workflow_id', sa.Integer(), nullable=False),
+ sa.Column('workflow_spec_id', sa.String(), nullable=True),
+ sa.Column('spec_version', sa.String(), nullable=True),
+ sa.Column('task_id', sa.String(), nullable=True),
+ sa.Column('task_state', sa.String(), nullable=True),
+ sa.Column('date', sa.DateTime(), nullable=True),
+ sa.ForeignKeyConstraint(['study_id'], ['study.id'], ),
+ sa.ForeignKeyConstraint(['user_uid'], ['user.uid'], ),
+ sa.ForeignKeyConstraint(['workflow_id'], ['workflow.id'], ),
+ sa.ForeignKeyConstraint(['workflow_spec_id'], ['workflow_spec.id'], ),
+ sa.PrimaryKeyConstraint('id')
+ )
+ op.create_table('workflow_stats',
+ sa.Column('id', sa.Integer(), nullable=False),
+ sa.Column('study_id', sa.Integer(), nullable=False),
+ sa.Column('workflow_id', sa.Integer(), nullable=False),
+ sa.Column('workflow_spec_id', sa.String(), nullable=True),
+ sa.Column('spec_version', sa.String(), nullable=True),
+ sa.Column('num_tasks_total', sa.Integer(), nullable=True),
+ sa.Column('num_tasks_complete', sa.Integer(), nullable=True),
+ sa.Column('num_tasks_incomplete', sa.Integer(), nullable=True),
+ sa.Column('last_updated', sa.DateTime(), nullable=True),
+ sa.ForeignKeyConstraint(['study_id'], ['study.id'], ),
+ sa.ForeignKeyConstraint(['workflow_id'], ['workflow.id'], ),
+ sa.ForeignKeyConstraint(['workflow_spec_id'], ['workflow_spec.id'], ),
+ sa.PrimaryKeyConstraint('id')
+ )
+ # ### end Alembic commands ###
+
+
+def downgrade():
+ # ### commands auto generated by Alembic - please adjust! ###
+ op.drop_table('workflow_stats')
+ op.drop_table('task_event')
+ # ### end Alembic commands ###
diff --git a/tests/data/status/crc2_training_session_data_security_plan.dmn b/tests/data/status/crc2_training_session_data_security_plan.dmn
new file mode 100644
index 00000000..4acadb96
--- /dev/null
+++ b/tests/data/status/crc2_training_session_data_security_plan.dmn
@@ -0,0 +1,41 @@
+<!-- DMN decision table (XML markup lost in extraction); recoverable rule outputs: false, false, true, true -->
diff --git a/tests/data/status/crc2_training_session_enter_core_info.dmn b/tests/data/status/crc2_training_session_enter_core_info.dmn
new file mode 100644
index 00000000..2fbdd5ce
--- /dev/null
+++ b/tests/data/status/crc2_training_session_enter_core_info.dmn
@@ -0,0 +1,32 @@
+<!-- DMN decision table (XML markup lost in extraction); recoverable rule outputs: false, false, true, true -->
diff --git a/tests/data/status/crc2_training_session_sponsor_funding_source.dmn b/tests/data/status/crc2_training_session_sponsor_funding_source.dmn
new file mode 100644
index 00000000..e5020439
--- /dev/null
+++ b/tests/data/status/crc2_training_session_sponsor_funding_source.dmn
@@ -0,0 +1,30 @@
+<!-- DMN decision table (XML markup lost in extraction; no content recoverable) -->
diff --git a/tests/data/status/status.bpmn b/tests/data/status/status.bpmn
new file mode 100644
index 00000000..13f144ed
--- /dev/null
+++ b/tests/data/status/status.bpmn
@@ -0,0 +1,121 @@
+<!-- BPMN process XML (markup lost in extraction). Recoverable structure: a start event into SequenceFlow_1ees8ka,
+     an initial task out through Flow_024q2cw, a gateway fanning out via Flow_1m8285h, Flow_1nimppb, and Flow_18pl92p,
+     returning flows Flow_1sggkit, Flow_1txrak2, and Flow_0x9580l into a joining gateway, then Flow_0pwtiqm to the end event. -->
diff --git a/tests/test_study_api.py b/tests/test_study_api.py
index 57a287f7..1988c43b 100644
--- a/tests/test_study_api.py
+++ b/tests/test_study_api.py
@@ -3,7 +3,7 @@ from datetime import datetime, timezone
from unittest.mock import patch, Mock
from crc import session
-from crc.models.api_models import WorkflowApiSchema
+from crc.models.api_models import WorkflowApiSchema, WorkflowApi
from crc.models.study import StudyModel, StudyModelSchema
from crc.models.protocol_builder import ProtocolBuilderStatus, ProtocolBuilderStudyDetailsSchema, \
ProtocolBuilderStudySchema
@@ -160,9 +160,6 @@ class TestStudyApi(BaseTest):
rv = self.app.delete('/v1.0/study/%i' % study.id)
self.assert_failure(rv, error_code="study_integrity_error")
-
-
-
def test_delete_workflow(self):
self.load_example_data()
study = session.query(StudyModel).first()
@@ -174,7 +171,7 @@ class TestStudyApi(BaseTest):
self.assertEqual(1, session.query(WorkflowModel).count())
json_data = json.loads(rv.get_data(as_text=True))
workflow = WorkflowApiSchema().load(json_data)
- rv = self.app.delete('/v1.0/workflow/%i' % workflow.id)
+ rv = self.app.delete('/v1.0/workflow/%i' % workflow.id, headers=self.logged_in_headers())
self.assert_success(rv)
self.assertEqual(0, session.query(WorkflowModel).count())
@@ -207,3 +204,79 @@ class TestStudyApi(BaseTest):
json_data_after = json.loads(response_after.get_data(as_text=True))
workflows_after = WorkflowApiSchema(many=True).load(json_data_after)
self.assertEqual(1, len(workflows_after))
+
+    def test_workflow_spec_status(self):
+        """
+        Workflow specs that have been made available (or not) to a particular study via status.bpmn
+        should be flagged as active (or not) when the study's list of workflows is retrieved.
+        """
+ self.load_example_data()
+ study = session.query(StudyModel).first()
+ study_id = study.id
+
+ # Add status workflow
+ self.load_test_spec('status')
+
+ # Add status workflow to the study
+ status_spec = session.query(WorkflowSpecModel).filter_by(is_status=True).first()
+ add_status_response = self.app.post('/v1.0/study/%i/workflows' % study.id,
+ content_type="application/json",
+ headers=self.logged_in_headers(),
+ data=json.dumps(WorkflowSpecModelSchema().dump(status_spec)))
+ self.assert_success(add_status_response)
+ json_data_status = json.loads(add_status_response.get_data(as_text=True))
+ status_workflow: WorkflowApi = WorkflowApiSchema().load(json_data_status)
+ self.assertIsNotNone(status_workflow)
+ self.assertIsNotNone(status_workflow.workflow_spec_id)
+ self.assertIsNotNone(status_workflow.spec_version)
+ self.assertIsNotNone(status_workflow.next_task)
+ self.assertIsNotNone(status_workflow.next_task['id'])
+ status_task_id = status_workflow.next_task['id']
+
+ # Verify that the study status spec is populated
+ updated_study: StudyModel = session.query(StudyModel).filter_by(id=study_id).first()
+ self.assertIsNotNone(updated_study)
+ self.assertIsNotNone(updated_study.status_spec_id)
+ self.assertIsNotNone(updated_study.status_spec_version)
+ self.assertEqual(updated_study.status_spec_id, status_workflow.workflow_spec_id)
+ self.assertEqual(updated_study.status_spec_version, status_workflow.spec_version)
+
+ # Add all available non-status workflows to the study
+ specs = session.query(WorkflowSpecModel).filter_by(is_status=False).all()
+ for spec in specs:
+ add_response = self.app.post('/v1.0/study/%i/workflows' % study.id,
+ content_type="application/json",
+ headers=self.logged_in_headers(),
+ data=json.dumps(WorkflowSpecModelSchema().dump(spec)))
+ self.assert_success(add_response)
+
+ for is_active in [False, True]:
+ # Set all workflow specs to inactive|active
+ update_status_response = self.app.put('/v1.0/workflow/%i/task/%s/data' % (status_workflow.id, status_task_id),
+ headers=self.logged_in_headers(),
+ content_type="application/json",
+ data=json.dumps({'some_input': is_active}))
+ self.assert_success(update_status_response)
+ json_workflow_api = json.loads(update_status_response.get_data(as_text=True))
+ updated_workflow_api: WorkflowApi = WorkflowApiSchema().load(json_workflow_api)
+ self.assertIsNotNone(updated_workflow_api)
+ self.assertEqual(updated_workflow_api.status, WorkflowStatus.complete)
+ self.assertIsNotNone(updated_workflow_api.last_task)
+ self.assertIsNotNone(updated_workflow_api.last_task['data'])
+ self.assertIsNotNone(updated_workflow_api.last_task['data']['some_input'])
+ self.assertEqual(updated_workflow_api.last_task['data']['some_input'], is_active)
+
+ # List workflows for study
+ response_after = self.app.get('/v1.0/study/%i/workflows' % study.id,
+ content_type="application/json",
+ headers=self.logged_in_headers())
+ self.assert_success(response_after)
+
+ json_data_after = json.loads(response_after.get_data(as_text=True))
+ workflows_after = WorkflowApiSchema(many=True).load(json_data_after)
+ self.assertEqual(len(specs), len(workflows_after))
+
+ # All workflows should be inactive|active
+ for workflow in workflows_after:
+ self.assertEqual(workflow.is_active, is_active)
+
diff --git a/tests/test_tasks_api.py b/tests/test_tasks_api.py
index 530767cb..75af55c5 100644
--- a/tests/test_tasks_api.py
+++ b/tests/test_tasks_api.py
@@ -4,6 +4,7 @@ import os
from crc import session, app
from crc.models.api_models import WorkflowApiSchema, Task
from crc.models.file import FileModelSchema
+from crc.models.stats import WorkflowStatsModel, TaskEventModel
from crc.models.study import StudyModel
from crc.models.workflow import WorkflowSpecModelSchema, WorkflowModel, WorkflowStatus
from crc.services.workflow_processor import WorkflowProcessor
@@ -28,6 +29,7 @@ class TestTasksApi(BaseTest):
def get_workflow_api(self, workflow, soft_reset=False, hard_reset=False):
rv = self.app.get('/v1.0/workflow/%i?soft_reset=%s&hard_reset=%s' %
(workflow.id, str(soft_reset), str(hard_reset)),
+ headers=self.logged_in_headers(),
content_type="application/json")
self.assert_success(rv)
json_data = json.loads(rv.get_data(as_text=True))
@@ -37,10 +39,24 @@ class TestTasksApi(BaseTest):
def complete_form(self, workflow, task, dict_data):
rv = self.app.put('/v1.0/workflow/%i/task/%s/data' % (workflow.id, task.id),
+ headers=self.logged_in_headers(),
content_type="application/json",
data=json.dumps(dict_data))
self.assert_success(rv)
json_data = json.loads(rv.get_data(as_text=True))
+
+ num_stats = session.query(WorkflowStatsModel) \
+ .filter_by(workflow_id=workflow.id) \
+ .filter_by(workflow_spec_id=workflow.workflow_spec_id) \
+ .count()
+ self.assertGreater(num_stats, 0)
+
+ num_task_events = session.query(TaskEventModel) \
+ .filter_by(workflow_id=workflow.id) \
+ .filter_by(task_id=task.id) \
+ .count()
+ self.assertGreater(num_task_events, 0)
+
workflow = WorkflowApiSchema().load(json_data)
return workflow
@@ -143,9 +159,9 @@ class TestTasksApi(BaseTest):
}
workflow_api = self.complete_form(workflow, tasks[0], data)
self.assertIsNotNone(workflow_api.next_task)
- self.assertEquals("EndEvent_0evb22x", workflow_api.next_task['name'])
+ self.assertEqual("EndEvent_0evb22x", workflow_api.next_task['name'])
self.assertTrue(workflow_api.status == WorkflowStatus.complete)
- rv = self.app.get('/v1.0/file?workflow_id=%i' % workflow.id)
+ rv = self.app.get('/v1.0/file?workflow_id=%i' % workflow.id, headers=self.logged_in_headers())
self.assert_success(rv)
json_data = json.loads(rv.get_data(as_text=True))
files = FileModelSchema(many=True).load(json_data, session=session)
@@ -245,10 +261,38 @@ class TestTasksApi(BaseTest):
# perform a soft reset returns an error
rv = self.app.get('/v1.0/workflow/%i?soft_reset=%s&hard_reset=%s' %
(workflow.id, "true", "false"),
- content_type="application/json")
+ content_type="application/json",
+ headers=self.logged_in_headers())
self.assert_failure(rv, error_code="unexpected_workflow_structure")
# Try again without a soft reset, and we are still ok, and on the original version.
workflow_api = self.get_workflow_api(workflow)
self.assertTrue(workflow_api.spec_version.startswith("v1 "))
self.assertFalse(workflow_api.is_latest_spec)
+
+ def test_get_workflow_stats(self):
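+        """A stats record should appear once the workflow starts, and its task counts should add up."""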
+ self.load_example_data()
+ workflow = self.create_workflow('exclusive_gateway')
+
+ response_before = self.app.get('/v1.0/workflow/%i/stats' % workflow.id,
+ content_type="application/json",
+ headers=self.logged_in_headers())
+ self.assert_success(response_before)
+ db_stats_before = session.query(WorkflowStatsModel).filter_by(workflow_id=workflow.id).first()
+ self.assertIsNone(db_stats_before)
+
+ # Start the workflow.
+ tasks = self.get_workflow_api(workflow).user_tasks
+ self.complete_form(workflow, tasks[0], {"has_bananas": True})
+
+ response_after = self.app.get('/v1.0/workflow/%i/stats' % workflow.id,
+ content_type="application/json",
+ headers=self.logged_in_headers())
+ self.assert_success(response_after)
+ db_stats_after = session.query(WorkflowStatsModel).filter_by(workflow_id=workflow.id).first()
+ self.assertIsNotNone(db_stats_after)
+ self.assertGreater(db_stats_after.num_tasks_complete, 0)
+ self.assertGreater(db_stats_after.num_tasks_incomplete, 0)
+ self.assertGreater(db_stats_after.num_tasks_total, 0)
+ self.assertEqual(db_stats_after.num_tasks_total,
+ db_stats_after.num_tasks_complete + db_stats_after.num_tasks_incomplete)
diff --git a/tests/test_workflow_processor.py b/tests/test_workflow_processor.py
index c4ff3f21..02aa0300 100644
--- a/tests/test_workflow_processor.py
+++ b/tests/test_workflow_processor.py
@@ -165,7 +165,7 @@ class TestWorkflowProcessor(BaseTest):
study = session.query(StudyModel).first()
with self.assertRaises(ApiError) as context:
WorkflowProcessor.create(study.id, workflow_spec_model.id)
- self.assertEquals("workflow_validation_error", context.exception.code)
+ self.assertEqual("workflow_validation_error", context.exception.code)
self.assertTrue("bpmn:startEvent" in context.exception.message)
def test_workflow_spec_key_error(self):
@@ -311,14 +311,14 @@ class TestWorkflowProcessor(BaseTest):
# Setting up another processor should not error out, but doesn't pick up the update.
workflow_model.bpmn_workflow_json = processor.serialize()
processor2 = WorkflowProcessor(workflow_model)
- self.assertEquals("Step 1", processor2.bpmn_workflow.last_task.task_spec.description)
- self.assertNotEquals("# This is some documentation I wanted to add.",
+ self.assertEqual("Step 1", processor2.bpmn_workflow.last_task.task_spec.description)
+ self.assertNotEqual("# This is some documentation I wanted to add.",
processor2.bpmn_workflow.last_task.task_spec.documentation)
# You can do a soft update and get the right response.
processor3 = WorkflowProcessor(workflow_model, soft_reset=True)
- self.assertEquals("Step 1", processor3.bpmn_workflow.last_task.task_spec.description)
- self.assertEquals("# This is some documentation I wanted to add.",
+ self.assertEqual("Step 1", processor3.bpmn_workflow.last_task.task_spec.description)
+ self.assertEqual("# This is some documentation I wanted to add.",
processor3.bpmn_workflow.last_task.task_spec.documentation)
@@ -336,7 +336,7 @@ class TestWorkflowProcessor(BaseTest):
task.data = {"color": "blue"}
processor.complete_task(task)
next_task = processor.next_task()
- self.assertEquals("Step 2", next_task.task_spec.description)
+ self.assertEqual("Step 2", next_task.task_spec.description)
# Modify the specification, with a major change that alters the flow and can't be serialized effectively.
file_path = os.path.join(app.root_path, '..', 'tests', 'data', 'two_forms', 'mods', 'two_forms_struc_mod.bpmn')
@@ -349,13 +349,40 @@ class TestWorkflowProcessor(BaseTest):
# Do a hard reset, which should bring us back to the beginning, but retain the data.
processor3 = WorkflowProcessor(workflow_model, hard_reset=True)
- self.assertEquals("Step 1", processor3.next_task().task_spec.description)
- self.assertEquals({"color": "blue"}, processor3.next_task().data)
+ self.assertEqual("Step 1", processor3.next_task().task_spec.description)
+ self.assertEqual({"color": "blue"}, processor3.next_task().data)
processor3.complete_task(processor3.next_task())
- self.assertEquals("New Step", processor3.next_task().task_spec.description)
- self.assertEquals({"color": "blue"}, processor3.next_task().data)
+ self.assertEqual("New Step", processor3.next_task().task_spec.description)
+ self.assertEqual({"color": "blue"}, processor3.next_task().data)
def test_get_latest_spec_version(self):
workflow_spec_model = self.load_test_spec("two_forms")
version = WorkflowProcessor.get_latest_version_string("two_forms")
self.assertTrue(version.startswith("v1 "))
+
+ def test_status_bpmn(self):
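+        """Completing the status workflow should set every spec's flag to the first task's input value."""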
+ self.load_example_data()
+
+ specs = session.query(WorkflowSpecModel).all()
+
+ study = session.query(StudyModel).first()
+ workflow_spec_model = self.load_test_spec("status")
+
+ for enabled in [True, False]:
+ processor = WorkflowProcessor.create(study.id, workflow_spec_model.id)
+ task = processor.next_task()
+
+ # Turn all specs on or off
+ task.data = {"some_input": enabled}
+ processor.complete_task(task)
+
+ # Finish out rest of workflow
+ while processor.get_status() == WorkflowStatus.waiting:
+ task = processor.next_task()
+ processor.complete_task(task)
+
+ self.assertEqual(processor.get_status(), WorkflowStatus.complete)
+
+ # Enabled status of all specs should match the value set in the first task
+ for spec in specs:
+ self.assertEqual(task.data[spec.id], enabled)
diff --git a/tests/test_workflow_spec_api.py b/tests/test_workflow_spec_api.py
index 85c539bf..b5553247 100644
--- a/tests/test_workflow_spec_api.py
+++ b/tests/test_workflow_spec_api.py
@@ -2,7 +2,7 @@ import json
from crc import session
from crc.models.file import FileModel
-from crc.models.workflow import WorkflowSpecModel, WorkflowSpecModelSchema, WorkflowModel
+from crc.models.workflow import WorkflowSpecModel, WorkflowSpecModelSchema, WorkflowModel, WorkflowSpecCategoryModel
from tests.base_test import BaseTest
@@ -25,9 +25,11 @@ class TestWorkflowSpec(BaseTest):
def test_add_new_workflow_specification(self):
self.load_example_data();
num_before = session.query(WorkflowSpecModel).count()
- spec = WorkflowSpecModel(id='make_cookies', display_name='Cooooookies',
+ spec = WorkflowSpecModel(id='make_cookies', name='make_cookies', display_name='Cooooookies',
description='Om nom nom delicious cookies')
- rv = self.app.post('/v1.0/workflow-specification', content_type="application/json",
+ rv = self.app.post('/v1.0/workflow-specification',
+ headers=self.logged_in_headers(),
+ content_type="application/json",
data=json.dumps(WorkflowSpecModelSchema().dump(spec)))
self.assert_success(rv)
db_spec = session.query(WorkflowSpecModel).filter_by(id='make_cookies').first()
@@ -38,12 +40,38 @@ class TestWorkflowSpec(BaseTest):
def test_get_workflow_specification(self):
self.load_example_data()
db_spec = session.query(WorkflowSpecModel).first()
- rv = self.app.get('/v1.0/workflow-specification/%s' % db_spec.id)
+ rv = self.app.get('/v1.0/workflow-specification/%s' % db_spec.id, headers=self.logged_in_headers())
self.assert_success(rv)
json_data = json.loads(rv.get_data(as_text=True))
api_spec = WorkflowSpecModelSchema().load(json_data, session=session)
self.assertEqual(db_spec, api_spec)
+ def test_update_workflow_specification(self):
+ self.load_example_data()
+
+ category = WorkflowSpecCategoryModel(id=0, name='trap', display_name="It's a trap!")
+ session.add(category)
+ session.commit()
+
+ db_spec_before: WorkflowSpecModel = session.query(WorkflowSpecModel).first()
+ spec_id = db_spec_before.id
+ self.assertIsNone(db_spec_before.workflow_spec_category_id)
+
+ db_spec_before.workflow_spec_category_id = 0
+ rv = self.app.put('/v1.0/workflow-specification/%s' % spec_id,
+ content_type="application/json",
+ headers=self.logged_in_headers(),
+ data=json.dumps(WorkflowSpecModelSchema().dump(db_spec_before)))
+ self.assert_success(rv)
+ json_data = json.loads(rv.get_data(as_text=True))
+ api_spec = WorkflowSpecModelSchema().load(json_data, session=session)
+ self.assertEqual(db_spec_before, api_spec)
+
+ db_spec_after: WorkflowSpecModel = session.query(WorkflowSpecModel).filter_by(id=spec_id).first()
+ self.assertIsNotNone(db_spec_after.workflow_spec_category_id)
+ self.assertIsNotNone(db_spec_after.workflow_spec_category)
+ self.assertEqual(db_spec_after.workflow_spec_category.display_name, category.display_name)
+
def test_delete_workflow_specification(self):
self.load_example_data()
spec_id = 'random_fact'
@@ -55,7 +83,7 @@ class TestWorkflowSpec(BaseTest):
num_workflows_before = session.query(WorkflowModel).filter_by(workflow_spec_id=spec_id).count()
self.assertGreater(num_files_before + num_workflows_before, 0)
- rv = self.app.delete('/v1.0/workflow-specification/' + spec_id)
+ rv = self.app.delete('/v1.0/workflow-specification/' + spec_id, headers=self.logged_in_headers())
self.assert_success(rv)
num_specs_after = session.query(WorkflowSpecModel).filter_by(id=spec_id).count()