diff --git a/crc/__init__.py b/crc/__init__.py index cab9ccde..11c0c4d6 100644 --- a/crc/__init__.py +++ b/crc/__init__.py @@ -15,6 +15,7 @@ connexion_app = connexion.FlaskApp(__name__) app = connexion_app.app app.config.from_object('config.default') +app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False auth = HTTPTokenAuth('Bearer') if "TESTING" in os.environ and os.environ["TESTING"] == "true": diff --git a/crc/api.yml b/crc/api.yml index 5d120aaa..1b8a7c98 100644 --- a/crc/api.yml +++ b/crc/api.yml @@ -443,11 +443,14 @@ paths: content: multipart/form-data: schema: + x-body-name: file type: object properties: file: type: string format: binary + required: + - file responses: '200': description: Returns the actual file @@ -716,10 +719,31 @@ components: properties: id: type: string + name: + type: string display_name: type: string description: type: string + primary_process_id: + type: string + nullable: true + workflow_spec_category_id: + type: integer + nullable: true + workflow_spec_category: + $ref: "#/components/schemas/WorkflowSpecCategory" + is_status: + type: boolean + nullable: true + WorkflowSpecCategory: + properties: + id: + type: integer + name: + type: string + display_name: + type: string File: properties: id: diff --git a/crc/api/study.py b/crc/api/study.py index 9a77c36d..fbdc96c1 100644 --- a/crc/api/study.py +++ b/crc/api/study.py @@ -23,16 +23,46 @@ def all_studies(): @auth.login_required def add_study(body): - study = StudyModelSchema().load(body, session=session) + study: StudyModel = StudyModelSchema().load(body, session=session) + status_spec = __get_status_spec(study.status_spec_id) + + # Get latest status spec version + if status_spec is not None: + study.status_spec_id = status_spec.id + study.status_spec_version = WorkflowProcessor.get_latest_version_string(status_spec.id) + session.add(study) session.commit() - # FIXME: We need to ask the protocol builder what workflows to add to the study, not just add them all. 
- for spec in session.query(WorkflowSpecModel).all(): - WorkflowProcessor.create(study.id, spec.id) + __add_study_workflows_from_status(study.id, status_spec) return StudyModelSchema().dump(study) +def __get_status_spec(status_spec_id): + if status_spec_id is None: + return session.query(WorkflowSpecModel).filter_by(is_status=True).first() + else: + return session.query(WorkflowSpecModel).filter_by(id=status_spec_id).first() + + +def __add_study_workflows_from_status(study_id, status_spec): + all_specs = session.query(WorkflowSpecModel).all() + if status_spec is not None: + # Run status spec to get list of workflow specs applicable to this study + status_processor = WorkflowProcessor.create(study_id, status_spec.id) + status_processor.do_engine_steps() + status_data = status_processor.next_task().data + + # Only add workflow specs listed in status spec + for spec in all_specs: + if spec.id in status_data and status_data[spec.id]: + WorkflowProcessor.create(study_id, spec.id) + else: + # No status spec. Just add all workflows. 
+ for spec in all_specs: + WorkflowProcessor.create(study_id, spec.id) + + @auth.login_required def update_study(study_id, body): if study_id is None: @@ -130,23 +160,63 @@ def post_update_study_from_protocol_builder(study_id): @auth.login_required def get_study_workflows(study_id): - workflow_models = session.query(WorkflowModel).filter_by(study_id=study_id).all() + + # Get study + study: StudyModel = session.query(StudyModel).filter_by(id=study_id).first() + + # Get study status spec + status_spec: WorkflowSpecModel = session.query(WorkflowSpecModel)\ + .filter_by(is_status=True).first() + + status_data = None + + if status_spec is not None: + # Run status spec + status_workflow_model: WorkflowModel = session.query(WorkflowModel)\ + .filter_by(study_id=study.id)\ + .filter_by(workflow_spec_id=status_spec.id)\ + .first() + status_processor = WorkflowProcessor(status_workflow_model) + + # Get list of active workflow specs for study + status_processor.do_engine_steps() + status_data = status_processor.bpmn_workflow.last_task.data + + # Get study workflows + workflow_models = session.query(WorkflowModel)\ + .filter_by(study_id=study_id)\ + .filter(WorkflowModel.workflow_spec_id != status_spec.id)\ + .all() + else: + # Get study workflows + workflow_models = session.query(WorkflowModel)\ + .filter_by(study_id=study_id)\ + .all() api_models = [] for workflow_model in workflow_models: processor = WorkflowProcessor(workflow_model, workflow_model.bpmn_workflow_json) - api_models.append(__get_workflow_api_model(processor)) + api_models.append(__get_workflow_api_model(processor, status_data)) schema = WorkflowApiSchema(many=True) return schema.dump(api_models) @auth.login_required def add_workflow_to_study(study_id, body): - workflow_spec_model = session.query(WorkflowSpecModel).filter_by(id=body["id"]).first() + workflow_spec_model: WorkflowSpecModel = session.query(WorkflowSpecModel).filter_by(id=body["id"]).first() if workflow_spec_model is None: error = 
ApiError('unknown_spec', 'The specification "' + body['id'] + '" is not recognized.') return ApiErrorSchema.dump(error), 404 processor = WorkflowProcessor.create(study_id, workflow_spec_model.id) + + # If workflow spec is a status spec, update the study status spec + if workflow_spec_model.is_status: + study = session.query(StudyModel).filter_by(id=study_id).first() + study.status_spec_id = workflow_spec_model.id + study.status_spec_version = processor.get_spec_version() + session.add(study) + session.commit() + return WorkflowApiSchema().dump(__get_workflow_api_model(processor)) @@ -193,3 +263,4 @@ def map_pb_study_to_study(pb_study): study_info['inactive'] = False return study_info + diff --git a/crc/api/workflow.py b/crc/api/workflow.py index 49990bec..25536423 100644 --- a/crc/api/workflow.py +++ b/crc/api/workflow.py @@ -17,7 +17,8 @@ def all_specifications(): @auth.login_required def add_workflow_specification(body): - new_spec = WorkflowSpecModelSchema().load(body, session=session) + new_spec: WorkflowSpecModel = WorkflowSpecModelSchema().load(body, session=session) + new_spec.is_status = new_spec.id == 'status' session.add(new_spec) session.commit() return WorkflowSpecModelSchema().dump(new_spec) @@ -69,9 +70,14 @@ def delete_workflow_specification(spec_id): session.commit() -def __get_workflow_api_model(processor: WorkflowProcessor): +def __get_workflow_api_model(processor: WorkflowProcessor, status_data=None): spiff_tasks = processor.get_all_user_tasks() user_tasks = list(map(Task.from_spiff, spiff_tasks)) + is_active = True + + if status_data is not None and processor.workflow_spec_id in status_data: + is_active = status_data[processor.workflow_spec_id] + workflow_api = WorkflowApi( id=processor.get_workflow_id(), status=processor.get_status(), @@ -80,7 +86,8 @@ def __get_workflow_api_model(processor: WorkflowProcessor): user_tasks=user_tasks, workflow_spec_id=processor.workflow_spec_id, spec_version=processor.get_spec_version(), - 
is_latest_spec=processor.get_spec_version() == processor.get_latest_version_string(processor.workflow_spec_id) + is_latest_spec=processor.get_spec_version() == processor.get_latest_version_string(processor.workflow_spec_id), + is_active=is_active ) if processor.next_task(): workflow_api.next_task = Task.from_spiff(processor.next_task()) @@ -89,9 +96,9 @@ @auth.login_required def get_workflow(workflow_id, soft_reset=False, hard_reset=False): - workflow_model = session.query(WorkflowModel).filter_by(id=workflow_id).first() + workflow_model: WorkflowModel = session.query(WorkflowModel).filter_by(id=workflow_id).first() processor = WorkflowProcessor(workflow_model, soft_reset=soft_reset, hard_reset=hard_reset) workflow_api_model = __get_workflow_api_model(processor) update_workflow_stats(workflow_model, workflow_api_model) return WorkflowApiSchema().dump(workflow_api_model) diff --git a/crc/models/api_models.py b/crc/models/api_models.py index 0e325f9d..6c4c7b2d 100644 --- a/crc/models/api_models.py +++ b/crc/models/api_models.py @@ -95,7 +95,8 @@ class TaskSchema(ma.Schema): class WorkflowApi(object): - def __init__(self, id, status, user_tasks, last_task, next_task, workflow_spec_id, spec_version, is_latest_spec): + def __init__(self, id, status, user_tasks, last_task, next_task, workflow_spec_id, spec_version, + is_latest_spec, is_active): self.id = id self.status = status self.user_tasks = user_tasks @@ -104,13 +105,13 @@ class WorkflowApi(object): self.workflow_spec_id = workflow_spec_id self.spec_version = spec_version self.is_latest_spec = is_latest_spec + self.is_active = is_active class WorkflowApiSchema(ma.Schema): class Meta: model = WorkflowApi fields = ["id", "status", "user_tasks", "last_task", "next_task", - "workflow_spec_id", "spec_version", "is_latest_spec", - "num_tasks_total", "num_tasks_complete", "num_tasks_incomplete"] + "workflow_spec_id", "spec_version", "is_latest_spec", 
"is_active"] unknown = INCLUDE status = EnumField(WorkflowStatus) @@ -120,15 +121,7 @@ class WorkflowApiSchema(ma.Schema): @marshmallow.post_load def make_workflow(self, data, **kwargs): - keys = [ - 'id', - 'status', - 'user_tasks', - 'last_task', - 'next_task', - 'workflow_spec_id', - 'spec_version', - 'is_latest_spec' - ] + keys = ['id', 'status', 'user_tasks', 'last_task', 'next_task', + 'workflow_spec_id', 'spec_version', 'is_latest_spec', "is_active"] filtered_fields = {key: data[key] for key in keys} return WorkflowApi(**filtered_fields) diff --git a/crc/models/file.py b/crc/models/file.py index ce7c8a34..a98ca68e 100644 --- a/crc/models/file.py +++ b/crc/models/file.py @@ -1,7 +1,7 @@ import enum from marshmallow_enum import EnumField -from marshmallow_sqlalchemy import ModelSchema +from marshmallow_sqlalchemy import SQLAlchemyAutoSchema from sqlalchemy import func from sqlalchemy.dialects.postgresql import UUID @@ -71,6 +71,7 @@ class FileModel(db.Model): name = db.Column(db.String) type = db.Column(db.Enum(FileType)) primary = db.Column(db.Boolean) + is_status = db.Column(db.Boolean) content_type = db.Column(db.String) workflow_spec_id = db.Column(db.String, db.ForeignKey('workflow_spec.id'), nullable=True) workflow_id = db.Column(db.Integer, db.ForeignKey('workflow.id'), nullable=True) @@ -80,8 +81,10 @@ class FileModel(db.Model): latest_version = db.Column(db.Integer, default=0) -class FileModelSchema(ModelSchema): +class FileModelSchema(SQLAlchemyAutoSchema): class Meta: model = FileModel + load_instance = True + include_relationships = True include_fk = True # Includes foreign keys type = EnumField(FileType) diff --git a/crc/models/stats.py b/crc/models/stats.py index 70132b22..1bc1935d 100644 --- a/crc/models/stats.py +++ b/crc/models/stats.py @@ -1,4 +1,4 @@ -from marshmallow_sqlalchemy import ModelSchema +from marshmallow_sqlalchemy import SQLAlchemyAutoSchema from crc import db @@ -16,9 +16,11 @@ class WorkflowStatsModel(db.Model): last_updated = 
db.Column(db.DateTime) -class WorkflowStatsModelSchema(ModelSchema): +class WorkflowStatsModelSchema(SQLAlchemyAutoSchema): class Meta: model = WorkflowStatsModel + load_instance = True + include_relationships = True include_fk = True # Includes foreign keys @@ -35,7 +37,9 @@ class TaskEventModel(db.Model): date = db.Column(db.DateTime) -class TaskEventModelSchema(ModelSchema): +class TaskEventModelSchema(SQLAlchemyAutoSchema): class Meta: model = TaskEventModel + load_instance = True + include_relationships = True include_fk = True # Includes foreign keys diff --git a/crc/models/study.py b/crc/models/study.py index ac7d708d..7e0d6dda 100644 --- a/crc/models/study.py +++ b/crc/models/study.py @@ -1,5 +1,5 @@ from marshmallow_enum import EnumField -from marshmallow_sqlalchemy import ModelSchema +from marshmallow_sqlalchemy import SQLAlchemyAutoSchema from sqlalchemy import func from crc import db @@ -20,11 +20,15 @@ class StudyModel(db.Model): investigator_uids = db.Column(db.ARRAY(db.String), nullable=True) inactive = db.Column(db.Boolean, default=False) requirements = db.Column(db.ARRAY(db.Integer), nullable=True) + status_spec_id = db.Column(db.String, db.ForeignKey('workflow_spec.id')) + status_spec_version = db.Column(db.String) -class StudyModelSchema(ModelSchema): +class StudyModelSchema(SQLAlchemyAutoSchema): class Meta: model = StudyModel + load_instance = True + include_relationships = True include_fk = True # Includes foreign keys protocol_builder_status = EnumField(ProtocolBuilderStatus) diff --git a/crc/models/user.py b/crc/models/user.py index 16a528c8..65b46de4 100644 --- a/crc/models/user.py +++ b/crc/models/user.py @@ -1,7 +1,7 @@ import datetime import jwt -from marshmallow_sqlalchemy import ModelSchema +from marshmallow_sqlalchemy import SQLAlchemyAutoSchema from crc import db, app from crc.api.common import ApiError @@ -52,7 +52,9 @@ class UserModel(db.Model): raise ApiError('token_invalid', 'The Authentication token you provided. 
You need a new token. ') -class UserModelSchema(ModelSchema): +class UserModelSchema(SQLAlchemyAutoSchema): class Meta: model = UserModel + load_instance = True + include_relationships = True diff --git a/crc/models/workflow.py b/crc/models/workflow.py index 92e74137..438daab7 100644 --- a/crc/models/workflow.py +++ b/crc/models/workflow.py @@ -1,9 +1,22 @@ import enum -from marshmallow_sqlalchemy import ModelSchema +from marshmallow_sqlalchemy import SQLAlchemyAutoSchema from crc import db +class WorkflowSpecCategoryModel(db.Model): + __tablename__ = 'workflow_spec_category' + id = db.Column(db.Integer, primary_key=True) + name = db.Column(db.String) + display_name = db.Column(db.String) + + +class WorkflowSpecCategoryModelSchema(SQLAlchemyAutoSchema): + class Meta: + model = WorkflowSpecCategoryModel + load_instance = True + include_relationships = True + class WorkflowSpecModel(db.Model): __tablename__ = 'workflow_spec' @@ -12,11 +25,17 @@ class WorkflowSpecModel(db.Model): display_name = db.Column(db.String) description = db.Column(db.Text) primary_process_id = db.Column(db.String) + workflow_spec_category_id = db.Column(db.Integer, db.ForeignKey('workflow_spec_category.id'), nullable=True) + workflow_spec_category = db.relationship("WorkflowSpecCategoryModel") + is_status = db.Column(db.Boolean, default=False) -class WorkflowSpecModelSchema(ModelSchema): +class WorkflowSpecModelSchema(SQLAlchemyAutoSchema): class Meta: model = WorkflowSpecModel + load_instance = True + include_relationships = True + include_fk = True # Includes foreign keys class WorkflowStatus(enum.Enum): @@ -34,4 +53,3 @@ class WorkflowModel(db.Model): study_id = db.Column(db.Integer, db.ForeignKey('study.id')) workflow_spec_id = db.Column(db.String, db.ForeignKey('workflow_spec.id')) spec_version = db.Column(db.String) - diff --git a/crc/scripts/complete_template.py b/crc/scripts/complete_template.py index ee25d48f..8b190038 100644 --- a/crc/scripts/complete_template.py +++ 
b/crc/scripts/complete_template.py @@ -28,6 +28,11 @@ class CompleteTemplate(Script): "the name of the docx template to use.") file_name = args[0] workflow_spec_model = self.find_spec_model_in_db(task.workflow) + task_study_id = task.workflow.data[WorkflowProcessor.STUDY_ID_KEY] + + if task_study_id != study_id: + raise ApiError(code="invalid_argument", + message="The given task does not match the given study.") if workflow_spec_model is None: raise ApiError(code="workflow_model_error", @@ -44,7 +49,6 @@ class CompleteTemplate(Script): "within workflow specification '%s'") % (args[0], workflow_spec_model.id) final_document_stream = self.make_template(BytesIO(file_data_model.data), task.data) - study_id = task.workflow.data[WorkflowProcessor.STUDY_ID_KEY] workflow_id = task.workflow.data[WorkflowProcessor.WORKFLOW_ID_KEY] FileService.add_task_file(study_id=study_id, workflow_id=workflow_id, task_id=task.id, name=file_name, diff --git a/crc/services/file_service.py b/crc/services/file_service.py index 0163ab11..aa0182d1 100644 --- a/crc/services/file_service.py +++ b/crc/services/file_service.py @@ -16,12 +16,13 @@ class FileService(object): @staticmethod def add_workflow_spec_file(workflow_spec: WorkflowSpecModel, - name, content_type, binary_data, primary=False): + name, content_type, binary_data, primary=False, is_status=False): """Create a new file and associate it with a workflow spec.""" file_model = FileModel( workflow_spec_id=workflow_spec.id, name=name, - primary=primary + primary=primary, + is_status=is_status ) if primary: bpmn: ElementTree.Element = ElementTree.fromstring(binary_data) diff --git a/example_data.py b/example_data.py index ddbcd9b0..dc1d1869 100644 --- a/example_data.py +++ b/example_data.py @@ -82,11 +82,11 @@ class ExampleDataLoader: returns an array of data models to be added to the database.""" global file file_service = FileService() - spec = WorkflowSpecModel(id=id, name=name, display_name=display_name, - description=description) + 
spec = WorkflowSpecModel(id=id, name=name, display_name=display_name, description=description, + is_status=id == 'status') db.session.add(spec) db.session.commit() if not filepath: @@ -95,13 +95,15 @@ class ExampleDataLoader: for file_path in files: noise, file_extension = os.path.splitext(file_path) filename = os.path.basename(file_path) - is_primary = filename.lower() == id + ".bpmn" + + is_status = filename.lower() == 'status.bpmn' + is_primary = filename.lower() == id + '.bpmn' try: - file = open(file_path, "rb") + file = open(file_path, 'rb') data = file.read() content_type = CONTENT_TYPES[file_extension[1:]] file_service.add_workflow_spec_file(workflow_spec=spec, name=filename, content_type=content_type, - binary_data=data, primary=is_primary) + binary_data=data, primary=is_primary, is_status=is_status) except IsADirectoryError as de: # Ignore sub directories pass diff --git a/migrations/versions/5f06108116ae_.py b/migrations/versions/5f06108116ae_.py new file mode 100644 index 00000000..9b651a70 --- /dev/null +++ b/migrations/versions/5f06108116ae_.py @@ -0,0 +1,41 @@ +"""empty message + +Revision ID: 5f06108116ae +Revises: 90dd63672e0a +Create Date: 2020-03-13 14:55:32.214380 + +""" +from alembic import op +import sqlalchemy as sa + + +# revision identifiers, used by Alembic. +revision = '5f06108116ae' +down_revision = '90dd63672e0a' +branch_labels = None +depends_on = None + + +def upgrade(): + # ### commands auto generated by Alembic - please adjust! 
### + op.create_table('workflow_spec_category', + sa.Column('id', sa.Integer(), nullable=False), + sa.Column('name', sa.String(), nullable=True), + sa.Column('display_name', sa.String(), nullable=True), + sa.PrimaryKeyConstraint('id') + ) + op.add_column('file', sa.Column('is_status', sa.Boolean(), nullable=True)) + op.add_column('workflow_spec', sa.Column('is_status', sa.Boolean(), nullable=True)) + op.add_column('workflow_spec', sa.Column('workflow_spec_category_id', sa.Integer(), nullable=True)) + op.create_foreign_key(None, 'workflow_spec', 'workflow_spec_category', ['workflow_spec_category_id'], ['id']) + # ### end Alembic commands ### + + +def downgrade(): + # ### commands auto generated by Alembic - please adjust! ### + op.drop_constraint(None, 'workflow_spec', type_='foreignkey') + op.drop_column('workflow_spec', 'workflow_spec_category_id') + op.drop_column('workflow_spec', 'is_status') + op.drop_column('file', 'is_status') + op.drop_table('workflow_spec_category') + # ### end Alembic commands ### diff --git a/migrations/versions/65f3fce6031a_.py b/migrations/versions/65f3fce6031a_.py new file mode 100644 index 00000000..5fa2ce0b --- /dev/null +++ b/migrations/versions/65f3fce6031a_.py @@ -0,0 +1,32 @@ +"""empty message + +Revision ID: 65f3fce6031a +Revises: 5f06108116ae +Create Date: 2020-03-15 12:40:42.314190 + +""" +from alembic import op +import sqlalchemy as sa + + +# revision identifiers, used by Alembic. +revision = '65f3fce6031a' +down_revision = '5f06108116ae' +branch_labels = None +depends_on = None + + +def upgrade(): + # ### commands auto generated by Alembic - please adjust! ### + op.add_column('study', sa.Column('status_spec_id', sa.String(), nullable=True)) + op.add_column('study', sa.Column('status_spec_version', sa.String(), nullable=True)) + op.create_foreign_key(None, 'study', 'workflow_spec', ['status_spec_id'], ['id']) + # ### end Alembic commands ### + + +def downgrade(): + # ### commands auto generated by Alembic - please adjust! 
### + op.drop_constraint(None, 'study', type_='foreignkey') + op.drop_column('study', 'status_spec_version') + op.drop_column('study', 'status_spec_id') + # ### end Alembic commands ### diff --git a/tests/data/status/crc2_training_session_data_security_plan.dmn b/tests/data/status/crc2_training_session_data_security_plan.dmn new file mode 100644 index 00000000..4acadb96 --- /dev/null +++ b/tests/data/status/crc2_training_session_data_security_plan.dmn @@ -0,0 +1,41 @@ + + + + + + + + + + + + + + + + + + + false + + + + + + false + + + + + true + + + + + + true + + + + + diff --git a/tests/data/status/crc2_training_session_enter_core_info.dmn b/tests/data/status/crc2_training_session_enter_core_info.dmn new file mode 100644 index 00000000..2fbdd5ce --- /dev/null +++ b/tests/data/status/crc2_training_session_enter_core_info.dmn @@ -0,0 +1,32 @@ + + + + + + + + + + + + + + + + false + + + false + + + + + true + + + true + + + + + diff --git a/tests/data/status/crc2_training_session_sponsor_funding_source.dmn b/tests/data/status/crc2_training_session_sponsor_funding_source.dmn new file mode 100644 index 00000000..e5020439 --- /dev/null +++ b/tests/data/status/crc2_training_session_sponsor_funding_source.dmn @@ -0,0 +1,30 @@ + + + + + + + + + + + + + + false + + + false + + + + + true + + + true + + + + + diff --git a/tests/data/status/status.bpmn b/tests/data/status/status.bpmn new file mode 100644 index 00000000..13f144ed --- /dev/null +++ b/tests/data/status/status.bpmn @@ -0,0 +1,121 @@ + + + + + SequenceFlow_1ees8ka + + + + Flow_1nimppb + Flow_1txrak2 + + + Flow_1m8285h + Flow_1sggkit + + + Flow_18pl92p + Flow_0x9580l + + + Flow_024q2cw + Flow_1m8285h + Flow_1nimppb + Flow_18pl92p + + + + + + Flow_1txrak2 + Flow_1sggkit + Flow_0x9580l + Flow_0pwtiqm + + + Flow_0pwtiqm + + + + + + + + + + + + + SequenceFlow_1ees8ka + Flow_024q2cw + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + diff --git a/tests/test_study_api.py b/tests/test_study_api.py index 0b49f100..1988c43b 100644 --- a/tests/test_study_api.py +++ b/tests/test_study_api.py @@ -3,7 +3,7 @@ from datetime import datetime, timezone from unittest.mock import patch, Mock from crc import session -from crc.models.api_models import WorkflowApiSchema +from crc.models.api_models import WorkflowApiSchema, WorkflowApi from crc.models.study import StudyModel, StudyModelSchema from crc.models.protocol_builder import ProtocolBuilderStatus, ProtocolBuilderStudyDetailsSchema, \ ProtocolBuilderStudySchema @@ -160,9 +160,6 @@ class TestStudyApi(BaseTest): rv = self.app.delete('/v1.0/study/%i' % study.id) self.assert_failure(rv, error_code="study_integrity_error") - - - def test_delete_workflow(self): self.load_example_data() study = session.query(StudyModel).first() @@ -207,3 +204,79 @@ class TestStudyApi(BaseTest): json_data_after = json.loads(response_after.get_data(as_text=True)) workflows_after = WorkflowApiSchema(many=True).load(json_data_after) self.assertEqual(1, len(workflows_after)) + + """ + Workflow Specs that have been made available (or not) to a particular study via the status.bpmn should be flagged + as available (or not) when the list of a study's workflows is retrieved. 
+ """ + def test_workflow_spec_status(self): + self.load_example_data() + study = session.query(StudyModel).first() + study_id = study.id + + # Add status workflow + self.load_test_spec('status') + + # Add status workflow to the study + status_spec = session.query(WorkflowSpecModel).filter_by(is_status=True).first() + add_status_response = self.app.post('/v1.0/study/%i/workflows' % study.id, + content_type="application/json", + headers=self.logged_in_headers(), + data=json.dumps(WorkflowSpecModelSchema().dump(status_spec))) + self.assert_success(add_status_response) + json_data_status = json.loads(add_status_response.get_data(as_text=True)) + status_workflow: WorkflowApi = WorkflowApiSchema().load(json_data_status) + self.assertIsNotNone(status_workflow) + self.assertIsNotNone(status_workflow.workflow_spec_id) + self.assertIsNotNone(status_workflow.spec_version) + self.assertIsNotNone(status_workflow.next_task) + self.assertIsNotNone(status_workflow.next_task['id']) + status_task_id = status_workflow.next_task['id'] + + # Verify that the study status spec is populated + updated_study: StudyModel = session.query(StudyModel).filter_by(id=study_id).first() + self.assertIsNotNone(updated_study) + self.assertIsNotNone(updated_study.status_spec_id) + self.assertIsNotNone(updated_study.status_spec_version) + self.assertEqual(updated_study.status_spec_id, status_workflow.workflow_spec_id) + self.assertEqual(updated_study.status_spec_version, status_workflow.spec_version) + + # Add all available non-status workflows to the study + specs = session.query(WorkflowSpecModel).filter_by(is_status=False).all() + for spec in specs: + add_response = self.app.post('/v1.0/study/%i/workflows' % study.id, + content_type="application/json", + headers=self.logged_in_headers(), + data=json.dumps(WorkflowSpecModelSchema().dump(spec))) + self.assert_success(add_response) + + for is_active in [False, True]: + # Set all workflow specs to inactive|active + update_status_response = 
self.app.put('/v1.0/workflow/%i/task/%s/data' % (status_workflow.id, status_task_id), + headers=self.logged_in_headers(), + content_type="application/json", + data=json.dumps({'some_input': is_active})) + self.assert_success(update_status_response) + json_workflow_api = json.loads(update_status_response.get_data(as_text=True)) + updated_workflow_api: WorkflowApi = WorkflowApiSchema().load(json_workflow_api) + self.assertIsNotNone(updated_workflow_api) + self.assertEqual(updated_workflow_api.status, WorkflowStatus.complete) + self.assertIsNotNone(updated_workflow_api.last_task) + self.assertIsNotNone(updated_workflow_api.last_task['data']) + self.assertIsNotNone(updated_workflow_api.last_task['data']['some_input']) + self.assertEqual(updated_workflow_api.last_task['data']['some_input'], is_active) + + # List workflows for study + response_after = self.app.get('/v1.0/study/%i/workflows' % study.id, + content_type="application/json", + headers=self.logged_in_headers()) + self.assert_success(response_after) + + json_data_after = json.loads(response_after.get_data(as_text=True)) + workflows_after = WorkflowApiSchema(many=True).load(json_data_after) + self.assertEqual(len(specs), len(workflows_after)) + + # All workflows should be inactive|active + for workflow in workflows_after: + self.assertEqual(workflow.is_active, is_active) + diff --git a/tests/test_tasks_api.py b/tests/test_tasks_api.py index db2b33f0..75af55c5 100644 --- a/tests/test_tasks_api.py +++ b/tests/test_tasks_api.py @@ -159,7 +159,7 @@ class TestTasksApi(BaseTest): } workflow_api = self.complete_form(workflow, tasks[0], data) self.assertIsNotNone(workflow_api.next_task) - self.assertEquals("EndEvent_0evb22x", workflow_api.next_task['name']) + self.assertEqual("EndEvent_0evb22x", workflow_api.next_task['name']) self.assertTrue(workflow_api.status == WorkflowStatus.complete) rv = self.app.get('/v1.0/file?workflow_id=%i' % workflow.id, headers=self.logged_in_headers()) self.assert_success(rv) diff --git 
a/tests/test_workflow_processor.py b/tests/test_workflow_processor.py index c4ff3f21..02aa0300 100644 --- a/tests/test_workflow_processor.py +++ b/tests/test_workflow_processor.py @@ -165,7 +165,7 @@ class TestWorkflowProcessor(BaseTest): study = session.query(StudyModel).first() with self.assertRaises(ApiError) as context: WorkflowProcessor.create(study.id, workflow_spec_model.id) - self.assertEquals("workflow_validation_error", context.exception.code) + self.assertEqual("workflow_validation_error", context.exception.code) self.assertTrue("bpmn:startEvent" in context.exception.message) def test_workflow_spec_key_error(self): @@ -311,14 +311,14 @@ class TestWorkflowProcessor(BaseTest): # Setting up another processor should not error out, but doesn't pick up the update. workflow_model.bpmn_workflow_json = processor.serialize() processor2 = WorkflowProcessor(workflow_model) - self.assertEquals("Step 1", processor2.bpmn_workflow.last_task.task_spec.description) - self.assertNotEquals("# This is some documentation I wanted to add.", + self.assertEqual("Step 1", processor2.bpmn_workflow.last_task.task_spec.description) + self.assertNotEqual("# This is some documentation I wanted to add.", processor2.bpmn_workflow.last_task.task_spec.documentation) # You can do a soft update and get the right response. 
processor3 = WorkflowProcessor(workflow_model, soft_reset=True) - self.assertEquals("Step 1", processor3.bpmn_workflow.last_task.task_spec.description) - self.assertEquals("# This is some documentation I wanted to add.", + self.assertEqual("Step 1", processor3.bpmn_workflow.last_task.task_spec.description) + self.assertEqual("# This is some documentation I wanted to add.", processor3.bpmn_workflow.last_task.task_spec.documentation) @@ -336,7 +336,7 @@ class TestWorkflowProcessor(BaseTest): task.data = {"color": "blue"} processor.complete_task(task) next_task = processor.next_task() - self.assertEquals("Step 2", next_task.task_spec.description) + self.assertEqual("Step 2", next_task.task_spec.description) # Modify the specification, with a major change that alters the flow and can't be serialized effectively. file_path = os.path.join(app.root_path, '..', 'tests', 'data', 'two_forms', 'mods', 'two_forms_struc_mod.bpmn') @@ -349,13 +349,40 @@ class TestWorkflowProcessor(BaseTest): # Do a hard reset, which should bring us back to the beginning, but retain the data. 
processor3 = WorkflowProcessor(workflow_model, hard_reset=True) - self.assertEquals("Step 1", processor3.next_task().task_spec.description) - self.assertEquals({"color": "blue"}, processor3.next_task().data) + self.assertEqual("Step 1", processor3.next_task().task_spec.description) + self.assertEqual({"color": "blue"}, processor3.next_task().data) processor3.complete_task(processor3.next_task()) - self.assertEquals("New Step", processor3.next_task().task_spec.description) - self.assertEquals({"color": "blue"}, processor3.next_task().data) + self.assertEqual("New Step", processor3.next_task().task_spec.description) + self.assertEqual({"color": "blue"}, processor3.next_task().data) def test_get_latest_spec_version(self): workflow_spec_model = self.load_test_spec("two_forms") version = WorkflowProcessor.get_latest_version_string("two_forms") self.assertTrue(version.startswith("v1 ")) + + def test_status_bpmn(self): + self.load_example_data() + + specs = session.query(WorkflowSpecModel).all() + + study = session.query(StudyModel).first() + workflow_spec_model = self.load_test_spec("status") + + for enabled in [True, False]: + processor = WorkflowProcessor.create(study.id, workflow_spec_model.id) + task = processor.next_task() + + # Turn all specs on or off + task.data = {"some_input": enabled} + processor.complete_task(task) + + # Finish out rest of workflow + while processor.get_status() == WorkflowStatus.waiting: + task = processor.next_task() + processor.complete_task(task) + + self.assertEqual(processor.get_status(), WorkflowStatus.complete) + + # Enabled status of all specs should match the value set in the first task + for spec in specs: + self.assertEqual(task.data[spec.id], enabled) diff --git a/tests/test_workflow_spec_api.py b/tests/test_workflow_spec_api.py index 08faf16f..b5553247 100644 --- a/tests/test_workflow_spec_api.py +++ b/tests/test_workflow_spec_api.py @@ -2,7 +2,7 @@ import json from crc import session from crc.models.file import FileModel -from 
crc.models.workflow import WorkflowSpecModel, WorkflowSpecModelSchema, WorkflowModel +from crc.models.workflow import WorkflowSpecModel, WorkflowSpecModelSchema, WorkflowModel, WorkflowSpecCategoryModel from tests.base_test import BaseTest @@ -25,7 +25,7 @@ class TestWorkflowSpec(BaseTest): def test_add_new_workflow_specification(self): self.load_example_data(); num_before = session.query(WorkflowSpecModel).count() - spec = WorkflowSpecModel(id='make_cookies', display_name='Cooooookies', + spec = WorkflowSpecModel(id='make_cookies', name='make_cookies', display_name='Cooooookies', description='Om nom nom delicious cookies') rv = self.app.post('/v1.0/workflow-specification', headers=self.logged_in_headers(), @@ -46,6 +46,32 @@ class TestWorkflowSpec(BaseTest): api_spec = WorkflowSpecModelSchema().load(json_data, session=session) self.assertEqual(db_spec, api_spec) + def test_update_workflow_specification(self): + self.load_example_data() + + category = WorkflowSpecCategoryModel(id=0, name='trap', display_name="It's a trap!") + session.add(category) + session.commit() + + db_spec_before: WorkflowSpecModel = session.query(WorkflowSpecModel).first() + spec_id = db_spec_before.id + self.assertIsNone(db_spec_before.workflow_spec_category_id) + + db_spec_before.workflow_spec_category_id = 0 + rv = self.app.put('/v1.0/workflow-specification/%s' % spec_id, + content_type="application/json", + headers=self.logged_in_headers(), + data=json.dumps(WorkflowSpecModelSchema().dump(db_spec_before))) + self.assert_success(rv) + json_data = json.loads(rv.get_data(as_text=True)) + api_spec = WorkflowSpecModelSchema().load(json_data, session=session) + self.assertEqual(db_spec_before, api_spec) + + db_spec_after: WorkflowSpecModel = session.query(WorkflowSpecModel).filter_by(id=spec_id).first() + self.assertIsNotNone(db_spec_after.workflow_spec_category_id) + self.assertIsNotNone(db_spec_after.workflow_spec_category) + self.assertEqual(db_spec_after.workflow_spec_category.display_name, 
category.display_name) + def test_delete_workflow_specification(self): self.load_example_data() spec_id = 'random_fact'