Merge remote-tracking branch 'origin/master' into feature/status_refactor
Fixing adding a study so all workflows are again added, will add status on those workflows based on output from the master bpmn diagram, which is coming shortly.
This commit is contained in:
commit
b61a35f956
|
@ -914,6 +914,8 @@ components:
|
|||
type: string
|
||||
display_name:
|
||||
type: string
|
||||
display_order:
|
||||
type: integer
|
||||
File:
|
||||
properties:
|
||||
id:
|
||||
|
|
|
@ -1,11 +1,12 @@
|
|||
from typing import List
|
||||
|
||||
from SpiffWorkflow.exceptions import WorkflowException
|
||||
from connexion import NoContent
|
||||
from flask import g
|
||||
from sqlalchemy.exc import IntegrityError
|
||||
|
||||
from crc import session, app
|
||||
from crc.api.common import ApiError
|
||||
from crc.api.common import ApiError, ApiErrorSchema
|
||||
from crc.api.workflow import __get_workflow_api_model
|
||||
from crc.models.api_models import WorkflowApiSchema
|
||||
from crc.models.protocol_builder import ProtocolBuilderStatus, ProtocolBuilderStudy
|
||||
|
@ -15,12 +16,14 @@ from crc.services.protocol_builder import ProtocolBuilderService
|
|||
from crc.services.workflow_processor import WorkflowProcessor
|
||||
|
||||
|
||||
|
||||
def add_study(body):
|
||||
study: StudyModel = StudyModelSchema().load(body, session=session)
|
||||
session.add(study)
|
||||
errors = add_all_workflow_specs_to_study(study)
|
||||
session.commit()
|
||||
return StudyModelSchema().dump(study)
|
||||
study_data = StudyModelSchema().dump(study)
|
||||
study_data["errors"] = ApiErrorSchema(many=True).dump(errors)
|
||||
return study_data
|
||||
|
||||
|
||||
def update_study(study_id, body):
|
||||
|
@ -120,34 +123,20 @@ def get_study_workflows(study_id):
|
|||
return schema.dump(api_models)
|
||||
|
||||
|
||||
def get_study_workflows_with_refresh(study_id):
|
||||
"""Returns all the workflows related to this study, assuring that the status of
|
||||
these workflows is up to date. """
|
||||
|
||||
# Get study
|
||||
study: StudyModel = session.query(StudyModel).filter_by(id=study_id).first()
|
||||
current_workflows = session.query(WorkflowModel).filter_by(study_id=study_id).all()
|
||||
all_specs = session.query(WorkflowSpecModel).filter_by(is_status=False).first()
|
||||
api_models = []
|
||||
|
||||
status_spec = session.query(WorkflowSpecModel).filter_by(is_status=True).first()
|
||||
if status_spec is not None:
|
||||
# Run status spec to get list of workflow specs applicable to this study
|
||||
status_processor = WorkflowProcessor.create(study.id, status_spec)
|
||||
status_processor.do_engine_steps()
|
||||
status_data = status_processor.next_task().data
|
||||
|
||||
# Only add workflow specs listed in status spec
|
||||
for spec in all_specs:
|
||||
if spec.id in status_data and status_data[spec.id]:
|
||||
processor = WorkflowProcessor.create(study.id, spec.id)
|
||||
api_models.append(__get_workflow_api_model(processor, status_data))
|
||||
else:
|
||||
# No status spec. Just add all workflows.
|
||||
for spec in all_specs:
|
||||
processor = WorkflowProcessor.create(study.id, spec.id)
|
||||
api_models.append(__get_workflow_api_model(processor, status_data))
|
||||
|
||||
def add_all_workflow_specs_to_study(study):
|
||||
existing_models = session.query(WorkflowModel).filter(WorkflowModel.study_id == study.id).all()
|
||||
existing_specs = list(m.workflow_spec_id for m in existing_models)
|
||||
new_specs = session.query(WorkflowSpecModel). \
|
||||
filter(WorkflowSpecModel.is_master_spec == False). \
|
||||
filter(WorkflowSpecModel.id.notin_(existing_specs)). \
|
||||
all()
|
||||
errors = []
|
||||
for workflow_spec in new_specs:
|
||||
try:
|
||||
WorkflowProcessor.create(study.id, workflow_spec.id)
|
||||
except WorkflowException as we:
|
||||
errors.append(ApiError.from_task_spec("workflow_execution_exception", str(we), we.sender))
|
||||
return errors
|
||||
|
||||
def add_workflow_to_study(study_id, body):
|
||||
workflow_spec_model: WorkflowSpecModel = session.query(WorkflowSpecModel).filter_by(id=body["id"]).first()
|
||||
|
@ -155,6 +144,3 @@ def add_workflow_to_study(study_id, body):
|
|||
raise ApiError('unknown_spec', 'The specification "' + body['id'] + '" is not recognized.')
|
||||
processor = WorkflowProcessor.create(study_id, workflow_spec_model.id)
|
||||
return WorkflowApiSchema().dump(__get_workflow_api_model(processor))
|
||||
|
||||
|
||||
|
||||
|
|
|
@ -12,6 +12,7 @@ class WorkflowSpecCategoryModel(db.Model):
|
|||
id = db.Column(db.Integer, primary_key=True)
|
||||
name = db.Column(db.String)
|
||||
display_name = db.Column(db.String)
|
||||
display_order = db.Column(db.Integer)
|
||||
|
||||
|
||||
class WorkflowSpecCategoryModelSchema(SQLAlchemyAutoSchema):
|
||||
|
|
|
@ -89,10 +89,10 @@ class ExampleDataLoader:
|
|||
name="irb_api_personnel",
|
||||
display_name="irb_api_personnel",
|
||||
description="irb_api_personnel")
|
||||
self.create_spec(id="irb_api_required_docs",
|
||||
name="irb_api_required_docs",
|
||||
display_name="irb_api_required_docs",
|
||||
description="irb_api_required_docs")
|
||||
# self.create_spec(id="irb_api_required_docs",
|
||||
# name="irb_api_required_docs",
|
||||
# display_name="irb_api_required_docs",
|
||||
# description="irb_api_required_docs")
|
||||
self.create_spec(id="sponsor_funding_source",
|
||||
name="sponsor_funding_source",
|
||||
display_name="sponsor_funding_source",
|
||||
|
|
|
@ -0,0 +1,28 @@
|
|||
"""empty message
|
||||
|
||||
Revision ID: 5e0709e172fa
|
||||
Revises: ddd5fc9ea75b
|
||||
Create Date: 2020-03-26 14:05:06.607043
|
||||
|
||||
"""
|
||||
from alembic import op
|
||||
import sqlalchemy as sa
|
||||
|
||||
|
||||
# revision identifiers, used by Alembic.
|
||||
revision = '5e0709e172fa'
|
||||
down_revision = 'ddd5fc9ea75b'
|
||||
branch_labels = None
|
||||
depends_on = None
|
||||
|
||||
|
||||
def upgrade():
|
||||
# ### commands auto generated by Alembic - please adjust! ###
|
||||
op.add_column('workflow_spec_category', sa.Column('display_order', sa.Integer(), nullable=True))
|
||||
# ### end Alembic commands ###
|
||||
|
||||
|
||||
def downgrade():
|
||||
# ### commands auto generated by Alembic - please adjust! ###
|
||||
op.drop_column('workflow_spec_category', 'display_order')
|
||||
# ### end Alembic commands ###
|
|
@ -5,7 +5,7 @@ from unittest.mock import patch
|
|||
from crc import session
|
||||
from crc.models.api_models import WorkflowApiSchema, WorkflowApi
|
||||
from crc.models.protocol_builder import ProtocolBuilderStatus, ProtocolBuilderStudyDetailsSchema, \
|
||||
ProtocolBuilderStudySchema
|
||||
ProtocolBuilderStudySchema, ProtocolBuilderInvestigatorSchema, ProtocolBuilderRequiredDocumentSchema
|
||||
from crc.models.study import StudyModel, StudyModelSchema
|
||||
from crc.models.workflow import WorkflowSpecModel, WorkflowSpecModelSchema, WorkflowModel, WorkflowStatus
|
||||
from tests.base_test import BaseTest
|
||||
|
@ -36,16 +36,23 @@ class TestStudyApi(BaseTest):
|
|||
headers=self.logged_in_headers(),
|
||||
data=json.dumps(StudyModelSchema().dump(study)))
|
||||
self.assert_success(rv)
|
||||
study = json.loads(rv.get_data(as_text=True))
|
||||
db_study = session.query(StudyModel).filter_by(id=12345).first()
|
||||
self.assertIsNotNone(db_study)
|
||||
self.assertEqual(study["title"], db_study.title)
|
||||
self.assertAlmostEqual(study["last_updated"], db_study.last_updated)
|
||||
self.assertEqual(study["protocol_builder_status"], db_study.protocol_builder_status)
|
||||
#self.assertAlmostEqual(study["last_updated"], db_study.last_updated)
|
||||
#self.assertEqual(study["protocol_builder_status"], db_study.protocol_builder_status)
|
||||
self.assertEqual(study["primary_investigator_id"], db_study.primary_investigator_id)
|
||||
self.assertEqual(study["sponsor"], db_study.sponsor)
|
||||
self.assertEqual(study["ind_number"], db_study.ind_number)
|
||||
self.assertEqual(study["user_uid"], db_study.user_uid)
|
||||
|
||||
workflow_spec_count =session.query(WorkflowSpecModel).filter(WorkflowSpecModel.is_master_spec == False).count()
|
||||
workflow_count = session.query(WorkflowModel).filter(WorkflowModel.study_id == 12345).count()
|
||||
error_count = len(study["errors"])
|
||||
self.assertEquals(workflow_spec_count, workflow_count + error_count)
|
||||
|
||||
|
||||
def test_update_study(self):
|
||||
self.load_example_data()
|
||||
study: StudyModel = session.query(StudyModel).first()
|
||||
|
@ -213,25 +220,44 @@ class TestStudyApi(BaseTest):
|
|||
self.assertEqual(1, len(workflows_after))
|
||||
|
||||
# """
|
||||
# Assure that when we create a new study, the status of the workflows in that study
|
||||
# reflects information we have read in from the protocol builder.
|
||||
# Workflow Specs that have been made available (or not) to a particular study via the status.bpmn should be flagged
|
||||
# as available (or not) when the list of a study's workflows is retrieved.
|
||||
# """
|
||||
# def test_top_level_workflow(self):
|
||||
# @patch('crc.services.protocol_builder.ProtocolBuilderService.get_studies')
|
||||
# @patch('crc.services.protocol_builder.ProtocolBuilderService.get_investigators')
|
||||
# @patch('crc.services.protocol_builder.ProtocolBuilderService.get_required_docs')
|
||||
# @patch('crc.services.protocol_builder.ProtocolBuilderService.get_study_details')
|
||||
# def test_workflow_spec_status(self,
|
||||
# mock_details,
|
||||
# mock_required_docs,
|
||||
# mock_investigators,
|
||||
# mock_studies):
|
||||
#
|
||||
# # Set up the status workflow
|
||||
# self.load_test_spec('top_level_workflow', master_spec=True)
|
||||
# # Mock Protocol Builder response
|
||||
# studies_response = self.protocol_builder_response('user_studies.json')
|
||||
# mock_studies.return_value = ProtocolBuilderStudySchema(many=True).loads(studies_response)
|
||||
#
|
||||
# investigators_response = self.protocol_builder_response('investigators.json')
|
||||
# mock_investigators.return_value = ProtocolBuilderInvestigatorSchema(many=True).loads(investigators_response)
|
||||
#
|
||||
# required_docs_response = self.protocol_builder_response('required_docs.json')
|
||||
# mock_required_docs.return_value = ProtocolBuilderRequiredDocumentSchema(many=True).loads(required_docs_response)
|
||||
#
|
||||
# details_response = self.protocol_builder_response('study_details.json')
|
||||
# mock_details.return_value = ProtocolBuilderStudyDetailsSchema().loads(details_response)
|
||||
#
|
||||
# self.load_example_data()
|
||||
# study = session.query(StudyModel).first()
|
||||
# study_id = study.id
|
||||
#
|
||||
# # Add status workflow
|
||||
# self.load_test_spec('top_level_workflow')
|
||||
#
|
||||
# # Assure the top_level_workflow is added to the study
|
||||
# top_level_spec = session.query(WorkflowSpecModel).filter_by(is_master_spec=True).first()
|
||||
# self.assertIsNotNone(top_level_spec)
|
||||
#
|
||||
# # Create a new study.
|
||||
# self.
|
||||
#
|
||||
# # Add all available non-status workflows to the study
|
||||
# specs = session.query(WorkflowSpecModel).filter_by(is_status=False).all()
|
||||
# for spec in specs:
|
||||
# add_response = self.app.post('/v1.0/study/%i/workflows' % study.id,
|
||||
# content_type="application/json",
|
||||
# headers=self.logged_in_headers(),
|
||||
# data=json.dumps(WorkflowSpecModelSchema().dump(spec)))
|
||||
# self.assert_success(add_response)
|
||||
#
|
||||
# for is_active in [False, True]:
|
||||
# # Set all workflow specs to inactive|active
|
||||
|
|
|
@ -50,7 +50,7 @@ class TestWorkflowSpec(BaseTest):
|
|||
def test_update_workflow_specification(self):
|
||||
self.load_example_data()
|
||||
|
||||
category = WorkflowSpecCategoryModel(id=0, name='trap', display_name="It's a trap!")
|
||||
category = WorkflowSpecCategoryModel(id=0, name='trap', display_name="It's a trap!", display_order=0)
|
||||
session.add(category)
|
||||
session.commit()
|
||||
|
||||
|
@ -72,6 +72,7 @@ class TestWorkflowSpec(BaseTest):
|
|||
self.assertIsNotNone(db_spec_after.workflow_spec_category_id)
|
||||
self.assertIsNotNone(db_spec_after.workflow_spec_category)
|
||||
self.assertEqual(db_spec_after.workflow_spec_category.display_name, category.display_name)
|
||||
self.assertEqual(db_spec_after.workflow_spec_category.display_order, category.display_order)
|
||||
|
||||
def test_delete_workflow_specification(self):
|
||||
self.load_example_data()
|
||||
|
|
Loading…
Reference in New Issue