import json

from crc import session
from crc.models.file import FileModel
from crc.models.workflow import WorkflowSpecModel, WorkflowSpecModelSchema, WorkflowModel, WorkflowSpecCategoryModel

from tests.base_test import BaseTest
|
class TestWorkflowSpec(BaseTest):
    """API tests for the /v1.0/workflow-specification endpoints (list, add, get, update, delete)."""

    def test_list_workflow_specifications(self):
        """GET on the collection returns specs that round-trip through the schema."""
        self.load_example_data()
        spec = session.query(WorkflowSpecModel).first()
        rv = self.app.get('/v1.0/workflow-specification',
                          follow_redirects=True,
                          content_type="application/json",
                          headers=self.logged_in_headers())
        self.assert_success(rv)
        json_data = json.loads(rv.get_data(as_text=True))
        specs = WorkflowSpecModelSchema(many=True).load(json_data, session=session)
        # The first spec returned by the API should match the first spec in the DB.
        spec2 = specs[0]
        self.assertEqual(spec.id, spec2.id)
        self.assertEqual(spec.display_name, spec2.display_name)
        self.assertEqual(spec.description, spec2.description)

    def test_add_new_workflow_specification(self):
        """POST creates a new spec; it is persisted and the total count grows by one."""
        self.load_example_data()
        num_before = session.query(WorkflowSpecModel).count()
        spec = WorkflowSpecModel(id='make_cookies', name='make_cookies', display_name='Cooooookies',
                                 description='Om nom nom delicious cookies')
        rv = self.app.post('/v1.0/workflow-specification',
                           headers=self.logged_in_headers(),
                           content_type="application/json",
                           data=json.dumps(WorkflowSpecModelSchema().dump(spec)))
        self.assert_success(rv)
        db_spec = session.query(WorkflowSpecModel).filter_by(id='make_cookies').first()
        self.assertEqual(spec.display_name, db_spec.display_name)
        num_after = session.query(WorkflowSpecModel).count()
        self.assertEqual(num_after, num_before + 1)

    def test_get_workflow_specification(self):
        """GET on a single spec id returns a schema-loadable copy equal to the DB row."""
        self.load_example_data()
        db_spec = session.query(WorkflowSpecModel).first()
        rv = self.app.get('/v1.0/workflow-specification/%s' % db_spec.id, headers=self.logged_in_headers())
        self.assert_success(rv)
        json_data = json.loads(rv.get_data(as_text=True))
        api_spec = WorkflowSpecModelSchema().load(json_data, session=session)
        self.assertEqual(db_spec, api_spec)

    def test_update_workflow_specification(self):
        """PUT updates a spec's category; the change is returned and persisted."""
        self.load_example_data()

        # Create a category the spec is not yet assigned to.
        category_id = 99
        category = WorkflowSpecCategoryModel(id=category_id, name='trap', display_name="It's a trap!", display_order=0)
        session.add(category)
        session.commit()

        db_spec_before: WorkflowSpecModel = session.query(WorkflowSpecModel).first()
        spec_id = db_spec_before.id
        self.assertNotEqual(db_spec_before.category_id, category_id)
        db_spec_before.category_id = category_id

        rv = self.app.put('/v1.0/workflow-specification/%s' % spec_id,
                          content_type="application/json",
                          headers=self.logged_in_headers(),
                          data=json.dumps(WorkflowSpecModelSchema().dump(db_spec_before)))
        self.assert_success(rv)
        json_data = json.loads(rv.get_data(as_text=True))
        api_spec = WorkflowSpecModelSchema().load(json_data, session=session)
        self.assertEqual(db_spec_before, api_spec)

        # Re-query to confirm the category assignment was persisted, not just echoed.
        db_spec_after: WorkflowSpecModel = session.query(WorkflowSpecModel).filter_by(id=spec_id).first()
        self.assertIsNotNone(db_spec_after.category_id)
        self.assertIsNotNone(db_spec_after.category)
        self.assertEqual(db_spec_after.category.display_name, category.display_name)
        self.assertEqual(db_spec_after.category.display_order, category.display_order)

    def test_delete_workflow_specification(self):
        """DELETE removes the spec and cascades to its files and workflows."""
        self.load_example_data()
        spec_id = 'random_fact'
        self.load_test_spec(spec_id)
        num_specs_before = session.query(WorkflowSpecModel).filter_by(id=spec_id).count()
        self.assertEqual(num_specs_before, 1)

        # Sanity check: the spec has dependent rows so the cascade is actually exercised.
        num_files_before = session.query(FileModel).filter_by(workflow_spec_id=spec_id).count()
        num_workflows_before = session.query(WorkflowModel).filter_by(workflow_spec_id=spec_id).count()
        self.assertGreater(num_files_before + num_workflows_before, 0)

        rv = self.app.delete('/v1.0/workflow-specification/' + spec_id, headers=self.logged_in_headers())
        self.assert_success(rv)

        num_specs_after = session.query(WorkflowSpecModel).filter_by(id=spec_id).count()
        self.assertEqual(0, num_specs_after)

        # Make sure that all items in the database with the workflow spec ID are deleted as well.
        num_files_after = session.query(FileModel).filter_by(workflow_spec_id=spec_id).count()
        num_workflows_after = session.query(WorkflowModel).filter_by(workflow_spec_id=spec_id).count()
        self.assertEqual(num_files_after + num_workflows_after, 0)