Merge pull request #116 from sartography/chore/test_coverage

Chore/test coverage

Commit a4a367cb22
@@ -24,7 +24,6 @@ def get_approval_counts(as_user=None):
         .all()

     study_ids = [a.study_id for a in db_user_approvals]
-    print('study_ids', study_ids)

     db_other_approvals = db.session.query(ApprovalModel)\
         .filter(ApprovalModel.study_id.in_(study_ids))\
@@ -39,8 +38,8 @@ def get_approval_counts(as_user=None):
         other_approvals[approval.study_id] = approval

     counts = {}
-    for status in ApprovalStatus:
-        counts[status.name] = 0
+    for name, value in ApprovalStatus.__members__.items():
+        counts[name] = 0

     for approval in db_user_approvals:
         # Check if another approval has the same study id
@@ -57,6 +56,8 @@ def get_approval_counts(as_user=None):
                 counts[ApprovalStatus.CANCELED.name] += 1
+            elif other_approval.status == ApprovalStatus.APPROVED.name:
+                counts[approval.status] += 1
             else:
                 counts[approval.status] += 1
         else:
             counts[approval.status] += 1

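For context on the counting change above: a minimal, self-contained sketch (not part of this diff) of why iterating ApprovalStatus.__members__.items() seeds the counts dictionary with status names, which then match the string status values stored on each approval. The enum values here are assumed for illustration; the real enum lives in crc.models.approval.

from enum import Enum

class ApprovalStatus(Enum):
    # Assumed values, for illustration only.
    PENDING = "PENDING"
    APPROVED = "APPROVED"
    DECLINED = "DECLINED"
    CANCELED = "CANCELED"

# Seed every status name with a zero count, as the updated code does.
counts = {name: 0 for name, value in ApprovalStatus.__members__.items()}

# Tallying then works directly against string status values.
for status_value in ["PENDING", "APPROVED", "PENDING"]:
    counts[status_value] += 1

print(counts)  # {'PENDING': 2, 'APPROVED': 1, 'DECLINED': 0, 'CANCELED': 0}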
@@ -31,7 +31,7 @@ def verify_token(token=None):
     failure_error = ApiError("invalid_token", "Unable to decode the token you provided. Please re-authenticate",
                              status_code=403)

-    if not _is_production():
+    if not _is_production() and (token is None or 'user' not in g):
         g.user = UserModel.query.first()
         token = g.user.encode_auth_token()

@@ -132,6 +132,7 @@ def login(
     # X-Forwarded-Server: dev.crconnect.uvadcos.io
     # Connection: Keep-Alive


+    # If we're in production, override any uid with the uid from the SSO request headers
     if _is_production():
         uid = _get_request_uid(request)
@@ -175,6 +176,7 @@ def _handle_login(user_info: LdapModel, redirect_url=None):
     Response. 302 - Redirects to the frontend auth callback URL, with auth token appended.
     """
     user = _upsert_user(user_info)
+    g.user = user

     # Return the frontend auth callback URL, with auth token appended.
     auth_token = user.encode_auth_token().decode()
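The tightened guard in verify_token above keeps the non-production fallback user from overriding a token or user that a test has already supplied. A small sketch of the same condition in plain Python (no Flask; the function name is made up):

def should_use_fallback_user(is_production, token, user_in_g):
    # Mirrors the updated condition: only fall back when nothing was provided.
    return (not is_production) and (token is None or not user_in_g)

assert should_use_fallback_user(False, None, False)      # dev, nothing supplied: fall back
assert not should_use_fallback_user(False, "abc", True)  # dev, token and user present: respect them
assert not should_use_fallback_user(True, None, False)   # production never falls back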
@@ -1,6 +1,8 @@
 import uuid

-from crc import session
+from flask import g
+
+from crc import session, app
 from crc.api.common import ApiError, ApiErrorSchema
 from crc.models.api_models import WorkflowApi, WorkflowApiSchema, NavigationItem, NavigationItemSchema
 from crc.models.file import FileModel, LookupDataSchema
@@ -156,6 +158,7 @@ def delete_workflow(workflow_id):

 def set_current_task(workflow_id, task_id):
     workflow_model = session.query(WorkflowModel).filter_by(id=workflow_id).first()
+    user_uid = __get_user_uid(workflow_model.study.user_uid)
     processor = WorkflowProcessor(workflow_model)
     task_id = uuid.UUID(task_id)
     task = processor.bpmn_workflow.get_task(task_id)
@@ -167,13 +170,21 @@ def set_current_task(workflow_id, task_id):
     if task.state == task.COMPLETED:
         task.reset_token(reset_data=False)  # we could optionally clear the previous data.
     processor.save()
-    WorkflowService.log_task_action(processor, task, WorkflowService.TASK_ACTION_TOKEN_RESET)
+    WorkflowService.log_task_action(user_uid, processor, task, WorkflowService.TASK_ACTION_TOKEN_RESET)
     workflow_api_model = __get_workflow_api_model(processor, task)
     return WorkflowApiSchema().dump(workflow_api_model)


 def update_task(workflow_id, task_id, body):
     workflow_model = session.query(WorkflowModel).filter_by(id=workflow_id).first()
+
+    if workflow_model is None:
+        raise ApiError("invalid_workflow_id", "The given workflow id is not valid.", status_code=404)
+
+    elif workflow_model.study is None:
+        raise ApiError("invalid_study", "There is no study associated with the given workflow.", status_code=404)
+
+    user_uid = __get_user_uid(workflow_model.study.user_uid)
     processor = WorkflowProcessor(workflow_model)
     task_id = uuid.UUID(task_id)
     task = processor.bpmn_workflow.get_task(task_id)
@@ -184,7 +195,7 @@ def update_task(workflow_id, task_id, body):
     processor.complete_task(task)
     processor.do_engine_steps()
     processor.save()
-    WorkflowService.log_task_action(processor, task, WorkflowService.TASK_ACTION_COMPLETE)
+    WorkflowService.log_task_action(user_uid, processor, task, WorkflowService.TASK_ACTION_COMPLETE)

     workflow_api_model = __get_workflow_api_model(processor)
     return WorkflowApiSchema().dump(workflow_api_model)
@@ -239,3 +250,14 @@ def lookup(workflow_id, field_id, query, limit):
     workflow = session.query(WorkflowModel).filter(WorkflowModel.id == workflow_id).first()
     lookup_data = LookupService.lookup(workflow, field_id, query, limit)
     return LookupDataSchema(many=True).dump(lookup_data)
+
+
+def __get_user_uid(user_uid):
+    if 'user' in g:
+        if g.user.uid not in app.config['ADMIN_UIDS'] and user_uid != g.user.uid:
+            raise ApiError("permission_denied", "You are not authorized to edit the task data for this workflow.", status_code=403)
+        else:
+            return g.user.uid
+
+    else:
+        raise ApiError("logged_out", "You are no longer logged in.", status_code=401)
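The __get_user_uid helper added above gates task edits to either the workflow's owner or a configured admin. A rough, framework-free sketch of that admin-or-owner rule (ADMIN_UIDS and the function name are assumptions, simplified away from Flask's g and app.config):

ADMIN_UIDS = ["dhf8r"]  # assumed config value, for illustration only

def can_edit_task_data(current_uid, owner_uid, admin_uids=ADMIN_UIDS):
    # Admins may edit any workflow; everyone else only their own.
    return current_uid in admin_uids or current_uid == owner_uid

assert can_edit_task_data("dhf8r", "lb3dp")      # admin edits someone else's workflow
assert can_edit_task_data("lb3dp", "lb3dp")      # owner edits their own workflow
assert not can_edit_task_data("lb3dp", "dhf8r")  # anyone else is rejected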
@@ -19,7 +19,7 @@ class UserModel(db.Model):
     last_name = db.Column(db.String, nullable=True)
     title = db.Column(db.String, nullable=True)

-    # Add Department and School
+    # TODO: Add Department and School


     def encode_auth_token(self):
@@ -86,8 +86,8 @@ class StudyService(object):
     def delete_workflow(workflow):
         for file in session.query(FileModel).filter_by(workflow_id=workflow.id).all():
             FileService.delete_file(file.id)
-        for deb in workflow.dependencies:
-            session.delete(deb)
+        for dep in workflow.dependencies:
+            session.delete(dep)
         session.query(TaskEventModel).filter_by(workflow_id=workflow.id).delete()
         session.query(WorkflowModel).filter_by(id=workflow.id).delete()

@@ -58,7 +58,7 @@ class WorkflowService(object):

     @staticmethod
     def delete_test_data():
-        for study in db.session.query(StudyModel).filter(StudyModel.user_uid=="test"):
+        for study in db.session.query(StudyModel).filter(StudyModel.user_uid == "test"):
             StudyService.delete_study(study.id)
         db.session.commit()

@@ -318,12 +318,12 @@ class WorkflowService(object):
             field.options.append({"id": d.value, "name": d.label})

     @staticmethod
-    def log_task_action(processor, spiff_task, action):
+    def log_task_action(user_uid, processor, spiff_task, action):
         task = WorkflowService.spiff_task_to_api_task(spiff_task)
         workflow_model = processor.workflow_model
         task_event = TaskEventModel(
             study_id=workflow_model.study_id,
-            user_uid=g.user.uid,
+            user_uid=user_uid,
             workflow_id=workflow_model.id,
             workflow_spec_id=workflow_model.workflow_spec_id,
             spec_version=processor.get_version_string(),
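Passing user_uid into log_task_action, rather than reading g.user.uid inside the service, makes the caller responsible for resolving the user and removes the dependency on a Flask request context. A hedged sketch of that dependency-passing idea (an illustrative class, not the project's actual service):

class TaskEventLogger:
    # Illustrative only: records who performed an action without touching request globals.

    def __init__(self):
        self.events = []

    def log_task_action(self, user_uid, workflow_id, action):
        # The caller supplies user_uid, so no request context is needed here.
        self.events.append({"user_uid": user_uid, "workflow_id": workflow_id, "action": action})

logger = TaskEventLogger()
logger.log_task_action(user_uid="dhf8r", workflow_id=42, action="COMPLETE")
assert logger.events[0]["user_uid"] == "dhf8r"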
@@ -2,25 +2,27 @@
 # IMPORTANT - Environment must be loaded before app, models, etc....
 import os

+from flask import g
+from sqlalchemy import Sequence

 os.environ["TESTING"] = "true"

 import json
 import unittest
 import urllib.parse
 import datetime

-from crc.models.protocol_builder import ProtocolBuilderStatus
-from crc.models.study import StudyModel
-from crc.services.file_service import FileService
-from crc.services.study_service import StudyService
-from crc.models.file import FileModel, FileDataModel, CONTENT_TYPES
-from crc.models.workflow import WorkflowSpecModel, WorkflowSpecModelSchema, WorkflowModel
-from crc.models.user import UserModel
-from flask import g
-from sqlalchemy import Sequence

 from crc import app, db, session
+from crc.models.api_models import WorkflowApiSchema, MultiInstanceType
+from crc.models.approval import ApprovalModel, ApprovalStatus
+from crc.models.file import FileModel, FileDataModel, CONTENT_TYPES
+from crc.models.protocol_builder import ProtocolBuilderStatus
+from crc.models.stats import TaskEventModel
+from crc.models.study import StudyModel
+from crc.models.user import UserModel
+from crc.models.workflow import WorkflowSpecModel, WorkflowSpecModelSchema, WorkflowModel
+from crc.services.file_service import FileService
+from crc.services.study_service import StudyService
+from crc.services.workflow_service import WorkflowService
 from example_data import ExampleDataLoader

 #UNCOMMENT THIS FOR DEBUGGING SQL ALCHEMY QUERIES
@@ -109,12 +111,15 @@ class BaseTest(unittest.TestCase):

         query_string = self.user_info_to_query_string(user_info, redirect_url)
         rv = self.app.get("/v1.0/login%s" % query_string, follow_redirects=False)

         self.assertTrue(rv.status_code == 302)
         self.assertTrue(str.startswith(rv.location, redirect_url))

         user_model = session.query(UserModel).filter_by(uid=uid).first()
         self.assertIsNotNone(user_model.display_name)
         self.assertEqual(user_model.uid, uid)
+        self.assertTrue('user' in g, 'User should be in Flask globals')
+        self.assertEqual(uid, g.user.uid, 'Logged in user should match given user uid')

         return dict(Authorization='Bearer ' + user_model.encode_auth_token().decode())

     def load_example_data(self, use_crc_data=False, use_rrt_data=False):
@@ -161,6 +166,7 @@ class BaseTest(unittest.TestCase):
     @staticmethod
     def load_test_spec(dir_name, master_spec=False, category_id=None):
         """Loads a spec into the database based on a directory in /tests/data"""

         if session.query(WorkflowSpecModel).filter_by(id=dir_name).count() > 0:
             return session.query(WorkflowSpecModel).filter_by(id=dir_name).first()
         filepath = os.path.join(app.root_path, '..', 'tests', 'data', dir_name, "*")
@@ -224,12 +230,12 @@ class BaseTest(unittest.TestCase):
             db.session.commit()
         return user

-    def create_study(self, uid="dhf8r", title="Beer conception in the bipedal software engineer"):
-        study = session.query(StudyModel).first()
+    def create_study(self, uid="dhf8r", title="Beer conception in the bipedal software engineer", primary_investigator_id="lb3dp"):
+        study = session.query(StudyModel).filter_by(user_uid=uid).filter_by(title=title).first()
         if study is None:
             user = self.create_user(uid=uid)
             study = StudyModel(title=title, protocol_builder_status=ProtocolBuilderStatus.ACTIVE,
-                               user_uid=user.uid, primary_investigator_id='lb3dp')
+                               user_uid=user.uid, primary_investigator_id=primary_investigator_id)
             db.session.add(study)
             db.session.commit()
         return study
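The reworked create_study above looks a study up by user_uid and title before creating one, a get-or-create pattern that keeps repeated test setup idempotent. A minimal sketch of the same idea over a plain list standing in for the database session (all names here are illustrative):

studies = []  # stand-in for the database table

def get_or_create_study(user_uid, title, primary_investigator_id):
    # Reuse an existing record when both keys match; otherwise insert a new one.
    for study in studies:
        if study["user_uid"] == user_uid and study["title"] == title:
            return study
    study = {"user_uid": user_uid, "title": title,
             "primary_investigator_id": primary_investigator_id}
    studies.append(study)
    return study

first = get_or_create_study("dhf8r", "Beer conception", "lb3dp")
second = get_or_create_study("dhf8r", "Beer conception", "lb3dp")
assert first is second  # the second call reuses the first record
assert len(studies) == 1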
@@ -251,3 +257,97 @@ class BaseTest(unittest.TestCase):
                 binary_data=file.read(),
                 content_type=CONTENT_TYPES['xls'])
             file.close()
+
+    def create_approval(
+            self,
+            study=None,
+            workflow=None,
+            approver_uid=None,
+            status=None,
+            version=None,
+    ):
+        study = study or self.create_study()
+        workflow = workflow or self.create_workflow()
+        approver_uid = approver_uid or self.test_uid
+        status = status or ApprovalStatus.PENDING.value
+        version = version or 1
+        approval = ApprovalModel(study=study, workflow=workflow, approver_uid=approver_uid, status=status, version=version)
+        db.session.add(approval)
+        db.session.commit()
+        return approval
+
+    def get_workflow_api(self, workflow, soft_reset=False, hard_reset=False, user_uid="dhf8r"):
+        user = session.query(UserModel).filter_by(uid=user_uid).first()
+        self.assertIsNotNone(user)
+
+        rv = self.app.get('/v1.0/workflow/%i?soft_reset=%s&hard_reset=%s' %
+                          (workflow.id, str(soft_reset), str(hard_reset)),
+                          headers=self.logged_in_headers(user),
+                          content_type="application/json")
+        self.assert_success(rv)
+        json_data = json.loads(rv.get_data(as_text=True))
+        workflow_api = WorkflowApiSchema().load(json_data)
+        self.assertEqual(workflow.workflow_spec_id, workflow_api.workflow_spec_id)
+        return workflow_api
+
+    def complete_form(self, workflow_in, task_in, dict_data, error_code=None, user_uid="dhf8r"):
+        prev_completed_task_count = workflow_in.completed_tasks
+        if isinstance(task_in, dict):
+            task_id = task_in["id"]
+        else:
+            task_id = task_in.id
+
+        user = session.query(UserModel).filter_by(uid=user_uid).first()
+        self.assertIsNotNone(user)
+
+        rv = self.app.put('/v1.0/workflow/%i/task/%s/data' % (workflow_in.id, task_id),
+                          headers=self.logged_in_headers(user=user),
+                          content_type="application/json",
+                          data=json.dumps(dict_data))
+        if error_code:
+            self.assert_failure(rv, error_code=error_code)
+            return
+
+        self.assert_success(rv)
+        json_data = json.loads(rv.get_data(as_text=True))
+
+        # Assure stats are updated on the model
+        workflow = WorkflowApiSchema().load(json_data)
+        # The total number of tasks may change over time, as users move through gateways
+        # branches may be pruned. As we hit parallel Multi-Instance new tasks may be created...
+        self.assertIsNotNone(workflow.total_tasks)
+        self.assertEqual(prev_completed_task_count + 1, workflow.completed_tasks)
+
+        # Assure a record exists in the Task Events
+        task_events = session.query(TaskEventModel) \
+            .filter_by(workflow_id=workflow.id) \
+            .filter_by(task_id=task_id) \
+            .order_by(TaskEventModel.date.desc()).all()
+        self.assertGreater(len(task_events), 0)
+        event = task_events[0]
+        self.assertIsNotNone(event.study_id)
+        self.assertEqual(user_uid, event.user_uid)
+        self.assertEqual(workflow.id, event.workflow_id)
+        self.assertEqual(workflow.workflow_spec_id, event.workflow_spec_id)
+        self.assertEqual(workflow.spec_version, event.spec_version)
+        self.assertEqual(WorkflowService.TASK_ACTION_COMPLETE, event.action)
+        self.assertEqual(task_in.id, task_id)
+        self.assertEqual(task_in.name, event.task_name)
+        self.assertEqual(task_in.title, event.task_title)
+        self.assertEqual(task_in.type, event.task_type)
+        self.assertEqual("COMPLETED", event.task_state)
+
+        # Not sure what voodoo is happening inside of marshmallow to get me in this state.
+        if isinstance(task_in.multi_instance_type, MultiInstanceType):
+            self.assertEqual(task_in.multi_instance_type.value, event.mi_type)
+        else:
+            self.assertEqual(task_in.multi_instance_type, event.mi_type)
+
+        self.assertEqual(task_in.multi_instance_count, event.mi_count)
+        self.assertEqual(task_in.multi_instance_index, event.mi_index)
+        self.assertEqual(task_in.process_name, event.process_name)
+        self.assertIsNotNone(event.date)
+
+
+        workflow = WorkflowApiSchema().load(json_data)
+        return workflow
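The new create_approval helper above fills unset arguments with the "value = value or default" idiom. A short sketch of the pattern and its one caveat, namely that any falsy argument (for example 0) is also replaced (the stand-in enum and values are made up):

from enum import Enum

class ApprovalStatus(Enum):  # stand-in for crc.models.approval.ApprovalStatus
    PENDING = "PENDING"
    APPROVED = "APPROVED"

def make_approval(status=None, version=None):
    status = status or ApprovalStatus.PENDING.value
    version = version or 1
    return {"status": status, "version": version}

assert make_approval() == {"status": "PENDING", "version": 1}
assert make_approval(status="APPROVED", version=2) == {"status": "APPROVED", "version": 2}
# Caveat of the "or" idiom: a falsy argument such as version=0 falls back to the default.
assert make_approval(version=0)["version"] == 1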
@@ -1,53 +1,39 @@
 import json
-from tests.base_test import BaseTest
+import random
+import string
+
+from flask import g
+
+from tests.base_test import BaseTest
 from crc import session, db
-from crc.models.approval import ApprovalModel, ApprovalSchema, ApprovalStatus
-from crc.models.protocol_builder import ProtocolBuilderStatus
+from crc.models.approval import ApprovalModel, ApprovalStatus
 from crc.models.study import StudyModel
 from crc.models.workflow import WorkflowModel


 class TestApprovals(BaseTest):
     def setUp(self):
         """Initial setup shared by all TestApprovals tests"""
         self.load_example_data()
-        self.study = self.create_study()
-        self.workflow = self.create_workflow('random_fact')
-        self.unrelated_study = StudyModel(title="second study",
-                                          protocol_builder_status=ProtocolBuilderStatus.ACTIVE,
-                                          user_uid="dhf8r", primary_investigator_id="dhf8r")
-        self.unrelated_workflow = self.create_workflow('random_fact', study=self.unrelated_study)

-        # TODO: Move to base_test as a helper
-        self.approval = ApprovalModel(
-            study=self.study,
-            workflow=self.workflow,
-            approver_uid='lb3dp',
-            status=ApprovalStatus.PENDING.value,
-            version=1
+        # Add a study with 2 approvers
+        study_workflow_approvals_1 = self._create_study_workflow_approvals(
+            user_uid="dhf8r", title="first study", primary_investigator_id="lb3dp",
+            approver_uids=["lb3dp", "dhf8r"], statuses=[ApprovalStatus.PENDING.value, ApprovalStatus.PENDING.value]
         )
-        session.add(self.approval)
+        self.study = study_workflow_approvals_1['study']
+        self.workflow = study_workflow_approvals_1['workflow']
+        self.approval = study_workflow_approvals_1['approvals'][0]
+        self.approval_2 = study_workflow_approvals_1['approvals'][1]

-        self.approval_2 = ApprovalModel(
-            study=self.study,
-            workflow=self.workflow,
-            approver_uid='dhf8r',
-            status=ApprovalStatus.PENDING.value,
-            version=1
+        # Add a study with 1 approver
+        study_workflow_approvals_2 = self._create_study_workflow_approvals(
+            user_uid="dhf8r", title="second study", primary_investigator_id="dhf8r",
+            approver_uids=["lb3dp"], statuses=[ApprovalStatus.PENDING.value]
         )
-        session.add(self.approval_2)

-        # A third study, unrelated to the first.
-        self.approval_3 = ApprovalModel(
-            study=self.unrelated_study,
-            workflow=self.unrelated_workflow,
-            approver_uid='lb3dp',
-            status=ApprovalStatus.PENDING.value,
-            version=1
-        )
-        session.add(self.approval_3)

-        session.commit()
+        self.unrelated_study = study_workflow_approvals_2['study']
+        self.unrelated_workflow = study_workflow_approvals_2['workflow']
+        self.approval_3 = study_workflow_approvals_2['approvals'][0]

     def test_list_approvals_per_approver(self):
         """Only approvals associated with approver should be returned"""
@@ -85,7 +71,7 @@ class TestApprovals(BaseTest):
         response = json.loads(rv.get_data(as_text=True))
         response_count = len(response)
         self.assertEqual(1, response_count)
-        self.assertEqual(1, len(response[0]['related_approvals'])) # this approval has a related approval.
+        self.assertEqual(1, len(response[0]['related_approvals']))  # this approval has a related approval.

     def test_update_approval_fails_if_not_the_approver(self):
         approval = session.query(ApprovalModel).filter_by(approver_uid='lb3dp').first()
@@ -145,9 +131,130 @@ class TestApprovals(BaseTest):
         self.assertEqual(approval.status, ApprovalStatus.DECLINED.value)

     def test_csv_export(self):
-        approvals = db.session.query(ApprovalModel).all()
-        for app in approvals:
-            app.status = ApprovalStatus.APPROVED.value
-        db.session.commit()
+        self.load_test_spec('two_forms')
+        self._add_lots_of_random_approvals(n=50, workflow_spec_name='two_forms')
+
+        # Get all workflows
+        workflows = db.session.query(WorkflowModel).filter_by(workflow_spec_id='two_forms').all()
+
+        # For each workflow, complete all tasks
+        for workflow in workflows:
+            workflow_api = self.get_workflow_api(workflow, user_uid=workflow.study.user_uid)
+            self.assertEqual('two_forms', workflow_api.workflow_spec_id)
+
+            # Log current user out.
+            g.user = None
+            self.assertIsNone(g.user)
+
+            # Complete the form for Step one and post it.
+            self.complete_form(workflow, workflow_api.next_task, {"color": "blue"}, error_code=None, user_uid=workflow.study.user_uid)
+
+            # Get the next Task
+            workflow_api = self.get_workflow_api(workflow, user_uid=workflow.study.user_uid)
+            self.assertEqual("StepTwo", workflow_api.next_task.name)
+
+            # Get all user Tasks and check that the data have been saved
+            task = workflow_api.next_task
+            self.assertIsNotNone(task.data)
+            for val in task.data.values():
+                self.assertIsNotNone(val)
+
         rv = self.app.get(f'/v1.0/approval/csv', headers=self.logged_in_headers())
-        self.assert_success(rv)
+        self.assert_success(rv)
+
+    def test_all_approvals(self):
+        self._add_lots_of_random_approvals()
+
+        not_canceled = session.query(ApprovalModel).filter(ApprovalModel.status != 'CANCELED').all()
+        not_canceled_study_ids = []
+        for a in not_canceled:
+            if a.study_id not in not_canceled_study_ids:
+                not_canceled_study_ids.append(a.study_id)
+
+        rv_all = self.app.get(f'/v1.0/all_approvals?status=false', headers=self.logged_in_headers())
+        self.assert_success(rv_all)
+        all_data = json.loads(rv_all.get_data(as_text=True))
+        self.assertEqual(len(all_data), len(not_canceled_study_ids), 'Should return all non-canceled approvals, grouped by study')
+
+        all_approvals = session.query(ApprovalModel).all()
+        all_approvals_study_ids = []
+        for a in all_approvals:
+            if a.study_id not in all_approvals_study_ids:
+                all_approvals_study_ids.append(a.study_id)
+
+        rv_all = self.app.get(f'/v1.0/all_approvals?status=true', headers=self.logged_in_headers())
+        self.assert_success(rv_all)
+        all_data = json.loads(rv_all.get_data(as_text=True))
+        self.assertEqual(len(all_data), len(all_approvals_study_ids), 'Should return all approvals, grouped by study')
+
+    def test_approvals_counts(self):
+        statuses = [name for name, value in ApprovalStatus.__members__.items()]
+        self._add_lots_of_random_approvals()
+
+        # Get the counts
+        rv_counts = self.app.get(f'/v1.0/approval-counts', headers=self.logged_in_headers())
+        self.assert_success(rv_counts)
+        counts = json.loads(rv_counts.get_data(as_text=True))
+
+        # Get the actual approvals
+        rv_approvals = self.app.get(f'/v1.0/approval', headers=self.logged_in_headers())
+        self.assert_success(rv_approvals)
+        approvals = json.loads(rv_approvals.get_data(as_text=True))
+
+        # Tally up the number of approvals in each status category
+        manual_counts = {}
+        for status in statuses:
+            manual_counts[status] = 0
+
+        for approval in approvals:
+            manual_counts[approval['status']] += 1
+
+        # Numbers in each category should match
+        for status in statuses:
+            self.assertEqual(counts[status], manual_counts[status], 'Approval counts for status %s should match' % status)
+
+        # Total number of approvals should match
+        total_counts = sum(counts[status] for status in statuses)
+        self.assertEqual(total_counts, len(approvals), 'Total approval counts for user should match number of approvals for user')
+
+    def _create_study_workflow_approvals(self, user_uid, title, primary_investigator_id, approver_uids, statuses,
+                                         workflow_spec_name="random_fact"):
+        study = self.create_study(uid=user_uid, title=title, primary_investigator_id=primary_investigator_id)
+        workflow = self.create_workflow(workflow_name=workflow_spec_name, study=study)
+        approvals = []
+
+        for i in range(len(approver_uids)):
+            approvals.append(self.create_approval(
+                study=study,
+                workflow=workflow,
+                approver_uid=approver_uids[i],
+                status=statuses[i],
+                version=1
+            ))
+
+        return {
+            'study': study,
+            'workflow': workflow,
+            'approvals': approvals,
+        }
+
+    def _add_lots_of_random_approvals(self, n=100, workflow_spec_name="random_fact"):
+        num_studies_before = db.session.query(StudyModel).count()
+        statuses = [name for name, value in ApprovalStatus.__members__.items()]
+
+        # Add a whole bunch of approvals with random statuses
+        for i in range(n):
+            approver_uids = random.choices(["lb3dp", "dhf8r"])
+            self._create_study_workflow_approvals(
+                user_uid=random.choice(["lb3dp", "dhf8r"]),
+                title="".join(random.choices(string.ascii_lowercase, k=64)),
+                primary_investigator_id=random.choice(["lb3dp", "dhf8r"]),
+                approver_uids=approver_uids,
+                statuses=random.choices(statuses, k=len(approver_uids)),
+                workflow_spec_name=workflow_spec_name
+            )
+
+        session.flush()
+        num_studies_after = db.session.query(StudyModel).count()
+        self.assertEqual(num_studies_after, num_studies_before + n)

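One detail worth noting in _add_lots_of_random_approvals above: random.choices(["lb3dp", "dhf8r"]) returns a one-element list (k defaults to 1), so each generated study gets exactly one approver and a matching one-element status list. A quick sketch of the difference between random.choice and random.choices:

import random

random.seed(1)  # deterministic output for the example

single_uid = random.choice(["lb3dp", "dhf8r"])      # a single string
uid_list = random.choices(["lb3dp", "dhf8r"])       # a list of length 1 (k defaults to 1)
uid_pair = random.choices(["lb3dp", "dhf8r"], k=2)  # a list of length 2, sampled with replacement

assert isinstance(single_uid, str)
assert isinstance(uid_list, list) and len(uid_list) == 1
assert len(uid_pair) == 2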
@@ -157,10 +157,12 @@ class TestStudyService(BaseTest):

     def test_get_all_studies(self):
         user = self.create_user_with_study_and_workflow()
+        study = db.session.query(StudyModel).filter_by(user_uid=user.uid).first()
+        self.assertIsNotNone(study)

         # Add a document to the study with the correct code.
-        workflow1 = self.create_workflow('docx')
-        workflow2 = self.create_workflow('empty_workflow')
+        workflow1 = self.create_workflow('docx', study=study)
+        workflow2 = self.create_workflow('empty_workflow', study=study)

         # Add files to both workflows.
         FileService.add_workflow_file(workflow_id=workflow1.id,
@@ -4,85 +4,14 @@ import random
 from unittest.mock import patch

 from tests.base_test import BaseTest

 from crc import session, app
 from crc.models.api_models import WorkflowApiSchema, MultiInstanceType, TaskSchema
 from crc.models.file import FileModelSchema
-from crc.models.stats import TaskEventModel
 from crc.models.workflow import WorkflowStatus
-from crc.services.workflow_service import WorkflowService


 class TestTasksApi(BaseTest):

-    def get_workflow_api(self, workflow, soft_reset=False, hard_reset=False):
-        rv = self.app.get('/v1.0/workflow/%i?soft_reset=%s&hard_reset=%s' %
-                          (workflow.id, str(soft_reset), str(hard_reset)),
-                          headers=self.logged_in_headers(),
-                          content_type="application/json")
-        self.assert_success(rv)
-        json_data = json.loads(rv.get_data(as_text=True))
-        workflow_api = WorkflowApiSchema().load(json_data)
-        self.assertEqual(workflow.workflow_spec_id, workflow_api.workflow_spec_id)
-        return workflow_api
-
-    def complete_form(self, workflow_in, task_in, dict_data, error_code = None):
-        prev_completed_task_count = workflow_in.completed_tasks
-        if isinstance(task_in, dict):
-            task_id = task_in["id"]
-        else:
-            task_id = task_in.id
-        rv = self.app.put('/v1.0/workflow/%i/task/%s/data' % (workflow_in.id, task_id),
-                          headers=self.logged_in_headers(),
-                          content_type="application/json",
-                          data=json.dumps(dict_data))
-        if error_code:
-            self.assert_failure(rv, error_code=error_code)
-            return
-
-        self.assert_success(rv)
-        json_data = json.loads(rv.get_data(as_text=True))
-
-        # Assure stats are updated on the model
-        workflow = WorkflowApiSchema().load(json_data)
-        # The total number of tasks may change over time, as users move through gateways
-        # branches may be pruned. As we hit parallel Multi-Instance new tasks may be created...
-        self.assertIsNotNone(workflow.total_tasks)
-        self.assertEqual(prev_completed_task_count + 1, workflow.completed_tasks)
-        # Assure a record exists in the Task Events
-        task_events = session.query(TaskEventModel) \
-            .filter_by(workflow_id=workflow.id) \
-            .filter_by(task_id=task_id) \
-            .order_by(TaskEventModel.date.desc()).all()
-        self.assertGreater(len(task_events), 0)
-        event = task_events[0]
-        self.assertIsNotNone(event.study_id)
-        self.assertEqual("dhf8r", event.user_uid)
-        self.assertEqual(workflow.id, event.workflow_id)
-        self.assertEqual(workflow.workflow_spec_id, event.workflow_spec_id)
-        self.assertEqual(workflow.spec_version, event.spec_version)
-        self.assertEqual(WorkflowService.TASK_ACTION_COMPLETE, event.action)
-        self.assertEqual(task_in.id, task_id)
-        self.assertEqual(task_in.name, event.task_name)
-        self.assertEqual(task_in.title, event.task_title)
-        self.assertEqual(task_in.type, event.task_type)
-        self.assertEqual("COMPLETED", event.task_state)
-        # Not sure what vodoo is happening inside of marshmallow to get me in this state.
-        if isinstance(task_in.multi_instance_type, MultiInstanceType):
-            self.assertEqual(task_in.multi_instance_type.value, event.mi_type)
-        else:
-            self.assertEqual(task_in.multi_instance_type, event.mi_type)
-
-        self.assertEqual(task_in.multi_instance_count, event.mi_count)
-        self.assertEqual(task_in.multi_instance_index, event.mi_index)
-        self.assertEqual(task_in.process_name, event.process_name)
-        self.assertIsNotNone(event.date)
-
-
-        workflow = WorkflowApiSchema().load(json_data)
-        return workflow
-
-
     def test_get_current_user_tasks(self):
         self.load_example_data()
         workflow = self.create_workflow('random_fact')
@@ -185,6 +114,7 @@ class TestTasksApi(BaseTest):
         self.load_example_data()
+        self.create_reference_document()
         workflow = self.create_workflow('docx')

         # get the first form in the two form workflow.
         task = self.get_workflow_api(workflow).next_task
         data = {
@@ -203,6 +133,7 @@ class TestTasksApi(BaseTest):
         json_data = json.loads(rv.get_data(as_text=True))
         files = FileModelSchema(many=True).load(json_data, session=session)
         self.assertTrue(len(files) == 1)

+        # Assure we can still delete the study even when there is a file attached to a workflow.
         rv = self.app.delete('/v1.0/study/%i' % workflow.study_id, headers=self.logged_in_headers())
         self.assert_success(rv)