Prevents non-admin users from editing each other's tasks. Fixes a bug where the test user uid was not being set from the token. Moves the complete_form and get_workflow_api test utility methods into BaseTest.
This commit is contained in:
parent 691258a4fb
commit 561e254315
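In outline, the access check this commit adds to the workflow API works as follows. This is a condensed sketch of the __get_user_uid helper from the diff below; the guard clauses are reordered for readability, but the behavior is equivalent:

    def __get_user_uid(user_uid):
        # No authenticated user on the request: reject with 401.
        if 'user' not in g:
            raise ApiError("logged_out", "You are no longer logged in.", status_code=401)
        # Admins may edit anyone's task data; everyone else only their own (403 otherwise).
        if g.user.uid not in app.config['ADMIN_UIDS'] and user_uid != g.user.uid:
            raise ApiError("permission_denied",
                           "You are not authorized to edit the task data for this workflow.",
                           status_code=403)
        return g.user.uid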
@@ -31,7 +31,7 @@ def verify_token(token=None):
     failure_error = ApiError("invalid_token", "Unable to decode the token you provided. Please re-authenticate",
                              status_code=403)
 
-    if not _is_production():
+    if not _is_production() and (token is None or 'user' not in g):
         g.user = UserModel.query.first()
         token = g.user.encode_auth_token()
 
@@ -132,6 +132,7 @@ def login(
     # X-Forwarded-Server: dev.crconnect.uvadcos.io
     # Connection: Keep-Alive
 
+    # If we're in production, override any uid with the uid from the SSO request headers
     if _is_production():
         uid = _get_request_uid(request)
 
@@ -175,6 +176,7 @@ def _handle_login(user_info: LdapModel, redirect_url=None):
     Response. 302 - Redirects to the frontend auth callback URL, with auth token appended.
     """
     user = _upsert_user(user_info)
+    g.user = user
 
     # Return the frontend auth callback URL, with auth token appended.
     auth_token = user.encode_auth_token().decode()
@@ -1,6 +1,8 @@
 import uuid
 
-from crc import session
+from flask import g
+
+from crc import session, app
 from crc.api.common import ApiError, ApiErrorSchema
 from crc.models.api_models import WorkflowApi, WorkflowApiSchema, NavigationItem, NavigationItemSchema
 from crc.models.file import FileModel, LookupDataSchema
@@ -156,6 +158,7 @@ def delete_workflow(workflow_id):
 
 def set_current_task(workflow_id, task_id):
     workflow_model = session.query(WorkflowModel).filter_by(id=workflow_id).first()
+    user_uid = __get_user_uid(workflow_model.study.user_uid)
     processor = WorkflowProcessor(workflow_model)
     task_id = uuid.UUID(task_id)
     task = processor.bpmn_workflow.get_task(task_id)
@@ -167,13 +170,21 @@ def set_current_task(workflow_id, task_id):
     if task.state == task.COMPLETED:
         task.reset_token(reset_data=False)  # we could optionally clear the previous data.
     processor.save()
-    WorkflowService.log_task_action(processor, task, WorkflowService.TASK_ACTION_TOKEN_RESET)
+    WorkflowService.log_task_action(user_uid, processor, task, WorkflowService.TASK_ACTION_TOKEN_RESET)
     workflow_api_model = __get_workflow_api_model(processor, task)
     return WorkflowApiSchema().dump(workflow_api_model)
 
 
 def update_task(workflow_id, task_id, body):
     workflow_model = session.query(WorkflowModel).filter_by(id=workflow_id).first()
+
+    if workflow_model is None:
+        raise ApiError("invalid_workflow_id", "The given workflow id is not valid.", status_code=404)
+
+    elif workflow_model.study is None:
+        raise ApiError("invalid_study", "There is no study associated with the given workflow.", status_code=404)
+
+    user_uid = __get_user_uid(workflow_model.study.user_uid)
     processor = WorkflowProcessor(workflow_model)
     task_id = uuid.UUID(task_id)
     task = processor.bpmn_workflow.get_task(task_id)
@@ -184,7 +195,7 @@ def update_task(workflow_id, task_id, body):
     processor.complete_task(task)
     processor.do_engine_steps()
     processor.save()
-    WorkflowService.log_task_action(processor, task, WorkflowService.TASK_ACTION_COMPLETE)
+    WorkflowService.log_task_action(user_uid, processor, task, WorkflowService.TASK_ACTION_COMPLETE)
 
     workflow_api_model = __get_workflow_api_model(processor)
     return WorkflowApiSchema().dump(workflow_api_model)
@@ -239,3 +250,14 @@ def lookup(workflow_id, field_id, query, limit):
     workflow = session.query(WorkflowModel).filter(WorkflowModel.id == workflow_id).first()
     lookup_data = LookupService.lookup(workflow, field_id, query, limit)
     return LookupDataSchema(many=True).dump(lookup_data)
+
+
+def __get_user_uid(user_uid):
+    if 'user' in g:
+        if g.user.uid not in app.config['ADMIN_UIDS'] and user_uid != g.user.uid:
+            raise ApiError("permission_denied", "You are not authorized to edit the task data for this workflow.", status_code=403)
+        else:
+            return g.user.uid
+
+    else:
+        raise ApiError("logged_out", "You are no longer logged in.", status_code=401)
@@ -19,7 +19,7 @@ class UserModel(db.Model):
     last_name = db.Column(db.String, nullable=True)
     title = db.Column(db.String, nullable=True)
 
-    # Add Department and School
+    # TODO: Add Department and School
 
 
     def encode_auth_token(self):
@@ -86,8 +86,8 @@ class StudyService(object):
     def delete_workflow(workflow):
         for file in session.query(FileModel).filter_by(workflow_id=workflow.id).all():
             FileService.delete_file(file.id)
-        for deb in workflow.dependencies:
-            session.delete(deb)
+        for dep in workflow.dependencies:
+            session.delete(dep)
         session.query(TaskEventModel).filter_by(workflow_id=workflow.id).delete()
         session.query(WorkflowModel).filter_by(id=workflow.id).delete()
 
@@ -58,7 +58,7 @@ class WorkflowService(object):
 
     @staticmethod
     def delete_test_data():
-        for study in db.session.query(StudyModel).filter(StudyModel.user_uid=="test"):
+        for study in db.session.query(StudyModel).filter_by(user_uid="test"):
             StudyService.delete_study(study.id)
         db.session.commit()
 
@@ -318,12 +318,12 @@ class WorkflowService(object):
             field.options.append({"id": d.value, "name": d.label})
 
     @staticmethod
-    def log_task_action(processor, spiff_task, action):
+    def log_task_action(user_uid, processor, spiff_task, action):
         task = WorkflowService.spiff_task_to_api_task(spiff_task)
         workflow_model = processor.workflow_model
         task_event = TaskEventModel(
             study_id=workflow_model.study_id,
-            user_uid=g.user.uid,
+            user_uid=user_uid,
             workflow_id=workflow_model.id,
             workflow_spec_id=workflow_model.workflow_spec_id,
             spec_version=processor.get_version_string(),
@@ -2,26 +2,27 @@
 # IMPORTANT - Environment must be loaded before app, models, etc....
 import os
 
-from flask import g
-from sqlalchemy import Sequence
-
 os.environ["TESTING"] = "true"
 
 import json
 import unittest
 import urllib.parse
 import datetime
 
-from crc.models.approval import ApprovalModel, ApprovalStatus
-from crc.models.protocol_builder import ProtocolBuilderStatus
-from crc.models.study import StudyModel
-from crc.services.file_service import FileService
-from crc.services.study_service import StudyService
-from crc.models.file import FileModel, FileDataModel, CONTENT_TYPES
-from crc.models.workflow import WorkflowSpecModel, WorkflowSpecModelSchema, WorkflowModel
-from crc.models.user import UserModel
+from flask import g
+from sqlalchemy import Sequence
+
 from crc import app, db, session
+from crc.models.api_models import WorkflowApiSchema, MultiInstanceType
+from crc.models.approval import ApprovalModel, ApprovalStatus
+from crc.models.file import FileModel, FileDataModel, CONTENT_TYPES
+from crc.models.protocol_builder import ProtocolBuilderStatus
+from crc.models.stats import TaskEventModel
+from crc.models.study import StudyModel
+from crc.models.user import UserModel
+from crc.models.workflow import WorkflowSpecModel, WorkflowSpecModelSchema, WorkflowModel
+from crc.services.file_service import FileService
+from crc.services.study_service import StudyService
+from crc.services.workflow_service import WorkflowService
 from example_data import ExampleDataLoader
 
 #UNCOMMENT THIS FOR DEBUGGING SQL ALCHEMY QUERIES
@@ -40,6 +41,7 @@ class BaseTest(unittest.TestCase):
 
     auths = {}
     test_uid = "dhf8r"
+    flask_globals = g
 
     users = [
         {
@@ -97,7 +99,7 @@ class BaseTest(unittest.TestCase):
 
     def tearDown(self):
         ExampleDataLoader.clean_db()
-        g.user = None
+        self.flask_globals.user = None
         self.auths = {}
 
     def logged_in_headers(self, user=None, redirect_url='http://some/frontend/url'):
@@ -110,12 +112,15 @@ class BaseTest(unittest.TestCase):
 
         query_string = self.user_info_to_query_string(user_info, redirect_url)
         rv = self.app.get("/v1.0/login%s" % query_string, follow_redirects=False)
 
         self.assertTrue(rv.status_code == 302)
         self.assertTrue(str.startswith(rv.location, redirect_url))
 
         user_model = session.query(UserModel).filter_by(uid=uid).first()
         self.assertIsNotNone(user_model.display_name)
         self.assertEqual(user_model.uid, uid)
+        self.assertTrue('user' in self.flask_globals, 'User should be in Flask globals')
+        self.assertEqual(uid, self.flask_globals.user.uid, 'Logged in user should match given user uid')
 
         return dict(Authorization='Bearer ' + user_model.encode_auth_token().decode())
 
     def load_example_data(self, use_crc_data=False, use_rrt_data=False):
@@ -162,6 +167,7 @@ class BaseTest(unittest.TestCase):
     @staticmethod
     def load_test_spec(dir_name, master_spec=False, category_id=None):
         """Loads a spec into the database based on a directory in /tests/data"""
+
         if session.query(WorkflowSpecModel).filter_by(id=dir_name).count() > 0:
             return session.query(WorkflowSpecModel).filter_by(id=dir_name).first()
         filepath = os.path.join(app.root_path, '..', 'tests', 'data', dir_name, "*")
@@ -271,3 +277,78 @@ class BaseTest(unittest.TestCase):
         db.session.commit()
         return approval
 
+    def get_workflow_api(self, workflow, soft_reset=False, hard_reset=False, user_uid="dhf8r"):
+        user = session.query(UserModel).filter_by(uid=user_uid).first()
+        self.assertIsNotNone(user)
+
+        rv = self.app.get('/v1.0/workflow/%i?soft_reset=%s&hard_reset=%s' %
+                          (workflow.id, str(soft_reset), str(hard_reset)),
+                          headers=self.logged_in_headers(user),
+                          content_type="application/json")
+        self.assert_success(rv)
+        json_data = json.loads(rv.get_data(as_text=True))
+        workflow_api = WorkflowApiSchema().load(json_data)
+        self.assertEqual(workflow.workflow_spec_id, workflow_api.workflow_spec_id)
+        return workflow_api
+
+    def complete_form(self, workflow_in, task_in, dict_data, error_code=None, user_uid="dhf8r"):
+        prev_completed_task_count = workflow_in.completed_tasks
+        if isinstance(task_in, dict):
+            task_id = task_in["id"]
+        else:
+            task_id = task_in.id
+
+        user = session.query(UserModel).filter_by(uid=user_uid).first()
+        self.assertIsNotNone(user)
+
+        rv = self.app.put('/v1.0/workflow/%i/task/%s/data' % (workflow_in.id, task_id),
+                          headers=self.logged_in_headers(user=user),
+                          content_type="application/json",
+                          data=json.dumps(dict_data))
+        if error_code:
+            self.assert_failure(rv, error_code=error_code)
+            return
+
+        self.assert_success(rv)
+        json_data = json.loads(rv.get_data(as_text=True))
+
+        # Assure stats are updated on the model
+        workflow = WorkflowApiSchema().load(json_data)
+        # The total number of tasks may change over time, as users move through gateways
+        # branches may be pruned. As we hit parallel Multi-Instance new tasks may be created...
+        self.assertIsNotNone(workflow.total_tasks)
+        self.assertEqual(prev_completed_task_count + 1, workflow.completed_tasks)
+
+        # Assure a record exists in the Task Events
+        task_events = session.query(TaskEventModel) \
+            .filter_by(workflow_id=workflow.id) \
+            .filter_by(task_id=task_id) \
+            .order_by(TaskEventModel.date.desc()).all()
+        self.assertGreater(len(task_events), 0)
+        event = task_events[0]
+        self.assertIsNotNone(event.study_id)
+        self.assertEqual(user_uid, event.user_uid)
+        self.assertEqual(workflow.id, event.workflow_id)
+        self.assertEqual(workflow.workflow_spec_id, event.workflow_spec_id)
+        self.assertEqual(workflow.spec_version, event.spec_version)
+        self.assertEqual(WorkflowService.TASK_ACTION_COMPLETE, event.action)
+        self.assertEqual(task_in.id, task_id)
+        self.assertEqual(task_in.name, event.task_name)
+        self.assertEqual(task_in.title, event.task_title)
+        self.assertEqual(task_in.type, event.task_type)
+        self.assertEqual("COMPLETED", event.task_state)
+
+        # Not sure what voodoo is happening inside of marshmallow to get me in this state.
+        if isinstance(task_in.multi_instance_type, MultiInstanceType):
+            self.assertEqual(task_in.multi_instance_type.value, event.mi_type)
+        else:
+            self.assertEqual(task_in.multi_instance_type, event.mi_type)
+
+        self.assertEqual(task_in.multi_instance_count, event.mi_count)
+        self.assertEqual(task_in.multi_instance_index, event.mi_index)
+        self.assertEqual(task_in.process_name, event.process_name)
+        self.assertIsNotNone(event.date)
+
+
+        workflow = WorkflowApiSchema().load(json_data)
+        return workflow
@@ -3,11 +3,10 @@ import random
 import string
 
 from tests.base_test import BaseTest
 
 from crc import session, db
-from crc.models.approval import ApprovalModel, ApprovalSchema, ApprovalStatus
-from crc.models.protocol_builder import ProtocolBuilderStatus
+from crc.models.approval import ApprovalModel, ApprovalStatus
 from crc.models.study import StudyModel
 from crc.models.workflow import WorkflowModel
 
 
 class TestApprovals(BaseTest):
@@ -130,12 +129,33 @@ class TestApprovals(BaseTest):
         self.assertEqual(approval.status, ApprovalStatus.DECLINED.value)
 
     def test_csv_export(self):
-        self._add_lots_of_random_approvals()
+        self.load_test_spec('two_forms')
+        self._add_lots_of_random_approvals(n=50, workflow_spec_name='two_forms')
 
-        # approvals = db.session.query(ApprovalModel).all()
-        # for app in approvals:
-        #     app.status = ApprovalStatus.APPROVED.value
-        # db.session.commit()
+        # Get all workflows
+        workflows = db.session.query(WorkflowModel).filter_by(workflow_spec_id='two_forms').all()
+
+        # For each workflow, complete all tasks
+        for workflow in workflows:
+            workflow_api = self.get_workflow_api(workflow, user_uid=workflow.study.user_uid)
+            self.assertEqual('two_forms', workflow_api.workflow_spec_id)
+
+            # Log current user out.
+            self.flask_globals.user = None
+            self.assertIsNone(self.flask_globals.user)
+
+            # Complete the form for Step one and post it.
+            self.complete_form(workflow, workflow_api.next_task, {"color": "blue"}, error_code=None, user_uid=workflow.study.user_uid)
+
+            # Get the next Task
+            workflow_api = self.get_workflow_api(workflow, user_uid=workflow.study.user_uid)
+            self.assertEqual("StepTwo", workflow_api.next_task.name)
+
+            # Get all user Tasks and check that the data have been saved
+            task = workflow_api.next_task
+            self.assertIsNotNone(task.data)
+            for val in task.data.values():
+                self.assertIsNotNone(val)
 
         rv = self.app.get(f'/v1.0/approval/csv', headers=self.logged_in_headers())
         self.assert_success(rv)
@@ -195,9 +215,10 @@ class TestApprovals(BaseTest):
         total_counts = sum(counts[status] for status in statuses)
         self.assertEqual(total_counts, len(approvals), 'Total approval counts for user should match number of approvals for user')
 
-    def _create_study_workflow_approvals(self, user_uid, title, primary_investigator_id, approver_uids, statuses):
+    def _create_study_workflow_approvals(self, user_uid, title, primary_investigator_id, approver_uids, statuses,
+                                         workflow_spec_name="random_fact"):
         study = self.create_study(uid=user_uid, title=title, primary_investigator_id=primary_investigator_id)
-        workflow = self.create_workflow('random_fact', study=study)
+        workflow = self.create_workflow(workflow_name=workflow_spec_name, study=study)
         approvals = []
 
         for i in range(len(approver_uids)):
@@ -215,22 +236,23 @@ class TestApprovals(BaseTest):
             'approvals': approvals,
         }
 
-    def _add_lots_of_random_approvals(self):
+    def _add_lots_of_random_approvals(self, n=100, workflow_spec_name="random_fact"):
         num_studies_before = db.session.query(StudyModel).count()
         statuses = [name for name, value in ApprovalStatus.__members__.items()]
 
         # Add a whole bunch of approvals with random statuses
-        for i in range(100):
+        for i in range(n):
             approver_uids = random.choices(["lb3dp", "dhf8r"])
             self._create_study_workflow_approvals(
                 user_uid=random.choice(["lb3dp", "dhf8r"]),
                 title="".join(random.choices(string.ascii_lowercase, k=64)),
                 primary_investigator_id=random.choice(["lb3dp", "dhf8r"]),
                 approver_uids=approver_uids,
-                statuses=random.choices(statuses, k=len(approver_uids))
+                statuses=random.choices(statuses, k=len(approver_uids)),
+                workflow_spec_name=workflow_spec_name
             )
 
         session.flush()
         num_studies_after = db.session.query(StudyModel).count()
-        self.assertEqual(num_studies_after, num_studies_before + 100)
+        self.assertEqual(num_studies_after, num_studies_before + n)
@@ -4,85 +4,14 @@ import random
 from unittest.mock import patch
 
 from tests.base_test import BaseTest
 
 from crc import session, app
 from crc.models.api_models import WorkflowApiSchema, MultiInstanceType, TaskSchema
 from crc.models.file import FileModelSchema
 from crc.models.stats import TaskEventModel
 from crc.models.workflow import WorkflowStatus
-from crc.services.workflow_service import WorkflowService
 
 
 class TestTasksApi(BaseTest):
 
-    def get_workflow_api(self, workflow, soft_reset=False, hard_reset=False):
-        rv = self.app.get('/v1.0/workflow/%i?soft_reset=%s&hard_reset=%s' %
-                          (workflow.id, str(soft_reset), str(hard_reset)),
-                          headers=self.logged_in_headers(),
-                          content_type="application/json")
-        self.assert_success(rv)
-        json_data = json.loads(rv.get_data(as_text=True))
-        workflow_api = WorkflowApiSchema().load(json_data)
-        self.assertEqual(workflow.workflow_spec_id, workflow_api.workflow_spec_id)
-        return workflow_api
-
-    def complete_form(self, workflow_in, task_in, dict_data, error_code = None):
-        prev_completed_task_count = workflow_in.completed_tasks
-        if isinstance(task_in, dict):
-            task_id = task_in["id"]
-        else:
-            task_id = task_in.id
-        rv = self.app.put('/v1.0/workflow/%i/task/%s/data' % (workflow_in.id, task_id),
-                          headers=self.logged_in_headers(),
-                          content_type="application/json",
-                          data=json.dumps(dict_data))
-        if error_code:
-            self.assert_failure(rv, error_code=error_code)
-            return
-
-        self.assert_success(rv)
-        json_data = json.loads(rv.get_data(as_text=True))
-
-        # Assure stats are updated on the model
-        workflow = WorkflowApiSchema().load(json_data)
-        # The total number of tasks may change over time, as users move through gateways
-        # branches may be pruned. As we hit parallel Multi-Instance new tasks may be created...
-        self.assertIsNotNone(workflow.total_tasks)
-        self.assertEqual(prev_completed_task_count + 1, workflow.completed_tasks)
-        # Assure a record exists in the Task Events
-        task_events = session.query(TaskEventModel) \
-            .filter_by(workflow_id=workflow.id) \
-            .filter_by(task_id=task_id) \
-            .order_by(TaskEventModel.date.desc()).all()
-        self.assertGreater(len(task_events), 0)
-        event = task_events[0]
-        self.assertIsNotNone(event.study_id)
-        self.assertEqual("dhf8r", event.user_uid)
-        self.assertEqual(workflow.id, event.workflow_id)
-        self.assertEqual(workflow.workflow_spec_id, event.workflow_spec_id)
-        self.assertEqual(workflow.spec_version, event.spec_version)
-        self.assertEqual(WorkflowService.TASK_ACTION_COMPLETE, event.action)
-        self.assertEqual(task_in.id, task_id)
-        self.assertEqual(task_in.name, event.task_name)
-        self.assertEqual(task_in.title, event.task_title)
-        self.assertEqual(task_in.type, event.task_type)
-        self.assertEqual("COMPLETED", event.task_state)
-        # Not sure what vodoo is happening inside of marshmallow to get me in this state.
-        if isinstance(task_in.multi_instance_type, MultiInstanceType):
-            self.assertEqual(task_in.multi_instance_type.value, event.mi_type)
-        else:
-            self.assertEqual(task_in.multi_instance_type, event.mi_type)
-
-        self.assertEqual(task_in.multi_instance_count, event.mi_count)
-        self.assertEqual(task_in.multi_instance_index, event.mi_index)
-        self.assertEqual(task_in.process_name, event.process_name)
-        self.assertIsNotNone(event.date)
-
-
-        workflow = WorkflowApiSchema().load(json_data)
-        return workflow
-
-
     def test_get_current_user_tasks(self):
         self.load_example_data()
         workflow = self.create_workflow('random_fact')
@@ -185,6 +114,7 @@ class TestTasksApi(BaseTest):
         self.load_example_data()
         self.create_reference_document()
         workflow = self.create_workflow('docx')
+
         # get the first form in the two form workflow.
         task = self.get_workflow_api(workflow).next_task
         data = {
@@ -203,6 +133,7 @@ class TestTasksApi(BaseTest):
         json_data = json.loads(rv.get_data(as_text=True))
         files = FileModelSchema(many=True).load(json_data, session=session)
         self.assertTrue(len(files) == 1)
 
+        # Assure we can still delete the study even when there is a file attached to a workflow.
         rv = self.app.delete('/v1.0/study/%i' % workflow.study_id, headers=self.logged_in_headers())
        self.assert_success(rv)