commit 3d6ed3bb48
@@ -703,12 +703,19 @@ paths:
         description: The string to search for in the Value column of the lookup table.
         schema:
           type: string
+      - name: value
+        in: query
+        required: false
+        description: An alternative to query, this accepts the specific value or id selected in a dropdown list or auto-complete, and will return the one matching record. Useful for getting additional details about an item selected in a dropdown.
+        schema:
+          type: string
       - name: limit
         in: query
         required: false
         description: The total number of records to return, defaults to 10.
         schema:
           type: integer

     get:
       operationId: crc.api.workflow.lookup
       summary: Provides type-ahead search against a lookup table associated with a form field.
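For orientation, a minimal sketch of how a client might exercise the two modes of this endpoint (the path and parameters come from the spec and tests in this diff; the base URL, ids, and omitted authentication headers are hypothetical placeholders):

```python
import requests

BASE = "http://localhost:5000/v1.0"        # hypothetical local instance
workflow_id, field_id = 42, "AllTheNames"  # placeholder ids

# Type-ahead mode: full-text search against the lookup table.
hits = requests.get(f"{BASE}/workflow/{workflow_id}/lookup/{field_id}",
                    params={"query": "c", "limit": 5}).json()

# Value mode: fetch the one record matching a value picked from a dropdown.
detail = requests.get(f"{BASE}/workflow/{workflow_id}/lookup/{field_id}",
                      params={"value": hits[0]["value"]}).json()
assert len(detail) == 1  # one matching record, per the new description
```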
@@ -41,7 +41,6 @@ def get_workflow_specification(spec_id):


 def validate_workflow_specification(spec_id):
     errors = []
     try:
         WorkflowService.test_spec(spec_id)

@@ -57,7 +56,6 @@ def validate_workflow_specification(spec_id):
     return ApiErrorSchema(many=True).dump(errors)


 def update_workflow_specification(spec_id, body):
     if spec_id is None:
         raise ApiError('unknown_spec', 'Please provide a valid Workflow Spec ID.')

@@ -200,7 +198,7 @@ def delete_workflow_spec_category(cat_id):
     session.commit()


-def lookup(workflow_id, field_id, query, limit):
+def lookup(workflow_id, field_id, query=None, value=None, limit=10):
     """
     given a field in a task, attempts to find the lookup table or function associated
     with that field and runs a full-text query against it to locate the values and

@@ -208,14 +206,15 @@ def lookup(workflow_id, field_id, query, limit):
     Tries to be fast, but first runs will be very slow.
     """
     workflow = session.query(WorkflowModel).filter(WorkflowModel.id == workflow_id).first()
-    lookup_data = LookupService.lookup(workflow, field_id, query, limit)
+    lookup_data = LookupService.lookup(workflow, field_id, query, value, limit)
     return LookupDataSchema(many=True).dump(lookup_data)


 def __get_user_uid(user_uid):
     if 'user' in g:
         if g.user.uid not in app.config['ADMIN_UIDS'] and user_uid != g.user.uid:
-            raise ApiError("permission_denied", "You are not authorized to edit the task data for this workflow.", status_code=403)
+            raise ApiError("permission_denied", "You are not authorized to edit the task data for this workflow.",
+                           status_code=403)
         else:
             return g.user.uid
@@ -78,7 +78,7 @@ class Approval(object):
             instance.approver = LdapService.user_info(model.approver_uid)
             instance.primary_investigator = LdapService.user_info(model.study.primary_investigator_id)
         except ApiError as ae:
-            app.logger.error("Ldap lookup failed for approval record %i" % model.id)
+            app.logger.error(f'Ldap lookup failed for approval record {model.id}', exc_info=True)

         doc_dictionary = FileService.get_doc_dictionary()
         instance.associated_files = []
@@ -153,6 +153,7 @@ class LookupFileModel(db.Model):
     file_data_model_id = db.Column(db.Integer, db.ForeignKey('file_data.id'))
+    dependencies = db.relationship("LookupDataModel", lazy="select", backref="lookup_file_model", cascade="all, delete, delete-orphan")


 class LookupDataModel(db.Model):
     __tablename__ = 'lookup_data'
     id = db.Column(db.Integer, primary_key=True)

@@ -181,6 +182,7 @@ class LookupDataSchema(SQLAlchemyAutoSchema):
         load_instance = True
         include_relationships = False
         include_fk = False  # Includes foreign keys
+        exclude = ['id']  # Do not include the id field, it should never be used via the API.


 class SimpleFileSchema(ma.Schema):
@@ -31,10 +31,8 @@ class StudyModel(db.Model):
         self.title = pbs.TITLE
         self.user_uid = pbs.NETBADGEID
         self.last_updated = pbs.DATE_MODIFIED
-        self.protocol_builder_status = ProtocolBuilderStatus.INCOMPLETE
-
-        if pbs.Q_COMPLETE:
-            self.protocol_builder_status = ProtocolBuilderStatus.ACTIVE
+        self.protocol_builder_status = ProtocolBuilderStatus.ACTIVE
         if pbs.HSRNUMBER:
             self.protocol_builder_status = ProtocolBuilderStatus.OPEN
         if self.on_hold:
@@ -52,8 +52,7 @@ Email Subject ApprvlApprvr1 PIComputingID
         try:
             uid = task.workflow.script_engine.evaluate_expression(task, arg)
         except Exception as e:
-            app.logger.error(f'Workflow engines could not parse {arg}')
-            app.logger.error(str(e))
+            app.logger.error(f'Workflow engines could not parse {arg}', exc_info=True)
             continue
         user_info = LdapService.user_info(uid)
         email = user_info.email_address
@@ -14,7 +14,7 @@ class StudyInfo(Script):
     """Please see the detailed description that is provided below. """

     pb = ProtocolBuilderService()
-    type_options = ['info', 'investigators', 'details', 'approvals', 'documents', 'protocol']
+    type_options = ['info', 'investigators', 'roles', 'details', 'approvals', 'documents', 'protocol']

     # This is used for test/workflow validation, as well as documentation.
     example_data = {

@@ -106,11 +106,20 @@ Returns the basic information such as the id and title
 ### Investigators ###
 Returns detailed information about related personnel.
-The order returned is guaranteed to match the order provided in the investigators.xslx reference file.
-If possible, detailed information is added in from LDAP about each personnel based on their user_id.
+Detailed information is added from LDAP about each person, based on their user_id.
 ```
 {investigators_example}
 ```
+
+### Investigator Roles ###
+Returns a list of all investigator roles, populating any roles with additional information available from
+the Protocol Builder and LDAP. It's basically just like Investigators, but it includes all the roles, rather
+than just those that were set in Protocol Builder.
+```
+{investigators_example}
+```

 ### Details ###
 Returns detailed information about variable keys read in from the Protocol Builder.

@@ -161,6 +170,12 @@ Returns information specific to the protocol.
         "INVESTIGATORTYPEFULL": "Primary Investigator",
         "NETBADGEID": "dhf8r"
       },
+    "roles":
+      {
+        "INVESTIGATORTYPE": "PI",
+        "INVESTIGATORTYPEFULL": "Primary Investigator",
+        "NETBADGEID": "dhf8r"
+      },
     "details":
       {
         "IS_IND": 0,

@@ -198,6 +213,8 @@ Returns information specific to the protocol.
             self.add_data_to_task(task, {cmd: schema.dump(study)})
         if cmd == 'investigators':
             self.add_data_to_task(task, {cmd: StudyService().get_investigators(study_id)})
+        if cmd == 'roles':
+            self.add_data_to_task(task, {cmd: StudyService().get_investigators(study_id, all=True)})
         if cmd == 'details':
             self.add_data_to_task(task, {cmd: self.pb.get_study_details(study_id)})
         if cmd == 'approvals':
@@ -258,7 +258,7 @@ class ApprovalService(object):
                 f'{approver_info.display_name} - ({approver_info.uid})'
             )
             if mail_result:
-                app.logger.error(mail_result)
+                app.logger.error(mail_result, exc_info=True)
         elif status == ApprovalStatus.DECLINED.value:
             ldap_service = LdapService()
             pi_user_info = ldap_service.user_info(db_approval.study.primary_investigator_id)

@@ -270,7 +270,7 @@ class ApprovalService(object):
                 f'{approver_info.display_name} - ({approver_info.uid})'
             )
             if mail_result:
-                app.logger.error(mail_result)
+                app.logger.error(mail_result, exc_info=True)
             first_approval = ApprovalModel().query.filter_by(
                 study_id=db_approval.study_id, workflow_id=db_approval.workflow_id,
                 status=ApprovalStatus.APPROVED.value, version=db_approval.version).first()

@@ -286,7 +286,7 @@ class ApprovalService(object):
                 f'{approver_info.display_name} - ({approver_info.uid})'
             )
             if mail_result:
-                app.logger.error(mail_result)
+                app.logger.error(mail_result, exc_info=True)
         # TODO: Log update action by approver_uid - maybe ?
         return db_approval


@@ -357,7 +357,7 @@ class ApprovalService(object):
                 f'{approver_info.display_name} - ({approver_info.uid})'
             )
             if mail_result:
-                app.logger.error(mail_result)
+                app.logger.error(mail_result, exc_info=True)
         # send rrp approval request for first approver
         # enhance the second part in case it bombs
         approver_email = [approver_info.email_address] if approver_info.email_address else app.config['FALLBACK_EMAILS']

@@ -367,7 +367,7 @@ class ApprovalService(object):
                 f'{pi_user_info.display_name} - ({pi_user_info.uid})'
             )
             if mail_result:
-                app.logger.error(mail_result)
+                app.logger.error(mail_result, exc_info=True)

     @staticmethod
     def _create_approval_files(workflow_data_files, approval):
@@ -36,6 +36,7 @@ class EmailService(object):

             mail.send(msg)
         except Exception as e:
+            app.logger.error('An exception happened in EmailService', exc_info=True)
             app.logger.error(str(e))

         db.session.add(email_model)
@@ -1,7 +1,9 @@
 import logging
 import re
+from collections import OrderedDict

-from pandas import ExcelFile
+import pandas as pd
+from pandas import ExcelFile, np
 from sqlalchemy import func, desc
 from sqlalchemy.sql.functions import GenericFunction
@@ -19,8 +21,8 @@ class TSRank(GenericFunction):
     package = 'full_text'
     name = 'ts_rank'


 class LookupService(object):
     """Provides tools for doing lookups for auto-complete fields.
     This can currently take two forms:
     1) Lookup from spreadsheet data associated with a workflow specification.

@@ -50,7 +52,7 @@ class LookupService(object):
         # if not, we need to rebuild the lookup table.
         is_current = False
         if lookup_model:
-            is_current = db.session.query(WorkflowSpecDependencyFile).\
+            is_current = db.session.query(WorkflowSpecDependencyFile). \
                 filter(WorkflowSpecDependencyFile.file_data_id == lookup_model.file_data_model_id).count()

         if not is_current:

@@ -62,16 +64,14 @@ class LookupService(object):
         return lookup_model

     @staticmethod
-    def lookup(workflow, field_id, query, limit):
+    def lookup(workflow, field_id, query, value=None, limit=10):

         lookup_model = LookupService.__get_lookup_model(workflow, field_id)

         if lookup_model.is_ldap:
             return LookupService._run_ldap_query(query, limit)
         else:
-            return LookupService._run_lookup_query(lookup_model, query, limit)
-
-
+            return LookupService._run_lookup_query(lookup_model, query, value, limit)

     @staticmethod
     def create_lookup_model(workflow_model, field_id):
@@ -116,8 +116,8 @@ class LookupService(object):
                                            is_ldap=True)
         else:
             raise ApiError("unknown_lookup_option",
-                           "Lookup supports using spreadsheet options or ldap options, and neither "
-                           "was provided.")
+                        "Lookup supports using spreadsheet options or ldap options, and neither "
+                        "was provided.")
         db.session.add(lookup_model)
         db.session.commit()
         return lookup_model
@@ -130,6 +130,7 @@ class LookupService(object):
            changed. """
         xls = ExcelFile(data_model.data)
         df = xls.parse(xls.sheet_names[0])  # Currently we only look at the first sheet.
+        df = pd.DataFrame(df).replace({np.nan: None})
         if value_column not in df:
             raise ApiError("invalid_emum",
                            "The file %s does not contain a column named % s" % (data_model.file_model.name,
@@ -149,39 +150,40 @@ class LookupService(object):
             lookup_data = LookupDataModel(lookup_file_model=lookup_model,
                                           value=row[value_column],
                                           label=row[label_column],
-                                          data=row.to_json())
+                                          data=row.to_dict(OrderedDict))
             db.session.add(lookup_data)
         db.session.commit()
         return lookup_model

     @staticmethod
-    def _run_lookup_query(lookup_file_model, query, limit):
+    def _run_lookup_query(lookup_file_model, query, value, limit):
         db_query = LookupDataModel.query.filter(LookupDataModel.lookup_file_model == lookup_file_model)
-
-        query = re.sub('[^A-Za-z0-9 ]+', '', query)
-        print("Query: " + query)
-        query = query.strip()
-        if len(query) > 0:
-            if ' ' in query:
-                terms = query.split(' ')
-                new_terms = ["'%s'" % query]
-                for t in terms:
-                    new_terms.append("%s:*" % t)
-                new_query = ' | '.join(new_terms)
-            else:
-                new_query = "%s:*" % query
-
-            # Run the full text query
-            db_query = db_query.filter(LookupDataModel.label.match(new_query))
-            # But hackishly order by like, which does a good job of
-            # pulling more relevant matches to the top.
-            db_query = db_query.order_by(desc(LookupDataModel.label.like("%" + query + "%")))
+        if value is not None:  # Then just find the model with that value
+            db_query = db_query.filter(LookupDataModel.value == value)
+        else:
+            # Build a full text query that takes all the terms provided and executes each term as a prefix query, and
+            # OR's those queries together. The order of the results is handled as a standard "Like" on the original
+            # string which seems to work intuitively for most entries.
+            query = re.sub('[^A-Za-z0-9 ]+', '', query)  # Strip out non ascii characters.
+            query = re.sub(r'\s+', ' ', query)  # Convert multiple space like characters to just one space, as we split on spaces.
+            print("Query: " + query)
+            query = query.strip()
+            if len(query) > 0:
+                if ' ' in query:
+                    terms = query.split(' ')
+                    new_terms = ["'%s'" % query]
+                    for t in terms:
+                        new_terms.append("%s:*" % t)
+                    new_query = ' | '.join(new_terms)
+                else:
+                    new_query = "%s:*" % query
+
+                # Run the full text query
+                db_query = db_query.filter(LookupDataModel.label.match(new_query))
+                # But hackishly order by like, which does a good job of
+                # pulling more relevant matches to the top.
+                db_query = db_query.order_by(desc(LookupDataModel.label.like("%" + query + "%")))
         # ORDER BY name LIKE concat('%', ticker, '%') desc, rank DESC

         # db_query = db_query.order_by(desc(func.full_text.ts_rank(
         #    func.to_tsvector(LookupDataModel.label),
         #    func.to_tsquery(query))))
         from sqlalchemy.dialects import postgresql
         logging.getLogger('sqlalchemy.engine').setLevel(logging.INFO)
         result = db_query.limit(limit).all()
         logging.getLogger('sqlalchemy.engine').setLevel(logging.ERROR)
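To make the comment block above concrete, a tiny standalone sketch of the tsquery string it builds (input invented; only the string construction is shown, not the SQLAlchemy call):

```python
import re

def build_tsquery(query: str) -> str:
    """Mirrors the prefix-query construction in _run_lookup_query above."""
    query = re.sub('[^A-Za-z0-9 ]+', '', query)  # strip punctuation
    query = re.sub(r'\s+', ' ', query).strip()   # collapse runs of whitespace
    if not query:
        return ''
    if ' ' in query:
        terms = query.split(' ')
        new_terms = ["'%s'" % query]              # the full phrase, quoted
        new_terms += ["%s:*" % t for t in terms]  # each term as a prefix match
        return ' | '.join(new_terms)              # OR the pieces together
    return "%s:*" % query

print(build_tsquery("1  (!-Something"))  # '1 Something' | 1:* | Something:*
```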
@@ -196,8 +198,8 @@ class LookupService(object):
            we return a lookup data model."""
         user_list = []
         for user in users:
-            user_list.append( {"value": user['uid'],
-                               "label": user['display_name'] + " (" + user['uid'] + ")",
-                               "data": user
-                               })
-        return user_list
+            user_list.append({"value": user['uid'],
+                              "label": user['display_name'] + " (" + user['uid'] + ")",
+                              "data": user
+                              })
+        return user_list
@@ -137,7 +137,7 @@ class StudyService(object):
             try:
                 pb_docs = ProtocolBuilderService.get_required_docs(study_id=study_id)
             except requests.exceptions.ConnectionError as ce:
-                app.logger.error("Failed to connect to the Protocol Builder - %s" % str(ce))
+                app.logger.error(f'Failed to connect to the Protocol Builder - {str(ce)}', exc_info=True)
                 pb_docs = []
         else:
             pb_docs = []
@@ -182,7 +182,7 @@ class StudyService(object):
         return documents

     @staticmethod
-    def get_investigators(study_id):
+    def get_investigators(study_id, all=False):

         # Loop through all known investigator types as set in the reference file
         inv_dictionary = FileService.get_reference_data(FileService.INVESTIGATOR_LIST, 'code')
@@ -199,6 +199,8 @@ class StudyService(object):
             else:
                 inv_dictionary[i_type]['user_id'] = None

+        if not all:
+            inv_dictionary = dict(filter(lambda elem: elem[1]['user_id'] is not None, inv_dictionary.items()))
         return inv_dictionary

     @staticmethod
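A minimal sketch of the difference the new all flag makes, using invented data in the shape get_investigators builds (the type codes and values echo the tests later in this diff):

```python
# Shape of inv_dictionary after the reference-file/LDAP merge above (made up).
inv_dictionary = {
    "PI":      {"label": "Primary Investigator", "user_id": "dhf8r"},
    "DEPT_CH": {"label": "Department Chair",     "user_id": None},
}

# all=True (the new 'roles' study_info option): every known role is returned.
roles = inv_dictionary

# all=False (the original 'investigators' behavior): drop unfilled roles.
investigators = dict(filter(lambda elem: elem[1]['user_id'] is not None,
                            inv_dictionary.items()))

assert "DEPT_CH" in roles and "DEPT_CH" not in investigators
```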
@@ -207,8 +207,10 @@ class WorkflowService(object):
             if spiff_task:
                 nav_item['task'] = WorkflowService.spiff_task_to_api_task(spiff_task, add_docs_and_forms=False)
+                nav_item['title'] = nav_item['task'].title  # Prefer the task title.
+
             else:
                 nav_item['task'] = None

             if not 'is_decision' in nav_item:
                 nav_item['is_decision'] = False
@@ -255,9 +257,12 @@ class WorkflowService(object):
             if latest_event.form_data is not None:
                 return latest_event.form_data
             else:
-                app.logger.error("missing_form_data", "We have lost data for workflow %i, "
-                                 "task %s, it is not in the task event model, "
-                                 "and it should be." % (workflow_id, spiff_task.task_spec.name))
+                missing_form_error = (
+                    f'We have lost data for workflow {workflow_id}, '
+                    f'task {spiff_task.task_spec.name}, it is not in the task event model, '
+                    f'and it should be.'
+                )
+                app.logger.error("missing_form_data", missing_form_error, exc_info=True)
                 return {}
         else:
             return {}
@@ -333,10 +338,12 @@ class WorkflowService(object):
         # otherwise strip off the first word of the task, as that should be following
         # a BPMN standard, and should not be included in the display.
         if task.properties and "display_name" in task.properties:
-            task.title = task.properties['display_name']
+            try:
+                task.title = spiff_task.workflow.script_engine.evaluate_expression(spiff_task, task.properties['display_name'])
+            except Exception as e:
+                app.logger.info("Failed to set title on task due to type error." + str(e))
         elif task.title and ' ' in task.title:
             task.title = task.title.partition(' ')[2]

         return task

     @staticmethod
@@ -347,7 +354,7 @@ class WorkflowService(object):
                 template = Template(v)
                 props[k] = template.render(**spiff_task.data)
             except jinja2.exceptions.TemplateError as ue:
-                app.logger.error("Failed to process task property %s " % str(ue))
+                app.logger.error(f'Failed to process task property {str(ue)}', exc_info=True)
         return props

     @staticmethod
@@ -17,6 +17,9 @@
         <camunda:formData>
           <camunda:formField id="email" label="Email Address:" type="string" />
         </camunda:formData>
+        <camunda:properties>
+          <camunda:property name="display_name" value="investigator.label" />
+        </camunda:properties>
       </bpmn:extensionElements>
       <bpmn:incoming>SequenceFlow_1p568pp</bpmn:incoming>
       <bpmn:outgoing>Flow_0ugjw69</bpmn:outgoing>
@@ -168,8 +168,6 @@ class TestStudyApi(BaseTest):
         num_open = 0

         for study in json_data:
             if study['protocol_builder_status'] == 'INCOMPLETE':  # One study in user_studies.json is not q_complete
                 num_incomplete += 1
             if study['protocol_builder_status'] == 'ABANDONED':  # One study does not exist in user_studies.json
                 num_abandoned += 1
             if study['protocol_builder_status'] == 'ACTIVE':  # One study is marked complete without HSR Number

@@ -182,8 +180,8 @@ class TestStudyApi(BaseTest):
         self.assertGreater(num_db_studies_after, num_db_studies_before)
         self.assertEqual(num_abandoned, 1)
         self.assertEqual(num_open, 1)
-        self.assertEqual(num_active, 1)
-        self.assertEqual(num_incomplete, 1)
+        self.assertEqual(num_active, 2)
+        self.assertEqual(num_incomplete, 0)
         self.assertEqual(len(json_data), num_db_studies_after)
         self.assertEqual(num_open + num_active + num_incomplete + num_abandoned, num_db_studies_after)
@@ -183,7 +183,7 @@ class TestStudyService(BaseTest):


     @patch('crc.services.protocol_builder.ProtocolBuilderService.get_investigators')  # mock_docs
-    def test_get_personnel(self, mock_docs):
+    def test_get_personnel_roles(self, mock_docs):
         self.load_example_data()

         # mock out the protocol builder

@@ -191,7 +191,7 @@ class TestStudyService(BaseTest):
         mock_docs.return_value = json.loads(docs_response)

         workflow = self.create_workflow('docx')  # The workflow really doesn't matter in this case.
-        investigators = StudyService().get_investigators(workflow.study_id)
+        investigators = StudyService().get_investigators(workflow.study_id, all=True)

         self.assertEqual(9, len(investigators))

@@ -207,3 +207,22 @@ class TestStudyService(BaseTest):

         # No value is provided for Department Chair
         self.assertIsNone(investigators['DEPT_CH']['user_id'])
+
+    @patch('crc.services.protocol_builder.ProtocolBuilderService.get_investigators')  # mock_docs
+    def test_get_study_personnel(self, mock_docs):
+        self.load_example_data()
+
+        # mock out the protocol builder
+        docs_response = self.protocol_builder_response('investigators.json')
+        mock_docs.return_value = json.loads(docs_response)
+
+        workflow = self.create_workflow('docx')  # The workflow really doesn't matter in this case.
+        investigators = StudyService().get_investigators(workflow.study_id, all=False)
+
+        self.assertEqual(3, len(investigators))
+
+        # dhf8r is in the ldap mock data.
+        self.assertEqual("dhf8r", investigators['PI']['user_id'])
+        self.assertEqual("Dan Funk", investigators['PI']['display_name'])  # Data from ldap
+        self.assertEqual("Primary Investigator", investigators['PI']['label'])  # Data from xls file.
+        self.assertEqual("Always", investigators['PI']['display'])  # Data from xls file.
@@ -61,6 +61,15 @@ class TestLookupService(BaseTest):
         lookup_data = session.query(LookupDataModel).filter(LookupDataModel.lookup_file_model == lookup_record).all()
         self.assertEqual(4, len(lookup_data))

+    def test_lookup_based_on_id(self):
+        spec = BaseTest.load_test_spec('enum_options_from_file')
+        workflow = self.create_workflow('enum_options_from_file')
+        processor = WorkflowProcessor(workflow)
+        processor.do_engine_steps()
+        results = LookupService.lookup(workflow, "AllTheNames", "", value="1000", limit=10)
+        self.assertEqual(1, len(results), "It is possible to find an item based on the id, rather than as a search")
+        self.assertIsNotNone(results[0].data)
+        self.assertIsInstance(results[0].data, dict)
+
     def test_some_full_text_queries(self):

@@ -114,6 +123,9 @@ class TestLookupService(BaseTest):
         results = LookupService.lookup(workflow, "AllTheNames", "1 (!-Something", limit=10)
         self.assertEqual("1 Something", results[0].label, "special characters don't flake out")

+        results = LookupService.lookup(workflow, "AllTheNames", "1  Something", limit=10)
+        self.assertEqual("1 Something", results[0].label, "double spaces should not be an issue.")
+
         # 1018 10000 Something Industry
@@ -322,7 +322,7 @@ class TestTasksApi(BaseTest):
         self.assertEqual(4, len(navigation))  # Start task, form_task, multi_task, end task
         self.assertEqual("UserTask", workflow.next_task.type)
         self.assertEqual(MultiInstanceType.sequential.value, workflow.next_task.multi_instance_type)
-        self.assertEqual(9, workflow.next_task.multi_instance_count)
+        self.assertEqual(3, workflow.next_task.multi_instance_count)

         # Assure that the names for each task are properly updated, so they aren't all the same.
         self.assertEqual("Primary Investigator", workflow.next_task.properties['display_name'])

@@ -343,6 +343,51 @@ class TestTasksApi(BaseTest):
         results = json.loads(rv.get_data(as_text=True))
         self.assertEqual(5, len(results))

+    def test_lookup_endpoint_for_task_field_using_lookup_entry_id(self):
+        self.load_example_data()
+        workflow = self.create_workflow('enum_options_with_search')
+        # get the first form in the two form workflow.
+        workflow = self.get_workflow_api(workflow)
+        task = workflow.next_task
+        field_id = task.form['fields'][0]['id']
+        rv = self.app.get('/v1.0/workflow/%i/lookup/%s?query=%s&limit=5' %
+                          (workflow.id, field_id, 'c'),  # All records with a word that starts with 'c'
+                          headers=self.logged_in_headers(),
+                          content_type="application/json")
+        self.assert_success(rv)
+        results = json.loads(rv.get_data(as_text=True))
+        self.assertEqual(5, len(results))
+        rv = self.app.get('/v1.0/workflow/%i/lookup/%s?value=%s' %
+                          (workflow.id, field_id, results[0]['value']),  # The value of the first record found above
+                          headers=self.logged_in_headers(),
+                          content_type="application/json")
+        results = json.loads(rv.get_data(as_text=True))
+        self.assertEqual(1, len(results))
+        self.assertIsInstance(results[0]['data'], dict)
+        self.assertNotIn('id', results[0], "Don't include the internal id, that can be very confusing, and should not be used.")
+
+    def test_lookup_endpoint_also_works_for_enum(self):
+        # Naming here gets a little confusing. Fields can be marked as enum or auto-complete.
+        # In the event of an auto-complete it's a type-ahead search field; for an enum the
+        # key/values from the spreadsheet are added directly to the form and it shows up as
+        # a dropdown. This tests the case of wanting to get additional data when a user selects
+        # something from a dropdown.
+        self.load_example_data()
+        workflow = self.create_workflow('enum_options_from_file')
+        # get the first form in the two form workflow.
+        workflow = self.get_workflow_api(workflow)
+        task = workflow.next_task
+        field_id = task.form['fields'][0]['id']
+        option_id = task.form['fields'][0]['options'][0]['id']
+        rv = self.app.get('/v1.0/workflow/%i/lookup/%s?value=%s' %
+                          (workflow.id, field_id, option_id),  # The id of the first dropdown option
+                          headers=self.logged_in_headers(),
+                          content_type="application/json")
+        self.assert_success(rv)
+        results = json.loads(rv.get_data(as_text=True))
+        self.assertEqual(1, len(results))
+        self.assertIsInstance(results[0]['data'], dict)
+
     def test_lookup_endpoint_for_task_ldap_field_lookup(self):
         self.load_example_data()
         workflow = self.create_workflow('ldap_lookup')

@@ -435,17 +480,23 @@ class TestTasksApi(BaseTest):
         workflow = self.create_workflow('multi_instance_parallel')

         workflow_api = self.get_workflow_api(workflow)
-        self.assertEqual(12, len(workflow_api.navigation))
+        self.assertEqual(6, len(workflow_api.navigation))
         ready_items = [nav for nav in workflow_api.navigation if nav['state'] == "READY"]
-        self.assertEqual(9, len(ready_items))
+        self.assertEqual(3, len(ready_items))

         self.assertEqual("UserTask", workflow_api.next_task.type)
         self.assertEqual("MultiInstanceTask", workflow_api.next_task.name)
-        self.assertEqual("more information", workflow_api.next_task.title)
+        self.assertEqual("Primary Investigator", workflow_api.next_task.title)

-        for i in random.sample(range(9), 9):
+        for i in random.sample(range(3), 3):
             task = TaskSchema().load(ready_items[i]['task'])
-            data = workflow_api.next_task.data
+            rv = self.app.put('/v1.0/workflow/%i/task/%s/set_token' % (workflow.id, task.id),
+                              headers=self.logged_in_headers(),
+                              content_type="application/json")
+            self.assert_success(rv)
+            json_data = json.loads(rv.get_data(as_text=True))
+            workflow = WorkflowApiSchema().load(json_data)
+            data = workflow.next_task.data
             data['investigator']['email'] = "dhf8r@virginia.edu"
             self.complete_form(workflow, task, data)
             #tasks = self.get_workflow_api(workflow).user_tasks
@@ -146,6 +146,11 @@ class TestWorkflowProcessorMultiInstance(BaseTest):

         api_task = WorkflowService.spiff_task_to_api_task(task)
         self.assertEqual(MultiInstanceType.parallel, api_task.multi_instance_type)

+        # Assure navigation picks up the label of the current element variable.
+        nav = WorkflowService.processor_to_workflow_api(processor, task).navigation
+        self.assertEquals("Primary Investigator", nav[2].title)
+
         task.update_data({"investigator": {"email": "dhf8r@virginia.edu"}})
         processor.complete_task(task)
         processor.do_engine_steps()