Merge pull request #26 from sartography/feature/customer_lookup

Allow configurators to upload xls files into a workflow for defining …
Dan Funk 2020-04-15 12:41:47 -04:00 committed by GitHub
commit af05a97b3e
14 changed files with 305 additions and 135 deletions
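In short: a workflow specification can now carry an xls lookup file, and an enum form field can point at it through three extension properties (enum.options.file, enum.options.value.column, enum.options.label.column). As a sketch of the kind of lookup file the tests use (column names are taken from the test data; the exact sheet layout beyond "first sheet, named value/label columns" is an assumption), such a file could be produced with pandas:

# Sketch only: build a lookup spreadsheet like the tests' customer_list.xls.
# Writing legacy .xls needs the xlwt engine; .xlsx via openpyxl would work
# just as well for the pandas-based reader this commit adds.
import pandas as pd

pd.DataFrame({
    "CUSTOMER_NUMBER": [1000, 1001],  # value column
    "CUSTOMER_NAME": ["UVA - INTERNAL - GM USE ONLY", "Another Customer"],  # label column
}).to_excel("customer_list.xls", index=False)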

View File

@@ -4,7 +4,7 @@ import connexion
from flask import send_file
from crc import session
from crc.api.common import ApiErrorSchema, ApiError
from crc.api.common import ApiError
from crc.models.file import FileModelSchema, FileModel, FileDataModel
from crc.models.workflow import WorkflowSpecModel
from crc.services.file_service import FileService

View File

@@ -10,6 +10,7 @@ from crc.models.stats import WorkflowStatsModel, TaskEventModel
from crc.models.workflow import WorkflowModel, WorkflowSpecModelSchema, WorkflowSpecModel, WorkflowSpecCategoryModel, \
WorkflowSpecCategoryModelSchema
from crc.services.workflow_processor import WorkflowProcessor
from crc.services.workflow_service import WorkflowService
def all_specifications():
@@ -40,7 +41,7 @@ def validate_workflow_specification(spec_id):
errors = []
try:
WorkflowProcessor.test_spec(spec_id)
WorkflowService.test_spec(spec_id)
except ApiError as ae:
errors.append(ae)
return ApiErrorSchema(many=True).dump(errors)
@@ -85,7 +86,7 @@ def delete_workflow_specification(spec_id):
def __get_workflow_api_model(processor: WorkflowProcessor, status_data=None):
spiff_tasks = processor.get_all_user_tasks()
user_tasks = list(map(Task.from_spiff, spiff_tasks))
user_tasks = list(map(WorkflowService.spiff_task_to_api_task, spiff_tasks))
is_active = True
if status_data is not None and processor.workflow_spec_id in status_data:
@@ -94,7 +95,7 @@ def __get_workflow_api_model(processor: WorkflowProcessor, status_data=None):
workflow_api = WorkflowApi(
id=processor.get_workflow_id(),
status=processor.get_status(),
last_task=Task.from_spiff(processor.bpmn_workflow.last_task),
last_task=WorkflowService.spiff_task_to_api_task(processor.bpmn_workflow.last_task),
next_task=None,
user_tasks=user_tasks,
workflow_spec_id=processor.workflow_spec_id,
@@ -102,7 +103,7 @@ def __get_workflow_api_model(processor: WorkflowProcessor, status_data=None):
is_latest_spec=processor.get_spec_version() == processor.get_latest_version_string(processor.workflow_spec_id),
)
if processor.next_task():
workflow_api.next_task = Task.from_spiff(processor.next_task())
workflow_api.next_task = WorkflowService.spiff_task_to_api_task(processor.next_task())
return workflow_api

View File

@@ -1,15 +1,17 @@
import jinja2
import marshmallow
from jinja2 import Template
from marshmallow import INCLUDE
from marshmallow_enum import EnumField
from crc import ma
from crc.api.common import ApiError
from crc.models.workflow import WorkflowStatus
class Task(object):
ENUM_OPTIONS_FILE_PROP = "enum.options.file"
EMUM_OPTIONS_VALUE_COL_PROP = "enum.options.value.column"
EMUM_OPTIONS_LABEL_COL_PROP = "enum.options.label.column"
def __init__(self, id, name, title, type, state, form, documentation, data):
self.id = id
self.name = name
@@ -20,35 +22,6 @@ class Task(object):
self.documentation = documentation
self.data = data
@classmethod
def from_spiff(cls, spiff_task):
documentation = spiff_task.task_spec.documentation if hasattr(spiff_task.task_spec, "documentation") else ""
instance = cls(spiff_task.id,
spiff_task.task_spec.name,
spiff_task.task_spec.description,
spiff_task.task_spec.__class__.__name__,
spiff_task.get_state_name(),
None,
documentation,
spiff_task.data)
if hasattr(spiff_task.task_spec, "form"):
instance.form = spiff_task.task_spec.form
if documentation != "" and documentation is not None:
instance.process_documentation(documentation)
return instance
def process_documentation(self, documentation):
'''Runs markdown documentation through the Jinja2 processor to inject data,
create loops, etc...'''
try:
template = Template(documentation)
self.documentation = template.render(**self.data)
except jinja2.exceptions.TemplateError as ue:
raise ApiError(code="template_error", message="Error processing template for task %s: %s" %
(self.name, str(ue)), status_code=500)
# TODO: Catch additional errors and report back.
class OptionSchema(ma.Schema):
class Meta:

View File

@@ -50,7 +50,6 @@ Takes two arguments:
"the name of the docx template to use. The second "
"argument is a code for the document, as "
"set in the reference document %s. " % FileService.IRB_PRO_CATEGORIES_FILE)
workflow_spec_model = self.find_spec_model_in_db(task.workflow)
task_study_id = task.workflow.data[WorkflowProcessor.STUDY_ID_KEY]
file_name = args[0]
@@ -58,21 +57,7 @@ Takes two arguments:
raise ApiError(code="invalid_argument",
message="The given task does not match the given study.")
if workflow_spec_model is None:
raise ApiError(code="workflow_model_error",
message="Something is wrong. I can't find the workflow you are using.")
file_data_model = session.query(FileDataModel) \
.join(FileModel) \
.filter(FileModel.name == file_name) \
.filter(FileModel.workflow_spec_id == workflow_spec_model.id).first()
if file_data_model is None:
raise ApiError(code="file_missing",
message="Can not find a file called '%s' within workflow specification '%s'"
% (args[0], workflow_spec_model.id))
file_data_model = FileService.get_workflow_file_data(task.workflow, file_name)
return self.make_template(BytesIO(file_data_model.data), task.data)
@@ -85,15 +70,4 @@ Takes two arguments:
target_stream.seek(0) # move to the beginning of the stream.
return target_stream
def find_spec_model_in_db(self, workflow):
""" Search for the workflow """
# When the workflow spec model is created, we record the primary process id,
# then we can look it up. As there is the potential for sub-workflows, we
# may need to travel up to locate the primary process.
spec = workflow.spec
workflow_model = session.query(WorkflowSpecModel). \
filter(WorkflowSpecModel.primary_process_id == spec.name).first()
if workflow_model is None and workflow != workflow.outer_workflow:
return self.find_spec_model_in_db(workflow.outer_workflow)
return workflow_model

View File

@@ -192,3 +192,40 @@ class FileService(object):
if not file_model:
raise ApiError("file_not_found", "There is no reference file with the name '%s'" % file_name)
return FileService.get_file_data(file_model.id, file_model)
@staticmethod
def get_workflow_file_data(workflow, file_name):
"""Given a SPIFF Workflow Model, tracks down a file with the given name in the datbase and returns it's data"""
workflow_spec_model = FileService.__find_spec_model_in_db(workflow)
study_id = workflow.data[WorkflowProcessor.STUDY_ID_KEY]
if workflow_spec_model is None:
raise ApiError(code="workflow_model_error",
message="Something is wrong. I can't find the workflow you are using.")
file_data_model = session.query(FileDataModel) \
.join(FileModel) \
.filter(FileModel.name == file_name) \
.filter(FileModel.workflow_spec_id == workflow_spec_model.id).first()
if file_data_model is None:
raise ApiError(code="file_missing",
message="Can not find a file called '%s' within workflow specification '%s'"
% (file_name, workflow_spec_model.id))
return file_data_model
@staticmethod
def __find_spec_model_in_db(workflow):
""" Search for the workflow """
# When the workflow spec model is created, we record the primary process id,
# then we can look it up. As there is the potential for sub-workflows, we
# may need to travel up to locate the primary process.
spec = workflow.spec
workflow_model = session.query(WorkflowSpecModel). \
filter(WorkflowSpecModel.primary_process_id == spec.name).first()
if workflow_model is None and workflow != workflow.outer_workflow:
return FileService.__find_spec_model_in_db(workflow.outer_workflow)
return workflow_model
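A hypothetical usage sketch of the new helper (here `workflow` is assumed to be a live SpiffWorkflow instance whose spec is stored in the database; names are illustrative):

# Sketch only: fetch an attached spreadsheet's bytes and open it with pandas.
# BytesIO wrapping is used defensively here; the service code in this commit
# hands the raw bytes straight to ExcelFile.
from io import BytesIO
from pandas import ExcelFile
from crc.services.file_service import FileService

data_model = FileService.get_workflow_file_data(workflow, "customer_list.xls")
xls = ExcelFile(BytesIO(data_model.data))
print(xls.sheet_names)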

View File

@@ -17,7 +17,6 @@ from SpiffWorkflow.specs import WorkflowSpec
from crc import session
from crc.api.common import ApiError
from crc.models.api_models import Task
from crc.models.file import FileDataModel, FileModel, FileType
from crc.models.workflow import WorkflowStatus, WorkflowModel
from crc.scripts.script import Script
@@ -271,26 +270,7 @@ class WorkflowProcessor(object):
spec.description = version
return spec
@classmethod
def test_spec(cls, spec_id):
spec = WorkflowProcessor.get_spec(spec_id)
bpmn_workflow = BpmnWorkflow(spec, script_engine=cls._script_engine)
bpmn_workflow.data[WorkflowProcessor.STUDY_ID_KEY] = 1
bpmn_workflow.data[WorkflowProcessor.WORKFLOW_ID_KEY] = spec_id
bpmn_workflow.data[WorkflowProcessor.VALIDATION_PROCESS_KEY] = True
while not bpmn_workflow.is_completed():
try:
bpmn_workflow.do_engine_steps()
tasks = bpmn_workflow.get_tasks(SpiffTask.READY)
for task in tasks:
task_api = Task.from_spiff(task) # Assure we try to process the documentation, and raise those errors.
WorkflowProcessor.populate_form_with_random_data(task)
task.complete()
except WorkflowException as we:
raise ApiError.from_task_spec("workflow_execution_exception", str(we),
we.sender)
@staticmethod
def populate_form_with_random_data(task):

View File

@@ -0,0 +1,112 @@
from SpiffWorkflow.bpmn.workflow import BpmnWorkflow
from pandas import ExcelFile
from crc.api.common import ApiError
from crc.models.api_models import Task
import jinja2
from jinja2 import Template
from crc.services.file_service import FileService
from crc.services.workflow_processor import WorkflowProcessor, CustomBpmnScriptEngine
from SpiffWorkflow import Task as SpiffTask, WorkflowException
class WorkflowService(object):
"""Provides tools for processing workflows and tasks. This
should, at some point, be the only way to work with workflows, and
the WorkflowProcessor should be hidden behind this service.
This will help maintain a structure that avoids circular dependencies.
But for now, this contains tools for converting spiff-workflow models into our
own API models with additional information and capabilities."""
@classmethod
def test_spec(cls, spec_id):
"""Runs a spec through it's paces to see if it results in any errors. Not full proof, but a good
sanity check."""
spec = WorkflowProcessor.get_spec(spec_id)
bpmn_workflow = BpmnWorkflow(spec, script_engine=CustomBpmnScriptEngine())
bpmn_workflow.data[WorkflowProcessor.STUDY_ID_KEY] = 1
bpmn_workflow.data[WorkflowProcessor.WORKFLOW_ID_KEY] = spec_id
bpmn_workflow.data[WorkflowProcessor.VALIDATION_PROCESS_KEY] = True
while not bpmn_workflow.is_completed():
try:
bpmn_workflow.do_engine_steps()
tasks = bpmn_workflow.get_tasks(SpiffTask.READY)
for task in tasks:
task_api = WorkflowService.spiff_task_to_api_task(
task) # Assure we try to process the documentation, and raise those errors.
WorkflowProcessor.populate_form_with_random_data(task)
task.complete()
except WorkflowException as we:
raise ApiError.from_task_spec("workflow_execution_exception", str(we),
we.sender)
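Since validation now lives on the service rather than the processor, callers outside the API (scripts, ad-hoc checks) can exercise a spec directly. A hedged sketch, with an illustrative spec id and assuming ApiError exposes its code and message as attributes (as the schema dump suggests):

# Sketch only: run a spec through validation and inspect any ApiError raised.
from crc.api.common import ApiError
from crc.services.workflow_service import WorkflowService

try:
    WorkflowService.test_spec("enum_options_from_file")
except ApiError as ae:
    print(ae.code, ae.message)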
@staticmethod
def spiff_task_to_api_task(spiff_task):
documentation = spiff_task.task_spec.documentation if hasattr(spiff_task.task_spec, "documentation") else ""
task = Task(spiff_task.id,
spiff_task.task_spec.name,
spiff_task.task_spec.description,
spiff_task.task_spec.__class__.__name__,
spiff_task.get_state_name(),
None,
documentation,
spiff_task.data)
# Only process the form and documentation if this is something that is ready or completed.
if not (spiff_task._is_predicted()):
if hasattr(spiff_task.task_spec, "form"):
task.form = spiff_task.task_spec.form
for field in task.form.fields:
WorkflowService._process_options(spiff_task, field)
if documentation != "" and documentation is not None:
WorkflowService._process_documentation(task, documentation)
return task
@staticmethod
def _process_documentation(task, documentation):
"""Runs the given documentation string through the Jinja2 processor to inject data
create loops, etc..."""
try:
template = Template(documentation)
task.documentation = template.render(**task.data)
except jinja2.exceptions.TemplateError as ue:
raise ApiError(code="template_error", message="Error processing template for task %s: %s" %
(task.name, str(ue)), status_code=500)
# TODO: Catch additional errors and report back.
@staticmethod
def _process_options(spiff_task, field):
""" Checks to see if the options are provided in a separate lookup table associated with the
workflow, and populates these if possible. """
if field.has_property(Task.ENUM_OPTIONS_FILE_PROP):
if not field.has_property(Task.EMUM_OPTIONS_VALUE_COL_PROP) or \
not field.has_property(Task.EMUM_OPTIONS_LABEL_COL_PROP):
raise ApiError.from_task("invalid_emum",
"For emumerations based on an xls file, you must include 3 properties: %s, "
"%s, and %s, you supplied %s" % (Task.ENUM_OPTIONS_FILE_PROP,
Task.EMUM_OPTIONS_VALUE_COL_PROP,
Task.EMUM_OPTIONS_LABEL_COL_PROP),
task=spiff_task)
# Get the file data from the File Service
file_name = field.get_property(Task.ENUM_OPTIONS_FILE_PROP)
value_column = field.get_property(Task.EMUM_OPTIONS_VALUE_COL_PROP)
label_column = field.get_property(Task.EMUM_OPTIONS_LABEL_COL_PROP)
data_model = FileService.get_workflow_file_data(spiff_task.workflow, file_name)
xls = ExcelFile(data_model.data)
df = xls.parse(xls.sheet_names[0])
if value_column not in df:
raise ApiError("invalid_emum",
"The file %s does not contain a column named % s" % (file_name, value_column))
if label_column not in df:
raise ApiError("invalid_emum",
"The file %s does not contain a column named % s" % (file_name, label_column))
for index, row in df.iterrows():
field.options.append({"id": row[value_column],
"name": row[label_column]})

View File

@@ -0,0 +1 @@
,dan,lilmaker,15.04.2020 11:05,file:///home/dan/.config/libreoffice/4;

Binary file not shown.

View File

@@ -0,0 +1,49 @@
<?xml version="1.0" encoding="UTF-8"?>
<bpmn:definitions xmlns:bpmn="http://www.omg.org/spec/BPMN/20100524/MODEL" xmlns:bpmndi="http://www.omg.org/spec/BPMN/20100524/DI" xmlns:dc="http://www.omg.org/spec/DD/20100524/DC" xmlns:camunda="http://camunda.org/schema/1.0/bpmn" xmlns:di="http://www.omg.org/spec/DD/20100524/DI" id="Definitions_1v1rp1q" targetNamespace="http://bpmn.io/schema/bpmn" exporter="Camunda Modeler" exporterVersion="3.4.1">
<bpmn:process id="Process_1vu5nxl" isExecutable="true">
<bpmn:startEvent id="StartEvent_1">
<bpmn:outgoing>SequenceFlow_0lvudp8</bpmn:outgoing>
</bpmn:startEvent>
<bpmn:sequenceFlow id="SequenceFlow_0lvudp8" sourceRef="StartEvent_1" targetRef="Task_14svgcu" />
<bpmn:endEvent id="EndEvent_0q4qzl9">
<bpmn:incoming>SequenceFlow_02vev7n</bpmn:incoming>
</bpmn:endEvent>
<bpmn:sequenceFlow id="SequenceFlow_02vev7n" sourceRef="Task_14svgcu" targetRef="EndEvent_0q4qzl9" />
<bpmn:userTask id="Task_14svgcu" name="Enum Lookup Form" camunda:formKey="EnumForm">
<bpmn:extensionElements>
<camunda:formData>
<camunda:formField id="AllTheNames" label="Select a value" type="enum">
<camunda:properties>
<camunda:property id="enum.options.file" value="customer_list.xls" />
<camunda:property id="enum.options.value.column" value="CUSTOMER_NUMBER" />
<camunda:property id="enum.options.label.column" value="CUSTOMER_NAME" />
</camunda:properties>
</camunda:formField>
</camunda:formData>
</bpmn:extensionElements>
<bpmn:incoming>SequenceFlow_0lvudp8</bpmn:incoming>
<bpmn:outgoing>SequenceFlow_02vev7n</bpmn:outgoing>
</bpmn:userTask>
</bpmn:process>
<bpmndi:BPMNDiagram id="BPMNDiagram_1">
<bpmndi:BPMNPlane id="BPMNPlane_1" bpmnElement="Process_1vu5nxl">
<bpmndi:BPMNShape id="_BPMNShape_StartEvent_2" bpmnElement="StartEvent_1">
<dc:Bounds x="179" y="99" width="36" height="36" />
</bpmndi:BPMNShape>
<bpmndi:BPMNEdge id="SequenceFlow_0lvudp8_di" bpmnElement="SequenceFlow_0lvudp8">
<di:waypoint x="215" y="117" />
<di:waypoint x="270" y="117" />
</bpmndi:BPMNEdge>
<bpmndi:BPMNShape id="EndEvent_0q4qzl9_di" bpmnElement="EndEvent_0q4qzl9">
<dc:Bounds x="432" y="99" width="36" height="36" />
</bpmndi:BPMNShape>
<bpmndi:BPMNEdge id="SequenceFlow_02vev7n_di" bpmnElement="SequenceFlow_02vev7n">
<di:waypoint x="370" y="117" />
<di:waypoint x="432" y="117" />
</bpmndi:BPMNEdge>
<bpmndi:BPMNShape id="UserTask_18ly1yq_di" bpmnElement="Task_14svgcu">
<dc:Bounds x="270" y="77" width="100" height="80" />
</bpmndi:BPMNShape>
</bpmndi:BPMNPlane>
</bpmndi:BPMNDiagram>
</bpmn:definitions>

View File

@@ -41,9 +41,14 @@ class TestStudyApi(BaseTest):
study = session.query(StudyModel).first()
self.assertIsNotNone(study)
def test_get_study(self):
@patch('crc.services.protocol_builder.ProtocolBuilderService.get_required_docs') # mock_docs
def test_get_study(self, mock_docs):
"""Generic test, but pretty detailed, in that the study should return a categorized list of workflows
This starts with out loading the example data, to show that all the bases are covered from ground 0."""
docs_response = self.protocol_builder_response('required_docs.json')
mock_docs.return_value = json.loads(docs_response)
new_study = self.add_test_study()
new_study = session.query(StudyModel).filter_by(id=new_study["id"]).first()
# Add a category
@@ -109,9 +114,10 @@ class TestStudyApi(BaseTest):
self.assertEqual(study.title, json_data['title'])
self.assertEqual(study.protocol_builder_status.name, json_data['protocol_builder_status'])
@patch('crc.services.protocol_builder.ProtocolBuilderService.get_required_docs') # mock_docs
@patch('crc.services.protocol_builder.ProtocolBuilderService.get_study_details') # mock_details
@patch('crc.services.protocol_builder.ProtocolBuilderService.get_studies') # mock_studies
def test_get_all_studies(self, mock_studies, mock_details):
def test_get_all_studies(self, mock_studies, mock_details, mock_docs):
self.load_example_data()
s = StudyModel(
id=54321, # This matches one of the ids from the study_details_json data.
@@ -128,6 +134,8 @@ class TestStudyApi(BaseTest):
mock_studies.return_value = ProtocolBuilderStudySchema(many=True).loads(studies_response)
details_response = self.protocol_builder_response('study_details.json')
mock_details.return_value = json.loads(details_response)
docs_response = self.protocol_builder_response('required_docs.json')
mock_docs.return_value = json.loads(docs_response)
# Make the api call to get all studies
api_response = self.app.get('/v1.0/study', headers=self.logged_in_headers(), content_type="application/json")
@@ -155,7 +163,12 @@ class TestStudyApi(BaseTest):
test_study = session.query(StudyModel).filter_by(id=54321).first()
self.assertFalse(test_study.inactive)
def test_get_single_study(self):
@patch('crc.services.protocol_builder.ProtocolBuilderService.get_required_docs') # mock_docs
def test_get_single_study(self, mock_docs):
docs_response = self.protocol_builder_response('required_docs.json')
mock_docs.return_value = json.loads(docs_response)
self.load_example_data()
study = session.query(StudyModel).first()
rv = self.app.get('/v1.0/study/%i' % study.id,

View File

@@ -1,3 +1,6 @@
import json
from unittest.mock import patch
from crc import db
from crc.models.protocol_builder import ProtocolBuilderStatus
from crc.models.study import StudyModel
@@ -13,10 +16,13 @@ from tests.base_test import BaseTest
class TestStudyService(BaseTest):
"""Largely tested via the test_study_api, and time is tight, but adding new tests here."""
def test_total_tasks_updated(self):
@patch('crc.services.protocol_builder.ProtocolBuilderService.get_required_docs') # mock_docs
def test_total_tasks_updated(self, mock_docs):
"""Assure that as a users progress is available when getting a list of studies for that user."""
docs_response = self.protocol_builder_response('required_docs.json')
mock_docs.return_value = json.loads(docs_response)
# Assure some basic models are in place. This is a damn mess; our database models need an overhaul to make
# this easier - better relationship modeling is now critical.
self.load_test_spec("top_level_workflow", master_spec=True)

View File

@@ -2,18 +2,15 @@ import json
import os
from crc import session, app
from crc.models.api_models import WorkflowApiSchema, Task
from crc.models.api_models import WorkflowApiSchema
from crc.models.file import FileModelSchema
from crc.models.stats import WorkflowStatsModel, TaskEventModel
from crc.models.study import StudyModel
from crc.models.workflow import WorkflowSpecModelSchema, WorkflowModel, WorkflowStatus
from crc.services.workflow_processor import WorkflowProcessor
from crc.models.workflow import WorkflowStatus
from tests.base_test import BaseTest
class TestTasksApi(BaseTest):
def get_workflow_api(self, workflow, soft_reset=False, hard_reset=False):
rv = self.app.get('/v1.0/workflow/%i?soft_reset=%s&hard_reset=%s' %
(workflow.id, str(soft_reset), str(hard_reset)),
@@ -158,47 +155,6 @@ class TestTasksApi(BaseTest):
files = FileModelSchema(many=True).load(json_data, session=session)
self.assertTrue(len(files) == 1)
def test_documentation_processing_handles_replacements(self):
docs = "Some simple docs"
task = Task(1, "bill", "bill", "", "started", {}, docs, {})
task.process_documentation(docs)
self.assertEqual(docs, task.documentation)
task.data = {"replace_me": "new_thing"}
task.process_documentation("{{replace_me}}")
self.assertEqual("new_thing", task.documentation)
documentation = """
# Bigger Test
* bullet one
* bullet two has {{replace_me}}
# other stuff.
"""
expected = """
# Bigger Test
* bullet one
* bullet two has new_thing
# other stuff.
"""
task.process_documentation(documentation)
self.assertEqual(expected, task.documentation)
def test_documentation_processing_handles_conditionals(self):
docs = "This test {% if works == 'yes' %}works{% endif %}"
task = Task(1, "bill", "bill", "", "started", {}, docs, {})
task.process_documentation(docs)
self.assertEqual("This test ", task.documentation)
task.data = {"works": 'yes'}
task.process_documentation(docs)
self.assertEqual("This test works", task.documentation)
def test_get_documentation_populated_in_end(self):
self.load_example_data()
workflow = self.create_workflow('random_fact')

View File

@@ -0,0 +1,68 @@
import json
import os
from crc import session, app
from crc.models.api_models import WorkflowApiSchema, Task
from crc.models.file import FileModelSchema
from crc.models.stats import WorkflowStatsModel, TaskEventModel
from crc.models.study import StudyModel
from crc.models.workflow import WorkflowSpecModelSchema, WorkflowModel, WorkflowStatus
from crc.services.workflow_processor import WorkflowProcessor
from crc.services.workflow_service import WorkflowService
from tests.base_test import BaseTest
class TestWorkflowService(BaseTest):
def test_documentation_processing_handles_replacements(self):
docs = "Some simple docs"
task = Task(1, "bill", "bill", "", "started", {}, docs, {})
WorkflowService._process_documentation(task, docs)
self.assertEqual(docs, task.documentation)
task.data = {"replace_me": "new_thing"}
WorkflowService._process_documentation(task, "{{replace_me}}")
self.assertEqual("new_thing", task.documentation)
documentation = """
# Bigger Test
* bullet one
* bullet two has {{replace_me}}
# other stuff.
"""
expected = """
# Bigger Test
* bullet one
* bullet two has new_thing
# other stuff.
"""
WorkflowService._process_documentation(task, documentation)
self.assertEqual(expected, task.documentation)
def test_documentation_processing_handles_conditionals(self):
docs = "This test {% if works == 'yes' %}works{% endif %}"
task = Task(1, "bill", "bill", "", "started", {}, docs, {})
WorkflowService._process_documentation(task, docs)
self.assertEqual("This test ", task.documentation)
task.data = {"works": 'yes'}
WorkflowService._process_documentation(task, docs)
self.assertEqual("This test works", task.documentation)
def test_enum_options_from_file(self):
self.load_example_data()
workflow = self.create_workflow('enum_options_from_file')
processor = WorkflowProcessor(workflow)
processor.do_engine_steps()
task = processor.next_task()
WorkflowService._process_options(task, task.task_spec.form.fields[0])
options = task.task_spec.form.fields[0].options
self.assertEqual(19, len(options))
self.assertEqual(1000, options[0]['id'])
self.assertEqual("UVA - INTERNAL - GM USE ONLY", options[0]['name'])