Removed total_tasks and completed_tasks; they are no longer used by the front end, and were dubious to begin with.

Upgraded SpiffWorkflow and now use the new get_subprocess_specs method.
Updated calculate_stats in the workflow processor, as the serialization had changed drastically and we needed to debug some performance issues.
Added a get_navigation method that calculates a basic navigation list MUCH faster than using get_flat_nav_list in SpiffWorkflow's Navigation object (see the sketch below).
Modified a ton of tests because we no longer have total_tasks and completed_tasks counts, or a complex nested navigation list.
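In rough outline, the new navigation calculation works like this (a condensed sketch; the full method is WorkflowService.get_navigation in the workflow service diff below):

    # Condensed sketch of the flat navigation build.
    tasks = processor.bpmn_workflow.get_tasks(TaskState.READY | TaskState.COMPLETED | TaskState.FUTURE)
    user_tasks = [t for t in tasks if isinstance(t.task_spec, (UserTask, ManualTask, StartEvent))]
    navigation = []
    for task in user_tasks:
        # A FUTURE task whose spec is already in the list adds nothing new, so skip it.
        if any(nav.name == task.task_spec.name and task.state == TaskState.FUTURE for nav in navigation):
            continue
        nav_item = NavItem.from_spec(spec=task.task_spec)
        nav_item.state = task.state.name
        nav_item.task_id = task.id
        navigation.append(nav_item)
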
Dan 2022-07-20 12:10:23 -04:00
parent 706a7a5859
commit 4305b36b74
13 changed files with 128 additions and 154 deletions

View File

@ -52,7 +52,7 @@ dateparser = "*"
# pipenv install --editable ../SpiffWorkflow (but fix things back before commiting!)
# Merged Commit https://github.com/sartography/SpiffWorkflow/pull/178 broke usage of SpiffWorkflow
# References to task states will need to be updated to allow using newest version
spiffworkflow = {git = "https://github.com/sartography/SpiffWorkflow"}
spiffworkflow = {editable = true, path = "./../SpiffWorkflow"}
[requires]
python_version = "3.9"
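For local development against a sibling SpiffWorkflow checkout (as the comment above suggests), the dependency can be installed in editable mode:

    pipenv install --editable ../SpiffWorkflow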

Pipfile.lock generated
View File

@ -1,7 +1,7 @@
{
"_meta": {
"hash": {
"sha256": "c2fd582e7167dd91efba5115ea49717313e8bc5cf0589e5469b6e641326cfc8b"
"sha256": "a15f79f021bb6153794e84699caf6a3e52e098ece570b1c09dea83d85a359e02"
},
"pipfile-spec": 6,
"requires": {
@ -36,7 +36,7 @@
"sha256:2c1b13fecc0893e946c65cbd5f36427861cffa4ea2201d8f6fca22e2a373b5e2",
"sha256:6f0956d2c23d8fa6e7691934d8c3930eadb44972cbbd1a7ae3a520f735d43359"
],
"markers": "python_version >= '3.6'",
"markers": "python_full_version >= '3.6.0'",
"version": "==5.1.1"
},
"aniso8601": {
@ -67,7 +67,7 @@
"sha256:7614553711ee97490f732126dc077f8d0ae084ebc6a96e23db1482afabdb2c51",
"sha256:ff56f4892c1c4bf0d814575ea23471c230d544203c7748e8c68f0089478d48eb"
],
"markers": "python_version >= '3.6'",
"markers": "python_full_version >= '3.6.0'",
"version": "==2.10.3"
},
"bcrypt": {
@ -84,7 +84,7 @@
"sha256:a2c46100e315c3a5b90fdc53e429c006c5f962529bc27e1dfd656292c20ccc40",
"sha256:cd43303d6b8a165c29ec6756afd169faba9396a9472cdff753fe9f19b96ce2fa"
],
"markers": "python_version >= '3.6'",
"markers": "python_full_version >= '3.6.0'",
"version": "==3.2.2"
},
"beautifulsoup4": {
@ -92,7 +92,7 @@
"sha256:58d5c3d29f5a36ffeb94f02f0d786cd53014cf9b3b3951d42e0080d8a9498d30",
"sha256:ad9aa55b65ef2808eb405f46cf74df7fcb7044d5cbc26487f96eb2ef2e436693"
],
"markers": "python_version >= '3.6'",
"markers": "python_full_version >= '3.6.0'",
"version": "==4.11.1"
},
"billiard": {
@ -104,9 +104,10 @@
},
"blinker": {
"hashes": [
"sha256:471aee25f3992bd325afa3772f1063dbdbbca947a041b8b89466dc00d606f8b6"
"sha256:1eb563df6fdbc39eeddc177d953203f99f097e9bf0e2b8f9f3cf18b6ca425e36",
"sha256:923e5e2f69c155f2cc42dafbbd70e16e3fde24d2d4aa2ab72fbe386238892462"
],
"version": "==1.4"
"version": "==1.5"
},
"celery": {
"hashes": [
@ -121,7 +122,7 @@
"sha256:84c85a9078b11105f04f3036a9482ae10e4621616db313fe045dd24743a0820d",
"sha256:fe86415d55e84719d75f8b69414f6438ac3547d2078ab91b67e779ef69378412"
],
"markers": "python_version >= '3.6'",
"markers": "python_full_version >= '3.6.0'",
"version": "==2022.6.15"
},
"cffi": {
@ -198,7 +199,7 @@
"sha256:5189b6f22b01957427f35b6a08d9a0bc45b46d3788ef5a92e978433c7a35f8a5",
"sha256:575e708016ff3a5e3681541cb9d79312c416835686d054a23accb873b254f413"
],
"markers": "python_version >= '3.6'",
"markers": "python_full_version >= '3.6.0'",
"version": "==2.1.0"
},
"click": {
@ -206,7 +207,7 @@
"sha256:7682dc8afb30297001674575ea00d1814d808d6a36af415a82bd481d37ba7b8e",
"sha256:bb4d8133cb15a609f44e8213d9b391b0809795062913b383c62be0ee95b1db48"
],
"markers": "python_version >= '3.7'",
"markers": "python_full_version >= '3.7.0'",
"version": "==8.1.3"
},
"click-didyoumean": {
@ -214,7 +215,7 @@
"sha256:a0713dc7a1de3f06bc0df5a9567ad19ead2d3d5689b434768a6145bff77c0667",
"sha256:f184f0d851d96b6d29297354ed981b7dd71df7ff500d82fa6d11f0856bee8035"
],
"markers": "python_full_version >= '3.6.2' and python_full_version < '4.0.0'",
"markers": "python_version < '4' and python_full_version >= '3.6.2'",
"version": "==0.3.0"
},
"click-plugins": {
@ -250,7 +251,7 @@
"sha256:1b35798fdf1713f1c3139016cfcbc461f09edbf099d1fb658d4b7479fcaa3daa",
"sha256:e8b39238fb6f0153a069aa253d349467c3c4737934f253ef6abac5fe0eca1e5d"
],
"markers": "python_version >= '3.6'",
"markers": "python_full_version >= '3.6.0'",
"version": "==5.2.0"
},
"connexion": {
@ -354,7 +355,7 @@
"sha256:8eb9e2bc2f8c97e37a2dc85a09ecdcdec9d8a396530a6d5a33b30b9a92da0c5c",
"sha256:a2ba85d1d6a74ef63837eed693bcb89c3f752169b0e3e7ae5b16ca5e1b3deada"
],
"markers": "python_version >= '3.6'",
"markers": "python_full_version >= '3.6.0'",
"version": "==1.1.0"
},
"flask": {
@ -432,7 +433,7 @@
"sha256:8033ad4e853066ba6ca92050b9df2f89301b8fc8bf7e9324d412a63f8bf1a8fd",
"sha256:bac2fd45c0a1c9cf619e63a90d62bdc63892ef92387424b855792a6cabe789aa"
],
"markers": "python_version >= '3.6'",
"markers": "python_full_version >= '3.6.0'",
"version": "==4.0.9"
},
"gitpython": {
@ -556,7 +557,7 @@
"sha256:2c2349112351b88699d8d4b6b075022c0808887cb7ad10069318a8b0bc88db44",
"sha256:5dbbc68b317e5e42f327f9021763545dc3fc3bfe22e6deb96aaf1fc38874156a"
],
"markers": "python_version >= '3.7'",
"markers": "python_full_version >= '3.7.0'",
"version": "==2.1.2"
},
"jinja2": {
@ -564,7 +565,7 @@
"sha256:31351a702a408a9e7595a8fc6150fc3f43bb6bf7e319770cbc0db9df9437e852",
"sha256:6088930bfe239f0e6710546ab9c19c9ef35e29792895fed6e6e31a023a182a61"
],
"markers": "python_version >= '3.7'",
"markers": "python_full_version >= '3.7.0'",
"version": "==3.1.2"
},
"jsonschema": {
@ -572,7 +573,7 @@
"sha256:73764f461d61eb97a057c929368610a134d1d1fffd858acfe88864ee94f1f1d3",
"sha256:c7448a421b25e424fccfceea86b4e3a8672b4436e1988ccbde92c80828d4f085"
],
"markers": "python_version >= '3.7'",
"markers": "python_full_version >= '3.7.0'",
"version": "==4.7.2"
},
"kombu": {
@ -580,7 +581,7 @@
"sha256:37cee3ee725f94ea8bb173eaab7c1760203ea53bbebae226328600f9d2799610",
"sha256:8b213b24293d3417bcf0d2f5537b7f756079e3ea232a8386dcc89a59fd2361a4"
],
"markers": "python_version >= '3.7'",
"markers": "python_full_version >= '3.7.0'",
"version": "==5.2.4"
},
"ldap3": {
@ -675,16 +676,16 @@
"sha256:df3921c3081b013c8a2d5ff03c18375651684921ae83fd12e64800b7da923257",
"sha256:f054a5ff4743492f1aa9ecc47172cb33b42b9d993cffcc146c9de17e717b0307"
],
"markers": "python_version >= '3.7'",
"markers": "python_full_version >= '3.7.0'",
"version": "==1.2.1"
},
"markdown": {
"hashes": [
"sha256:cbb516f16218e643d8e0a95b309f77eb118cb138d39a4f27851e6a63581db874",
"sha256:f5da449a6e1c989a4cea2631aa8ee67caa5a2ef855d551c88f9e309f4634c621"
"sha256:08fb8465cffd03d10b9dd34a5c3fea908e20391a2a90b88d66362cb05beed186",
"sha256:3b809086bb6efad416156e00a0da66fe47618a5d6918dd688f53f40c8e4cfeff"
],
"index": "pypi",
"version": "==3.3.7"
"version": "==3.4.1"
},
"markupsafe": {
"hashes": [
@ -729,7 +730,7 @@
"sha256:f121a1420d4e173a5d96e47e9a0c0dcff965afdf1626d28de1460815f7c4ee7a",
"sha256:fc7b548b17d238737688817ab67deebb30e8073c95749d55538ed473130ec0c7"
],
"markers": "python_version >= '3.7'",
"markers": "python_full_version >= '3.7.0'",
"version": "==2.1.1"
},
"marshmallow": {
@ -797,7 +798,7 @@
"sha256:dd47c42927d89ab911e606518907cc2d3a1f38bbd026385970643f9c5b8ecfeb",
"sha256:ef103e05f519cdc783ae24ea4e2e0f508a9c99b2d4969652eed6a2e1ea5bd522"
],
"markers": "python_version >= '3.6'",
"markers": "python_full_version >= '3.6.0'",
"version": "==21.3"
},
"pandas": {
@ -936,7 +937,7 @@
"sha256:5eb116118f9612ff1ee89ac96437bb6b49e8f04d8a13b514ba26f620208e26eb",
"sha256:dc9c10fb40944260f6ed4c688ece0cd2048414940f1cea51b8b226318411c519"
],
"markers": "python_version >= '3.6'",
"markers": "python_full_version >= '3.6.0'",
"version": "==2.12.0"
},
"pyjwt": {
@ -960,7 +961,7 @@
"sha256:a422368fc821589c228f4c49438a368831cb5bbc0eab5ebe1d7fac9dded6567b",
"sha256:e46dae94e34b085175f8abb3b0aaa7da40767865ac82c928eeb9e57e1ea8a543"
],
"markers": "python_version >= '3.6'",
"markers": "python_full_version >= '3.6.0'",
"version": "==1.5.0"
},
"pyparsing": {
@ -995,7 +996,7 @@
"sha256:f87cc2863ef33c709e237d4b5f4502a62a00fab450c9e020892e8e2ede5847f5",
"sha256:fd8da6d0124efa2f67d86fa70c851022f87c98e205f0594e1fae044e7119a5a6"
],
"markers": "python_version >= '3.7'",
"markers": "python_full_version >= '3.7.0'",
"version": "==0.18.1"
},
"python-dateutil": {
@ -1063,7 +1064,7 @@
"sha256:e61ceaab6f49fb8bdfaa0f92c4b57bcfbea54c09277b1b4f7ac376bfb7a7c174",
"sha256:f84fbc98b019fef2ee9a1cb3ce93e3187a6df0b2538a651bfb890254ba9f90b5"
],
"markers": "python_version >= '3.6'",
"markers": "python_full_version >= '3.6.0'",
"version": "==6.0"
},
"recommonmark": {
@ -1151,7 +1152,7 @@
"sha256:fbc88d3ba402b5d041d204ec2449c4078898f89c4a6e6f0ed1c1a510ef1e221d",
"sha256:fbd3fe37353c62fd0eb19fb76f78aa693716262bcd5f9c14bb9e5aca4b3f0dc4"
],
"markers": "python_version >= '3.6'",
"markers": "python_full_version >= '3.6.0'",
"version": "==2022.3.2"
},
"requests": {
@ -1178,7 +1179,7 @@
"sha256:0d33c374d41c7863419fc8f6c10bfe25b7b498aa34164d135c622e52580c6b16",
"sha256:c04b44a57a6265fe34a4a444e965884716d34bae963119a76353434d6f18e450"
],
"markers": "python_version >= '3.7'",
"markers": "python_full_version >= '3.7.0'",
"version": "==63.2.0"
},
"six": {
@ -1194,7 +1195,7 @@
"sha256:2aba19d6a040e78d8b09de5c57e96207b09ed71d8e55ce0959eeee6c8e190d94",
"sha256:c840e62059cd3be204b0c9c9f74be2c09d5648eddd4580d9314c3ecde0b30936"
],
"markers": "python_version >= '3.6'",
"markers": "python_full_version >= '3.6.0'",
"version": "==5.0.0"
},
"snowballstemmer": {
@ -1209,7 +1210,7 @@
"sha256:3b2503d3c7084a42b1ebd08116e5f81aadfaea95863628c80a3b774a11b7c759",
"sha256:fc53893b3da2c33de295667a0e19f078c14bf86544af307354de5fcf12a3f30d"
],
"markers": "python_version >= '3.6'",
"markers": "python_full_version >= '3.6.0'",
"version": "==2.3.2.post1"
},
"sphinx": {
@ -1241,7 +1242,7 @@
"sha256:d412243dfb797ae3ec2b59eca0e52dac12e75a241bf0e4eb861e450d06c6ed07",
"sha256:f5f8bb2d0d629f398bf47d0d69c07bc13b65f75a81ad9e2f71a63d4b7a2f6db2"
],
"markers": "python_version >= '3.6'",
"markers": "python_full_version >= '3.6.0'",
"version": "==2.0.0"
},
"sphinxcontrib-jsmath": {
@ -1269,8 +1270,8 @@
"version": "==1.1.5"
},
"spiffworkflow": {
"git": "https://github.com/sartography/SpiffWorkflow",
"ref": "888326a0419fb0fd1f96a2bd8a9f9c3894f2dbdc"
"editable": true,
"path": "./../SpiffWorkflow"
},
"sqlalchemy": {
"hashes": [
@ -1327,7 +1328,7 @@
"sha256:238e70234214138ed7b4e8a0fab0e5e13872edab3be586ab8198c407620e2ab9",
"sha256:8b536a8ec63dc0751342b3984193a3118f8fca2afe25752bb9b7fffd398552d3"
],
"markers": "python_version >= '3.6'",
"markers": "python_full_version >= '3.6.0'",
"version": "==2022.1"
},
"tzlocal": {
@ -1335,7 +1336,7 @@
"sha256:89885494684c929d9191c57aa27502afc87a579be5cdd3225c77c463ea043745",
"sha256:ee5842fa3a795f023514ac2d801c4a81d1743bbe642e3940143326b3a00addd7"
],
"markers": "python_version >= '3.6'",
"markers": "python_full_version >= '3.6.0'",
"version": "==4.2"
},
"urllib3": {
@ -1343,7 +1344,7 @@
"sha256:8298d6d56d39be0e3bc13c1c97d133f9b45d797169a0e11cdd0e0489d786f7ec",
"sha256:879ba4d1e89654d9769ce13121e0f94310ea32e8d2f8cf587b77c08bbcdb30d6"
],
"markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3, 3.4, 3.5' and python_full_version < '4.0.0'",
"markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3, 3.4, 3.5' and python_version < '4'",
"version": "==1.26.10"
},
"vine": {
@ -1351,7 +1352,7 @@
"sha256:4c9dceab6f76ed92105027c49c823800dd33cacce13bdedc5b914e3514b7fb30",
"sha256:7d3b1624a953da82ef63462013bbd271d3eb75751489f9807598e8f340bd637e"
],
"markers": "python_version >= '3.6'",
"markers": "python_full_version >= '3.6.0'",
"version": "==5.0.0"
},
"waitress": {
@ -1359,7 +1360,7 @@
"sha256:7500c9625927c8ec60f54377d590f67b30c8e70ef4b8894214ac6e4cad233d2a",
"sha256:780a4082c5fbc0fde6a2fcfe5e26e6efc1e8f425730863c04085769781f51eba"
],
"markers": "python_version >= '3.7'",
"markers": "python_full_version >= '3.7.0'",
"version": "==2.1.2"
},
"wcwidth": {
@ -1468,7 +1469,7 @@
"sha256:6b351bbb12dd58af57ffef05bc78425d08d1914e0fd68ee14143b7ade023c5bc",
"sha256:837f2f0e0ca79481b92884962b914eba4e72b7a2daaf1f939c890ed0124b834b"
],
"markers": "python_version >= '3.7'",
"markers": "python_full_version >= '3.7.0'",
"version": "==3.0.1"
},
"xlrd": {
@ -1492,7 +1493,7 @@
"sha256:05b45f1ee8f807d0cc928485ca40a07cb491cf092ff587c0df9cb1fd154848d2",
"sha256:47c40d7fe183a6f21403a199b3e4192cca5774656965b0a4988ad2f8feb5f009"
],
"markers": "python_version >= '3.7'",
"markers": "python_full_version >= '3.7.0'",
"version": "==3.8.1"
}
},
@ -1564,7 +1565,7 @@
"sha256:dd47c42927d89ab911e606518907cc2d3a1f38bbd026385970643f9c5b8ecfeb",
"sha256:ef103e05f519cdc783ae24ea4e2e0f508a9c99b2d4969652eed6a2e1ea5bd522"
],
"markers": "python_version >= '3.6'",
"markers": "python_full_version >= '3.6.0'",
"version": "==21.3"
},
"pbr": {

View File

@ -212,15 +212,13 @@ class DocumentDirectory(object):
class WorkflowApi(object):
def __init__(self, id, status, next_task, navigation,
workflow_spec_id, total_tasks, completed_tasks,
workflow_spec_id,
last_updated, is_review, title, study_id, state, is_admin_workflow=False):
self.id = id
self.status = status
self.next_task = next_task # The next task that requires user input.
self.navigation = navigation
self.workflow_spec_id = workflow_spec_id
self.total_tasks = total_tasks
self.completed_tasks = completed_tasks
self.last_updated = last_updated
self.title = title
self.is_review = is_review
@ -245,7 +243,7 @@ class WorkflowApiSchema(ma.Schema):
@marshmallow.post_load
def make_workflow(self, data, **kwargs):
keys = ['id', 'status', 'next_task', 'navigation',
'workflow_spec_id', "total_tasks", "completed_tasks",
'workflow_spec_id',
"last_updated", "is_review", "title", "study_id", "state", "is_admin_workflow"]
filtered_fields = {key: data[key] for key in keys}
filtered_fields['next_task'] = TaskSchema().make_task(data['next_task'])

View File

@ -169,8 +169,6 @@ class WorkflowMetadata(object):
state=workflow.state or WorkflowState.optional.value,
state_message=workflow.state_message,
status=workflow.status,
total_tasks=workflow.total_tasks,
completed_tasks=workflow.completed_tasks,
is_review=spec.is_review,
display_order=spec.display_order,
workflow_spec_id=workflow.workflow_spec_id

View File

@ -114,8 +114,6 @@ class WorkflowModel(db.Model):
study_id = db.Column(db.Integer, db.ForeignKey('study.id'))
study = db.relationship("StudyModel", backref='workflow', lazy='select')
workflow_spec_id = db.Column(db.String)
total_tasks = db.Column(db.Integer, default=0)
completed_tasks = db.Column(db.Integer, default=0)
last_updated = db.Column(db.DateTime(timezone=True), server_default=func.now())
user_id = db.Column(db.String, default=None)
state = db.Column(db.String, nullable=True)

View File

@ -121,7 +121,7 @@ class WorkflowProcessor(object):
try:
parser = self.get_spec_parser(self.spec_files, spec_info)
top_level = parser.get_spec(spec_info.primary_process_id)
subprocesses = parser.get_process_specs()
subprocesses = parser.get_subprocess_specs(spec_info.primary_process_id)
self.bpmn_workflow = BpmnWorkflow(top_level, subprocesses, script_engine=self._script_engine)
self.bpmn_workflow.data[WorkflowProcessor.STUDY_ID_KEY] = workflow_model.study_id
self.bpmn_workflow.data[WorkflowProcessor.VALIDATION_PROCESS_KEY] = validate_only
@ -152,31 +152,21 @@ class WorkflowProcessor(object):
json_size = B / MB
if json_size > 1:
wf_json = json.loads(workflow_model.bpmn_workflow_json)
if 'spec' in wf_json and 'tasks' in wf_json: #
task_tree = wf_json['tasks']
test_spec = wf_json['spec']
task_size = "{:.2f}".format(len(json.dumps(task_tree).encode('utf-8')) / MB)
spec_size = "{:.2f}".format(len(json.dumps(test_spec).encode('utf-8')) / MB)
message = 'Workflow ' + workflow_model.workflow_spec_id + ' JSON Size is over 1MB:{0:.2f} MB'.format(
json_size)
message += f"\n Task Size: {task_size}"
message += f"\n Spec Size: {spec_size}"
app.logger.warning(message)
message = 'Workflow ' + workflow_model.workflow_spec_id + ' JSON Size is over 1MB:{0:.2f} MB'.format(
json_size)
for k,v in wf_json.items():
size = len(json.dumps(v).encode('utf-8')) / MB
if size > 0.2:
size_str = "{:.2f}".format(size)
message += f"\n {k} Size: {size_str}"
sub_size = len(json.dumps(wf_json['subprocesses']).encode('utf-8')) / MB
message += f"\n TOTAL Subprocesses: { len(wf_json['subprocesses']) }"
for sp in wf_json['subprocesses'].values():
sp_size = len(json.dumps(sp).encode('utf-8')) / MB
message += f"\n {list(sp['tasks'].values())[0]['workflow_name']} :: {sp_size}"
def check_sub_specs(test_spec, indent=0, show_all=False):
for my_spec_name in test_spec['task_specs']:
my_spec = test_spec['task_specs'][my_spec_name]
my_spec_size = len(json.dumps(my_spec).encode('utf-8')) / MB
if my_spec_size > 0.1 or show_all:
app.logger.warning(
(' ' * indent) + 'Sub-Spec ' + my_spec['name'] + ' :' + "{:.2f}".format(my_spec_size))
if 'spec' in my_spec:
my_show_all = False
if my_spec['name'] == 'Call_Emails_Process_Email':
my_show_all = True
check_sub_specs(my_spec['spec'], indent + 5)
check_sub_specs(test_spec, 5)
app.logger.warning(message)
@staticmethod
def reset(workflow_model, clear_data=False):
@ -261,7 +251,7 @@ class WorkflowProcessor(object):
parser = WorkflowProcessor.get_spec_parser(spec_files, spec_model)
try:
top_level = parser.get_spec(spec_model.primary_process_id)
subprocesses = parser.get_process_specs()
subprocesses = parser.get_subprocess_specs(spec_model.primary_process_id)
bpmn_workflow = BpmnWorkflow(top_level, subprocesses, script_engine=WorkflowProcessor._script_engine)
bpmn_workflow.data[WorkflowProcessor.STUDY_ID_KEY] = study.id
bpmn_workflow.data[WorkflowProcessor.VALIDATION_PROCESS_KEY] = False

View File

@ -126,7 +126,7 @@ class WorkflowService(object):
@staticmethod
def get_erroring_workflows():
workflows = session.query(WorkflowModel).filter(WorkflowModel.status==WorkflowStatus.erroring).all()
workflows = session.query(WorkflowModel).filter(WorkflowModel.status == WorkflowStatus.erroring).all()
return workflows
@staticmethod
@ -181,8 +181,6 @@ class WorkflowService(object):
count = 0
while not processor.bpmn_workflow.is_completed():
processor.bpmn_workflow.get_deep_nav_list() # Assure no errors with navigation.
exit_task = processor.bpmn_workflow.do_engine_steps(exit_at=test_until)
if (exit_task != None):
raise ApiError.from_task("validation_break",
@ -197,7 +195,8 @@ class WorkflowService(object):
f" current task data must have information mapping this role to "
f" a unique user id.", task)
if task.task_spec.lane is not None:
if isinstance(task.data[task.task_spec.lane], str) and not LdapService().user_exists(task.data[task.task_spec.lane]):
if isinstance(task.data[task.task_spec.lane], str) and not LdapService().user_exists(
task.data[task.task_spec.lane]):
raise ApiError.from_task("missing_user",
f"The user '{task.data[task.task_spec.lane]}' "
f"could not be found in LDAP. ", task)
@ -215,9 +214,9 @@ class WorkflowService(object):
if hasattr(task_api, 'form') and task_api.form is not None:
if task_api.form.key == '':
raise ApiError(code='missing_form_key',
message='Forms must include a Form Key.',
task_id=task.id,
task_name=task.get_name())
message='Forms must include a Form Key.',
task_id=task.id,
task_name=task.get_name())
WorkflowService.populate_form_with_random_data(task, task_api, required_only)
if not WorkflowService.validate_form(task, task_api):
# In the process of completing the form, it is possible for fields to become required
@ -247,7 +246,8 @@ class WorkflowService(object):
raise
except Exception as e:
# Catch generic exceptions so that the finally clause always executes
app.logger.error(f'Unexpected exception caught during validation. Original exception: {str(e)}', exc_info=True)
app.logger.error(f'Unexpected exception caught during validation. Original exception: {str(e)}',
exc_info=True)
raise ApiError(code='unknown_exception',
message=f'We caught an unexpected exception during validation. Original exception is: {str(e)}')
finally:
@ -335,8 +335,9 @@ class WorkflowService(object):
try:
form_data[field.id] = WorkflowService.get_default_value(field, task, data)
except Exception as e:
raise ApiError.from_task("bad default value", f'The default value "{field.default_value}" in field {field.id} '
f'could not be understood or evaluated. ',
raise ApiError.from_task("bad default value",
f'The default value "{field.default_value}" in field {field.id} '
f'could not be understood or evaluated. ',
task=task)
# If we have a good default value, and we aren't dealing with a repeat, we can stop here.
if form_data[field.id] is not None and not field.has_property(Task.FIELD_PROP_REPEAT):
@ -374,7 +375,8 @@ class WorkflowService(object):
f'for repeat and group expressions that is not also used for a field name.'
, task=task)
if field.has_property(Task.FIELD_PROP_REPEAT_HIDE_EXPRESSION):
result = WorkflowService.evaluate_property(Task.FIELD_PROP_REPEAT_HIDE_EXPRESSION, field, task, form_data)
result = WorkflowService.evaluate_property(Task.FIELD_PROP_REPEAT_HIDE_EXPRESSION, field, task,
form_data)
if not result:
hide_groups.append(group)
if group not in form_data and group not in hide_groups:
@ -554,7 +556,7 @@ class WorkflowService(object):
return default
else:
raise ApiError.from_task("unknown_lookup_option", "The settings for this auto complete field "
"are incorrect: %s " % field.id, task)
"are incorrect: %s " % field.id, task)
elif field.type == 'boolean':
default = str(default).lower()
if default == 'true' or default == 't':
@ -655,6 +657,30 @@ class WorkflowService(object):
letters = string.ascii_lowercase
return ''.join(random.choice(letters) for i in range(string_length))
@staticmethod
def get_navigation(processor):
"""Finds all the ready, completed, and future tasks and created nav_item objects for them."""
tasks = []
navigation = []
# Get ready, completed, and fuiture tasks
tasks.extend(processor.bpmn_workflow.get_tasks(TaskState.READY | TaskState.COMPLETED | TaskState.FUTURE))
# Filter this out to just the user tasks and the start task
user_tasks = list(filter(lambda task: isinstance(task.task_spec, UserTask)
or isinstance(task.task_spec, ManualTask)
or isinstance(task.task_spec, StartEvent), tasks))
for user_task in user_tasks:
if any(nav.name == user_task.task_spec.name and user_task.state == TaskState.FUTURE for nav in navigation):
continue # Don't re-add the same spec for future items
nav_item = NavItem.from_spec(spec=user_task.task_spec)
nav_item.state = user_task.state.name
nav_item.task_id = user_task.id
nav_item.indent = 0 # we should remove indent, this is not nested now.
navigation.append(nav_item)
WorkflowService.update_navigation(navigation, processor)
return navigation
@staticmethod
def processor_to_workflow_api(processor: WorkflowProcessor, next_task=None):
"""Returns an API model representing the state of the current workflow, if requested, and
@ -668,10 +694,8 @@ class WorkflowService(object):
id=processor.get_workflow_id(),
status=processor.get_status(),
next_task=None,
navigation=navigation,
navigation=WorkflowService.get_navigation(processor),
workflow_spec_id=processor.workflow_spec_id,
total_tasks=len(navigation),
completed_tasks=processor.workflow_model.completed_tasks,
last_updated=processor.workflow_model.last_updated,
is_review=spec.is_review,
title=spec.display_name,
@ -711,7 +735,6 @@ class WorkflowService(object):
impersonator_is_admin = UserService.user_is_admin(allow_admin_impersonate=True)
if not in_list and not impersonator_is_admin:
nav_item.state = WorkflowService.TASK_STATE_LOCKED
print('StartEvent: ')
else:
# Strip off the first word in the description, to meet guidlines for BPMN.
if nav_item.description:
@ -806,7 +829,7 @@ class WorkflowService(object):
for i, field in enumerate(task.form.fields):
task.form.fields[i] = WorkflowService.process_options(spiff_task, field)
# If there is a default value, set it.
#if field.id not in task.data and WorkflowService.get_default_value(field, spiff_task) is not None:
# if field.id not in task.data and WorkflowService.get_default_value(field, spiff_task) is not None:
# task.data[field.id] = WorkflowService.get_default_value(field, spiff_task)
task.documentation = WorkflowService._process_documentation(spiff_task)
@ -844,8 +867,9 @@ class WorkflowService(object):
title = spiff_task.workflow.script_engine.evaluate(spiff_task, title)
except Exception as e:
# if the task is ready, we should raise an error, but if it is in the future or the past, we may not
# have the information we need to properly set the title, so don't error out, and just use what is
# provided.
# have the information we need to properly set the title, so don't error out, and just use the base
# description for now.
title = spiff_task.task_spec.description
if spiff_task.state == TaskState.READY:
raise ApiError.from_task(code="task_title_error",
message="Could not set task title on task %s with '%s' property because %s" %
@ -855,7 +879,6 @@ class WorkflowService(object):
title = title.partition(' ')[2]
return title
@staticmethod
def _process_properties(spiff_task, props):
"""Runs all the property values through the Jinja2 processor to inject data."""

View File

@ -357,14 +357,6 @@ class BaseTest(unittest.TestCase):
# Assure task events are updated on the model
workflow = WorkflowApiSchema().load(json_data)
# The total number of tasks may change over time, as users move through gateways
# branches may be pruned. As we hit parallel Multi-Instance new tasks may be created...
self.assertIsNotNone(workflow.total_tasks)
# presumably, we also need to deal with sequential items here too . .
# You may loop back to a previous task, which would actually reduce the number of
# completed tasks. So this test doesn't seem correct.
# if not task_in.multi_instance_type == 'looping' and not update_all:
# self.assertEqual(prev_completed_task_count + 1, workflow.completed_tasks)
# Assure a record exists in the Task Events
task_events = session.query(TaskEventModel) \

View File

@ -72,8 +72,6 @@ class TestStudyApi(BaseTest):
self.assertEqual("random_fact", workflow["display_name"])
self.assertEqual("optional", workflow["state"])
self.assertEqual("not_started", workflow["status"])
self.assertEqual(0, workflow["total_tasks"])
self.assertEqual(0, workflow["completed_tasks"])
def test_get_study_updates_workflow_state(self):
self.load_test_spec('test_master_workflow', master_spec=True)

View File

@ -30,7 +30,7 @@ class TestMultiinstanceTasksApi(BaseTest):
# get the first form in the two form workflow.
workflow_api = self.get_workflow_api(workflow)
navigation = self.get_workflow_api(workflow_api).navigation
self.assertEqual(5, len(navigation)) # Start task, form_task, multi_task, end task
self.assertEqual(2, len(navigation)) # Start task, multi-instance/user task
self.assertEqual("UserTask", workflow_api.next_task.type)
self.assertEqual(MultiInstanceType.sequential.value, workflow_api.next_task.multi_instance_type)
self.assertEqual(5, workflow_api.next_task.multi_instance_count)
@ -52,9 +52,9 @@ class TestMultiinstanceTasksApi(BaseTest):
workflow = self.create_workflow('multi_instance_parallel')
workflow_api = self.get_workflow_api(workflow)
self.assertEqual(9, len(workflow_api.navigation))
self.assertEqual(6, len(workflow_api.navigation)) # Start event + 5 investigators
ready_items = [nav for nav in workflow_api.navigation if nav.state == "READY"]
self.assertEqual(5, len(ready_items))
self.assertEqual(5, len(ready_items)) # Just the 5 investigators.
self.assertEqual("UserTask", workflow_api.next_task.type)
self.assertEqual("MultiInstanceTask",workflow_api.next_task.name)
@ -90,7 +90,7 @@ class TestMultiinstanceTasksApi(BaseTest):
workflow = self.create_workflow('multi_instance_parallel')
workflow_api = self.get_workflow_api(workflow)
self.assertEqual(9, len(workflow_api.navigation))
self.assertEqual(6, len(workflow_api.navigation)) # Start + 5 multi-instance investigators
ready_items = [nav for nav in workflow_api.navigation if nav.state == "READY"]
self.assertEqual(5, len(ready_items))

View File

@ -8,6 +8,7 @@ from crc.models.api_models import WorkflowApiSchema
from crc.models.file import FileModelSchema
from crc.models.workflow import WorkflowModel, WorkflowState, WorkflowStatus
from crc.models.task_event import TaskEventModel
from crc.services.workflow_processor import WorkflowProcessor
class TestTasksApi(BaseTest):
@ -60,7 +61,7 @@ class TestTasksApi(BaseTest):
# get the first form in the two form workflow.
workflow_api = self.get_workflow_api(workflow)
self.assertEqual('two_forms', workflow_api.workflow_spec_id)
self.assertEqual(5, len(workflow_api.navigation))
self.assertEqual(3, len(workflow_api.navigation)) # start and 2 forms
self.assertIsNotNone(workflow_api.next_task.form)
self.assertEqual("UserTask", workflow_api.next_task.type)
self.assertEqual("StepOne", workflow_api.next_task.name)
@ -99,43 +100,36 @@ class TestTasksApi(BaseTest):
def test_navigation_with_parallel_forms(self):
workflow = self.create_workflow('exclusive_gateway')
# get the first form in the two form workflow.
workflow_api = self.get_workflow_api(workflow)
self.assertIsNotNone(workflow_api.navigation)
nav = workflow_api.navigation
processor = WorkflowProcessor(workflow)
nav = processor.bpmn_workflow.get_deep_nav_list()
self.assertEqual(4, len(nav))
self.assertEqual("Do You Have Bananas", nav[1].description)
self.assertEqual("Bananas?", nav[2].description)
self.assertEqual("MAYBE", nav[2].state)
self.assertEqual("Enter Do You Have Bananas", nav[1].description)
self.assertEqual("Has Bananas?", nav[2].description)
self.assertEqual("yes", nav[2].children[0].description)
self.assertEqual("MAYBE", nav[2].children[0].state)
self.assertEqual("of Bananas", nav[2].children[0].children[0].description)
self.assertEqual("Number of Bananas", nav[2].children[0].children[0].description)
self.assertEqual("EndEvent", nav[2].children[0].children[1].spec_type)
self.assertEqual("no", nav[2].children[1].description)
self.assertEqual("MAYBE", nav[2].children[1].state)
self.assertEqual("no bananas", nav[2].children[1].children[0].description)
self.assertEqual("Why no bananas", nav[2].children[1].children[0].description)
self.assertEqual("EndEvent", nav[2].children[1].children[1].spec_type)
def test_navigation_with_exclusive_gateway(self):
workflow = self.create_workflow('exclusive_gateway_2')
processor = WorkflowProcessor(workflow)
# get the first form in the two form workflow.
workflow_api = self.get_workflow_api(workflow)
self.assertIsNotNone(workflow_api.navigation)
nav = workflow_api.navigation
nav = processor.bpmn_workflow.get_deep_nav_list()
self.assertEqual(7, len(nav))
self.assertEqual("Task 1", nav[1].description)
self.assertEqual("Which Branch?", nav[2].description)
self.assertEqual("Enter Task 1", nav[1].description)
self.assertEqual("Decide Which Branch?", nav[2].description)
self.assertEqual("a", nav[2].children[0].description)
self.assertEqual("Task 2a", nav[2].children[0].children[0].description)
self.assertEqual("Enter Task 2a", nav[2].children[0].children[0].description)
self.assertEqual("b", nav[2].children[1].description)
self.assertEqual("Task 2b", nav[2].children[1].children[0].description)
self.assertEqual("Enter Task 2b", nav[2].children[1].children[0].description)
self.assertEqual(None, nav[3].description)
self.assertEqual("Task 3", nav[4].description)
self.assertEqual("Enter Task 3", nav[4].description)
self.assertEqual("EndEvent", nav[5].spec_type)
def test_document_added_to_workflow_shows_up_in_file_list(self):
@ -356,7 +350,7 @@ class TestTasksApi(BaseTest):
navigation = workflow_api.navigation
task = workflow_api.next_task
self.assertEqual(5, len(navigation))
self.assertEqual(4, len(navigation)) # 2 start events + 2 user tasks
self.assertEqual("UserTask", task.type)
self.assertEqual("Activity_A", task.name)
self.assertEqual("My Sub Process", task.process_name)

View File

@ -64,7 +64,7 @@ class TestUserRoles(BaseTest):
workflow_api = self.get_workflow_api(workflow, user_uid=submitter.uid)
nav = workflow_api.navigation
self.assertEqual(4, len(nav))
self.assertEqual(3, len(nav))
self.assertEqual("supervisor", nav[2].lane)
def test_get_outstanding_tasks_awaiting_current_user(self):
@ -123,10 +123,9 @@ class TestUserRoles(BaseTest):
# Navigation as Submitter with ready task.
workflow_api = self.get_workflow_api(workflow, user_uid=submitter.uid)
nav = workflow_api.navigation
self.assertEqual(4, len(nav))
self.assertEqual(3, len(nav))
self.assertEqual('READY', nav[1].state) # First item is ready, no progress yet.
self.assertEqual('LOCKED', nav[2].state) # Second item is locked, it is the review and doesn't belong to this user.
self.assertEqual('MAYBE', nav[3].state) # Third item is a gateway, which contains things that are also locked.
self.assertEqual('READY', workflow_api.next_task.state)
# Navigation as Submitter after handoff to supervisor
@ -136,7 +135,6 @@ class TestUserRoles(BaseTest):
nav = workflow_api.navigation
self.assertEqual('COMPLETED', nav[1].state) # First item is ready, no progress yet.
self.assertEqual('LOCKED', nav[2].state) # Second item is locked, it is the review and doesn't belong to this user.
self.assertEqual('MAYBE', nav[3].state) # third item is a gateway, and belongs to no one
# In the event the next task is locked, we should say something sensible here.
# It is possible to look at the role of the task, and say The next task "TASK TITLE" will
# be handled by 'dhf8r', who is full-filling the role of supervisor. the Task Data
@ -150,7 +148,6 @@ class TestUserRoles(BaseTest):
nav = workflow_api.navigation
self.assertEqual('LOCKED', nav[1].state) # First item belongs to the submitter, and is locked.
self.assertEqual('READY', nav[2].state) # Second item is ready, as we are now the supervisor.
self.assertEqual('MAYBE', nav[3].state) # Feedback is locked.
self.assertEqual('READY', workflow_api.next_task.state)
data = workflow_api.next_task.data
@ -161,15 +158,6 @@ class TestUserRoles(BaseTest):
nav = workflow_api.navigation
self.assertEqual('LOCKED', nav[1].state) # First item belongs to the submitter, and is locked.
self.assertEqual('COMPLETED', nav[2].state) # Second item is locked, it is the review and doesn't belong to this user.
self.assertEqual('READY', nav[3].state) # Gateway is ready, and should be unfolded
# order of these is unclear ...
approved = list(filter(lambda child: child.name == 'approved', nav[3].children))[0]
rejected = list(filter(lambda child: child.name == 'rejected', nav[3].children))[0]
self.assertEqual(None, approved.state) # sequence flow for approved is none - we aren't going this way.
self.assertEqual('READY', rejected.state) # sequence flow for denied is ready
self.assertEqual('LOCKED', rejected.children[0].state) # Feedback is locked, it belongs to submitter
self.assertEqual('LOCKED', rejected.children[0].state) # Approval is locked, it belongs to the submitter
self.assertEqual('LOCKED', workflow_api.next_task.state)
# Navigation as Submitter, coming back in to a rejected workflow to view the rejection message.
workflow_api = self.get_workflow_api(workflow, user_uid=submitter.uid)
@ -178,12 +166,6 @@ class TestUserRoles(BaseTest):
self.assertEqual('COMPLETED', nav[1].state) # First item belongs to the submitter, and is locked.
self.assertEqual('LOCKED', nav[2].state) # Second item is locked, it is the review and doesn't belong to this user.
self.assertEqual('READY', nav[3].state)
# order of these is unclear ...
approved = list(filter(lambda child: child.name == 'approved', nav[3].children))[0]
rejected = list(filter(lambda child: child.name == 'rejected', nav[3].children))[0]
self.assertEqual(None, approved.state) # sequence flow for approved is none - we aren't going this way.
self.assertEqual('READY', rejected.state) # sequence flow for denied is ready
self.assertEqual('READY', rejected.children[0].state) # Feedback is locked, it belongs to submitter
# Navigation as Submitter, re-completing the original request a second time, and sending it for review.
workflow_api = self.complete_form(workflow, workflow_api.next_task, data, user_uid=submitter.uid)

View File

@ -171,7 +171,7 @@ class TestWorkflowProcessorMultiInstance(BaseTest):
# Assure navigation picks up the label of the current element variable.
nav = WorkflowService.processor_to_workflow_api(processor, task).navigation
self.assertEqual("Primary Investigator", nav[2].description)
self.assertEqual("Primary Investigator", nav[1].description)
task.update_data({"investigator": {"email": "dhf8r@virginia.edu"}})
processor.complete_task(task)