added example blueprint items and fixed some formatting w/ burnettk

jasquat 2022-05-13 14:08:07 -04:00
parent 116145e91f
commit 5ea9dd3a44
13 changed files with 388 additions and 72 deletions

@@ -1 +1,21 @@
"""Spiff Workflow Webapp."""
from flask import Flask

from .routes.api import api
from .routes.main import main
from spiff_workflow_webapp.extensions import db
from spiff_workflow_webapp.extensions import migrate


def create_app():
    """Create_app."""
    app = Flask(__name__)
    app.config["SQLALCHEMY_DATABASE_URI"] = "sqlite:///db.sqlite3"
    app.config["SQLALCHEMY_TRACK_MODIFICATIONS"] = False

    db.init_app(app)
    migrate.init_app(app, db)

    app.register_blueprint(main)
    app.register_blueprint(api)

    return app
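For context, a minimal sketch of serving this factory with the Flask development server. The entry-point file name and the `debug` flag are assumptions for illustration, not part of this commit:

```python
# wsgi.py -- hypothetical entry point for the app factory above
from spiff_workflow_webapp import create_app

app = create_app()

if __name__ == "__main__":
    # Development-only settings; illustrative.
    app.run(debug=True)
```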

Binary file not shown.

@@ -0,0 +1,5 @@
from flask_sqlalchemy import SQLAlchemy
from flask_migrate import Migrate

db = SQLAlchemy()
migrate = Migrate()
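`db` and `migrate` are deliberately created unbound at module level: model and route modules can import them without importing the app, and `create_app` binds them later via `init_app`, which avoids circular imports. A sketch of what that buys you, binding the same `db` to a second, throwaway app (the in-memory URI is an assumption):

```python
# Hypothetical second binding, e.g. for tests; not part of this commit.
from flask import Flask

from spiff_workflow_webapp.extensions import db

test_app = Flask(__name__)
test_app.config["SQLALCHEMY_DATABASE_URI"] = "sqlite:///:memory:"  # assumed URI
db.init_app(test_app)

with test_app.app_context():
    db.create_all()  # creates tables for every model imported so far
```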

@@ -1,11 +1,10 @@
+from crc import db
from flask_marshmallow.sqla import SQLAlchemyAutoSchema
from sqlalchemy import func
-from crc import db


class DataStoreModel(db.Model):
-    __tablename__ = 'data_store'
+    __tablename__ = "data_store"
    id = db.Column(db.Integer, primary_key=True)
    last_updated = db.Column(db.DateTime(timezone=True), server_default=func.now())
    key = db.Column(db.String, nullable=False)
@@ -14,7 +13,7 @@ class DataStoreModel(db.Model):
    task_spec = db.Column(db.String)
    spec_id = db.Column(db.String)
    user_id = db.Column(db.String, nullable=True)
-    file_id = db.Column(db.Integer, db.ForeignKey('file.id'), nullable=True)
+    file_id = db.Column(db.Integer, db.ForeignKey("file.id"), nullable=True)
    value = db.Column(db.String)
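As a usage sketch for this model (the `session` import mirrors the service code later in this diff; the key, value, and study id are illustrative):

```python
# Illustrative only: write a data_store row, then read it back.
from crc import session
from crc.models.data_store import DataStoreModel

session.add(DataStoreModel(key="irb_approved", value="yes", study_id=42))
session.commit()

record = (
    session.query(DataStoreModel)
    .filter_by(study_id=42, key="irb_approved")
    .first()
)
print(record.value, record.last_updated)  # last_updated filled by func.now()
```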

@@ -0,0 +1,5 @@
from ..extensions import db


class User(db.Model):
    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.String(50))

@@ -0,0 +1,6 @@
from ..extensions import db


class Video(db.Model):
    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.String(50))
    url = db.Column(db.String(50))

@@ -1,16 +1,22 @@
import enum
import marshmallow
-from marshmallow import EXCLUDE, post_load, fields, INCLUDE
+from crc import db
+from crc import ma
+from marshmallow import EXCLUDE
+from marshmallow import fields
+from marshmallow import INCLUDE
+from marshmallow import post_load
from sqlalchemy import func
from sqlalchemy.orm import deferred
-from crc import db, ma


-class WorkflowSpecCategory(object):
+class WorkflowSpecCategory:
    def __init__(self, id, display_name, display_order=0, admin=False):
-        self.id = id  # A unique string name, lower case, under scores (ie, 'my_category')
+        """__init__."""
+        self.id = (
+            id  # A unique string name, lower case, under scores (ie, 'my_category')
+        )
        self.display_name = display_name
        self.display_order = display_order
        self.admin = admin
@@ -19,6 +25,7 @@ class WorkflowSpecCategory(object):
        self.meta = None  # For storing category metadata

    def __eq__(self, other):
+        """__eq__."""
        if not isinstance(other, WorkflowSpecCategory):
            return False
        if other.id == self.id:
@@ -33,13 +40,26 @@ class WorkflowSpecCategorySchema(ma.Schema):
    @post_load
    def make_cat(self, data, **kwargs):
+        """Make_cat."""
        return WorkflowSpecCategory(**data)


-class WorkflowSpecInfo(object):
-    def __init__(self, id, display_name, description, is_master_spec=False,
-                 standalone=False, library=False, primary_file_name='', primary_process_id='',
-                 libraries=[], category_id="", display_order=0, is_review=False):
+class WorkflowSpecInfo:
+    def __init__(
+        self,
+        id,
+        display_name,
+        description,
+        is_master_spec=False,
+        standalone=False,
+        library=False,
+        primary_file_name="",
+        primary_process_id="",
+        libraries=[],
+        category_id="",
+        display_order=0,
+        is_review=False,
+    ):
        self.id = id  # String unique id
        self.display_name = display_name
        self.description = description
@@ -64,6 +84,7 @@ class WorkflowSpecInfo(object):
class WorkflowSpecInfoSchema(ma.Schema):
    class Meta:
        model = WorkflowSpecInfo

    id = marshmallow.fields.String(required=True)
    display_name = marshmallow.fields.String(required=True)
    description = marshmallow.fields.String()
@@ -107,12 +128,12 @@ class WorkflowStatus(enum.Enum):
class WorkflowModel(db.Model):
-    __tablename__ = 'workflow'
+    __tablename__ = "workflow"
    id = db.Column(db.Integer, primary_key=True)
    bpmn_workflow_json = deferred(db.Column(db.JSON))
    status = db.Column(db.Enum(WorkflowStatus))
-    study_id = db.Column(db.Integer, db.ForeignKey('study.id'))
-    study = db.relationship("StudyModel", backref='workflow', lazy='select')
+    study_id = db.Column(db.Integer, db.ForeignKey("study.id"))
+    study = db.relationship("StudyModel", backref="workflow", lazy="select")
    workflow_spec_id = db.Column(db.String)
    total_tasks = db.Column(db.Integer, default=0)
    completed_tasks = db.Column(db.Integer, default=0)
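For context, a hypothetical round trip through the category schema above. The schema's field declarations fall outside this hunk, so the payload keys are assumptions:

```python
# Assumes WorkflowSpecCategorySchema declares id/display_name/display_order/admin
# fields matching WorkflowSpecCategory.__init__ (not shown in the hunk above).
category = WorkflowSpecCategory(id="approvals", display_name="Approvals")

schema = WorkflowSpecCategorySchema()
payload = schema.dump(category)  # e.g. {"id": "approvals", "display_name": "Approvals", ...}
restored = schema.load(payload)  # @post_load make_cat rebuilds the object
assert restored == category      # __eq__ above compares by id
```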

@@ -0,0 +1,13 @@
from flask import Blueprint

from ..extensions import db
from ..models.user import User

api = Blueprint('api', __name__)


@api.route('/user/<name>')
def get_user(name):
    """Look up a user by name."""
    user = User.query.filter_by(name=name).first()
    if user is None:
        return {'user': None}, 404
    return {'user': user.name}

@@ -0,0 +1,16 @@
from flask import Blueprint

from ..extensions import db
from ..models.user import User
from ..models.video import Video

main = Blueprint('main', __name__)


@main.route('/user/<name>')
def create_user(name):
    """Create_user."""
    user = User(name=name)
    db.session.add(user)
    db.session.commit()
    return 'Created User!'
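Note that both blueprints map `/user/<name>` with no `url_prefix`; since `create_app` registers `main` first, its rule handles that path in practice, and a `url_prefix` on one blueprint would disambiguate. A sketch of exercising the route with Flask's test client (illustrative, not part of the commit):

```python
from spiff_workflow_webapp import create_app
from spiff_workflow_webapp.extensions import db

app = create_app()
with app.app_context():
    db.create_all()  # the blueprints import the models, so the tables exist

with app.test_client() as client:
    resp = client.get('/user/anthony')  # handled by the main blueprint
    print(resp.get_data(as_text=True))  # 'Created User!'
```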

@@ -1,82 +1,126 @@
from crc import session
-from spiff_workflow_webapp.api.api_error import ApiError
from crc.models.data_store import DataStoreModel
from crc.models.workflow import WorkflowModel
from flask import g
from sqlalchemy import desc
+from spiff_workflow_webapp.api.api_error import ApiError


-class DataStoreBase(object):
-    def set_validate_common(self, task_id, study_id, workflow_id, script_name, user_id, file_id, *args):
+class DataStoreBase:
+    def set_validate_common(
+        self, task_id, study_id, workflow_id, script_name, user_id, file_id, *args
+    ):
+        """Set_validate_common."""
        self.check_args_2(args, script_name)
        key = args[0]
        value = args[1]
-        if script_name == 'study_data_set':
-            record = {'task_id': task_id, 'study_id': study_id, 'workflow_id': workflow_id, key: value}
-        elif script_name == 'file_data_set':
-            record = {'task_id': task_id, 'study_id': study_id, 'workflow_id': workflow_id, 'file_id': file_id, key: value}
-        elif script_name == 'user_data_set':
-            record = {'task_id': task_id, 'study_id': study_id, 'workflow_id': workflow_id, 'user_id': user_id, key: value}
+        if script_name == "study_data_set":
+            record = {
+                "task_id": task_id,
+                "study_id": study_id,
+                "workflow_id": workflow_id,
+                key: value,
+            }
+        elif script_name == "file_data_set":
+            record = {
+                "task_id": task_id,
+                "study_id": study_id,
+                "workflow_id": workflow_id,
+                "file_id": file_id,
+                key: value,
+            }
+        elif script_name == "user_data_set":
+            record = {
+                "task_id": task_id,
+                "study_id": study_id,
+                "workflow_id": workflow_id,
+                "user_id": user_id,
+                key: value,
+            }
        g.validation_data_store.append(record)
        return record
-    def get_validate_common(self, script_name, study_id=None, user_id=None, file_id=None, *args):
+    def get_validate_common(
+        self, script_name, study_id=None, user_id=None, file_id=None, *args
+    ):
        # This method uses a temporary validation_data_store that is only available for the current validation request.
        # This allows us to set data_store values during validation that don't affect the real data_store.
        # For data_store `gets`, we first look in the temporary validation_data_store.
        # If we don't find an entry in validation_data_store, we look in the real data_store.
        key = args[0]
-        if script_name == 'study_data_get':
+        if script_name == "study_data_get":
            # If it's in the validation data store, return it
            for record in g.validation_data_store:
-                if 'study_id' in record and record['study_id'] == study_id and key in record:
+                if (
+                    "study_id" in record
+                    and record["study_id"] == study_id
+                    and key in record
+                ):
                    return record[key]
            # If not in validation_data_store, look in the actual data_store
-            return self.get_data_common(study_id, user_id, 'study_data_get', file_id, *args)
-        elif script_name == 'file_data_get':
+            return self.get_data_common(
+                study_id, user_id, "study_data_get", file_id, *args
+            )
+        elif script_name == "file_data_get":
            for record in g.validation_data_store:
-                if 'file_id' in record and record['file_id'] == file_id and key in record:
+                if (
+                    "file_id" in record
+                    and record["file_id"] == file_id
+                    and key in record
+                ):
                    return record[key]
-            return self.get_data_common(study_id, user_id, 'file_data_get', file_id, *args)
-        elif script_name == 'user_data_get':
+            return self.get_data_common(
+                study_id, user_id, "file_data_get", file_id, *args
+            )
+        elif script_name == "user_data_get":
            for record in g.validation_data_store:
-                if 'user_id' in record and record['user_id'] == user_id and key in record:
+                if (
+                    "user_id" in record
+                    and record["user_id"] == user_id
+                    and key in record
+                ):
                    return record[key]
-            return self.get_data_common(study_id, user_id, 'user_data_get', file_id, *args)
+            return self.get_data_common(
+                study_id, user_id, "user_data_get", file_id, *args
+            )
    @staticmethod
-    def check_args(args, maxlen=1, script_name='study_data_get'):
+    def check_args(args, maxlen=1, script_name="study_data_get"):
+        """Check_args."""
        if len(args) < 1 or len(args) > maxlen:
-            raise ApiError(code="missing_argument",
-                           message=f"The {script_name} script takes either one or two arguments, "
-                                   f"starting with the key and an optional default")
+            raise ApiError(
+                code="missing_argument",
+                message=f"The {script_name} script takes either one or two arguments, "
+                f"starting with the key and an optional default",
+            )

    @staticmethod
-    def check_args_2(args, script_name='study_data_set'):
+    def check_args_2(args, script_name="study_data_set"):
+        """Check_args_2."""
        if len(args) != 2:
-            raise ApiError(code="missing_argument",
-                           message=f"The {script_name} script takes two arguments, key and value, in that order.")
+            raise ApiError(
+                code="missing_argument",
+                message=f"The {script_name} script takes two arguments, key and value, in that order.",
+            )
-    def set_data_common(self,
-                        task_spec,
-                        study_id,
-                        user_id,
-                        workflow_id,
-                        script_name,
-                        file_id,
-                        *args):
+    def set_data_common(
+        self, task_spec, study_id, user_id, workflow_id, script_name, file_id, *args
+    ):
        self.check_args_2(args, script_name=script_name)
        key = args[0]
        value = args[1]
-        if value == '' or value is None:
+        if value == "" or value is None:
            # We delete the data store if the value is empty
            return self.delete_data_store(study_id, user_id, file_id, *args)
        workflow_spec_id = None
        if workflow_id is not None:
-            workflow = session.query(WorkflowModel).filter(WorkflowModel.id == workflow_id).first()
+            workflow = (
+                session.query(WorkflowModel)
+                .filter(WorkflowModel.id == workflow_id)
+                .first()
+            )
            workflow_spec_id = workflow.workflow_spec_id
        # Check if this data store is previously set
@@ -102,14 +146,16 @@ class DataStoreBase(object):
            # This just gets rid of all the old unused records
            self.delete_extra_data_stores(result[1:])
        else:
-            dsm = DataStoreModel(key=key,
-                                 value=value,
-                                 study_id=study_id,
-                                 task_spec=task_spec,
-                                 user_id=user_id,  # Make this available to any User
-                                 file_id=file_id,
-                                 workflow_id=workflow_id,
-                                 spec_id=workflow_spec_id)
+            dsm = DataStoreModel(
+                key=key,
+                value=value,
+                study_id=study_id,
+                task_spec=task_spec,
+                user_id=user_id,  # Make this available to any User
+                file_id=file_id,
+                workflow_id=workflow_id,
+                spec_id=workflow_spec_id,
+            )
        session.add(dsm)
        session.commit()
@@ -117,10 +163,11 @@ class DataStoreBase(object):
    def get_data_common(self, study_id, user_id, script_name, file_id=None, *args):
        self.check_args(args, 2, script_name)
-        record = session.query(DataStoreModel).filter_by(study_id=study_id,
-                                                         user_id=user_id,
-                                                         file_id=file_id,
-                                                         key=args[0]).first()
+        record = (
+            session.query(DataStoreModel)
+            .filter_by(study_id=study_id, user_id=user_id, file_id=file_id, key=args[0])
+            .first()
+        )
        if record:
            return record.value
        else:
@@ -130,9 +177,9 @@ class DataStoreBase(object):
    @staticmethod
    def get_multi_common(study_id, user_id, file_id=None):
-        results = session.query(DataStoreModel).filter_by(study_id=study_id,
-                                                          user_id=user_id,
-                                                          file_id=file_id)
+        results = session.query(DataStoreModel).filter_by(
+            study_id=study_id, user_id=user_id, file_id=file_id
+        )
        return results

    @staticmethod
@@ -153,8 +200,10 @@ class DataStoreBase(object):
    @staticmethod
    def delete_extra_data_stores(records):
        """We had a bug where we created new records instead of updating existing records.

        We use this to clean up all the extra records.
        We may remove this method in the future."""
        for record in records:
-            session.query(DataStoreModel).filter(DataStoreModel.id == record.id).delete()
+            session.query(DataStoreModel).filter(
+                DataStoreModel.id == record.id
+            ).delete()
        session.commit()
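A sketch of the validation-store behavior described in the comments above, driving `DataStoreBase` directly inside a test request context (study id, key, and value are illustrative; `DataStoreBase` is the class defined above):

```python
# Illustrative: a set during validation lands in g.validation_data_store,
# and a get reads it back without touching the real data_store.
from flask import Flask, g

app = Flask(__name__)

with app.test_request_context():
    g.validation_data_store = []  # normally initialized by the validation machinery
    base = DataStoreBase()

    # Positional args mirror set_validate_common's signature; the last two
    # values are the (key, value) pair consumed via *args.
    base.set_validate_common(1, 42, 7, "study_data_set", None, None, "irb_approved", "yes")

    value = base.get_validate_common("study_data_get", 42, None, None, "irb_approved")
    assert value == "yes"
```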

@@ -0,0 +1,182 @@
"""Spiff Workflow Connector."""
import argparse
import sys
import traceback
import json
from jinja2 import Template
from SpiffWorkflow.task import Task
from SpiffWorkflow.bpmn.workflow import BpmnWorkflow
from SpiffWorkflow.bpmn.specs.ManualTask import ManualTask
from SpiffWorkflow.bpmn.specs.ScriptTask import ScriptTask
from SpiffWorkflow.bpmn.specs.events.event_types import CatchingEvent, ThrowingEvent
from SpiffWorkflow.camunda.parser.CamundaParser import CamundaParser
from SpiffWorkflow.camunda.specs.UserTask import EnumFormField, UserTask
from SpiffWorkflow.dmn.parser.BpmnDmnParser import BpmnDmnParser
from SpiffWorkflow.bpmn.serializer.workflow import BpmnWorkflowSerializer
from SpiffWorkflow.camunda.serializer.task_spec_converters import UserTaskConverter
from SpiffWorkflow.dmn.serializer.task_spec_converters import BusinessRuleTaskConverter
from custom_script_engine import CustomScriptEngine
wf_spec_converter = BpmnWorkflowSerializer.configure_workflow_spec_converter(
    [UserTaskConverter, BusinessRuleTaskConverter])
serializer = BpmnWorkflowSerializer(wf_spec_converter)
class Parser(BpmnDmnParser):
    OVERRIDE_PARSER_CLASSES = BpmnDmnParser.OVERRIDE_PARSER_CLASSES
    OVERRIDE_PARSER_CLASSES.update(CamundaParser.OVERRIDE_PARSER_CLASSES)


def parse(process, bpmn_files, dmn_files):
    parser = Parser()
    parser.add_bpmn_files(bpmn_files)
    if dmn_files:
        parser.add_dmn_files(dmn_files)
    return BpmnWorkflow(parser.get_spec(process), script_engine=CustomScriptEngine)
def select_option(prompt, options):
    option = input(prompt)
    while option not in options:
        print("Invalid selection")
        option = input(prompt)
    return option
def display_task(task):
    print(f'\n{task.task_spec.description}')
    if task.task_spec.documentation is not None:
        template = Template(task.task_spec.documentation)
        print(template.render(task.data))


def format_task(task, include_state=True):
    if hasattr(task.task_spec, 'lane') and task.task_spec.lane is not None:
        lane = f'[{task.task_spec.lane}]'
    else:
        lane = ''
    state = f'[{task.get_state_name()}]' if include_state else ''
    return f'{lane} {task.task_spec.description} ({task.task_spec.name}) {state}'
def complete_user_task(task):
    display_task(task)
    if task.data is None:
        task.data = {}

    for field in task.task_spec.form.fields:
        if isinstance(field, EnumFormField):
            option_map = dict([(opt.name, opt.id) for opt in field.options])
            options = "(" + ', '.join(option_map) + ")"
            prompt = f"{field.label} {options} "
            option = select_option(prompt, option_map.keys())
            response = option_map[option]
        else:
            response = input(f"{field.label} ")
            if field.type == "long":
                response = int(response)
        task.update_data_var(field.id, response)


def complete_manual_task(task):
    display_task(task)
    input("Press any key to mark task complete")
def print_state(workflow):
    task = workflow.last_task
    print('\nLast Task')
    print(format_task(task))
    print(json.dumps(task.data, indent=2, separators=[', ', ': ']))

    display_types = (UserTask, ManualTask, ScriptTask, ThrowingEvent, CatchingEvent)
    all_tasks = [task for task in workflow.get_tasks() if isinstance(task.task_spec, display_types)]
    upcoming_tasks = [task for task in all_tasks if task.state in [Task.READY, Task.WAITING]]

    print('\nUpcoming Tasks')
    for idx, task in enumerate(upcoming_tasks):
        print(format_task(task))

    if input('\nShow all tasks? ').lower() == 'y':
        for idx, task in enumerate(all_tasks):
            print(format_task(task))
def run(workflow, step):
    workflow.do_engine_steps()

    while not workflow.is_completed():
        ready_tasks = workflow.get_ready_user_tasks()
        options = {}
        print()
        for idx, task in enumerate(ready_tasks):
            option = format_task(task, False)
            options[str(idx + 1)] = task
            print(f'{idx + 1}. {option}')

        selected = None
        while selected not in options and selected not in ['', 'D', 'd', "exit"]:
            selected = input('Select task to complete, enter to wait, or D to dump the workflow state: ')

        if selected.lower() == 'd':
            filename = input('Enter filename: ')
            state = serializer.serialize_json(workflow)
            with open(filename, 'w') as dump:
                dump.write(state)
        elif selected == 'exit':
            exit()
        elif selected != '':
            next_task = options[selected]
            if isinstance(next_task.task_spec, UserTask):
                complete_user_task(next_task)
                next_task.complete()
            elif isinstance(next_task.task_spec, ManualTask):
                complete_manual_task(next_task)
                next_task.complete()
            else:
                next_task.complete()

        workflow.refresh_waiting_tasks()
        workflow.do_engine_steps()
        if step:
            print_state(workflow)

    print('\nWorkflow Data')
    print(json.dumps(workflow.data, indent=2, separators=[', ', ': ']))
if __name__ == '__main__':
    parser = argparse.ArgumentParser('Simple BPMN runner')
    parser.add_argument('-p', '--process', dest='process', help='The top-level BPMN Process ID')
    parser.add_argument('-b', '--bpmn', dest='bpmn', nargs='+', help='BPMN files to load')
    parser.add_argument('-d', '--dmn', dest='dmn', nargs='*', help='DMN files to load')
    parser.add_argument('-r', '--restore', dest='restore', metavar='FILE', help='Restore state from %(metavar)s')
    parser.add_argument('-s', '--step', dest='step', action='store_true', help='Display state after each step')
    args = parser.parse_args()

    try:
        if args.restore is not None:
            with open(args.restore) as state:
                wf = serializer.deserialize_json(state.read())
        else:
            wf = parse(args.process, args.bpmn, args.dmn)
        run(wf, args.step)
    except Exception:
        sys.stderr.write(traceback.format_exc())
        sys.exit(1)
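Finally, a plausible way to drive this runner programmatically, mirroring the argparse flow above (the process id and file names are placeholders):

```python
# Equivalent to: python run.py -p order_process -b order_process.bpmn -d discounts.dmn -s
wf = parse('order_process', ['order_process.bpmn'], ['discounts.dmn'])
run(wf, step=True)

# Restoring a previously dumped state instead (the -r flag above):
with open('saved_state.json') as state:
    wf = serializer.deserialize_json(state.read())
run(wf, step=False)
```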