import enum

import marshmallow
from marshmallow import EXCLUDE, post_load, fields, INCLUDE
from sqlalchemy import func
from sqlalchemy.orm import deferred

from crc import db, ma


class WorkflowSpecCategory(object):
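    """A plain (non-database) container that groups workflow specifications for display,
    carrying the category's specs, workflow metadata, and category metadata alongside its
    id, display name, display order, and admin flag."""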
    def __init__(self, id, display_name, display_order=0, admin=False):
        self.id = id  # A unique string name, lower case with underscores (e.g. 'my_category')
        self.display_name = display_name
        self.display_order = display_order
        self.admin = admin
        self.workflows = []  # For storing workflow metadata
        self.specs = []  # For the list of specifications associated with a category
        self.meta = None  # For storing category metadata

    def __eq__(self, other):
        if not isinstance(other, WorkflowSpecCategory):
            return False
        return other.id == self.id


class WorkflowSpecCategorySchema(ma.Schema):
    class Meta:
        model = WorkflowSpecCategory
        fields = ["id", "display_name", "display_order", "admin"]

    @post_load
    def make_cat(self, data, **kwargs):
        return WorkflowSpecCategory(**data)
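
# A minimal usage sketch (not part of the original module): the schema's @post_load hook
# turns loaded data back into a WorkflowSpecCategory, and dump produces a plain dict
# limited to the names in Meta.fields.
#
#   schema = WorkflowSpecCategorySchema()
#   category = schema.load({"id": "my_category", "display_name": "My Category",
#                           "display_order": 1, "admin": False})
#   data = schema.dump(category)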
class WorkflowSpecInfo(object):
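    """A plain (non-database) description of a single workflow specification: its id,
    display name and order, description, primary file and process id, any library specs
    it depends on, its owning category, and flags such as master spec, standalone,
    library, and review."""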
    def __init__(self, id, display_name, description, is_master_spec=False,
                 standalone=False, library=False, primary_file_name='', primary_process_id='',
                 libraries=None, category_id="", display_order=0, is_review=False):
        self.id = id  # String unique id
        self.display_name = display_name
        self.description = description
        self.display_order = display_order
        self.is_master_spec = is_master_spec
        self.standalone = standalone
        self.library = library
        self.primary_file_name = primary_file_name
        self.primary_process_id = primary_process_id
        self.is_review = is_review
        self.libraries = libraries if libraries is not None else []  # Avoid a shared mutable default argument
        self.category_id = category_id

    def __eq__(self, other):
        if not isinstance(other, WorkflowSpecInfo):
            return False
        return other.id == self.id


class WorkflowSpecInfoSchema(ma.Schema):
    class Meta:
        model = WorkflowSpecInfo

    id = marshmallow.fields.String(required=True)
    display_name = marshmallow.fields.String(required=True)
    description = marshmallow.fields.String()
    is_master_spec = marshmallow.fields.Boolean(required=True)
    standalone = marshmallow.fields.Boolean(required=True)
    library = marshmallow.fields.Boolean(required=True)
    display_order = marshmallow.fields.Integer(allow_none=True)
    primary_file_name = marshmallow.fields.String(allow_none=True)
    primary_process_id = marshmallow.fields.String(allow_none=True)
    is_review = marshmallow.fields.Boolean(allow_none=True)
    category_id = marshmallow.fields.String(allow_none=True)
    libraries = marshmallow.fields.List(marshmallow.fields.String(), allow_none=True)

    @post_load
    def make_spec(self, data, **kwargs):
        return WorkflowSpecInfo(**data)
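
# A minimal usage sketch (not part of the original module): serializing a list of specs,
# e.g. for an API response, using marshmallow's many=True.
#
#   specs = [WorkflowSpecInfo(id="spec_one", display_name="Spec One", description="")]
#   payload = WorkflowSpecInfoSchema(many=True).dump(specs)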
class WorkflowState(enum.Enum):
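    """Visibility/availability states a workflow can be placed in, as distinct from its
    execution status, which is tracked by WorkflowStatus below."""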
    hidden = "hidden"
    disabled = "disabled"
    required = "required"
    optional = "optional"
    locked = "locked"

    @classmethod
    def has_value(cls, value):
        return value in cls._value2member_map_

    @staticmethod
    def list():
        return list(map(lambda c: c.value, WorkflowState))
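
# Usage sketch for the helpers above (not part of the original module):
#
#   WorkflowState.has_value("locked")   # True
#   WorkflowState.list()                # ["hidden", "disabled", "required", "optional", "locked"]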
class WorkflowStatus(enum.Enum):
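    """Execution status of a workflow instance; stored on WorkflowModel.status below."""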
    not_started = "not_started"  # No processor or BPMN has been created yet; the workflow has no stored data
    user_input_required = "user_input_required"
    waiting = "waiting"
    complete = "complete"
    erroring = "erroring"
class WorkflowModel(db.Model):
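    """Database record for a single workflow instance: its serialized BPMN state, execution
    status, owning study, spec id, task-count bookkeeping, last-update time, user, and state."""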
    __tablename__ = 'workflow'
    id = db.Column(db.Integer, primary_key=True)
    bpmn_workflow_json = deferred(db.Column(db.JSON))
    status = db.Column(db.Enum(WorkflowStatus))
    study_id = db.Column(db.Integer, db.ForeignKey('study.id'))
    study = db.relationship("StudyModel", backref='workflow', lazy='select')
    workflow_spec_id = db.Column(db.String)
    total_tasks = db.Column(db.Integer, default=0)
    completed_tasks = db.Column(db.Integer, default=0)
    last_updated = db.Column(db.DateTime(timezone=True), server_default=func.now())
    user_id = db.Column(db.String, default=None)
    state = db.Column(db.String, nullable=True)
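
# A minimal query sketch (not part of the original module), assuming the standard
# Flask-SQLAlchemy session exposed by `crc.db`; `some_study_id` is a placeholder.
#
#   workflows = db.session.query(WorkflowModel).filter_by(study_id=some_study_id).all()
#   not_started = [w for w in workflows if w.status == WorkflowStatus.not_started]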