diff --git a/poetry.lock b/poetry.lock
index e04957dd..b4ef97f9 100644
--- a/poetry.lock
+++ b/poetry.lock
@@ -1948,7 +1948,7 @@ testing = ["pytest (>=6)", "pytest-checkdocs (>=2.4)", "pytest-flake8", "pytest-
[metadata]
lock-version = "1.1"
python-versions = "^3.8"
-content-hash = "5d1c5760fc1a25e81b1bea11877c16480113adce42eb4b2ebdeb3df730665419"
+content-hash = "d0f2edc038a129a994d9b2d2fd9a30df000450d062ef183316197f54775fce5a"
[metadata.files]
alabaster = [
diff --git a/pyproject.toml b/pyproject.toml
index 436b1fbc..e4b00ead 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -38,6 +38,7 @@ pytest-flask-sqlalchemy = "^1.1.0"
psycopg2 = "^2.9.3"
typing-extensions = "^4.2.0"
connexion = "^2.13.1"
+lxml = "^4.8.0"
[tool.poetry.dev-dependencies]
diff --git a/src/spiff_workflow_webapp/api.yml b/src/spiff_workflow_webapp/api.yml
index fb5a5547..ceb09f68 100755
--- a/src/spiff_workflow_webapp/api.yml
+++ b/src/spiff_workflow_webapp/api.yml
@@ -54,6 +54,111 @@ paths:
application/json:
schema:
$ref: "#/components/schemas/WorkflowSpec"
+ /workflow-specification/{spec_id}/file:
+ parameters:
+ - name: spec_id
+ in: path
+ required: true
+        description: The unique id of an existing workflow specification.
+ schema:
+ type: string
+ # get:
+ # operationId: spiff_workflow_webapp.api.process_api_blueprint.get_files
+ # summary: Provide a list of workflow spec files for the given workflow_spec_id. IMPORTANT, only includes metadata, not the file content.
+ # tags:
+ # - Spec Files
+ # responses:
+ # '200':
+ # description: An array of file descriptions (not the file content)
+ # content:
+ # application/json:
+ # schema:
+ # type: array
+ # items:
+ # $ref: "#/components/schemas/File"
+ post:
+ operationId: spiff_workflow_webapp.routes.process_api_blueprint.add_file
+ summary: Add a new workflow spec file
+ tags:
+ - Spec Files
+ requestBody:
+ content:
+ multipart/form-data:
+ schema:
+ type: object
+ properties:
+ file:
+ type: string
+ format: binary
+ responses:
+ '200':
+ description: Metadata about the uploaded file, but not the file content.
+ content:
+ application/json:
+ schema:
+ $ref: "#components/schemas/File"
+ /workflow-specification/{spec_id}/file/{file_name}:
+ parameters:
+ - name: spec_id
+ in: path
+ required: true
+        description: The unique id of an existing workflow specification.
+ schema:
+ type: string
+ - name: file_name
+ in: path
+ required: true
+        description: The name of the spec file
+ schema:
+ type: string
+ get:
+ operationId: spiff_workflow_webapp.routes.process_api_blueprint.get_file
+ summary: Returns metadata about the file
+ tags:
+ - Spec Files
+ responses:
+ '200':
+ description: Returns the file information requested.
+ content:
+ application/json:
+ schema:
+ $ref: "#components/schemas/File"
+ # put:
+ # operationId: crc.api.spec_file.update
+ # summary: updates the given file to be the primary file and process, if so specified.
+ # tags:
+ # - Spec Files
+ # parameters:
+ # - name: is_primary
+ # in: query
+ # required: true
+ # description: Whether to make this the primary file for the workflow.
+ # schema:
+ # type: boolean
+ # requestBody:
+ # description: Log Pagination Request
+ # required: false
+ # content:
+ # application/json:
+ # schema:
+ # $ref: '#/components/schemas/File'
+ # responses:
+ # '200':
+ # description: Returns the file information.
+ # content:
+ # application/json:
+ # schema:
+ # $ref: "#components/schemas/File"
+ # delete:
+ # operationId: crc.api.spec_file.delete
+ # summary: Removes an existing workflow spec file.
+ # tags:
+ # - Spec Files
+ # responses:
+ # '204':
+ # description: The file was removed.
+
+
components:
diff --git a/src/spiff_workflow_webapp/models/file.py b/src/spiff_workflow_webapp/models/file.py
index eaf2078f..1d2fe7fd 100644
--- a/src/spiff_workflow_webapp/models/file.py
+++ b/src/spiff_workflow_webapp/models/file.py
@@ -1,5 +1,6 @@
"""File."""
import enum
+from marshmallow import Schema, INCLUDE
class FileType(enum.Enum):
@@ -82,3 +83,25 @@ class File(object):
# fixme: How to track the user id?
instance.data_store = {}
return instance
+
+
+class FileSchema(Schema):
+ class Meta:
+ model = File
+ fields = ["id", "name", "content_type", "workflow_id",
+ "irb_doc_code", "last_modified", "type", "archived",
+ "size", "data_store", "document", "user_uid", "url"]
+ unknown = INCLUDE
+ # url = Method("get_url")
+ #
+ # def get_url(self, obj):
+ # token = 'not_available'
+ # if hasattr(obj, 'id') and obj.id is not None:
+ # file_url = url_for("/v1_0.crc_api_file_get_file_data_link", file_id=obj.id, _external=True)
+ # if hasattr(flask.g, 'user'):
+ # token = flask.g.user.encode_auth_token()
+ # url = file_url + '?auth_token=' + urllib.parse.quote_plus(token)
+ # return url
+ # else:
+ # return ""
+ #
diff --git a/src/spiff_workflow_webapp/routes/process_api_blueprint.py b/src/spiff_workflow_webapp/routes/process_api_blueprint.py
index f3d27965..4eadc4a7 100644
--- a/src/spiff_workflow_webapp/routes/process_api_blueprint.py
+++ b/src/spiff_workflow_webapp/routes/process_api_blueprint.py
@@ -1,15 +1,18 @@
"""APIs for dealing with process groups, process models, and process instances."""
+import connexion
from flask import Blueprint
-from SpiffWorkflow.bpmn.serializer.workflow import BpmnWorkflowSerializer # type: ignore
-from SpiffWorkflow.camunda.serializer.task_spec_converters import UserTaskConverter # type: ignore
-from SpiffWorkflow.dmn.serializer.task_spec_converters import BusinessRuleTaskConverter # type: ignore
+# from SpiffWorkflow.bpmn.serializer.workflow import BpmnWorkflowSerializer # type: ignore
+# from SpiffWorkflow.camunda.serializer.task_spec_converters import UserTaskConverter # type: ignore
+# from SpiffWorkflow.dmn.serializer.task_spec_converters import BusinessRuleTaskConverter # type: ignore
from flask_bpmn.api.api_error import ApiError
from spiff_workflow_webapp.models.process_model import ProcessModelInfoSchema
from spiff_workflow_webapp.services.process_model_service import ProcessModelService
+from spiff_workflow_webapp.services.spec_file_service import SpecFileService
+from spiff_workflow_webapp.models.file import FileSchema, FileType
# from spiff_workflow_webapp.spiff_workflow_connector import parse
# from spiff_workflow_webapp.spiff_workflow_connector import run
@@ -80,6 +83,28 @@ def add_workflow_specification(body):
return ProcessModelInfoSchema().dump(spec)
+def get_file(spec_id, file_name):
+ workflow_spec_service = ProcessModelService()
+ workflow_spec = workflow_spec_service.get_spec(spec_id)
+ files = SpecFileService.get_files(workflow_spec, file_name)
+ if len(files) == 0:
+        raise ApiError(code='unknown_file',
+                       message=f'No information exists for file {file_name};'
+                               f' it does not exist in workflow {spec_id}.', status_code=404)
+ return FileSchema().dump(files[0])
+
+
+def add_file(spec_id):
+ workflow_spec_service = ProcessModelService()
+ workflow_spec = workflow_spec_service.get_spec(spec_id)
+ file = connexion.request.files['file']
+ file = SpecFileService.add_file(workflow_spec, file.filename, file.stream.read())
+ if not workflow_spec.primary_process_id and file.type == FileType.bpmn.value:
+ SpecFileService.set_primary_bpmn(workflow_spec, file.name)
+ workflow_spec_service.update_spec(workflow_spec)
+ return FileSchema().dump(file)
+
+
# def get_workflow_specification(spec_id):
# """Get_workflow_specification."""
# spec_service = ProcessModelService()
diff --git a/src/spiff_workflow_webapp/services/spec_file_service.py b/src/spiff_workflow_webapp/services/spec_file_service.py
new file mode 100644
index 00000000..01b112fe
--- /dev/null
+++ b/src/spiff_workflow_webapp/services/spec_file_service.py
@@ -0,0 +1,160 @@
+import datetime
+import os
+import shutil
+from typing import List
+
+from flask_bpmn.api.api_error import ApiError
+from spiff_workflow_webapp.models.file import File, FileType
+
+from SpiffWorkflow.bpmn.parser.ValidationException import ValidationException
+
+from lxml import etree
+
+from spiff_workflow_webapp.models.process_model import ProcessModelInfo
+from spiff_workflow_webapp.services.file_system_service import FileSystemService
+
+
+class SpecFileService(FileSystemService):
+
+ """We store spec files on the file system. This allows us to take advantage of Git for
+ syncing and versioning.
+ The files are stored in a directory whose path is determined by the category and spec names.
+ """
+
+ @staticmethod
+ def get_files(workflow_spec: ProcessModelInfo, file_name=None, include_libraries=False) -> List[File]:
+ """ Returns all files associated with a workflow specification """
+ path = SpecFileService.workflow_path(workflow_spec)
+ files = SpecFileService._get_files(path, file_name)
+ if include_libraries:
+ for lib_name in workflow_spec.libraries:
+ lib_path = SpecFileService.library_path(lib_name)
+ files.extend(SpecFileService._get_files(lib_path, file_name))
+ return files
+
+ @staticmethod
+ def add_file(workflow_spec: ProcessModelInfo, file_name: str, binary_data: bytearray) -> File:
+ # Same as update
+ return SpecFileService.update_file(workflow_spec, file_name, binary_data)
+
+ @staticmethod
+ def update_file(workflow_spec: ProcessModelInfo, file_name: str, binary_data) -> File:
+ SpecFileService.assert_valid_file_name(file_name)
+ file_path = SpecFileService.file_path(workflow_spec, file_name)
+ SpecFileService.write_file_data_to_system(file_path, binary_data)
+ file = SpecFileService.to_file_object(file_name, file_path)
+ if file_name == workflow_spec.primary_file_name:
+ SpecFileService.set_primary_bpmn(workflow_spec, file_name, binary_data)
+ elif workflow_spec.primary_file_name is None and file.type == FileType.bpmn:
+            # If no primary process exists, make this the primary process.
+ SpecFileService.set_primary_bpmn(workflow_spec, file_name, binary_data)
+ return file
+
+ @staticmethod
+ def get_data(workflow_spec: ProcessModelInfo, file_name: str):
+ file_path = SpecFileService.file_path(workflow_spec, file_name)
+ if not os.path.exists(file_path):
+ # If the file isn't here, it may be in a library
+ for lib in workflow_spec.libraries:
+ file_path = SpecFileService.library_path(lib)
+ file_path = os.path.join(file_path, file_name)
+ if os.path.exists(file_path):
+ break
+ if not os.path.exists(file_path):
+ raise ApiError("unknown_file", f"No file found with name {file_name} in {workflow_spec.display_name}")
+ with open(file_path, 'rb') as f_handle:
+ spec_file_data = f_handle.read()
+ return spec_file_data
+
+ @staticmethod
+ def file_path(spec: ProcessModelInfo, file_name: str):
+ return os.path.join(SpecFileService.workflow_path(spec), file_name)
+
+ @staticmethod
+ def last_modified(spec: ProcessModelInfo, file_name: str):
+ path = SpecFileService.file_path(spec, file_name)
+ return FileSystemService._last_modified(path)
+
+ @staticmethod
+ def timestamp(spec: ProcessModelInfo, file_name: str):
+ path = SpecFileService.file_path(spec, file_name)
+ return FileSystemService._timestamp(path)
+
+ @staticmethod
+ def delete_file(spec, file_name):
+ # Fixme: Remember to remove the lookup files when the spec file is removed.
+ # lookup_files = session.query(LookupFileModel).filter_by(file_model_id=file_id).all()
+ # for lf in lookup_files:
+ # session.query(LookupDataModel).filter_by(lookup_file_model_id=lf.id).delete()
+ # session.query(LookupFileModel).filter_by(id=lf.id).delete()
+ file_path = SpecFileService.file_path(spec, file_name)
+ os.remove(file_path)
+
+ @staticmethod
+ def delete_all_files(spec):
+ dir_path = SpecFileService.workflow_path(spec)
+ if os.path.exists(dir_path):
+ shutil.rmtree(dir_path)
+
+ @staticmethod
+ def set_primary_bpmn(workflow_spec: ProcessModelInfo, file_name: str, binary_data=None):
+ # If this is a BPMN, extract the process id, and determine if it is contains swim lanes.
+ extension = SpecFileService.get_extension(file_name)
+ file_type = FileType[extension]
+ if file_type == FileType.bpmn:
+ if not binary_data:
+ binary_data = SpecFileService.get_data(workflow_spec, file_name)
+ try:
+ bpmn: etree.Element = etree.fromstring(binary_data)
+ workflow_spec.primary_process_id = SpecFileService.get_process_id(bpmn)
+ workflow_spec.primary_file_name = file_name
+ workflow_spec.is_review = SpecFileService.has_swimlane(bpmn)
+
+ except etree.XMLSyntaxError as xse:
+ raise ApiError("invalid_xml", "Failed to parse xml: " + str(xse), file_name=file_name)
+ except ValidationException as ve:
+ if ve.args[0].find('No executable process tag found') >= 0:
+ raise ApiError(code='missing_executable_option',
+ message='No executable process tag found. Please make sure the Executable option is set in the workflow.')
+ else:
+ raise ApiError(code='validation_error',
+ message=f'There was an error validating your workflow. Original message is: {ve}')
+ else:
+ raise ApiError("invalid_xml", "Only a BPMN can be the primary file.", file_name=file_name)
+
+ @staticmethod
+ def has_swimlane(et_root: etree.Element):
+ """
+ Look through XML and determine if there are any lanes present that have a label.
+ """
+ elements = et_root.xpath('//bpmn:lane',
+ namespaces={'bpmn': 'http://www.omg.org/spec/BPMN/20100524/MODEL'})
+ retval = False
+ for el in elements:
+ if el.get('name'):
+ retval = True
+ return retval
+
+ @staticmethod
+ def get_process_id(et_root: etree.Element):
+ process_elements = []
+ for child in et_root:
+ if child.tag.endswith('process') and child.attrib.get('isExecutable', False):
+ process_elements.append(child)
+
+ if len(process_elements) == 0:
+ raise ValidationException('No executable process tag found')
+
+ # There are multiple root elements
+ if len(process_elements) > 1:
+
+ # Look for the element that has the startEvent in it
+ for e in process_elements:
+ this_element: etree.Element = e
+ for child_element in list(this_element):
+ if child_element.tag.endswith('startEvent'):
+ return this_element.attrib['id']
+
+ raise ValidationException('No start event found in %s' % et_root.attrib['id'])
+
+ return process_elements[0].attrib['id']
diff --git a/tests/data/random_fact/random_fact.bpmn b/tests/data/random_fact/random_fact.bpmn
new file mode 100644
index 00000000..7e92bba4
--- /dev/null
+++ b/tests/data/random_fact/random_fact.bpmn
@@ -0,0 +1,200 @@
+
+
+
+
+ SequenceFlow_0c7wlth
+
+
+ # h1 Heading 8-)
+## h2 Heading
+### h3 Heading
+#### h4 Heading
+##### h5 Heading
+###### h6 Heading
+
+
+## Horizontal Rules
+
+___
+
+---
+
+***
+
+
+## Typographic replacements
+
+"double quotes" and 'single quotes'
+
+
+## Emphasis
+
+**This is bold text**
+
+__This is bold text__
+
+*This is italic text*
+
+_This is italic text_
+
+~~Strikethrough~~
+
+
+## Blockquotes
+
+
+> Blockquotes can also be nested...
+>> ...by using additional greater-than signs right next to each other...
+> > > ...or with spaces between arrows.
+
+
+## Lists
+
+Unordered
+
++ Create a list by starting a line with `+`, `-`, or `*`
++ Sub-lists are made by indenting 2 spaces:
+ - Marker character change forces new list start:
+ * Ac tristique libero volutpat at
+ + Facilisis in pretium nisl aliquet
+ - Nulla volutpat aliquam velit
++ Very easy!
+
+Ordered
+
+1. Lorem ipsum dolor sit amet
+2. Consectetur adipiscing elit
+3. Integer molestie lorem at massa
+
+
+1. You can use sequential numbers...
+1. ...or keep all the numbers as `1.`
+
+Start numbering with offset:
+
+57. foo
+1. bar
+
+## Tables
+
+| Option | Description |
+| ------ | ----------- |
+| data | path to data files to supply the data that will be passed into templates. |
+| engine | engine to be used for processing templates. Handlebars is the default. |
+| ext | extension to be used for dest files. |
+
+Right aligned columns
+
+| Option | Description |
+| ------:| -----------:|
+| data | path to data files to supply the data that will be passed into templates. |
+| engine | engine to be used for processing templates. Handlebars is the default. |
+| ext | extension to be used for dest files. |
+
+
+## Links
+
+[link text](http://dev.nodeca.com)
+
+[link with title](http://nodeca.github.io/pica/demo/ "title text!")
+
+Autoconverted link https://github.com/nodeca/pica (enable linkify to see)
+
+
+## Images
+
+![Minion](https://octodex.github.com/images/minion.png)
+![Stormtroopocat](https://octodex.github.com/images/stormtroopocat.jpg "The Stormtroopocat")
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ SequenceFlow_0c7wlth
+ SequenceFlow_0641sh6
+
+
+
+
+
+
+
+
+ SequenceFlow_0641sh6
+ SequenceFlow_0t29gjo
+ FactService = fact_service()
+
+
+ # Great Job!
+You have completed the random fact generator.
+You chose to receive a random fact of the type: "{{type}}"
+
+Your random fact is:
+{{details}}
+ SequenceFlow_0t29gjo
+
+
+
+
+
+ User sets the Fact.type to cat, norris, or buzzword
+
+
+
+ Makes an API call to get a fact of the required type.
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/tests/data/random_fact/random_fact2.bpmn b/tests/data/random_fact/random_fact2.bpmn
new file mode 100644
index 00000000..22d580f2
--- /dev/null
+++ b/tests/data/random_fact/random_fact2.bpmn
@@ -0,0 +1,200 @@
+
+
+
+
+ SequenceFlow_0c7wlth
+
+
+ # h1 Heading 8-)
+## h2 Heading
+### h3 Heading
+#### h4 Heading
+##### h5 Heading
+###### h6 Heading
+
+
+## Horizontal Rules
+
+___
+
+---
+
+***
+
+
+## Typographic replacements
+
+"double quotes" and 'single quotes'
+
+
+## Emphasis
+
+**This is bold text**
+
+__This is bold text__
+
+*This is italic text*
+
+_This is italic text_
+
+~~Strikethrough~~
+
+
+## Blockquotes
+
+
+> Blockquotes can also be nested...
+>> ...by using additional greater-than signs right next to each other...
+> > > ...or with spaces between arrows.
+
+
+## Lists
+
+Unordered
+
++ Create a list by starting a line with `+`, `-`, or `*`
++ Sub-lists are made by indenting 2 spaces:
+ - Marker character change forces new list start:
+ * Ac tristique libero volutpat at
+ + Facilisis in pretium nisl aliquet
+ - Nulla volutpat aliquam velit
++ Very easy!
+
+Ordered
+
+1. Lorem ipsum dolor sit amet
+2. Consectetur adipiscing elit
+3. Integer molestie lorem at massa
+
+
+1. You can use sequential numbers...
+1. ...or keep all the numbers as `1.`
+
+Start numbering with offset:
+
+57. foo
+1. bar
+
+## Tables
+
+| Option | Description |
+| ------ | ----------- |
+| data | path to data files to supply the data that will be passed into templates. |
+| engine | engine to be used for processing templates. Handlebars is the default. |
+| ext | extension to be used for dest files. |
+
+Right aligned columns
+
+| Option | Description |
+| ------:| -----------:|
+| data | path to data files to supply the data that will be passed into templates. |
+| engine | engine to be used for processing templates. Handlebars is the default. |
+| ext | extension to be used for dest files. |
+
+
+## Links
+
+[link text](http://dev.nodeca.com)
+
+[link with title](http://nodeca.github.io/pica/demo/ "title text!")
+
+Autoconverted link https://github.com/nodeca/pica (enable linkify to see)
+
+
+## Images
+
+![Minion](https://octodex.github.com/images/minion.png)
+![Stormtroopocat](https://octodex.github.com/images/stormtroopocat.jpg "The Stormtroopocat")
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ SequenceFlow_0c7wlth
+ SequenceFlow_0641sh6
+
+
+
+
+
+
+
+
+ SequenceFlow_0641sh6
+ SequenceFlow_0t29gjo
+ FactService = fact_service()
+
+
+ # Great Job!
+You have completed the random fact generator.
+You chose to receive a random fact of the type: "{{type}}"
+
+Your random fact is:
+{{details}}
+ SequenceFlow_0t29gjo
+
+
+
+
+
+ User sets the Fact.type to cat, norris, or buzzword
+
+
+
+ Makes an API call to get a fact of the required type.
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/tests/spiff_workflow_webapp/files/bpmn_specs/call_activity_multi.bpmn b/tests/data/spiff_example/call_activity_multi.bpmn
similarity index 100%
rename from tests/spiff_workflow_webapp/files/bpmn_specs/call_activity_multi.bpmn
rename to tests/data/spiff_example/call_activity_multi.bpmn
diff --git a/tests/spiff_workflow_webapp/files/bpmn_specs/multiinstance.bpmn b/tests/data/spiff_example/multiinstance.bpmn
similarity index 100%
rename from tests/spiff_workflow_webapp/files/bpmn_specs/multiinstance.bpmn
rename to tests/data/spiff_example/multiinstance.bpmn
diff --git a/tests/spiff_workflow_webapp/files/bpmn_specs/product_prices.dmn b/tests/data/spiff_example/product_prices.dmn
similarity index 100%
rename from tests/spiff_workflow_webapp/files/bpmn_specs/product_prices.dmn
rename to tests/data/spiff_example/product_prices.dmn
diff --git a/tests/spiff_workflow_webapp/files/bpmn_specs/shipping_costs.dmn b/tests/data/spiff_example/shipping_costs.dmn
similarity index 100%
rename from tests/spiff_workflow_webapp/files/bpmn_specs/shipping_costs.dmn
rename to tests/data/spiff_example/shipping_costs.dmn
diff --git a/tests/spiff_workflow_webapp/helpers/example_data.py b/tests/spiff_workflow_webapp/helpers/example_data.py
new file mode 100644
index 00000000..a0c4766d
--- /dev/null
+++ b/tests/spiff_workflow_webapp/helpers/example_data.py
@@ -0,0 +1,65 @@
+"""example_data."""
+import glob
+import os
+
+from flask import current_app
+from spiff_workflow_webapp.models.process_model import ProcessModelInfo
+from spiff_workflow_webapp.services.spec_file_service import SpecFileService
+from spiff_workflow_webapp.services.process_model_service import ProcessModelService
+
+
+class ExampleDataLoader:
+ """ExampleDataLoader."""
+
+ def create_spec(self, id, display_name="", description="", filepath=None, master_spec=False,
+ process_group_id='', display_order=0, from_tests=False, standalone=False, library=False):
+ """Assumes that a directory exists in static/bpmn with the same name as the given id.
+
+ further assumes that the [id].bpmn is the primary file for the process model.
+ returns an array of data models to be added to the database.
+ """
+ global file
+ spec = ProcessModelInfo(id=id,
+ display_name=display_name,
+ description=description,
+ process_group_id=process_group_id,
+ display_order=display_order,
+ is_master_spec=master_spec,
+ standalone=standalone,
+ library=library,
+ primary_file_name="",
+ primary_process_id="",
+ is_review=False,
+ libraries=[])
+ workflow_spec_service = ProcessModelService()
+ workflow_spec_service.add_spec(spec)
+
+ if not filepath and not from_tests:
+ filepath = os.path.join(current_app.root_path, 'static', 'bpmn', id, "*.*")
+ if not filepath and from_tests:
+ filepath = os.path.join(current_app.root_path, '..', 'tests', 'data', id, "*.*")
+
+ files = glob.glob(filepath)
+ for file_path in files:
+ if os.path.isdir(file_path):
+ continue # Don't try to process sub directories
+
+ noise, file_extension = os.path.splitext(file_path)
+ filename = os.path.basename(file_path)
+ is_primary = filename.lower() == id + '.bpmn'
+ file = None
+ try:
+ file = open(file_path, 'rb')
+ data = file.read()
+ SpecFileService.add_file(workflow_spec=spec, file_name=filename, binary_data=data)
+ if is_primary:
+ SpecFileService.set_primary_bpmn(spec, filename, data)
+ workflow_spec_service = ProcessModelService()
+ workflow_spec_service.update_spec(spec)
+ except IsADirectoryError:
+ # Ignore sub directories
+ pass
+ finally:
+ if file:
+ file.close()
+ return spec
diff --git a/tests/spiff_workflow_webapp/helpers/test_data.py b/tests/spiff_workflow_webapp/helpers/test_data.py
index d3beb0c4..77223046 100644
--- a/tests/spiff_workflow_webapp/helpers/test_data.py
+++ b/tests/spiff_workflow_webapp/helpers/test_data.py
@@ -1,27 +1,59 @@
"""User."""
-from typing import Any
-
-from flask_bpmn.models.db import db
-
-from spiff_workflow_webapp.models.process_group import ProcessGroupModel
-from spiff_workflow_webapp.models.user import UserModel
+import os
-def find_or_create_user(username: str = "test_user1") -> Any:
- user = UserModel.query.filter_by(username=username).first()
- if user is None:
- user = UserModel(username=username)
- db.session.add(user)
- db.session.commit()
+from spiff_workflow_webapp.models.process_group import ProcessGroup
+from spiff_workflow_webapp.services.process_model_service import ProcessModelService
- return user
+from tests.spiff_workflow_webapp.helpers.example_data import ExampleDataLoader
-def find_or_create_process_group(name: str = "test_group1") -> Any:
- process_group = ProcessGroupModel.query.filter_by(name=name).first()
+# def find_or_create_user(username: str = "test_user1") -> Any:
+# user = UserModel.query.filter_by(username=username).first()
+# if user is None:
+# user = UserModel(username=username)
+# db.session.add(user)
+# db.session.commit()
+#
+# return user
+#
+#
+# def find_or_create_process_group(name: str = "test_group1") -> Any:
+# process_group = ProcessGroupModel.query.filter_by(name=name).first()
+# if process_group is None:
+# process_group = ProcessGroupModel(name=name)
+# db.session.add(process_group)
+# db.session.commit()
+#
+# return process_group
+
+
+def assure_process_group_exists(process_group_id=None):
+ """Assure_process_group_exists."""
+ process_group = None
+ workflow_spec_service = ProcessModelService()
+ if process_group_id is not None:
+ process_group = workflow_spec_service.get_process_group(process_group_id)
if process_group is None:
- process_group = ProcessGroupModel(name=name)
- db.session.add(process_group)
- db.session.commit()
-
+ process_group = ProcessGroup(id="test_process_group", display_name="Test Workflows", admin=False, display_order=0)
+ workflow_spec_service.add_process_group(process_group)
return process_group
+
+
+def load_test_spec(app, dir_name, display_name=None, master_spec=False, process_group_id=None, library=False):
+ """Loads a spec into the database based on a directory in /tests/data."""
+ process_group = None
+ workflow_spec_service = ProcessModelService()
+ if not master_spec and not library:
+ process_group = assure_process_group_exists(process_group_id)
+ process_group_id = process_group.id
+ workflow_spec = workflow_spec_service.get_spec(dir_name)
+ if workflow_spec:
+ return workflow_spec
+ else:
+ filepath = os.path.join(app.root_path, '..', 'tests', 'data', dir_name, "*")
+ if display_name is None:
+ display_name = dir_name
+ spec = ExampleDataLoader().create_spec(id=dir_name, filepath=filepath, master_spec=master_spec,
+ display_name=display_name, process_group_id=process_group_id, library=library)
+ return spec
diff --git a/tests/spiff_workflow_webapp/integration/test_process_api.py b/tests/spiff_workflow_webapp/integration/test_process_api.py
index f95493cf..46a00414 100644
--- a/tests/spiff_workflow_webapp/integration/test_process_api.py
+++ b/tests/spiff_workflow_webapp/integration/test_process_api.py
@@ -2,35 +2,37 @@
import json
import pytest
import os
+import io
import shutil
-from typing import Union
from flask.testing import FlaskClient
-from flask_bpmn.models.db import db
-from spiff_workflow_webapp.models.process_instance import ProcessInstanceModel
from spiff_workflow_webapp.models.process_model import ProcessModelInfoSchema, ProcessModelInfo
from spiff_workflow_webapp.models.process_group import ProcessGroup
from spiff_workflow_webapp.services.process_model_service import ProcessModelService
+from spiff_workflow_webapp.models.file import FileType
+
+from tests.spiff_workflow_webapp.helpers.test_data import load_test_spec
@pytest.fixture()
-def process_group_resource():
+def with_bpmn_file_cleanup():
+ """Process_group_resource."""
print("setup")
process_model_service = ProcessModelService()
- bpmn_root_path_test_cat = os.path.join(process_model_service.root_path(), "test_cat")
- if os.path.exists(bpmn_root_path_test_cat):
- shutil.rmtree(bpmn_root_path_test_cat)
+ if os.path.exists(process_model_service.root_path()):
+ shutil.rmtree(process_model_service.root_path())
yield "resource"
print("teardown")
- if os.path.exists(bpmn_root_path_test_cat):
- shutil.rmtree(bpmn_root_path_test_cat)
+ if os.path.exists(process_model_service.root_path()):
+ shutil.rmtree(process_model_service.root_path())
-def test_add_new_process_modelification(client: FlaskClient, process_group_resource):
+def test_add_new_process_modelification(client: FlaskClient, with_bpmn_file_cleanup):
+ """Test_add_new_process_modelification."""
process_model_service = ProcessModelService()
assert(0 == len(process_model_service.get_specs()))
assert(0 == len(process_model_service.get_process_groups()))
@@ -41,9 +43,9 @@ def test_add_new_process_modelification(client: FlaskClient, process_group_resou
standalone=False, is_review=False, is_master_spec=False, libraries=[], library=False,
primary_process_id='', primary_file_name='')
rv = client.post('/v1.0/workflow-specification',
- # headers=logged_in_headers(),
- content_type="application/json",
- data=json.dumps(ProcessModelInfoSchema().dump(spec)))
+ # headers=logged_in_headers(),
+ content_type="application/json",
+ data=json.dumps(ProcessModelInfoSchema().dump(spec)))
assert rv.status_code == 200
fs_spec = process_model_service.get_spec('make_cookies')
@@ -54,7 +56,7 @@ def test_add_new_process_modelification(client: FlaskClient, process_group_resou
# def test_get_process_modelification(self):
#
# load_test_spec('random_fact')
-# rv = app.get('/v1.0/workflow-specification/random_fact', headers=logged_in_headers())
+# rv = client.get('/v1.0/workflow-specification/random_fact', headers=logged_in_headers())
# assert_success(rv)
# json_data = json.loads(rv.get_data(as_text=True))
# api_spec = WorkflowSpecInfoSchema().load(json_data)
@@ -62,3 +64,22 @@ def test_add_new_process_modelification(client: FlaskClient, process_group_resou
# fs_spec = process_model_service.get_spec('random_fact')
# assert(WorkflowSpecInfoSchema().dump(fs_spec) == json_data)
#
+
+
+def test_create_spec_file(app, client: FlaskClient, with_bpmn_file_cleanup):
+ """Test_create_spec_file."""
+ spec = load_test_spec(app, 'random_fact')
+ data = {'file': (io.BytesIO(b"abcdef"), 'random_fact.svg')}
+ rv = client.post('/v1.0/workflow-specification/%s/file' % spec.id, data=data, follow_redirects=True,
+ content_type='multipart/form-data')
+
+ assert rv.status_code == 200
+ assert(rv.get_data() is not None)
+ file = json.loads(rv.get_data(as_text=True))
+ assert(FileType.svg.value == file['type'])
+ assert("image/svg+xml" == file['content_type'])
+
+ rv = client.get(f'/v1.0/workflow-specification/{spec.id}/file/random_fact.svg')
+ assert rv.status_code == 200
+ file2 = json.loads(rv.get_data(as_text=True))
+ assert(file == file2)