2020-05-20 04:12:48 +00:00
|
|
|
import hashlib
|
2020-04-08 17:28:43 +00:00
|
|
|
import json
|
2020-02-10 21:19:23 +00:00
|
|
|
import os
|
|
|
|
from datetime import datetime
|
2020-08-14 17:04:22 +00:00
|
|
|
from github import Github, GithubObject, UnknownObjectException
|
2020-03-04 18:40:25 +00:00
|
|
|
from uuid import UUID
|
2020-06-25 18:02:16 +00:00
|
|
|
from lxml import etree
|
2020-02-10 21:19:23 +00:00
|
|
|
|
2020-05-29 00:03:50 +00:00
|
|
|
from SpiffWorkflow.bpmn.parser.ValidationException import ValidationException
|
2020-03-19 21:13:30 +00:00
|
|
|
from pandas import ExcelFile
|
2020-05-29 00:03:50 +00:00
|
|
|
from sqlalchemy import desc
|
2020-05-30 19:37:04 +00:00
|
|
|
from sqlalchemy.exc import IntegrityError
|
2020-03-19 21:13:30 +00:00
|
|
|
|
2020-05-30 19:37:04 +00:00
|
|
|
from crc import session, app
|
2020-03-13 19:03:57 +00:00
|
|
|
from crc.api.common import ApiError
|
2020-04-24 10:58:24 +00:00
|
|
|
from crc.models.file import FileType, FileDataModel, FileModel, LookupFileModel, LookupDataModel
|
2020-05-29 00:03:50 +00:00
|
|
|
from crc.models.workflow import WorkflowSpecModel, WorkflowModel, WorkflowSpecDependencyFile
|
2020-02-10 21:19:23 +00:00
|
|
|
|
|
|
|
|
|
|
|
class FileService(object):
    """Provides consistent management and rules for storing, retrieving and processing files."""

    # Reference spreadsheets (stored as reference files) that drive document
    # validation and investigator data.
    DOCUMENT_LIST = "irb_documents.xlsx"
    INVESTIGATOR_LIST = "investigators.xlsx"

    # Cached parse of the document reference spreadsheet; populated lazily by
    # get_doc_dictionary().
    __doc_dictionary = None
|
|
|
|
|
|
|
|
@staticmethod
|
|
|
|
def get_doc_dictionary():
|
|
|
|
if not FileService.__doc_dictionary:
|
|
|
|
FileService.__doc_dictionary = FileService.get_reference_data(FileService.DOCUMENT_LIST, 'code', ['id'])
|
|
|
|
return FileService.__doc_dictionary
|
|
|
|
|
2020-02-10 21:19:23 +00:00
|
|
|
@staticmethod
|
2020-03-04 18:40:25 +00:00
|
|
|
def add_workflow_spec_file(workflow_spec: WorkflowSpecModel,
|
2020-03-13 18:56:46 +00:00
|
|
|
name, content_type, binary_data, primary=False, is_status=False):
|
2020-02-10 21:19:23 +00:00
|
|
|
"""Create a new file and associate it with a workflow spec."""
|
|
|
|
file_model = FileModel(
|
2020-03-04 18:40:25 +00:00
|
|
|
workflow_spec_id=workflow_spec.id,
|
2020-02-10 21:19:23 +00:00
|
|
|
name=name,
|
2020-03-13 18:56:46 +00:00
|
|
|
primary=primary,
|
2020-04-17 17:30:32 +00:00
|
|
|
is_status=is_status,
|
2020-02-10 21:19:23 +00:00
|
|
|
)
|
2020-03-04 18:40:25 +00:00
|
|
|
|
2020-02-10 21:19:23 +00:00
|
|
|
return FileService.update_file(file_model, binary_data, content_type)
|
|
|
|
|
2020-05-07 17:57:24 +00:00
|
|
|
@staticmethod
|
|
|
|
def is_allowed_document(code):
|
2020-06-04 18:59:36 +00:00
|
|
|
doc_dict = FileService.get_doc_dictionary()
|
|
|
|
return code in doc_dict
|
2020-04-17 17:30:32 +00:00
|
|
|
|
2020-02-11 20:03:25 +00:00
|
|
|
@staticmethod
|
A major refactor of how we search and store files, as there was a lot of confusing bits in here.
From an API point of view you can do the following (and only the following)
/files?workflow_spec_id=x
* You can find all files associated with a workflow_spec_id, and add a file with a workflow_spec_id
/files?workflow_id=x
* You can find all files associated with a workflow_id, and add a file that is directly associated with the workflow
/files?workflow_id=x&form_field_key=y
* You can find all files associated with a form element on a running workflow, and add a new file.
Note: you can add multiple files to the same form_field_key, IF they have different file names. If the same name, the original file is archived,
and the new file takes its place.
The study endpoints always return a list of the file metadata associated with the study. Removed /studies-files, but there is an
endpoint called
/studies/all - that returns all the studies in the system, and does include their files.
On a deeper level:
The File model no longer contains:
- study_id,
- task_id,
- form_field_key
Instead, if the file is associated with workflow - then that is the one way it is connected to the study, and we use this relationship to find files for a study.
A file is never associated with a task_id, as these change when the workflow is reloaded.
The form_field_key must match the irb_doc_code, so when requesting files for a form field, we just look up the irb_doc_code.
2020-05-28 12:27:26 +00:00
|
|
|
def add_workflow_file(workflow_id, irb_doc_code, name, content_type, binary_data):
|
|
|
|
"""Create a new file and associate it with the workflow
|
|
|
|
Please note that the irb_doc_code MUST be a known file in the irb_documents.xslx reference document."""
|
|
|
|
if not FileService.is_allowed_document(irb_doc_code):
|
2020-03-19 21:13:30 +00:00
|
|
|
raise ApiError("invalid_form_field_key",
|
|
|
|
"When uploading files, the form field id must match a known document in the "
|
A major refactor of how we search and store files, as there was a lot of confusing bits in here.
From an API point of view you can do the following (and only the following)
/files?workflow_spec_id=x
* You can find all files associated with a workflow_spec_id, and add a file with a workflow_spec_id
/files?workflow_id=x
* You can find all files associated with a workflow_id, and add a file that is directly associated with the workflow
/files?workflow_id=x&form_field_key=y
* You can find all files associated with a form element on a running workflow, and add a new file.
Note: you can add multiple files to the same form_field_key, IF they have different file names. If the same name, the original file is archived,
and the new file takes its place.
The study endpoints always return a list of the file metadata associated with the study. Removed /studies-files, but there is an
endpoint called
/studies/all - that returns all the studies in the system, and does include their files.
On a deeper level:
The File model no longer contains:
- study_id,
- task_id,
- form_field_key
Instead, if the file is associated with workflow - then that is the one way it is connected to the study, and we use this relationship to find files for a study.
A file is never associated with a task_id, as these change when the workflow is reloaded.
The form_field_key must match the irb_doc_code, so when requesting files for a form field, we just look up the irb_doc_code.
2020-05-28 12:27:26 +00:00
|
|
|
"irb_docunents.xslx reference file. This code is not found in that file '%s'" % irb_doc_code)
|
2020-03-19 21:13:30 +00:00
|
|
|
|
2020-05-23 19:08:17 +00:00
|
|
|
"""Assure this is unique to the workflow, task, and document code AND the Name
|
2020-06-10 04:57:56 +00:00
|
|
|
Because we will allow users to upload multiple files for the same form field
|
2020-05-23 19:08:17 +00:00
|
|
|
in some cases """
|
|
|
|
file_model = session.query(FileModel)\
|
|
|
|
.filter(FileModel.workflow_id == workflow_id)\
|
|
|
|
.filter(FileModel.name == name)\
|
A major refactor of how we search and store files, as there was a lot of confusing bits in here.
From an API point of view you can do the following (and only the following)
/files?workflow_spec_id=x
* You can find all files associated with a workflow_spec_id, and add a file with a workflow_spec_id
/files?workflow_id=x
* You can find all files associated with a workflow_id, and add a file that is directly associated with the workflow
/files?workflow_id=x&form_field_key=y
* You can find all files associated with a form element on a running workflow, and add a new file.
Note: you can add multiple files to the same form_field_key, IF they have different file names. If the same name, the original file is archived,
and the new file takes its place.
The study endpoints always return a list of the file metadata associated with the study. Removed /studies-files, but there is an
endpoint called
/studies/all - that returns all the studies in the system, and does include their files.
On a deeper level:
The File model no longer contains:
- study_id,
- task_id,
- form_field_key
Instead, if the file is associated with workflow - then that is the one way it is connected to the study, and we use this relationship to find files for a study.
A file is never associated with a task_id, as these change when the workflow is reloaded.
The form_field_key must match the irb_doc_code, so when requesting files for a form field, we just look up the irb_doc_code.
2020-05-28 12:27:26 +00:00
|
|
|
.filter(FileModel.irb_doc_code == irb_doc_code).first()
|
2020-05-23 19:08:17 +00:00
|
|
|
|
|
|
|
if not file_model:
|
|
|
|
file_model = FileModel(
|
|
|
|
workflow_id=workflow_id,
|
|
|
|
name=name,
|
A major refactor of how we search and store files, as there was a lot of confusing bits in here.
From an API point of view you can do the following (and only the following)
/files?workflow_spec_id=x
* You can find all files associated with a workflow_spec_id, and add a file with a workflow_spec_id
/files?workflow_id=x
* You can find all files associated with a workflow_id, and add a file that is directly associated with the workflow
/files?workflow_id=x&form_field_key=y
* You can find all files associated with a form element on a running workflow, and add a new file.
Note: you can add multiple files to the same form_field_key, IF they have different file names. If the same name, the original file is archived,
and the new file takes its place.
The study endpoints always return a list of the file metadata associated with the study. Removed /studies-files, but there is an
endpoint called
/studies/all - that returns all the studies in the system, and does include their files.
On a deeper level:
The File model no longer contains:
- study_id,
- task_id,
- form_field_key
Instead, if the file is associated with workflow - then that is the one way it is connected to the study, and we use this relationship to find files for a study.
A file is never associated with a task_id, as these change when the workflow is reloaded.
The form_field_key must match the irb_doc_code, so when requesting files for a form field, we just look up the irb_doc_code.
2020-05-28 12:27:26 +00:00
|
|
|
irb_doc_code=irb_doc_code
|
2020-05-23 19:08:17 +00:00
|
|
|
)
|
2020-02-11 20:03:25 +00:00
|
|
|
return FileService.update_file(file_model, binary_data, content_type)
|
|
|
|
|
2020-02-10 21:19:23 +00:00
|
|
|
@staticmethod
|
2020-05-07 17:57:24 +00:00
|
|
|
def get_reference_data(reference_file_name, index_column, int_columns=[]):
|
|
|
|
""" Opens a reference file (assumes that it is xls file) and returns the data as a
|
|
|
|
dictionary, each row keyed on the given index_column name. If there are columns
|
|
|
|
that should be represented as integers, pass these as an array of int_columns, lest
|
2020-07-28 14:16:48 +00:00
|
|
|
you get '1.0' rather than '1'
|
|
|
|
fixme: This is stupid stupid slow. Place it in the database and just check if it is up to date."""
|
2020-05-07 17:57:24 +00:00
|
|
|
data_model = FileService.get_reference_file_data(reference_file_name)
|
2020-12-14 15:30:10 +00:00
|
|
|
xls = ExcelFile(data_model.data, engine='openpyxl')
|
2020-03-19 21:13:30 +00:00
|
|
|
df = xls.parse(xls.sheet_names[0])
|
2020-05-07 17:57:24 +00:00
|
|
|
for c in int_columns:
|
|
|
|
df[c] = df[c].fillna(0)
|
|
|
|
df = df.astype({c: 'Int64'})
|
2020-04-08 17:28:43 +00:00
|
|
|
df = df.fillna('')
|
|
|
|
df = df.applymap(str)
|
2020-05-07 17:57:24 +00:00
|
|
|
df = df.set_index(index_column)
|
2020-04-08 17:28:43 +00:00
|
|
|
return json.loads(df.to_json(orient='index'))
|
2020-03-19 21:13:30 +00:00
|
|
|
|
2020-05-23 19:08:17 +00:00
|
|
|
@staticmethod
|
|
|
|
def get_workflow_files(workflow_id):
|
|
|
|
"""Returns all the file models associated with a running workflow."""
|
|
|
|
return session.query(FileModel).filter(FileModel.workflow_id == workflow_id).\
|
2020-06-04 13:49:42 +00:00
|
|
|
filter(FileModel.archived == False).\
|
2020-05-23 19:08:17 +00:00
|
|
|
order_by(FileModel.id).all()
|
|
|
|
|
2020-03-13 19:03:57 +00:00
|
|
|
@staticmethod
|
|
|
|
def add_reference_file(name, content_type, binary_data):
|
|
|
|
"""Create a file with the given name, but not associated with a spec or workflow.
|
|
|
|
Only one file with the given reference name can exist."""
|
2020-03-19 14:40:07 +00:00
|
|
|
file_model = session.query(FileModel). \
|
|
|
|
filter(FileModel.is_reference == True). \
|
|
|
|
filter(FileModel.name == name).first()
|
|
|
|
if not file_model:
|
|
|
|
file_model = FileModel(
|
|
|
|
name=name,
|
|
|
|
is_reference=True
|
|
|
|
)
|
2020-03-13 19:03:57 +00:00
|
|
|
return FileService.update_file(file_model, binary_data, content_type)
|
|
|
|
|
|
|
|
@staticmethod
|
|
|
|
def get_extension(file_name):
|
|
|
|
basename, file_extension = os.path.splitext(file_name)
|
|
|
|
return file_extension.lower().strip()[1:]
|
|
|
|
|
2020-02-10 21:19:23 +00:00
|
|
|
    @staticmethod
    def update_file(file_model, binary_data, content_type):
        """Persist *binary_data* as a new version of *file_model*.

        Skips the write when the data is byte-identical (by md5) to the latest
        stored version; otherwise validates the file extension, bumps the
        version number, and commits a new FileDataModel row.
        Raises ApiError('unknown_extension') for unrecognized extensions."""
        session.flush()  # Assure the database is up-to-date before running this.

        latest_data_model = session.query(FileDataModel). \
            filter(FileDataModel.file_model_id == file_model.id).\
            order_by(desc(FileDataModel.date_created)).first()

        # md5 is stored as a UUID because the hash digest is 128 bits wide.
        md5_checksum = UUID(hashlib.md5(binary_data).hexdigest())
        if (latest_data_model is not None) and (md5_checksum == latest_data_model.md5_hash):
            # This file does not need to be updated, it's the same file. If it is archived,
            # then de-archive it.
            file_model.archived = False
            session.add(file_model)
            session.commit()
            return file_model

        # Verify the extension
        file_extension = FileService.get_extension(file_model.name)
        if file_extension not in FileType._member_names_:
            raise ApiError('unknown_extension',
                           'The file you provided does not have an accepted extension:' +
                           file_extension, status_code=404)
        else:
            file_model.type = FileType[file_extension]
            file_model.content_type = content_type
            file_model.archived = False  # Unarchive the file if it is archived.

        # Version numbers are sequential per file model.
        if latest_data_model is None:
            version = 1
        else:
            version = latest_data_model.version + 1

        # If this is a BPMN, extract the process id.
        if file_model.type == FileType.bpmn:
            bpmn: etree.Element = etree.fromstring(binary_data)
            file_model.primary_process_id = FileService.get_process_id(bpmn)

        new_file_data_model = FileDataModel(
            data=binary_data, file_model_id=file_model.id, file_model=file_model,
            version=version, md5_hash=md5_checksum, date_created=datetime.now()
        )
        session.add_all([file_model, new_file_data_model])
        session.commit()
        session.flush()  # Assure the id is set on the model before returning it.

        return file_model
|
|
|
|
|
2020-05-29 00:03:50 +00:00
|
|
|
@staticmethod
|
2020-06-25 18:02:16 +00:00
|
|
|
def get_process_id(et_root: etree.Element):
|
2020-05-29 00:03:50 +00:00
|
|
|
process_elements = []
|
|
|
|
for child in et_root:
|
|
|
|
if child.tag.endswith('process') and child.attrib.get('isExecutable', False):
|
|
|
|
process_elements.append(child)
|
|
|
|
|
|
|
|
if len(process_elements) == 0:
|
|
|
|
raise ValidationException('No executable process tag found')
|
|
|
|
|
|
|
|
# There are multiple root elements
|
|
|
|
if len(process_elements) > 1:
|
|
|
|
|
|
|
|
# Look for the element that has the startEvent in it
|
|
|
|
for e in process_elements:
|
2020-06-25 18:02:16 +00:00
|
|
|
this_element: etree.Element = e
|
2020-05-29 00:03:50 +00:00
|
|
|
for child_element in list(this_element):
|
|
|
|
if child_element.tag.endswith('startEvent'):
|
|
|
|
return this_element.attrib['id']
|
|
|
|
|
|
|
|
raise ValidationException('No start event found in %s' % et_root.attrib['id'])
|
|
|
|
|
|
|
|
return process_elements[0].attrib['id']
|
|
|
|
|
2020-02-10 21:19:23 +00:00
|
|
|
@staticmethod
|
A major refactor of how we search and store files, as there was a lot of confusing bits in here.
From an API point of view you can do the following (and only the following)
/files?workflow_spec_id=x
* You can find all files associated with a workflow_spec_id, and add a file with a workflow_spec_id
/files?workflow_id=x
* You can find all files associated with a workflow_id, and add a file that is directly associated with the workflow
/files?workflow_id=x&form_field_key=y
* You can find all files associated with a form element on a running workflow, and add a new file.
Note: you can add multiple files to the same form_field_key, IF they have different file names. If the same name, the original file is archived,
and the new file takes its place.
The study endpoints always return a list of the file metadata associated with the study. Removed /studies-files, but there is an
endpoint called
/studies/all - that returns all the studies in the system, and does include their files.
On a deeper level:
The File model no longer contains:
- study_id,
- task_id,
- form_field_key
Instead, if the file is associated with workflow - then that is the one way it is connected to the study, and we use this relationship to find files for a study.
A file is never associated with a task_id, as these change when the workflow is reloaded.
The form_field_key must match the irb_doc_code, so when requesting files for a form field, we just look up the irb_doc_code.
2020-05-28 12:27:26 +00:00
|
|
|
def get_files_for_study(study_id, irb_doc_code=None):
|
|
|
|
query = session.query(FileModel).\
|
|
|
|
join(WorkflowModel).\
|
2020-06-04 13:49:42 +00:00
|
|
|
filter(WorkflowModel.study_id == study_id).\
|
|
|
|
filter(FileModel.archived == False)
|
A major refactor of how we search and store files, as there was a lot of confusing bits in here.
From an API point of view you can do the following (and only the following)
/files?workflow_spec_id=x
* You can find all files associated with a workflow_spec_id, and add a file with a workflow_spec_id
/files?workflow_id=x
* You can find all files associated with a workflow_id, and add a file that is directly associated with the workflow
/files?workflow_id=x&form_field_key=y
* You can find all files associated with a form element on a running workflow, and add a new file.
Note: you can add multiple files to the same form_field_key, IF they have different file names. If the same name, the original file is archived,
and the new file takes its place.
The study endpoints always return a list of the file metadata associated with the study. Removed /studies-files, but there is an
endpoint called
/studies/all - that returns all the studies in the system, and does include their files.
On a deeper level:
The File model no longer contains:
- study_id,
- task_id,
- form_field_key
Instead, if the file is associated with workflow - then that is the one way it is connected to the study, and we use this relationship to find files for a study.
A file is never associated with a task_id, as these change when the workflow is reloaded.
The form_field_key must match the irb_doc_code, so when requesting files for a form field, we just look up the irb_doc_code.
2020-05-28 12:27:26 +00:00
|
|
|
if irb_doc_code:
|
|
|
|
query = query.filter(FileModel.irb_doc_code == irb_doc_code)
|
|
|
|
return query.all()
|
|
|
|
|
|
|
|
@staticmethod
|
|
|
|
def get_files(workflow_spec_id=None, workflow_id=None,
|
2020-06-04 13:49:42 +00:00
|
|
|
name=None, is_reference=False, irb_doc_code=None):
|
2020-03-13 19:03:57 +00:00
|
|
|
query = session.query(FileModel).filter_by(is_reference=is_reference)
|
2020-02-10 21:27:57 +00:00
|
|
|
if workflow_spec_id:
|
|
|
|
query = query.filter_by(workflow_spec_id=workflow_spec_id)
|
A major refactor of how we search and store files, as there was a lot of confusing bits in here.
From an API point of view you can do the following (and only the following)
/files?workflow_spec_id=x
* You can find all files associated with a workflow_spec_id, and add a file with a workflow_spec_id
/files?workflow_id=x
* You can find all files associated with a workflow_id, and add a file that is directly associated with the workflow
/files?workflow_id=x&form_field_key=y
* You can find all files associated with a form element on a running workflow, and add a new file.
Note: you can add multiple files to the same form_field_key, IF they have different file names. If the same name, the original file is archived,
and the new file takes its place.
The study endpoints always return a list of the file metadata associated with the study. Removed /studies-files, but there is an
endpoint called
/studies/all - that returns all the studies in the system, and does include their files.
On a deeper level:
The File model no longer contains:
- study_id,
- task_id,
- form_field_key
Instead, if the file is associated with workflow - then that is the one way it is connected to the study, and we use this relationship to find files for a study.
A file is never associated with a task_id, as these change when the workflow is reloaded.
The form_field_key must match the irb_doc_code, so when requesting files for a form field, we just look up the irb_doc_code.
2020-05-28 12:27:26 +00:00
|
|
|
elif workflow_id:
|
|
|
|
query = query.filter_by(workflow_id=workflow_id)
|
2020-05-20 01:51:54 +00:00
|
|
|
if irb_doc_code:
|
|
|
|
query = query.filter_by(irb_doc_code=irb_doc_code)
|
A major refactor of how we search and store files, as there was a lot of confusing bits in here.
From an API point of view you can do the following (and only the following)
/files?workflow_spec_id=x
* You can find all files associated with a workflow_spec_id, and add a file with a workflow_spec_id
/files?workflow_id=x
* You can find all files associated with a workflow_id, and add a file that is directly associated with the workflow
/files?workflow_id=x&form_field_key=y
* You can find all files associated with a form element on a running workflow, and add a new file.
Note: you can add multiple files to the same form_field_key, IF they have different file names. If the same name, the original file is archived,
and the new file takes its place.
The study endpoints always return a list of the file metadata associated with the study. Removed /studies-files, but there is an
endpoint called
/studies/all - that returns all the studies in the system, and does include their files.
On a deeper level:
The File model no longer contains:
- study_id,
- task_id,
- form_field_key
Instead, if the file is associated with workflow - then that is the one way it is connected to the study, and we use this relationship to find files for a study.
A file is never associated with a task_id, as these change when the workflow is reloaded.
The form_field_key must match the irb_doc_code, so when requesting files for a form field, we just look up the irb_doc_code.
2020-05-28 12:27:26 +00:00
|
|
|
elif is_reference:
|
|
|
|
query = query.filter_by(is_reference=True)
|
|
|
|
|
|
|
|
if name:
|
|
|
|
query = query.filter_by(name=name)
|
2020-06-03 21:34:27 +00:00
|
|
|
|
2020-06-04 13:49:42 +00:00
|
|
|
query = query.filter(FileModel.archived == False)
|
2020-06-03 21:34:27 +00:00
|
|
|
|
2020-05-29 00:03:50 +00:00
|
|
|
query = query.order_by(FileModel.id)
|
2020-02-10 21:27:57 +00:00
|
|
|
|
|
|
|
results = query.all()
|
2020-02-10 21:19:23 +00:00
|
|
|
return results
|
|
|
|
|
|
|
|
@staticmethod
|
2020-05-29 05:39:39 +00:00
|
|
|
def get_spec_data_files(workflow_spec_id, workflow_id=None, name=None):
|
2020-05-29 00:03:50 +00:00
|
|
|
"""Returns all the FileDataModels related to a workflow specification.
|
|
|
|
If a workflow is specified, returns the version of the spec relatted
|
2020-10-09 12:46:14 +00:00
|
|
|
to that workflow, otherwise, returns the lastest files."""
|
2020-05-29 00:03:50 +00:00
|
|
|
if workflow_id:
|
2020-05-29 05:39:39 +00:00
|
|
|
query = session.query(FileDataModel) \
|
|
|
|
.join(WorkflowSpecDependencyFile) \
|
|
|
|
.filter(WorkflowSpecDependencyFile.workflow_id == workflow_id) \
|
|
|
|
.order_by(FileDataModel.id)
|
|
|
|
if name:
|
|
|
|
query = query.join(FileModel).filter(FileModel.name == name)
|
|
|
|
return query.all()
|
2020-05-29 00:03:50 +00:00
|
|
|
else:
|
|
|
|
"""Returns all the latest files related to a workflow specification"""
|
|
|
|
file_models = FileService.get_files(workflow_spec_id=workflow_spec_id)
|
|
|
|
latest_data_files = []
|
|
|
|
for file_model in file_models:
|
2020-05-29 05:39:39 +00:00
|
|
|
if name and file_model.name == name:
|
|
|
|
latest_data_files.append(FileService.get_file_data(file_model.id))
|
|
|
|
elif not name:
|
|
|
|
latest_data_files.append(FileService.get_file_data(file_model.id))
|
2020-05-29 00:03:50 +00:00
|
|
|
return latest_data_files
|
2020-05-23 19:21:30 +00:00
|
|
|
|
2020-05-29 00:03:50 +00:00
|
|
|
@staticmethod
|
2020-06-04 13:49:42 +00:00
|
|
|
def get_workflow_data_files(workflow_id=None):
|
2020-05-29 00:03:50 +00:00
|
|
|
"""Returns all the FileDataModels related to a running workflow -
|
|
|
|
So these are the latest data files that were uploaded or generated
|
|
|
|
that go along with this workflow. Not related to the spec in any way"""
|
2020-06-04 13:49:42 +00:00
|
|
|
file_models = FileService.get_files(workflow_id=workflow_id)
|
2020-05-29 00:03:50 +00:00
|
|
|
latest_data_files = []
|
|
|
|
for file_model in file_models:
|
|
|
|
latest_data_files.append(FileService.get_file_data(file_model.id))
|
|
|
|
return latest_data_files
|
|
|
|
|
|
|
|
@staticmethod
|
|
|
|
def get_file_data(file_id: int, version: int = None):
|
|
|
|
"""Returns the file data with the given version, or the lastest file, if version isn't provided."""
|
|
|
|
query = session.query(FileDataModel) \
|
|
|
|
.filter(FileDataModel.file_model_id == file_id)
|
|
|
|
if version:
|
|
|
|
query = query.filter(FileDataModel.version == version)
|
|
|
|
else:
|
|
|
|
query = query.order_by(desc(FileDataModel.date_created))
|
|
|
|
return query.first()
|
2020-03-13 19:03:57 +00:00
|
|
|
|
|
|
|
@staticmethod
|
|
|
|
def get_reference_file_data(file_name):
|
|
|
|
file_model = session.query(FileModel). \
|
|
|
|
filter(FileModel.is_reference == True). \
|
|
|
|
filter(FileModel.name == file_name).first()
|
|
|
|
if not file_model:
|
|
|
|
raise ApiError("file_not_found", "There is no reference file with the name '%s'" % file_name)
|
2020-05-29 00:03:50 +00:00
|
|
|
return FileService.get_file_data(file_model.id)
|
2020-04-15 15:13:32 +00:00
|
|
|
|
|
|
|
@staticmethod
|
|
|
|
def get_workflow_file_data(workflow, file_name):
|
2020-06-04 13:49:42 +00:00
|
|
|
"""This method should be deleted, find where it is used, and remove this method.
|
|
|
|
Given a SPIFF Workflow Model, tracks down a file with the given name in the database and returns its data"""
|
2020-04-19 19:14:10 +00:00
|
|
|
workflow_spec_model = FileService.find_spec_model_in_db(workflow)
|
2020-04-15 15:13:32 +00:00
|
|
|
|
|
|
|
if workflow_spec_model is None:
|
2020-06-03 19:03:22 +00:00
|
|
|
raise ApiError(code="unknown_workflow",
|
2020-04-15 15:13:32 +00:00
|
|
|
message="Something is wrong. I can't find the workflow you are using.")
|
|
|
|
|
|
|
|
file_data_model = session.query(FileDataModel) \
|
|
|
|
.join(FileModel) \
|
|
|
|
.filter(FileModel.name == file_name) \
|
|
|
|
.filter(FileModel.workflow_spec_id == workflow_spec_model.id).first()
|
|
|
|
|
|
|
|
if file_data_model is None:
|
|
|
|
raise ApiError(code="file_missing",
|
|
|
|
message="Can not find a file called '%s' within workflow specification '%s'"
|
|
|
|
% (file_name, workflow_spec_model.id))
|
|
|
|
|
|
|
|
return file_data_model
|
|
|
|
|
|
|
|
@staticmethod
|
2020-04-19 19:14:10 +00:00
|
|
|
def find_spec_model_in_db(workflow):
|
2020-04-15 15:13:32 +00:00
|
|
|
""" Search for the workflow """
|
|
|
|
# When the workflow spec model is created, we record the primary process id,
|
|
|
|
# then we can look it up. As there is the potential for sub-workflows, we
|
|
|
|
# may need to travel up to locate the primary process.
|
|
|
|
spec = workflow.spec
|
2020-04-17 17:30:32 +00:00
|
|
|
workflow_model = session.query(WorkflowSpecModel).join(FileModel). \
|
|
|
|
filter(FileModel.primary_process_id == spec.name).first()
|
2020-04-15 15:13:32 +00:00
|
|
|
if workflow_model is None and workflow != workflow.outer_workflow:
|
2020-04-19 19:14:10 +00:00
|
|
|
return FileService.find_spec_model_in_db(workflow.outer_workflow)
|
2020-04-15 15:13:32 +00:00
|
|
|
|
|
|
|
return workflow_model
|
|
|
|
|
2020-04-24 10:58:24 +00:00
|
|
|
    @staticmethod
    def delete_file(file_id):
        """Delete a file along with its data and lookup records.

        If the file is referenced elsewhere (IntegrityError), fall back to
        archiving it instead of deleting."""
        try:
            # Remove lookup tables derived from each data version before the
            # data itself, to satisfy foreign-key constraints.
            data_models = session.query(FileDataModel).filter_by(file_model_id=file_id).all()
            for dm in data_models:
                lookup_files = session.query(LookupFileModel).filter_by(file_data_model_id=dm.id).all()
                for lf in lookup_files:
                    session.query(LookupDataModel).filter_by(lookup_file_model_id=lf.id).delete()
                    session.query(LookupFileModel).filter_by(id=lf.id).delete()
            session.query(FileDataModel).filter_by(file_model_id=file_id).delete()
            session.query(FileModel).filter_by(id=file_id).delete()
            session.commit()
        except IntegrityError as ie:
            # We can't delete the file or file data, because it is referenced elsewhere,
            # but we can at least mark it as deleted on the table.
            session.rollback()
            file_model = session.query(FileModel).filter_by(id=file_id).first()
            file_model.archived = True
            session.commit()
            app.logger.info("Failed to delete file, so archiving it instead. %i, due to %s" % (file_id, str(ie)))
|
2020-07-17 22:59:25 +00:00
|
|
|
|
|
|
|
@staticmethod
|
2020-08-14 17:04:22 +00:00
|
|
|
def get_repo_branches():
|
2020-07-17 22:59:25 +00:00
|
|
|
gh_token = app.config['GITHUB_TOKEN']
|
2020-08-14 17:04:22 +00:00
|
|
|
github_repo = app.config['GITHUB_REPO']
|
2020-07-17 22:59:25 +00:00
|
|
|
_github = Github(gh_token)
|
2020-08-14 17:04:22 +00:00
|
|
|
repo = _github.get_user().get_repo(github_repo)
|
|
|
|
branches = [branch.name for branch in repo.get_branches()]
|
|
|
|
return branches
|
|
|
|
|
|
|
|
    @staticmethod
    def update_from_github(file_ids, source_target=GithubObject.NotSet):
        """Overwrite the latest data of each given file with its content from
        the configured GitHub repository (at ref *source_target*)."""
        gh_token = app.config['GITHUB_TOKEN']
        github_repo = app.config['GITHUB_REPO']
        _github = Github(gh_token)
        repo = _github.get_user().get_repo(github_repo)

        for file_id in file_ids:
            # Latest stored version of this file.
            file_data_model = FileDataModel.query.filter_by(
                file_model_id=file_id
            ).order_by(
                desc(FileDataModel.version)
            ).first()
            try:
                repo_file = repo.get_contents(file_data_model.file_model.name, ref=source_target)
            except UnknownObjectException:
                # NOTE(review): returning here aborts processing of the
                # remaining file_ids — confirm that is intended.
                return {'error': 'Attempted to update from repository but file was not present'}
            else:
                file_data_model.data = repo_file.decoded_content
                session.add(file_data_model)
                session.commit()
|
|
|
|
|
|
|
|
@staticmethod
|
|
|
|
def publish_to_github(file_ids):
|
2020-08-14 17:04:22 +00:00
|
|
|
target_branch = app.config['TARGET_BRANCH'] if app.config['TARGET_BRANCH'] else GithubObject.NotSet
|
2020-07-17 22:59:25 +00:00
|
|
|
gh_token = app.config['GITHUB_TOKEN']
|
2020-08-14 17:04:22 +00:00
|
|
|
github_repo = app.config['GITHUB_REPO']
|
2020-07-17 22:59:25 +00:00
|
|
|
_github = Github(gh_token)
|
2020-08-14 17:04:22 +00:00
|
|
|
repo = _github.get_user().get_repo(github_repo)
|
2020-07-17 22:59:25 +00:00
|
|
|
for file_id in file_ids:
|
|
|
|
file_data_model = FileDataModel.query.filter_by(file_model_id=file_id).first()
|
|
|
|
try:
|
2020-08-14 17:04:22 +00:00
|
|
|
repo_file = repo.get_contents(file_data_model.file_model.name, ref=target_branch)
|
2020-07-17 22:59:25 +00:00
|
|
|
except UnknownObjectException:
|
|
|
|
repo.create_file(
|
|
|
|
path=file_data_model.file_model.name,
|
|
|
|
message=f'Creating {file_data_model.file_model.name}',
|
2020-08-14 17:04:22 +00:00
|
|
|
content=file_data_model.data,
|
|
|
|
branch=target_branch
|
2020-07-17 22:59:25 +00:00
|
|
|
)
|
|
|
|
return {'created': True}
|
|
|
|
else:
|
|
|
|
updated = repo.update_file(
|
|
|
|
path=repo_file.path,
|
|
|
|
message=f'Updating {file_data_model.file_model.name}',
|
2020-08-14 17:04:22 +00:00
|
|
|
content=file_data_model.data + b'brah-model',
|
|
|
|
sha=repo_file.sha,
|
|
|
|
branch=target_branch
|
2020-07-17 22:59:25 +00:00
|
|
|
)
|
|
|
|
return {'updated': True}
|