*** WIP ***
**Many** tests are failing! Committing so I can merge dev into this branch.

parent 110e2c56f8
commit 86a6039dc8
@@ -30,3 +30,5 @@ print('TESTING = ', TESTING)
 #Use the mock ldap.
 LDAP_URL = 'mock'
 
+SYNC_FILE_ROOT = 'test_sync_files'
+
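Reviewer note (not part of the diff): with this test setting, the FileService.get_sync_file_root() helper introduced below resolves the sync directory relative to the app root. A quick sanity check, mirroring that join:

```python
# Sketch only: mirrors FileService.get_sync_file_root() from this commit.
import os
from crc import app

dir_name = app.config['SYNC_FILE_ROOT']            # 'test_sync_files' under config.testing
sync_root = os.path.join(app.root_path, '..', 'tests', dir_name)
print(os.path.normpath(sync_root))                 # .../tests/test_sync_files
```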
@@ -110,8 +110,9 @@ print('ADMIN_UIDS = ', app.config['ADMIN_UIDS'])
 @app.cli.command()
 def load_files_from_filesystem():
     """Load file data into the database."""
-    from crc.services.temp_migration_service import FromFilesystemService, SYNC_FILE_ROOT
-    FromFilesystemService().update_file_metadata_from_filesystem(SYNC_FILE_ROOT)
+    from crc.services.temp_migration_service import FromFilesystemService
+    location = app.config['SYNC_FILE_ROOT']
+    FromFilesystemService().update_file_metadata_from_filesystem(location)
 
 
 @app.cli.command()
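Reviewer note: the command now reads its location from app.config rather than importing the module constant. One way to exercise it, assuming Flask's built-in test CLI runner:

```python
# Sketch: invoking the reworked command via Flask's test CLI runner.
from crc import app

runner = app.test_cli_runner()
# Click typically exposes the command with dashes in place of underscores.
result = runner.invoke(args=['load-files-from-filesystem'])
print(result.output)
```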
@@ -178,7 +178,11 @@ def update_file_info(file_id, body):
 
 
 def delete_file(file_id):
-    FileService.delete_file(file_id)
+    workflow_spec_id = session.query(FileModel.workflow_spec_id).filter(FileModel.id==file_id).scalar()
+    if workflow_spec_id is not None:
+        FileService.delete_spec_file(file_id)
+    else:
+        FileService.delete_file(file_id)
 
 
 def dmn_from_ss():
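Reviewer note: Query.scalar() returns None both for a NULL column and for a missing row, so an unknown file_id silently takes the plain-delete path; also, delete_spec_file() is an instance method in this commit, so the static-style call above likely needs FileService(). A stricter variant might look like this (sketch, not the commit's code):

```python
# Sketch only; assumes the same imports this API module already uses.
from crc import session
from crc.api.common import ApiError
from crc.models.file import FileModel
from crc.services.file_service import FileService

def delete_file(file_id):
    file_model = session.query(FileModel).filter(FileModel.id == file_id).first()
    if file_model is None:
        raise ApiError('unknown_file', f'No file exists with id: {file_id}')
    if file_model.workflow_spec_id is not None:
        FileService().delete_spec_file(file_id)   # filesystem sync tree + db
    else:
        FileService.delete_file(file_id)          # db only
```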
@@ -212,14 +212,13 @@ class DocumentDirectory(object):
 
 class WorkflowApi(object):
     def __init__(self, id, status, next_task, navigation,
-                 spec_version, is_latest_spec, workflow_spec_id, total_tasks, completed_tasks,
+                 is_latest_spec, workflow_spec_id, total_tasks, completed_tasks,
                  last_updated, is_review, title, study_id):
         self.id = id
         self.status = status
         self.next_task = next_task  # The next task that requires user input.
         self.navigation = navigation
         self.workflow_spec_id = workflow_spec_id
-        self.spec_version = spec_version
         self.is_latest_spec = is_latest_spec
         self.total_tasks = total_tasks
         self.completed_tasks = completed_tasks
@@ -232,7 +231,7 @@ class WorkflowApiSchema(ma.Schema):
     class Meta:
         model = WorkflowApi
         fields = ["id", "status", "next_task", "navigation",
-                  "workflow_spec_id", "spec_version", "is_latest_spec", "total_tasks", "completed_tasks",
+                  "workflow_spec_id", "is_latest_spec", "total_tasks", "completed_tasks",
                   "last_updated", "is_review", "title", "study_id"]
         unknown = INCLUDE
 
@@ -243,7 +242,7 @@ class WorkflowApiSchema(ma.Schema):
     @marshmallow.post_load
     def make_workflow(self, data, **kwargs):
         keys = ['id', 'status', 'next_task', 'navigation',
-                'workflow_spec_id', 'spec_version', 'is_latest_spec', "total_tasks", "completed_tasks",
+                'workflow_spec_id', 'is_latest_spec', "total_tasks", "completed_tasks",
                 "last_updated", "is_review", "title", "study_id"]
         filtered_fields = {key: data[key] for key in keys}
         filtered_fields['next_task'] = TaskSchema().make_task(data['next_task'])
@@ -1,5 +1,6 @@
 import hashlib
 import io
+import json
 import os
 import random
 import string
@@ -17,8 +18,8 @@ from sqlalchemy.exc import IntegrityError
 from crc import session, app
 from crc.api.common import ApiError
 from crc.models.data_store import DataStoreModel
-from crc.models.file import FileType, FileDataModel, FileModel, LookupFileModel, LookupDataModel
-from crc.models.workflow import WorkflowSpecModel, WorkflowModel, WorkflowSpecDependencyFile, WorkflowLibraryModel
+from crc.models.file import FileType, FileDataModel, FileModel, FileModelSchema, LookupFileModel, LookupDataModel
+from crc.models.workflow import WorkflowSpecModel, WorkflowModel, WorkflowSpecDependencyFile, WorkflowLibraryModel, WorkflowSpecCategoryModel
 from crc.services.cache_service import cache
 from crc.services.user_service import UserService
 import re
@@ -60,7 +61,7 @@ class FileService(object):
             is_status=is_status,
         )
 
-        return FileService.update_file(file_model, binary_data, content_type)
+        return FileService.update_workflow_spec_file(workflow_spec, file_model, binary_data, content_type)
 
 
 
@@ -123,13 +124,103 @@ class FileService(object):
             name=name,
             is_reference=True
         )
-        return FileService.update_file(file_model, binary_data, content_type)
+        return FileService().update_reference_file(file_model, binary_data, content_type)
 
     @staticmethod
     def get_extension(file_name):
         basename, file_extension = os.path.splitext(file_name)
         return file_extension.lower().strip()[1:]
 
+    @staticmethod
+    def get_sync_file_root():
+        dir_name = app.config['SYNC_FILE_ROOT']
+        app_root = app.root_path
+        return os.path.join(app_root, '..', 'tests', dir_name)
+
+    def write_file_to_system(self, workflow_spec_model, file_model, file_data):
+
+        category_name = None
+        sync_file_root = self.get_sync_file_root()
+        # sync_file_root = app.config['SYNC_FILE_ROOT']
+
+        # if file_model.workflow_spec_id is not None:
+        #     # we have a workflow spec file
+        #     workflow_spec_model = session.query(WorkflowSpecModel).filter(WorkflowSpecModel.id == file_model.workflow_spec_id).first()
+        #     if workflow_spec_model:
+
+        if workflow_spec_model is not None:
+            category_name = self.get_spec_file_category_name(workflow_spec_model)
+        if category_name is None and file_model.is_reference:
+            category_name = 'Reference'
+
+        # if workflow_spec_model.category_id is not None:
+        #     category_name = session.query(WorkflowSpecCategoryModel.display_name).filter(WorkflowSpecCategoryModel.id == workflow_spec_model.category_id).scalar()
+        #
+        # elif workflow_spec_model.is_master_spec:
+        #     category_name = 'Master Specification'
+        #
+        # elif workflow_spec_model.library:
+        #     category_name = 'Library Specs'
+
+        if category_name is not None:
+            if workflow_spec_model is not None:
+                file_path = os.path.join(sync_file_root,
+                                         category_name,
+                                         workflow_spec_model.display_name,
+                                         file_model.name)
+            else:
+                # Reference files all sit in the 'Reference' directory
+                file_path = os.path.join(sync_file_root,
+                                         category_name,
+                                         file_model.name)
+
+            # self.process_workflow_spec_file(file_model, file_path)
+            os.makedirs(os.path.dirname(file_path), exist_ok=True)
+            with open(file_path, 'wb') as f_handle:
+                f_handle.write(file_data)
+            json_file_path = f'{file_path}.json'
+            latest_file_model = session.query(FileModel).filter(FileModel.id==file_model.id).first()
+            file_schema = FileModelSchema().dumps(latest_file_model)
+            with open(json_file_path, 'w') as j_handle:
+                j_handle.write(file_schema)
+
+    @staticmethod
+    def update_workflow_spec_file(workflow_spec, file_model, binary_data, content_type):
+        # Verify the extension
+        file_extension = FileService.get_extension(file_model.name)
+        if file_extension not in FileType._member_names_:
+            raise ApiError('unknown_extension',
+                           'The file you provided does not have an accepted extension:' +
+                           file_extension, status_code=404)
+        else:
+            file_model.type = FileType[file_extension]
+            file_model.content_type = content_type
+            file_model.archived = False  # Unarchive the file if it is archived.
+
+        # If this is a BPMN, extract the process id.
+        if file_model.type == FileType.bpmn:
+            try:
+                bpmn: etree.Element = etree.fromstring(binary_data)
+                file_model.primary_process_id = FileService.get_process_id(bpmn)
+                file_model.is_review = FileService.has_swimlane(bpmn)
+            except XMLSyntaxError as xse:
+                raise ApiError("invalid_xml", "Failed to parse xml: " + str(xse), file_name=file_model.name)
+
+        session.add(file_model)
+        session.commit()
+
+        # Write file to filesystem
+        FileService().write_file_to_system(workflow_spec, file_model, binary_data)
+        return file_model
+
+    def update_reference_file(self, file_model, binary_data, content_type):
+        session.add(file_model)
+        session.commit()
+        self.write_file_to_system(None, file_model, binary_data)
+        print('update_reference_file')
+        return file_model
+
     @staticmethod
     def update_file(file_model, binary_data, content_type):
         session.flush()  # Assure the database is up-to-date before running this.
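Reviewer note (not part of the diff): write_file_to_system() pairs every synced file with a .json metadata sidecar under `<sync root>/<category>/<spec display name>/`. A minimal sketch of reading one pair back, assuming that layout:

```python
# Minimal sketch, assuming the <category>/<spec name>/<file> + <file>.json
# layout produced by write_file_to_system(). Not part of the commit.
import json
import os

def read_synced_pair(sync_file_root, category, spec_name, file_name):
    base = os.path.join(sync_file_root, category, spec_name, file_name)
    with open(base, 'rb') as f_handle:
        data = f_handle.read()                 # raw file bytes
    with open(base + '.json') as j_handle:
        meta = json.load(j_handle)             # FileModelSchema dump
    return {'meta': meta, 'data': data}        # same shape get_spec_data_files() returns
```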
@@ -165,15 +256,15 @@ class FileService(object):
         else:
             version = latest_data_model.version + 1
 
-        # If this is a BPMN, extract the process id.
-        if file_model.type == FileType.bpmn:
-            try:
-                bpmn: etree.Element = etree.fromstring(binary_data)
-                file_model.primary_process_id = FileService.get_process_id(bpmn)
-                file_model.is_review = FileService.has_swimlane(bpmn)
-            except XMLSyntaxError as xse:
-                raise ApiError("invalid_xml", "Failed to parse xml: " + str(xse), file_name=file_model.name)
+        # # If this is a BPMN, extract the process id.
+        # if file_model.type == FileType.bpmn:
+        #     try:
+        #         bpmn: etree.Element = etree.fromstring(binary_data)
+        #         file_model.primary_process_id = FileService.get_process_id(bpmn)
+        #         file_model.is_review = FileService.has_swimlane(bpmn)
+        #     except XMLSyntaxError as xse:
+        #         raise ApiError("invalid_xml", "Failed to parse xml: " + str(xse), file_name=file_model.name)
+        #
         try:
             user_uid = UserService.current_user().uid
         except ApiError as ae:
@@ -267,29 +358,64 @@ class FileService(object):
         results = query.all()
         return results
 
     @staticmethod
-    def get_spec_data_files(workflow_spec_id, workflow_id=None, name=None, include_libraries=False):
-        """Returns all the FileDataModels related to a workflow specification.
-        If a workflow is specified, returns the version of the spec related
-        to that workflow, otherwise, returns the lastest files."""
-        if workflow_id:
-            query = session.query(FileDataModel) \
-                .join(WorkflowSpecDependencyFile) \
-                .filter(WorkflowSpecDependencyFile.workflow_id == workflow_id) \
-                .order_by(FileDataModel.id)
-            if name:
-                query = query.join(FileModel).filter(FileModel.name == name)
-            return query.all()
-        else:
-            """Returns all the latest files related to a workflow specification"""
-            file_models = FileService.get_files(workflow_spec_id=workflow_spec_id,include_libraries=include_libraries)
-            latest_data_files = []
-            for file_model in file_models:
-                if name and file_model.name == name:
-                    latest_data_files.append(FileService.get_file_data(file_model.id))
-                elif not name:
-                    latest_data_files.append(FileService.get_file_data(file_model.id))
-            return latest_data_files
+    def get_workflow_spec_category_name(workflow_spec_id):
+        category_name = None
+        workflow_spec = session.query(WorkflowSpecModel).filter(WorkflowSpecModel.id==workflow_spec_id).first()
+        category_id = workflow_spec.category_id
+        if category_id is not None:
+            category_name = session.query(WorkflowSpecCategoryModel.display_name).filter(WorkflowSpecCategoryModel.id==category_id).scalar()
+        elif workflow_spec.is_master_spec:
+            category_name = 'Master Specification'
+        elif workflow_spec.library:
+            category_name = 'Library Specs'
+        elif workflow_spec.standalone:
+            category_name = 'Standalone'
+        return category_name
+
+    def get_spec_data_files(self, workflow_spec_id, workflow_id=None, name=None, include_libraries=False):
+        """Returns all the files related to a workflow specification."""
+        spec_data_files = []
+        workflow_spec = session.query(WorkflowSpecModel).filter(WorkflowSpecModel.id==workflow_spec_id).first()
+        workflow_spec_name = workflow_spec.display_name
+        category_name = self.get_workflow_spec_category_name(workflow_spec_id)
+        sync_file_root = self.get_sync_file_root()
+        spec_path = os.path.join(sync_file_root,
+                                 category_name,
+                                 workflow_spec_name)
+        directory_items = os.scandir(spec_path)
+        for item in directory_items:
+            if item.is_file() and not item.name.endswith('json'):
+                with open(item.path, 'rb') as f_open:
+                    json_path = f'{item.path}.json'
+                    with open(json_path, 'r') as j_open:
+                        json_data = j_open.read()
+                        json_obj = json.loads(json_data)
+                        file_data = f_open.read()
+                        file_dict = {'meta': json_obj,
+                                     'data': file_data}
+                        spec_data_files.append(file_dict)
+        print('get_spec_data_files')
+        return spec_data_files
+        # if workflow_id:
+        #     query = session.query(FileDataModel) \
+        #         .join(WorkflowSpecDependencyFile) \
+        #         .filter(WorkflowSpecDependencyFile.workflow_id == workflow_id) \
+        #         .order_by(FileDataModel.id)
+        #     if name:
+        #         query = query.join(FileModel).filter(FileModel.name == name)
+        #     return query.all()
+        # else:
+        #     """Returns all the latest files related to a workflow specification"""
+        #     file_models = FileService.get_files(workflow_spec_id=workflow_spec_id,include_libraries=include_libraries)
+        #     latest_data_files = []
+        #     for file_model in file_models:
+        #         if name and file_model.name == name:
+        #             latest_data_files.append(FileService.get_file_data(file_model.id))
+        #         elif not name:
+        #             latest_data_files.append(FileService.get_file_data(file_model.id))
+        #     return latest_data_files
 
     @staticmethod
     def get_workflow_data_files(workflow_id=None):
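Reviewer note: get_spec_data_files() now returns plain dicts read from the sync tree instead of FileDataModel rows. A sketch of consuming the new shape (the 'meta' keys follow the FileModelSchema dump and are assumptions here):

```python
# Sketch of the new return shape: a list of {'meta': ..., 'data': ...} dicts.
from crc.services.file_service import FileService

for f in FileService().get_spec_data_files(workflow_spec_id='some_spec'):  # hypothetical spec id
    print(f['meta']['name'], len(f['data']))
```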
@@ -313,14 +439,54 @@ class FileService(object):
         query = query.order_by(desc(FileDataModel.date_created))
         return query.first()
 
+    @staticmethod
+    def get_spec_file_category_name(spec_model):
+        category_name = None
+        if hasattr(spec_model, 'category_id') and spec_model.category_id is not None:
+            category_model = session.query(WorkflowSpecCategoryModel).\
+                filter(WorkflowSpecCategoryModel.id == spec_model.category_id).\
+                first()
+            category_name = category_model.display_name
+
+        elif spec_model.is_master_spec:
+            category_name = 'Master Specification'
+
+        elif spec_model.library:
+            category_name = 'Library Specs'
+
+        elif spec_model.standalone:
+            category_name = 'Standalone'
+
+        return category_name
+
+    def get_spec_file_data(self, file_id: int):
+        file_model = session.query(FileModel).filter(FileModel.id==file_id).first()
+        if file_model is not None:
+            spec_model = session.query(WorkflowSpecModel).filter(WorkflowSpecModel.id==file_model.workflow_spec_id).first()
+            if spec_model is not None:
+                category_name = self.get_spec_file_category_name(spec_model)
+                sync_file_root = self.get_sync_file_root()
+                file_path = os.path.join(sync_file_root, category_name, spec_model.display_name, file_model.name)
+                with open(file_path, 'rb') as f_handle:
+                    spec_file_data = f_handle.read()
+                    return spec_file_data
+            else:
+                raise ApiError(code='spec_not_found',
+                               message=f'No spec found for file with file_id: {file_id}, and spec_id: {file_model.workflow_spec_id}')
+        else:
+            raise ApiError(code='model_not_found',
+                           message=f'No model found for file with file_id: {file_id}')
+
     @staticmethod
     def get_reference_file_data(file_name):
         file_model = session.query(FileModel). \
             filter(FileModel.is_reference == True). \
             filter(FileModel.name == file_name).first()
         if not file_model:
-            raise ApiError("file_not_found", "There is no reference file with the name '%s'" % file_name)
+            sync_file_root = FileService().get_sync_file_root()
+            file_path = os.path.join(sync_file_root, 'Reference', file_name)
+            if os.path.exists(file_path):
+                with open(file_path, 'rb') as f_open:
+                    file_data = f_open.read()
+                    return file_data
+            else:
+                raise ApiError("file_not_found", "There is no reference file with the name '%s'" % file_name)
         return FileService.get_file_data(file_model.id)
 
     @staticmethod
     def get_workflow_file_data(workflow, file_name):
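Reviewer note: the new reader surfaces both failure modes as ApiError with distinct codes, which callers can branch on (sketch, assuming ApiError exposes its code attribute as used elsewhere in this codebase):

```python
# Sketch: distinguishing the two error codes raised by get_spec_file_data().
from crc.api.common import ApiError
from crc.services.file_service import FileService

try:
    data = FileService().get_spec_file_data(file_id=42)   # hypothetical id
except ApiError as ae:
    print(ae.code)   # 'spec_not_found' or 'model_not_found'
```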
@@ -358,6 +524,34 @@ class FileService(object):
 
         return workflow_model
 
+    def delete_spec_file(self, file_id):
+        sync_file_root = self.get_sync_file_root()
+        file_model = session.query(FileModel).filter(FileModel.id==file_id).first()
+        workflow_spec_id = file_model.workflow_spec_id
+        workflow_spec_model = session.query(WorkflowSpecModel).filter(WorkflowSpecModel.id==workflow_spec_id).first()
+        category_name = self.get_spec_file_category_name(workflow_spec_model)
+        file_model_name = file_model.name
+        spec_directory_path = os.path.join(sync_file_root,
+                                           category_name,
+                                           workflow_spec_model.display_name)
+        file_path = os.path.join(spec_directory_path,
+                                 file_model_name)
+        json_file_path = os.path.join(spec_directory_path,
+                                      f'{file_model_name}.json')
+
+        try:
+            os.remove(file_path)
+            os.remove(json_file_path)
+            # os.rmdir(spec_directory_path)
+            session.delete(file_model)
+            session.commit()
+        except IntegrityError as ie:
+            session.rollback()
+            file_model = session.query(FileModel).filter_by(id=file_id).first()
+            file_model.archived = True
+            session.commit()
+            app.logger.info("Failed to delete file, so archiving it instead. %i, due to %s" % (file_id, str(ie)))
+
     @staticmethod
     def delete_file(file_id):
         try:
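Reviewer note: only IntegrityError is handled in delete_spec_file(), so a file already missing on disk raises FileNotFoundError before the database delete runs. A tolerant variant (sketch; file_path and json_file_path as computed above):

```python
# Sketch: ignore already-missing files so the db delete still proceeds.
import contextlib
import os

with contextlib.suppress(FileNotFoundError):
    os.remove(file_path)
with contextlib.suppress(FileNotFoundError):
    os.remove(json_file_path)
```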
@@ -131,15 +131,15 @@ class LookupService(object):
             file_name = field.get_property(Task.FIELD_PROP_SPREADSHEET_NAME)
             value_column = field.get_property(Task.FIELD_PROP_VALUE_COLUMN)
             label_column = field.get_property(Task.FIELD_PROP_LABEL_COLUMN)
-            latest_files = FileService.get_spec_data_files(workflow_spec_id=workflow_model.workflow_spec_id,
+            latest_files = FileService().get_spec_data_files(workflow_spec_id=workflow_model.workflow_spec_id,
                                                            workflow_id=workflow_model.id,
                                                            name=file_name)
             if len(latest_files) < 1:
                 raise ApiError("invalid_enum", "Unable to locate the lookup data file '%s'" % file_name)
             else:
-                data_model = latest_files[0]
+                data_dict = latest_files[0]
 
-            lookup_model = LookupService.build_lookup_table(data_model, value_column, label_column,
+            lookup_model = LookupService.build_lookup_table(data_dict, value_column, label_column,
                                                             workflow_model.workflow_spec_id, task_spec_id, field_id)
 
         # Use the results of an LDAP request to populate enum field options
@@ -158,19 +158,23 @@ class LookupService(object):
         return lookup_model
 
     @staticmethod
-    def build_lookup_table(data_model: FileDataModel, value_column, label_column,
+    def build_lookup_table(data_dict, value_column, label_column,
                            workflow_spec_id=None, task_spec_id=None, field_id=None):
         """ In some cases the lookup table can be very large. This method will add all values to the database
         in a way that can be searched and returned via an api call - rather than sending the full set of
         options along with the form. It will only open the file and process the options if something has
         changed. """
+        if workflow_spec_id is not None:
+            file_data = data_dict['data']
+        else:
+            file_data = data_dict.data
         try:
-            xlsx = ExcelFile(data_model.data, engine='openpyxl')
+            xlsx = ExcelFile(file_data, engine='openpyxl')
         # Pandas--or at least openpyxl, cannot read old xls files.
         # The error comes back as zipfile.BadZipFile because xlsx files are zipped xml files
         except BadZipFile:
             raise ApiError(code='excel_error',
-                           message=f'Error opening excel file {data_model.file_model.name}. You may have an older .xls spreadsheet. (file_model_id: {data_model.file_model_id} workflow_spec_id: {workflow_spec_id}, task_spec_id: {task_spec_id}, and field_id: {field_id})')
+                           message=f"Error opening excel file {data_dict['meta']['name']}. You may have an older .xls spreadsheet. (file_model_id: {data_dict['meta']['file_model_id']} workflow_spec_id: {workflow_spec_id}, task_spec_id: {task_spec_id}, and field_id: {field_id})")
         df = xlsx.parse(xlsx.sheet_names[0])  # Currently we only look at the fist sheet.
         df = df.convert_dtypes()
         df = df.loc[:, ~df.columns.str.contains('^Unnamed')]  # Drop unnamed columns.
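Reviewer note: build_lookup_table() now accepts two shapes during the transition, keyed off whether workflow_spec_id is set: the dict produced by get_spec_data_files() and the legacy FileDataModel. A small normalizer makes the duality explicit (sketch; the 'meta' keys are assumptions):

```python
# Sketch: normalize the two input shapes to (bytes, display name).
def unpack_lookup_source(source, from_sync_tree=True):
    if from_sync_tree:                               # dict from get_spec_data_files()
        return source['data'], source['meta']['name']
    return source.data, source.file_model.name       # legacy FileDataModel row
```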
@@ -179,17 +183,17 @@ class LookupService(object):
 
         if value_column not in df:
             raise ApiError("invalid_enum",
-                           "The file %s does not contain a column named % s" % (data_model.file_model.name,
+                           "The file %s does not contain a column named % s" % (data_dict['meta']['name'],
                                                                                 value_column))
         if label_column not in df:
             raise ApiError("invalid_enum",
-                           "The file %s does not contain a column named % s" % (data_model.file_model.name,
+                           "The file %s does not contain a column named % s" % (data_dict['meta']['name'],
                                                                                 label_column))
 
         lookup_model = LookupFileModel(workflow_spec_id=workflow_spec_id,
                                        field_id=field_id,
                                        task_spec_id=task_spec_id,
-                                       file_data_model_id=data_model.id,
+                                       file_data_model_id=data_dict['meta']['id'],
                                        is_ldap=False)
 
         db.session.add(lookup_model)
@@ -9,7 +9,7 @@ import json
 import os
 
 
-SYNC_FILE_ROOT = os.path.join(app.root_path, '..', 'files')
+SYNC_FILE_ROOT = app.config['SYNC_FILE_ROOT']
 
 
 class FromFilesystemService(object):
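Reviewer note: a module-level read of app.config freezes the value when temp_migration_service is first imported, so tests that override the config afterwards won't be seen here. A late-binding accessor avoids that (sketch):

```python
# Sketch: resolve the sync root at call time rather than import time.
def sync_file_root():
    from crc import app
    return app.config['SYNC_FILE_ROOT']
```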
@@ -85,7 +85,7 @@ class FromFilesystemService(object):
                                       content_type=CONTENT_TYPES[spec_file_name.split('.')[-1]],
                                       binary_data=spec_handle.read())
 
-            print(f'process_workflow_spec_directory: data_obj: {data_obj}')
+            print(f'process_workflow_spec_file: data_obj: {data_obj}')
         return workflow_spec_file_model
 
     @staticmethod
@@ -219,6 +219,9 @@ class ToFilesystemService(object):
         elif workflow_spec_model.library:
             category_name = 'Library Specs'
 
+        elif workflow_spec_model.standalone:
+            category_name = 'Standalone'
+
         if category_name is not None:
             # Only process if we have a workflow_spec_model and category_name
             self.process_workflow_spec(location, workflow_spec_model, category_name)
@@ -108,11 +108,11 @@ class WorkflowProcessor(object):
         self.workflow_model = workflow_model
 
         if workflow_model.bpmn_workflow_json is None:  # The workflow was never started.
-            self.spec_data_files = FileService.get_spec_data_files(
+            self.spec_data_files = FileService().get_spec_data_files(
                 workflow_spec_id=workflow_model.workflow_spec_id,include_libraries=True)
             spec = self.get_spec(self.spec_data_files, workflow_model.workflow_spec_id)
         else:
-            self.spec_data_files = FileService.get_spec_data_files(
+            self.spec_data_files = FileService().get_spec_data_files(
                 workflow_spec_id=workflow_model.workflow_spec_id,
                 workflow_id=workflow_model.id)
             spec = None
@@ -146,11 +146,11 @@ class WorkflowProcessor(object):
         except MissingSpecError as ke:
             raise ApiError(code="unexpected_workflow_structure",
                            message="Failed to deserialize workflow"
-                                   " '%s' version %s, due to a mis-placed or missing task '%s'" %
-                                   (self.workflow_spec_id, self.get_version_string(), str(ke)))
+                                   " '%s' due to a mis-placed or missing task '%s'" %
+                                   (self.workflow_spec_id, str(ke)))
 
         # set whether this is the latest spec file.
-        if self.spec_data_files == FileService.get_spec_data_files(workflow_spec_id=workflow_model.workflow_spec_id):
+        if self.spec_data_files == FileService().get_spec_data_files(workflow_spec_id=workflow_model.workflow_spec_id):
             self.is_latest_spec = True
         else:
             self.is_latest_spec = False
@@ -214,13 +214,13 @@ class WorkflowProcessor(object):
         # this could potentially become expensive to load all the data in the data models.
         # in which case we might consider using a deferred loader for the actual data, but
         # trying not to pre-optimize.
-        file_data_models = FileService.get_spec_data_files(self.workflow_model.workflow_spec_id,
+        file_data_models = FileService().get_spec_data_files(self.workflow_model.workflow_spec_id,
                                                            self.workflow_model.id)
         return WorkflowProcessor.__get_version_string_for_data_models(file_data_models)
 
     @staticmethod
     def get_latest_version_string_for_spec(spec_id):
-        file_data_models = FileService.get_spec_data_files(spec_id)
+        file_data_models = FileService().get_spec_data_files(spec_id)
         return WorkflowProcessor.__get_version_string_for_data_models(file_data_models)
 
     @staticmethod
@@ -249,7 +249,7 @@ class WorkflowProcessor(object):
         return full_version
 
     def update_dependencies(self, spec_data_files):
-        existing_dependencies = FileService.get_spec_data_files(
+        existing_dependencies = FileService().get_spec_data_files(
             workflow_spec_id=self.workflow_model.workflow_spec_id,
             workflow_id=self.workflow_model.id)
 
@@ -268,7 +268,7 @@ class WorkflowProcessor(object):
         """Executes a BPMN specification for the given study, without recording any information to the database
         Useful for running the master specification, which should not persist. """
         lasttime = firsttime()
-        spec_data_files = FileService.get_spec_data_files(spec_model.id)
+        spec_data_files = FileService().get_spec_data_files(spec_model.id)
         lasttime = sincetime('load Files', lasttime)
         spec = WorkflowProcessor.get_spec(spec_data_files, spec_model.id)
         lasttime = sincetime('get spec', lasttime)
@@ -294,22 +294,22 @@ class WorkflowProcessor(object):
         return parser
 
     @staticmethod
-    def get_spec(file_data_models: List[FileDataModel], workflow_spec_id):
+    def get_spec(files, workflow_spec_id):
         """Returns a SpiffWorkflow specification for the given workflow spec,
         using the files provided.  The Workflow_spec_id is only used to generate
         better error messages."""
         parser = WorkflowProcessor.get_parser()
         process_id = None
 
-        for file_data in file_data_models:
-            if file_data.file_model.type == FileType.bpmn:
-                bpmn: etree.Element = etree.fromstring(file_data.data)
-                if file_data.file_model.primary and file_data.file_model.workflow_spec_id == workflow_spec_id:
+        for file in files:
+            if file['meta']['name'][-4:] == FileType.bpmn.value:
+                bpmn: etree.Element = etree.fromstring(file['data'])
+                if file['meta']['primary'] and file['meta']['workflow_spec_id'] == workflow_spec_id:
                     process_id = FileService.get_process_id(bpmn)
-                parser.add_bpmn_xml(bpmn, filename=file_data.file_model.name)
-            elif file_data.file_model.type == FileType.dmn:
-                dmn: etree.Element = etree.fromstring(file_data.data)
-                parser.add_dmn_xml(dmn, filename=file_data.file_model.name)
+                parser.add_bpmn_xml(bpmn, filename=file['meta']['name'])
+            elif file['meta']['name'][-3:] == FileType.dmn.value:
+                dmn: etree.Element = etree.fromstring(file['data'])
+                parser.add_dmn_xml(dmn, filename=file['meta']['name'])
         if process_id is None:
             raise (ApiError(code="no_primary_bpmn_error",
                             message="There is no primary BPMN model defined for workflow %s" % workflow_spec_id))
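Reviewer note: the new type checks slice the file name ('bpmn' is four characters, 'dmn' three), so an extensionless name like 'mybpmn' would still match. The service's existing get_extension() helper gives the same dispatch more robustly (sketch, assuming the meta dict carries the name):

```python
# Sketch: extension-based dispatch via the existing helper instead of
# slicing the name (name[-4:] == 'bpmn' also matches 'mybpmn').
from crc.models.file import FileType
from crc.services.file_service import FileService

def file_type_from_meta(meta):
    ext = FileService.get_extension(meta['name'])    # e.g. 'bpmn', 'dmn'
    return FileType[ext] if ext in FileType._member_names_ else None
```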
@@ -576,7 +576,6 @@ class WorkflowService(object):
                            next_task=None,
                            navigation=navigation,
                            workflow_spec_id=processor.workflow_spec_id,
-                           spec_version=processor.get_version_string(),
                            is_latest_spec=processor.is_latest_spec,
                            total_tasks=len(navigation),
                            completed_tasks=processor.workflow_model.completed_tasks,
@@ -914,7 +913,6 @@ class WorkflowService(object):
                              user_uid=user_uid,
                              workflow_id=processor.workflow_model.id,
                              workflow_spec_id=processor.workflow_model.workflow_spec_id,
-                             spec_version=processor.get_version_string(),
                              action=action,
                              task_id=task.id,
                              task_name=task.name,
@@ -14,7 +14,7 @@ from crc import app, session
 from crc.models.file import FileModel, FileDataModel, LookupFileModel
 from crc.models.workflow import WorkflowSpecDependencyFile
 from crc.services.file_service import FileService
-from crc.services.temp_migration_service import FromFilesystemService, ToFilesystemService, SYNC_FILE_ROOT
+from crc.services.temp_migration_service import FromFilesystemService, ToFilesystemService
 
 import os
 
@@ -24,6 +24,8 @@ down_revision = '65b5ed6ae05b'
 branch_labels = None
 depends_on = None
 
+SYNC_FILE_ROOT = app.config['SYNC_FILE_ROOT']
+
 
 def upgrade():
|
|
@ -9,6 +9,7 @@ import json
|
|||
import unittest
|
||||
import urllib.parse
|
||||
import datetime
|
||||
import shutil
|
||||
from flask import g
|
||||
|
||||
from crc import app, db, session
|
||||
|
@@ -82,6 +83,7 @@ class BaseTest(unittest.TestCase):
 
     @classmethod
     def setUpClass(cls):
+        print('setUpClass')
         app.config.from_object('config.testing')
         cls.ctx = app.test_request_context()
         cls.app = app.test_client()
@@ -90,17 +92,27 @@ class BaseTest(unittest.TestCase):
 
     @classmethod
     def tearDownClass(cls):
+        print('tearDownClass')
         cls.ctx.pop()
         db.drop_all()
         pass
 
     def setUp(self):
+        print('setUp')
         pass
 
     def tearDown(self):
+        print('tearDown')
         ExampleDataLoader.clean_db()
         self.logout()
        self.auths = {}
+        self.clear_test_sync_files()
+
+    @staticmethod
+    def clear_test_sync_files():
+        sync_file_root = FileService().get_sync_file_root()
+        if os.path.exists(sync_file_root):
+            shutil.rmtree(sync_file_root)
 
     def logged_in_headers(self, user=None, redirect_url='http://some/frontend/url'):
         if user is None:
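Reviewer note: tearDown now clears the sync tree after every test. shutil.rmtree with ignore_errors=True would let the exists() guard drop (sketch):

```python
# Sketch: same cleanup without the exists() check.
import shutil
from crc.services.file_service import FileService

def clear_test_sync_files():
    shutil.rmtree(FileService().get_sync_file_root(), ignore_errors=True)
```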
@@ -172,7 +184,8 @@ class BaseTest(unittest.TestCase):
         self.assertIsNotNone(files)
         self.assertGreater(len(files), 0)
         for file in files:
-            file_data = session.query(FileDataModel).filter_by(file_model_id=file.id).all()
+            # file_data = session.query(FileDataModel).filter_by(file_model_id=file.id).all()
+            file_data = FileService().get_spec_file_data(file.id)
             self.assertIsNotNone(file_data)
             self.assertGreater(len(file_data), 0)
 
@@ -379,7 +392,6 @@ class BaseTest(unittest.TestCase):
         self.assertEqual(user_uid, event.user_uid)
         self.assertEqual(workflow.id, event.workflow_id)
         self.assertEqual(workflow.workflow_spec_id, event.workflow_spec_id)
-        self.assertEqual(workflow.spec_version, event.spec_version)
         self.assertEqual(WorkflowService.TASK_ACTION_COMPLETE, event.action)
         self.assertEqual(task_in.id, task_id)
         self.assertEqual(task_in.name, event.task_name)
@@ -416,4 +428,3 @@ class BaseTest(unittest.TestCase):
         """Returns a bytesIO object of a well formed BPMN xml file with some string content of your choosing."""
         minimal_dbpm = "<x><process id='1' isExecutable='false'><startEvent id='a'/></process>%s</x>"
         return (minimal_dbpm % content).encode()
-
@@ -1,13 +1,17 @@
 from github import UnknownObjectException
-from sqlalchemy import desc
+from sqlalchemy import desc, column
 from tests.base_test import BaseTest
 from unittest.mock import patch, Mock
 
-from crc import db
-from crc.models.file import FileDataModel
+from crc import db, session
+from crc.api.common import ApiError
+from crc.models.file import FileModel, FileDataModel, CONTENT_TYPES
+from crc.models.workflow import WorkflowModel, WorkflowSpecModel
 from crc.services.file_service import FileService
 from crc.services.workflow_processor import WorkflowProcessor
 
+from io import BytesIO
+
 
 class FakeGithubCreates(Mock):
     def get_user(var):
@@ -225,3 +229,45 @@ class TestFileService(BaseTest):
         branches = FileService.get_repo_branches()
 
         self.assertIsInstance(branches, list)
+
+    def test_add_workflow_spec_file(self):
+
+        self.load_example_data()
+        spec = db.session.query(WorkflowSpecModel).first()
+
+        file_data = b"abcdef"
+        file_name = 'random_fact.svg'
+        content_type = CONTENT_TYPES[file_name[-3:]]
+
+        # This creates a file on the filesystem
+        file_model = FileService().add_workflow_spec_file(spec, file_name, content_type, file_data)
+
+        # This reads from a file on the filesystem
+        spec_file_data = FileService().get_spec_file_data(file_model.id)
+
+        self.assertEqual(file_data, spec_file_data)
+
+    def test_delete_workflow_spec_file(self):
+        self.load_example_data()
+        file_model = session.query(FileModel).filter(column('workflow_spec_id').isnot(None)).first()
+        file_data_before = FileService().get_spec_file_data(file_model.id)
+        self.assertGreater(len(file_data_before), 0)
+
+        FileService().delete_spec_file(file_model.id)
+
+        with self.assertRaises(ApiError) as ae:
+            FileService().get_spec_file_data(file_model.id)
+
+        self.assertIn('No model found for file with file_id', ae.exception.message)
+        print('test_delete_workflow_spec_file')
+
+    def test_get_spec_files(self):
+        self.load_example_data()
+        spec = session.query(WorkflowSpecModel.id).first()
+        spec_files = FileService().get_spec_data_files(spec.id)
+        workflow = session.query(WorkflowModel).first()
+        processor = WorkflowProcessor(workflow)
+
+        self.assertIsInstance(processor, WorkflowProcessor)
+
+        print('test_get_spec_files')
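Reviewer note: the textual column() construct works in the delete test, but the mapped attribute expresses the same filter with model-level type checking (sketch):

```python
# Sketch: equivalent filter via the mapped attribute instead of column().
file_model = session.query(FileModel) \
    .filter(FileModel.workflow_spec_id.isnot(None)).first()
```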
@@ -1,6 +1,9 @@
 from tests.base_test import BaseTest
 
-from crc.services.temp_migration_service import FromFilesystemService, SYNC_FILE_ROOT
+from crc import app
+from crc.services.temp_migration_service import FromFilesystemService
+
+SYNC_FILE_ROOT = app.config['SYNC_FILE_ROOT']
 
 
 class TestFilesFromFilesystem(BaseTest):