1. Created a UserFileService, so it's clear which file service we use when: UserFiles, SpecFiles, and ReferenceFiles each function differently.

2. Reference Files and Spec Files are written to disk; they do not exist in the database at all.
This commit is contained in:
Dan 2022-02-02 12:59:56 -05:00
parent 4c00a5762f
commit 4ec6e403f5
61 changed files with 763 additions and 1503 deletions
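
In short: UserFileService keeps user-uploaded workflow files in the database, while SpecFileService and ReferenceFileService now extend a common FileSystemService and read and write plain files under SYNC_FILE_ROOT. A minimal sketch of the intended call pattern, using the signatures from the diff below (the workflow, workflow_spec, irb doc code, and *_bytes values are placeholders):

from crc.services.user_file_service import UserFileService
from crc.services.spec_file_service import SpecFileService
from crc.services.reference_file_service import ReferenceFileService

# User files: stored in the database, tied to a running workflow.
file_model = UserFileService.add_workflow_file(workflow_id=workflow.id,
                                               irb_doc_code='Study_Protocol_Document',  # placeholder doc code
                                               task_spec_name='upload_task',
                                               name='protocol.docx',
                                               content_type='application/octet-stream',
                                               binary_data=docx_bytes)

# Spec files: written under SYNC_FILE_ROOT on disk, alongside the spec.
spec_file = SpecFileService.add_file(workflow_spec, 'process.bpmn', bpmn_bytes)

# Reference files: written to SYNC_FILE_ROOT/Reference/ on disk.
ref_file = ReferenceFileService.add_reference_file('documents.xlsx', xlsx_bytes)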

View File

@ -31,4 +31,4 @@ print('TESTING = ', TESTING)
#Use the mock ldap.
LDAP_URL = 'mock'
SYNC_FILE_ROOT = 'tests/test_sync_files'
SYNC_FILE_ROOT = 'tests/SPECS'

View File

@ -52,7 +52,6 @@ ma = Marshmallow(app)
from crc import models
from crc import api
from crc.api import admin
from crc.services.file_service import FileService
from crc.services.workflow_service import WorkflowService
connexion_app.add_api('api.yml', base_path='/v1.0')

View File

@ -539,7 +539,7 @@ paths:
application/json:
schema:
$ref: "#components/schemas/File"
/workflow-specification/{spec_id}/file/{file_id}:
/workflow-specification/{spec_id}/file/{file_name}:
parameters:
- name: spec_id
in: path
@ -547,12 +547,12 @@ paths:
description: The unique id of an existing workflow specification to validate.
schema:
type: string
- name: file_id
- name: file_name
in: path
required: true
description: The name of the spec file
schema:
type: integer
type: string
get:
operationId: crc.api.spec_file.get_file
summary: Returns metadata about the file
@ -565,6 +565,32 @@ paths:
application/json:
schema:
$ref: "#components/schemas/File"
put:
operationId: crc.api.spec_file.update
summary: Updates the given file, marking it as the primary file and process if so specified.
tags:
- Spec Files
parameters:
- name: is_primary
in: query
required: true
description: Whether to make this the primary file for the workflow.
schema:
type: boolean
requestBody:
description: The file information to update
required: false
content:
application/json:
schema:
$ref: '#/components/schemas/File'
responses:
'200':
description: Returns the file information.
content:
application/json:
schema:
$ref: "#components/schemas/File"
delete:
operationId: crc.api.spec_file.delete
summary: Removes an existing workflow spec file.
@ -573,7 +599,7 @@ paths:
responses:
'204':
description: The file was removed.
/workflow-specification/{spec_id}/file/{file_id}/data:
/workflow-specification/{spec_id}/file/{file_name}/data:
parameters:
- name: spec_id
in: path
@ -581,12 +607,12 @@ paths:
description: The unique id of an existing workflow specification to validate.
schema:
type: string
- name: file_id
- name: file_name
in: path
required: true
description: The name of the requested file
schema:
type: integer
type: string
get:
operationId: crc.api.spec_file.get_data
summary: Returns only the spec file content
@ -665,7 +691,7 @@ paths:
required: true
description: The unique id of an existing workflow spec category to modify.
schema:
type: string
type: integer
get:
operationId: crc.api.workflow.get_workflow_spec_category
summary: Returns a single workflow spec category
@ -838,7 +864,7 @@ paths:
$ref: "#/components/schemas/File"
delete:
operationId: crc.api.file.delete_file
summary: Removes an existing file. In the event the file can not be deleted, it is marked as "archived" in the database and is no longer returned unless specifically requested by id.
summary: Removes an existing file.
tags:
- Files
responses:
@ -992,25 +1018,6 @@ paths:
type: string
format: binary
example: '<?xml version="1.0" encoding="UTF-8"?><bpmn:definitions></bpmn:definitions>'
put:
operationId: crc.api.reference_file.update_reference_file_info
security:
- auth_admin: ['secret']
summary: Update the file_info of a named reference file.
tags:
- Reference Files
requestBody:
content:
application/json:
schema:
$ref: "#/components/schemas/File"
responses:
'200':
description: File info updated successfully
content:
application/json:
schema:
$ref: "#/components/schemas/File"
delete:
operationId: crc.api.reference_file.delete_reference_file
summary: Remove an existing reference file.

View File

@ -11,13 +11,13 @@ from werkzeug.utils import redirect
from jinja2 import Markup
from crc import db, app
from crc.api.common import ApiError
from crc.api.user import verify_token, verify_token_admin
from crc.models.file import FileModel, FileDataModel
from crc.models.task_event import TaskEventModel
from crc.models.study import StudyModel
from crc.models.user import UserModel
from crc.models.workflow import WorkflowModel
from crc.services.file_service import FileService
class AdminModelView(sqla.ModelView):
@ -59,11 +59,11 @@ class FileView(AdminModelView):
@action('publish', 'Publish', 'Are you sure you want to publish this file(s)?')
def action_publish(self, ids):
FileService.publish_to_github(ids)
raise ApiError("not_implemented", "This method is not yet implemented.")
@action('update', 'Update', 'Are you sure you want to update this file(s)?')
def action_update(self, ids):
FileService.update_from_github(ids)
raise ApiError("not_implemented", "This method is not yet implemented.")
def json_formatter(view, context, model, name):

View File

@ -1,8 +1,8 @@
from crc.models.api_models import DocumentDirectorySchema
from crc.models.file import File
from crc.services.document_service import DocumentService
from crc.services.file_service import FileService
from crc.services.lookup_service import LookupService
from crc.services.user_file_service import UserFileService
def get_document_directory(study_id, workflow_id=None):
@ -10,9 +10,9 @@ def get_document_directory(study_id, workflow_id=None):
return a nested list of files arranged according to the category hierarchy
defined in the doc dictionary
"""
file_models = FileService.get_files_for_study(study_id=study_id)
file_models = UserFileService.get_files_for_study(study_id=study_id)
doc_dict = DocumentService.get_dictionary()
files = (File.from_models(model, FileService.get_file_data(model.id), doc_dict) for model in file_models)
files = (File.from_models(model, UserFileService.get_file_data(model.id), doc_dict) for model in file_models)
directory = DocumentService.get_directory(doc_dict, files, workflow_id)
return DocumentDirectorySchema(many=True).dump(directory)

View File

@ -9,19 +9,11 @@ from crc.api.common import ApiError
from crc.api.user import verify_token
from crc.models.file import FileSchema, FileModel, File, FileModelSchema, FileDataModel
from crc.services.document_service import DocumentService
from crc.services.file_service import FileService
from crc.services.reference_file_service import ReferenceFileService
from crc.services.spec_file_service import SpecFileService
from crc.services.user_file_service import UserFileService
def to_file_api(file_model):
"""Converts a FileModel object to something we can return via the api"""
if file_model.workflow_spec_id is not None:
file_data_model = SpecFileService().get_spec_file_data(file_model.id)
elif file_model.is_reference:
file_data_model = ReferenceFileService().get_reference_file_data(file_model.name)
else:
file_data_model = FileService.get_file_data(file_model.id)
file_data_model = UserFileService.get_file_data(file_model.id)
return File.from_models(file_model, file_data_model,
DocumentService.get_dictionary())
@ -32,9 +24,9 @@ def get_files(workflow_id=None, form_field_key=None, study_id=None):
'Please specify a workflow_id with an optional form_field_key')
if study_id is not None:
file_models = FileService.get_files_for_study(study_id=study_id, irb_doc_code=form_field_key)
file_models = UserFileService.get_files_for_study(study_id=study_id, irb_doc_code=form_field_key)
else:
file_models = FileService.get_files(workflow_id=workflow_id,
file_models = UserFileService.get_files(workflow_id=workflow_id,
irb_doc_code=form_field_key)
files = (to_file_api(model) for model in file_models)
@ -50,7 +42,7 @@ def add_file(workflow_id=None, task_spec_name=None, form_field_key=None):
if task_spec_name is None:
raise ApiError('invalid_workflow_file',
'When adding a workflow related file, you must specify a task_spec_name')
file_model = FileService.add_workflow_file(workflow_id=workflow_id, irb_doc_code=form_field_key,
file_model = UserFileService.add_workflow_file(workflow_id=workflow_id, irb_doc_code=form_field_key,
task_spec_name=task_spec_name,
name=file.filename, content_type=file.content_type,
binary_data=file.stream.read())
@ -65,7 +57,7 @@ def update_file_data(file_id):
file = connexion.request.files['file']
if file_model is None:
raise ApiError('no_such_file', f'The file id you provided ({file_id}) does not exist')
file_model = FileService.update_file(file_model, file.stream.read(), file.content_type)
file_model = UserFileService.update_file(file_model, file.stream.read(), file.content_type)
return FileSchema().dump(to_file_api(file_model))
@ -77,7 +69,7 @@ def get_file_data_by_hash(md5_hash):
def get_file_data(file_id, version=None):
file_model = session.query(FileModel).filter(FileModel.id==file_id).first()
if file_model is not None:
file_data_model = FileService.get_file_data(file_id, version)
file_data_model = UserFileService.get_file_data(file_id, version)
if file_data_model is not None:
return send_file(
io.BytesIO(file_data_model.data),
@ -95,12 +87,7 @@ def get_file_data_link(file_id, auth_token, version=None):
if not verify_token(auth_token):
raise ApiError('not_authenticated', 'You need to include an authorization token in the URL with this')
file_model = session.query(FileModel).filter(FileModel.id==file_id).first()
if file_model.workflow_spec_id is not None:
file_data = SpecFileService().get_spec_file_data(file_id)
elif file_model.is_reference:
file_data = ReferenceFileService().get_reference_file_data(file_id)
else:
file_data = FileService.get_file_data(file_id, version)
file_data = UserFileService.get_file_data(file_id, version)
if file_data is None:
raise ApiError('no_such_file', f'The file id you provided ({file_id}) does not exist')
return send_file(
@ -136,12 +123,12 @@ def update_file_info(file_id, body):
def delete_file(file_id):
FileService.delete_file(file_id)
UserFileService.delete_file(file_id)
def dmn_from_ss():
file = connexion.request.files['file']
result = FileService.dmn_from_spreadsheet(file)
result = UserFileService.dmn_from_spreadsheet(file)
return send_file(
io.BytesIO(result),
attachment_filename='temp_dmn.dmn',

View File

@ -2,7 +2,6 @@ from crc import session
from crc.api.common import ApiError
from crc.api.file import to_file_api
from crc.models.file import FileModel, FileSchema, CONTENT_TYPES
from crc.services.file_service import FileService
from crc.services.reference_file_service import ReferenceFileService
from flask import send_file
@ -13,33 +12,24 @@ import connexion
def get_reference_files():
"""Gets a list of all reference files"""
results = ReferenceFileService.get_reference_files()
files = (to_file_api(model) for model in results)
files = ReferenceFileService.get_reference_files()
return FileSchema(many=True).dump(files)
def get_reference_file_data(name):
file_extension = FileService.get_extension(name)
file_extension = ReferenceFileService.get_extension(name)
content_type = CONTENT_TYPES[file_extension]
file_data = ReferenceFileService().get_reference_file_data(name)
file_data = ReferenceFileService().get_data(name)
return send_file(
io.BytesIO(file_data.data),
io.BytesIO(file_data),
attachment_filename=name,
mimetype=content_type,
cache_timeout=-1 # Don't cache these files on the browser.
)
def get_reference_file_info(name):
"""Return metadata for a reference file"""
file_model = session.query(FileModel).\
filter_by(name=name).with_for_update().\
filter_by(archived=False).with_for_update().\
first()
if file_model is None:
# TODO: Should this be 204 or 404?
raise ApiError('no_such_file', f'The reference file name you provided ({name}) does not exist', status_code=404)
return FileSchema().dump(to_file_api(file_model))
return FileSchema().dump(ReferenceFileService.get_reference_file(name))
def update_reference_file_data(name):
@ -49,46 +39,23 @@ def update_reference_file_data(name):
'Expected a file named "file" in the multipart form request', status_code=400)
file = connexion.request.files['file']
name_extension = FileService.get_extension(name)
file_extension = FileService.get_extension(file.filename)
name_extension = ReferenceFileService.get_extension(name)
file_extension = ReferenceFileService.get_extension(file.filename)
if name_extension != file_extension:
raise ApiError('invalid_file_type',
"The file you uploaded has an extension '%s', but it should have an extension of '%s' " %
(file_extension, name_extension))
file_model = session.query(FileModel).filter(FileModel.name==name).first()
if not file_model:
raise ApiError(code='file_does_not_exist',
message=f"The reference file {name} does not exist.")
else:
ReferenceFileService().update_reference_file(file_model, file.stream.read())
return FileSchema().dump(to_file_api(file_model))
# TODO: do we need a test for this?
def update_reference_file_info(name, body):
if name is None:
raise ApiError(code='missing_parameter',
message='Please provide a reference file name')
file_model = session.query(FileModel).filter(FileModel.name==name).first()
if file_model is None:
raise ApiError(code='no_such_file',
message=f"No reference file was found with name: {name}")
new_file_model = ReferenceFileService.update_reference_file_info(file_model, body)
return FileSchema().dump(to_file_api(new_file_model))
return_file = ReferenceFileService.update_reference_file(file_name=name, binary_data=file.stream.read())
return FileSchema().dump(return_file)
def add_reference_file():
file = connexion.request.files['file']
file_model = ReferenceFileService.add_reference_file(name=file.filename,
content_type=file.content_type,
binary_data=file.stream.read())
return FileSchema().dump(to_file_api(file_model))
file_model = ReferenceFileService.add_reference_file(file.filename, file.stream.read())
return FileSchema().dump(file_model)
def delete_reference_file(name):
ReferenceFileService().delete_reference_file(name)
ReferenceFileService().delete(name)

View File

@ -10,52 +10,68 @@ import io
import connexion
def get_files(workflow_spec_id, include_libraries=False):
if workflow_spec_id is None:
def get_files(spec_id, include_libraries=False):
if spec_id is None:
raise ApiError(code='missing_spec_id',
message='Please specify the workflow_spec_id.')
workflow_spec = session.query(WorkflowSpecModel).filter_by(id=workflow_spec_id).first()
workflow_spec = session.query(WorkflowSpecModel).filter_by(id=spec_id).first()
if workflow_spec is None:
raise ApiError(code='unknown_spec',
message=f'Unknown Spec: {spec_id}')
files = SpecFileService.get_files(workflow_spec,
include_libraries=include_libraries)
return FileSchema(many=True).dump(files)
def get_file(workflow_spec_id, file_name):
workflow_spec = session.query(WorkflowSpecModel).filter_by(id=workflow_spec_id).first()
def get_file(spec_id, file_name):
workflow_spec = session.query(WorkflowSpecModel).filter_by(id=spec_id).first()
files = SpecFileService.get_files(workflow_spec, file_name)
if len(files) == 0:
raise ApiError(code='unknown file',
message=f'No information exists for file {file_name};'
f' it does not exist in workflow {workflow_spec_id}.')
f' it does not exist in workflow {spec_id}.', status_code=404)
return FileSchema().dump(files[0])
def add_file(workflow_spec_id):
workflow_spec = session.query(WorkflowSpecModel).filter_by(id=workflow_spec_id).first()
def add_file(spec_id):
workflow_spec = session.query(WorkflowSpecModel).filter_by(id=spec_id).first()
file = connexion.request.files['file']
file = SpecFileService.add_file(workflow_spec, file.filename, file.stream.read(), file.content_type)
file = SpecFileService.add_file(workflow_spec, file.filename, file.stream.read())
if not workflow_spec.primary_process_id and file.type == FileType.bpmn:
SpecFileService.set_primary_bpmn(workflow_spec, file.name)
return FileSchema().dump(file)
def update_data(workflow_spec_id, file_name):
workflow_spec_model = session.query(WorkflowSpecModel).filter_by(id=workflow_spec_id).first()
if workflow_spec_model is None:
raise ApiError(code='missing_spec',
message=f'The workflow spec for id {workflow_spec_id} does not exist.')
file_data = connexion.request.files['file']
file = SpecFileService.update_file(workflow_spec_model, file_name, file_data.stream.read(), file_data.content_type)
def update(spec_id, file_name, is_primary):
workflow_spec = session.query(WorkflowSpecModel).filter_by(id=spec_id).first()
files = SpecFileService.get_files(workflow_spec, file_name)
if len(files) < 1:
raise ApiError(code='unknown file',
message=f'No information exists for file {file_name};'
f' it does not exist in workflow {spec_id}.')
file = files[0]
if is_primary:
SpecFileService.set_primary_bpmn(workflow_spec, file_name)
return FileSchema().dump(file)
def get_data(workflow_spec_id, file_name):
workflow_spec_model = session.query(WorkflowSpecModel).filter_by(id=workflow_spec_id).first()
def update_data(spec_id, file_name):
workflow_spec_model = session.query(WorkflowSpecModel).filter_by(id=spec_id).first()
if workflow_spec_model is None:
raise ApiError(code='missing_spec',
message=f'The workflow spec for id {spec_id} does not exist.')
file_data = connexion.request.files['file']
file = SpecFileService.update_file(workflow_spec_model, file_name, file_data.stream.read())
return FileSchema().dump(file)
def get_data(spec_id, file_name):
workflow_spec_model = session.query(WorkflowSpecModel).filter_by(id=spec_id).first()
file_data = SpecFileService.get_data(workflow_spec_model, file_name)
if file_data is not None:
file_info = SpecFileService.get_files(workflow_spec_model, file_name)[0]
return send_file(
io.BytesIO(file_data.data),
io.BytesIO(file_data),
attachment_filename=file_name,
mimetype=file_info.content_type,
cache_timeout=-1 # Don't cache these files on the browser.
@ -63,9 +79,9 @@ def get_data(workflow_spec_id, file_name):
else:
raise ApiError(code='missing_data_model',
message=f'The data model for file {file_name}'
f' does not exist in workflow {workflow_spec_id}.')
f' does not exist in workflow {spec_id}.')
def delete(workflow_spec_id, file_name):
workflow_spec_model = session.query(WorkflowSpecModel).filter_by(id=workflow_spec_id).first()
def delete(spec_id, file_name):
workflow_spec_model = session.query(WorkflowSpecModel).filter_by(id=spec_id).first()
SpecFileService.delete_file(workflow_spec_model, file_name)

View File

@ -12,8 +12,8 @@ from crc.models.task_log import TaskLogModelSchema, TaskLogQuery, TaskLogQuerySc
from crc.models.workflow import WorkflowModel, WorkflowSpecModelSchema, WorkflowSpecModel, WorkflowSpecCategoryModel, \
WorkflowSpecCategoryModelSchema, WorkflowLibraryModel, WorkflowLibraryModelSchema
from crc.services.error_service import ValidationErrorService
from crc.services.file_service import FileService
from crc.services.lookup_service import LookupService
from crc.services.spec_file_service import SpecFileService
from crc.services.study_service import StudyService
from crc.services.task_logging_service import TaskLoggingService
from crc.services.user_service import UserService
@ -153,7 +153,7 @@ def delete_workflow_specification(spec_id):
WorkflowService.delete_workflow_spec_workflow_models(spec_id)
# Delete all files related to this specification
WorkflowService.delete_workflow_spec_files(spec_id)
SpecFileService.delete_all_files(spec)
# Delete all events related to this specification
WorkflowService.delete_workflow_spec_task_events(spec_id)

View File

@ -83,8 +83,7 @@ class FileModel(db.Model):
content_type = db.Column(db.String)
workflow_id = db.Column(db.Integer, db.ForeignKey('workflow.id'), nullable=True)
task_spec = db.Column(db.String, nullable=True)
irb_doc_code = db.Column(db.String, nullable=True) # Code reference to the irb_documents.xlsx reference file.
is_review = db.Column(db.Boolean, default=False, nullable=True)
irb_doc_code = db.Column(db.String, nullable=True) # Code reference to the documents.xlsx reference file.
data_stores = relationship(DataStoreModel, cascade="all,delete", backref="file")
@ -129,8 +128,8 @@ class File(object):
return instance
@classmethod
def spec_file(cls, workflow_spec, file_name, file_type, content_type,
last_modified, file_size):
def from_file_system(cls, file_name, file_type, content_type,
last_modified, file_size):
instance = cls()
instance.name = file_name
@ -166,31 +165,27 @@ class FileSchema(Schema):
def get_url(self, obj):
token = 'not_available'
if obj.id is None:
return "" # We can't return a url for a file that isn't stored yet.
file_url = url_for("/v1_0.crc_api_file_get_file_data_link", file_id=obj.id, _external=True)
if hasattr(flask.g, 'user'):
token = flask.g.user.encode_auth_token()
url = file_url + '?auth_token=' + urllib.parse.quote_plus(token)
return url
if hasattr(obj, 'id') and obj.id is not None:
file_url = url_for("/v1_0.crc_api_file_get_file_data_link", file_id=obj.id, _external=True)
if hasattr(flask.g, 'user'):
token = flask.g.user.encode_auth_token()
url = file_url + '?auth_token=' + urllib.parse.quote_plus(token)
return url
else:
return ""
class LookupFileModel(db.Model):
"""Gives us a quick way to tell what kind of lookup is set on a form field.
Connected to the file data model, so that if a new version of the same file is
created, we can update the listing."""
"""Gives us a quick way to tell what kind of lookup is set on a form field."""
__tablename__ = 'lookup_file'
id = db.Column(db.Integer, primary_key=True)
workflow_spec_id = db.Column(db.String)
task_spec_id = db.Column(db.String)
field_id = db.Column(db.String)
file_name = db.Column(db.String)
is_ldap = db.Column(db.Boolean) # Allows us to run an ldap query instead of a db lookup.
file_model_id = db.Column(db.Integer, db.ForeignKey('file.id'))
last_updated = db.Column(db.DateTime(timezone=True))
dependencies = db.relationship("LookupDataModel", lazy="select", backref="lookup_file_model",
cascade="all, delete, delete-orphan")
file_model = db.relationship("FileModel")
class LookupDataModel(db.Model):
__tablename__ = 'lookup_data'

View File

@ -11,7 +11,6 @@ from crc.models.file import FileSchema
from crc.models.ldap import LdapModel, LdapSchema
from crc.models.protocol_builder import ProtocolBuilderCreatorStudy
from crc.models.workflow import WorkflowSpecCategoryModel, WorkflowState, WorkflowStatus, WorkflowModel
from crc.services.file_service import FileService
class StudyStatus(enum.Enum):
@ -135,7 +134,6 @@ class WorkflowMetadata(object):
@classmethod
def from_workflow(cls, workflow: WorkflowModel):
is_review = FileService.is_workflow_review(workflow.workflow_spec_id)
instance = cls(
id=workflow.id,
display_name=workflow.workflow_spec.display_name,
@ -146,7 +144,7 @@ class WorkflowMetadata(object):
status=workflow.status,
total_tasks=workflow.total_tasks,
completed_tasks=workflow.completed_tasks,
is_review=is_review,
is_review=workflow.workflow_spec.is_review,
display_order=workflow.workflow_spec.display_order,
workflow_spec_id=workflow.workflow_spec_id
)

View File

@ -8,9 +8,9 @@ from crc.api.common import ApiError
from crc.models.file import CONTENT_TYPES, FileModel
from crc.models.workflow import WorkflowModel
from crc.scripts.script import Script
from crc.services.file_service import FileService
from crc.services.jinja_service import JinjaService
from crc.services.spec_file_service import SpecFileService
from crc.services.user_file_service import UserFileService
from crc.services.workflow_processor import WorkflowProcessor
@ -22,7 +22,7 @@ a word document that contains Jinja markup. Please see https://docxtpl.readthed
for more information on exact syntax.
Takes two arguments:
1. The name of a MS Word docx file to use as a template.
2. The 'code' of the IRB Document as set in the irb_documents.xlsx file."
2. The 'code' of the IRB Document as set in the documents.xlsx file."
"""
def do_task_validate_only(self, task, study_id, workflow_id, *args, **kwargs):
@ -35,12 +35,12 @@ Takes two arguments:
final_document_stream = self.process_template(task, study_id, workflow, *args, **kwargs)
file_name = args[0]
irb_doc_code = args[1]
FileService.add_workflow_file(workflow_id=workflow_id,
task_spec_name=task.get_name(),
name=file_name,
content_type=CONTENT_TYPES['docx'],
binary_data=final_document_stream.read(),
irb_doc_code=irb_doc_code)
UserFileService.add_workflow_file(workflow_id=workflow_id,
task_spec_name=task.get_name(),
name=file_name,
content_type=CONTENT_TYPES['docx'],
binary_data=final_document_stream.read(),
irb_doc_code=irb_doc_code)
def process_template(self, task, study_id, workflow=None, *args, **kwargs):
"""Entry point, mostly worried about wiring it all up."""
@ -49,7 +49,7 @@ Takes two arguments:
message="The CompleteTemplate script requires 2 arguments. The first argument is "
"the name of the docx template to use. The second "
"argument is a code for the document, as "
"set in the reference document %s. " % FileService.DOCUMENT_LIST)
"set in the reference document.")
task_study_id = task.workflow.data[WorkflowProcessor.STUDY_ID_KEY]
file_name = args[0]
@ -59,15 +59,7 @@ Takes two arguments:
file_data = None
if workflow is not None:
# Get the workflow specification file with the given name.
file_models = SpecFileService().get_spec_files(
workflow_spec_id=workflow.workflow_spec_id, file_name=file_name)
if len(file_models) > 0:
file_model = file_models[0]
else:
raise ApiError(code="invalid_argument",
message="Uable to locate a file with the given name.")
file_data = SpecFileService().get_spec_file_data(file_model.id).data
file_data = SpecFileService().get_data(workflow.workflow_spec, file_name)
# Get images from file/files fields
if len(args) == 3:
@ -101,7 +93,7 @@ Takes two arguments:
if not task.workflow.data[WorkflowProcessor.VALIDATION_PROCESS_KEY]:
# Get the actual image data
image_file_model = session.query(FileModel).filter_by(id=file_id).first()
image_file_data_model = FileService.get_file_data(file_id, image_file_model)
image_file_data_model = UserFileService.get_file_data(file_id, image_file_model)
if image_file_data_model is not None:
image_file_data.append(image_file_data_model)

View File

@ -3,7 +3,7 @@ from crc.api.common import ApiError
from crc.models.file import FileModel
from crc.scripts.script import Script
from crc.services.document_service import DocumentService
from crc.services.file_service import FileService
from crc.services.user_file_service import UserFileService
class DeleteFile(Script):
@ -15,7 +15,7 @@ class DeleteFile(Script):
FileModel.workflow_id == workflow_id, FileModel.irb_doc_code == doc_code).all()
if isinstance(result, list) and len(result) > 0 and isinstance(result[0], FileModel):
for file in result:
FileService.delete_file(file.id)
UserFileService.delete_file(file.id)
else:
raise ApiError.from_task(code='no_document_found',
message=f'No document of type {doc_code} was found for this workflow.',

View File

@ -5,7 +5,7 @@ from crc.models.file import FileModel
from crc.models.task_event import TaskEventModel
from crc.scripts.script import Script
from crc.services.document_service import DocumentService
from crc.services.file_service import FileService
from crc.services.user_file_service import UserFileService
from crc.services.workflow_service import WorkflowService
@ -44,7 +44,7 @@ class DeleteTaskData(Script):
# delete files
for file in files_to_delete:
FileService().delete_file(file.id)
UserFileService().delete_file(file.id)
# delete the data store
session.query(DataStoreModel). \

View File

@ -4,7 +4,7 @@ from crc.api.common import ApiError
from crc.services.data_store_service import DataStoreBase
from crc.scripts.script import Script
from crc.services.document_service import DocumentService
from crc.services.file_service import FileService
from crc.services.user_file_service import UserFileService
class FileDataSet(Script, DataStoreBase):
@ -51,7 +51,7 @@ class FileDataSet(Script, DataStoreBase):
del(kwargs['file_id'])
if kwargs['key'] == 'irb_code':
irb_doc_code = kwargs['value']
FileService.update_irb_code(fileid,irb_doc_code)
UserFileService.update_irb_code(fileid, irb_doc_code)
return self.set_data_common(task.id,

View File

@ -3,12 +3,13 @@ from crc.api.common import ApiError
from crc.api.file import to_file_api
from crc.models.file import FileModel, FileDataModel, FileSchema
from crc.scripts.script import Script
from crc.services.file_service import FileService
from crc.services.study_service import StudyService
import tempfile
import zipfile
from crc.services.user_file_service import UserFileService
class GetZippedFiles(Script):
@ -47,8 +48,8 @@ class GetZippedFiles(Script):
zfw.writestr(file_name, file_data.data)
with open(temp_file.name, mode='rb') as handle:
file_model = FileService().add_workflow_file(workflow_id, None, task.get_name(), zip_filename,
'application/zip', handle.read())
file_model = UserFileService().add_workflow_file(workflow_id, None, task.get_name(),
zip_filename, 'application/zip', handle.read())
# return file_model
return FileSchema().dump(to_file_api(file_model))
else:

View File

@ -1,5 +1,5 @@
from crc.scripts.script import Script
from crc.services.file_service import FileService
from crc.services.user_file_service import UserFileService
class IsFileUploaded(Script):
@ -10,11 +10,11 @@ class IsFileUploaded(Script):
def do_task_validate_only(self, task, study_id, workflow_id, *args, **kwargs):
doc_code = args[0]
files = FileService.get_files_for_study(study_id)
files = UserFileService.get_files_for_study(study_id)
def do_task(self, task, study_id, workflow_id, *args, **kwargs):
files = FileService.get_files_for_study(study_id)
files = UserFileService.get_files_for_study(study_id)
if len(files) > 0:
doc_code = args[0]
for file in files:

View File

@ -5,13 +5,9 @@ from SpiffWorkflow.util.metrics import timeit
from crc import session
from crc.api.common import ApiError
from crc.api.workflow import get_workflow
from crc.models.protocol_builder import ProtocolBuilderInvestigatorType
from crc.models.study import StudyModel, StudySchema
from crc.api import workflow as workflow_api
from crc.scripts.script import Script
from crc.services.document_service import DocumentService
from crc.services.file_service import FileService
from crc.services.protocol_builder import ProtocolBuilderService
from crc.services.study_service import StudyService

View File

@ -3,6 +3,7 @@ from crc.api.common import ApiError
from crc.models.api_models import DocumentDirectory
from crc.models.file import FileModel
from crc.services.lookup_service import LookupService
from crc.services.reference_file_service import ReferenceFileService
class DocumentService(object):
@ -38,11 +39,8 @@ class DocumentService(object):
@staticmethod
def get_dictionary():
"""Returns a dictionary of document details keyed on the doc_code."""
file_id = session.query(FileModel.id). \
filter(FileModel.name == DocumentService.DOCUMENT_LIST). \
filter(FileModel.is_reference == True). \
scalar()
lookup_model = LookupService.get_lookup_model_for_file_data(file_id, DocumentService.DOCUMENT_LIST, 'code', 'description')
lookup_model = LookupService.get_lookup_model_for_reference(DocumentService.DOCUMENT_LIST,
'code', 'description')
doc_dict = {}
for lookup_data in lookup_model.dependencies:
doc_dict[lookup_data.value] = lookup_data.data

View File

@ -0,0 +1,91 @@
import datetime
import os
from typing import List
from crc import app, session
from crc.api.common import ApiError
from crc.models.file import FileType, CONTENT_TYPES, File
from crc.models.workflow import WorkflowSpecModel, WorkflowLibraryModel
from SpiffWorkflow.bpmn.parser.ValidationException import ValidationException
from lxml import etree
class FileSystemService(object):
""" Simple Service meant for extension that provides some useful methods for dealing with the
File system.
"""
@staticmethod
def root_path():
# fixme: allow absolute files
dir_name = app.config['SYNC_FILE_ROOT']
app_root = app.root_path
return os.path.join(app_root, '..', dir_name)
@staticmethod
def write_file_data_to_system(file_path, file_data):
os.makedirs(os.path.dirname(file_path), exist_ok=True)
with open(file_path, 'wb') as f_handle:
f_handle.write(file_data)
@staticmethod
def get_extension(file_name):
basename, file_extension = os.path.splitext(file_name)
return file_extension.lower().strip()[1:]
@staticmethod
def assert_valid_file_name(file_name):
file_extension = FileSystemService.get_extension(file_name)
if file_extension not in FileType._member_names_:
raise ApiError('unknown_extension',
'The file you provided does not have an accepted extension:' +
file_extension, status_code=404)
@staticmethod
def _last_modified(file_path: str):
# Returns the last modified date of the given file.
timestamp = os.path.getmtime(file_path)
return datetime.datetime.fromtimestamp(timestamp)
@staticmethod
def file_type(file_name):
extension = FileSystemService.get_extension(file_name)
return FileType[extension]
@staticmethod
def _get_files(file_path: str, file_name=None) -> List[File]:
"""Returns an array of File objects at the given path, can be restricted to just one file"""
files = []
items = os.scandir(file_path)
for item in items:
if item.is_file():
if file_name is not None and item.name != file_name:
continue
file = FileSystemService.to_file_object_from_dir_entry(item)
files.append(file)
return files
@staticmethod
def to_file_object(file_name: str, file_path: str) -> File:
file_type = FileSystemService.file_type(file_name)
content_type = CONTENT_TYPES[file_type.name]
last_modified = FileSystemService._last_modified(file_path)
size = os.path.getsize(file_path)
file = File.from_file_system(file_name, file_type, content_type, last_modified, size)
return file
@staticmethod
def to_file_object_from_dir_entry(item: os.DirEntry):
extension = FileSystemService.get_extension(item.name)
try:
file_type = FileType[extension]
content_type = CONTENT_TYPES[file_type.name]
except KeyError:
raise ApiError("invalid_type", "Invalid File Type: %s, for file $%s" % (extension, item.name))
stats = item.stat()
file_size = stats.st_size
last_modified = datetime.datetime.fromtimestamp(stats.st_mtime)
return File.from_file_system(item.name, file_type, content_type, last_modified, file_size)
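
FileSystemService above is meant to be subclassed; SpecFileService and ReferenceFileService both do so later in this commit. A minimal sketch of the extension pattern, with a hypothetical ExampleFileService and 'Example' sub-directory:

import os
from crc.services.file_system_service import FileSystemService

class ExampleFileService(FileSystemService):
    SUB_DIR = 'Example'  # hypothetical sub-directory under SYNC_FILE_ROOT

    @staticmethod
    def file_path(file_name):
        # Resolve a name to SYNC_FILE_ROOT/Example/<file_name>.
        return os.path.join(FileSystemService.root_path(), ExampleFileService.SUB_DIR, file_name)

    @staticmethod
    def add_file(file_name, binary_data):
        # Validate the extension, write the bytes to disk, and return a File object.
        FileSystemService.assert_valid_file_name(file_name)
        path = ExampleFileService.file_path(file_name)
        FileSystemService.write_file_data_to_system(path, binary_data)
        return FileSystemService.to_file_object(file_name, path)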

View File

@ -50,12 +50,15 @@ class LookupService(object):
return LookupService.__get_lookup_model(workflow, spiff_task.task_spec.name, field.id)
@staticmethod
def get_lookup_model_for_file_data(file_id, file_name, value_column, label_column):
file_data = ReferenceFileService().get_reference_file_data(file_name)
lookup_model = db.session.query(LookupFileModel).filter(LookupFileModel.file_model_id == file_id).first()
def get_lookup_model_for_reference(file_name, value_column, label_column):
lookup_model = db.session.query(LookupFileModel).\
filter(LookupFileModel.file_name == file_name). \
filter(LookupFileModel.workflow_spec_id.is_(None)).\
first()
if not lookup_model:
logging.warning("!!!! Making a very expensive call to update the lookup model.")
lookup_model = LookupService.build_lookup_table(file_id, file_name, file_data.data, value_column, label_column)
file_data = ReferenceFileService().get_data(file_name)
lookup_model = LookupService.build_lookup_table(file_name, file_data, value_column, label_column)
return lookup_model
@staticmethod
@ -73,7 +76,7 @@ class LookupService(object):
if lookup_model.is_ldap: # LDAP is always current
is_current = True
else:
current_date = SpecFileService.last_modified(lookup_model.file_model.id)
current_date = SpecFileService.last_modified(workflow.workflow_spec, lookup_model.file_name)
is_current = current_date == lookup_model.last_updated
if not is_current:
@ -130,16 +133,16 @@ class LookupService(object):
file_name = field.get_property(Task.FIELD_PROP_SPREADSHEET_NAME)
value_column = field.get_property(Task.FIELD_PROP_VALUE_COLUMN)
label_column = field.get_property(Task.FIELD_PROP_LABEL_COLUMN)
latest_files = SpecFileService().get_spec_files(workflow_spec_id=workflow_model.workflow_spec_id,
file_name=file_name)
latest_files = SpecFileService().get_files(workflow_model.workflow_spec, file_name=file_name)
if len(latest_files) < 1:
raise ApiError("invalid_enum", "Unable to locate the lookup data file '%s'" % file_name)
else:
file = latest_files[0]
file_data = SpecFileService().get_spec_file_data(file.id).data
workflow_spec = processor.workflow_model.workflow_spec
file_data = SpecFileService().get_data(workflow_spec, file_name)
lookup_model = LookupService.build_lookup_table(file.id, file_name, file_data, value_column, label_column,
lookup_model = LookupService.build_lookup_table(file_name, file_data, value_column, label_column,
workflow_model.workflow_spec_id, task_spec_id, field_id)
# Use the results of an LDAP request to populate enum field options
@ -158,7 +161,7 @@ class LookupService(object):
return lookup_model
@staticmethod
def build_lookup_table(file_id, file_name, file_data, value_column, label_column,
def build_lookup_table(file_name, file_data, value_column, label_column,
workflow_spec_id=None, task_spec_id=None, field_id=None):
""" In some cases the lookup table can be very large. This method will add all values to the database
in a way that can be searched and returned via an api call - rather than sending the full set of
@ -170,13 +173,17 @@ class LookupService(object):
# The error comes back as zipfile.BadZipFile because xlsx files are zipped xml files
except BadZipFile:
raise ApiError(code='excel_error',
message=f"Error opening excel file {file_name}. You may have an older .xls spreadsheet. (file_model_id: {file_id} workflow_spec_id: {workflow_spec_id}, task_spec_id: {task_spec_id}, and field_id: {field_id})")
message=f"Error opening excel file {file_name}. You may have an older .xls spreadsheet. (workflow_spec_id: {workflow_spec_id}, task_spec_id: {task_spec_id}, and field_id: {field_id})")
df = xlsx.parse(xlsx.sheet_names[0]) # Currently we only look at the first sheet.
df = df.convert_dtypes()
df = df.loc[:, ~df.columns.str.contains('^Unnamed')] # Drop unnamed columns.
df = pd.DataFrame(df).dropna(how='all') # Drop null rows
df = pd.DataFrame(df).replace({NA: ''})
for (column_name, column_data) in df.iteritems():
type = df.dtypes[column_name].name
if type == 'string':
df[column_name] = df[column_name].fillna('')
else:
df[column_name] = df[column_name].fillna(0)
if value_column not in df:
raise ApiError("invalid_enum",
"The file %s does not contain a column named % s" % (file_name,
@ -189,7 +196,7 @@ class LookupService(object):
lookup_model = LookupFileModel(workflow_spec_id=workflow_spec_id,
field_id=field_id,
task_spec_id=task_spec_id,
file_model_id=file_id,
file_name=file_name,
is_ldap=False)
db.session.add(lookup_model)
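
Lookup models for reference files are now keyed by file name rather than by a database file_model_id. A short sketch of the new entry point, assuming documents.xlsx is an existing reference spreadsheet with 'code' and 'description' columns:

from crc.services.lookup_service import LookupService

# Reuses the cached LookupFileModel for the spreadsheet when one exists;
# otherwise re-parses the xlsx and rebuilds the lookup table (the expensive path).
lookup_model = LookupService.get_lookup_model_for_reference('documents.xlsx', 'code', 'description')
for lookup_data in lookup_model.dependencies:
    print(lookup_data.value, lookup_data.data)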

View File

@ -4,139 +4,73 @@ import os
from crc import app, session
from crc.api.common import ApiError
from crc.models.file import FileModel, FileModelSchema, FileDataModel
from crc.services.file_service import FileService, FileType
from crc.services.spec_file_service import SpecFileService
from crc.models.file import FileModel, FileModelSchema, FileDataModel, FileType, File
from crc.services.file_system_service import FileSystemService
from uuid import UUID
from sqlalchemy.exc import IntegrityError
class ReferenceFileService(object):
class ReferenceFileService(FileSystemService):
SUB_DIR = "Reference"
@staticmethod
def get_reference_file_path(file_name):
# Fixme: The services should not talk to each other.
sync_file_root = SpecFileService().root_path()
file_path = os.path.join(sync_file_root, 'Reference', file_name)
def root_path():
# fixme: allow absolute directory names (but support relative)
dir_name = app.config['SYNC_FILE_ROOT']
app_root = app.root_path
return os.path.join(app_root, '..', dir_name, ReferenceFileService.SUB_DIR)
@staticmethod
def file_path(file_name: str):
sync_file_root = ReferenceFileService().root_path()
file_path = os.path.join(sync_file_root, file_name)
return file_path
@staticmethod
def add_reference_file(name, content_type, binary_data):
"""Create a file with the given name, but not associated with a spec or workflow.
Only one file with the given reference name can exist."""
file_model = session.query(FileModel). \
filter(FileModel.name == name).first()
# fixme: no need for this is_reference filter.
# filter(FileModel.is_reference == True). \
if not file_model:
file_extension = FileService.get_extension(name)
file_type = FileType[file_extension].value
def add_reference_file(file_name: str, binary_data: bytes) -> File:
return ReferenceFileService.update_reference_file(file_name, binary_data)
file_model = FileModel(
name=name,
# is_reference=True,
type=file_type,
content_type=content_type
)
session.add(file_model)
session.commit()
@staticmethod
def update_reference_file(file_name: str, binary_data: bytes) -> File:
ReferenceFileService.assert_valid_file_name(file_name)
file_path = ReferenceFileService.file_path(file_name)
ReferenceFileService.write_to_file_system(file_name, binary_data)
return ReferenceFileService.to_file_object(file_name, file_path)
@staticmethod
def get_data(file_name):
file_path = ReferenceFileService.file_path(file_name)
if os.path.exists(file_path):
with open(file_path, 'rb') as f_handle:
spec_file_data = f_handle.read()
return spec_file_data
else:
raise ApiError(code='file_already_exists',
message=f"The reference file {name} already exists.")
return ReferenceFileService().update_reference_file(file_model, binary_data)
raise ApiError('file_not_found',
f"There is not a reference file named '{file_name}'")
def update_reference_file(self, file_model, binary_data):
self.write_reference_file_to_system(file_model, binary_data)
print('update_reference_file')
return file_model
# TODO: need a test for this?
def update_reference_file_info(self, old_file_model, body):
file_data = self.get_reference_file_data(old_file_model.name)
old_file_path = self.get_reference_file_path(old_file_model.name)
self.delete_reference_file_data(old_file_path)
self.delete_reference_file_info(old_file_path)
new_file_model = FileModelSchema().load(body, session=session)
new_file_path = self.get_reference_file_path(new_file_model.name)
self.write_reference_file_data_to_system(new_file_path, file_data.data)
self.write_reference_file_info_to_system(new_file_path, new_file_model)
return new_file_model
def get_reference_file_data(self, file_name):
file_model = session.query(FileModel).filter(FileModel.name == file_name).filter(
FileModel.is_reference == True).first()
if file_model is not None:
file_path = self.get_reference_file_path(file_model.name)
if os.path.exists(file_path):
mtime = os.path.getmtime(file_path)
with open(file_path, 'rb') as f_open:
reference_file_data = f_open.read()
size = len(reference_file_data)
md5_checksum = UUID(hashlib.md5(reference_file_data).hexdigest())
reference_file_data_model = FileDataModel(data=reference_file_data,
md5_hash=md5_checksum,
size=size,
date_created=datetime.datetime.fromtimestamp(mtime),
file_model_id=file_model.id
)
return reference_file_data_model
else:
raise ApiError('file_not_found',
f"There was no file in the location: {file_path}")
else:
raise ApiError("file_not_found", "There is no reference file with the name '%s'" % file_name)
def write_reference_file_to_system(self, file_model, file_data):
file_path = self.write_reference_file_data_to_system(file_model.name, file_data)
self.write_reference_file_info_to_system(file_path, file_model)
def write_reference_file_data_to_system(self, file_name, file_data):
file_path = self.get_reference_file_path(file_name)
@staticmethod
def write_to_file_system(file_name, file_data):
file_path = ReferenceFileService.file_path(file_name)
os.makedirs(os.path.dirname(file_path), exist_ok=True)
with open(file_path, 'wb') as f_handle:
f_handle.write(file_data)
# SpecFileService.write_file_data_to_system(file_path, file_data)
return file_path
@staticmethod
def write_reference_file_info_to_system(file_path, file_model):
pass
# fixme: correct this stuff.
# SpecFileService.write_file_info_to_system(file_path, file_model)
@staticmethod
def get_reference_files():
reference_files = session.query(FileModel). \
filter_by(is_reference=True). \
filter(FileModel.archived == False). \
all()
return reference_files
def delete_reference_file_data(self, file_name):
file_path = self.get_reference_file_path(file_name)
json_file_path = f'{file_path}.json'
os.remove(file_path)
os.remove(json_file_path)
return FileSystemService._get_files(ReferenceFileService.root_path())
@staticmethod
def delete_reference_file_info(file_name):
file_model = session.query(FileModel).filter(FileModel.name==file_name).first()
try:
session.delete(file_model)
session.commit()
except IntegrityError as ie:
session.rollback()
file_model = session.query(FileModel).filter(FileModel.name==file_name).first()
file_model.archived = True
session.commit()
app.logger.info("Failed to delete file: %s, so archiving it instead. Due to %s" % (file_name, str(ie)))
def get_reference_file(name: str):
files = FileSystemService._get_files(ReferenceFileService.root_path(), file_name=name)
if len(files) < 1:
raise ApiError('unknown_file', f"No reference file found with the name {name}", 404)
return files[0]
def delete_reference_file(self, file_name):
"""This should remove the record in the file table, and both files on the filesystem."""
self.delete_reference_file_data(file_name)
self.delete_reference_file_info(file_name)
@staticmethod
def delete(file_name):
file_path = ReferenceFileService.file_path(file_name)
os.remove(file_path)
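
With reference files living purely on disk, the whole lifecycle reduces to simple file operations. A short round-trip sketch (the file name and bytes are placeholders):

from crc.services.reference_file_service import ReferenceFileService

# Create or overwrite a reference file under SYNC_FILE_ROOT/Reference/.
ReferenceFileService.add_reference_file('documents.xlsx', xlsx_bytes)

# Read the raw bytes back; raises ApiError('file_not_found') if it is missing.
data = ReferenceFileService.get_data('documents.xlsx')

# List all reference files as File objects built from filesystem stats.
files = ReferenceFileService.get_reference_files()

# Remove the file from disk.
ReferenceFileService.delete('documents.xlsx')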

View File

@ -1,5 +1,6 @@
import datetime
import os
import shutil
from typing import List
from crc import app, session
@ -11,8 +12,10 @@ from SpiffWorkflow.bpmn.parser.ValidationException import ValidationException
from lxml import etree
from crc.services.file_system_service import FileSystemService
class SpecFileService(object):
class SpecFileService(FileSystemService):
"""We store spec files on the file system. This allows us to take advantage of Git for
syncing and versioning.
@ -20,50 +23,30 @@ class SpecFileService(object):
"""
@staticmethod
def get_files(workflow_spec, file_name=None, include_libraries=False) -> List[File]:
def get_files(workflow_spec: WorkflowSpecModel, file_name=None, include_libraries=False) -> List[File]:
""" Returns all files associated with a workflow specification """
files = SpecFileService.__get_files(workflow_spec, file_name)
path = SpecFileService.workflow_path(workflow_spec)
files = SpecFileService._get_files(path, file_name)
if include_libraries:
libraries = session.query(WorkflowLibraryModel).filter(
WorkflowLibraryModel.workflow_spec_id == workflow_spec.id).all()
for lib in libraries:
files.extend(SpecFileService.__get_files(lib, file_name))
lib_path = SpecFileService.workflow_path(workflow_spec)
files.extend(SpecFileService._get_files(lib_path, file_name))
return files
@staticmethod
def __get_files(workflow_spec: WorkflowSpecModel, file_name=None) -> List[File]:
files = []
items = os.scandir(SpecFileService.workflow_path(workflow_spec))
for item in items:
if item.is_file():
if file_name is not None and item.name != file_name:
continue
extension = SpecFileService.get_extension(item.name)
file_type = FileType[extension]
content_type = CONTENT_TYPES[file_type.name]
stats = item.stat()
file_size = stats.st_size
last_modified = datetime.datetime.fromtimestamp(stats.st_mtime)
files.append(File.spec_file(workflow_spec, item.name, file_type, content_type,
last_modified, file_size))
return files
@staticmethod
def add_file(workflow_spec: WorkflowSpecModel, file_name: str, binary_data: bytearray, content_type: str) -> File:
def add_file(workflow_spec: WorkflowSpecModel, file_name: str, binary_data: bytearray) -> File:
# Same as update
return SpecFileService.update_file(workflow_spec, file_name, binary_data, content_type)
return SpecFileService.update_file(workflow_spec, file_name, binary_data)
@staticmethod
def update_file(workflow_spec: WorkflowSpecModel, file_name: str, binary_data, content_type) -> File:
def update_file(workflow_spec: WorkflowSpecModel, file_name: str, binary_data) -> File:
SpecFileService.assert_valid_file_name(file_name)
file_path = SpecFileService.file_path(workflow_spec, file_name)
SpecFileService.write_file_data_to_system(file_path, binary_data)
extension = SpecFileService.get_extension(file_name)
file_type = FileType[extension]
last_modified = SpecFileService.__last_modified(file_path)
size = os.path.getsize(file_path)
file = File.spec_file(workflow_spec, file_name, file_type, content_type, last_modified, size)
if file_name == workflow_spec.primary_file_name:
file = SpecFileService.to_file_object(file_name, file_path)
if file_name == workflow_spec.primary_file_name or workflow_spec.primary_file_name is None:
SpecFileService.set_primary_bpmn(workflow_spec, file_name, binary_data)
return file
@ -101,6 +84,7 @@ class SpecFileService(object):
#
@staticmethod
def root_path():
# fixme: allow absolute files
dir_name = app.config['SYNC_FILE_ROOT']
app_root = app.root_path
return os.path.join(app_root, '..', dir_name)
@ -132,23 +116,9 @@ class SpecFileService(object):
return os.path.join(SpecFileService.workflow_path(spec), file_name)
@staticmethod
def write_file_data_to_system(file_path, file_data):
os.makedirs(os.path.dirname(file_path), exist_ok=True)
with open(file_path, 'wb') as f_handle:
f_handle.write(file_data)
@staticmethod
def get_extension(file_name):
basename, file_extension = os.path.splitext(file_name)
return file_extension.lower().strip()[1:]
@staticmethod
def assert_valid_file_name(file_name):
file_extension = SpecFileService.get_extension(file_name)
if file_extension not in FileType._member_names_:
raise ApiError('unknown_extension',
'The file you provided does not have an accepted extension:' +
file_extension, status_code=404)
def last_modified(spec: WorkflowSpecModel, file_name: str):
path = SpecFileService.file_path(spec, file_name)
return FileSystemService._last_modified(path)
@staticmethod
def has_swimlane(et_root: etree.Element):
@ -165,14 +135,19 @@ class SpecFileService(object):
@staticmethod
def delete_file(spec, file_name):
# Fixme: Remember to remove the lookup files when the spec file is removed.
# lookup_files = session.query(LookupFileModel).filter_by(file_model_id=file_id).all()
# for lf in lookup_files:
# session.query(LookupDataModel).filter_by(lookup_file_model_id=lf.id).delete()
# session.query(LookupFileModel).filter_by(id=lf.id).delete()
file_path = SpecFileService.file_path(spec, file_name)
os.remove(file_path)
@staticmethod
def __last_modified(file_path: str):
# Returns the last modified date of the given file.
timestamp = os.path.getmtime(file_path)
return datetime.datetime.fromtimestamp(timestamp)
def delete_all_files(spec):
dir_path = SpecFileService.workflow_path(spec)
if os.path.exists(dir_path):
shutil.rmtree(dir_path)
@staticmethod
def get_process_id(et_root: etree.Element):
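
Spec files follow the same pattern, rooted under SYNC_FILE_ROOT and organized per workflow spec. A short sketch using the signatures above (workflow_spec is assumed to be an existing WorkflowSpecModel):

from crc.services.spec_file_service import SpecFileService

# Write (or overwrite) a file for the spec; per update_file above, a file
# written while primary_file_name is unset is passed to set_primary_bpmn.
file = SpecFileService.update_file(workflow_spec, 'process.bpmn', bpmn_bytes)

# Read the raw bytes back.
data = SpecFileService.get_data(workflow_spec, 'process.bpmn')

# Deleting the spec removes its whole directory from disk.
SpecFileService.delete_all_files(workflow_spec)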

View File

@ -11,7 +11,7 @@ from ldap3.core.exceptions import LDAPSocketOpenError
from crc import db, session, app
from crc.api.common import ApiError
from crc.models.email import EmailModel
from crc.models.file import FileModel, File, FileSchema
from crc.models.file import FileModel, File, FileSchema, FileDataModel
from crc.models.ldap import LdapSchema
from crc.models.protocol_builder import ProtocolBuilderCreatorStudy
@ -22,10 +22,10 @@ from crc.models.task_log import TaskLogModel
from crc.models.workflow import WorkflowSpecCategoryModel, WorkflowModel, WorkflowSpecModel, WorkflowState, \
WorkflowStatus
from crc.services.document_service import DocumentService
from crc.services.file_service import FileService
from crc.services.ldap_service import LdapService
from crc.services.lookup_service import LookupService
from crc.services.protocol_builder import ProtocolBuilderService
from crc.services.user_file_service import UserFileService
from crc.services.workflow_processor import WorkflowProcessor
@ -41,7 +41,9 @@ class StudyService(object):
study_info = study_details[0]
# The review types 2, 3, 23, 24 correspond to review type names
# `Full Committee`, `Expedited`, `Non-UVA IRB Full Board`, and `Non-UVA IRB Expedited`
if isinstance(study_info, dict) and 'REVIEW_TYPE' in study_info.keys() and study_info['REVIEW_TYPE'] in [2, 3, 23, 24]:
if isinstance(study_info, dict) and 'REVIEW_TYPE' in study_info.keys() and study_info['REVIEW_TYPE'] in [2, 3,
23,
24]:
return True
return False
@ -65,7 +67,7 @@ class StudyService(object):
studies = []
for s in db_studies:
study = Study.from_model(s)
study.files = FileService.get_files_for_study(study.id)
study.files = UserFileService.get_files_for_study(study.id)
studies.append(study)
return studies
@ -90,8 +92,8 @@ class StudyService(object):
study.last_activity_date = last_event.date
study.categories = StudyService.get_categories()
workflow_metas = StudyService._get_workflow_metas(study_id)
files = FileService.get_files_for_study(study.id)
files = (File.from_models(model, FileService.get_file_data(model.id),
files = UserFileService.get_files_for_study(study.id)
files = (File.from_models(model, UserFileService.get_file_data(model.id),
DocumentService.get_dictionary()) for model in files)
study.files = list(files)
# Calling this line repeatedly is very very slow. It creates the
@ -236,7 +238,10 @@ class StudyService(object):
return
session.query(TaskEventModel).filter_by(workflow_id=workflow.id).delete()
session.query(FileModel).filter_by(workflow_id=workflow_id).update({'archived': True, 'workflow_id': None})
files = session.query(FileModel).filter_by(workflow_id=workflow_id).all()
for file in files:
session.query(FileDataModel).filter(FileDataModel.file_model_id == file.id).delete()
session.delete(file)
session.delete(workflow)
session.commit()
@ -275,7 +280,9 @@ class StudyService(object):
doc['required'] = False
if ProtocolBuilderService.is_enabled() and doc['id'] != '':
pb_data = next((item for item in pb_docs['AUXDOCS'] if int(item['SS_AUXILIARY_DOC_TYPE_ID']) == int(doc['id'])), None)
pb_data = next(
(item for item in pb_docs['AUXDOCS'] if int(item['SS_AUXILIARY_DOC_TYPE_ID']) == int(doc['id'])),
None)
if pb_data:
doc['required'] = True
@ -290,12 +297,12 @@ class StudyService(object):
doc['display_name'] = ' / '.join(name_list)
# For each file, get associated workflow status
doc_files = FileService.get_files_for_study(study_id=study_id, irb_doc_code=code)
doc_files = UserFileService.get_files_for_study(study_id=study_id, irb_doc_code=code)
doc['count'] = len(doc_files)
doc['files'] = []
for file_model in doc_files:
file = File.from_models(file_model, FileService.get_file_data(file_model.id), [])
file = File.from_models(file_model, UserFileService.get_file_data(file_model.id), [])
file_data = FileSchema().dump(file)
del file_data['document']
doc['files'].append(Box(file_data))
@ -309,19 +316,13 @@ class StudyService(object):
@staticmethod
def get_investigator_dictionary():
"""Returns a dictionary of document details keyed on the doc_code."""
file_id = session.query(FileModel.id). \
filter(FileModel.name == StudyService.INVESTIGATOR_LIST). \
filter(FileModel.is_reference == True). \
scalar()
lookup_model = LookupService.get_lookup_model_for_file_data(file_id, StudyService.INVESTIGATOR_LIST, 'code', 'label')
lookup_model = LookupService.get_lookup_model_for_reference(StudyService.INVESTIGATOR_LIST, 'code', 'label')
doc_dict = {}
for lookup_data in lookup_model.dependencies:
doc_dict[lookup_data.value] = lookup_data.data
return doc_dict
@staticmethod
def get_investigators(study_id, all=False):
"""Convert array of investigators from protocol builder into a dictionary keyed on the type. """
@ -433,13 +434,14 @@ class StudyService(object):
def _update_status_of_workflow_meta(workflow_metas, status):
# Update the status on each workflow
warnings = []
unused_statuses = status.copy() # A list of all the statuses that are not used.
unused_statuses = status.copy() # A list of all the statuses that are not used.
for wfm in workflow_metas:
unused_statuses.pop(wfm.workflow_spec_id, None)
wfm.state_message = ''
# do we have a status for you
if wfm.workflow_spec_id not in status.keys():
warnings.append(ApiError("missing_status", "No status information provided about workflow %s" % wfm.workflow_spec_id))
warnings.append(ApiError("missing_status",
"No status information provided about workflow %s" % wfm.workflow_spec_id))
continue
if not isinstance(status[wfm.workflow_spec_id], dict):
warnings.append(ApiError(code='invalid_status',
@ -454,7 +456,8 @@ class StudyService(object):
if not WorkflowState.has_value(status[wfm.workflow_spec_id]['status']):
warnings.append(ApiError("invalid_state",
"Workflow '%s' can not be set to '%s', should be one of %s" % (
wfm.workflow_spec_id, status[wfm.workflow_spec_id]['status'], ",".join(WorkflowState.list())
wfm.workflow_spec_id, status[wfm.workflow_spec_id]['status'],
",".join(WorkflowState.list())
)))
continue
@ -463,8 +466,8 @@ class StudyService(object):
for status in unused_statuses:
if isinstance(unused_statuses[status], dict) and 'status' in unused_statuses[status]:
warnings.append(ApiError("unmatched_status", "The master workflow provided a status for '%s' a "
"workflow that doesn't seem to exist." %
status))
"workflow that doesn't seem to exist." %
status))
return warnings

View File

@ -35,7 +35,7 @@ def camel_to_snake(camel):
return re.sub(r'(?<!^)(?=[A-Z])', '_', camel).lower()
class FileService(object):
class UserFileService(object):
@staticmethod
@cache
@ -75,13 +75,12 @@ class FileService(object):
task_spec=task_spec_name,
irb_doc_code=irb_doc_code
)
return FileService.update_file(file_model, binary_data, content_type)
return UserFileService.update_file(file_model, binary_data, content_type)
@staticmethod
def get_workflow_files(workflow_id):
"""Returns all the file models associated with a running workflow."""
return session.query(FileModel).filter(FileModel.workflow_id == workflow_id).\
filter(FileModel.archived == False).\
order_by(FileModel.id).all()
@staticmethod
@ -103,13 +102,12 @@ class FileService(object):
if (latest_data_model is not None) and (md5_checksum == latest_data_model.md5_hash):
# This file does not need to be updated, it's the same file. If it is archived,
# then de-archive it.
file_model.archived = False
session.add(file_model)
session.commit()
return file_model
# Verify the extension
file_extension = FileService.get_extension(file_model.name)
file_extension = UserFileService.get_extension(file_model.name)
if file_extension not in FileType._member_names_:
raise ApiError('unknown_extension',
'The file you provided does not have an accepted extension:' +
@ -117,7 +115,6 @@ class FileService(object):
else:
file_model.type = FileType[file_extension]
file_model.content_type = content_type
file_model.archived = False # Unarchive the file if it is archived.
if latest_data_model is None:
version = 1
@ -143,8 +140,7 @@ class FileService(object):
def get_files_for_study(study_id, irb_doc_code=None):
query = session.query(FileModel).\
join(WorkflowModel).\
filter(WorkflowModel.study_id == study_id).\
filter(FileModel.archived == False)
filter(WorkflowModel.study_id == study_id)
if irb_doc_code:
query = query.filter(FileModel.irb_doc_code == irb_doc_code)
return query.all()
@ -159,7 +155,6 @@ class FileService(object):
if name:
query = query.filter_by(name=name)
query = query.filter(FileModel.archived == False)
query = query.order_by(FileModel.id)
results = query.all()
@ -170,10 +165,10 @@ class FileService(object):
"""Returns all the FileDataModels related to a running workflow -
So these are the latest data files that were uploaded or generated
that go along with this workflow. Not related to the spec in any way"""
file_models = FileService.get_files(workflow_id=workflow_id)
file_models = UserFileService.get_files(workflow_id=workflow_id)
latest_data_files = []
for file_model in file_models:
latest_data_files.append(FileService.get_file_data(file_model.id))
latest_data_files.append(UserFileService.get_file_data(file_model.id))
return latest_data_files
@staticmethod
@ -190,22 +185,15 @@ class FileService(object):
@staticmethod
def delete_file(file_id):
try:
lookup_files = session.query(LookupFileModel).filter_by(file_model_id=file_id).all()
for lf in lookup_files:
session.query(LookupDataModel).filter_by(lookup_file_model_id=lf.id).delete()
session.query(LookupFileModel).filter_by(id=lf.id).delete()
session.query(FileDataModel).filter_by(file_model_id=file_id).delete()
session.query(DataStoreModel).filter_by(file_id=file_id).delete()
session.query(FileModel).filter_by(id=file_id).delete()
session.commit()
except IntegrityError as ie:
# We can't delete the file or file data, because it is referenced elsewhere,
# but we can at least mark it as deleted on the table.
session.rollback()
file_model = session.query(FileModel).filter_by(id=file_id).first()
file_model.archived = True
session.commit()
app.logger.info("Failed to delete file, so archiving it instead. %i, due to %s" % (file_id, str(ie)))
raise ApiError('Delete Failed', "Unable to delete file. ")
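
Since delete_file no longer falls back to archiving, callers must be ready for the ApiError. A sketch of the resulting calling convention (the helper name here is hypothetical):

    from crc.api.common import ApiError
    from crc.services.user_file_service import UserFileService

    def try_delete_user_file(file_id: int) -> bool:
        """Hypothetical helper: delete a user file, reporting failure
        instead of silently archiving as the old code did."""
        try:
            UserFileService.delete_file(file_id)
            return True
        except ApiError:
            # The file is still referenced elsewhere; the delete is refused.
            return False
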
@staticmethod
def get_repo_branches():

View File

@ -18,14 +18,14 @@ from SpiffWorkflow.specs import WorkflowSpec
from crc import session
from crc.api.common import ApiError
from crc.models.file import FileModel, FileType
from crc.models.file import FileModel, FileType, File
from crc.models.task_event import TaskEventModel
from crc.models.user import UserModelSchema
from crc.models.workflow import WorkflowStatus, WorkflowModel
from crc.models.workflow import WorkflowStatus, WorkflowModel, WorkflowSpecModel
from crc.scripts.script import Script
from crc.services.file_service import FileService
from crc import app
from crc.services.spec_file_service import SpecFileService
from crc.services.user_file_service import UserFileService
from crc.services.user_service import UserService
@ -106,9 +106,8 @@ class WorkflowProcessor(object):
spec = None
if workflow_model.bpmn_workflow_json is None:
self.spec_files = SpecFileService().get_spec_files(
workflow_spec_id=workflow_model.workflow_spec_id, include_libraries=True)
spec = self.get_spec(self.spec_files, workflow_model.workflow_spec_id)
self.spec_files = SpecFileService.get_files(workflow_model.workflow_spec, include_libraries=True)
spec = self.get_spec(self.spec_files, workflow_model.workflow_spec)
self.workflow_spec_id = workflow_model.workflow_spec_id
@ -166,7 +165,7 @@ class WorkflowProcessor(object):
if delete_files:
files = FileModel.query.filter(FileModel.workflow_id == workflow_model.id).all()
for file in files:
FileService.delete_file(file.id)
UserFileService.delete_file(file.id)
session.commit()
return WorkflowProcessor(workflow_model)
@ -198,9 +197,9 @@ class WorkflowProcessor(object):
"""Executes a BPMN specification for the given study, without recording any information to the database
Useful for running the master specification, which should not persist. """
lasttime = firsttime()
spec_files = SpecFileService().get_spec_files(spec_model.id, include_libraries=True)
spec_files = SpecFileService().get_files(spec_model, include_libraries=True)
lasttime = sincetime('load Files', lasttime)
spec = WorkflowProcessor.get_spec(spec_files, spec_model.id)
spec = WorkflowProcessor.get_spec(spec_files, spec_model)
lasttime = sincetime('get spec', lasttime)
try:
bpmn_workflow = BpmnWorkflow(spec, script_engine=WorkflowProcessor._script_engine)
@ -224,28 +223,24 @@ class WorkflowProcessor(object):
return parser
@staticmethod
def get_spec(files: List[FileModel], workflow_spec_id):
def get_spec(files: List[File], workflow_spec_model: WorkflowSpecModel):
"""Returns a SpiffWorkflow specification for the given workflow spec,
using the files provided. The Workflow_spec_id is only used to generate
better error messages."""
using the files provided. """
parser = WorkflowProcessor.get_parser()
process_id = None
for file in files:
data = SpecFileService().get_spec_file_data(file.id).data
data = SpecFileService.get_data(workflow_spec_model, file.name)
if file.type == FileType.bpmn:
bpmn: etree.Element = etree.fromstring(data)
if file.primary and file.workflow_spec_id == workflow_spec_id:
process_id = SpecFileService.get_process_id(bpmn)
parser.add_bpmn_xml(bpmn, filename=file.name)
elif file.type == FileType.dmn:
dmn: etree.Element = etree.fromstring(data)
parser.add_dmn_xml(dmn, filename=file.name)
if process_id is None:
if workflow_spec_model.primary_process_id is None:
raise (ApiError(code="no_primary_bpmn_error",
message="There is no primary BPMN model defined for workflow %s" % workflow_spec_id))
message="There is no primary BPMN model defined for workflow %s" % workflow_spec_model.id))
try:
spec = parser.get_spec(process_id)
spec = parser.get_spec(workflow_spec_model.primary_process_id)
except ValidationException as ve:
raise ApiError(code="workflow_validation_error",
message="Failed to parse the Workflow Specification. " +

View File

@ -19,6 +19,7 @@ from SpiffWorkflow.dmn.specs.BusinessRuleTask import BusinessRuleTask
from SpiffWorkflow.specs import CancelTask, StartTask
from SpiffWorkflow.util.deep_merge import DeepMerge
from SpiffWorkflow.util.metrics import timeit
from sqlalchemy.exc import InvalidRequestError
from crc import db, app, session
from crc.api.common import ApiError
@ -32,7 +33,6 @@ from crc.models.workflow import WorkflowModel, WorkflowStatus, WorkflowSpecModel
from crc.services.data_store_service import DataStoreBase
from crc.services.document_service import DocumentService
from crc.services.file_service import FileService
from crc.services.jinja_service import JinjaService
from crc.services.lookup_service import LookupService
from crc.services.spec_file_service import SpecFileService
@ -81,15 +81,19 @@ class WorkflowService(object):
db.session.add(StudyModel(user_uid=user.uid, title="test"))
db.session.commit()
study = db.session.query(StudyModel).filter_by(user_uid=user.uid).first()
spec = db.session.query(WorkflowSpecModel).filter(WorkflowSpecModel.id == spec_id).first()
workflow_model = WorkflowModel(status=WorkflowStatus.not_started,
workflow_spec_id=spec_id,
workflow_spec=spec,
last_updated=datetime.utcnow(),
study=study)
return workflow_model
@staticmethod
def delete_test_data(workflow: WorkflowModel):
db.session.delete(workflow)
try:
db.session.delete(workflow)
except InvalidRequestError as e:
pass
# Also, delete any test study or user models that may have been created.
for study in db.session.query(StudyModel).filter(StudyModel.user_uid == "test"):
StudyService.delete_study(study.id)
@ -567,10 +571,7 @@ class WorkflowService(object):
navigation = processor.bpmn_workflow.get_deep_nav_list()
WorkflowService.update_navigation(navigation, processor)
spec = db.session.query(WorkflowSpecModel).filter_by(id=processor.workflow_spec_id).first()
is_review = FileService.is_workflow_review(processor.workflow_spec_id)
workflow_api = WorkflowApi(
id=processor.get_workflow_id(),
status=processor.get_status(),
@ -580,7 +581,7 @@ class WorkflowService(object):
total_tasks=len(navigation),
completed_tasks=processor.workflow_model.completed_tasks,
last_updated=processor.workflow_model.last_updated,
is_review=is_review,
is_review=spec.is_review,
title=spec.display_name,
study_id=processor.workflow_model.study_id or None
)
@ -764,7 +765,8 @@ class WorkflowService(object):
try:
doc_file_name = spiff_task.task_spec.name + ".md"
workflow_id = spiff_task.workflow.data[WorkflowProcessor.WORKFLOW_ID_KEY]
workflow = db.session.query(WorkflowModel).filter(WorkflowModel.id == spiff_task.workflow.data['workflow_spec_id'])
workflow = db.session.query(WorkflowModel).\
filter(WorkflowModel.id == spiff_task.workflow.data['workflow_id']).first()
data = SpecFileService.get_data(workflow.workflow_spec, doc_file_name)
raw_doc = data.decode("utf-8")
except ApiError:
@ -1015,15 +1017,6 @@ class WorkflowService(object):
specs = db.session.query(WorkflowSpecModel).filter_by(library=True).all()
return specs
@staticmethod
def get_primary_workflow(workflow_spec_id):
# Returns the FileModel of the primary workflow for a workflow_spec
primary = None
file = db.session.query(FileModel).filter(FileModel.workflow_spec_id==workflow_spec_id, FileModel.primary==True).first()
if file:
primary = file
return primary
@staticmethod
def reorder_workflow_spec(spec, direction):
category_id = spec.category_id
@ -1105,12 +1098,6 @@ class WorkflowService(object):
new_order += 1
session.commit()
@staticmethod
def delete_workflow_spec_files(spec_id):
files = session.query(FileModel).filter_by(workflow_spec_id=spec_id).all()
for file in files:
FileService.delete_file(file.id)
@staticmethod
def delete_workflow_spec_task_events(spec_id):
session.query(TaskEventModel).filter(TaskEventModel.workflow_spec_id == spec_id).delete()

View File

@ -7,10 +7,10 @@ from crc.models.ldap import LdapModel
from crc.models.user import UserModel
from crc.models.workflow import WorkflowSpecModel, WorkflowSpecCategoryModel
from crc.services.document_service import DocumentService
from crc.services.file_service import FileService
from crc.services.reference_file_service import ReferenceFileService
from crc.services.spec_file_service import SpecFileService
from crc.services.study_service import StudyService
from crc.services.user_file_service import UserFileService
class ExampleDataLoader:
@ -188,9 +188,9 @@ class ExampleDataLoader:
def load_rrt(self):
file_path = os.path.join(app.root_path, 'static', 'reference', 'rrt_documents.xlsx')
file = open(file_path, "rb")
ReferenceFileService.add_reference_file(FileService.DOCUMENT_LIST,
binary_data=file.read(),
content_type=CONTENT_TYPES['xls'])
ReferenceFileService.add_reference_file(UserFileService.DOCUMENT_LIST,
binary_data=file.read(),
content_type=CONTENT_TYPES['xls'])
file.close()
category = WorkflowSpecCategoryModel(
@ -246,7 +246,6 @@ class ExampleDataLoader:
further assumes that the [id].bpmn is the primary file for the workflow.
returns an array of data models to be added to the database."""
global file
file_service = FileService()
spec = WorkflowSpecModel(id=id,
display_name=display_name,
description=description,
@ -269,15 +268,15 @@ class ExampleDataLoader:
noise, file_extension = os.path.splitext(file_path)
filename = os.path.basename(file_path)
is_status = filename.lower() == 'status.bpmn'
is_primary = filename.lower() == id + '.bpmn'
file = None
try:
file = open(file_path, 'rb')
data = file.read()
content_type = CONTENT_TYPES[file_extension[1:]]
SpecFileService.add_file(workflow_spec=spec, file_name=filename, binary_data=data, content_type=content_type)
SpecFileService.add_file(workflow_spec=spec, file_name=filename, binary_data=data)
if is_primary:
SpecFileService.set_primary_bpmn(spec, filename)
except IsADirectoryError as de:
# Ignore sub directories
pass
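
SpecFileService.add_file now writes the bytes to disk and types the file by its extension, so the content_type argument is gone; making a file primary is a separate, explicit call. Usage, roughly (the path is illustrative; spec is the WorkflowSpecModel created above):

    from crc.services.spec_file_service import SpecFileService

    with open('tests/data/random_fact/random_fact.bpmn', 'rb') as fh:
        data = fh.read()
    # Content type is inferred from the ".bpmn" extension on write.
    SpecFileService.add_file(workflow_spec=spec, file_name='random_fact.bpmn',
                             binary_data=data)
    # The primary flag now lives on the spec, not on a FileModel.
    SpecFileService.set_primary_bpmn(spec, 'random_fact.bpmn')
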
@ -287,18 +286,16 @@ class ExampleDataLoader:
return spec
def load_reference_documents(self):
file_path = os.path.join(app.root_path, 'static', 'reference', 'irb_documents.xlsx')
file_path = os.path.join(app.root_path, 'static', 'reference', 'documents.xlsx')
file = open(file_path, "rb")
ReferenceFileService.add_reference_file(DocumentService.DOCUMENT_LIST,
binary_data=file.read(),
content_type=CONTENT_TYPES['xlsx'])
file.read())
file.close()
file_path = os.path.join(app.root_path, 'static', 'reference', 'investigators.xlsx')
file = open(file_path, "rb")
ReferenceFileService.add_reference_file(StudyService.INVESTIGATOR_LIST,
binary_data=file.read(),
content_type=CONTENT_TYPES['xlsx'])
file.read())
file.close()
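
add_reference_file likewise drops the content-type keyword: a name and the raw bytes are all that is needed now that reference files are plain files on disk. For example:

    import os
    from crc import app
    from crc.services.reference_file_service import ReferenceFileService

    file_path = os.path.join(app.root_path, 'static', 'reference', 'documents.xlsx')
    with open(file_path, 'rb') as fh:
        ReferenceFileService.add_reference_file('documents.xlsx', fh.read())
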
def load_default_user(self):

View File

@ -27,6 +27,7 @@ from crc.services.user_service import UserService
from crc.services.workflow_service import WorkflowService
from crc.services.document_service import DocumentService
from example_data import ExampleDataLoader
from crc.services.user_file_service import UserFileService
# UNCOMMENT THIS FOR DEBUGGING SQL ALCHEMY QUERIES
import logging
@ -162,6 +163,8 @@ class BaseTest(unittest.TestCase):
# else:
# ExampleDataLoader().load_test_data()
self.create_reference_document()
session.commit()
for study_json in self.studies:
study_model = StudyModel(**study_json)
@ -187,21 +190,35 @@ class BaseTest(unittest.TestCase):
# self.assertGreater(len(file_data), 0)
@staticmethod
def load_test_spec(dir_name, display_name=None, master_spec=False, category_id=None):
"""Loads a spec into the database based on a directory in /tests/data"""
if category_id is None:
category = WorkflowSpecCategoryModel(display_name="Test Workflows", display_order=0)
session.add(category)
session.commit()
def assure_category_exists(category_id=None):
category = None
if category_id is not None:
category = db.session.query(WorkflowSpecCategoryModel).filter(WorkflowSpecCategoryModel.id == category_id).first()
if category is None:
category = db.session.query(WorkflowSpecCategoryModel).filter(WorkflowSpecCategoryModel.display_name == "Test Workflows").first()
if not category:
category = WorkflowSpecCategoryModel(display_name="Test Workflows", display_order=0)
session.add(category)
session.commit()
category_id = category.id
return category
@staticmethod
def load_test_spec(dir_name, display_name=None, master_spec=False, category_id=None, library=False):
"""Loads a spec into the database based on a directory in /tests/data"""
category = BaseTest.assure_category_exists(category_id)
category_id = category.id
if session.query(WorkflowSpecModel).filter_by(id=dir_name).count() > 0:
return session.query(WorkflowSpecModel).filter_by(id=dir_name).first()
filepath = os.path.join(app.root_path, '..', 'tests', 'data', dir_name, "*")
if display_name is None:
display_name = dir_name
return ExampleDataLoader().create_spec(id=dir_name, filepath=filepath, master_spec=master_spec,
display_name=display_name, category_id=category_id)
spec = ExampleDataLoader().create_spec(id=dir_name, filepath=filepath, master_spec=master_spec,
display_name=display_name, category_id=category_id, library=library)
db.session.add(spec)
db.session.commit()
return spec
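
load_test_spec gains a library flag and now commits the spec itself, so a test can stage a library and a consumer in two lines (the directory names below are placeholders for folders under tests/data):

    # Hypothetical directory names; both must exist under tests/data.
    library = BaseTest.load_test_spec('my_library', library=True)
    spec = BaseTest.load_test_spec('uses_my_library')
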
@staticmethod
def protocol_builder_response(file_name):
@ -254,16 +271,11 @@ class BaseTest(unittest.TestCase):
return '?%s' % '&'.join(query_string_list)
def replace_file(self, name, file_path):
def replace_file(self, spec, name, file_path):
"""Replaces a stored file with the given name with the contents of the file at the given path."""
file = open(file_path, "rb")
data = file.read()
file_model = session.query(FileModel).filter(FileModel.name == name).first()
workflow_spec_model = session.query(WorkflowSpecModel).filter(WorkflowSpecModel.id==file_model.workflow_spec_id).first()
noise, file_extension = os.path.splitext(file_path)
content_type = CONTENT_TYPES[file_extension[1:]]
SpecFileService().update_spec_file_data(workflow_spec_model, file_model.name, data)
SpecFileService().update_file(spec, name, data)
def create_user(self, uid="dhf8r", email="daniel.h.funk@gmail.com", display_name="Hoopy Frood"):
user = session.query(UserModel).filter(UserModel.uid == uid).first()
@ -285,7 +297,6 @@ class BaseTest(unittest.TestCase):
session.commit()
return study
def create_workflow(self, dir_name, display_name=None, study=None, category_id=None, as_user="dhf8r"):
session.flush()
spec = session.query(WorkflowSpecModel).filter(WorkflowSpecModel.id == dir_name).first()
@ -299,11 +310,14 @@ class BaseTest(unittest.TestCase):
return workflow_model
def create_reference_document(self):
file_path = os.path.join(app.root_path, 'static', 'reference', 'irb_documents.xlsx')
file_path = os.path.join(app.root_path, 'static', 'reference', 'documents.xlsx')
with open(file_path, "rb") as file:
ReferenceFileService.add_reference_file(DocumentService.DOCUMENT_LIST,
content_type=CONTENT_TYPES['xlsx'],
binary_data=file.read())
file.read())
file_path = os.path.join(app.root_path, 'static', 'reference', 'investigators.xlsx')
with open(file_path, "rb") as file:
ReferenceFileService.add_reference_file('investigators.xlsx',
file.read())
def get_workflow_common(self, url, user):
rv = self.app.get(url,

View File

@ -1,5 +1,6 @@
from github import UnknownObjectException
from sqlalchemy import desc, column
from tests.base_test import BaseTest
from unittest.mock import patch, Mock
@ -7,8 +8,8 @@ from crc import db, session
from crc.api.common import ApiError
from crc.models.file import FileModel, FileDataModel, CONTENT_TYPES
from crc.models.workflow import WorkflowModel, WorkflowSpecModel
from crc.services.file_service import FileService
from crc.services.workflow_processor import WorkflowProcessor
from crc.services.user_file_service import UserFileService
class FakeGithubCreates(Mock):
@ -59,20 +60,20 @@ class TestFileService(BaseTest):
processor = WorkflowProcessor(workflow)
task = processor.next_task()
irb_code = "UVACompl_PRCAppr" # The first file referenced in pb required docs.
FileService.add_workflow_file(workflow_id=workflow.id,
task_spec_name=task.get_name(),
name="anything.png", content_type="text",
binary_data=b'1234', irb_doc_code=irb_code)
UserFileService.add_workflow_file(workflow_id=workflow.id,
task_spec_name=task.get_name(),
name="anything.png", content_type="text",
binary_data=b'1234', irb_doc_code=irb_code)
# Add the file again with different data
FileService.add_workflow_file(workflow_id=workflow.id,
task_spec_name=task.get_name(),
name="anything.png", content_type="text",
binary_data=b'5678', irb_doc_code=irb_code)
UserFileService.add_workflow_file(workflow_id=workflow.id,
task_spec_name=task.get_name(),
name="anything.png", content_type="text",
binary_data=b'5678', irb_doc_code=irb_code)
file_models = FileService.get_workflow_files(workflow_id=workflow.id)
file_models = UserFileService.get_workflow_files(workflow_id=workflow.id)
self.assertEqual(1, len(file_models))
file_data = FileService.get_workflow_data_files(workflow_id=workflow.id)
file_data = UserFileService.get_workflow_data_files(workflow_id=workflow.id)
self.assertEqual(1, len(file_data))
self.assertEqual(2, file_data[0].version)
self.assertEqual(4, file_data[0].size) # File data size is included.
@ -83,140 +84,35 @@ class TestFileService(BaseTest):
processor = WorkflowProcessor(workflow)
task = processor.next_task()
irb_code = "UVACompl_PRCAppr" # The first file referenced in pb required docs.
FileService.add_workflow_file(workflow_id=workflow.id,
UserFileService.add_workflow_file(workflow_id=workflow.id,
irb_doc_code=irb_code,
task_spec_name=task.get_name(),
name="anything.png", content_type="text",
binary_data=b'1234')
# Add the file again with different data
FileService.add_workflow_file(workflow_id=workflow.id,
UserFileService.add_workflow_file(workflow_id=workflow.id,
irb_doc_code=irb_code,
task_spec_name=task.get_name(),
name="anything.png", content_type="text",
binary_data=b'5678')
def test_replace_archive_file_unarchives_the_file_and_updates(self):
self.load_example_data()
workflow = self.create_workflow('file_upload_form')
processor = WorkflowProcessor(workflow)
task = processor.next_task()
irb_code = "UVACompl_PRCAppr" # The first file referenced in pb required docs.
FileService.add_workflow_file(workflow_id=workflow.id,
irb_doc_code=irb_code,
task_spec_name=task.get_name(),
name="anything.png", content_type="text",
binary_data=b'1234')
# Archive the file
file_models = FileService.get_workflow_files(workflow_id=workflow.id)
self.assertEqual(1, len(file_models))
file_model = file_models[0]
file_model.archived = True
db.session.add(file_model)
# Assure that the file no longer comes back.
file_models = FileService.get_workflow_files(workflow_id=workflow.id)
self.assertEqual(0, len(file_models))
# Add the file again with different data
FileService.add_workflow_file(workflow_id=workflow.id,
irb_doc_code=irb_code,
task_spec_name=task.get_name(),
name="anything.png", content_type="text",
binary_data=b'5678')
file_models = FileService.get_workflow_files(workflow_id=workflow.id)
self.assertEqual(1, len(file_models))
file_data = FileService.get_workflow_data_files(workflow_id=workflow.id)
self.assertEqual(1, len(file_data))
self.assertEqual(2, file_data[0].version)
self.assertEqual(b'5678', file_data[0].data)
def test_add_file_from_form_allows_multiple_files_with_different_names(self):
self.load_example_data()
workflow = self.create_workflow('file_upload_form')
processor = WorkflowProcessor(workflow)
task = processor.next_task()
irb_code = "UVACompl_PRCAppr" # The first file referenced in pb required docs.
FileService.add_workflow_file(workflow_id=workflow.id,
UserFileService.add_workflow_file(workflow_id=workflow.id,
irb_doc_code=irb_code,
task_spec_name=task.get_name(),
name="anything.png", content_type="text",
binary_data=b'1234')
# Add the file again with different data
FileService.add_workflow_file(workflow_id=workflow.id,
UserFileService.add_workflow_file(workflow_id=workflow.id,
irb_doc_code=irb_code,
task_spec_name=task.get_name(),
name="a_different_thing.png", content_type="text",
binary_data=b'5678')
file_models = FileService.get_workflow_files(workflow_id=workflow.id)
file_models = UserFileService.get_workflow_files(workflow_id=workflow.id)
self.assertEqual(2, len(file_models))
@patch('crc.services.file_service.Github')
def test_update_from_github(self, mock_github):
mock_github.return_value = FakeGithub()
self.load_example_data()
workflow = self.create_workflow('file_upload_form')
processor = WorkflowProcessor(workflow)
task = processor.next_task()
irb_code = "UVACompl_PRCAppr" # The first file referenced in pb required docs.
file_model = FileService.add_workflow_file(workflow_id=workflow.id,
irb_doc_code=irb_code,
task_spec_name=task.get_name(),
name="anything.png", content_type="text",
binary_data=b'1234')
FileService.update_from_github([file_model.id])
file_model_data = FileDataModel.query.filter_by(
file_model_id=file_model.id
).order_by(
desc(FileDataModel.version)
).first()
self.assertEqual(file_model_data.data, b'Some bytes')
@patch('crc.services.file_service.Github')
def test_publish_to_github_creates(self, mock_github):
mock_github.return_value = FakeGithubCreates()
self.load_example_data()
workflow = self.create_workflow('file_upload_form')
processor = WorkflowProcessor(workflow)
task = processor.next_task()
irb_code = "UVACompl_PRCAppr" # The first file referenced in pb required docs.
file_model = FileService.add_workflow_file(workflow_id=workflow.id,
irb_doc_code=irb_code,
task_spec_name=task.get_name(),
name="anything.png", content_type="text",
binary_data=b'1234')
result = FileService.publish_to_github([file_model.id])
self.assertEqual(result['created'], True)
@patch('crc.services.file_service.Github')
def test_publish_to_github_updates(self, mock_github):
mock_github.return_value = FakeGithub()
self.load_example_data()
workflow = self.create_workflow('file_upload_form')
processor = WorkflowProcessor(workflow)
task = processor.next_task()
irb_code = "UVACompl_PRCAppr" # The first file referenced in pb required docs.
file_model = FileService.add_workflow_file(workflow_id=workflow.id,
irb_doc_code=irb_code,
task_spec_name=task.get_name(),
name="anything.png", content_type="text",
binary_data=b'1234')
result = FileService.publish_to_github([file_model.id])
self.assertEqual(result['updated'], True)
@patch('crc.services.file_service.Github')
def test_get_repo_branches(self, mock_github):
mock_github.return_value = FakeGithub()
branches = FileService.get_repo_branches()
self.assertIsInstance(branches, list)

View File

@ -5,69 +5,20 @@ import os
from tests.base_test import BaseTest
from crc import session, db, app
from crc.models.file import FileModel, FileType, FileModelSchema
from crc.models.file import FileModel, FileType, FileModelSchema, FileSchema
from crc.models.workflow import WorkflowSpecModel
from crc.services.file_service import FileService
from crc.services.spec_file_service import SpecFileService
from crc.services.workflow_processor import WorkflowProcessor
from crc.models.data_store import DataStoreModel
from crc.services.document_service import DocumentService
from example_data import ExampleDataLoader
from crc.services.user_file_service import UserFileService
from sqlalchemy import column
class TestFilesApi(BaseTest):
def test_list_files_for_workflow_spec(self):
self.load_example_data(use_crc_data=True)
spec_id = 'core_info'
spec = session.query(WorkflowSpecModel).filter_by(id=spec_id).first()
rv = self.app.get('/v1.0/spec_file?workflow_spec_id=%s' % spec_id,
follow_redirects=True,
content_type="application/json", headers=self.logged_in_headers())
self.assert_success(rv)
json_data = json.loads(rv.get_data(as_text=True))
self.assertEqual(5, len(json_data))
files = FileModelSchema(many=True).load(json_data, session=session)
file_names = [f.name for f in files]
self.assertTrue("%s.bpmn" % spec.id in file_names)
def btest_list_multiple_files_for_workflow_spec(self):
self.load_example_data()
spec = self.load_test_spec("random_fact")
data = {'file': (io.BytesIO(b"abcdef"), 'test.svg')}
self.app.post('/v1.0/spec_file?workflow_spec_id=%s' % spec.id, data=data, follow_redirects=True,
content_type='multipart/form-data', headers=self.logged_in_headers())
rv = self.app.get('/v1.0/spec_file?workflow_spec_id=%s' % spec.id,
follow_redirects=True,
content_type="application/json", headers=self.logged_in_headers())
self.assert_success(rv)
json_data = json.loads(rv.get_data(as_text=True))
self.assertEqual(3, len(json_data))
def test_create_spec_file(self):
self.load_example_data()
spec = session.query(WorkflowSpecModel).first()
data = {'file': (io.BytesIO(b"abcdef"), 'random_fact.svg')}
rv = self.app.post('/v1.0/spec_file?workflow_spec_id=%s' % spec.id, data=data, follow_redirects=True,
content_type='multipart/form-data', headers=self.logged_in_headers())
self.assert_success(rv)
self.assertIsNotNone(rv.get_data())
json_data = json.loads(rv.get_data(as_text=True))
file = FileModelSchema().load(json_data, session=session)
self.assertEqual(FileType.svg, file.type)
self.assertFalse(file.primary)
self.assertEqual("image/svg+xml", file.content_type)
self.assertEqual(spec.id, file.workflow_spec_id)
rv = self.app.get('/v1.0/file/%i' % file.id, headers=self.logged_in_headers())
self.assert_success(rv)
json_data = json.loads(rv.get_data(as_text=True))
file2 = FileModelSchema().load(json_data, session=session)
self.assertEqual(file, file2)
def test_add_file_from_task_and_form_errors_on_invalid_form_field_name(self):
self.create_reference_document()
workflow = self.create_workflow('file_upload_form')
@ -84,39 +35,10 @@ class TestFilesApi(BaseTest):
content_type='multipart/form-data', headers=self.logged_in_headers())
self.assert_success(rv)
def test_archive_file_no_longer_shows_up(self):
self.load_example_data()
workflow = self.create_workflow('file_upload_form')
processor = WorkflowProcessor(workflow)
processor.do_engine_steps()
task = processor.next_task()
data = {'file': (io.BytesIO(b"abcdef"), 'random_fact.svg')}
correct_name = task.task_spec.form.fields[0].id
data = {'file': (io.BytesIO(b"abcdef"), 'random_fact.svg')}
rv = self.app.post('/v1.0/file?study_id=%i&workflow_id=%s&task_spec_name=%s&form_field_key=%s' %
(workflow.study_id, workflow.id, task.get_name(), correct_name), data=data,
follow_redirects=True,
content_type='multipart/form-data', headers=self.logged_in_headers())
self.assert_success(rv)
rv = self.app.get('/v1.0/file?workflow_id=%s' % workflow.id, headers=self.logged_in_headers())
self.assert_success(rv)
self.assertEqual(1, len(json.loads(rv.get_data(as_text=True))))
file_model = db.session.query(FileModel).filter(FileModel.workflow_id == workflow.id).all()
self.assertEqual(1, len(file_model))
file_model[0].archived = True
db.session.commit()
rv = self.app.get('/v1.0/file?workflow_id=%s' % workflow.id, headers=self.logged_in_headers())
self.assert_success(rv)
self.assertEqual(0, len(json.loads(rv.get_data(as_text=True))))
def test_update_reference_file_data(self):
self.load_example_data()
file_name = "documents.xlsx"
filepath = os.path.join(app.root_path, 'static', 'reference', 'irb_documents.xlsx')
filepath = os.path.join(app.root_path, 'static', 'reference', 'documents.xlsx')
with open(filepath, 'rb') as myfile:
file_data = myfile.read()
data = {'file': (io.BytesIO(file_data), file_name)}
@ -127,7 +49,6 @@ class TestFilesApi(BaseTest):
json_data = json.loads(rv.get_data(as_text=True))
file = FileModelSchema().load(json_data, session=session)
self.assertEqual(FileType.xlsx, file.type)
self.assertTrue(file.is_reference)
self.assertEqual("application/vnd.openxmlformats-officedocument.spreadsheetml.sheet", file.content_type)
# self.assertEqual('dhf8r', json_data['user_uid'])
@ -141,7 +62,7 @@ class TestFilesApi(BaseTest):
def test_get_reference_file_data(self):
ExampleDataLoader().load_reference_documents()
file_name = "irb_document_types.xls"
filepath = os.path.join(app.root_path, 'static', 'reference', 'irb_documents.xlsx')
filepath = os.path.join(app.root_path, 'static', 'reference', 'documents.xlsx')
with open(filepath, 'rb') as f_open:
file_data = f_open.read()
data = {'file': (io.BytesIO(file_data), file_name)}
@ -154,16 +75,13 @@ class TestFilesApi(BaseTest):
def test_get_reference_file_info(self):
self.load_example_data()
reference_file_model = session.query(FileModel).filter(FileModel.is_reference==True).first()
name = reference_file_model.name
rv = self.app.get('/v1.0/reference_file/%s' % name, headers=self.logged_in_headers())
rv = self.app.get('/v1.0/reference_file/documents.xlsx', headers=self.logged_in_headers())
self.assert_success(rv)
self.assertIsNotNone(rv.get_data())
json_data = json.loads(rv.get_data(as_text=True))
self.assertEqual(reference_file_model.name, json_data['name'])
self.assertEqual(reference_file_model.type.value, json_data['type'])
self.assertEqual(reference_file_model.id, json_data['id'])
self.assertEqual("documents.xlsx", json_data['name'])
self.assertEqual("xlsx", json_data['type'])
def test_add_reference_file(self):
ExampleDataLoader().load_reference_documents()
@ -177,27 +95,24 @@ class TestFilesApi(BaseTest):
json_data = json.loads(rv.get_data(as_text=True))
file = FileModelSchema().load(json_data, session=session)
self.assertEqual(FileType.xlsx, file.type)
self.assertFalse(file.primary)
self.assertEqual(True, file.is_reference)
def test_delete_reference_file(self):
ExampleDataLoader().load_reference_documents()
reference_file = session.query(FileModel).filter(FileModel.is_reference == True).first()
rv = self.app.get('/v1.0/reference_file/%s' % reference_file.name, headers=self.logged_in_headers())
name = "documents.xlsx"
rv = self.app.get('/v1.0/reference_file/%s' % name, headers=self.logged_in_headers())
self.assert_success(rv)
self.app.delete('/v1.0/reference_file/%s' % reference_file.name, headers=self.logged_in_headers())
self.app.delete('/v1.0/reference_file/%s' % name, headers=self.logged_in_headers())
db.session.flush()
rv = self.app.get('/v1.0/reference_file/%s' % reference_file.name, headers=self.logged_in_headers())
rv = self.app.get('/v1.0/reference_file/%s' % name, headers=self.logged_in_headers())
self.assertEqual(404, rv.status_code)
self.assertIsNotNone(rv.get_data())
json_data = json.loads(rv.get_data(as_text=True))
self.assertIn('The reference file name you provided', json_data['message'])
self.assertIn('No reference file found with the name documents.xlsx', json_data['message'])
def test_list_reference_files(self):
ExampleDataLoader.clean_db()
file_name = DocumentService.DOCUMENT_LIST
filepath = os.path.join(app.root_path, 'static', 'reference', 'irb_documents.xlsx')
filepath = os.path.join(app.root_path, 'static', 'reference', 'documents.xlsx')
with open(filepath, 'rb') as myfile:
file_data = myfile.read()
data = {'file': (io.BytesIO(file_data), file_name)}
@ -212,35 +127,26 @@ class TestFilesApi(BaseTest):
self.assertEqual(1, len(json_data))
file = FileModelSchema(many=True).load(json_data, session=session)
self.assertEqual(file_name, file[0].name)
self.assertTrue(file[0].is_reference)
def test_update_file_info(self):
self.load_example_data()
file: FileModel = session.query(FileModel).filter(column('workflow_spec_id').isnot(None)).first()
file_model = FileModel(id=file.id,
name="silly_new_name.bpmn",
type=file.type,
content_type=file.content_type,
is_reference=file.is_reference,
primary=file.primary,
primary_process_id=file.primary_process_id,
workflow_id=file.workflow_id,
workflow_spec_id=file.workflow_spec_id,
archived=file.archived)
# file.name = "silly_new_name.bpmn"
def create_user_file(self):
self.create_reference_document()
workflow = self.create_workflow('file_upload_form')
processor = WorkflowProcessor(workflow)
processor.do_engine_steps()
task = processor.next_task()
correct_name = task.task_spec.form.fields[0].id
rv = self.app.put('/v1.0/spec_file/%i' % file.id,
content_type="application/json",
data=json.dumps(FileModelSchema().dump(file_model)), headers=self.logged_in_headers())
data = {'file': (io.BytesIO(b"abcdef"), 'random_fact.svg')}
rv = self.app.post('/v1.0/file?study_id=%i&workflow_id=%s&task_spec_name=%s&form_field_key=%s' %
(workflow.study_id, workflow.id, task.get_name(), correct_name), data=data,
follow_redirects=True,
content_type='multipart/form-data', headers=self.logged_in_headers())
self.assert_success(rv)
db_file = session.query(FileModel).filter_by(id=file.id).first()
self.assertIsNotNone(db_file)
self.assertEqual("silly_new_name.bpmn", db_file.name)
return json.loads(rv.get_data(as_text=True))
def test_load_valid_url_for_files(self):
self.load_example_data()
file: FileModel = session.query(FileModel).filter(FileModel.is_reference == False).first()
rv = self.app.get('/v1.0/file/%i' % file.id, content_type="application/json", headers=self.logged_in_headers())
file = self.create_user_file()
rv = self.app.get('/v1.0/file/%i' % file['id'], content_type="application/json", headers=self.logged_in_headers())
self.assert_success(rv)
file_json = json.loads(rv.get_data(as_text=True))
print(file_json)
@ -248,60 +154,11 @@ class TestFilesApi(BaseTest):
file_data_rv = self.app.get(file_json['url'])
self.assert_success(file_data_rv)
def test_update_file_data(self):
self.load_example_data()
spec = session.query(WorkflowSpecModel).first()
data = {}
data['file'] = io.BytesIO(self.minimal_bpmn("abcdef")), 'my_new_file.bpmn'
rv_1 = self.app.post('/v1.0/spec_file?workflow_spec_id=%s' % spec.id, data=data, follow_redirects=True,
content_type='multipart/form-data', headers=self.logged_in_headers())
file_json_1 = json.loads(rv_1.get_data(as_text=True))
self.assertEqual(80, file_json_1['size'])
file_id = file_json_1['id']
rv_2 = self.app.get('/v1.0/spec_file/%i/data' % file_id, headers=self.logged_in_headers())
self.assert_success(rv_2)
rv_data_2 = rv_2.get_data()
self.assertIsNotNone(rv_data_2)
self.assertEqual(self.minimal_bpmn("abcdef"), rv_data_2)
data['file'] = io.BytesIO(self.minimal_bpmn("efghijk")), 'my_new_file.bpmn'
rv_3 = self.app.put('/v1.0/spec_file/%i/data' % file_id, data=data, follow_redirects=True,
content_type='multipart/form-data', headers=self.logged_in_headers())
self.assert_success(rv_3)
self.assertIsNotNone(rv_3.get_data())
file_json_3 = json.loads(rv_3.get_data(as_text=True))
self.assertEqual(FileType.bpmn.value, file_json_3['type'])
self.assertEqual("application/octet-stream", file_json_3['content_type'])
self.assertEqual(spec.id, file_json_3['workflow_spec_id'])
# Assure it is updated in the database and properly persisted.
file_model = session.query(FileModel).filter(FileModel.id == file_id).first()
file_data = SpecFileService().get_spec_file_data(file_model.id)
self.assertEqual(81, len(file_data.data))
rv_4 = self.app.get('/v1.0/spec_file/%i/data' % file_id, headers=self.logged_in_headers())
self.assert_success(rv_4)
data = rv_4.get_data()
self.assertIsNotNone(data)
self.assertEqual(self.minimal_bpmn("efghijk"), data)
def test_get_file(self):
self.load_example_data()
spec = session.query(WorkflowSpecModel).first()
file = session.query(FileModel).filter_by(workflow_spec_id=spec.id).first()
rv = self.app.get('/v1.0/spec_file/%i/data' % file.id, headers=self.logged_in_headers())
self.assert_success(rv)
self.assertEqual("text/xml; charset=utf-8", rv.content_type)
self.assertTrue(rv.content_length > 1)
def test_get_file_contains_data_store_elements(self):
self.load_example_data()
spec = session.query(WorkflowSpecModel).first()
file = session.query(FileModel).filter_by(workflow_spec_id=spec.id).first()
ds = DataStoreModel(key="my_key", value="my_value", file_id=file.id);
file = self.create_user_file()
ds = DataStoreModel(key="my_key", value="my_value", file_id=file['id'])
db.session.add(ds)
rv = self.app.get('/v1.0/file/%i' % file.id, headers=self.logged_in_headers())
rv = self.app.get('/v1.0/file/%i' % file['id'], headers=self.logged_in_headers())
self.assert_success(rv)
json_data = json.loads(rv.get_data(as_text=True))
self.assertEqual("my_value", json_data['data_store']['my_key'])
@ -330,8 +187,8 @@ class TestFilesApi(BaseTest):
self.assertEqual(len(json_data), 1)
# Add another file for a different document type
FileService().add_workflow_file(workflow.id, 'Study_App_Doc', task.get_name(), 'otherdoc.docx',
'application/xcode', b"asdfasdf")
UserFileService().add_workflow_file(workflow.id, 'Study_App_Doc', task.get_name(), 'otherdoc.docx',
'application/xcode', b"asdfasdf")
# Note: this call can be made WITHOUT the task spec name.
rv = self.app.get('/v1.0/file?study_id=%i&workflow_id=%s&form_field_key=%s' %
@ -359,89 +216,3 @@ class TestFilesApi(BaseTest):
self.assertEqual('Ancillary Document', json_data['document']['category1'])
self.assertEqual('Study Team', json_data['document']['who_uploads?'])
def test_delete_spec_file(self):
self.load_example_data()
spec = session.query(WorkflowSpecModel).first()
file = session.query(FileModel).filter_by(workflow_spec_id=spec.id).first()
file_id = file.id
rv = self.app.get('/v1.0/spec_file/%i' % file.id, headers=self.logged_in_headers())
self.assert_success(rv)
self.app.delete('/v1.0/spec_file/%i' % file.id, headers=self.logged_in_headers())
db.session.flush()
rv = self.app.get('/v1.0/spec_file/%i' % file_id, headers=self.logged_in_headers())
self.assertEqual(404, rv.status_code)
def test_change_primary_bpmn(self):
self.load_example_data()
spec = session.query(WorkflowSpecModel).first()
data = {}
data['file'] = io.BytesIO(self.minimal_bpmn("abcdef")), 'my_new_file.bpmn'
# Add a new BPMN file to the specification
rv = self.app.post('/v1.0/spec_file?workflow_spec_id=%s' % spec.id, data=data, follow_redirects=True,
content_type='multipart/form-data', headers=self.logged_in_headers())
self.assert_success(rv)
self.assertIsNotNone(rv.get_data())
json_data = json.loads(rv.get_data(as_text=True))
file = FileModelSchema().load(json_data, session=session)
# Delete the primary BPMN file for the workflow.
orig_model = session.query(FileModel). \
filter(FileModel.primary == True). \
filter(FileModel.workflow_spec_id == spec.id).first()
rv = self.app.delete('/v1.0/spec_file?file_id=%s' % orig_model.id, headers=self.logged_in_headers())
# Set that new file to be the primary BPMN, assure it has a primary_process_id
file.primary = True
rv = self.app.put('/v1.0/spec_file/%i' % file.id,
content_type="application/json",
data=json.dumps(FileModelSchema().dump(file)), headers=self.logged_in_headers())
self.assert_success(rv)
json_data = json.loads(rv.get_data(as_text=True))
self.assertTrue(json_data['primary'])
self.assertIsNotNone(json_data['primary_process_id'])
def test_file_upload_with_previous_name(self):
self.load_example_data()
workflow_spec_model = session.query(WorkflowSpecModel).first()
# Add file
data = {'file': (io.BytesIO(b'asdf'), 'test_file.xlsx')}
rv = self.app.post('/v1.0/spec_file?workflow_spec_id=%s' % workflow_spec_model.id,
data=data,
follow_redirects=True,
content_type='multipart/form-data',
headers=self.logged_in_headers())
self.assert_success(rv)
file_json = json.loads(rv.get_data(as_text=True))
file_id = file_json['id']
# Set file to archived
file_model = session.query(FileModel).filter_by(id=file_id).first()
file_model.archived = True
session.commit()
# Assert we have the correct file data and the file is archived
file_data_model = SpecFileService().get_spec_file_data(file_model.id)
self.assertEqual(b'asdf', file_data_model.data)
file_model = session.query(FileModel).filter_by(id=file_model.id).first()
self.assertEqual(True, file_model.archived)
# Upload file with same name
data = {'file': (io.BytesIO(b'xyzpdq'), 'test_file.xlsx')}
rv = self.app.post('/v1.0/spec_file?workflow_spec_id=%s' % workflow_spec_model.id,
data=data,
follow_redirects=True,
content_type='multipart/form-data',
headers=self.logged_in_headers())
self.assert_success(rv)
file_json = json.loads(rv.get_data(as_text=True))
file_id = file_json['id']
# Assert we have the correct file data and the file is *not* archived
file_data_model = SpecFileService().get_spec_file_data(file_id)
self.assertEqual(b'xyzpdq', file_data_model.data)
file_model = session.query(FileModel).filter_by(id=file_id).first()
self.assertEqual(False, file_model.archived)

View File

@ -37,7 +37,7 @@ class TestSpecFileService(BaseTest):
self.assertIsNotNone(data)
spec_files = SpecFileService().get_files(spec_dt)
self.assertEqual(0, len(SpecFileService().get_files(spec_dt, "random_fact.bpmn")))
SpecFileService.add_file(spec_dt, "random_fact.bpmn", data, "text/xml")
SpecFileService.add_file(spec_dt, "random_fact.bpmn", data)
self.assertEqual(1, len(SpecFileService().get_files(spec_dt, "random_fact.bpmn")))
orig = SpecFileService.get_files(spec_random, "random_fact.bpmn")[0]

View File

@ -7,7 +7,6 @@ from tests.base_test import BaseTest
from crc import session, db, app
from crc.models.file import FileModel, FileType, FileModelSchema
from crc.models.workflow import WorkflowSpecModel
from crc.services.file_service import FileService
from crc.services.spec_file_service import SpecFileService
from crc.services.workflow_processor import WorkflowProcessor
from crc.models.data_store import DataStoreModel
@ -21,14 +20,14 @@ class TestFilesApi(BaseTest):
def test_list_files_for_workflow_spec(self):
self.load_example_data(use_crc_data=True)
spec_id = 'core_info'
spec = session.query(WorkflowSpecModel).filter_by(id=spec_id).first()
rv = self.app.get('/v1.0/spec_file?workflow_spec_id=%s' % spec_id,
spec_id = 'random_fact'
spec = self.load_test_spec(spec_id)
rv = self.app.get('/v1.0/workflow-specification/%s/file' % spec_id,
follow_redirects=True,
content_type="application/json", headers=self.logged_in_headers())
self.assert_success(rv)
json_data = json.loads(rv.get_data(as_text=True))
self.assertEqual(5, len(json_data))
self.assertEqual(2, len(json_data))
files = FileModelSchema(many=True).load(json_data, session=session)
file_names = [f.name for f in files]
self.assertTrue("%s.bpmn" % spec.id in file_names)
@ -37,9 +36,9 @@ class TestFilesApi(BaseTest):
self.load_example_data()
spec = self.load_test_spec("random_fact")
data = {'file': (io.BytesIO(b"abcdef"), 'test.svg')}
self.app.post('/v1.0/spec_file?workflow_spec_id=%s' % spec.id, data=data, follow_redirects=True,
self.app.post('/v1.0/workflow-specification/%s/file' % spec.id, data=data, follow_redirects=True,
content_type='multipart/form-data', headers=self.logged_in_headers())
rv = self.app.get('/v1.0/spec_file?workflow_spec_id=%s' % spec.id,
rv = self.app.get('/v1.0/workflow-specification/%s/file' % spec.id,
follow_redirects=True,
content_type="application/json", headers=self.logged_in_headers())
self.assert_success(rv)
@ -48,366 +47,130 @@ class TestFilesApi(BaseTest):
def test_create_spec_file(self):
self.load_example_data()
spec = session.query(WorkflowSpecModel).first()
spec = self.load_test_spec('random_fact')
data = {'file': (io.BytesIO(b"abcdef"), 'random_fact.svg')}
rv = self.app.post('/v1.0/spec_file?workflow_spec_id=%s' % spec.id, data=data, follow_redirects=True,
rv = self.app.post('/v1.0/workflow-specification/%s/file' % spec.id, data=data, follow_redirects=True,
content_type='multipart/form-data', headers=self.logged_in_headers())
self.assert_success(rv)
self.assertIsNotNone(rv.get_data())
json_data = json.loads(rv.get_data(as_text=True))
file = FileModelSchema().load(json_data, session=session)
self.assertEqual(FileType.svg, file.type)
self.assertFalse(file.primary)
self.assertEqual("image/svg+xml", file.content_type)
self.assertEqual(spec.id, file.workflow_spec_id)
file = json.loads(rv.get_data(as_text=True))
self.assertEqual(FileType.svg.value, file['type'])
self.assertEqual("image/svg+xml", file['content_type'])
rv = self.app.get('/v1.0/file/%i' % file.id, headers=self.logged_in_headers())
rv = self.app.get(f'/v1.0/workflow-specification/{spec.id}/file/random_fact.svg', headers=self.logged_in_headers())
self.assert_success(rv)
json_data = json.loads(rv.get_data(as_text=True))
file2 = FileModelSchema().load(json_data, session=session)
file2 = json.loads(rv.get_data(as_text=True))
self.assertEqual(file, file2)
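
Spec files are now addressed by spec id plus file name, matching the api.yml changes at the top of this commit, so tests no longer thread numeric file ids around. The URL shapes, in test-client form (client stands in for self.app; is_primary is the query parameter from the updated api.yml):

    base = '/v1.0/workflow-specification/%s/file' % spec.id
    client.get(base)                                       # list the spec's files
    client.get(base + '/random_fact.svg')                  # file metadata
    client.get(base + '/random_fact.svg/data')             # raw file contents
    client.put(base + '/random_fact.svg?is_primary=true')  # mark as primary
    client.delete(base + '/random_fact.svg')               # remove the file
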
def test_add_file_from_task_and_form_errors_on_invalid_form_field_name(self):
self.create_reference_document()
workflow = self.create_workflow('file_upload_form')
processor = WorkflowProcessor(workflow)
processor.do_engine_steps()
task = processor.next_task()
data = {'file': (io.BytesIO(b"abcdef"), 'random_fact.svg')}
correct_name = task.task_spec.form.fields[0].id
data = {'file': (io.BytesIO(b"abcdef"), 'random_fact.svg')}
rv = self.app.post('/v1.0/file?study_id=%i&workflow_id=%s&task_spec_name=%s&form_field_key=%s' %
(workflow.study_id, workflow.id, task.get_name(), correct_name), data=data,
follow_redirects=True,
content_type='multipart/form-data', headers=self.logged_in_headers())
self.assert_success(rv)
def test_archive_file_no_longer_shows_up(self):
def test_update_spec_file_data(self):
self.load_example_data()
workflow = self.create_workflow('file_upload_form')
processor = WorkflowProcessor(workflow)
processor.do_engine_steps()
task = processor.next_task()
data = {'file': (io.BytesIO(b"abcdef"), 'random_fact.svg')}
correct_name = task.task_spec.form.fields[0].id
data = {'file': (io.BytesIO(b"abcdef"), 'random_fact.svg')}
rv = self.app.post('/v1.0/file?study_id=%i&workflow_id=%s&task_spec_name=%s&form_field_key=%s' %
(workflow.study_id, workflow.id, task.get_name(), correct_name), data=data,
follow_redirects=True,
content_type='multipart/form-data', headers=self.logged_in_headers())
self.assert_success(rv)
rv = self.app.get('/v1.0/file?workflow_id=%s' % workflow.id, headers=self.logged_in_headers())
self.assert_success(rv)
self.assertEqual(1, len(json.loads(rv.get_data(as_text=True))))
file_model = db.session.query(FileModel).filter(FileModel.workflow_id == workflow.id).all()
self.assertEqual(1, len(file_model))
file_model[0].archived = True
db.session.commit()
rv = self.app.get('/v1.0/file?workflow_id=%s' % workflow.id, headers=self.logged_in_headers())
self.assert_success(rv)
self.assertEqual(0, len(json.loads(rv.get_data(as_text=True))))
def test_update_reference_file_data(self):
self.load_example_data()
file_name = "documents.xlsx"
filepath = os.path.join(app.root_path, 'static', 'reference', 'irb_documents.xlsx')
with open(filepath, 'rb') as myfile:
file_data = myfile.read()
data = {'file': (io.BytesIO(file_data), file_name)}
rv = self.app.put('/v1.0/reference_file/%s/data' % file_name, data=data, follow_redirects=True,
content_type='multipart/form-data', headers=self.logged_in_headers())
self.assert_success(rv)
self.assertIsNotNone(rv.get_data())
json_data = json.loads(rv.get_data(as_text=True))
file = FileModelSchema().load(json_data, session=session)
self.assertEqual(FileType.xlsx, file.type)
self.assertTrue(file.is_reference)
self.assertEqual("application/vnd.openxmlformats-officedocument.spreadsheetml.sheet", file.content_type)
# self.assertEqual('dhf8r', json_data['user_uid'])
def test_set_reference_file_bad_extension(self):
file_name = DocumentService.DOCUMENT_LIST
data = {'file': (io.BytesIO(b"abcdef"), "does_not_matter.ppt")}
rv = self.app.put('/v1.0/reference_file/%s/data' % file_name, data=data, follow_redirects=True,
content_type='multipart/form-data', headers=self.logged_in_headers())
self.assert_failure(rv, error_code="invalid_file_type")
def test_get_reference_file_data(self):
ExampleDataLoader().load_reference_documents()
file_name = "irb_document_types.xls"
filepath = os.path.join(app.root_path, 'static', 'reference', 'irb_documents.xlsx')
with open(filepath, 'rb') as f_open:
file_data = f_open.read()
data = {'file': (io.BytesIO(file_data), file_name)}
self.app.post('/v1.0/reference_file', data=data, follow_redirects=True,
content_type='multipart/form-data', headers=self.logged_in_headers())
rv = self.app.get('/v1.0/reference_file/%s/data' % file_name, headers=self.logged_in_headers())
self.assert_success(rv)
data_out = rv.get_data()
self.assertEqual(file_data, data_out)
def test_get_reference_file_info(self):
self.load_example_data()
reference_file_model = session.query(FileModel).filter(FileModel.is_reference==True).first()
name = reference_file_model.name
rv = self.app.get('/v1.0/reference_file/%s' % name, headers=self.logged_in_headers())
self.assert_success(rv)
self.assertIsNotNone(rv.get_data())
json_data = json.loads(rv.get_data(as_text=True))
self.assertEqual(reference_file_model.name, json_data['name'])
self.assertEqual(reference_file_model.type.value, json_data['type'])
self.assertEqual(reference_file_model.id, json_data['id'])
def test_add_reference_file(self):
ExampleDataLoader().load_reference_documents()
file_name = 'new.xlsx'
data = {'file': (io.BytesIO(b"abcdef"), file_name)}
rv = self.app.post('/v1.0/reference_file', data=data,
follow_redirects=True,
content_type='multipart/form-data', headers=self.logged_in_headers())
self.assertIsNotNone(rv.get_data())
json_data = json.loads(rv.get_data(as_text=True))
file = FileModelSchema().load(json_data, session=session)
self.assertEqual(FileType.xlsx, file.type)
self.assertFalse(file.primary)
self.assertEqual(True, file.is_reference)
def test_delete_reference_file(self):
ExampleDataLoader().load_reference_documents()
reference_file = session.query(FileModel).filter(FileModel.is_reference == True).first()
rv = self.app.get('/v1.0/reference_file/%s' % reference_file.name, headers=self.logged_in_headers())
self.assert_success(rv)
self.app.delete('/v1.0/reference_file/%s' % reference_file.name, headers=self.logged_in_headers())
db.session.flush()
rv = self.app.get('/v1.0/reference_file/%s' % reference_file.name, headers=self.logged_in_headers())
self.assertEqual(404, rv.status_code)
self.assertIsNotNone(rv.get_data())
json_data = json.loads(rv.get_data(as_text=True))
self.assertIn('The reference file name you provided', json_data['message'])
def test_list_reference_files(self):
ExampleDataLoader.clean_db()
file_name = DocumentService.DOCUMENT_LIST
filepath = os.path.join(app.root_path, 'static', 'reference', 'irb_documents.xlsx')
with open(filepath, 'rb') as myfile:
file_data = myfile.read()
data = {'file': (io.BytesIO(file_data), file_name)}
rv = self.app.post('/v1.0/reference_file', data=data, follow_redirects=True,
content_type='multipart/form-data', headers=self.logged_in_headers())
self.assert_success(rv)
rv = self.app.get('/v1.0/reference_file',
follow_redirects=True,
content_type="application/json", headers=self.logged_in_headers())
self.assert_success(rv)
json_data = json.loads(rv.get_data(as_text=True))
self.assertEqual(1, len(json_data))
file = FileModelSchema(many=True).load(json_data, session=session)
self.assertEqual(file_name, file[0].name)
self.assertTrue(file[0].is_reference)
def test_update_file_info(self):
self.load_example_data()
file: FileModel = session.query(FileModel).filter(column('workflow_spec_id').isnot(None)).first()
file_model = FileModel(id=file.id,
name="silly_new_name.bpmn",
type=file.type,
content_type=file.content_type,
is_reference=file.is_reference,
primary=file.primary,
primary_process_id=file.primary_process_id,
workflow_id=file.workflow_id,
workflow_spec_id=file.workflow_spec_id,
archived=file.archived)
# file.name = "silly_new_name.bpmn"
rv = self.app.put('/v1.0/spec_file/%i' % file.id,
content_type="application/json",
data=json.dumps(FileModelSchema().dump(file_model)), headers=self.logged_in_headers())
self.assert_success(rv)
db_file = session.query(FileModel).filter_by(id=file.id).first()
self.assertIsNotNone(db_file)
self.assertEqual("silly_new_name.bpmn", db_file.name)
def test_load_valid_url_for_files(self):
self.load_example_data()
file: FileModel = session.query(FileModel).filter(FileModel.is_reference == False).first()
rv = self.app.get('/v1.0/file/%i' % file.id, content_type="application/json", headers=self.logged_in_headers())
self.assert_success(rv)
file_json = json.loads(rv.get_data(as_text=True))
print(file_json)
self.assertIsNotNone(file_json['url'])
file_data_rv = self.app.get(file_json['url'])
self.assert_success(file_data_rv)
def test_update_file_data(self):
self.load_example_data()
spec = session.query(WorkflowSpecModel).first()
spec = self.load_test_spec('random_fact')
data = {}
data['file'] = io.BytesIO(self.minimal_bpmn("abcdef")), 'my_new_file.bpmn'
rv_1 = self.app.post('/v1.0/spec_file?workflow_spec_id=%s' % spec.id, data=data, follow_redirects=True,
rv_1 = self.app.post('/v1.0/workflow-specification/%s/file' % spec.id, data=data, follow_redirects=True,
content_type='multipart/form-data', headers=self.logged_in_headers())
file_json_1 = json.loads(rv_1.get_data(as_text=True))
self.assertEqual(80, file_json_1['size'])
file_id = file_json_1['id']
rv_2 = self.app.get('/v1.0/spec_file/%i/data' % file_id, headers=self.logged_in_headers())
rv_2 = self.app.get(f'/v1.0/workflow-specification/{spec.id}/file/my_new_file.bpmn/data',
headers=self.logged_in_headers())
self.assert_success(rv_2)
rv_data_2 = rv_2.get_data()
self.assertIsNotNone(rv_data_2)
self.assertEqual(self.minimal_bpmn("abcdef"), rv_data_2)
data['file'] = io.BytesIO(self.minimal_bpmn("efghijk")), 'my_new_file.bpmn'
rv_3 = self.app.put('/v1.0/spec_file/%i/data' % file_id, data=data, follow_redirects=True,
rv_3 = self.app.put(f'/v1.0/workflow-specification/{spec.id}/file/my_new_file.bpmn/data',
data=data, follow_redirects=True,
content_type='multipart/form-data', headers=self.logged_in_headers())
self.assert_success(rv_3)
self.assertIsNotNone(rv_3.get_data())
file_json_3 = json.loads(rv_3.get_data(as_text=True))
self.assertEqual(FileType.bpmn.value, file_json_3['type'])
self.assertEqual("application/octet-stream", file_json_3['content_type'])
self.assertEqual(spec.id, file_json_3['workflow_spec_id'])
self.assertEqual("text/xml", file_json_3['content_type'])
# Assure it is updated in the database and properly persisted.
file_model = session.query(FileModel).filter(FileModel.id == file_id).first()
file_data = SpecFileService().get_spec_file_data(file_model.id)
self.assertEqual(81, len(file_data.data))
file_data = SpecFileService().get_data(spec, "my_new_file.bpmn")
self.assertEqual(81, len(file_data))
rv_4 = self.app.get('/v1.0/spec_file/%i/data' % file_id, headers=self.logged_in_headers())
rv_4 = self.app.get(f'/v1.0/workflow-specification/{spec.id}/file/my_new_file.bpmn/data',
headers=self.logged_in_headers())
self.assert_success(rv_4)
data = rv_4.get_data()
self.assertIsNotNone(data)
self.assertEqual(self.minimal_bpmn("efghijk"), data)
def test_get_file(self):
def test_get_spec_file(self):
self.load_example_data()
spec = self.load_test_spec('random_fact')
spec = session.query(WorkflowSpecModel).first()
file = session.query(FileModel).filter_by(workflow_spec_id=spec.id).first()
rv = self.app.get('/v1.0/spec_file/%i/data' % file.id, headers=self.logged_in_headers())
rv = self.app.get(f'/v1.0/workflow-specification/{spec.id}/file',
headers=self.logged_in_headers())
files = json.loads(rv.get_data(as_text=True))
rv = self.app.get(f'/v1.0/workflow-specification/{spec.id}/file/{files[0]["name"]}/data',
headers=self.logged_in_headers())
self.assert_success(rv)
self.assertEqual("text/xml; charset=utf-8", rv.content_type)
self.assertTrue(rv.content_length > 1)
def test_get_file_contains_data_store_elements(self):
self.load_example_data()
spec = session.query(WorkflowSpecModel).first()
file = session.query(FileModel).filter_by(workflow_spec_id=spec.id).first()
ds = DataStoreModel(key="my_key", value="my_value", file_id=file.id)
db.session.add(ds)
rv = self.app.get('/v1.0/file/%i' % file.id, headers=self.logged_in_headers())
self.assert_success(rv)
json_data = json.loads(rv.get_data(as_text=True))
self.assertEqual("my_value", json_data['data_store']['my_key'])
def test_get_files_for_form_field_returns_only_those_files(self):
self.create_reference_document()
workflow = self.create_workflow('file_upload_form')
processor = WorkflowProcessor(workflow)
processor.do_engine_steps()
task = processor.next_task()
correct_name = task.task_spec.form.fields[0].id
data = {'file': (io.BytesIO(b"abcdef"), 'random_fact.svg')}
rv = self.app.post('/v1.0/file?study_id=%i&workflow_id=%s&task_spec_name=%s&form_field_key=%s' %
(workflow.study_id, workflow.id, task.get_name(), correct_name), data=data,
follow_redirects=True,
content_type='multipart/form-data', headers=self.logged_in_headers())
self.assert_success(rv)
# Note: this call can be made WITHOUT the task id.
rv = self.app.get('/v1.0/file?study_id=%i&workflow_id=%s&form_field_key=%s' %
(workflow.study_id, workflow.id, correct_name), follow_redirects=True,
content_type='multipart/form-data', headers=self.logged_in_headers())
self.assert_success(rv)
json_data = json.loads(rv.get_data(as_text=True))
self.assertEqual(len(json_data), 1)
# Add another file for a different document type
FileService().add_workflow_file(workflow.id, 'Study_App_Doc', task.get_name(), 'otherdoc.docx',
'application/xcode', b"asdfasdf")
# Note: this call can be made WITHOUT the task spec name.
rv = self.app.get('/v1.0/file?study_id=%i&workflow_id=%s&form_field_key=%s' %
(workflow.study_id, workflow.id, correct_name), follow_redirects=True,
content_type='multipart/form-data', headers=self.logged_in_headers())
self.assert_success(rv)
json_data = json.loads(rv.get_data(as_text=True))
self.assertEqual(len(json_data), 1)
def test_add_file_returns_document_metadata(self):
self.create_reference_document()
workflow = self.create_workflow('file_upload_form_single')
processor = WorkflowProcessor(workflow)
processor.do_engine_steps()
task = processor.next_task()
correct_name = task.task_spec.form.fields[0].id
data = {'file': (io.BytesIO(b"abcdef"), 'random_fact.svg')}
rv = self.app.post('/v1.0/file?study_id=%i&workflow_id=%s&task_spec_name=%s&form_field_key=%s' %
(workflow.study_id, workflow.id, task.get_name(), correct_name), data=data,
follow_redirects=True,
content_type='multipart/form-data', headers=self.logged_in_headers())
self.assert_success(rv)
json_data = json.loads(rv.get_data(as_text=True))
self.assertEqual('Ancillary Document', json_data['document']['category1'])
self.assertEqual('Study Team', json_data['document']['who_uploads?'])
def test_delete_spec_file(self):
self.load_example_data()
spec = session.query(WorkflowSpecModel).first()
file = session.query(FileModel).filter_by(workflow_spec_id=spec.id).first()
file_id = file.id
rv = self.app.get('/v1.0/spec_file/%i' % file.id, headers=self.logged_in_headers())
spec = self.load_test_spec('random_fact')
rv = self.app.get(f'/v1.0/workflow-specification/{spec.id}/file/random_fact.bpmn',
headers=self.logged_in_headers())
self.assert_success(rv)
self.app.delete('/v1.0/spec_file/%i' % file.id, headers=self.logged_in_headers())
db.session.flush()
rv = self.app.get('/v1.0/spec_file/%i' % file_id, headers=self.logged_in_headers())
rv = self.app.delete(f'/v1.0/workflow-specification/{spec.id}/file/random_fact.bpmn',
headers=self.logged_in_headers())
self.assert_success(rv)
rv = self.app.get(f'/v1.0/workflow-specification/{spec.id}/file/random_fact.bpmn',
headers=self.logged_in_headers())
self.assertEqual(404, rv.status_code)
def test_change_primary_bpmn(self):
self.load_example_data()
spec = self.load_test_spec('random_fact')
spec = session.query(WorkflowSpecModel).first()
data = {}
data['file'] = io.BytesIO(self.minimal_bpmn("abcdef")), 'my_new_file.bpmn'
# Add a new BPMN file to the specification
rv = self.app.post('/v1.0/spec_file?workflow_spec_id=%s' % spec.id, data=data, follow_redirects=True,
rv = self.app.post(f'/v1.0/workflow-specification/{spec.id}/file', data=data, follow_redirects=True,
content_type='multipart/form-data', headers=self.logged_in_headers())
self.assert_success(rv)
self.assertIsNotNone(rv.get_data())
json_data = json.loads(rv.get_data(as_text=True))
file = FileModelSchema().load(json_data, session=session)
# Delete the primary BPMN file for the workflow.
orig_model = session.query(FileModel). \
filter(FileModel.primary == True). \
filter(FileModel.workflow_spec_id == spec.id).first()
rv = self.app.delete('/v1.0/spec_file?file_id=%s' % orig_model.id, headers=self.logged_in_headers())
# Set that new file to be the primary BPMN, assure it has a primary_process_id
file.primary = True
rv = self.app.put('/v1.0/spec_file/%i' % file.id,
content_type="application/json",
data=json.dumps(FileModelSchema().dump(file)), headers=self.logged_in_headers())
# Fetch that file back.
rv = self.app.get(f'/v1.0/workflow-specification/{spec.id}/file/random_fact.bpmn',
headers=self.logged_in_headers())
self.assert_success(rv)
json_data = json.loads(rv.get_data(as_text=True))
self.assertTrue(json_data['primary'])
self.assertIsNotNone(json_data['primary_process_id'])
# Delete the original BPMN file for the workflow.
rv = self.app.delete(f'/v1.0/workflow-specification/{spec.id}/file/random_fact.bpmn',
headers=self.logged_in_headers())
self.assert_success(rv)
# Set new file to be the primary BPMN file
rv = self.app.put(f'/v1.0/workflow-specification/{spec.id}/file/my_new_file.bpmn?is_primary=True',
headers=self.logged_in_headers())
self.assert_success(rv)
# Get the workflow_spec
rv = self.app.get(f'/v1.0/workflow-specification/{spec.id}', headers=self.logged_in_headers())
workflow_spec = json.loads(rv.get_data(as_text=True))
self.assertEqual('my_new_file.bpmn', workflow_spec['primary_file_name'])
self.assertIsNotNone(workflow_spec['primary_process_id'])
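The primary_process_id asserted here has to come from the BPMN XML itself, since the caller only names the file. One way to pull it out with the standard library (the real service's parsing code may differ; the function name is mine):

from xml.etree import ElementTree

BPMN_NS = '{http://www.omg.org/spec/BPMN/20100524/MODEL}'

def bpmn_process_id(xml_bytes):
    # Return the id attribute of the first bpmn:process element, or None.
    root = ElementTree.fromstring(xml_bytes)
    process = root.find(f'{BPMN_NS}process')
    return process.get('id') if process is not None else None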
def test_file_upload_with_previous_name(self):
self.load_example_data()
workflow_spec_model = session.query(WorkflowSpecModel).first()
workflow_spec_model = self.load_test_spec('random_fact')
# Add file
data = {'file': (io.BytesIO(b'asdf'), 'test_file.xlsx')}
rv = self.app.post('/v1.0/spec_file?workflow_spec_id=%s' % workflow_spec_model.id,
rv = self.app.post('/v1.0/workflow-specification/%s/file' % workflow_spec_model.id,
data=data,
follow_redirects=True,
content_type='multipart/form-data',
@ -415,33 +178,21 @@ class TestFilesApi(BaseTest):
self.assert_success(rv)
file_json = json.loads(rv.get_data(as_text=True))
file_id = file_json['id']
# Set file to archived
file_model = session.query(FileModel).filter_by(id=file_id).first()
file_model.archived = True
session.commit()
# Assert we have the correct file data and the file is archived
file_data_model = SpecFileService().get_spec_file_data(file_model.id)
self.assertEqual(b'asdf', file_data_model.data)
file_model = session.query(FileModel).filter_by(id=file_model.id).first()
self.assertEqual(True, file_model.archived)
# Assert we have the correct file data
file_data = SpecFileService().get_data(workflow_spec_model, 'test_file.xlsx')
self.assertEqual(b'asdf', file_data)
# Upload file with same name
data = {'file': (io.BytesIO(b'xyzpdq'), 'test_file.xlsx')}
rv = self.app.post('/v1.0/spec_file?workflow_spec_id=%s' % workflow_spec_model.id,
rv = self.app.post('/v1.0/workflow-specification/%s/file' % workflow_spec_model.id,
data=data,
follow_redirects=True,
content_type='multipart/form-data',
headers=self.logged_in_headers())
self.assert_success(rv)
file_json = json.loads(rv.get_data(as_text=True))
file_id = file_json['id']
# Assert we have the correct file data and the file is *not* archived
file_data_model = SpecFileService().get_spec_file_data(file_id)
self.assertEqual(b'xyzpdq', file_data_model.data)
file_model = session.query(FileModel).filter_by(id=file_id).first()
self.assertEqual(False, file_model.archived)
# Assert we have the correct file data
file_data = SpecFileService().get_data(workflow_spec_model, 'test_file.xlsx')
self.assertEqual(b'xyzpdq', file_data)

View File

@ -14,6 +14,7 @@ class TestGetWorkflowStatus(BaseTest):
def test_get_workflow_status(self):
self.load_example_data()
self.create_workflow('random_fact')
workflow_model_1 = session.query(WorkflowModel).filter(WorkflowModel.id == 1).first()
search_workflow_id = workflow_model_1.id
workflow = self.create_workflow('get_workflow_status')

View File

@ -2,7 +2,7 @@ from tests.base_test import BaseTest
from crc import session
from crc.models.file import FileDataModel
from crc.services.file_service import FileService
from crc.services.user_file_service import UserFileService
import io
import os
@ -20,15 +20,15 @@ class TestGetZippedFiles(BaseTest):
task = workflow_api.next_task
# Add files to use in the test
model_1 = FileService.add_workflow_file(workflow_id=workflow.id,
model_1 = UserFileService.add_workflow_file(workflow_id=workflow.id,
name="document_1.png", content_type="text",
task_spec_name=task.name,
binary_data=b'1234', irb_doc_code='Study_Protocol_Document')
model_2 = FileService.add_workflow_file(workflow_id=workflow.id,
model_2 = UserFileService.add_workflow_file(workflow_id=workflow.id,
name="document_2.txt", content_type="text",
task_spec_name=task.name,
binary_data=b'1234', irb_doc_code='Study_App_Doc')
model_3 = FileService.add_workflow_file(workflow_id=workflow.id,
model_3 = UserFileService.add_workflow_file(workflow_id=workflow.id,
name="document_3.pdf", content_type="text",
task_spec_name=task.name,
binary_data=b'1234', irb_doc_code='AD_Consent_Model')
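Every call site in this commit keeps the old FileService.add_workflow_file argument list; only the class name changes. For reference, a stub of the interface as inferred from the calls in these tests (the positional order matches the datastore tests further down; bodies are placeholders):

class UserFileService(object):
    # Interface stub inferred from the tests; the real service persists
    # user files to the database and returns FileModel instances.

    @staticmethod
    def add_workflow_file(workflow_id, irb_doc_code, task_spec_name,
                          name, content_type, binary_data):
        raise NotImplementedError

    @staticmethod
    def get_files_for_study(study_id):
        raise NotImplementedError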

View File

@ -1,4 +1,5 @@
import json
from tests.base_test import BaseTest
from crc.services.ldap_service import LdapService
@ -13,8 +14,8 @@ from crc.models.file import FileModel
from crc.models.task_event import TaskEventModel
from crc.models.study import StudyEvent, StudyModel, StudySchema, StudyStatus, StudyEventType, StudyAssociated
from crc.models.workflow import WorkflowSpecModel, WorkflowModel
from crc.services.file_service import FileService
from crc.services.workflow_processor import WorkflowProcessor
from crc.services.user_file_service import UserFileService
class TestStudyApi(BaseTest):
@ -49,14 +50,17 @@ class TestStudyApi(BaseTest):
"""NOTE: The protocol builder is not enabled or mocked out. As the master workflow (which is empty),
and the test workflow do not need it, and it is disabled in the configuration."""
self.load_example_data()
self.load_test_spec('empty_workflow', master_spec=True)
self.load_test_spec('random_fact')
new_study = self.add_test_study()
new_study = session.query(StudyModel).filter_by(id=new_study["id"]).first()
api_response = self.app.get('/v1.0/study/%i' % new_study.id,
headers=self.logged_in_headers(), content_type="application/json")
self.assert_success(api_response)
study = StudySchema().loads(api_response.get_data(as_text=True))
self.create_workflow('random_fact', study=new_study)
study = StudySchema().loads(api_response.get_data(as_text=True))
self.assertEqual(study.title, self.TEST_STUDY['title'])
self.assertEqual(study.primary_investigator_id, self.TEST_STUDY['primary_investigator_id'])
self.assertEqual(study.user_uid, self.TEST_STUDY['user_uid'])
@ -64,11 +68,10 @@ class TestStudyApi(BaseTest):
# Categories are read-only, so switching to subscripting here.
# This assumes there is one test category set up in the example data.
category = study.categories[0]
self.assertEqual("Test Category", category['display_name'])
self.assertEqual(False, category['admin'])
self.assertEqual("Test Workflows", category['display_name'])
self.assertEqual(1, len(category["workflows"]))
workflow = category["workflows"][0]
self.assertEqual("Random Fact", workflow["display_name"])
self.assertEqual("random_fact", workflow["display_name"])
self.assertEqual("optional", workflow["state"])
self.assertEqual("not_started", workflow["status"])
self.assertEqual(0, workflow["total_tasks"])
@ -82,10 +85,10 @@ class TestStudyApi(BaseTest):
processor = WorkflowProcessor(workflow)
task = processor.next_task()
irb_code = "UVACompl_PRCAppr" # The first file referenced in pb required docs.
FileService.add_workflow_file(workflow_id=workflow.id,
task_spec_name=task.get_name(),
name="anything.png", content_type="png",
binary_data=b'1234', irb_doc_code=irb_code)
UserFileService.add_workflow_file(workflow_id=workflow.id,
task_spec_name=task.get_name(),
name="anything.png", content_type="png",
binary_data=b'1234', irb_doc_code=irb_code)
api_response = self.app.get('/v1.0/study/%i' % workflow.study_id,
headers=self.logged_in_headers(), content_type="application/json")
@ -99,6 +102,7 @@ class TestStudyApi(BaseTest):
def test_add_study(self):
self.load_example_data()
self.load_test_spec('empty_workflow', master_spec=True)
study = self.add_test_study()
db_study = session.query(StudyModel).filter_by(id=study['id']).first()
self.assertIsNotNone(db_study)
@ -255,10 +259,13 @@ class TestStudyApi(BaseTest):
def test_delete_workflow(self):
self.load_example_data()
self.load_test_spec('random_fact')
self.load_test_spec('empty_workflow', master_spec=True)
self.add_test_study()
workflow = session.query(WorkflowModel).first()
FileService.add_workflow_file(workflow_id=workflow.id, task_spec_name='TaskSpec01',
name="anything.png", content_type="text",
binary_data=b'5678', irb_doc_code="UVACompl_PRCAppr" )
UserFileService.add_workflow_file(workflow_id=workflow.id, task_spec_name='TaskSpec01',
name="anything.png", content_type="text",
binary_data=b'5678', irb_doc_code="UVACompl_PRCAppr" )
workflow_files = session.query(FileModel).filter_by(workflow_id=workflow.id)
self.assertEqual(workflow_files.count(), 1)
@ -271,14 +278,12 @@ class TestStudyApi(BaseTest):
workflow_files = session.query(FileModel).filter_by(workflow_id=workflow.id)
self.assertEqual(workflow_files.count(), 0)
# Finally, let's confirm the file was archived
workflow_files = session.query(FileModel).filter(FileModel.id.in_(workflow_files_ids))
for file in workflow_files:
self.assertTrue(file.archived)
self.assertIsNone(file.workflow_id)
def test_delete_study_with_workflow_and_status_etc(self):
self.load_example_data()
self.load_test_spec('random_fact')
self.load_test_spec('empty_workflow', master_spec=True)
self.add_test_study()
workflow = session.query(WorkflowModel).first()
stats1 = StudyEvent(
study_id=workflow.study_id,

View File

@ -10,11 +10,12 @@ from crc.models.file import FileDataModel, FileModel
from crc.models.protocol_builder import ProtocolBuilderRequiredDocumentSchema
from crc.models.study import StudyModel
from crc.scripts.study_info import StudyInfo
from crc.services.file_service import FileService
from crc.services.study_service import StudyService
from crc.services.workflow_processor import WorkflowProcessor
from crc.scripts.file_data_set import FileDataSet
from crc.services.document_service import DocumentService
from crc.services.user_file_service import UserFileService
from crc.services.reference_file_service import ReferenceFileService
class TestStudyDetailsDocumentsScript(BaseTest):
@ -41,14 +42,7 @@ class TestStudyDetailsDocumentsScript(BaseTest):
task = processor.next_task()
# Remove the reference file.
file_model = db.session.query(FileModel). \
filter(FileModel.is_reference is True). \
filter(FileModel.name == DocumentService.DOCUMENT_LIST).first()
if file_model:
db.session.query(FileDataModel).filter(FileDataModel.file_model_id == file_model.id).delete()
db.session.query(FileModel).filter(FileModel.id == file_model.id).delete()
db.session.commit()
db.session.flush()
ReferenceFileService.delete(DocumentService.DOCUMENT_LIST)
with self.assertRaises(ApiError):
StudyInfo().do_task_validate_only(task, study.id, "documents")
@ -99,10 +93,10 @@ class TestStudyDetailsDocumentsScript(BaseTest):
workflow_spec_model = self.load_test_spec("two_forms")
workflow_model = StudyService._create_workflow_model(study, workflow_spec_model)
irb_code = "UVACompl_PRCAppr" # The first file referenced in pb required docs.
file = FileService.add_workflow_file(workflow_id=workflow_model.id,
task_spec_name='Activity01',
name="anything.png", content_type="text",
binary_data=b'1234', irb_doc_code=irb_code)
file = UserFileService.add_workflow_file(workflow_id=workflow_model.id,
task_spec_name='Activity01',
name="anything.png", content_type="text",
binary_data=b'1234', irb_doc_code=irb_code)
processor = WorkflowProcessor(workflow_model)
task = processor.next_task()
FileDataSet().do_task(task, study.id, workflow_model.id, key="ginger", value="doodle", file_id=file.id)
@ -120,10 +114,10 @@ class TestStudyDetailsDocumentsScript(BaseTest):
workflow_spec_model = self.load_test_spec("two_forms")
workflow_model = StudyService._create_workflow_model(study, workflow_spec_model)
irb_code = "UVACompl_PRCAppr" # The first file referenced in pb required docs.
file = FileService.add_workflow_file(workflow_id=workflow_model.id,
task_spec_name='TaskSpec01',
name="anything.png", content_type="text",
binary_data=b'1234', irb_doc_code=irb_code)
file = UserFileService.add_workflow_file(workflow_id=workflow_model.id,
task_spec_name='TaskSpec01',
name="anything.png", content_type="text",
binary_data=b'1234', irb_doc_code=irb_code)
processor = WorkflowProcessor(workflow_model)
task = processor.next_task()
FileDataSet().do_task(task, study.id, workflow_model.id, key="irb_code", value="Study_App_Doc", file_id=file.id)
@ -142,10 +136,10 @@ class TestStudyDetailsDocumentsScript(BaseTest):
workflow_spec_model = self.load_test_spec("two_forms")
workflow_model = StudyService._create_workflow_model(study, workflow_spec_model)
irb_code = "UVACompl_PRCAppr" # The first file referenced in pb required docs.
file = FileService.add_workflow_file(workflow_id=workflow_model.id,
task_spec_name='Activity01',
name="anything.png", content_type="text",
binary_data=b'1234', irb_doc_code=irb_code)
file = UserFileService.add_workflow_file(workflow_id=workflow_model.id,
task_spec_name='Activity01',
name="anything.png", content_type="text",
binary_data=b'1234', irb_doc_code=irb_code)
processor = WorkflowProcessor(workflow_model)
task = processor.next_task()
with self.assertRaises(ApiError):

View File

@ -2,6 +2,7 @@ import json
from datetime import datetime
from unittest.mock import patch
from crc.services.user_file_service import UserFileService
from tests.base_test import BaseTest
from crc import db, app
@ -9,7 +10,6 @@ from crc.models.study import StudyModel, StudyStatus, StudyAssociatedSchema
from crc.models.user import UserModel
from crc.models.workflow import WorkflowModel, WorkflowStatus, \
WorkflowSpecCategoryModel
from crc.services.file_service import FileService
from crc.services.ldap_service import LdapService
from crc.services.study_service import StudyService
from crc.services.workflow_processor import WorkflowProcessor
@ -145,10 +145,10 @@ class TestStudyService(BaseTest):
# Add a document to the study with the correct code.
workflow = self.create_workflow('docx')
irb_code = "UVACompl_PRCAppr" # The first file referenced in pb required docs.
FileService.add_workflow_file(workflow_id=workflow.id,
task_spec_name='t1',
name="anything.png", content_type="text",
binary_data=b'1234', irb_doc_code=irb_code)
UserFileService.add_workflow_file(workflow_id=workflow.id,
task_spec_name='t1',
name="anything.png", content_type="text",
binary_data=b'1234', irb_doc_code=irb_code)
docs = StudyService().get_documents_status(workflow.study_id)
self.assertIsNotNone(docs)
@ -168,18 +168,18 @@ class TestStudyService(BaseTest):
workflow2 = self.create_workflow('empty_workflow', study=study)
# Add files to both workflows.
FileService.add_workflow_file(workflow_id=workflow1.id,
task_spec_name="t1",
name="anything.png", content_type="text",
binary_data=b'1234', irb_doc_code="UVACompl_PRCAppr" )
FileService.add_workflow_file(workflow_id=workflow1.id,
task_spec_name="t1",
name="anything.png", content_type="text",
binary_data=b'1234', irb_doc_code="AD_Consent_Model")
FileService.add_workflow_file(workflow_id=workflow2.id,
task_spec_name="t1",
name="anything.png", content_type="text",
binary_data=b'1234', irb_doc_code="UVACompl_PRCAppr" )
UserFileService.add_workflow_file(workflow_id=workflow1.id,
task_spec_name="t1",
name="anything.png", content_type="text",
binary_data=b'1234', irb_doc_code="UVACompl_PRCAppr" )
UserFileService.add_workflow_file(workflow_id=workflow1.id,
task_spec_name="t1",
name="anything.png", content_type="text",
binary_data=b'1234', irb_doc_code="AD_Consent_Model")
UserFileService.add_workflow_file(workflow_id=workflow2.id,
task_spec_name="t1",
name="anything.png", content_type="text",
binary_data=b'1234', irb_doc_code="UVACompl_PRCAppr" )
studies = StudyService().get_all_studies_with_files()
self.assertEqual(1, len(studies))

View File

@ -14,6 +14,7 @@ class TestStudyStatusMessage(BaseTest):
# shared code
self.load_example_data()
study_model = session.query(StudyModel).first()
self.create_workflow('random_fact', study=study_model)
workflow_metas = StudyService._get_workflow_metas(study_model.id)
warnings = StudyService._update_status_of_workflow_meta(workflow_metas, status)
return workflow_metas, warnings

View File

@ -131,6 +131,7 @@ class TestAuthentication(BaseTest):
app.config['PRODUCTION'] = True
self.load_example_data()
self.load_test_spec('empty_workflow', master_spec=True)
admin_user = self._login_as_admin()
admin_study = self._make_fake_study(admin_user.uid)
@ -164,6 +165,7 @@ class TestAuthentication(BaseTest):
app.config['PRODUCTION'] = True
self.load_example_data()
self.load_test_spec('empty_workflow', master_spec=True)
# Non-admin user should not be able to delete a study
non_admin_user = self._login_as_non_admin()
@ -215,23 +217,11 @@ class TestAuthentication(BaseTest):
app.config['PRODUCTION'] = True
self.load_example_data()
self.load_test_spec('empty_workflow', master_spec=True)
admin_user = self._login_as_admin()
admin_token_headers = dict(Authorization='Bearer ' + admin_user.encode_auth_token())
# User should not be in the system yet.
# non_admin_user = session.query(UserModel).filter(UserModel.uid == self.non_admin_uid).first()
# self.assertIsNone(non_admin_user)
# Admin should not be able to impersonate non-existent user
# rv_1 = self.app.get(
# '/v1.0/user?admin_impersonate_uid=' + self.non_admin_uid,
# content_type="application/json",
# headers=admin_token_headers,
# follow_redirects=False
#)
# self.assert_failure(rv_1, 400)
# Add the non-admin user now
self.logout()
non_admin_user = self._login_as_non_admin()

View File

@ -10,10 +10,10 @@ class TestAutoSetPrimaryBPMN(BaseTest):
def test_auto_set_primary_bpmn(self):
self.load_example_data()
category_id = session.query(WorkflowSpecCategoryModel).first().id
category = self.assure_category_exists()
# Add a workflow spec
spec = WorkflowSpecModel(id='make_cookies', display_name='Cooooookies',
description='Om nom nom delicious cookies', category_id=category_id,
description='Om nom nom delicious cookies', category_id=category.id,
standalone=False)
rv = self.app.post('/v1.0/workflow-specification',
headers=self.logged_in_headers(),
@ -23,18 +23,17 @@ class TestAutoSetPrimaryBPMN(BaseTest):
# grab the spec from the db
db_spec = session.query(WorkflowSpecModel).filter_by(id='make_cookies').first()
self.assertEqual(spec.display_name, db_spec.display_name)
# Make sure we don't already have a primary bpmn file
have_primary = FileModel.query.filter(FileModel.workflow_spec_id==db_spec.id, FileModel.type==FileType.bpmn, FileModel.primary==True).all()
self.assertEqual(have_primary, [])
self.assertIsNone(db_spec.primary_process_id)
self.assertIsNone(db_spec.primary_file_name)
data = {}
data['file'] = io.BytesIO(self.minimal_bpmn("abcdef")), 'my_new_file.bpmn'
# Add a new BPMN file to the specification
rv = self.app.post('/v1.0/spec_file?workflow_spec_id=%s' % db_spec.id, data=data, follow_redirects=True,
rv = self.app.post(f'/v1.0/workflow-specification/{db_spec.id}/file', data=data,
follow_redirects=True,
content_type='multipart/form-data', headers=self.logged_in_headers())
self.assert_success(rv)
file_id = rv.json['id']
# Make sure we now have a primary bpmn
have_primary = FileModel.query.filter(FileModel.workflow_spec_id==db_spec.id, FileModel.type==FileType.bpmn, FileModel.primary==True).all()
self.assertEqual(len(have_primary), 1)
self.assertEqual(file_id, have_primary[0].id)
db_spec = session.query(WorkflowSpecModel).filter_by(id='make_cookies').first()
self.assertEqual(db_spec.primary_process_id, '1')
self.assertEqual(db_spec.primary_file_name, 'my_new_file.bpmn')
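The test relies on an auto-primary rule: the first BPMN file added to a spec that has no primary becomes the primary file, and the spec records the process id parsed from it (hence primary_process_id == '1', the process id that minimal_bpmn presumably emits). A sketch of that rule, under the same parsing assumption as the earlier bpmn_process_id example:

import os
from xml.etree import ElementTree

BPMN_NS = '{http://www.omg.org/spec/BPMN/20100524/MODEL}'

def add_spec_file(spec, spec_dir, file_name, binary_data):
    # Write the file into the spec's directory on disk.
    os.makedirs(spec_dir, exist_ok=True)
    with open(os.path.join(spec_dir, file_name), 'wb') as f:
        f.write(binary_data)
    # Auto-primary rule (assumed): the first BPMN into an empty spec wins.
    if file_name.endswith('.bpmn') and spec.primary_file_name is None:
        process = ElementTree.fromstring(binary_data).find(f'{BPMN_NS}process')
        spec.primary_file_name = file_name
        spec.primary_process_id = process.get('id') if process is not None else None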

View File

@ -1,6 +1,7 @@
from tests.base_test import BaseTest
from crc.models.data_store import DataStoreModel, DataStoreSchema
from crc.services.user_file_service import UserFileService
from crc.models.file import FileModel
from crc import session
@ -127,7 +128,9 @@ class DataStoreTest(BaseTest):
def test_datastore_file(self):
self.load_example_data()
test_file = session.query(FileModel).first()
workflow = self.create_workflow('random_fact')
self.add_test_user_data()
test_file = UserFileService.add_workflow_file(workflow.id, 'xxx', 'xxx', 'my_file.docx', 'docx', b'this is it.')
# make sure we don't already have a datastore
api_response = self.app.get(f'/v1.0/datastore/file/{test_file.id}',
@ -153,7 +156,9 @@ class DataStoreTest(BaseTest):
def test_datastore_files(self):
self.load_example_data()
test_file = session.query(FileModel).first()
workflow = self.create_workflow('random_fact')
self.add_test_user_data()
test_file = UserFileService.add_workflow_file(workflow.id, 'xxx', 'xxx', 'my_file.docx', 'docx', b'this is it.')
# add datastore
value_1 = 'Some File Data Value 1'

View File

@ -1,6 +1,7 @@
import json
from tests.base_test import BaseTest
from crc.services.file_service import FileService
from crc.services.user_file_service import UserFileService
class TestDocumentDirectories(BaseTest):
@ -15,15 +16,15 @@ class TestDocumentDirectories(BaseTest):
study_id = workflow.study_id
# Add a file
FileService.add_workflow_file(workflow_id=workflow.id,
task_spec_name=first_task.name,
name="something.png", content_type="text",
binary_data=b'1234', irb_doc_code=irb_code_1)
UserFileService.add_workflow_file(workflow_id=workflow.id,
task_spec_name=first_task.name,
name="something.png", content_type="text",
binary_data=b'1234', irb_doc_code=irb_code_1)
# Add second file
FileService.add_workflow_file(workflow_id=workflow.id,
task_spec_name=first_task.name,
name="anything.png", content_type="text",
binary_data=b'5678', irb_doc_code=irb_code_2)
UserFileService.add_workflow_file(workflow_id=workflow.id,
task_spec_name=first_task.name,
name="anything.png", content_type="text",
binary_data=b'5678', irb_doc_code=irb_code_2)
# Get back the list of documents and their directories.
rv = self.app.get('/v1.0/document_directory/%i' % study_id, headers=self.logged_in_headers())

View File

@ -1,7 +1,7 @@
from tests.base_test import BaseTest
from crc import mail
from crc.services.file_service import FileService
from crc.services.study_service import StudyService
from crc.services.user_file_service import UserFileService
class TestEmailScript(BaseTest):
@ -92,18 +92,18 @@ class TestEmailScript(BaseTest):
workflow_api = self.get_workflow_api(workflow)
first_task = workflow_api.next_task
FileService.add_workflow_file(workflow_id=workflow.id,
task_spec_name=first_task.name,
name="something.png", content_type="text",
binary_data=b'1234', irb_doc_code=irb_code_1)
FileService.add_workflow_file(workflow_id=workflow.id,
task_spec_name=first_task.name,
name="another.png", content_type="text",
binary_data=b'67890', irb_doc_code=irb_code_1)
FileService.add_workflow_file(workflow_id=workflow.id,
task_spec_name=first_task.name,
name="anything.png", content_type="text",
binary_data=b'5678', irb_doc_code=irb_code_2)
UserFileService.add_workflow_file(workflow_id=workflow.id,
task_spec_name=first_task.name,
name="something.png", content_type="text",
binary_data=b'1234', irb_doc_code=irb_code_1)
UserFileService.add_workflow_file(workflow_id=workflow.id,
task_spec_name=first_task.name,
name="another.png", content_type="text",
binary_data=b'67890', irb_doc_code=irb_code_1)
UserFileService.add_workflow_file(workflow_id=workflow.id,
task_spec_name=first_task.name,
name="anything.png", content_type="text",
binary_data=b'5678', irb_doc_code=irb_code_2)
with mail.record_messages() as outbox:
self.complete_form(workflow, first_task, {'subject': 'My Test Subject', 'recipients': 'user@example.com',

View File

@ -2,8 +2,8 @@ from tests.base_test import BaseTest
from crc import db
from crc.models.data_store import DataStoreModel
from crc.services.file_service import FileService
from crc.services.workflow_processor import WorkflowProcessor
from crc.services.user_file_service import UserFileService
from io import BytesIO
@ -18,10 +18,10 @@ class TestFileDatastore(BaseTest):
# for this study
workflow = self.create_workflow('file_data_store')
irb_code = "UVACompl_PRCAppr" # The first file referenced in pb required docs.
FileService.add_workflow_file(workflow_id=workflow.id,
task_spec_name='task1',
name="anything.png", content_type="text",
binary_data=b'1234', irb_doc_code=irb_code)
UserFileService.add_workflow_file(workflow_id=workflow.id,
task_spec_name='task1',
name="anything.png", content_type="text",
binary_data=b'1234', irb_doc_code=irb_code)
processor = WorkflowProcessor(workflow)
processor.do_engine_steps()

View File

@ -1,5 +1,5 @@
from tests.base_test import BaseTest
from crc.services.file_service import FileService
from crc.services.user_file_service import UserFileService
from crc.scripts.is_file_uploaded import IsFileUploaded
@ -15,29 +15,29 @@ class TestIsFileUploaded(BaseTest):
study_id = workflow.study_id
# We shouldn't have any files yet.
files = FileService.get_files_for_study(study_id)
files = UserFileService.get_files_for_study(study_id)
self.assertEqual(0, len(files))
self.assertEqual(False, IsFileUploaded.do_task(IsFileUploaded, first_task, study_id, workflow.id, irb_code_1))
# Add a file
FileService.add_workflow_file(workflow_id=workflow.id,
UserFileService.add_workflow_file(workflow_id=workflow.id,
task_spec_name=first_task.name,
name="something.png", content_type="text",
binary_data=b'1234', irb_doc_code=irb_code_1)
# Make sure we find the file
files = FileService.get_files_for_study(study_id)
files = UserFileService.get_files_for_study(study_id)
self.assertEqual(1, len(files))
self.assertEqual(True, IsFileUploaded.do_task(IsFileUploaded, first_task, study_id, workflow.id, irb_code_1))
# Add second file
FileService.add_workflow_file(workflow_id=workflow.id,
UserFileService.add_workflow_file(workflow_id=workflow.id,
task_spec_name=first_task.name,
name="anything.png", content_type="text",
binary_data=b'5678', irb_doc_code=irb_code_2)
# Make sure we find both files.
files = FileService.get_files_for_study(study_id)
files = UserFileService.get_files_for_study(study_id)
self.assertEqual(2, len(files))
self.assertEqual(True, IsFileUploaded.do_task(IsFileUploaded, first_task, study_id, workflow.id, irb_code_1))
self.assertEqual(True, IsFileUploaded.do_task(IsFileUploaded, first_task, study_id, workflow.id, irb_code_2))

View File

@ -2,7 +2,6 @@ import os
from tests.base_test import BaseTest
from crc.services.file_service import FileService
from crc.api.common import ApiError
from crc import session, app
from crc.models.file import FileDataModel, FileModel, LookupFileModel, LookupDataModel, CONTENT_TYPES
@ -39,7 +38,6 @@ class TestLookupService(BaseTest):
def test_updates_to_file_cause_lookup_rebuild(self):
spec = BaseTest.load_test_spec('enum_options_with_search')
workflow = self.create_workflow('enum_options_with_search')
file_model = session.query(FileModel).filter(FileModel.name == "sponsors.xlsx").first()
LookupService.lookup(workflow, "Task_Enum_Lookup", "sponsor", "sam", limit=10)
lookup_records = session.query(LookupFileModel).all()
self.assertIsNotNone(lookup_records)
@ -52,17 +50,12 @@ class TestLookupService(BaseTest):
file_path = os.path.join(app.root_path, '..', 'tests', 'data',
'enum_options_with_search', 'sponsors_modified.xlsx')
file = open(file_path, 'rb')
if file_model.workflow_spec_id is not None:
workflow_spec_model = session.query(WorkflowSpecModel).filter(WorkflowSpecModel.id==file_model.workflow_spec_id).first()
SpecFileService().update_spec_file_data(workflow_spec_model, file_model.name, file.read())
elif file_model.is_reference:
ReferenceFileService().update_reference_file(file_model, file.read())
else:
FileService.update_file(file_model, file.read(), CONTENT_TYPES['xlsx'])
workflow_spec_model = session.query(WorkflowSpecModel)\
.filter(WorkflowSpecModel.id == workflow.workflow_spec_id).first()
SpecFileService().update_file(workflow_spec_model, "sponsors.xlsx", file.read())
file.close()
# restart the workflow, so it can pick up the changes.
processor = WorkflowProcessor.reset(workflow)
workflow = processor.workflow_model
@ -187,21 +180,18 @@ class TestLookupService(BaseTest):
self.assertEqual('UVA - INTERNAL - GM USE ONLY', first_result['CUSTOMER_NAME'])
def test_lookup_fails_for_xls(self):
BaseTest.load_test_spec('enum_options_with_search')
spec = BaseTest.load_test_spec('enum_options_with_search')
# Using an old xls file should raise an error
file_model_xls = session.query(FileModel).filter(FileModel.name == 'sponsors.xls').first()
file_data_xls = SpecFileService().get_spec_file_data(file_model_xls.id)
# file_data_model_xls = session.query(FileDataModel).filter(FileDataModel.file_model_id == file_model_xls.id).first()
file_data_xls = SpecFileService().get_data(spec, 'sponsors.xls')
with self.assertRaises(ApiError) as ae:
LookupService.build_lookup_table(file_model_xls.id, 'sponsors.xls', file_data_xls.data, 'CUSTOMER_NUMBER', 'CUSTOMER_NAME')
LookupService.build_lookup_table('sponsors.xls', file_data_xls, 'CUSTOMER_NUMBER', 'CUSTOMER_NAME')
self.assertIn('Error opening excel file', ae.exception.args[0])
# Using an xlsx file should work
file_model_xlsx = session.query(FileModel).filter(FileModel.name == 'sponsors.xlsx').first()
file_data_xlsx = SpecFileService().get_spec_file_data(file_model_xlsx.id)
# file_data_model_xlsx = session.query(FileDataModel).filter(FileDataModel.file_model_id == file_model_xlsx.id).first()
lookup_model = LookupService.build_lookup_table(file_model_xlsx.id, 'sponsors.xlsx', file_data_xlsx.data, 'CUSTOMER_NUMBER', 'CUSTOMER_NAME')
file_data_xlsx = SpecFileService().get_data(spec, 'sponsors.xlsx')
lookup_model = LookupService.build_lookup_table('sponsors.xlsx', file_data_xlsx,
'CUSTOMER_NUMBER', 'CUSTOMER_NAME')
self.assertEqual(28, len(lookup_model.dependencies))
self.assertIn('CUSTOMER_NAME', lookup_model.dependencies[0].data.keys())
self.assertIn('CUSTOMER_NUMBER', lookup_model.dependencies[0].data.keys())
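build_lookup_table no longer takes a file_id; the caller reads the bytes from disk via SpecFileService().get_data and hands them over with the file name. A self-contained sketch of that shape, using openpyxl the way the error message suggests (whether the service actually uses openpyxl is an assumption):

import io
from openpyxl import load_workbook

def build_lookup_rows(file_name, data, value_column, label_column):
    # openpyxl only reads .xlsx; legacy .xls input fails here, mirroring
    # the 'Error opening excel file' branch tested above.
    try:
        workbook = load_workbook(io.BytesIO(data), read_only=True)
    except Exception as exc:
        raise ValueError(f'Error opening excel file {file_name}') from exc
    sheet = workbook.active
    rows = sheet.iter_rows(values_only=True)
    headers = list(next(rows))
    value_idx, label_idx = headers.index(value_column), headers.index(label_column)
    return [{value_column: row[value_idx], label_column: row[label_idx]}
            for row in rows if row[value_idx] is not None]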

View File

@ -189,7 +189,7 @@ class TestTasksApi(BaseTest):
# Modify the specification, with a major change that alters the flow and can't be deserialized
# effectively, if it uses the latest spec files.
file_path = os.path.join(app.root_path, '..', 'tests', 'data', 'two_forms', 'modified', 'two_forms_struc_mod.bpmn')
self.replace_file("two_forms.bpmn", file_path)
self.replace_file(workflow.workflow_spec, "two_forms.bpmn", file_path)
# This should use the original workflow spec, and just move to the next task
workflow_api_2 = self.get_workflow_api(workflow)

View File

@ -1,10 +1,10 @@
from crc.models.workflow import WorkflowLibraryModel
from tests.base_test import BaseTest
from crc import session
from crc.models.user import UserModel
from crc.services.user_service import UserService
from crc.services.workflow_service import WorkflowService
from crc.models.workflow import WorkflowLibraryModel
from example_data import ExampleDataLoader
@ -16,7 +16,7 @@ class TestWorkflowApi(BaseTest):
def test_get_task_events(self):
self.load_example_data()
spec = ExampleDataLoader().create_spec('hello_world', 'Hello World', category_id=0, standalone=True, from_tests=True)
spec = self.load_test_spec('hello_world')
user = session.query(UserModel).first()
self.assertIsNotNone(user)
WorkflowService.get_workflow_from_spec(spec.id, user)
@ -27,17 +27,10 @@ class TestWorkflowApi(BaseTest):
headers=self.logged_in_headers())
self.assert_success(rv)
def test_library_code(self):
self.load_example_data()
spec1 = ExampleDataLoader().create_spec('hello_world', 'Hello World', category_id=0, library=False,
from_tests=True)
spec2 = ExampleDataLoader().create_spec('hello_world_lib', 'Hello World Library', category_id=0, library=True,
from_tests=True)
user = session.query(UserModel).first()
self.assertIsNotNone(user)
spec1 = self.load_test_spec('hello_world')
spec2 = self.load_test_spec('hello_world_lib', library=True)
rv = self.app.post(f'/v1.0/workflow-specification/%s/library/%s'%(spec1.id,spec2.id),
follow_redirects=True,
content_type="application/json",
@ -63,11 +56,8 @@ class TestWorkflowApi(BaseTest):
def test_library_cleanup(self):
self.load_example_data()
spec1 = ExampleDataLoader().create_spec('hello_world', 'Hello World', category_id=0, library=False,
from_tests=True)
spec2 = ExampleDataLoader().create_spec('hello_world_lib', 'Hello World Library', category_id=0, library=True,
from_tests=True)
spec1 = self.load_test_spec('hello_world')
spec2 = self.load_test_spec('hello_world_lib', library=True)
user = session.query(UserModel).first()
self.assertIsNotNone(user)

View File

@ -12,21 +12,15 @@ class TestDuplicateWorkflowSpecFile(BaseTest):
# Users should not be able to upload a file that already exists.
self.load_example_data()
spec = session.query(WorkflowSpecModel).first()
spec = self.load_test_spec('random_fact')
# Add a file
file_model = SpecFileService.add_workflow_spec_file(spec,
name="something.png",
content_type="text",
binary_data=b'1234')
file_model = SpecFileService.add_file(spec, "something.png", b'1234')
self.assertEqual(file_model.name, 'something.png')
self.assertEqual(file_model.content_type, 'text')
self.assertEqual(file_model.content_type, 'image/png')
# Try to add it again
try:
SpecFileService.add_workflow_spec_file(spec,
name="something.png",
content_type="text",
binary_data=b'5678')
file_model = SpecFileService.add_file(spec, "something.png", b'1234')
except ApiError as ae:
self.assertEqual(ae.message, 'If you want to replace the file, use the update mechanism.')
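Note the content-type assertion above: add_file is passed only a name and bytes, and 'something.png' comes back as 'image/png', so the type is now derived from the extension (the CONTENT_TYPES map imported in the lookup tests likely drives this). The standard-library equivalent:

import mimetypes

def guess_content_type(file_name, default='application/octet-stream'):
    # Map a file extension to a MIME type; fall back when unknown
    # (e.g. '.bpmn', which a project-specific table would have to cover).
    content_type, _ = mimetypes.guess_type(file_name)
    return content_type or default

assert guess_content_type('something.png') == 'image/png'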

View File

@ -1,7 +1,7 @@
from tests.base_test import BaseTest
from crc.api.common import ApiError
from crc.services.file_service import FileService
from crc.scripts.is_file_uploaded import IsFileUploaded
from crc.services.user_file_service import UserFileService
class TestDeleteIRBDocument(BaseTest):
@ -17,16 +17,16 @@ class TestDeleteIRBDocument(BaseTest):
first_task = workflow_api.next_task
# Should not have any files yet
files = FileService.get_files_for_study(study_id)
files = UserFileService.get_files_for_study(study_id)
self.assertEqual(0, len(files))
self.assertEqual(False, IsFileUploaded.do_task(
IsFileUploaded, first_task, study_id, workflow.id, irb_code))
# Add a file
FileService.add_workflow_file(workflow_id=workflow.id,
task_spec_name=first_task.name,
name="filename.txt", content_type="text",
binary_data=b'1234', irb_doc_code=irb_code)
UserFileService.add_workflow_file(workflow_id=workflow.id,
task_spec_name=first_task.name,
name="filename.txt", content_type="text",
binary_data=b'1234', irb_doc_code=irb_code)
# Assert we have the file
self.assertEqual(True, IsFileUploaded.do_task(
IsFileUploaded, first_task, study_id, workflow.id, irb_code))
@ -55,21 +55,21 @@ class TestDeleteIRBDocument(BaseTest):
first_task = workflow_api.next_task
# Should not have any files yet
files = FileService.get_files_for_study(study_id)
files = UserFileService.get_files_for_study(study_id)
self.assertEqual(0, len(files))
self.assertEqual(False, IsFileUploaded.do_task(IsFileUploaded, first_task, study_id, workflow.id, irb_code_1))
self.assertEqual(False, IsFileUploaded.do_task(IsFileUploaded, first_task, study_id, workflow.id, irb_code_2))
# Add a file
FileService.add_workflow_file(workflow_id=workflow.id,
task_spec_name=first_task.name,
name="filename.txt", content_type="text",
binary_data=b'1234', irb_doc_code=irb_code_1)
UserFileService.add_workflow_file(workflow_id=workflow.id,
task_spec_name=first_task.name,
name="filename.txt", content_type="text",
binary_data=b'1234', irb_doc_code=irb_code_1)
# Add another file
FileService.add_workflow_file(workflow_id=workflow.id,
task_spec_name=first_task.name,
name="filename.txt", content_type="text",
binary_data=b'1234', irb_doc_code=irb_code_2)
UserFileService.add_workflow_file(workflow_id=workflow.id,
task_spec_name=first_task.name,
name="filename.txt", content_type="text",
binary_data=b'1234', irb_doc_code=irb_code_2)
self.assertEqual(True, IsFileUploaded.do_task(
IsFileUploaded, first_task, study_id, workflow.id, irb_code_1))
self.assertEqual(True, IsFileUploaded.do_task(

View File

@ -17,6 +17,7 @@ from crc.models.workflow import WorkflowSpecModel, WorkflowStatus
from crc.services.study_service import StudyService
from crc.services.workflow_processor import WorkflowProcessor
from crc.services.workflow_service import WorkflowService
from crc.services.spec_file_service import SpecFileService
class TestWorkflowProcessor(BaseTest):
@ -60,7 +61,7 @@ class TestWorkflowProcessor(BaseTest):
self.load_example_data()
study = session.query(StudyModel).first()
workflow_spec_model = self.load_test_spec("decision_table")
files = session.query(FileModel).filter_by(workflow_spec_id='decision_table').all()
files = SpecFileService.get_files(workflow_spec_model)
self.assertEqual(2, len(files))
processor = self.get_processor(study, workflow_spec_model)
processor.do_engine_steps()
@ -207,7 +208,7 @@ class TestWorkflowProcessor(BaseTest):
self.load_example_data()
study = session.query(StudyModel).first()
workflow_spec_model = self.load_test_spec("docx")
files = session.query(FileModel).filter_by(workflow_spec_id='docx').all()
files = SpecFileService.get_files(workflow_spec_model)
self.assertEqual(2, len(files))
workflow_spec_model = session.query(WorkflowSpecModel).filter_by(id="docx").first()
processor = self.get_processor(study, workflow_spec_model)
@ -262,7 +263,7 @@ class TestWorkflowProcessor(BaseTest):
# Modify the specification, with a major change that alters the flow and can't be serialized effectively.
file_path = os.path.join(app.root_path, '..', 'tests', 'data', 'two_forms', 'modified', 'two_forms_struc_mod.bpmn')
self.replace_file("two_forms.bpmn", file_path)
self.replace_file(workflow_spec_model, "two_forms.bpmn", file_path)
# Assure that creating a new processor doesn't cause any issues, and maintains the spec version.
processor.workflow_model.bpmn_workflow_json = processor.serialize()
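replace_file now takes the spec model because spec files are addressed by (spec, name) on disk. A plausible version of the updated BaseTest helper, built from the SpecFileService().update_file call shown in the lookup test above:

from crc.services.spec_file_service import SpecFileService

def replace_file(self, workflow_spec_model, file_name, file_path):
    # Overwrite the named spec file on disk with a local file's contents.
    with open(file_path, 'rb') as f:
        SpecFileService().update_file(workflow_spec_model, file_name, f.read())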
@ -315,54 +316,6 @@ class TestWorkflowProcessor(BaseTest):
task = processor.next_task()
self.assertEqual("A1", task.task_spec.name)
@patch('crc.services.protocol_builder.ProtocolBuilderService.get_studies')
@patch('crc.services.protocol_builder.ProtocolBuilderService.get_investigators')
@patch('crc.services.protocol_builder.ProtocolBuilderService.get_required_docs')
@patch('crc.services.protocol_builder.ProtocolBuilderService.get_study_details')
def test_master_bpmn_for_crc(self, mock_details, mock_required_docs, mock_investigators, mock_studies):
# Mock Protocol Builder response
studies_response = self.protocol_builder_response('user_studies.json')
mock_studies.return_value = ProtocolBuilderCreatorStudySchema(many=True).loads(studies_response)
investigators_response = self.protocol_builder_response('investigators.json')
mock_investigators.return_value = json.loads(investigators_response)
required_docs_response = self.protocol_builder_response('required_docs.json')
mock_required_docs.return_value = json.loads(required_docs_response)
details_response = self.protocol_builder_response('study_details.json')
mock_details.return_value = json.loads(details_response)
self.load_example_data(use_crc_data=True)
app.config['PB_ENABLED'] = True
study = session.query(StudyModel).first()
workflow_spec_model = db.session.query(WorkflowSpecModel).\
filter(WorkflowSpecModel.id == "top_level_workflow").first()
self.assertIsNotNone(workflow_spec_model)
processor = self.get_processor(study, workflow_spec_model)
processor.do_engine_steps()
self.assertTrue("Top level process is fully automatic.", processor.bpmn_workflow.is_completed())
data = processor.bpmn_workflow.last_task.data
logging.basicConfig(format='%(levelname)s:%(message)s', level=logging.DEBUG)
# It should mark Enter Core Data as required, because it is always required.
self.assertTrue("enter_core_info" in data)
self.assertEqual("required", data["enter_core_info"])
# It should mark Personnel as required, because StudyInfo.investigators is not empty.
self.assertTrue("personnel" in data)
self.assertEqual("required", data["personnel"])
# It should mark the sponsor funding source as disabled since the funding required (12) is not included in the required docs.
self.assertTrue("sponsor_funding_source" in data)
self.assertEqual("required", data["sponsor_funding_source"])
def test_enum_with_no_choices_raises_api_error(self):
self.load_example_data()
workflow_spec_model = self.load_test_spec("random_fact")
@ -371,8 +324,6 @@ class TestWorkflowProcessor(BaseTest):
processor.do_engine_steps()
tasks = processor.next_user_tasks()
task = tasks[0]
field = FormField()
field.id = "test_enum_field"
field.type = "enum"

View File

@ -2,8 +2,8 @@ from tests.base_test import BaseTest
from crc import session
from crc.models.study import StudyModel
from crc.services.file_service import FileService
from crc.scripts.is_file_uploaded import IsFileUploaded
from crc.services.user_file_service import UserFileService
class TestWorkflowRestart(BaseTest):
@ -47,16 +47,16 @@ class TestWorkflowRestart(BaseTest):
first_task = workflow_api.next_task
# Should not have any files yet
files = FileService.get_files_for_study(study_id)
files = UserFileService.get_files_for_study(study_id)
self.assertEqual(0, len(files))
self.assertEqual(False, IsFileUploaded.do_task(
IsFileUploaded, first_task, study_id, workflow.id, irb_code))
# Add a file
FileService.add_workflow_file(workflow_id=workflow.id,
task_spec_name=first_task.name,
name="filename.txt", content_type="text",
binary_data=b'1234', irb_doc_code=irb_code)
UserFileService.add_workflow_file(workflow_id=workflow.id,
task_spec_name=first_task.name,
name="filename.txt", content_type="text",
binary_data=b'1234', irb_doc_code=irb_code)
# Assert we have the file
self.assertEqual(True, IsFileUploaded.do_task(
IsFileUploaded, first_task, study_id, workflow.id, irb_code))

View File

@ -138,11 +138,3 @@ class TestWorkflowService(BaseTest):
result2 = WorkflowService.get_dot_value(path, {"a.b.c":"garbage"})
self.assertEqual("garbage", result2)
def test_get_primary_workflow(self):
workflow = self.create_workflow('hello_world')
workflow_spec_id = workflow.workflow_spec.id
primary_workflow = WorkflowService.get_primary_workflow(workflow_spec_id)
self.assertIsInstance(primary_workflow, FileModel)
self.assertEqual(workflow_spec_id, primary_workflow.workflow_spec_id)
self.assertEqual('hello_world.bpmn', primary_workflow.name)

View File

@ -1,9 +1,11 @@
import json
import os.path
from tests.base_test import BaseTest
from crc import session
from crc.models.file import FileModel
from crc.models.workflow import WorkflowSpecModel, WorkflowSpecModelSchema, WorkflowModel, WorkflowSpecCategoryModel, WorkflowSpecCategoryModelSchema
from crc.services.spec_file_service import SpecFileService
from example_data import ExampleDataLoader
@ -12,6 +14,7 @@ class TestWorkflowSpec(BaseTest):
def test_list_workflow_specifications(self):
self.load_example_data()
self.load_test_spec('random_fact')
spec = session.query(WorkflowSpecModel).first()
rv = self.app.get('/v1.0/workflow-specification',
follow_redirects=True,
@ -26,6 +29,7 @@ class TestWorkflowSpec(BaseTest):
def test_add_new_workflow_specification(self):
self.load_example_data()
self.load_test_spec('random_fact')
num_before = session.query(WorkflowSpecModel).count()
category_id = session.query(WorkflowSpecCategoryModel).first().id
category_count = session.query(WorkflowSpecModel).filter_by(category_id=category_id).count()
@ -47,6 +51,7 @@ class TestWorkflowSpec(BaseTest):
def test_get_workflow_specification(self):
self.load_example_data()
self.load_test_spec('random_fact')
db_spec = session.query(WorkflowSpecModel).first()
rv = self.app.get('/v1.0/workflow-specification/%s' % db_spec.id, headers=self.logged_in_headers())
self.assert_success(rv)
@ -56,7 +61,7 @@ class TestWorkflowSpec(BaseTest):
def test_update_workflow_specification(self):
self.load_example_data()
self.load_test_spec('random_fact')
category_id = 99
category = WorkflowSpecCategoryModel(id=category_id, display_name="It's a trap!", display_order=0)
session.add(category)
@ -88,11 +93,12 @@ class TestWorkflowSpec(BaseTest):
spec = self.load_test_spec(spec_id)
workflow = self.create_workflow(spec_id)
workflow_api = self.get_workflow_api(workflow)
workflow_path = SpecFileService.workflow_path(spec)
num_specs_before = session.query(WorkflowSpecModel).filter_by(id=spec_id).count()
self.assertEqual(num_specs_before, 1)
num_files_before = session.query(FileModel).filter_by(workflow_spec_id=spec_id).count()
num_files_before = len(SpecFileService.get_files(spec))
num_workflows_before = session.query(WorkflowModel).filter_by(workflow_spec_id=spec_id).count()
self.assertGreater(num_files_before + num_workflows_before, 0)
@ -102,13 +108,14 @@ class TestWorkflowSpec(BaseTest):
num_specs_after = session.query(WorkflowSpecModel).filter_by(id=spec_id).count()
self.assertEqual(0, num_specs_after)
# Make sure that all items in the database with the workflow spec ID are deleted as well.
num_files_after = session.query(FileModel).filter_by(workflow_spec_id=spec_id).count()
# Make sure that all items in the database and file system are deleted as well.
self.assertFalse(os.path.exists(workflow_path))
num_workflows_after = session.query(WorkflowModel).filter_by(workflow_spec_id=spec_id).count()
self.assertEqual(num_files_after + num_workflows_after, 0)
self.assertEqual(num_workflows_after, 0)
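Deleting a specification now means removing a directory tree as well as the database rows. Given the SYNC_FILE_ROOT setting (pointed at tests/SPECS in the test config), workflow_path and the file-system half of the delete could look like this; the exact join layout, e.g. whether a category directory sits in between, is an assumption:

import os
import shutil

SYNC_FILE_ROOT = 'tests/SPECS'  # matches the test configuration

def workflow_path(spec):
    # Directory holding every file for one workflow specification.
    return os.path.join(SYNC_FILE_ROOT, spec.id)

def delete_spec_files(spec):
    # Remove the whole tree; afterwards os.path.exists(workflow_path(spec)) is False.
    path = workflow_path(spec)
    if os.path.exists(path):
        shutil.rmtree(path)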
def test_display_order_after_delete_spec(self):
self.load_example_data()
self.load_test_spec('random_fact')
workflow_spec_category = session.query(WorkflowSpecCategoryModel).first()
spec_model_1 = WorkflowSpecModel(id='test_spec_1', display_name='Test Spec 1',
description='Test Spec 1 Description', category_id=workflow_spec_category.id,
@ -137,6 +144,7 @@ class TestWorkflowSpec(BaseTest):
def test_get_standalone_workflow_specs(self):
self.load_example_data()
self.load_test_spec('random_fact')
category = session.query(WorkflowSpecCategoryModel).first()
ExampleDataLoader().create_spec('hello_world', 'Hello World', category_id=category.id,
standalone=True, from_tests=True)
@ -177,6 +185,7 @@ class TestWorkflowSpec(BaseTest):
def test_update_workflow_spec_category(self):
self.load_example_data()
self.load_test_spec('random_fact')
category = session.query(WorkflowSpecCategoryModel).first()
display_name_before = category.display_name
new_display_name = display_name_before + '_asdf'
@ -214,7 +223,8 @@ class TestWorkflowSpec(BaseTest):
session.add(category_model_3)
session.commit()
self.app.delete('/v1.0/workflow-spec-category/2', headers=self.logged_in_headers())
rv = self.app.delete('/v1.0/workflow-specification-category/2', headers=self.logged_in_headers())
self.assert_success(rv)
test_order = 0
categories = session.query(WorkflowSpecCategoryModel).order_by(WorkflowSpecCategoryModel.display_order).all()
for test_category in categories:
@ -223,6 +233,7 @@ class TestWorkflowSpec(BaseTest):
def test_add_library_with_category_id(self):
self.load_example_data()
self.load_test_spec('random_fact')
category_id = session.query(WorkflowSpecCategoryModel).first().id
spec = WorkflowSpecModel(id='test_spec', display_name='Test Spec',
description='Library with a category id', category_id=category_id,

View File

@ -35,10 +35,9 @@ class TestWorkflowSpecCategoryReorder(BaseTest):
self.load_example_data()
self._load_test_categories()
initial_order = session.query(WorkflowSpecCategoryModel).order_by(WorkflowSpecCategoryModel.display_order).all()
self.assertEqual(0, initial_order[0].id)
self.assertEqual(1, initial_order[1].id)
self.assertEqual(2, initial_order[2].id)
self.assertEqual(3, initial_order[3].id)
self.assertEqual(1, initial_order[0].id)
self.assertEqual(2, initial_order[1].id)
self.assertEqual(3, initial_order[2].id)
def test_workflow_spec_category_reorder_up(self):
self.load_example_data()
@ -47,13 +46,13 @@ class TestWorkflowSpecCategoryReorder(BaseTest):
# Move category 2 up
rv = self.app.put(f"/v1.0/workflow-specification-category/2/reorder?direction=up",
headers=self.logged_in_headers())
self.assert_success(rv)
# Make sure category 2 is in position 1 now
self.assertEqual(2, rv.json[1]['id'])
self.assertEqual(2, rv.json[0]['id'])
ordered = session.query(WorkflowSpecCategoryModel).\
order_by(WorkflowSpecCategoryModel.display_order).all()
self.assertEqual(2, ordered[1].id)
self.assertEqual(2, ordered[0].id)
def test_workflow_spec_category_reorder_down(self):
self.load_example_data()
@ -64,11 +63,11 @@ class TestWorkflowSpecCategoryReorder(BaseTest):
headers=self.logged_in_headers())
# Make sure category 2 is in position 3 now
self.assertEqual(2, rv.json[3]['id'])
self.assertEqual(2, rv.json[2]['id'])
ordered = session.query(WorkflowSpecCategoryModel). \
order_by(WorkflowSpecCategoryModel.display_order).all()
self.assertEqual(2, ordered[3].id)
self.assertEqual(2, ordered[2].id)
def test_workflow_spec_category_reorder_bad_direction(self):
self.load_example_data()
@ -109,8 +108,8 @@ class TestWorkflowSpecCategoryReorder(BaseTest):
self._load_test_categories()
ordered = session.query(WorkflowSpecCategoryModel).order_by(WorkflowSpecCategoryModel.display_order).all()
# Try to move 0 up
rv = self.app.put(f"/v1.0/workflow-specification-category/0/reorder?direction=up",
# Try to move 1 up
rv = self.app.put(f"/v1.0/workflow-specification-category/1/reorder?direction=up",
headers=self.logged_in_headers())
# Make sure we don't get an error
self.assert_success(rv)
@ -140,28 +139,18 @@ class TestWorkflowSpecCategoryReorder(BaseTest):
# Confirm the bad display_orders
self.assertEqual('Test Category 1', bad_ordered[0].display_name)
self.assertEqual(1, bad_ordered[0].display_order)
self.assertEqual('Test Category', bad_ordered[1].display_name)
self.assertEqual('Test Category 2', bad_ordered[1].display_name)
self.assertEqual(1, bad_ordered[1].display_order)
self.assertEqual('Test Category 2', bad_ordered[2].display_name)
self.assertEqual('Test Category 3', bad_ordered[2].display_name)
self.assertEqual(1, bad_ordered[2].display_order)
self.assertEqual('Test Category 3', bad_ordered[3].display_name)
self.assertEqual(3, bad_ordered[3].display_order)
# Reorder 2 up
# Reorder 1 up
# This should cause a cleanup of the display_orders
# I don't know how Postgres/SQLAlchemy determine the order when
# multiple categories have the same display_order
# But, it ends up
# Test Category 1, Test Category, Test Category 2, Test Category 3
# So, after moving 2 up, we should end up with
# Test Category 1, Test Category 2, Test Category, Test Category 3
rv = self.app.put(f"/v1.0/workflow-specification-category/2/reorder?direction=up",
rv = self.app.put(f"/v1.0/workflow-specification-category/1/reorder?direction=up",
headers=self.logged_in_headers())
self.assertEqual('Test Category 1', rv.json[0]['display_name'])
self.assertEqual(0, rv.json[0]['display_order'])
self.assertEqual('Test Category 2', rv.json[1]['display_name'])
self.assertEqual(1, rv.json[1]['display_order'])
self.assertEqual('Test Category', rv.json[2]['display_name'])
self.assertEqual('Test Category 3', rv.json[2]['display_name'])
self.assertEqual(2, rv.json[2]['display_order'])
self.assertEqual('Test Category 3', rv.json[3]['display_name'])
self.assertEqual(3, rv.json[3]['display_order'])
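
The removed comments in this hunk call out a real pitfall: when several categories share the same display_order, Postgres and SQLAlchemy return them in an unspecified order. A minimal sketch of how a secondary sort key makes such queries deterministic (import paths follow the surrounding tests):

from crc import session
from crc.models.workflow import WorkflowSpecCategoryModel

# ORDER BY display_order alone is unstable when values collide;
# adding the primary key as a tie-break pins the result.
ordered = session.query(WorkflowSpecCategoryModel).\
    order_by(WorkflowSpecCategoryModel.display_order,
             WorkflowSpecCategoryModel.id).\
    all()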

View File

@@ -8,7 +8,10 @@ import json
class TestWorkflowSpecReorder(BaseTest):
def _load_sample_workflow_specs(self):
self.load_example_data()
self.load_test_spec('random_fact')
workflow_spec_category = session.query(WorkflowSpecCategoryModel).first()
spec_model_1 = WorkflowSpecModel(id='test_spec_1', display_name='Test Spec 1',
description='Test Spec 1 Description', category_id=workflow_spec_category.id,
@@ -34,7 +37,6 @@ class TestWorkflowSpecReorder(BaseTest):
return rv_1, rv_2, rv_3
def test_load_sample_workflow_specs(self):
self.load_example_data()
rv_1, rv_2, rv_3 = self._load_sample_workflow_specs()
self.assertEqual(1, rv_1.json['display_order'])
self.assertEqual('test_spec_1', rv_1.json['id'])
@@ -44,7 +46,6 @@ class TestWorkflowSpecReorder(BaseTest):
self.assertEqual('test_spec_3', rv_3.json['id'])
def test_workflow_spec_reorder_bad_direction(self):
self.load_example_data()
self._load_sample_workflow_specs()
rv = self.app.put(f"/v1.0/workflow-specification/test_spec_2/reorder?direction=asdf",
headers=self.logged_in_headers())
@@ -60,12 +61,11 @@ class TestWorkflowSpecReorder(BaseTest):
self.assertEqual('The spec_id 10 did not return a specification. Please check that it is valid.', rv.json['message'])
def test_workflow_spec_reorder_up(self):
self.load_example_data()
self._load_sample_workflow_specs()
rv_1, rv_2, rv_3 = self._load_sample_workflow_specs()
category_id = rv_1.json['category_id']
# Check what order is in the DB
ordered = session.query(WorkflowSpecModel).\
filter(WorkflowSpecModel.category_id == 0).\
filter(WorkflowSpecModel.category_id == category_id).\
order_by(WorkflowSpecModel.display_order).\
all()
self.assertEqual('test_spec_2', ordered[2].id)
@@ -80,19 +80,19 @@ class TestWorkflowSpecReorder(BaseTest):
# Check what new order is in the DB
reordered = session.query(WorkflowSpecModel).\
filter(WorkflowSpecModel.category_id == 0).\
filter(WorkflowSpecModel.category_id == category_id).\
order_by(WorkflowSpecModel.display_order).\
all()
self.assertEqual('test_spec_2', reordered[1].id)
print('test_workflow_spec_reorder_up')
def test_workflow_spec_reorder_down(self):
self.load_example_data()
self._load_sample_workflow_specs()
rv_1, rv_2, rv_3 = self._load_sample_workflow_specs()
category_id = rv_1.json['category_id']
# Check what order is in the DB
ordered = session.query(WorkflowSpecModel).\
filter(WorkflowSpecModel.category_id == 0).\
filter(WorkflowSpecModel.category_id == category_id).\
order_by(WorkflowSpecModel.display_order).\
all()
self.assertEqual('test_spec_2', ordered[2].id)
@@ -107,17 +107,17 @@ class TestWorkflowSpecReorder(BaseTest):
# Check what new order is in the DB
reordered = session.query(WorkflowSpecModel).\
filter(WorkflowSpecModel.category_id == 0).\
filter(WorkflowSpecModel.category_id == category_id).\
order_by(WorkflowSpecModel.display_order).\
all()
self.assertEqual('test_spec_2', reordered[3].id)
def test_workflow_spec_reorder_down_bad(self):
self.load_example_data()
self._load_sample_workflow_specs()
rv_1, rv_2, rv_3 = self._load_sample_workflow_specs()
category_id = rv_1.json['category_id']
ordered = session.query(WorkflowSpecModel).\
filter(WorkflowSpecModel.category_id == 0).\
filter(WorkflowSpecModel.category_id == category_id).\
order_by(WorkflowSpecModel.display_order).\
all()
@@ -129,16 +129,17 @@ class TestWorkflowSpecReorder(BaseTest):
# Make sure we get the original list back.
reordered = session.query(WorkflowSpecModel).\
filter(WorkflowSpecModel.category_id == 0).\
filter(WorkflowSpecModel.category_id == category_id).\
order_by(WorkflowSpecModel.display_order).\
all()
self.assertEqual(ordered, reordered)
def test_workflow_spec_reorder_bad_order(self):
self.load_example_data()
self._load_sample_workflow_specs()
rv_1, rv_2, rv_3 = self._load_sample_workflow_specs()
category_id = rv_1.json['category_id']
ordered = session.query(WorkflowSpecModel).\
filter(WorkflowSpecModel.category_id == 0).\
filter(WorkflowSpecModel.category_id == category_id).\
order_by(WorkflowSpecModel.display_order).\
all()
@@ -155,7 +156,7 @@ class TestWorkflowSpecReorder(BaseTest):
session.commit()
bad_orders = session.query(WorkflowSpecModel).\
filter(WorkflowSpecModel.category_id == 0).\
filter(WorkflowSpecModel.category_id == category_id).\
order_by(WorkflowSpecModel.display_order).\
all()
# Not sure how Postgres chooses an order
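
The same ordering caveat applies here. These tests also depend on the reorder endpoint cleaning up bad display_order values; a hypothetical sketch of that cleanup (the function name and placement are assumptions, not the actual service API):

from crc import session
from crc.models.workflow import WorkflowSpecModel

def cleanup_display_order(category_id):
    # Renumber a category's specs so display_order is contiguous
    # (0..n-1) again, using the primary key to break ties.
    specs = session.query(WorkflowSpecModel).\
        filter(WorkflowSpecModel.category_id == category_id).\
        order_by(WorkflowSpecModel.display_order, WorkflowSpecModel.id).\
        all()
    for index, spec in enumerate(specs):
        spec.display_order = index
    session.commit()
    return specs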

View File

@@ -126,6 +126,7 @@ class TestWorkflowSpecValidation(BaseTest):
"""A disabled workflow spec should fail validation"""
app.config['PB_ENABLED'] = True
self.load_example_data()
category = self.assure_category_exists()
study_model = session.query(StudyModel).first()
# workflow spec to validate
@@ -133,7 +134,7 @@ class TestWorkflowSpecValidation(BaseTest):
display_name='Data Security Plan',
description='Data Security Plan',
is_master_spec=False,
category_id=0,
category_id=category.id,
display_order=0,
standalone=False,
library=False)
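
This fix replaces the hard-coded category_id=0 with an id looked up at runtime through assure_category_exists(). A minimal sketch of what such a helper might look like (the real BaseTest implementation may differ):

from crc import session
from crc.models.workflow import WorkflowSpecCategoryModel

def assure_category_exists(self):
    # Return an existing category, creating one if the table is empty,
    # so tests never depend on a fixed primary key value.
    category = session.query(WorkflowSpecCategoryModel).first()
    if category is None:
        category = WorkflowSpecCategoryModel(display_name='Test Category',
                                             display_order=0)
        session.add(category)
        session.commit()
    return category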

View File

@@ -1,20 +0,0 @@
import json
import unittest
from tests.base_test import BaseTest
from crc.models.workflow import WorkflowSpecCategoryModel, WorkflowSpecModel
from crc.services.workflow_sync import WorkflowSyncService
from crc import db
class TestWorkflowSync(BaseTest):
def test_clear_data(self):
self.load_example_data()
self.assertFalse(db.session.query(WorkflowSpecCategoryModel).count() == 0)
self.assertFalse(db.session.query(WorkflowSpecModel).count() == 0)
WorkflowSyncService.clear_database()
self.assertTrue(db.session.query(WorkflowSpecCategoryModel).count() == 0)
self.assertTrue(db.session.query(WorkflowSpecModel).count() == 0)
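
The deleted test above exercised WorkflowSyncService.clear_database(). A sketch of the behavior it asserted, under the assumption that the method simply empties the spec tables in dependency order (this is not the service's actual code):

from crc import db
from crc.models.workflow import WorkflowSpecCategoryModel, WorkflowSpecModel

# Specs reference categories, so delete specs first to satisfy the
# foreign key, then remove the categories themselves.
db.session.query(WorkflowSpecModel).delete()
db.session.query(WorkflowSpecCategoryModel).delete()
db.session.commit()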