When retrieving a study, only update the status of the underlying workflows if specifically requested.

Record the size of a file in the database for quick access (this helps with a frontend refactor, so it isn't downloading the file just to see its size). Clean up the timing/performance metric reporting to make it easier to read. Fix a bug that prevented non-admins from getting the document directory.
commit a719cf4bf9
parent 71a63c049d
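For context, a minimal sketch of how a client might opt into the new behavior. The /study/{study_id} path, base URL, and auth header are assumptions for illustration; only the update_status query parameter and the crc.api.study.get_study operation are confirmed by the diff below.

    import requests

    BASE_URL = "http://localhost:5000/v1.0"        # assumed dev server root
    HEADERS = {"Authorization": "Bearer <token>"}  # placeholder token

    # Default after this commit: a lightweight fetch that does NOT refresh
    # workflow statuses or sync with Protocol Builder.
    study = requests.get(f"{BASE_URL}/study/42", headers=HEADERS).json()

    # Opt in to the expensive sync by passing the new update_status flag.
    study = requests.get(
        f"{BASE_URL}/study/42",
        params={"update_status": "true"},
        headers=HEADERS,
    ).json()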
@@ -83,6 +83,8 @@ paths:
           type : integer
     get:
       operationId: crc.api.file.get_document_directory
+      security:
+        - auth_admin: ['secret']
       summary: Returns a directory of all files for study in a nested structure
       tags:
         - Document Categories

@@ -349,6 +351,12 @@ paths:
         schema:
           type: integer
           format: int32
+      - name: update_status
+        in: query
+        required: false
+        description: If set to true, will synch the study with protocol builder and assure the status of all workflows is up to date (expensive).
+        schema:
+          type: boolean
     get:
       operationId: crc.api.study.get_study
       summary: Provides a single study

@@ -74,8 +74,8 @@ def update_study(study_id, body):
     return StudySchema().dump(study)


-def get_study(study_id):
-    study = StudyService.get_study(study_id)
+def get_study(study_id, update_status=False):
+    study = StudyService.get_study(study_id, do_status=update_status)
     if (study is None):
         raise ApiError("unknown_study", 'The study "' + study_id + '" is not recognized.', status_code=404)
     return StudySchema().dump(study)

@@ -1,7 +1,7 @@
 import enum
 from typing import cast

-from marshmallow import INCLUDE, EXCLUDE
+from marshmallow import INCLUDE, EXCLUDE, fields, Schema
 from marshmallow_enum import EnumField
 from marshmallow_sqlalchemy import SQLAlchemyAutoSchema
 from sqlalchemy import func, Index

@@ -65,11 +65,13 @@ class FileDataModel(db.Model):
     md5_hash = db.Column(UUID(as_uuid=True), unique=False, nullable=False)
     data = deferred(db.Column(db.LargeBinary)) # Don't load it unless you have to.
     version = db.Column(db.Integer, default=0)
+    size = db.Column(db.Integer, default=0)
     date_created = db.Column(db.DateTime(timezone=True), default=func.now())
     file_model_id = db.Column(db.Integer, db.ForeignKey('file.id'))
     file_model = db.relationship("FileModel", foreign_keys=[file_model_id])

+

 class FileModel(db.Model):
     __tablename__ = 'file'
     id = db.Column(db.Integer, primary_key=True)

@@ -117,11 +119,13 @@ class File(object):
         if data_model:
             instance.last_modified = data_model.date_created
             instance.latest_version = data_model.version
+            instance.size = data_model.size
         else:
             instance.last_modified = None
             instance.latest_version = None
         return instance

+

 class FileModelSchema(SQLAlchemyAutoSchema):
     class Meta:
         model = FileModel

@@ -132,17 +136,19 @@ class FileModelSchema(SQLAlchemyAutoSchema):
     type = EnumField(FileType)


-class FileSchema(ma.Schema):
+class FileSchema(Schema):
     class Meta:
         model = File
         fields = ["id", "name", "is_status", "is_reference", "content_type",
                   "primary", "primary_process_id", "workflow_spec_id", "workflow_id",
                   "irb_doc_code", "last_modified", "latest_version", "type", "categories",
-                  "description", "category", "description", "download_name"]
+                  "description", "category", "description", "download_name", "size"]

         unknown = INCLUDE
     type = EnumField(FileType)

+
+
 class LookupFileModel(db.Model):
     """Gives us a quick way to tell what kind of lookup is set on a form field.
     Connected to the file data model, so that if a new version of the same file is

@@ -10,7 +10,7 @@ def firsttime():

 def sincetime(txt,lasttime):
     thistime=firsttime()
-    print('%s runtime was %2f'%(txt,thistime-lasttime))
+    print('%2.4f sec | %s' % (thistime-lasttime, txt))
     return thistime

 def timeit(f):

@@ -20,7 +20,7 @@ def timeit(f):
         ts = time.time()
         result = f(*args, **kw)
         te = time.time()
-        print('func:%r args:[%r, %r] took: %2.4f sec' % (f.__name__, args, kw, te-ts))
+        print('%2.4f sec | func:%r args:[%r, %r] ' % (te-ts, f.__name__, args, kw))
        return result

    return timed

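For illustration, both timing helpers now print the elapsed time first, so mixed metric lines align in a column. With hypothetical values, the output would look like:

    0.0042 sec | func:'execute' args:[(), {}]
    0.0010 sec | loading study
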
@@ -175,6 +175,8 @@ class FileService(object):
             order_by(desc(FileDataModel.date_created)).first()

         md5_checksum = UUID(hashlib.md5(binary_data).hexdigest())
+        size = len(binary_data)
+
         if (latest_data_model is not None) and (md5_checksum == latest_data_model.md5_hash):
             # This file does not need to be updated, it's the same file. If it is arhived,
             # then de-arvhive it.

@@ -210,7 +212,8 @@ class FileService(object):

         new_file_data_model = FileDataModel(
             data=binary_data, file_model_id=file_model.id, file_model=file_model,
-            version=version, md5_hash=md5_checksum, date_created=datetime.now()
+            version=version, md5_hash=md5_checksum, date_created=datetime.now(),
+            size=size
         )
         session.add_all([file_model, new_file_data_model])
         session.commit()

@@ -53,7 +53,7 @@ class StudyService(object):
         return studies

     @staticmethod
-    def get_study(study_id, study_model: StudyModel = None, do_status=True):
+    def get_study(study_id, study_model: StudyModel = None, do_status=False):
         """Returns a study model that contains all the workflows organized by category.
         IMPORTANT: This is intended to be a lightweight call, it should never involve
         loading up and executing all the workflows in a study to calculate information."""

@@ -42,7 +42,7 @@ class CustomBpmnScriptEngine(BpmnScriptEngine):
         """
         return self.evaluate_expression(task, expression)

-
+    @timeit
     def execute(self, task: SpiffTask, script, data):

         study_id = task.workflow.data[WorkflowProcessor.STUDY_ID_KEY]

@@ -0,0 +1,28 @@
+"""empty message
+
+Revision ID: 62910318009f
+Revises: 665624ac29f1
+Create Date: 2021-04-28 14:09:57.648732
+
+"""
+from alembic import op
+import sqlalchemy as sa
+
+
+# revision identifiers, used by Alembic.
+revision = '62910318009f'
+down_revision = '665624ac29f1'
+branch_labels = None
+depends_on = None
+
+
+def upgrade():
+    # ### commands auto generated by Alembic - please adjust! ###
+    op.add_column('file_data', sa.Column('size', sa.Integer(), nullable=True))
+    # ### end Alembic commands ###
+
+
+def downgrade():
+    # ### commands auto generated by Alembic - please adjust! ###
+    op.drop_column('file_data', 'size')
+    # ### end Alembic commands ###

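An aside on applying the new revision, assuming migrations are run with plain Alembic (or through Flask-Migrate, if the app wires Alembic that way):

    alembic upgrade head   # or: flask db upgrade
    # Adds the nullable file_data.size column; existing rows keep size NULL
    # until new file data is written (FileService sets size=len(binary_data)).
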
@@ -72,7 +72,7 @@ class TestFileService(BaseTest):
         file_data = FileService.get_workflow_data_files(workflow_id=workflow.id)
         self.assertEqual(1, len(file_data))
         self.assertEqual(2, file_data[0].version)
-
+        self.assertEquals(4, file_data[0].size) # File dat size is included.

     def test_add_file_from_form_increments_version_and_replaces_on_subsequent_add_with_same_name(self):
         self.load_example_data()

@@ -181,11 +181,11 @@ class TestFilesApi(BaseTest):
         data['file'] = io.BytesIO(self.minimal_bpmn("abcdef")), 'my_new_file.bpmn'
         rv = self.app.post('/v1.0/file?workflow_spec_id=%s' % spec.id, data=data, follow_redirects=True,
                            content_type='multipart/form-data', headers=self.logged_in_headers())
-        json_data = json.loads(rv.get_data(as_text=True))
-        file = FileModelSchema().load(json_data, session=session)
+        file_json = json.loads(rv.get_data(as_text=True))
+        self.assertEquals(80, file_json['size'])

         data['file'] = io.BytesIO(self.minimal_bpmn("efghijk")), 'my_new_file.bpmn'
-        rv = self.app.put('/v1.0/file/%i/data' % file.id, data=data, follow_redirects=True,
+        rv = self.app.put('/v1.0/file/%i/data' % file_json['id'], data=data, follow_redirects=True,
                           content_type='multipart/form-data', headers=self.logged_in_headers())
         self.assert_success(rv)
         self.assertIsNotNone(rv.get_data())

@@ -193,14 +193,14 @@ class TestFilesApi(BaseTest):
         self.assertEqual(2, file_json['latest_version'])
         self.assertEqual(FileType.bpmn.value, file_json['type'])
         self.assertEqual("application/octet-stream", file_json['content_type'])
-        self.assertEqual(spec.id, file.workflow_spec_id)
+        self.assertEqual(spec.id, file_json['workflow_spec_id'])

         # Assure it is updated in the database and properly persisted.
-        file_model = session.query(FileModel).filter(FileModel.id == file.id).first()
+        file_model = session.query(FileModel).filter(FileModel.id == file_json['id']).first()
         file_data = FileService.get_file_data(file_model.id)
         self.assertEqual(2, file_data.version)

-        rv = self.app.get('/v1.0/file/%i/data' % file.id, headers=self.logged_in_headers())
+        rv = self.app.get('/v1.0/file/%i/data' % file_json['id'], headers=self.logged_in_headers())
         self.assert_success(rv)
         data = rv.get_data()
         self.assertIsNotNone(data)