Mirror of https://github.com/sartography/cr-connect-workflow.git, synced 2025-02-23 05:08:32 +00:00
Merge branch 'dev' into chore/fix_validation_errors
commit 90bfbed6fd
60  crc/api.yml
@@ -284,6 +284,36 @@ paths:
                 type: array
                 items:
                   $ref: "#/components/schemas/TaskLog"
+  /study/{study_id}/log/download:
+    parameters:
+      - name: study_id
+        in: path
+        required: true
+        description: The id of the study for which logs should be returned.
+        schema:
+          type: integer
+          format: int32
+      - name: auth_token
+        in: query
+        required: true
+        description: User Auth Token
+        schema:
+          type: string
+    get:
+      operationId: crc.api.study.download_logs_for_study
+      summary: Returns a spreadsheet file of logged events that occurred within a study
+      security: []  # Will verify manually with provided Auth Token.
+      tags:
+        - Studies
+      responses:
+        '200':
+          description: Returns the spreadsheet file of logged events
+          content:
+            application/octet-stream:
+              schema:
+                type: string
+                format: binary
+
   /workflow-specification:
     get:
       operationId: crc.api.workflow.all_specifications
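For orientation, a sketch of a client download against this new endpoint. BASE_URL, STUDY_ID, and TOKEN are placeholders, and requests is just one convenient HTTP client; nothing here is part of the commit itself:

    import requests

    BASE_URL = 'https://example.org/v1.0'   # placeholder deployment URL
    STUDY_ID = 42                           # placeholder study id
    TOKEN = 'token-from-login'              # placeholder; see the download_url generation in task_log.py below

    resp = requests.get(f'{BASE_URL}/study/{STUDY_ID}/log/download', params={'auth_token': TOKEN})
    resp.raise_for_status()
    with open('logs.xlsx', 'wb') as f:
        f.write(resp.content)               # served as application/octet-stream per the spec above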
@@ -1202,34 +1232,6 @@ paths:
             application/json:
               schema:
                 $ref: "#/components/schemas/Workflow"
-  /workflow/{workflow_id}/log:
-    parameters:
-      - name: workflow_id
-        in: path
-        required: true
-        description: The id of the workflow for which logs should be returned.
-        schema:
-          type: integer
-          format: int32
-    put:
-      operationId: crc.api.workflow.get_logs_for_workflow
-      summary: Provides a paginated list of logged events that occurred within a study.
-      tags:
-        - Workflows and Tasks
-      requestBody:
-        description: Log Pagination Request
-        required: true
-        content:
-          application/json:
-            schema:
-              $ref: "#/components/schemas/PaginatedTaskLog"
-      responses:
-        '200':
-          description: list of logs - events that have occurred within a specific workflow.
-          content:
-            application/json:
-              schema:
-                $ref: "#/components/schemas/PaginatedTaskLog"
   /workflow/{workflow_id}/task/{task_id}/data:
     parameters:
       - name: workflow_id
@@ -2117,7 +2119,7 @@ components:
         has_next:
           type: boolean
          example: true
-        has_previous:
+        has_prev:
          type: boolean
          example: false
     TaskLog:
crc/api/study.py
@@ -1,20 +1,22 @@
 from datetime import datetime

-from flask import g
+from flask import g, send_file
 from sqlalchemy.exc import IntegrityError

 from crc import session
 from crc.api.common import ApiError, ApiErrorSchema
 from crc.models.protocol_builder import ProtocolBuilderStatus
-from crc.models.study import Study, StudyEvent, StudyEventType, StudyModel, StudySchema, StudyForUpdateSchema, \
+from crc.models.study import Study, StudyEventType, StudyModel, StudySchema, StudyForUpdateSchema, \
     StudyStatus, StudyAssociatedSchema
-from crc.models.task_log import TaskLogModelSchema, TaskLogQuery, TaskLogQuerySchema
+from crc.models.task_log import TaskLogQuery, TaskLogQuerySchema
+from crc.services.spreadsheet_service import SpreadsheetService
 from crc.services.study_service import StudyService
 from crc.services.task_logging_service import TaskLoggingService
 from crc.services.user_service import UserService
 from crc.services.workflow_processor import WorkflowProcessor
 from crc.services.workflow_service import WorkflowService
 from crc.services.workflow_spec_service import WorkflowSpecService
+from crc.api.user import verify_token
+
+import io


 def add_study(body):
@@ -115,8 +117,28 @@ def get_study_associates(study_id):

 def get_logs_for_study(study_id, body):
     task_log_query = TaskLogQuery(**body)
     task_log_query.study_id = study_id  # Force the study id
     return TaskLogQuerySchema().dump(
-        TaskLoggingService.get_logs_for_study(study_id, task_log_query))
+        TaskLoggingService.get_logs_for_study_paginated(study_id, task_log_query))
+
+
+def download_logs_for_study(study_id, auth_token):
+    # Download links incorporate an auth token in the request for direct download
+    if not verify_token(auth_token):
+        raise ApiError('not_authenticated', 'You need to include an authorization token in the URL with this request.')
+
+    title = f'Study {study_id}'
+    logs, headers = TaskLoggingService.get_log_data_for_download(study_id)
+    spreadsheet = SpreadsheetService.create_spreadsheet(logs, headers, title)
+
+    return send_file(
+        io.BytesIO(spreadsheet),
+        attachment_filename='logs.xlsx',
+        mimetype='xlsx',
+        cache_timeout=-1,  # Don't cache these files on the browser.
+        last_modified=datetime.now(),
+        as_attachment=True
+    )


 def delete_study(study_id):
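A porting note, not part of the commit: Flask 2.0 renamed some send_file keywords (attachment_filename became download_name, cache_timeout became max_age). A sketch of the equivalent call on newer Flask, assuming the same spreadsheet bytes:

    return send_file(
        io.BytesIO(spreadsheet),
        download_name='logs.xlsx',   # was attachment_filename
        mimetype='application/vnd.openxmlformats-officedocument.spreadsheetml.sheet',  # registered .xlsx type
        max_age=0,                   # was cache_timeout; 0 discourages browser caching
        last_modified=datetime.now(),
        as_attachment=True
    )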
crc/api/workflow.py
@@ -422,9 +422,3 @@ def _verify_user_and_role(processor, spiff_task):
         raise ApiError.from_task("permission_denied",
                                  f"This task must be completed by '{allowed_users}', "
                                  f"but you are {user.uid}", spiff_task)
-
-
-def get_logs_for_workflow(workflow_id, body):
-    task_log_query = TaskLogQuery(**body)
-    return TaskLogQuerySchema().dump(
-        TaskLoggingService.get_logs_for_workflow(workflow_id, task_log_query))
crc/models/task_log.py
@@ -1,10 +1,15 @@
 import enum
+import urllib

+import flask
 import marshmallow
+from flask import url_for
+from marshmallow.fields import Method

 from crc import db, ma
 from crc.models.study import StudyModel
 from crc.models.workflow import WorkflowModel
+from crc.services.workflow_spec_service import WorkflowSpecService
 from sqlalchemy import func
@@ -31,6 +36,7 @@ class TaskLogModel(db.Model):
     user_uid = db.Column(db.String)
     study_id = db.Column(db.Integer, db.ForeignKey(StudyModel.id), nullable=False)
     workflow_id = db.Column(db.Integer, db.ForeignKey(WorkflowModel.id), nullable=False)
+    workflow_spec_id = db.Column(db.String)
     task = db.Column(db.String)
     timestamp = db.Column(db.DateTime(timezone=True), server_default=func.now())
@@ -38,15 +44,32 @@ class TaskLogModel(db.Model):
 class TaskLogModelSchema(ma.Schema):
     class Meta:
         model = TaskLogModel
-        fields = ["id", "level", "code", "message", "study_id", "workflow_id", "user_uid", "timestamp"]
+        fields = ["id", "level", "code", "message", "study_id", "workflow", "workflow_id",
+                  "workflow_spec_id", "category", "user_uid", "timestamp"]
+    category = marshmallow.fields.Method('get_category')
+    workflow = marshmallow.fields.Method('get_workflow')
+
+    @staticmethod
+    def get_category(obj):
+        if hasattr(obj, 'workflow_spec_id') and obj.workflow_spec_id is not None:
+            workflow_spec = WorkflowSpecService().get_spec(obj.workflow_spec_id)
+            category = WorkflowSpecService().get_category(workflow_spec.category_id)
+            return category.display_name
+
+    @staticmethod
+    def get_workflow(obj):
+        if hasattr(obj, 'workflow_spec_id') and obj.workflow_spec_id is not None:
+            workflow_spec = WorkflowSpecService().get_spec(obj.workflow_spec_id)
+            return workflow_spec.display_name


 class TaskLogQuery:
     """Encapsulates the paginated queries and results when retrieving and filtering task logs over the
     API"""
-    def __init__(self, code="", level="", user="", page=1, per_page=10,
+    def __init__(self, study_id=None, code="", level="", user="", page=0, per_page=10,
                  sort_column=None, sort_reverse=False, items=None,
-                 pages=0, total=0, has_next=False, has_prev=False):
+                 pages=0, total=0, has_next=False, has_prev=False, download_url=None):
+        self.study_id = study_id  # Filter on Study.
         self.code = code  # Filter on code.
         self.level = level  # Filter on level.
         self.user = user  # Filter on user.
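To see what the enriched schema emits, a small sketch with made-up values (the category and workflow names are resolved through WorkflowSpecService, so this assumes a spec with that id exists):

    log = TaskLogModel(level='metrics', code='test_code', message='hello',
                       study_id=1, workflow_id=2, workflow_spec_id='empty_workflow')
    TaskLogModelSchema().dump(log)
    # -> {'level': 'metrics', ..., 'workflow': <spec display name>, 'category': <category display name>}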
@@ -59,21 +82,36 @@ class TaskLogQuery:
         self.pages = pages
         self.has_next = False
         self.has_prev = False
+        self.download_url = None

     def update_from_sqlalchemy_paginator(self, paginator):
         """Updates this with results that are returned from the paginator"""
         self.items = paginator.items
-        self.page = paginator.page
+        self.page = paginator.page - 1
         self.per_page = paginator.per_page
         self.pages = paginator.pages
         self.has_next = paginator.has_next
         self.has_prev = paginator.has_prev
         self.total = paginator.total


 class TaskLogQuerySchema(ma.Schema):
     class Meta:
         model = TaskLogModel
         fields = ["code", "level", "user",
                   "page", "per_page", "sort_column", "sort_reverse", "items", "pages", "total",
-                  "has_next", "has_prev"]
+                  "has_next", "has_prev", "download_url"]
     items = marshmallow.fields.List(marshmallow.fields.Nested(TaskLogModelSchema))
+    download_url = Method("get_url")
+
+    def get_url(self, obj):
+        token = 'not_available'
+        if hasattr(obj, 'study_id') and obj.study_id is not None:
+            file_url = url_for("/v1_0.crc_api_study_download_logs_for_study", study_id=obj.study_id, _external=True)
+            if hasattr(flask.g, 'user'):
+                token = flask.g.user.encode_auth_token()
+            url = file_url + '?auth_token=' + urllib.parse.quote_plus(token)
+            return url
+        else:
+            return ""
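Taken together, a sketch of how these pieces compose at the API layer (hypothetical study id; a request context is needed so url_for and flask.g resolve):

    query = TaskLogQuery(study_id=42, level='metrics', per_page=5)        # page defaults to 0 (0-based)
    result = TaskLoggingService.get_logs_for_study_paginated(42, query)   # fills items, pages, has_next...
    payload = TaskLogQuerySchema().dump(result)                           # adds the tokenized download_url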
@@ -13,7 +13,7 @@ class GetLogsByWorkflow(Script):
     """

     def do_task_validate_only(self, task, study_id, workflow_id, *args, **kwargs):
-        log_model = TaskLogModel(level='info',
+        log_model = TaskLogModel(level='metrics',
                                  code='mocked_code',
                                  message='This is my logging message',
                                  study_id=study_id,
@@ -22,17 +22,21 @@ class GetLogsByWorkflow(Script):
         return TaskLogModelSchema(many=True).dump([log_model])

     def do_task(self, task, study_id, workflow_id, *args, **kwargs):
+        level = None
         code = None
-        size = 10
+        size = None
+        if 'level' in kwargs:
+            level = kwargs['level']
+        elif len(args) > 0:
+            level = args[0]
         if 'code' in kwargs:
             code = kwargs['code']
-        elif len(args) > 0:
-            code = args[0]
+        elif len(args) > 1:
+            code = args[1]
         if 'size' in kwargs:
             size = kwargs['size']
-        elif len(args) > 1:
-            size = args[1]
+        elif len(args) > 2:
+            size = args[2]

-        query = TaskLogQuery(code=code, per_page=size)
-        log_models = TaskLoggingService.get_logs_for_study(study_id, query).items
+        log_models = TaskLoggingService().get_logs_for_study(study_id, level, code, size)
         return TaskLogModelSchema(many=True).dump(log_models)
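Assuming this script is registered under the study-level name the BPMN fixtures below use, a script-task call with the optional filters would look like:

    study_logs = get_logs_for_study(level='metrics', code='test_code', size=10)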
@@ -5,6 +5,25 @@ from crc.services.task_logging_service import TaskLoggingService


 class GetLogsByWorkflow(Script):
+    @staticmethod
+    def get_parameters(args, kwargs):
+        code = None
+        level = None
+        size = None
+        if 'level' in kwargs:
+            level = kwargs['level']
+        elif len(args) > 0:
+            level = args[0]
+        if 'code' in kwargs:
+            code = kwargs['code']
+        elif len(args) > 1:
+            code = args[1]
+        if 'size' in kwargs:
+            size = kwargs['size']
+        elif len(args) > 2:
+            size = args[2]
+
+        return level, code, size
+
     def get_description(self):
         return """Script to retrieve logs for the current workflow.
@@ -22,17 +41,8 @@ class GetLogsByWorkflow(Script):
         TaskLogModelSchema(many=True).dump([log_model])

     def do_task(self, task, study_id, workflow_id, *args, **kwargs):
-        code = None
-        size = 10
-        if 'code' in kwargs:
-            code = kwargs['code']
-        elif len(args) > 0:
-            code = args[0]
-        if 'size' in kwargs:
-            size = kwargs['size']
-        elif len(args) > 1:
-            size = args[1]
-
-        query = TaskLogQuery(code=code, per_page=size)
-        log_models = TaskLoggingService.get_logs_for_workflow(workflow_id, query).items
+        level, code, size = self.get_parameters(args, kwargs)
+        log_models = TaskLoggingService().get_logs_for_workflow(workflow_id=workflow_id, level=level, code=code, size=size)
+        # query = TaskLogQuery(code=code, per_page=size)
+        # log_models = TaskLoggingService.get_logs_for_workflow(workflow_id, query).items
         return TaskLogModelSchema(many=True).dump(log_models)
28  crc/services/spreadsheet_service.py  Normal file
@@ -0,0 +1,28 @@
+from openpyxl import Workbook
+from tempfile import NamedTemporaryFile
+
+from typing import List
+
+
+class SpreadsheetService(object):
+
+    @staticmethod
+    def create_spreadsheet(data: List[dict], headers: List[str] = None, title: str = None):
+        """The length of headers must be the same as the number of items in the dictionaries,
+        and the order must match up.
+        The title is used for the worksheet, not the filename."""
+
+        wb = Workbook(write_only=True)
+        ws = wb.create_sheet()
+        if title:
+            ws.title = title
+        if headers:
+            ws.append(headers)
+        for row in data:
+            ws.append(list(row.values()))
+
+        with NamedTemporaryFile() as tmp:
+            wb.save(tmp.name)
+            tmp.seek(0)
+            stream = tmp.read()
+        return stream
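A quick usage sketch with made-up rows; each dict must list its values in the same order as headers:

    rows = [{'category': 'General', 'message': 'Approved'},
            {'category': 'General', 'message': 'Deferred'}]
    xlsx_bytes = SpreadsheetService.create_spreadsheet(rows, headers=['Category', 'Message'], title='Study 42')
    with open('logs.xlsx', 'wb') as f:
        f.write(xlsx_bytes)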
crc/services/task_logging_service.py
@@ -1,22 +1,12 @@
 import markdown
 import re
-
-from flask import render_template
-from flask_mail import Message
-from jinja2 import Template
-
-from crc import app, db, mail, session
-from crc.api.common import ApiError
-
-from crc.models.email import EmailModel
-from crc.models.file import FileDataModel
-from crc.models.study import StudyModel
-from crc.models.task_log import TaskLogModel, TaskLogLevels, TaskLogQuery
-from crc.models.user import UserModel
-
-from crc.services.jinja_service import JinjaService
-from crc.services.user_service import UserService
+from crc import app, session
+from crc.api.common import ApiError
+from crc.models.task_log import TaskLogModel, TaskLogLevels, TaskLogQuery, TaskLogModelSchema
+from crc.services.user_service import UserService
 from sqlalchemy import desc
+import dateparser
+import pytz


 class TaskLoggingService(object):
@@ -47,14 +37,35 @@ class TaskLoggingService(object):
         session.commit()
         return log_model

-    @staticmethod
-    def get_logs_for_workflow(workflow_id, tq: TaskLogQuery):
-        """ Returns an updated TaskLogQuery, with items in reverse chronological order by default. """
-        query = session.query(TaskLogModel).filter(TaskLogModel.workflow_id == workflow_id)
-        return TaskLoggingService.__paginate(query, tq)
+    def get_logs_for_workflow(self, workflow_id: int, level: str = None, code: str = None, size: int = None):
+        logs = self.get_logs(workflow_id=workflow_id, level=level, code=code, size=size)
+        return logs
+
+    def get_logs_for_study(self, study_id: int, level: str = None, code: str = None, size: int = None):
+        logs = self.get_logs(study_id=study_id, level=level, code=code, size=size)
+        return logs

     @staticmethod
-    def get_logs_for_study(study_id, tq: TaskLogQuery):
+    def get_logs(study_id: int = None, workflow_id: int = None, level: str = None, code: str = None, size: int = None):
+        """We should almost always get a study_id or a workflow_id.
+        In *very* rare circumstances, an admin may want all the logs.
+        This could be a *lot* of logs."""
+        query = session.query(TaskLogModel)
+        if study_id:
+            query = query.filter(TaskLogModel.study_id == study_id)
+        if workflow_id:
+            query = query.filter(TaskLogModel.workflow_id == workflow_id)
+        if level:
+            query = query.filter(TaskLogModel.level == level)
+        if code:
+            query = query.filter(TaskLogModel.code == code)
+        if size:
+            query = query.limit(size)
+        logs = query.all()
+        return logs
+
+    @staticmethod
+    def get_logs_for_study_paginated(study_id, tq: TaskLogQuery):
         """ Returns an updated TaskLogQuery, with items in reverse chronological order by default. """
         query = session.query(TaskLogModel).filter(TaskLogModel.study_id == study_id)
         return TaskLoggingService.__paginate(query, tq)
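A sketch of the new filter-based calls (hypothetical ids); note that get_logs_for_study and get_logs_for_workflow are now instance methods, while the paginated variant stays static:

    service = TaskLoggingService()
    metrics = service.get_logs_for_study(study_id=42, level='metrics', size=25)
    wf_logs = service.get_logs_for_workflow(workflow_id=7, code='debug_code')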
@@ -78,7 +89,39 @@ class TaskLoggingService(object):
             sort_column = desc(task_log_query.sort_column)
         else:
             sort_column = task_log_query.sort_column
-        paginator = sql_query.order_by(sort_column).paginate(task_log_query.page, task_log_query.per_page,
+        paginator = sql_query.order_by(sort_column).paginate(task_log_query.page + 1, task_log_query.per_page,
                                                              error_out=False)
         task_log_query.update_from_sqlalchemy_paginator(paginator)
         return task_log_query

+    @staticmethod
+    def get_log_data_for_download(study_id):
+        # Admins can download the metrics logs for a study as an Excel file
+        # We only use a subset of the fields
+        logs = []
+        headers = []
+        result = session.query(TaskLogModel).\
+            filter(TaskLogModel.study_id == study_id).\
+            filter(TaskLogModel.level == 'metrics').\
+            all()
+        schemas = TaskLogModelSchema(many=True).dump(result)
+        # We only use these fields
+        fields = ['category', 'workflow', 'level', 'code', 'message', 'user_uid', 'timestamp', 'workflow_id', 'workflow_spec_id']
+        for schema in schemas:
+            # Build a dictionary using the items in fields
+            log = {}
+            for field in fields:
+                if field == 'timestamp':
+                    # Excel doesn't accept timezones,
+                    # so we return a local datetime without the timezone
+                    # TODO: detect the local timezone with something like dateutil.tz.tzlocal()
+                    parsed_timestamp = dateparser.parse(str(schema['timestamp']))
+                    localtime = parsed_timestamp.astimezone(pytz.timezone('US/Eastern'))
+                    log[field] = localtime.strftime('%Y-%m-%d %H:%M:%S')
+                else:
+                    log[field] = schema[field]
+                if field.capitalize() not in headers:
+                    headers.append(field.capitalize())
+            logs.append(log)
+
+        return logs, headers
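The page arithmetic above is the subtle part: TaskLogQuery speaks 0-based pages while Flask-SQLAlchemy's paginator is 1-based, hence the +1 going in and the -1 in update_from_sqlalchemy_paginator. A toy trace with a hypothetical query:

    tq = TaskLogQuery(page=0, per_page=5)   # first page, as the front end sends it
    # __paginate calls paginate(tq.page + 1, ...) == paginate(1, ...), the paginator's first page
    # update_from_sqlalchemy_paginator then stores paginator.page - 1 == 0 back on tq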
24  migrations/versions/28752ce0775c_merge_conflicting_heads.py  Normal file
@@ -0,0 +1,24 @@
+"""merge conflicting heads
+
+Revision ID: 28752ce0775c
+Revises: f214ee53ca26, d9a34e9d7cfa
+Create Date: 2022-03-12 16:22:17.724988
+
+"""
+from alembic import op
+import sqlalchemy as sa
+
+
+# revision identifiers, used by Alembic.
+revision = '28752ce0775c'
+down_revision = ('f214ee53ca26', 'd9a34e9d7cfa')
+branch_labels = None
+depends_on = None
+
+
+def upgrade():
+    pass
+
+
+def downgrade():
+    pass
@@ -0,0 +1,36 @@
+"""Add workflow_spec_id to TaskLogModel
+
+Revision ID: d9a34e9d7cfa
+Revises: cf57eba23a16
+Create Date: 2022-03-08 13:37:24.773814
+
+"""
+from alembic import op
+import sqlalchemy as sa
+
+from crc.models.task_log import TaskLogModel
+from crc.models.workflow import WorkflowModel
+
+
+# revision identifiers, used by Alembic.
+revision = 'd9a34e9d7cfa'
+down_revision = 'cf57eba23a16'
+branch_labels = None
+depends_on = None
+
+
+def upgrade():
+    op.add_column('task_log', sa.Column('workflow_spec_id', sa.String()))
+    bind = op.get_bind()
+    session = sa.orm.Session(bind=bind)
+    session.flush()
+    task_logs = session.query(TaskLogModel).all()
+    for task_log in task_logs:
+        workflow = session.query(WorkflowModel).filter(WorkflowModel.id == task_log.workflow_id).first()
+        if workflow and workflow.workflow_spec_id:
+            task_log.workflow_spec_id = workflow.workflow_spec_id
+    session.commit()
+
+
+def downgrade():
+    op.drop_column('task_log', 'workflow_spec_id')
50  migrations/versions/f214ee53ca26_.py  Normal file
@@ -0,0 +1,50 @@
+"""empty message
+
+Revision ID: f214ee53ca26
+Revises: cf57eba23a16
+Create Date: 2022-03-12 15:03:32.193996
+
+"""
+from alembic import op
+import sqlalchemy as sa
+
+
+# revision identifiers, used by Alembic.
+from crc.models.study import StudyModel, ProgressStatus
+
+revision = 'f214ee53ca26'
+down_revision = 'cf57eba23a16'
+branch_labels = None
+depends_on = None
+
+
+def upgrade():
+    op.execute('ALTER TYPE progressstatus RENAME TO progressstatus_old;')
+    op.execute(
+        "CREATE TYPE progressstatus AS ENUM('in_progress', 'submitted_for_pre_review', 'in_pre_review', "
+        "'returned_from_pre_review', 'pre_review_complete', 'agenda_date_set', 'approved', "
+        "'approved_with_conditions', 'deferred', 'disapproved', "
+        "'ready_for_pre_review', 'resubmitted_for_pre_review')")
+    op.execute("ALTER TABLE study ALTER COLUMN progress_status TYPE "
+               "progressstatus USING progress_status::text::progressstatus;")
+    op.execute('DROP TYPE progressstatus_old;')
+
+
+def downgrade():
+    # Removing ready_for_pre_review and resubmitted_for_pre_review, so change those to in_progress first
+    bind = op.get_bind()
+    session = sa.orm.Session(bind=bind)
+    session.flush()
+    studies = session.query(StudyModel).filter(sa.or_(
+        StudyModel.progress_status == 'ready_for_pre_review',
+        StudyModel.progress_status == 'resubmitted_for_pre_review')).all()
+    for study in studies:
+        study.progress_status = ProgressStatus('in_progress')
+    session.commit()
+
+    # delete those statuses from progress status
+    op.execute('ALTER TYPE progressstatus RENAME TO progressstatus_old;')
+    op.execute("CREATE TYPE progressstatus AS ENUM('in_progress', 'submitted_for_pre_review', "
+               "'in_pre_review', 'returned_from_pre_review', 'pre_review_complete', "
+               "'agenda_date_set', 'approved', 'approved_with_conditions', 'deferred', 'disapproved')")
+    op.execute("ALTER TABLE study ALTER COLUMN progress_status"
+               " TYPE progressstatus USING progress_status::text::progressstatus;")
+    op.execute('DROP TYPE progressstatus_old;')
@@ -1,5 +1,5 @@
 <?xml version="1.0" encoding="UTF-8"?>
-<bpmn:definitions xmlns:bpmn="http://www.omg.org/spec/BPMN/20100524/MODEL" xmlns:bpmndi="http://www.omg.org/spec/BPMN/20100524/DI" xmlns:dc="http://www.omg.org/spec/DD/20100524/DC" xmlns:di="http://www.omg.org/spec/DD/20100524/DI" id="Definitions_e3059e6" targetNamespace="http://bpmn.io/schema/bpmn" exporter="Camunda Modeler" exporterVersion="4.2.0">
+<bpmn:definitions xmlns:bpmn="http://www.omg.org/spec/BPMN/20100524/MODEL" xmlns:bpmndi="http://www.omg.org/spec/BPMN/20100524/DI" xmlns:dc="http://www.omg.org/spec/DD/20100524/DC" xmlns:di="http://www.omg.org/spec/DD/20100524/DI" id="Definitions_e3059e6" targetNamespace="http://bpmn.io/schema/bpmn" exporter="Camunda Modeler" exporterVersion="4.10.0">
   <bpmn:process id="Process_LoggingTask" name="Logging Task" isExecutable="true">
     <bpmn:startEvent id="StartEvent_1">
       <bpmn:outgoing>Flow_0d5wpav</bpmn:outgoing>
@@ -37,14 +37,14 @@ log_model_debug = log(level='debug', code='debug_test_code', message='This is my
     <bpmn:scriptTask id="Activity_GetLoggingPre" name="Get Logging Pre">
       <bpmn:incoming>Flow_0d5wpav</bpmn:incoming>
       <bpmn:outgoing>Flow_0pc42yp</bpmn:outgoing>
-      <bpmn:script>logging_models_pre = get_logs()</bpmn:script>
+      <bpmn:script>logging_models_pre = get_logs_for_workflow()</bpmn:script>
     </bpmn:scriptTask>
     <bpmn:scriptTask id="Activity_GetLoggingPost" name="Get Logging Post">
       <bpmn:incoming>Flow_0n34cdi</bpmn:incoming>
       <bpmn:outgoing>Flow_07j4f0v</bpmn:outgoing>
-      <bpmn:script>logging_models_all_post = get_logs()
-logging_models_info_post = get_logs('test_code')
-logging_models_debug_post = get_logs('debug_test_code')</bpmn:script>
+      <bpmn:script>logging_models_all_post = get_logs_for_workflow()
+logging_models_info_post = get_logs_for_workflow(code='test_code')
+logging_models_debug_post = get_logs_for_workflow(code='debug_test_code')</bpmn:script>
     </bpmn:scriptTask>
     <bpmn:sequenceFlow id="Flow_0d5wpav" sourceRef="StartEvent_1" targetRef="Activity_GetLoggingPre" />
     <bpmn:sequenceFlow id="Flow_0pc42yp" sourceRef="Activity_GetLoggingPre" targetRef="Activity_LogEvent" />
@@ -24,7 +24,7 @@ log('debug', 'debug_code', f'This message has a { some_text }!')</bpmn:script>
     <bpmn:scriptTask id="Activity_GetLogs" name="Get Logs">
       <bpmn:incoming>Flow_10fc3fk</bpmn:incoming>
       <bpmn:outgoing>Flow_1dfqchi</bpmn:outgoing>
-      <bpmn:script>workflow_logs = get_logs()
+      <bpmn:script>workflow_logs = get_logs_for_workflow()
 study_logs = get_logs_for_study()</bpmn:script>
     </bpmn:scriptTask>
     <bpmn:sequenceFlow id="Flow_1dfqchi" sourceRef="Activity_GetLogs" targetRef="Activity_DisplayInfo" />
@@ -130,14 +130,6 @@ class TestTaskLogging(BaseTest):
         self.assertEqual(self.test_uid, logs[0]['user_uid'])
         self.assertEqual('You forgot to include the correct data.', logs[0]['message'])

-        url = f'/v1.0/workflow/{workflow.id}/log'
-        rv = self.app.put(url, headers=self.logged_in_headers(user), content_type="application/json",
-                          data=TaskLogQuerySchema().dump(task_log_query))
-
-        self.assert_success(rv)
-        wf_logs = json.loads(rv.get_data(as_text=True))['items']
-        self.assertEqual(wf_logs, logs, "Logs returned for the workflow should be identical to those returned from study")
-
     def test_logging_service_paginates_and_sorts(self):
         self.add_studies()
         study = session.query(StudyModel).first()
@@ -155,22 +147,24 @@ class TestTaskLogging(BaseTest):
             TaskLog().do_task(task, study.id, workflow_model.id, level='info', code='debug_code',
                               message=f'This is my info message # {i}.')

-        results = TaskLoggingService.get_logs_for_study(study.id, TaskLogQuery(per_page=100))
+        results = TaskLoggingService().get_logs_for_study_paginated(study.id, TaskLogQuery(per_page=100))
         self.assertEqual(40, len(results.items), "There should be 40 logs total")

-        logs = TaskLoggingService.get_logs_for_study(study.id, TaskLogQuery(per_page=5))
+        logs = TaskLoggingService().get_logs_for_study_paginated(study.id, TaskLogQuery(per_page=5))
         self.assertEqual(40, logs.total)
         self.assertEqual(5, len(logs.items), "I can limit results to 5")
-        self.assertEqual(1, logs.page)
+        self.assertEqual(0, logs.page)
         self.assertEqual(8, logs.pages)
         self.assertEqual(5, logs.per_page)
         self.assertEqual(True, logs.has_next)
         self.assertEqual(False, logs.has_prev)

-        logs = TaskLoggingService.get_logs_for_study(study.id, TaskLogQuery(per_page=5, sort_column="level"))
+        logs = TaskLoggingService.get_logs_for_study_paginated(study.id, TaskLogQuery(per_page=5, sort_column="level"))
         for i in range(0, 5):
             self.assertEqual('critical', logs.items[i].level, "It is possible to sort on a column")

-        logs = TaskLoggingService.get_logs_for_study(study.id, TaskLogQuery(per_page=5, sort_column="level", sort_reverse=True))
+        logs = TaskLoggingService.get_logs_for_study_paginated(study.id, TaskLogQuery(per_page=5, sort_column="level", sort_reverse=True))
         for i in range(0, 5):
             self.assertEqual('info', logs.items[i].level, "It is possible to sort on a column")
66  tests/study/test_study_download_logs.py  Normal file
@@ -0,0 +1,66 @@
+import json
+
+from tests.base_test import BaseTest
+
+from crc import session
+from crc.models.task_log import TaskLogModel, TaskLogQuery, TaskLogQuerySchema
+from crc.models.user import UserModel
+
+from openpyxl import load_workbook
+from io import BytesIO
+
+
+class TestDownloadLogsForStudy(BaseTest):
+    @staticmethod
+    def add_log(study_id, workflow_id, task, workflow_spec_id, log_data):
+        task_log = TaskLogModel(level=log_data['level'],
+                                code=log_data['code'],
+                                message=log_data['message'],
+                                study_id=study_id,
+                                workflow_id=workflow_id,
+                                task=task,
+                                user_uid='joe',
+                                workflow_spec_id=workflow_spec_id)
+        session.add(task_log)
+        session.commit()
+
+    def test_download_logs_for_study(self):
+        workflow = self.create_workflow('empty_workflow')
+        workflow_api = self.get_workflow_api(workflow)
+        task = workflow_api.next_task
+        study_id = workflow.study_id
+
+        log_data = {'level': 'metrics',
+                    'code': 'test_code',
+                    'message': 'This is a message.'}
+        self.add_log(study_id, workflow.id, task.name, 'empty_workflow', log_data)
+        log_data = {'level': 'metrics',
+                    'code': 'another_test_code',
+                    'message': 'This is another message.'}
+        self.add_log(study_id, workflow.id, task.name, 'empty_workflow', log_data)
+        log_data = {'level': 'metrics',
+                    'code': 'a_third_test_code',
+                    'message': 'This is a third message.'}
+        self.add_log(study_id, workflow.id, task.name, 'empty_workflow', log_data)
+
+        # Run the query, which should include a 'download_url' link that we can click on.
+        url = f'/v1.0/study/{workflow.study_id}/log'
+        task_log_query = TaskLogQuery()
+        user = session.query(UserModel).filter_by(uid=self.test_uid).first()
+        rv = self.app.put(url, headers=self.logged_in_headers(user), content_type="application/json",
+                          data=TaskLogQuerySchema().dump(task_log_query))
+        self.assert_success(rv)
+        log_query = json.loads(rv.get_data(as_text=True))
+        self.assertIsNotNone(log_query['download_url'])
+
+        # Use the provided link to get the file.
+        rv = self.app.get(log_query['download_url'])
+        self.assert_success(rv)
+        wb = load_workbook(BytesIO(rv.data))
+        ws = wb.active
+
+        self.assertEqual(4, ws.max_row)
+        self.assertEqual('Category', ws['A1'].value)
+        self.assertEqual('empty_workflow', ws['B2'].value)
+        self.assertEqual('metrics', ws['C3'].value)
+        self.assertEqual('a_third_test_code', ws['D4'].value)