updates to the spiff log api for pagination and some cleanup w/ burnettk cullerton

This commit is contained in:
jasquat 2022-09-09 13:20:14 -04:00
parent 79bd9a5853
commit c76b08ae12
9 changed files with 90 additions and 51 deletions

View File

@@ -1,8 +1,8 @@
 """empty message

-Revision ID: 240bdce32a9f
+Revision ID: 9005f01aecf4
 Revises:
-Create Date: 2022-09-08 12:49:51.609196
+Create Date: 2022-09-09 12:55:35.301314

 """
 from alembic import op
@@ -10,7 +10,7 @@ import sqlalchemy as sa

 # revision identifiers, used by Alembic.
-revision = '240bdce32a9f'
+revision = '9005f01aecf4'
 down_revision = None
 branch_labels = None
 depends_on = None
@@ -48,11 +48,11 @@ def upgrade():
     op.create_index(op.f('ix_message_model_name'), 'message_model', ['name'], unique=True)
     op.create_table('spiff_logging',
     sa.Column('id', sa.Integer(), nullable=False),
-    sa.Column('process_instance_id', sa.Integer(), nullable=True),
-    sa.Column('process_id', sa.String(length=50), nullable=True),
-    sa.Column('task', sa.String(length=50), nullable=True),
+    sa.Column('process_instance_id', sa.Integer(), nullable=False),
+    sa.Column('bpmn_process_identifier', sa.String(length=50), nullable=False),
+    sa.Column('task', sa.String(length=50), nullable=False),
+    sa.Column('timestamp', sa.Float(), nullable=False),
     sa.Column('message', sa.String(length=50), nullable=True),
-    sa.Column('timestamp', sa.Float(), nullable=True),
     sa.PrimaryKeyConstraint('id')
     )
     op.create_table('user',
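The matching downgrade for this regenerated revision is outside the hunk; presumably it just drops the table, along the lines of this sketch (the body below is an assumption, not part of the diff):

    def downgrade():
        # assumed counterpart to the upgrade above: Alembic's autogenerate
        # normally emits a drop_table for each create_table
        op.drop_table('spiff_logging')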

poetry.lock generated
View File

@@ -1847,7 +1847,7 @@ pytz = "*"
 type = "git"
 url = "https://github.com/sartography/SpiffWorkflow"
 reference = "main"
-resolved_reference = "ecd01f20a0d7142115f4ac66ccef341fcf2b176f"
+resolved_reference = "1db24c7043a3281481305310e320e27270c70f95"

 [[package]]
 name = "sqlalchemy"
View File

@@ -925,18 +925,42 @@ paths:
           schema:
             $ref: "#/components/schemas/Workflow"
-  /process-instance/{process_instance_id}/logs:
+  /process-models/{process_group_id}/{process_model_id}/process-instances/{process_instance_id}/logs:
     parameters:
+      - name: process_group_id
+        in: path
+        required: true
+        description: The unique id of an existing process group
+        schema:
+          type: string
+      - name: process_model_id
+        in: path
+        required: true
+        description: The unique id of an existing process model.
+        schema:
+          type: string
       - name: process_instance_id
         in: path
        required: true
         description: the id of the process instance
         schema:
           type: integer
+      - name: page
+        in: query
+        required: false
+        description: The page number to return. Defaults to page 1.
+        schema:
+          type: integer
+      - name: per_page
+        in: query
+        required: false
+        description: The number of items to show per page. Defaults to 10.
+        schema:
+          type: integer
     get:
       tags:
         - Process Instances
-      operationId: spiffworkflow_backend.routes.process_api_blueprint.get_process_instance_logs
+      operationId: spiffworkflow_backend.routes.process_api_blueprint.process_instance_log_list
       summary: returns a list of logs associated with the process instance
       responses:
         "200":
View File

@@ -32,5 +32,5 @@ OPEN_ID_CLIENT_SECRET_KEY = environ.get(
 )  # noqa: S105
 SPIFFWORKFLOW_BACKEND_LOG_TO_FILE = (
-    environ.get("SPIFFWORKFLOW_BACKEND_LOG_TO_FILE", default="false") == "false"
+    environ.get("SPIFFWORKFLOW_BACKEND_LOG_TO_FILE", default="false") == "true"
 )
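This one-character change fixes an inverted flag: the old comparison enabled file logging precisely when the variable was "false" or unset. A quick check of the corrected behavior:

    from os import environ

    environ["SPIFFWORKFLOW_BACKEND_LOG_TO_FILE"] = "true"
    log_to_file = environ.get("SPIFFWORKFLOW_BACKEND_LOG_TO_FILE", default="false") == "true"
    assert log_to_file  # before the fix, setting "true" disabled file logging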

View File

@@ -1,26 +1,24 @@
 """Spiff_logging."""
 from dataclasses import dataclass
+from typing import Optional

 from flask_bpmn.models.db import db
 from flask_bpmn.models.db import SpiffworkflowBaseDBModel
-from marshmallow import Schema


 @dataclass
 class SpiffLoggingModel(SpiffworkflowBaseDBModel):
     """LoggingModel."""

     __tablename__ = "spiff_logging"
     id: int = db.Column(db.Integer, primary_key=True)
-    process_instance_id: int = db.Column(db.Integer)  # record.process_instance_id
-    process_id: str = db.Column(db.String(50))  # record.workflow
-    task: str = db.Column(db.String(50))  # record.task_id
-    message: str = db.Column(db.String(50))  # record.msg
-    timestamp: float = db.Column(db.Float())  # record.created
-
-
-class SpiffLoggingModelSchema(Schema):
-    """SpiffLoggingModelSchema."""
-
-    class Meta:
-        """Meta."""
-
-        model = SpiffLoggingModel
-        fields = ["process_instance_id", "process_id", "task", "message", "timestamp"]
+    process_instance_id: int = db.Column(
+        db.Integer, nullable=False
+    )  # record.process_instance_id
+    bpmn_process_identifier: str = db.Column(
+        db.String(50), nullable=False
+    )  # record.workflow
+    task: str = db.Column(db.String(50), nullable=False)  # record.task_id
+    timestamp: float = db.Column(db.Float(), nullable=False)  # record.created
+    message: Optional[str] = db.Column(db.String(50), nullable=True)  # record.msg
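With SpiffLoggingModelSchema gone, the marshmallow dependency drops out of this module; serialization presumably rides on the @dataclass decorator instead, since Flask's jsonify can render dataclass instances directly. A minimal sketch (field values are hypothetical; requires an app context):

    from flask import Flask, jsonify

    app = Flask(__name__)
    log = SpiffLoggingModel(
        process_instance_id=1,
        bpmn_process_identifier="Process_main",  # hypothetical BPMN process id
        task="Activity_do_thing",                # hypothetical task id
        timestamp=1662742535.3,
        message=None,
    )
    with app.app_context():
        jsonify([log])  # dataclass fields become the JSON keys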

View File

@@ -43,7 +43,6 @@ from spiffworkflow_backend.models.process_instance_report import (
 from spiffworkflow_backend.models.process_model import ProcessModelInfo
 from spiffworkflow_backend.models.process_model import ProcessModelInfoSchema
 from spiffworkflow_backend.models.spiff_logging import SpiffLoggingModel
-from spiffworkflow_backend.models.spiff_logging import SpiffLoggingModelSchema
 from spiffworkflow_backend.services.error_handling_service import ErrorHandlingService
 from spiffworkflow_backend.services.message_service import MessageService
 from spiffworkflow_backend.services.process_instance_processor import (
@@ -353,18 +352,36 @@ def process_instance_terminate(
     return Response(json.dumps({"ok": True}), status=200, mimetype="application/json")


-def get_process_instance_logs(process_instance_id: int) -> Response:
-    """Get_process_instance_logs."""
-    logs = SpiffLoggingModel.query.filter(
-        SpiffLoggingModel.process_instance_id == process_instance_id
-    ).all()
-    log_schema = SpiffLoggingModelSchema(many=True).dump(logs)
-    return Response(
-        json.dumps(log_schema),
-        status=200,
-        mimetype="application/json",
-    )
+def process_instance_log_list(
+    process_group_id: str,
+    process_model_id: str,
+    process_instance_id: int,
+    page: int = 1,
+    per_page: int = 100,
+) -> flask.wrappers.Response:
+    """Process_instance_log_list."""
+    # to make sure the process instance exists
+    process_instance = find_process_instance_by_id_or_raise(process_instance_id)
+
+    logs = (
+        SpiffLoggingModel.query.filter(
+            SpiffLoggingModel.process_instance_id == process_instance.id
+        )
+        .order_by(SpiffLoggingModel.timestamp.desc())  # type: ignore
+        .paginate(page, per_page, False)
+    )
+
+    response_json = {
+        "results": logs.items,
+        "pagination": {
+            "count": len(logs.items),
+            "total": logs.total,
+            "pages": logs.pages,
+        },
+    }
+
+    return make_response(jsonify(response_json), 200)


 # body: {
 #   payload: dict,
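The positional paginate(page, per_page, False) call matches the Flask-SQLAlchemy 2.x signature, where the third argument is error_out; passing False returns an empty page instead of aborting with a 404 when the page is out of range. Spelled out with keywords, the query is presumably equivalent to:

    # same query with the 2.x keyword arguments made explicit;
    # error_out=False yields an empty page rather than a 404
    logs = (
        SpiffLoggingModel.query.filter_by(process_instance_id=process_instance.id)
        .order_by(SpiffLoggingModel.timestamp.desc())
        .paginate(page=page, per_page=per_page, error_out=False)
    )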

View File

@@ -169,13 +169,13 @@ class DBHandler(logging.Handler):
     def emit(self, record: logging.LogRecord) -> None:
         """Emit."""
         if record:
-            process_id = record.workflow if hasattr(record, "workflow") else None  # type: ignore
-            task = str(record.task_id) if hasattr(record, "task_id") else None  # type: ignore
+            bpmn_process_identifier = record.workflow  # type: ignore
+            task = str(record.task_id)  # type: ignore
+            timestamp = record.created
             message = record.msg if hasattr(record, "msg") else None
-            timestamp = record.created if hasattr(record, "created") else None
             spiff_log = SpiffLoggingModel(
                 process_instance_id=record.process_instance_id,  # type: ignore
-                process_id=process_id,
+                bpmn_process_identifier=bpmn_process_identifier,
                 task=task,
                 message=message,
                 timestamp=timestamp,
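emit() now trusts workflow, task_id, and created to be present on every record, matching the new NOT NULL columns; only msg stays optional. The wiring that attaches this handler is outside the hunk, but it would presumably look something like this (logger name and constructor arguments are assumptions):

    import logging

    spiff_logger = logging.getLogger("spiff")  # assumed logger name
    spiff_logger.addHandler(DBHandler())       # assumed no-arg constructor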

View File

@@ -234,8 +234,8 @@ class SpecFileService(FileSystemService):
     def get_executable_process_ids(et_root: _Element) -> list[str]:
         """Get_executable_process_ids."""
         process_elements = SpecFileService.get_executable_process_elements(et_root)
-        process_ids = [pe.attrib["id"] for pe in process_elements]
-        return process_ids
+        bpmn_process_identifiers = [pe.attrib["id"] for pe in process_elements]
+        return bpmn_process_identifiers

     @staticmethod
     def get_process_id(et_root: _Element) -> str:
@@ -270,14 +270,14 @@ class SpecFileService(FileSystemService):
         relative_bpmn_file_path = os.path.join(
             relative_process_model_path, bpmn_file_name
         )
-        process_ids = SpecFileService.get_executable_process_ids(et_root)
-        for process_id in process_ids:
+        bpmn_process_identifiers = SpecFileService.get_executable_process_ids(et_root)
+        for bpmn_process_identifier in bpmn_process_identifiers:
             process_id_lookup = BpmnProcessIdLookup.query.filter_by(
-                bpmn_process_identifier=process_id
+                bpmn_process_identifier=bpmn_process_identifier
             ).first()
             if process_id_lookup is None:
                 process_id_lookup = BpmnProcessIdLookup(
-                    bpmn_process_identifier=process_id,
+                    bpmn_process_identifier=bpmn_process_identifier,
                     bpmn_file_relative_path=relative_bpmn_file_path,
                 )
                 db.session.add(process_id_lookup)
@@ -291,7 +291,7 @@ class SpecFileService(FileSystemService):
                 # on the file system. Otherwise, assume it is a duplicate process id and error.
                 if os.path.isfile(full_bpmn_file_path):
                     raise ValidationException(
-                        f"Process id ({process_id}) has already been used for "
+                        f"Process id ({bpmn_process_identifier}) has already been used for "
                         f"{process_id_lookup.bpmn_file_relative_path}. It cannot be reused."
                     )
                 else:
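The rename from process_id to bpmn_process_identifier clarifies that these are BPMN process ids from the XML, not database ids. For illustration, the kind of input and output get_executable_process_ids deals with (a sketch; it assumes the underlying helper filters on the isExecutable attribute):

    from lxml import etree

    bpmn_xml = b"""<definitions xmlns="http://www.omg.org/spec/BPMN/20100524/MODEL">
      <process id="Process_one" isExecutable="true"/>
      <process id="Process_two" isExecutable="false"/>
    </definitions>"""
    et_root = etree.fromstring(bpmn_xml)
    SpecFileService.get_executable_process_ids(et_root)  # expected: ["Process_one"]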

View File

@@ -28,14 +28,14 @@ class TestLoggingService(BaseTest):
         assert response.status_code == 200

         log_response = client.get(
-            f"/v1.0/process-instance/{process_instance_id}/logs",
+            f"/v1.0/process-models/{process_group_id}/{process_model_id}/process-instances/{process_instance_id}/logs",
             headers=logged_in_headers(user),
         )
         assert log_response.status_code == 200
         assert log_response.json
-        logs: list = log_response.json
+        logs: list = log_response.json["results"]
         assert len(logs) > 0
         for log in logs:
             assert log["process_instance_id"] == process_instance_id
-            for key in ["timestamp", "task", "process_id", "message"]:
+            for key in ["timestamp", "task", "bpmn_process_identifier", "message"]:
                 assert key in log.keys()
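Since the response now carries a pagination envelope, the test could presumably assert on that metadata as well; a hypothetical extension, not part of this commit:

    pagination = log_response.json["pagination"]
    assert pagination["count"] == len(logs)   # count reflects items on this page
    assert pagination["total"] >= pagination["count"]
    assert pagination["pages"] >= 1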