WIP more metadata reporting w/ burnettk

This commit is contained in:
jasquat 2022-11-29 15:59:46 -05:00
parent ddadefee60
commit e5f04d10a9
5 changed files with 1977 additions and 11 deletions

1908 changes in spiffworkflow-backend/ — Normal file

File diff suppressed because it is too large Load Diff

View File

@@ -12,6 +1,7 @@ from typing import Union
import connexion # type: ignore
import flask.wrappers
import jinja2
from spiffworkflow_backend.models.process_instance_metadata import ProcessInstanceMetadataModel
import werkzeug
from flask import Blueprint
from flask import current_app
@@ -27,10 +28,10 @@ from lxml import etree # type: ignore
from lxml.builder import ElementMaker # type: ignore
from SpiffWorkflow.task import Task as SpiffTask # type: ignore
from SpiffWorkflow.task import TaskState
from sqlalchemy import and_
from sqlalchemy import and_, func
from sqlalchemy import asc
from sqlalchemy import desc
from sqlalchemy.orm import joinedload
from sqlalchemy.orm import aliased, joinedload
from spiffworkflow_backend.exceptions.process_entity_not_found_error import (
ProcessEntityNotFoundError,
@@ -928,6 +929,26 @@ def process_instance_list(
UserGroupAssignmentModel.user_id == g.user.id
)
# userSkillF = aliased(UserSkill)
# userSkillI = aliased(UserSkill)
# import pdb; pdb.set_trace()
stock_columns = ProcessInstanceReportService.get_column_names_for_model(ProcessInstanceModel)
# print(f"stock_columns: {stock_columns}")
# import pdb; pdb.set_trace()
# for column in process_instance_report.report_metadata['columns']:
# if column not in stock_columns:
# # continue
for column in [{'accessor': 'key1'}]:
# print(f"column: {column['accessor']}")
# process_instance_query = process_instance_query.outerjoin(ProcessInstanceMetadataModel, ProcessInstanceModel.id == ProcessInstanceMetadataModel.process_instance_id, ProcessInstanceMetadataModel.key == column['accessor'])
instance_metadata_alias = aliased(ProcessInstanceMetadataModel)
process_instance_query = (
process_instance_query.options(joinedload(instance_metadata_alias, ProcessInstanceModel.id == instance_metadata_alias.process_instance_id, innerjoin=False)).filter(instance_metadata_alias.key == column['accessor'])
.add_column(func.max(instance_metadata_alias.value).label(column['accessor']))
)
# import pdb; pdb.set_trace()
process_instances = (
process_instance_query.group_by(ProcessInstanceModel.id)
.order_by(
@@ -935,14 +956,26 @@ def process_instance_list(
)
.paginate(page=page, per_page=per_page, error_out=False)
)
import pdb; pdb.set_trace()
results = list(
map(
ProcessInstanceService.serialize_flat_with_task_data,
process_instances.items,
)
)
# def awesome_serialize(process_instance)
# dict_thing = process_instance.serialize
#
# # add columns since we have access to columns here
# dict_thing['awesome'] = 'awesome'
#
# return dict_thing
# results = list(
# map(
# ProcessInstanceService.serialize_flat_with_task_data,
# process_instances.items,
# )
# )
results = ProcessInstanceReportService.add_metadata_columns_to_process_instance(process_instances.items, process_instance_report.report_metadata['columns'])
report_metadata = process_instance_report.report_metadata
print(f"results: {results}")
import pdb; pdb.set_trace()
response_json = {
"report_identifier": process_instance_report.identifier,

View File

@@ -235,8 +235,9 @@ class AuthenticationService:
refresh_token_object: RefreshTokenModel = RefreshTokenModel.query.filter(
RefreshTokenModel.user_id == user_id
).first()
assert refresh_token_object # noqa: S101
if refresh_token_object:
return refresh_token_object.token
return None
@classmethod
def get_auth_token_from_refresh_token(cls, refresh_token: str) -> dict:

View File

@@ -1,6 +1,8 @@
"""Process_instance_report_service."""
from dataclasses import dataclass
from flask_bpmn.models.db import db
from typing import Optional
from spiffworkflow_backend.models.process_instance import ProcessInstanceModel
from spiffworkflow_backend.models.process_instance_report import (
ProcessInstanceReportModel,
@@ -241,3 +243,20 @@ class ProcessInstanceReportService:
)
return report_filter
@classmethod
def add_metadata_columns_to_process_instance(
    cls,
    process_instance_sqlalchemy_rows,
    metadata_columns: list[dict],
) -> list[dict]:
    """Serialize query rows to dicts and attach metadata column values.

    Each row is expected to expose the process instance model under the
    'ProcessInstanceModel' key, plus one labeled value per metadata column
    (as produced by the report query's add_column calls) — TODO confirm
    against the caller in process_instance_list.

    :param process_instance_sqlalchemy_rows: SQLAlchemy result rows.
    :param metadata_columns: column descriptors, each with an 'accessor' key.
    :return: one serialized dict per row, with metadata values merged in.
    """
    # Set gives O(1) membership tests; only non-stock (metadata) accessors
    # are copied from the row onto the serialized dict.
    stock_columns = set(cls.get_column_names_for_model(ProcessInstanceModel))
    results = []
    for process_instance in process_instance_sqlalchemy_rows:
        process_instance_dict = process_instance['ProcessInstanceModel'].serialized
        for metadata_column in metadata_columns:
            accessor = metadata_column['accessor']
            if accessor not in stock_columns:
                process_instance_dict[accessor] = process_instance[accessor]
        results.append(process_instance_dict)
    return results
@classmethod
def get_column_names_for_model(cls, model: db.Model) -> list[str]:
    """Return the names of the columns defined on the model's table."""
    return [column.name for column in model.__table__.columns]

View File

@@ -2588,12 +2588,17 @@ class TestProcessApi(BaseTest):
response = client.get(
f"/v1.0/process-instances?report_identifier={process_instance_report.identifier}",
# f"/v1.0/process-instances?report_identifier=demo1",
headers=self.logged_in_headers(with_super_admin_user),
)
print(f"response.json: {response.json}")
assert response.status_code == 200
assert response.json is not None
assert response.status_code == 200
assert len(response.json["results"]) == 1
assert response.json["results"][0]["status"] == "complete"
assert response.json["results"][0]["id"] == process_instance.id
# assert response.json["results"][0]["key1"] == "value1"
assert response.json["pagination"]["count"] == 1
assert response.json["pagination"]["pages"] == 1
assert response.json["pagination"]["total"] == 1