added process group display name to model search and cache the groups to avoid extra lookups w/ burnettk

jasquat 2023-01-04 13:12:36 -05:00
parent 23550583b2
commit 4c0d11dda4
6 changed files with 62 additions and 18 deletions

View File

@@ -0,0 +1,20 @@
+from typing import NewType, TypedDict
+from typing import TYPE_CHECKING
+
+if TYPE_CHECKING:
+    from spiffworkflow_backend.models.process_group import ProcessGroup
+
+
+IdToProcessGroupMapping = NewType("IdToProcessGroupMapping", dict[str, "ProcessGroup"])
+
+
+class ProcessGroupLite(TypedDict):
+    id: str
+    display_name: str
+
+
+class ProcessGroupLitesWithCache(TypedDict):
+    cache: dict[str, "ProcessGroup"]
+    process_groups: list[ProcessGroupLite]
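
For readers skimming the diff, the new interface module boils down to two TypedDicts plus a NewType alias for the group cache. Below is a minimal, self-contained sketch of how they fit together; the `ProcessGroup` dataclass is only a hypothetical stand-in for the real `spiffworkflow_backend.models.process_group.ProcessGroup` model, and the example ids are made up.

```python
from dataclasses import dataclass
from typing import NewType, TypedDict


# Hypothetical stand-in for the real ProcessGroup model, so this sketch runs on its own.
@dataclass
class ProcessGroup:
    id: str
    display_name: str


# Mirrors the new interfaces: a cache keyed by full group id path, plus a "lite"
# representation carrying only what the frontend search needs.
IdToProcessGroupMapping = NewType("IdToProcessGroupMapping", dict[str, ProcessGroup])


class ProcessGroupLite(TypedDict):
    id: str
    display_name: str


class ProcessGroupLitesWithCache(TypedDict):
    cache: dict[str, ProcessGroup]
    process_groups: list[ProcessGroupLite]


cache = IdToProcessGroupMapping({})
group = ProcessGroup(id="examples/hr", display_name="HR")
cache[group.id] = group

result: ProcessGroupLitesWithCache = {
    "cache": cache,
    "process_groups": [{"id": group.id, "display_name": group.display_name}],
}
print(result["process_groups"])  # [{'id': 'examples/hr', 'display_name': 'HR'}]
```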

View File

@@ -10,6 +10,7 @@ from typing import Any
 import marshmallow
 from marshmallow import Schema
 from marshmallow.decorators import post_load
+from spiffworkflow_backend.interfaces import ProcessGroupLite
 from spiffworkflow_backend.models.file import File
@@ -37,7 +38,7 @@ class ProcessModelInfo:
     files: list[File] | None = field(default_factory=list[File])
     fault_or_suspend_on_exception: str = NotificationType.fault.value
     exception_notification_addresses: list[str] = field(default_factory=list)
-    parent_groups: list[dict] | None = None
+    parent_groups: list[ProcessGroupLite] | None = None
     metadata_extraction_paths: list[dict[str, str]] | None = None
 
     def __post_init__(self) -> None:

View File

@@ -15,6 +15,7 @@ from flask import jsonify
 from flask import make_response
 from flask.wrappers import Response
 from flask_bpmn.api.api_error import ApiError
+from spiffworkflow_backend.interfaces import IdToProcessGroupMapping
 from spiffworkflow_backend.models.file import FileSchema
 from spiffworkflow_backend.models.process_group import ProcessGroup
@@ -172,6 +173,7 @@ def process_model_list(
     process_group_identifier: Optional[str] = None,
     recursive: Optional[bool] = False,
     filter_runnable_by_user: Optional[bool] = False,
+    include_parent_groups: Optional[bool] = False,
     page: int = 1,
     per_page: int = 100,
 ) -> flask.wrappers.Response:
@@ -181,22 +183,29 @@ def process_model_list(
         recursive=recursive,
         filter_runnable_by_user=filter_runnable_by_user,
     )
-    batch = ProcessModelService().get_batch(
+    process_models_to_return = ProcessModelService().get_batch(
         process_models, page=page, per_page=per_page
     )
+
+    if include_parent_groups:
+        process_group_cache = IdToProcessGroupMapping({})
+        for process_model in process_models_to_return:
+            parent_group_lites_with_cache = ProcessModelService.get_parent_group_array_and_cache_it(process_model.id, process_group_cache)
+            process_model.parent_groups = parent_group_lites_with_cache['process_groups']
+
     pages = len(process_models) // per_page
     remainder = len(process_models) % per_page
     if remainder > 0:
         pages += 1
     response_json = {
-        "results": ProcessModelInfoSchema(many=True).dump(batch),
+        "results": process_models_to_return,
         "pagination": {
-            "count": len(batch),
+            "count": len(process_models_to_return),
             "total": len(process_models),
             "pages": pages,
         },
     }
-    return Response(json.dumps(response_json), status=200, mimetype="application/json")
+    return make_response(jsonify(response_json), 200)
 
 
 def process_model_file_update(
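
With the new `include_parent_groups` flag, a caller of the list endpoint can ask for each model's parent groups in the same response. A hedged sketch of such a call using `requests`; the base URL, API prefix, and auth header are placeholders, not part of this commit:

```python
import requests

# Placeholder values; substitute your backend URL and a real access token.
BASE_URL = "http://localhost:7000/v1.0"
HEADERS = {"Authorization": "Bearer <access-token>"}

response = requests.get(
    f"{BASE_URL}/process-models",
    params={"per_page": 1000, "recursive": "true", "include_parent_groups": "true"},
    headers=HEADERS,
)
response.raise_for_status()

for process_model in response.json()["results"]:
    # With include_parent_groups=true each model carries a parent_groups list
    # of {"id": ..., "display_name": ...} entries.
    names = [g["display_name"] for g in (process_model.get("parent_groups") or [])]
    print(process_model["id"], "->", " / ".join(names))
```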

View File

@@ -1,5 +1,6 @@
 """Process_model_service."""
 import json
+from typing import TypedDict
 import os
 import shutil
 from glob import glob
@@ -13,6 +14,7 @@ from flask_bpmn.api.api_error import ApiError
 from spiffworkflow_backend.exceptions.process_entity_not_found_error import (
     ProcessEntityNotFoundError,
 )
+from spiffworkflow_backend.interfaces import ProcessGroupLite, ProcessGroupLitesWithCache
 from spiffworkflow_backend.models.process_group import ProcessGroup
 from spiffworkflow_backend.models.process_group import ProcessGroupSchema
 from spiffworkflow_backend.models.process_instance import ProcessInstanceModel
@@ -237,21 +239,32 @@ class ProcessModelService(FileSystemService):
         return process_models
 
     @classmethod
-    def get_parent_group_array(cls, process_identifier: str) -> list[dict]:
+    def get_parent_group_array_and_cache_it(cls, process_identifier: str, process_group_cache: dict[str, ProcessGroup]) -> ProcessGroupLitesWithCache:
         """Get_parent_group_array."""
         full_group_id_path = None
-        parent_group_array = []
+        parent_group_array: list[ProcessGroupLite] = []
         for process_group_id_segment in process_identifier.split("/")[0:-1]:
             if full_group_id_path is None:
                 full_group_id_path = process_group_id_segment
             else:
                 full_group_id_path = os.path.join(full_group_id_path, process_group_id_segment)  # type: ignore
-            parent_group = ProcessModelService.get_process_group(full_group_id_path)
+            parent_group = process_group_cache.get(full_group_id_path, None)
+            if parent_group is None:
+                parent_group = ProcessModelService.get_process_group(full_group_id_path)
             if parent_group:
+                if full_group_id_path not in process_group_cache:
+                    process_group_cache[full_group_id_path] = parent_group
                 parent_group_array.append(
                     {"id": parent_group.id, "display_name": parent_group.display_name}
                 )
-        return parent_group_array
+        return {'cache': process_group_cache, 'process_groups': parent_group_array}
+
+    @classmethod
+    def get_parent_group_array(cls, process_identifier: str) -> list[ProcessGroupLite]:
+        """Get_parent_group_array."""
+        parent_group_lites_with_cache = cls.get_parent_group_array_and_cache_it(process_identifier, {})
+        return parent_group_lites_with_cache['process_groups']
 
     @classmethod
     def get_process_groups(
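
The reason for threading `process_group_cache` through `get_parent_group_array_and_cache_it` is that sibling process models share most of their ancestor groups, so each group only needs to be looked up once per request. Below is a standalone sketch of that pattern under assumed names: `load_group` stands in for the real `ProcessModelService.get_process_group` filesystem lookup, and the model ids are invented.

```python
lookup_count = 0


def load_group(full_group_id_path: str) -> dict:
    """Stand-in for ProcessModelService.get_process_group (a filesystem read)."""
    global lookup_count
    lookup_count += 1
    return {"id": full_group_id_path, "display_name": full_group_id_path.split("/")[-1]}


def parent_groups_with_cache(process_identifier: str, cache: dict) -> list[dict]:
    """Build the parent group list for one model id, consulting the shared cache first."""
    parents: list[dict] = []
    full_group_id_path = ""
    for segment in process_identifier.split("/")[:-1]:
        full_group_id_path = segment if not full_group_id_path else f"{full_group_id_path}/{segment}"
        group = cache.get(full_group_id_path)
        if group is None:
            group = load_group(full_group_id_path)
            cache[full_group_id_path] = group
        parents.append({"id": group["id"], "display_name": group["display_name"]})
    return parents


shared_cache: dict = {}
for model_id in ["acme/hr/onboarding", "acme/hr/offboarding", "acme/finance/invoice"]:
    parent_groups_with_cache(model_id, shared_cache)

# Six ancestor segments are walked in total, but only three distinct groups
# ("acme", "acme/hr", "acme/finance") ever reach the loader.
print(lookup_count)  # 3 instead of 6 without the shared cache
```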

View File

@@ -309,7 +309,7 @@ export default function ProcessInstanceListTable({
     if (filtersEnabled) {
       // populate process model selection
       HttpService.makeCallToBackend({
-        path: `/process-models?per_page=1000&recursive=true`,
+        path: `/process-models?per_page=1000&recursive=true&include_parent_groups=true`,
         successCallback: processResultForProcessModels,
       });
     } else {

View File

@@ -24,17 +24,21 @@ export default function ProcessModelSearch({
         .map((parentGroup: ProcessGroupLite) => {
           return parentGroup.display_name;
         })
-        .join(' ');
+        .join(' / ');
     }
     return '';
   };
+  const getFullProcessModelLabel = (processModel: ProcessModel) => {
+    return `${processModel.id} (${getParentGroupsDisplayName(processModel)} ${
+      processModel.display_name
+    })`;
+  };
+
   const shouldFilterProcessModel = (options: any) => {
     const processModel: ProcessModel = options.item;
     const { inputValue } = options;
-    return `${processModel.id} (${getParentGroupsDisplayName(processModel)} ${
-      processModel.display_name
-    })`.includes(inputValue);
+    return getFullProcessModelLabel(processModel).includes(inputValue);
   };
 
   return (
     <ComboBox
@@ -44,10 +48,7 @@
       items={processModels}
       itemToString={(processModel: ProcessModel) => {
         if (processModel) {
-          return `${processModel.id} (${truncateString(
-            processModel.display_name,
-            75
-          )})`;
+          return getFullProcessModelLabel(processModel);
         }
         return null;
       }}