# spiffworkflow-backend/bin/import_tickets_for_script_task.py
"""Import tickets, for use in script task."""
2022-06-20 10:56:32 -04:00
import csv
2022-06-19 19:02:42 -04:00
2022-06-20 10:56:32 -04:00
from flask_bpmn.models.db import db
2022-06-19 19:02:42 -04:00
2022-06-20 10:56:32 -04:00
from spiffworkflow_backend.models.process_instance import ProcessInstanceModel
2022-06-19 19:02:42 -04:00
from spiffworkflow_backend.models.user import UserModel
2022-06-20 10:56:32 -04:00
from spiffworkflow_backend.services.process_instance_processor import (
ProcessInstanceProcessor,
)
from spiffworkflow_backend.services.process_instance_service import (
ProcessInstanceService,
)
2022-06-19 19:02:42 -04:00
from spiffworkflow_backend.services.process_model_service import ProcessModelService
process_model_identifier_ticket = "ticket"
2022-06-20 10:56:32 -04:00
db.session.query(ProcessInstanceModel).filter(
ProcessInstanceModel.process_model_identifier == process_model_identifier_ticket
).delete()
2022-06-19 19:02:42 -04:00
db.session.commit()
"""Print process instance count."""
2022-06-20 10:56:32 -04:00
process_instances = ProcessInstanceModel.query.filter_by(
process_model_identifier=process_model_identifier_ticket
).all()
2022-06-19 19:02:42 -04:00
process_instance_count = len(process_instances)
print(f"process_instance_count: {process_instance_count}")
process_model = ProcessModelService().get_process_model(process_model_identifier_ticket)
2022-06-20 10:56:32 -04:00
columns_to_data_key_mappings = {
"Month": "month",
"MS": "milestone",
"ID": "req_id",
"Dev Days": "dev_days",
"Feature": "feature",
"Priority": "priority",
}
2022-06-19 19:02:42 -04:00
columns_to_header_index_mappings = {}
2022-06-20 10:56:32 -04:00
user = UserModel.query.filter_by(username="test_user1").first()
2022-06-19 19:02:42 -04:00
2022-06-19 22:57:57 -04:00
with open("tests/files/tickets.csv") as infile:
2022-06-19 19:02:42 -04:00
reader = csv.reader(infile, delimiter=",")
2022-06-19 22:57:57 -04:00
# first row is garbage
next(reader)
2022-06-19 19:02:42 -04:00
header = next(reader)
for column_name in columns_to_data_key_mappings:
columns_to_header_index_mappings[column_name] = header.index(column_name)
id_index = header.index("ID")
priority_index = header.index("Priority")
print(f"header: {header}")
for row in reader:
ticket_identifier = row[id_index]
priority = row[priority_index]
print(f"ticket_identifier: {ticket_identifier}")
print(f"priority: {priority}")
process_instance = ProcessInstanceService.create_process_instance(
process_model_identifier_ticket, user
)
processor = ProcessInstanceProcessor(process_instance)
processor.do_engine_steps()
2022-06-19 22:57:57 -04:00
# processor.save()
2022-06-19 19:02:42 -04:00
for column_name, desired_data_key in columns_to_data_key_mappings.items():
appropriate_index = columns_to_header_index_mappings[column_name]
print(f"appropriate_index: {appropriate_index}")
2022-06-20 10:56:32 -04:00
processor.bpmn_process_instance.data[desired_data_key] = row[
appropriate_index
]
2022-06-19 22:57:57 -04:00
# you at least need a month, or else this row in the csv is considered garbage
month_value = processor.bpmn_process_instance.data["month"]
if month_value == "" or month_value is None:
db.delete(process_instance)
db.session.commit()
continue
2022-06-19 19:02:42 -04:00
processor.save()
process_instance_data = processor.get_data()
print(f"process_instance_data: {process_instance_data}")