update import tickets script

This commit is contained in:
burnettk 2022-07-06 22:59:44 -04:00
parent 1dcfe2e453
commit 7249fa69e1

View File

@@ -12,79 +12,102 @@
<bpmn:scriptTask id="Activity_17wwliq" name="Import tickets from csv inside backend" scriptFormat="python">
<bpmn:incoming>Flow_0pvp5mz</bpmn:incoming>
<bpmn:outgoing>Flow_04qc4ur</bpmn:outgoing>
<bpmn:script>import os
from flask_bpmn.models.db import db
from spiffworkflow_backend.models.process_instance import ProcessInstanceModel
import csv
from spiffworkflow_backend import create_app
from spiffworkflow_backend.models.user import UserModel
from spiffworkflow_backend.services.process_instance_processor import ProcessInstanceProcessor
from spiffworkflow_backend.services.process_instance_service import ProcessInstanceService
from flask import current_app
from spiffworkflow_backend.services.process_model_service import ProcessModelService
<bpmn:script>"""Import tickets, for use in script task."""
process_model_identifier_ticket = "ticket"
db.session.query(ProcessInstanceModel).filter(ProcessInstanceModel.process_model_identifier == process_model_identifier_ticket).delete()
db.session.commit()
def main():
"""Use main to avoid global namespace."""
import csv
"""Print process instance count."""
process_instances = ProcessInstanceModel.query.filter_by(process_model_identifier=process_model_identifier_ticket).all()
process_instance_count = len(process_instances)
print(f"process_instance_count: {process_instance_count}")
from flask_bpmn.models.db import db
process_model = ProcessModelService().get_process_model(process_model_identifier_ticket)
columns_to_data_key_mappings = {"Month": "month", "MS": "milestone", "ID": "req_id", "Dev Days": "dev_days", "Feature": "feature", "Priority": "priority"}
columns_to_header_index_mappings = {}
from spiffworkflow_backend.models.process_instance import ProcessInstanceModel
from spiffworkflow_backend.models.user import UserModel
from spiffworkflow_backend.services.process_instance_processor import (
ProcessInstanceProcessor,
)
from spiffworkflow_backend.services.process_instance_service import (
ProcessInstanceService,
)
user = UserModel.query.filter_by(username='test_user1').first()
process_model_identifier_ticket = "ticket"
db.session.query(ProcessInstanceModel).filter(
ProcessInstanceModel.process_model_identifier == process_model_identifier_ticket
).delete()
db.session.commit()
with open("tests/files/tickets.csv") as infile:
reader = csv.reader(infile, delimiter=",")
"""Print process instance count."""
process_instances = ProcessInstanceModel.query.filter_by(
process_model_identifier=process_model_identifier_ticket
).all()
process_instance_count = len(process_instances)
print(f"process_instance_count: {process_instance_count}")
# first row is garbage
next(reader)
columns_to_data_key_mappings = {
"Month": "month",
"MS": "milestone",
"ID": "req_id",
"Dev Days": "dev_days",
"Feature": "feature",
"Priority": "priority",
}
columns_to_header_index_mappings = {}
header = next(reader)
for column_name in columns_to_data_key_mappings:
columns_to_header_index_mappings[column_name] = header.index(column_name)
id_index = header.index("ID")
priority_index = header.index("Priority")
print(f"header: {header}")
for row in reader:
ticket_identifier = row[id_index]
priority = row[priority_index]
print(f"ticket_identifier: {ticket_identifier}")
print(f"priority: {priority}")
user = UserModel.query.filter_by(username="test_user1").first()
process_instance = ProcessInstanceService.create_process_instance(
process_model_identifier_ticket, user
)
processor = ProcessInstanceProcessor(process_instance)
with open("tests/files/tickets.csv") as infile:
reader = csv.reader(infile, delimiter=",")
processor.do_engine_steps()
# processor.save()
# first row is garbage
next(reader)
for column_name, desired_data_key in columns_to_data_key_mappings.items():
appropriate_index = columns_to_header_index_mappings[column_name]
print(f"appropriate_index: {appropriate_index}")
processor.bpmn_process_instance.data[desired_data_key] = row[appropriate_index]
header = next(reader)
for column_name in columns_to_data_key_mappings:
columns_to_header_index_mappings[column_name] = header.index(column_name)
id_index = header.index("ID")
priority_index = header.index("Priority")
month_index = header.index("Month")
print(f"header: {header}")
for row in reader:
ticket_identifier = row[id_index]
priority = row[priority_index]
month = row[month_index]
print(f"ticket_identifier: {ticket_identifier}")
print(f"priority: {priority}")
# if there is no month, who cares about it.
if month:
process_instance = ProcessInstanceService.create_process_instance(
process_model_identifier=process_model_identifier_ticket, user=user, process_group_identifier='sartography-admin'
)
processor = ProcessInstanceProcessor(process_instance)
# you at least need a month, or else this row in the csv is considered garbage
month_value = processor.bpmn_process_instance.data["month"]
if month_value == "" or month_value is None:
db.delete(process_instance)
db.session.commit()
continue
processor.do_engine_steps()
# processor.save()
processor.save()
for column_name, desired_data_key in columns_to_data_key_mappings.items():
appropriate_index = columns_to_header_index_mappings[column_name]
print(f"appropriate_index: {appropriate_index}")
processor.bpmn_process_instance.data[desired_data_key] = row[
appropriate_index
]
process_instance_data = processor.get_data()
print(f"process_instance_data: {process_instance_data}")
# you at least need a month, or else this row in the csv is considered garbage
month_value = processor.bpmn_process_instance.data["month"]
if month_value == "" or month_value is None:
db.delete(process_instance)
db.session.commit()
continue
processor.save()
process_instance_data = processor.get_data()
print(f"process_instance_data: {process_instance_data}")
main()
# to avoid serialization issues
del main
</bpmn:script>
</bpmn:scriptTask>
</bpmn:process>