update import scripts

burnettk 2022-06-19 22:57:57 -04:00
parent a38ef72c26
commit d88095f077
2 changed files with 36 additions and 18 deletions

View File

@@ -30,19 +30,26 @@ def main():
with app.app_context():
process_model_identifier_ticket = "ticket"
+bpmn_spec_dir = current_app.config["BPMN_SPEC_ABSOLUTE_DIR"]
+print(f"bpmn_spec_dir: {bpmn_spec_dir}")
db.session.query(ProcessInstanceModel).filter(ProcessInstanceModel.process_model_identifier == process_model_identifier_ticket).delete()
db.session.commit()
-print_process_instance_count(process_model_identifier_ticket)
-"""Print process instance count."""
-process_instances = ProcessInstanceModel.query.filter_by(process_model_identifier=process_model_identifier_ticket).all()
-process_instance_count = len(process_instances)
-print(f"process_instance_count: {process_instance_count}")
process_model = ProcessModelService().get_process_model(process_model_identifier_ticket)
-columns_to_data_key_mappings = {"Month": "month", "MS": "milestone", "ID": "req_id", "Dev Days": "dev_days"}
+columns_to_data_key_mappings = {"Month": "month", "MS": "milestone", "ID": "req_id", "Dev Days": "dev_days", "Feature": "feature", "Priority": "priority"}
columns_to_header_index_mappings = {}
user = UserModel.query.filter_by(username='test_user1').first()
with open("tests/files/tickets.csv") as infile:
reader = csv.reader(infile, delimiter=",")
+# first row is garbage
+next(reader)
header = next(reader)
for column_name in columns_to_data_key_mappings:
columns_to_header_index_mappings[column_name] = header.index(column_name)
@@ -61,19 +68,22 @@ def main():
processor = ProcessInstanceProcessor(process_instance)
processor.do_engine_steps()
-processor.save()
+# processor.save()
for column_name, desired_data_key in columns_to_data_key_mappings.items():
appropriate_index = columns_to_header_index_mappings[column_name]
+print(f"appropriate_index: {appropriate_index}")
processor.bpmn_process_instance.data[desired_data_key] = row[appropriate_index]
+print(f"datas: {processor.bpmn_process_instance.data}")
+if processor.bpmn_process_instance.data["month"] == "":
+continue
+# you at least need a month, or else this row in the csv is considered garbage
+# if processor.bpmn_process_instance.data["month"] is None:
+#     continue
processor.save()
+process_instance_data = processor.get_data()
+print(f"process_instance_data: {process_instance_data}")
+print(f"columns_to_header_index_mappings: {columns_to_header_index_mappings}")
+print_process_instance_count(process_model_identifier_ticket)
# if __name__ == "__main__":
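Both scripts lean on the same trick for reading the ticket export: skip the garbage first row, read the real header, and resolve each mapped column name to its index once so every data row can be turned into a dict of process instance data. A minimal standalone sketch of that step follows; the rows_as_data_dicts helper and the example path are illustrative, while the column mappings and the skipped first row come from the diff above.

import csv

# Column headers in the CSV mapped to the keys the process instance data should use
# (taken from columns_to_data_key_mappings in the diff above).
columns_to_data_key_mappings = {
    "Month": "month",
    "MS": "milestone",
    "ID": "req_id",
    "Dev Days": "dev_days",
    "Feature": "feature",
    "Priority": "priority",
}


def rows_as_data_dicts(csv_path):
    """Yield one dict per ticket row, keyed by the desired data keys."""
    with open(csv_path) as infile:
        reader = csv.reader(infile, delimiter=",")
        next(reader)  # first row is garbage, as the scripts note
        header = next(reader)
        # Resolve each mapped column name to its position in the header row once.
        columns_to_header_index_mappings = {
            column_name: header.index(column_name)
            for column_name in columns_to_data_key_mappings
        }
        for row in reader:
            yield {
                desired_data_key: row[columns_to_header_index_mappings[column_name]]
                for column_name, desired_data_key in columns_to_data_key_mappings.items()
            }


# Example usage (hypothetical path):
# for row_data in rows_as_data_dicts("tests/files/tickets.csv"):
#     print(row_data)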

View File

@@ -24,15 +24,17 @@ process_instance_count = len(process_instances)
print(f"process_instance_count: {process_instance_count}")
process_model = ProcessModelService().get_process_model(process_model_identifier_ticket)
-columns_to_data_key_mappings = {"Month": "month", "MS": "milestone", "ID": "req_id", "Dev Days": "dev_days"}
+columns_to_data_key_mappings = {"Month": "month", "MS": "milestone", "ID": "req_id", "Dev Days": "dev_days", "Feature": "feature", "Priority": "priority"}
columns_to_header_index_mappings = {}
user = UserModel.query.filter_by(username='test_user1').first()
-a = 1
-with open("tickets.csv") as infile:
-b = 2
+with open("tests/files/tickets.csv") as infile:
reader = csv.reader(infile, delimiter=",")
+# first row is garbage
+next(reader)
header = next(reader)
for column_name in columns_to_data_key_mappings:
columns_to_header_index_mappings[column_name] = header.index(column_name)
@@ -51,15 +53,21 @@ with open("tickets.csv") as infile:
processor = ProcessInstanceProcessor(process_instance)
processor.do_engine_steps()
-processor.save()
+# processor.save()
for column_name, desired_data_key in columns_to_data_key_mappings.items():
appropriate_index = columns_to_header_index_mappings[column_name]
print(f"appropriate_index: {appropriate_index}")
processor.bpmn_process_instance.data[desired_data_key] = row[appropriate_index]
+# you at least need a month, or else this row in the csv is considered garbage
+month_value = processor.bpmn_process_instance.data["month"]
+if month_value == "" or month_value is None:
+db.delete(process_instance)
+db.session.commit()
+continue
processor.save()
process_instance_data = processor.get_data()
print(f"process_instance_data: {process_instance_data}")
+print(f"columns_to_header_index_mappings: {columns_to_header_index_mappings}")