Mirror of https://github.com/logos-co/nomos-simulations.git (synced 2025-01-09 18:25:54 +00:00).
Commit: min/max latency with message id
This commit is contained in:
parent
ce1c3764d3
commit
eba1a81c34
@ -25,6 +25,18 @@ class Message:
|
|||||||
if self.step_a is not None and self.step_b is not None:
|
if self.step_a is not None and self.step_b is not None:
|
||||||
return abs(self.step_a - self.step_b)
|
return abs(self.step_a - self.step_b)
|
||||||
|
|
||||||
|
def __eq__(self, other):
    """Two messages compare equal when their latencies are equal."""
    if isinstance(other, Message):
        return self.latency == other.latency
    # Defer to the other operand's __eq__ for non-Message comparisons.
    return NotImplemented
|
def __lt__(self, other):
    """Order messages by latency.

    Comparison is undefined (NotImplemented) against non-Message
    operands and whenever either latency is still unknown (None).
    """
    if not isinstance(other, Message):
        return NotImplemented
    ours, theirs = self.latency, other.latency
    if ours is None or theirs is None:
        return NotImplemented
    return ours < theirs
||||||
# Storage of all observed messages, keyed by message id.
MessageStorage = Dict[str, Message]
@ -32,32 +44,47 @@ MessageStorage = Dict[str, Message]
|
|||||||
def compute_results(
|
def compute_results(
|
||||||
message_storage: MessageStorage, step_duration: int
|
message_storage: MessageStorage, step_duration: int
|
||||||
) -> dict[str, int | float | str]:
|
) -> dict[str, int | float | str]:
|
||||||
latencies = [message_record.latency for message_record in message_storage.values()]
|
complete_messages = [
|
||||||
valued_latencies = [latency for latency in latencies if latency is not None]
|
message for message in message_storage.values() if message.latency is not None
|
||||||
incomplete_latencies = sum((1 for latency in latencies if latency is None))
|
]
|
||||||
|
incomplete_messages = sum(
|
||||||
|
(1 for message in message_storage.values() if message.latency is None)
|
||||||
|
)
|
||||||
|
|
||||||
total_messages = len(latencies)
|
total_messages = len(message_storage)
|
||||||
total_messages_full_latency = len(valued_latencies)
|
total_complete_messages = len(complete_messages)
|
||||||
total_messages_incomplete_latency = incomplete_latencies
|
total_incomplete_messages = incomplete_messages
|
||||||
latency_average_steps = statistics.mean(valued_latencies)
|
|
||||||
|
complete_latencies = [
|
||||||
|
message.latency for message in complete_messages if message.latency is not None
|
||||||
|
]
|
||||||
|
latency_average_steps = statistics.mean(complete_latencies)
|
||||||
latency_average_ms = "{:.2f}".format(latency_average_steps * step_duration)
|
latency_average_ms = "{:.2f}".format(latency_average_steps * step_duration)
|
||||||
latency_median_steps = statistics.median(valued_latencies)
|
latency_median_steps = statistics.median(complete_latencies)
|
||||||
latency_median_ms = "{:.2f}".format(latency_median_steps * step_duration)
|
latency_median_ms = "{:.2f}".format(latency_median_steps * step_duration)
|
||||||
max_latency_steps = max(valued_latencies)
|
|
||||||
|
max_message = max(complete_messages)
|
||||||
|
max_latency_steps = max_message.latency
|
||||||
|
assert max_latency_steps is not None
|
||||||
max_latency_ms = "{:.2f}".format(max_latency_steps * step_duration)
|
max_latency_ms = "{:.2f}".format(max_latency_steps * step_duration)
|
||||||
min_latency_steps = min(valued_latencies)
|
|
||||||
|
min_message = min(complete_messages)
|
||||||
|
min_latency_steps = min_message.latency
|
||||||
|
assert min_latency_steps is not None
|
||||||
min_latency_ms = "{:.2f}".format(min_latency_steps * step_duration)
|
min_latency_ms = "{:.2f}".format(min_latency_steps * step_duration)
|
||||||
|
|
||||||
return {
|
return {
|
||||||
"total_messages": total_messages,
|
"total_messages": total_messages,
|
||||||
"total_messages_full_latency": total_messages_full_latency,
|
"total_complete_messages": total_complete_messages,
|
||||||
"total_messages_incomplete_latency": total_messages_incomplete_latency,
|
"total_incomplete_messages": total_incomplete_messages,
|
||||||
"latency_average_steps": latency_average_steps,
|
"latency_average_steps": latency_average_steps,
|
||||||
"latency_average_ms": latency_average_ms,
|
"latency_average_ms": latency_average_ms,
|
||||||
"latency_median_steps": latency_median_steps,
|
"latency_median_steps": latency_median_steps,
|
||||||
"latency_median_ms": latency_median_ms,
|
"latency_median_ms": latency_median_ms,
|
||||||
|
"max_latency_message_id": max_message.id,
|
||||||
"max_latency_steps": max_latency_steps,
|
"max_latency_steps": max_latency_steps,
|
||||||
"max_latency_ms": max_latency_ms,
|
"max_latency_ms": max_latency_ms,
|
||||||
|
"min_latency_message_id": min_message.id,
|
||||||
"min_latency_steps": min_latency_steps,
|
"min_latency_steps": min_latency_steps,
|
||||||
"min_latency_ms": min_latency_ms,
|
"min_latency_ms": min_latency_ms,
|
||||||
}
|
}
|
||||||
|
Loading…
x
Reference in New Issue
Block a user