alrevuelta 2023-10-20 09:40:31 +02:00
parent 56b7833ccb
commit e5dd8cd1c9
No known key found for this signature in database
GPG Key ID: F345C9F3CCDB886E
8 changed files with 30006 additions and 48 deletions

.gitignore (new file)

@@ -0,0 +1,3 @@
+**/.DS_Store
+rln-delay-simulations/.idea/*
+**/__pycache__


@@ -6,6 +6,7 @@ This folder contains a `shadow` configuration to simulate `1000` `nwaku` nodes i
* Focused on measuring message propagation delays. Each message that is sent encodes the timestamp at which it was created.
* Requires significant resources to run (tested with 256 GB RAM)
* See simulation parameters: latency, bandwidth, number of nodes, number of publishers.
+* Note that due to TCP flow control, when using big messages the first ones to arrive will show a higher delay. Filter them out to avoid biasing the measurements (see the filtering sketch below).
## How to run
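The TCP flow-control note above is what the seq-based skip added to `analyze.py` (further down in this commit) implements. As a standalone illustration only, here is a minimal sketch of that filtering, assuming log lines carry a `seq=` counter and a delay field such as `arrival_diff=` (names borrowed from this repo's scripts, not a fixed interface):

```python
import numpy as np

def filtered_delays(lines, field="arrival_diff=", skip_seqs=(0,)):
    """Parse per-message delays, dropping the first message(s),
    whose delay is inflated by TCP flow control."""
    delays = []
    for line in lines:
        if field not in line:
            continue
        if "seq=" in line:
            seq = int(line.strip().split("seq=")[1].split(" ")[0])
            if seq in skip_seqs:
                continue  # skip the biased first message(s)
        delays.append(int(line.strip().split(field)[1].split(" ")[0]))
    return np.array(delays)
```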
@@ -27,7 +28,10 @@ grep -nr 'ended_simulation' shadow.data | wc -l
# expected: 1000 (simulation finished ok in all nodes)
grep -nr 'tx_msg' shadow.data | wc -l
-# expected: 15 (total of published messages)
+# expected: 10 (total of published messages)
grep -nr 'rx_msg' shadow.data | wc -l
# expected: 9990 (total rx messages)
```
Get metrics:


@@ -1,23 +1,26 @@
import numpy as np
import sys
+def load(file_name, field):
+    latencies = []
+    with open(file_name, "r") as file:
+        for line in file.readlines():
+            if field in line:
+                # first message bias the latency due to tcp flow control
+                if "seq=" in line:
+                    seq = int(line.strip().split("seq=")[1].split(" ")[0])
+                    if seq in [0]:
+                        continue
+                x = line.strip().split(field)[1].split(" ")[0]
+                latencies.append(int(x))
+    return np.array(latencies)
-file = sys.argv[1]
-field = sys.argv[2]
-print("Data file:", file, "field:", field)
-latencies = []
-with open(file, "r") as file:
-    for line in file.readlines():
-        if field in line:
-            # first message bias the latency due to tcp flow control
-            if "seq=" in line:
-                seq = int(line.strip().split("seq=")[1].split(" ")[0])
-                if seq in [0]:
-                    continue
-            x = line.strip().split(field)[1].split(" ")[0]
-            latencies.append(int(x))
+if __name__ == "__main__":
+    file = sys.argv[1]
+    field = sys.argv[2]
+    array = load(file, field)
-array = np.array(latencies)
-print(f"number_samples={array.size}")
-print(f"Percentiles. P75={np.percentile(array, 75)} P95={np.percentile(array, 95)}")
-print(f"Statistics. mean={np.mean(array)} max={array.max()} min={array.min()}")
+    print("Data file:", file, "field:", field)
+    print(f"number_samples={array.size}")
+    print(f"Percentiles. P75={np.percentile(array, 75)} P95={np.percentile(array, 95)}")
+    print(f"Statistics. mean={np.mean(array)} max={array.max()} min={array.min()}")

Binary image file not shown (226 KiB).


@@ -1,55 +1,33 @@
import matplotlib.pyplot as plt
import scienceplots
import numpy as np
import math
import random
+from analyze import load
-def load(file):
-    field = "arrival_diff="
-    latencies = []
-    with open(file, "r") as file:
-        for line in file.readlines():
-            if field in line:
-                x = line.strip().split(field)[1].split(" ")[0]
-                latencies.append(int(x))
-    return np.array(latencies)
+latencies = [load("raw/latency_10kb.txt", "arrival_diff="),
+             load("raw/latency_100kb.txt", "arrival_diff="),
+             load("raw/latency_500kb.txt", "arrival_diff=")]
-latencies = [load("latency_10kb.txt"), load("latency_100kb.txt"), load("latency_500kb.txt")]
print(latencies)
with plt.style.context(['science', 'ieee']):
    fig, ax = plt.subplots()
    labels = []
    bp = ax.boxplot(latencies, notch=True, vert=True, patch_artist=True,
                    showfliers=True, whis=100000000000)
    for patch, color in zip(bp['boxes'], ['red', 'blue', 'green']):
        patch.set_facecolor(color)
-    ax.set(title="Message rate (msg/s)", xlabel='Scenario', ylabel='Message propagation time (ms)')
+    ax.set(title="Message latencies distribution\nD=6 nodes=1000 samples="+str(latencies[1].size), xlabel='Scenario', ylabel='Message propagation time (ms)')
    ax.grid(linestyle='-')
    my_legend = []
    for msg_size in [10, 100, 500]:
        my_legend.append("Message size: " + str(msg_size) + " kB")
    ax.legend([bp["boxes"][i] for i in range(len(my_legend))], my_legend, loc='upper left', fontsize=5)
    ax.autoscale(tight=False)
-    #means = [i.mean(axis=0) for i in q]
-    #stds =[i.std(axis=0) for i in q]
-    #per_95 = [np.percentile(i, 95) for i in q]
-    #for i, line in enumerate(bp['medians']):
-    #    x, y = line.get_xydata()[1]
-    #    text = r' $ \mu =$ ' + '{:.2f}\n'.format(means[i]) + r' $ p_{95} = $ ' + '{:.2f}'.format(per_95[i])
-    #    ax.annotate(text, xy=(x+0.1, y), fontsize=6)
-    #
-    #
+    for i, line in enumerate(bp['medians']):
+        x, y = line.get_xydata()[1]
+        text = r' $ \mu =$ ' + '{:.0f} ms\n'.format(latencies[i].mean(axis=0)) + r' $ p_{95} = $ ' + '{:.0f} ms'.format(np.percentile(latencies[i], 95))
+        ax.annotate(text, xy=(x + 0.1, y), fontsize=6)
    fig.set_size_inches(4, 3)
    fig.savefig('plot.jpg', dpi=600)
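The μ and p95 annotations on each box come straight from the loaded arrays, so the same figures can be checked without plotting. A small sketch under the same assumptions (the `raw/` file names and the `arrival_diff=` field as used above):

```python
import numpy as np
from analyze import load  # shared parser, as the plotting script now imports it

for path in ["raw/latency_10kb.txt", "raw/latency_100kb.txt", "raw/latency_500kb.txt"]:
    lat = load(path, "arrival_diff=")
    print(f"{path}: n={lat.size} mean={lat.mean():.0f} ms p95={np.percentile(lat, 95):.0f} ms")
```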

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large