cli is fully working: version 0

0xFugue 2023-07-19 18:37:51 +05:30
parent 4881d785f7
commit 91c2bcc665
1 changed file with 321 additions and 321 deletions


@@ -8,25 +8,30 @@ import numpy as np
import math
from pathlib import Path
import sys
import typer
import logging as log
from enum import Enum, EnumMeta
class networkType(Enum):
NEWMANWATTSSTROGATZ = "newmanwattsstrogatz" # mesh, smallworld
REGULAR = "regular" # libp2p
REGULAR = "regular" # d_lazy
class Keys:
GENNET="gennet"
GENLOAD="wls"
CONFIG="config"
class Config:
'''
def __init__(self): # the defaults
self.num_nodes = 4 # number of wakunodes = 4
self.fanout = 6 # 'average' node degree = 6
self.network_type = networkType.REGULAR.value # regular nw: avg node degree is exact
self.msg_size = 2 # msg size in KBytes
self.network_type = networkType.REGULAR.value # regular nw: avg node degree is 'exact'
self.msg_size = 0.002 # msg size in MBytes
self.msgpsec = 0.00139 # msgs per sec in single pubsub topic/shard = 5 msgs/hr
self.gossip_msg_size = 0.05 # gossip message size in KBytes = 50 bytes
self.hwindow = 3 # the history window for gossips = 3
@@ -35,310 +40,224 @@ class Config:
self.shards_per_node = 3 # avg number of shards a wakunode participates
self.per_hop_delay = 100 # avg delay per hop = 0.1 sec / 100 msec
self.d_lazy = sef.fanout # gossip degree = 6
self.d_lazy = self.fanout # gossip degree = 6
'''
def __init__(self, num_nodes, fanout,
network_type, msg_size, gossip_msg_size,
cache, gossip_to_reply_ratio, nodes_per_shard,
shards_per_node, per_hop_delay):
def __init__(self, num_nodes=4, fanout=6,
network_type=networkType.REGULAR.value,
msg_size=2, msgpsec=0.00139,
gossip_msg_size=0.05, gossip_hwindow=3, gossip2reply_ratio=0.01,
nodes_per_shard=10000, shards_per_node=3,
per_hop_delay=100):
self.num_nodes = num_nodes
self.fanout = fanout
self.network_type = network_type
self.msg_size = msg_size
self.msgpsec = msgpsec
self.msgphr = msgpsec*60*60
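# sanity check: the default msgpsec of 0.00139 msg/s * 3600 s/h ~= 5 msg/h per shard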
self.gossip_msg_size = gossip_msg_size
self.hwindow = cache
self.gossip2reply_ratio = gossip__to_reply_ratio
self.gossip_hwindow = gossip_hwindow
self.gossip2reply_ratio = gossip2reply_ratio
self.nodes_per_shard = nodes_per_shard
self.shards_per_node = shards_per_node
self.per_hop_delay = per_hop_delay
self.d_lazy = sef.fanout # gossip degree = 6
# Users send messages at a constant rate
# The network topology is a d-regular graph (gossipsub aims at achieving this).
# general / topology
average_node_degree = 6 # has to be even
message_size = 0.002 # in MB (Mega Bytes)
messages_sent_per_hour = 5 # on a single pubsub topic / shard
# gossip
gossip_message_size = 0.00005 # 50Bytes in MB (see https://github.com/libp2p/specs/pull/413#discussion_r1018821589 )
d_lazy = 6 # gossip out degree
mcache_gossip = 3 # Number of history windows to use when emitting gossip (see https://github.com/libp2p/specs/blob/master/pubsub/gossipsub/gossipsub-v1.0.md)
avg_ratio_gossip_replys = 0.01 # -> this is a wild guess! (todo: investigate)
# multi shard
avg_nodes_per_shard = 10000 # average number of nodes that are part of a single shard
avg_shards_per_node = 3 # average number of shards a given node is part of
# Util and format functions
#-----------------------------------------------------------
class bcolors:
HEADER = '\033[95m'
OKBLUE = '\033[94m'
OKGREEN = '\033[92m'
WARNING = '\033[93m'
FAIL = '\033[91m'
ENDC = '\033[0m'
BOLD = '\033[1m'
UNDERLINE = '\033[4m'
def sizeof_fmt(num):
return "%.1f%s" % (num, "MB")
def sizeof_fmt_kb(num):
return "%.2f%s" % (num*1024, "KB")
def magnitude_fmt(num):
for x in ['','k','m']:
if num < 1000:
return "%2d%s" % (num, x)
num /= 1000
# Color format based on daily bandwidth usage
# <10mb/d = good, <30mb/d ok, <100mb/d bad, 100mb/d+ fail.
def load_color_prefix(load):
if load < (10):
color_level = bcolors.OKBLUE
elif load < (30):
color_level = bcolors.OKGREEN
elif load < (100):
color_level = bcolors.WARNING
else:
color_level = bcolors.FAIL
return color_level
def load_color_fmt(load, string):
return load_color_prefix(load) + string + bcolors.ENDC
def print_header(string):
print(bcolors.HEADER + string + bcolors.ENDC + "\n")
def print_assumptions(xs):
print("Assumptions/Simplifications:")
for x in xs:
print(x)
print("")
def usage_str(load_users_fn, n_users):
load = load_users_fn(n_users)
return load_color_fmt(load, "For " + magnitude_fmt(n_users) + " users, receiving bandwidth is " + sizeof_fmt(load) + "/hour")
def print_usage(load_users):
print(usage_str(load_users, 100))
print(usage_str(load_users, 100 * 100))
print(usage_str(load_users, 100 * 100 * 100))
def latency_str(latency_users_fn, n_users, degree):
latency = latency_users_fn(n_users, degree)
return load_color_fmt(latency, "For " + magnitude_fmt(n_users) + " the average latency is " + ("%.3f" % latency) + " s")
def print_latency(latency_users):
print(latency_str(latency_users, 100, average_node_degree))
print(latency_str(latency_users, 100 * 100, average_node_degree))
print(latency_str(latency_users, 100 * 100 * 100, average_node_degree))
def num_edges_dregular(num_nodes, degree):
# we assume an even d; d-regular graphs where both n and d are odd don't exist
return num_nodes * (degree/2)
def avg_node_distance_upper_bound(n_users, degree):
return math.log(n_users, degree)
# Assumptions
#-----------------------------------------------------------
# Users send messages at a constant rate
# The network topology is a d-regular graph (gossipsub aims at achieving this).
# general / topology
average_node_degree = 6 # has to be even
message_size = 0.002 # in MB (Mega Bytes)
messages_sent_per_hour = 5 # on a single pubsub topic / shard
# gossip
gossip_message_size = 0.00005 # 50Bytes in MB (see https://github.com/libp2p/specs/pull/413#discussion_r1018821589 )
d_lazy = 6 # gossip out degree
mcache_gossip = 3 # Number of history windows to use when emitting gossip (see https://github.com/libp2p/specs/blob/master/pubsub/gossipsub/gossipsub-v1.0.md)
avg_ratio_gossip_replys = 0.01 # -> this is a wild guess! (todo: investigate)
# multi shard
avg_nodes_per_shard = 10000 # average number of nodes that are part of a single shard
avg_shards_per_node = 3 # average number of shards a given node is part of
# latency
average_delay_per_hop = 0.1 #s
# TODO: load case for status control messages (note: this also introduces messages by currently online, but not active users.)
# TODO: spread in the latency distribution (the highest 10%ish of latencies might be too high)
self.d_lazy = self.fanout # gossip degree = 6
# Assumption strings (general/topology)
a1 = "- A01. Message size (static): " + sizeof_fmt_kb(message_size)
a2 = "- A02. Messages sent per node per hour (static) (assuming no spam; but also no rate limiting.): " + str(messages_sent_per_hour)
a3 = "- A03. The network topology is a d-regular graph of degree (static): " + str(average_node_degree)
a4 = "- A04. Messages outside of Waku Relay are not considered, e.g. store messages."
a5 = "- A05. Messages are only sent once along an edge. (requires delays before sending)"
a6 = "- A06. Messages are sent to all d-1 neighbours as soon as receiving a message (current operation)" # Thanks @Mmenduist
a7 = "- A07. Single shard (i.e. single pubsub mesh)"
a8 = "- A08. Multiple shards; mapping of content topic (multicast group) to shard is 1 to 1"
a9 = "- A09. Max number of nodes per shard (static) " + str(avg_nodes_per_shard)
a10 = "- A10. Number of shards a given node is part of (static) " + str(avg_shards_per_node)
a11 = "- A11. Number of nodes in the network is variable.\n\
These nodes are distributed evenly over " + str(avg_shards_per_node) + " shards.\n\
Once all of these shards have " + str(avg_nodes_per_shard) + " nodes, new shards are spawned.\n\
These new shards have no influence on this model, because the nodes we look at are not part of these new shards."
a12 = "- A12. Including 1:1 chat. Messages sent to a given user are sent into a 1:1 shard associated with that user's node.\n\
Effectively, 1:1 chat adds a receive load corresponding to one additional shard a given node has to be part of."
a13 = "- A13. 1:1 chat messages sent per node per hour (static): " + str(messages_sent_per_hour) # could introduce a separate variable here
a14 = "- A14. 1:1 chat shards are filled one by one (not evenly distributed over the shards).\n\
This acts as an upper bound and overestimates the 1:1 load for lower node counts."
a15 = "- A15. Naive light node. Requests all messages in shards that have (large) 1:1 mapped multicast groups the light node is interested in."
self.Assumptions = {
"a1" : "- A01. Message size (static): " + sizeof_fmt_kb(self.msg_size),
"a2" : "- A02. Messages sent per node per hour (static) (assuming no spam; but also no rate limiting.): " + str(self.msgphr),
"a3" : "- A03. The network topology is a d-regular graph of degree (static): " + str(self.fanout),
"a4" : "- A04. Messages outside of Waku Relay are not considered, e.g. store messages.",
"a5" : "- A05. Messages are only sent once along an edge. (requires delays before sending)",
"a6" : "- A06. Messages are sent to all d-1 neighbours as soon as receiving a message (current operation)", # Thanks @Mmenduist
"a7" : "- A07. Single shard (i.e. single pubsub mesh)",
"a8" : "- A08. Multiple shards; mapping of content topic (multicast group) to shard is 1 to 1",
"a9" : "- A09. Max number of nodes per shard (static) " + str(self.nodes_per_shard),
"a10" : "- A10. Number of shards a given node is part of (static) " + str(self.shards_per_node),
"a11" : "- A11. Number of nodes in the network is variable.\n\
These nodes are distributed evenly over " + str(self.shards_per_node) + " shards.\n\
Once all of these shards have " + str(self.nodes_per_shard) + " nodes, new shards are spawned.\n\
These new shards have no influence on this model, because the nodes we look at are not part of these new shards.",
"a12" : "- A12. Including 1:1 chat. Messages sent to a given user are sent into a 1:1 shard associated with that user's node.\n\
Effectively, 1:1 chat adds a receive load corresponding to one additional shard a given node has to be part of.",
"a13" : "- A13. 1:1 chat messages sent per node per hour (static): " + str(self.msgphr), # could introduce a separate variable here
"a14" : "- A14. 1:1 chat shards are filled one by one (not evenly distributed over the shards).\n\
This acts as an upper bound and overestimates the 1:1 load for lower node counts.",
"a15" : "- A15. Naive light node. Requests all messages in shards that have (large) 1:1 mapped multicast groups the light node is interested in.",
# Assumption strings (store)
a21 = "- A21. Store nodes do not store duplicate messages."
"a21" : "- A21. Store nodes do not store duplicate messages.",
# Assumption strings (gossip)
a31 = "- A21. Gossip is not considered."
a32 = "- A32. Gossip message size (IHAVE/IWANT) (static):" + sizeof_fmt_kb(gossip_message_size)
a33 = "- A33. Ratio of IHAVEs followed-up by an IWANT (incl. the actual requested message):" + str(avg_ratio_gossip_replys)
"a31" : "- A21. Gossip is not considered.",
"a32" : "- A32. Gossip message size (IHAVE/IWANT) (static):" + sizeof_fmt_kb(self.gossip_msg_size),
"a33" : "- A33. Ratio of IHAVEs followed-up by an IWANT (incl. the actual requested message):" + str(self.gossip2reply_ratio),
# Assumption strings (delay)
a41 = "- A41. Delay is calculated based on an upper bound of the expected distance."
a42 = "- A42. Average delay per hop (static): " + str(average_delay_per_hop) + "s."
"a41" : "- A41. Delay is calculated based on an upper bound of the expected distance.",
"a42" : "- A42. Average delay per hop (static): " + str(self.per_hop_delay) + "s."
}
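# usage note: print_assumptions1 below always prepends the general assumptions
# a1-a4, so e.g. self.print_assumptions1(["a7", "a21"]) prints A01-A04, A07 and A21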
# Cases Load Per Node
#-----------------------------------------------------------
def print_assumptions1(self, xs):
print("Assumptions/Simplifications:")
alist = ["a1", "a2", "a3", "a4"] + xs
for a in alist:
if a in self.Assumptions:
print(self.Assumptions[a])
else:
log.error(f'Unknown assumption: {a}')
sys.exit(1)
print("")
def print_assumptions(self, xs):
print("Assumptions/Simplifications:")
for a in xs:
if a in self.Assumptions:
print(self.Assumptions[a])
else:
log.error(f'Unknown assumption: {a}')
sys.exit(1)
print("")
class Analysis(Config):
def __init__(self, num_nodes, fanout,
network_type, msg_size, msgpsec, gossip_msg_size,
gossip_hwindow, gossip2reply_ratio, nodes_per_shard,
shards_per_node, per_hop_delay):
Config.__init__(self, num_nodes, fanout,
network_type, msg_size, msgpsec, gossip_msg_size,
gossip_hwindow, gossip2reply_ratio, nodes_per_shard,
shards_per_node, per_hop_delay)
# Case 1 :: single shard, unique messages, store
def load_case1(n_users):
return message_size * messages_sent_per_hour * n_users
# sharding case 1: multi shard, n*(d-1) messages, gossip
def load_sharding_case1(self, n_users):
load_per_node_per_shard = self.load_case4(np.minimum(n_users/3, self.nodes_per_shard))
return self.shards_per_node * load_per_node_per_shard
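# note: the hard-coded n_users/3 mirrors a11 (users spread evenly over the default
# 3 shards); n_users/self.shards_per_node would stay consistent if that default changes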
def print_load_case1():
def load_case1(self, n_users):
return self.msg_size * self.msgphr * n_users
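# worked example (assuming msg_size is given in MB, e.g. the old default 0.002,
# and msgphr = 5): load_case1(10_000) = 0.002 * 5 * 10_000 = 100 MB/hour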
def print_load_case1(self):
print("")
print_header("Load case 1 (store load; corresponds to received load per naive light node)")
print_assumptions([a1, a2, a3, a4, a7, a21])
print_usage(load_case1)
self.print_assumptions1(["a7", "a21"])
print_usage(self.load_case1)
print("")
print("------------------------------------------------------------")
# Case 2 :: single shard, (n*d)/2 messages
def load_case2(n_users):
return message_size * messages_sent_per_hour * num_edges_dregular(n_users, average_node_degree)
def load_case2(self, n_users):
return self.msg_size * self.msgphr * num_edges_dregular(n_users, self.fanout)
def print_load_case2():
def print_load_case2(self):
print("")
print_header("Load case 2 (received load per node)")
print_assumptions([a1, a2, a3, a4, a5, a7, a31])
print_usage(load_case2)
self.print_assumptions1(["a5", "a7", "a31"])
print_usage(self.load_case2)
print("")
print("------------------------------------------------------------")
# Case 3 :: single shard n*(d-1) messages
def load_case3(n_users):
return message_size * messages_sent_per_hour * n_users * (average_node_degree-1)
def load_case3(self, n_users):
return self.msg_size * self.msgphr * n_users * (self.fanout-1)
def print_load_case3():
def print_load_case3(self):
print("")
print_header("Load case 3 (received load per node)")
print_assumptions([a1, a2, a3, a4, a6, a7, a31])
print_usage(load_case3)
self.print_assumptions1(["a6", "a7", "a31"])
print_usage(self.load_case3)
print("")
print("------------------------------------------------------------")
# Case 4:single shard n*(d-1) messages, gossip
def load_case4(n_users):
messages_received_per_hour = messages_sent_per_hour * n_users * (average_node_degree-1) # see case 3
messages_load = message_size * messages_received_per_hour
num_ihave = messages_received_per_hour * d_lazy * mcache_gossip
ihave_load = num_ihave * gossip_message_size
gossip_response_load = (num_ihave * (gossip_message_size + message_size)) * avg_ratio_gossip_replys # reply load contains both an IWANT (from requester to sender), and the actual wanted message (from sender to requester)
def load_case4(self, n_users):
messages_received_per_hour = self.msgphr * n_users * (self.fanout-1) # see case 3
messages_load = self.msg_size * messages_received_per_hour
num_ihave = messages_received_per_hour * self.d_lazy * self.gossip_hwindow
ihave_load = num_ihave * self.gossip_msg_size
gossip_response_load = (num_ihave * (self.gossip_msg_size + self.msg_size)) * self.gossip2reply_ratio # reply load contains both an IWANT (from requester to sender), and the actual wanted message (from sender to requester)
gossip_total = ihave_load + gossip_response_load
return messages_load + gossip_total
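# worked example per received message (assuming the old module defaults:
# msg_size=0.002 MB, gossip_msg_size=0.00005 MB, d_lazy=6, hwindow=3, reply ratio=0.01):
# IHAVEs: 6*3 = 18 per message -> 18 * 0.00005 = 0.0009 MB
# replies: 18 * 0.01 = 0.18 IWANT+message pairs -> 0.18 * (0.00005 + 0.002) ~= 0.00037 MB
# so gossip adds ~0.0013 MB on top of each 0.002 MB message, roughly 63% overhead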
def print_load_case4():
def print_load_case4(self):
print("")
print_header("Load case 4 (received load per node incl. gossip)")
print_assumptions([a1, a2, a3, a4, a6, a7, a32, a33])
print_usage(load_case4)
self.print_assumptions1(["a6", "a7", "a32", "a33"])
print_usage(self.load_case4)
print("")
print("------------------------------------------------------------")
# sharding case 1: multi shard, n*(d-1) messages, gossip
def load_sharding_case1(n_users):
load_per_node_per_shard = load_case4(np.minimum(n_users/3, avg_nodes_per_shard))
return avg_shards_per_node * load_per_node_per_shard
# latency cases
def latency_case1(self, n_users, degree):
return avg_node_distance_upper_bound(n_users, degree) * self.per_hop_delay
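# unit caveat: latency_str labels the result in seconds, so per_hop_delay must be
# passed in seconds (0.1, as in the old average_delay_per_hop) rather than the
# 100 ms default noted above; e.g. log_6(10**6) ~= 7.7 hops * 0.1 s ~= 0.77 s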
def print_load_sharding_case1():
def print_latency_case1(self):
print("")
print_header("Latency case 1 :: Topology: 6-regular graph. No gossip (note: gossip would help here)")
self.print_assumptions(["a3", "a41", "a42"])
print_latency(self.latency_case1, self.fanout)
print("")
print("------------------------------------------------------------")
def print_load_sharding_case1(self):
print("")
print_header("load sharding case 1 (received load per node incl. gossip)")
print_assumptions([a1, a2, a3, a4, a6, a8, a9, a10, a11, a32, a33])
print_usage(load_sharding_case1)
self.print_assumptions1(["a6", "a8", "a9", "a10", "a11", "a32", "a33"])
print_usage(self.load_sharding_case1)
print("")
print("------------------------------------------------------------")
# sharding case 2: multi shard, n*(d-1) messages, gossip, 1:1 chat
def load_sharding_case2(n_users):
load_per_node_per_shard = load_case4(np.minimum(n_users/3, avg_nodes_per_shard))
load_per_node_1to1_shard = load_case4(np.minimum(n_users, avg_nodes_per_shard))
return (avg_shards_per_node * load_per_node_per_shard) + load_per_node_1to1_shard
def load_sharding_case2(self, n_users):
load_per_node_per_shard = self.load_case4(np.minimum(n_users/3, self.nodes_per_shard))
load_per_node_1to1_shard = self.load_case4(np.minimum(n_users, self.nodes_per_shard))
return (self.shards_per_node * load_per_node_per_shard) + load_per_node_1to1_shard
def print_load_sharding_case2():
def print_load_sharding_case2(self):
print("")
print_header("load sharding case 2 (received load per node incl. gossip and 1:1 chat)")
print_assumptions([a1, a2, a3, a4, a6, a8, a9, a10, a11, a12, a13, a14, a32, a33])
print_usage(load_sharding_case2)
self.print_assumptions1(["a6", "a8", "a9", "a10", "a11", "a12", "a13", "a14", "a32", "a33"])
print_usage(self.load_sharding_case2)
print("")
print("------------------------------------------------------------")
# sharding case 3: multi shard, naive light node
def load_sharding_case3(n_users):
load_per_node_per_shard = load_case1(np.minimum(n_users/3, avg_nodes_per_shard))
return avg_shards_per_node * load_per_node_per_shard
def load_sharding_case3(self, n_users):
load_per_node_per_shard = self.load_case1(np.minimum(n_users/3, self.nodes_per_shard))
return self.shards_per_node * load_per_node_per_shard
def print_load_sharding_case3():
def print_load_sharding_case3(self):
print("")
print_header("load sharding case 3 (received load naive light node.)")
print_assumptions([a1, a2, a3, a4, a6, a8, a9, a10, a15, a32, a33])
print_usage(load_sharding_case3)
self.print_assumptions1(["a6", "a8", "a9", "a10", "a15", "a32", "a33"])
print_usage(self.load_sharding_case3)
print("")
print("------------------------------------------------------------")
def run(self):
self.print_load_case1()
self.print_load_case2()
self.print_load_case3()
self.print_load_case4()
self.print_latency_case1()
self.print_load_sharding_case1()
self.print_load_sharding_case2()
self.print_load_sharding_case3()
# Cases average latency
#-----------------------------------------------------------
def latency_case1(n_users, degree):
return avg_node_distance_upper_bound(n_users, degree) * average_delay_per_hop
def print_latency_case1():
print("")
print_header("Latency case 1 :: Topology: 6-regular graph. No gossip (note: gossip would help here)")
print_assumptions([a3, a41, a42])
print_latency(latency_case1)
print("")
print("------------------------------------------------------------")
# Plot
#-----------------------------------------------------------
def plot_load():
def plot_load(self):
plt.clf() # clear current plot
n_users = np.logspace(2, 6, num=5)
@@ -371,14 +290,14 @@ def plot_load():
caption = "Plot 1: single shard."
plt.figtext(0.5, 0.01, caption, wrap=True, horizontalalignment='center', fontsize=12)
# plt.show()
plt.show()
figure = plt.gcf() # get current figure
figure.set_size_inches(16, 9)
# plt.savefig("waku_scaling_plot.svg")
plt.savefig("waku_scaling_single_shard_plot.png", dpi=300, orientation="landscape")
#plt.savefig("waku_scaling_single_shard_plot.png", dpi=300, orientation="landscape")
def plot_load_sharding():
def plot_load_sharding(self):
plt.clf() # clear current plot
n_users = np.logspace(2, 6, num=5)
@@ -411,12 +330,99 @@ def plot_load_sharding():
caption = "Plot 2: multi shard."
plt.figtext(0.5, 0.01, caption, wrap=True, horizontalalignment='center', fontsize=12)
# plt.show()
plt.show()
figure = plt.gcf() # get current figure
figure.set_size_inches(16, 9)
# plt.savefig("waku_scaling_plot.svg")
plt.savefig("waku_scaling_multi_shard_plot.png", dpi=300, orientation="landscape")
#plt.savefig("waku_scaling_multi_shard_plot.png", dpi=300, orientation="landscape")
def plot(self):
self.plot_load()
self.plot_load_sharding()
# Util and format functions
#-----------------------------------------------------------
class bcolors:
HEADER = '\033[95m'
OKBLUE = '\033[94m'
OKGREEN = '\033[92m'
WARNING = '\033[93m'
FAIL = '\033[91m'
ENDC = '\033[0m'
BOLD = '\033[1m'
UNDERLINE = '\033[4m'
def sizeof_fmt(num):
return "%.1f%s" % (num, "MB")
def sizeof_fmt_kb(num):
return "%.2f%s" % (num*1024, "KB")
def magnitude_fmt(num):
for x in ['','k','m']:
if num < 1000:
return "%2d%s" % (num, x)
num /= 1000
# Color format based on daily bandwidth usage
# <10mb/d = good, <30mb/d ok, <100mb/d bad, 100mb/d+ fail.
def load_color_prefix(load):
if load < (10):
color_level = bcolors.OKBLUE
elif load < (30):
color_level = bcolors.OKGREEN
elif load < (100):
color_level = bcolors.WARNING
else:
color_level = bcolors.FAIL
return color_level
def load_color_fmt(load, string):
return load_color_prefix(load) + string + bcolors.ENDC
def print_header(string):
print(bcolors.HEADER + string + bcolors.ENDC + "\n")
def usage_str(load_users_fn, n_users):
load = load_users_fn(n_users)
return load_color_fmt(load, "For " + magnitude_fmt(n_users) + " users, receiving bandwidth is " + sizeof_fmt(load) + "/hour")
def print_usage(load_users):
print(usage_str(load_users, 100))
print(usage_str(load_users, 100 * 100))
print(usage_str(load_users, 100 * 100 * 100))
def latency_str(latency_users_fn, n_users, degree):
latency = latency_users_fn(n_users, degree)
return load_color_fmt(latency, "For " + magnitude_fmt(n_users) + " the average latency is " + ("%.3f" % latency) + " s")
def print_latency(latency_users, average_node_degree):
print(latency_str(latency_users, 100, average_node_degree))
print(latency_str(latency_users, 100 * 100, average_node_degree))
print(latency_str(latency_users, 100 * 100 * 100, average_node_degree))
def num_edges_dregular(num_nodes, degree):
# we assume an even d; d-regular graphs where both n and d are odd don't exist
return num_nodes * (degree/2)
def avg_node_distance_upper_bound(n_users, degree):
return math.log(n_users, degree)
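# quick check of the two graph helpers above:
# num_edges_dregular(1000, 6) = 1000 * 3 = 3000 edges
# avg_node_distance_upper_bound(10**6, 6) = log_6(10**6) ~= 7.7 hops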
def _sanity_check(fname, keys, ftype="json"):
@@ -437,17 +443,18 @@ def _sanity_check(fname, keys, ftype="json"):
sys.exit(0)
except Exception as ex:
raise typer.BadParameter(str(ex))
log.debug('sanity check: All Ok')
app = typer.Typer()
@app.command()
def kurtosis(ctx: typer.Context, config_file: Path):
_sanity_check(config_file, "json", [GENNET, GENLOAD])
_sanity_check(config_file, "json", [Keys.GENNET, Keys.GENLOAD])
print("kurtosis: done")
@app.command()
def batch(ctx: typer.Context, batch_file: Path):
_sanity_check(batch_file, "json", [CONFIG])
_sanity_check(batch_file, "json", [Keys.CONFIG])
print("batch: done")
@app.command()
@@ -469,9 +476,9 @@ def cli(ctx: typer.Context,
help="Set message rate per second on a shard/topic"),
gossip_msg_size: float = typer.Option(0.05,
help="Set gossip message size in KBytes"),
cache: int = typer.Option(3,
help="Set gossip window size"),
gossip__to_reply_ratio: float = typer.Option(0.01,
hwindow: int = typer.Option(3,
help="Set gossip history window size"),
gossip2reply_ratio: float = typer.Option(0.01,
help="Set the Gossip to reply ratio"),
nodes_per_shard: int = typer.Option(10000,
help="Set the number of nodes per shard/topic"),
@@ -486,18 +493,11 @@
print("")
print(bcolors.HEADER + "Waku relay theoretical model results (single shard and multi shard scenarios)." + bcolors.ENDC)
print_load_case1()
print_load_case2()
print_load_case3()
print_load_case4()
print_load_sharding_case1()
print_load_sharding_case2()
print_load_sharding_case3()
print_latency_case1()
plot_load()
plot_load_sharding()
analysis = Analysis(num_nodes, fanout,
network_type, msg_size, msgpsec, gossip_msg_size,
hwindow, gossip2reply_ratio, nodes_per_shard,
shards_per_node, per_hop_delay)
analysis.run()
print("cli: done")
if __name__ == "__main__":
@@ -507,10 +507,10 @@ if __name__ == "__main__":
# general / topology
average_node_degree = 6 # has to be even
message_size = 0.002 # in MB (Mega Bytes)
messages_sent_per_hour = 5 # on a single pubsub topic / shard
self.msgphr = 5 # on a single pubsub topic / shard
# gossip
gossip_message_size = 0.00005 # 50Bytes in MB (see https://github.com/libp2p/specs/pull/413#discussion_r1018821589 )
self.gossip_msg_size = 0.00005 # 50Bytes in MB (see https://github.com/libp2p/specs/pull/413#discussion_r1018821589 )
d_lazy = 6 # gossip out degree
mcache_gossip = 3 # Number of history windows to use when emitting gossip (see https://github.com/libp2p/specs/blob/master/pubsub/gossipsub/gossipsub-v1.0.md)
avg_ratio_gossip_replys = 0.01 # -> this is a wild guess! (todo: investigate)