removed tomli, traits into dockerfile, build/run context update (#104)

* removed tomli, traits into dockerfile, build/run context update

* issue 98

* build_docker.sh, --traits-dir

* minor edits
0xFugue 2023-04-06 11:49:00 +05:30 committed by Alberto
parent 3aa8eaac0f
commit cf24c7f44e
6 changed files with 90 additions and 69 deletions
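For orientation, the net effect of this commit on the build-and-run flow, assembled from the diffs below (paths and variables as they appear there):

# Build the Gennet image via the new helper script, which copies ../config/traits into the build context
cd gennet-module && sh ./build_docker.sh && cd ..
# Run network generation with the config mounted read-only and the traits directory passed explicitly
docker run --name gennet-container -v ${dir}/config/:/config:ro gennet \
    --config-file /config/${wakurtosis_config_file} --traits-dir /config/traits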


@@ -13,14 +13,16 @@ sudo apt install kurtosis-cli=$kurtosis_version
sudo apt-mark hold kurtosis-cli
sudo rm /etc/apt/sources.list.d/kurtosis.list
# Build WLS and Gennet docker image
# Build Gennet & WLS docker images
cd gennet-module
sh ./build_docker.sh
cd ..
cd wls-module
docker build -t wls:0.0.1 .
cd ..
cd gennet-module
docker build -t gennet .
cd ..
git clone git@github.com:waku-org/go-waku.git
cd go-waku


@@ -28,4 +28,3 @@ ENV PATH="/opt/venv/bin:$PATH"
# Set the entrypoint
ENTRYPOINT ["python", "gennet.py"]

gennet-module/build_docker.sh (Executable file, 7 changes)

@@ -0,0 +1,7 @@
#!/bin/sh
rm -f traits
cp -r ../config/traits .
docker build -t gennet .
rm -rf traits
ln -s ../config/traits .
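The helper script works around the fact that a Docker build context cannot include files reached through a symlink that points outside gennet-module: it swaps the traits symlink for a real copy of ../config/traits, builds the image (presumably so the Dockerfile can copy the traits into it, as the commit message suggests), and then restores the symlink. Assumed usage, matching the README change above:

cd gennet-module
sh ./build_docker.sh   # copies ../config/traits in, builds the gennet image, restores the symlink
cd ..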

gennet-module/gennet.py (Executable file → Normal file, 132 changes)

@@ -9,9 +9,11 @@ import sys, os
import json, ast
from collections import defaultdict
from pathlib import Path
import time, tracemalloc
import string
import typer, tomli
import typer
from enum import Enum, EnumMeta
@@ -23,29 +25,29 @@ class MetaEnum(EnumMeta):
cls(item)
except ValueError:
return False
return True
return True
class BaseEnum(Enum, metaclass=MetaEnum):
pass
class Trait(BaseEnum):
NWAKU = "nwaku"
GOWAKU = "gowaku"
DISCV5 = "discv5"
DNSDISC = "dnsdisc"
DNS = "dns"
FLTER = "flter"
NWAKU = "nwaku"
GOWAKU = "gowaku"
DISCV5 = "discv5"
DNSDISC = "dnsdisc"
DNS = "dns"
FLTER = "flter"
LIGHTPUSH = "lightpush"
METRICS = "metrics"
NODE = "node"
PEER = "peer"
METRICS = "metrics"
NODE = "node"
PEER = "peer"
PEERXCHNG = "peerxchng"
RELAY = "relay"
REST = "rest"
RLN = "rln"
RPC = "rpc"
STORE = "store"
SWAP = "swap"
RELAY = "relay"
REST = "rest"
RLN = "rln"
RPC = "rpc"
STORE = "store"
SWAP = "swap"
WEBSOCKET = "websocket"
# To add a new node type, add appropriate entries to the nodeType and nodeTypeToDocker
@@ -71,10 +73,9 @@ class networkType(Enum):
NW_DATA_FNAME = "network_data.json"
EXTERNAL_NODES_PREFIX, NODE_PREFIX, SUBNET_PREFIX, CONTAINER_PREFIX = \
"nodes", "node", "subnetwork", "containers"
NODES_JSON, NODE_PREFIX, SUBNET_PREFIX, CONTAINERS_JSON, CONTAINER_PREFIX = \
"nodes", "node", "subnetwork", "containers", "containers"
ID_STR_SEPARATOR = "-"
DEFAULT_TRAITS_DIR="../config/traits"
### I/O related fns ##############################################################
@@ -260,7 +261,7 @@ def generate_subnets(G, num_subnets):
# Remember, these are *sorted* offsets in the range of 0..n and without replacement; so
# they will all index correctly.
# Finally, assign all these node to the current subnet.
for i in range(start, end + 1):
for i in range(start, end + 1):
node2subnet[f"{NODE_PREFIX}{ID_STR_SEPARATOR}{lst[i]}"] = f"{SUBNET_PREFIX}_{subnet_id}"
#node2subnet[lst[i]] = subnet_id
start = end # roll over the start to the end of the last offset
@@ -280,12 +281,15 @@ def generate_toml(traits_dir, topics, traits_list):
topic_str = f"\"{topic_str}\""
for trait in traits_list[1:]: # skip the first trait as it is docker/node selector.
with open(f"{traits_dir}/{trait}.toml", 'rb') as f:
toml = ""
for key, value in tomli.load(f).items():
toml += f"{key} = {str(value)}\n"
tomls += toml
return f"{tomls}topics = {topic_str}\n"
toml = f'#{trait}\n'
tomlf = f"{traits_dir}/{trait}.toml"
if not os.path.isfile(tomlf):
raise ValueError(f"traits: missing trait file {tomlf}")
with open(tomlf, 'rb') as f:
strlines = [l.decode("utf-8").strip() for l in f if not len(l.split()) == 0]
toml += ''.join([f'{l}\n' for l in strlines if not l.startswith('#')])
tomls += toml + '\n'
return f"{tomls}#topics\ntopics = {topic_str}\n"
# Convert a dict to pair of arrays
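The generate_toml() rewrite above replaces tomli parsing with a plain-text passthrough: each trait's .toml (skipping the first entry of traits_list, which is the docker/node selector) is copied line by line with blank and comment lines dropped, under a #<trait> header, and a missing trait file now raises a ValueError instead of failing on open. With a hypothetical relay.toml containing only relay = true and a single topic, a generated per-node TOML would look roughly like this (trait, node and topic names are illustrative):

#relay
relay = true

#topics
topics = "topic_0"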
@@ -370,29 +374,29 @@ def generate_and_write_files(ctx: typer, G):
node2container = pack_nodes(ctx.params["container_size"], node2subnet)
container2nodes = invert_dict_of_list(node2container, 1)
json_dump, json_dump[CONTAINER_PREFIX], json_dump[EXTERNAL_NODES_PREFIX] = {}, {}, {}
json_dump, json_dump[CONTAINERS_JSON], json_dump[NODES_JSON] = {}, {}, {}
for container, nodes in container2nodes.items():
json_dump[CONTAINER_PREFIX][container] = nodes
json_dump[CONTAINERS_JSON][container] = nodes
i, traits_dir = 0, ctx.params["traits_dir"]
i, traits_dir = 0, ctx.params["traits_dir"]
for node in G.nodes:
# write the per node toml for the i^ith node of appropriate type
traits_list, i = traits_distribution[i].split(":"), i+1
node_type = nodeType(traits_list[0])
write_toml(ctx.params["output_dir"], node, generate_toml(traits_dir, topics, traits_list))
json_dump[EXTERNAL_NODES_PREFIX][node] = {}
json_dump[EXTERNAL_NODES_PREFIX][node]["static_nodes"] = []
json_dump[NODES_JSON][node] = {}
json_dump[NODES_JSON][node]["static_nodes"] = []
for edge in G.edges(node):
json_dump[EXTERNAL_NODES_PREFIX][node]["static_nodes"].append(edge[1])
json_dump[EXTERNAL_NODES_PREFIX][node][SUBNET_PREFIX] = node2subnet[node]
json_dump[EXTERNAL_NODES_PREFIX][node]["image"] = nodeTypeToDocker.get(node_type)
json_dump[NODES_JSON][node]["static_nodes"].append(edge[1])
json_dump[NODES_JSON][node][SUBNET_PREFIX] = node2subnet[node]
json_dump[NODES_JSON][node]["image"] = nodeTypeToDocker.get(node_type)
# the per node tomls will continue for now as they include topics
json_dump[EXTERNAL_NODES_PREFIX][node]["node_config"] = f"{node}.toml"
json_dump[NODES_JSON][node]["node_config"] = f"{node}.toml"
# logs ought to continue as they need to be unique
json_dump[EXTERNAL_NODES_PREFIX][node]["node_log"] = f"{node}.log"
json_dump[NODES_JSON][node]["node_log"] = f"{node}.log"
port_shift, cid = node2container[node]
json_dump[EXTERNAL_NODES_PREFIX][node]["port_shift"] = port_shift
json_dump[EXTERNAL_NODES_PREFIX][node]["container_id"] = cid
json_dump[NODES_JSON][node]["port_shift"] = port_shift
json_dump[NODES_JSON][node]["container_id"] = cid
write_json(ctx.params["output_dir"], json_dump) # network wide json
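Note that this hunk renames the Python-side constants only: the old CONTAINER_PREFIX and EXTERNAL_NODES_PREFIX already mapped to the strings "containers" and "nodes", so the keys in the emitted network_data.json are unchanged. For reference, the rough shape of that file (container ids, image names and the topology itself are illustrative):

{
  "containers": { "containers-0": ["node-0", "node-1"] },
  "nodes": {
    "node-0": {
      "static_nodes": ["node-1"],
      "subnetwork": "subnetwork_0",
      "image": "nwaku",
      "node_config": "node-0.toml",
      "node_log": "node-0.log",
      "port_shift": 0,
      "container_id": "containers-0"
    }
  }
}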
@@ -406,7 +410,7 @@ def _config_file_callback(ctx: typer.Context, param: typer.CallbackParam, cfile:
conf = json.load(f)
if "gennet" not in conf:
print(
f"Gennet configuration not found in {cfile}. Skipping topology generation.")
f"Gennet configuration not found in {cfile}. Skipping network generation.")
sys.exit(0)
if "general" in conf and "prng_seed" in conf["general"]:
conf["gennet"]["prng_seed"] = conf["general"]["prng_seed"]
@@ -437,20 +441,35 @@ def _num_subnets_callback(ctx: typer, Context, num_subnets: int):
def main(ctx: typer.Context,
benchmark: bool = typer.Option(False, help="Measure CPU/Mem usage of Gennet"),
draw: bool = typer.Option(False, help="Draw the generated network"),
container_size: int = typer.Option(1, help="Set the number of nodes per container"),
output_dir: str = typer.Option("network_data", help="Set the output directory for Gennet generated files"),
prng_seed: int = typer.Option(1, help="Set the random seed"),
num_nodes: int = typer.Option(4, help="Set the number of nodes"),
num_topics: int = typer.Option(1, help="Set the number of topics"),
fanout: int = typer.Option(3, help="Set the arity for trees & newmanwattsstrogatz"),
node_type_distribution: str = typer.Argument("{\"nwaku\" : 100 }" ,callback=ast.literal_eval, help="Set the node type distribution"),
node_config: str = typer.Argument("{}" ,callback=ast.literal_eval, help="Set the node configuration"),
network_type: networkType = typer.Option(networkType.NEWMANWATTSSTROGATZ.value, help="Set the node type"),
num_subnets: int = typer.Option(1, callback=_num_subnets_callback, help="Set the number of subnets"),
num_partitions: int = typer.Option(1, callback=_num_partitions_callback, help="Set the number of network partitions"),
config_file: str = typer.Option("", callback=_config_file_callback, is_eager=True, help="Set the input config file (JSON)")):
benchmark: bool = typer.Option(False,
help="Measure CPU/Mem usage of Gennet"),
draw: bool = typer.Option(False,
help="Draw the generated network"),
container_size: int = typer.Option(1,
help="Set the number of nodes per container"),
output_dir: str = typer.Option("network_data",
help="Set the output directory for Gennet generated files"),
prng_seed: int = typer.Option(1,
help="Set the random seed"),
num_nodes: int = typer.Option(4,
help="Set the number of nodes"),
num_topics: int = typer.Option(1,
help="Set the number of topics"),
fanout: int = typer.Option(3,
help="Set the arity for trees & newmanwattsstrogatz"),
node_type_distribution: str = typer.Argument("{\"nwaku\" : 100 }",
callback=ast.literal_eval, help="Set the node type distribution"),
network_type: networkType = typer.Option(networkType.NEWMANWATTSSTROGATZ.value,
help="Set the node type"),
num_subnets: int = typer.Option(1, callback=_num_subnets_callback,
help="Set the number of subnets"),
num_partitions: int = typer.Option(1, callback=_num_partitions_callback,
help="Set the number of network partitions"),
config_file: str = typer.Option("", callback=_config_file_callback, is_eager=True,
help="Set the input config file (JSON)"),
traits_dir: Path = typer.Option("./traits", exists=True, file_okay=False,
dir_okay=True, readable=True, resolve_path=True, help="Set the traits directory")):
# Benchmarking: record start time and start tracing mallocs
if benchmark:
@@ -462,11 +481,6 @@ def main(ctx: typer.Context,
random.seed(prng_seed)
np.random.seed(prng_seed)
if ctx.params["config_file"] == "":
ctx.params["traits_dir"] = DEFAULT_TRAITS_DIR
else:
ctx.params["traits_dir"] = os.path.dirname(ctx.params["config_file"]) + f"/traits"
# validate node type distribution
validate_traits_distribution(ctx.params["traits_dir"], node_type_distribution)
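With the hard-coded traits path removed from main(), the traits location is now a regular CLI option, validated by typer (exists, directory, readable), defaulting to ./traits, presumably the copy that build_docker.sh places into the image. An assumed local invocation, with illustrative values:

python gennet.py --num-nodes 4 --num-topics 1 --output-dir network_data --traits-dir ../config/traits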


@@ -8,5 +8,4 @@ Pillow==9.4.0
pyparsing==3.0.9
python-dateutil==2.8.2
six==1.16.0
tomli==2.0.1
typer==0.7.0

run.sh (8 changes)

@@ -47,14 +47,12 @@ rm -rf ./config/topology_generated > /dev/null 2>&1
# Remove previous logs
rm -rf ./$enclave_name_logs > /dev/null 2>&1
# Create and run Gennet docker container
# Run Gennet docker container
echo -e "\nRunning network generation"
docker rm gennet-container > /dev/null 2>&1 # cleanup the old docker if any
cd gennet-module
docker run --name gennet-container -v ${dir}/config/:/config gennet --config-file /config/${wakurtosis_config_file} --output-dir /config/topology_generated
docker run --name gennet-container -v ${dir}/config/:/config:ro gennet --config-file /config/${wakurtosis_config_file} --traits-dir /config/traits
err=$?
cd ..
if [ $err != 0 ]
then
@@ -62,6 +60,8 @@ then
exit
fi
docker cp gennet-container:/gennet/network_data ${dir}/config/topology_generated
docker rm gennet-container > /dev/null 2>&1
# Create the new enclave and run the simulation