# Docker image used for every waku node started by this module.
WAKU_IMAGE = "statusteam/nim-waku:deploy-status-prod"

# Identifier of the JSON-RPC port declared on each waku service.
WAKU_RPC_PORT_ID = "rpc"

# TCP port the waku JSON-RPC server listens on.
TCP_PORT = 8545

# Waku metrics port
# Prometheus image and the port each waku node exposes metrics on.
PROMETHEUS_IMAGE = "prom/prometheus:latest"

PROMETHEUS_PORT_ID = "prometheus"

PROMETHEUS_TCP_PORT = 8008

# Location (in the Kurtosis packages namespace) of the prometheus config file.
PROMETHEUS_CONFIGURATION_PATH = "github.com/logos-co/wakurtosis/prometheus.yml"

# JSON-RPC method names used against the waku nodes.
POST_RELAY_MESSAGE = "post_waku_v2_relay_v1_message"

GET_WAKU_INFO_METHOD = "get_waku_v2_debug_v1_info"

CONNECT_TO_PEER_METHOD = "post_waku_v2_admin_v1_peers"

# Shared TOML configuration used when all nodes run the same config.
GENERAL_TOML_CONFIGURATION_PATH = "github.com/logos-co/wakurtosis/kurtosis-module/starlark/config_files/waku_general.toml"

GENERAL_TOML_CONFIGURATION_NAME = "waku_general.toml"
|
def create_waku_id(other_node_info):
    """Build the JSON multiaddress-list literal identifying a peer.

    Produces a string of the form '["/ip4/<ip>/tcp/<port>/p2p/<node id>"]'
    from the peer's service info and previously discovered node id.
    """
    peer_service = other_node_info["service"]
    address = peer_service.ip_address
    rpc_port = peer_service.ports[WAKU_RPC_PORT_ID].number
    return '["/ip4/{}/tcp/{}/p2p/{}"]'.format(address, rpc_port, other_node_info["id"])
|
2022-12-06 10:43:00 +00:00
|
|
|
|
|
|
|
|
|
|
|
def connect_wakunode_to_peer(service_id, port_id, other_node_info):
    """Instruct node `service_id` to dial the peer described by `other_node_info`.

    Uses the waku admin JSON-RPC API; the RPC response is printed for the
    Kurtosis run log.
    """
    rpc_response = send_json_rpc(
        service_id,
        port_id,
        CONNECT_TO_PEER_METHOD,
        create_waku_id(other_node_info),
    )
    print(rpc_response)
|
|
|
|
|
2022-12-12 12:04:22 +00:00
|
|
|
|
|
|
|
def send_waku_message(service_id, topic):
    """Publish a fixed test message on `topic` through a node's relay RPC API.

    Args:
        service_id: id of the waku service the message is posted through.
        topic: pubsub topic name the message is published on.
    """
    # Removed the dead no-op `topic = topic` self-assignment.
    # Hard-coded payload/timestamp: only message delivery is being exercised.
    waku_message = '{"payload": "0x1a2b3c4d5e6f", "timestamp": 1626813243}'
    params = '"' + topic + '"' + ", " + waku_message
    response = send_json_rpc(service_id, WAKU_RPC_PORT_ID, POST_RELAY_MESSAGE, params)
    print(response)
|
|
|
|
|
2022-12-12 15:23:47 +00:00
|
|
|
|
2022-12-06 10:43:00 +00:00
|
|
|
def send_json_rpc(service_id, port_id, method, params):
    """POST a JSON-RPC 2.0 request to `service_id`:`port_id` and return the response.

    `params` is spliced verbatim into the request's "params" array, so the
    caller is responsible for passing valid JSON fragment text.
    """
    request_body = '{ "jsonrpc": "2.0", "method": "' + method + '", "params": [' + params + '], "id": 1}'
    request_recipe = struct(
        service_id=service_id,
        port_id=port_id,
        endpoint="",
        method="POST",
        content_type="application/json",
        body=request_body
    )
    return get_value(recipe=request_recipe)
|
|
|
|
|
|
|
|
|
|
|
|
def get_wakunode_id(service_id, port_id):
    """Return the node's peer id, queried through the debug-info RPC.

    The id is the trailing /p2p/<id> segment of the node's first listen
    multiaddress.
    """
    info_response = send_json_rpc(service_id, port_id, GET_WAKU_INFO_METHOD, "")
    # jq-style extraction: first listen address, split on "/", keep last part.
    node_id = extract(info_response.body, '.result.listenAddresses | .[0] | split("/") | .[-1]')
    print(node_id)
    return node_id
|
2022-12-06 10:43:00 +00:00
|
|
|
|
2022-12-01 16:38:36 +00:00
|
|
|
|
2022-12-14 11:40:03 +00:00
|
|
|
def get_toml_configuration_artifact(wakunode_name, same_toml_configuration):
    """Upload the TOML configuration for a node and return (artifact_id, file_name).

    When `same_toml_configuration` is true every node shares the general
    configuration file; otherwise each node uploads its own <name>.toml.
    """
    if same_toml_configuration:
        file_name = GENERAL_TOML_CONFIGURATION_NAME
        source_path = GENERAL_TOML_CONFIGURATION_PATH
    else:
        file_name = wakunode_name + ".toml"
        source_path = "github.com/logos-co/wakurtosis/kurtosis-module/starlark/config_files/" + wakunode_name + ".toml"

    artifact_id = upload_files(src=source_path)

    return artifact_id, file_name
|
|
|
|
|
|
|
|
|
|
|
|
def instantiate_waku_nodes(waku_topology, same_toml_configuration):
    """Start one waku container per topology entry.

    Args:
        waku_topology: dict keyed by node name; each entry provides at least
            a "topics" string (see the hard-coded topology in run()).
        same_toml_configuration: when true all nodes share the general TOML
            configuration file.

    Returns:
        dict mapping node name -> {"id": <peer id>, "service": <service object>}.
    """
    # Mount point for the per-node TOML configuration inside each container.
    # Loop-invariant, so defined once instead of on every iteration.
    CONFIG_LOCATION = "/tmp"

    services = {}

    # Get up all waku nodes
    for wakunode_name in waku_topology.keys():
        artifact_id, configuration_file = get_toml_configuration_artifact(wakunode_name, same_toml_configuration)

        waku_service = add_service(
            service_id=wakunode_name,
            config=struct(
                image=WAKU_IMAGE,
                ports={
                    WAKU_RPC_PORT_ID: struct(number=TCP_PORT, protocol="TCP"),
                    PROMETHEUS_PORT_ID: struct(number=PROMETHEUS_TCP_PORT, protocol="TCP")
                },
                files={
                    artifact_id: CONFIG_LOCATION
                },
                entrypoint=[
                    "/usr/bin/wakunode", "--rpc-address=0.0.0.0",
                    "--metrics-server-address=0.0.0.0"
                ],
                cmd=[
                    "--topics='" + waku_topology[wakunode_name]["topics"] + "'",
                    '--metrics-server=true',
                    "--config-file=" + configuration_file
                ]
            )
        )

        # Give the node time to come up before querying its RPC interface.
        # NOTE(review): a fixed sleep is racy; a readiness probe would be
        # more robust — confirm whether the runtime offers one.
        exec(wakunode_name, ["sleep", "10"])

        # `node_id` instead of `id` to avoid shadowing the builtin.
        node_id = get_wakunode_id(wakunode_name, WAKU_RPC_PORT_ID)

        services[wakunode_name] = {"id": node_id, "service": waku_service}

    return services
|
2022-12-12 15:23:47 +00:00
|
|
|
|
|
|
|
|
|
|
|
def interconnect_waku_nodes(topology_information, services):
    """Dial every node's static peers as declared in the topology."""
    # Interconnect them
    for wakunode_name, node_info in topology_information.items():
        # todo: change to do only one rpc call
        for peer in node_info["static_nodes"]:
            connect_wakunode_to_peer(wakunode_name, WAKU_RPC_PORT_ID, services[peer])
|
|
|
|
|
|
|
|
|
|
|
|
def send_test_messages(topology_information):
    """Publish one test message from every node in the topology."""
    for node_name in topology_information:
        # send message in topic
        send_waku_message(node_name, "test")
|
|
|
|
|
|
|
|
|
2022-12-16 12:13:12 +00:00
|
|
|
def generate_template_data(services):
    """Collect "<ip>:<prometheus port>" endpoints for every node.

    Returns a dict with a single "targets" key, suitable as data for the
    prometheus targets.json template.
    """
    scrape_targets = [
        info["service"].ip_address + ":" + str(info["service"].ports[PROMETHEUS_PORT_ID].number)
        for info in services.values()
    ]
    return {"targets": scrape_targets}
|
|
|
|
|
|
|
|
|
|
|
|
def create_prometheus_targets(services):
    """Render the prometheus targets.json artifact for all node metrics endpoints.

    Args:
        services: dict of node name -> {"service": ...} as produced by
            instantiate_waku_nodes.

    Returns:
        The rendered artifact id (previously computed but silently discarded),
        so callers can mount the targets file into the prometheus service.
    """
    # get ip and ports of all nodes
    template_data = generate_template_data(services)

    # Static scrape config; {{.targets}} is filled in from template_data.
    template = "[{\"labels\": {\"job\": \"wakurtosis\"}, \"targets\" : [{{.targets}}] } ]"

    artifact_id = render_templates(
        config={
            "/tmp/targets.json": struct(
                template=template,
                data=template_data,
            )
        }
    )

    return artifact_id
|
|
|
|
|
|
|
|
|
|
|
|
def set_up_prometheus(services):
    """Launch a prometheus service configured to scrape the given waku nodes.

    Args:
        services: dict of node name -> {"service": ...} as produced by
            instantiate_waku_nodes.

    Returns:
        The prometheus service object returned by add_service.
    """
    # Create targets.json
    # NOTE(review): the rendered targets artifact is discarded here and never
    # mounted into the prometheus container — confirm this is intentional.
    create_prometheus_targets(services)

    # Set up prometheus
    # Mount point for the uploaded prometheus.yml inside the container.
    CONFIG_LOCATION = "/tmp"
    artifact_id = upload_files(
        src=PROMETHEUS_CONFIGURATION_PATH
    )
    prometheus_service = add_service(
        service_id="prometheus",
        config=struct(
            image=PROMETHEUS_IMAGE,
            ports={
                # NOTE(review): exposing the waku RPC port id/number on the
                # prometheus service looks copy-pasted from the waku node
                # config — confirm it is actually needed here.
                WAKU_RPC_PORT_ID: struct(number=TCP_PORT, protocol="TCP"),
                PROMETHEUS_PORT_ID: struct(number=PROMETHEUS_TCP_PORT, protocol="TCP")
            },
            files={
                artifact_id: CONFIG_LOCATION
            },
            cmd=[
                "--config.file=" + CONFIG_LOCATION + "/prometheus.yml"
            ]
        )
    )
    return prometheus_service
|
|
|
|
|
|
|
|
|
2022-12-12 15:23:47 +00:00
|
|
|
def run(args):
    """Module entrypoint: start the waku network, prometheus, peering, and test traffic.

    Args:
        args: module parameters; only `same_toml_configuration` is read.
    """
    waku_topology = read_file(src="github.com/logos-co/wakurtosis/kurtosis-module/starlark/waku_test_topology.json")
    same_toml_configuration = args.same_toml_configuration
    # waku_topology = json.decode(waku_topology)
    # NOTE(review): the file contents read above are immediately overwritten by
    # this hard-coded two-node topology (and json.decode is commented out) —
    # confirm whether the file-based topology should be restored.
    waku_topology = {
        "waku_0": {
            "ports_shift": 0,
            "topics": "test",
            "static_nodes": [
                "waku_1",
            ]
        },
        "waku_1": {
            "ports_shift": 1,
            "topics": "test",
            "static_nodes": [
                "waku_0"
            ]
        }
    }
    services = instantiate_waku_nodes(waku_topology, same_toml_configuration)
    # Set up prometheus + graphana
    set_up_prometheus(services)
    interconnect_waku_nodes(waku_topology, services)
    send_test_messages(waku_topology)
|