Mirror of https://github.com/logos-blockchain/logos-blockchain-e2e-tests.git (synced 2026-01-07 15:43:05 +00:00)
Merge pull request #4 from logos-co/test-networking-privacy
Test/networking and privacy
commit d087db7285
@@ -1,31 +0,0 @@
-port: 4400
-n_hosts: 4
-timeout: 30
-
-# ConsensusConfig related parameters
-security_param: 10
-active_slot_coeff: 0.9
-
-# DaConfig related parameters
-subnetwork_size: 2
-dispersal_factor: 2
-num_samples: 1
-num_subnets: 2
-old_blobs_check_interval_secs: 5
-blobs_validity_duration_secs: 60
-global_params_path: "/kzgrs_test_params"
-
-# Tracing
-tracing_settings:
-  logger: Stdout
-  tracing: !Otlp
-    endpoint: http://tempo:4317/
-    sample_ratio: 0.5
-    service_name: node
-  filter: !EnvFilter
-    filters:
-      nomos: debug
-  metrics: !Otlp
-    endpoint: http://prometheus:9090/api/v1/otlp/v1/metrics
-    host_identifier: node
-  level: INFO
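The per-node-count config deleted above (n_hosts: 4) is superseded by the Jinja2 template edited in the next hunks: prepare_cluster_config now renders cluster_config/cfgsync.yaml from cfgsync-template.yaml instead of copying a fixed file. A quick, optional sanity check of a rendered config (not part of this PR; path taken from the code change below):

# Verify that prepare_cluster_config() left no unrendered Jinja2 placeholders
# behind in the generated config.
with open("cluster_config/cfgsync.yaml") as f:
    rendered = f.read()

assert "{{" not in rendered, "unrendered Jinja2 placeholders remain"
assert "n_hosts:" in rendered and "subnetwork_size:" in rendered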
@@ -1,5 +1,5 @@
 port: 4400
-n_hosts: 2
+n_hosts: {{ num_hosts }}
 timeout: 30
 
 # ConsensusConfig related parameters
@@ -7,7 +7,7 @@ security_param: 10
 active_slot_coeff: 0.9
 
 # DaConfig related parameters
-subnetwork_size: 2
+subnetwork_size: {{ subnet_size }}
 dispersal_factor: 2
 num_samples: 1
 num_subnets: 2
@@ -7,7 +7,7 @@ security_param: 10
 active_slot_coeff: 0.9
 
 # DaConfig related parameters
-subnetwork_size: 2
+subnetwork_size: 1024
 dispersal_factor: 2
 num_samples: 1
 num_subnets: 2
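For reference, the {{ num_hosts }} and {{ subnet_size }} placeholders introduced above are plain Jinja2 expressions; a minimal sketch of how they get filled in (not part of this PR, values illustrative, mirroring what prepare_cluster_config does further below):

from jinja2 import Template

# Two-line template mirroring the placeholders added above.
snippet = "n_hosts: {{ num_hosts }}\nsubnetwork_size: {{ subnet_size }}\n"
print(Template(snippet).render(num_hosts=2, subnet_size=2))
# n_hosts: 2
# subnetwork_size: 2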
@@ -39,3 +39,5 @@ typing-inspect==0.9.0
 typing_extensions==4.9.0
 urllib3==2.2.2
 virtualenv==20.25.0
+Jinja2~=3.1.5
+psutil~=7.0.0
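Of the two new dependencies, Jinja2 backs the config templating (placeholders above, rendering code below) and psutil backs the bandwidth measurement in the new networking-privacy test. A minimal sketch of the psutil measurement pattern (not part of this PR):

import psutil

# psutil.net_io_counters() aggregates bytes_sent/bytes_recv across all
# interfaces on the host, so the delta also includes unrelated traffic.
before = psutil.net_io_counters()
# ... traffic under test happens here ...
after = psutil.net_io_counters()
consumed = (after.bytes_sent + after.bytes_recv) - (before.bytes_sent + before.bytes_recv)
print(f"consumed {consumed} bytes")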
@@ -8,15 +8,23 @@ from src.env_vars import CFGSYNC, NOMOS, NOMOS_EXECUTOR
 from src.libs.custom_logger import get_custom_logger
 from src.node.nomos_node import NomosNode
 
+from jinja2 import Template
+
 logger = get_custom_logger(__name__)
 
 
-def prepare_cluster_config(node_count):
+def prepare_cluster_config(node_count, subnetwork_size=2):
     cwd = os.getcwd()
     config_dir = "cluster_config"
-    src = f"{cwd}/{config_dir}/cfgsync-{node_count}node.yaml"
-    dst = f"{cwd}/{config_dir}/cfgsync.yaml"
-    shutil.copyfile(src, dst)
+    with open(f"{cwd}/{config_dir}/cfgsync-template.yaml", "r") as file:
+        template_content = file.read()
+
+    template = Template(template_content)
+
+    rendered = template.render(num_hosts=node_count, subnet_size=subnetwork_size)
+
+    with open(f"{cwd}/{config_dir}/cfgsync.yaml", "w") as outfile:
+        outfile.write(rendered)
 
 
 def start_nodes(nodes):
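Usage stays backward compatible: callers that pass only node_count still get the previous behavior of subnetwork_size=2, while new callers can override it. A call sketch (assuming the function above is importable; the override value is illustrative):

# Renders cluster_config/cfgsync.yaml from cfgsync-template.yaml.
prepare_cluster_config(2)                      # num_hosts=2, subnet_size defaults to 2
prepare_cluster_config(2, subnetwork_size=4)   # explicit override; 4 is illustrative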
@@ -38,7 +46,13 @@ class StepsCommon:
     @pytest.fixture(scope="function")
     def setup_2_node_cluster(self, request):
         logger.debug(f"Running fixture setup: {inspect.currentframe().f_code.co_name}")
-        prepare_cluster_config(2)
+
+        if hasattr(request, "param"):
+            subnet_size = request.param
+        else:
+            subnet_size = 2
+
+        prepare_cluster_config(2, subnet_size)
         self.node1 = NomosNode(CFGSYNC, "cfgsync")
         self.node2 = NomosNode(NOMOS, "nomos_node_0")
         self.node3 = NomosNode(NOMOS_EXECUTOR, "nomos_node_1")
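Because the fixture now reads request.param, a test can choose the subnet size through pytest's indirect parametrization, which is exactly what the new test file does below. A minimal standalone sketch (hypothetical test name; in the repo the fixture lives on StepsCommon, so real tests subclass it):

import pytest

# Passes 2 as request.param into setup_2_node_cluster (indirect=True),
# which forwards it to prepare_cluster_config as the subnetwork size.
@pytest.mark.parametrize("setup_2_node_cluster", [2], indirect=True)
def test_with_subnet_size_2(setup_2_node_cluster):
    ...  # the cluster is already configured by the fixture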
@@ -77,13 +77,16 @@ class StepsDataAvailability(StepsCommon):
     @allure.step
     @retry(stop=stop_after_delay(65), wait=wait_fixed(1), reraise=True)
     def disperse_data(self, data, app_id, index):
+        response = []
         request = prepare_dispersal_request(data, app_id, index)
         executor = self.find_executor_node()
         try:
-            executor.send_dispersal_request(request)
+            response = executor.send_dispersal_request(request)
         except Exception as ex:
             assert "Bad Request" in str(ex) or "Internal Server Error" in str(ex)
 
+        assert response.status_code == 200, "Send dispersal finished with unexpected response code"
+
     @allure.step
     @retry(stop=stop_after_delay(45), wait=wait_fixed(1), reraise=True)
     def get_data_range(self, node, app_id, start, end):
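The @retry decorator wrapping disperse_data comes from tenacity (the library providing stop_after_delay and wait_fixed): any exception raised inside the method, including the new status-code assertion, triggers another attempt once per second for up to 65 seconds, after which the last exception is re-raised (reraise=True). A minimal illustration with a hypothetical function name:

from tenacity import retry, stop_after_delay, wait_fixed

# Same retry policy as disperse_data above.
@retry(stop=stop_after_delay(65), wait=wait_fixed(1), reraise=True)
def poll_until_dispersed():
    ...  # raise to retry; return normally to stop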
@@ -33,4 +33,5 @@ DATA_TO_DISPERSE = [
     "🚀🌟✨",
     "Lorem ipsum dolor sit amet",
     "<html><body>Hello</body></html>",
+    "0123456789ABCDEF0123456789ABCDEF0123456789ABCDEF01234",
 ]
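The appended entry is 53 characters long, which feeds into the bandwidth arithmetic in the new test below:

# 3 x "0123456789ABCDEF" (48 chars) + "01234" (5 chars) = 53 chars
len("0123456789ABCDEF0123456789ABCDEF0123456789ABCDEF01234")  # 53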
New files:
  tests/networking_privacy/__init__.py (0 lines)
  tests/networking_privacy/test_networking_privacy.py (44 lines)
@@ -0,0 +1,44 @@
+import pytest
+import psutil
+
+from src.libs.common import delay, to_app_id, to_index
+from src.libs.custom_logger import get_custom_logger
+from src.steps.da import StepsDataAvailability
+from src.test_data import DATA_TO_DISPERSE
+
+logger = get_custom_logger(__name__)
+
+
+class TestNetworkingPrivacy(StepsDataAvailability):
+    main_nodes = []
+
+    @pytest.mark.parametrize("setup_2_node_cluster", [2], indirect=True)
+    def test_consumed_bandwidth_dispersal(self, setup_2_node_cluster):
+        delay(5)
+        net_io = psutil.net_io_counters()
+        prev_total = net_io.bytes_sent + net_io.bytes_recv
+
+        successful_dispersals = 0
+        for i in range(20):
+            try:
+                self.disperse_data(DATA_TO_DISPERSE[7], to_app_id(1), to_index(0))
+                successful_dispersals += 1
+            except Exception as ex:
+                logger.warning(f"Dispersal #{i} was not successful with error {ex}")
+
+            if successful_dispersals == 10:
+                break
+
+            delay(0.1)
+
+        net_io = psutil.net_io_counters()
+        curr_total = net_io.bytes_sent + net_io.bytes_recv
+
+        consumed = curr_total - prev_total
+
+        assert successful_dispersals == 10, "Unable to finish 10 successful dispersals"
+
+        data_sent = 2 * successful_dispersals * len(DATA_TO_DISPERSE[7])
+        overhead = (consumed - data_sent) / data_sent
+
+        assert overhead < 400, "Dispersal overhead is too high"
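Back-of-the-envelope check of the final assertion, assuming DATA_TO_DISPERSE[7] resolves to a 53-byte payload (the entry appended in this PR; the actual index depends on the rest of the list):

payload_len = 53                            # assumed payload size in bytes
dispersals = 10
data_sent = 2 * dispersals * payload_len    # 1060 bytes counted as "useful" traffic
# overhead = (consumed - data_sent) / data_sent < 400
max_consumed = 401 * data_sent              # consumed must stay below ~425 kB
print(data_sent, max_consumed)              # 1060 425060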