fix: add dispersal overhead check

- improve error handling for disperse_data
Roman 2025-02-21 19:00:04 +11:00
parent 9336c2a3c1
commit db1856de69
3 changed files with 25 additions and 10 deletions


@@ -77,13 +77,16 @@ class StepsDataAvailability(StepsCommon):
     @allure.step
     @retry(stop=stop_after_delay(65), wait=wait_fixed(1), reraise=True)
     def disperse_data(self, data, app_id, index):
+        response = []
         request = prepare_dispersal_request(data, app_id, index)
         executor = self.find_executor_node()
         try:
-            executor.send_dispersal_request(request)
+            response = executor.send_dispersal_request(request)
         except Exception as ex:
             assert "Bad Request" in str(ex) or "Internal Server Error" in str(ex)
+        assert response.status_code == 200, "Send dispersal finished with unexpected response code"
 
     @allure.step
     @retry(stop=stop_after_delay(45), wait=wait_fixed(1), reraise=True)
     def get_data_range(self, node, app_id, start, end):
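
Note: the new status check cooperates with the surrounding @retry. On a tolerated rejection the except branch swallows the error, response keeps its [] sentinel value, and the final assert line then raises (an AttributeError, since a list has no status_code attribute), which makes tenacity retry the whole method for up to 65 seconds. A minimal standalone sketch of that pattern, with a hypothetical send callable standing in for executor.send_dispersal_request:

from tenacity import retry, stop_after_delay, wait_fixed

@retry(stop=stop_after_delay(65), wait=wait_fixed(1), reraise=True)
def disperse_until_accepted(send, request):
    response = []  # sentinel; replaced only if the request succeeds
    try:
        response = send(request)
    except Exception as ex:
        # Tolerated transient rejections; anything else fails this attempt.
        assert "Bad Request" in str(ex) or "Internal Server Error" in str(ex)
    # After a tolerated rejection, response is still a list, so the attribute
    # access below raises and tenacity schedules another attempt.
    assert response.status_code == 200, "Send dispersal finished with unexpected response code"
    return response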


@@ -33,4 +33,5 @@ DATA_TO_DISPERSE = [
     "🚀🌟✨",
     "Lorem ipsum dolor sit amet",
     "<html><body>Hello</body></html>",
+    "0123456789ABCDEF0123456789ABCDEF0123456789ABCDEF01234",
 ]
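
For reference, the appended entry is a 53-character hex-like string; the bandwidth test below reads it as DATA_TO_DISPERSE[7], which assumes it lands at index 7 of the full list (only the tail of the list is visible in this hunk):

payload = "0123456789ABCDEF0123456789ABCDEF0123456789ABCDEF01234"
assert len(payload) == 53  # 3 x 16 hex chars + "01234"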


@@ -12,22 +12,33 @@ logger = get_custom_logger(__name__)
 
 class TestNetworkingPrivacy(StepsDataAvailability):
     main_nodes = []
 
-    @pytest.mark.parametrize("setup_2_node_cluster", [1024], indirect=True)
+    @pytest.mark.parametrize("setup_2_node_cluster", [2], indirect=True)
     def test_consumed_bandwidth_dispersal(self, setup_2_node_cluster):
         delay(5)
         net_io = psutil.net_io_counters()
         prev_total = net_io.bytes_sent + net_io.bytes_recv
-        self.disperse_data(DATA_TO_DISPERSE[1], to_app_id(1), to_index(0))
+        successful_dispersals = 0
+        for i in range(20):
+            try:
+                self.disperse_data(DATA_TO_DISPERSE[7], to_app_id(1), to_index(0))
+                successful_dispersals += 1
+            except Exception as ex:
+                logger.warning(f"Dispersal #{i} was not successful with error {ex}")
+            if successful_dispersals == 10:
+                break
+            delay(0.1)
         net_io = psutil.net_io_counters()
         curr_total = net_io.bytes_sent + net_io.bytes_recv
-        logger.debug(f"prev_total: {prev_total}")
-        logger.debug(f"curr_total: {curr_total}")
         consumed = curr_total - prev_total
-        logger.debug(f"consumed: {consumed}")
-        delay(5)
-        rcv_data = self.get_data_range(self.node2, to_app_id(1), to_index(0), to_index(5))
-        logger.debug(f"Received data: {rcv_data}")
+        assert successful_dispersals == 10, "Unable to finish 10 successful dispersals"
+        data_sent = 2 * successful_dispersals * len(DATA_TO_DISPERSE[7])
+        overhead = (consumed - data_sent) / data_sent
+        assert overhead < 400, "Dispersal overhead is too high"
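
Plugging in the numbers gives the traffic budget the final assert enforces; a worked sketch, assuming the psutil counters are in bytes and the factor of 2 accounts for the payload being counted once in bytes_sent and once in bytes_recv on a shared host:

payload_len = 53                             # len(DATA_TO_DISPERSE[7])
dispersals = 10                              # required successful dispersals
data_sent = 2 * dispersals * payload_len     # 1060 bytes of application data
# overhead = (consumed - data_sent) / data_sent < 400
max_consumed = 401 * data_sent               # 425,060 bytes, roughly 415 KiB
print(f"traffic budget: {max_consumed} bytes")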