mirror of https://github.com/logos-blockchain/logos-blockchain-e2e-tests.git
synced 2026-01-02 13:13:08 +00:00

Commit 1022a14e76: Merge branch 'master' into test-dos-robustness
@@ -46,3 +46,8 @@ def random_divide_k(n, k):
     cuts = sorted(random.sample(range(1, n), k - 1))
     parts = [cuts[0]] + [cuts[i] - cuts[i - 1] for i in range(1, len(cuts))] + [n - cuts[-1]]
     return parts
+
+
+def generate_random_bytes(n=31):
+    return os.urandom(n)
+
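For context, a standalone sketch of what these two helpers do (reproduced here with comments; the sample values in the assertions are only illustrative):

import os
import random

def random_divide_k(n, k):
    # Pick k - 1 distinct cut points inside (0, n); the gaps between
    # consecutive cuts give k positive integer parts that sum to n.
    cuts = sorted(random.sample(range(1, n), k - 1))
    parts = [cuts[0]] + [cuts[i] - cuts[i - 1] for i in range(1, len(cuts))] + [n - cuts[-1]]
    return parts

def generate_random_bytes(n=31):
    # os.urandom draws n bytes from the OS entropy source.
    return os.urandom(n)

parts = random_divide_k(100, 5)
assert sum(parts) == 100 and len(parts) == 5
assert len(generate_random_bytes()) == 31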
@@ -123,6 +123,9 @@ class NomosNode:
     def node_type(self):
         return self._node_type

+    def name(self):
+        return self._container_name
+
     def check_nomos_log_errors(self, whitelist=None):
         keywords = LOG_ERROR_KEYWORDS
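The hunk shows only the head of check_nomos_log_errors; the whitelist handling itself is not visible here. A minimal sketch of how a whitelist filter over the error keywords might look (hypothetical, not the repository's actual implementation; log_content stands in for however the container log is read, and the keyword values are made up):

LOG_ERROR_KEYWORDS = ["error", "panic", "fatal"]  # illustrative values only

def check_nomos_log_errors(log_content, whitelist=None):
    # Drop whitelisted keywords, then flag any remaining keyword
    # that appears in the node's log output.
    keywords = [k for k in LOG_ERROR_KEYWORDS if not (whitelist and k in whitelist)]
    return [k for k in keywords if k in log_content]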
@@ -100,6 +100,7 @@ class StepsDataAvailability(StepsCommon):
         disperse()

     @allure.step
     def get_data_range(self, node, app_id, start, end, timeout_duration=45):
         @retry(stop=stop_after_delay(timeout_duration), wait=wait_fixed(1), reraise=True)
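A decorator can only reference timeout_duration if it is applied to an inner function defined after the argument is bound, which is what the indented @retry line above indicates. A minimal self-contained sketch of that tenacity pattern (query and send_get_data_range_request are hypothetical stand-ins for the actual request code):

from tenacity import retry, stop_after_delay, wait_fixed

def get_data_range(node, app_id, start, end, timeout_duration=45):
    # The decorator is evaluated when the inner function is defined,
    # so it can close over the timeout_duration argument: retry every
    # 1 s until timeout_duration seconds have elapsed, then re-raise.
    @retry(stop=stop_after_delay(timeout_duration), wait=wait_fixed(1), reraise=True)
    def query():
        # Hypothetical stand-in for the node's data-range query.
        return node.send_get_data_range_request(app_id, start, end)

    return query()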
@@ -19,14 +19,20 @@ class TestDataIntegrity(StepsDataAvailability):
     def test_da_identify_retrieve_missing_columns(self):
         self.disperse_data(DATA_TO_DISPERSE[1], to_app_id(1), to_index(0))
         delay(5)
-        # Select one target node at random to get blob data for 1/2 columns
-        selected_node = self.main_nodes[random.randint(1, 3)]
-        rcv_data = self.get_data_range(selected_node, to_app_id(1), to_index(0), to_index(5))
-        rcv_data_json = json.dumps(rcv_data)
+        test_results = []
+        # Iterate through standard nodes to get blob data for 1/2 columns
+        for node in self.main_nodes[1:4]:
+            rcv_data = self.get_data_range(node, to_app_id(1), to_index(0), to_index(5))
+            rcv_data_json = json.dumps(rcv_data)

-        reconstructed_data = NomosCli(command="reconstruct").run(input_values=[rcv_data_json])
+            reconstructed_data = NomosCli(command="reconstruct").run(input_values=[rcv_data_json])

-        assert DATA_TO_DISPERSE[1] == reconstructed_data, "Reconstructed data are not same with original data"
+            if DATA_TO_DISPERSE[1] == reconstructed_data:
+                test_results.append(node.name())
+
+        assert len(test_results) > 0, "Dispersed data were not received by any node"
+
+        logger.info(f"Dispersed data received by : {test_results}")

     @pytest.mark.usefixtures("setup_2_node_cluster")
     def test_da_sampling_determines_data_presence(self):
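Behaviorally, this hunk changes the test from asserting reconstruction against a single randomly selected node (failing hard on the first mismatch) to iterating over the standard nodes (main_nodes[1:4]), collecting the names of those whose reconstructed data matches the original, and asserting that at least one succeeded. The node.name() accessor it relies on is the one added to NomosNode in the hunk above.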
@@ -1,8 +1,6 @@
 import pytest

 from src.env_vars import CFGSYNC, NOMOS, NOMOS_EXECUTOR
 from src.libs.custom_logger import get_custom_logger
 from src.node.nomos_node import NomosNode
 from src.steps.common import StepsCommon

 logger = get_custom_logger(__name__)
@@ -1,7 +1,7 @@
 import pytest
 import psutil

-from src.libs.common import delay, to_app_id, to_index
+from src.libs.common import delay, to_app_id, to_index, generate_random_bytes
 from src.libs.custom_logger import get_custom_logger
 from src.steps.da import StepsDataAvailability
 from src.test_data import DATA_TO_DISPERSE
@@ -12,7 +12,7 @@ logger = get_custom_logger(__name__)
 class TestNetworkingPrivacy(StepsDataAvailability):
     main_nodes = []

-    @pytest.mark.parametrize("setup_2_node_cluster", [2], indirect=True)
+    @pytest.mark.usefixtures("setup_2_node_cluster")
     def test_consumed_bandwidth_dispersal(self, setup_2_node_cluster):
         net_io = psutil.net_io_counters()
         prev_total = net_io.bytes_sent + net_io.bytes_recv
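For reference, the parametrize marker touched here routes its value into the fixture via request.param when indirect=True, rather than passing it to the test directly. A minimal self-contained sketch of that pytest mechanism (the fixture body and test name are illustrative, not the repository's actual setup code):

import pytest

@pytest.fixture
def setup_2_node_cluster(request):
    # request.param receives the value from the parametrize marker,
    # e.g. a node count; fall back to 2 when not parametrized.
    nodes = getattr(request, "param", 2)
    yield nodes

@pytest.mark.parametrize("setup_2_node_cluster", [2], indirect=True)
def test_cluster_size(setup_2_node_cluster):
    assert setup_2_node_cluster == 2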
@@ -41,3 +41,36 @@
         overhead = (consumed - data_sent) / data_sent

         assert overhead < 400, "Dispersal overhead is too high"
+
+    @pytest.mark.usefixtures("setup_2_node_cluster")
+    def test_consumed_bandwidth_random_data_dispersal(self):
+        net_io = psutil.net_io_counters()
+        prev_total = net_io.bytes_sent + net_io.bytes_recv
+
+        data_to_disperse = generate_random_bytes()
+        logger.debug(f"Using random data to disperse: {list(data_to_disperse)}")
+
+        successful_dispersals = 0
+        for i in range(20):
+            try:
+                self.disperse_data(data_to_disperse, to_app_id(1), to_index(0), utf8=False, padding=False)
+                successful_dispersals += 1
+            except Exception as ex:
+                logger.warning(f"Dispersal #{i} was not successful with error {ex}")
+
+            if successful_dispersals == 10:
+                break
+
+            delay(0.1)
+
+        net_io = psutil.net_io_counters()
+        curr_total = net_io.bytes_sent + net_io.bytes_recv
+
+        consumed = curr_total - prev_total
+
+        assert successful_dispersals == 10, "Unable to finish 10 successful dispersals"
+
+        data_sent = 2 * successful_dispersals * len(data_to_disperse)
+        overhead = (consumed - data_sent) / data_sent
+
+        assert overhead < 400, "Dispersal overhead is too high"
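Since psutil.net_io_counters() sums both directions (bytes_sent + bytes_recv), each dispersed payload is counted twice, which is presumably why data_sent carries the factor of 2. A quick worked example of the bound this test asserts:

data_len = 31                          # generate_random_bytes() default
dispersals = 10
data_sent = 2 * dispersals * data_len  # 620 bytes, counted in both directions
# overhead = (consumed - data_sent) / data_sent < 400
# implies consumed < 401 * data_sent = 248,620 bytes
max_consumed = (400 + 1) * data_sent
print(max_consumed)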