Mirror of https://github.com/logos-blockchain/logos-blockchain-e2e-tests.git (synced 2026-01-04 06:03:12 +00:00)
Merge pull request #12 from logos-co/test-edge-cases
Test/da edge cases
Commit c3ef9302c0
@@ -28,6 +28,10 @@ mempool_publish_strategy: !SampleSubnetworks
     secs: 0
     nanos: 100000000
 
+replication_settings:
+  seen_message_cache_size: 204800
+  seen_message_ttl_secs: 900
+
 # Tracing
 tracing_settings:
   logger: Stdout
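For readers unfamiliar with the node config, the new keys can be read back as ordinary YAML; a minimal sketch assuming PyYAML and the nesting shown in this hunk (the surrounding config is not reproduced here):

    import yaml  # assumes PyYAML is available

    cfg = yaml.safe_load("""
    replication_settings:
      seen_message_cache_size: 204800
      seen_message_ttl_secs: 900
    """)

    print(cfg["replication_settings"]["seen_message_ttl_secs"])  # 900, i.e. a 15 minute TTL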
@@ -40,4 +40,4 @@ class BaseClient:
     def print_request_size(self, data):
         body_size = len(data) if data else 0
         body_kb = body_size / 1024
-        logger.debug(f"Body size: {body_kb:.2f}kB")
+        logger.debug(f"Request body size: {body_kb:.2f}kB")
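The change only rewords the log message; the size arithmetic is unchanged. A standalone sanity check of the formatting (illustrative values only, not part of the client):

    data = b"x" * 3072
    body_size = len(data) if data else 0
    body_kb = body_size / 1024
    print(f"Request body size: {body_kb:.2f}kB")  # -> Request body size: 3.00kB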
@@ -15,7 +15,7 @@ def get_env_var(var_name, default=None):
 
 
 # Configuration constants. Need to be upercase to appear in reports
-DEFAULT_NOMOS_IMAGE = "ghcr.io/logos-co/nomos-node:testnet"
+DEFAULT_NOMOS_IMAGE = "ghcr.io/logos-co/nomos:testnet"
 NOMOS_IMAGE = get_env_var("NOMOS_IMAGE", DEFAULT_NOMOS_IMAGE)
 
 DEFAULT_PROXY_IMAGE = "bitnami/configurable-http-proxy:latest"
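get_env_var is defined just above this hunk but its body is not shown; a typical implementation of that pattern, stated as an assumption rather than the suite's actual code, is:

    import os

    def get_env_var(var_name, default=None):
        # Return the environment override if set, otherwise fall back to the default
        return os.environ.get(var_name, default)

    NOMOS_IMAGE = get_env_var("NOMOS_IMAGE", "ghcr.io/logos-co/nomos:testnet")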
@@ -1,6 +1,4 @@
 import json
-import random
-
 import pytest
 
 from src.client.nomos_cli import NomosCli
@@ -20,7 +18,7 @@ class TestDataIntegrity(StepsDataAvailability):
         self.disperse_data(DATA_TO_DISPERSE[1], to_app_id(1), to_index(0))
         delay(5)
         test_results = []
-        # Iterate through standard nodes to get blob data for 1/2 columns
+        # Iterate through standard nodes 1-3 to get blob data for 1/2 columns
         for node in self.main_nodes[1:4]:
             rcv_data = self.get_data_range(node, to_app_id(1), to_index(0), to_index(5))
             rcv_data_json = json.dumps(rcv_data)
@@ -34,6 +32,29 @@ class TestDataIntegrity(StepsDataAvailability):
 
         logger.info(f"Dispersed data received by : {test_results}")
 
+    @pytest.mark.usefixtures("setup_4_node_cluster")
+    def test_da_disperse_retrieve_one_node_stopped(self):
+
+        # Stop nomos node 1 - dispersal and data retrieval should be still possible
+        self.main_nodes[1].stop()
+
+        self.disperse_data(DATA_TO_DISPERSE[1], to_app_id(1), to_index(0))
+        delay(5)
+        test_results = []
+        # Iterate through standard nodes 2-3 to get blob data for 1/2 columns
+        for node in self.main_nodes[2:4]:
+            rcv_data = self.get_data_range(node, to_app_id(1), to_index(0), to_index(5))
+            rcv_data_json = json.dumps(rcv_data)
+
+            reconstructed_data = NomosCli(command="reconstruct").run(input_values=[rcv_data_json])
+
+            if DATA_TO_DISPERSE[1] == reconstructed_data:
+                test_results.append(node.name())
+
+        assert len(test_results) > 0, "Dispersed data were not received by any node"
+
+        logger.info(f"Dispersed data received by : {test_results}")
+
     @pytest.mark.usefixtures("setup_2_node_cluster")
     def test_da_sampling_determines_data_presence(self):
         self.disperse_data(DATA_TO_DISPERSE[1], to_app_id(1), to_index(0))
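Both integrity tests follow the same collect-and-assert pattern: query each live node, reconstruct, and record the nodes whose data round-trips. A dependency-free sketch of that pattern (names are illustrative, not taken from the suite):

    def nodes_with_valid_reconstruction(nodes, fetch, reconstruct, original):
        # Generic form of the loop above: keep only the nodes whose fetched blob
        # data round-trips back to the original payload after reconstruction.
        passing = []
        for node in nodes:
            if reconstruct(fetch(node)) == original:
                passing.append(node)
        return passing

    # e.g. assert nodes_with_valid_reconstruction(nodes, get_blob, rebuild, payload), "no node returned valid data"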
@@ -44,3 +65,10 @@ class TestDataIntegrity(StepsDataAvailability):
         decoded_data = NomosCli(command="reconstruct").run(input_values=[rcv_data_json], decode_only=True)
 
         assert DATA_TO_DISPERSE[1] == decoded_data, "Retrieved data are not same with original data"
+
+    @pytest.mark.usefixtures("setup_2_node_cluster")
+    def test_da_disperse_empty_data(self):
+        empty_data = b""
+        response = self.disperse_data(empty_data, to_app_id(1), to_index(0), utf8=False, padding=False, timeout_duration=0)
+
+        assert response.status_code == 400, "Dispersal of empty data should be rejected as bad request"
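The new negative test expects disperse_data to hand back the raw HTTP response so the caller can assert on the status code; at the HTTP layer the expectation is simply a 400 from the dispersal endpoint. A minimal illustration of that pattern using requests (helper name and URL are hypothetical, not the suite's API):

    import requests

    def disperse_raw(url: str, payload: bytes) -> requests.Response:
        # Return the response unchecked so the caller can assert on the status code
        return requests.post(url, data=payload, timeout=5)

    # resp = disperse_raw("http://localhost:18080/disperse", b"")  # hypothetical endpoint
    # assert resp.status_code == 400, "empty payload should be rejected"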
@@ -1,7 +1,7 @@
 import pytest
 import psutil
 
-from src.libs.common import delay, to_app_id, to_index, generate_random_bytes
+from src.libs.common import delay, to_app_id, to_index, generate_random_bytes, generate_text_data
 from src.libs.custom_logger import get_custom_logger
 from src.steps.da import StepsDataAvailability
 from src.test_data import DATA_TO_DISPERSE
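generate_text_data itself is not part of this diff; a plausible stand-in, offered only as an assumption about its shape, is a helper that returns printable text of the requested length:

    import random
    import string

    def generate_text_data(length: int) -> str:
        # Hypothetical stand-in: random printable text of `length` characters
        return "".join(random.choices(string.ascii_letters + string.digits, k=length))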
@@ -74,3 +74,36 @@ class TestNetworkingPrivacy(StepsDataAvailability):
         overhead = (consumed - data_sent) / data_sent
 
         assert overhead < 400, "Dispersal overhead is too high"
+
+    @pytest.mark.usefixtures("setup_4_node_cluster")
+    def test_consumed_bandwidth_large_data_dispersal(self):
+        net_io = psutil.net_io_counters()
+        prev_total = net_io.bytes_sent + net_io.bytes_recv
+
+        data_to_disperse = generate_text_data(2048) # ~10kB
+
+        successful_dispersals = 0
+        for i in range(20):
+            try:
+                self.disperse_data(data_to_disperse, to_app_id(10), to_index(0))
+                successful_dispersals += 1
+            except Exception as ex:
+                logger.warning(f"Dispersal #{i} was not successful with error {ex}")
+
+            if successful_dispersals == 10:
+                break
+
+            delay(0.1)
+
+        net_io = psutil.net_io_counters()
+        curr_total = net_io.bytes_sent + net_io.bytes_recv
+
+        consumed = curr_total - prev_total
+
+        assert successful_dispersals == 10, "Unable to finish 10 successful dispersals"
+
+        data_sent = 2 * successful_dispersals * len(data_to_disperse)
+        overhead = (consumed - data_sent) / data_sent
+
+        # Large data should have less transfer overhead
+        assert overhead < 300, "Dispersal overhead is too high"
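The overhead metric compares actual interface traffic (the psutil byte-counter delta) against the logical payload volume, which the test counts twice, presumably to account for each dispersal being both sent and received on the same host. A worked example of the arithmetic with illustrative numbers:

    payload = 2048                        # illustrative payload size in bytes
    dispersals = 10
    data_sent = 2 * dispersals * payload  # 40_960 logical bytes (sent + received)
    consumed = 9_000_000                  # illustrative delta of the psutil byte counters
    overhead = (consumed - data_sent) / data_sent
    print(round(overhead, 1))             # 218.7, which would satisfy the < 300 bound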