mirror of https://github.com/logos-blockchain/logos-blockchain-e2e-tests.git
fix: introduce CONSENSUS_SLOT_TIME for delay
parent 006d091253
commit e2def60741
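The diff below swaps the hard-coded delay(5) pauses used after cluster setup and after data dispersal for a single configurable value, CONSENSUS_SLOT_TIME (default 5 seconds), so the wait can be tuned to the consensus slot length of the network under test.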
@@ -41,3 +41,4 @@ GATEWAY = get_env_var("GATEWAY", "172.19.0.1")
 RUNNING_IN_CI = get_env_var("CI")
 API_REQUEST_TIMEOUT = get_env_var("API_REQUEST_TIMEOUT", 20)
 CHECK_LOG_ERRORS = get_env_var("CHECK_LOG_ERRORS", False)
+CONSENSUS_SLOT_TIME = get_env_var("CONSENSUS_SLOT_TIME", 5)
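For orientation, a minimal sketch of the pattern a helper like get_env_var usually follows; the real helper lives in src/env_vars.py and may differ (it may cast or log values), so treat this as an assumption rather than the repository's code:

    import os

    def get_env_var(name, default=None):
        # Assumed behaviour: prefer the value from the environment, fall back to the default.
        return os.environ.get(name, default)

    # The knob added by this commit: seconds to wait for one consensus slot.
    CONSENSUS_SLOT_TIME = get_env_var("CONSENSUS_SLOT_TIME", 5)

Overriding CONSENSUS_SLOT_TIME in the environment before the test run would then change every delay introduced below at once.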
@@ -4,7 +4,7 @@ import os
 import pytest
 
 from src.client.proxy_client import ProxyClient
-from src.env_vars import CFGSYNC, NOMOS, NOMOS_EXECUTOR
+from src.env_vars import CFGSYNC, NOMOS, NOMOS_EXECUTOR, CONSENSUS_SLOT_TIME
 from src.libs.common import delay
 from src.libs.custom_logger import get_custom_logger
 from src.node.nomos_node import NomosNode
@@ -72,7 +72,7 @@ class StepsCommon:
             logger.error(f"REST service did not become ready in time: {ex}")
             raise
 
-        delay(5)
+        delay(CONSENSUS_SLOT_TIME)
 
     @pytest.fixture(scope="function")
     def setup_4_node_cluster(self, request):
@@ -97,7 +97,7 @@ class StepsCommon:
             logger.error(f"REST service did not become ready in time: {ex}")
             raise
 
-        delay(5)
+        delay(CONSENSUS_SLOT_TIME)
 
     @pytest.fixture(scope="function")
     def setup_proxy_clients(self, request):
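Both cluster-setup fixtures now pause for one consensus slot once the REST services report ready. delay comes from src.libs.common; a minimal sketch of what such a helper presumably is, assuming it is a plain wrapper around time.sleep:

    import time

    def delay(seconds):
        # Assumed shape: block the test for the given number of seconds.
        time.sleep(seconds)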
@@ -2,6 +2,7 @@ import json
 import pytest
 
 from src.client.nomos_cli import NomosCli
+from src.env_vars import CONSENSUS_SLOT_TIME
 from src.libs.common import delay, to_app_id, to_index
 from src.libs.custom_logger import get_custom_logger
 from src.steps.da import StepsDataAvailability
@@ -16,7 +17,7 @@ class TestDataIntegrity(StepsDataAvailability):
     @pytest.mark.usefixtures("setup_4_node_cluster")
     def test_da_identify_retrieve_missing_columns(self):
         self.disperse_data(DATA_TO_DISPERSE[1], to_app_id(1), to_index(0))
-        delay(5)
+        delay(CONSENSUS_SLOT_TIME)
         test_results = []
         # Iterate through standard nodes 1-3 to get blob data for 1/2 columns
         for node in self.main_nodes[1:4]:
@@ -39,7 +40,7 @@ class TestDataIntegrity(StepsDataAvailability):
         self.main_nodes[1].stop()
 
         self.disperse_data(DATA_TO_DISPERSE[1], to_app_id(1), to_index(0))
-        delay(5)
+        delay(CONSENSUS_SLOT_TIME)
         test_results = []
         # Iterate through standard nodes 2-3 to get blob data for 1/2 columns
         for node in self.main_nodes[2:4]:
@@ -58,7 +59,7 @@ class TestDataIntegrity(StepsDataAvailability):
     @pytest.mark.usefixtures("setup_2_node_cluster")
     def test_da_sampling_determines_data_presence(self):
         self.disperse_data(DATA_TO_DISPERSE[1], to_app_id(1), to_index(0))
-        delay(5)
+        delay(CONSENSUS_SLOT_TIME)
         rcv_data = self.get_data_range(self.node2, to_app_id(1), to_index(0), to_index(5))
         rcv_data_json = json.dumps(rcv_data)
@@ -3,6 +3,7 @@ import time
 
 import pytest
 
+from src.env_vars import CONSENSUS_SLOT_TIME
 from src.libs.common import to_app_id, to_index, delay
 from src.steps.da import StepsDataAvailability, logger
 from src.test_data import DATA_TO_DISPERSE
@@ -44,7 +45,7 @@ class TestHighLoadDos(StepsDataAvailability):
         response = self.disperse_data(DATA_TO_DISPERSE[7], to_app_id(1), to_index(0))
         assert response.status_code == 200, "Initial dispersal was not successful"
 
-        delay(5)
+        delay(CONSENSUS_SLOT_TIME)
         start_time = time.time()
 
         while time.time() - start_time < timeout:
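In this test the slot-length wait only covers the initial dispersal; the load phase that follows bounds itself with its own elapsed-time check. Purely as an illustration of that pattern (wait_until, check_ready and the parameters are placeholders, not code from this repository):

    import time

    def wait_until(check_ready, timeout, interval):
        # Poll check_ready until it succeeds or the timeout elapses.
        start_time = time.time()
        while time.time() - start_time < timeout:
            if check_ready():
                return True
            time.sleep(interval)
        return False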
@@ -1,5 +1,6 @@
 import pytest
 
+from src.env_vars import CONSENSUS_SLOT_TIME
 from src.libs.common import delay, generate_text_data, to_app_id, to_index
 from src.libs.custom_logger import get_custom_logger
 from src.steps.da import StepsDataAvailability
@@ -29,7 +30,7 @@ class TestLargeVolume(StepsDataAvailability):
 
         assert response.status_code == 200
 
-        delay(5)
+        delay(CONSENSUS_SLOT_TIME)
         self.get_data_range(self.node2, to_app_id(1), to_index(0), to_index(5), timeout_duration=20, interval=1)
 
     @pytest.mark.usefixtures("setup_2_node_cluster")
@@ -1,5 +1,6 @@
 import pytest
 
+from src.env_vars import CONSENSUS_SLOT_TIME
 from src.libs.common import to_app_id, to_index, delay
 from src.libs.custom_logger import get_custom_logger
 from src.steps.consensus import StepsConsensus
@@ -47,7 +48,7 @@ class TestApiCompatibility(StepsDataAvailability, StepsConsensus, StepsStorage):
     @pytest.mark.usefixtures("setup_2_node_cluster")
     def test_da_consensus_compatibility(self):
         self.disperse_data(DATA_TO_DISPERSE[2], to_app_id(1), to_index(0))
-        delay(5)
+        delay(CONSENSUS_SLOT_TIME)
         index_shares = self.get_data_range(self.node2, to_app_id(1), to_index(0), to_index(5))
         column_commitments, rows_commitments = extract_commitments(index_shares)
@@ -75,7 +76,7 @@ class TestApiCompatibility(StepsDataAvailability, StepsConsensus, StepsStorage):
     @pytest.mark.usefixtures("setup_4_node_cluster")
     def test_da_cross_nodes_consensus_compatibility(self):
         self.disperse_data(DATA_TO_DISPERSE[2], to_app_id(1), to_index(0))
-        delay(5)
+        delay(CONSENSUS_SLOT_TIME)
         index_shares = self.get_data_range(self.node2, to_app_id(1), to_index(0), to_index(5))
         column_commitments, rows_commitments = extract_commitments(index_shares)
@@ -1,5 +1,6 @@
 import pytest
 
+from src.env_vars import CONSENSUS_SLOT_TIME
 from src.libs.common import to_app_id, to_index, delay, to_blob_id
 from src.libs.custom_logger import get_custom_logger
 from src.steps.consensus import StepsConsensus
@@ -22,7 +23,7 @@ class TestInteractionDataFlow(StepsDataAvailability, StepsMempool):
     def test_da_dispersal_integration(self):
 
         self.disperse_data(DATA_TO_DISPERSE[3], to_app_id(1), to_index(0))
-        delay(5)
+        delay(CONSENSUS_SLOT_TIME)
         index_shares = self.get_data_range(self.node2, to_app_id(1), to_index(0), to_index(8))
         da_shares = extract_da_shares(index_shares)
@@ -33,19 +34,19 @@ class TestInteractionDataFlow(StepsDataAvailability, StepsMempool):
         self.add_publish_share(self.node2, modified_da_share)
 
+        delay(CONSENSUS_SLOT_TIME)
+
         index_shares = self.get_data_range(self.node2, to_app_id(1), to_index(0), to_index(8))
         da_shares = extract_da_shares(index_shares)
 
-        delay(5)
-
         assert len(da_shares) < 3, "Modified da_share should not get published"
 
     @pytest.mark.usefixtures("setup_2_node_cluster")
     def test_da_mempool_interaction(self):
         self.disperse_data(DATA_TO_DISPERSE[3], to_app_id(1), to_index(0))
         self.disperse_data(DATA_TO_DISPERSE[4], to_app_id(1), to_index(0))
         self.add_dispersed_blob_info(self.node2, to_blob_id(10), to_app_id(1), to_index(0))
 
-        delay(5)
+        delay(CONSENSUS_SLOT_TIME)
 
         index_shares = self.get_data_range(self.node2, to_app_id(1), to_index(0), to_index(5))
         da_shares = extract_da_shares(index_shares)
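With these changes every post-dispersal pause in the touched tests goes through CONSENSUS_SLOT_TIME, so pointing the suite at a network with a longer slot should only require exporting that variable before the run instead of editing each delay call.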