mirror of
https://github.com/logos-blockchain/logos-blockchain-e2e-tests.git
synced 2026-05-05 00:53:10 +00:00
fix: refactor disperse_data and get_data_range
- make check log errors optional
This commit is contained in:
parent
f95f57a7ce
commit
463875f7bf
@ -14,6 +14,11 @@ pre-commit install
|
|||||||
(optional) Overwrite default vars from src/env_vars.py via env vars or by adding a .env file
|
(optional) Overwrite default vars from src/env_vars.py via env vars or by adding a .env file
|
||||||
pytest
|
pytest
|
||||||
```
|
```
|
||||||
|
Set an optional environment variable to search logs for errors after each test:
|
||||||
|
```shell
|
||||||
|
export CHECK_LOG_ERRORS=True
|
||||||
|
```
|
||||||
|
|
||||||
|
|
||||||
## License
|
## License
|
||||||
|
|
||||||
|
|||||||
@ -40,3 +40,4 @@ IP_RANGE = get_env_var("IP_RANGE", "172.19.0.0/24")
|
|||||||
GATEWAY = get_env_var("GATEWAY", "172.19.0.1")
|
GATEWAY = get_env_var("GATEWAY", "172.19.0.1")
|
||||||
RUNNING_IN_CI = get_env_var("CI")
|
RUNNING_IN_CI = get_env_var("CI")
|
||||||
API_REQUEST_TIMEOUT = get_env_var("API_REQUEST_TIMEOUT", 20)
|
API_REQUEST_TIMEOUT = get_env_var("API_REQUEST_TIMEOUT", 20)
|
||||||
|
CHECK_LOG_ERRORS = get_env_var("CHECK_LOG_ERRORS", False)
|
||||||
|
|||||||
@ -45,15 +45,21 @@ class StepsDataAvailability(StepsCommon):
|
|||||||
return executor
|
return executor
|
||||||
|
|
||||||
@allure.step
|
@allure.step
|
||||||
def disperse_data(self, data, app_id, index, client_node=None, timeout_duration=65, utf8=True, padding=True, send_invalid=False):
|
def disperse_data(self, data, app_id, index, client_node=None, **kwargs):
|
||||||
|
|
||||||
|
timeout_duration = kwargs.get("timeout_duration", 65)
|
||||||
|
utf8 = kwargs.get("utf8", True)
|
||||||
|
padding = kwargs.get("padding", True)
|
||||||
|
send_invalid = kwargs.get("send_invalid", False)
|
||||||
|
|
||||||
|
request = prepare_dispersal_request(data, app_id, index, utf8=utf8, padding=padding)
|
||||||
|
|
||||||
@retry(stop=stop_after_delay(timeout_duration), wait=wait_fixed(0.1), reraise=True)
|
@retry(stop=stop_after_delay(timeout_duration), wait=wait_fixed(0.1), reraise=True)
|
||||||
def disperse(my_self=self):
|
def disperse(my_self=self):
|
||||||
response = []
|
response = []
|
||||||
request = prepare_dispersal_request(data, app_id, index, utf8=utf8, padding=padding)
|
|
||||||
executor = my_self.find_executor_node()
|
|
||||||
|
|
||||||
try:
|
try:
|
||||||
if client_node is None:
|
if client_node is None:
|
||||||
|
executor = my_self.find_executor_node()
|
||||||
response = executor.send_dispersal_request(request)
|
response = executor.send_dispersal_request(request)
|
||||||
else:
|
else:
|
||||||
response = client_node.send_dispersal_request(request, send_invalid=send_invalid)
|
response = client_node.send_dispersal_request(request, send_invalid=send_invalid)
|
||||||
@ -67,11 +73,16 @@ class StepsDataAvailability(StepsCommon):
|
|||||||
return disperse()
|
return disperse()
|
||||||
|
|
||||||
@allure.step
|
@allure.step
|
||||||
def get_data_range(self, node, app_id, start, end, client_node=None, timeout_duration=45, send_invalid=False):
|
def get_data_range(self, node, app_id, start, end, client_node=None, **kwargs):
|
||||||
|
|
||||||
|
timeout_duration = kwargs.get("timeout_duration", 65)
|
||||||
|
send_invalid = kwargs.get("send_invalid", False)
|
||||||
|
|
||||||
|
query = prepare_get_range_request(app_id, start, end)
|
||||||
|
|
||||||
@retry(stop=stop_after_delay(timeout_duration), wait=wait_fixed(0.1), reraise=True)
|
@retry(stop=stop_after_delay(timeout_duration), wait=wait_fixed(0.1), reraise=True)
|
||||||
def get_range():
|
def get_range():
|
||||||
response = []
|
response = []
|
||||||
query = prepare_get_range_request(app_id, start, end)
|
|
||||||
try:
|
try:
|
||||||
if client_node is None:
|
if client_node is None:
|
||||||
response = node.send_get_data_range_request(query)
|
response = node.send_get_data_range_request(query)
|
||||||
|
|||||||
@ -3,6 +3,7 @@ import inspect
|
|||||||
import glob
|
import glob
|
||||||
from concurrent.futures import ThreadPoolExecutor, as_completed
|
from concurrent.futures import ThreadPoolExecutor, as_completed
|
||||||
|
|
||||||
|
from src.env_vars import CHECK_LOG_ERRORS
|
||||||
from src.libs.custom_logger import get_custom_logger
|
from src.libs.custom_logger import get_custom_logger
|
||||||
import os
|
import os
|
||||||
import pytest
|
import pytest
|
||||||
@ -99,6 +100,7 @@ def close_open_nodes(attach_logs_on_fail):
|
|||||||
@pytest.fixture(scope="function", autouse=True)
|
@pytest.fixture(scope="function", autouse=True)
|
||||||
def check_nomos_log_errors(request):
|
def check_nomos_log_errors(request):
|
||||||
yield
|
yield
|
||||||
logger.debug(f"Running fixture teardown: {inspect.currentframe().f_code.co_name}")
|
if CHECK_LOG_ERRORS.lower() == "true" or CHECK_LOG_ERRORS.lower() == "yes":
|
||||||
for node in DS.nomos_nodes:
|
logger.debug(f"Running fixture teardown: {inspect.currentframe().f_code.co_name}")
|
||||||
node.check_nomos_log_errors()
|
for node in DS.nomos_nodes:
|
||||||
|
node.check_nomos_log_errors()
|
||||||
|
|||||||
Loading…
x
Reference in New Issue
Block a user