import inspect
import math
from time import time

import pytest
import allure
from tenacity import retry, stop_after_delay, wait_fixed

from src.libs.custom_logger import get_custom_logger
from src.libs.common import to_base64, delay
from src.data_classes import message_rpc_response_schema
from src.env_vars import NODE_1, NODE_2, ADDITIONAL_NODES, NODEKEY, RUNNING_IN_CI
from src.node.waku_node import WakuNode

logger = get_custom_logger(__name__)


class StepsRelay:
    test_pubsub_topic = "/waku/2/rs/18/1"
    test_content_topic = "/test/1/waku-relay/proto"
    test_payload = "Relay works!!"
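
    # Boots the two main relay nodes: node1 acts as the discv5 bootstrap node and node2
    # connects to it through node1's ENR URI.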
    @pytest.fixture(scope="function")
    def setup_main_relay_nodes(self, request):
        logger.debug(f"Running fixture setup: {inspect.currentframe().f_code.co_name}")
        self.node1 = WakuNode(NODE_1, f"node1_{request.cls.test_id}")
        self.node1.start(relay="true", discv5_discovery="true", peer_exchange="true", nodekey=NODEKEY)
        self.enr_uri = self.node1.info()["enrUri"]
        self.node2 = WakuNode(NODE_2, f"node2_{request.cls.test_id}")
        self.node2.start(relay="true", discv5_discovery="true", discv5_bootstrap_node=self.enr_uri, peer_exchange="true")
        self.main_nodes = [self.node1, self.node2]
        self.optional_nodes = []
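
    # Boots any extra relay nodes listed in ADDITIONAL_NODES (comma separated), all
    # bootstrapping from node1's ENR; skips the test if the variable is empty.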
    @pytest.fixture(scope="function")
    def setup_optional_relay_nodes(self, request):
        logger.debug(f"Running fixture setup: {inspect.currentframe().f_code.co_name}")
        if ADDITIONAL_NODES:
            nodes = [node.strip() for node in ADDITIONAL_NODES.split(",")]
        else:
            pytest.skip("ADDITIONAL_NODES is empty, cannot run test")
        for index, node_image in enumerate(nodes):
            node = WakuNode(node_image, f"node{index}_{request.cls.test_id}")
            node.start(relay="true", discv5_discovery="true", discv5_bootstrap_node=self.enr_uri, peer_exchange="true")
            self.optional_nodes.append(node)
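
    # Subscribes the two main relay nodes to the default test pubsub topic.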
    @pytest.fixture(scope="function")
    def subscribe_main_relay_nodes(self):
        logger.debug(f"Running fixture setup: {inspect.currentframe().f_code.co_name}")
        self.ensure_subscriptions_on_nodes(self.main_nodes, [self.test_pubsub_topic])
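
    # Subscribes the optional relay nodes to the default test pubsub topic.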
    @pytest.fixture(scope="function")
    def subscribe_optional_relay_nodes(self):
        logger.debug(f"Running fixture setup: {inspect.currentframe().f_code.co_name}")
        self.ensure_subscriptions_on_nodes(self.optional_nodes, [self.test_pubsub_topic])
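
    # Publishes a warm-up message and waits until it propagates, so tests start from an
    # already-working relay mesh; raises TimeoutError if the mesh never forms.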
    @pytest.fixture(scope="function")
    def relay_warm_up(self):
        try:
            self.wait_for_published_message_to_reach_peer()
            logger.info("WARM UP successful!!")
        except Exception as ex:
            raise TimeoutError(f"WARM UP FAILED WITH: {ex}")

    # This method should be used only in tests that rely on the relay_warm_up fixture;
    # otherwise use wait_for_published_message_to_reach_peer.
    @allure.step
    def check_published_message_reaches_peer(self, message, pubsub_topic=None, message_propagation_delay=0.1, sender=None, peer_list=None):
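        # Publishes `message` from `sender` (node1 by default), waits `message_propagation_delay`
        # seconds, then asserts that every node in `peer_list` stored a matching message.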
        if not sender:
            sender = self.node1
        if not peer_list:
            peer_list = self.main_nodes + self.optional_nodes
        sender.send_message(message, pubsub_topic or self.test_pubsub_topic)
        delay(message_propagation_delay)
        for index, peer in enumerate(peer_list):
            logger.debug(f"Checking that peer NODE_{index + 1}:{peer.image} can find the published message")
            get_messages_response = peer.get_messages(pubsub_topic or self.test_pubsub_topic)
            assert get_messages_response, f"Peer NODE_{index + 1}:{peer.image} couldn't find any messages"
            received_message = message_rpc_response_schema.load(get_messages_response[0])
            self.assert_received_message(message, received_message)

    # A much larger timeout is needed in CI because tests run in parallel there and the CI machine is slower.
    @allure.step
    def wait_for_published_message_to_reach_peer(
        self, timeout_duration=120 if RUNNING_IN_CI else 20, time_between_retries=1, sender=None, peer_list=None
    ):
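        # Retries publishing a fresh message until it reaches the peers or the timeout expires.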
        @retry(stop=stop_after_delay(timeout_duration), wait=wait_fixed(time_between_retries), reraise=True)
        def check_peer_connection():
            message = self.create_message()
            self.check_published_message_reaches_peer(message, sender=sender, peer_list=peer_list)

        check_peer_connection()
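
    # Field-by-field comparison between the published message and the one returned by a peer;
    # optional fields are checked only when they were present in the sent message.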
    @allure.step
    def assert_received_message(self, sent_message, received_message):
        def assert_fail_message(field_name):
            return f"Incorrect field: {field_name}. Published: {sent_message[field_name]} Received: {getattr(received_message, field_name)}"

        assert received_message.payload == sent_message["payload"], assert_fail_message("payload")
        assert received_message.contentTopic == sent_message["contentTopic"], assert_fail_message("contentTopic")
        if sent_message.get("timestamp") is not None:
            if isinstance(sent_message["timestamp"], float):
                assert math.isclose(float(received_message.timestamp), sent_message["timestamp"], rel_tol=1e-9), assert_fail_message("timestamp")
            else:
                assert str(received_message.timestamp) == str(sent_message["timestamp"]), assert_fail_message("timestamp")
        if "version" in sent_message:
            assert str(received_message.version) == str(sent_message["version"]), assert_fail_message("version")
        if "meta" in sent_message:
            assert str(received_message.meta) == str(sent_message["meta"]), assert_fail_message("meta")
        if "ephemeral" in sent_message:
            assert str(received_message.ephemeral) == str(sent_message["ephemeral"]), assert_fail_message("ephemeral")
        if "rateLimitProof" in sent_message:
            assert str(received_message.rateLimitProof) == str(sent_message["rateLimitProof"]), assert_fail_message("rateLimitProof")
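
    # Subscribes every node in node_list to each pubsub topic in pubsub_topic_list.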
    @allure.step
    def ensure_subscriptions_on_nodes(self, node_list, pubsub_topic_list):
        for node in node_list:
            node.set_subscriptions(pubsub_topic_list)
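
    # Builds a default relay message; fields can be overridden or extended via keyword arguments.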
    def create_message(self, **kwargs):
        message = {"payload": to_base64(self.test_payload), "contentTopic": self.test_content_topic, "timestamp": int(time() * 1e9)}
        message.update(kwargs)
        return message
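

# Example usage (illustrative sketch only, not part of this module): a test class can mix in
# StepsRelay and request the fixtures above. The class and test names below are made up, and
# `request.cls.test_id` is assumed to be set elsewhere (e.g. by a conftest fixture).
#
#   @pytest.mark.usefixtures("setup_main_relay_nodes", "subscribe_main_relay_nodes", "relay_warm_up")
#   class TestRelayPublish(StepsRelay):
#       def test_publish_reaches_all_peers(self):
#           self.check_published_message_reaches_peer(self.create_message())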