Test/peer connection management (#45)
* test: get peers from peer store * test: add peers to peer store * fix: add go_waku support * test: placeholder for Issue 1549 solution test * test: expect two peers for two protocols/services * test: restart survival with persistent peer store * fix: add issue to skip mark * fix: move Waku related methods to waku_node module * test: full peer store takeover * fix: add kill of Node 1 * fix: remove debug info - skip while waiting on issue 2792 * test: peer store content after node restart * fix: revert nwaku image back to use latest * fix: added delays to let nodes finish discovery * fix: move peer_persistence config steps out of start * fix: NODE1 default back to DEFAULT_GOWAKU * fix: partial refactor for setup_first-second_relay_node * test: daily workflow after refactor * test: shorter name for daily workflow * test: RLN only * test: increase timeout * test: check Sepolia API endpoint * test: run RLN tests in single thread * fix: remove Sepolia API check * fix: skip all RLN tests till registration works * fix: remove test workflow
This commit is contained in:
parent
aab71a92a7
commit
ccad2a12f6
|
@ -1,6 +1,8 @@
|
|||
import errno
|
||||
import json
|
||||
import os
|
||||
import shutil
|
||||
|
||||
import pytest
|
||||
import requests
|
||||
from src.libs.common import delay
|
||||
|
@ -39,6 +41,21 @@ def rln_credential_store_ready(creds_file_path):
|
|||
raise FileNotFoundError(errno.ENOENT, os.strerror(errno.ENOENT), creds_file_path)
|
||||
|
||||
|
||||
def peer_info2multiaddr(peer, is_nwaku=True):
    """Extract the multiaddr string from a peer-info dict.

    nwaku peer info carries a single "multiaddr" entry, while go-waku
    carries a "multiaddrs" list — in that case the first entry is used.
    """
    if not is_nwaku:
        return peer["multiaddrs"][0]
    return peer["multiaddr"]


def peer_info2id(peer, is_nwaku=True):
    """Return the peer ID, i.e. the last path segment of the peer's multiaddr."""
    return peer_info2multiaddr(peer, is_nwaku).rsplit("/", 1)[-1]


def multiaddr2id(multiaddr):
    """Return the peer ID (last path segment) of a raw multiaddr string."""
    return multiaddr.rsplit("/", 1)[-1]
|
||||
|
||||
|
||||
class WakuNode:
|
||||
def __init__(self, docker_image, docker_log_prefix=""):
|
||||
self._image_name = docker_image
|
||||
|
@ -106,6 +123,8 @@ class WakuNode:
|
|||
else:
|
||||
remove_container = True
|
||||
|
||||
kwargs = self.parse_peer_persistence_config(kwargs)
|
||||
|
||||
default_args.update(sanitize_docker_flags(kwargs))
|
||||
|
||||
rln_args, rln_creds_set, keystore_path = self.parse_rln_credentials(default_args, False)
|
||||
|
@ -187,6 +206,18 @@ class WakuNode:
|
|||
self._container = None
|
||||
logger.debug("Container stopped.")
|
||||
|
||||
@retry(stop=stop_after_delay(5), wait=wait_fixed(0.1), reraise=True)
|
||||
def kill(self):
|
||||
if self._container:
|
||||
logger.debug(f"Killing container with id {self._container.short_id}")
|
||||
self._container.kill()
|
||||
try:
|
||||
self._container.remove()
|
||||
except:
|
||||
pass
|
||||
self._container = None
|
||||
logger.debug("Container killed.")
|
||||
|
||||
def restart(self):
|
||||
if self._container:
|
||||
logger.debug(f"Restarting container with id {self._container.short_id}")
|
||||
|
@ -233,6 +264,12 @@ class WakuNode:
|
|||
check_healthy()
|
||||
check_ready()
|
||||
|
||||
def get_id(self):
|
||||
try:
|
||||
return self.info_response["listenAddresses"][0].split("/")[-1]
|
||||
except Exception as ex:
|
||||
raise AttributeError(f"Could not find ID in the info call because of error: {str(ex)}")
|
||||
|
||||
def get_enr_uri(self):
|
||||
try:
|
||||
return self.info_response["enrUri"]
|
||||
|
@ -422,6 +459,25 @@ class WakuNode:
|
|||
|
||||
return rln_args, True, keystore_path
|
||||
|
||||
def parse_peer_persistence_config(self, kwargs):
|
||||
if kwargs.get("peer_persistence") == "true":
|
||||
if self.is_gowaku():
|
||||
kwargs["persist_peers"] = kwargs["peer_persistence"]
|
||||
del kwargs["peer_persistence"]
|
||||
|
||||
cwd = os.getcwd()
|
||||
# Please note, as of now, peerdb is stored directly at / which is not shareable between containers.
|
||||
# Volume related code is usable after https://github.com/waku-org/nwaku/issues/2792 would be resolved.
|
||||
self._volumes.extend(
|
||||
[
|
||||
cwd + "/peerdb" + ":/shared",
|
||||
]
|
||||
)
|
||||
|
||||
shutil.rmtree(cwd + "/peerdb")
|
||||
|
||||
return kwargs
|
||||
|
||||
@property
|
||||
def container(self):
|
||||
return self._container
|
||||
|
|
|
@ -73,6 +73,38 @@ class StepsRelay(StepsCommon):
|
|||
except Exception as ex:
|
||||
raise TimeoutError(f"WARM UP FAILED WITH: {ex}")
|
||||
|
||||
# Refactor candidate
|
||||
@allure.step
|
||||
def setup_first_relay_node(self, **kwargs):
|
||||
self.node1 = WakuNode(NODE_1, f"node1_{self.test_id}")
|
||||
self.node1.start(relay="true", nodekey=NODEKEY, **kwargs)
|
||||
self.enr_uri = self.node1.get_enr_uri()
|
||||
self.multiaddr_with_id = self.node1.get_multiaddr_with_id()
|
||||
self.main_nodes.extend([self.node1])
|
||||
|
||||
# Refactor candidate
|
||||
@allure.step
|
||||
def setup_second_relay_node(self, **kwargs):
|
||||
self.node2 = WakuNode(NODE_2, f"node2_{self.test_id}")
|
||||
self.node2.start(
|
||||
relay="true",
|
||||
discv5_bootstrap_node=self.enr_uri,
|
||||
**kwargs,
|
||||
)
|
||||
self.add_node_peer(self.node2, [self.multiaddr_with_id])
|
||||
self.main_nodes.extend([self.node2])
|
||||
|
||||
@allure.step
|
||||
def setup_third_relay_node(self, **kwargs):
|
||||
self.node3 = WakuNode(NODE_1, f"node3_{self.test_id}")
|
||||
self.node3.start(
|
||||
relay="true",
|
||||
discv5_bootstrap_node=self.enr_uri,
|
||||
**kwargs,
|
||||
)
|
||||
self.add_node_peer(self.node3, [self.multiaddr_with_id])
|
||||
self.optional_nodes.extend([self.node3])
|
||||
|
||||
# this method should be used only for the tests that use the relay_warm_up fixture
|
||||
# otherwise use wait_for_published_message_to_reach_relay_peer
|
||||
@allure.step
|
||||
|
|
|
@ -3,7 +3,6 @@ import inspect
|
|||
import pytest
|
||||
import allure
|
||||
|
||||
from src.node.waku_message import WakuMessage
|
||||
from src.steps.common import StepsCommon
|
||||
from src.test_data import PUBSUB_TOPICS_RLN
|
||||
from src.env_vars import DEFAULT_NWAKU, RLN_CREDENTIALS, NODEKEY, NODE_1, NODE_2, ADDITIONAL_NODES
|
||||
|
@ -104,25 +103,6 @@ class StepsRLN(StepsCommon):
|
|||
self.lightpush_nodes.extend([self.light_push_node2])
|
||||
self.add_node_peer(self.light_push_node2, self.multiaddr_list)
|
||||
|
||||
@allure.step
|
||||
def setup_first_relay_node(self, **kwargs):
|
||||
self.node1 = WakuNode(NODE_1, f"node1_{self.test_id}")
|
||||
self.node1.start(relay="true", nodekey=NODEKEY, **kwargs)
|
||||
self.enr_uri = self.node1.get_enr_uri()
|
||||
self.multiaddr_with_id = self.node1.get_multiaddr_with_id()
|
||||
self.main_nodes.extend([self.node1])
|
||||
|
||||
@allure.step
|
||||
def setup_second_relay_node(self, **kwargs):
|
||||
self.node2 = WakuNode(NODE_2, f"node2_{self.test_id}")
|
||||
self.node2.start(
|
||||
relay="true",
|
||||
discv5_bootstrap_node=self.enr_uri,
|
||||
**kwargs,
|
||||
)
|
||||
self.add_node_peer(self.node2, [self.multiaddr_with_id])
|
||||
self.main_nodes.extend([self.node2])
|
||||
|
||||
@allure.step
|
||||
def register_rln_single_node(self, **kwargs):
|
||||
logger.debug("Registering RLN credentials for single node")
|
||||
|
|
|
@ -0,0 +1,106 @@
|
|||
import pytest
|
||||
|
||||
from src.env_vars import NODE_1, NODE_2
|
||||
from src.libs.common import delay
|
||||
from src.libs.custom_logger import get_custom_logger
|
||||
from src.node.waku_node import peer_info2id, peer_info2multiaddr, multiaddr2id
|
||||
from src.steps.relay import StepsRelay
|
||||
from src.steps.store import StepsStore
|
||||
|
||||
logger = get_custom_logger(__name__)
|
||||
|
||||
|
||||
class TestPeerStore(StepsRelay, StepsStore):
    """Peer-store behaviour: discovery population, manual peer addition, persistence."""

    @pytest.mark.usefixtures("setup_main_relay_nodes", "setup_optional_relay_nodes")
    def test_get_peers(self):
        # 5 nodes total: 2 main + 3 optional, all discv5-bootstrapped off Node 1.
        nodes = [self.node1, self.node2]
        nodes.extend(self.optional_nodes)
        delay(1)  # let discovery populate the peer stores
        ids = [node.get_id() for node in nodes]

        for i, node in enumerate(nodes):
            others = [peer_info2id(peer_info, node.is_nwaku()) for peer_info in node.get_peers()]
            # Node 1 bootstraps everyone so it should know all 4 peers; the
            # others must know at least their bootstrap node.
            assert (i == 0 and len(others) == 4) or (i > 0 and len(others) >= 1), f"Some nodes missing in the peer store of Node ID {ids[i]}"

    @pytest.mark.usefixtures("setup_main_relay_nodes", "setup_optional_relay_nodes")
    def test_add_peers(self):
        nodes = [self.node1, self.node2]
        nodes.extend(self.optional_nodes)
        delay(1)
        # Union of the multiaddrs known to the two main nodes should cover all 5 nodes.
        peers_multiaddr = set()
        for i in range(2):
            for peer_info in nodes[i].get_peers():
                peers_multiaddr.add(peer_info2multiaddr(peer_info, nodes[i].is_nwaku()))

        assert len(peers_multiaddr) == 5, "Exactly 5 multi addresses are expected"

        # Add peers one by one excluding self for Nodes 2-5
        for i in range(1, 5):
            own_id = nodes[i].get_id()  # hoisted: invariant across the inner loop
            for peer in list(peers_multiaddr):
                if own_id != multiaddr2id(peer):
                    try:
                        if nodes[i].is_nwaku():
                            nodes[i].add_peers([peer])
                        else:
                            # go-waku expects a richer peer-info payload.
                            peer_info = {"multiaddr": peer, "shards": [0], "protocols": ["/vac/waku/relay/2.0.0"]}
                            nodes[i].add_peers(peer_info)
                    except Exception as ex:
                        logger.error(f"Failed to add peer to Node {i} peer store: {ex}")
                        raise

    @pytest.mark.skip(reason="waiting for https://github.com/waku-org/nwaku/issues/1549 resolution")
    def test_get_peers_two_protocols(self):
        # Both nodes run store; only the publishing node runs relay.
        self.setup_first_publishing_node(store="true", relay="true")
        self.setup_first_store_node(store="true", relay="false")
        delay(1)
        node1_peers = self.publishing_node1.get_peers()
        node2_peers = self.store_node1.get_peers()
        logger.debug(f"Node 1 connected peers {node1_peers}")
        logger.debug(f"Node 2 connected peers {node2_peers}")

        assert len(node1_peers) == 2 and len(node2_peers) == 2, "Some nodes and/or their services are missing"

    @pytest.mark.skip(reason="pending on https://github.com/waku-org/nwaku/issues/2792")
    @pytest.mark.skipif("go-waku" in (NODE_1 + NODE_2), reason="Test works only with nwaku")
    def test_use_persistent_storage_survive_restart(self):
        self.setup_first_relay_node(peer_persistence="true")
        self.setup_second_relay_node()
        delay(1)
        node1_peers = self.node1.get_peers()
        node2_peers = self.node2.get_peers()
        node1_id = self.node1.get_id()
        node2_id = self.node2.get_id()
        # Each node should have the other in its peer store.
        assert node1_id == peer_info2id(node2_peers[0], self.node2.is_nwaku())
        assert node2_id == peer_info2id(node1_peers[0], self.node1.is_nwaku())

        # Node 3 takes over Node 1: it shares the persistent peer store, so it
        # should come up already knowing Node 2.
        self.setup_third_relay_node(peer_persistence="true")
        self.node1.kill()
        node2_peers = self.node2.get_peers()
        node3_peers = self.node3.get_peers()
        assert node1_id == peer_info2id(node2_peers[0], self.node2.is_nwaku())
        assert node2_id == peer_info2id(node3_peers[0], self.node3.is_nwaku())

    @pytest.mark.skip(reason="waiting for https://github.com/waku-org/nwaku/issues/2592 resolution")
    @pytest.mark.skipif("go-waku" in (NODE_1 + NODE_2), reason="Test works only with nwaku")
    def test_peer_store_content_after_node2_restarts(self):
        self.setup_first_relay_node()
        self.setup_second_relay_node()
        delay(1)
        node1_peers = self.node1.get_peers()
        node2_peers = self.node2.get_peers()
        assert len(node1_peers) == len(node2_peers), "Nodes should have each other in the peer store"
        # Restart Node 2 and verify the peer stores converge again.
        self.node2.restart()
        self.node2.ensure_ready()
        delay(1)
        node1_peers = self.node1.get_peers()
        node2_peers = self.node2.get_peers()
        assert len(node1_peers) == len(node2_peers), "Nodes should have each other in the peer store"
|
|
@ -16,6 +16,7 @@ logger = get_custom_logger(__name__)
|
|||
@pytest.mark.xdist_group(name="RLN serial tests")
|
||||
@pytest.mark.usefixtures("register_main_rln_relay_nodes")
|
||||
@pytest.mark.skipif("go-waku" in (NODE_1 + NODE_2), reason="Test works only with nwaku")
|
||||
@pytest.mark.skip(reason="waiting to resolve registration https://github.com/waku-org/nwaku/issues/2837")
|
||||
class TestRelayRLN(StepsRLN, StepsRelay):
|
||||
def test_valid_payloads_at_slow_rate(self):
|
||||
self.setup_main_rln_relay_nodes()
|
||||
|
|
Loading…
Reference in New Issue