Mirror of https://github.com/logos-messaging/logos-messaging-interop-tests.git (synced 2026-01-07 00:13:06 +00:00)
Test/peer exchange (#51)
* test: establish basic node setup for pe
* test: debug peer retrieval error with nwaku
* fix: disable discovery v5 on the requester node
* fix: assert node3 peer store to exact content
* test: get peers for filter node
* test: get peers after node1 was restarted
* fix: allow tests to run only when Node2 is Go-waku
* fix: revert NODE_1 to its original default
parent 07f4e1f438
commit 6aa7716793
@@ -18,7 +18,7 @@ DEFAULT_NWAKU = "harbor.status.im/wakuorg/nwaku:latest"
 DEFAULT_GOWAKU = "harbor.status.im/wakuorg/go-waku:latest"
 NODE_1 = get_env_var("NODE_1", DEFAULT_GOWAKU)
 NODE_2 = get_env_var("NODE_2", DEFAULT_NWAKU)
-ADDITIONAL_NODES = get_env_var("ADDITIONAL_NODES", f"{DEFAULT_NWAKU},{DEFAULT_GOWAKU},{DEFAULT_NWAKU}")
+ADDITIONAL_NODES = get_env_var("ADDITIONAL_NODES", f"{DEFAULT_NWAKU},{DEFAULT_NWAKU},{DEFAULT_NWAKU}")
 # more nodes need to follow the NODE_X pattern
 DOCKER_LOG_DIR = get_env_var("DOCKER_LOG_DIR", "./log/docker")
 NETWORK_NAME = get_env_var("NETWORK_NAME", "waku")
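This hunk (presumably in src/env_vars.py, given the later import from src.env_vars) changes only the ADDITIONAL_NODES default: the middle image switches from go-waku to nwaku, while NODE_1 keeps its original go-waku default, matching the "revert NODE_1 to its original default" note in the commit message. The values are read from the environment with a fallback via get_env_var; the sketch below is an assumed minimal implementation for illustration, not the repository's actual helper.

import os

# Minimal sketch of a get_env_var helper, assuming it simply falls back to a
# default when the variable is unset; the real helper may also log the value.
def get_env_var(var_name, default=None):
    return os.environ.get(var_name, default)

# The peer-exchange tests below are skipped unless NODE_2 points at a go-waku
# image, so a run exercising them would override the nwaku default, e.g.:
#   NODE_2=harbor.status.im/wakuorg/go-waku:latest pytest tests/peer_exchange
NODE_2 = get_env_var("NODE_2", "harbor.status.im/wakuorg/nwaku:latest")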
src/steps/peer_exchange.py (new file, 30 lines)
@@ -0,0 +1,30 @@

from uuid import uuid4

from src.env_vars import NODE_2
from src.node.waku_node import WakuNode

import allure

from src.steps.relay import StepsRelay


class StepsPeerExchange(StepsRelay):
    responder_multiaddr = ""

    @allure.step
    def setup_third_node_as_peer_exchange_requester(self, **kwargs):
        self.node3 = WakuNode(NODE_2, f"node3_{self.test_id}")
        self.node3.start(
            relay="false",
            peer_exchange_node=self.responder_multiaddr,
            **kwargs,
        )
        self.add_node_peer(self.node3, [self.multiaddr_with_id])
        self.main_nodes.extend([self.node3])

    @allure.step
    def setup_fourth_node_as_filter(self, **kwargs):
        self.node4 = WakuNode(NODE_2, f"node4_{self.test_id}")
        self.node4.start(relay="false", **kwargs)
        self.add_node_peer(self.node4, [self.multiaddr_with_id])
        self.main_nodes.extend([self.node4])
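StepsPeerExchange extends StepsRelay: node3 is started without relay and with peer_exchange_node pointing at the responder's multiaddr, node4 is a relay-less filter client, and each is registered as a peer of the existing relay node via add_node_peer and tracked in main_nodes. The start() keyword arguments (relay, peer_exchange_node, and later discv5_discovery and filternode) presumably end up as CLI flags on the node container; the sketch below shows one plausible snake_case-to-flag translation and is an assumption for illustration, not code from this repository.

# Hypothetical helper, not part of this repo: turns start() keyword arguments
# into "--kebab-case=value" style node flags.
def kwargs_to_flags(**kwargs):
    return [f"--{name.replace('_', '-')}={value}" for name, value in kwargs.items()]

print(kwargs_to_flags(relay="false", peer_exchange_node="/ip4/10.0.0.2/tcp/60001/p2p/16Uiu2HAmExample"))
# ['--relay=false', '--peer-exchange-node=/ip4/10.0.0.2/tcp/60001/p2p/16Uiu2HAmExample']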
tests/peer_exchange/test_peer_exchange.py (new file, 68 lines)
@@ -0,0 +1,68 @@

import pytest
from src.env_vars import NODE_2
from src.libs.common import delay
from src.node.waku_node import peer_info2multiaddr, multiaddr2id
from src.steps.peer_exchange import StepsPeerExchange


@pytest.mark.skipif("go-waku" not in NODE_2, reason="Test works only with go-waku as responder - https://github.com/waku-org/nwaku/issues/2875")
class TestPeerExchange(StepsPeerExchange):
    def test_get_peers_for_blank_node(self):
        self.setup_first_relay_node(relay_peer_exchange="true")
        self.setup_second_relay_node(peer_exchange="true")
        delay(1)
        node1_peers = self.node1.get_peers()
        assert len(node1_peers) == 1
        self.responder_multiaddr = peer_info2multiaddr(node1_peers[0], self.node1.is_nwaku())
        self.setup_third_node_as_peer_exchange_requester(discv5_discovery="false")

        others = {self.node1.get_id(), self.node2.get_id()}

        own = set()
        for peer_info in self.node3.get_peers():
            peer_id = multiaddr2id(peer_info2multiaddr(peer_info, self.node3.is_nwaku()))
            own.add(peer_id)

        assert own == others, f"Not all nodes found as expected in peer store of Node3"

    def test_get_peers_for_filter_node(self):
        self.setup_first_relay_node(filter="true", relay_peer_exchange="true")
        self.setup_second_relay_node(filter="true", peer_exchange="true")
        delay(1)
        node1_peers = self.node1.get_peers()
        assert len(node1_peers) == 1
        self.responder_multiaddr = peer_info2multiaddr(node1_peers[0], self.node1.is_nwaku())
        self.setup_third_node_as_peer_exchange_requester(discv5_discovery="false")

        suitable_peers = []
        for peer_info in self.node3.get_peers():
            multiaddr = peer_info2multiaddr(peer_info, self.node3.is_nwaku())
            suitable_peers.append(multiaddr)

        assert len(suitable_peers) == 2

        self.setup_fourth_node_as_filter(filternode=suitable_peers[0])

    def test_get_peers_after_node1_was_restarted(self):
        self.setup_first_relay_node(relay_peer_exchange="true")
        self.setup_second_relay_node(peer_exchange="true")
        delay(1)
        node1_peers = self.node1.get_peers()
        assert len(node1_peers) == 1
        self.responder_multiaddr = peer_info2multiaddr(node1_peers[0], self.node1.is_nwaku())

        self.node1.restart()

        # Give Node2 time to update its peer store
        delay(1)

        # Start Node3
        self.setup_third_node_as_peer_exchange_requester(discv5_discovery="false")
        others = {self.node1.get_id(), self.node2.get_id()}

        own = set()
        for peer_info in self.node3.get_peers():
            peer_id = multiaddr2id(peer_info2multiaddr(peer_info, self.node3.is_nwaku()))
            own.add(peer_id)

        assert own == others, f"Two nodes are expected in peer store of Node3"
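The first and third tests compare Node3's peer store against the peer IDs of Node1 and Node2, while the filter test only checks that two suitable peers are returned. The helpers peer_info2multiaddr and multiaddr2id live in src.node.waku_node and are not part of this commit; the sketch below is a hedged guess at their behaviour, assuming multiaddr2id returns the component after the last /p2p/ segment and using placeholder field names for the peer-info JSON, which differs between nwaku and go-waku (hence the is_nwaku flag).

# Hedged sketches of the helpers used above; the real implementations in
# src/node/waku_node.py may differ.
def multiaddr2id(multiaddr):
    # Assumed behaviour: the peer ID is whatever follows the last "/p2p/" segment.
    return multiaddr.split("/p2p/")[-1]

def peer_info2multiaddr(peer_info, is_nwaku):
    # Field names are illustrative placeholders, not the nodes' actual REST schema.
    if is_nwaku:
        return peer_info["multiaddr"]
    return f'{peer_info["addrs"][0]}/p2p/{peer_info["peerID"]}'

assert multiaddr2id("/ip4/10.0.0.2/tcp/60001/p2p/16Uiu2HAmExample") == "16Uiu2HAmExample"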