E2E_Tests_part1 (#80)

* adding initial draft for repo example test

* Adding more steps to the example_six_nodes test

* draft3 with adding step5 to the testcase

* adding test test_store_filter_interaction_with_six_nodes

* adding draft of test test_2_filter_nodes

* adding 2 tests
1- test_msg_not_stored_when_ephemeral_true
2- test_msg_stored_when_ephemeral_false

* fix review comments and add final changes for additional 3 tests

* Fix indentation issue

* Fix indentation issue

* Fixing review comments

* Enhancements based on review

* Adding changes to make tests pass with NODE_2 = GO-WAKU

* Fix CI failure in test_store_filter_interaction_with_six_nodes

* Fixing CI failure for test test_store_filter_interaction_with_six_nodes

* Add wait after filter request to fix CI failure

* Increase wait duration to fix CI failure
This commit is contained in:
AYAHASSAN287 2024-11-07 14:27:54 +02:00 committed by GitHub
parent 4d8e7e4955
commit da90b9c3e1
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
2 changed files with 122 additions and 7 deletions

View File

@ -177,3 +177,120 @@ class TestE2E(StepsFilter, StepsStore, StepsRelay, StepsLightPush):
# self.node1 relays and we check that self.node10 receives the message
self.check_published_message_reaches_relay_peer(sender=self.node1, peer_list=[self.node10], message_propagation_delay=1)
def test_store_filter_interaction_with_six_nodes(self):
    """End-to-end relay/store/filter interaction across six nodes.

    Nodes 1-4 form a relay mesh (all store-enabled; node4 additionally
    serves filter). Node6 is a relay-less filter client of node4 and must
    receive the published message via filter. Node5 starts only after the
    message was published, as a relay-less store client of node4, and must
    still retrieve the message from the store.
    """
    logger.debug("Create 6 nodes")
    # Fix: give each node a distinct name. All three were created with
    # "node3_{test_id}", a copy-paste slip that made node names collide.
    self.node4 = WakuNode(NODE_2, f"node4_{self.test_id}")
    self.node5 = WakuNode(NODE_2, f"node5_{self.test_id}")
    self.node6 = WakuNode(NODE_2, f"node6_{self.test_id}")
    logger.debug("Start 5 nodes with their corresponding config")
    # node5 is intentionally NOT started yet — it joins after publication.
    self.node1.start(relay="true", store="true")
    self.node2.start(relay="true", store="true", discv5_bootstrap_node=self.node1.get_enr_uri())
    self.node3.start(relay="true", store="true", discv5_bootstrap_node=self.node2.get_enr_uri())
    self.node4.start(relay="true", filter="true", store="true", discv5_bootstrap_node=self.node3.get_enr_uri())
    self.node6.start(relay="false", filternode=self.node4.get_multiaddr_with_id(), discv5_bootstrap_node=self.node4.get_enr_uri())
    logger.debug("Subscribe nodes to relay pubsub topics")
    node_list = [self.node1, self.node2, self.node3, self.node4]
    for node in node_list:
        node.set_relay_subscriptions([self.test_pubsub_topic])
    logger.debug(f"Node6 subscribe to filter for pubsubtopic {self.test_pubsub_topic}")
    node_list.append(self.node6)
    self.node6.set_filter_subscriptions({"requestId": "1", "contentFilters": [self.test_content_topic], "pubsubTopic": self.test_pubsub_topic})
    self.wait_for_autoconnection(node_list, hard_wait=50)
    logger.debug(f"Node1 publish message for topic {self.test_pubsub_topic}")
    message = self.create_message()
    self.publish_message(sender=self.node1, pubsub_topic=self.test_pubsub_topic, message=message)
    delay(4)  # allow the message to propagate through the relay mesh to node4's filter service
    logger.debug(f"Node6 inquery for filter messages on pubsubtopic {self.test_pubsub_topic} & contenttopic{self.test_content_topic}")
    messages_response = self.get_filter_messages(self.test_content_topic, pubsub_topic=self.test_pubsub_topic, node=self.node6)
    logger.debug(f"Filter inquiry response is {messages_response}")
    assert len(messages_response) == 1, "filtered messages count doesn't match published messages"
    logger.debug("Node5 goes live !!")
    # Late joiner: node5 was not running when the message was published,
    # so it can only see the message through node4's store service.
    self.node5.start(relay="false", storenode=self.node4.get_multiaddr_with_id(), discv5_bootstrap_node=self.node4.get_enr_uri())
    delay(2)
    logger.debug("Node5 makes request to get stored messages ")
    self.check_published_message_is_stored(page_size=50, ascending="true", store_node=self.node5, messages_to_check=[message])
def test_repeated_filter_requestID(self):
    """Subscribe to filter twice with the same requestId and observe behavior.

    The second subscription with a duplicate requestId may be rejected by
    the node; the test logs the outcome. The final expected behavior is
    still undecided, so the count assertion stays commented out.
    """
    logger.debug("Create 3 nodes")
    logger.debug("Start 3 nodes with their corresponding config")
    self.node1.start(relay="true", store="true")
    self.node2.start(relay="true", store="true", filter="true", discv5_bootstrap_node=self.node1.get_enr_uri())
    self.node3.start(relay="true", filternode=self.node2.get_multiaddr_with_id(), store="false", discv5_bootstrap_node=self.node2.get_enr_uri())
    logger.debug("Subscribe nodes to relay pubsub topics")
    self.node1.set_relay_subscriptions([self.test_pubsub_topic])
    self.node2.set_relay_subscriptions([self.test_pubsub_topic])
    logger.debug("Wait for all nodes auto connection")
    node_list = [self.node1, self.node2]
    self.wait_for_autoconnection(node_list, hard_wait=30)
    logger.debug(f"Node3 subscribe to filter for pubsubtopic {self.test_pubsub_topic} 2 times with same request id")
    self.node3.set_filter_subscriptions({"requestId": "1", "contentFilters": [self.test_content_topic], "pubsubTopic": self.test_pubsub_topic})
    try:
        # Duplicate requestId: some node implementations reject this, so
        # tolerate (and log) a failure here rather than aborting the test.
        self.node3.set_filter_subscriptions(
            {"requestId": "1", "contentFilters": [self.test_content_topic], "pubsubTopic": self.test_pubsub_topic}
        )
    except Exception as e:
        # Fix: the log line was f"...error str{e}" — a broken format string
        # (the literal text "str" followed by the exception). Interpolate
        # the exception properly.
        logger.debug(f"Request ID not unique caused error {e}")
    logger.debug(f"Node1 publish message for topic {self.test_pubsub_topic}")
    self.publish_message(sender=self.node1, pubsub_topic=self.test_pubsub_topic, message=self.create_message())
    delay(5)  # allow the message to reach node2's filter service before querying
    messages_response = self.get_filter_messages(self.test_content_topic, pubsub_topic=self.test_pubsub_topic, node=self.node3)
    logger.debug(f"Response for node 3 is {messages_response}")
    # This assert will be uncommented once know what is the expected behavior
    # assert len(messages_response) == 1, "filtered messages count doesn't match published messages"
    messages_response = self.get_filter_messages(self.test_content_topic, pubsub_topic=self.test_pubsub_topic, node=self.node3)
    logger.debug(f"Response for node3 using same request ID is {messages_response}")
    # note: additional steps will be added to test the correct expected response on sending 2 requests with same ID
def test_msg_not_stored_when_ephemeral_true(self):
    """A message published with ephemeral=True must NOT appear in the store.

    node3 is a relay-less store client of node2; the store lookup for the
    ephemeral message is expected to fail with an empty message list.
    """
    logger.debug("Start 3 nodes ")
    self.node1.start(relay="true", store="true")
    self.node2.start(store="true", relay="true", discv5_bootstrap_node=self.node1.get_enr_uri())
    self.node3.start(relay="false", storenode=self.node2.get_multiaddr_with_id(), discv5_bootstrap_node=self.node2.get_enr_uri())
    logger.debug(f"Subscribe node1 ,2 to pubtopic {self.test_pubsub_topic}")
    self.node1.set_relay_subscriptions([self.test_pubsub_topic])
    self.node2.set_relay_subscriptions([self.test_pubsub_topic])
    self.wait_for_autoconnection([self.node1, self.node2], hard_wait=30)
    logger.debug("Node1 publish message with flag ephemeral = True")
    message = self.create_message(ephemeral=True)
    self.publish_message(sender=self.node1, pubsub_topic=self.test_pubsub_topic, message=message)
    delay(3)  # give the store nodes time to (not) persist the message
    logger.debug("Node3 makes store request to get messages")
    stored = False
    try:
        self.check_published_message_is_stored(page_size=50, ascending="true", store_node=self.node3, messages_to_check=[message])
        stored = True  # reaching here means the store returned the ephemeral message
    except Exception as e:
        # Expected path: the store check fails because the message list is empty.
        logger.debug(f"Response for store when ephemeral = true is {e}")
        # Fix: the old code used `assert e.args[0].find("'messages': []")`,
        # but str.find returns -1 (truthy!) when absent, so that assert
        # could never fail. It also raised its sentinel Exception inside the
        # same try, which was then swallowed by this handler. Use `in` and
        # move the "was it stored?" verdict outside the try/except.
        assert "'messages': []" in str(e), "response for store shouldn't contain messages"
        logger.debug("Message isn't stored as ephemeral = True")
    assert not stored, "Messages shouldn't be stored when ephemeral = true"
@pytest.mark.skipif("go-waku" in NODE_2, reason="Test works only with nwaku")
def test_msg_stored_when_ephemeral_false(self):
    """A message published with ephemeral=False must be retrievable from the store.

    node3 runs without relay and queries node2's store service for the
    message that node1 published over relay.
    """
    logger.debug("Start 3 nodes")
    self.node1.start(relay="true", store="true")
    self.node2.start(store="true", relay="true", discv5_bootstrap_node=self.node1.get_enr_uri())
    self.node3.start(relay="false", storenode=self.node2.get_multiaddr_with_id(), discv5_bootstrap_node=self.node2.get_enr_uri())
    logger.debug(f"Subscribe node1 ,2 to pubtopic {self.test_pubsub_topic}")
    for relay_node in (self.node1, self.node2):
        relay_node.set_relay_subscriptions([self.test_pubsub_topic])
    self.wait_for_autoconnection([self.node1, self.node2], hard_wait=30)
    logger.debug("Node1 publish message with ephemeral = false")
    published = self.create_message(ephemeral=False)
    self.publish_message(sender=self.node1, pubsub_topic=self.test_pubsub_topic, message=published)
    delay(3)  # allow time for the message to be persisted by the store nodes
    logger.debug("Check if message is stored ")
    self.check_published_message_is_stored(page_size=50, ascending="true", store_node=self.node3, messages_to_check=[published])

View File

@ -125,18 +125,16 @@ class TestTimeFilter(StepsStore):
logger.debug(f"response for wrong time message is {store_response.response}")
assert len(store_response.messages) == 0, "got messages with start time after end time !"
def test_time_filter_negative_start_time(self):
    """Query the store with a negative start_time.

    Publishes one message per valid timestamp, then queries every store
    node with start_time = -10000 and expects all 6 messages back (a
    negative start time acts as "no lower bound").
    """
    # Fix: the method definition and publish loop were duplicated verbatim
    # (the first copy shadowed by the second); keep a single definition.
    ts_pass = self.get_time_list_pass()
    for timestamp in ts_pass:
        message = self.create_message(timestamp=timestamp["value"])
        self.publish_message(message=message)
    start_time = -10000
    logger.debug(f"inquering stored messages with start time {start_time}")
    for node in self.store_nodes:
        store_response = self.get_messages_from_store(node, page_size=20, start_time=start_time, include_data=True)
        logger.debug(f"number of messages stored for " f"start time = {start_time} is {len(store_response.messages)}")
        assert len(store_response.messages) == 6, "number of messages retrieved doesn't match time filter "
def test_time_filter_zero_start_time(self):