mirror of
https://github.com/logos-messaging/logos-messaging-interop-tests.git
synced 2026-01-05 07:23:11 +00:00
added 2 tests with incorrect query param in cursor and hashes
This commit is contained in:
parent 546089bc36
commit 7d1ace8ad8
@@ -43,7 +43,8 @@ class TestCursor(StepsStore):
store_response = self.get_messages_from_store(node, page_size=100, ascending="true", cursor=cursor)
assert len(store_response.messages) == message_count - cursor_index
for index in range(len(store_response.messages)):
assert store_response.message_hash(index) == message_hash_list[cursor_index + index], f"Message hash at index {index} doesn't match"
assert store_response.message_hash(index) == message_hash_list[
    cursor_index + index], f"Message hash at index {index} doesn't match"

def test_passing_cursor_not_returned_in_paginationCursor(self):
cursor = ""
@@ -70,7 +71,8 @@ class TestCursor(StepsStore):
assert not store_response.messages, "Messages found"

@pytest.mark.xfail("go-waku" in NODE_2, reason="Bug reported: https://github.com/waku-org/go-waku/issues/1110")
@pytest.mark.xfail("nwaku" in (NODE_1 + NODE_2), reason="Bug reported: https://github.com/waku-org/nwaku/issues/2716")
@pytest.mark.xfail("nwaku" in (NODE_1 + NODE_2),
    reason="Bug reported: https://github.com/waku-org/nwaku/issues/2716")
def test_passing_cursor_of_non_existing_message_from_the_store(self):
for i in range(4):
self.publish_message(message=self.create_message(payload=to_base64(f"Message_{i}")))
@@ -82,7 +84,8 @@ class TestCursor(StepsStore):
assert not store_response.messages, "Messages found"

@pytest.mark.xfail("go-waku" in NODE_2, reason="Bug reported: https://github.com/waku-org/go-waku/issues/1110")
@pytest.mark.xfail("nwaku" in (NODE_1 + NODE_2), reason="Bug reported: https://github.com/waku-org/nwaku/issues/2716")
@pytest.mark.xfail("nwaku" in (NODE_1 + NODE_2),
    reason="Bug reported: https://github.com/waku-org/nwaku/issues/2716")
def test_passing_invalid_cursor(self):
for i in range(4):
self.publish_message(message=self.create_message(payload=to_base64(f"Message_{i}")))
@@ -93,7 +96,8 @@ class TestCursor(StepsStore):
assert not store_response.messages, "Messages found"

@pytest.mark.xfail("go-waku" in NODE_2, reason="Bug reported: https://github.com/waku-org/go-waku/issues/1110")
@pytest.mark.xfail("nwaku" in (NODE_1 + NODE_2), reason="Bug reported: https://github.com/waku-org/nwaku/issues/2716")
@pytest.mark.xfail("nwaku" in (NODE_1 + NODE_2),
    reason="Bug reported: https://github.com/waku-org/nwaku/issues/2716")
def test_passing_non_base64_cursor(self):
for i in range(4):
self.publish_message(message=self.create_message(payload=to_base64(f"Message_{i}")))
@@ -143,7 +147,8 @@ class TestCursor(StepsStore):
store_response = self.get_store_messages_with_errors(node=node, page_size=100, cursor=cursor)

# Assert that the error code is 500 for the deleted message scenario
assert store_response["status_code"] == 500, f"Expected status code 500, got {store_response['status_code']}"
assert store_response[
    "status_code"] == 500, f"Expected status code 500, got {store_response['status_code']}"

# Define a partial expected error message (since the actual response includes more details)
expected_error_fragment = "error in handleSelfStoreRequest: BAD_RESPONSE: archive error: DIRVER_ERROR: cursor not found"
@@ -151,7 +156,7 @@ class TestCursor(StepsStore):
# Extract the actual error message and ensure it contains the expected error fragment
actual_error_message = store_response["error_message"]
assert (
expected_error_fragment in actual_error_message
    expected_error_fragment in actual_error_message
), f"Expected error message fragment '{expected_error_fragment}', but got '{actual_error_message}'"

# Test if the API returns the expected messages when the cursor points to the first message in the store.
@@ -195,3 +200,52 @@ class TestCursor(StepsStore):

# Validate that paginationCursor is **not** present when we reach the boundary (end of pagination)
assert store_response.pagination_cursor is None, "paginationCursor should be absent when at the boundary"

# This test publishes 5 messages and retrieves them using the store API with a page size of 3.
# It attempts to use an invalid 'paginationCursor' query parameter instead of the correct 'cursor'.
# The test then validates that the incorrect parameter doesn't affect pagination and that the correct
# 'cursor' parameter successfully retrieves the remaining messages.
def test_invalid_pagination_cursor_param(self):
# Store the timestamps used when creating messages
timestamps = []

# Publish 5 messages
for i in range(5):
message = self.create_message(payload=to_base64(f"Message_{i}"))
timestamps.append(message["timestamp"])  # Save the timestamp
self.publish_message(message=message)

for node in self.store_nodes:
# Step 1: Request first page with pageSize = 3
store_response = self.get_messages_from_store(node, page_size=3)
assert len(store_response.messages) == 3, "Message count mismatch on first page"
pagination_cursor = store_response.pagination_cursor

# Step 2: Attempt to use invalid paginationCursor param (expect 200 but no page change)
invalid_cursor = pagination_cursor
store_response_invalid = self.get_messages_from_store(node, page_size=3, paginationCursor=invalid_cursor)
assert store_response_invalid.status_code == 200, "Expected 200 response with invalid paginationCursor param"
assert len(
    store_response_invalid.messages) == 3, "Expected the same page content since paginationCursor is ignored"
assert store_response_invalid.messages == store_response.messages, "Messages should be the same as the first page"

# Step 3: Use correct cursor to get the remaining messages
store_response_valid = self.get_messages_from_store(node, page_size=3, cursor=pagination_cursor)
assert len(store_response_valid.messages) == 2, "Message count mismatch on second page"
assert store_response_valid.pagination_cursor is None, "There should be no pagination cursor for the last page"

# Validate the message content using the correct timestamp
expected_message_hashes = [
    self.compute_message_hash(self.test_pubsub_topic, {
        "payload": to_base64(f"Message_3"),
        "contentTopic": "/myapp/1/latest/proto",
        "timestamp": timestamps[3]  # Use the stored timestamp for Message_3
    }),
    self.compute_message_hash(self.test_pubsub_topic, {
        "payload": to_base64(f"Message_4"),
        "contentTopic": "/myapp/1/latest/proto",
        "timestamp": timestamps[4]  # Use the stored timestamp for Message_4
    }),
]
for i, message in enumerate(store_response_valid.messages):
assert message["messageHash"] == expected_message_hashes[i], f"Message hash mismatch for message {i}"
@@ -88,17 +88,15 @@ class TestHashes(StepsStore):

for node in self.store_nodes:
store_response = self.get_store_messages_with_errors(node, hashes=excessive_length_hash, page_size=50)
print("store_response:", store_response)

# Check if the response has a "messages" key and if it's empty
assert "messages" not in store_response or not store_response.get("messages",
    []), "Messages found for an excessive length hash"
assert "messages" not in store_response, "Messages found for an excessive length hash"

# Test the behavior when you supply an empty hash alongside valid hashes.
def test_store_with_empty_and_valid_hash(self):
message_hash_list = []
for payload in SAMPLE_INPUTS:
message = self.create_message(payload=to_base64(payload["value"]))
for i in range(4):
message = self.create_message(payload=to_base64(f"Message_{i}"))
self.publish_message(message=message)
message_hash_list.append(self.compute_message_hash(self.test_pubsub_topic, message))
@@ -106,10 +104,12 @@ class TestHashes(StepsStore):
for node in self.store_nodes:
try:
# Combining valid hash with an empty hash
store_response = self.get_messages_from_store(node, hashes=f"{message_hash_list[0]},{empty_hash}", page_size=50)
store_response = self.get_messages_from_store(node, hashes=f"{message_hash_list[0]},{empty_hash}",
    page_size=50)
assert len(store_response.messages) == 1, "Message count mismatch with empty and valid hashes"
except Exception as ex:
assert "waku message hash parsing error" in str(ex), "Unexpected error for combined empty and valid hash"
assert "waku message hash parsing error" in str(
    ex), "Unexpected error for combined empty and valid hash"

# Test for hashes that include non-Base64 characters.
def test_store_with_non_base64_characters_in_hash(self):
@@ -126,8 +126,8 @@ class TestHashes(StepsStore):
# Test when duplicate valid hashes are provided.
def test_store_with_duplicate_hashes(self):
message_hash_list = []
for payload in SAMPLE_INPUTS:
message = self.create_message(payload=to_base64(payload["value"]))
for i in range(4):
message = self.create_message(payload=to_base64(f"Message_{i}"))
self.publish_message(message=message)
message_hash_list.append(self.compute_message_hash(self.test_pubsub_topic, message))
@@ -136,4 +136,38 @@ class TestHashes(StepsStore):
for node in self.store_nodes:
store_response = self.get_messages_from_store(node, hashes=duplicate_hash, page_size=50)
assert len(store_response.messages) == 1, "Expected only one message for duplicate hashes"
assert store_response.message_hash(0) == message_hash_list[0], "Incorrect message returned for duplicate hashes"
assert store_response.message_hash(0) == message_hash_list[
    0], "Incorrect message returned for duplicate hashes"

# Invalid Query Parameter (hash) for Hashes
def test_invalid_hash_param(self):
# Publish 4 messages
published_messages = []
for i in range(4):
message = self.create_message(payload=to_base64(f"Message_{i}"))
self.publish_message(message=message)
published_messages.append(message)

for node in self.store_nodes:
# Step 1: Request messages with the correct 'hashes' parameter
correct_hash = self.compute_message_hash(self.test_pubsub_topic, published_messages[2])
store_response_valid = self.get_messages_from_store(node, hashes=correct_hash)

assert store_response_valid.status_code == 200, "Expected 200 response with correct 'hashes' parameter"
assert len(store_response_valid.messages) == 1, "Expected exactly one message in the response"
assert store_response_valid.messages[0][
    "messageHash"] == correct_hash, "Returned message hash does not match the expected hash"

# Step 2: Attempt to use the invalid 'hash' parameter (expect all messages to be returned)
store_response_invalid = self.get_messages_from_store(node, hash=correct_hash)

assert store_response_invalid.status_code == 200, "Expected 200 response with invalid 'hash' parameter"
assert len(
    store_response_invalid.messages) == 4, "Expected all messages to be returned since 'hash' filter is ignored"

# Collect the hashes of all published messages
expected_hashes = [self.compute_message_hash(self.test_pubsub_topic, msg) for msg in published_messages]
returned_hashes = [msg["messageHash"] for msg in store_response_invalid.messages]

assert set(returned_hashes) == set(
    expected_hashes), "Returned message hashes do not match the expected hashes"