mirror of
https://github.com/logos-messaging/logos-delivery-interop-tests.git
synced 2026-04-16 05:53:08 +00:00
Add REST API traffic bypass for network conditions manipulation
- Introduced methods to apply packet loss only to P2P traffic, excluding REST API traffic.
- Simplified test cases to leverage the new differentiated packet loss handling.
- Removed unused and legacy metrics/tests for cleaner configuration and coverage.
This commit is contained in:
parent
33596417e3
commit
ec6cb89c37
@ -133,3 +133,99 @@ class TrafficController:
|
||||
],
|
||||
iface=iface,
|
||||
)
|
||||
|
||||
def _install_rest_bypass_prio(self, node, iface: str, netem_args: list[str]):
|
||||
"""
|
||||
Build a prio qdisc where REST API traffic bypasses netem.
|
||||
|
||||
Layout:
|
||||
root 1: prio (3 bands)
|
||||
|-- 1:1 unshaped (REST traffic lands here via u32 filter)
|
||||
|-- 1:2 leaf -> netem (default band per priomap; everything else)
|
||||
`-- 1:3 unshaped (unused)
|
||||
|
||||
The filter matches packets whose TCP source OR destination port equals
|
||||
the node's REST port, so both incoming requests and outgoing responses
|
||||
bypass the netem queue. Libp2p and other traffic hits netem.
|
||||
"""
|
||||
rest_port = str(node._rest_port)
|
||||
|
||||
self._exec(node, ["qdisc", "add", "dev", iface, "root", "handle", "1:", "prio"], iface=iface)
|
||||
self._exec(
|
||||
node,
|
||||
["qdisc", "add", "dev", iface, "parent", "1:2", "handle", "20:", "netem"] + netem_args,
|
||||
iface=iface,
|
||||
)
|
||||
self._exec(
|
||||
node,
|
||||
[
|
||||
"filter",
|
||||
"add",
|
||||
"dev",
|
||||
iface,
|
||||
"protocol",
|
||||
"ip",
|
||||
"parent",
|
||||
"1:",
|
||||
"prio",
|
||||
"1",
|
||||
"u32",
|
||||
"match",
|
||||
"ip",
|
||||
"sport",
|
||||
rest_port,
|
||||
"0xffff",
|
||||
"flowid",
|
||||
"1:1",
|
||||
],
|
||||
iface=iface,
|
||||
)
|
||||
self._exec(
|
||||
node,
|
||||
[
|
||||
"filter",
|
||||
"add",
|
||||
"dev",
|
||||
iface,
|
||||
"protocol",
|
||||
"ip",
|
||||
"parent",
|
||||
"1:",
|
||||
"prio",
|
||||
"1",
|
||||
"u32",
|
||||
"match",
|
||||
"ip",
|
||||
"dport",
|
||||
rest_port,
|
||||
"0xffff",
|
||||
"flowid",
|
||||
"1:1",
|
||||
],
|
||||
iface=iface,
|
||||
)
|
||||
|
||||
def add_packet_loss_p2p_only(self, node, percent: float, iface: str = "eth0"):
    """
    Drop ``percent`` of packets on every flow except the node's REST port.

    Preferred over add_packet_loss when observing Waku protocol behavior
    under loss: the harness's control plane (REST traffic between pytest
    and the node) stays unaffected, so measurements are not contaminated.
    """
    loss_spec = ["loss", f"{percent}%"]
    self.clear(node, iface=iface)
    self._install_rest_bypass_prio(node, iface, loss_spec)
|
||||
|
||||
def add_packet_loss_correlated_p2p_only(
    self,
    node,
    percent: float,
    correlation: float,
    iface: str = "eth0",
):
    """
    Correlated packet-loss variant that leaves REST API traffic untouched.

    Same REST-bypass rationale as add_packet_loss_p2p_only, with a netem
    correlation factor so consecutive drops cluster together.
    """
    loss_spec = ["loss", f"{percent}%", f"{correlation}%"]
    self.clear(node, iface=iface)
    self._install_rest_bypass_prio(node, iface, loss_spec)
|
||||
|
||||
@ -277,40 +277,6 @@ METRICS_WITH_INITIAL_VALUE_ZERO = [
|
||||
'waku_archive_query_duration_seconds_bucket{le="7.5"}',
|
||||
'waku_archive_query_duration_seconds_bucket{le="10.0"}',
|
||||
'waku_archive_query_duration_seconds_bucket{le="+Inf"}',
|
||||
"waku_legacy_archive_insert_duration_seconds_sum",
|
||||
"waku_legacy_archive_insert_duration_seconds_count",
|
||||
'waku_legacy_archive_insert_duration_seconds_bucket{le="0.005"}',
|
||||
'waku_legacy_archive_insert_duration_seconds_bucket{le="0.01"}',
|
||||
'waku_legacy_archive_insert_duration_seconds_bucket{le="0.025"}',
|
||||
'waku_legacy_archive_insert_duration_seconds_bucket{le="0.05"}',
|
||||
'waku_legacy_archive_insert_duration_seconds_bucket{le="0.075"}',
|
||||
'waku_legacy_archive_insert_duration_seconds_bucket{le="0.1"}',
|
||||
'waku_legacy_archive_insert_duration_seconds_bucket{le="0.25"}',
|
||||
'waku_legacy_archive_insert_duration_seconds_bucket{le="0.5"}',
|
||||
'waku_legacy_archive_insert_duration_seconds_bucket{le="0.75"}',
|
||||
'waku_legacy_archive_insert_duration_seconds_bucket{le="1.0"}',
|
||||
'waku_legacy_archive_insert_duration_seconds_bucket{le="2.5"}',
|
||||
'waku_legacy_archive_insert_duration_seconds_bucket{le="5.0"}',
|
||||
'waku_legacy_archive_insert_duration_seconds_bucket{le="7.5"}',
|
||||
'waku_legacy_archive_insert_duration_seconds_bucket{le="10.0"}',
|
||||
'waku_legacy_archive_insert_duration_seconds_bucket{le="+Inf"}',
|
||||
"waku_legacy_archive_query_duration_seconds_sum",
|
||||
"waku_legacy_archive_query_duration_seconds_count",
|
||||
'waku_legacy_archive_query_duration_seconds_bucket{le="0.005"}',
|
||||
'waku_legacy_archive_query_duration_seconds_bucket{le="0.01"}',
|
||||
'waku_legacy_archive_query_duration_seconds_bucket{le="0.025"}',
|
||||
'waku_legacy_archive_query_duration_seconds_bucket{le="0.05"}',
|
||||
'waku_legacy_archive_query_duration_seconds_bucket{le="0.075"}',
|
||||
'waku_legacy_archive_query_duration_seconds_bucket{le="0.1"}',
|
||||
'waku_legacy_archive_query_duration_seconds_bucket{le="0.25"}',
|
||||
'waku_legacy_archive_query_duration_seconds_bucket{le="0.5"}',
|
||||
'waku_legacy_archive_query_duration_seconds_bucket{le="0.75"}',
|
||||
'waku_legacy_archive_query_duration_seconds_bucket{le="1.0"}',
|
||||
'waku_legacy_archive_query_duration_seconds_bucket{le="2.5"}',
|
||||
'waku_legacy_archive_query_duration_seconds_bucket{le="5.0"}',
|
||||
'waku_legacy_archive_query_duration_seconds_bucket{le="7.5"}',
|
||||
'waku_legacy_archive_query_duration_seconds_bucket{le="10.0"}',
|
||||
'waku_legacy_archive_query_duration_seconds_bucket{le="+Inf"}',
|
||||
"waku_filter_subscriptions",
|
||||
"waku_filter_handle_message_duration_seconds_sum",
|
||||
"waku_filter_handle_message_duration_seconds_count",
|
||||
@ -383,7 +349,6 @@ METRICS_WITH_INITIAL_VALUE_ZERO = [
|
||||
'waku_filter_handle_message_duration_seconds_bucket{le="20.0"}',
|
||||
'waku_filter_handle_message_duration_seconds_bucket{le="30.0"}',
|
||||
"total_messages_cached",
|
||||
"waku_legacy_store_queries_total",
|
||||
"waku_store_queries_total",
|
||||
"mix_pool_size",
|
||||
"libp2p_gossipsub_imreceiving_saved_messages_total",
|
||||
|
||||
@ -1,12 +1,10 @@
|
||||
import pytest
|
||||
import logging
|
||||
from time import time, sleep
|
||||
from src.libs.custom_logger import get_custom_logger
|
||||
from src.env_vars import NODE_1, NODE_2
|
||||
from src.node.waku_node import WakuNode
|
||||
from src.steps.relay import StepsRelay
|
||||
from src.libs.common import delay
|
||||
from src.steps.common import StepsCommon
|
||||
from src.steps.network_conditions import TrafficController
|
||||
from src.libs.common import to_base64
|
||||
|
||||
@ -426,7 +424,7 @@ class TestNetworkConditions(StepsRelay):
|
||||
window_s = 30.0
|
||||
loss = 50.0
|
||||
|
||||
self.tc.add_packet_loss(self.node1, percent=loss)
|
||||
self.tc.add_packet_loss_p2p_only(self.node1, percent=loss)
|
||||
_ = self.node4.get_relay_messages(self.test_pubsub_topic)
|
||||
|
||||
for _ in range(total_msgs):
|
||||
@ -436,7 +434,7 @@ class TestNetworkConditions(StepsRelay):
|
||||
uncorrelated = len(self.node4.get_relay_messages(self.test_pubsub_topic) or [])
|
||||
self.tc.clear(self.node1)
|
||||
|
||||
self.tc.add_packet_loss_correlated(self.node1, percent=loss, correlation=75.0)
|
||||
self.tc.add_packet_loss_correlated_p2p_only(self.node1, percent=loss, correlation=75.0)
|
||||
_ = self.node4.get_relay_messages(self.test_pubsub_topic)
|
||||
|
||||
for _ in range(total_msgs):
|
||||
|
||||
@ -94,11 +94,6 @@ class TestMetrics(StepsRelay, StepsMetrics, StepsFilter, StepsLightPush, StepsSt
|
||||
self.check_metric(self.publishing_node1, "waku_histogram_message_size_count", 1)
|
||||
self.check_metric(self.publishing_node1, 'waku_node_messages_total{type="relay"}', 1)
|
||||
if self.store_node1.is_nwaku():
|
||||
self.check_metric(
|
||||
self.store_node1,
|
||||
f'waku_service_peers{{protocol="/vac/waku/store/2.0.0-beta4",peerId="{self.publishing_node1.get_tcp_address()}"}}',
|
||||
1,
|
||||
)
|
||||
self.check_metric(
|
||||
self.store_node1,
|
||||
f'waku_service_peers{{protocol="/vac/waku/store-query/3.0.0",peerId="{self.publishing_node1.get_tcp_address()}"}}',
|
||||
|
||||
Loading…
x
Reference in New Issue
Block a user