consul/agent/xds/clusters_test.go

package xds

import (
"bytes"
"path/filepath"
"sort"
"testing"
"text/template"
envoy_cluster_v3 "github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3"
"github.com/golang/protobuf/ptypes/wrappers"
testinf "github.com/mitchellh/go-testing-interface"
"github.com/stretchr/testify/require"
"github.com/hashicorp/consul/agent/proxycfg"
"github.com/hashicorp/consul/agent/structs"
"github.com/hashicorp/consul/agent/xds/proxysupport"
"github.com/hashicorp/consul/agent/xds/xdscommon"
"github.com/hashicorp/consul/sdk/testutil"
)
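
// TestClustersFromSnapshot feeds a range of proxycfg snapshots (sidecar
// proxies, discovery chains, mesh/ingress/terminating gateways, transparent
// proxies, etc.) through the CDS generator and compares the resulting cluster
// JSON against golden files, once per supported Envoy version.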
func TestClustersFromSnapshot(t *testing.T) {
if testing.Short() {
t.Skip("too slow for testing.Short")
}
tests := []struct {
name string
create func(t testinf.T) *proxycfg.ConfigSnapshot
overrideGoldenName string
}{
{
name: "defaults",
create: func(t testinf.T) *proxycfg.ConfigSnapshot {
return proxycfg.TestConfigSnapshot(t, nil, nil)
},
},
{
name: "custom-local-app",
create: func(t testinf.T) *proxycfg.ConfigSnapshot {
return proxycfg.TestConfigSnapshot(t, func(ns *structs.NodeService) {
ns.Proxy.Config["envoy_local_cluster_json"] =
customAppClusterJSON(t, customClusterJSONOptions{
Name: "mylocal",
})
}, nil)
},
},
{
name: "custom-upstream",
create: func(t testinf.T) *proxycfg.ConfigSnapshot {
return proxycfg.TestConfigSnapshot(t, func(ns *structs.NodeService) {
ns.Proxy.Upstreams[0].Config["envoy_cluster_json"] =
customAppClusterJSON(t, customClusterJSONOptions{
Name: "myservice",
})
}, nil)
},
},
{
name: "custom-upstream-default-chain",
create: func(t testinf.T) *proxycfg.ConfigSnapshot {
return proxycfg.TestConfigSnapshotDiscoveryChain(t, "default", func(ns *structs.NodeService) {
ns.Proxy.Upstreams[0].Config["envoy_cluster_json"] =
customAppClusterJSON(t, customClusterJSONOptions{
Name: "myservice",
})
}, nil)
},
},
{
name: "custom-upstream-ignores-tls",
overrideGoldenName: "custom-upstream", // should be the same
create: func(t testinf.T) *proxycfg.ConfigSnapshot {
return proxycfg.TestConfigSnapshot(t, func(ns *structs.NodeService) {
ns.Proxy.Upstreams[0].Config["envoy_cluster_json"] =
customAppClusterJSON(t, customClusterJSONOptions{
Name: "myservice",
// Attempts to override the TLS context should be ignored
TLSContext: `"allowRenegotiation": false`,
})
}, nil)
},
},
{
name: "custom-timeouts",
create: func(t testinf.T) *proxycfg.ConfigSnapshot {
return proxycfg.TestConfigSnapshot(t, func(ns *structs.NodeService) {
ns.Proxy.Config["local_connect_timeout_ms"] = 1234
ns.Proxy.Upstreams[0].Config["connect_timeout_ms"] = 2345
}, nil)
},
},
{
name: "custom-limits-max-connections-only",
create: func(t testinf.T) *proxycfg.ConfigSnapshot {
return proxycfg.TestConfigSnapshot(t, func(ns *structs.NodeService) {
for i := range ns.Proxy.Upstreams {
// We check if Config is nil because the prepared_query upstream is
// initialized without a Config map. Use Upstreams[i] syntax to
// modify the actual ConfigSnapshot instead of copying the Upstream
// in the range.
if ns.Proxy.Upstreams[i].Config == nil {
ns.Proxy.Upstreams[i].Config = map[string]interface{}{}
}
ns.Proxy.Upstreams[i].Config["limits"] = map[string]interface{}{
"max_connections": 500,
}
}
}, nil)
},
},
{
name: "custom-limits-set-to-zero",
create: func(t testinf.T) *proxycfg.ConfigSnapshot {
return proxycfg.TestConfigSnapshot(t, func(ns *structs.NodeService) {
for i := range ns.Proxy.Upstreams {
if ns.Proxy.Upstreams[i].Config == nil {
ns.Proxy.Upstreams[i].Config = map[string]interface{}{}
}
ns.Proxy.Upstreams[i].Config["limits"] = map[string]interface{}{
"max_connections": 0,
"max_pending_requests": 0,
"max_concurrent_requests": 0,
}
}
}, nil)
},
},
{
name: "custom-limits",
create: func(t testinf.T) *proxycfg.ConfigSnapshot {
return proxycfg.TestConfigSnapshot(t, func(ns *structs.NodeService) {
for i := range ns.Proxy.Upstreams {
if ns.Proxy.Upstreams[i].Config == nil {
ns.Proxy.Upstreams[i].Config = map[string]interface{}{}
}
ns.Proxy.Upstreams[i].Config["limits"] = map[string]interface{}{
"max_connections": 500,
"max_pending_requests": 600,
"max_concurrent_requests": 700,
}
}
}, nil)
},
},
{
name: "connect-proxy-with-chain",
create: func(t testinf.T) *proxycfg.ConfigSnapshot {
return proxycfg.TestConfigSnapshotDiscoveryChain(t, "simple", nil, nil)
},
},
{
name: "connect-proxy-with-chain-external-sni",
create: func(t testinf.T) *proxycfg.ConfigSnapshot {
return proxycfg.TestConfigSnapshotDiscoveryChain(t, "external-sni", nil, nil)
},
},
{
name: "connect-proxy-with-chain-and-overrides",
create: func(t testinf.T) *proxycfg.ConfigSnapshot {
return proxycfg.TestConfigSnapshotDiscoveryChain(t, "simple-with-overrides", nil, nil)
},
},
{
name: "connect-proxy-with-chain-and-failover",
create: func(t testinf.T) *proxycfg.ConfigSnapshot {
return proxycfg.TestConfigSnapshotDiscoveryChain(t, "failover", nil, nil)
},
},
{
name: "connect-proxy-with-tcp-chain-failover-through-remote-gateway",
create: func(t testinf.T) *proxycfg.ConfigSnapshot {
return proxycfg.TestConfigSnapshotDiscoveryChain(t, "failover-through-remote-gateway", nil, nil)
},
},
{
name: "connect-proxy-with-tcp-chain-failover-through-remote-gateway-triggered",
create: func(t testinf.T) *proxycfg.ConfigSnapshot {
return proxycfg.TestConfigSnapshotDiscoveryChain(t, "failover-through-remote-gateway-triggered", nil, nil)
},
},
{
name: "connect-proxy-with-tcp-chain-double-failover-through-remote-gateway",
create: func(t testinf.T) *proxycfg.ConfigSnapshot {
return proxycfg.TestConfigSnapshotDiscoveryChain(t, "failover-through-double-remote-gateway", nil, nil)
},
},
{
name: "connect-proxy-with-tcp-chain-double-failover-through-remote-gateway-triggered",
create: func(t testinf.T) *proxycfg.ConfigSnapshot {
return proxycfg.TestConfigSnapshotDiscoveryChain(t, "failover-through-double-remote-gateway-triggered", nil, nil)
},
},
{
name: "connect-proxy-with-tcp-chain-failover-through-local-gateway",
create: func(t testinf.T) *proxycfg.ConfigSnapshot {
return proxycfg.TestConfigSnapshotDiscoveryChain(t, "failover-through-local-gateway", nil, nil)
},
},
{
name: "connect-proxy-with-tcp-chain-failover-through-local-gateway-triggered",
create: func(t testinf.T) *proxycfg.ConfigSnapshot {
return proxycfg.TestConfigSnapshotDiscoveryChain(t, "failover-through-local-gateway-triggered", nil, nil)
},
},
{
name: "connect-proxy-with-tcp-chain-double-failover-through-local-gateway",
create: func(t testinf.T) *proxycfg.ConfigSnapshot {
return proxycfg.TestConfigSnapshotDiscoveryChain(t, "failover-through-double-local-gateway", nil, nil)
},
},
{
name: "connect-proxy-with-tcp-chain-double-failover-through-local-gateway-triggered",
create: func(t testinf.T) *proxycfg.ConfigSnapshot {
return proxycfg.TestConfigSnapshotDiscoveryChain(t, "failover-through-double-local-gateway-triggered", nil, nil)
},
},
{
name: "splitter-with-resolver-redirect",
create: func(t testinf.T) *proxycfg.ConfigSnapshot {
return proxycfg.TestConfigSnapshotDiscoveryChain(t, "splitter-with-resolver-redirect-multidc", nil, nil)
},
},
{
name: "connect-proxy-lb-in-resolver",
create: func(t testinf.T) *proxycfg.ConfigSnapshot {
return proxycfg.TestConfigSnapshotDiscoveryChain(t, "lb-resolver", nil, nil)
},
},
{
name: "expose-paths-local-app-paths",
create: func(t testinf.T) *proxycfg.ConfigSnapshot {
return proxycfg.TestConfigSnapshotExposeConfig(t, nil)
},
},
{
name: "downstream-service-with-unix-sockets",
create: func(t testinf.T) *proxycfg.ConfigSnapshot {
return proxycfg.TestConfigSnapshot(t, func(ns *structs.NodeService) {
ns.Address = ""
ns.Port = 0
ns.Proxy.LocalServiceAddress = ""
ns.Proxy.LocalServicePort = 0
ns.Proxy.LocalServiceSocketPath = "/tmp/downstream_proxy.sock"
}, nil)
},
},
{
name: "expose-paths-new-cluster-http2",
create: func(t testinf.T) *proxycfg.ConfigSnapshot {
return proxycfg.TestConfigSnapshotExposeConfig(t, func(ns *structs.NodeService) {
ns.Proxy.Expose.Paths[1] = structs.ExposePath{
LocalPathPort: 9090,
Path: "/grpc.health.v1.Health/Check",
ListenerPort: 21501,
Protocol: "http2",
}
})
},
},
{
name: "expose-paths-grpc-new-cluster-http1",
create: proxycfg.TestConfigSnapshotGRPCExposeHTTP1,
},
{
name: "mesh-gateway",
create: func(t testinf.T) *proxycfg.ConfigSnapshot {
return proxycfg.TestConfigSnapshotMeshGateway(t, "default", nil, nil)
},
},
{
name: "mesh-gateway-using-federation-states",
create: func(t testinf.T) *proxycfg.ConfigSnapshot {
return proxycfg.TestConfigSnapshotMeshGateway(t, "federation-states", nil, nil)
},
},
{
name: "mesh-gateway-no-services",
create: func(t testinf.T) *proxycfg.ConfigSnapshot {
return proxycfg.TestConfigSnapshotMeshGateway(t, "no-services", nil, nil)
},
},
{
name: "mesh-gateway-service-subsets",
create: func(t testinf.T) *proxycfg.ConfigSnapshot {
return proxycfg.TestConfigSnapshotMeshGateway(t, "service-subsets", nil, nil)
},
},
{
name: "mesh-gateway-ignore-extra-resolvers",
create: func(t testinf.T) *proxycfg.ConfigSnapshot {
return proxycfg.TestConfigSnapshotMeshGateway(t, "ignore-extra-resolvers", nil, nil)
},
},
{
name: "mesh-gateway-service-timeouts",
create: func(t testinf.T) *proxycfg.ConfigSnapshot {
return proxycfg.TestConfigSnapshotMeshGateway(t, "service-timeouts", nil, nil)
},
},
{
name: "mesh-gateway-non-hash-lb-injected",
create: func(t testinf.T) *proxycfg.ConfigSnapshot {
return proxycfg.TestConfigSnapshotMeshGateway(t, "non-hash-lb-injected", nil, nil)
},
},
{
name: "mesh-gateway-hash-lb-ignored",
create: func(t testinf.T) *proxycfg.ConfigSnapshot {
return proxycfg.TestConfigSnapshotMeshGateway(t, "hash-lb-ignored", nil, nil)
},
},
{
name: "ingress-gateway",
create: func(t testinf.T) *proxycfg.ConfigSnapshot {
return proxycfg.TestConfigSnapshotIngressGateway(t, true, "tcp",
"default", nil, nil, nil)
},
},
{
name: "ingress-gateway-no-services",
create: func(t testinf.T) *proxycfg.ConfigSnapshot {
return proxycfg.TestConfigSnapshotIngressGateway(t, false, "tcp",
"default", nil, nil, nil)
},
},
{
name: "ingress-with-chain",
create: func(t testinf.T) *proxycfg.ConfigSnapshot {
return proxycfg.TestConfigSnapshotIngressGateway(t, true, "tcp",
"simple", nil, nil, nil)
},
},
{
name: "ingress-with-chain-external-sni",
create: func(t testinf.T) *proxycfg.ConfigSnapshot {
return proxycfg.TestConfigSnapshotIngressGateway(t, true, "tcp",
"external-sni", nil, nil, nil)
},
},
{
name: "ingress-with-chain-and-failover",
create: func(t testinf.T) *proxycfg.ConfigSnapshot {
return proxycfg.TestConfigSnapshotIngressGateway(t, true, "tcp",
"failover", nil, nil, nil)
},
},
{
name: "ingress-with-tcp-chain-failover-through-remote-gateway",
create: func(t testinf.T) *proxycfg.ConfigSnapshot {
return proxycfg.TestConfigSnapshotIngressGateway(t, true, "tcp",
"failover-through-remote-gateway", nil, nil, nil)
},
},
{
name: "ingress-with-tcp-chain-failover-through-remote-gateway-triggered",
create: func(t testinf.T) *proxycfg.ConfigSnapshot {
return proxycfg.TestConfigSnapshotIngressGateway(t, true, "tcp",
"failover-through-remote-gateway-triggered", nil, nil, nil)
},
},
{
name: "ingress-with-tcp-chain-double-failover-through-remote-gateway",
create: func(t testinf.T) *proxycfg.ConfigSnapshot {
return proxycfg.TestConfigSnapshotIngressGateway(t, true, "tcp",
"failover-through-double-remote-gateway", nil, nil, nil)
},
},
{
name: "ingress-with-tcp-chain-double-failover-through-remote-gateway-triggered",
create: func(t testinf.T) *proxycfg.ConfigSnapshot {
return proxycfg.TestConfigSnapshotIngressGateway(t, true, "tcp",
"failover-through-double-remote-gateway-triggered", nil, nil, nil)
},
},
{
name: "ingress-with-tcp-chain-failover-through-local-gateway",
create: func(t testinf.T) *proxycfg.ConfigSnapshot {
return proxycfg.TestConfigSnapshotIngressGateway(t, true, "tcp",
"failover-through-local-gateway", nil, nil, nil)
},
},
{
name: "ingress-with-tcp-chain-failover-through-local-gateway-triggered",
create: func(t testinf.T) *proxycfg.ConfigSnapshot {
return proxycfg.TestConfigSnapshotIngressGateway(t, true, "tcp",
"failover-through-local-gateway-triggered", nil, nil, nil)
},
},
{
name: "ingress-with-tcp-chain-double-failover-through-local-gateway",
create: func(t testinf.T) *proxycfg.ConfigSnapshot {
return proxycfg.TestConfigSnapshotIngressGateway(t, true, "tcp",
"failover-through-double-local-gateway", nil, nil, nil)
},
},
{
name: "ingress-with-tcp-chain-double-failover-through-local-gateway-triggered",
create: func(t testinf.T) *proxycfg.ConfigSnapshot {
return proxycfg.TestConfigSnapshotIngressGateway(t, true, "tcp",
"failover-through-double-local-gateway-triggered", nil, nil, nil)
},
},
{
name: "ingress-splitter-with-resolver-redirect",
create: func(t testinf.T) *proxycfg.ConfigSnapshot {
return proxycfg.TestConfigSnapshotIngressGateway(t, true, "http",
"splitter-with-resolver-redirect-multidc", nil, nil, nil)
},
},
{
name: "ingress-lb-in-resolver",
create: func(t testinf.T) *proxycfg.ConfigSnapshot {
return proxycfg.TestConfigSnapshotIngressGateway(t, true, "http",
"lb-resolver", nil, nil, nil)
},
},
{
name: "terminating-gateway",
create: func(t testinf.T) *proxycfg.ConfigSnapshot {
return proxycfg.TestConfigSnapshotTerminatingGateway(t, true, nil, nil)
},
},
{
name: "terminating-gateway-no-services",
create: func(t testinf.T) *proxycfg.ConfigSnapshot {
return proxycfg.TestConfigSnapshotTerminatingGateway(t, false, nil, nil)
},
},
{
name: "terminating-gateway-service-subsets",
create: proxycfg.TestConfigSnapshotTerminatingGatewayServiceSubsetsWebAndCache,
},
{
name: "terminating-gateway-hostname-service-subsets",
create: proxycfg.TestConfigSnapshotTerminatingGatewayHostnameSubsets,
},
{
name: "terminating-gateway-ignore-extra-resolvers",
create: proxycfg.TestConfigSnapshotTerminatingGatewayIgnoreExtraResolvers,
},
{
name: "terminating-gateway-lb-config",
create: proxycfg.TestConfigSnapshotTerminatingGatewayLBConfigNoHashPolicies,
},
{
name: "ingress-multiple-listeners-duplicate-service",
create: proxycfg.TestConfigSnapshotIngress_MultipleListenersDuplicateService,
},
{
name: "transparent-proxy",
create: proxycfg.TestConfigSnapshotTransparentProxy,
},
{
name: "transparent-proxy-catalog-destinations-only",
create: proxycfg.TestConfigSnapshotTransparentProxyCatalogDestinationsOnly,
},
{
name: "transparent-proxy-dial-instances-directly",
create: proxycfg.TestConfigSnapshotTransparentProxyDialDirectly,
},
}
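
// Run every case once per supported Envoy version so the golden output is
// validated against each Envoy release the generator supports.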
latestEnvoyVersion := proxysupport.EnvoyVersions[0]
for _, envoyVersion := range proxysupport.EnvoyVersions {
sf, err := determineSupportedProxyFeaturesFromString(envoyVersion)
require.NoError(t, err)
t.Run("envoy-"+envoyVersion, func(t *testing.T) {
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
// Sanity check default with no overrides first
snap := tt.create(t)
// We need to replace the TLS certs with deterministic ones to make golden
// files workable. Note we don't update these otherwise they'd change
// golden files for every test case and so not be of any use!
setupTLSRootsAndLeaf(t, snap)
// Need server just for logger dependency
g := newResourceGenerator(testutil.Logger(t), nil, nil, false)
g.ProxyFeatures = sf
clusters, err := g.clustersFromSnapshot(snap)
require.NoError(t, err)
sort.Slice(clusters, func(i, j int) bool {
return clusters[i].(*envoy_cluster_v3.Cluster).Name < clusters[j].(*envoy_cluster_v3.Cluster).Name
})
r, err := createResponse(xdscommon.ClusterType, "00000001", "00000001", clusters)
require.NoError(t, err)
t.Run("current", func(t *testing.T) {
gotJSON := protoToJSON(t, r)
gName := tt.name
if tt.overrideGoldenName != "" {
gName = tt.overrideGoldenName
}
require.JSONEq(t, goldenEnvoy(t, filepath.Join("clusters", gName), envoyVersion, latestEnvoyVersion, gotJSON), gotJSON)
})
})
}
})
}
}
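
// customClusterJSONOptions parameterizes customAppClusterJSONTpl, the cluster
// JSON used above to exercise the envoy_cluster_json and
// envoy_local_cluster_json escape hatches.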
type customClusterJSONOptions struct {
Name string
TLSContext string
}
var customAppClusterJSONTpl = `{
"@type": "type.googleapis.com/envoy.config.cluster.v3.Cluster",
{{ if .TLSContext -}}
"transport_socket": {
"name": "tls",
"typed_config": {
"@type": "type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.UpstreamTlsContext",
{{ .TLSContext }}
}
},
{{- end }}
"name": "{{ .Name }}",
"connectTimeout": "15s",
"loadAssignment": {
"clusterName": "{{ .Name }}",
"endpoints": [
{
"lbEndpoints": [
{
"endpoint": {
"address": {
"socketAddress": {
"address": "127.0.0.1",
"portValue": 8080
}
}
}
}
]
}
]
}
}`
var customAppClusterJSONTemplate = template.Must(template.New("").Parse(customAppClusterJSONTpl))
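
// customAppClusterJSON renders customAppClusterJSONTpl with the given options,
// failing the test on any template error.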
func customAppClusterJSON(t testinf.T, opts customClusterJSONOptions) string {
t.Helper()
var buf bytes.Buffer
err := customAppClusterJSONTemplate.Execute(&buf, opts)
require.NoError(t, err)
return buf.String()
}
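
// setupTLSRootsAndLeaf swaps the leaf certificate, leaf private key, and root
// certificate in the snapshot for fixed test fixtures so the generated output
// stays deterministic across golden-file runs.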
func setupTLSRootsAndLeaf(t *testing.T, snap *proxycfg.ConfigSnapshot) {
if snap.Leaf() != nil {
switch snap.Kind {
case structs.ServiceKindConnectProxy:
snap.ConnectProxy.Leaf.CertPEM = loadTestResource(t, "test-leaf-cert")
snap.ConnectProxy.Leaf.PrivateKeyPEM = loadTestResource(t, "test-leaf-key")
case structs.ServiceKindIngressGateway:
snap.IngressGateway.Leaf.CertPEM = loadTestResource(t, "test-leaf-cert")
snap.IngressGateway.Leaf.PrivateKeyPEM = loadTestResource(t, "test-leaf-key")
}
}
if snap.Roots != nil {
snap.Roots.Roots[0].RootCert = loadTestResource(t, "test-root-cert")
}
}
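
// TestEnvoyLBConfig_InjectToCluster checks that injectLBToCluster translates
// each Consul load balancer policy into the matching Envoy LbPolicy and, where
// applicable, its LbConfig.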
func TestEnvoyLBConfig_InjectToCluster(t *testing.T) {
var tests = []struct {
name string
lb *structs.LoadBalancer
expected *envoy_cluster_v3.Cluster
}{
{
name: "skip empty",
lb: &structs.LoadBalancer{
Policy: "",
},
expected: &envoy_cluster_v3.Cluster{},
},
{
name: "round robin",
lb: &structs.LoadBalancer{
Policy: structs.LBPolicyRoundRobin,
},
expected: &envoy_cluster_v3.Cluster{LbPolicy: envoy_cluster_v3.Cluster_ROUND_ROBIN},
},
{
name: "random",
lb: &structs.LoadBalancer{
Policy: structs.LBPolicyRandom,
},
expected: &envoy_cluster_v3.Cluster{LbPolicy: envoy_cluster_v3.Cluster_RANDOM},
},
{
name: "maglev",
lb: &structs.LoadBalancer{
Policy: structs.LBPolicyMaglev,
},
expected: &envoy_cluster_v3.Cluster{LbPolicy: envoy_cluster_v3.Cluster_MAGLEV},
},
{
name: "ring_hash",
lb: &structs.LoadBalancer{
Policy: structs.LBPolicyRingHash,
RingHashConfig: &structs.RingHashConfig{
MinimumRingSize: 3,
MaximumRingSize: 7,
},
},
expected: &envoy_cluster_v3.Cluster{
LbPolicy: envoy_cluster_v3.Cluster_RING_HASH,
LbConfig: &envoy_cluster_v3.Cluster_RingHashLbConfig_{
RingHashLbConfig: &envoy_cluster_v3.Cluster_RingHashLbConfig{
MinimumRingSize: &wrappers.UInt64Value{Value: 3},
MaximumRingSize: &wrappers.UInt64Value{Value: 7},
},
},
},
},
{
name: "least_request",
lb: &structs.LoadBalancer{
Policy: "least_request",
LeastRequestConfig: &structs.LeastRequestConfig{
ChoiceCount: 3,
},
},
expected: &envoy_cluster_v3.Cluster{
LbPolicy: envoy_cluster_v3.Cluster_LEAST_REQUEST,
LbConfig: &envoy_cluster_v3.Cluster_LeastRequestLbConfig_{
LeastRequestLbConfig: &envoy_cluster_v3.Cluster_LeastRequestLbConfig{
ChoiceCount: &wrappers.UInt32Value{Value: 3},
},
},
},
},
}
for _, tc := range tests {
t.Run(tc.name, func(t *testing.T) {
var c envoy_cluster_v3.Cluster
err := injectLBToCluster(tc.lb, &c)
require.NoError(t, err)
require.Equal(t, tc.expected, &c)
})
}
}
// UID is just a convenience function to aid in writing tests less verbosely.
func UID(input string) proxycfg.UpstreamID {
return proxycfg.UpstreamIDFromString(input)
}