consul/agent/proxycfg/testing_connect_proxy.go

// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: MPL-2.0

package proxycfg

import (
"fmt"
"time"
"github.com/mitchellh/go-testing-interface"
"github.com/stretchr/testify/assert"
"github.com/hashicorp/consul/acl"
"github.com/hashicorp/consul/agent/connect"
"github.com/hashicorp/consul/agent/consul/discoverychain"
"github.com/hashicorp/consul/agent/structs"
"github.com/hashicorp/consul/api"
"github.com/hashicorp/consul/types"
)

// TestConfigSnapshot returns a fully populated snapshot
func TestConfigSnapshot(t testing.T, nsFn func(ns *structs.NodeService), extraUpdates []UpdateEvent) *ConfigSnapshot {
roots, leaf := TestCerts(t)
// no entries implies we'll get a default chain
dbChain := discoverychain.TestCompileConfigEntries(t, "db", "default", "default", "dc1", connect.TestClusterID+".consul", nil, nil)
assert.True(t, dbChain.Default)
var (
upstreams = structs.TestUpstreams(t, false)
dbUpstream = upstreams[0]
geoUpstream = upstreams[1]
dbUID = NewUpstreamID(&dbUpstream)
geoUID = NewUpstreamID(&geoUpstream)
webSN = structs.ServiceIDString("web", nil)
)
baseEvents := []UpdateEvent{
{
CorrelationID: rootsWatchID,
Result: roots,
},
{
CorrelationID: leafWatchID,
Result: leaf,
},
{
CorrelationID: intentionsWatchID,
Result: structs.SimplifiedIntentions{}, // no intentions defined
},
{
CorrelationID: svcChecksWatchIDPrefix + webSN,
Result: []structs.CheckType{},
},
{
CorrelationID: "upstream:" + geoUID.String(),
Result: &structs.PreparedQueryExecuteResponse{
Nodes: TestPreparedQueryNodes(t, "geo-cache"),
},
},
{
CorrelationID: "discovery-chain:" + dbUID.String(),
Result: &structs.DiscoveryChainResponse{
Chain: dbChain,
},
},
{
CorrelationID: "upstream-target:" + dbChain.ID() + ":" + dbUID.String(),
Result: &structs.IndexedCheckServiceNodes{
Nodes: TestUpstreamNodes(t, "db"),
},
},
}
return testConfigSnapshotFixture(t, &structs.NodeService{
Kind: structs.ServiceKindConnectProxy,
Service: "web-sidecar-proxy",
Port: 9999,
Proxy: structs.ConnectProxyConfig{
DestinationServiceID: "web",
DestinationServiceName: "web",
LocalServiceAddress: "127.0.0.1",
LocalServicePort: 8080,
Config: map[string]interface{}{
"foo": "bar",
},
Upstreams: upstreams,
},
Meta: nil,
TaggedAddresses: nil,
}, nsFn, nil, testSpliceEvents(baseEvents, extraUpdates))
}
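
// A minimal usage sketch (not part of this file): tests in other packages,
// such as xDS golden tests, typically build the snapshot and assert that it
// is fully populated before generating Envoy resources from it. The test
// name, the require import, and the Valid() call below are illustrative
// assumptions rather than code defined here.
//
//	func TestWebProxySnapshot(t *testing.T) {
//		snap := proxycfg.TestConfigSnapshot(t, nil, nil)
//		require.True(t, snap.Valid()) // all base watch events delivered
//	}
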
// TestConfigSnapshotDiscoveryChain returns a fully populated snapshot using a discovery chain
func TestConfigSnapshotDiscoveryChain(
t testing.T,
variation string,
enterprise bool,
nsFn func(ns *structs.NodeService),
extraUpdates []UpdateEvent,
additionalEntries ...structs.ConfigEntry,
) *ConfigSnapshot {
roots, leaf := TestCerts(t)
var entMeta acl.EnterpriseMeta
if enterprise {
entMeta = acl.NewEnterpriseMetaWithPartition("ap1", "ns1")
}
var (
upstreams = structs.TestUpstreams(t, enterprise)
geoUpstream = upstreams[1]
geoUID = NewUpstreamID(&geoUpstream)
webSN = structs.ServiceIDString("web", &entMeta)
)
baseEvents := testSpliceEvents([]UpdateEvent{
{
CorrelationID: rootsWatchID,
Result: roots,
},
{
CorrelationID: leafWatchID,
Result: leaf,
},
{
CorrelationID: intentionsWatchID,
Result: structs.SimplifiedIntentions{}, // no intentions defined
},
{
CorrelationID: meshConfigEntryID,
Result: &structs.ConfigEntryResponse{
Entry: nil,
},
},
{
CorrelationID: svcChecksWatchIDPrefix + webSN,
Result: []structs.CheckType{},
},
{
CorrelationID: "upstream:" + geoUID.String(),
Result: &structs.PreparedQueryExecuteResponse{
Nodes: TestPreparedQueryNodes(t, "geo-cache"),
},
},
}, setupTestVariationConfigEntriesAndSnapshot(
t, variation, enterprise, upstreams, additionalEntries...,
))
return testConfigSnapshotFixture(t, &structs.NodeService{
Kind: structs.ServiceKindConnectProxy,
Service: "web-sidecar-proxy",
Port: 9999,
Proxy: structs.ConnectProxyConfig{
DestinationServiceID: "web",
DestinationServiceName: "web",
LocalServiceAddress: "127.0.0.1",
LocalServicePort: 8080,
Config: map[string]interface{}{
"foo": "bar",
},
Upstreams: upstreams,
},
Meta: nil,
TaggedAddresses: nil,
EnterpriseMeta: entMeta,
}, nsFn, nil, testSpliceEvents(baseEvents, extraUpdates))
}
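
// TestConfigSnapshotExposeConfig returns a snapshot for a connect proxy that
// exposes two local HTTP paths (/health1 and /health2) on dedicated listener
// ports via Proxy.Expose.Paths.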
func TestConfigSnapshotExposeConfig(t testing.T, nsFn func(ns *structs.NodeService)) *ConfigSnapshot {
roots, leaf := TestCerts(t)
var (
webSN = structs.ServiceIDString("web", nil)
)
baseEvents := []UpdateEvent{
{
CorrelationID: rootsWatchID,
Result: roots,
},
{
CorrelationID: leafWatchID,
Result: leaf,
},
{
CorrelationID: intentionsWatchID,
Result: structs.SimplifiedIntentions{}, // no intentions defined
},
{
CorrelationID: svcChecksWatchIDPrefix + webSN,
Result: []structs.CheckType{},
},
}
return testConfigSnapshotFixture(t, &structs.NodeService{
Kind: structs.ServiceKindConnectProxy,
Service: "web-sidecar-proxy",
Address: "1.2.3.4",
Port: 8080,
Proxy: structs.ConnectProxyConfig{
DestinationServiceID: "web",
DestinationServiceName: "web",
LocalServicePort: 8080,
Expose: structs.ExposeConfig{
Checks: false,
Paths: []structs.ExposePath{
{
LocalPathPort: 8080,
Path: "/health1",
ListenerPort: 21500,
},
{
LocalPathPort: 8080,
Path: "/health2",
ListenerPort: 21501,
},
},
},
},
Meta: nil,
TaggedAddresses: nil,
}, nsFn, nil, baseEvents)
}
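
// TestConfigSnapshotExposeChecks returns a snapshot with Expose.Checks
// enabled and no bind_address override.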
func TestConfigSnapshotExposeChecks(t testing.T) *ConfigSnapshot {
return testConfigSnapshotExposedChecks(t, false)
}
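
// TestConfigSnapshotExposeChecksWithBindOverride returns a snapshot with
// Expose.Checks enabled and the proxy's bind_address overridden via
// Proxy.Config.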
func TestConfigSnapshotExposeChecksWithBindOverride(t testing.T) *ConfigSnapshot {
return testConfigSnapshotExposedChecks(t, true)
}
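
// testConfigSnapshotExposedChecks is the shared fixture for the two variants
// above: the local service's HTTP check is re-exposed through the proxy,
// optionally with a bind_address override.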
func testConfigSnapshotExposedChecks(t testing.T, overrideBind bool) *ConfigSnapshot {
return TestConfigSnapshot(t,
func(ns *structs.NodeService) {
ns.Address = "1.2.3.4"
ns.Port = 8080
ns.Proxy.Upstreams = nil
ns.Proxy.Expose = structs.ExposeConfig{
Checks: true,
}
if overrideBind {
if ns.Proxy.Config == nil {
ns.Proxy.Config = map[string]any{}
}
ns.Proxy.Config["bind_address"] = "6.7.8.9"
}
},
[]UpdateEvent{
{
CorrelationID: svcChecksWatchIDPrefix + structs.ServiceIDString("web", nil),
Result: []structs.CheckType{{
CheckID: types.CheckID("http"),
Name: "http",
HTTP: "http://127.0.0.1:8181/debug",
ProxyHTTP: "http://:21500/debug",
Method: "GET",
Interval: 10 * time.Second,
Timeout: 1 * time.Second,
}},
},
},
)
}
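
// TestConfigSnapshotExposeChecksGRPC returns a snapshot with Expose.Checks
// enabled for a service whose health check is gRPC rather than HTTP.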
func TestConfigSnapshotExposeChecksGRPC(t testing.T) *ConfigSnapshot {
return TestConfigSnapshot(t,
func(ns *structs.NodeService) {
ns.Address = "1.2.3.4"
ns.Port = 9090
ns.Proxy.Upstreams = nil
ns.Proxy.Expose = structs.ExposeConfig{
Checks: true,
}
},
[]UpdateEvent{
{
CorrelationID: svcChecksWatchIDPrefix + structs.ServiceIDString("web", nil),
Result: []structs.CheckType{{
CheckID: types.CheckID("grpc"),
Name: "grpc",
GRPC: "localhost:9090/v1.Health",
ProxyGRPC: "localhost:21501/myservice",
Interval: 10 * time.Second,
Timeout: 1 * time.Second,
}},
},
},
)
}
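
// TestConfigSnapshotGRPCExposeHTTP1 returns a snapshot for a gRPC service
// that exposes a plain-HTTP health endpoint (/healthz) on a separate
// listener port.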
func TestConfigSnapshotGRPCExposeHTTP1(t testing.T) *ConfigSnapshot {
roots, leaf := TestCerts(t)
return testConfigSnapshotFixture(t, &structs.NodeService{
Kind: structs.ServiceKindConnectProxy,
Service: "grpc-proxy",
Address: "1.2.3.4",
Port: 8080,
Proxy: structs.ConnectProxyConfig{
DestinationServiceName: "grpc",
DestinationServiceID: "grpc",
LocalServicePort: 8080,
Config: map[string]interface{}{
"protocol": "grpc",
},
Expose: structs.ExposeConfig{
Checks: false,
Paths: []structs.ExposePath{
{
LocalPathPort: 8090,
Path: "/healthz",
ListenerPort: 21500,
Protocol: "http",
},
},
},
},
Meta: nil,
TaggedAddresses: nil,
}, nil, nil, []UpdateEvent{
{
CorrelationID: rootsWatchID,
Result: roots,
},
{
CorrelationID: leafWatchID,
Result: leaf,
},
{
CorrelationID: intentionsWatchID,
Result: structs.SimplifiedIntentions{}, // no intentions defined
},
{
CorrelationID: svcChecksWatchIDPrefix + structs.ServiceIDString("grpc", nil),
Result: []structs.CheckType{},
},
})
}
// TestConfigSnapshotTelemetryCollector returns a fully populated snapshot using a discovery chain
func TestConfigSnapshotTelemetryCollector(t testing.T) *ConfigSnapshot {
// DiscoveryChain without an UpstreamConfig should yield a
// filter chain when in transparent proxy mode
var (
collector = structs.NewServiceName(api.TelemetryCollectorName, nil)
collectorUID = NewUpstreamIDFromServiceName(collector)
collectorChain = discoverychain.TestCompileConfigEntries(t, api.TelemetryCollectorName, "default", "default", "dc1", connect.TestClusterID+".consul", nil, nil)
)
return TestConfigSnapshot(t, func(ns *structs.NodeService) {
ns.Proxy.Config = map[string]interface{}{
"envoy_telemetry_collector_bind_socket_dir": "/tmp/consul/telemetry-collector",
}
}, []UpdateEvent{
{
CorrelationID: meshConfigEntryID,
Result: &structs.ConfigEntryResponse{
Entry: nil,
},
},
{
CorrelationID: "discovery-chain:" + collectorUID.String(),
Result: &structs.DiscoveryChainResponse{
Chain: collectorChain,
},
},
{
CorrelationID: fmt.Sprintf("upstream-target:%s.default.default.dc1:", api.TelemetryCollectorName) + collectorUID.String(),
Result: &structs.IndexedCheckServiceNodes{
Nodes: []structs.CheckServiceNode{
{
Node: &structs.Node{
Address: "8.8.8.8",
Datacenter: "dc1",
},
Service: &structs.NodeService{
Service: api.TelemetryCollectorName,
Address: "9.9.9.9",
Port: 9090,
},
},
},
},
},
})
}