2018-10-03 18:18:55 +00:00
|
|
|
package xds
|
|
|
|
|
|
|
|
import (
|
|
|
|
"errors"
|
2019-02-19 13:45:33 +00:00
|
|
|
"fmt"
|
2021-06-30 16:16:33 +00:00
|
|
|
"sort"
|
2018-10-03 18:18:55 +00:00
|
|
|
"time"
|
|
|
|
|
2021-02-26 22:23:15 +00:00
|
|
|
envoy_cluster_v3 "github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3"
|
|
|
|
envoy_core_v3 "github.com/envoyproxy/go-control-plane/envoy/config/core/v3"
|
|
|
|
envoy_endpoint_v3 "github.com/envoyproxy/go-control-plane/envoy/config/endpoint/v3"
|
|
|
|
envoy_tls_v3 "github.com/envoyproxy/go-control-plane/envoy/extensions/transport_sockets/tls/v3"
|
2022-03-30 16:51:56 +00:00
|
|
|
envoy_upstreams_v3 "github.com/envoyproxy/go-control-plane/envoy/extensions/upstreams/http/v3"
|
2021-06-29 01:58:12 +00:00
|
|
|
envoy_matcher_v3 "github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3"
|
2021-02-26 22:23:15 +00:00
|
|
|
envoy_type_v3 "github.com/envoyproxy/go-control-plane/envoy/type/v3"
|
2022-06-10 21:11:40 +00:00
|
|
|
|
2020-06-23 20:19:56 +00:00
|
|
|
"github.com/golang/protobuf/jsonpb"
|
|
|
|
"github.com/golang/protobuf/proto"
|
|
|
|
"github.com/golang/protobuf/ptypes/any"
|
2020-09-02 21:13:50 +00:00
|
|
|
"github.com/golang/protobuf/ptypes/wrappers"
|
2022-06-10 21:11:40 +00:00
|
|
|
"github.com/hashicorp/go-hclog"
|
2022-03-30 16:51:56 +00:00
|
|
|
"google.golang.org/protobuf/types/known/anypb"
|
2022-05-25 01:44:54 +00:00
|
|
|
"google.golang.org/protobuf/types/known/durationpb"
|
2021-02-22 21:00:15 +00:00
|
|
|
|
2019-08-19 18:03:03 +00:00
|
|
|
"github.com/hashicorp/consul/agent/connect"
|
2018-10-03 18:18:55 +00:00
|
|
|
"github.com/hashicorp/consul/agent/proxycfg"
|
2019-02-19 13:45:33 +00:00
|
|
|
"github.com/hashicorp/consul/agent/structs"
|
2018-10-03 18:18:55 +00:00
|
|
|
)
|
|
|
|
|
2022-06-28 19:52:25 +00:00
|
|
|
const (
	// meshGatewayExportedClusterNamePrefix is prepended to cluster names that a
	// mesh gateway generates for exported discovery chain targets (see
	// makeExportedUpstreamClustersForMeshGateway), distinguishing them from the
	// gateway's ordinary per-service clusters.
	meshGatewayExportedClusterNamePrefix = "exported~"
)
|
|
|
|
|
2019-06-24 19:05:36 +00:00
|
|
|
// clustersFromSnapshot returns the xDS API representation of the "clusters" in the snapshot.
|
2021-04-29 18:54:05 +00:00
|
|
|
func (s *ResourceGenerator) clustersFromSnapshot(cfgSnap *proxycfg.ConfigSnapshot) ([]proto.Message, error) {
|
2019-06-24 19:05:36 +00:00
|
|
|
if cfgSnap == nil {
|
|
|
|
return nil, errors.New("nil config given")
|
|
|
|
}
|
|
|
|
|
|
|
|
switch cfgSnap.Kind {
|
|
|
|
case structs.ServiceKindConnectProxy:
|
2020-03-27 21:57:16 +00:00
|
|
|
return s.clustersFromSnapshotConnectProxy(cfgSnap)
|
2020-04-13 16:33:01 +00:00
|
|
|
case structs.ServiceKindTerminatingGateway:
|
2022-05-24 16:51:52 +00:00
|
|
|
res, err := s.clustersFromSnapshotTerminatingGateway(cfgSnap)
|
2021-04-29 18:54:05 +00:00
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
2021-09-22 18:48:50 +00:00
|
|
|
return res, nil
|
2019-06-18 00:52:01 +00:00
|
|
|
case structs.ServiceKindMeshGateway:
|
2021-04-29 18:54:05 +00:00
|
|
|
res, err := s.clustersFromSnapshotMeshGateway(cfgSnap)
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
2021-09-22 18:48:50 +00:00
|
|
|
return res, nil
|
2020-04-16 21:00:48 +00:00
|
|
|
case structs.ServiceKindIngressGateway:
|
2021-04-29 18:54:05 +00:00
|
|
|
res, err := s.clustersFromSnapshotIngressGateway(cfgSnap)
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
2021-09-22 18:48:50 +00:00
|
|
|
return res, nil
|
2019-06-24 19:05:36 +00:00
|
|
|
default:
|
|
|
|
return nil, fmt.Errorf("Invalid service kind: %v", cfgSnap.Kind)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2018-10-03 18:18:55 +00:00
|
|
|
// clustersFromSnapshotConnectProxy returns the xDS API representation of the
// "clusters" (upstreams) in the snapshot for a connect proxy: the local "app"
// cluster, passthrough clusters (transparent mode), discovery chain clusters,
// peered upstream clusters, prepared query clusters, and exposed-path clusters.
func (s *ResourceGenerator) clustersFromSnapshotConnectProxy(cfgSnap *proxycfg.ConfigSnapshot) ([]proto.Message, error) {
	// This sizing is a lower bound.
	clusters := make([]proto.Message, 0, len(cfgSnap.ConnectProxy.DiscoveryChain)+1)

	// Include the "app" cluster for the public listener
	appCluster, err := s.makeAppCluster(cfgSnap, LocalAppClusterName, "", cfgSnap.Proxy.LocalServicePort)
	if err != nil {
		return nil, err
	}
	clusters = append(clusters, appCluster)

	// Transparent proxies additionally get ORIGINAL_DST passthrough clusters.
	if cfgSnap.Proxy.Mode == structs.ProxyModeTransparent {
		passthroughs, err := makePassthroughClusters(cfgSnap)
		if err != nil {
			return nil, fmt.Errorf("failed to make passthrough clusters for transparent proxy: %v", err)
		}
		clusters = append(clusters, passthroughs...)
	}

	// NOTE: Any time we skip a chain below we MUST also skip that discovery chain in endpoints.go
	// so that the sets of endpoints generated matches the sets of clusters.
	for uid, chain := range cfgSnap.ConnectProxy.DiscoveryChain {
		upstream := cfgSnap.ConnectProxy.UpstreamConfig[uid]

		explicit := upstream.HasLocalPortOrSocket()
		implicit := cfgSnap.ConnectProxy.IsImplicitUpstream(uid)
		if !implicit && !explicit {
			// Discovery chain is not associated with a known explicit or implicit upstream so it is skipped.
			continue
		}

		chainEndpoints, ok := cfgSnap.ConnectProxy.WatchedUpstreamEndpoints[uid]
		if !ok {
			// this should not happen
			return nil, fmt.Errorf("no endpoint map for upstream %q", uid)
		}

		upstreamClusters, err := s.makeUpstreamClustersForDiscoveryChain(
			uid,
			upstream,
			chain,
			chainEndpoints,
			cfgSnap,
			false,
		)
		if err != nil {
			return nil, err
		}

		// Append individually: upstreamClusters is a concrete cluster slice,
		// clusters holds proto.Message, so a spread append is not possible.
		for _, cluster := range upstreamClusters {
			clusters = append(clusters, cluster)
		}
	}

	// NOTE: Any time we skip an upstream below we MUST also skip that same
	// upstream in endpoints.go so that the sets of endpoints generated matches
	// the sets of clusters.
	for _, uid := range cfgSnap.ConnectProxy.PeeredUpstreamIDs() {
		upstreamCfg := cfgSnap.ConnectProxy.UpstreamConfig[uid]

		explicit := upstreamCfg.HasLocalPortOrSocket()
		implicit := cfgSnap.ConnectProxy.IsImplicitUpstream(uid)
		if !implicit && !explicit {
			// Not associated with a known explicit or implicit upstream so it is skipped.
			continue
		}

		peerMeta := cfgSnap.ConnectProxy.UpstreamPeerMeta(uid)

		upstreamCluster, err := s.makeUpstreamClusterForPeerService(uid, upstreamCfg, peerMeta, cfgSnap)
		if err != nil {
			return nil, err
		}
		clusters = append(clusters, upstreamCluster)
	}

	// Prepared-query upstreams are not covered by discovery chains above and
	// get their own dedicated clusters.
	for _, u := range cfgSnap.Proxy.Upstreams {
		if u.DestinationType != structs.UpstreamDestTypePreparedQuery {
			continue
		}

		upstreamCluster, err := s.makeUpstreamClusterForPreparedQuery(u, cfgSnap)
		if err != nil {
			return nil, err
		}
		clusters = append(clusters, upstreamCluster)
	}

	cfgSnap.Proxy.Expose.Finalize()
	paths := cfgSnap.Proxy.Expose.Paths

	// Add service health checks to the list of paths to create clusters for if needed
	if cfgSnap.Proxy.Expose.Checks {
		psid := structs.NewServiceID(cfgSnap.Proxy.DestinationServiceID, &cfgSnap.ProxyID.EnterpriseMeta)
		for _, check := range cfgSnap.ConnectProxy.WatchedServiceChecks[psid] {
			p, err := parseCheckPath(check)
			if err != nil {
				// Best-effort: a bad check definition should not block the
				// rest of the cluster set.
				s.Logger.Warn("failed to create cluster for", "check", check.CheckID, "error", err)
				continue
			}
			paths = append(paths, p)
		}
	}

	// Create a new cluster if we need to expose a port that is different from the service port
	for _, path := range paths {
		if path.LocalPathPort == cfgSnap.Proxy.LocalServicePort {
			continue
		}
		c, err := s.makeAppCluster(cfgSnap, makeExposeClusterName(path.LocalPathPort), path.Protocol, path.LocalPathPort)
		if err != nil {
			s.Logger.Warn("failed to make local cluster", "path", path.Path, "error", err)
			continue
		}
		clusters = append(clusters, c)
	}
	return clusters, nil
}
|
|
|
|
|
2019-09-26 02:55:52 +00:00
|
|
|
// makeExposeClusterName returns the name of the local app cluster created to
// serve an exposed path on the given local port.
func makeExposeClusterName(destinationPort int) string {
	return fmt.Sprint("exposed_cluster_", destinationPort)
}
|
|
|
|
|
2021-06-09 20:34:17 +00:00
|
|
|
// In transparent proxy mode there are potentially multiple passthrough clusters added.
// The first is for destinations outside of Consul's catalog. This is for a plain TCP proxy.
// All of these use Envoy's ORIGINAL_DST listener filter, which forwards to the original
// destination address (before the iptables redirection).
// The rest are for destinations inside the mesh, which require certificates for mTLS.
func makePassthroughClusters(cfgSnap *proxycfg.ConfigSnapshot) ([]proto.Message, error) {
	// This size is an upper bound.
	clusters := make([]proto.Message, 0, len(cfgSnap.ConnectProxy.PassthroughUpstreams)+1)

	// The catch-all ORIGINAL_DST cluster is only added when the mesh config
	// does not restrict traffic to mesh destinations only.
	if meshConf := cfgSnap.MeshConfig(); meshConf == nil ||
		!meshConf.TransparentProxy.MeshDestinationsOnly {

		clusters = append(clusters, &envoy_cluster_v3.Cluster{
			Name: OriginalDestinationClusterName,
			ClusterDiscoveryType: &envoy_cluster_v3.Cluster_Type{
				Type: envoy_cluster_v3.Cluster_ORIGINAL_DST,
			},
			LbPolicy: envoy_cluster_v3.Cluster_CLUSTER_PROVIDED,

			ConnectTimeout: durationpb.New(5 * time.Second),
		})
	}

	// One passthrough cluster per discovery chain target that has passthrough
	// upstream addresses recorded.
	for uid, chain := range cfgSnap.ConnectProxy.DiscoveryChain {
		targetMap, ok := cfgSnap.ConnectProxy.PassthroughUpstreams[uid]
		if !ok {
			continue
		}

		for targetID := range targetMap {
			uid := proxycfg.NewUpstreamIDFromTargetID(targetID)

			sni := connect.ServiceSNI(
				uid.Name, "", uid.NamespaceOrDefault(), uid.PartitionOrDefault(), cfgSnap.Datacenter, cfgSnap.Roots.TrustDomain)

			// Prefixed with passthrough to distinguish from non-passthrough clusters for the same upstream.
			name := "passthrough~" + sni

			c := envoy_cluster_v3.Cluster{
				Name: name,
				ClusterDiscoveryType: &envoy_cluster_v3.Cluster_Type{
					Type: envoy_cluster_v3.Cluster_ORIGINAL_DST,
				},
				LbPolicy: envoy_cluster_v3.Cluster_CLUSTER_PROVIDED,

				ConnectTimeout: durationpb.New(5 * time.Second),
			}

			// Prefer a per-target connect timeout from the discovery chain,
			// when one is configured, over the 5s default above.
			if discoTarget, ok := chain.Targets[targetID]; ok && discoTarget.ConnectTimeout > 0 {
				c.ConnectTimeout = durationpb.New(discoTarget.ConnectTimeout)
			}

			transportSocket, err := makeMTLSTransportSocket(cfgSnap, uid, sni)
			if err != nil {
				return nil, err
			}
			c.TransportSocket = transportSocket
			clusters = append(clusters, &c)
		}
	}

	// One EDS cluster per Destination upstream, with mTLS to the terminating
	// gateway that fronts the destination.
	err := cfgSnap.ConnectProxy.DestinationsUpstream.ForEachKeyE(func(uid proxycfg.UpstreamID) error {
		name := clusterNameForDestination(cfgSnap, uid.Name, uid.NamespaceOrDefault(), uid.PartitionOrDefault())

		c := envoy_cluster_v3.Cluster{
			Name:           name,
			AltStatName:    name,
			ConnectTimeout: durationpb.New(5 * time.Second),
			CommonLbConfig: &envoy_cluster_v3.Cluster_CommonLbConfig{
				HealthyPanicThreshold: &envoy_type_v3.Percent{
					Value: 0, // disable panic threshold
				},
			},
			ClusterDiscoveryType: &envoy_cluster_v3.Cluster_Type{Type: envoy_cluster_v3.Cluster_EDS},
			EdsClusterConfig: &envoy_cluster_v3.Cluster_EdsClusterConfig{
				EdsConfig: &envoy_core_v3.ConfigSource{
					ResourceApiVersion: envoy_core_v3.ApiVersion_V3,
					ConfigSourceSpecifier: &envoy_core_v3.ConfigSource_Ads{
						Ads: &envoy_core_v3.AggregatedConfigSource{},
					},
				},
			},
			// Endpoints are managed separately by EDS
			// Having an empty config enables outlier detection with default config.
			OutlierDetection: &envoy_cluster_v3.OutlierDetection{},
		}

		// Use the cluster name as the SNI to match on in the terminating gateway
		transportSocket, err := makeMTLSTransportSocket(cfgSnap, uid, name)
		if err != nil {
			return err
		}
		c.TransportSocket = transportSocket
		clusters = append(clusters, &c)
		return nil
	})
	if err != nil {
		return nil, err
	}

	return clusters, nil
}
|
|
|
|
|
2022-07-14 18:45:51 +00:00
|
|
|
func makeMTLSTransportSocket(cfgSnap *proxycfg.ConfigSnapshot, uid proxycfg.UpstreamID, sni string) (*envoy_core_v3.TransportSocket, error) {
|
|
|
|
spiffeID := connect.SpiffeIDService{
|
|
|
|
Host: cfgSnap.Roots.TrustDomain,
|
|
|
|
Partition: uid.PartitionOrDefault(),
|
|
|
|
Namespace: uid.NamespaceOrDefault(),
|
|
|
|
Datacenter: cfgSnap.Datacenter,
|
|
|
|
Service: uid.Name,
|
|
|
|
}
|
|
|
|
|
|
|
|
commonTLSContext := makeCommonTLSContext(
|
|
|
|
cfgSnap.Leaf(),
|
|
|
|
cfgSnap.RootPEMs(),
|
|
|
|
makeTLSParametersFromProxyTLSConfig(cfgSnap.MeshConfigTLSOutgoing()),
|
|
|
|
)
|
|
|
|
err := injectSANMatcher(commonTLSContext, spiffeID.URI().String())
|
|
|
|
if err != nil {
|
|
|
|
return nil, fmt.Errorf("failed to inject SAN matcher rules for cluster %q: %v", sni, err)
|
|
|
|
}
|
|
|
|
tlsContext := envoy_tls_v3.UpstreamTlsContext{
|
|
|
|
CommonTlsContext: commonTLSContext,
|
|
|
|
Sni: sni,
|
|
|
|
}
|
|
|
|
transportSocket, err := makeUpstreamTLSTransportSocket(&tlsContext)
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
return transportSocket, nil
|
|
|
|
}
|
|
|
|
|
|
|
|
func clusterNameForDestination(cfgSnap *proxycfg.ConfigSnapshot, name string, namespace string, partition string) string {
|
|
|
|
sni := connect.ServiceSNI(name, "", namespace, partition, cfgSnap.Datacenter, cfgSnap.Roots.TrustDomain)
|
|
|
|
|
|
|
|
// Prefixed with destination to distinguish from non-passthrough clusters for the same upstream.
|
|
|
|
return "destination~" + sni
|
|
|
|
}
|
|
|
|
|
2019-06-18 00:52:01 +00:00
|
|
|
// clustersFromSnapshotMeshGateway returns the xDS API representation of the "clusters"
// for a mesh gateway. This will include 1 cluster per remote datacenter as well as
// 1 cluster for each service subset.
func (s *ResourceGenerator) clustersFromSnapshotMeshGateway(cfgSnap *proxycfg.ConfigSnapshot) ([]proto.Message, error) {
	keys := cfgSnap.MeshGateway.GatewayKeys()

	// 1 cluster per remote dc/partition + 1 cluster per local service (this is a lower bound - all subset specific clusters will be appended)
	clusters := make([]proto.Message, 0, len(keys)+len(cfgSnap.MeshGateway.ServiceGroups))

	// Generate the remote clusters
	for _, key := range keys {
		if key.Matches(cfgSnap.Datacenter, cfgSnap.ProxyID.PartitionOrDefault()) {
			continue // skip local
		}

		opts := clusterOpts{
			name:              connect.GatewaySNI(key.Datacenter, key.Partition, cfgSnap.Roots.TrustDomain),
			hostnameEndpoints: cfgSnap.MeshGateway.HostnameDatacenters[key.String()],
			isRemote:          true,
		}
		cluster := s.makeGatewayCluster(cfgSnap, opts)
		clusters = append(clusters, cluster)
	}

	// WAN federation server clusters are only generated for gateways in the
	// default partition that are flagged for WAN federation and have a server
	// SNI function available.
	if cfgSnap.ProxyID.InDefaultPartition() &&
		cfgSnap.ServiceMeta[structs.MetaWANFederationKey] == "1" &&
		cfgSnap.ServerSNIFn != nil {

		// Add all of the remote wildcard datacenter mappings for servers.
		for _, key := range keys {
			hostnameEndpoints := cfgSnap.MeshGateway.HostnameDatacenters[key.String()]

			// If the DC is our current DC then this cluster is for traffic from a remote DC to a local server.
			// HostnameDatacenters is populated with gateway addresses, so it does not apply here.
			if key.Datacenter == cfgSnap.Datacenter {
				hostnameEndpoints = nil
			}
			opts := clusterOpts{
				name:              cfgSnap.ServerSNIFn(key.Datacenter, ""),
				hostnameEndpoints: hostnameEndpoints,
				isRemote:          !key.Matches(cfgSnap.Datacenter, cfgSnap.ProxyID.PartitionOrDefault()),
			}
			cluster := s.makeGatewayCluster(cfgSnap, opts)
			clusters = append(clusters, cluster)
		}

		// And for the current datacenter, send all flavors appropriately.
		for _, srv := range cfgSnap.MeshGateway.ConsulServers {
			opts := clusterOpts{
				name: cfgSnap.ServerSNIFn(cfgSnap.Datacenter, srv.Node.Node),
			}
			cluster := s.makeGatewayCluster(cfgSnap, opts)
			clusters = append(clusters, cluster)
		}
	}

	// generate the per-service/subset clusters
	c, err := s.makeGatewayServiceClusters(cfgSnap, cfgSnap.MeshGateway.ServiceGroups, cfgSnap.MeshGateway.ServiceResolvers)
	if err != nil {
		return nil, err
	}
	clusters = append(clusters, c...)

	// Generate per-target clusters for all exported discovery chains.
	c, err = s.makeExportedUpstreamClustersForMeshGateway(cfgSnap)
	if err != nil {
		return nil, err
	}
	clusters = append(clusters, c...)

	return clusters, nil
}
|
|
|
|
|
2022-05-24 16:51:52 +00:00
|
|
|
// clustersFromSnapshotTerminatingGateway returns the xDS API representation of the "clusters"
|
|
|
|
// for a terminating gateway. This will include 1 cluster per Destination associated with this terminating gateway.
|
|
|
|
func (s *ResourceGenerator) clustersFromSnapshotTerminatingGateway(cfgSnap *proxycfg.ConfigSnapshot) ([]proto.Message, error) {
|
|
|
|
res := []proto.Message{}
|
|
|
|
gwClusters, err := s.makeGatewayServiceClusters(cfgSnap, cfgSnap.TerminatingGateway.ServiceGroups, cfgSnap.TerminatingGateway.ServiceResolvers)
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
res = append(res, gwClusters...)
|
|
|
|
|
|
|
|
destClusters, err := s.makeDestinationClusters(cfgSnap)
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
res = append(res, destClusters...)
|
|
|
|
|
|
|
|
return res, nil
|
|
|
|
}
|
|
|
|
|
2021-04-29 18:54:05 +00:00
|
|
|
// makeGatewayServiceClusters generates one cluster per service in the given
// service groups (and one additional cluster per service-resolver subset) for
// a terminating or mesh gateway. Resolver settings supply connect timeouts,
// subsets, and load balancer policy; other gateway kinds are rejected.
func (s *ResourceGenerator) makeGatewayServiceClusters(
	cfgSnap *proxycfg.ConfigSnapshot,
	services map[structs.ServiceName]structs.CheckServiceNodes,
	resolvers map[structs.ServiceName]*structs.ServiceResolverConfigEntry,
) ([]proto.Message, error) {
	var hostnameEndpoints structs.CheckServiceNodes

	// Only terminating and mesh gateways are supported here.
	switch cfgSnap.Kind {
	case structs.ServiceKindTerminatingGateway, structs.ServiceKindMeshGateway:
	default:
		return nil, fmt.Errorf("unsupported gateway kind %q", cfgSnap.Kind)
	}

	clusters := make([]proto.Message, 0, len(services))

	for svc := range services {
		clusterName := connect.ServiceSNI(svc.Name, "", svc.NamespaceOrDefault(), svc.PartitionOrDefault(), cfgSnap.Datacenter, cfgSnap.Roots.TrustDomain)
		resolver, hasResolver := resolvers[svc]

		var loadBalancer *structs.LoadBalancer

		if !hasResolver {
			// Use a zero value resolver with no timeout and no subsets
			resolver = &structs.ServiceResolverConfigEntry{}
		}
		if resolver.LoadBalancer != nil {
			loadBalancer = resolver.LoadBalancer
		}

		// When making service clusters we only pass endpoints with hostnames if the kind is a terminating gateway
		// This is because the services a mesh gateway will route to are not external services and are not addressed by a hostname.
		if cfgSnap.Kind == structs.ServiceKindTerminatingGateway {
			hostnameEndpoints = cfgSnap.TerminatingGateway.HostnameServices[svc]
		}

		// A service is considered remote when its first node is in a different
		// datacenter/partition than this gateway.
		var isRemote bool
		if len(services[svc]) > 0 {
			isRemote = !cfgSnap.Locality.Matches(services[svc][0].Node.Datacenter, services[svc][0].Node.PartitionOrDefault())
		}

		opts := clusterOpts{
			name:              clusterName,
			hostnameEndpoints: hostnameEndpoints,
			connectTimeout:    resolver.ConnectTimeout,
			isRemote:          isRemote,
		}
		cluster := s.makeGatewayCluster(cfgSnap, opts)

		if err := s.injectGatewayServiceAddons(cfgSnap, cluster, svc, loadBalancer); err != nil {
			return nil, err
		}
		clusters = append(clusters, cluster)

		// Determine whether the service speaks http2/grpc from its
		// terminating-gateway service config, so protocol options can be set
		// on the base cluster and every subset cluster below.
		svcConfig, ok := cfgSnap.TerminatingGateway.ServiceConfigs[svc]
		isHTTP2 := false
		if ok {
			upstreamCfg, err := structs.ParseUpstreamConfig(svcConfig.ProxyConfig)
			if err != nil {
				// Don't hard fail on a config typo, just warn. The parse func returns
				// default config if there is an error so it's safe to continue.
				s.Logger.Warn("failed to parse", "upstream", svc, "error", err)
			}
			isHTTP2 = upstreamCfg.Protocol == "http2" || upstreamCfg.Protocol == "grpc"
		}

		if isHTTP2 {
			if err := s.setHttp2ProtocolOptions(cluster); err != nil {
				return nil, err
			}
		}

		// If there is a service-resolver for this service then also setup a cluster for each subset
		for name, subset := range resolver.Subsets {
			subsetHostnameEndpoints, err := s.filterSubsetEndpoints(&subset, hostnameEndpoints)
			if err != nil {
				return nil, err
			}

			opts := clusterOpts{
				name:              connect.ServiceSNI(svc.Name, name, svc.NamespaceOrDefault(), svc.PartitionOrDefault(), cfgSnap.Datacenter, cfgSnap.Roots.TrustDomain),
				hostnameEndpoints: subsetHostnameEndpoints,
				onlyPassing:       subset.OnlyPassing,
				connectTimeout:    resolver.ConnectTimeout,
				isRemote:          isRemote,
			}
			cluster := s.makeGatewayCluster(cfgSnap, opts)

			if err := s.injectGatewayServiceAddons(cfgSnap, cluster, svc, loadBalancer); err != nil {
				return nil, err
			}
			if isHTTP2 {
				if err := s.setHttp2ProtocolOptions(cluster); err != nil {
					return nil, err
				}
			}
			clusters = append(clusters, cluster)
		}
	}

	return clusters, nil
}
|
|
|
|
|
2022-05-24 16:51:52 +00:00
|
|
|
func (s *ResourceGenerator) makeDestinationClusters(cfgSnap *proxycfg.ConfigSnapshot) ([]proto.Message, error) {
|
|
|
|
serviceConfigs := cfgSnap.TerminatingGateway.ServiceConfigs
|
|
|
|
|
|
|
|
clusters := make([]proto.Message, 0, len(cfgSnap.TerminatingGateway.DestinationServices))
|
|
|
|
|
|
|
|
for _, svcName := range cfgSnap.TerminatingGateway.ValidDestinations() {
|
|
|
|
svcConfig, _ := serviceConfigs[svcName]
|
|
|
|
dest := svcConfig.Destination
|
|
|
|
|
|
|
|
opts := clusterOpts{
|
2022-07-14 18:45:51 +00:00
|
|
|
name: clusterNameForDestination(cfgSnap, svcName.Name, svcName.NamespaceOrDefault(), svcName.PartitionOrDefault()),
|
|
|
|
addressEndpoint: dest,
|
2022-05-24 16:51:52 +00:00
|
|
|
}
|
|
|
|
|
2022-07-14 18:45:51 +00:00
|
|
|
var cluster *envoy_cluster_v3.Cluster
|
|
|
|
if dest.HasIP() {
|
|
|
|
cluster = s.makeTerminatingIPCluster(cfgSnap, opts)
|
|
|
|
} else {
|
|
|
|
cluster = s.makeTerminatingHostnameCluster(cfgSnap, opts)
|
|
|
|
}
|
2022-05-24 16:51:52 +00:00
|
|
|
clusters = append(clusters, cluster)
|
|
|
|
}
|
|
|
|
return clusters, nil
|
|
|
|
}
|
|
|
|
|
2021-04-29 18:54:05 +00:00
|
|
|
// injectGatewayServiceAddons mutates the given cluster with kind-specific
// additions: load balancer configuration from the service-resolver and, for
// terminating gateways with a configured CA file, an upstream TLS transport
// socket for TLS origination to the backing service.
func (s *ResourceGenerator) injectGatewayServiceAddons(cfgSnap *proxycfg.ConfigSnapshot, c *envoy_cluster_v3.Cluster, svc structs.ServiceName, lb *structs.LoadBalancer) error {
	switch cfgSnap.Kind {
	case structs.ServiceKindMeshGateway:
		// We can't apply hash based LB config to mesh gateways because they rely on inspecting HTTP attributes
		// and mesh gateways do not decrypt traffic
		if !lb.IsHashBased() {
			if err := injectLBToCluster(lb, c); err != nil {
				return fmt.Errorf("failed to apply load balancer configuration to cluster %q: %v", c.Name, err)
			}
		}
	case structs.ServiceKindTerminatingGateway:
		// Context used for TLS origination to the cluster
		if mapping, ok := cfgSnap.TerminatingGateway.GatewayServices[svc]; ok && mapping.CAFile != "" {
			tlsContext := &envoy_tls_v3.UpstreamTlsContext{
				CommonTlsContext: makeCommonTLSContextFromFiles(mapping.CAFile, mapping.CertFile, mapping.KeyFile),
			}
			if mapping.SNI != "" {
				tlsContext.Sni = mapping.SNI
				if err := injectSANMatcher(tlsContext.CommonTlsContext, mapping.SNI); err != nil {
					return fmt.Errorf("failed to inject SNI matcher into TLS context: %v", err)
				}
			}

			transportSocket, err := makeUpstreamTLSTransportSocket(tlsContext)
			if err != nil {
				return err
			}
			c.TransportSocket = transportSocket
		}
		// Terminating gateways terminate TLS, so any LB policy (including
		// hash-based) may be applied here.
		if err := injectLBToCluster(lb, c); err != nil {
			return fmt.Errorf("failed to apply load balancer configuration to cluster %q: %v", c.Name, err)
		}

	}
	return nil
}
|
|
|
|
|
2021-04-29 18:54:05 +00:00
|
|
|
// clustersFromSnapshotIngressGateway returns the xDS API representation of the
// "clusters" for an ingress gateway: discovery chain clusters for every
// upstream referenced by any listener, deduplicated across listeners.
func (s *ResourceGenerator) clustersFromSnapshotIngressGateway(cfgSnap *proxycfg.ConfigSnapshot) ([]proto.Message, error) {
	var clusters []proto.Message
	// Tracks upstreams already handled, since several listeners can reference
	// the same upstream.
	createdClusters := make(map[proxycfg.UpstreamID]bool)
	for _, upstreams := range cfgSnap.IngressGateway.Upstreams {
		for _, u := range upstreams {
			uid := proxycfg.NewUpstreamID(&u)

			// If we've already created a cluster for this upstream, skip it. Multiple listeners may
			// reference the same upstream, so we don't need to create duplicate clusters in that case.
			if createdClusters[uid] {
				continue
			}

			chain, ok := cfgSnap.IngressGateway.DiscoveryChain[uid]
			if !ok {
				// this should not happen
				return nil, fmt.Errorf("no discovery chain for upstream %q", uid)
			}

			chainEndpoints, ok := cfgSnap.IngressGateway.WatchedUpstreamEndpoints[uid]
			if !ok {
				// this should not happen
				return nil, fmt.Errorf("no endpoint map for upstream %q", uid)
			}

			upstreamClusters, err := s.makeUpstreamClustersForDiscoveryChain(
				uid,
				&u,
				chain,
				chainEndpoints,
				cfgSnap,
				false,
			)
			if err != nil {
				return nil, err
			}

			for _, c := range upstreamClusters {
				clusters = append(clusters, c)
			}
			createdClusters[uid] = true
		}
	}
	return clusters, nil
}
|
|
|
|
|
2021-04-29 18:54:05 +00:00
|
|
|
func (s *ResourceGenerator) makeAppCluster(cfgSnap *proxycfg.ConfigSnapshot, name, pathProtocol string, port int) (*envoy_cluster_v3.Cluster, error) {
|
2021-02-26 22:23:15 +00:00
|
|
|
var c *envoy_cluster_v3.Cluster
|
2019-02-19 13:45:33 +00:00
|
|
|
var err error
|
|
|
|
|
2019-04-29 16:27:57 +00:00
|
|
|
cfg, err := ParseProxyConfig(cfgSnap.Proxy.Config)
|
|
|
|
if err != nil {
|
|
|
|
// Don't hard fail on a config typo, just warn. The parse func returns
|
|
|
|
// default config if there is an error so it's safe to continue.
|
2020-01-28 23:50:41 +00:00
|
|
|
s.Logger.Warn("failed to parse Connect.Proxy.Config", "error", err)
|
2019-04-29 16:27:57 +00:00
|
|
|
}
|
|
|
|
|
2019-03-06 17:13:28 +00:00
|
|
|
// If we have overridden local cluster config try to parse it into an Envoy cluster
|
2019-04-29 16:27:57 +00:00
|
|
|
if cfg.LocalClusterJSON != "" {
|
|
|
|
return makeClusterFromUserConfig(cfg.LocalClusterJSON)
|
2018-10-03 18:18:55 +00:00
|
|
|
}
|
2019-02-19 13:45:33 +00:00
|
|
|
|
2021-05-04 04:43:55 +00:00
|
|
|
var endpoint *envoy_endpoint_v3.LbEndpoint
|
|
|
|
if cfgSnap.Proxy.LocalServiceSocketPath != "" {
|
|
|
|
endpoint = makePipeEndpoint(cfgSnap.Proxy.LocalServiceSocketPath)
|
|
|
|
} else {
|
|
|
|
addr := cfgSnap.Proxy.LocalServiceAddress
|
|
|
|
if addr == "" {
|
|
|
|
addr = "127.0.0.1"
|
|
|
|
}
|
|
|
|
endpoint = makeEndpoint(addr, port)
|
2019-07-19 11:53:42 +00:00
|
|
|
}
|
2021-05-04 04:43:55 +00:00
|
|
|
|
2021-02-26 22:23:15 +00:00
|
|
|
c = &envoy_cluster_v3.Cluster{
|
2019-09-26 02:55:52 +00:00
|
|
|
Name: name,
|
2022-05-25 01:44:54 +00:00
|
|
|
ConnectTimeout: durationpb.New(time.Duration(cfg.LocalConnectTimeoutMs) * time.Millisecond),
|
2021-02-26 22:23:15 +00:00
|
|
|
ClusterDiscoveryType: &envoy_cluster_v3.Cluster_Type{Type: envoy_cluster_v3.Cluster_STATIC},
|
|
|
|
LoadAssignment: &envoy_endpoint_v3.ClusterLoadAssignment{
|
2019-09-26 02:55:52 +00:00
|
|
|
ClusterName: name,
|
2021-02-26 22:23:15 +00:00
|
|
|
Endpoints: []*envoy_endpoint_v3.LocalityLbEndpoints{
|
2019-07-19 11:53:42 +00:00
|
|
|
{
|
2021-02-26 22:23:15 +00:00
|
|
|
LbEndpoints: []*envoy_endpoint_v3.LbEndpoint{
|
2021-05-04 04:43:55 +00:00
|
|
|
endpoint,
|
2019-04-29 16:27:57 +00:00
|
|
|
},
|
|
|
|
},
|
|
|
|
},
|
2019-07-19 11:53:42 +00:00
|
|
|
},
|
|
|
|
}
|
2020-04-02 07:35:04 +00:00
|
|
|
protocol := pathProtocol
|
|
|
|
if protocol == "" {
|
|
|
|
protocol = cfg.Protocol
|
|
|
|
}
|
|
|
|
if protocol == "http2" || protocol == "grpc" {
|
2022-03-30 18:27:49 +00:00
|
|
|
if err := s.setHttp2ProtocolOptions(c); err != nil {
|
|
|
|
return c, err
|
|
|
|
}
|
2018-10-03 18:18:55 +00:00
|
|
|
}
|
2022-05-19 17:06:13 +00:00
|
|
|
if cfg.MaxInboundConnections > 0 {
|
|
|
|
c.CircuitBreakers = &envoy_cluster_v3.CircuitBreakers{
|
|
|
|
Thresholds: []*envoy_cluster_v3.CircuitBreakers_Thresholds{
|
|
|
|
{
|
|
|
|
MaxConnections: makeUint32Value(cfg.MaxInboundConnections),
|
|
|
|
},
|
|
|
|
},
|
|
|
|
}
|
|
|
|
}
|
2019-02-19 13:45:33 +00:00
|
|
|
|
|
|
|
return c, err
|
2018-10-03 18:18:55 +00:00
|
|
|
}
|
|
|
|
|
2022-06-03 21:42:50 +00:00
|
|
|
// makeUpstreamClusterForPeerService returns an Envoy cluster for an upstream
// service imported from a cluster peer. The cluster is named after the peer's
// primary SNI when available (falling back to the upstream's Envoy ID), uses
// EDS unless the peer endpoints are addressed by hostname, and is always
// wrapped in upstream TLS validated against the peer's trust bundle and
// SPIFFE IDs.
func (s *ResourceGenerator) makeUpstreamClusterForPeerService(
	uid proxycfg.UpstreamID,
	upstream *structs.Upstream,
	peerMeta structs.PeeringServiceMeta,
	cfgSnap *proxycfg.ConfigSnapshot,
) (*envoy_cluster_v3.Cluster, error) {
	var (
		c   *envoy_cluster_v3.Cluster
		err error
	)

	cfg := s.getAndModifyUpstreamConfigForPeeredListener(uid, upstream, peerMeta)
	// The user-supplied escape-hatch cluster JSON takes precedence over the
	// generated cluster, but TLS still gets injected below.
	if cfg.EnvoyClusterJSON != "" {
		c, err = makeClusterFromUserConfig(cfg.EnvoyClusterJSON)
		if err != nil {
			return c, err
		}
		// In the happy path don't return yet as we need to inject TLS config still.
	}

	// TODO(peering): if we replicated service metadata separately from the
	// instances we wouldn't have to flip/flop this cluster name like this.
	clusterName := peerMeta.PrimarySNI()
	if clusterName == "" {
		clusterName = uid.EnvoyID()
	}

	s.Logger.Trace("generating cluster for", "cluster", clusterName)
	// Only build a cluster here when no escape-hatch JSON was provided.
	if c == nil {
		c = &envoy_cluster_v3.Cluster{
			Name:           clusterName,
			AltStatName:    clusterName,
			ConnectTimeout: durationpb.New(time.Duration(cfg.ConnectTimeoutMs) * time.Millisecond),
			CommonLbConfig: &envoy_cluster_v3.Cluster_CommonLbConfig{
				HealthyPanicThreshold: &envoy_type_v3.Percent{
					Value: 0, // disable panic threshold
				},
			},
			CircuitBreakers: &envoy_cluster_v3.CircuitBreakers{
				Thresholds: makeThresholdsIfNeeded(cfg.Limits),
			},
			OutlierDetection: ToOutlierDetection(cfg.PassiveHealthCheck),
		}
		if cfg.Protocol == "http2" || cfg.Protocol == "grpc" {
			if err := s.setHttp2ProtocolOptions(c); err != nil {
				return c, err
			}
		}

		// Hostname-addressed peer endpoints cannot be served over EDS; Envoy
		// must resolve them itself via DNS.
		useEDS := true
		if _, ok := cfgSnap.ConnectProxy.PeerUpstreamEndpointsUseHostnames[uid]; ok {
			useEDS = false
		}

		// If none of the service instances are addressed by a hostname we
		// provide the endpoint IP addresses via EDS
		if useEDS {
			c.ClusterDiscoveryType = &envoy_cluster_v3.Cluster_Type{Type: envoy_cluster_v3.Cluster_EDS}
			c.EdsClusterConfig = &envoy_cluster_v3.Cluster_EdsClusterConfig{
				EdsConfig: &envoy_core_v3.ConfigSource{
					ResourceApiVersion: envoy_core_v3.ApiVersion_V3,
					ConfigSourceSpecifier: &envoy_core_v3.ConfigSource_Ads{
						Ads: &envoy_core_v3.AggregatedConfigSource{},
					},
				},
			}
		} else {
			ep, _ := cfgSnap.ConnectProxy.PeerUpstreamEndpoints.Get(uid)
			configureClusterWithHostnames(
				s.Logger,
				c,
				"", /*TODO:make configurable?*/
				ep,
				true,  /*isRemote*/
				false, /*onlyPassing*/
			)
		}
	}

	// Peered upstreams are validated against the peer's replicated trust
	// bundle rather than the local root CAs.
	rootPEMs := cfgSnap.RootPEMs()
	if uid.Peer != "" {
		tbs, _ := cfgSnap.ConnectProxy.UpstreamPeerTrustBundles.Get(uid.Peer)
		rootPEMs = tbs.ConcatenatedRootPEMs()
	}

	// Enable TLS upstream with the configured client certificate.
	commonTLSContext := makeCommonTLSContext(
		cfgSnap.Leaf(),
		rootPEMs,
		makeTLSParametersFromProxyTLSConfig(cfgSnap.MeshConfigTLSOutgoing()),
	)
	err = injectSANMatcher(commonTLSContext, peerMeta.SpiffeID...)
	if err != nil {
		return nil, fmt.Errorf("failed to inject SAN matcher rules for cluster %q: %v", clusterName, err)
	}

	tlsContext := &envoy_tls_v3.UpstreamTlsContext{
		CommonTlsContext: commonTLSContext,
		Sni:              peerMeta.PrimarySNI(),
	}

	transportSocket, err := makeUpstreamTLSTransportSocket(tlsContext)
	if err != nil {
		return nil, err
	}
	// TLS is applied even to escape-hatch clusters supplied as JSON above.
	c.TransportSocket = transportSocket

	return c, nil
}
|
|
|
|
|
2021-04-29 18:54:05 +00:00
|
|
|
// makeUpstreamClusterForPreparedQuery returns an EDS-backed Envoy cluster for
// a prepared-query upstream. The cluster is named by the upstream's SNI and is
// wrapped in upstream TLS whose SAN matchers are built from the SPIFFE IDs of
// every distinct service currently answering the query.
func (s *ResourceGenerator) makeUpstreamClusterForPreparedQuery(upstream structs.Upstream, cfgSnap *proxycfg.ConfigSnapshot) (*envoy_cluster_v3.Cluster, error) {
	var c *envoy_cluster_v3.Cluster
	var err error

	uid := proxycfg.NewUpstreamID(&upstream)

	// Default to the local datacenter when the upstream doesn't pin one.
	dc := upstream.Datacenter
	if dc == "" {
		dc = cfgSnap.Datacenter
	}
	sni := connect.UpstreamSNI(&upstream, "", dc, cfgSnap.Roots.TrustDomain)

	cfg, err := structs.ParseUpstreamConfig(upstream.Config)
	if err != nil {
		// Don't hard fail on a config typo, just warn. The parse func returns
		// default config if there is an error so it's safe to continue.
		s.Logger.Warn("failed to parse", "upstream", uid, "error", err)
	}
	// User-supplied escape-hatch cluster JSON takes precedence, but TLS is
	// still injected below.
	if cfg.EnvoyClusterJSON != "" {
		c, err = makeClusterFromUserConfig(cfg.EnvoyClusterJSON)
		if err != nil {
			return c, err
		}
		// In the happy path don't return yet as we need to inject TLS config still.
	}

	// Only build a cluster here when no escape-hatch JSON was provided.
	if c == nil {
		c = &envoy_cluster_v3.Cluster{
			Name:                 sni,
			ConnectTimeout:       durationpb.New(time.Duration(cfg.ConnectTimeoutMs) * time.Millisecond),
			ClusterDiscoveryType: &envoy_cluster_v3.Cluster_Type{Type: envoy_cluster_v3.Cluster_EDS},
			EdsClusterConfig: &envoy_cluster_v3.Cluster_EdsClusterConfig{
				EdsConfig: &envoy_core_v3.ConfigSource{
					ResourceApiVersion: envoy_core_v3.ApiVersion_V3,
					ConfigSourceSpecifier: &envoy_core_v3.ConfigSource_Ads{
						Ads: &envoy_core_v3.AggregatedConfigSource{},
					},
				},
			},
			CircuitBreakers: &envoy_cluster_v3.CircuitBreakers{
				Thresholds: makeThresholdsIfNeeded(cfg.Limits),
			},
			OutlierDetection: ToOutlierDetection(cfg.PassiveHealthCheck),
		}
		if cfg.Protocol == "http2" || cfg.Protocol == "grpc" {
			if err := s.setHttp2ProtocolOptions(c); err != nil {
				return c, err
			}
		}
	}

	endpoints := cfgSnap.ConnectProxy.PreparedQueryEndpoints[uid]
	// Collect one SPIFFE ID per distinct (datacenter, service) behind the
	// query; a prepared query can resolve to many different services.
	var (
		spiffeIDs = make([]string, 0)
		seen      = make(map[string]struct{})
	)
	for _, e := range endpoints {
		id := fmt.Sprintf("%s/%s", e.Node.Datacenter, e.Service.CompoundServiceName())
		if _, ok := seen[id]; ok {
			continue
		}
		seen[id] = struct{}{}

		// Connect-native instances present their own service name; sidecar
		// proxies present the name of the service they front.
		name := e.Service.Proxy.DestinationServiceName
		if e.Service.Connect.Native {
			name = e.Service.Service
		}

		spiffeIDs = append(spiffeIDs, connect.SpiffeIDService{
			Host:       cfgSnap.Roots.TrustDomain,
			Namespace:  e.Service.NamespaceOrDefault(),
			Partition:  e.Service.PartitionOrDefault(),
			Datacenter: e.Node.Datacenter,
			Service:    name,
		}.URI().String())
	}

	// Enable TLS upstream with the configured client certificate.
	commonTLSContext := makeCommonTLSContext(
		cfgSnap.Leaf(),
		cfgSnap.RootPEMs(),
		makeTLSParametersFromProxyTLSConfig(cfgSnap.MeshConfigTLSOutgoing()),
	)
	err = injectSANMatcher(commonTLSContext, spiffeIDs...)
	if err != nil {
		return nil, fmt.Errorf("failed to inject SAN matcher rules for cluster %q: %v", sni, err)
	}

	tlsContext := &envoy_tls_v3.UpstreamTlsContext{
		CommonTlsContext: commonTLSContext,
		Sni:              sni,
	}

	transportSocket, err := makeUpstreamTLSTransportSocket(tlsContext)
	if err != nil {
		return nil, err
	}
	// TLS is applied even to escape-hatch clusters supplied as JSON above.
	c.TransportSocket = transportSocket

	return c, nil
}
|
|
|
|
|
2021-04-29 18:54:05 +00:00
|
|
|
func (s *ResourceGenerator) makeUpstreamClustersForDiscoveryChain(
|
2022-01-20 16:12:04 +00:00
|
|
|
uid proxycfg.UpstreamID,
|
2021-03-17 19:40:49 +00:00
|
|
|
upstream *structs.Upstream,
|
2019-07-02 03:10:51 +00:00
|
|
|
chain *structs.CompiledDiscoveryChain,
|
2020-04-16 21:00:48 +00:00
|
|
|
chainEndpoints map[string]structs.CheckServiceNodes,
|
2019-07-02 03:10:51 +00:00
|
|
|
cfgSnap *proxycfg.ConfigSnapshot,
|
2022-06-28 19:52:25 +00:00
|
|
|
forMeshGateway bool,
|
2021-02-26 22:23:15 +00:00
|
|
|
) ([]*envoy_cluster_v3.Cluster, error) {
|
2019-08-22 20:11:56 +00:00
|
|
|
if chain == nil {
|
2022-01-20 16:12:04 +00:00
|
|
|
return nil, fmt.Errorf("cannot create upstream cluster without discovery chain for %s", uid)
|
2019-08-22 20:11:56 +00:00
|
|
|
}
|
|
|
|
|
2022-06-28 19:52:25 +00:00
|
|
|
if uid.Peer != "" && forMeshGateway {
|
|
|
|
return nil, fmt.Errorf("impossible to get a peer discovery chain in a mesh gateway")
|
|
|
|
}
|
|
|
|
|
|
|
|
upstreamConfigMap := make(map[string]interface{})
|
2021-03-17 19:40:49 +00:00
|
|
|
if upstream != nil {
|
2022-06-28 19:52:25 +00:00
|
|
|
upstreamConfigMap = upstream.Config
|
2021-03-17 19:40:49 +00:00
|
|
|
}
|
2022-06-28 19:52:25 +00:00
|
|
|
|
|
|
|
cfg, err := structs.ParseUpstreamConfigNoDefaults(upstreamConfigMap)
|
2019-07-08 11:48:48 +00:00
|
|
|
if err != nil {
|
|
|
|
// Don't hard fail on a config typo, just warn. The parse func returns
|
|
|
|
// default config if there is an error so it's safe to continue.
|
2022-01-20 16:12:04 +00:00
|
|
|
s.Logger.Warn("failed to parse", "upstream", uid,
|
2020-01-28 23:50:41 +00:00
|
|
|
"error", err)
|
2019-07-08 11:48:48 +00:00
|
|
|
}
|
|
|
|
|
2021-02-26 22:23:15 +00:00
|
|
|
var escapeHatchCluster *envoy_cluster_v3.Cluster
|
2022-06-28 19:52:25 +00:00
|
|
|
if !forMeshGateway {
|
|
|
|
if cfg.EnvoyClusterJSON != "" {
|
|
|
|
if chain.Default {
|
|
|
|
// If you haven't done anything to setup the discovery chain, then
|
|
|
|
// you can use the envoy_cluster_json escape hatch.
|
|
|
|
escapeHatchCluster, err = makeClusterFromUserConfig(cfg.EnvoyClusterJSON)
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
s.Logger.Warn("ignoring escape hatch setting, because a discovery chain is configured for",
|
|
|
|
"discovery chain", chain.ServiceName, "upstream", uid,
|
|
|
|
"envoy_cluster_json", chain.ServiceName)
|
2019-08-22 20:11:56 +00:00
|
|
|
}
|
|
|
|
}
|
2019-07-02 03:10:51 +00:00
|
|
|
}
|
|
|
|
|
2021-02-26 22:23:15 +00:00
|
|
|
var out []*envoy_cluster_v3.Cluster
|
2019-08-02 03:44:05 +00:00
|
|
|
for _, node := range chain.Nodes {
|
|
|
|
if node.Type != structs.DiscoveryGraphNodeTypeResolver {
|
|
|
|
continue
|
|
|
|
}
|
2019-08-05 18:30:35 +00:00
|
|
|
failover := node.Resolver.Failover
|
2019-08-02 20:34:54 +00:00
|
|
|
targetID := node.Resolver.Target
|
|
|
|
|
|
|
|
target := chain.Targets[targetID]
|
2019-07-02 03:10:51 +00:00
|
|
|
|
2022-07-12 16:03:41 +00:00
|
|
|
if forMeshGateway && !cfgSnap.Locality.Matches(target.Datacenter, target.Partition) {
|
|
|
|
s.Logger.Warn("ignoring discovery chain target that crosses a datacenter or partition boundary in a mesh gateway",
|
|
|
|
"target", target,
|
|
|
|
"gatewayLocality", cfgSnap.Locality,
|
|
|
|
)
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
|
2019-08-05 18:30:35 +00:00
|
|
|
// Determine if we have to generate the entire cluster differently.
|
2022-06-28 19:52:25 +00:00
|
|
|
failoverThroughMeshGateway := chain.WillFailoverThroughMeshGateway(node) && !forMeshGateway
|
2019-08-05 18:30:35 +00:00
|
|
|
|
2019-08-19 18:03:03 +00:00
|
|
|
sni := target.SNI
|
|
|
|
clusterName := CustomizeClusterName(target.Name, chain)
|
2022-06-28 19:52:25 +00:00
|
|
|
if forMeshGateway {
|
|
|
|
clusterName = meshGatewayExportedClusterNamePrefix + clusterName
|
|
|
|
}
|
2019-08-02 03:03:34 +00:00
|
|
|
|
2022-05-27 02:18:47 +00:00
|
|
|
// Get the SpiffeID for upstream SAN validation.
|
|
|
|
//
|
|
|
|
// For imported services the SpiffeID is embedded in the proxy instances.
|
|
|
|
// Whereas for local services we can construct the SpiffeID from the chain target.
|
|
|
|
var targetSpiffeID string
|
2022-06-28 19:52:25 +00:00
|
|
|
var additionalSpiffeIDs []string
|
2022-05-27 02:18:47 +00:00
|
|
|
if uid.Peer != "" {
|
|
|
|
for _, e := range chainEndpoints[targetID] {
|
|
|
|
targetSpiffeID = e.Service.Connect.PeerMeta.SpiffeID[0]
|
2022-06-28 19:52:25 +00:00
|
|
|
additionalSpiffeIDs = e.Service.Connect.PeerMeta.SpiffeID[1:]
|
2022-05-27 02:18:47 +00:00
|
|
|
|
2022-06-28 19:52:25 +00:00
|
|
|
// Only grab the first instance because it is the same for all instances.
|
2022-05-27 02:18:47 +00:00
|
|
|
break
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
targetSpiffeID = connect.SpiffeIDService{
|
|
|
|
Host: cfgSnap.Roots.TrustDomain,
|
|
|
|
Namespace: target.Namespace,
|
|
|
|
Partition: target.Partition,
|
|
|
|
Datacenter: target.Datacenter,
|
|
|
|
Service: target.Service,
|
|
|
|
}.URI().String()
|
|
|
|
}
|
2021-06-29 01:58:12 +00:00
|
|
|
|
2019-08-05 18:30:35 +00:00
|
|
|
if failoverThroughMeshGateway {
|
|
|
|
actualTargetID := firstHealthyTarget(
|
|
|
|
chain.Targets,
|
2020-04-16 21:00:48 +00:00
|
|
|
chainEndpoints,
|
2019-08-05 18:30:35 +00:00
|
|
|
targetID,
|
|
|
|
failover.Targets,
|
|
|
|
)
|
|
|
|
|
|
|
|
if actualTargetID != targetID {
|
|
|
|
actualTarget := chain.Targets[actualTargetID]
|
2019-08-19 18:03:03 +00:00
|
|
|
sni = actualTarget.SNI
|
2021-06-30 16:16:33 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2022-06-28 19:52:25 +00:00
|
|
|
spiffeIDs := append([]string{targetSpiffeID}, additionalSpiffeIDs...)
|
2021-06-30 16:16:33 +00:00
|
|
|
seenIDs := map[string]struct{}{
|
2022-05-27 01:24:55 +00:00
|
|
|
targetSpiffeID: {},
|
2021-06-30 16:16:33 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
if failover != nil {
|
|
|
|
// When failovers are present we need to add them as valid SANs to validate against.
|
|
|
|
// Envoy makes the failover decision independently based on the endpoint health it has available.
|
|
|
|
for _, tid := range failover.Targets {
|
|
|
|
target, ok := chain.Targets[tid]
|
|
|
|
if !ok {
|
|
|
|
continue
|
|
|
|
}
|
2021-06-29 01:58:12 +00:00
|
|
|
|
2021-06-30 16:16:33 +00:00
|
|
|
id := connect.SpiffeIDService{
|
2021-06-29 01:58:12 +00:00
|
|
|
Host: cfgSnap.Roots.TrustDomain,
|
2021-06-30 16:16:33 +00:00
|
|
|
Namespace: target.Namespace,
|
2021-09-15 02:43:38 +00:00
|
|
|
Partition: target.Partition,
|
2021-06-30 16:16:33 +00:00
|
|
|
Datacenter: target.Datacenter,
|
|
|
|
Service: target.Service,
|
2022-05-27 01:24:55 +00:00
|
|
|
}.URI().String()
|
2021-06-30 16:16:33 +00:00
|
|
|
|
|
|
|
// Failover targets might be subsets of the same service, so these are deduplicated.
|
2022-05-27 01:24:55 +00:00
|
|
|
if _, ok := seenIDs[id]; ok {
|
2021-06-30 16:16:33 +00:00
|
|
|
continue
|
2021-06-29 01:58:12 +00:00
|
|
|
}
|
2022-05-27 01:24:55 +00:00
|
|
|
seenIDs[id] = struct{}{}
|
2021-06-30 16:16:33 +00:00
|
|
|
|
|
|
|
spiffeIDs = append(spiffeIDs, id)
|
2019-08-05 18:30:35 +00:00
|
|
|
}
|
|
|
|
}
|
2022-05-27 02:18:47 +00:00
|
|
|
sort.Strings(spiffeIDs)
|
2019-08-05 18:30:35 +00:00
|
|
|
|
2022-06-03 21:42:50 +00:00
|
|
|
s.Logger.Trace("generating cluster for", "cluster", clusterName)
|
2021-02-26 22:23:15 +00:00
|
|
|
c := &envoy_cluster_v3.Cluster{
|
2019-08-02 03:03:34 +00:00
|
|
|
Name: clusterName,
|
|
|
|
AltStatName: clusterName,
|
2022-05-25 01:44:54 +00:00
|
|
|
ConnectTimeout: durationpb.New(node.Resolver.ConnectTimeout),
|
2021-02-26 22:23:15 +00:00
|
|
|
ClusterDiscoveryType: &envoy_cluster_v3.Cluster_Type{Type: envoy_cluster_v3.Cluster_EDS},
|
|
|
|
CommonLbConfig: &envoy_cluster_v3.Cluster_CommonLbConfig{
|
|
|
|
HealthyPanicThreshold: &envoy_type_v3.Percent{
|
2019-07-02 03:10:51 +00:00
|
|
|
Value: 0, // disable panic threshold
|
|
|
|
},
|
|
|
|
},
|
2021-02-26 22:23:15 +00:00
|
|
|
EdsClusterConfig: &envoy_cluster_v3.Cluster_EdsClusterConfig{
|
|
|
|
EdsConfig: &envoy_core_v3.ConfigSource{
|
|
|
|
ResourceApiVersion: envoy_core_v3.ApiVersion_V3,
|
|
|
|
ConfigSourceSpecifier: &envoy_core_v3.ConfigSource_Ads{
|
|
|
|
Ads: &envoy_core_v3.AggregatedConfigSource{},
|
2019-07-02 03:10:51 +00:00
|
|
|
},
|
|
|
|
},
|
|
|
|
},
|
2022-06-28 19:52:25 +00:00
|
|
|
// TODO(peering): make circuit breakers or outlier detection work?
|
2021-02-26 22:23:15 +00:00
|
|
|
CircuitBreakers: &envoy_cluster_v3.CircuitBreakers{
|
2019-12-03 20:13:33 +00:00
|
|
|
Thresholds: makeThresholdsIfNeeded(cfg.Limits),
|
|
|
|
},
|
2021-03-09 05:10:27 +00:00
|
|
|
OutlierDetection: ToOutlierDetection(cfg.PassiveHealthCheck),
|
2019-07-02 03:10:51 +00:00
|
|
|
}
|
2019-07-08 11:48:48 +00:00
|
|
|
|
2020-09-11 15:21:43 +00:00
|
|
|
var lb *structs.LoadBalancer
|
2020-09-02 15:10:50 +00:00
|
|
|
if node.LoadBalancer != nil {
|
2020-09-11 15:21:43 +00:00
|
|
|
lb = node.LoadBalancer
|
2020-09-02 15:10:50 +00:00
|
|
|
}
|
2020-09-02 21:13:50 +00:00
|
|
|
if err := injectLBToCluster(lb, c); err != nil {
|
2020-09-02 15:10:50 +00:00
|
|
|
return nil, fmt.Errorf("failed to apply load balancer configuration to cluster %q: %v", clusterName, err)
|
2020-08-28 20:27:40 +00:00
|
|
|
}
|
|
|
|
|
2022-06-28 19:52:25 +00:00
|
|
|
var proto string
|
|
|
|
if !forMeshGateway {
|
|
|
|
proto = cfg.Protocol
|
|
|
|
}
|
2019-07-08 11:48:48 +00:00
|
|
|
if proto == "" {
|
|
|
|
proto = chain.Protocol
|
|
|
|
}
|
|
|
|
|
|
|
|
if proto == "" {
|
|
|
|
proto = "tcp"
|
|
|
|
}
|
|
|
|
|
|
|
|
if proto == "http2" || proto == "grpc" {
|
2022-03-30 18:27:49 +00:00
|
|
|
if err := s.setHttp2ProtocolOptions(c); err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
2019-07-02 03:10:51 +00:00
|
|
|
}
|
|
|
|
|
2022-06-28 19:52:25 +00:00
|
|
|
configureTLS := true
|
|
|
|
if forMeshGateway {
|
|
|
|
// We only initiate TLS if we're doing an L7 proxy.
|
|
|
|
configureTLS = structs.IsProtocolHTTPLike(proto)
|
2022-06-01 21:53:52 +00:00
|
|
|
}
|
2022-03-30 18:43:59 +00:00
|
|
|
|
2022-06-28 19:52:25 +00:00
|
|
|
if configureTLS {
|
|
|
|
commonTLSContext := makeCommonTLSContext(
|
|
|
|
cfgSnap.Leaf(),
|
2022-07-13 16:14:57 +00:00
|
|
|
cfgSnap.RootPEMs(),
|
2022-06-28 19:52:25 +00:00
|
|
|
makeTLSParametersFromProxyTLSConfig(cfgSnap.MeshConfigTLSOutgoing()),
|
|
|
|
)
|
2021-06-29 01:58:12 +00:00
|
|
|
|
2022-06-28 19:52:25 +00:00
|
|
|
err = injectSANMatcher(commonTLSContext, spiffeIDs...)
|
|
|
|
if err != nil {
|
|
|
|
return nil, fmt.Errorf("failed to inject SAN matcher rules for cluster %q: %v", sni, err)
|
|
|
|
}
|
|
|
|
|
|
|
|
tlsContext := &envoy_tls_v3.UpstreamTlsContext{
|
|
|
|
CommonTlsContext: commonTLSContext,
|
|
|
|
Sni: sni,
|
|
|
|
}
|
|
|
|
transportSocket, err := makeUpstreamTLSTransportSocket(tlsContext)
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
c.TransportSocket = transportSocket
|
2021-02-22 21:00:15 +00:00
|
|
|
}
|
|
|
|
|
2019-07-02 03:10:51 +00:00
|
|
|
out = append(out, c)
|
|
|
|
}
|
|
|
|
|
2019-08-22 20:11:56 +00:00
|
|
|
if escapeHatchCluster != nil {
|
|
|
|
if len(out) != 1 {
|
|
|
|
return nil, fmt.Errorf("cannot inject escape hatch cluster when discovery chain had no nodes")
|
|
|
|
}
|
|
|
|
defaultCluster := out[0]
|
|
|
|
|
|
|
|
// Overlay what the user provided.
|
2021-02-22 21:00:15 +00:00
|
|
|
escapeHatchCluster.TransportSocket = defaultCluster.TransportSocket
|
2019-08-22 20:11:56 +00:00
|
|
|
|
2021-02-26 22:23:15 +00:00
|
|
|
out = []*envoy_cluster_v3.Cluster{escapeHatchCluster}
|
2019-08-22 20:11:56 +00:00
|
|
|
}
|
|
|
|
|
2019-07-02 03:10:51 +00:00
|
|
|
return out, nil
|
|
|
|
}
|
|
|
|
|
2022-06-28 19:52:25 +00:00
|
|
|
func (s *ResourceGenerator) makeExportedUpstreamClustersForMeshGateway(cfgSnap *proxycfg.ConfigSnapshot) ([]proto.Message, error) {
|
|
|
|
// NOTE: Despite the mesh gateway already having one cluster per service
|
|
|
|
// (and subset) in the local datacenter we cannot reliably use those to
|
|
|
|
// send inbound peered traffic targeting a discovery chain.
|
|
|
|
//
|
|
|
|
// For starters, none of those add TLS so they'd be unusable for http-like
|
|
|
|
// L7 protocols.
|
|
|
|
//
|
|
|
|
// Additionally, those other clusters are all thin wrappers around simple
|
|
|
|
// catalog resolutions and are largely not impacted by various
|
|
|
|
// customizations related to a service-resolver, such as configuring the
|
|
|
|
// failover section.
|
|
|
|
//
|
|
|
|
// Instead we create brand new clusters solely to accept incoming peered
|
|
|
|
// traffic and give them a unique cluster prefix name to avoid collisions
|
|
|
|
// to keep the two use cases separate.
|
|
|
|
var clusters []proto.Message
|
|
|
|
|
|
|
|
createdExportedClusters := make(map[string]struct{}) // key=clusterName
|
|
|
|
for _, svc := range cfgSnap.MeshGatewayValidExportedServices() {
|
|
|
|
chain := cfgSnap.MeshGateway.DiscoveryChain[svc]
|
|
|
|
|
|
|
|
exportClusters, err := s.makeUpstreamClustersForDiscoveryChain(
|
|
|
|
proxycfg.NewUpstreamIDFromServiceName(svc),
|
|
|
|
nil,
|
|
|
|
chain,
|
|
|
|
nil,
|
|
|
|
cfgSnap,
|
|
|
|
true,
|
|
|
|
)
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
|
|
|
|
for _, cluster := range exportClusters {
|
|
|
|
if _, ok := createdExportedClusters[cluster.Name]; ok {
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
createdExportedClusters[cluster.Name] = struct{}{}
|
|
|
|
clusters = append(clusters, cluster)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return clusters, nil
|
|
|
|
}
|
|
|
|
|
2021-06-29 01:58:12 +00:00
|
|
|
// injectSANMatcher updates a TLS context so that it verifies the upstream SAN.
|
2022-05-27 01:24:55 +00:00
|
|
|
func injectSANMatcher(tlsContext *envoy_tls_v3.CommonTlsContext, matchStrings ...string) error {
|
2021-06-29 01:58:12 +00:00
|
|
|
validationCtx, ok := tlsContext.ValidationContextType.(*envoy_tls_v3.CommonTlsContext_ValidationContext)
|
|
|
|
if !ok {
|
|
|
|
return fmt.Errorf("invalid type: expected CommonTlsContext_ValidationContext, got %T",
|
|
|
|
tlsContext.ValidationContextType)
|
|
|
|
}
|
|
|
|
|
2021-06-30 16:16:33 +00:00
|
|
|
var matchers []*envoy_matcher_v3.StringMatcher
|
2022-03-31 20:46:14 +00:00
|
|
|
for _, m := range matchStrings {
|
2021-06-30 16:16:33 +00:00
|
|
|
matchers = append(matchers, &envoy_matcher_v3.StringMatcher{
|
|
|
|
MatchPattern: &envoy_matcher_v3.StringMatcher_Exact{
|
2022-03-31 20:46:14 +00:00
|
|
|
Exact: m,
|
2021-06-30 16:16:33 +00:00
|
|
|
},
|
|
|
|
})
|
2021-06-29 01:58:12 +00:00
|
|
|
}
|
2021-06-30 16:16:33 +00:00
|
|
|
validationCtx.ValidationContext.MatchSubjectAltNames = matchers
|
|
|
|
|
2021-06-29 01:58:12 +00:00
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
2019-02-19 13:45:33 +00:00
|
|
|
// makeClusterFromUserConfig returns the listener config decoded from an
|
|
|
|
// arbitrary proto3 json format string or an error if it's invalid.
|
|
|
|
//
|
|
|
|
// For now we only support embedding in JSON strings because of the hcl parsing
|
2020-06-09 21:43:05 +00:00
|
|
|
// pain (see Background section in the comment for decode.HookWeakDecodeFromSlice).
|
|
|
|
// This may be fixed in decode.HookWeakDecodeFromSlice in the future.
|
2019-02-19 13:45:33 +00:00
|
|
|
//
|
|
|
|
// When we do that we can support just nesting the config directly into the
|
|
|
|
// JSON/hcl naturally but this is a stop-gap that gets us an escape hatch
|
|
|
|
// immediately. It's also probably not a bad thing to support long-term since
|
|
|
|
// any config generated by other systems will likely be in canonical protobuf
|
|
|
|
// from rather than our slight variant in JSON/hcl.
|
2021-02-26 22:23:15 +00:00
|
|
|
func makeClusterFromUserConfig(configJSON string) (*envoy_cluster_v3.Cluster, error) {
|
|
|
|
// Type field is present so decode it as a types.Any
|
|
|
|
var any any.Any
|
|
|
|
err := jsonpb.UnmarshalString(configJSON, &any)
|
|
|
|
if err != nil {
|
2019-02-19 13:45:33 +00:00
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
|
2021-02-26 22:23:15 +00:00
|
|
|
// And then unmarshal the listener again...
|
|
|
|
var c envoy_cluster_v3.Cluster
|
|
|
|
err = proto.Unmarshal(any.Value, &c)
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
2019-02-19 13:45:33 +00:00
|
|
|
}
|
|
|
|
return &c, err
|
2018-10-03 18:18:55 +00:00
|
|
|
}
|
2019-06-18 00:52:01 +00:00
|
|
|
|
2022-05-24 16:51:52 +00:00
|
|
|
// clusterOpts carries the per-cluster parameters used when building gateway
// (mesh/terminating) clusters; see makeGatewayCluster.
type clusterOpts struct {
	// name for the cluster
	name string

	// isRemote determines whether the cluster is in a remote DC and we should prefer a WAN address
	isRemote bool

	// onlyPassing determines whether endpoints that do not have a passing status should be considered unhealthy
	onlyPassing bool

	// connectTimeout is the timeout for new network connections to hosts in the cluster
	connectTimeout time.Duration

	// hostnameEndpoints is a list of endpoints with a hostname as their address
	hostnameEndpoints structs.CheckServiceNodes

	// addressEndpoint is a singular ip/port endpoint
	addressEndpoint structs.DestinationConfig
}
|
|
|
|
|
2020-06-12 19:46:17 +00:00
|
|
|
// makeGatewayCluster creates an Envoy cluster for a mesh or terminating gateway.
// It uses EDS when all endpoints are IP-addressed; when any endpoint is
// addressed by hostname it falls back to DNS-based discovery so Envoy resolves
// the names itself. Outlier detection is always enabled with defaults.
func (s *ResourceGenerator) makeGatewayCluster(snap *proxycfg.ConfigSnapshot, opts clusterOpts) *envoy_cluster_v3.Cluster {
	cfg, err := ParseGatewayConfig(snap.Proxy.Config)
	if err != nil {
		// Don't hard fail on a config typo, just warn. The parse func returns
		// default config if there is an error so it's safe to continue.
		s.Logger.Warn("failed to parse gateway config", "error", err)
	}
	// Fall back to the gateway-wide connect timeout when the caller didn't
	// supply a positive one.
	if opts.connectTimeout <= 0 {
		opts.connectTimeout = time.Duration(cfg.ConnectTimeoutMs) * time.Millisecond
	}

	cluster := &envoy_cluster_v3.Cluster{
		Name:           opts.name,
		ConnectTimeout: durationpb.New(opts.connectTimeout),

		// Having an empty config enables outlier detection with default config.
		OutlierDetection: &envoy_cluster_v3.OutlierDetection{},
	}

	useEDS := true
	if len(opts.hostnameEndpoints) > 0 {
		useEDS = false
	}

	// If none of the service instances are addressed by a hostname we provide the endpoint IP addresses via EDS
	if useEDS {
		cluster.ClusterDiscoveryType = &envoy_cluster_v3.Cluster_Type{Type: envoy_cluster_v3.Cluster_EDS}
		cluster.EdsClusterConfig = &envoy_cluster_v3.Cluster_EdsClusterConfig{
			EdsConfig: &envoy_core_v3.ConfigSource{
				ResourceApiVersion: envoy_core_v3.ApiVersion_V3,
				ConfigSourceSpecifier: &envoy_core_v3.ConfigSource_Ads{
					Ads: &envoy_core_v3.AggregatedConfigSource{},
				},
			},
		}
	} else {
		// Hostname endpoints: hand the cluster over to DNS-based discovery.
		configureClusterWithHostnames(
			s.Logger,
			cluster,
			cfg.DNSDiscoveryType,
			opts.hostnameEndpoints,
			opts.isRemote,
			opts.onlyPassing,
		)
	}

	return cluster
}
|
|
|
|
|
|
|
|
func configureClusterWithHostnames(
|
|
|
|
logger hclog.Logger,
|
|
|
|
cluster *envoy_cluster_v3.Cluster,
|
|
|
|
dnsDiscoveryType string,
|
|
|
|
// hostnameEndpoints is a list of endpoints with a hostname as their address
|
|
|
|
hostnameEndpoints structs.CheckServiceNodes,
|
|
|
|
// isRemote determines whether the cluster is in a remote DC or partition and we should prefer a WAN address
|
|
|
|
isRemote bool,
|
|
|
|
// onlyPassing determines whether endpoints that do not have a passing status should be considered unhealthy
|
|
|
|
onlyPassing bool,
|
|
|
|
) {
|
2020-06-03 21:28:45 +00:00
|
|
|
// When a service instance is addressed by a hostname we have Envoy do the DNS resolution
|
|
|
|
// by setting a DNS cluster type and passing the hostname endpoints via CDS.
|
|
|
|
rate := 10 * time.Second
|
2022-05-25 01:44:54 +00:00
|
|
|
cluster.DnsRefreshRate = durationpb.New(rate)
|
2021-02-26 22:23:15 +00:00
|
|
|
cluster.DnsLookupFamily = envoy_cluster_v3.Cluster_V4_ONLY
|
2020-06-03 21:28:45 +00:00
|
|
|
|
2021-02-26 22:23:15 +00:00
|
|
|
discoveryType := envoy_cluster_v3.Cluster_Type{Type: envoy_cluster_v3.Cluster_LOGICAL_DNS}
|
2022-06-10 21:11:40 +00:00
|
|
|
if dnsDiscoveryType == "strict_dns" {
|
2021-02-26 22:23:15 +00:00
|
|
|
discoveryType.Type = envoy_cluster_v3.Cluster_STRICT_DNS
|
2020-04-27 22:25:37 +00:00
|
|
|
}
|
2020-06-03 21:28:45 +00:00
|
|
|
cluster.ClusterDiscoveryType = &discoveryType
|
2020-04-27 22:25:37 +00:00
|
|
|
|
2021-02-26 22:23:15 +00:00
|
|
|
endpoints := make([]*envoy_endpoint_v3.LbEndpoint, 0, 1)
|
2020-06-12 19:46:17 +00:00
|
|
|
uniqueHostnames := make(map[string]bool)
|
2020-06-03 21:28:45 +00:00
|
|
|
|
2020-06-12 19:46:17 +00:00
|
|
|
var (
|
|
|
|
hostname string
|
|
|
|
idx int
|
2021-02-26 22:23:15 +00:00
|
|
|
fallback *envoy_endpoint_v3.LbEndpoint
|
2020-06-12 19:46:17 +00:00
|
|
|
)
|
2022-06-10 21:11:40 +00:00
|
|
|
for i, e := range hostnameEndpoints {
|
|
|
|
_, addr, port := e.BestAddress(isRemote)
|
2020-06-12 19:46:17 +00:00
|
|
|
uniqueHostnames[addr] = true
|
|
|
|
|
2022-06-10 21:11:40 +00:00
|
|
|
health, weight := calculateEndpointHealthAndWeight(e, onlyPassing)
|
2021-02-26 22:23:15 +00:00
|
|
|
if health == envoy_core_v3.HealthStatus_UNHEALTHY {
|
2020-06-19 19:31:39 +00:00
|
|
|
fallback = makeLbEndpoint(addr, port, health, weight)
|
2020-06-12 19:46:17 +00:00
|
|
|
continue
|
|
|
|
}
|
|
|
|
|
|
|
|
if len(endpoints) == 0 {
|
|
|
|
endpoints = append(endpoints, makeLbEndpoint(addr, port, health, weight))
|
|
|
|
|
|
|
|
hostname = addr
|
|
|
|
idx = i
|
|
|
|
break
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2022-06-10 21:11:40 +00:00
|
|
|
dc := hostnameEndpoints[idx].Node.Datacenter
|
|
|
|
service := hostnameEndpoints[idx].Service.CompoundServiceName()
|
2020-06-12 19:46:17 +00:00
|
|
|
|
2020-06-19 19:31:39 +00:00
|
|
|
// Fall back to last unhealthy endpoint if none were healthy
|
2020-06-12 19:46:17 +00:00
|
|
|
if len(endpoints) == 0 {
|
2022-06-10 21:11:40 +00:00
|
|
|
logger.Warn("upstream service does not contain any healthy instances",
|
2020-06-19 19:31:39 +00:00
|
|
|
"dc", dc, "service", service.String())
|
2020-06-12 19:46:17 +00:00
|
|
|
|
2020-06-19 19:31:39 +00:00
|
|
|
endpoints = append(endpoints, fallback)
|
2020-06-12 19:46:17 +00:00
|
|
|
}
|
|
|
|
if len(uniqueHostnames) > 1 {
|
2022-06-10 21:11:40 +00:00
|
|
|
logger.Warn(fmt.Sprintf("service contains instances with more than one unique hostname; only %q be resolved by Envoy", hostname),
|
2021-04-29 18:54:05 +00:00
|
|
|
"dc", dc, "service", service.String())
|
2020-06-03 21:28:45 +00:00
|
|
|
}
|
2020-06-12 19:46:17 +00:00
|
|
|
|
2021-02-26 22:23:15 +00:00
|
|
|
cluster.LoadAssignment = &envoy_endpoint_v3.ClusterLoadAssignment{
|
2020-06-03 21:28:45 +00:00
|
|
|
ClusterName: cluster.Name,
|
2021-02-26 22:23:15 +00:00
|
|
|
Endpoints: []*envoy_endpoint_v3.LocalityLbEndpoints{
|
2020-06-03 21:28:45 +00:00
|
|
|
{
|
|
|
|
LbEndpoints: endpoints,
|
|
|
|
},
|
|
|
|
},
|
|
|
|
}
|
2020-04-27 22:25:37 +00:00
|
|
|
}
|
|
|
|
|
2022-07-14 18:45:51 +00:00
|
|
|
// makeTerminatingIPCluster creates an Envoy cluster for a terminating gateway with an ip destination
|
2022-05-24 16:51:52 +00:00
|
|
|
func (s *ResourceGenerator) makeTerminatingIPCluster(snap *proxycfg.ConfigSnapshot, opts clusterOpts) *envoy_cluster_v3.Cluster {
|
|
|
|
cfg, err := ParseGatewayConfig(snap.Proxy.Config)
|
|
|
|
if err != nil {
|
|
|
|
// Don't hard fail on a config typo, just warn. The parse func returns
|
|
|
|
// default config if there is an error so it's safe to continue.
|
|
|
|
s.Logger.Warn("failed to parse gateway config", "error", err)
|
|
|
|
}
|
|
|
|
if opts.connectTimeout <= 0 {
|
|
|
|
opts.connectTimeout = time.Duration(cfg.ConnectTimeoutMs) * time.Millisecond
|
|
|
|
}
|
|
|
|
|
|
|
|
cluster := &envoy_cluster_v3.Cluster{
|
|
|
|
Name: opts.name,
|
|
|
|
ConnectTimeout: durationpb.New(opts.connectTimeout),
|
|
|
|
|
|
|
|
// Having an empty config enables outlier detection with default config.
|
2022-07-14 18:45:51 +00:00
|
|
|
OutlierDetection: &envoy_cluster_v3.OutlierDetection{},
|
|
|
|
ClusterDiscoveryType: &envoy_cluster_v3.Cluster_Type{Type: envoy_cluster_v3.Cluster_STATIC},
|
2022-05-24 16:51:52 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
endpoints := []*envoy_endpoint_v3.LbEndpoint{
|
|
|
|
makeEndpoint(opts.addressEndpoint.Address, opts.addressEndpoint.Port),
|
|
|
|
}
|
|
|
|
|
|
|
|
cluster.LoadAssignment = &envoy_endpoint_v3.ClusterLoadAssignment{
|
|
|
|
ClusterName: cluster.Name,
|
|
|
|
Endpoints: []*envoy_endpoint_v3.LocalityLbEndpoints{
|
|
|
|
{
|
|
|
|
LbEndpoints: endpoints,
|
|
|
|
},
|
|
|
|
},
|
|
|
|
}
|
|
|
|
return cluster
|
|
|
|
}
|
|
|
|
|
2022-07-14 18:45:51 +00:00
|
|
|
// makeTerminatingHostnameCluster creates an Envoy cluster for a terminating gateway with a hostname destination
|
|
|
|
func (s *ResourceGenerator) makeTerminatingHostnameCluster(snap *proxycfg.ConfigSnapshot, opts clusterOpts) *envoy_cluster_v3.Cluster {
|
2022-05-24 16:51:52 +00:00
|
|
|
cfg, err := ParseGatewayConfig(snap.Proxy.Config)
|
|
|
|
if err != nil {
|
|
|
|
// Don't hard fail on a config typo, just warn. The parse func returns
|
|
|
|
// default config if there is an error so it's safe to continue.
|
|
|
|
s.Logger.Warn("failed to parse gateway config", "error", err)
|
|
|
|
}
|
2022-07-14 18:45:51 +00:00
|
|
|
opts.connectTimeout = time.Duration(cfg.ConnectTimeoutMs) * time.Millisecond
|
2022-05-24 16:51:52 +00:00
|
|
|
|
|
|
|
cluster := &envoy_cluster_v3.Cluster{
|
|
|
|
Name: opts.name,
|
|
|
|
ConnectTimeout: durationpb.New(opts.connectTimeout),
|
|
|
|
|
2022-07-14 18:45:51 +00:00
|
|
|
// Having an empty config enables outlier detection with default config.
|
|
|
|
OutlierDetection: &envoy_cluster_v3.OutlierDetection{},
|
|
|
|
ClusterDiscoveryType: &envoy_cluster_v3.Cluster_Type{Type: envoy_cluster_v3.Cluster_LOGICAL_DNS},
|
|
|
|
DnsLookupFamily: envoy_cluster_v3.Cluster_AUTO,
|
2022-05-24 16:51:52 +00:00
|
|
|
}
|
|
|
|
|
2022-07-14 18:45:51 +00:00
|
|
|
rate := 10 * time.Second
|
|
|
|
cluster.DnsRefreshRate = durationpb.New(rate)
|
|
|
|
|
|
|
|
address := makeAddress(opts.addressEndpoint.Address, opts.addressEndpoint.Port)
|
|
|
|
|
|
|
|
endpoints := []*envoy_endpoint_v3.LbEndpoint{
|
|
|
|
{
|
|
|
|
HostIdentifier: &envoy_endpoint_v3.LbEndpoint_Endpoint{
|
|
|
|
Endpoint: &envoy_endpoint_v3.Endpoint{
|
|
|
|
Address: address,
|
|
|
|
},
|
|
|
|
},
|
2022-05-24 16:51:52 +00:00
|
|
|
},
|
|
|
|
}
|
|
|
|
|
2022-07-14 18:45:51 +00:00
|
|
|
cluster.LoadAssignment = &envoy_endpoint_v3.ClusterLoadAssignment{
|
|
|
|
ClusterName: cluster.Name,
|
|
|
|
Endpoints: []*envoy_endpoint_v3.LocalityLbEndpoints{{
|
|
|
|
LbEndpoints: endpoints,
|
|
|
|
}},
|
2022-05-24 16:51:52 +00:00
|
|
|
}
|
2022-07-14 18:45:51 +00:00
|
|
|
|
|
|
|
return cluster
|
2022-05-24 16:51:52 +00:00
|
|
|
}
|
|
|
|
|
2021-03-11 18:04:40 +00:00
|
|
|
func makeThresholdsIfNeeded(limits *structs.UpstreamLimits) []*envoy_cluster_v3.CircuitBreakers_Thresholds {
|
|
|
|
if limits == nil {
|
2019-12-03 20:13:33 +00:00
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
2021-02-26 22:23:15 +00:00
|
|
|
threshold := &envoy_cluster_v3.CircuitBreakers_Thresholds{}
|
2021-03-09 05:10:27 +00:00
|
|
|
|
2019-12-03 20:13:33 +00:00
|
|
|
// Likewise, make sure to not set any threshold values on the zero-value in
|
|
|
|
// order to rely on Envoy defaults
|
|
|
|
if limits.MaxConnections != nil {
|
|
|
|
threshold.MaxConnections = makeUint32Value(*limits.MaxConnections)
|
|
|
|
}
|
|
|
|
if limits.MaxPendingRequests != nil {
|
|
|
|
threshold.MaxPendingRequests = makeUint32Value(*limits.MaxPendingRequests)
|
|
|
|
}
|
|
|
|
if limits.MaxConcurrentRequests != nil {
|
|
|
|
threshold.MaxRequests = makeUint32Value(*limits.MaxConcurrentRequests)
|
|
|
|
}
|
|
|
|
|
2021-02-26 22:23:15 +00:00
|
|
|
return []*envoy_cluster_v3.CircuitBreakers_Thresholds{threshold}
|
2019-12-03 20:13:33 +00:00
|
|
|
}
|
2020-06-03 21:28:45 +00:00
|
|
|
|
2021-02-26 22:23:15 +00:00
|
|
|
func makeLbEndpoint(addr string, port int, health envoy_core_v3.HealthStatus, weight int) *envoy_endpoint_v3.LbEndpoint {
|
|
|
|
return &envoy_endpoint_v3.LbEndpoint{
|
|
|
|
HostIdentifier: &envoy_endpoint_v3.LbEndpoint_Endpoint{
|
|
|
|
Endpoint: &envoy_endpoint_v3.Endpoint{
|
|
|
|
Address: &envoy_core_v3.Address{
|
|
|
|
Address: &envoy_core_v3.Address_SocketAddress{
|
|
|
|
SocketAddress: &envoy_core_v3.SocketAddress{
|
2020-06-03 21:28:45 +00:00
|
|
|
Address: addr,
|
2021-02-26 22:23:15 +00:00
|
|
|
PortSpecifier: &envoy_core_v3.SocketAddress_PortValue{
|
2020-06-03 21:28:45 +00:00
|
|
|
PortValue: uint32(port),
|
|
|
|
},
|
|
|
|
},
|
|
|
|
},
|
|
|
|
},
|
|
|
|
},
|
|
|
|
},
|
|
|
|
HealthStatus: health,
|
|
|
|
LoadBalancingWeight: makeUint32Value(weight),
|
|
|
|
}
|
|
|
|
}
|
2020-09-02 21:13:50 +00:00
|
|
|
|
2021-02-26 22:23:15 +00:00
|
|
|
func injectLBToCluster(ec *structs.LoadBalancer, c *envoy_cluster_v3.Cluster) error {
|
2020-09-02 21:13:50 +00:00
|
|
|
if ec == nil {
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
|
|
|
switch ec.Policy {
|
|
|
|
case "":
|
|
|
|
return nil
|
|
|
|
case structs.LBPolicyLeastRequest:
|
2021-02-26 22:23:15 +00:00
|
|
|
c.LbPolicy = envoy_cluster_v3.Cluster_LEAST_REQUEST
|
2020-09-02 21:13:50 +00:00
|
|
|
|
|
|
|
if ec.LeastRequestConfig != nil {
|
2021-02-26 22:23:15 +00:00
|
|
|
c.LbConfig = &envoy_cluster_v3.Cluster_LeastRequestLbConfig_{
|
|
|
|
LeastRequestLbConfig: &envoy_cluster_v3.Cluster_LeastRequestLbConfig{
|
2020-09-02 21:13:50 +00:00
|
|
|
ChoiceCount: &wrappers.UInt32Value{Value: ec.LeastRequestConfig.ChoiceCount},
|
|
|
|
},
|
|
|
|
}
|
|
|
|
}
|
|
|
|
case structs.LBPolicyRoundRobin:
|
2021-02-26 22:23:15 +00:00
|
|
|
c.LbPolicy = envoy_cluster_v3.Cluster_ROUND_ROBIN
|
2020-09-02 21:13:50 +00:00
|
|
|
|
|
|
|
case structs.LBPolicyRandom:
|
2021-02-26 22:23:15 +00:00
|
|
|
c.LbPolicy = envoy_cluster_v3.Cluster_RANDOM
|
2020-09-02 21:13:50 +00:00
|
|
|
|
|
|
|
case structs.LBPolicyRingHash:
|
2021-02-26 22:23:15 +00:00
|
|
|
c.LbPolicy = envoy_cluster_v3.Cluster_RING_HASH
|
2020-09-02 21:13:50 +00:00
|
|
|
|
|
|
|
if ec.RingHashConfig != nil {
|
2021-02-26 22:23:15 +00:00
|
|
|
c.LbConfig = &envoy_cluster_v3.Cluster_RingHashLbConfig_{
|
|
|
|
RingHashLbConfig: &envoy_cluster_v3.Cluster_RingHashLbConfig{
|
2020-09-02 21:13:50 +00:00
|
|
|
MinimumRingSize: &wrappers.UInt64Value{Value: ec.RingHashConfig.MinimumRingSize},
|
|
|
|
MaximumRingSize: &wrappers.UInt64Value{Value: ec.RingHashConfig.MaximumRingSize},
|
|
|
|
},
|
|
|
|
}
|
|
|
|
}
|
|
|
|
case structs.LBPolicyMaglev:
|
2021-02-26 22:23:15 +00:00
|
|
|
c.LbPolicy = envoy_cluster_v3.Cluster_MAGLEV
|
2020-09-02 21:13:50 +00:00
|
|
|
|
|
|
|
default:
|
|
|
|
return fmt.Errorf("unsupported load balancer policy %q for cluster %q", ec.Policy, c.Name)
|
|
|
|
}
|
|
|
|
return nil
|
|
|
|
}
|
2022-03-30 16:51:56 +00:00
|
|
|
|
2022-03-30 18:27:49 +00:00
|
|
|
func (s *ResourceGenerator) setHttp2ProtocolOptions(c *envoy_cluster_v3.Cluster) error {
|
|
|
|
cfg := &envoy_upstreams_v3.HttpProtocolOptions{
|
2022-03-30 16:51:56 +00:00
|
|
|
UpstreamProtocolOptions: &envoy_upstreams_v3.HttpProtocolOptions_ExplicitHttpConfig_{
|
|
|
|
ExplicitHttpConfig: &envoy_upstreams_v3.HttpProtocolOptions_ExplicitHttpConfig{
|
|
|
|
ProtocolConfig: &envoy_upstreams_v3.HttpProtocolOptions_ExplicitHttpConfig_Http2ProtocolOptions{
|
|
|
|
Http2ProtocolOptions: &envoy_core_v3.Http2ProtocolOptions{},
|
|
|
|
},
|
|
|
|
},
|
|
|
|
},
|
|
|
|
}
|
2022-05-25 01:44:54 +00:00
|
|
|
any, err := anypb.New(cfg)
|
2022-03-30 16:51:56 +00:00
|
|
|
if err != nil {
|
2022-03-30 18:27:49 +00:00
|
|
|
return err
|
|
|
|
}
|
|
|
|
c.TypedExtensionProtocolOptions = map[string]*anypb.Any{
|
|
|
|
"envoy.extensions.upstreams.http.v3.HttpProtocolOptions": any,
|
2022-03-30 16:51:56 +00:00
|
|
|
}
|
2022-03-30 18:27:49 +00:00
|
|
|
|
|
|
|
return nil
|
2022-03-30 16:51:56 +00:00
|
|
|
}
|