2023-03-28 18:39:22 +00:00
|
|
|
// Copyright (c) HashiCorp, Inc.
|
2023-08-11 13:12:13 +00:00
|
|
|
// SPDX-License-Identifier: BUSL-1.1
|
2023-03-28 18:39:22 +00:00
|
|
|
|
2020-12-24 19:11:13 +00:00
|
|
|
package proxycfg
|
|
|
|
|
|
|
|
import (
|
|
|
|
"context"
|
2023-05-09 18:20:26 +00:00
|
|
|
"crypto/sha1"
|
|
|
|
"encoding/base64"
|
2020-12-24 19:11:13 +00:00
|
|
|
"fmt"
|
2023-03-10 20:52:54 +00:00
|
|
|
"path"
|
2020-12-24 19:11:13 +00:00
|
|
|
"strings"
|
|
|
|
|
2023-06-13 15:54:45 +00:00
|
|
|
"github.com/mitchellh/mapstructure"
|
|
|
|
|
2023-03-10 20:52:54 +00:00
|
|
|
"github.com/hashicorp/consul/acl"
|
2020-12-24 19:11:13 +00:00
|
|
|
cachetype "github.com/hashicorp/consul/agent/cache-types"
|
2023-06-13 15:54:45 +00:00
|
|
|
"github.com/hashicorp/consul/agent/leafcert"
|
2022-07-13 16:14:57 +00:00
|
|
|
"github.com/hashicorp/consul/agent/proxycfg/internal/watch"
|
2020-12-24 19:11:13 +00:00
|
|
|
"github.com/hashicorp/consul/agent/structs"
|
2023-03-10 20:52:54 +00:00
|
|
|
"github.com/hashicorp/consul/api"
|
2023-02-17 21:14:46 +00:00
|
|
|
"github.com/hashicorp/consul/proto/private/pbpeering"
|
2020-12-24 19:11:13 +00:00
|
|
|
)
|
|
|
|
|
|
|
|
// handlerConnectProxy drives the config-snapshot lifecycle for a service
// registered as a connect (sidecar) proxy. It embeds handlerState, which
// carries the shared state (data sources, proxy registration, token,
// update channel) used by all kind-specific handlers.
type handlerConnectProxy struct {
	handlerState
}
|
|
|
|
|
|
|
|
// initialize sets up the watches needed based on current proxy registration
|
|
|
|
// state.
|
|
|
|
func (s *handlerConnectProxy) initialize(ctx context.Context) (ConfigSnapshot, error) {
|
|
|
|
snap := newConfigSnapshotFromServiceInstance(s.serviceInstance, s.stateConfig)
|
2022-01-20 16:12:04 +00:00
|
|
|
snap.ConnectProxy.DiscoveryChain = make(map[UpstreamID]*structs.CompiledDiscoveryChain)
|
|
|
|
snap.ConnectProxy.WatchedDiscoveryChains = make(map[UpstreamID]context.CancelFunc)
|
|
|
|
snap.ConnectProxy.WatchedUpstreams = make(map[UpstreamID]map[string]context.CancelFunc)
|
|
|
|
snap.ConnectProxy.WatchedUpstreamEndpoints = make(map[UpstreamID]map[string]structs.CheckServiceNodes)
|
2022-07-13 16:14:57 +00:00
|
|
|
snap.ConnectProxy.UpstreamPeerTrustBundles = watch.NewMap[string, *pbpeering.PeeringTrustBundle]()
|
2022-01-20 16:12:04 +00:00
|
|
|
snap.ConnectProxy.WatchedGateways = make(map[UpstreamID]map[string]context.CancelFunc)
|
|
|
|
snap.ConnectProxy.WatchedGatewayEndpoints = make(map[UpstreamID]map[string]structs.CheckServiceNodes)
|
2022-09-27 13:49:28 +00:00
|
|
|
snap.ConnectProxy.WatchedLocalGWEndpoints = watch.NewMap[string, structs.CheckServiceNodes]()
|
2020-12-24 19:11:13 +00:00
|
|
|
snap.ConnectProxy.WatchedServiceChecks = make(map[structs.ServiceID][]structs.CheckType)
|
2022-01-20 16:12:04 +00:00
|
|
|
snap.ConnectProxy.PreparedQueryEndpoints = make(map[UpstreamID]structs.CheckServiceNodes)
|
2022-07-14 18:45:51 +00:00
|
|
|
snap.ConnectProxy.DestinationsUpstream = watch.NewMap[UpstreamID, *structs.ServiceConfigEntry]()
|
2022-01-20 16:12:04 +00:00
|
|
|
snap.ConnectProxy.UpstreamConfig = make(map[UpstreamID]*structs.Upstream)
|
2022-01-28 03:52:26 +00:00
|
|
|
snap.ConnectProxy.PassthroughUpstreams = make(map[UpstreamID]map[string]map[string]struct{})
|
2022-01-28 06:49:06 +00:00
|
|
|
snap.ConnectProxy.PassthroughIndices = make(map[string]indexedTarget)
|
2022-07-13 16:14:57 +00:00
|
|
|
snap.ConnectProxy.PeerUpstreamEndpoints = watch.NewMap[UpstreamID, structs.CheckServiceNodes]()
|
2022-07-14 18:45:51 +00:00
|
|
|
snap.ConnectProxy.DestinationGateways = watch.NewMap[UpstreamID, structs.CheckServiceNodes]()
|
2022-06-10 21:11:40 +00:00
|
|
|
snap.ConnectProxy.PeerUpstreamEndpointsUseHostnames = make(map[UpstreamID]struct{})
|
2020-12-24 19:11:13 +00:00
|
|
|
|
|
|
|
// Watch for root changes
|
2022-06-01 15:18:06 +00:00
|
|
|
err := s.dataSources.CARoots.Notify(ctx, &structs.DCSpecificRequest{
|
2020-12-24 19:11:13 +00:00
|
|
|
Datacenter: s.source.Datacenter,
|
|
|
|
QueryOptions: structs.QueryOptions{Token: s.token},
|
|
|
|
Source: *s.source,
|
|
|
|
}, rootsWatchID, s.ch)
|
|
|
|
if err != nil {
|
|
|
|
return snap, err
|
|
|
|
}
|
|
|
|
|
2022-07-12 23:18:05 +00:00
|
|
|
err = s.dataSources.TrustBundleList.Notify(ctx, &cachetype.TrustBundleListRequest{
|
|
|
|
Request: &pbpeering.TrustBundleListByServiceRequest{
|
|
|
|
ServiceName: s.proxyCfg.DestinationServiceName,
|
|
|
|
Namespace: s.proxyID.NamespaceOrDefault(),
|
|
|
|
Partition: s.proxyID.PartitionOrDefault(),
|
|
|
|
},
|
|
|
|
QueryOptions: structs.QueryOptions{Token: s.token},
|
2022-06-01 20:31:37 +00:00
|
|
|
}, peeringTrustBundlesWatchID, s.ch)
|
|
|
|
if err != nil {
|
|
|
|
return snap, err
|
|
|
|
}
|
|
|
|
|
2020-12-24 19:11:13 +00:00
|
|
|
// Watch the leaf cert
|
2023-06-13 15:54:45 +00:00
|
|
|
err = s.dataSources.LeafCertificate.Notify(ctx, &leafcert.ConnectCALeafRequest{
|
2020-12-24 19:11:13 +00:00
|
|
|
Datacenter: s.source.Datacenter,
|
|
|
|
Token: s.token,
|
|
|
|
Service: s.proxyCfg.DestinationServiceName,
|
|
|
|
EnterpriseMeta: s.proxyID.EnterpriseMeta,
|
|
|
|
}, leafWatchID, s.ch)
|
|
|
|
if err != nil {
|
|
|
|
return snap, err
|
|
|
|
}
|
|
|
|
|
|
|
|
// Watch for intention updates
|
2022-07-01 15:15:49 +00:00
|
|
|
err = s.dataSources.Intentions.Notify(ctx, &structs.ServiceSpecificRequest{
|
|
|
|
Datacenter: s.source.Datacenter,
|
|
|
|
QueryOptions: structs.QueryOptions{Token: s.token},
|
|
|
|
EnterpriseMeta: s.proxyID.EnterpriseMeta,
|
|
|
|
ServiceName: s.proxyCfg.DestinationServiceName,
|
2020-12-24 19:11:13 +00:00
|
|
|
}, intentionsWatchID, s.ch)
|
|
|
|
if err != nil {
|
|
|
|
return snap, err
|
|
|
|
}
|
|
|
|
|
2023-05-19 22:14:16 +00:00
|
|
|
// Watch for JWT provider updates.
|
|
|
|
// While we could optimize by only watching providers referenced by intentions,
|
|
|
|
// this should be okay because we expect few JWT providers and infrequent JWT
|
|
|
|
// provider updates.
|
|
|
|
err = s.dataSources.ConfigEntryList.Notify(ctx, &structs.ConfigEntryQuery{
|
|
|
|
Kind: structs.JWTProvider,
|
|
|
|
Datacenter: s.source.Datacenter,
|
|
|
|
QueryOptions: structs.QueryOptions{Token: s.token},
|
|
|
|
EnterpriseMeta: *structs.DefaultEnterpriseMetaInPartition(s.proxyID.PartitionOrDefault()),
|
|
|
|
}, jwtProviderID, s.ch)
|
|
|
|
if err != nil {
|
|
|
|
return snap, err
|
|
|
|
}
|
|
|
|
|
2022-03-30 18:43:59 +00:00
|
|
|
// Get information about the entire service mesh.
|
2022-06-01 15:18:06 +00:00
|
|
|
err = s.dataSources.ConfigEntry.Notify(ctx, &structs.ConfigEntryQuery{
|
2022-03-30 18:43:59 +00:00
|
|
|
Kind: structs.MeshConfig,
|
|
|
|
Name: structs.MeshConfigMesh,
|
|
|
|
Datacenter: s.source.Datacenter,
|
|
|
|
QueryOptions: structs.QueryOptions{Token: s.token},
|
|
|
|
EnterpriseMeta: *structs.DefaultEnterpriseMetaInPartition(s.proxyID.PartitionOrDefault()),
|
|
|
|
}, meshConfigEntryID, s.ch)
|
|
|
|
if err != nil {
|
|
|
|
return snap, err
|
|
|
|
}
|
|
|
|
|
2020-12-24 19:11:13 +00:00
|
|
|
// Watch for service check updates
|
2022-06-01 15:18:06 +00:00
|
|
|
err = s.dataSources.HTTPChecks.Notify(ctx, &cachetype.ServiceHTTPChecksRequest{
|
2020-12-24 19:11:13 +00:00
|
|
|
ServiceID: s.proxyCfg.DestinationServiceID,
|
2022-10-12 14:49:56 +00:00
|
|
|
NodeName: s.source.Node,
|
2020-12-24 19:11:13 +00:00
|
|
|
EnterpriseMeta: s.proxyID.EnterpriseMeta,
|
|
|
|
}, svcChecksWatchIDPrefix+structs.ServiceIDString(s.proxyCfg.DestinationServiceID, &s.proxyID.EnterpriseMeta), s.ch)
|
|
|
|
if err != nil {
|
|
|
|
return snap, err
|
|
|
|
}
|
|
|
|
|
2023-05-16 18:36:05 +00:00
|
|
|
if err := s.maybeInitializeTelemetryCollectorWatches(ctx, snap); err != nil {
|
|
|
|
return snap, fmt.Errorf("failed to initialize telemetry collector watches: %w", err)
|
2023-03-10 20:52:54 +00:00
|
|
|
}
|
|
|
|
|
2020-12-24 19:11:13 +00:00
|
|
|
if s.proxyCfg.Mode == structs.ProxyModeTransparent {
|
|
|
|
// When in transparent proxy we will infer upstreams from intentions with this source
|
2022-06-01 15:18:06 +00:00
|
|
|
err := s.dataSources.IntentionUpstreams.Notify(ctx, &structs.ServiceSpecificRequest{
|
2020-12-24 19:11:13 +00:00
|
|
|
Datacenter: s.source.Datacenter,
|
|
|
|
QueryOptions: structs.QueryOptions{Token: s.token},
|
|
|
|
ServiceName: s.proxyCfg.DestinationServiceName,
|
2021-08-19 20:09:42 +00:00
|
|
|
EnterpriseMeta: s.proxyID.EnterpriseMeta,
|
2020-12-24 19:11:13 +00:00
|
|
|
}, intentionUpstreamsID, s.ch)
|
|
|
|
if err != nil {
|
|
|
|
return snap, err
|
|
|
|
}
|
2022-07-13 16:14:57 +00:00
|
|
|
err = s.dataSources.PeeredUpstreams.Notify(ctx, &structs.PartitionSpecificRequest{
|
|
|
|
QueryOptions: structs.QueryOptions{Token: s.token},
|
|
|
|
Datacenter: s.source.Datacenter,
|
|
|
|
EnterpriseMeta: s.proxyID.EnterpriseMeta,
|
|
|
|
}, peeredUpstreamsID, s.ch)
|
|
|
|
if err != nil {
|
|
|
|
return snap, err
|
|
|
|
}
|
2022-07-14 18:45:51 +00:00
|
|
|
// We also infer upstreams from destinations (egress points)
|
|
|
|
err = s.dataSources.IntentionUpstreamsDestination.Notify(ctx, &structs.ServiceSpecificRequest{
|
|
|
|
Datacenter: s.source.Datacenter,
|
|
|
|
QueryOptions: structs.QueryOptions{Token: s.token},
|
|
|
|
ServiceName: s.proxyCfg.DestinationServiceName,
|
|
|
|
EnterpriseMeta: s.proxyID.EnterpriseMeta,
|
|
|
|
}, intentionUpstreamsDestinationID, s.ch)
|
|
|
|
if err != nil {
|
|
|
|
return snap, err
|
|
|
|
}
|
2020-12-24 19:11:13 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
// Watch for updates to service endpoints for all upstreams
|
|
|
|
for i := range s.proxyCfg.Upstreams {
|
|
|
|
u := s.proxyCfg.Upstreams[i]
|
|
|
|
|
2022-01-20 16:12:04 +00:00
|
|
|
uid := NewUpstreamID(&u)
|
|
|
|
|
2020-12-24 19:11:13 +00:00
|
|
|
// Store defaults keyed under wildcard so they can be applied to centrally configured upstreams
|
|
|
|
if u.DestinationName == structs.WildcardSpecifier {
|
2022-01-20 16:12:04 +00:00
|
|
|
snap.ConnectProxy.UpstreamConfig[uid] = &u
|
2020-12-24 19:11:13 +00:00
|
|
|
continue
|
|
|
|
}
|
|
|
|
|
2022-04-07 21:58:21 +00:00
|
|
|
snap.ConnectProxy.UpstreamConfig[uid] = &u
|
2020-12-24 19:11:13 +00:00
|
|
|
// This can be true if the upstream is a synthetic entry populated from centralized upstream config.
|
|
|
|
// Watches should not be created for them.
|
|
|
|
if u.CentrallyConfigured {
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
|
|
|
|
dc := s.source.Datacenter
|
|
|
|
if u.Datacenter != "" {
|
|
|
|
dc = u.Datacenter
|
|
|
|
}
|
|
|
|
|
2021-12-13 17:43:33 +00:00
|
|
|
// Default the partition and namespace to the namespace of this proxy service.
|
2021-09-07 20:29:32 +00:00
|
|
|
partition := s.proxyID.PartitionOrDefault()
|
|
|
|
if u.DestinationPartition != "" {
|
|
|
|
partition = u.DestinationPartition
|
|
|
|
}
|
2021-12-13 17:43:33 +00:00
|
|
|
ns := s.proxyID.NamespaceOrDefault()
|
|
|
|
if u.DestinationNamespace != "" {
|
|
|
|
ns = u.DestinationNamespace
|
|
|
|
}
|
2021-09-07 20:29:32 +00:00
|
|
|
|
2020-12-24 19:11:13 +00:00
|
|
|
cfg, err := parseReducedUpstreamConfig(u.Config)
|
|
|
|
if err != nil {
|
|
|
|
// Don't hard fail on a config typo, just warn. We'll fall back on
|
|
|
|
// the plain discovery chain if there is an error so it's safe to
|
|
|
|
// continue.
|
|
|
|
s.logger.Warn("failed to parse upstream config",
|
2022-01-20 16:12:04 +00:00
|
|
|
"upstream", uid.String(),
|
2020-12-24 19:11:13 +00:00
|
|
|
"error", err,
|
|
|
|
)
|
|
|
|
}
|
|
|
|
|
|
|
|
switch u.DestinationType {
|
|
|
|
case structs.UpstreamDestTypePreparedQuery:
|
2022-09-27 13:49:28 +00:00
|
|
|
err := s.dataSources.PreparedQuery.Notify(ctx, &structs.PreparedQueryExecuteRequest{
|
2020-12-24 19:11:13 +00:00
|
|
|
Datacenter: dc,
|
|
|
|
QueryOptions: structs.QueryOptions{Token: s.token, MaxAge: defaultPreparedQueryPollInterval},
|
|
|
|
QueryIDOrName: u.DestinationName,
|
|
|
|
Connect: true,
|
|
|
|
Source: *s.source,
|
2022-01-20 16:12:04 +00:00
|
|
|
}, "upstream:"+uid.String(), s.ch)
|
2020-12-24 19:11:13 +00:00
|
|
|
if err != nil {
|
|
|
|
return snap, err
|
|
|
|
}
|
|
|
|
|
|
|
|
case structs.UpstreamDestTypeService:
|
|
|
|
fallthrough
|
|
|
|
|
2021-09-07 20:29:32 +00:00
|
|
|
case "":
|
2022-05-04 20:25:25 +00:00
|
|
|
if u.DestinationPeer != "" {
|
2023-01-18 19:43:53 +00:00
|
|
|
err := s.setupWatchesForPeeredUpstream(ctx, snap.ConnectProxy, NewUpstreamID(&u), dc)
|
2022-05-04 20:25:25 +00:00
|
|
|
if err != nil {
|
Fix mesh gateway configuration with proxy-defaults (#15186)
* Fix mesh gateway proxy-defaults not affecting upstreams.
* Clarify distinction with upstream settings
Top-level mesh gateway mode in proxy-defaults and service-defaults gets
merged into NodeService.Proxy.MeshGateway, and only gets merged with
the mode attached to an an upstream in proxycfg/xds.
* Fix mgw mode usage for peered upstreams
There were a couple issues with how mgw mode was being handled for
peered upstreams.
For starters, mesh gateway mode from proxy-defaults
and the top-level of service-defaults gets stored in
NodeService.Proxy.MeshGateway, but the upstream watch for peered data
was only considering the mesh gateway config attached in
NodeService.Proxy.Upstreams[i]. This means that applying a mesh gateway
mode via global proxy-defaults or service-defaults on the downstream
would not have an effect.
Separately, transparent proxy watches for peered upstreams didn't
consider mesh gateway mode at all.
This commit addresses the first issue by ensuring that we overlay the
upstream config for peered upstreams as we do for non-peered. The second
issue is addressed by re-using setupWatchesForPeeredUpstream when
handling transparent proxy updates.
Note that for transparent proxies we do not yet support mesh gateway
mode per upstream, so the NodeService.Proxy.MeshGateway mode is used.
* Fix upstream mesh gateway mode handling in xds
This commit ensures that when determining the mesh gateway mode for
peered upstreams we consider the NodeService.Proxy.MeshGateway config as
a baseline.
In absense of this change, setting a mesh gateway mode via
proxy-defaults or the top-level of service-defaults will not have an
effect for peered upstreams.
* Merge service/proxy defaults in cfg resolver
Previously the mesh gateway mode for connect proxies would be
merged at three points:
1. On servers, in ComputeResolvedServiceConfig.
2. On clients, in MergeServiceConfig.
3. On clients, in proxycfg/xds.
The first merge returns a ServiceConfigResponse where there is a
top-level MeshGateway config from proxy/service-defaults, along with
per-upstream config.
The second merge combines per-upstream config specified at the service
instance with per-upstream config specified centrally.
The third merge combines the NodeService.Proxy.MeshGateway
config containing proxy/service-defaults data with the per-upstream
mode. This third merge is easy to miss, which led to peered upstreams
not considering the mesh gateway mode from proxy-defaults.
This commit removes the third merge, and ensures that all mesh gateway
config is available at the upstream. This way proxycfg/xds do not need
to do additional overlays.
* Ensure that proxy-defaults is considered in wc
Upstream defaults become a synthetic Upstream definition under a
wildcard key "*". Now that proxycfg/xds expect Upstream definitions to
have the final MeshGateway values, this commit ensures that values from
proxy-defaults/service-defaults are the default for this synthetic
upstream.
* Add changelog.
Co-authored-by: freddygv <freddy@hashicorp.com>
2022-11-09 16:14:29 +00:00
|
|
|
return snap, fmt.Errorf("failed to setup watches for peered upstream %q: %w", uid.String(), err)
|
2022-05-04 20:25:25 +00:00
|
|
|
}
|
2022-05-27 02:18:47 +00:00
|
|
|
continue
|
2022-05-04 20:25:25 +00:00
|
|
|
}
|
|
|
|
|
2022-09-27 13:49:28 +00:00
|
|
|
err := s.dataSources.CompiledDiscoveryChain.Notify(ctx, &structs.DiscoveryChainRequest{
|
2020-12-24 19:11:13 +00:00
|
|
|
Datacenter: s.source.Datacenter,
|
|
|
|
QueryOptions: structs.QueryOptions{Token: s.token},
|
|
|
|
Name: u.DestinationName,
|
|
|
|
EvaluateInDatacenter: dc,
|
|
|
|
EvaluateInNamespace: ns,
|
2021-09-07 20:29:32 +00:00
|
|
|
EvaluateInPartition: partition,
|
Fix mesh gateway configuration with proxy-defaults (#15186)
* Fix mesh gateway proxy-defaults not affecting upstreams.
* Clarify distinction with upstream settings
Top-level mesh gateway mode in proxy-defaults and service-defaults gets
merged into NodeService.Proxy.MeshGateway, and only gets merged with
the mode attached to an an upstream in proxycfg/xds.
* Fix mgw mode usage for peered upstreams
There were a couple issues with how mgw mode was being handled for
peered upstreams.
For starters, mesh gateway mode from proxy-defaults
and the top-level of service-defaults gets stored in
NodeService.Proxy.MeshGateway, but the upstream watch for peered data
was only considering the mesh gateway config attached in
NodeService.Proxy.Upstreams[i]. This means that applying a mesh gateway
mode via global proxy-defaults or service-defaults on the downstream
would not have an effect.
Separately, transparent proxy watches for peered upstreams didn't
consider mesh gateway mode at all.
This commit addresses the first issue by ensuring that we overlay the
upstream config for peered upstreams as we do for non-peered. The second
issue is addressed by re-using setupWatchesForPeeredUpstream when
handling transparent proxy updates.
Note that for transparent proxies we do not yet support mesh gateway
mode per upstream, so the NodeService.Proxy.MeshGateway mode is used.
* Fix upstream mesh gateway mode handling in xds
This commit ensures that when determining the mesh gateway mode for
peered upstreams we consider the NodeService.Proxy.MeshGateway config as
a baseline.
In absense of this change, setting a mesh gateway mode via
proxy-defaults or the top-level of service-defaults will not have an
effect for peered upstreams.
* Merge service/proxy defaults in cfg resolver
Previously the mesh gateway mode for connect proxies would be
merged at three points:
1. On servers, in ComputeResolvedServiceConfig.
2. On clients, in MergeServiceConfig.
3. On clients, in proxycfg/xds.
The first merge returns a ServiceConfigResponse where there is a
top-level MeshGateway config from proxy/service-defaults, along with
per-upstream config.
The second merge combines per-upstream config specified at the service
instance with per-upstream config specified centrally.
The third merge combines the NodeService.Proxy.MeshGateway
config containing proxy/service-defaults data with the per-upstream
mode. This third merge is easy to miss, which led to peered upstreams
not considering the mesh gateway mode from proxy-defaults.
This commit removes the third merge, and ensures that all mesh gateway
config is available at the upstream. This way proxycfg/xds do not need
to do additional overlays.
* Ensure that proxy-defaults is considered in wc
Upstream defaults become a synthetic Upstream definition under a
wildcard key "*". Now that proxycfg/xds expect Upstream definitions to
have the final MeshGateway values, this commit ensures that values from
proxy-defaults/service-defaults are the default for this synthetic
upstream.
* Add changelog.
Co-authored-by: freddygv <freddy@hashicorp.com>
2022-11-09 16:14:29 +00:00
|
|
|
OverrideMeshGateway: u.MeshGateway,
|
2020-12-24 19:11:13 +00:00
|
|
|
OverrideProtocol: cfg.Protocol,
|
|
|
|
OverrideConnectTimeout: cfg.ConnectTimeout(),
|
2022-01-20 16:12:04 +00:00
|
|
|
}, "discovery-chain:"+uid.String(), s.ch)
|
2020-12-24 19:11:13 +00:00
|
|
|
if err != nil {
|
2022-01-20 16:12:04 +00:00
|
|
|
return snap, fmt.Errorf("failed to watch discovery chain for %s: %v", uid.String(), err)
|
2020-12-24 19:11:13 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
default:
|
|
|
|
return snap, fmt.Errorf("unknown upstream type: %q", u.DestinationType)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return snap, nil
|
|
|
|
}
|
|
|
|
|
2022-09-27 13:49:28 +00:00
|
|
|
func (s *handlerConnectProxy) setupWatchesForPeeredUpstream(
|
|
|
|
ctx context.Context,
|
|
|
|
snapConnectProxy configSnapshotConnectProxy,
|
Fix mesh gateway configuration with proxy-defaults (#15186)
* Fix mesh gateway proxy-defaults not affecting upstreams.
* Clarify distinction with upstream settings
Top-level mesh gateway mode in proxy-defaults and service-defaults gets
merged into NodeService.Proxy.MeshGateway, and only gets merged with
the mode attached to an an upstream in proxycfg/xds.
* Fix mgw mode usage for peered upstreams
There were a couple issues with how mgw mode was being handled for
peered upstreams.
For starters, mesh gateway mode from proxy-defaults
and the top-level of service-defaults gets stored in
NodeService.Proxy.MeshGateway, but the upstream watch for peered data
was only considering the mesh gateway config attached in
NodeService.Proxy.Upstreams[i]. This means that applying a mesh gateway
mode via global proxy-defaults or service-defaults on the downstream
would not have an effect.
Separately, transparent proxy watches for peered upstreams didn't
consider mesh gateway mode at all.
This commit addresses the first issue by ensuring that we overlay the
upstream config for peered upstreams as we do for non-peered. The second
issue is addressed by re-using setupWatchesForPeeredUpstream when
handling transparent proxy updates.
Note that for transparent proxies we do not yet support mesh gateway
mode per upstream, so the NodeService.Proxy.MeshGateway mode is used.
* Fix upstream mesh gateway mode handling in xds
This commit ensures that when determining the mesh gateway mode for
peered upstreams we consider the NodeService.Proxy.MeshGateway config as
a baseline.
In absense of this change, setting a mesh gateway mode via
proxy-defaults or the top-level of service-defaults will not have an
effect for peered upstreams.
* Merge service/proxy defaults in cfg resolver
Previously the mesh gateway mode for connect proxies would be
merged at three points:
1. On servers, in ComputeResolvedServiceConfig.
2. On clients, in MergeServiceConfig.
3. On clients, in proxycfg/xds.
The first merge returns a ServiceConfigResponse where there is a
top-level MeshGateway config from proxy/service-defaults, along with
per-upstream config.
The second merge combines per-upstream config specified at the service
instance with per-upstream config specified centrally.
The third merge combines the NodeService.Proxy.MeshGateway
config containing proxy/service-defaults data with the per-upstream
mode. This third merge is easy to miss, which led to peered upstreams
not considering the mesh gateway mode from proxy-defaults.
This commit removes the third merge, and ensures that all mesh gateway
config is available at the upstream. This way proxycfg/xds do not need
to do additional overlays.
* Ensure that proxy-defaults is considered in wc
Upstream defaults become a synthetic Upstream definition under a
wildcard key "*". Now that proxycfg/xds expect Upstream definitions to
have the final MeshGateway values, this commit ensures that values from
proxy-defaults/service-defaults are the default for this synthetic
upstream.
* Add changelog.
Co-authored-by: freddygv <freddy@hashicorp.com>
2022-11-09 16:14:29 +00:00
|
|
|
uid UpstreamID,
|
2022-09-27 13:49:28 +00:00
|
|
|
dc string,
|
|
|
|
) error {
|
|
|
|
s.logger.Trace("initializing watch of peered upstream", "upstream", uid)
|
|
|
|
|
|
|
|
// NOTE: An upstream that points to a peer by definition will
|
|
|
|
// only ever watch a single catalog query, so a map key of just
|
|
|
|
// "UID" is sufficient to cover the peer data watches here.
|
|
|
|
err := s.dataSources.Health.Notify(ctx, &structs.ServiceSpecificRequest{
|
|
|
|
PeerName: uid.Peer,
|
|
|
|
Datacenter: dc,
|
|
|
|
QueryOptions: structs.QueryOptions{
|
|
|
|
Token: s.token,
|
|
|
|
},
|
Fix mesh gateway configuration with proxy-defaults (#15186)
* Fix mesh gateway proxy-defaults not affecting upstreams.
* Clarify distinction with upstream settings
Top-level mesh gateway mode in proxy-defaults and service-defaults gets
merged into NodeService.Proxy.MeshGateway, and only gets merged with
the mode attached to an an upstream in proxycfg/xds.
* Fix mgw mode usage for peered upstreams
There were a couple issues with how mgw mode was being handled for
peered upstreams.
For starters, mesh gateway mode from proxy-defaults
and the top-level of service-defaults gets stored in
NodeService.Proxy.MeshGateway, but the upstream watch for peered data
was only considering the mesh gateway config attached in
NodeService.Proxy.Upstreams[i]. This means that applying a mesh gateway
mode via global proxy-defaults or service-defaults on the downstream
would not have an effect.
Separately, transparent proxy watches for peered upstreams didn't
consider mesh gateway mode at all.
This commit addresses the first issue by ensuring that we overlay the
upstream config for peered upstreams as we do for non-peered. The second
issue is addressed by re-using setupWatchesForPeeredUpstream when
handling transparent proxy updates.
Note that for transparent proxies we do not yet support mesh gateway
mode per upstream, so the NodeService.Proxy.MeshGateway mode is used.
* Fix upstream mesh gateway mode handling in xds
This commit ensures that when determining the mesh gateway mode for
peered upstreams we consider the NodeService.Proxy.MeshGateway config as
a baseline.
In absense of this change, setting a mesh gateway mode via
proxy-defaults or the top-level of service-defaults will not have an
effect for peered upstreams.
* Merge service/proxy defaults in cfg resolver
Previously the mesh gateway mode for connect proxies would be
merged at three points:
1. On servers, in ComputeResolvedServiceConfig.
2. On clients, in MergeServiceConfig.
3. On clients, in proxycfg/xds.
The first merge returns a ServiceConfigResponse where there is a
top-level MeshGateway config from proxy/service-defaults, along with
per-upstream config.
The second merge combines per-upstream config specified at the service
instance with per-upstream config specified centrally.
The third merge combines the NodeService.Proxy.MeshGateway
config containing proxy/service-defaults data with the per-upstream
mode. This third merge is easy to miss, which led to peered upstreams
not considering the mesh gateway mode from proxy-defaults.
This commit removes the third merge, and ensures that all mesh gateway
config is available at the upstream. This way proxycfg/xds do not need
to do additional overlays.
* Ensure that proxy-defaults is considered in wc
Upstream defaults become a synthetic Upstream definition under a
wildcard key "*". Now that proxycfg/xds expect Upstream definitions to
have the final MeshGateway values, this commit ensures that values from
proxy-defaults/service-defaults are the default for this synthetic
upstream.
* Add changelog.
Co-authored-by: freddygv <freddy@hashicorp.com>
2022-11-09 16:14:29 +00:00
|
|
|
ServiceName: uid.Name,
|
2022-09-27 13:49:28 +00:00
|
|
|
Connect: true,
|
|
|
|
Source: *s.source,
|
|
|
|
EnterpriseMeta: uid.EnterpriseMeta,
|
|
|
|
}, upstreamPeerWatchIDPrefix+uid.String(), s.ch)
|
|
|
|
if err != nil {
|
Fix mesh gateway configuration with proxy-defaults (#15186)
* Fix mesh gateway proxy-defaults not affecting upstreams.
* Clarify distinction with upstream settings
Top-level mesh gateway mode in proxy-defaults and service-defaults gets
merged into NodeService.Proxy.MeshGateway, and only gets merged with
the mode attached to an an upstream in proxycfg/xds.
* Fix mgw mode usage for peered upstreams
There were a couple issues with how mgw mode was being handled for
peered upstreams.
For starters, mesh gateway mode from proxy-defaults
and the top-level of service-defaults gets stored in
NodeService.Proxy.MeshGateway, but the upstream watch for peered data
was only considering the mesh gateway config attached in
NodeService.Proxy.Upstreams[i]. This means that applying a mesh gateway
mode via global proxy-defaults or service-defaults on the downstream
would not have an effect.
Separately, transparent proxy watches for peered upstreams didn't
consider mesh gateway mode at all.
This commit addresses the first issue by ensuring that we overlay the
upstream config for peered upstreams as we do for non-peered. The second
issue is addressed by re-using setupWatchesForPeeredUpstream when
handling transparent proxy updates.
Note that for transparent proxies we do not yet support mesh gateway
mode per upstream, so the NodeService.Proxy.MeshGateway mode is used.
* Fix upstream mesh gateway mode handling in xds
This commit ensures that when determining the mesh gateway mode for
peered upstreams we consider the NodeService.Proxy.MeshGateway config as
a baseline.
In absense of this change, setting a mesh gateway mode via
proxy-defaults or the top-level of service-defaults will not have an
effect for peered upstreams.
* Merge service/proxy defaults in cfg resolver
Previously the mesh gateway mode for connect proxies would be
merged at three points:
1. On servers, in ComputeResolvedServiceConfig.
2. On clients, in MergeServiceConfig.
3. On clients, in proxycfg/xds.
The first merge returns a ServiceConfigResponse where there is a
top-level MeshGateway config from proxy/service-defaults, along with
per-upstream config.
The second merge combines per-upstream config specified at the service
instance with per-upstream config specified centrally.
The third merge combines the NodeService.Proxy.MeshGateway
config containing proxy/service-defaults data with the per-upstream
mode. This third merge is easy to miss, which led to peered upstreams
not considering the mesh gateway mode from proxy-defaults.
This commit removes the third merge, and ensures that all mesh gateway
config is available at the upstream. This way proxycfg/xds do not need
to do additional overlays.
* Ensure that proxy-defaults is considered in wc
Upstream defaults become a synthetic Upstream definition under a
wildcard key "*". Now that proxycfg/xds expect Upstream definitions to
have the final MeshGateway values, this commit ensures that values from
proxy-defaults/service-defaults are the default for this synthetic
upstream.
* Add changelog.
Co-authored-by: freddygv <freddy@hashicorp.com>
2022-11-09 16:14:29 +00:00
|
|
|
return fmt.Errorf("failed to watch health for %s: %v", uid, err)
|
2022-09-27 13:49:28 +00:00
|
|
|
}
|
Fix mesh gateway configuration with proxy-defaults (#15186)
* Fix mesh gateway proxy-defaults not affecting upstreams.
* Clarify distinction with upstream settings
Top-level mesh gateway mode in proxy-defaults and service-defaults gets
merged into NodeService.Proxy.MeshGateway, and only gets merged with
the mode attached to an an upstream in proxycfg/xds.
* Fix mgw mode usage for peered upstreams
There were a couple issues with how mgw mode was being handled for
peered upstreams.
For starters, mesh gateway mode from proxy-defaults
and the top-level of service-defaults gets stored in
NodeService.Proxy.MeshGateway, but the upstream watch for peered data
was only considering the mesh gateway config attached in
NodeService.Proxy.Upstreams[i]. This means that applying a mesh gateway
mode via global proxy-defaults or service-defaults on the downstream
would not have an effect.
Separately, transparent proxy watches for peered upstreams didn't
consider mesh gateway mode at all.
This commit addresses the first issue by ensuring that we overlay the
upstream config for peered upstreams as we do for non-peered. The second
issue is addressed by re-using setupWatchesForPeeredUpstream when
handling transparent proxy updates.
Note that for transparent proxies we do not yet support mesh gateway
mode per upstream, so the NodeService.Proxy.MeshGateway mode is used.
* Fix upstream mesh gateway mode handling in xds
This commit ensures that when determining the mesh gateway mode for
peered upstreams we consider the NodeService.Proxy.MeshGateway config as
a baseline.
In absense of this change, setting a mesh gateway mode via
proxy-defaults or the top-level of service-defaults will not have an
effect for peered upstreams.
* Merge service/proxy defaults in cfg resolver
Previously the mesh gateway mode for connect proxies would be
merged at three points:
1. On servers, in ComputeResolvedServiceConfig.
2. On clients, in MergeServiceConfig.
3. On clients, in proxycfg/xds.
The first merge returns a ServiceConfigResponse where there is a
top-level MeshGateway config from proxy/service-defaults, along with
per-upstream config.
The second merge combines per-upstream config specified at the service
instance with per-upstream config specified centrally.
The third merge combines the NodeService.Proxy.MeshGateway
config containing proxy/service-defaults data with the per-upstream
mode. This third merge is easy to miss, which led to peered upstreams
not considering the mesh gateway mode from proxy-defaults.
This commit removes the third merge, and ensures that all mesh gateway
config is available at the upstream. This way proxycfg/xds do not need
to do additional overlays.
* Ensure that proxy-defaults is considered in wc
Upstream defaults become a synthetic Upstream definition under a
wildcard key "*". Now that proxycfg/xds expect Upstream definitions to
have the final MeshGateway values, this commit ensures that values from
proxy-defaults/service-defaults are the default for this synthetic
upstream.
* Add changelog.
Co-authored-by: freddygv <freddy@hashicorp.com>
2022-11-09 16:14:29 +00:00
|
|
|
snapConnectProxy.PeerUpstreamEndpoints.InitWatch(uid, nil)
|
2022-09-27 13:49:28 +00:00
|
|
|
|
|
|
|
// Check whether a watch for this peer exists to avoid duplicates.
|
|
|
|
if ok := snapConnectProxy.UpstreamPeerTrustBundles.IsWatched(uid.Peer); !ok {
|
|
|
|
peerCtx, cancel := context.WithCancel(ctx)
|
|
|
|
if err := s.dataSources.TrustBundle.Notify(peerCtx, &cachetype.TrustBundleReadRequest{
|
|
|
|
Request: &pbpeering.TrustBundleReadRequest{
|
|
|
|
Name: uid.Peer,
|
|
|
|
Partition: uid.PartitionOrDefault(),
|
|
|
|
},
|
|
|
|
QueryOptions: structs.QueryOptions{Token: s.token},
|
|
|
|
}, peerTrustBundleIDPrefix+uid.Peer, s.ch); err != nil {
|
|
|
|
cancel()
|
|
|
|
return fmt.Errorf("error while watching trust bundle for peer %q: %w", uid.Peer, err)
|
|
|
|
}
|
|
|
|
|
|
|
|
snapConnectProxy.UpstreamPeerTrustBundles.InitWatch(uid.Peer, cancel)
|
|
|
|
}
|
|
|
|
|
2023-01-18 19:43:53 +00:00
|
|
|
// Always watch local GW endpoints for peer upstreams so that we don't have to worry about
|
|
|
|
// the timing on whether the wildcard upstream config was fetched yet.
|
|
|
|
up := &handlerUpstreams{handlerState: s.handlerState}
|
|
|
|
up.setupWatchForLocalGWEndpoints(ctx, &snapConnectProxy.ConfigSnapshotUpstreams)
|
2022-09-27 13:49:28 +00:00
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
2022-05-20 14:47:40 +00:00
|
|
|
func (s *handlerConnectProxy) handleUpdate(ctx context.Context, u UpdateEvent, snap *ConfigSnapshot) error {
|
2020-12-24 19:11:13 +00:00
|
|
|
if u.Err != nil {
|
|
|
|
return fmt.Errorf("error filling agent cache: %v", u.Err)
|
|
|
|
}
|
|
|
|
|
|
|
|
switch {
|
|
|
|
case u.CorrelationID == rootsWatchID:
|
|
|
|
roots, ok := u.Result.(*structs.IndexedCARoots)
|
|
|
|
if !ok {
|
|
|
|
return fmt.Errorf("invalid type for response: %T", u.Result)
|
|
|
|
}
|
|
|
|
snap.Roots = roots
|
2022-06-01 21:53:52 +00:00
|
|
|
|
2022-06-01 20:31:37 +00:00
|
|
|
case u.CorrelationID == peeringTrustBundlesWatchID:
|
|
|
|
resp, ok := u.Result.(*pbpeering.TrustBundleListByServiceResponse)
|
|
|
|
if !ok {
|
|
|
|
return fmt.Errorf("invalid type for response: %T", u.Result)
|
|
|
|
}
|
|
|
|
if len(resp.Bundles) > 0 {
|
2022-06-21 02:47:14 +00:00
|
|
|
snap.ConnectProxy.InboundPeerTrustBundles = resp.Bundles
|
2022-06-01 20:31:37 +00:00
|
|
|
}
|
2022-06-21 02:47:14 +00:00
|
|
|
snap.ConnectProxy.InboundPeerTrustBundlesSet = true
|
2022-06-01 20:31:37 +00:00
|
|
|
|
2020-12-24 19:11:13 +00:00
|
|
|
case u.CorrelationID == intentionsWatchID:
|
2023-04-20 16:16:04 +00:00
|
|
|
resp, ok := u.Result.(structs.SimplifiedIntentions)
|
2020-12-24 19:11:13 +00:00
|
|
|
if !ok {
|
|
|
|
return fmt.Errorf("invalid type for response: %T", u.Result)
|
|
|
|
}
|
2022-07-01 15:15:49 +00:00
|
|
|
snap.ConnectProxy.Intentions = resp
|
2020-12-24 19:11:13 +00:00
|
|
|
snap.ConnectProxy.IntentionsSet = true
|
|
|
|
|
2023-05-19 22:14:16 +00:00
|
|
|
case u.CorrelationID == jwtProviderID:
|
|
|
|
resp, ok := u.Result.(*structs.IndexedConfigEntries)
|
|
|
|
|
|
|
|
if !ok {
|
|
|
|
return fmt.Errorf("invalid type for response: %T", u.Result)
|
|
|
|
}
|
|
|
|
|
|
|
|
providers := make(map[string]*structs.JWTProviderConfigEntry, len(resp.Entries))
|
|
|
|
for _, entry := range resp.Entries {
|
|
|
|
jwtEntry, ok := entry.(*structs.JWTProviderConfigEntry)
|
|
|
|
if !ok {
|
|
|
|
return fmt.Errorf("invalid type for response: %T", entry)
|
|
|
|
}
|
|
|
|
providers[jwtEntry.Name] = jwtEntry
|
|
|
|
}
|
|
|
|
|
|
|
|
snap.JWTProviders = providers
|
2022-07-13 16:14:57 +00:00
|
|
|
case u.CorrelationID == peeredUpstreamsID:
|
|
|
|
resp, ok := u.Result.(*structs.IndexedPeeredServiceList)
|
|
|
|
if !ok {
|
|
|
|
return fmt.Errorf("invalid type for response %T", u.Result)
|
|
|
|
}
|
|
|
|
|
|
|
|
seenUpstreams := make(map[UpstreamID]struct{})
|
|
|
|
for _, psn := range resp.Services {
|
|
|
|
uid := NewUpstreamIDFromPeeredServiceName(psn)
|
|
|
|
|
|
|
|
if _, ok := seenUpstreams[uid]; ok {
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
seenUpstreams[uid] = struct{}{}
|
|
|
|
|
2023-01-18 19:43:53 +00:00
|
|
|
err := s.setupWatchesForPeeredUpstream(ctx, snap.ConnectProxy, uid, s.source.Datacenter)
|
2022-07-13 16:14:57 +00:00
|
|
|
if err != nil {
|
Fix mesh gateway configuration with proxy-defaults (#15186)
* Fix mesh gateway proxy-defaults not affecting upstreams.
* Clarify distinction with upstream settings
Top-level mesh gateway mode in proxy-defaults and service-defaults gets
merged into NodeService.Proxy.MeshGateway, and only gets merged with
the mode attached to an an upstream in proxycfg/xds.
* Fix mgw mode usage for peered upstreams
There were a couple issues with how mgw mode was being handled for
peered upstreams.
For starters, mesh gateway mode from proxy-defaults
and the top-level of service-defaults gets stored in
NodeService.Proxy.MeshGateway, but the upstream watch for peered data
was only considering the mesh gateway config attached in
NodeService.Proxy.Upstreams[i]. This means that applying a mesh gateway
mode via global proxy-defaults or service-defaults on the downstream
would not have an effect.
Separately, transparent proxy watches for peered upstreams didn't
consider mesh gateway mode at all.
This commit addresses the first issue by ensuring that we overlay the
upstream config for peered upstreams as we do for non-peered. The second
issue is addressed by re-using setupWatchesForPeeredUpstream when
handling transparent proxy updates.
Note that for transparent proxies we do not yet support mesh gateway
mode per upstream, so the NodeService.Proxy.MeshGateway mode is used.
* Fix upstream mesh gateway mode handling in xds
This commit ensures that when determining the mesh gateway mode for
peered upstreams we consider the NodeService.Proxy.MeshGateway config as
a baseline.
In absense of this change, setting a mesh gateway mode via
proxy-defaults or the top-level of service-defaults will not have an
effect for peered upstreams.
* Merge service/proxy defaults in cfg resolver
Previously the mesh gateway mode for connect proxies would be
merged at three points:
1. On servers, in ComputeResolvedServiceConfig.
2. On clients, in MergeServiceConfig.
3. On clients, in proxycfg/xds.
The first merge returns a ServiceConfigResponse where there is a
top-level MeshGateway config from proxy/service-defaults, along with
per-upstream config.
The second merge combines per-upstream config specified at the service
instance with per-upstream config specified centrally.
The third merge combines the NodeService.Proxy.MeshGateway
config containing proxy/service-defaults data with the per-upstream
mode. This third merge is easy to miss, which led to peered upstreams
not considering the mesh gateway mode from proxy-defaults.
This commit removes the third merge, and ensures that all mesh gateway
config is available at the upstream. This way proxycfg/xds do not need
to do additional overlays.
* Ensure that proxy-defaults is considered in wc
Upstream defaults become a synthetic Upstream definition under a
wildcard key "*". Now that proxycfg/xds expect Upstream definitions to
have the final MeshGateway values, this commit ensures that values from
proxy-defaults/service-defaults are the default for this synthetic
upstream.
* Add changelog.
Co-authored-by: freddygv <freddy@hashicorp.com>
2022-11-09 16:14:29 +00:00
|
|
|
return fmt.Errorf("failed to setup watches for peered upstream %q: %w", uid.String(), err)
|
2022-07-13 16:14:57 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
snap.ConnectProxy.PeeredUpstreams = seenUpstreams
|
|
|
|
|
|
|
|
//
|
|
|
|
// Clean up data
|
|
|
|
//
|
|
|
|
|
2022-08-30 15:46:34 +00:00
|
|
|
peeredChainTargets := make(map[UpstreamID]struct{})
|
|
|
|
for _, discoChain := range snap.ConnectProxy.DiscoveryChain {
|
|
|
|
for _, target := range discoChain.Targets {
|
|
|
|
if target.Peer == "" {
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
uid := NewUpstreamIDFromTargetID(target.ID)
|
|
|
|
peeredChainTargets[uid] = struct{}{}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2022-07-13 16:14:57 +00:00
|
|
|
validPeerNames := make(map[string]struct{})
|
|
|
|
|
|
|
|
// Iterate through all known endpoints and remove references to upstream IDs that weren't in the update
|
|
|
|
snap.ConnectProxy.PeerUpstreamEndpoints.ForEachKey(func(uid UpstreamID) bool {
|
|
|
|
// Peered upstream is explicitly defined in upstream config
|
|
|
|
if _, ok := snap.ConnectProxy.UpstreamConfig[uid]; ok {
|
|
|
|
validPeerNames[uid.Peer] = struct{}{}
|
|
|
|
return true
|
|
|
|
}
|
|
|
|
// Peered upstream came from dynamic source of imported services
|
|
|
|
if _, ok := seenUpstreams[uid]; ok {
|
|
|
|
validPeerNames[uid.Peer] = struct{}{}
|
|
|
|
return true
|
|
|
|
}
|
2022-08-30 15:46:34 +00:00
|
|
|
// Peered upstream came from a discovery chain target
|
|
|
|
if _, ok := peeredChainTargets[uid]; ok {
|
|
|
|
validPeerNames[uid.Peer] = struct{}{}
|
|
|
|
return true
|
|
|
|
}
|
2022-07-13 16:14:57 +00:00
|
|
|
snap.ConnectProxy.PeerUpstreamEndpoints.CancelWatch(uid)
|
|
|
|
return true
|
|
|
|
})
|
|
|
|
|
|
|
|
// Iterate through all known trust bundles and remove references to any unseen peer names
|
|
|
|
snap.ConnectProxy.UpstreamPeerTrustBundles.ForEachKey(func(peerName PeerName) bool {
|
|
|
|
if _, ok := validPeerNames[peerName]; !ok {
|
|
|
|
snap.ConnectProxy.UpstreamPeerTrustBundles.CancelWatch(peerName)
|
|
|
|
}
|
|
|
|
return true
|
|
|
|
})
|
|
|
|
|
2020-12-24 19:11:13 +00:00
|
|
|
case u.CorrelationID == intentionUpstreamsID:
|
|
|
|
resp, ok := u.Result.(*structs.IndexedServiceList)
|
|
|
|
if !ok {
|
|
|
|
return fmt.Errorf("invalid type for response %T", u.Result)
|
|
|
|
}
|
|
|
|
|
2022-01-20 16:12:04 +00:00
|
|
|
seenUpstreams := make(map[UpstreamID]struct{})
|
2020-12-24 19:11:13 +00:00
|
|
|
for _, svc := range resp.Services {
|
2022-01-20 16:12:04 +00:00
|
|
|
uid := NewUpstreamIDFromServiceName(svc)
|
|
|
|
|
|
|
|
seenUpstreams[uid] = struct{}{}
|
2020-12-24 19:11:13 +00:00
|
|
|
|
|
|
|
cfgMap := make(map[string]interface{})
|
2022-01-20 16:12:04 +00:00
|
|
|
u, ok := snap.ConnectProxy.UpstreamConfig[uid]
|
2020-12-24 19:11:13 +00:00
|
|
|
if ok {
|
|
|
|
cfgMap = u.Config
|
|
|
|
} else {
|
|
|
|
// Use the centralized upstream defaults if they exist and there isn't specific configuration for this upstream
|
|
|
|
// This is only relevant to upstreams from intentions because for explicit upstreams the defaulting is handled
|
|
|
|
// by the ResolveServiceConfig endpoint.
|
2023-01-18 19:43:53 +00:00
|
|
|
wildcardUID := NewWildcardUID(&s.proxyID.EnterpriseMeta)
|
2022-01-20 16:12:04 +00:00
|
|
|
defaults, ok := snap.ConnectProxy.UpstreamConfig[wildcardUID]
|
2020-12-24 19:11:13 +00:00
|
|
|
if ok {
|
|
|
|
u = defaults
|
|
|
|
cfgMap = defaults.Config
|
2022-01-20 16:12:04 +00:00
|
|
|
snap.ConnectProxy.UpstreamConfig[uid] = defaults
|
2020-12-24 19:11:13 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
cfg, err := parseReducedUpstreamConfig(cfgMap)
|
|
|
|
if err != nil {
|
|
|
|
// Don't hard fail on a config typo, just warn. We'll fall back on
|
|
|
|
// the plain discovery chain if there is an error so it's safe to
|
|
|
|
// continue.
|
|
|
|
s.logger.Warn("failed to parse upstream config",
|
2022-01-20 16:12:04 +00:00
|
|
|
"upstream", uid,
|
2020-12-24 19:11:13 +00:00
|
|
|
"error", err,
|
|
|
|
)
|
|
|
|
}
|
|
|
|
|
|
|
|
meshGateway := s.proxyCfg.MeshGateway
|
|
|
|
if u != nil {
|
Fix mesh gateway configuration with proxy-defaults (#15186)
* Fix mesh gateway proxy-defaults not affecting upstreams.
* Clarify distinction with upstream settings
Top-level mesh gateway mode in proxy-defaults and service-defaults gets
merged into NodeService.Proxy.MeshGateway, and only gets merged with
the mode attached to an an upstream in proxycfg/xds.
* Fix mgw mode usage for peered upstreams
There were a couple issues with how mgw mode was being handled for
peered upstreams.
For starters, mesh gateway mode from proxy-defaults
and the top-level of service-defaults gets stored in
NodeService.Proxy.MeshGateway, but the upstream watch for peered data
was only considering the mesh gateway config attached in
NodeService.Proxy.Upstreams[i]. This means that applying a mesh gateway
mode via global proxy-defaults or service-defaults on the downstream
would not have an effect.
Separately, transparent proxy watches for peered upstreams didn't
consider mesh gateway mode at all.
This commit addresses the first issue by ensuring that we overlay the
upstream config for peered upstreams as we do for non-peered. The second
issue is addressed by re-using setupWatchesForPeeredUpstream when
handling transparent proxy updates.
Note that for transparent proxies we do not yet support mesh gateway
mode per upstream, so the NodeService.Proxy.MeshGateway mode is used.
* Fix upstream mesh gateway mode handling in xds
This commit ensures that when determining the mesh gateway mode for
peered upstreams we consider the NodeService.Proxy.MeshGateway config as
a baseline.
In absense of this change, setting a mesh gateway mode via
proxy-defaults or the top-level of service-defaults will not have an
effect for peered upstreams.
* Merge service/proxy defaults in cfg resolver
Previously the mesh gateway mode for connect proxies would be
merged at three points:
1. On servers, in ComputeResolvedServiceConfig.
2. On clients, in MergeServiceConfig.
3. On clients, in proxycfg/xds.
The first merge returns a ServiceConfigResponse where there is a
top-level MeshGateway config from proxy/service-defaults, along with
per-upstream config.
The second merge combines per-upstream config specified at the service
instance with per-upstream config specified centrally.
The third merge combines the NodeService.Proxy.MeshGateway
config containing proxy/service-defaults data with the per-upstream
mode. This third merge is easy to miss, which led to peered upstreams
not considering the mesh gateway mode from proxy-defaults.
This commit removes the third merge, and ensures that all mesh gateway
config is available at the upstream. This way proxycfg/xds do not need
to do additional overlays.
* Ensure that proxy-defaults is considered in wc
Upstream defaults become a synthetic Upstream definition under a
wildcard key "*". Now that proxycfg/xds expect Upstream definitions to
have the final MeshGateway values, this commit ensures that values from
proxy-defaults/service-defaults are the default for this synthetic
upstream.
* Add changelog.
Co-authored-by: freddygv <freddy@hashicorp.com>
2022-11-09 16:14:29 +00:00
|
|
|
meshGateway = u.MeshGateway
|
2020-12-24 19:11:13 +00:00
|
|
|
}
|
|
|
|
watchOpts := discoveryChainWatchOpts{
|
2022-01-20 16:12:04 +00:00
|
|
|
id: NewUpstreamIDFromServiceName(svc),
|
2020-12-24 19:11:13 +00:00
|
|
|
name: svc.Name,
|
|
|
|
namespace: svc.NamespaceOrDefault(),
|
2021-09-16 19:31:19 +00:00
|
|
|
partition: svc.PartitionOrDefault(),
|
2020-12-24 19:11:13 +00:00
|
|
|
datacenter: s.source.Datacenter,
|
|
|
|
cfg: cfg,
|
|
|
|
meshGateway: meshGateway,
|
|
|
|
}
|
|
|
|
up := &handlerUpstreams{handlerState: s.handlerState}
|
|
|
|
err = up.watchDiscoveryChain(ctx, snap, watchOpts)
|
|
|
|
if err != nil {
|
2022-01-20 16:12:04 +00:00
|
|
|
return fmt.Errorf("failed to watch discovery chain for %s: %v", uid, err)
|
2020-12-24 19:11:13 +00:00
|
|
|
}
|
|
|
|
}
|
2022-01-20 16:12:04 +00:00
|
|
|
snap.ConnectProxy.IntentionUpstreams = seenUpstreams
|
2020-12-24 19:11:13 +00:00
|
|
|
|
|
|
|
// Clean up data from services that were not in the update
|
2022-01-20 16:12:04 +00:00
|
|
|
for uid, targets := range snap.ConnectProxy.WatchedUpstreams {
|
2022-10-05 19:38:25 +00:00
|
|
|
if upstream, ok := snap.ConnectProxy.UpstreamConfig[uid]; ok && !upstream.CentrallyConfigured {
|
2020-12-24 19:11:13 +00:00
|
|
|
continue
|
|
|
|
}
|
2022-01-20 16:12:04 +00:00
|
|
|
if _, ok := seenUpstreams[uid]; !ok {
|
2022-08-30 15:46:34 +00:00
|
|
|
for targetID, cancelFn := range targets {
|
2021-12-13 21:56:17 +00:00
|
|
|
cancelFn()
|
2022-08-30 15:46:34 +00:00
|
|
|
|
|
|
|
targetUID := NewUpstreamIDFromTargetID(targetID)
|
|
|
|
if targetUID.Peer != "" {
|
|
|
|
snap.ConnectProxy.PeerUpstreamEndpoints.CancelWatch(targetUID)
|
|
|
|
snap.ConnectProxy.UpstreamPeerTrustBundles.CancelWatch(targetUID.Peer)
|
|
|
|
}
|
2021-12-13 21:56:17 +00:00
|
|
|
}
|
2022-01-20 16:12:04 +00:00
|
|
|
delete(snap.ConnectProxy.WatchedUpstreams, uid)
|
2020-12-24 19:11:13 +00:00
|
|
|
}
|
|
|
|
}
|
2022-01-20 16:12:04 +00:00
|
|
|
for uid := range snap.ConnectProxy.WatchedUpstreamEndpoints {
|
2022-10-05 19:38:25 +00:00
|
|
|
if upstream, ok := snap.ConnectProxy.UpstreamConfig[uid]; ok && !upstream.CentrallyConfigured {
|
2020-12-24 19:11:13 +00:00
|
|
|
continue
|
|
|
|
}
|
2022-01-20 16:12:04 +00:00
|
|
|
if _, ok := seenUpstreams[uid]; !ok {
|
|
|
|
delete(snap.ConnectProxy.WatchedUpstreamEndpoints, uid)
|
2020-12-24 19:11:13 +00:00
|
|
|
}
|
|
|
|
}
|
2022-01-20 16:12:04 +00:00
|
|
|
for uid, cancelMap := range snap.ConnectProxy.WatchedGateways {
|
2022-10-05 19:38:25 +00:00
|
|
|
if upstream, ok := snap.ConnectProxy.UpstreamConfig[uid]; ok && !upstream.CentrallyConfigured {
|
2020-12-24 19:11:13 +00:00
|
|
|
continue
|
|
|
|
}
|
2022-01-20 16:12:04 +00:00
|
|
|
if _, ok := seenUpstreams[uid]; !ok {
|
2021-12-13 21:56:17 +00:00
|
|
|
for _, cancelFn := range cancelMap {
|
|
|
|
cancelFn()
|
|
|
|
}
|
2022-01-20 16:12:04 +00:00
|
|
|
delete(snap.ConnectProxy.WatchedGateways, uid)
|
2020-12-24 19:11:13 +00:00
|
|
|
}
|
|
|
|
}
|
2022-01-20 16:12:04 +00:00
|
|
|
for uid := range snap.ConnectProxy.WatchedGatewayEndpoints {
|
2022-10-05 19:38:25 +00:00
|
|
|
if upstream, ok := snap.ConnectProxy.UpstreamConfig[uid]; ok && !upstream.CentrallyConfigured {
|
2020-12-24 19:11:13 +00:00
|
|
|
continue
|
|
|
|
}
|
2022-01-20 16:12:04 +00:00
|
|
|
if _, ok := seenUpstreams[uid]; !ok {
|
|
|
|
delete(snap.ConnectProxy.WatchedGatewayEndpoints, uid)
|
2020-12-24 19:11:13 +00:00
|
|
|
}
|
|
|
|
}
|
2022-01-20 16:12:04 +00:00
|
|
|
for uid, cancelFn := range snap.ConnectProxy.WatchedDiscoveryChains {
|
2022-10-05 19:38:25 +00:00
|
|
|
if upstream, ok := snap.ConnectProxy.UpstreamConfig[uid]; ok && !upstream.CentrallyConfigured {
|
2020-12-24 19:11:13 +00:00
|
|
|
continue
|
|
|
|
}
|
2022-01-20 16:12:04 +00:00
|
|
|
if _, ok := seenUpstreams[uid]; !ok {
|
2020-12-24 19:11:13 +00:00
|
|
|
cancelFn()
|
2022-01-20 16:12:04 +00:00
|
|
|
delete(snap.ConnectProxy.WatchedDiscoveryChains, uid)
|
2021-12-13 22:40:37 +00:00
|
|
|
}
|
|
|
|
}
|
2022-01-28 06:49:06 +00:00
|
|
|
for uid := range snap.ConnectProxy.PassthroughUpstreams {
|
2022-01-28 03:52:26 +00:00
|
|
|
if _, ok := seenUpstreams[uid]; !ok {
|
|
|
|
delete(snap.ConnectProxy.PassthroughUpstreams, uid)
|
|
|
|
}
|
|
|
|
}
|
2022-01-28 06:49:06 +00:00
|
|
|
for addr, indexed := range snap.ConnectProxy.PassthroughIndices {
|
|
|
|
if _, ok := seenUpstreams[indexed.upstreamID]; !ok {
|
|
|
|
delete(snap.ConnectProxy.PassthroughIndices, addr)
|
|
|
|
}
|
|
|
|
}
|
2022-01-28 03:52:26 +00:00
|
|
|
|
2021-12-13 22:40:37 +00:00
|
|
|
// These entries are intentionally handled separately from the WatchedDiscoveryChains above.
|
|
|
|
// There have been situations where a discovery watch was cancelled, then fired.
|
|
|
|
// That update event then re-populated the DiscoveryChain map entry, which wouldn't get cleaned up
|
|
|
|
// since there was no known watch for it.
|
2022-01-20 16:12:04 +00:00
|
|
|
for uid := range snap.ConnectProxy.DiscoveryChain {
|
2022-10-05 19:38:25 +00:00
|
|
|
if upstream, ok := snap.ConnectProxy.UpstreamConfig[uid]; ok && !upstream.CentrallyConfigured {
|
2021-12-13 22:40:37 +00:00
|
|
|
continue
|
|
|
|
}
|
2022-01-20 16:12:04 +00:00
|
|
|
if _, ok := seenUpstreams[uid]; !ok {
|
|
|
|
delete(snap.ConnectProxy.DiscoveryChain, uid)
|
2020-12-24 19:11:13 +00:00
|
|
|
}
|
|
|
|
}
|
2022-07-14 18:45:51 +00:00
|
|
|
case u.CorrelationID == intentionUpstreamsDestinationID:
|
|
|
|
resp, ok := u.Result.(*structs.IndexedServiceList)
|
|
|
|
if !ok {
|
|
|
|
return fmt.Errorf("invalid type for response %T", u.Result)
|
|
|
|
}
|
|
|
|
seenUpstreams := make(map[UpstreamID]struct{})
|
|
|
|
for _, svc := range resp.Services {
|
|
|
|
uid := NewUpstreamIDFromServiceName(svc)
|
|
|
|
seenUpstreams[uid] = struct{}{}
|
|
|
|
{
|
|
|
|
childCtx, cancel := context.WithCancel(ctx)
|
|
|
|
err := s.dataSources.ConfigEntry.Notify(childCtx, &structs.ConfigEntryQuery{
|
|
|
|
Kind: structs.ServiceDefaults,
|
|
|
|
Name: svc.Name,
|
|
|
|
Datacenter: s.source.Datacenter,
|
|
|
|
QueryOptions: structs.QueryOptions{Token: s.token},
|
|
|
|
EnterpriseMeta: svc.EnterpriseMeta,
|
|
|
|
}, DestinationConfigEntryID+svc.String(), s.ch)
|
|
|
|
if err != nil {
|
|
|
|
cancel()
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
snap.ConnectProxy.DestinationsUpstream.InitWatch(uid, cancel)
|
|
|
|
}
|
|
|
|
{
|
|
|
|
childCtx, cancel := context.WithCancel(ctx)
|
|
|
|
err := s.dataSources.ServiceGateways.Notify(childCtx, &structs.ServiceSpecificRequest{
|
|
|
|
ServiceName: svc.Name,
|
|
|
|
Datacenter: s.source.Datacenter,
|
|
|
|
QueryOptions: structs.QueryOptions{Token: s.token},
|
|
|
|
EnterpriseMeta: svc.EnterpriseMeta,
|
|
|
|
ServiceKind: structs.ServiceKindTerminatingGateway,
|
|
|
|
}, DestinationGatewayID+svc.String(), s.ch)
|
|
|
|
if err != nil {
|
|
|
|
cancel()
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
snap.ConnectProxy.DestinationGateways.InitWatch(uid, cancel)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
snap.ConnectProxy.DestinationsUpstream.ForEachKey(func(uid UpstreamID) bool {
|
|
|
|
if _, ok := seenUpstreams[uid]; !ok {
|
|
|
|
snap.ConnectProxy.DestinationsUpstream.CancelWatch(uid)
|
|
|
|
}
|
|
|
|
return true
|
|
|
|
})
|
2020-12-24 19:11:13 +00:00
|
|
|
|
2022-07-14 18:45:51 +00:00
|
|
|
snap.ConnectProxy.DestinationGateways.ForEachKey(func(uid UpstreamID) bool {
|
|
|
|
if _, ok := seenUpstreams[uid]; !ok {
|
|
|
|
snap.ConnectProxy.DestinationGateways.CancelWatch(uid)
|
|
|
|
}
|
|
|
|
return true
|
|
|
|
})
|
|
|
|
case strings.HasPrefix(u.CorrelationID, DestinationConfigEntryID):
|
|
|
|
resp, ok := u.Result.(*structs.ConfigEntryResponse)
|
|
|
|
if !ok {
|
|
|
|
return fmt.Errorf("invalid type for response: %T", u.Result)
|
|
|
|
}
|
|
|
|
|
|
|
|
pq := strings.TrimPrefix(u.CorrelationID, DestinationConfigEntryID)
|
|
|
|
uid := UpstreamIDFromString(pq)
|
|
|
|
serviceConf, ok := resp.Entry.(*structs.ServiceConfigEntry)
|
|
|
|
if !ok {
|
|
|
|
return fmt.Errorf("invalid type for service default: %T", resp.Entry.GetName())
|
|
|
|
}
|
|
|
|
|
|
|
|
snap.ConnectProxy.DestinationsUpstream.Set(uid, serviceConf)
|
|
|
|
case strings.HasPrefix(u.CorrelationID, DestinationGatewayID):
|
|
|
|
resp, ok := u.Result.(*structs.IndexedCheckServiceNodes)
|
|
|
|
if !ok {
|
|
|
|
return fmt.Errorf("invalid type for response: %T", u.Result)
|
|
|
|
}
|
|
|
|
|
|
|
|
pq := strings.TrimPrefix(u.CorrelationID, DestinationGatewayID)
|
|
|
|
uid := UpstreamIDFromString(pq)
|
|
|
|
snap.ConnectProxy.DestinationGateways.Set(uid, resp.Nodes)
|
2020-12-24 19:11:13 +00:00
|
|
|
case strings.HasPrefix(u.CorrelationID, "upstream:"+preparedQueryIDPrefix):
|
|
|
|
resp, ok := u.Result.(*structs.PreparedQueryExecuteResponse)
|
|
|
|
if !ok {
|
|
|
|
return fmt.Errorf("invalid type for response: %T", u.Result)
|
|
|
|
}
|
|
|
|
pq := strings.TrimPrefix(u.CorrelationID, "upstream:")
|
2022-01-20 16:12:04 +00:00
|
|
|
uid := UpstreamIDFromString(pq)
|
|
|
|
snap.ConnectProxy.PreparedQueryEndpoints[uid] = resp.Nodes
|
2020-12-24 19:11:13 +00:00
|
|
|
|
|
|
|
case strings.HasPrefix(u.CorrelationID, svcChecksWatchIDPrefix):
|
|
|
|
resp, ok := u.Result.([]structs.CheckType)
|
|
|
|
if !ok {
|
|
|
|
return fmt.Errorf("invalid type for service checks response: %T, want: []structs.CheckType", u.Result)
|
|
|
|
}
|
|
|
|
svcID := structs.ServiceIDFromString(strings.TrimPrefix(u.CorrelationID, svcChecksWatchIDPrefix))
|
|
|
|
snap.ConnectProxy.WatchedServiceChecks[svcID] = resp
|
|
|
|
|
|
|
|
default:
|
|
|
|
return (*handlerUpstreams)(s).handleUpdateUpstreams(ctx, u, snap)
|
|
|
|
}
|
|
|
|
return nil
|
|
|
|
}
|
2023-03-10 20:52:54 +00:00
|
|
|
|
2023-05-16 18:36:05 +00:00
|
|
|
// telemetryCollectorConfig represents the basic opaque config values for pushing telemetry to
// a consul telemetry collector. It is decoded from the proxy registration's
// opaque Config map via mapstructure (see parseTelemetryCollectorConfig).
type telemetryCollectorConfig struct {
	// TelemetryCollectorBindSocketDir is a string that configures the directory for a
	// unix socket where Envoy will forward metrics. These metrics get pushed to
	// the Consul Telemetry collector. An empty value means telemetry
	// collection is disabled for this proxy.
	TelemetryCollectorBindSocketDir string `mapstructure:"envoy_telemetry_collector_bind_socket_dir"`
}
|
|
|
|
|
2023-05-16 18:36:05 +00:00
|
|
|
func parseTelemetryCollectorConfig(m map[string]interface{}) (telemetryCollectorConfig, error) {
|
|
|
|
var cfg telemetryCollectorConfig
|
2023-03-10 20:52:54 +00:00
|
|
|
err := mapstructure.WeakDecode(m, &cfg)
|
|
|
|
|
|
|
|
if err != nil {
|
|
|
|
return cfg, fmt.Errorf("failed to decode: %w", err)
|
|
|
|
}
|
|
|
|
|
|
|
|
return cfg, nil
|
|
|
|
}
|
|
|
|
|
2023-05-16 18:36:05 +00:00
|
|
|
// maybeInitializeTelemetryCollectorWatches will initialize a synthetic upstream and discovery chain
|
|
|
|
// watch for the consul telemetry collector, if telemetry data collection is enabled on the proxy registration.
|
|
|
|
func (s *handlerConnectProxy) maybeInitializeTelemetryCollectorWatches(ctx context.Context, snap ConfigSnapshot) error {
|
|
|
|
cfg, err := parseTelemetryCollectorConfig(s.proxyCfg.Config)
|
2023-03-10 20:52:54 +00:00
|
|
|
if err != nil {
|
|
|
|
s.logger.Error("failed to parse connect.proxy.config", "error", err)
|
|
|
|
}
|
|
|
|
|
2023-05-16 18:36:05 +00:00
|
|
|
if cfg.TelemetryCollectorBindSocketDir == "" {
|
|
|
|
// telemetry collection is not enabled, return early.
|
2023-03-10 20:52:54 +00:00
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// The path includes the proxy ID so that when multiple proxies are on the same host
|
2023-05-16 18:36:05 +00:00
|
|
|
// they each have a distinct path to send their telemetry data.
|
2023-05-09 18:20:26 +00:00
|
|
|
id := s.proxyID.NamespaceOrDefault() + "_" + s.proxyID.ID
|
|
|
|
|
|
|
|
// UNIX domain sockets paths have a max length of 108, so we take a hash of the compound ID
|
|
|
|
// to limit the length of the socket path.
|
|
|
|
h := sha1.New()
|
|
|
|
h.Write([]byte(id))
|
|
|
|
hash := base64.RawURLEncoding.EncodeToString(h.Sum(nil))
|
2023-05-16 18:36:05 +00:00
|
|
|
path := path.Join(cfg.TelemetryCollectorBindSocketDir, hash+".sock")
|
2023-03-10 20:52:54 +00:00
|
|
|
|
|
|
|
upstream := structs.Upstream{
|
|
|
|
DestinationNamespace: acl.DefaultNamespaceName,
|
|
|
|
DestinationPartition: s.proxyID.PartitionOrDefault(),
|
2023-05-16 18:36:05 +00:00
|
|
|
DestinationName: api.TelemetryCollectorName,
|
2023-03-10 20:52:54 +00:00
|
|
|
LocalBindSocketPath: path,
|
|
|
|
Config: map[string]interface{}{
|
|
|
|
"protocol": "grpc",
|
|
|
|
},
|
|
|
|
}
|
|
|
|
uid := NewUpstreamID(&upstream)
|
|
|
|
snap.ConnectProxy.UpstreamConfig[uid] = &upstream
|
|
|
|
|
|
|
|
err = s.dataSources.CompiledDiscoveryChain.Notify(ctx, &structs.DiscoveryChainRequest{
|
|
|
|
Datacenter: s.source.Datacenter,
|
|
|
|
QueryOptions: structs.QueryOptions{Token: s.token},
|
|
|
|
Name: upstream.DestinationName,
|
|
|
|
EvaluateInDatacenter: s.source.Datacenter,
|
|
|
|
EvaluateInNamespace: uid.NamespaceOrDefault(),
|
|
|
|
EvaluateInPartition: uid.PartitionOrDefault(),
|
|
|
|
}, "discovery-chain:"+uid.String(), s.ch)
|
|
|
|
if err != nil {
|
|
|
|
return fmt.Errorf("failed to watch discovery chain for %s: %v", uid.String(), err)
|
|
|
|
}
|
|
|
|
return nil
|
|
|
|
}
|