package proxycfg

import (
	"context"
	"errors"
	"fmt"
	"reflect"
	"strings"
	"time"

	"github.com/hashicorp/consul/agent/cache"
	cachetype "github.com/hashicorp/consul/agent/cache-types"
	"github.com/hashicorp/consul/agent/structs"
	"github.com/hashicorp/consul/logging"
	"github.com/hashicorp/go-hclog"
	"github.com/mitchellh/copystructure"
	"github.com/mitchellh/mapstructure"
)

type CacheNotifier interface {
	Notify(ctx context.Context, t string, r cache.Request,
		correlationID string, ch chan<- cache.UpdateEvent) error
}
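
// Illustrative sketch, not part of the original file: a minimal CacheNotifier
// stub of the sort a test might supply in place of the agent cache. It pushes
// one canned update per Notify call and respects context cancellation, which
// is all the state type below relies on.
type exampleStaticCacheNotifier struct {
	// canned results keyed by correlation ID (hypothetical test data)
	results map[string]interface{}
}

func (n *exampleStaticCacheNotifier) Notify(ctx context.Context, t string, r cache.Request,
	correlationID string, ch chan<- cache.UpdateEvent) error {
	go func() {
		select {
		case ch <- cache.UpdateEvent{CorrelationID: correlationID, Result: n.results[correlationID]}:
		case <-ctx.Done():
		}
	}()
	return nil
}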

const (
	coalesceTimeout                    = 200 * time.Millisecond
	rootsWatchID                       = "roots"
	leafWatchID                        = "leaf"
	intentionsWatchID                  = "intentions"
	serviceListWatchID                 = "service-list"
	federationStateListGatewaysWatchID = "federation-state-list-mesh-gateways"
	consulServerListWatchID            = "consul-server-list"
	datacentersWatchID                 = "datacenters"
	serviceResolversWatchID            = "service-resolvers"
	svcChecksWatchIDPrefix             = cachetype.ServiceHTTPChecksName + ":"
	serviceIDPrefix                    = string(structs.UpstreamDestTypeService) + ":"
	preparedQueryIDPrefix              = string(structs.UpstreamDestTypePreparedQuery) + ":"
	defaultPreparedQueryPollInterval   = 30 * time.Second
)

// state holds all the state needed to maintain the config for a registered
// connect-proxy service. When a proxy registration is changed, the entire state
// is discarded and a new one created.
type state struct {
	// logger, source and cache are required to be set before calling Watch.
	logger      hclog.Logger
	source      *structs.QuerySource
	cache       CacheNotifier
	serverSNIFn ServerSNIFunc

	// ctx and cancel store the context created during the initWatches call
	ctx    context.Context
	cancel func()

	kind            structs.ServiceKind
	service         string
	proxyID         structs.ServiceID
	address         string
	port            int
	meta            map[string]string
	taggedAddresses map[string]structs.ServiceAddress
	proxyCfg        structs.ConnectProxyConfig
	token           string

	ch     chan cache.UpdateEvent
	snapCh chan ConfigSnapshot
	reqCh  chan chan *ConfigSnapshot
}

type ServerSNIFunc func(dc, nodeName string) string

func copyProxyConfig(ns *structs.NodeService) (structs.ConnectProxyConfig, error) {
	if ns == nil {
		return structs.ConnectProxyConfig{}, nil
	}
	// Copy the config map
	proxyCfgRaw, err := copystructure.Copy(ns.Proxy)
	if err != nil {
		return structs.ConnectProxyConfig{}, err
	}
	proxyCfg, ok := proxyCfgRaw.(structs.ConnectProxyConfig)
	if !ok {
		return structs.ConnectProxyConfig{}, errors.New("failed to copy proxy config")
	}

	// We can safely modify these since we just copied them.
	for idx := range proxyCfg.Upstreams {
		us := &proxyCfg.Upstreams[idx]
		if us.DestinationType != structs.UpstreamDestTypePreparedQuery && us.DestinationNamespace == "" {
			// Default the upstream's target namespace to the namespace of the
			// proxy. Doing this here avoids much more complex logic in a bunch
			// of other places and makes tracking these upstreams simpler, as we
			// can dedup them with the maps tracking upstream IDs being watched.
			proxyCfg.Upstreams[idx].DestinationNamespace = ns.EnterpriseMeta.NamespaceOrDefault()
		}
	}

	return proxyCfg, nil
}

// newState populates the state struct by copying relevant fields from the
// NodeService and token. We copy so that we can use them in a separate
// goroutine later without reasoning about races with the NodeService passed
// (especially for embedded fields like maps and slices).
//
// The returned state needs its required dependencies to be set before Watch
// can be called.
func newState(ns *structs.NodeService, token string) (*state, error) {
	if ns.Kind != structs.ServiceKindConnectProxy && ns.Kind != structs.ServiceKindMeshGateway {
		return nil, errors.New("not a connect-proxy or mesh-gateway")
	}

	proxyCfg, err := copyProxyConfig(ns)
	if err != nil {
		return nil, err
	}

	taggedAddresses := make(map[string]structs.ServiceAddress)
	for k, v := range ns.TaggedAddresses {
		taggedAddresses[k] = v
	}

	meta := make(map[string]string)
	for k, v := range ns.Meta {
		meta[k] = v
	}

	return &state{
		kind:            ns.Kind,
		service:         ns.Service,
		proxyID:         ns.CompoundServiceID(),
		address:         ns.Address,
		port:            ns.Port,
		meta:            meta,
		taggedAddresses: taggedAddresses,
		proxyCfg:        proxyCfg,
		token:           token,
		// 10 is fairly arbitrary here but allows for the 3 mandatory watches and
		// a reasonable number of upstream watches to all deliver their initial
		// messages in parallel without blocking the cache.Notify loops. It's not
		// a huge deal if we do block for a short period, so we don't need to be
		// more conservative to handle larger numbers of upstreams correctly, but
		// this gives some head room for normal operation to be non-blocking in
		// most typical cases.
		ch:     make(chan cache.UpdateEvent, 10),
		snapCh: make(chan ConfigSnapshot, 1),
		reqCh:  make(chan chan *ConfigSnapshot, 1),
	}, nil
}

// Watch initializes watches on all necessary cache data for the current proxy
// registration state and returns a chan to observe updates to the
// ConfigSnapshot that contains all necessary config state. The chan is closed
// when the state is Closed.
func (s *state) Watch() (<-chan ConfigSnapshot, error) {
	s.ctx, s.cancel = context.WithCancel(context.Background())

	err := s.initWatches()
	if err != nil {
		s.cancel()
		return nil, err
	}

	go s.run()

	return s.snapCh, nil
}

// Close discards the state and stops any long-running watches.
func (s *state) Close() error {
	if s.cancel != nil {
		s.cancel()
	}
	return nil
}
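
// Illustrative sketch, not part of the original file: the intended lifecycle of
// a state. A caller such as this package's manager would perform an equivalent
// sequence; the logger, source, and cache values here are assumed to be
// supplied by that caller (see the "required to be set before calling Watch"
// note on the state struct above).
func exampleStateLifecycle(ns *structs.NodeService, token string,
	logger hclog.Logger, source *structs.QuerySource, c CacheNotifier) error {

	s, err := newState(ns, token)
	if err != nil {
		return err
	}

	// Wire up the required dependencies before starting any watches.
	s.logger = logger
	s.source = source
	s.cache = c

	snapCh, err := s.Watch()
	if err != nil {
		return err
	}
	defer s.Close()

	// Each received ConfigSnapshot is a complete, deep-copied view of the
	// proxy's config; a real consumer would push it to the xDS layer. The
	// channel is closed when the state is Closed.
	for snap := range snapCh {
		_ = snap
	}
	return nil
}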

// initWatches sets up the watches needed for the particular service
func (s *state) initWatches() error {
	switch s.kind {
	case structs.ServiceKindConnectProxy:
		return s.initWatchesConnectProxy()
	case structs.ServiceKindMeshGateway:
		return s.initWatchesMeshGateway()
	default:
		return fmt.Errorf("Unsupported service kind")
	}
}

func (s *state) watchMeshGateway(ctx context.Context, dc string, upstreamID string) error {
	return s.cache.Notify(ctx, cachetype.InternalServiceDumpName, &structs.ServiceDumpRequest{
		Datacenter:     dc,
		QueryOptions:   structs.QueryOptions{Token: s.token},
		ServiceKind:    structs.ServiceKindMeshGateway,
		UseServiceKind: true,
		Source:         *s.source,
		EnterpriseMeta: *structs.DefaultEnterpriseMeta(),
	}, "mesh-gateway:"+dc+":"+upstreamID, s.ch)
}

func (s *state) watchConnectProxyService(ctx context.Context, correlationId string, service string, dc string, filter string, entMeta *structs.EnterpriseMeta) error {
	var finalMeta structs.EnterpriseMeta
	finalMeta.Merge(entMeta)

	return s.cache.Notify(ctx, cachetype.HealthServicesName, &structs.ServiceSpecificRequest{
		Datacenter: dc,
		QueryOptions: structs.QueryOptions{
			Token:  s.token,
			Filter: filter,
		},
		ServiceName: service,
		Connect:     true,
		// Note that Identifier doesn't type-prefix for service any more as it's
		// the default and makes metrics and other things much cleaner. It's
		// simpler for us if we have the type to make things unambiguous.
		Source:         *s.source,
		EnterpriseMeta: finalMeta,
	}, correlationId, s.ch)
}

// initWatchesConnectProxy sets up the watches needed based on current proxy registration
// state.
func (s *state) initWatchesConnectProxy() error {
	// Watch for root changes
	err := s.cache.Notify(s.ctx, cachetype.ConnectCARootName, &structs.DCSpecificRequest{
		Datacenter:   s.source.Datacenter,
		QueryOptions: structs.QueryOptions{Token: s.token},
		Source:       *s.source,
	}, rootsWatchID, s.ch)
	if err != nil {
		return err
	}

	// Watch the leaf cert
	err = s.cache.Notify(s.ctx, cachetype.ConnectCALeafName, &cachetype.ConnectCALeafRequest{
		Datacenter:     s.source.Datacenter,
		Token:          s.token,
		Service:        s.proxyCfg.DestinationServiceName,
		EnterpriseMeta: s.proxyID.EnterpriseMeta,
	}, leafWatchID, s.ch)
	if err != nil {
		return err
	}

	// Watch for intention updates
	err = s.cache.Notify(s.ctx, cachetype.IntentionMatchName, &structs.IntentionQueryRequest{
		Datacenter:   s.source.Datacenter,
		QueryOptions: structs.QueryOptions{Token: s.token},
		Match: &structs.IntentionQueryMatch{
			Type: structs.IntentionMatchDestination,
			Entries: []structs.IntentionMatchEntry{
				{
					Namespace: s.proxyID.NamespaceOrDefault(),
					Name:      s.proxyCfg.DestinationServiceName,
				},
			},
		},
	}, intentionsWatchID, s.ch)
	if err != nil {
		return err
	}

	// Watch for service check updates
	err = s.cache.Notify(s.ctx, cachetype.ServiceHTTPChecksName, &cachetype.ServiceHTTPChecksRequest{
		ServiceID:      s.proxyCfg.DestinationServiceID,
		EnterpriseMeta: s.proxyID.EnterpriseMeta,
	}, svcChecksWatchIDPrefix+structs.ServiceIDString(s.proxyCfg.DestinationServiceID, &s.proxyID.EnterpriseMeta), s.ch)
	if err != nil {
		return err
	}

	// default the namespace to the namespace of this proxy service
	currentNamespace := s.proxyID.NamespaceOrDefault()

	// Watch for updates to service endpoints for all upstreams
	for _, u := range s.proxyCfg.Upstreams {
		dc := s.source.Datacenter
		if u.Datacenter != "" {
			dc = u.Datacenter
		}

		ns := currentNamespace
		if u.DestinationNamespace != "" {
			ns = u.DestinationNamespace
		}

		cfg, err := parseReducedUpstreamConfig(u.Config)
		if err != nil {
			// Don't hard fail on a config typo, just warn. We'll fall back on
			// the plain discovery chain if there is an error so it's safe to
			// continue.
			s.logger.Warn("failed to parse upstream config",
				"upstream", u.Identifier(),
				"error", err,
			)
		}

		switch u.DestinationType {
		case structs.UpstreamDestTypePreparedQuery:
			err = s.cache.Notify(s.ctx, cachetype.PreparedQueryName, &structs.PreparedQueryExecuteRequest{
				Datacenter:    dc,
				QueryOptions:  structs.QueryOptions{Token: s.token, MaxAge: defaultPreparedQueryPollInterval},
				QueryIDOrName: u.DestinationName,
				Connect:       true,
				Source:        *s.source,
			}, "upstream:"+u.Identifier(), s.ch)
			if err != nil {
				return err
			}

		case structs.UpstreamDestTypeService:
			fallthrough

		case "": // Treat unset as the default Service type
			err = s.cache.Notify(s.ctx, cachetype.CompiledDiscoveryChainName, &structs.DiscoveryChainRequest{
				Datacenter:             s.source.Datacenter,
				QueryOptions:           structs.QueryOptions{Token: s.token},
				Name:                   u.DestinationName,
				EvaluateInDatacenter:   dc,
				EvaluateInNamespace:    ns,
				OverrideMeshGateway:    s.proxyCfg.MeshGateway.OverlayWith(u.MeshGateway),
				OverrideProtocol:       cfg.Protocol,
				OverrideConnectTimeout: cfg.ConnectTimeout(),
			}, "discovery-chain:"+u.Identifier(), s.ch)
			if err != nil {
				return err
			}

		default:
			return fmt.Errorf("unknown upstream type: %q", u.DestinationType)
		}
	}
	return nil
}

// reducedUpstreamConfig represents the basic opaque config values that are now
// managed with the discovery chain but for backwards compatibility reasons
// should still affect how the proxy is configured.
//
// The full-blown config is agent/xds.UpstreamConfig
type reducedUpstreamConfig struct {
	Protocol         string `mapstructure:"protocol"`
	ConnectTimeoutMs int    `mapstructure:"connect_timeout_ms"`
}

func (c *reducedUpstreamConfig) ConnectTimeout() time.Duration {
	return time.Duration(c.ConnectTimeoutMs) * time.Millisecond
}

func parseReducedUpstreamConfig(m map[string]interface{}) (reducedUpstreamConfig, error) {
	var cfg reducedUpstreamConfig
	err := mapstructure.WeakDecode(m, &cfg)
	return cfg, err
}
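
// Illustrative sketch, not part of the original file: how the opaque upstream
// config map decodes. WeakDecode coerces loosely-typed values (for example a
// numeric string from JSON/HCL) into the typed fields and ignores unrelated
// keys. When decoding does fail, the caller above only logs a warning and
// falls back to the plain discovery chain.
func exampleParseReducedUpstreamConfig() (string, time.Duration, error) {
	cfg, err := parseReducedUpstreamConfig(map[string]interface{}{
		"protocol":           "http",
		"connect_timeout_ms": "7000", // weakly decoded into the int field
		"unrelated_key":      true,   // not part of reducedUpstreamConfig; ignored
	})
	if err != nil {
		return "", 0, err
	}
	return cfg.Protocol, cfg.ConnectTimeout(), nil // "http", 7s
}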

// initWatchesMeshGateway sets up the watches needed based on the current mesh gateway registration
func (s *state) initWatchesMeshGateway() error {
	// Watch for root changes
	err := s.cache.Notify(s.ctx, cachetype.ConnectCARootName, &structs.DCSpecificRequest{
		Datacenter:   s.source.Datacenter,
		QueryOptions: structs.QueryOptions{Token: s.token},
		Source:       *s.source,
	}, rootsWatchID, s.ch)
	if err != nil {
		return err
	}

	// Watch for all services
	err = s.cache.Notify(s.ctx, cachetype.CatalogServiceListName, &structs.DCSpecificRequest{
		Datacenter:     s.source.Datacenter,
		QueryOptions:   structs.QueryOptions{Token: s.token},
		Source:         *s.source,
		EnterpriseMeta: *structs.WildcardEnterpriseMeta(),
	}, serviceListWatchID, s.ch)

	if err != nil {
		return err
	}

	if s.meta[structs.MetaWANFederationKey] == "1" {
		// Conveniently we can just use this service meta attribute in one
		// place here to set the machinery in motion and leave the conditional
		// behavior out of the rest of the package.
		err = s.cache.Notify(s.ctx, cachetype.FederationStateListMeshGatewaysName, &structs.DCSpecificRequest{
			Datacenter:   s.source.Datacenter,
			QueryOptions: structs.QueryOptions{Token: s.token},
			Source:       *s.source,
		}, federationStateListGatewaysWatchID, s.ch)
		if err != nil {
			return err
		}

		err = s.cache.Notify(s.ctx, cachetype.HealthServicesName, &structs.ServiceSpecificRequest{
			Datacenter:   s.source.Datacenter,
			QueryOptions: structs.QueryOptions{Token: s.token},
			ServiceName:  structs.ConsulServiceName,
		}, consulServerListWatchID, s.ch)
		if err != nil {
			return err
		}
	}

	// Eventually we will have to watch connect enabled instances for each service
	// as well as the destination services themselves, but those notifications will
	// be set up later. We cannot set up those watches until we know what the
	// services are, from the service list watch above.

	err = s.cache.Notify(s.ctx, cachetype.CatalogDatacentersName, &structs.DatacentersRequest{
		QueryOptions: structs.QueryOptions{Token: s.token, MaxAge: 30 * time.Second},
	}, datacentersWatchID, s.ch)
	if err != nil {
		return err
	}

	// Once we start getting notified about the datacenters we will set up watches
	// on the gateways within those other datacenters. We cannot do that here
	// because we don't know what they are yet.

	// Watch service-resolvers so we can set up service subset clusters
	err = s.cache.Notify(s.ctx, cachetype.ConfigEntriesName, &structs.ConfigEntryQuery{
		Datacenter:     s.source.Datacenter,
		QueryOptions:   structs.QueryOptions{Token: s.token},
		Kind:           structs.ServiceResolver,
		EnterpriseMeta: *structs.WildcardEnterpriseMeta(),
	}, serviceResolversWatchID, s.ch)

	if err != nil {
		s.logger.Named(logging.MeshGateway).
			Error("failed to register watch for service-resolver config entries", "error", err)
		return err
	}

	return err
}

func (s *state) initialConfigSnapshot() ConfigSnapshot {
	snap := ConfigSnapshot{
		Kind:            s.kind,
		Service:         s.service,
		ProxyID:         s.proxyID,
		Address:         s.address,
		Port:            s.port,
		ServiceMeta:     s.meta,
		TaggedAddresses: s.taggedAddresses,
		Proxy:           s.proxyCfg,
		Datacenter:      s.source.Datacenter,
		ServerSNIFn:     s.serverSNIFn,
	}

	switch s.kind {
	case structs.ServiceKindConnectProxy:
		snap.ConnectProxy.DiscoveryChain = make(map[string]*structs.CompiledDiscoveryChain)
		snap.ConnectProxy.WatchedUpstreams = make(map[string]map[string]context.CancelFunc)
		snap.ConnectProxy.WatchedUpstreamEndpoints = make(map[string]map[string]structs.CheckServiceNodes)
		snap.ConnectProxy.WatchedGateways = make(map[string]map[string]context.CancelFunc)
		snap.ConnectProxy.WatchedGatewayEndpoints = make(map[string]map[string]structs.CheckServiceNodes)
		snap.ConnectProxy.WatchedServiceChecks = make(map[structs.ServiceID][]structs.CheckType)
		snap.ConnectProxy.PreparedQueryEndpoints = make(map[string]structs.CheckServiceNodes)

	case structs.ServiceKindMeshGateway:
		snap.MeshGateway.WatchedServices = make(map[structs.ServiceID]context.CancelFunc)
		snap.MeshGateway.WatchedDatacenters = make(map[string]context.CancelFunc)
		snap.MeshGateway.ServiceGroups = make(map[structs.ServiceID]structs.CheckServiceNodes)
		snap.MeshGateway.GatewayGroups = make(map[string]structs.CheckServiceNodes)
		snap.MeshGateway.ServiceResolvers = make(map[structs.ServiceID]*structs.ServiceResolverConfigEntry)
		// The service resolver map is rebuilt in full every time we get config
		// entry updates, so nothing else needs to be carried over here.
	}

	return snap
}

func (s *state) run() {
	// Close the channel we return from Watch when we stop so consumers can stop
	// watching and clean up their goroutines. It's important we do this here and
	// not in Close since this routine sends on this chan and so might panic if it
	// gets closed from another goroutine.
	defer close(s.snapCh)

	snap := s.initialConfigSnapshot()

	// This turns out to be really fiddly/painful by just using time.Timer.C
	// directly in the code below since you can't detect when a timer is stopped
	// vs waiting in order to know to reset it. So just use a chan to send
	// ourselves messages.
	sendCh := make(chan struct{})
	var coalesceTimer *time.Timer

	for {
		select {
		case <-s.ctx.Done():
			return
		case u := <-s.ch:
			if err := s.handleUpdate(u, &snap); err != nil {
				s.logger.Error("watch error",
					"id", u.CorrelationID,
					"error", err,
				)
				continue
			}

		case <-sendCh:
			// Make a deep copy of snap so we don't mutate any of the embedded structs
			// etc on future updates.
			snapCopy, err := snap.Clone()
			if err != nil {
				s.logger.Error("Failed to copy config snapshot for proxy",
					"proxy", s.proxyID,
					"error", err,
				)
				continue
			}
			s.snapCh <- *snapCopy
			// Allow the next change to trigger a send
			coalesceTimer = nil

			// Skip rest of loop - there is nothing to send since nothing changed on
			// this iteration
			continue

		case replyCh := <-s.reqCh:
			if !snap.Valid() {
				// Not valid yet just respond with nil and move on to next task.
				replyCh <- nil
				continue
			}
			// Make a deep copy of snap so we don't mutate any of the embedded structs
			// etc on future updates.
			snapCopy, err := snap.Clone()
			if err != nil {
				s.logger.Error("Failed to copy config snapshot for proxy",
					"proxy", s.proxyID,
					"error", err,
				)
				continue
			}
			replyCh <- snapCopy

			// Skip rest of loop - there is nothing to send since nothing changed on
			// this iteration
			continue
		}

		// Check if snap is complete enough to be a valid config to deliver to a
		// proxy yet.
		if snap.Valid() {
			// Don't send it right away, set a short timer that will wait for updates
			// from any of the other cache values and deliver them all together.
			if coalesceTimer == nil {
				coalesceTimer = time.AfterFunc(coalesceTimeout, func() {
					// This runs in another goroutine so we can't just do the send
					// directly here as access to snap is racy. Instead, signal the main
					// loop above.
					sendCh <- struct{}{}
				})
			}
		}
	}
}

func (s *state) handleUpdate(u cache.UpdateEvent, snap *ConfigSnapshot) error {
	switch s.kind {
	case structs.ServiceKindConnectProxy:
		return s.handleUpdateConnectProxy(u, snap)
	case structs.ServiceKindMeshGateway:
		return s.handleUpdateMeshGateway(u, snap)
	default:
		return fmt.Errorf("Unsupported service kind")
	}
}

func (s *state) handleUpdateConnectProxy(u cache.UpdateEvent, snap *ConfigSnapshot) error {
	if u.Err != nil {
		return fmt.Errorf("error filling agent cache: %v", u.Err)
	}

	switch {
	case u.CorrelationID == rootsWatchID:
		roots, ok := u.Result.(*structs.IndexedCARoots)
		if !ok {
			return fmt.Errorf("invalid type for response: %T", u.Result)
		}
		snap.Roots = roots

	case u.CorrelationID == leafWatchID:
		leaf, ok := u.Result.(*structs.IssuedCert)
		if !ok {
			return fmt.Errorf("invalid type for response: %T", u.Result)
		}
		snap.ConnectProxy.Leaf = leaf

	case u.CorrelationID == intentionsWatchID:
		// Not in snapshot currently, no op

	case strings.HasPrefix(u.CorrelationID, "discovery-chain:"):
		resp, ok := u.Result.(*structs.DiscoveryChainResponse)
		if !ok {
			return fmt.Errorf("invalid type for response: %T", u.Result)
		}
		svc := strings.TrimPrefix(u.CorrelationID, "discovery-chain:")
		snap.ConnectProxy.DiscoveryChain[svc] = resp.Chain

		if err := s.resetWatchesFromChain(svc, resp.Chain, snap); err != nil {
			return err
		}

	case strings.HasPrefix(u.CorrelationID, "upstream-target:"):
		resp, ok := u.Result.(*structs.IndexedCheckServiceNodes)
		if !ok {
			return fmt.Errorf("invalid type for response: %T", u.Result)
		}
		correlationID := strings.TrimPrefix(u.CorrelationID, "upstream-target:")
		targetID, svc, ok := removeColonPrefix(correlationID)
		if !ok {
			return fmt.Errorf("invalid correlation id %q", u.CorrelationID)
		}

		m, ok := snap.ConnectProxy.WatchedUpstreamEndpoints[svc]
		if !ok {
			m = make(map[string]structs.CheckServiceNodes)
			snap.ConnectProxy.WatchedUpstreamEndpoints[svc] = m
		}
		snap.ConnectProxy.WatchedUpstreamEndpoints[svc][targetID] = resp.Nodes

	case strings.HasPrefix(u.CorrelationID, "mesh-gateway:"):
		resp, ok := u.Result.(*structs.IndexedCheckServiceNodes)
		if !ok {
			return fmt.Errorf("invalid type for response: %T", u.Result)
		}
		correlationID := strings.TrimPrefix(u.CorrelationID, "mesh-gateway:")
		dc, svc, ok := removeColonPrefix(correlationID)
		if !ok {
			return fmt.Errorf("invalid correlation id %q", u.CorrelationID)
		}
		m, ok := snap.ConnectProxy.WatchedGatewayEndpoints[svc]
		if !ok {
			m = make(map[string]structs.CheckServiceNodes)
			snap.ConnectProxy.WatchedGatewayEndpoints[svc] = m
		}
		snap.ConnectProxy.WatchedGatewayEndpoints[svc][dc] = resp.Nodes

	case strings.HasPrefix(u.CorrelationID, "upstream:"+preparedQueryIDPrefix):
		resp, ok := u.Result.(*structs.PreparedQueryExecuteResponse)
		if !ok {
			return fmt.Errorf("invalid type for response: %T", u.Result)
		}
		pq := strings.TrimPrefix(u.CorrelationID, "upstream:")
		snap.ConnectProxy.PreparedQueryEndpoints[pq] = resp.Nodes

	case strings.HasPrefix(u.CorrelationID, svcChecksWatchIDPrefix):
		resp, ok := u.Result.([]structs.CheckType)
		if !ok {
			return fmt.Errorf("invalid type for service checks response: %T, want: []structs.CheckType", u.Result)
		}
		svcID := structs.ServiceIDFromString(strings.TrimPrefix(u.CorrelationID, svcChecksWatchIDPrefix))
		snap.ConnectProxy.WatchedServiceChecks[svcID] = resp

	default:
		return fmt.Errorf("unknown correlation ID: %s", u.CorrelationID)
	}
	return nil
}

func removeColonPrefix(s string) (string, string, bool) {
	idx := strings.Index(s, ":")
	if idx == -1 {
		return "", "", false
	}
	return s[0:idx], s[idx+1:], true
}
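
// Illustrative sketch, not part of the original file: the correlation IDs built
// for upstream target watches in resetWatchesFromChain below have the form
// "upstream-target:<targetID>:<upstreamID>", and removeColonPrefix peels off
// one colon-delimited field at a time. This mirrors how handleUpdateConnectProxy
// recovers the target and upstream identifiers from an update event.
func exampleSplitUpstreamTargetCorrelationID(correlationID string) (targetID, upstreamID string, ok bool) {
	rest := strings.TrimPrefix(correlationID, "upstream-target:")
	return removeColonPrefix(rest)
}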

func (s *state) resetWatchesFromChain(
	id string,
	chain *structs.CompiledDiscoveryChain,
	snap *ConfigSnapshot,
) error {
	s.logger.Trace("resetting watches for discovery chain", "id", id)
	if chain == nil {
		return fmt.Errorf("not possible to arrive here with no discovery chain")
	}

	// Initialize relevant sub maps.
	if _, ok := snap.ConnectProxy.WatchedUpstreams[id]; !ok {
		snap.ConnectProxy.WatchedUpstreams[id] = make(map[string]context.CancelFunc)
	}
	if _, ok := snap.ConnectProxy.WatchedUpstreamEndpoints[id]; !ok {
		snap.ConnectProxy.WatchedUpstreamEndpoints[id] = make(map[string]structs.CheckServiceNodes)
	}
	if _, ok := snap.ConnectProxy.WatchedGateways[id]; !ok {
		snap.ConnectProxy.WatchedGateways[id] = make(map[string]context.CancelFunc)
	}
	if _, ok := snap.ConnectProxy.WatchedGatewayEndpoints[id]; !ok {
		snap.ConnectProxy.WatchedGatewayEndpoints[id] = make(map[string]structs.CheckServiceNodes)
	}

	// We could invalidate this selectively based on a hash of the relevant
	// resolver information, but for now just reset anything about this
	// upstream when the chain changes in any way.
	//
	// TODO(rb): content hash based add/remove
	for targetID, cancelFn := range snap.ConnectProxy.WatchedUpstreams[id] {
		s.logger.Trace("stopping watch of target",
			"upstream", id,
			"chain", chain.ServiceName,
			"target", targetID,
		)
		delete(snap.ConnectProxy.WatchedUpstreams[id], targetID)
		delete(snap.ConnectProxy.WatchedUpstreamEndpoints[id], targetID)
		cancelFn()
	}

	needGateways := make(map[string]struct{})
	for _, target := range chain.Targets {
		s.logger.Trace("initializing watch of target",
			"upstream", id,
			"chain", chain.ServiceName,
			"target", target.ID,
			"mesh-gateway-mode", target.MeshGateway.Mode,
		)

		// We'll get endpoints from the gateway query, but the health still has
		// to come from the backing service query.
		switch target.MeshGateway.Mode {
		case structs.MeshGatewayModeRemote:
			needGateways[target.Datacenter] = struct{}{}
		case structs.MeshGatewayModeLocal:
			needGateways[s.source.Datacenter] = struct{}{}
		}

		ctx, cancel := context.WithCancel(s.ctx)
		err := s.watchConnectProxyService(
			ctx,
			"upstream-target:"+target.ID+":"+id,
			target.Service,
			target.Datacenter,
			target.Subset.Filter,
			target.GetEnterpriseMetadata(),
		)
		if err != nil {
			cancel()
			return err
		}

		snap.ConnectProxy.WatchedUpstreams[id][target.ID] = cancel
	}

	for dc := range needGateways {
		if _, ok := snap.ConnectProxy.WatchedGateways[id][dc]; ok {
			continue
		}

		s.logger.Trace("initializing watch of mesh gateway in datacenter",
			"upstream", id,
			"chain", chain.ServiceName,
			"datacenter", dc,
		)

		ctx, cancel := context.WithCancel(s.ctx)
		err := s.watchMeshGateway(ctx, dc, id)
		if err != nil {
			cancel()
			return err
		}

		snap.ConnectProxy.WatchedGateways[id][dc] = cancel
	}

	for dc, cancelFn := range snap.ConnectProxy.WatchedGateways[id] {
		if _, ok := needGateways[dc]; ok {
			continue
		}
		s.logger.Trace("stopping watch of mesh gateway in datacenter",
			"upstream", id,
			"chain", chain.ServiceName,
			"datacenter", dc,
		)
		delete(snap.ConnectProxy.WatchedGateways[id], dc)
		delete(snap.ConnectProxy.WatchedGatewayEndpoints[id], dc)
		cancelFn()
	}

	return nil
}

func (s *state) handleUpdateMeshGateway(u cache.UpdateEvent, snap *ConfigSnapshot) error {
	if u.Err != nil {
		return fmt.Errorf("error filling agent cache: %v", u.Err)
	}

	meshLogger := s.logger.Named(logging.MeshGateway)

	switch u.CorrelationID {
	case rootsWatchID:
		roots, ok := u.Result.(*structs.IndexedCARoots)
		if !ok {
			return fmt.Errorf("invalid type for response: %T", u.Result)
		}
		snap.Roots = roots
	case federationStateListGatewaysWatchID:
		dcIndexedNodes, ok := u.Result.(*structs.DatacenterIndexedCheckServiceNodes)
		if !ok {
			return fmt.Errorf("invalid type for response: %T", u.Result)
		}
		snap.MeshGateway.FedStateGateways = dcIndexedNodes.DatacenterNodes
	case serviceListWatchID:
		services, ok := u.Result.(*structs.IndexedServiceList)
		if !ok {
			return fmt.Errorf("invalid type for response: %T", u.Result)
		}

		svcMap := make(map[structs.ServiceID]struct{})
		for _, svc := range services.Services {
			sid := svc.ToServiceID()
			if _, ok := snap.MeshGateway.WatchedServices[sid]; !ok {
				ctx, cancel := context.WithCancel(s.ctx)
				err := s.cache.Notify(ctx, cachetype.HealthServicesName, &structs.ServiceSpecificRequest{
					Datacenter:     s.source.Datacenter,
					QueryOptions:   structs.QueryOptions{Token: s.token},
					ServiceName:    svc.Name,
					Connect:        true,
					EnterpriseMeta: sid.EnterpriseMeta,
				}, fmt.Sprintf("connect-service:%s", sid.String()), s.ch)

				if err != nil {
					meshLogger.Error("failed to register watch for connect-service",
						"service", sid.String(),
						"error", err,
					)
					cancel()
					return err
				}
				snap.MeshGateway.WatchedServices[sid] = cancel
				svcMap[sid] = struct{}{}
			}
		}

		for sid, cancelFn := range snap.MeshGateway.WatchedServices {
			if _, ok := svcMap[sid]; !ok {
				delete(snap.MeshGateway.WatchedServices, sid)
				cancelFn()
			}
		}

		snap.MeshGateway.WatchedServicesSet = true
	case datacentersWatchID:
		datacentersRaw, ok := u.Result.(*[]string)
		if !ok {
			return fmt.Errorf("invalid type for response: %T", u.Result)
		}
		if datacentersRaw == nil {
			return fmt.Errorf("invalid response with a nil datacenter list")
		}

		datacenters := *datacentersRaw

		for _, dc := range datacenters {
			if dc == s.source.Datacenter {
				continue
			}

			if _, ok := snap.MeshGateway.WatchedDatacenters[dc]; !ok {
				ctx, cancel := context.WithCancel(s.ctx)
				err := s.cache.Notify(ctx, cachetype.InternalServiceDumpName, &structs.ServiceDumpRequest{
					Datacenter:     dc,
					QueryOptions:   structs.QueryOptions{Token: s.token},
					ServiceKind:    structs.ServiceKindMeshGateway,
					UseServiceKind: true,
					Source:         *s.source,
					EnterpriseMeta: *structs.DefaultEnterpriseMeta(),
				}, fmt.Sprintf("mesh-gateway:%s", dc), s.ch)

				if err != nil {
					meshLogger.Error("failed to register watch for mesh-gateway",
						"datacenter", dc,
						"error", err,
					)
					cancel()
					return err
				}

				snap.MeshGateway.WatchedDatacenters[dc] = cancel
			}
		}

		for dc, cancelFn := range snap.MeshGateway.WatchedDatacenters {
			found := false
			for _, dcCurrent := range datacenters {
				if dcCurrent == dc {
					found = true
					break
				}
			}

			if !found {
				delete(snap.MeshGateway.WatchedDatacenters, dc)
				cancelFn()
			}
		}
	case serviceResolversWatchID:
		configEntries, ok := u.Result.(*structs.IndexedConfigEntries)
		if !ok {
			return fmt.Errorf("invalid type for response: %T", u.Result)
		}

		resolvers := make(map[structs.ServiceID]*structs.ServiceResolverConfigEntry)
		for _, entry := range configEntries.Entries {
			if resolver, ok := entry.(*structs.ServiceResolverConfigEntry); ok {
				resolvers[structs.NewServiceID(resolver.Name, &resolver.EnterpriseMeta)] = resolver
			}
		}
		snap.MeshGateway.ServiceResolvers = resolvers

	case consulServerListWatchID:
		resp, ok := u.Result.(*structs.IndexedCheckServiceNodes)
		if !ok {
			return fmt.Errorf("invalid type for response: %T", u.Result)
		}

		// Do some initial sanity checks to avoid doing something dumb.
		for _, csn := range resp.Nodes {
			if csn.Service.Service != structs.ConsulServiceName {
				return fmt.Errorf("expected service name %q but got %q",
					structs.ConsulServiceName, csn.Service.Service)
			}
			if csn.Node.Datacenter != snap.Datacenter {
				return fmt.Errorf("expected datacenter %q but got %q",
					snap.Datacenter, csn.Node.Datacenter)
			}
		}

		snap.MeshGateway.ConsulServers = resp.Nodes

	default:
		switch {
		case strings.HasPrefix(u.CorrelationID, "connect-service:"):
			resp, ok := u.Result.(*structs.IndexedCheckServiceNodes)
			if !ok {
				return fmt.Errorf("invalid type for response: %T", u.Result)
			}

			sid := structs.ServiceIDFromString(strings.TrimPrefix(u.CorrelationID, "connect-service:"))

			if len(resp.Nodes) > 0 {
				snap.MeshGateway.ServiceGroups[sid] = resp.Nodes
			} else if _, ok := snap.MeshGateway.ServiceGroups[sid]; ok {
				delete(snap.MeshGateway.ServiceGroups, sid)
			}
		case strings.HasPrefix(u.CorrelationID, "mesh-gateway:"):
			resp, ok := u.Result.(*structs.IndexedCheckServiceNodes)
			if !ok {
				return fmt.Errorf("invalid type for response: %T", u.Result)
			}

			dc := strings.TrimPrefix(u.CorrelationID, "mesh-gateway:")

			if len(resp.Nodes) > 0 {
				snap.MeshGateway.GatewayGroups[dc] = resp.Nodes
			} else if _, ok := snap.MeshGateway.GatewayGroups[dc]; ok {
				delete(snap.MeshGateway.GatewayGroups, dc)
			}
		default:
			// do nothing for now
		}
	}

	return nil
}

// CurrentSnapshot synchronously returns the current ConfigSnapshot if there is
// one ready. If we don't have one yet because not all necessary parts have been
// returned (i.e. both roots and leaf cert), nil is returned.
func (s *state) CurrentSnapshot() *ConfigSnapshot {
	// Make a chan for the response to be sent on
	ch := make(chan *ConfigSnapshot, 1)
	s.reqCh <- ch
	// Wait for the response
	return <-ch
}

// Changed returns whether the passed NodeService has changed any of the fields
// we care about for config state watching, or whether the token has changed.
func (s *state) Changed(ns *structs.NodeService, token string) bool {
	if ns == nil {
		return true
	}

	proxyCfg, err := copyProxyConfig(ns)
	if err != nil {
		s.logger.Warn("Failed to parse proxy config and will treat the new service as unchanged")
	}

	return ns.Kind != s.kind ||
		s.proxyID != ns.CompoundServiceID() ||
		s.address != ns.Address ||
		s.port != ns.Port ||
		!reflect.DeepEqual(s.proxyCfg, proxyCfg) ||
		s.token != token
}
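
// Illustrative sketch, not part of the original file: how an owner of a state
// (such as this package's manager) might use Changed. Because state carries no
// diffing machinery, any relevant change means discarding the old state and
// building a fresh one from the new registration, as described on the state
// struct above.
func exampleEnsureProxyState(existing *state, ns *structs.NodeService, token string) (*state, error) {
	if existing != nil && !existing.Changed(ns, token) {
		// Nothing we watch depends on has changed; keep the running watches.
		return existing, nil
	}
	if existing != nil {
		existing.Close()
	}
	return newState(ns, token)
}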