// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: BUSL-1.1

package proxycfg

import (
	"context"
	"errors"
	"fmt"
	"net"
	"reflect"
	"runtime/debug"
	"sync/atomic"
	"time"

	"github.com/hashicorp/go-hclog"
	"golang.org/x/time/rate"

	cachetype "github.com/hashicorp/consul/agent/cache-types"
	"github.com/hashicorp/consul/agent/structs"
	"github.com/hashicorp/consul/logging"
)
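
// The identifiers below are watch correlation IDs and ID prefixes used to tag
// the watches registered by the kind handlers, so that each UpdateEvent can be
// matched back to the data source that produced it. coalesceTimeout and
// defaultPreparedQueryPollInterval are the timing knobs for coalescing
// snapshot deliveries and for polling prepared query results.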
const (
	coalesceTimeout = 200 * time.Millisecond

	rootsWatchID = "roots"
	peeringTrustBundlesWatchID = "peering-trust-bundles"
	leafWatchID = "leaf"
	peerTrustBundleIDPrefix = "peer-trust-bundle:"
	intentionsWatchID = "intentions"
	serviceListWatchID = "service-list"
	peeringServiceListWatchID = "peering-service-list:"
	federationStateListGatewaysWatchID = "federation-state-list-mesh-gateways"
	consulServerListWatchID = "consul-server-list"
	datacentersWatchID = "datacenters"
	serviceResolversWatchID = "service-resolvers"
	serviceDefaultsWatchID = "service-defaults"
	gatewayServicesWatchID = "gateway-services"
	gatewayConfigWatchID = "gateway-config"
	apiGatewayConfigWatchID = "api-gateway-config"
	boundGatewayConfigWatchID = "bound-gateway-config"
	fileSystemCertificateConfigWatchID = "file-system-certificate-config"
	inlineCertificateConfigWatchID = "inline-certificate-config"
	routeConfigWatchID = "route-config"
	externalServiceIDPrefix = "external-service:"
	serviceLeafIDPrefix = "service-leaf:"
	serviceConfigIDPrefix = "service-config:"
	serviceResolverIDPrefix = "service-resolver:"
	serviceIntentionsIDPrefix = "service-intentions:"
	intentionUpstreamsID = "intention-upstreams"
	jwtProviderID = "jwt-provider"
	peerServersWatchID = "peer-servers"
	peeredUpstreamsID = "peered-upstreams"
	intentionUpstreamsDestinationID = "intention-upstreams-destination"
	upstreamPeerWatchIDPrefix = "upstream-peer:"
	exportedServiceListWatchID = "exported-service-list"
	meshConfigEntryID = "mesh"
	DestinationConfigEntryID = "destination:"
	DestinationGatewayID = "dest-gateway:"
	svcChecksWatchIDPrefix = cachetype.ServiceHTTPChecksName + ":"
	preparedQueryIDPrefix = string(structs.UpstreamDestTypePreparedQuery) + ":"

	defaultPreparedQueryPollInterval = 30 * time.Second
)
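
// stateConfig holds the dependencies a proxy's state needs to run: the logger,
// data sources, query source, DNS config, server SNI function, and the default
// intention behavior. It is independent of any single proxy registration.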
type stateConfig struct {
	logger                hclog.Logger
	source                *structs.QuerySource
	dataSources           DataSources
	dnsConfig             DNSConfig
	serverSNIFn           ServerSNIFunc
	intentionDefaultAllow bool
}

// state holds all the state needed to maintain the config for a registered
// connect-proxy service. When a proxy registration is changed, the entire state
// is discarded and a new one created.
type state struct {
	source          ProxySource
	logger          hclog.Logger
	serviceInstance serviceInstance
	handler         kindHandler

	// cancel is set by Watch and called by Close to stop the goroutine started
	// in Watch.
	cancel func()

	// failedFlag is (atomically) set to 1 (by Close) when run exits because a data
	// source is in an irrecoverable state. It can be read with failed.
	failedFlag int32

	ch     chan UpdateEvent
	snapCh chan ConfigSnapshot
	reqCh  chan chan *ConfigSnapshot
	doneCh chan struct{}

	rateLimiter *rate.Limiter
}
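
// stoppedRunning reports whether the run goroutine has exited (i.e. doneCh is
// closed), meaning no further snapshots will be delivered for this state.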
func (s *state) stoppedRunning() bool {
	select {
	case <-s.doneCh:
		return true
	default:
		return false
	}
}

// failed returns whether run exited because a data source is in an
// irrecoverable state.
func (s *state) failed() bool {
	return atomic.LoadInt32(&s.failedFlag) == 1
}
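
// DNSConfig holds the primary and alternate DNS domains the agent is
// configured with.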
type DNSConfig struct {
	Domain    string
	AltDomain string
}
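
// ServerSNIFunc returns the SNI name that should be used to address a Consul
// server in the given datacenter with the given node name.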
type ServerSNIFunc func(dc, nodeName string) string
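
// serviceInstance is an immutable copy of the registration fields of the
// proxied service that this package cares about, taken so they can be used
// from the state's goroutine without racing on the caller's NodeService.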
type serviceInstance struct {
	kind            structs.ServiceKind
	service         string
	proxyID         ProxyID
	address         string
	port            int
	meta            map[string]string
	taggedAddresses map[string]structs.ServiceAddress
	proxyCfg        structs.ConnectProxyConfig
	token           string
	locality        *structs.Locality
}
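
// copyProxyConfig deep-copies the service's connect proxy configuration and
// defaults each upstream's destination partition, namespace, and peer from the
// proxy's own registration when they are left blank.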
func copyProxyConfig(ns *structs.NodeService) (structs.ConnectProxyConfig, error) {
	if ns == nil {
		return structs.ConnectProxyConfig{}, nil
	}

	proxyCfg := *(&ns.Proxy).DeepCopy()

	// we can safely modify these since we just copied them
	for idx := range proxyCfg.Upstreams {
		us := &proxyCfg.Upstreams[idx]
		if us.DestinationType != structs.UpstreamDestTypePreparedQuery {
			// Default the upstream's target namespace and partition to those of
			// the proxy. Doing this here avoids much more complex logic in a
			// bunch of other places and makes tracking these upstreams simpler,
			// since we can dedup them with the maps tracking which upstream IDs
			// are being watched.
			if us.DestinationPartition == "" {
				proxyCfg.Upstreams[idx].DestinationPartition = ns.EnterpriseMeta.PartitionOrDefault()
			}
			if us.DestinationNamespace == "" {
				proxyCfg.Upstreams[idx].DestinationNamespace = ns.EnterpriseMeta.NamespaceOrDefault()
			}

			// If PeerName is not empty, DestinationPartition refers to the local
			// partition in which the peer exists, and DestinationNamespace refers
			// to the namespace within the remote peer.
			if us.DestinationPeer == "" {
				proxyCfg.Upstreams[idx].DestinationPeer = ns.PeerName
			}
		}
	}

	return proxyCfg, nil
}

// newState populates the state struct by copying relevant fields from the
// NodeService and Token. We copy so that we can use them in a separate
// goroutine later without reasoning about races with the NodeService passed
// (especially for embedded fields like maps and slices).
//
// The returned state needs its required dependencies to be set before Watch
// can be called.
func newState(id ProxyID, ns *structs.NodeService, source ProxySource, token string, config stateConfig, rateLimiter *rate.Limiter) (*state, error) {
	// The buffer size of 10 is fairly arbitrary: it allows the 3 mandatory
	// watches and a reasonable number of upstream watches to deliver their
	// initial messages in parallel without blocking the cache.Notify loops.
	// Blocking briefly is not a huge deal, so we don't need to be more
	// conservative to handle larger numbers of upstreams correctly, but this
	// gives some headroom so normal operation is non-blocking in most typical
	// cases.
	ch := make(chan UpdateEvent, 10)

	s, err := newServiceInstanceFromNodeService(id, ns, token)
	if err != nil {
		return nil, err
	}

	handler, err := newKindHandler(config, s, ch)
	if err != nil {
		return nil, err
	}

	return &state{
		source:          source,
		logger:          config.logger.With("proxy", s.proxyID, "kind", s.kind),
		serviceInstance: s,
		handler:         handler,
		ch:              ch,
		snapCh:          make(chan ConfigSnapshot, 1),
		reqCh:           make(chan chan *ConfigSnapshot, 1),
		doneCh:          make(chan struct{}),
		rateLimiter:     rateLimiter,
	}, nil
}
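
// newKindHandler returns the kindHandler implementation responsible for
// setting up watches and building the ConfigSnapshot for the given service
// kind.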
func newKindHandler(config stateConfig, s serviceInstance, ch chan UpdateEvent) (kindHandler, error) {
	var handler kindHandler
	h := handlerState{stateConfig: config, serviceInstance: s, ch: ch}

	switch s.kind {
	case structs.ServiceKindConnectProxy:
		handler = &handlerConnectProxy{handlerState: h}
	case structs.ServiceKindTerminatingGateway:
		h.stateConfig.logger = config.logger.Named(logging.TerminatingGateway)
		handler = &handlerTerminatingGateway{handlerState: h}
	case structs.ServiceKindMeshGateway:
		h.stateConfig.logger = config.logger.Named(logging.MeshGateway)
		handler = &handlerMeshGateway{handlerState: h}
	case structs.ServiceKindIngressGateway:
		handler = &handlerIngressGateway{handlerState: h}
	case structs.ServiceKindAPIGateway:
		handler = &handlerAPIGateway{handlerState: h}
	default:
		return nil, errors.New("not a connect-proxy, terminating-gateway, mesh-gateway, ingress-gateway, or api-gateway")
	}

	return handler, nil
}
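
// newServiceInstanceFromNodeService copies the fields we watch out of the
// NodeService, including its meta and tagged-address maps, so the returned
// serviceInstance can be used without racing with the caller's copy.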
func newServiceInstanceFromNodeService(id ProxyID, ns *structs.NodeService, token string) (serviceInstance, error) {
	proxyCfg, err := copyProxyConfig(ns)
	if err != nil {
		return serviceInstance{}, err
	}

	taggedAddresses := make(map[string]structs.ServiceAddress)
	for k, v := range ns.TaggedAddresses {
		taggedAddresses[k] = v
	}

	meta := make(map[string]string)
	for k, v := range ns.Meta {
		meta[k] = v
	}

	return serviceInstance{
		kind:            ns.Kind,
		service:         ns.Service,
		locality:        ns.Locality,
		proxyID:         id,
		address:         ns.Address,
		port:            ns.Port,
		meta:            meta,
		taggedAddresses: taggedAddresses,
		proxyCfg:        proxyCfg,
		token:           token,
	}, nil
}
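
// kindHandler captures the behavior that differs between proxy kinds:
// establishing the initial set of watches and folding subsequent update
// events into the ConfigSnapshot.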
type kindHandler interface {
	initialize(ctx context.Context) (ConfigSnapshot, error)
	handleUpdate(ctx context.Context, u UpdateEvent, snap *ConfigSnapshot) error
}
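
// Typical lifecycle of a state, shown as an illustrative sketch (in practice
// the proxycfg Manager drives this; the point is only how newState, Watch,
// the snapshot channel, and Close fit together; variable names are
// placeholders):
//
//	s, err := newState(id, ns, source, token, config, limiter)
//	if err != nil {
//		return err
//	}
//	snapCh, err := s.Watch()
//	if err != nil {
//		return err
//	}
//	go func() {
//		for snap := range snapCh {
//			// deliver snap to whatever is consuming proxy config
//			_ = snap
//		}
//	}()
//	// ... later, when the registration goes away:
//	_ = s.Close(false)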

// Watch initializes watches on all necessary cache data for the current proxy
// registration state and returns a chan to observe updates to the
// ConfigSnapshot that contains all necessary config state. The chan is closed
// when the state is Closed.
func (s *state) Watch() (<-chan ConfigSnapshot, error) {
	var ctx context.Context
	ctx, s.cancel = context.WithCancel(context.Background())

	snap, err := s.handler.initialize(ctx)
	if err != nil {
		s.cancel()
		return nil, err
	}

	go s.run(ctx, &snap)

	return s.snapCh, nil
}

// Close discards the state and stops any long-running watches.
func (s *state) Close(failed bool) error {
	if s.stoppedRunning() {
		return nil
	}
	if s.cancel != nil {
		s.cancel()
	}
	if failed {
		atomic.StoreInt32(&s.failedFlag, 1)
	}
	return nil
}
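
// handlerState is the common state embedded in every kindHandler
// implementation.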
type handlerState struct {
	stateConfig     // TODO: un-embed
	serviceInstance // TODO: un-embed
	ch              chan UpdateEvent
}
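
// newConfigSnapshotFromServiceInstance seeds a ConfigSnapshot with the
// registration-derived fields from the serviceInstance plus the static
// values from the stateConfig.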
func newConfigSnapshotFromServiceInstance(s serviceInstance, config stateConfig) ConfigSnapshot {
	// TODO: use serviceInstance type in ConfigSnapshot
	return ConfigSnapshot{
		Kind:                  s.kind,
		Service:               s.service,
		ServiceLocality:       s.locality,
		ProxyID:               s.proxyID,
		Address:               s.address,
		Port:                  s.port,
		ServiceMeta:           s.meta,
		TaggedAddresses:       s.taggedAddresses,
		Proxy:                 s.proxyCfg,
		Datacenter:            config.source.Datacenter,
		Locality:              GatewayKey{Datacenter: config.source.Datacenter, Partition: s.proxyID.PartitionOrDefault()},
		ServerSNIFn:           config.serverSNIFn,
		IntentionDefaultAllow: config.intentionDefaultAllow,
	}
}
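
// run is the goroutine started by Watch. It wraps unsafeRun in a recover so a
// panic while processing updates cannot propagate up into the server / agent.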
func (s *state) run(ctx context.Context, snap *ConfigSnapshot) {
	// Add a recover here so that any panics do not make their way up
	// into the server / agent.
	defer func() {
		if r := recover(); r != nil {
			s.logger.Error("unexpected panic while running proxycfg",
				"node", s.serviceInstance.proxyID.NodeName,
				"service", s.serviceInstance.proxyID.ServiceID,
				"message", r,
				"stacktrace", string(debug.Stack()))
		}
	}()
	s.unsafeRun(ctx, snap)
}
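
// unsafeRun is the main event loop: it receives update events from the kind
// handler's watches, applies them to the snapshot, delivers coalesced,
// rate-limited copies of the snapshot on snapCh, and serves snapshot requests
// arriving on reqCh. It is "unsafe" only in that it has no panic recovery of
// its own; run provides that.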
func (s *state) unsafeRun(ctx context.Context, snap *ConfigSnapshot) {
	// Closing the done channel signals that this entire state is no longer
	// going to be updated.
	defer close(s.doneCh)
	// Close the channel we return from Watch when we stop so consumers can stop
	// watching and clean up their goroutines. It's important we do this here and
	// not in Close since this routine sends on this chan and so might panic if it
	// gets closed from another goroutine.
	defer close(s.snapCh)

	// Using time.Timer.C directly in the code below turns out to be really
	// fiddly/painful, since you can't detect whether a timer is stopped or
	// still waiting in order to know when to reset it. So just use a chan to
	// send ourselves messages.
	sendCh := make(chan struct{})
	var coalesceTimer *time.Timer

	scheduleUpdate := func() {
		// Wait for MAX(<rate limiter delay>, coalesceTimeout)
		delay := s.rateLimiter.Reserve().Delay()
		if delay < coalesceTimeout {
			delay = coalesceTimeout
		}
		coalesceTimer = time.AfterFunc(delay, func() {
			// This runs in another goroutine so we can't just do the send
			// directly here as access to snap is racy. Instead, signal the main
			// loop above.
			select {
			case sendCh <- struct{}{}:
			case <-ctx.Done():
			}
		})
	}

	for {
		select {
		case <-ctx.Done():
			return
		case u := <-s.ch:
			s.logger.Trace("Data source returned; handling snapshot update", "correlationID", u.CorrelationID)

			if IsTerminalError(u.Err) {
				s.logger.Error("Data source in an irrecoverable state; exiting", "error", u.Err, "correlationID", u.CorrelationID)
				s.Close(true)
				return
			}

			if err := s.handler.handleUpdate(ctx, u, snap); err != nil {
				s.logger.Error("Failed to handle update from watch",
					"id", u.CorrelationID, "error", err,
				)
				continue
			}

		case <-sendCh:
			// Allow the next change to trigger a send
			coalesceTimer = nil
			// Make a deep copy of snap so we don't mutate any of the embedded structs
			// etc on future updates.
			snapCopy := snap.Clone()

			select {
			// Try to send
			case s.snapCh <- *snapCopy:
				s.logger.Trace("Delivered new snapshot to proxy config watchers")

				// Skip rest of loop - there is nothing to send since nothing changed on
				// this iteration
				continue

			// Avoid blocking if a snapshot is already buffered in snapCh as this can result in a deadlock.
			// See PR #9689 for more details.
			default:
				s.logger.Trace("Failed to deliver new snapshot to proxy config watchers")

				// Reset the timer to retry later. This is to ensure we attempt to redeliver the updated snapshot shortly.
				scheduleUpdate()

				// Do not reset coalesceTimer since we just queued a timer-based refresh
				continue
			}

		case replyCh := <-s.reqCh:
			s.logger.Trace("A proxy config snapshot was requested")

			if !snap.Valid() {
				// Not valid yet; just respond with nil and move on to the next task.
				replyCh <- nil

				s.logger.Trace("The proxy's config snapshot is not valid yet")
				continue
			}
			// Make a deep copy of snap so we don't mutate any of the embedded structs
			// etc on future updates.
			replyCh <- snap.Clone()

			// Skip rest of loop - there is nothing to send since nothing changed on
			// this iteration
			continue
		}

		// Check if snap is complete enough to be a valid config to deliver to a
		// proxy yet.
		if snap.Valid() {
			if coalesceTimer == nil {
				// Don't send it right away; set a short timer that will wait for
				// updates from any of the other cache values and deliver them all
				// together.
				scheduleUpdate()
			}
		}
	}
}

// CurrentSnapshot synchronously returns the current ConfigSnapshot if there is
// one ready. If we don't have one yet because not all necessary parts have been
// returned (i.e. both roots and leaf cert), nil is returned.
func (s *state) CurrentSnapshot() *ConfigSnapshot {
	// Make a chan for the response to be sent on
	ch := make(chan *ConfigSnapshot, 1)

	select {
	case <-s.doneCh:
		return nil
	case s.reqCh <- ch:
	}

	// Wait for the response
	select {
	case <-s.doneCh:
		return nil
	case resp := <-ch:
		return resp
	}
}

// Changed returns whether the passed NodeService has changed any of the fields
// we care about for config state watching, or whether it was registered with a
// different token.
func (s *state) Changed(ns *structs.NodeService, token string) bool {
	if ns == nil {
		return true
	}

	proxyCfg, err := copyProxyConfig(ns)
	if err != nil {
		s.logger.Warn("Failed to parse proxy config and will treat the new service as unchanged")
	}

	i := s.serviceInstance
	return ns.Kind != i.kind ||
		i.address != ns.Address ||
		i.port != ns.Port ||
		!reflect.DeepEqual(i.proxyCfg, proxyCfg) ||
		i.token != token
}

// hostnameEndpoints returns all CheckServiceNodes that have hostnames instead of IPs as the address.
// Envoy cannot resolve hostnames provided through EDS, so we exclusively use CDS for these clusters.
// If there is a mix of hostnames and addresses we exclusively use the hostnames, since clusters cannot discover
// services with both EDS and DNS.
func hostnameEndpoints(logger hclog.Logger, localKey GatewayKey, nodes structs.CheckServiceNodes) structs.CheckServiceNodes {
	var (
		hasIP       bool
		hasHostname bool
		resp        structs.CheckServiceNodes
	)

	for _, n := range nodes {
		_, addr, _ := n.BestAddress(!localKey.Matches(n.Node.Datacenter, n.Node.PartitionOrDefault()))
		if net.ParseIP(addr) != nil {
			hasIP = true
			continue
		}
		hasHostname = true
		resp = append(resp, n)
	}

	if hasHostname && hasIP {
		dc := nodes[0].Node.Datacenter
		sn := nodes[0].Service.CompoundServiceName()

		logger.Warn("service contains instances with mix of hostnames and IP addresses; only hostnames will be passed to Envoy",
			"dc", dc, "service", sn.String())
	}
	return resp
}
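
// gatewayWatchOpts bundles the parameters needed to start a mesh gateway
// watch: the data source to query, the channel to notify, and the gateway
// key and upstream used to build the correlation ID.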
type gatewayWatchOpts struct {
	internalServiceDump InternalServiceDump
	notifyCh            chan UpdateEvent
	source              structs.QuerySource
	token               string
	key                 GatewayKey
	upstreamID          UpstreamID
}
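
// watchMeshGateway starts a watch for the mesh gateways in the datacenter and
// partition identified by opts.key, delivering results to opts.notifyCh under
// a "mesh-gateway:" correlation ID (scoped to the upstream when one is given).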
func watchMeshGateway(ctx context.Context, opts gatewayWatchOpts) error {
	var correlationId string
	if opts.upstreamID.Name == "" {
		correlationId = fmt.Sprintf("mesh-gateway:%s", opts.key.String())
	} else {
		correlationId = fmt.Sprintf("mesh-gateway:%s:%s", opts.key.String(), opts.upstreamID.String())
	}

	return opts.internalServiceDump.Notify(ctx, &structs.ServiceDumpRequest{
		Datacenter:     opts.key.Datacenter,
		QueryOptions:   structs.QueryOptions{Token: opts.token},
		ServiceKind:    structs.ServiceKindMeshGateway,
		UseServiceKind: true,
		NodesOnly:      true,
		Source:         opts.source,
		EnterpriseMeta: *structs.DefaultEnterpriseMetaInPartition(opts.key.Partition),
	}, correlationId, opts.notifyCh)
}