// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: BUSL-1.1

package proxycfg

import (
	"errors"
	"runtime/debug"
	"sync"

	"github.com/hashicorp/consul/lib/channels"

	"github.com/hashicorp/go-hclog"
	"golang.org/x/time/rate"

	"github.com/hashicorp/consul/agent/structs"
	proxysnapshot "github.com/hashicorp/consul/internal/mesh/proxy-snapshot"
	"github.com/hashicorp/consul/tlsutil"
)

// ProxyID is a handle on a proxy service instance being tracked by Manager.
type ProxyID struct {
	structs.ServiceID

	// NodeName identifies the node to which the proxy is registered.
	NodeName string

	// Token is used to track watches on the same proxy with different ACL tokens
	// separately, to prevent accidental security bugs.
	//
	// Note: this can be different from the ACL token used for authorization that is
	// passed to Register (e.g. agent-local services are registered ahead-of-time
	// with a token that may be different from the one presented in the xDS stream).
	Token string
}

// ProxySource identifies where a proxy service tracked by Manager came from,
// such as the agent's local state or the catalog. It's used to prevent sources
// from overwriting each other's registrations.
type ProxySource string

// SrcTerminatedChan is closed when the config source for the proxy is no longer
// running; once it is closed, no further updates will be delivered.
type SrcTerminatedChan <-chan struct{}

// Manager provides an API with which proxy services can be registered, and
// coordinates the fetching (and refreshing) of intentions, upstreams, discovery
// chain, certificates etc.
//
// Consumers such as the xDS server can then subscribe to receive snapshots of
// this data whenever it changes.
//
// See package docs for more detail.
type Manager struct {
	ManagerConfig

	rateLimiter *rate.Limiter

	mu         sync.Mutex
	proxies    map[ProxyID]*state
	watchers   map[ProxyID]map[uint64]chan proxysnapshot.ProxySnapshot
	maxWatchID uint64
}
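
// A rough lifecycle sketch (illustrative only: proxyID, svc, token, and the
// ProxySource value are hypothetical placeholders; the real consumer is the
// agent's xDS server, which wires this up differently):
//
//	m, err := NewManager(ManagerConfig{ /* all fields populated */ })
//	if err != nil {
//		return err
//	}
//	_ = m.Register(proxyID, svc, ProxySource("local"), token, false)
//
//	snapCh, cancel := m.Watch(proxyID)
//	defer cancel()
//	for snap := range snapCh {
//		_ = snap // e.g. regenerate and push xDS resources for this proxy
//	}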

// ManagerConfig holds the required external dependencies for a Manager
// instance. All fields must be set to something valid or the manager will
// panic. The ManagerConfig is passed by value to NewManager so the passed value
// can be mutated safely.
type ManagerConfig struct {
	// DataSources contains the dependencies used to fetch the data needed to
	// configure proxies.
	DataSources DataSources

	// Source describes the current agent's identity; it's used directly for
	// prepared query discovery but also indirectly as a way to pass the current
	// Datacenter name into other request types that need it. This is sufficient
	// for now and cleaner than passing the entire RuntimeConfig.
	Source *structs.QuerySource

	// DNSConfig is the agent's relevant DNS config for any proxies.
	DNSConfig DNSConfig

	// Logger is the agent's logger, used by the Manager and by each proxy's
	// watch state.
	Logger hclog.Logger

	TLSConfigurator *tlsutil.Configurator

	// IntentionDefaultAllow is set by the agent so that we can pass this
	// information to proxies that need to make intention decisions on their
	// own.
	IntentionDefaultAllow bool

	// UpdateRateLimit controls the rate at which config snapshots are delivered
	// when updates are received from data sources. This enables us to reduce the
	// impact of updates to "global" resources (e.g. proxy-defaults and wildcard
	// intentions) that could otherwise saturate system resources, and cause Raft
	// or gossip instability.
	//
	// Defaults to rate.Inf (no rate limit).
	UpdateRateLimit rate.Limit
}

// NewManager constructs a Manager.
func NewManager(cfg ManagerConfig) (*Manager, error) {
	if cfg.Source == nil || cfg.Logger == nil {
		return nil, errors.New("all ManagerConfig fields must be provided")
	}

	if cfg.UpdateRateLimit == 0 {
		cfg.UpdateRateLimit = rate.Inf
	}

	m := &Manager{
		ManagerConfig: cfg,
		proxies:       make(map[ProxyID]*state),
		watchers:      make(map[ProxyID]map[uint64]chan proxysnapshot.ProxySnapshot),
		rateLimiter:   rate.NewLimiter(cfg.UpdateRateLimit, 1),
	}
	return m, nil
}

// UpdateRateLimit returns the configured update rate limit (see ManagerConfig).
func (m *Manager) UpdateRateLimit() rate.Limit {
	return m.rateLimiter.Limit()
}

// SetUpdateRateLimit configures the update rate limit (see ManagerConfig).
func (m *Manager) SetUpdateRateLimit(l rate.Limit) {
	m.rateLimiter.SetLimit(l)
}
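
// Illustrative only (the values are arbitrary, not recommendations): a finite
// limit throttles how often new snapshots are delivered to watchers, while
// rate.Inf (the default) disables limiting entirely.
//
//	m.SetUpdateRateLimit(rate.Limit(250)) // roughly 250 snapshot deliveries per second
//	m.SetUpdateRateLimit(rate.Inf)        // no rate limit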

// RegisteredProxies returns a list of the proxies tracked by Manager, filtered
// by source.
func (m *Manager) RegisteredProxies(source ProxySource) []ProxyID {
	m.mu.Lock()
	defer m.mu.Unlock()

	proxies := make([]ProxyID, 0, len(m.proxies))
	for id, state := range m.proxies {
		if state.source != source {
			continue
		}
		proxies = append(proxies, id)
	}
	return proxies
}

// Register and start fetching resources for the given proxy service. If the
// given service was already registered by a different source (e.g. we began
// tracking it from the catalog, but then it was registered to the server
// agent locally) the service will be left as-is unless overwrite is true.
func (m *Manager) Register(id ProxyID, ns *structs.NodeService, source ProxySource, token string, overwrite bool) error {
	m.mu.Lock()
	defer m.mu.Unlock()

	defer func() {
		if r := recover(); r != nil {
			m.Logger.Error("unexpected panic during service manager registration",
				"node", id.NodeName,
				"service", id.ServiceID,
				"message", r,
				"stacktrace", string(debug.Stack()),
			)
		}
	}()
	return m.register(id, ns, source, token, overwrite)
}

func (m *Manager) register(id ProxyID, ns *structs.NodeService, source ProxySource, token string, overwrite bool) error {
	state, ok := m.proxies[id]
	if ok && !state.stoppedRunning() {
		if state.source != source && !overwrite {
			// Registered by a different source, leave as-is.
			return nil
		}

		if !state.Changed(ns, token) {
			// No change
			return nil
		}

		// We are updating the proxy, close its old state
		state.Close(false)
	}

	// TODO: move to a function that translates ManagerConfig->stateConfig
	stateConfig := stateConfig{
		logger:                m.Logger.With("service_id", id.String()),
		dataSources:           m.DataSources,
		source:                m.Source,
		dnsConfig:             m.DNSConfig,
		intentionDefaultAllow: m.IntentionDefaultAllow,
	}
	if m.TLSConfigurator != nil {
		stateConfig.serverSNIFn = m.TLSConfigurator.ServerSNI
	}

	var err error
	state, err = newState(id, ns, source, token, stateConfig, m.rateLimiter)
	if err != nil {
		return err
	}

	if _, err = state.Watch(); err != nil {
		return err
	}
	m.proxies[id] = state

	// Start a goroutine that will wait for changes and broadcast them to watchers.
	go m.notifyBroadcast(id, state)
	return nil
}

// Deregister the given proxy service, but only if it was registered by the same
// source.
func (m *Manager) Deregister(id ProxyID, source ProxySource) {
	m.mu.Lock()
	defer m.mu.Unlock()

	state, ok := m.proxies[id]
	if !ok {
		return
	}

	if state.source != source {
		return
	}

	// Closing the state lets the goroutine we started in Register finish, since
	// the watch chan is closed.
	state.Close(false)
	delete(m.proxies, id)

	// We intentionally leave potential watchers hanging here - there is no new
	// config for them and closing their channels might be indistinguishable from
	// an error that they should retry. We rely on them eventually giving up
	// (because the proxy is in fact no longer running) so that the watches are
	// cleaned up naturally.
}

func (m *Manager) notifyBroadcast(proxyID ProxyID, state *state) {
	// Run until ch is closed (by a defer in state.run).
	for snap := range state.snapCh {
		m.notify(&snap)
	}

	// If state.run exited because of an irrecoverable error, close all of the
	// watchers so that the consumers reconnect/retry at a higher level.
	if state.failed() {
		m.closeAllWatchers(proxyID)
	}
}

func (m *Manager) notify(snap *ConfigSnapshot) {
	m.mu.Lock()
	defer m.mu.Unlock()

	watchers, ok := m.watchers[snap.ProxyID]
	if !ok {
		return
	}

	for _, ch := range watchers {
		m.deliverLatest(snap, ch)
	}
}

// deliverLatest delivers the snapshot to a watch chan. If the delivery blocks,
// it will drain the chan and then re-attempt delivery so that a slow consumer
// gets the latest config earlier. This MUST be called from a method that holds
// m.mu, since it assumes we are the only goroutine sending on ch.
func (m *Manager) deliverLatest(snap proxysnapshot.ProxySnapshot, ch chan proxysnapshot.ProxySnapshot) {
	m.Logger.Trace("delivering latest proxy snapshot to proxy", "proxyID", snap.(*ConfigSnapshot).ProxyID)
	err := channels.DeliverLatest(snap, ch)
	if err != nil {
		m.Logger.Error("failed to deliver proxyState to proxy",
			"proxy", snap.(*ConfigSnapshot).ProxyID,
		)
	}
}
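
// The drain-then-resend behavior lives in channels.DeliverLatest. A minimal
// sketch of the pattern (an approximation, not necessarily the exact library
// code), given that ch is buffered with capacity 1:
//
//	select {
//	case ch <- snap: // chan had room: delivered
//		return nil
//	default:
//	}
//	select {
//	case <-ch: // drop the stale snapshot a slow consumer hasn't read yet
//	default:
//	}
//	select {
//	case ch <- snap: // resend the freshest snapshot
//		return nil
//	default:
//		return errors.New("failed to deliver latest snapshot")
//	}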

// Watch registers a watch on a proxy. It might not exist yet in which case this
// will not fail, but no updates will be delivered until the proxy is
// registered. If there is already a valid snapshot in memory, it will be
// delivered immediately.
func (m *Manager) Watch(id ProxyID) (<-chan proxysnapshot.ProxySnapshot, proxysnapshot.CancelFunc) {
	m.mu.Lock()
	defer m.mu.Unlock()

	// This buffering is crucial otherwise we'd block immediately trying to
	// deliver the current snapshot below if we already have one.
	ch := make(chan proxysnapshot.ProxySnapshot, 1)
	watchers, ok := m.watchers[id]
	if !ok {
		watchers = make(map[uint64]chan proxysnapshot.ProxySnapshot)
	}
	watchID := m.maxWatchID
	m.maxWatchID++
	watchers[watchID] = ch
	m.watchers[id] = watchers

	// Deliver the current snapshot immediately if there is one ready
	if state, ok := m.proxies[id]; ok {
		if snap := state.CurrentSnapshot(); snap != nil {
			// We rely on ch being buffered above, and on it not having been passed
			// anywhere else yet, so we are the only writer and this send can never
			// block or deadlock.
			ch <- snap
		}
	}

	return ch, func() {
		m.mu.Lock()
		defer m.mu.Unlock()
		m.closeWatchLocked(id, watchID)
	}
}

func (m *Manager) closeAllWatchers(proxyID ProxyID) {
	m.mu.Lock()
	defer m.mu.Unlock()

	watchers, ok := m.watchers[proxyID]
	if !ok {
		return
	}

	for watchID := range watchers {
		m.closeWatchLocked(proxyID, watchID)
	}
}

// closeWatchLocked cleans up state related to a single watcher. It assumes the
// lock is held.
func (m *Manager) closeWatchLocked(proxyID ProxyID, watchID uint64) {
	if watchers, ok := m.watchers[proxyID]; ok {
		if ch, ok := watchers[watchID]; ok {
			delete(watchers, watchID)
			close(ch)
			if len(watchers) == 0 {
				delete(m.watchers, proxyID)
			}
		}
	}
}

// Close removes all state and stops all running goroutines.
func (m *Manager) Close() error {
	m.mu.Lock()
	defer m.mu.Unlock()

	// Close all current watchers first
	for proxyID, watchers := range m.watchers {
		for watchID := range watchers {
			m.closeWatchLocked(proxyID, watchID)
		}
	}

	// Then close all states
	for proxyID, state := range m.proxies {
		state.Close(false)
		delete(m.proxies, proxyID)
	}
	return nil
}