package agent

import (
	"fmt"
	"sync"

	"github.com/imdario/mergo"
	"github.com/mitchellh/copystructure"
	"golang.org/x/net/context"

	"github.com/hashicorp/consul/agent/cache"
	cachetype "github.com/hashicorp/consul/agent/cache-types"
	"github.com/hashicorp/consul/agent/structs"
)

// The ServiceManager is a layer for service registration in between the agent
// and the local state. Any services must be registered with the ServiceManager,
// which then maintains a long-running watch of any globally-set service or proxy
// configuration that applies to the service in order to register the final, merged
// service configuration locally in the agent state.
type ServiceManager struct {
	agent *Agent

	// servicesLock guards the services map, but not the watches contained
	// therein.
	servicesLock sync.Mutex

	// services tracks all active watches for registered services.
	services map[structs.ServiceID]*serviceConfigWatch

	// registerCh is a channel for receiving service registration requests
	// from serviceConfigWatchers. The registrations are handled in the
	// background when watches are notified of changes. All sends and receives
	// must also obey the ctx.Done() channel to avoid a deadlock during
	// shutdown.
	registerCh chan *asyncRegisterRequest

	// ctx is the shared context for all goroutines launched.
	ctx context.Context

	// cancel can be used to stop all goroutines launched.
	cancel context.CancelFunc

	// running keeps track of live goroutines (worker and watcher).
	running sync.WaitGroup
}

func NewServiceManager(agent *Agent) *ServiceManager {
	ctx, cancel := context.WithCancel(context.Background())
	return &ServiceManager{
		agent:      agent,
		services:   make(map[structs.ServiceID]*serviceConfigWatch),
		registerCh: make(chan *asyncRegisterRequest), // must be unbuffered
		ctx:        ctx,
		cancel:     cancel,
	}
}
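
// A minimal lifecycle sketch (illustrative only; assumes an initialized
// *Agent named a):
//
//	mgr := NewServiceManager(a)
//	mgr.Start()
//	defer mgr.Stop()
//	// the agent then calls mgr.AddService / mgr.RemoveService while running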

// Stop forces all background goroutines to terminate and blocks until they complete.
//
// NOTE: the caller must NOT hold the Agent.stateLock!
func (s *ServiceManager) Stop() {
	s.cancel()
	s.running.Wait()
}

// Start starts a background worker goroutine that writes back into the Agent
// state. This only exists to keep the need to take the agent state lock out
// of the main AddService/RemoveService codepaths, avoiding deadlocks.
func (s *ServiceManager) Start() {
	s.running.Add(1)

	go func() {
		defer s.running.Done()
		for {
			select {
			case <-s.ctx.Done():
				return
			case req := <-s.registerCh:
				req.Reply <- s.registerOnce(req.Args)
			}
		}
	}()
}

// registerOnce processes a single registration request.
func (s *ServiceManager) registerOnce(args addServiceInternalRequest) error {
	s.agent.stateLock.Lock()
	defer s.agent.stateLock.Unlock()

	if args.snap == nil {
		args.snap = s.agent.snapshotCheckState()
	}

	err := s.agent.addServiceInternal(args)
	if err != nil {
		return fmt.Errorf("error updating service registration: %v", err)
	}
	return nil
}

// AddService will (re)create a serviceConfigWatch on the given service. For
// each call of this function the first registration will happen inline and
// will read the merged global defaults for the service through the agent cache
// (regardless of whether or not the service was already registered). This
// lets validation- or authorization-related errors bubble back up to the
// caller's RPC inline with their request. Upon success a goroutine will keep
// this updated in the background.
//
// If waitForCentralConfig=true is used, the initial registration blocks on
// fetching the merged global config through the cache. If false, no such RPC
// occurs and only the previousDefaults are used.
//
// persistServiceConfig controls if the INITIAL registration will result in
// persisting the service config to disk again. All background updates will
// always persist.
//
// service, chkTypes, persist, token, replaceExistingChecks, and source are
// basically pass-through arguments to Agent.addServiceInternal that follow the
// semantics there. The one key difference is that the service provided will be
// merged with the global defaults before registration.
//
// NOTE: the caller must hold the Agent.stateLock!
func (s *ServiceManager) AddService(req AddServiceRequest) error {
	req.Service.EnterpriseMeta.Normalize()

	// For now only proxies have anything that can be configured
	// centrally. So bypass the whole manager for regular services.
	if !req.Service.IsSidecarProxy() && !req.Service.IsGateway() {
		req.persistServiceConfig = false
		return s.agent.addServiceInternal(addServiceInternalRequest{AddServiceRequest: req})
	}

	var (
		service               = req.Service
		chkTypes              = req.chkTypes
		previousDefaults      = req.previousDefaults
		waitForCentralConfig  = req.waitForCentralConfig
		persist               = req.persist
		persistServiceConfig  = req.persistServiceConfig
		token                 = req.token
		replaceExistingChecks = req.replaceExistingChecks
		source                = req.Source
	)

	reg := &serviceRegistration{
		service:               service,
		chkTypes:              chkTypes,
		persist:               persist,
		token:                 token,
		replaceExistingChecks: replaceExistingChecks,
		source:                source,
	}

	s.servicesLock.Lock()
	defer s.servicesLock.Unlock()

	sid := service.CompoundServiceID()

	// If a service watch already exists, shut it down and replace it.
	oldWatch, updating := s.services[sid]
	if updating {
		oldWatch.Stop()
		delete(s.services, sid)
	}

	// Get the existing global config and do the initial registration with the
	// merged config.
	watch := &serviceConfigWatch{
		registration: reg,
		agent:        s.agent,
		registerCh:   s.registerCh,
	}

	err := watch.RegisterAndStart(
		s.ctx,
		previousDefaults,
		waitForCentralConfig,
		persistServiceConfig,
		&s.running,
	)
	if err != nil {
		return err
	}

	s.services[sid] = watch

	if updating {
		s.agent.logger.Debug("updated local registration for service", "service", service.ID)
	} else {
		s.agent.logger.Debug("added local registration for service", "service", service.ID)
	}

	return nil
}
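
// As a hedged illustration of the fast path above: a plain service that is
// neither a sidecar proxy nor a gateway, such as the sketch below, bypasses
// the watch machinery and is registered directly (field values are assumed
// for the example):
//
//	req := AddServiceRequest{
//		Service: &structs.NodeService{
//			ID:      "web",
//			Service: "web",
//			Port:    8080,
//		},
//		Source: ConfigSourceLocal, // assumed configSource value
//	}
//	err := mgr.AddService(req) // caller must hold Agent.stateLock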

// RemoveService stops the config watch for the given service, if any, and
// removes it from the manager.
//
// NOTE: the caller must hold the Agent.stateLock!
func (s *ServiceManager) RemoveService(serviceID structs.ServiceID) {
	s.servicesLock.Lock()
	defer s.servicesLock.Unlock()

	if oldWatch, exists := s.services[serviceID]; exists {
		oldWatch.Stop()
		delete(s.services, serviceID)
	}
}

// serviceRegistration represents a locally registered service.
type serviceRegistration struct {
	service               *structs.NodeService
	chkTypes              []*structs.CheckType
	persist               bool
	token                 string
	replaceExistingChecks bool
	source                configSource
}

// serviceConfigWatch is a long-running helper for composing the end config
// for a given service from both the local registration and the global
// service/proxy defaults.
type serviceConfigWatch struct {
	registration *serviceRegistration

	agent      *Agent
	registerCh chan<- *asyncRegisterRequest

	// cacheKey stores the key of the current request. When the registration
	// changes we check it to see if a new cache watch is needed.
	cacheKey string

	cancelFunc func()
	running    sync.WaitGroup
}

// RegisterAndStart does the initial registration for the service and then
// starts the long-running config watch.
//
// NOTE: this is called while holding the Agent.stateLock
func (w *serviceConfigWatch) RegisterAndStart(
	ctx context.Context,
	serviceDefaults *structs.ServiceConfigResponse,
	waitForCentralConfig bool,
	persistServiceConfig bool,
	wg *sync.WaitGroup,
) error {
	// Either we explicitly block waiting for defaults before registering,
	// or we feed it some seed data (or NO data) and bypass the blocking
	// operation. Either way the watcher will end up with something flagged
	// as defaults even if it doesn't reflect the actual defaults.
	if waitForCentralConfig {
		var err error
		serviceDefaults, err = w.fetchDefaults(ctx)
		if err != nil {
			return fmt.Errorf("could not retrieve initial service_defaults config for service %q: %v",
				w.registration.service.ID, err)
		}
	}

	// Merge the local registration with the central defaults and update this
	// service in the local state.
	merged, err := mergeServiceConfig(serviceDefaults, w.registration.service)
	if err != nil {
		return err
	}

	// The first time we do this interactively, we need to know if it
	// failed for validation reasons, which we only get back from the
	// initial underlying add service call.
	err = w.agent.addServiceInternal(addServiceInternalRequest{
		AddServiceRequest: AddServiceRequest{
			Service:               merged,
			chkTypes:              w.registration.chkTypes,
			persist:               w.registration.persist,
			persistServiceConfig:  persistServiceConfig,
			token:                 w.registration.token,
			replaceExistingChecks: w.registration.replaceExistingChecks,
			Source:                w.registration.source,
			snap:                  w.agent.snapshotCheckState(),
		},
		persistService:  w.registration.service,
		persistDefaults: serviceDefaults,
	})
	if err != nil {
		return fmt.Errorf("error updating service registration: %v", err)
	}

	// Start the config watch, which starts a blocking query for the
	// resolved service config in the background.
	return w.start(ctx, wg)
}

// fetchDefaults reads the merged global defaults for the service through the
// agent cache.
//
// NOTE: this is called while holding the Agent.stateLock
func (w *serviceConfigWatch) fetchDefaults(ctx context.Context) (*structs.ServiceConfigResponse, error) {
	req := makeConfigRequest(w.agent, w.registration)

	raw, _, err := w.agent.cache.Get(ctx, cachetype.ResolvedServiceConfigName, req)
	if err != nil {
		return nil, err
	}

	serviceConfig, ok := raw.(*structs.ServiceConfigResponse)
	if !ok {
		// This should never happen, but we want to protect against panics
		return nil, fmt.Errorf("internal error: response type not correct")
	}
	return serviceConfig, nil
}

// start starts the config watch and a goroutine to handle updates over the
// updateCh. This is safe to call more than once assuming you have called
// Stop after each call.
//
// NOTE: this is called while holding the Agent.stateLock
func (w *serviceConfigWatch) start(ctx context.Context, wg *sync.WaitGroup) error {
	ctx, w.cancelFunc = context.WithCancel(ctx)

	// Configure and start a cache.Notify goroutine to run a continuous
	// blocking query on the resolved service config for this service.
	req := makeConfigRequest(w.agent, w.registration)
	w.cacheKey = req.CacheInfo().Key

	updateCh := make(chan cache.UpdateEvent, 1)

	// We use the cache key as the correlationID here. Notify in general will
	// not respond on the updateCh after the context is cancelled; however,
	// there could possibly be a race where it has only just got an update and
	// checked the context before we cancel, and so might still deliver the
	// old event. Using the cacheKey allows us to ignore updates from the old
	// cache watch and makes even this rare edge case safe.
	err := w.agent.cache.Notify(
		ctx,
		cachetype.ResolvedServiceConfigName,
		req,
		w.cacheKey,
		updateCh,
	)
	if err != nil {
		w.cancelFunc()
		return err
	}

	w.running.Add(1)
	wg.Add(1)
	go w.runWatch(ctx, wg, updateCh)

	return nil
}

func (w *serviceConfigWatch) Stop() {
	w.cancelFunc()
	w.running.Wait()
}

// runWatch handles any update events from the cache.Notify until the
// config watch is shut down.
//
// NOTE: the caller must NOT hold the Agent.stateLock!
func (w *serviceConfigWatch) runWatch(ctx context.Context, wg *sync.WaitGroup, updateCh chan cache.UpdateEvent) {
	defer wg.Done()
	defer w.running.Done()

	for {
		select {
		case <-ctx.Done():
			return
		case event := <-updateCh:
			if err := w.handleUpdate(ctx, event); err != nil {
				w.agent.logger.Error("error handling service update", "error", err)
			}
		}
	}
}

// handleUpdate receives an update event for the global config defaults,
// updates the local state, and re-registers the service with the newly
// merged config.
//
// NOTE: the caller must NOT hold the Agent.stateLock!
func (w *serviceConfigWatch) handleUpdate(ctx context.Context, event cache.UpdateEvent) error {
	// If we got an error from the watch, surface it to the caller for
	// logging; we want the initial update to cause a service registration
	// no matter what.
	if event.Err != nil {
		return fmt.Errorf("error watching service config: %v", event.Err)
	}

	serviceDefaults, ok := event.Result.(*structs.ServiceConfigResponse)
	if !ok {
		return fmt.Errorf("unknown update event type: %T", event)
	}

	// Sanity check this event came from the currently active watch, to
	// ignore rare races when switching cache keys.
	if event.CorrelationID != w.cacheKey {
		// It's a no-op. The new watcher will deliver (or may have already
		// delivered) the correct config, so just ignore this old message.
		return nil
	}

	// Merge the local registration with the central defaults and update this
	// service in the local state.
	merged, err := mergeServiceConfig(serviceDefaults, w.registration.service)
	if err != nil {
		return err
	}

	// While we were waiting on the agent state lock we may have been shut
	// down, so avoid doing a registration in that case.
	if err := ctx.Err(); err != nil {
		return nil
	}

	registerReq := &asyncRegisterRequest{
		Args: addServiceInternalRequest{
			AddServiceRequest: AddServiceRequest{
				Service:               merged,
				chkTypes:              w.registration.chkTypes,
				persist:               w.registration.persist,
				persistServiceConfig:  true,
				token:                 w.registration.token,
				replaceExistingChecks: w.registration.replaceExistingChecks,
				Source:                w.registration.source,
			},
			persistService:  w.registration.service,
			persistDefaults: serviceDefaults,
		},
		Reply: make(chan error, 1),
	}

	select {
	case <-ctx.Done():
		return nil
	case w.registerCh <- registerReq:
	}

	select {
	case <-ctx.Done():
		return nil

	case err := <-registerReq.Reply:
		if err != nil {
			return fmt.Errorf("error updating service registration: %v", err)
		}
		return nil
	}
}

// asyncRegisterRequest is a registration request paired with a reply channel
// so the sender can wait for the result.
type asyncRegisterRequest struct {
	Args  addServiceInternalRequest
	Reply chan error
}
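
// The request/reply protocol around asyncRegisterRequest pairs every send and
// receive with ctx.Done() so neither side can block forever once the manager
// shuts down. A sketch of the sender side (illustrative, mirroring
// handleUpdate above):
//
//	req := &asyncRegisterRequest{Args: args, Reply: make(chan error, 1)}
//	select {
//	case <-ctx.Done():
//		return nil // shutting down, drop the request
//	case registerCh <- req:
//	}
//	select {
//	case <-ctx.Done():
//		return nil // shutting down, the reply no longer matters
//	case err := <-req.Reply:
//		return err
//	}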

func makeConfigRequest(agent *Agent, registration *serviceRegistration) *structs.ServiceConfigRequest {
	ns := registration.service
	name := ns.Service
	var upstreams []structs.ServiceID

	// Note that only sidecar proxies should even make it here for now,
	// although later that will change.
	if ns.IsSidecarProxy() {
		// This is a sidecar proxy, so ignore the proxy service's own config
		// since it is managed by the target service's config.
		name = ns.Proxy.DestinationServiceName

		// Also, if we have any upstreams defined, add them to the request so
		// we can learn about their configs.
		for _, us := range ns.Proxy.Upstreams {
			if us.DestinationType == "" || us.DestinationType == structs.UpstreamDestTypeService {
				sid := us.DestinationID()
				sid.EnterpriseMeta.Merge(&ns.EnterpriseMeta)
				upstreams = append(upstreams, sid)
			}
		}
	}

	req := &structs.ServiceConfigRequest{
		Name:           name,
		Datacenter:     agent.config.Datacenter,
		QueryOptions:   structs.QueryOptions{Token: agent.tokens.AgentToken()},
		UpstreamIDs:    upstreams,
		EnterpriseMeta: ns.EnterpriseMeta,
	}
	if registration.token != "" {
		req.QueryOptions.Token = registration.token
	}
	return req
}
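
// For example (a hedged sketch, not captured from a real agent): a sidecar
// proxy for "web" with one upstream "db" would yield a request roughly like:
//
//	&structs.ServiceConfigRequest{
//		Name:        "web", // taken from Proxy.DestinationServiceName
//		Datacenter:  "dc1",
//		UpstreamIDs: []structs.ServiceID{structs.NewServiceID("db", nil)},
//	}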

// mergeServiceConfig merges the global defaults into the locally registered
// service to produce the final effective config for the watched service.
func mergeServiceConfig(defaults *structs.ServiceConfigResponse, service *structs.NodeService) (*structs.NodeService, error) {
	if defaults == nil {
		return service, nil
	}

	// We don't want to change the registered service in place since it is our
	// source of truth about what was actually registered before defaults were
	// applied. So copy it first.
	nsRaw, err := copystructure.Copy(service)
	if err != nil {
		return nil, err
	}

	// Merge proxy defaults
	ns := nsRaw.(*structs.NodeService)

	if err := mergo.Merge(&ns.Proxy.Config, defaults.ProxyConfig); err != nil {
		return nil, err
	}

	if err := mergo.Merge(&ns.Proxy.Expose, defaults.Expose); err != nil {
		return nil, err
	}

	if ns.Proxy.MeshGateway.Mode == structs.MeshGatewayModeDefault {
		ns.Proxy.MeshGateway.Mode = defaults.MeshGateway.Mode
	}

	// Merge upstream defaults if there were any returned
	for i := range ns.Proxy.Upstreams {
		// Get a pointer, not a value copy, of the upstream struct.
		us := &ns.Proxy.Upstreams[i]
		if us.DestinationType != "" && us.DestinationType != structs.UpstreamDestTypeService {
			continue
		}

		// Default the upstream's gateway mode if it didn't specify one.
		if us.MeshGateway.Mode == structs.MeshGatewayModeDefault {
			us.MeshGateway.Mode = ns.Proxy.MeshGateway.Mode
		}

		usCfg, ok := defaults.UpstreamIDConfigs.GetUpstreamConfig(us.DestinationID())
		if !ok {
			// No config defaults to merge
			continue
		}
		if err := mergo.Merge(&us.Config, usCfg); err != nil {
			return nil, err
		}
	}

	return ns, nil
}
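
// A small sketch of the mergo semantics relied on above: without the
// WithOverride option, mergo.Merge only fills zero-valued destination fields
// and missing map keys, so locally registered values win over the central
// defaults (illustrative values):
//
//	local := map[string]interface{}{"protocol": "grpc"}
//	defaults := map[string]interface{}{"protocol": "http", "connect_timeout_ms": 5000}
//	_ = mergo.Merge(&local, defaults)
//	// local is now {"protocol": "grpc", "connect_timeout_ms": 5000}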