package agent

import (
    "fmt"
    "log"
    "reflect"
    "strings"
    "sync"
    "sync/atomic"
    "time"

    "github.com/hashicorp/consul/agent/consul"
    "github.com/hashicorp/consul/agent/consul/structs"
    "github.com/hashicorp/consul/api"
    "github.com/hashicorp/consul/lib"
    "github.com/hashicorp/consul/types"
)

const (
    syncStaggerIntv = 3 * time.Second
    syncRetryIntv   = 15 * time.Second
)

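// Note: antiEntropy below scales both intervals above with aeScale()
// against the LAN member count and adds a random stagger before using
// them, so the effective retry timing grows with cluster size.
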
// syncStatus is used to represent the difference between
// the local and remote state, and if action needs to be taken
type syncStatus struct {
    inSync bool // Is this in sync with the server
}

// localState is used to represent the node's services and checks.
// We use it to perform anti-entropy with the catalog representation.
type localState struct {
    // paused is used to check if we are paused. Must be the first
    // element due to a go bug.
    paused int32

    sync.RWMutex
    logger *log.Logger

    // config is the agent config
    config *Config

    // delegate is the consul interface to use for keeping in sync
    delegate delegate

    // nodeInfoInSync tracks whether the server has our correct top-level
    // node information in sync
    nodeInfoInSync bool

    // Services tracks the local services
    services      map[string]*structs.NodeService
    serviceStatus map[string]syncStatus
    serviceTokens map[string]string

    // Checks tracks the local checks
    checks            map[types.CheckID]*structs.HealthCheck
    checkStatus       map[types.CheckID]syncStatus
    checkTokens       map[types.CheckID]string
    checkCriticalTime map[types.CheckID]time.Time

    // Used to track checks that are being deferred
    deferCheck map[types.CheckID]*time.Timer

    // metadata tracks the local metadata fields
    metadata map[string]string

    // consulCh is used to inform of a change to the known
    // consul nodes. This may be used to retry a sync run
    consulCh chan struct{}

    // triggerCh is used to inform of a change to local state
    // that requires anti-entropy with the server
    triggerCh chan struct{}
}

// Init is used to initialize the local state
func (l *localState) Init(c *Config, lg *log.Logger, d delegate) {
    l.config = c
    l.delegate = d
    l.logger = lg
    l.services = make(map[string]*structs.NodeService)
    l.serviceStatus = make(map[string]syncStatus)
    l.serviceTokens = make(map[string]string)
    l.checks = make(map[types.CheckID]*structs.HealthCheck)
    l.checkStatus = make(map[types.CheckID]syncStatus)
    l.checkTokens = make(map[types.CheckID]string)
    l.checkCriticalTime = make(map[types.CheckID]time.Time)
    l.deferCheck = make(map[types.CheckID]*time.Timer)
    l.metadata = make(map[string]string)
    l.consulCh = make(chan struct{}, 1)
    l.triggerCh = make(chan struct{}, 1)
}

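// A rough usage sketch (assumed; the exact wiring lives in the agent setup
// code, not in this file): the agent initializes the state, starts the
// anti-entropy loop in a goroutine, and then registers local entries:
//
//    var state localState
//    state.Init(config, logger, delegate)
//    go state.antiEntropy(shutdownCh)
//    state.AddService(&structs.NodeService{Service: "web", Port: 80}, "")
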
// changeMade is used to trigger an anti-entropy run
func (l *localState) changeMade() {
    select {
    case l.triggerCh <- struct{}{}:
    default:
    }
}

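// Because triggerCh is buffered with a capacity of one and the send above is
// non-blocking, many rapid local changes coalesce into a single pending
// anti-entropy run instead of queueing one run per change.
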
// ConsulServerUp is used to inform that a new consul server is now
// up. This can be used to speed up the sync process if we are blocking
// waiting to discover a consul server
func (l *localState) ConsulServerUp() {
    select {
    case l.consulCh <- struct{}{}:
    default:
    }
}

// Pause is used to pause state synchronization; this can be
// used to make batch changes
func (l *localState) Pause() {
    atomic.AddInt32(&l.paused, 1)
}

// Resume is used to resume state synchronization
func (l *localState) Resume() {
    paused := atomic.AddInt32(&l.paused, -1)
    if paused < 0 {
        panic("unbalanced localState.Resume() detected")
    }
    l.changeMade()
}

// isPaused is used to check if we are paused
func (l *localState) isPaused() bool {
    return atomic.LoadInt32(&l.paused) > 0
}

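// Pause/Resume calls nest, so a typical (hypothetical) batch registration
// looks like the following; the final Resume triggers a single sync for the
// whole batch:
//
//    l.Pause()
//    defer l.Resume()
//    for _, svc := range services {
//        l.AddService(svc, token)
//    }
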
// ServiceToken returns the configured ACL token for the given
// service ID. If none is present, the agent's token is returned.
func (l *localState) ServiceToken(id string) string {
    l.RLock()
    defer l.RUnlock()
    return l.serviceToken(id)
}

// serviceToken returns an ACL token associated with a service.
func (l *localState) serviceToken(id string) string {
    token := l.serviceTokens[id]
    if token == "" {
        token = l.config.ACLToken
    }
    return token
}

// AddService is used to add a service entry to the local state.
// This entry is persistent and the agent will make a best effort to
// ensure it is registered
func (l *localState) AddService(service *structs.NodeService, token string) {
    // Assign the ID if none given
    if service.ID == "" && service.Service != "" {
        service.ID = service.Service
    }

    l.Lock()
    defer l.Unlock()

    l.services[service.ID] = service
    l.serviceStatus[service.ID] = syncStatus{}
    l.serviceTokens[service.ID] = token
    l.changeMade()
}

// RemoveService is used to remove a service entry from the local state.
// The agent will make a best effort to ensure it is deregistered
func (l *localState) RemoveService(serviceID string) error {
    l.Lock()
    defer l.Unlock()

    if _, ok := l.services[serviceID]; ok {
        delete(l.services, serviceID)
        // Leave the service token around, if any, until we successfully
        // delete the service.
        l.serviceStatus[serviceID] = syncStatus{inSync: false}
        l.changeMade()
    } else {
        return fmt.Errorf("Service does not exist")
    }

    return nil
}

// Services returns the locally registered services that the
// agent is aware of and are being kept in sync with the server
func (l *localState) Services() map[string]*structs.NodeService {
    services := make(map[string]*structs.NodeService)
    l.RLock()
    defer l.RUnlock()

    for name, serv := range l.services {
        services[name] = serv
    }
    return services
}

// CheckToken is used to return the configured health check token for a
// Check, or if none is configured, the default agent ACL token.
func (l *localState) CheckToken(checkID types.CheckID) string {
    l.RLock()
    defer l.RUnlock()
    return l.checkToken(checkID)
}

// checkToken returns an ACL token associated with a check.
func (l *localState) checkToken(checkID types.CheckID) string {
    token := l.checkTokens[checkID]
    if token == "" {
        token = l.config.ACLToken
    }
    return token
}

// AddCheck is used to add a health check to the local state.
// This entry is persistent and the agent will make a best effort to
// ensure it is registered
func (l *localState) AddCheck(check *structs.HealthCheck, token string) {
    // Set the node name
    check.Node = l.config.NodeName

    l.Lock()
    defer l.Unlock()

    l.checks[check.CheckID] = check
    l.checkStatus[check.CheckID] = syncStatus{}
    l.checkTokens[check.CheckID] = token
    delete(l.checkCriticalTime, check.CheckID)
    l.changeMade()
}

// RemoveCheck is used to remove a health check from the local state.
// The agent will make a best effort to ensure it is deregistered
func (l *localState) RemoveCheck(checkID types.CheckID) {
    l.Lock()
    defer l.Unlock()

    delete(l.checks, checkID)
    // Leave the check token around, if any, until we successfully delete
    // the check.
    delete(l.checkCriticalTime, checkID)
    l.checkStatus[checkID] = syncStatus{inSync: false}
    l.changeMade()
}

// UpdateCheck is used to update the status of a check
func (l *localState) UpdateCheck(checkID types.CheckID, status, output string) {
    l.Lock()
    defer l.Unlock()

    check, ok := l.checks[checkID]
    if !ok {
        return
    }

    // Update the critical time tracking (this doesn't cause a server update
    // so we can always keep this up to date).
    if status == api.HealthCritical {
        _, wasCritical := l.checkCriticalTime[checkID]
        if !wasCritical {
            l.checkCriticalTime[checkID] = time.Now()
        }
    } else {
        delete(l.checkCriticalTime, checkID)
    }

    // Do nothing if update is idempotent
    if check.Status == status && check.Output == output {
        return
    }

    // Defer a sync if the output has changed. This is an optimization around
    // frequent updates of output. Instead, we update the output internally,
    // and periodically do a write-back to the servers. If there is a status
    // change we do the write immediately.
    if l.config.CheckUpdateInterval > 0 && check.Status == status {
        check.Output = output
        if _, ok := l.deferCheck[checkID]; !ok {
            intv := time.Duration(uint64(l.config.CheckUpdateInterval)/2) + lib.RandomStagger(l.config.CheckUpdateInterval)
            deferSync := time.AfterFunc(intv, func() {
                l.Lock()
                if _, ok := l.checkStatus[checkID]; ok {
                    l.checkStatus[checkID] = syncStatus{inSync: false}
                    l.changeMade()
                }
                delete(l.deferCheck, checkID)
                l.Unlock()
            })
            l.deferCheck[checkID] = deferSync
        }
        return
    }

    // Update status and mark out of sync
    check.Status = status
    check.Output = output
    l.checkStatus[checkID] = syncStatus{inSync: false}
    l.changeMade()
}

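// As a worked example of the deferral above (assuming lib.RandomStagger
// returns a value between zero and the given interval): with
// CheckUpdateInterval set to 5 minutes, an output-only change is written
// back after roughly 2.5 to 7.5 minutes, while any status change still
// syncs immediately.
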
// Checks returns the locally registered checks that the
// agent is aware of and are being kept in sync with the server
func (l *localState) Checks() map[types.CheckID]*structs.HealthCheck {
    checks := make(map[types.CheckID]*structs.HealthCheck)
    l.RLock()
    defer l.RUnlock()

    for checkID, check := range l.checks {
        checks[checkID] = check
    }
    return checks
}

// CriticalCheck is used to return the duration a check has been critical along
// with its associated health check.
type CriticalCheck struct {
    CriticalFor time.Duration
    Check       *structs.HealthCheck
}

// CriticalChecks returns locally registered health checks that the agent is
// aware of and are being kept in sync with the server, and that are in a
// critical state. This also returns information about how long each check has
// been critical.
func (l *localState) CriticalChecks() map[types.CheckID]CriticalCheck {
    checks := make(map[types.CheckID]CriticalCheck)

    l.RLock()
    defer l.RUnlock()

    now := time.Now()
    for checkID, criticalTime := range l.checkCriticalTime {
        checks[checkID] = CriticalCheck{
            CriticalFor: now.Sub(criticalTime),
            Check:       l.checks[checkID],
        }
    }

    return checks
}

// Metadata returns the local node metadata fields that the
// agent is aware of and are being kept in sync with the server
func (l *localState) Metadata() map[string]string {
    metadata := make(map[string]string)
    l.RLock()
    defer l.RUnlock()

    for key, value := range l.metadata {
        metadata[key] = value
    }
    return metadata
}

// antiEntropy is a long running method used to perform anti-entropy
// between local and remote state.
func (l *localState) antiEntropy(shutdownCh chan struct{}) {
SYNC:
    // Sync our state with the servers
    for {
        err := l.setSyncState()
        if err == nil {
            break
        }
        l.logger.Printf("[ERR] agent: failed to sync remote state: %v", err)
        select {
        case <-l.consulCh:
            // Stagger the retry on leader election, avoid a thundering herd
            select {
            case <-time.After(lib.RandomStagger(aeScale(syncStaggerIntv, len(l.delegate.LANMembers())))):
            case <-shutdownCh:
                return
            }
        case <-time.After(syncRetryIntv + lib.RandomStagger(aeScale(syncRetryIntv, len(l.delegate.LANMembers())))):
        case <-shutdownCh:
            return
        }
    }

    // Force-trigger AE to pick up any changes
    l.changeMade()

    // Schedule the next full sync, with a random stagger
    aeIntv := aeScale(l.config.AEInterval, len(l.delegate.LANMembers()))
    aeIntv = aeIntv + lib.RandomStagger(aeIntv)
    aeTimer := time.After(aeIntv)

    // Wait for sync events
    for {
        select {
        case <-aeTimer:
            goto SYNC
        case <-l.triggerCh:
            // Skip the sync if we are paused
            if l.isPaused() {
                continue
            }
            if err := l.syncChanges(); err != nil {
                l.logger.Printf("[ERR] agent: failed to sync changes: %v", err)
            }
        case <-shutdownCh:
            return
        }
    }
}

// setSyncState does a read of the server state, and updates
// the local syncStatus as appropriate
func (l *localState) setSyncState() error {
    req := structs.NodeSpecificRequest{
        Datacenter:   l.config.Datacenter,
        Node:         l.config.NodeName,
        QueryOptions: structs.QueryOptions{Token: l.config.GetTokenForAgent()},
    }
    var out1 structs.IndexedNodeServices
    var out2 structs.IndexedHealthChecks
    if e := l.delegate.RPC("Catalog.NodeServices", &req, &out1); e != nil {
        return e
    }
    if err := l.delegate.RPC("Health.NodeChecks", &req, &out2); err != nil {
        return err
    }
    checks := out2.HealthChecks

    l.Lock()
    defer l.Unlock()

    // Check the node info
    if out1.NodeServices == nil || out1.NodeServices.Node == nil ||
        out1.NodeServices.Node.ID != l.config.NodeID ||
        !reflect.DeepEqual(out1.NodeServices.Node.TaggedAddresses, l.config.TaggedAddresses) ||
        !reflect.DeepEqual(out1.NodeServices.Node.Meta, l.metadata) {
        l.nodeInfoInSync = false
    }

    // Check all our services
    services := make(map[string]*structs.NodeService)
    if out1.NodeServices != nil {
        services = out1.NodeServices.Services
    }

    for id := range l.services {
        // If the local service doesn't exist remotely, then sync it
        if _, ok := services[id]; !ok {
            l.serviceStatus[id] = syncStatus{inSync: false}
        }
    }

    for id, service := range services {
        // If we don't have the service locally, deregister it
        existing, ok := l.services[id]
        if !ok {
            l.serviceStatus[id] = syncStatus{inSync: false}
            continue
        }

        // If our definition is different, we need to update it. Make a
        // copy so that we don't retain a pointer to any actual state
        // store info for in-memory RPCs.
        if existing.EnableTagOverride {
            existing.Tags = make([]string, len(service.Tags))
            copy(existing.Tags, service.Tags)
        }
        equal := existing.IsSame(service)
        l.serviceStatus[id] = syncStatus{inSync: equal}
    }

    // Index the remote health checks to improve efficiency
    checkIndex := make(map[types.CheckID]*structs.HealthCheck, len(checks))
    for _, check := range checks {
        checkIndex[check.CheckID] = check
    }

    // Sync any check which doesn't exist on the remote side
    for id := range l.checks {
        if _, ok := checkIndex[id]; !ok {
            l.checkStatus[id] = syncStatus{inSync: false}
        }
    }

    for _, check := range checks {
        // If we don't have the check locally, deregister it
        id := check.CheckID
        existing, ok := l.checks[id]
        if !ok {
            // The Serf check is created automatically, and does not
            // need to be registered
            if id == consul.SerfCheckID {
                continue
            }
            l.checkStatus[id] = syncStatus{inSync: false}
            continue
        }

        // If our definition is different, we need to update it
        var equal bool
        if l.config.CheckUpdateInterval == 0 {
            equal = existing.IsSame(check)
        } else {
            // Copy the existing check before potentially modifying
            // it before the compare operation.
            eCopy := existing.Clone()

            // Copy the server's check before modifying, otherwise
            // in-memory RPCs will have side effects.
            cCopy := check.Clone()

            // If there's a defer timer active then we've got a
            // potentially spammy check so we don't sync the output
            // during this sweep since the timer will mark the check
            // out of sync for us. Otherwise, it is safe to sync the
            // output now. This is especially important for checks
            // that don't change state after they are created, in
            // which case we'd never see their output synced back ever.
            if _, ok := l.deferCheck[id]; ok {
                eCopy.Output = ""
                cCopy.Output = ""
            }
            equal = eCopy.IsSame(cCopy)
        }

        // Update the status
        l.checkStatus[id] = syncStatus{inSync: equal}
    }
    return nil
}

// syncChanges is used to scan the status of our local services and checks
// and update any that are out of sync with the server
func (l *localState) syncChanges() error {
    l.Lock()
    defer l.Unlock()

    // We will do node-level info syncing at the end, since it will get
    // updated by a service or check sync anyway, given how the register
    // API works.

    // Sync the services
    for id, status := range l.serviceStatus {
        if _, ok := l.services[id]; !ok {
            if err := l.deleteService(id); err != nil {
                return err
            }
        } else if !status.inSync {
            if err := l.syncService(id); err != nil {
                return err
            }
        } else {
            l.logger.Printf("[DEBUG] agent: Service '%s' in sync", id)
        }
    }

    // Sync the checks
    for id, status := range l.checkStatus {
        if _, ok := l.checks[id]; !ok {
            if err := l.deleteCheck(id); err != nil {
                return err
            }
        } else if !status.inSync {
            // Cancel a deferred sync
            if timer := l.deferCheck[id]; timer != nil {
                timer.Stop()
                delete(l.deferCheck, id)
            }

            if err := l.syncCheck(id); err != nil {
                return err
            }
        } else {
            l.logger.Printf("[DEBUG] agent: Check '%s' in sync", id)
        }
    }

    // Now sync the node level info if we need to, and didn't do any of
    // the other sync operations.
    if !l.nodeInfoInSync {
        if err := l.syncNodeInfo(); err != nil {
            return err
        }
    } else {
        l.logger.Printf("[DEBUG] agent: Node info in sync")
    }

    return nil
}

// deleteService is used to delete a service from the server
func (l *localState) deleteService(id string) error {
    if id == "" {
        return fmt.Errorf("ServiceID missing")
    }

    req := structs.DeregisterRequest{
        Datacenter:   l.config.Datacenter,
        Node:         l.config.NodeName,
        ServiceID:    id,
        WriteRequest: structs.WriteRequest{Token: l.serviceToken(id)},
    }
    var out struct{}
    err := l.delegate.RPC("Catalog.Deregister", &req, &out)
    if err == nil || strings.Contains(err.Error(), "Unknown service") {
        delete(l.serviceStatus, id)
        delete(l.serviceTokens, id)
        l.logger.Printf("[INFO] agent: Deregistered service '%s'", id)
        return nil
    } else if strings.Contains(err.Error(), permissionDenied) {
        l.serviceStatus[id] = syncStatus{inSync: true}
        l.logger.Printf("[WARN] agent: Service '%s' deregistration blocked by ACLs", id)
        return nil
    }
    return err
}

// deleteCheck is used to delete a check from the server
func (l *localState) deleteCheck(id types.CheckID) error {
    if id == "" {
        return fmt.Errorf("CheckID missing")
    }

    req := structs.DeregisterRequest{
        Datacenter:   l.config.Datacenter,
        Node:         l.config.NodeName,
        CheckID:      id,
        WriteRequest: structs.WriteRequest{Token: l.checkToken(id)},
    }
    var out struct{}
    err := l.delegate.RPC("Catalog.Deregister", &req, &out)
    if err == nil || strings.Contains(err.Error(), "Unknown check") {
        delete(l.checkStatus, id)
        delete(l.checkTokens, id)
        l.logger.Printf("[INFO] agent: Deregistered check '%s'", id)
        return nil
    } else if strings.Contains(err.Error(), permissionDenied) {
        l.checkStatus[id] = syncStatus{inSync: true}
        l.logger.Printf("[WARN] agent: Check '%s' deregistration blocked by ACLs", id)
        return nil
    }
    return err
}

// syncService is used to sync a service to the server
func (l *localState) syncService(id string) error {
    req := structs.RegisterRequest{
        Datacenter:      l.config.Datacenter,
        ID:              l.config.NodeID,
        Node:            l.config.NodeName,
        Address:         l.config.AdvertiseAddr,
        TaggedAddresses: l.config.TaggedAddresses,
        NodeMeta:        l.metadata,
        Service:         l.services[id],
        WriteRequest:    structs.WriteRequest{Token: l.serviceToken(id)},
    }

    // If the service has associated checks that are out of sync,
    // piggyback them on the service sync so they are part of the
    // same transaction and are registered atomically. We only let
    // checks ride on service registrations with the same token,
    // otherwise we need to register them separately so they don't
    // pick up privileges from the service token.
    var checks structs.HealthChecks
    for _, check := range l.checks {
        if check.ServiceID == id && (l.serviceToken(id) == l.checkToken(check.CheckID)) {
            if stat, ok := l.checkStatus[check.CheckID]; !ok || !stat.inSync {
                checks = append(checks, check)
            }
        }
    }

    // Backwards-compatibility for Consul < 0.5
    if len(checks) == 1 {
        req.Check = checks[0]
    } else {
        req.Checks = checks
    }

    var out struct{}
    err := l.delegate.RPC("Catalog.Register", &req, &out)
    if err == nil {
        l.serviceStatus[id] = syncStatus{inSync: true}
        // Given how the register API works, this info is also updated
        // every time we sync a service.
        l.nodeInfoInSync = true
        l.logger.Printf("[INFO] agent: Synced service '%s'", id)
        for _, check := range checks {
            l.checkStatus[check.CheckID] = syncStatus{inSync: true}
        }
    } else if strings.Contains(err.Error(), permissionDenied) {
        l.serviceStatus[id] = syncStatus{inSync: true}
        l.logger.Printf("[WARN] agent: Service '%s' registration blocked by ACLs", id)
        for _, check := range checks {
            l.checkStatus[check.CheckID] = syncStatus{inSync: true}
        }
        return nil
    }
    return err
}

// syncCheck is used to sync a check to the server
func (l *localState) syncCheck(id types.CheckID) error {
    // Pull in the associated service if any
    check := l.checks[id]
    var service *structs.NodeService
    if check.ServiceID != "" {
        if serv, ok := l.services[check.ServiceID]; ok {
            service = serv
        }
    }

    req := structs.RegisterRequest{
        Datacenter:      l.config.Datacenter,
        ID:              l.config.NodeID,
        Node:            l.config.NodeName,
        Address:         l.config.AdvertiseAddr,
        TaggedAddresses: l.config.TaggedAddresses,
        NodeMeta:        l.metadata,
        Service:         service,
        Check:           l.checks[id],
        WriteRequest:    structs.WriteRequest{Token: l.checkToken(id)},
    }
    var out struct{}
    err := l.delegate.RPC("Catalog.Register", &req, &out)
    if err == nil {
        l.checkStatus[id] = syncStatus{inSync: true}
        // Given how the register API works, this info is also updated
        // every time we sync a check.
        l.nodeInfoInSync = true
        l.logger.Printf("[INFO] agent: Synced check '%s'", id)
    } else if strings.Contains(err.Error(), permissionDenied) {
        l.checkStatus[id] = syncStatus{inSync: true}
        l.logger.Printf("[WARN] agent: Check '%s' registration blocked by ACLs", id)
        return nil
    }
    return err
}

// syncNodeInfo is used to sync the node-level info to the server
func (l *localState) syncNodeInfo() error {
    req := structs.RegisterRequest{
        Datacenter:      l.config.Datacenter,
        ID:              l.config.NodeID,
        Node:            l.config.NodeName,
        Address:         l.config.AdvertiseAddr,
        TaggedAddresses: l.config.TaggedAddresses,
        NodeMeta:        l.metadata,
        WriteRequest:    structs.WriteRequest{Token: l.config.GetTokenForAgent()},
    }
    var out struct{}
    err := l.delegate.RPC("Catalog.Register", &req, &out)
    if err == nil {
        l.nodeInfoInSync = true
        l.logger.Printf("[INFO] agent: Synced node info")
    } else if strings.Contains(err.Error(), permissionDenied) {
        l.nodeInfoInSync = true
        l.logger.Printf("[WARN] agent: Node info update blocked by ACLs")
        return nil
    }
    return err
}