// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: BUSL-1.1

package checks

import (
	"context"
	"fmt"
	"strings"
	"sync"
	"time"

	"github.com/hashicorp/consul/acl"
	"github.com/hashicorp/consul/agent/structs"
	"github.com/hashicorp/consul/api"
)

// Constants related to alias check backoff.
const (
	checkAliasBackoffMin     = 3               // 3 attempts before backing off
	checkAliasBackoffMaxWait = 1 * time.Minute // maximum backoff wait time
)

// CheckAlias is a check type that aliases the health of another service
// instance or node. If the aliased service has any critical health checks,
// then this check is critical. If the service has no critical checks but has
// warnings, then this check is warning, and if the service has only passing
// checks, then this check is passing.
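//
// A minimal wiring sketch (illustrative only; rpcClient and localState stand in
// for the agent's RPC client and local state, and the IDs are hypothetical):
//
//	chk := &CheckAlias{
//		ServiceID: structs.ServiceID{ID: "web"},
//		CheckID:   structs.CheckID{ID: "web-alias"},
//		RPC:       rpcClient,
//		Notify:    localState,
//	}
//	chk.Start()
//	defer chk.Stop()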
type CheckAlias struct {
	Node      string            // Node name of the service. If empty, assumed to be this node.
	ServiceID structs.ServiceID // ID (not name) of the service to alias

	CheckID structs.CheckID             // ID of this check
	RPC     RPC                         // Used to query remote server if necessary
	RPCReq  structs.NodeSpecificRequest // Base request
	Notify  AliasNotifier               // For updating the check state

	stop     bool
	stopCh   chan struct{}
	stopLock sync.Mutex
	stopWg   sync.WaitGroup

	acl.EnterpriseMeta
}

// AliasNotifier is a CheckNotifier specifically for the Alias check.
// This requires additional methods that are satisfied by the agent
// local state.
type AliasNotifier interface {
	CheckNotifier

	AddAliasCheck(structs.CheckID, structs.ServiceID, chan<- struct{}) error
	RemoveAliasCheck(structs.CheckID, structs.ServiceID)
	Checks(*acl.EnterpriseMeta) map[structs.CheckID]*structs.HealthCheck
}

// Start is used to start the check, runs until Stop().
func (c *CheckAlias) Start() {
	c.stopLock.Lock()
	defer c.stopLock.Unlock()
	c.stop = false
	c.stopCh = make(chan struct{})
	c.stopWg.Add(1)
	go c.run(c.stopCh)
}

// Stop is used to stop the check.
func (c *CheckAlias) Stop() {
	c.stopLock.Lock()
	if !c.stop {
		c.stop = true
		close(c.stopCh)
	}
	c.stopLock.Unlock()

	// Wait until the associated goroutine is definitely complete before
	// returning to the caller. This is to prevent the new and old checks from
	// both updating the state of the alias check using possibly stale
	// information.
	c.stopWg.Wait()
}

// run is invoked in a goroutine until Stop() is called.
func (c *CheckAlias) run(stopCh chan struct{}) {
	defer c.stopWg.Done()

	// If we have a specific node set, then use a blocking query
	if c.Node != "" {
		c.runQuery(stopCh)
		return
	}

	// Use the local state to match the service.
	c.runLocal(stopCh)
}
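
// runLocal watches the agent's local state for the aliased service. It
// recomputes the alias status whenever the local state notifies us of a
// change, and also on a periodic refresh timer as a safety net.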
func (c *CheckAlias) runLocal(stopCh chan struct{}) {
	// Very important this is buffered as 1 so that we do not lose any
	// queued updates. This only has to be exactly 1 since the existence
	// of any update triggers us to load the full health check state.
	notifyCh := make(chan struct{}, 1)
	c.Notify.AddAliasCheck(c.CheckID, c.ServiceID, notifyCh)
	defer c.Notify.RemoveAliasCheck(c.CheckID, c.ServiceID)

	// maxDurationBetweenUpdates is the maximum time we go between explicit
	// notifications before we re-query the aliased service checks anyway. This
	// helps in the case we miss an edge triggered event and the alias does not
	// accurately reflect the underlying service health status.
	const maxDurationBetweenUpdates = 1 * time.Minute

	var refreshTimer <-chan time.Time
	extendRefreshTimer := func() {
		refreshTimer = time.After(maxDurationBetweenUpdates)
	}

	updateStatus := func() {
		checks := c.Notify.Checks(c.WithWildcardNamespace())
		checksList := make([]*structs.HealthCheck, 0, len(checks))
		for _, chk := range checks {
			checksList = append(checksList, chk)
		}
		c.processChecks(checksList, func(serviceID *structs.ServiceID) bool {
			return c.Notify.ServiceExists(*serviceID)
		})
		extendRefreshTimer()
	}

	// Immediately run to get the current state of the target service
	updateStatus()

	for {
		select {
		case <-refreshTimer:
			updateStatus()
		case <-notifyCh:
			updateStatus()
		case <-stopCh:
			return
		}
	}
}

// CheckIfServiceIDExists is used to determine if a service exists
type CheckIfServiceIDExists func(*structs.ServiceID) bool
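
// checkServiceExistsOnRemoteServer asks the servers (via Catalog.NodeServices)
// whether the aliased service is registered on the given node, retrying the
// RPC a few times before reporting an error.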
func (c *CheckAlias) checkServiceExistsOnRemoteServer(serviceID *structs.ServiceID) (bool, error) {
	args := c.RPCReq
	args.Node = c.Node
	args.AllowStale = true
	args.EnterpriseMeta = c.EnterpriseMeta
	// We may lag the leader by at most 15s
	args.MaxStaleDuration = 15 * time.Second
	attempts := 0
RETRY_CALL:
	var out structs.IndexedNodeServices
	attempts++
	if err := c.RPC.RPC(context.Background(), "Catalog.NodeServices", &args, &out); err != nil {
		if attempts <= 3 {
			time.Sleep(time.Duration(attempts) * time.Second)
			goto RETRY_CALL
		}
		return false, err
	}
	if out.NodeServices == nil {
		// Defensive guard: the node itself was not found, so the service
		// cannot exist on it.
		return false, nil
	}
	for _, srv := range out.NodeServices.Services {
		if serviceID.Matches(srv.CompoundServiceID()) {
			return true, nil
		}
	}
	return false, nil
}
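
// runQuery uses blocking queries against the servers to watch the health
// checks of the aliased node/service, backing off exponentially when the
// RPC fails repeatedly.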
func (c *CheckAlias) runQuery(stopCh chan struct{}) {
	args := c.RPCReq
	args.Node = c.Node
	args.AllowStale = true
	args.MaxQueryTime = 1 * time.Minute
	args.EnterpriseMeta = c.EnterpriseMeta
	// We may lag the leader by at most 15s
	args.MaxStaleDuration = 15 * time.Second

	var attempt uint
	for {
		// Check if we're stopped. We fallthrough and block otherwise,
		// which has a maximum time set above so we'll always check for
		// stop within a reasonable amount of time.
		select {
		case <-stopCh:
			return
		default:
		}

		// Backoff if we have to
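		// With checkAliasBackoffMin = 3 and checkAliasBackoffMaxWait = 1 minute,
		// attempt 4 waits 2s, attempt 5 waits 4s, and so on, doubling until the
		// wait is capped at one minute.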
		if attempt > checkAliasBackoffMin {
			shift := attempt - checkAliasBackoffMin
			if shift > 31 {
				shift = 31 // so we don't overflow to 0
			}
			waitTime := (1 << shift) * time.Second
			if waitTime > checkAliasBackoffMaxWait {
				waitTime = checkAliasBackoffMaxWait
			}
			time.Sleep(waitTime)
		}

		// Get the current health checks for the specified node.
		//
		// NOTE(mitchellh): This currently returns ALL health checks for
		// a node even though we also have the service ID. This can be
		// optimized if we introduce a new RPC endpoint to filter both,
		// but for blocking queries it isn't that much more efficient since
		// the checks index is global to the cluster.
		var out structs.IndexedHealthChecks

		if err := c.RPC.RPC(context.Background(), "Health.NodeChecks", &args, &out); err != nil {
			attempt++
			if attempt > 1 {
				c.Notify.UpdateCheck(c.CheckID, api.HealthCritical,
					fmt.Sprintf("Failure checking aliased node or service: %s", err))
			}

			continue
		}

		attempt = 0 // Reset the attempts so we don't backoff the next request

		// Set our index for the next request
		args.MinQueryIndex = out.Index

		// We want to ensure that we're always blocking on subsequent requests
		// to avoid hot loops. Index 1 is always safe since the min raft index
		// is at least 5. Note this shouldn't happen but protecting against this
		// case is safer than a 100% CPU loop.
		if args.MinQueryIndex < 1 {
			args.MinQueryIndex = 1
		}

		c.processChecks(out.HealthChecks, func(serviceID *structs.ServiceID) bool {
			ret, err := c.checkServiceExistsOnRemoteServer(serviceID)
			if err != nil {
				// We cannot determine if the node has the check; assume it exists
				return true
			}
			return ret
		})
	}
}

// processChecks is a common helper for taking a set of health checks and
// using them to update our alias. This is abstracted since the checks can
// come from both the remote server as well as local state.
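//
// For example, a single critical check on the aliased service makes the alias
// critical; warnings without any critical checks make it warning; otherwise it
// passes. If the aliased service cannot be found at all, the alias is set to
// critical.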
func (c *CheckAlias) processChecks(checks []*structs.HealthCheck, CheckIfServiceIDExists CheckIfServiceIDExists) {
	health := api.HealthPassing
	msg := "No checks found."
	serviceFound := false
	for _, chk := range checks {
		if c.Node != "" && !strings.EqualFold(c.Node, chk.Node) {
			continue
		}
		serviceMatch := c.ServiceID.Matches(chk.CompoundServiceID())
		if chk.ServiceID != "" && !serviceMatch {
			continue
		}
		// We have at least one healthcheck for this service
		if serviceMatch {
			serviceFound = true
		}
		if chk.Status == api.HealthCritical || chk.Status == api.HealthWarning {
			health = chk.Status
			msg = fmt.Sprintf("Aliased check %q failing: %s", chk.Name, chk.Output)

			// Critical checks exit the for loop immediately since we
			// know that this is the health state. Warnings do not since
			// there may still be a critical check.
			if chk.Status == api.HealthCritical {
				break
			}
		} else {
			// if current health is warning, don't overwrite it
			if health == api.HealthPassing {
				msg = "All checks passing."
			}
		}
	}
	if !serviceFound {
		if !CheckIfServiceIDExists(&c.ServiceID) {
			msg = fmt.Sprintf("Service %s could not be found on node %s", c.ServiceID.ID, c.Node)
			health = api.HealthCritical
		}
	}
	c.Notify.UpdateCheck(c.CheckID, health, msg)
}