// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: BUSL-1.1

package checks

import (
	"bufio"
	"context"
	"crypto/tls"
	"errors"
	"fmt"
	"io"
	"net"
	"net/http"
	"os"
	osexec "os/exec"
	"strings"
	"sync"
	"syscall"
	"time"

	http2 "golang.org/x/net/http2"

	"github.com/hashicorp/consul/agent/structs"
	"github.com/hashicorp/go-hclog"

	"github.com/armon/circbuf"
	"github.com/hashicorp/consul/agent/exec"
	"github.com/hashicorp/consul/api"
	"github.com/hashicorp/consul/lib"
	"github.com/hashicorp/go-cleanhttp"
)

const (
	// MinInterval is the minimal interval between
	// two checks. Do not allow for an interval below this value.
	// Otherwise we risk fork bombing a system.
	MinInterval = time.Second

	// DefaultBufSize is the maximum size of the captured
	// check output by default. Prevents an enormous buffer
	// from being captured.
	DefaultBufSize = 4 * 1024 // 4KB

	// UserAgent is the value of the User-Agent header
	// for HTTP health checks.
	UserAgent = "Consul Health Check"
)

// RPC is an interface that an RPC client must implement. This is a helper
// interface that is implemented by the agent delegate for checks that need
// to make RPC calls.
type RPC interface {
	RPC(ctx context.Context, method string, args interface{}, reply interface{}) error
}

// CheckNotifier interface is used by the CheckMonitor
// to notify when a check has a status update. The update
// should take care to be idempotent.
type CheckNotifier interface {
	UpdateCheck(checkID structs.CheckID, status, output string)
	// ServiceExists returns true if the given service exists.
	ServiceExists(serviceID structs.ServiceID) bool
}

// CheckMonitor is used to periodically invoke a script to
// determine the health of a given check. It is compatible with
// nagios plugins and expects the output in the same format.
// Supports failures_before_critical and success_before_passing.
type CheckMonitor struct {
	Notify        CheckNotifier
	CheckID       structs.CheckID
	ServiceID     structs.ServiceID
	Script        string
	ScriptArgs    []string
	Interval      time.Duration
	Timeout       time.Duration
	Logger        hclog.Logger
	OutputMaxSize int
	StatusHandler *StatusHandler

	stop     bool
	stopCh   chan struct{}
	stopLock sync.Mutex
}

// Start is used to start a check monitor.
// Monitor runs until stop is called
func (c *CheckMonitor) Start() {
	c.stopLock.Lock()
	defer c.stopLock.Unlock()
	c.stop = false
	c.stopCh = make(chan struct{})
	go c.run()
}

// Stop is used to stop a check monitor.
func (c *CheckMonitor) Stop() {
	c.stopLock.Lock()
	defer c.stopLock.Unlock()
	if !c.stop {
		c.stop = true
		close(c.stopCh)
	}
}

// run is invoked by a goroutine to run until Stop() is called
func (c *CheckMonitor) run() {
	// Get the randomized initial pause time
	initialPauseTime := lib.RandomStagger(c.Interval)
	next := time.After(initialPauseTime)
	for {
		select {
		case <-next:
			c.check()
			next = time.After(c.Interval)
		case <-c.stopCh:
			return
		}
	}
}

// check is invoked periodically to perform the script check
func (c *CheckMonitor) check() {
	// Create the command
	var cmd *osexec.Cmd
	var err error
	if len(c.ScriptArgs) > 0 {
		cmd, err = exec.Subprocess(c.ScriptArgs)
	} else {
		cmd, err = exec.Script(c.Script)
	}
	if err != nil {
		c.Logger.Error("Check failed to setup",
			"check", c.CheckID.String(),
			"error", err,
		)
		c.Notify.UpdateCheck(c.CheckID, api.HealthCritical, err.Error())
		return
	}

	// Collect the output
	output, _ := circbuf.NewBuffer(int64(c.OutputMaxSize))
	cmd.Stdout = output
	cmd.Stderr = output
	exec.SetSysProcAttr(cmd)

	truncateAndLogOutput := func() string {
		outputStr := string(output.Bytes())
		if output.TotalWritten() > output.Size() {
			outputStr = fmt.Sprintf("Captured %d of %d bytes\n...\n%s",
				output.Size(), output.TotalWritten(), outputStr)
		}
		c.Logger.Trace("Check output",
			"check", c.CheckID.String(),
			"output", outputStr,
		)
		return outputStr
	}

	// Start the check
	if err := cmd.Start(); err != nil {
		c.Logger.Error("Check failed to invoke",
			"check", c.CheckID.String(),
			"error", err,
		)
		c.Notify.UpdateCheck(c.CheckID, api.HealthCritical, err.Error())
		return
	}

	// Wait for the check to complete
	waitCh := make(chan error, 1)
	go func() {
		waitCh <- cmd.Wait()
	}()

	timeout := 30 * time.Second
	if c.Timeout > 0 {
		timeout = c.Timeout
	}
	select {
	case <-time.After(timeout):
		if err := exec.KillCommandSubtree(cmd); err != nil {
			c.Logger.Warn("Check failed to kill after timeout",
				"check", c.CheckID.String(),
				"error", err,
			)
		}

		msg := fmt.Sprintf("Timed out (%s) running check", timeout.String())
		c.Logger.Warn("Timed out running check",
			"check", c.CheckID.String(),
			"timeout", timeout.String(),
		)

		outputStr := truncateAndLogOutput()
		if len(outputStr) > 0 {
			msg += "\n\n" + outputStr
		}
		c.Notify.UpdateCheck(c.CheckID, api.HealthCritical, msg)

		// Now wait for the process to exit so we never start another
		// instance concurrently.
		<-waitCh
		return

	case err = <-waitCh:
		// The process returned before the timeout, proceed normally
	}

	// Check if the check passed
	outputStr := truncateAndLogOutput()
	if err == nil {
		c.StatusHandler.updateCheck(c.CheckID, api.HealthPassing, outputStr)
		return
	}

	// If the exit code is 1, set check as warning
	exitErr, ok := err.(*osexec.ExitError)
	if ok {
		if status, ok := exitErr.Sys().(syscall.WaitStatus); ok {
			code := status.ExitStatus()
			if code == 1 {
				c.StatusHandler.updateCheck(c.CheckID, api.HealthWarning, outputStr)
				return
			}
		}
	}

	// Set the health as critical
	c.StatusHandler.updateCheck(c.CheckID, api.HealthCritical, outputStr)
}

// CheckTTL is used to apply a TTL to check status,
// and enables clients to set the status of a check;
// upon the TTL expiring, the check status is
// automatically set to critical.
type CheckTTL struct {
	Notify    CheckNotifier
	CheckID   structs.CheckID
	ServiceID structs.ServiceID
	TTL       time.Duration
	Logger    hclog.Logger

	timer *time.Timer

	lastOutput     string
	lastOutputLock sync.RWMutex

	stop     bool
	stopCh   chan struct{}
	stopLock sync.Mutex

	OutputMaxSize int
}

// Start is used to start a check ttl, runs until Stop()
func (c *CheckTTL) Start() {
	c.stopLock.Lock()
	defer c.stopLock.Unlock()
	if c.OutputMaxSize < 1 {
		c.OutputMaxSize = DefaultBufSize
	}
	c.stop = false
	c.stopCh = make(chan struct{})
	c.timer = time.NewTimer(c.TTL)
	go c.run()
}

// Stop is used to stop a check ttl.
func (c *CheckTTL) Stop() {
	c.stopLock.Lock()
	defer c.stopLock.Unlock()
	if !c.stop {
		c.timer.Stop()
		c.stop = true
		close(c.stopCh)
	}
}

// run is used to handle TTL expiration and to update the check status
func (c *CheckTTL) run() {
	for {
		select {
		case <-c.timer.C:
			c.Logger.Warn("Check missed TTL, is now critical",
				"check", c.CheckID.String(),
			)
			c.Notify.UpdateCheck(c.CheckID, api.HealthCritical, c.getExpiredOutput())

		case <-c.stopCh:
			return
		}
	}
}

// getExpiredOutput formats the output for the case when the TTL is expired.
func (c *CheckTTL) getExpiredOutput() string {
	c.lastOutputLock.RLock()
	defer c.lastOutputLock.RUnlock()

	const prefix = "TTL expired"
	if c.lastOutput == "" {
		return prefix
	}

	return fmt.Sprintf("%s (last output before timeout follows): %s", prefix, c.lastOutput)
}

// SetStatus is used to update the status of the check,
// and to renew the TTL. If expired, TTL is restarted.
// The (possibly truncated) output is returned.
func (c *CheckTTL) SetStatus(status, output string) string {
	c.Logger.Debug("Check status updated",
		"check", c.CheckID.String(),
		"status", status,
	)
	total := len(output)
	if total > c.OutputMaxSize {
		output = fmt.Sprintf("%s ... (captured %d of %d bytes)",
			output[:c.OutputMaxSize], c.OutputMaxSize, total)
	}
	c.Notify.UpdateCheck(c.CheckID, status, output)
	// Store the last output so we can retain it if the TTL expires.
	c.lastOutputLock.Lock()
	c.lastOutput = output
	c.lastOutputLock.Unlock()

	c.timer.Reset(c.TTL)
	return output
}

// CheckHTTP is used to periodically make an HTTP request to
// determine the health of a given check.
// The check is passing if the response code is 2XX.
// The check is warning if the response code is 429.
// The check is critical if the response code is anything else
// or if the request returns an error.
// Supports failures_before_critical and success_before_passing.
type CheckHTTP struct {
	CheckID          structs.CheckID
	ServiceID        structs.ServiceID
	HTTP             string
	Header           map[string][]string
	Method           string
	Body             string
	Interval         time.Duration
	Timeout          time.Duration
	Logger           hclog.Logger
	TLSClientConfig  *tls.Config
	OutputMaxSize    int
	StatusHandler    *StatusHandler
	DisableRedirects bool

	httpClient *http.Client
	stop       bool
	stopCh     chan struct{}
	stopLock   sync.Mutex
	stopWg     sync.WaitGroup

	// Set if checks are exposed through Connect proxies.
	// If set, this is the target of check().
	ProxyHTTP string
}

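// CheckType returns a populated structs.CheckType describing this HTTP check.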
func (c *CheckHTTP) CheckType() structs.CheckType {
	return structs.CheckType{
		CheckID:       c.CheckID.ID,
		HTTP:          c.HTTP,
		Method:        c.Method,
		Body:          c.Body,
		Header:        c.Header,
		Interval:      c.Interval,
		ProxyHTTP:     c.ProxyHTTP,
		Timeout:       c.Timeout,
		OutputMaxSize: c.OutputMaxSize,
	}
}

// Start is used to start an HTTP check.
// The check runs until stop is called
func (c *CheckHTTP) Start() {
	c.stopLock.Lock()
	defer c.stopLock.Unlock()

	if c.httpClient == nil {
		// Create the transport. We disable HTTP keep-alives to prevent
		// failing checks due to the keepalive interval.
		trans := cleanhttp.DefaultTransport()
		trans.DisableKeepAlives = true

		// Take on the supplied TLS client config.
		trans.TLSClientConfig = c.TLSClientConfig

		// Create the HTTP client.
		c.httpClient = &http.Client{
			Timeout:   10 * time.Second,
			Transport: trans,
		}
		if c.DisableRedirects {
			c.httpClient.CheckRedirect = func(req *http.Request, via []*http.Request) error {
				return http.ErrUseLastResponse
			}
		}
		if c.Timeout > 0 {
			c.httpClient.Timeout = c.Timeout
		}

		if c.OutputMaxSize < 1 {
			c.OutputMaxSize = DefaultBufSize
		}
	}

	c.stop = false
	c.stopCh = make(chan struct{})
	c.stopWg.Add(1)
	go c.run()
}

// Stop is used to stop an HTTP check.
func (c *CheckHTTP) Stop() {
	c.stopLock.Lock()
	defer c.stopLock.Unlock()
	if !c.stop {
		c.stop = true
		close(c.stopCh)
	}

	// Wait for the c.run() goroutine to complete before returning.
	c.stopWg.Wait()
}

// run is invoked by a goroutine to run until Stop() is called
func (c *CheckHTTP) run() {
	defer c.stopWg.Done()
	// Get the randomized initial pause time
	initialPauseTime := lib.RandomStagger(c.Interval)
	next := time.After(initialPauseTime)
	for {
		select {
		case <-next:
			c.check()
			next = time.After(c.Interval)
		case <-c.stopCh:
			return
		}
	}
}

// check is invoked periodically to perform the HTTP check
func (c *CheckHTTP) check() {
	method := c.Method
	if method == "" {
		method = "GET"
	}

	target := c.HTTP
	if c.ProxyHTTP != "" {
		target = c.ProxyHTTP
	}

	bodyReader := strings.NewReader(c.Body)
	req, err := http.NewRequest(method, target, bodyReader)
	if err != nil {
		c.StatusHandler.updateCheck(c.CheckID, api.HealthCritical, err.Error())
		return
	}

	req.Header = http.Header(c.Header)

	// this happens during testing but not in prod
	if req.Header == nil {
		req.Header = make(http.Header)
	}

	if host := req.Header.Get("Host"); host != "" {
		req.Host = host
	}

	if req.Header.Get("User-Agent") == "" {
		req.Header.Set("User-Agent", UserAgent)
	}
	if req.Header.Get("Accept") == "" {
		req.Header.Set("Accept", "text/plain, text/*, */*")
	}

	resp, err := c.httpClient.Do(req)
	if err != nil {
		c.StatusHandler.updateCheck(c.CheckID, api.HealthCritical, err.Error())
		return
	}
	defer resp.Body.Close()

	// Read the response into a circular buffer to limit the size
	output, _ := circbuf.NewBuffer(int64(c.OutputMaxSize))
	if _, err := io.Copy(output, resp.Body); err != nil {
		c.Logger.Warn("Check error while reading body",
			"check", c.CheckID.String(),
			"error", err,
		)
	}

	// Format the response body
	result := fmt.Sprintf("HTTP %s %s: %s Output: %s", method, target, resp.Status, output.String())

	if resp.StatusCode >= 200 && resp.StatusCode <= 299 {
		// PASSING (2xx)
		c.StatusHandler.updateCheck(c.CheckID, api.HealthPassing, result)
	} else if resp.StatusCode == 429 {
		// WARNING
		// 429 Too Many Requests (RFC 6585)
		// The user has sent too many requests in a given amount of time.
		c.StatusHandler.updateCheck(c.CheckID, api.HealthWarning, result)
	} else {
		// CRITICAL
		c.StatusHandler.updateCheck(c.CheckID, api.HealthCritical, result)
	}
}

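// CheckH2PING is used to periodically send HTTP/2 pings to determine the
// health of a given check.
// The check is passing if the ping succeeds and critical if the connection
// or the ping fails.
// Supports failures_before_critical and success_before_passing.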
type CheckH2PING struct {
	CheckID         structs.CheckID
	ServiceID       structs.ServiceID
	H2PING          string
	Interval        time.Duration
	Timeout         time.Duration
	Logger          hclog.Logger
	TLSClientConfig *tls.Config
	StatusHandler   *StatusHandler

	stop     bool
	stopCh   chan struct{}
	stopLock sync.Mutex
	stopWg   sync.WaitGroup
}

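// shutdownHTTP2ClientConn gracefully shuts down the HTTP/2 client connection
// used by an H2PING check, allowing at most half of the check's timeout and
// logging a warning if the shutdown returns an error.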
func shutdownHTTP2ClientConn(clientConn *http2.ClientConn, timeout time.Duration, checkIDString string, logger hclog.Logger) {
	ctx, cancel := context.WithTimeout(context.Background(), timeout/2)
	defer cancel()
	err := clientConn.Shutdown(ctx)
	if err != nil {
		logger.Warn("Shutdown of H2Ping check client connection gave an error",
			"check", checkIDString,
			"error", err)
	}
}

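// check is invoked periodically to perform the HTTP/2 ping. It dials the
// target (with TLS when a client config is supplied), opens an HTTP/2 client
// connection, and updates the check status based on the result of the ping.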
func (c *CheckH2PING) check() {
	t := &http2.Transport{}
	var dialFunc func(ctx context.Context, network, address string, tlscfg *tls.Config) (net.Conn, error)
	if c.TLSClientConfig != nil {
		t.TLSClientConfig = c.TLSClientConfig
		dialFunc = func(ctx context.Context, network, address string, tlscfg *tls.Config) (net.Conn, error) {
			dialer := &tls.Dialer{Config: tlscfg}
			return dialer.DialContext(ctx, network, address)
		}
	} else {
		t.AllowHTTP = true
		dialFunc = func(ctx context.Context, network, address string, tlscfg *tls.Config) (net.Conn, error) {
			dialer := &net.Dialer{}
			return dialer.DialContext(ctx, network, address)
		}
	}
	target := c.H2PING
	ctx, cancel := context.WithTimeout(context.Background(), c.Timeout)
	defer cancel()
	conn, err := dialFunc(ctx, "tcp", target, c.TLSClientConfig)
	if err != nil {
		message := fmt.Sprintf("Failed to dial to %s: %s", target, err)
		c.StatusHandler.updateCheck(c.CheckID, api.HealthCritical, message)
		return
	}
	defer conn.Close()
	clientConn, err := t.NewClientConn(conn)
	if err != nil {
		message := fmt.Sprintf("Failed to create client connection %s", err)
		c.StatusHandler.updateCheck(c.CheckID, api.HealthCritical, message)
		return
	}
	defer shutdownHTTP2ClientConn(clientConn, c.Timeout, c.CheckID.String(), c.Logger)
	err = clientConn.Ping(ctx)
	if err == nil {
		c.StatusHandler.updateCheck(c.CheckID, api.HealthPassing, "HTTP2 ping was successful")
	} else {
		message := fmt.Sprintf("HTTP2 ping failed: %s", err)
		c.StatusHandler.updateCheck(c.CheckID, api.HealthCritical, message)
	}
}

// Stop is used to stop an H2PING check.
func (c *CheckH2PING) Stop() {
	c.stopLock.Lock()
	defer c.stopLock.Unlock()
	if !c.stop {
		c.stop = true
		close(c.stopCh)
	}
	c.stopWg.Wait()
}

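// run is invoked by a goroutine to run until Stop() is called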
func (c *CheckH2PING) run() {
	defer c.stopWg.Done()
	// Get the randomized initial pause time
	initialPauseTime := lib.RandomStagger(c.Interval)
	next := time.After(initialPauseTime)
	for {
		select {
		case <-next:
			c.check()
			next = time.After(c.Interval)
		case <-c.stopCh:
			return
		}
	}
}

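// Start is used to start an H2PING check.
// The check runs until Stop() is called; a default timeout of 10 seconds is
// applied when none is configured.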
func (c *CheckH2PING) Start() {
	c.stopLock.Lock()
	defer c.stopLock.Unlock()
	if c.Timeout <= 0 {
		c.Timeout = 10 * time.Second
	}
	c.stop = false
	c.stopCh = make(chan struct{})
	c.stopWg.Add(1)
	go c.run()
}

// CheckTCP is used to periodically make a TCP connection to determine the
// health of a given check.
// The check is passing if the connection succeeds.
// The check is critical if the connection returns an error.
// Supports failures_before_critical and success_before_passing.
type CheckTCP struct {
	CheckID         structs.CheckID
	ServiceID       structs.ServiceID
	TCP             string
	Interval        time.Duration
	Timeout         time.Duration
	Logger          hclog.Logger
	TLSClientConfig *tls.Config
	StatusHandler   *StatusHandler

	dialer   *net.Dialer
	stop     bool
	stopCh   chan struct{}
	stopLock sync.Mutex
}

// Start is used to start a TCP check.
// The check runs until stop is called
func (c *CheckTCP) Start() {
	c.stopLock.Lock()
	defer c.stopLock.Unlock()

	if c.dialer == nil {
		// Create the socket dialer
		c.dialer = &net.Dialer{
			Timeout: 10 * time.Second,
		}
		if c.Timeout > 0 {
			c.dialer.Timeout = c.Timeout
		}
	}

	c.stop = false
	c.stopCh = make(chan struct{})
	go c.run()
}

// Stop is used to stop a TCP check.
func (c *CheckTCP) Stop() {
	c.stopLock.Lock()
	defer c.stopLock.Unlock()
	if !c.stop {
		c.stop = true
		close(c.stopCh)
	}
}

// run is invoked by a goroutine to run until Stop() is called
func (c *CheckTCP) run() {
	// Get the randomized initial pause time
	initialPauseTime := lib.RandomStagger(c.Interval)
	next := time.After(initialPauseTime)
	for {
		select {
		case <-next:
			c.check()
			next = time.After(c.Interval)
		case <-c.stopCh:
			return
		}
	}
}

// check is invoked periodically to perform the TCP check
func (c *CheckTCP) check() {
	var conn io.Closer
	var err error
	var checkType string

	if c.TLSClientConfig == nil {
		conn, err = c.dialer.Dial(`tcp`, c.TCP)
		checkType = "TCP"
	} else {
		conn, err = tls.DialWithDialer(c.dialer, `tcp`, c.TCP, c.TLSClientConfig)
		checkType = "TCP+TLS"
	}

	if err != nil {
		c.Logger.Warn(fmt.Sprintf("Check %s connection failed", checkType),
			"check", c.CheckID.String(),
			"error", err,
		)
		c.StatusHandler.updateCheck(c.CheckID, api.HealthCritical, err.Error())
		return
	}

	conn.Close()
	c.StatusHandler.updateCheck(c.CheckID, api.HealthPassing, fmt.Sprintf("%s connect %s: Success", checkType, c.TCP))
}

// CheckUDP is used to periodically send a UDP datagram to determine the health of a given check.
// The check is passing if the connection succeeds and either the response equals the bytes passed
// in or the error returned is a timeout error.
// The check is critical if the connection succeeds but the response is not equal to the bytes
// passed in, if the error returned is not a timeout error, or if the connection fails.
type CheckUDP struct {
	CheckID       structs.CheckID
	ServiceID     structs.ServiceID
	UDP           string
	Message       string
	Interval      time.Duration
	Timeout       time.Duration
	Logger        hclog.Logger
	StatusHandler *StatusHandler

	dialer   *net.Dialer
	stop     bool
	stopCh   chan struct{}
	stopLock sync.Mutex
}

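// Start is used to start the UDP check.
// The check runs until Stop() is called.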
func (c *CheckUDP) Start() {
	c.stopLock.Lock()
	defer c.stopLock.Unlock()

	if c.dialer == nil {
		// Create the socket dialer
		c.dialer = &net.Dialer{
			Timeout: 10 * time.Second,
		}
		if c.Timeout > 0 {
			c.dialer.Timeout = c.Timeout
		}
	}

	c.stop = false
	c.stopCh = make(chan struct{})
	go c.run()
}

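// Stop is used to stop the UDP check.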
func (c *CheckUDP) Stop() {
	c.stopLock.Lock()
	defer c.stopLock.Unlock()
	if !c.stop {
		c.stop = true
		close(c.stopCh)
	}
}

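// run is invoked by a goroutine to run until Stop() is called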
func (c *CheckUDP) run() {
	// Get the randomized initial pause time
	initialPauseTime := lib.RandomStagger(c.Interval)
	next := time.After(initialPauseTime)
	for {
		select {
		case <-next:
			c.check()
			next = time.After(c.Interval)
		case <-c.stopCh:
			return
		}
	}
}

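// check is invoked periodically to perform the UDP check. It sends the
// configured message to the target and updates the check status based on
// the write and read results, treating a read timeout as passing.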
func (c *CheckUDP) check() {
	conn, err := c.dialer.Dial(`udp`, c.UDP)

	if err != nil {
		if e, ok := err.(net.Error); ok && e.Timeout() {
			c.StatusHandler.updateCheck(c.CheckID, api.HealthPassing, fmt.Sprintf("UDP connect %s: Success", c.UDP))
			return
		} else {
			c.Logger.Warn("Check socket connection failed",
				"check", c.CheckID.String(),
				"error", err,
			)
			c.StatusHandler.updateCheck(c.CheckID, api.HealthCritical, err.Error())
			return
		}
	}
	defer conn.Close()

	n, err := fmt.Fprintf(conn, c.Message)
	if err != nil {
		c.Logger.Warn("Check socket write failed",
			"check", c.CheckID.String(),
			"error", err,
		)
		c.StatusHandler.updateCheck(c.CheckID, api.HealthCritical, err.Error())
		return
	}

	if n != len(c.Message) {
		c.Logger.Warn("Check socket short write",
			"check", c.CheckID.String(),
			"error", err,
		)
		c.StatusHandler.updateCheck(c.CheckID, api.HealthCritical, err.Error())
		return
	}

	if err != nil {
		c.Logger.Warn("Check socket write failed",
			"check", c.CheckID.String(),
			"error", err,
		)
		c.StatusHandler.updateCheck(c.CheckID, api.HealthCritical, err.Error())
		return
	}
	_, err = bufio.NewReader(conn).Read(make([]byte, 1))
	if err != nil {
		if strings.Contains(err.Error(), "i/o timeout") {
			c.StatusHandler.updateCheck(c.CheckID, api.HealthPassing, fmt.Sprintf("UDP connect %s: Success", c.UDP))
			return
		} else {
			c.Logger.Warn("Check socket read failed",
				"check", c.CheckID.String(),
				"error", err,
			)
			c.StatusHandler.updateCheck(c.CheckID, api.HealthCritical, err.Error())
			return
		}
	} else if err == nil {
		c.StatusHandler.updateCheck(c.CheckID, api.HealthPassing, fmt.Sprintf("UDP connect %s: Success", c.UDP))
	}
}

// CheckDocker is used to periodically invoke a script to
// determine the health of an application running inside a
// Docker Container. We assume that the script is compatible
// with nagios plugins and expects the output in the same format.
// Supports failures_before_critical and success_before_passing.
type CheckDocker struct {
	CheckID           structs.CheckID
	ServiceID         structs.ServiceID
	Script            string
	ScriptArgs        []string
	DockerContainerID string
	Shell             string
	Interval          time.Duration
	Logger            hclog.Logger
	Client            *DockerClient
	StatusHandler     *StatusHandler

	stop chan struct{}
}

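// Start is used to start the Docker check.
// The check runs until Stop() is called and panics if the check was
// already started.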
func (c *CheckDocker) Start() {
	if c.stop != nil {
		panic("Docker check already started")
	}

	if c.Logger == nil {
		c.Logger = hclog.New(&hclog.LoggerOptions{Output: io.Discard})
	}

	if c.Shell == "" {
		c.Shell = os.Getenv("SHELL")
		if c.Shell == "" {
			c.Shell = "/bin/sh"
		}
	}
	c.stop = make(chan struct{})
	go c.run()
}

func (c *CheckDocker) Stop() {
	if c.stop == nil {
		panic("Stop called before start")
	}
	close(c.stop)
}

func (c *CheckDocker) run() {
	defer c.Client.Close()
	firstWait := lib.RandomStagger(c.Interval)
	next := time.After(firstWait)
	for {
		select {
		case <-next:
			c.check()
			next = time.After(c.Interval)
		case <-c.stop:
			return
		}
	}
}

func (c *CheckDocker) check() {
	var out string
	status, b, err := c.doCheck()
	if err != nil {
		c.Logger.Debug("Check failed",
			"check", c.CheckID.String(),
			"error", err,
		)
		out = err.Error()
	} else {
		// out is already limited to CheckBufSize since we're getting a
		// limited buffer. So we don't need to truncate it, just report
		// that it was truncated.
		out = string(b.Bytes())
		if int(b.TotalWritten()) > len(out) {
			out = fmt.Sprintf("Captured %d of %d bytes\n...\n%s", len(out), b.TotalWritten(), out)
		}
		c.Logger.Trace("Check output",
			"check", c.CheckID.String(),
			"output", out,
		)
	}
	c.StatusHandler.updateCheck(c.CheckID, status, out)
}

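// doCheck executes the script inside the Docker container via the exec API
// and maps the exit code to a health status: 0 is passing, 1 is warning,
// and anything else is critical.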
func (c *CheckDocker) doCheck() (string, *circbuf.Buffer, error) {
	var cmd []string
	if len(c.ScriptArgs) > 0 {
		cmd = c.ScriptArgs
	} else {
		cmd = []string{c.Shell, "-c", c.Script}
	}

	execID, err := c.Client.CreateExec(c.DockerContainerID, cmd)
	if err != nil {
		return api.HealthCritical, nil, err
	}

	buf, err := c.Client.StartExec(c.DockerContainerID, execID)
	if err != nil {
		return api.HealthCritical, nil, err
	}

	exitCode, err := c.Client.InspectExec(c.DockerContainerID, execID)
	if err != nil {
		return api.HealthCritical, nil, err
	}

	switch exitCode {
	case 0:
		return api.HealthPassing, buf, nil
	case 1:
		c.Logger.Debug("Check failed",
			"check", c.CheckID.String(),
			"exit_code", exitCode,
		)
		return api.HealthWarning, buf, nil
	default:
		c.Logger.Debug("Check failed",
			"check", c.CheckID.String(),
			"exit_code", exitCode,
		)
		return api.HealthCritical, buf, nil
	}
}

// CheckGRPC is used to periodically send requests to a gRPC server
// application that implements the gRPC health-checking protocol.
// The check is passing if returned status is SERVING.
// The check is critical if connection fails or returned status is
// not SERVING.
// Supports failures_before_critical and success_before_passing.
type CheckGRPC struct {
	CheckID         structs.CheckID
	ServiceID       structs.ServiceID
	GRPC            string
	Interval        time.Duration
	Timeout         time.Duration
	TLSClientConfig *tls.Config
	Logger          hclog.Logger
	StatusHandler   *StatusHandler

	probe    *GrpcHealthProbe
	stop     bool
	stopCh   chan struct{}
	stopLock sync.Mutex

	// Set if checks are exposed through Connect proxies.
	// If set, this is the target of check().
	ProxyGRPC string
}

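// CheckType returns a populated structs.CheckType describing this gRPC check.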
func (c *CheckGRPC) CheckType() structs.CheckType {
	return structs.CheckType{
		CheckID:   c.CheckID.ID,
		GRPC:      c.GRPC,
		ProxyGRPC: c.ProxyGRPC,
		Interval:  c.Interval,
		Timeout:   c.Timeout,
	}
}

func (c *CheckGRPC) Start() {
	c.stopLock.Lock()
	defer c.stopLock.Unlock()
	timeout := 10 * time.Second
	if c.Timeout > 0 {
		timeout = c.Timeout
	}
	c.probe = NewGrpcHealthProbe(c.GRPC, timeout, c.TLSClientConfig)
	c.stop = false
	c.stopCh = make(chan struct{})
	go c.run()
}

func (c *CheckGRPC) run() {
	// Get the randomized initial pause time
	initialPauseTime := lib.RandomStagger(c.Interval)
	next := time.After(initialPauseTime)
	for {
		select {
		case <-next:
			c.check()
			next = time.After(c.Interval)
		case <-c.stopCh:
			return
		}
	}
}

func (c *CheckGRPC) check() {
	target := c.GRPC
	if c.ProxyGRPC != "" {
		target = c.ProxyGRPC
	}

	err := c.probe.Check(target)
	if err != nil {
		c.StatusHandler.updateCheck(c.CheckID, api.HealthCritical, err.Error())
	} else {
		c.StatusHandler.updateCheck(c.CheckID, api.HealthPassing, fmt.Sprintf("gRPC check %s: success", target))
	}
}

func (c *CheckGRPC) Stop() {
	c.stopLock.Lock()
	defer c.stopLock.Unlock()
	if !c.stop {
		c.stop = true
		close(c.stopCh)
	}
}

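// CheckOSService is used to periodically determine the health of an
// operating system service.
// The check is passing if the service is running, critical if the client
// reports ErrOSServiceStatusCritical, and warning for any other error.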
type CheckOSService struct {
	CheckID       structs.CheckID
	ServiceID     structs.ServiceID
	OSService     string
	Interval      time.Duration
	Timeout       time.Duration
	Logger        hclog.Logger
	StatusHandler *StatusHandler
	Client        *OSServiceClient

	stop     bool
	stopCh   chan struct{}
	stopLock sync.Mutex
	stopWg   sync.WaitGroup
}

func (c *CheckOSService) CheckType() structs.CheckType {
	return structs.CheckType{
		CheckID:   c.CheckID.ID,
		OSService: c.OSService,
		Interval:  c.Interval,
		Timeout:   c.Timeout,
	}
}

func (c *CheckOSService) Start() {
	c.stopLock.Lock()
	defer c.stopLock.Unlock()
	c.stop = false
	c.stopCh = make(chan struct{})
	c.stopWg.Add(1)
	go c.run()
}

func (c *CheckOSService) Stop() {
	c.stopLock.Lock()
	defer c.stopLock.Unlock()
	if !c.stop {
		c.stop = true
		close(c.stopCh)
	}

	// Wait for the c.run() goroutine to complete before returning.
	c.stopWg.Wait()
}

func (c *CheckOSService) run() {
	defer c.stopWg.Done()
	// Get the randomized initial pause time
	initialPauseTime := lib.RandomStagger(c.Interval)
	next := time.After(initialPauseTime)
	for {
		select {
		case <-next:
			c.check()
			next = time.After(c.Interval)
		case <-c.stopCh:
			return
		}
	}
}

func (c *CheckOSService) doCheck() (string, error) {
	err := c.Client.Check(c.OSService)
	if err == nil {
		return api.HealthPassing, nil
	}
	if errors.Is(err, ErrOSServiceStatusCritical) {
		return api.HealthCritical, err
	}

	return api.HealthWarning, err
}

func (c *CheckOSService) check() {
	var out string
	var status string
	var err error

	waitCh := make(chan error, 1)
	go func() {
		status, err = c.doCheck()
		waitCh <- err
	}()

	timeout := 30 * time.Second
	if c.Timeout > 0 {
		timeout = c.Timeout
	}
	select {
	case <-time.After(timeout):
		msg := fmt.Sprintf("Timed out (%s) running check", timeout.String())
		c.Logger.Warn("Timed out running check",
			"check", c.CheckID.String(),
			"timeout", timeout.String(),
		)

		c.StatusHandler.updateCheck(c.CheckID, api.HealthCritical, msg)

		// Now wait for the process to exit so we never start another
		// instance concurrently.
		<-waitCh
		return

	case err = <-waitCh:
		// The process returned before the timeout, proceed normally
	}

	out = fmt.Sprintf("Service \"%s\" is healthy", c.OSService)
	if err != nil {
		c.Logger.Debug("Check failed",
			"check", c.CheckID.String(),
			"error", err,
		)
		out = err.Error()
	}
	c.StatusHandler.updateCheck(c.CheckID, status, out)
}

// StatusHandler keeps track of successive error/success counts and ensures
// that a status can be set to critical/passing only once the number of
// successive events reaches the given threshold.
type StatusHandler struct {
	inner                  CheckNotifier
	logger                 hclog.Logger
	successBeforePassing   int
	successCounter         int
	failuresBeforeWarning  int
	failuresBeforeCritical int
	failuresCounter        int
}

// NewStatusHandler sets the counter values to their thresholds in order to
// immediately update the status after the first check.
func NewStatusHandler(inner CheckNotifier, logger hclog.Logger, successBeforePassing, failuresBeforeWarning, failuresBeforeCritical int) *StatusHandler {
	return &StatusHandler{
		logger:                 logger,
		inner:                  inner,
		successBeforePassing:   successBeforePassing,
		successCounter:         successBeforePassing,
		failuresBeforeWarning:  failuresBeforeWarning,
		failuresBeforeCritical: failuresBeforeCritical,
		failuresCounter:        failuresBeforeCritical,
	}
}

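// updateCheck applies the success/failure thresholds to the given status and
// only forwards the update to the inner CheckNotifier once the corresponding
// threshold has been reached.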
func (s *StatusHandler) updateCheck(checkID structs.CheckID, status, output string) {

	if status == api.HealthPassing || status == api.HealthWarning {
		s.successCounter++
		s.failuresCounter = 0
		if s.successCounter >= s.successBeforePassing {
			s.logger.Debug("Check status updated",
				"check", checkID.String(),
				"status", status,
			)
			s.inner.UpdateCheck(checkID, status, output)
			return
		}
		s.logger.Warn("Check passed but has not reached success threshold",
			"check", checkID.String(),
			"status", status,
			"success_count", s.successCounter,
			"success_threshold", s.successBeforePassing,
		)
	} else {
		s.failuresCounter++
		s.successCounter = 0
		if s.failuresCounter >= s.failuresBeforeCritical {
			s.logger.Warn("Check is now critical", "check", checkID.String())
			s.inner.UpdateCheck(checkID, status, output)
			return
		}
		// Defaults to same value as failuresBeforeCritical if not set.
		if s.failuresCounter >= s.failuresBeforeWarning {
			s.logger.Warn("Check is now warning", "check", checkID.String())
			s.inner.UpdateCheck(checkID, api.HealthWarning, output)
			return
		}
		s.logger.Warn("Check failed but has not reached warning/failure threshold",
			"check", checkID.String(),
			"status", status,
			"failure_count", s.failuresCounter,
			"warning_threshold", s.failuresBeforeWarning,
			"failure_threshold", s.failuresBeforeCritical,
		)
	}
}