Merge pull request #10804 from hashicorp/dnephin/debug-filenames

debug: rename cluster.json -> members.json  and fix handling of Interrupt Signal
This commit is contained in:
Daniel Nephin 2021-08-18 13:18:29 -04:00 committed by GitHub
commit a98b5bc31c
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
9 changed files with 206 additions and 171 deletions

3
.changelog/10804.txt Normal file
View File

@ -0,0 +1,3 @@
```release-note:improvement
debug: rename cluster capture target to members, to be more consistent with the terms used by the API.
```

View File

@ -1,7 +1,9 @@
package api
import (
"context"
"fmt"
"io"
"io/ioutil"
"strconv"
)
@ -70,6 +72,26 @@ func (d *Debug) Profile(seconds int) ([]byte, error) {
return body, nil
}
// PProf returns a pprof profile for the specified number of seconds. The caller
// is responsible for closing the returned io.ReadCloser once all bytes are read.
func (d *Debug) PProf(ctx context.Context, name string, seconds int) (io.ReadCloser, error) {
	r := d.c.newRequest("GET", "/debug/pprof/"+name)
	r.ctx = ctx

	// Capture a profile for the specified number of seconds
	r.params.Set("seconds", strconv.Itoa(seconds))

	_, resp, err := d.c.doRequest(r)
	if err != nil {
		// Wrap with %w (not %s) so callers can inspect the cause with
		// errors.Is/errors.As; this matches error wrapping elsewhere in
		// this change set.
		return nil, fmt.Errorf("error making request: %w", err)
	}
	if resp.StatusCode != 200 {
		return nil, generateUnexpectedResponseCodeError(resp)
	}
	return resp.Body, nil
}
// Trace returns an execution trace
func (d *Debug) Trace(seconds int) ([]byte, error) {
r := d.c.newRequest("GET", "/debug/pprof/trace")

View File

@ -166,7 +166,7 @@ func init() {
Register("connect envoy pipe-bootstrap", func(ui cli.Ui) (cli.Command, error) { return pipebootstrap.New(ui), nil })
Register("connect expose", func(ui cli.Ui) (cli.Command, error) { return expose.New(ui), nil })
Register("connect redirect-traffic", func(ui cli.Ui) (cli.Command, error) { return redirecttraffic.New(ui), nil })
Register("debug", func(ui cli.Ui) (cli.Command, error) { return debug.New(ui, MakeShutdownCh()), nil })
Register("debug", func(ui cli.Ui) (cli.Command, error) { return debug.New(ui), nil })
Register("event", func(ui cli.Ui) (cli.Command, error) { return event.New(ui), nil })
Register("exec", func(ui cli.Ui) (cli.Command, error) { return exec.New(ui, MakeShutdownCh()), nil })
Register("force-leave", func(ui cli.Ui) (cli.Command, error) { return forceleave.New(ui), nil })

View File

@ -12,8 +12,10 @@ import (
"io"
"io/ioutil"
"os"
"os/signal"
"path/filepath"
"strings"
"syscall"
"time"
"golang.org/x/sync/errgroup"
@ -55,7 +57,7 @@ const (
debugProtocolVersion = 1
)
func New(ui cli.Ui, shutdownCh <-chan struct{}) *cmd {
func New(ui cli.Ui) *cmd {
ui = &cli.PrefixedUi{
OutputPrefix: "==> ",
InfoPrefix: " ",
@ -63,7 +65,7 @@ func New(ui cli.Ui, shutdownCh <-chan struct{}) *cmd {
Ui: ui,
}
c := &cmd{UI: ui, shutdownCh: shutdownCh}
c := &cmd{UI: ui}
c.init()
return c
}
@ -74,8 +76,6 @@ type cmd struct {
http *flags.HTTPFlags
help string
shutdownCh <-chan struct{}
// flags
interval time.Duration
duration time.Duration
@ -114,7 +114,7 @@ func (c *cmd) init() {
fmt.Sprintf("One or more types of information to capture. This can be used "+
"to capture a subset of information, and defaults to capturing "+
"everything available. Possible information for capture: %s. "+
"This can be repeated multiple times.", strings.Join(c.defaultTargets(), ", ")))
"This can be repeated multiple times.", strings.Join(defaultTargets, ", ")))
c.flags.DurationVar(&c.interval, "interval", debugInterval,
fmt.Sprintf("The interval in which to capture dynamic information such as "+
"telemetry, and profiling. Defaults to %s.", debugInterval))
@ -136,6 +136,9 @@ func (c *cmd) init() {
}
func (c *cmd) Run(args []string) int {
ctx, cancel := signal.NotifyContext(context.Background(), os.Interrupt, syscall.SIGTERM)
defer cancel()
if err := c.flags.Parse(args); err != nil {
c.UI.Error(fmt.Sprintf("Error parsing flags: %s", err))
return 1
@ -195,10 +198,14 @@ func (c *cmd) Run(args []string) int {
}
// Capture dynamic information from the target agent, blocking for duration
if c.configuredTarget("metrics") || c.configuredTarget("logs") || c.configuredTarget("pprof") {
if c.captureTarget(targetMetrics) || c.captureTarget(targetLogs) || c.captureTarget(targetProfiles) {
g := new(errgroup.Group)
g.Go(c.captureInterval)
g.Go(c.captureLongRunning)
g.Go(func() error {
return c.captureInterval(ctx)
})
g.Go(func() error {
return c.captureLongRunning(ctx)
})
err = g.Wait()
if err != nil {
c.UI.Error(fmt.Sprintf("Error encountered during collection: %v", err))
@ -264,11 +271,11 @@ func (c *cmd) prepare() (version string, err error) {
// If none are specified we will collect information from
// all by default
if len(c.capture) == 0 {
c.capture = c.defaultTargets()
c.capture = defaultTargets
}
for _, t := range c.capture {
if !c.allowedTarget(t) {
if !allowedTarget(t) {
return version, fmt.Errorf("target not found: %s", t)
}
}
@ -288,60 +295,52 @@ func (c *cmd) prepare() (version string, err error) {
// captureStatic captures static target information and writes it
// to the output path
func (c *cmd) captureStatic() error {
// Collect errors via multierror as we want to gracefully
// fail if an API is inaccessible
var errs error
// Collect the named outputs here
outputs := make(map[string]interface{})
// Capture host information
if c.configuredTarget("host") {
if c.captureTarget(targetHost) {
host, err := c.client.Agent().Host()
if err != nil {
errs = multierror.Append(errs, err)
}
outputs["host"] = host
if err := writeJSONFile(filepath.Join(c.output, targetHost+".json"), host); err != nil {
errs = multierror.Append(errs, err)
}
}
// Capture agent information
if c.configuredTarget("agent") {
if c.captureTarget(targetAgent) {
agent, err := c.client.Agent().Self()
if err != nil {
errs = multierror.Append(errs, err)
}
outputs["agent"] = agent
if err := writeJSONFile(filepath.Join(c.output, targetAgent+".json"), agent); err != nil {
errs = multierror.Append(errs, err)
}
}
// Capture cluster members information, including WAN
if c.configuredTarget("cluster") {
if c.captureTarget(targetMembers) {
members, err := c.client.Agent().Members(true)
if err != nil {
errs = multierror.Append(errs, err)
}
outputs["cluster"] = members
}
// Write all outputs to disk as JSON
for output, v := range outputs {
marshaled, err := json.MarshalIndent(v, "", "\t")
if err != nil {
errs = multierror.Append(errs, err)
}
err = ioutil.WriteFile(fmt.Sprintf("%s/%s.json", c.output, output), marshaled, 0644)
if err != nil {
if err := writeJSONFile(filepath.Join(c.output, targetMembers+".json"), members); err != nil {
errs = multierror.Append(errs, err)
}
}
return errs
}
// writeJSONFile marshals content as tab-indented JSON and writes the result
// to filename with 0644 permissions. It returns any marshaling or file-write
// error encountered.
func writeJSONFile(filename string, content interface{}) error {
	encoded, err := json.MarshalIndent(content, "", "\t")
	if err == nil {
		err = ioutil.WriteFile(filename, encoded, 0644)
	}
	return err
}
// captureInterval blocks for the duration of the command
// specified by the duration flag, capturing the dynamic
// targets at the interval specified
func (c *cmd) captureInterval() error {
func (c *cmd) captureInterval(ctx context.Context) error {
intervalChn := time.NewTicker(c.interval)
defer intervalChn.Stop()
durationChn := time.After(c.duration)
@ -366,7 +365,7 @@ func (c *cmd) captureInterval() error {
case <-durationChn:
intervalChn.Stop()
return nil
case <-c.shutdownCh:
case <-ctx.Done():
return errors.New("stopping collection due to shutdown signal")
}
}
@ -380,7 +379,7 @@ func captureShortLived(c *cmd) error {
if err != nil {
return err
}
if c.configuredTarget("pprof") {
if c.captureTarget(targetProfiles) {
g.Go(func() error {
return c.captureHeap(timestampDir)
})
@ -403,7 +402,7 @@ func (c *cmd) createTimestampDir(timestamp int64) (string, error) {
return timestampDir, nil
}
func (c *cmd) captureLongRunning() error {
func (c *cmd) captureLongRunning(ctx context.Context) error {
timestamp := time.Now().Local().Unix()
timestampDir, err := c.createTimestampDir(timestamp)
@ -417,26 +416,29 @@ func (c *cmd) captureLongRunning() error {
if s < 1 {
s = 1
}
if c.configuredTarget("pprof") {
if c.captureTarget(targetProfiles) {
g.Go(func() error {
return c.captureProfile(s, timestampDir)
// use ctx without a timeout to allow the profile to finish sending
return c.captureProfile(ctx, s, timestampDir)
})
g.Go(func() error {
return c.captureTrace(s, timestampDir)
// use ctx without a timeout to allow the trace to finish sending
return c.captureTrace(ctx, s, timestampDir)
})
}
if c.configuredTarget("logs") {
if c.captureTarget(targetLogs) {
g.Go(func() error {
return c.captureLogs(timestampDir)
ctx, cancel := context.WithTimeout(ctx, c.duration)
defer cancel()
return c.captureLogs(ctx, timestampDir)
})
}
if c.configuredTarget("metrics") {
// TODO: pass in context from caller
ctx, cancel := context.WithTimeout(context.Background(), c.duration)
defer cancel()
if c.captureTarget(targetMetrics) {
g.Go(func() error {
g.Go(func() error {
ctx, cancel := context.WithTimeout(ctx, c.duration)
defer cancel()
return c.captureMetrics(ctx, timestampDir)
})
}
@ -450,27 +452,40 @@ func (c *cmd) captureGoRoutines(timestampDir string) error {
return fmt.Errorf("failed to collect goroutine profile: %w", err)
}
err = ioutil.WriteFile(fmt.Sprintf("%s/goroutine.prof", timestampDir), gr, 0644)
return err
return ioutil.WriteFile(fmt.Sprintf("%s/goroutine.prof", timestampDir), gr, 0644)
}
func (c *cmd) captureTrace(s float64, timestampDir string) error {
trace, err := c.client.Debug().Trace(int(s))
if err != nil {
return fmt.Errorf("failed to collect trace: %w", err)
}
err = ioutil.WriteFile(fmt.Sprintf("%s/trace.out", timestampDir), trace, 0644)
return err
}
func (c *cmd) captureProfile(s float64, timestampDir string) error {
prof, err := c.client.Debug().Profile(int(s))
func (c *cmd) captureTrace(ctx context.Context, s float64, timestampDir string) error {
prof, err := c.client.Debug().PProf(ctx, "trace", int(s))
if err != nil {
return fmt.Errorf("failed to collect cpu profile: %w", err)
}
defer prof.Close()
err = ioutil.WriteFile(fmt.Sprintf("%s/profile.prof", timestampDir), prof, 0644)
r := bufio.NewReader(prof)
fh, err := os.Create(fmt.Sprintf("%s/trace.out", timestampDir))
if err != nil {
return err
}
defer fh.Close()
_, err = r.WriteTo(fh)
return err
}
// captureProfile collects a CPU profile from the target agent for s seconds
// and streams it to <timestampDir>/profile.prof. ctx cancels the in-flight
// HTTP request; the response body is always closed before returning.
func (c *cmd) captureProfile(ctx context.Context, s float64, timestampDir string) error {
	prof, err := c.client.Debug().PProf(ctx, "profile", int(s))
	if err != nil {
		return fmt.Errorf("failed to collect cpu profile: %w", err)
	}
	defer prof.Close()

	fh, err := os.Create(fmt.Sprintf("%s/profile.prof", timestampDir))
	if err != nil {
		return err
	}
	// NOTE(review): any error from the deferred Close (flushing buffered
	// writes) is discarded — confirm that is acceptable for debug output.
	defer fh.Close()

	// io.Copy streams the profile to disk in constant memory; the extra
	// bufio.Reader wrapper used previously added nothing, since Copy
	// already moves data in large chunks.
	_, err = io.Copy(fh, prof)
	return err
}
@ -480,19 +495,14 @@ func (c *cmd) captureHeap(timestampDir string) error {
return fmt.Errorf("failed to collect heap profile: %w", err)
}
err = ioutil.WriteFile(fmt.Sprintf("%s/heap.prof", timestampDir), heap, 0644)
return err
return ioutil.WriteFile(fmt.Sprintf("%s/heap.prof", timestampDir), heap, 0644)
}
func (c *cmd) captureLogs(timestampDir string) error {
endLogChn := make(chan struct{})
timeIsUp := time.After(c.duration)
logCh, err := c.client.Agent().Monitor("DEBUG", endLogChn, nil)
func (c *cmd) captureLogs(ctx context.Context, timestampDir string) error {
logCh, err := c.client.Agent().Monitor("DEBUG", ctx.Done(), nil)
if err != nil {
return err
}
// Close the log stream
defer close(endLogChn)
// Create the log file for writing
f, err := os.Create(fmt.Sprintf("%s/%s", timestampDir, "consul.log"))
@ -510,7 +520,7 @@ func (c *cmd) captureLogs(timestampDir string) error {
if _, err = f.WriteString(log + "\n"); err != nil {
return err
}
case <-timeIsUp:
case <-ctx.Done():
return nil
}
}
@ -538,23 +548,31 @@ func (c *cmd) captureMetrics(ctx context.Context, timestampDir string) error {
return nil
}
// allowedTarget returns a boolean if the target is able to be captured
func (c *cmd) allowedTarget(target string) bool {
for _, dt := range c.defaultTargets() {
// allowedTarget reports whether target is a recognized capture target name.
// Both the current default target names and deprecated aliases are accepted.
func allowedTarget(target string) bool {
	for _, known := range defaultTargets {
		if known == target {
			return true
		}
	}
	for _, alias := range deprecatedTargets {
		if alias == target {
			return true
		}
	}
	return false
}
// configuredTarget returns a boolean if the target is configured to be
// captured in the command
func (c *cmd) configuredTarget(target string) bool {
// captureTarget reports whether the named capture type was requested on the
// command line. The deprecated "cluster" target is treated as an alias for
// "members".
func (c *cmd) captureTarget(target string) bool {
	for _, requested := range c.capture {
		if requested == target || (target == targetMembers && requested == targetCluster) {
			return true
		}
	}
	return false
}
@ -676,33 +694,37 @@ func (c *cmd) createArchiveTemp(path string) (tempName string, err error) {
return tempName, nil
}
// defaultTargets specifies the list of all targets that
// will be captured by default
func (c *cmd) defaultTargets() []string {
return append(c.dynamicTargets(), c.staticTargets()...)
// Names of the capture targets accepted by the -capture flag. metrics, logs,
// and pprof are collected repeatedly while the command runs; host, agent, and
// members are captured once at startup.
const (
	targetMetrics  = "metrics"
	targetLogs     = "logs"
	targetProfiles = "pprof"
	targetHost     = "host"
	targetAgent    = "agent"
	targetMembers  = "members"
	// targetCluster is the now deprecated name for targetMembers
	targetCluster = "cluster"
)

// defaultTargets specifies the list of targets that will be captured by default
var defaultTargets = []string{
	targetMetrics,
	targetLogs,
	targetProfiles,
	targetHost,
	targetAgent,
	targetMembers,
}
// dynamicTargets returns all the supported targets
// that are retrieved at the interval specified
func (c *cmd) dynamicTargets() []string {
return []string{"metrics", "logs", "pprof"}
}
// staticTargets returns all the supported targets
// that are retrieved at the start of the command execution
func (c *cmd) staticTargets() []string {
return []string{"host", "agent", "cluster"}
}
// deprecatedTargets lists capture target names kept for backwards
// compatibility; targetCluster is accepted as an alias for targetMembers.
var deprecatedTargets = []string{targetCluster}
func (c *cmd) Synopsis() string {
return synopsis
return "Records a debugging archive for operators"
}
// Help returns the long-form usage text for the debug command.
func (c *cmd) Help() string {
	return c.help
}
const synopsis = "Records a debugging archive for operators"
const help = `
Usage: consul debug [options]

View File

@ -14,20 +14,19 @@ import (
"testing"
"time"
"github.com/google/pprof/profile"
"github.com/mitchellh/cli"
"github.com/stretchr/testify/require"
"github.com/google/pprof/profile"
"gotest.tools/v3/assert"
"gotest.tools/v3/fs"
"github.com/hashicorp/consul/agent"
"github.com/hashicorp/consul/sdk/testutil"
"github.com/hashicorp/consul/testrpc"
)
func TestDebugCommand_noTabs(t *testing.T) {
t.Parallel()
if strings.ContainsRune(New(cli.NewMockUi(), nil).Help(), '\t') {
func TestDebugCommand_Help_TextContainsNoTabs(t *testing.T) {
if strings.ContainsRune(New(cli.NewMockUi()).Help(), '\t') {
t.Fatal("help has tabs")
}
}
@ -47,7 +46,7 @@ func TestDebugCommand(t *testing.T) {
testrpc.WaitForLeader(t, a.RPC, "dc1")
ui := cli.NewMockUi()
cmd := New(ui, nil)
cmd := New(ui)
cmd.validateTiming = false
outputPath := fmt.Sprintf("%s/debug", testDir)
@ -63,6 +62,16 @@ func TestDebugCommand(t *testing.T) {
require.Equal(t, 0, code)
require.Equal(t, "", ui.ErrorWriter.String())
expected := fs.Expected(t,
fs.WithDir("debug",
fs.WithFile("agent.json", "", fs.MatchAnyFileContent),
fs.WithFile("host.json", "", fs.MatchAnyFileContent),
fs.WithFile("index.json", "", fs.MatchAnyFileContent),
fs.WithFile("members.json", "", fs.MatchAnyFileContent),
// TODO: make the sub-directory names predictable)
fs.MatchExtraFiles))
assert.Assert(t, fs.Equal(testDir, expected))
metricsFiles, err := filepath.Glob(fmt.Sprintf("%s/*/%s", outputPath, "metrics.json"))
require.NoError(t, err)
require.Len(t, metricsFiles, 1)
@ -83,7 +92,7 @@ func TestDebugCommand_Archive(t *testing.T) {
testrpc.WaitForLeader(t, a.RPC, "dc1")
ui := cli.NewMockUi()
cmd := New(ui, nil)
cmd := New(ui)
cmd.validateTiming = false
outputPath := fmt.Sprintf("%s/debug", testDir)
@ -127,15 +136,10 @@ func TestDebugCommand_Archive(t *testing.T) {
}
func TestDebugCommand_ArgsBad(t *testing.T) {
t.Parallel()
ui := cli.NewMockUi()
cmd := New(ui, nil)
cmd := New(ui)
args := []string{
"foo",
"bad",
}
args := []string{"foo", "bad"}
if code := cmd.Run(args); code == 0 {
t.Fatalf("should exit non-zero, got code: %d", code)
@ -149,7 +153,7 @@ func TestDebugCommand_ArgsBad(t *testing.T) {
func TestDebugCommand_InvalidFlags(t *testing.T) {
ui := cli.NewMockUi()
cmd := New(ui, nil)
cmd := New(ui)
cmd.validateTiming = false
outputPath := ""
@ -182,7 +186,7 @@ func TestDebugCommand_OutputPathBad(t *testing.T) {
testrpc.WaitForLeader(t, a.RPC, "dc1")
ui := cli.NewMockUi()
cmd := New(ui, nil)
cmd := New(ui)
cmd.validateTiming = false
outputPath := ""
@ -215,7 +219,7 @@ func TestDebugCommand_OutputPathExists(t *testing.T) {
testrpc.WaitForLeader(t, a.RPC, "dc1")
ui := cli.NewMockUi()
cmd := New(ui, nil)
cmd := New(ui)
cmd.validateTiming = false
outputPath := fmt.Sprintf("%s/debug", testDir)
@ -258,17 +262,17 @@ func TestDebugCommand_CaptureTargets(t *testing.T) {
"single": {
[]string{"agent"},
[]string{"agent.json"},
[]string{"host.json", "cluster.json"},
[]string{"host.json", "members.json"},
},
"static": {
[]string{"agent", "host", "cluster"},
[]string{"agent.json", "host.json", "cluster.json"},
[]string{"agent.json", "host.json", "members.json"},
[]string{"*/metrics.json"},
},
"metrics-only": {
[]string{"metrics"},
[]string{"*/metrics.json"},
[]string{"agent.json", "host.json", "cluster.json"},
[]string{"agent.json", "host.json", "members.json"},
},
"all-but-pprof": {
[]string{
@ -281,7 +285,7 @@ func TestDebugCommand_CaptureTargets(t *testing.T) {
[]string{
"host.json",
"agent.json",
"cluster.json",
"members.json",
"*/metrics.json",
"*/consul.log",
},
@ -300,7 +304,7 @@ func TestDebugCommand_CaptureTargets(t *testing.T) {
testrpc.WaitForLeader(t, a.RPC, "dc1")
ui := cli.NewMockUi()
cmd := New(ui, nil)
cmd := New(ui)
cmd.validateTiming = false
outputPath := fmt.Sprintf("%s/debug-%s", testDir, name)
@ -383,7 +387,7 @@ func TestDebugCommand_CaptureLogs(t *testing.T) {
testrpc.WaitForLeader(t, a.RPC, "dc1")
ui := cli.NewMockUi()
cmd := New(ui, nil)
cmd := New(ui)
cmd.validateTiming = false
outputPath := fmt.Sprintf("%s/debug-%s", testDir, name)
@ -476,7 +480,7 @@ func TestDebugCommand_ProfilesExist(t *testing.T) {
testrpc.WaitForLeader(t, a.RPC, "dc1")
ui := cli.NewMockUi()
cmd := New(ui, nil)
cmd := New(ui)
cmd.validateTiming = false
outputPath := fmt.Sprintf("%s/debug", testDir)
@ -518,65 +522,44 @@ func TestDebugCommand_ProfilesExist(t *testing.T) {
}
}
func TestDebugCommand_ValidateTiming(t *testing.T) {
if testing.Short() {
t.Skip("too slow for testing.Short")
}
func TestDebugCommand_Prepare_ValidateTiming(t *testing.T) {
cases := map[string]struct {
duration string
interval string
output string
code int
expected string
}{
"both": {
"20ms",
"10ms",
"duration must be longer",
1,
duration: "20ms",
interval: "10ms",
expected: "duration must be longer",
},
"short interval": {
"10s",
"10ms",
"interval must be longer",
1,
duration: "10s",
interval: "10ms",
expected: "interval must be longer",
},
"lower duration": {
"20s",
"30s",
"must be longer than interval",
1,
duration: "20s",
interval: "30s",
expected: "must be longer than interval",
},
}
for name, tc := range cases {
// Because we're only testing validation, we want to shut down
// the valid duration test to avoid hanging
shutdownCh := make(chan struct{})
t.Run(name, func(t *testing.T) {
ui := cli.NewMockUi()
cmd := New(ui)
a := agent.NewTestAgent(t, "")
defer a.Shutdown()
testrpc.WaitForLeader(t, a.RPC, "dc1")
args := []string{
"-duration=" + tc.duration,
"-interval=" + tc.interval,
}
err := cmd.flags.Parse(args)
require.NoError(t, err)
ui := cli.NewMockUi()
cmd := New(ui, shutdownCh)
args := []string{
"-http-addr=" + a.HTTPAddr(),
"-duration=" + tc.duration,
"-interval=" + tc.interval,
"-capture=agent",
}
code := cmd.Run(args)
if code != tc.code {
t.Errorf("%s: should exit %d, got code: %d", name, tc.code, code)
}
errOutput := ui.ErrorWriter.String()
if !strings.Contains(errOutput, tc.output) {
t.Errorf("%s: expected error output '%s', got '%q'", name, tc.output, errOutput)
}
_, err = cmd.prepare()
testutil.RequireErrorContains(t, err, tc.expected)
})
}
}
@ -596,7 +579,7 @@ func TestDebugCommand_DebugDisabled(t *testing.T) {
testrpc.WaitForLeader(t, a.RPC, "dc1")
ui := cli.NewMockUi()
cmd := New(ui, nil)
cmd := New(ui)
cmd.validateTiming = false
outputPath := fmt.Sprintf("%s/debug", testDir)

View File

@ -47,6 +47,7 @@ var registry map[string]Factory
// MakeShutdownCh returns a channel that can be used for shutdown notifications
// for commands. This channel will send a message for every interrupt or SIGTERM
// received.
// Deprecated: use signal.NotifyContext
func MakeShutdownCh() <-chan struct{} {
resultCh := make(chan struct{})
signalCh := make(chan os.Signal, 4)

1
go.mod
View File

@ -92,6 +92,7 @@ require (
google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55
google.golang.org/grpc v1.25.1
gopkg.in/square/go-jose.v2 v2.5.1
gotest.tools/v3 v3.0.3
k8s.io/api v0.16.9
k8s.io/apimachinery v0.16.9
k8s.io/client-go v0.16.9

3
go.sum
View File

@ -652,6 +652,7 @@ golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3
golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
golang.org/x/tools v0.0.0-20190624222133-a101b041ded4/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
golang.org/x/tools v0.0.0-20190907020128-2ca718005c18/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191108193012-7d206e10da11/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
@ -710,6 +711,8 @@ gopkg.in/yaml.v2 v2.2.8 h1:obN1ZagJSUGI0Ek/LBmuj4SNLPfIny3KsKFopxRdj10=
gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gotest.tools/v3 v3.0.3 h1:4AuOwCGf4lLR9u3YOe2awrHygurzhO/HeQ6laiA6Sx0=
gotest.tools/v3 v3.0.3/go.mod h1:Z7Lb0S5l+klDB31fvDQX8ss/FlKDxtlFlw3Oa8Ymbl8=
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=

View File

@ -8,13 +8,13 @@ page_title: 'Commands: Debug'
Command: `consul debug`
The `consul debug` command monitors a Consul agent for the specified period of
time, recording information about the agent, cluster, and environment to an archive
time, recording information about the agent, cluster membership, and environment to an archive
written to the current directory.
Providing support for complex issues encountered by Consul operators often
requires a large amount of debugging information to be retrieved. This command
aims to shortcut that coordination and provide a simple workflow for accessing
data about Consul agent, cluster, and environment to enable faster
data about Consul agent, cluster membership, and environment to enable faster
isolation and debugging of issues.
This command requires an `operator:read` ACL token in order to retrieve the
@ -75,7 +75,7 @@ information when `debug` is running. By default, it captures all information.
| --------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
| `agent` | Version and configuration information about the agent. |
| `host` | Information about resources on the host running the target agent such as CPU, memory, and disk. |
| `cluster` | A list of all the WAN and LAN members in the cluster. |
| `members` | A list of all the WAN and LAN members in the cluster. |
| `metrics` | Metrics from the in-memory metrics endpoint in the target, captured at the interval. |
| `logs` | `DEBUG` level logs for the target agent, captured for the duration. |
| `pprof` | Golang heap, CPU, goroutine, and trace profiling. CPU and traces are captured for `duration` in a single file while heap and goroutine are separate snapshots for each `interval`. This information is not retrieved unless [`enable_debug`](/docs/agent/options#enable_debug) is set to `true` on the target agent or ACLs are enable and an ACL token with `operator:read` is provided. |