Merge pull request #4097 from hashicorp/remove-deprecated

Remove deprecated check/service fields and metric names
Jack Pearkes 2018-05-10 15:45:49 -07:00 committed by GitHub
commit 291e8b83ae
36 changed files with 62 additions and 261 deletions
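In practice this drops the last pre-1.0 spellings from agent configuration and telemetry: script checks must use the argument-vector field ("args" in HCL/JSON, ScriptArgs in Go), check definitions use "id" rather than "check_id"/"CheckID", camel-cased config keys such as "EnableTagOverride" are no longer translated, and the doubled "consul.consul.*" metric names along with telemetry.enable_deprecated_names are gone. A minimal sketch of the surviving check form, using the same structs.CheckType fields the updated tests exercise (values are illustrative, not defaults):

package main

import (
	"fmt"
	"time"

	"github.com/hashicorp/consul/agent/structs"
)

func main() {
	// Illustrative only: the deprecated single-string form
	//   &structs.CheckType{Script: "exit 0", Interval: 15 * time.Second}
	// no longer exists after this PR; the argument-vector field is the
	// only way to define a script check (written as "args" in config files).
	chk := &structs.CheckType{
		ScriptArgs: []string{"exit", "0"},
		Interval:   15 * time.Second,
	}
	fmt.Printf("%+v\n", chk)
}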

View File

@@ -143,11 +143,9 @@ func (m *aclManager) lookupACL(a *Agent, id string) (acl.ACL, error) {
cached = raw.(*aclCacheEntry)
}
if cached != nil && time.Now().Before(cached.Expires) {
-metrics.IncrCounter([]string{"consul", "acl", "cache_hit"}, 1)
metrics.IncrCounter([]string{"acl", "cache_hit"}, 1)
return cached.ACL, nil
}
-metrics.IncrCounter([]string{"consul", "acl", "cache_miss"}, 1)
metrics.IncrCounter([]string{"acl", "cache_miss"}, 1)
// At this point we might have a stale cached ACL, or none at all, so
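The pattern above repeats through the rest of the diff: every call site that emitted a metric under both the old and new name loses the copy whose key starts with "consul". A short go-metrics sketch of why those keys were redundant, assuming the default "consul" metrics prefix (the library already prepends the service name, so the old keys surfaced as consul.consul.*):

package main

import (
	"fmt"
	"time"

	metrics "github.com/armon/go-metrics"
)

func main() {
	sink := metrics.NewInmemSink(10*time.Second, time.Minute)
	cfg := metrics.DefaultConfig("consul") // same default prefix Consul uses
	cfg.EnableHostname = false
	if _, err := metrics.NewGlobal(cfg, sink); err != nil {
		panic(err)
	}

	// Kept by this PR: emitted as "consul.acl.cache_hit".
	metrics.IncrCounter([]string{"acl", "cache_hit"}, 1)
	// Removed by this PR: the extra "consul" element doubled the prefix and
	// surfaced as "consul.consul.acl.cache_hit".
	metrics.IncrCounter([]string{"consul", "acl", "cache_hit"}, 1)

	for _, interval := range sink.Data() {
		for name := range interval.Counters {
			fmt.Println(name)
		}
	}
}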

View File

@@ -1823,11 +1823,6 @@ func (a *Agent) AddCheck(check *structs.HealthCheck, chkType *structs.CheckType,
check.CheckID, checks.MinInterval))
chkType.Interval = checks.MinInterval
}
-if chkType.Script != "" {
-a.logger.Printf("[WARN] agent: check %q has the 'script' field, which has been deprecated "+
-"and replaced with the 'args' field. See https://www.consul.io/docs/agent/checks.html",
-check.CheckID)
-}
if a.dockerClient == nil {
dc, err := checks.NewDockerClient(os.Getenv("DOCKER_HOST"), checks.BufSize)
@@ -1844,7 +1839,6 @@ func (a *Agent) AddCheck(check *structs.HealthCheck, chkType *structs.CheckType,
CheckID: check.CheckID,
DockerContainerID: chkType.DockerContainerID,
Shell: chkType.Shell,
-Script: chkType.Script,
ScriptArgs: chkType.ScriptArgs,
Interval: chkType.Interval,
Logger: a.logger,
@@ -1866,16 +1860,10 @@ func (a *Agent) AddCheck(check *structs.HealthCheck, chkType *structs.CheckType,
check.CheckID, checks.MinInterval)
chkType.Interval = checks.MinInterval
}
-if chkType.Script != "" {
-a.logger.Printf("[WARN] agent: check %q has the 'script' field, which has been deprecated "+
-"and replaced with the 'args' field. See https://www.consul.io/docs/agent/checks.html",
-check.CheckID)
-}
monitor := &checks.CheckMonitor{
Notify: a.State,
CheckID: check.CheckID,
-Script: chkType.Script,
ScriptArgs: chkType.ScriptArgs,
Interval: chkType.Interval,
Timeout: chkType.Timeout,

View File

@@ -716,14 +716,6 @@ func TestAgent_RegisterCheck_Scripts(t *testing.T) {
name string
check map[string]interface{}
}{
-{
-"< Consul 1.0.0",
-map[string]interface{}{
-"Name": "test",
-"Interval": "2s",
-"Script": "true",
-},
-},
{
"== Consul 1.0.0",
map[string]interface{}{

View File

@@ -651,7 +651,7 @@ func TestAgent_AddCheck(t *testing.T) {
Status: api.HealthCritical,
}
chk := &structs.CheckType{
-Script: "exit 0",
+ScriptArgs: []string{"exit", "0"},
Interval: 15 * time.Second,
}
err := a.AddCheck(health, chk, false, "")
@@ -690,7 +690,7 @@ func TestAgent_AddCheck_StartPassing(t *testing.T) {
Status: api.HealthPassing,
}
chk := &structs.CheckType{
-Script: "exit 0",
+ScriptArgs: []string{"exit", "0"},
Interval: 15 * time.Second,
}
err := a.AddCheck(health, chk, false, "")
@@ -729,7 +729,7 @@ func TestAgent_AddCheck_MinInterval(t *testing.T) {
Status: api.HealthCritical,
}
chk := &structs.CheckType{
-Script: "exit 0",
+ScriptArgs: []string{"exit", "0"},
Interval: time.Microsecond,
}
err := a.AddCheck(health, chk, false, "")
@@ -764,7 +764,7 @@ func TestAgent_AddCheck_MissingService(t *testing.T) {
ServiceID: "baz",
}
chk := &structs.CheckType{
-Script: "exit 0",
+ScriptArgs: []string{"exit", "0"},
Interval: time.Microsecond,
}
err := a.AddCheck(health, chk, false, "")
@@ -829,7 +829,7 @@ func TestAgent_AddCheck_ExecDisable(t *testing.T) {
Status: api.HealthCritical,
}
chk := &structs.CheckType{
-Script: "exit 0",
+ScriptArgs: []string{"exit", "0"},
Interval: 15 * time.Second,
}
err := a.AddCheck(health, chk, false, "")
@@ -904,7 +904,7 @@ func TestAgent_RemoveCheck(t *testing.T) {
Status: api.HealthCritical,
}
chk := &structs.CheckType{
-Script: "exit 0",
+ScriptArgs: []string{"exit", "0"},
Interval: 15 * time.Second,
}
err := a.AddCheck(health, chk, false, "")
@@ -1315,7 +1315,7 @@ func TestAgent_PersistCheck(t *testing.T) {
Status: api.HealthPassing,
}
chkType := &structs.CheckType{
-Script: "/bin/true",
+ScriptArgs: []string{"/bin/true"},
Interval: 10 * time.Second,
}
@@ -1473,7 +1473,7 @@ func TestAgent_PurgeCheckOnDuplicate(t *testing.T) {
id = "mem"
name = "memory check"
notes = "my cool notes"
-script = "/bin/check-redis.py"
+args = ["/bin/check-redis.py"]
interval = "30s"
}
`)

View File

@@ -511,14 +511,6 @@ func (b *Builder) Build() (rt RuntimeConfig, err error) {
}
}
-// Add a filter rule if needed for enabling the deprecated metric names
-enableDeprecatedNames := b.boolVal(c.Telemetry.EnableDeprecatedNames)
-if enableDeprecatedNames {
-telemetryAllowedPrefixes = append(telemetryAllowedPrefixes, "consul.consul.")
-} else {
-telemetryBlockedPrefixes = append(telemetryBlockedPrefixes, "consul.consul.")
-}
// raft performance scaling
performanceRaftMultiplier := b.intVal(c.Performance.RaftMultiplier)
if performanceRaftMultiplier < 1 || uint(performanceRaftMultiplier) > consul.MaxRaftMultiplier {
@@ -967,7 +959,6 @@ func (b *Builder) checkVal(v *CheckDefinition) *structs.CheckDefinition {
ServiceID: b.stringVal(v.ServiceID),
Token: b.stringVal(v.Token),
Status: b.stringVal(v.Status),
-Script: b.stringVal(v.Script),
ScriptArgs: v.ScriptArgs,
HTTP: b.stringVal(v.HTTP),
Header: v.Header,
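With the doubled names gone, the builder no longer needs to seed an allow or block rule for "consul.consul."; only the operator-supplied telemetry.prefix_filter rules land in the filter lists. A rough sketch of that mapping, expressed directly against go-metrics' filter fields (the prefixes here are examples, not defaults, and this is not the builder's actual code):

package main

import (
	"fmt"

	metrics "github.com/armon/go-metrics"
)

func main() {
	// telemetry { prefix_filter = ["+consul.rpc", "-consul.http"] } roughly
	// becomes the following go-metrics filter configuration. Before this PR
	// the builder also appended "consul.consul." to AllowedPrefixes or
	// BlockedPrefixes, depending on telemetry.enable_deprecated_names.
	cfg := metrics.DefaultConfig("consul")
	cfg.AllowedPrefixes = []string{"consul.rpc"}
	cfg.BlockedPrefixes = []string{"consul.http"}
	fmt.Println(cfg.AllowedPrefixes, cfg.BlockedPrefixes)
}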

View File

@@ -94,17 +94,14 @@ func Parse(data string, format string) (c Config, err error) {
// CamelCase and snake_case. Since changing either format would break
// existing setups we have to support both and slowly transition to one of
// the formats. Also, there is at least one case where we use the "wrong"
-// key and want to map that to the new key to support deprecation
-// (`check.id` vs `service.check.CheckID`) See [GH-3179]. TranslateKeys
-// maps potentially CamelCased values to the snake_case that is used in the
-// config file parser. If both the CamelCase and snake_case values are set,
-// the snake_case value is used and the other value is discarded.
+// key and want to map that to the new key to support deprecation -
+// see [GH-3179]. TranslateKeys maps potentially CamelCased values to the
+// snake_case that is used in the config file parser. If both the CamelCase
+// and snake_case values are set the snake_case value is used and the other
+// value is discarded.
TranslateKeys(m, map[string]string{
-"check_id": "id",
-"checkid": "id",
"deregistercriticalserviceafter": "deregister_critical_service_after",
"dockercontainerid": "docker_container_id",
-"enabletagoverride": "enable_tag_override",
"scriptargs": "args",
"serviceid": "service_id",
"tlsskipverify": "tls_skip_verify",
@@ -334,7 +331,6 @@ type CheckDefinition struct {
ServiceID *string `json:"service_id,omitempty" hcl:"service_id" mapstructure:"service_id"`
Token *string `json:"token,omitempty" hcl:"token" mapstructure:"token"`
Status *string `json:"status,omitempty" hcl:"status" mapstructure:"status"`
-Script *string `json:"script,omitempty" hcl:"script" mapstructure:"script"`
ScriptArgs []string `json:"args,omitempty" hcl:"args" mapstructure:"args"`
HTTP *string `json:"http,omitempty" hcl:"http" mapstructure:"http"`
Header map[string][]string `json:"header,omitempty" hcl:"header" mapstructure:"header"`
@@ -398,7 +394,6 @@ type Telemetry struct {
PrometheusRetentionTime *string `json:"prometheus_retention_time,omitempty" hcl:"prometheus_retention_time" mapstructure:"prometheus_retention_time"`
StatsdAddr *string `json:"statsd_address,omitempty" hcl:"statsd_address" mapstructure:"statsd_address"`
StatsiteAddr *string `json:"statsite_address,omitempty" hcl:"statsite_address" mapstructure:"statsite_address"`
-EnableDeprecatedNames *bool `json:"enable_deprecated_names" hcl:"enable_deprecated_names" mapstructure:"enable_deprecated_names"`
}
type Ports struct {
@@ -410,30 +405,6 @@ type Ports struct {
Server *int `json:"server,omitempty" hcl:"server" mapstructure:"server"`
}
-type RetryJoinAzure struct {
-ClientID *string `json:"client_id,omitempty" hcl:"client_id" mapstructure:"client_id"`
-SecretAccessKey *string `json:"secret_access_key,omitempty" hcl:"secret_access_key" mapstructure:"secret_access_key"`
-SubscriptionID *string `json:"subscription_id,omitempty" hcl:"subscription_id" mapstructure:"subscription_id"`
-TagName *string `json:"tag_name,omitempty" hcl:"tag_name" mapstructure:"tag_name"`
-TagValue *string `json:"tag_value,omitempty" hcl:"tag_value" mapstructure:"tag_value"`
-TenantID *string `json:"tenant_id,omitempty" hcl:"tenant_id" mapstructure:"tenant_id"`
-}
-type RetryJoinEC2 struct {
-AccessKeyID *string `json:"access_key_id,omitempty" hcl:"access_key_id" mapstructure:"access_key_id"`
-Region *string `json:"region,omitempty" hcl:"region" mapstructure:"region"`
-SecretAccessKey *string `json:"secret_access_key,omitempty" hcl:"secret_access_key" mapstructure:"secret_access_key"`
-TagKey *string `json:"tag_key,omitempty" hcl:"tag_key" mapstructure:"tag_key"`
-TagValue *string `json:"tag_value,omitempty" hcl:"tag_value" mapstructure:"tag_value"`
-}
-type RetryJoinGCE struct {
-CredentialsFile *string `json:"credentials_file,omitempty" hcl:"credentials_file" mapstructure:"credentials_file"`
-ProjectName *string `json:"project_name,omitempty" hcl:"project_name" mapstructure:"project_name"`
-TagValue *string `json:"tag_value,omitempty" hcl:"tag_value" mapstructure:"tag_value"`
-ZonePattern *string `json:"zone_pattern,omitempty" hcl:"zone_pattern" mapstructure:"zone_pattern"`
-}
type UnixSocket struct {
Group *string `json:"group,omitempty" hcl:"group" mapstructure:"group"`
Mode *string `json:"mode,omitempty" hcl:"mode" mapstructure:"mode"`
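The comment in the hunk above describes the only behavior the translation table needs: alternate spellings are renamed to the canonical snake_case key, and the canonical key wins when both are present. A stand-alone sketch of that contract (this is not Consul's TranslateKeys implementation, just an illustration of what the comment states):

package main

import (
	"fmt"
	"strings"
)

// translateKeys renames alternate spellings of config keys to their canonical
// form, recursing into nested maps. If the canonical key is already present
// its value is kept and the alternate spelling is dropped.
func translateKeys(m map[string]interface{}, dict map[string]string) {
	for k, v := range m {
		if nested, ok := v.(map[string]interface{}); ok {
			translateKeys(nested, dict)
		}
		canonical, ok := dict[strings.ToLower(k)]
		if !ok || canonical == k {
			continue
		}
		if _, exists := m[canonical]; !exists {
			m[canonical] = v
		}
		delete(m, k)
	}
}

func main() {
	check := map[string]interface{}{
		"ScriptArgs": []string{"/bin/check-redis.py"},
		"interval":   "30s",
	}
	translateKeys(check, map[string]string{"scriptargs": "args"})
	fmt.Println(check) // map[args:[/bin/check-redis.py] interval:30s]
}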

View File

@@ -1851,28 +1851,10 @@ func TestConfigFlagsAndEdgecases(t *testing.T) {
patch: func(rt *RuntimeConfig) {
rt.DataDir = dataDir
rt.TelemetryAllowedPrefixes = []string{"foo"}
-rt.TelemetryBlockedPrefixes = []string{"bar", "consul.consul."}
+rt.TelemetryBlockedPrefixes = []string{"bar"}
},
warns: []string{`Filter rule must begin with either '+' or '-': "nix"`},
},
-{
-desc: "telemetry.enable_deprecated_names adds allow rule for whitelist",
-args: []string{
-`-data-dir=` + dataDir,
-},
-json: []string{`{
-"telemetry": { "enable_deprecated_names": true, "filter_default": false }
-}`},
-hcl: []string{`
-telemetry = { enable_deprecated_names = true filter_default = false }
-`},
-patch: func(rt *RuntimeConfig) {
-rt.DataDir = dataDir
-rt.TelemetryFilterDefault = false
-rt.TelemetryAllowedPrefixes = []string{"consul.consul."}
-rt.TelemetryBlockedPrefixes = []string{}
-},
-},
{
desc: "encrypt has invalid key",
args: []string{
@@ -1923,17 +1905,17 @@ func TestConfigFlagsAndEdgecases(t *testing.T) {
`-data-dir=` + dataDir,
},
json: []string{
-`{ "check": { "name": "a", "script": "/bin/true" } }`,
-`{ "check": { "name": "b", "script": "/bin/false" } }`,
+`{ "check": { "name": "a", "args": ["/bin/true"] } }`,
+`{ "check": { "name": "b", "args": ["/bin/false"] } }`,
},
hcl: []string{
-`check = { name = "a" script = "/bin/true" }`,
-`check = { name = "b" script = "/bin/false" }`,
+`check = { name = "a" args = ["/bin/true"] }`,
+`check = { name = "b" args = ["/bin/false"] }`,
},
patch: func(rt *RuntimeConfig) {
rt.Checks = []*structs.CheckDefinition{
-&structs.CheckDefinition{Name: "a", Script: "/bin/true"},
-&structs.CheckDefinition{Name: "b", Script: "/bin/false"},
+&structs.CheckDefinition{Name: "a", ScriptArgs: []string{"/bin/true"}},
+&structs.CheckDefinition{Name: "b", ScriptArgs: []string{"/bin/false"}},
}
rt.DataDir = dataDir
},
@@ -2026,9 +2008,9 @@ func TestConfigFlagsAndEdgecases(t *testing.T) {
"service": {
"name": "a",
"port": 80,
-"EnableTagOverride": true,
+"enable_tag_override": true,
"check": {
-"CheckID": "x",
+"id": "x",
"name": "y",
"DockerContainerID": "z",
"DeregisterCriticalServiceAfter": "10s",
@@ -2041,9 +2023,9 @@ func TestConfigFlagsAndEdgecases(t *testing.T) {
`service = {
name = "a"
port = 80
-EnableTagOverride = true
+enable_tag_override = true
check = {
-CheckID = "x"
+id = "x"
name = "y"
DockerContainerID = "z"
DeregisterCriticalServiceAfter = "10s"
@@ -2305,7 +2287,6 @@ func TestFullConfig(t *testing.T) {
"service_id": "L8G0QNmR",
"token": "oo4BCTgJ",
"status": "qLykAl5u",
-"script": "dhGfIF8n",
"args": ["f3BemRjy", "e5zgpef7"],
"http": "29B93haH",
"header": {
@@ -2330,7 +2311,6 @@ func TestFullConfig(t *testing.T) {
"service_id": "lSulPcyz",
"token": "toO59sh8",
"status": "9RlWsXMV",
-"script": "8qbd8tWw",
"args": ["4BAJttck", "4D2NPtTQ"],
"http": "dohLcyQ2",
"header": {
@@ -2354,7 +2334,6 @@ func TestFullConfig(t *testing.T) {
"service_id": "CmUUcRna",
"token": "a3nQzHuy",
"status": "irj26nf3",
-"script": "FJsI1oXt",
"args": ["9s526ogY", "gSlOHj1w"],
"http": "yzhgsQ7Y",
"header": {
@@ -2485,11 +2464,10 @@ func TestFullConfig(t *testing.T) {
"port": 24237,
"enable_tag_override": true,
"check": {
-"check_id": "RMi85Dv8",
+"id": "RMi85Dv8",
"name": "iehanzuq",
"status": "rCvn53TH",
"notes": "fti5lfF3",
-"script": "rtj34nfd",
"args": ["16WRUmwS", "QWk7j7ae"],
"http": "dl3Fgme3",
"header": {
@@ -2512,7 +2490,6 @@ func TestFullConfig(t *testing.T) {
"name": "sgV4F7Pk",
"notes": "yP5nKbW0",
"status": "7oLMEyfu",
-"script": "NlUQ3nTE",
"args": ["5wEZtZpv", "0Ihyk8cS"],
"http": "KyDjGY9H",
"header": {
@@ -2534,7 +2511,6 @@ func TestFullConfig(t *testing.T) {
"name": "IEqrzrsd",
"notes": "SVqApqeM",
"status": "XXkVoZXt",
-"script": "IXLZTM6E",
"args": ["wD05Bvao", "rLYB7kQC"],
"http": "kyICZsn8",
"header": {
@@ -2563,11 +2539,10 @@ func TestFullConfig(t *testing.T) {
"port": 72219,
"enable_tag_override": true,
"check": {
-"check_id": "qmfeO5if",
+"id": "qmfeO5if",
"name": "atDGP7n5",
"status": "pDQKEhWL",
"notes": "Yt8EDLev",
-"script": "MDu7wjlD",
"args": ["81EDZLPa", "bPY5X8xd"],
"http": "qzHYvmJO",
"header": {
@@ -2599,7 +2574,6 @@ func TestFullConfig(t *testing.T) {
"name": "9OOS93ne",
"notes": "CQy86DH0",
"status": "P0SWDvrk",
-"script": "6BhLJ7R9",
"args": ["EXvkYIuG", "BATOyt6h"],
"http": "u97ByEiW",
"header": {
@@ -2621,7 +2595,6 @@ func TestFullConfig(t *testing.T) {
"name": "PQSaPWlT",
"notes": "jKChDOdl",
"status": "5qFz6OZn",
-"script": "PbdxFZ3K",
"args": ["NMtYWlT9", "vj74JXsm"],
"http": "1LBDJhw4",
"header": {
@@ -2669,7 +2642,6 @@ func TestFullConfig(t *testing.T) {
"dogstatsd_tags": [ "3N81zSUB","Xtj8AnXZ" ],
"filter_default": true,
"prefix_filter": [ "+oJotS8XJ","-cazlEhGn" ],
-"enable_deprecated_names": true,
"metrics_prefix": "ftO6DySn",
"prometheus_retention_time": "15s",
"statsd_address": "drce87cy",
@@ -2746,7 +2718,6 @@ func TestFullConfig(t *testing.T) {
service_id = "L8G0QNmR"
token = "oo4BCTgJ"
status = "qLykAl5u"
-script = "dhGfIF8n"
args = ["f3BemRjy", "e5zgpef7"]
http = "29B93haH"
header = {
@@ -2771,7 +2742,6 @@ func TestFullConfig(t *testing.T) {
service_id = "lSulPcyz"
token = "toO59sh8"
status = "9RlWsXMV"
-script = "8qbd8tWw"
args = ["4BAJttck", "4D2NPtTQ"]
http = "dohLcyQ2"
header = {
@@ -2795,7 +2765,6 @@ func TestFullConfig(t *testing.T) {
service_id = "CmUUcRna"
token = "a3nQzHuy"
status = "irj26nf3"
-script = "FJsI1oXt"
args = ["9s526ogY", "gSlOHj1w"]
http = "yzhgsQ7Y"
header = {
@@ -2926,11 +2895,10 @@ func TestFullConfig(t *testing.T) {
port = 24237
enable_tag_override = true
check = {
-check_id = "RMi85Dv8"
+id = "RMi85Dv8"
name = "iehanzuq"
status = "rCvn53TH"
notes = "fti5lfF3"
-script = "rtj34nfd"
args = ["16WRUmwS", "QWk7j7ae"]
http = "dl3Fgme3"
header = {
@@ -2953,7 +2921,6 @@ func TestFullConfig(t *testing.T) {
name = "sgV4F7Pk"
notes = "yP5nKbW0"
status = "7oLMEyfu"
-script = "NlUQ3nTE"
args = ["5wEZtZpv", "0Ihyk8cS"]
http = "KyDjGY9H"
header = {
@@ -2975,7 +2942,6 @@ func TestFullConfig(t *testing.T) {
name = "IEqrzrsd"
notes = "SVqApqeM"
status = "XXkVoZXt"
-script = "IXLZTM6E"
args = ["wD05Bvao", "rLYB7kQC"]
http = "kyICZsn8"
header = {
@@ -3004,11 +2970,10 @@ func TestFullConfig(t *testing.T) {
port = 72219
enable_tag_override = true
check = {
-check_id = "qmfeO5if"
+id = "qmfeO5if"
name = "atDGP7n5"
status = "pDQKEhWL"
notes = "Yt8EDLev"
-script = "MDu7wjlD"
args = ["81EDZLPa", "bPY5X8xd"]
http = "qzHYvmJO"
header = {
@@ -3040,7 +3005,6 @@ func TestFullConfig(t *testing.T) {
name = "9OOS93ne"
notes = "CQy86DH0"
status = "P0SWDvrk"
-script = "6BhLJ7R9"
args = ["EXvkYIuG", "BATOyt6h"]
http = "u97ByEiW"
header = {
@@ -3062,7 +3026,6 @@ func TestFullConfig(t *testing.T) {
name = "PQSaPWlT"
notes = "jKChDOdl"
status = "5qFz6OZn"
-script = "PbdxFZ3K"
args = ["NMtYWlT9", "vj74JXsm"]
http = "1LBDJhw4"
header = {
@@ -3110,7 +3073,6 @@ func TestFullConfig(t *testing.T) {
dogstatsd_tags = [ "3N81zSUB","Xtj8AnXZ" ]
filter_default = true
prefix_filter = [ "+oJotS8XJ","-cazlEhGn" ]
-enable_deprecated_names = true
metrics_prefix = "ftO6DySn"
prometheus_retention_time = "15s"
statsd_address = "drce87cy"
@@ -3326,7 +3288,6 @@ func TestFullConfig(t *testing.T) {
ServiceID: "lSulPcyz",
Token: "toO59sh8",
Status: "9RlWsXMV",
-Script: "8qbd8tWw",
ScriptArgs: []string{"4BAJttck", "4D2NPtTQ"},
HTTP: "dohLcyQ2",
Header: map[string][]string{
@@ -3350,7 +3311,6 @@ func TestFullConfig(t *testing.T) {
ServiceID: "CmUUcRna",
Token: "a3nQzHuy",
Status: "irj26nf3",
-Script: "FJsI1oXt",
ScriptArgs: []string{"9s526ogY", "gSlOHj1w"},
HTTP: "yzhgsQ7Y",
Header: map[string][]string{
@@ -3374,7 +3334,6 @@ func TestFullConfig(t *testing.T) {
ServiceID: "L8G0QNmR",
Token: "oo4BCTgJ",
Status: "qLykAl5u",
-Script: "dhGfIF8n",
ScriptArgs: []string{"f3BemRjy", "e5zgpef7"},
HTTP: "29B93haH",
Header: map[string][]string{
@@ -3494,7 +3453,6 @@ func TestFullConfig(t *testing.T) {
Name: "atDGP7n5",
Status: "pDQKEhWL",
Notes: "Yt8EDLev",
-Script: "MDu7wjlD",
ScriptArgs: []string{"81EDZLPa", "bPY5X8xd"},
HTTP: "qzHYvmJO",
Header: map[string][]string{
@@ -3527,7 +3485,6 @@ func TestFullConfig(t *testing.T) {
Name: "9OOS93ne",
Notes: "CQy86DH0",
Status: "P0SWDvrk",
-Script: "6BhLJ7R9",
ScriptArgs: []string{"EXvkYIuG", "BATOyt6h"},
HTTP: "u97ByEiW",
Header: map[string][]string{
@@ -3549,7 +3506,6 @@ func TestFullConfig(t *testing.T) {
Name: "PQSaPWlT",
Notes: "jKChDOdl",
Status: "5qFz6OZn",
-Script: "PbdxFZ3K",
ScriptArgs: []string{"NMtYWlT9", "vj74JXsm"},
HTTP: "1LBDJhw4",
Header: map[string][]string{
@@ -3583,7 +3539,6 @@ func TestFullConfig(t *testing.T) {
Name: "sgV4F7Pk",
Notes: "yP5nKbW0",
Status: "7oLMEyfu",
-Script: "NlUQ3nTE",
ScriptArgs: []string{"5wEZtZpv", "0Ihyk8cS"},
HTTP: "KyDjGY9H",
Header: map[string][]string{
@@ -3605,7 +3560,6 @@ func TestFullConfig(t *testing.T) {
Name: "IEqrzrsd",
Notes: "SVqApqeM",
Status: "XXkVoZXt",
-Script: "IXLZTM6E",
ScriptArgs: []string{"wD05Bvao", "rLYB7kQC"},
HTTP: "kyICZsn8",
Header: map[string][]string{
@@ -3627,7 +3581,6 @@ func TestFullConfig(t *testing.T) {
Name: "iehanzuq",
Status: "rCvn53TH",
Notes: "fti5lfF3",
-Script: "rtj34nfd",
ScriptArgs: []string{"16WRUmwS", "QWk7j7ae"},
HTTP: "dl3Fgme3",
Header: map[string][]string{
@@ -3673,7 +3626,7 @@ func TestFullConfig(t *testing.T) {
TelemetryDogstatsdAddr: "0wSndumK",
TelemetryDogstatsdTags: []string{"3N81zSUB", "Xtj8AnXZ"},
TelemetryFilterDefault: true,
-TelemetryAllowedPrefixes: []string{"oJotS8XJ", "consul.consul."},
+TelemetryAllowedPrefixes: []string{"oJotS8XJ"},
TelemetryBlockedPrefixes: []string{"cazlEhGn"},
TelemetryMetricsPrefix: "ftO6DySn",
TelemetryPrometheusRetentionTime: 15 * time.Second,
@@ -4047,7 +4000,6 @@ func TestSanitize(t *testing.T) {
"Method": "",
"Name": "zoo",
"Notes": "",
-"Script": "",
"ScriptArgs": [],
"ServiceID": "",
"Shell": "",
@@ -4179,7 +4131,6 @@ func TestSanitize(t *testing.T) {
"Method": "",
"Name": "blurb",
"Notes": "",
-"Script": "",
"ScriptArgs": [],
"Shell": "",
"Status": "",

View File

@@ -41,7 +41,6 @@ type aclCacheEntry struct {
// assumes its running in the ACL datacenter, or in a non-ACL datacenter when
// using its replicated ACLs during an outage.
func (s *Server) aclLocalFault(id string) (string, string, error) {
-defer metrics.MeasureSince([]string{"consul", "acl", "fault"}, time.Now())
defer metrics.MeasureSince([]string{"acl", "fault"}, time.Now())
// Query the state store.
@@ -75,7 +74,6 @@ func (s *Server) resolveToken(id string) (acl.ACL, error) {
if len(authDC) == 0 {
return nil, nil
}
-defer metrics.MeasureSince([]string{"consul", "acl", "resolveToken"}, time.Now())
defer metrics.MeasureSince([]string{"acl", "resolveToken"}, time.Now())
// Handle the anonymous token
@@ -159,11 +157,9 @@ func (c *aclCache) lookupACL(id, authDC string) (acl.ACL, error) {
// Check for live cache.
if cached != nil && time.Now().Before(cached.Expires) {
-metrics.IncrCounter([]string{"consul", "acl", "cache_hit"}, 1)
metrics.IncrCounter([]string{"acl", "cache_hit"}, 1)
return cached.ACL, nil
}
-metrics.IncrCounter([]string{"consul", "acl", "cache_miss"}, 1)
metrics.IncrCounter([]string{"acl", "cache_miss"}, 1)
// Attempt to refresh the policy from the ACL datacenter via an RPC.
@@ -226,7 +222,6 @@ func (c *aclCache) lookupACL(id, authDC string) (acl.ACL, error) {
// Fake up an ACL datacenter reply and inject it into the cache.
// Note we use the local TTL here, so this'll be used for that
// amount of time even once the ACL datacenter becomes available.
-metrics.IncrCounter([]string{"consul", "acl", "replication_hit"}, 1)
metrics.IncrCounter([]string{"acl", "replication_hit"}, 1)
reply.ETag = makeACLETag(parent, policy)
reply.TTL = c.config.ACLTTL

View File

@@ -145,7 +145,6 @@ func (a *ACL) Apply(args *structs.ACLRequest, reply *string) error {
if done, err := a.srv.forward("ACL.Apply", args, args, reply); done {
return err
}
-defer metrics.MeasureSince([]string{"consul", "acl", "apply"}, time.Now())
defer metrics.MeasureSince([]string{"acl", "apply"}, time.Now())
// Verify we are allowed to serve this request

View File

@@ -149,7 +149,6 @@ func (s *Server) fetchLocalACLs() (structs.ACLs, error) {
// datacenter. The lastIndex parameter is a hint about which remote index we
// have replicated to, so this is expected to block until something changes.
func (s *Server) fetchRemoteACLs(lastRemoteIndex uint64) (*structs.IndexedACLs, error) {
-defer metrics.MeasureSince([]string{"consul", "leader", "fetchRemoteACLs"}, time.Now())
defer metrics.MeasureSince([]string{"leader", "fetchRemoteACLs"}, time.Now())
args := structs.DCSpecificRequest{
@@ -170,7 +169,6 @@ func (s *Server) fetchRemoteACLs(lastRemoteIndex uint64) (*structs.IndexedACLs,
// UpdateLocalACLs is given a list of changes to apply in order to bring the
// local ACLs in-line with the remote ACLs from the ACL datacenter.
func (s *Server) updateLocalACLs(changes structs.ACLRequests) error {
-defer metrics.MeasureSince([]string{"consul", "leader", "updateLocalACLs"}, time.Now())
defer metrics.MeasureSince([]string{"leader", "updateLocalACLs"}, time.Now())
minTimePerOp := time.Second / time.Duration(s.config.ACLReplicationApplyLimit)
@@ -218,7 +216,6 @@ func (s *Server) replicateACLs(lastRemoteIndex uint64) (uint64, error) {
// Measure everything after the remote query, which can block for long
// periods of time. This metric is a good measure of how expensive the
// replication process is.
-defer metrics.MeasureSince([]string{"consul", "leader", "replicateACLs"}, time.Now())
defer metrics.MeasureSince([]string{"leader", "replicateACLs"}, time.Now())
local, err := s.fetchLocalACLs()

View File

@@ -55,13 +55,10 @@ func (d *AutopilotDelegate) IsServer(m serf.Member) (*autopilot.ServerInfo, erro
// Heartbeat a metric for monitoring if we're the leader
func (d *AutopilotDelegate) NotifyHealth(health autopilot.OperatorHealthReply) {
if d.server.raft.State() == raft.Leader {
-metrics.SetGauge([]string{"consul", "autopilot", "failure_tolerance"}, float32(health.FailureTolerance))
metrics.SetGauge([]string{"autopilot", "failure_tolerance"}, float32(health.FailureTolerance))
if health.Healthy {
-metrics.SetGauge([]string{"consul", "autopilot", "healthy"}, 1)
metrics.SetGauge([]string{"autopilot", "healthy"}, 1)
} else {
-metrics.SetGauge([]string{"consul", "autopilot", "healthy"}, 0)
metrics.SetGauge([]string{"autopilot", "healthy"}, 0)
}
}

View File

@@ -24,7 +24,6 @@ func (c *Catalog) Register(args *structs.RegisterRequest, reply *struct{}) error
if done, err := c.srv.forward("Catalog.Register", args, args, reply); done {
return err
}
-defer metrics.MeasureSince([]string{"consul", "catalog", "register"}, time.Now())
defer metrics.MeasureSince([]string{"catalog", "register"}, time.Now())
// Verify the args.
@@ -117,7 +116,6 @@ func (c *Catalog) Deregister(args *structs.DeregisterRequest, reply *struct{}) e
if done, err := c.srv.forward("Catalog.Deregister", args, args, reply); done {
return err
}
-defer metrics.MeasureSince([]string{"consul", "catalog", "deregister"}, time.Now())
defer metrics.MeasureSince([]string{"catalog", "deregister"}, time.Now())
// Verify the args
@@ -279,19 +277,13 @@ func (c *Catalog) ServiceNodes(args *structs.ServiceSpecificRequest, reply *stru
// Provide some metrics
if err == nil {
-metrics.IncrCounterWithLabels([]string{"consul", "catalog", "service", "query"}, 1,
-[]metrics.Label{{Name: "service", Value: args.ServiceName}})
metrics.IncrCounterWithLabels([]string{"catalog", "service", "query"}, 1,
[]metrics.Label{{Name: "service", Value: args.ServiceName}})
if args.ServiceTag != "" {
-metrics.IncrCounterWithLabels([]string{"consul", "catalog", "service", "query-tag"}, 1,
-[]metrics.Label{{Name: "service", Value: args.ServiceName}, {Name: "tag", Value: args.ServiceTag}})
metrics.IncrCounterWithLabels([]string{"catalog", "service", "query-tag"}, 1,
[]metrics.Label{{Name: "service", Value: args.ServiceName}, {Name: "tag", Value: args.ServiceTag}})
}
if len(reply.ServiceNodes) == 0 {
-metrics.IncrCounterWithLabels([]string{"consul", "catalog", "service", "not-found"}, 1,
-[]metrics.Label{{Name: "service", Value: args.ServiceName}})
metrics.IncrCounterWithLabels([]string{"catalog", "service", "not-found"}, 1,
[]metrics.Label{{Name: "service", Value: args.ServiceName}})
}

View File

@@ -249,10 +249,8 @@ TRY:
}
// Enforce the RPC limit.
-metrics.IncrCounter([]string{"consul", "client", "rpc"}, 1)
metrics.IncrCounter([]string{"client", "rpc"}, 1)
if !c.rpcLimiter.Allow() {
-metrics.IncrCounter([]string{"consul", "client", "rpc", "exceeded"}, 1)
metrics.IncrCounter([]string{"client", "rpc", "exceeded"}, 1)
return structs.ErrRPCRateExceeded
}
@@ -293,10 +291,8 @@ func (c *Client) SnapshotRPC(args *structs.SnapshotRequest, in io.Reader, out io
}
// Enforce the RPC limit.
-metrics.IncrCounter([]string{"consul", "client", "rpc"}, 1)
metrics.IncrCounter([]string{"client", "rpc"}, 1)
if !c.rpcLimiter.Allow() {
-metrics.IncrCounter([]string{"consul", "client", "rpc", "exceeded"}, 1)
metrics.IncrCounter([]string{"client", "rpc", "exceeded"}, 1)
return structs.ErrRPCRateExceeded
}

View File

@@ -23,7 +23,6 @@ func init() {
}
func (c *FSM) applyRegister(buf []byte, index uint64) interface{} {
-defer metrics.MeasureSince([]string{"consul", "fsm", "register"}, time.Now())
defer metrics.MeasureSince([]string{"fsm", "register"}, time.Now())
var req structs.RegisterRequest
if err := structs.Decode(buf, &req); err != nil {
@@ -39,7 +38,6 @@ func (c *FSM) applyRegister(buf []byte, index uint64) interface{} {
}
func (c *FSM) applyDeregister(buf []byte, index uint64) interface{} {
-defer metrics.MeasureSince([]string{"consul", "fsm", "deregister"}, time.Now())
defer metrics.MeasureSince([]string{"fsm", "deregister"}, time.Now())
var req structs.DeregisterRequest
if err := structs.Decode(buf, &req); err != nil {
@@ -73,8 +71,6 @@ func (c *FSM) applyKVSOperation(buf []byte, index uint64) interface{} {
if err := structs.Decode(buf, &req); err != nil {
panic(fmt.Errorf("failed to decode request: %v", err))
}
-defer metrics.MeasureSinceWithLabels([]string{"consul", "fsm", "kvs"}, time.Now(),
-[]metrics.Label{{Name: "op", Value: string(req.Op)}})
defer metrics.MeasureSinceWithLabels([]string{"fsm", "kvs"}, time.Now(),
[]metrics.Label{{Name: "op", Value: string(req.Op)}})
switch req.Op {
@@ -120,8 +116,6 @@ func (c *FSM) applySessionOperation(buf []byte, index uint64) interface{} {
if err := structs.Decode(buf, &req); err != nil {
panic(fmt.Errorf("failed to decode request: %v", err))
}
-defer metrics.MeasureSinceWithLabels([]string{"consul", "fsm", "session"}, time.Now(),
-[]metrics.Label{{Name: "op", Value: string(req.Op)}})
defer metrics.MeasureSinceWithLabels([]string{"fsm", "session"}, time.Now(),
[]metrics.Label{{Name: "op", Value: string(req.Op)}})
switch req.Op {
@@ -143,8 +137,6 @@ func (c *FSM) applyACLOperation(buf []byte, index uint64) interface{} {
if err := structs.Decode(buf, &req); err != nil {
panic(fmt.Errorf("failed to decode request: %v", err))
}
-defer metrics.MeasureSinceWithLabels([]string{"consul", "fsm", "acl"}, time.Now(),
-[]metrics.Label{{Name: "op", Value: string(req.Op)}})
defer metrics.MeasureSinceWithLabels([]string{"fsm", "acl"}, time.Now(),
[]metrics.Label{{Name: "op", Value: string(req.Op)}})
switch req.Op {
@@ -177,8 +169,6 @@ func (c *FSM) applyTombstoneOperation(buf []byte, index uint64) interface{} {
if err := structs.Decode(buf, &req); err != nil {
panic(fmt.Errorf("failed to decode request: %v", err))
}
-defer metrics.MeasureSinceWithLabels([]string{"consul", "fsm", "tombstone"}, time.Now(),
-[]metrics.Label{{Name: "op", Value: string(req.Op)}})
defer metrics.MeasureSinceWithLabels([]string{"fsm", "tombstone"}, time.Now(),
[]metrics.Label{{Name: "op", Value: string(req.Op)}})
switch req.Op {
@@ -199,7 +189,6 @@ func (c *FSM) applyCoordinateBatchUpdate(buf []byte, index uint64) interface{} {
if err := structs.Decode(buf, &updates); err != nil {
panic(fmt.Errorf("failed to decode batch updates: %v", err))
}
-defer metrics.MeasureSince([]string{"consul", "fsm", "coordinate", "batch-update"}, time.Now())
defer metrics.MeasureSince([]string{"fsm", "coordinate", "batch-update"}, time.Now())
if err := c.state.CoordinateBatchUpdate(index, updates); err != nil {
return err
@@ -215,8 +204,6 @@ func (c *FSM) applyPreparedQueryOperation(buf []byte, index uint64) interface{}
panic(fmt.Errorf("failed to decode request: %v", err))
}
-defer metrics.MeasureSinceWithLabels([]string{"consul", "fsm", "prepared-query"}, time.Now(),
-[]metrics.Label{{Name: "op", Value: string(req.Op)}})
defer metrics.MeasureSinceWithLabels([]string{"fsm", "prepared-query"}, time.Now(),
[]metrics.Label{{Name: "op", Value: string(req.Op)}})
switch req.Op {
@@ -235,7 +222,6 @@ func (c *FSM) applyTxn(buf []byte, index uint64) interface{} {
if err := structs.Decode(buf, &req); err != nil {
panic(fmt.Errorf("failed to decode request: %v", err))
}
-defer metrics.MeasureSince([]string{"consul", "fsm", "txn"}, time.Now())
defer metrics.MeasureSince([]string{"fsm", "txn"}, time.Now())
results, errors := c.state.TxnRW(index, req.Ops)
return structs.TxnResponse{
@@ -249,7 +235,6 @@ func (c *FSM) applyAutopilotUpdate(buf []byte, index uint64) interface{} {
if err := structs.Decode(buf, &req); err != nil {
panic(fmt.Errorf("failed to decode request: %v", err))
}
-defer metrics.MeasureSince([]string{"consul", "fsm", "autopilot"}, time.Now())
defer metrics.MeasureSince([]string{"fsm", "autopilot"}, time.Now())
if req.CAS {

View File

@@ -57,7 +57,6 @@ func registerRestorer(msg structs.MessageType, fn restorer) {
// Persist saves the FSM snapshot out to the given sink.
func (s *snapshot) Persist(sink raft.SnapshotSink) error {
-defer metrics.MeasureSince([]string{"consul", "fsm", "persist"}, time.Now())
defer metrics.MeasureSince([]string{"fsm", "persist"}, time.Now())
// Write the header

View File

@@ -139,19 +139,13 @@ func (h *Health) ServiceNodes(args *structs.ServiceSpecificRequest, reply *struc
// Provide some metrics
if err == nil {
-metrics.IncrCounterWithLabels([]string{"consul", "health", "service", "query"}, 1,
-[]metrics.Label{{Name: "service", Value: args.ServiceName}})
metrics.IncrCounterWithLabels([]string{"health", "service", "query"}, 1,
[]metrics.Label{{Name: "service", Value: args.ServiceName}})
if args.ServiceTag != "" {
-metrics.IncrCounterWithLabels([]string{"consul", "health", "service", "query-tag"}, 1,
-[]metrics.Label{{Name: "service", Value: args.ServiceName}, {Name: "tag", Value: args.ServiceTag}})
metrics.IncrCounterWithLabels([]string{"health", "service", "query-tag"}, 1,
[]metrics.Label{{Name: "service", Value: args.ServiceName}, {Name: "tag", Value: args.ServiceTag}})
}
if len(reply.Nodes) == 0 {
-metrics.IncrCounterWithLabels([]string{"consul", "health", "service", "not-found"}, 1,
-[]metrics.Label{{Name: "service", Value: args.ServiceName}})
metrics.IncrCounterWithLabels([]string{"health", "service", "not-found"}, 1,
[]metrics.Label{{Name: "service", Value: args.ServiceName}})
}

View File

@@ -81,7 +81,6 @@ func (k *KVS) Apply(args *structs.KVSRequest, reply *bool) error {
if done, err := k.srv.forward("KVS.Apply", args, args, reply); done {
return err
}
-defer metrics.MeasureSince([]string{"consul", "kvs", "apply"}, time.Now())
defer metrics.MeasureSince([]string{"kvs", "apply"}, time.Now())
// Perform the pre-apply checks.

View File

@@ -116,7 +116,6 @@ RECONCILE:
s.logger.Printf("[ERR] consul: failed to wait for barrier: %v", err)
goto WAIT
}
-metrics.MeasureSince([]string{"consul", "leader", "barrier"}, start)
metrics.MeasureSince([]string{"leader", "barrier"}, start)
// Check if we need to handle initial leadership actions
@@ -183,7 +182,6 @@ WAIT:
// previously inflight transactions have been committed and that our
// state is up-to-date.
func (s *Server) establishLeadership() error {
-defer metrics.MeasureSince([]string{"consul", "leader", "establish_leadership"}, time.Now())
// This will create the anonymous token and master token (if that is
// configured).
if err := s.initializeACL(); err != nil {
@@ -219,7 +217,6 @@ func (s *Server) establishLeadership() error {
// revokeLeadership is invoked once we step down as leader.
// This is used to cleanup any state that may be specific to a leader.
func (s *Server) revokeLeadership() error {
-defer metrics.MeasureSince([]string{"consul", "leader", "revoke_leadership"}, time.Now())
// Disable the tombstone GC, since it is only useful as a leader
s.tombstoneGC.SetEnabled(false)
@@ -444,7 +441,6 @@ func (s *Server) reconcileMember(member serf.Member) error {
s.logger.Printf("[WARN] consul: skipping reconcile of node %v", member)
return nil
}
-defer metrics.MeasureSince([]string{"consul", "leader", "reconcileMember"}, time.Now())
defer metrics.MeasureSince([]string{"leader", "reconcileMember"}, time.Now())
var err error
switch member.Status {
@@ -805,7 +801,6 @@ func (s *Server) removeConsulServer(m serf.Member, port int) error {
// through Raft to ensure consistency. We do this outside the leader loop
// to avoid blocking.
func (s *Server) reapTombstones(index uint64) {
-defer metrics.MeasureSince([]string{"consul", "leader", "reapTombstones"}, time.Now())
defer metrics.MeasureSince([]string{"leader", "reapTombstones"}, time.Now())
req := structs.TombstoneRequest{
Datacenter: s.config.Datacenter,

View File

@@ -32,7 +32,6 @@ func (p *PreparedQuery) Apply(args *structs.PreparedQueryRequest, reply *string)
if done, err := p.srv.forward("PreparedQuery.Apply", args, args, reply); done {
return err
}
-defer metrics.MeasureSince([]string{"consul", "prepared-query", "apply"}, time.Now())
defer metrics.MeasureSince([]string{"prepared-query", "apply"}, time.Now())
// Validate the ID. We must create new IDs before applying to the Raft
@@ -287,7 +286,6 @@ func (p *PreparedQuery) Explain(args *structs.PreparedQueryExecuteRequest,
if done, err := p.srv.forward("PreparedQuery.Explain", args, args, reply); done {
return err
}
-defer metrics.MeasureSince([]string{"consul", "prepared-query", "explain"}, time.Now())
defer metrics.MeasureSince([]string{"prepared-query", "explain"}, time.Now())
// We have to do this ourselves since we are not doing a blocking RPC.
@@ -335,7 +333,6 @@ func (p *PreparedQuery) Execute(args *structs.PreparedQueryExecuteRequest,
if done, err := p.srv.forward("PreparedQuery.Execute", args, args, reply); done {
return err
}
-defer metrics.MeasureSince([]string{"consul", "prepared-query", "execute"}, time.Now())
defer metrics.MeasureSince([]string{"prepared-query", "execute"}, time.Now())
// We have to do this ourselves since we are not doing a blocking RPC.
@@ -471,7 +468,6 @@ func (p *PreparedQuery) ExecuteRemote(args *structs.PreparedQueryExecuteRemoteRe
if done, err := p.srv.forward("PreparedQuery.ExecuteRemote", args, args, reply); done {
return err
}
-defer metrics.MeasureSince([]string{"consul", "prepared-query", "execute_remote"}, time.Now())
defer metrics.MeasureSince([]string{"prepared-query", "execute_remote"}, time.Now())
// We have to do this ourselves since we are not doing a blocking RPC.

View File

@@ -59,7 +59,6 @@ func (s *Server) listen(listener net.Listener) {
}
go s.handleConn(conn, false)
metrics.IncrCounter([]string{"consul", "rpc", "accept_conn"}, 1)
metrics.IncrCounter([]string{"rpc", "accept_conn"}, 1)
}
}
@@ -97,7 +96,6 @@ func (s *Server) handleConn(conn net.Conn, isTLS bool) {
s.handleConsulConn(conn)
case pool.RPCRaft:
metrics.IncrCounter([]string{"consul", "rpc", "raft_handoff"}, 1)
metrics.IncrCounter([]string{"rpc", "raft_handoff"}, 1)
s.raftLayer.Handoff(conn)
@@ -156,12 +154,10 @@ func (s *Server) handleConsulConn(conn net.Conn) {
if err := s.rpcServer.ServeRequest(rpcCodec); err != nil {
if err != io.EOF && !strings.Contains(err.Error(), "closed") {
s.logger.Printf("[ERR] consul.rpc: RPC error: %v %s", err, logConn(conn))
metrics.IncrCounter([]string{"consul", "rpc", "request_error"}, 1)
metrics.IncrCounter([]string{"rpc", "request_error"}, 1)
}
return
}
metrics.IncrCounter([]string{"consul", "rpc", "request"}, 1)
metrics.IncrCounter([]string{"rpc", "request"}, 1)
}
}
@@ -288,8 +284,6 @@ func (s *Server) forwardDC(method, dc string, args interface{}, reply interface{
return structs.ErrNoDCPath
}
metrics.IncrCounterWithLabels([]string{"consul", "rpc", "cross-dc"}, 1,
[]metrics.Label{{Name: "datacenter", Value: dc}})
metrics.IncrCounterWithLabels([]string{"rpc", "cross-dc"}, 1,
[]metrics.Label{{Name: "datacenter", Value: dc}})
if err := s.connPool.RPC(dc, server.Addr, server.Version, method, server.UseTLS, args, reply); err != nil {
@@ -401,7 +395,6 @@ RUN_QUERY:
}
// Run the query.
metrics.IncrCounter([]string{"consul", "rpc", "query"}, 1)
metrics.IncrCounter([]string{"rpc", "query"}, 1)
// Operate on a consistent set of state. This makes sure that the
@@ -452,7 +445,6 @@ func (s *Server) setQueryMeta(m *structs.QueryMeta) {
// consistentRead is used to ensure we do not perform a stale
// read. This is done by verifying leadership before the read.
func (s *Server) consistentRead() error {
defer metrics.MeasureSince([]string{"consul", "rpc", "consistentRead"}, time.Now())
defer metrics.MeasureSince([]string{"rpc", "consistentRead"}, time.Now())
future := s.raft.VerifyLeader()
if err := future.Error(); err != nil {
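The pattern across these server files is the same: the duplicate emission under the hard-coded `consul.` key element is dropped, only the short key remains, and the final metric name comes from the telemetry prefix. A minimal standalone sketch of that naming with `armon/go-metrics`; the in-memory sink and config here are illustrative assumptions, not Consul's actual agent wiring:

```go
package main

import (
	"fmt"
	"time"

	metrics "github.com/armon/go-metrics"
)

func main() {
	// Illustrative setup: an in-memory sink with a "consul" service prefix.
	// Disabling hostname prefixing mirrors the disable_hostname advice in the
	// telemetry docs, keeping names short.
	sink := metrics.NewInmemSink(10*time.Second, time.Minute)
	cfg := metrics.DefaultConfig("consul")
	cfg.EnableHostname = false
	if _, err := metrics.NewGlobal(cfg, sink); err != nil {
		panic(err)
	}

	// Same call as in the hunks above. With the "consul" key element gone, the
	// counter surfaces once as consul.rpc.request instead of also appearing
	// under the old duplicated consul.consul.rpc.request name.
	metrics.IncrCounter([]string{"rpc", "request"}, 1)

	for name := range sink.Data()[0].Counters {
		fmt.Println(name)
	}
}
```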

View File

@@ -59,7 +59,6 @@ func (s *Server) floodSegments(config *Config) {
// all live nodes are registered, all failed nodes are marked as such, and all
// left nodes are de-registered.
func (s *Server) reconcile() (err error) {
defer metrics.MeasureSince([]string{"consul", "leader", "reconcile"}, time.Now())
defer metrics.MeasureSince([]string{"leader", "reconcile"}, time.Now())
members := s.serfLAN.Members()
knownMembers := make(map[string]struct{})

View File

@@ -23,7 +23,6 @@ func (s *Session) Apply(args *structs.SessionRequest, reply *string) error {
if done, err := s.srv.forward("Session.Apply", args, args, reply); done {
return err
}
defer metrics.MeasureSince([]string{"consul", "session", "apply"}, time.Now())
defer metrics.MeasureSince([]string{"session", "apply"}, time.Now())
// Verify the args
@@ -222,7 +221,6 @@ func (s *Session) Renew(args *structs.SessionSpecificRequest,
if done, err := s.srv.forward("Session.Renew", args, args, reply); done {
return err
}
defer metrics.MeasureSince([]string{"consul", "session", "renew"}, time.Now())
defer metrics.MeasureSince([]string{"session", "renew"}, time.Now())
// Get the session, from local state.

View File

@@ -84,7 +84,6 @@ func (s *Server) createSessionTimer(id string, ttl time.Duration) {
// invalidateSession is invoked when a session TTL is reached and we
// need to invalidate the session.
func (s *Server) invalidateSession(id string) {
defer metrics.MeasureSince([]string{"consul", "session_ttl", "invalidate"}, time.Now())
defer metrics.MeasureSince([]string{"session_ttl", "invalidate"}, time.Now())
// Clear the session timer
@@ -134,7 +133,6 @@ func (s *Server) sessionStats() {
for {
select {
case <-time.After(5 * time.Second):
metrics.SetGauge([]string{"consul", "session_ttl", "active"}, float32(s.sessionTimers.Len()))
metrics.SetGauge([]string{"session_ttl", "active"}, float32(s.sessionTimers.Len()))
case <-s.shutdownCh:

View File

@@ -46,7 +46,6 @@ func (t *Txn) Apply(args *structs.TxnRequest, reply *structs.TxnResponse) error
if done, err := t.srv.forward("Txn.Apply", args, args, reply); done {
return err
}
defer metrics.MeasureSince([]string{"consul", "txn", "apply"}, time.Now())
defer metrics.MeasureSince([]string{"txn", "apply"}, time.Now())
// Run the pre-checks before we send the transaction into Raft.
@@ -90,7 +89,6 @@ func (t *Txn) Read(args *structs.TxnReadRequest, reply *structs.TxnReadResponse)
if done, err := t.srv.forward("Txn.Read", args, args, reply); done {
return err
}
defer metrics.MeasureSince([]string{"consul", "txn", "read"}, time.Now())
defer metrics.MeasureSince([]string{"txn", "read"}, time.Now())
// We have to do this ourselves since we are not doing a blocking RPC.

View File

@@ -158,8 +158,6 @@ START:
func (d *DNSServer) handlePtr(resp dns.ResponseWriter, req *dns.Msg) {
q := req.Question[0]
defer func(s time.Time) {
metrics.MeasureSinceWithLabels([]string{"consul", "dns", "ptr_query"}, s,
[]metrics.Label{{Name: "node", Value: d.agent.config.NodeName}})
metrics.MeasureSinceWithLabels([]string{"dns", "ptr_query"}, s,
[]metrics.Label{{Name: "node", Value: d.agent.config.NodeName}})
d.logger.Printf("[DEBUG] dns: request for %v (%v) from client %s (%s)",
@@ -230,8 +228,6 @@ func (d *DNSServer) handlePtr(resp dns.ResponseWriter, req *dns.Msg) {
func (d *DNSServer) handleQuery(resp dns.ResponseWriter, req *dns.Msg) {
q := req.Question[0]
defer func(s time.Time) {
metrics.MeasureSinceWithLabels([]string{"consul", "dns", "domain_query"}, s,
[]metrics.Label{{Name: "node", Value: d.agent.config.NodeName}})
metrics.MeasureSinceWithLabels([]string{"dns", "domain_query"}, s,
[]metrics.Label{{Name: "node", Value: d.agent.config.NodeName}})
d.logger.Printf("[DEBUG] dns: request for name %v type %v class %v (took %v) from client %s (%s)",
@@ -542,7 +538,6 @@ RPC:
d.logger.Printf("[WARN] dns: Query results too stale, re-requesting")
goto RPC
} else if out.LastContact > staleCounterThreshold {
metrics.IncrCounter([]string{"consul", "dns", "stale_queries"}, 1)
metrics.IncrCounter([]string{"dns", "stale_queries"}, 1)
}
}
@@ -891,7 +886,6 @@ func (d *DNSServer) lookupServiceNodes(datacenter, service, tag string) (structs
}
if args.AllowStale && out.LastContact > staleCounterThreshold {
metrics.IncrCounter([]string{"consul", "dns", "stale_queries"}, 1)
metrics.IncrCounter([]string{"dns", "stale_queries"}, 1)
}
@@ -1042,7 +1036,6 @@ RPC:
d.logger.Printf("[WARN] dns: Query results too stale, re-requesting")
goto RPC
} else if out.LastContact > staleCounterThreshold {
metrics.IncrCounter([]string{"consul", "dns", "stale_queries"}, 1)
metrics.IncrCounter([]string{"dns", "stale_queries"}, 1)
}
}

View File

@@ -123,7 +123,6 @@ func (s *HTTPServer) handler(enableDebug bool) http.Handler {
start := time.Now()
handler(resp, req)
key := append([]string{"http", req.Method}, parts...)
metrics.MeasureSince(append([]string{"consul"}, key...), start)
metrics.MeasureSince(key, start)
}
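As the hunk above shows, each HTTP endpoint is now timed only under the short `http.<METHOD>...` key. A rough sketch of how that key flattens into a metric name, assuming a global sink configured as in the earlier sketch; the path-splitting here is a simplification of how the handler derives `parts`:

```go
package main

import (
	"fmt"
	"strings"
	"time"

	metrics "github.com/armon/go-metrics"
)

// measureEndpoint mimics the shape of the handler's timing call; splitting the
// URL path like this is an approximation, not Consul's exact logic.
func measureEndpoint(method, path string, start time.Time) []string {
	parts := strings.Split(strings.Trim(path, "/"), "/")
	key := append([]string{"http", method}, parts...)
	metrics.MeasureSince(key, start) // single emission; prefix added by the sink config
	return key
}

func main() {
	key := measureEndpoint("GET", "/v1/agent/self", time.Now())
	// With a "consul" service prefix this surfaces as consul.http.GET.v1.agent.self,
	// and no longer also as consul.consul.http.GET.v1.agent.self.
	fmt.Println(strings.Join(key, "."))
}
```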

View File

@@ -21,7 +21,6 @@ type CheckDefinition struct {
//
// ID (CheckID), Name, Status, Notes
//
Script string
ScriptArgs []string
HTTP string
Header map[string][]string
@@ -63,7 +62,6 @@ func (c *CheckDefinition) CheckType() *CheckType {
Status: c.Status,
Notes: c.Notes,
Script: c.Script,
ScriptArgs: c.ScriptArgs,
HTTP: c.HTTP,
GRPC: c.GRPC,

View File

@@ -83,7 +83,7 @@ func TestCheckDefinitionToCheckType(t *testing.T) {
ServiceID: "svcid",
Token: "tok",
Script: "/bin/foo",
ScriptArgs: []string{"/bin/foo"},
HTTP: "someurl",
TCP: "host:port",
Interval: 1 * time.Second,
@@ -100,7 +100,7 @@ func TestCheckDefinitionToCheckType(t *testing.T) {
Status: "green",
Notes: "notes",
Script: "/bin/foo",
ScriptArgs: []string{"/bin/foo"},
HTTP: "someurl",
TCP: "host:port",
Interval: 1 * time.Second,

View File

@@ -25,7 +25,6 @@ type CheckType struct {
// fields copied to CheckDefinition
// Update CheckDefinition when adding fields here
Script string
ScriptArgs []string
HTTP string
Header map[string][]string
@@ -70,7 +69,7 @@ func (c *CheckType) Empty() bool {
// IsScript checks if this is a check that execs some kind of script.
func (c *CheckType) IsScript() bool {
return c.Script != "" || len(c.ScriptArgs) > 0
return len(c.ScriptArgs) > 0
}
// IsTTL checks if this is a TTL type // IsTTL checks if this is a TTL type

View File

@@ -14,7 +14,7 @@ func TestAgentStructs_CheckTypes(t *testing.T) {
// Singular Check field works
svc.Check = CheckType{
Script: "/foo/bar",
ScriptArgs: []string{"/foo/bar"},
Interval: 10 * time.Second,
}
@@ -26,7 +26,7 @@ func TestAgentStructs_CheckTypes(t *testing.T) {
// Returns Script checks
svc.Checks = append(svc.Checks, &CheckType{
Script: "/foo/bar",
ScriptArgs: []string{"/foo/bar"},
Interval: 10 * time.Second,
})

View File

@@ -86,7 +86,6 @@ type AgentServiceCheck struct {
CheckID string `json:",omitempty"`
Name string `json:",omitempty"`
Args []string `json:"ScriptArgs,omitempty"`
Script string `json:",omitempty"` // Deprecated, use Args.
DockerContainerID string `json:",omitempty"`
Shell string `json:",omitempty"` // Only supported for Docker.
Interval string `json:",omitempty"`

View File

@@ -694,7 +694,7 @@ func TestAPI_AgentChecks_Docker(t *testing.T) {
ServiceID: "redis",
AgentServiceCheck: AgentServiceCheck{
DockerContainerID: "f972c95ebf0e",
Script: "/bin/true",
Args: []string{"/bin/true"},
Shell: "/bin/bash",
Interval: "10s",
},
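With the deprecated `Script` field removed from `AgentServiceCheck`, API consumers pass the command through `Args` (serialized as `ScriptArgs`). A minimal sketch of registering the Docker check from the test above through the Go API client; the agent address and the check name are placeholders:

```go
package main

import (
	"log"

	"github.com/hashicorp/consul/api"
)

func main() {
	// Assumes a local agent reachable at the default API address.
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}

	reg := &api.AgentCheckRegistration{
		Name:      "redis-docker-check", // placeholder name
		ServiceID: "redis",
		AgentServiceCheck: api.AgentServiceCheck{
			DockerContainerID: "f972c95ebf0e",
			// Previously Script: "/bin/true"; the command is now a slice of args.
			Args:     []string{"/bin/true"},
			Shell:    "/bin/bash",
			Interval: "10s",
		},
	}
	if err := client.Agent().CheckRegister(reg); err != nil {
		log.Fatal(err)
	}
}
```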

View File

@@ -197,11 +197,6 @@ and the check is embedded within a service definition a unique check id is
generated. Otherwise, `id` will be set to `name`. If names might conflict,
unique IDs should be provided.
-> **Note:** Consul 0.9.3 and before require the optional check ID for a check
that is embedded in a service definition to be configured via the `CheckID`
field. Consul 1.0 accepts both `id` and `CheckID` but the latter is
deprecated and will be removed in Consul 1.1.
The `notes` field is opaque to Consul but can be used to provide a human-readable
description of the current state of the check. With a script check, the field is
set to any output generated by the script. Similarly, an external process updating
@@ -252,11 +247,6 @@ In Consul 0.9.0 and later, the agent must be configured with
[`enable_script_checks`](/docs/agent/options.html#_enable_script_checks) set to `true`
in order to enable script checks.
Prior to Consul 1.0, checks used a single `script` field to define the command to run, and
would always run in a shell. In Consul 1.0, the `args` array was added so that checks can be
run without a shell. The `script` field is deprecated, and you should include the shell in
the `args` to run under a shell, eg. `"args": ["sh", "-c", "..."]`.
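The paragraph removed above is still the migration recipe for config files: move the command into `args`, and name a shell explicitly if the old always-in-a-shell behaviour is wanted. A sketch of the resulting check stanza, built with the standard library only to show the JSON shape; the check `id`, `name`, and script path are illustrative:

```go
package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	// Post-migration shape: "args" replaces "script"; wrapping the command in
	// "sh -c" reproduces the old shell-based behaviour.
	check := map[string]interface{}{
		"check": map[string]interface{}{
			"id":       "redis-health", // illustrative
			"name":     "Redis health", // illustrative
			"args":     []string{"sh", "-c", "/usr/local/bin/check_redis.py"},
			"interval": "10s",
		},
	}
	out, err := json.MarshalIndent(check, "", "  ")
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out))
}
```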
## Initial Health Check Status
By default, when checks are registered against a Consul agent, the state is set

View File

@@ -1375,10 +1375,6 @@ Consul will not enable TLS for the HTTP API unless the `https` port has been ass
The format is compatible natively with prometheus. When running in this mode, it is recommended to also enable the option
<a href="#telemetry-disable_hostname">`disable_hostname`</a> to avoid having prefixed metrics with hostname.
* <a name="telemetry-enable_deprecated_names"></a><a href="#telemetry-enable_deprecated_names">`enable_deprecated_names`
</a>Added in Consul 1.0, this enables old metric names of the format `consul.consul...` to be sent alongside
other metrics. Defaults to false.
* <a name="telemetry-statsd_address"></a><a href="#telemetry-statsd_address">`statsd_address`</a> This provides the
address of a statsd instance in the format `host:port`. If provided, Consul will send various telemetry information to that instance for
aggregation. This can be used to capture runtime information. This sends UDP packets only and can be used with

View File

@@ -32,7 +32,7 @@ A service definition is a script that looks like:
"enable_tag_override": false,
"checks": [
{
"script": "/usr/local/bin/check_redis.py",
"args": ["/usr/local/bin/check_redis.py"],
"interval": "10s"
}
]
@@ -48,7 +48,7 @@ unique IDs should be provided.
For Consul 0.9.3 and earlier you need to use `enableTagOverride`. Consul 1.0
supports both `enable_tag_override` and `enableTagOverride` but the latter is
deprecated and will be removed in Consul 1.1.
deprecated and has been removed in Consul 1.1.
The `tags` property is a list of values that are opaque to Consul but
can be used to distinguish between `primary` or `secondary` nodes,
@@ -80,7 +80,7 @@ node has any failing system-level check, the DNS interface will omit that
node from any service query.
The check must be of the script, HTTP, TCP or TTL type. If it is a script type,
`script` and `interval` must be provided. If it is a HTTP type, `http` and
`args` and `interval` must be provided. If it is a HTTP type, `http` and
`interval` must be provided. If it is a TCP type, `tcp` and `interval` must be
provided. If it is a TTL type, then only `ttl` must be provided. The check name
is automatically generated as `service:<service-id>`. If there are multiple
@@ -90,11 +90,6 @@ from `1`.
-> **Note:** There is more information about [checks here](/docs/agent/checks.html).
-> **Note:** Consul 0.9.3 and before require the optional check ID for a check
that is embedded in a service definition to be configured via the `CheckID`
field. Consul 1.0 accepts both `id` and `CheckID` but the latter is
deprecated and will be removed in Consul 1.1.
The `enable_tag_override` can optionally be specified to disable the
anti-entropy feature for this service. If `enable_tag_override` is set to
`TRUE` then external agents can update this service in the
@@ -120,7 +115,7 @@ syncs](/docs/internals/anti-entropy.html) for more info.
For Consul 0.9.3 and earlier you need to use `enableTagOverride`. Consul 1.0
supports both `enable_tag_override` and `enableTagOverride` but the latter is
deprecated and will be removed in Consul 1.1.
deprecated and has been removed as of Consul 1.1.
To configure a service, either provide it as a `-config-file` option to
the agent or place it inside the `-config-dir` of the agent. The file
@@ -147,7 +142,7 @@ Multiple services definitions can be provided at once using the plural
"port": 6000,
"checks": [
{
"script": "/bin/check_redis -p 6000",
"args": ["/bin/check_redis", "-p", "6000"],
"interval": "5s",
"ttl": "20s"
}
@@ -164,7 +159,7 @@ Multiple services definitions can be provided at once using the plural
"port": 7000,
"checks": [
{
"script": "/bin/check_redis -p 7000",
"args": ["/bin/check_redis", "-p", "7000"],
"interval": "30s",
"ttl": "60s"
}
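The same rename shows up when services are registered through the Go API client instead of a config file: `EnableTagOverride` and an `Args`-based check correspond to the `enable_tag_override` and `args` fields documented above. A sketch using the second service from the example; the agent address is an assumption:

```go
package main

import (
	"log"

	"github.com/hashicorp/consul/api"
)

func main() {
	client, err := api.NewClient(api.DefaultConfig()) // assumes a local agent
	if err != nil {
		log.Fatal(err)
	}

	reg := &api.AgentServiceRegistration{
		Name:              "redis",
		Port:              7000,
		EnableTagOverride: false, // corresponds to enable_tag_override above
		Check: &api.AgentServiceCheck{
			Args:     []string{"/bin/check_redis", "-p", "7000"},
			Interval: "30s",
		},
	}
	if err := client.Agent().ServiceRegister(reg); err != nil {
		log.Fatal(err)
	}
}
```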

View File

@@ -14,6 +14,18 @@ details provided for their upgrades as a result of new features or changed
behavior. This page is used to document those details separately from the
standard upgrade flow.
## Consul 1.1.0
#### Removal of Deprecated Features
The following previously deprecated fields and config options have been removed:
- `CheckID` has been removed from config file check definitions (use `id` instead).
- `script` has been removed from config file check definitions (use `args` instead).
- `enableTagOverride` is no longer valid in service definitions (use `enable_tag_override` instead).
- The [deprecated set of metric names](/docs/upgrade-specific.html#metric-names-updated) (beginning with `consul.consul.`) has been removed
along with the `enable_deprecated_names` option from the metrics configuration.
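Taken together, the removals above mean a 1.1 config file only accepts the new-style keys. One way to sanity-check a migrated service definition is to build it with those keys and print the JSON destined for a `-config-dir` file; the check `id` value is illustrative:

```go
package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	def := map[string]interface{}{
		"service": map[string]interface{}{
			"name":                "redis",
			"port":                6000,
			"enable_tag_override": false, // "enableTagOverride" is no longer accepted
			"checks": []map[string]interface{}{
				{
					"id":       "redis-liveness", // "CheckID" is no longer accepted; value illustrative
					"args":     []string{"/bin/check_redis", "-p", "6000"},
					"interval": "5s",
				},
			},
		},
	}
	out, err := json.MarshalIndent(def, "", "  ")
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out))
}
```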
## Consul 1.0.1
#### Carefully Check and Remove Stale Servers During Rolling Upgrades