mirror of https://github.com/status-im/consul.git

Remove deprecated metric names

parent b73323aa42
commit ba3971d2c1
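The pattern repeated throughout this diff: each metric key used to start with a hard-coded "consul" element, which go-metrics then prefixed again with the configured service name, yielding the deprecated doubled consul.consul.* names; the calls that keep only the short key already surface under the expected consul.* names. A minimal sketch of that prefixing behavior, assuming an armon/go-metrics setup roughly like the following (illustrative only, not the agent's actual wiring):

package main

import (
	"fmt"
	"time"

	metrics "github.com/armon/go-metrics"
)

func main() {
	sink := metrics.NewInmemSink(10*time.Second, time.Minute)

	cfg := metrics.DefaultConfig("consul") // ServiceName "consul" is prepended to every key
	cfg.EnableHostname = false
	m, err := metrics.New(cfg, sink)
	if err != nil {
		panic(err)
	}

	m.IncrCounter([]string{"acl", "cache_hit"}, 1)           // surfaces as consul.acl.cache_hit
	m.IncrCounter([]string{"consul", "acl", "cache_hit"}, 1) // the deprecated consul.consul.acl.cache_hit

	for name := range sink.Data()[0].Counters {
		fmt.Println(name)
	}
}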
@@ -143,11 +143,9 @@ func (m *aclManager) lookupACL(a *Agent, id string) (acl.ACL, error) {
 		cached = raw.(*aclCacheEntry)
 	}
 	if cached != nil && time.Now().Before(cached.Expires) {
-		metrics.IncrCounter([]string{"consul", "acl", "cache_hit"}, 1)
 		metrics.IncrCounter([]string{"acl", "cache_hit"}, 1)
 		return cached.ACL, nil
 	}
-	metrics.IncrCounter([]string{"consul", "acl", "cache_miss"}, 1)
 	metrics.IncrCounter([]string{"acl", "cache_miss"}, 1)
 
 	// At this point we might have a stale cached ACL, or none at all, so
@@ -511,14 +511,6 @@ func (b *Builder) Build() (rt RuntimeConfig, err error) {
 		}
 	}
 
-	// Add a filter rule if needed for enabling the deprecated metric names
-	enableDeprecatedNames := b.boolVal(c.Telemetry.EnableDeprecatedNames)
-	if enableDeprecatedNames {
-		telemetryAllowedPrefixes = append(telemetryAllowedPrefixes, "consul.consul.")
-	} else {
-		telemetryBlockedPrefixes = append(telemetryBlockedPrefixes, "consul.consul.")
-	}
-
 	// raft performance scaling
 	performanceRaftMultiplier := b.intVal(c.Performance.RaftMultiplier)
 	if performanceRaftMultiplier < 1 || uint(performanceRaftMultiplier) > consul.MaxRaftMultiplier {
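The block removed above whitelisted or blacklisted the doubled consul.consul. prefix depending on enable_deprecated_names. For context, the telemetryAllowedPrefixes / telemetryBlockedPrefixes slices the builder computes end up as go-metrics filter settings; a hedged sketch of how those knobs relate (field names from armon/go-metrics, the concrete prefixes are illustrative):

package main

import (
	"time"

	metrics "github.com/armon/go-metrics"
)

func main() {
	// Sketch only: names starting with a blocked prefix are dropped, names
	// starting with an allowed prefix are kept, and FilterDefault decides
	// everything that matches neither list.
	cfg := metrics.DefaultConfig("consul")
	cfg.FilterDefault = true                          // telemetry.filter_default
	cfg.AllowedPrefixes = []string{"consul.catalog."} // prefix_filter rules starting with "+"
	cfg.BlockedPrefixes = []string{"consul.dns."}     // prefix_filter rules starting with "-"

	sink := metrics.NewInmemSink(10*time.Second, time.Minute)
	if _, err := metrics.New(cfg, sink); err != nil {
		panic(err)
	}
}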
@@ -394,7 +394,6 @@ type Telemetry struct {
 	PrometheusRetentionTime *string `json:"prometheus_retention_time,omitempty" hcl:"prometheus_retention_time" mapstructure:"prometheus_retention_time"`
 	StatsdAddr              *string `json:"statsd_address,omitempty" hcl:"statsd_address" mapstructure:"statsd_address"`
 	StatsiteAddr            *string `json:"statsite_address,omitempty" hcl:"statsite_address" mapstructure:"statsite_address"`
-	EnableDeprecatedNames   *bool   `json:"enable_deprecated_names" hcl:"enable_deprecated_names" mapstructure:"enable_deprecated_names"`
 }
 
 type Ports struct {
@@ -1811,28 +1811,10 @@ func TestConfigFlagsAndEdgecases(t *testing.T) {
 			patch: func(rt *RuntimeConfig) {
 				rt.DataDir = dataDir
 				rt.TelemetryAllowedPrefixes = []string{"foo"}
-				rt.TelemetryBlockedPrefixes = []string{"bar", "consul.consul."}
+				rt.TelemetryBlockedPrefixes = []string{"bar"}
 			},
 			warns: []string{`Filter rule must begin with either '+' or '-': "nix"`},
 		},
-		{
-			desc: "telemetry.enable_deprecated_names adds allow rule for whitelist",
-			args: []string{
-				`-data-dir=` + dataDir,
-			},
-			json: []string{`{
-				"telemetry": { "enable_deprecated_names": true, "filter_default": false }
-			}`},
-			hcl: []string{`
-				telemetry = { enable_deprecated_names = true filter_default = false }
-			`},
-			patch: func(rt *RuntimeConfig) {
-				rt.DataDir = dataDir
-				rt.TelemetryFilterDefault = false
-				rt.TelemetryAllowedPrefixes = []string{"consul.consul."}
-				rt.TelemetryBlockedPrefixes = []string{}
-			},
-		},
 		{
 			desc: "encrypt has invalid key",
 			args: []string{
@@ -2620,7 +2602,6 @@ func TestFullConfig(t *testing.T) {
 				"dogstatsd_tags": [ "3N81zSUB","Xtj8AnXZ" ],
 				"filter_default": true,
 				"prefix_filter": [ "+oJotS8XJ","-cazlEhGn" ],
-				"enable_deprecated_names": true,
 				"metrics_prefix": "ftO6DySn",
 				"prometheus_retention_time": "15s",
 				"statsd_address": "drce87cy",
@@ -3052,7 +3033,6 @@ func TestFullConfig(t *testing.T) {
 				dogstatsd_tags = [ "3N81zSUB","Xtj8AnXZ" ]
 				filter_default = true
 				prefix_filter = [ "+oJotS8XJ","-cazlEhGn" ]
-				enable_deprecated_names = true
 				metrics_prefix = "ftO6DySn"
 				prometheus_retention_time = "15s"
 				statsd_address = "drce87cy"
@@ -3606,7 +3586,7 @@ func TestFullConfig(t *testing.T) {
 		TelemetryDogstatsdAddr:           "0wSndumK",
 		TelemetryDogstatsdTags:           []string{"3N81zSUB", "Xtj8AnXZ"},
 		TelemetryFilterDefault:           true,
-		TelemetryAllowedPrefixes:         []string{"oJotS8XJ", "consul.consul."},
+		TelemetryAllowedPrefixes:         []string{"oJotS8XJ"},
 		TelemetryBlockedPrefixes:         []string{"cazlEhGn"},
 		TelemetryMetricsPrefix:           "ftO6DySn",
 		TelemetryPrometheusRetentionTime: 15 * time.Second,
@@ -41,7 +41,6 @@ type aclCacheEntry struct {
 // assumes its running in the ACL datacenter, or in a non-ACL datacenter when
 // using its replicated ACLs during an outage.
 func (s *Server) aclLocalFault(id string) (string, string, error) {
-	defer metrics.MeasureSince([]string{"consul", "acl", "fault"}, time.Now())
 	defer metrics.MeasureSince([]string{"acl", "fault"}, time.Now())
 
 	// Query the state store.
@@ -75,7 +74,6 @@ func (s *Server) resolveToken(id string) (acl.ACL, error) {
 	if len(authDC) == 0 {
 		return nil, nil
 	}
-	defer metrics.MeasureSince([]string{"consul", "acl", "resolveToken"}, time.Now())
 	defer metrics.MeasureSince([]string{"acl", "resolveToken"}, time.Now())
 
 	// Handle the anonymous token
@@ -159,11 +157,9 @@ func (c *aclCache) lookupACL(id, authDC string) (acl.ACL, error) {
 
 	// Check for live cache.
 	if cached != nil && time.Now().Before(cached.Expires) {
-		metrics.IncrCounter([]string{"consul", "acl", "cache_hit"}, 1)
 		metrics.IncrCounter([]string{"acl", "cache_hit"}, 1)
 		return cached.ACL, nil
 	}
-	metrics.IncrCounter([]string{"consul", "acl", "cache_miss"}, 1)
 	metrics.IncrCounter([]string{"acl", "cache_miss"}, 1)
 
 	// Attempt to refresh the policy from the ACL datacenter via an RPC.
@@ -226,7 +222,6 @@ func (c *aclCache) lookupACL(id, authDC string) (acl.ACL, error) {
 	// Fake up an ACL datacenter reply and inject it into the cache.
 	// Note we use the local TTL here, so this'll be used for that
 	// amount of time even once the ACL datacenter becomes available.
-	metrics.IncrCounter([]string{"consul", "acl", "replication_hit"}, 1)
 	metrics.IncrCounter([]string{"acl", "replication_hit"}, 1)
 	reply.ETag = makeACLETag(parent, policy)
 	reply.TTL = c.config.ACLTTL
@@ -145,7 +145,6 @@ func (a *ACL) Apply(args *structs.ACLRequest, reply *string) error {
 	if done, err := a.srv.forward("ACL.Apply", args, args, reply); done {
 		return err
 	}
-	defer metrics.MeasureSince([]string{"consul", "acl", "apply"}, time.Now())
 	defer metrics.MeasureSince([]string{"acl", "apply"}, time.Now())
 
 	// Verify we are allowed to serve this request
@@ -149,7 +149,6 @@ func (s *Server) fetchLocalACLs() (structs.ACLs, error) {
 // datacenter. The lastIndex parameter is a hint about which remote index we
 // have replicated to, so this is expected to block until something changes.
 func (s *Server) fetchRemoteACLs(lastRemoteIndex uint64) (*structs.IndexedACLs, error) {
-	defer metrics.MeasureSince([]string{"consul", "leader", "fetchRemoteACLs"}, time.Now())
 	defer metrics.MeasureSince([]string{"leader", "fetchRemoteACLs"}, time.Now())
 
 	args := structs.DCSpecificRequest{
@@ -170,7 +169,6 @@ func (s *Server) fetchRemoteACLs(lastRemoteIndex uint64) (*structs.IndexedACLs,
 // UpdateLocalACLs is given a list of changes to apply in order to bring the
 // local ACLs in-line with the remote ACLs from the ACL datacenter.
 func (s *Server) updateLocalACLs(changes structs.ACLRequests) error {
-	defer metrics.MeasureSince([]string{"consul", "leader", "updateLocalACLs"}, time.Now())
 	defer metrics.MeasureSince([]string{"leader", "updateLocalACLs"}, time.Now())
 
 	minTimePerOp := time.Second / time.Duration(s.config.ACLReplicationApplyLimit)
@@ -218,7 +216,6 @@ func (s *Server) replicateACLs(lastRemoteIndex uint64) (uint64, error) {
 	// Measure everything after the remote query, which can block for long
 	// periods of time. This metric is a good measure of how expensive the
 	// replication process is.
-	defer metrics.MeasureSince([]string{"consul", "leader", "replicateACLs"}, time.Now())
 	defer metrics.MeasureSince([]string{"leader", "replicateACLs"}, time.Now())
 
 	local, err := s.fetchLocalACLs()
@@ -55,13 +55,10 @@ func (d *AutopilotDelegate) IsServer(m serf.Member) (*autopilot.ServerInfo, erro
 // Heartbeat a metric for monitoring if we're the leader
 func (d *AutopilotDelegate) NotifyHealth(health autopilot.OperatorHealthReply) {
 	if d.server.raft.State() == raft.Leader {
-		metrics.SetGauge([]string{"consul", "autopilot", "failure_tolerance"}, float32(health.FailureTolerance))
 		metrics.SetGauge([]string{"autopilot", "failure_tolerance"}, float32(health.FailureTolerance))
 		if health.Healthy {
-			metrics.SetGauge([]string{"consul", "autopilot", "healthy"}, 1)
 			metrics.SetGauge([]string{"autopilot", "healthy"}, 1)
 		} else {
-			metrics.SetGauge([]string{"consul", "autopilot", "healthy"}, 0)
 			metrics.SetGauge([]string{"autopilot", "healthy"}, 0)
 		}
 	}
@@ -24,7 +24,6 @@ func (c *Catalog) Register(args *structs.RegisterRequest, reply *struct{}) error
 	if done, err := c.srv.forward("Catalog.Register", args, args, reply); done {
 		return err
 	}
-	defer metrics.MeasureSince([]string{"consul", "catalog", "register"}, time.Now())
 	defer metrics.MeasureSince([]string{"catalog", "register"}, time.Now())
 
 	// Verify the args.
@@ -117,7 +116,6 @@ func (c *Catalog) Deregister(args *structs.DeregisterRequest, reply *struct{}) e
 	if done, err := c.srv.forward("Catalog.Deregister", args, args, reply); done {
 		return err
 	}
-	defer metrics.MeasureSince([]string{"consul", "catalog", "deregister"}, time.Now())
 	defer metrics.MeasureSince([]string{"catalog", "deregister"}, time.Now())
 
 	// Verify the args
@@ -279,19 +277,13 @@ func (c *Catalog) ServiceNodes(args *structs.ServiceSpecificRequest, reply *stru
 
 	// Provide some metrics
 	if err == nil {
-		metrics.IncrCounterWithLabels([]string{"consul", "catalog", "service", "query"}, 1,
-			[]metrics.Label{{Name: "service", Value: args.ServiceName}})
 		metrics.IncrCounterWithLabels([]string{"catalog", "service", "query"}, 1,
 			[]metrics.Label{{Name: "service", Value: args.ServiceName}})
 		if args.ServiceTag != "" {
-			metrics.IncrCounterWithLabels([]string{"consul", "catalog", "service", "query-tag"}, 1,
-				[]metrics.Label{{Name: "service", Value: args.ServiceName}, {Name: "tag", Value: args.ServiceTag}})
 			metrics.IncrCounterWithLabels([]string{"catalog", "service", "query-tag"}, 1,
 				[]metrics.Label{{Name: "service", Value: args.ServiceName}, {Name: "tag", Value: args.ServiceTag}})
 		}
 		if len(reply.ServiceNodes) == 0 {
-			metrics.IncrCounterWithLabels([]string{"consul", "catalog", "service", "not-found"}, 1,
-				[]metrics.Label{{Name: "service", Value: args.ServiceName}})
 			metrics.IncrCounterWithLabels([]string{"catalog", "service", "not-found"}, 1,
 				[]metrics.Label{{Name: "service", Value: args.ServiceName}})
 		}
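The labeled counters above keep the event in the metric name and move the service and tag dimensions into labels. A toy, hedged sketch (values made up, not Consul code) of how such a call lands in a sink:

package main

import (
	"fmt"
	"time"

	metrics "github.com/armon/go-metrics"
)

func main() {
	sink := metrics.NewInmemSink(10*time.Second, time.Minute)
	cfg := metrics.DefaultConfig("consul")
	cfg.EnableHostname = false
	m, err := metrics.New(cfg, sink)
	if err != nil {
		panic(err)
	}

	// Same shape as the calls in the hunk: the name carries the event, the
	// label carries the dimension.
	m.IncrCounterWithLabels([]string{"catalog", "service", "query"}, 1,
		[]metrics.Label{{Name: "service", Value: "web"}})

	// The in-memory sink flattens labels into the key; label-aware sinks such
	// as Prometheus keep "service" as a real label instead.
	for name := range sink.Data()[0].Counters {
		fmt.Println(name)
	}
}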
@@ -249,10 +249,8 @@ TRY:
 	}
 
 	// Enforce the RPC limit.
-	metrics.IncrCounter([]string{"consul", "client", "rpc"}, 1)
 	metrics.IncrCounter([]string{"client", "rpc"}, 1)
 	if !c.rpcLimiter.Allow() {
-		metrics.IncrCounter([]string{"consul", "client", "rpc", "exceeded"}, 1)
 		metrics.IncrCounter([]string{"client", "rpc", "exceeded"}, 1)
 		return structs.ErrRPCRateExceeded
 	}
@@ -293,10 +291,8 @@ func (c *Client) SnapshotRPC(args *structs.SnapshotRequest, in io.Reader, out io
 	}
 
 	// Enforce the RPC limit.
-	metrics.IncrCounter([]string{"consul", "client", "rpc"}, 1)
 	metrics.IncrCounter([]string{"client", "rpc"}, 1)
 	if !c.rpcLimiter.Allow() {
-		metrics.IncrCounter([]string{"consul", "client", "rpc", "exceeded"}, 1)
 		metrics.IncrCounter([]string{"client", "rpc", "exceeded"}, 1)
 		return structs.ErrRPCRateExceeded
 	}
@@ -23,7 +23,6 @@ func init() {
 }
 
 func (c *FSM) applyRegister(buf []byte, index uint64) interface{} {
-	defer metrics.MeasureSince([]string{"consul", "fsm", "register"}, time.Now())
 	defer metrics.MeasureSince([]string{"fsm", "register"}, time.Now())
 	var req structs.RegisterRequest
 	if err := structs.Decode(buf, &req); err != nil {
@@ -39,7 +38,6 @@ func (c *FSM) applyRegister(buf []byte, index uint64) interface{} {
 }
 
 func (c *FSM) applyDeregister(buf []byte, index uint64) interface{} {
-	defer metrics.MeasureSince([]string{"consul", "fsm", "deregister"}, time.Now())
 	defer metrics.MeasureSince([]string{"fsm", "deregister"}, time.Now())
 	var req structs.DeregisterRequest
 	if err := structs.Decode(buf, &req); err != nil {
@@ -73,8 +71,6 @@ func (c *FSM) applyKVSOperation(buf []byte, index uint64) interface{} {
 	if err := structs.Decode(buf, &req); err != nil {
 		panic(fmt.Errorf("failed to decode request: %v", err))
 	}
-	defer metrics.MeasureSinceWithLabels([]string{"consul", "fsm", "kvs"}, time.Now(),
-		[]metrics.Label{{Name: "op", Value: string(req.Op)}})
 	defer metrics.MeasureSinceWithLabels([]string{"fsm", "kvs"}, time.Now(),
 		[]metrics.Label{{Name: "op", Value: string(req.Op)}})
 	switch req.Op {
@@ -120,8 +116,6 @@ func (c *FSM) applySessionOperation(buf []byte, index uint64) interface{} {
 	if err := structs.Decode(buf, &req); err != nil {
 		panic(fmt.Errorf("failed to decode request: %v", err))
 	}
-	defer metrics.MeasureSinceWithLabels([]string{"consul", "fsm", "session"}, time.Now(),
-		[]metrics.Label{{Name: "op", Value: string(req.Op)}})
 	defer metrics.MeasureSinceWithLabels([]string{"fsm", "session"}, time.Now(),
 		[]metrics.Label{{Name: "op", Value: string(req.Op)}})
 	switch req.Op {
@@ -143,8 +137,6 @@ func (c *FSM) applyACLOperation(buf []byte, index uint64) interface{} {
 	if err := structs.Decode(buf, &req); err != nil {
 		panic(fmt.Errorf("failed to decode request: %v", err))
 	}
-	defer metrics.MeasureSinceWithLabels([]string{"consul", "fsm", "acl"}, time.Now(),
-		[]metrics.Label{{Name: "op", Value: string(req.Op)}})
 	defer metrics.MeasureSinceWithLabels([]string{"fsm", "acl"}, time.Now(),
 		[]metrics.Label{{Name: "op", Value: string(req.Op)}})
 	switch req.Op {
@@ -177,8 +169,6 @@ func (c *FSM) applyTombstoneOperation(buf []byte, index uint64) interface{} {
 	if err := structs.Decode(buf, &req); err != nil {
 		panic(fmt.Errorf("failed to decode request: %v", err))
 	}
-	defer metrics.MeasureSinceWithLabels([]string{"consul", "fsm", "tombstone"}, time.Now(),
-		[]metrics.Label{{Name: "op", Value: string(req.Op)}})
 	defer metrics.MeasureSinceWithLabels([]string{"fsm", "tombstone"}, time.Now(),
 		[]metrics.Label{{Name: "op", Value: string(req.Op)}})
 	switch req.Op {
@@ -199,7 +189,6 @@ func (c *FSM) applyCoordinateBatchUpdate(buf []byte, index uint64) interface{} {
 	if err := structs.Decode(buf, &updates); err != nil {
 		panic(fmt.Errorf("failed to decode batch updates: %v", err))
 	}
-	defer metrics.MeasureSince([]string{"consul", "fsm", "coordinate", "batch-update"}, time.Now())
 	defer metrics.MeasureSince([]string{"fsm", "coordinate", "batch-update"}, time.Now())
 	if err := c.state.CoordinateBatchUpdate(index, updates); err != nil {
 		return err
@@ -215,8 +204,6 @@ func (c *FSM) applyPreparedQueryOperation(buf []byte, index uint64) interface{}
 		panic(fmt.Errorf("failed to decode request: %v", err))
 	}
 
-	defer metrics.MeasureSinceWithLabels([]string{"consul", "fsm", "prepared-query"}, time.Now(),
-		[]metrics.Label{{Name: "op", Value: string(req.Op)}})
 	defer metrics.MeasureSinceWithLabels([]string{"fsm", "prepared-query"}, time.Now(),
 		[]metrics.Label{{Name: "op", Value: string(req.Op)}})
 	switch req.Op {
@@ -235,7 +222,6 @@ func (c *FSM) applyTxn(buf []byte, index uint64) interface{} {
 	if err := structs.Decode(buf, &req); err != nil {
 		panic(fmt.Errorf("failed to decode request: %v", err))
 	}
-	defer metrics.MeasureSince([]string{"consul", "fsm", "txn"}, time.Now())
 	defer metrics.MeasureSince([]string{"fsm", "txn"}, time.Now())
 	results, errors := c.state.TxnRW(index, req.Ops)
 	return structs.TxnResponse{
@@ -249,7 +235,6 @@ func (c *FSM) applyAutopilotUpdate(buf []byte, index uint64) interface{} {
 	if err := structs.Decode(buf, &req); err != nil {
 		panic(fmt.Errorf("failed to decode request: %v", err))
 	}
-	defer metrics.MeasureSince([]string{"consul", "fsm", "autopilot"}, time.Now())
 	defer metrics.MeasureSince([]string{"fsm", "autopilot"}, time.Now())
 
 	if req.CAS {
@@ -57,7 +57,6 @@ func registerRestorer(msg structs.MessageType, fn restorer) {
 
 // Persist saves the FSM snapshot out to the given sink.
 func (s *snapshot) Persist(sink raft.SnapshotSink) error {
-	defer metrics.MeasureSince([]string{"consul", "fsm", "persist"}, time.Now())
 	defer metrics.MeasureSince([]string{"fsm", "persist"}, time.Now())
 
 	// Write the header
@@ -139,19 +139,13 @@ func (h *Health) ServiceNodes(args *structs.ServiceSpecificRequest, reply *struc
 
 	// Provide some metrics
 	if err == nil {
-		metrics.IncrCounterWithLabels([]string{"consul", "health", "service", "query"}, 1,
-			[]metrics.Label{{Name: "service", Value: args.ServiceName}})
 		metrics.IncrCounterWithLabels([]string{"health", "service", "query"}, 1,
 			[]metrics.Label{{Name: "service", Value: args.ServiceName}})
 		if args.ServiceTag != "" {
-			metrics.IncrCounterWithLabels([]string{"consul", "health", "service", "query-tag"}, 1,
-				[]metrics.Label{{Name: "service", Value: args.ServiceName}, {Name: "tag", Value: args.ServiceTag}})
 			metrics.IncrCounterWithLabels([]string{"health", "service", "query-tag"}, 1,
 				[]metrics.Label{{Name: "service", Value: args.ServiceName}, {Name: "tag", Value: args.ServiceTag}})
 		}
 		if len(reply.Nodes) == 0 {
-			metrics.IncrCounterWithLabels([]string{"consul", "health", "service", "not-found"}, 1,
-				[]metrics.Label{{Name: "service", Value: args.ServiceName}})
 			metrics.IncrCounterWithLabels([]string{"health", "service", "not-found"}, 1,
 				[]metrics.Label{{Name: "service", Value: args.ServiceName}})
 		}
@@ -81,7 +81,6 @@ func (k *KVS) Apply(args *structs.KVSRequest, reply *bool) error {
 	if done, err := k.srv.forward("KVS.Apply", args, args, reply); done {
 		return err
 	}
-	defer metrics.MeasureSince([]string{"consul", "kvs", "apply"}, time.Now())
 	defer metrics.MeasureSince([]string{"kvs", "apply"}, time.Now())
 
 	// Perform the pre-apply checks.
@@ -116,7 +116,6 @@ RECONCILE:
 		s.logger.Printf("[ERR] consul: failed to wait for barrier: %v", err)
 		goto WAIT
 	}
-	metrics.MeasureSince([]string{"consul", "leader", "barrier"}, start)
 	metrics.MeasureSince([]string{"leader", "barrier"}, start)
 
 	// Check if we need to handle initial leadership actions
@@ -183,7 +182,6 @@ WAIT:
 // previously inflight transactions have been committed and that our
 // state is up-to-date.
 func (s *Server) establishLeadership() error {
-	defer metrics.MeasureSince([]string{"consul", "leader", "establish_leadership"}, time.Now())
 	// This will create the anonymous token and master token (if that is
 	// configured).
 	if err := s.initializeACL(); err != nil {
@@ -219,7 +217,6 @@ func (s *Server) establishLeadership() error {
 // revokeLeadership is invoked once we step down as leader.
 // This is used to cleanup any state that may be specific to a leader.
 func (s *Server) revokeLeadership() error {
-	defer metrics.MeasureSince([]string{"consul", "leader", "revoke_leadership"}, time.Now())
 	// Disable the tombstone GC, since it is only useful as a leader
 	s.tombstoneGC.SetEnabled(false)
 
@@ -444,7 +441,6 @@ func (s *Server) reconcileMember(member serf.Member) error {
 		s.logger.Printf("[WARN] consul: skipping reconcile of node %v", member)
 		return nil
 	}
-	defer metrics.MeasureSince([]string{"consul", "leader", "reconcileMember"}, time.Now())
 	defer metrics.MeasureSince([]string{"leader", "reconcileMember"}, time.Now())
 	var err error
 	switch member.Status {
@@ -805,7 +801,6 @@ func (s *Server) removeConsulServer(m serf.Member, port int) error {
 // through Raft to ensure consistency. We do this outside the leader loop
 // to avoid blocking.
 func (s *Server) reapTombstones(index uint64) {
-	defer metrics.MeasureSince([]string{"consul", "leader", "reapTombstones"}, time.Now())
 	defer metrics.MeasureSince([]string{"leader", "reapTombstones"}, time.Now())
 	req := structs.TombstoneRequest{
 		Datacenter: s.config.Datacenter,
@@ -32,7 +32,6 @@ func (p *PreparedQuery) Apply(args *structs.PreparedQueryRequest, reply *string)
 	if done, err := p.srv.forward("PreparedQuery.Apply", args, args, reply); done {
 		return err
 	}
-	defer metrics.MeasureSince([]string{"consul", "prepared-query", "apply"}, time.Now())
 	defer metrics.MeasureSince([]string{"prepared-query", "apply"}, time.Now())
 
 	// Validate the ID. We must create new IDs before applying to the Raft
@@ -287,7 +286,6 @@ func (p *PreparedQuery) Explain(args *structs.PreparedQueryExecuteRequest,
 	if done, err := p.srv.forward("PreparedQuery.Explain", args, args, reply); done {
 		return err
 	}
-	defer metrics.MeasureSince([]string{"consul", "prepared-query", "explain"}, time.Now())
 	defer metrics.MeasureSince([]string{"prepared-query", "explain"}, time.Now())
 
 	// We have to do this ourselves since we are not doing a blocking RPC.
@@ -335,7 +333,6 @@ func (p *PreparedQuery) Execute(args *structs.PreparedQueryExecuteRequest,
 	if done, err := p.srv.forward("PreparedQuery.Execute", args, args, reply); done {
 		return err
 	}
-	defer metrics.MeasureSince([]string{"consul", "prepared-query", "execute"}, time.Now())
 	defer metrics.MeasureSince([]string{"prepared-query", "execute"}, time.Now())
 
 	// We have to do this ourselves since we are not doing a blocking RPC.
@@ -471,7 +468,6 @@ func (p *PreparedQuery) ExecuteRemote(args *structs.PreparedQueryExecuteRemoteRe
 	if done, err := p.srv.forward("PreparedQuery.ExecuteRemote", args, args, reply); done {
 		return err
 	}
-	defer metrics.MeasureSince([]string{"consul", "prepared-query", "execute_remote"}, time.Now())
 	defer metrics.MeasureSince([]string{"prepared-query", "execute_remote"}, time.Now())
 
 	// We have to do this ourselves since we are not doing a blocking RPC.
@@ -59,7 +59,6 @@ func (s *Server) listen(listener net.Listener) {
 		}
 
 		go s.handleConn(conn, false)
-		metrics.IncrCounter([]string{"consul", "rpc", "accept_conn"}, 1)
 		metrics.IncrCounter([]string{"rpc", "accept_conn"}, 1)
 	}
 }
@@ -97,7 +96,6 @@ func (s *Server) handleConn(conn net.Conn, isTLS bool) {
 		s.handleConsulConn(conn)
 
 	case pool.RPCRaft:
-		metrics.IncrCounter([]string{"consul", "rpc", "raft_handoff"}, 1)
 		metrics.IncrCounter([]string{"rpc", "raft_handoff"}, 1)
 		s.raftLayer.Handoff(conn)
 
@@ -156,12 +154,10 @@ func (s *Server) handleConsulConn(conn net.Conn) {
 		if err := s.rpcServer.ServeRequest(rpcCodec); err != nil {
 			if err != io.EOF && !strings.Contains(err.Error(), "closed") {
 				s.logger.Printf("[ERR] consul.rpc: RPC error: %v %s", err, logConn(conn))
-				metrics.IncrCounter([]string{"consul", "rpc", "request_error"}, 1)
 				metrics.IncrCounter([]string{"rpc", "request_error"}, 1)
 			}
 			return
 		}
-		metrics.IncrCounter([]string{"consul", "rpc", "request"}, 1)
 		metrics.IncrCounter([]string{"rpc", "request"}, 1)
 	}
 }
@@ -288,8 +284,6 @@ func (s *Server) forwardDC(method, dc string, args interface{}, reply interface{
 		return structs.ErrNoDCPath
 	}
 
-	metrics.IncrCounterWithLabels([]string{"consul", "rpc", "cross-dc"}, 1,
-		[]metrics.Label{{Name: "datacenter", Value: dc}})
 	metrics.IncrCounterWithLabels([]string{"rpc", "cross-dc"}, 1,
 		[]metrics.Label{{Name: "datacenter", Value: dc}})
 	if err := s.connPool.RPC(dc, server.Addr, server.Version, method, server.UseTLS, args, reply); err != nil {
@@ -401,7 +395,6 @@ RUN_QUERY:
 	}
 
 	// Run the query.
-	metrics.IncrCounter([]string{"consul", "rpc", "query"}, 1)
 	metrics.IncrCounter([]string{"rpc", "query"}, 1)
 
 	// Operate on a consistent set of state. This makes sure that the
@@ -452,7 +445,6 @@ func (s *Server) setQueryMeta(m *structs.QueryMeta) {
 // consistentRead is used to ensure we do not perform a stale
 // read. This is done by verifying leadership before the read.
 func (s *Server) consistentRead() error {
-	defer metrics.MeasureSince([]string{"consul", "rpc", "consistentRead"}, time.Now())
 	defer metrics.MeasureSince([]string{"rpc", "consistentRead"}, time.Now())
 	future := s.raft.VerifyLeader()
 	if err := future.Error(); err != nil {
@@ -59,7 +59,6 @@ func (s *Server) floodSegments(config *Config) {
 // all live nodes are registered, all failed nodes are marked as such, and all
 // left nodes are de-registered.
 func (s *Server) reconcile() (err error) {
-	defer metrics.MeasureSince([]string{"consul", "leader", "reconcile"}, time.Now())
 	defer metrics.MeasureSince([]string{"leader", "reconcile"}, time.Now())
 	members := s.serfLAN.Members()
 	knownMembers := make(map[string]struct{})
@@ -23,7 +23,6 @@ func (s *Session) Apply(args *structs.SessionRequest, reply *string) error {
 	if done, err := s.srv.forward("Session.Apply", args, args, reply); done {
 		return err
 	}
-	defer metrics.MeasureSince([]string{"consul", "session", "apply"}, time.Now())
 	defer metrics.MeasureSince([]string{"session", "apply"}, time.Now())
 
 	// Verify the args
@@ -222,7 +221,6 @@ func (s *Session) Renew(args *structs.SessionSpecificRequest,
 	if done, err := s.srv.forward("Session.Renew", args, args, reply); done {
 		return err
 	}
-	defer metrics.MeasureSince([]string{"consul", "session", "renew"}, time.Now())
 	defer metrics.MeasureSince([]string{"session", "renew"}, time.Now())
 
 	// Get the session, from local state.
@@ -84,7 +84,6 @@ func (s *Server) createSessionTimer(id string, ttl time.Duration) {
 // invalidateSession is invoked when a session TTL is reached and we
 // need to invalidate the session.
 func (s *Server) invalidateSession(id string) {
-	defer metrics.MeasureSince([]string{"consul", "session_ttl", "invalidate"}, time.Now())
 	defer metrics.MeasureSince([]string{"session_ttl", "invalidate"}, time.Now())
 
 	// Clear the session timer
@@ -134,7 +133,6 @@ func (s *Server) sessionStats() {
 	for {
 		select {
 		case <-time.After(5 * time.Second):
-			metrics.SetGauge([]string{"consul", "session_ttl", "active"}, float32(s.sessionTimers.Len()))
 			metrics.SetGauge([]string{"session_ttl", "active"}, float32(s.sessionTimers.Len()))
 
 		case <-s.shutdownCh:
@@ -46,7 +46,6 @@ func (t *Txn) Apply(args *structs.TxnRequest, reply *structs.TxnResponse) error
 	if done, err := t.srv.forward("Txn.Apply", args, args, reply); done {
 		return err
 	}
-	defer metrics.MeasureSince([]string{"consul", "txn", "apply"}, time.Now())
 	defer metrics.MeasureSince([]string{"txn", "apply"}, time.Now())
 
 	// Run the pre-checks before we send the transaction into Raft.
@@ -90,7 +89,6 @@ func (t *Txn) Read(args *structs.TxnReadRequest, reply *structs.TxnReadResponse)
 	if done, err := t.srv.forward("Txn.Read", args, args, reply); done {
 		return err
 	}
-	defer metrics.MeasureSince([]string{"consul", "txn", "read"}, time.Now())
 	defer metrics.MeasureSince([]string{"txn", "read"}, time.Now())
 
 	// We have to do this ourselves since we are not doing a blocking RPC.
@@ -158,8 +158,6 @@ START:
 func (d *DNSServer) handlePtr(resp dns.ResponseWriter, req *dns.Msg) {
 	q := req.Question[0]
 	defer func(s time.Time) {
-		metrics.MeasureSinceWithLabels([]string{"consul", "dns", "ptr_query"}, s,
-			[]metrics.Label{{Name: "node", Value: d.agent.config.NodeName}})
 		metrics.MeasureSinceWithLabels([]string{"dns", "ptr_query"}, s,
 			[]metrics.Label{{Name: "node", Value: d.agent.config.NodeName}})
 		d.logger.Printf("[DEBUG] dns: request for %v (%v) from client %s (%s)",
@@ -230,8 +228,6 @@ func (d *DNSServer) handlePtr(resp dns.ResponseWriter, req *dns.Msg) {
 func (d *DNSServer) handleQuery(resp dns.ResponseWriter, req *dns.Msg) {
 	q := req.Question[0]
 	defer func(s time.Time) {
-		metrics.MeasureSinceWithLabels([]string{"consul", "dns", "domain_query"}, s,
-			[]metrics.Label{{Name: "node", Value: d.agent.config.NodeName}})
 		metrics.MeasureSinceWithLabels([]string{"dns", "domain_query"}, s,
 			[]metrics.Label{{Name: "node", Value: d.agent.config.NodeName}})
 		d.logger.Printf("[DEBUG] dns: request for name %v type %v class %v (took %v) from client %s (%s)",
@@ -542,7 +538,6 @@ RPC:
 			d.logger.Printf("[WARN] dns: Query results too stale, re-requesting")
 			goto RPC
 		} else if out.LastContact > staleCounterThreshold {
-			metrics.IncrCounter([]string{"consul", "dns", "stale_queries"}, 1)
 			metrics.IncrCounter([]string{"dns", "stale_queries"}, 1)
 		}
 	}
@@ -891,7 +886,6 @@ func (d *DNSServer) lookupServiceNodes(datacenter, service, tag string) (structs
 	}
 
 	if args.AllowStale && out.LastContact > staleCounterThreshold {
-		metrics.IncrCounter([]string{"consul", "dns", "stale_queries"}, 1)
 		metrics.IncrCounter([]string{"dns", "stale_queries"}, 1)
 	}
 
@@ -1042,7 +1036,6 @@ RPC:
 			d.logger.Printf("[WARN] dns: Query results too stale, re-requesting")
 			goto RPC
 		} else if out.LastContact > staleCounterThreshold {
-			metrics.IncrCounter([]string{"consul", "dns", "stale_queries"}, 1)
 			metrics.IncrCounter([]string{"dns", "stale_queries"}, 1)
 		}
 	}
@@ -110,7 +110,6 @@ func (s *HTTPServer) handler(enableDebug bool) http.Handler {
 		start := time.Now()
 		handler(resp, req)
 		key := append([]string{"http", req.Method}, parts...)
-		metrics.MeasureSince(append([]string{"consul"}, key...), start)
 		metrics.MeasureSince(key, start)
 	}
 
@@ -1340,10 +1340,6 @@ Consul will not enable TLS for the HTTP API unless the `https` port has been ass
   The format is compatible natively with prometheus. When running in this mode, it is recommended to also enable the option
   <a href="#telemetry-disable_hostname">`disable_hostname`</a> to avoid having prefixed metrics with hostname.
 
-* <a name="telemetry-enable_deprecated_names"></a><a href="#telemetry-enable_deprecated_names">`enable_deprecated_names`
-  </a>Added in Consul 1.0, this enables old metric names of the format `consul.consul...` to be sent alongside
-  other metrics. Defaults to false.
-
 * <a name="telemetry-statsd_address"></a><a href="#telemetry-statsd_address">`statsd_address`</a> This provides the
   address of a statsd instance in the format `host:port`. If provided, Consul will send various telemetry information to that instance for
   aggregation. This can be used to capture runtime information. This sends UDP packets only and can be used with
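As a companion to the statsd option described above, a hedged Go sketch of roughly what statsd_address corresponds to in go-metrics terms (the address and setup are illustrative, not the agent's actual bootstrap code):

package main

import (
	"time"

	metrics "github.com/armon/go-metrics"
)

func main() {
	// statsd_address = "host:port" boils down to a UDP statsd sink that
	// receives every metric the agent emits.
	sink, err := metrics.NewStatsdSink("127.0.0.1:8125")
	if err != nil {
		panic(err)
	}

	cfg := metrics.DefaultConfig("consul")
	cfg.EnableHostname = false // in the spirit of telemetry.disable_hostname
	m, err := metrics.New(cfg, sink)
	if err != nil {
		panic(err)
	}

	m.IncrCounter([]string{"example"}, 1) // shows up at the daemon as a counter named consul.example
	time.Sleep(200 * time.Millisecond)    // give the sink's background flusher a moment before exiting
}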