package structs

import (
	"bytes"
	"crypto/md5"
	"encoding/json"
	"fmt"
	"math/rand"
	"net"
	"reflect"
	"regexp"
	"sort"
	"strconv"
	"strings"
	"time"

	"github.com/hashicorp/go-msgpack/codec"
	"github.com/hashicorp/go-multierror"
	"github.com/hashicorp/serf/coordinate"
	"github.com/mitchellh/hashstructure"

	"github.com/hashicorp/consul/agent/cache"
	"github.com/hashicorp/consul/api"
	"github.com/hashicorp/consul/lib"
	"github.com/hashicorp/consul/types"
)

type MessageType uint8

// RaftIndex is used to track the index used while creating
// or modifying a given struct type.
type RaftIndex struct {
	CreateIndex uint64 `bexpr:"-"`
	ModifyIndex uint64 `bexpr:"-"`
}

// These are serialized between Consul servers and stored in Consul snapshots,
// so entries must only ever be added.
const (
	RegisterRequestType             MessageType = 0
	DeregisterRequestType                       = 1
	KVSRequestType                              = 2
	SessionRequestType                          = 3
	ACLRequestType                              = 4 // DEPRECATED (ACL-Legacy-Compat)
	TombstoneRequestType                        = 5
	CoordinateBatchUpdateType                   = 6
	PreparedQueryRequestType                    = 7
	TxnRequestType                              = 8
	AutopilotRequestType                        = 9
	AreaRequestType                             = 10
	ACLBootstrapRequestType                     = 11
	IntentionRequestType                        = 12
	ConnectCARequestType                        = 13
	ConnectCAProviderStateType                  = 14
	ConnectCAConfigType                         = 15 // FSM snapshots only.
	IndexRequestType                            = 16 // FSM snapshots only.
	ACLTokenSetRequestType                      = 17
	ACLTokenDeleteRequestType                   = 18
	ACLPolicySetRequestType                     = 19
	ACLPolicyDeleteRequestType                  = 20
	ConnectCALeafRequestType                    = 21
	ConfigEntryRequestType                      = 22
	ACLRoleSetRequestType                       = 23
	ACLRoleDeleteRequestType                    = 24
	ACLBindingRuleSetRequestType                = 25
	ACLBindingRuleDeleteRequestType             = 26
	ACLAuthMethodSetRequestType                 = 27
	ACLAuthMethodDeleteRequestType              = 28
	ChunkingStateType                           = 29
	FederationStateRequestType                  = 30
)

const (
	// IgnoreUnknownTypeFlag is set along with a MessageType
	// to indicate that the message type can be safely ignored
	// if it is not recognized. This is for future proofing, so
	// that new commands can be added in a way that won't cause
	// old servers to crash when the FSM attempts to process them.
	IgnoreUnknownTypeFlag MessageType = 128

	// NodeMaint is the special key set by a node in maintenance mode.
	NodeMaint = "_node_maintenance"

	// ServiceMaintPrefix is the prefix for a service in maintenance mode.
	ServiceMaintPrefix = "_service_maintenance:"

	// The meta key prefix reserved for Consul's internal use
	metaKeyReservedPrefix = "consul-"

	// metaMaxKeyPairs is the maximum number of metadata key pairs allowed to be registered
	metaMaxKeyPairs = 64

	// metaKeyMaxLength is the maximum allowed length of a metadata key
	metaKeyMaxLength = 128

	// metaValueMaxLength is the maximum allowed length of a metadata value
	metaValueMaxLength = 512

	// MetaSegmentKey is the node metadata key used to store the node's network segment
	MetaSegmentKey = "consul-network-segment"

	// MetaWANFederationKey is the mesh gateway metadata key that indicates a
	// mesh gateway is usable for wan federation.
	MetaWANFederationKey = "consul-wan-federation"

	// MaxLockDelay provides a maximum LockDelay value for
	// a session. Any value above this will not be respected.
	MaxLockDelay = 60 * time.Second

	// lockDelayMinThreshold is used in JSON decoding to convert a
	// numeric lockdelay value from nanoseconds to seconds if it is
	// below this threshold. Users often send a value like 5, which
	// they assume is seconds, but because Go uses nanosecond granularity,
	// it ends up being very small. If we see a value below this threshold,
	// we multiply by time.Second.
	lockDelayMinThreshold = 1000

	// WildcardSpecifier is the string which should be used for specifying a wildcard.
	// The exact semantics of the wildcard are left up to the code where it's used.
	WildcardSpecifier = "*"
)
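
// The threshold conversion described above could be applied like the sketch below.
// This is an illustrative example only (normalizeLockDelay is a hypothetical helper,
// not part of this package); the real conversion happens where session requests are
// decoded:
//
//	func normalizeLockDelay(raw float64) time.Duration {
//		d := time.Duration(raw)
//		if d < lockDelayMinThreshold {
//			// A bare number such as 5 is assumed to mean seconds, not nanoseconds.
//			d = d * time.Second
//		}
//		return d
//	}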

var allowedConsulMetaKeysForMeshGateway = map[string]struct{}{MetaWANFederationKey: struct{}{}}

var (
	NodeMaintCheckID = NewCheckID(NodeMaint, nil)
)

const (
	TaggedAddressWAN     = "wan"
	TaggedAddressWANIPv4 = "wan_ipv4"
	TaggedAddressWANIPv6 = "wan_ipv6"
	TaggedAddressLAN     = "lan"
	TaggedAddressLANIPv4 = "lan_ipv4"
	TaggedAddressLANIPv6 = "lan_ipv6"
)

// metaKeyFormat checks if a metadata key string is valid
var metaKeyFormat = regexp.MustCompile(`^[a-zA-Z0-9_-]+$`).MatchString

func ValidStatus(s string) bool {
	return s == api.HealthPassing || s == api.HealthWarning || s == api.HealthCritical
}

// RPCInfo is used to describe common information about a query
type RPCInfo interface {
	RequestDatacenter() string
	IsRead() bool
	AllowStaleRead() bool
	TokenSecret() string
	SetTokenSecret(string)
}

// QueryOptions is used to specify various flags for read queries
type QueryOptions struct {
	// Token is the ACL token ID. If not provided, the 'anonymous'
	// token is assumed for backwards compatibility.
	Token string

	// If set, wait until query exceeds given index. Must be provided
	// with MaxQueryTime.
	MinQueryIndex uint64

	// Provided with MinQueryIndex to wait for change.
	MaxQueryTime time.Duration

	// If set, any follower can service the request. Results
	// may be arbitrarily stale.
	AllowStale bool

	// If set, the leader must verify leadership prior to
	// servicing the request. Prevents a stale read.
	RequireConsistent bool

	// If set, the local agent may respond with an arbitrarily stale locally
	// cached response. The semantics differ from AllowStale since the agent may
	// be entirely partitioned from the servers and still considered "healthy" by
	// operators. Stale responses from Servers are also arbitrarily stale, but can
	// provide additional bounds on the last contact time from the leader. It's
	// expected that servers that are partitioned are noticed and replaced in a
	// timely way by operators while the same may not be true for client agents.
	UseCache bool

	// If set and AllowStale is true, a stale read is tried first, and a
	// consistent read is performed if the stale read is older than this value.
	MaxStaleDuration time.Duration

	// MaxAge limits how old a cached value will be returned if UseCache is true.
	// If there is a cached response that is older than the MaxAge, it is treated
	// as a cache miss and a new fetch invoked. If the fetch fails, the error is
	// returned. Clients that wish to allow for stale results on error can set
	// StaleIfError to a longer duration to change this behavior. It is ignored
	// if the endpoint supports background refresh caching. See
	// https://www.consul.io/api/index.html#agent-caching for more details.
	MaxAge time.Duration

	// MustRevalidate forces the agent to fetch a fresh version of a cached
	// resource or at least validate that the cached version is still fresh. It is
	// implied by either max-age=0 or must-revalidate Cache-Control headers. It
	// only makes sense when UseCache is true. We store it since MaxAge = 0 is the
	// default unset value.
	MustRevalidate bool

	// StaleIfError specifies how stale the client will accept a cached response
	// if the servers are unavailable to fetch a fresh one. Only makes sense when
	// UseCache is true and MaxAge is set to a lower, non-zero value. It is
	// ignored if the endpoint supports background refresh caching. See
	// https://www.consul.io/api/index.html#agent-caching for more details.
	StaleIfError time.Duration

	// Filter specifies the go-bexpr filter expression to be used for
	// filtering the data prior to returning a response
	Filter string
}
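
// An illustrative way these cache fields would be combined on a read request
// (the request type and values below are example choices, not fixed defaults):
//
//	req := DCSpecificRequest{
//		Datacenter: "dc1",
//		QueryOptions: QueryOptions{
//			UseCache:     true,
//			MaxAge:       30 * time.Second,
//			StaleIfError: 5 * time.Minute,
//		},
//	}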

// IsRead is always true for QueryOptions.
func (q QueryOptions) IsRead() bool {
	return true
}

// ConsistencyLevel displays the consistency required by a request
func (q QueryOptions) ConsistencyLevel() string {
	if q.RequireConsistent {
		return "consistent"
	} else if q.AllowStale {
		return "stale"
	} else {
		return "leader"
	}
}
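
// For example (illustrative values):
//
//	QueryOptions{RequireConsistent: true}.ConsistencyLevel() // "consistent"
//	QueryOptions{AllowStale: true}.ConsistencyLevel()        // "stale"
//	QueryOptions{}.ConsistencyLevel()                        // "leader"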

func (q QueryOptions) AllowStaleRead() bool {
	return q.AllowStale
}

func (q QueryOptions) TokenSecret() string {
	return q.Token
}

func (q *QueryOptions) SetTokenSecret(s string) {
	q.Token = s
}

type WriteRequest struct {
	// Token is the ACL token ID. If not provided, the 'anonymous'
	// token is assumed for backwards compatibility.
	Token string
}

// WriteRequest only applies to writes, so IsRead is always false.
func (w WriteRequest) IsRead() bool {
	return false
}

func (w WriteRequest) AllowStaleRead() bool {
	return false
}

func (w WriteRequest) TokenSecret() string {
	return w.Token
}

func (w *WriteRequest) SetTokenSecret(s string) {
	w.Token = s
}

// QueryMeta allows a query response to include potentially
// useful metadata about a query
type QueryMeta struct {
	// This is the index associated with the read
	Index uint64

	// If AllowStale is used, this is time elapsed since
	// last contact between the follower and leader. This
	// can be used to gauge staleness.
	LastContact time.Duration

	// Used to indicate if there is a known leader node
	KnownLeader bool

	// ConsistencyLevel is the consistency used to serve the query.
	// Having `discovery_max_stale` on the agent can affect whether
	// the request was served by a leader.
	ConsistencyLevel string
}

// RegisterRequest is used for the Catalog.Register endpoint
// to register a node as providing a service. If no service
// is provided, the node is registered.
type RegisterRequest struct {
	Datacenter      string
	ID              types.NodeID
	Node            string
	Address         string
	TaggedAddresses map[string]string
	NodeMeta        map[string]string
	Service         *NodeService
	Check           *HealthCheck
	Checks          HealthChecks

	// SkipNodeUpdate can be used when a register request is intended for
	// updating a service and/or checks, but doesn't want to overwrite any
	// node information if the node is already registered. If the node
	// doesn't exist, it will still be created, but if the node exists, any
	// node portion of this update will not apply.
	SkipNodeUpdate bool

	// EnterpriseMeta is the embedded enterprise metadata
	EnterpriseMeta `hcl:",squash" mapstructure:",squash"`

	WriteRequest
}

func (r *RegisterRequest) RequestDatacenter() string {
	return r.Datacenter
}

// ChangesNode returns true if the given register request changes the given
// node, which can be nil. This only looks for changes to the node record itself,
// not any of the health checks.
func (r *RegisterRequest) ChangesNode(node *Node) bool {
	// This means it's creating the node.
	if node == nil {
		return true
	}

	// If we've been asked to skip the node update, then say there are no
	// changes.
	if r.SkipNodeUpdate {
		return false
	}

	// Check if any of the node-level fields are being changed.
	if r.ID != node.ID ||
		r.Node != node.Node ||
		r.Address != node.Address ||
		r.Datacenter != node.Datacenter ||
		!reflect.DeepEqual(r.TaggedAddresses, node.TaggedAddresses) ||
		!reflect.DeepEqual(r.NodeMeta, node.Meta) {
		return true
	}

	return false
}
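
// Illustrative use with hypothetical values: re-registering a node with identical
// node-level fields reports no change, so the catalog can avoid rewriting the node:
//
//	existing := &Node{Node: "web-1", Address: "10.0.0.1"}
//	req := &RegisterRequest{Node: "web-1", Address: "10.0.0.1"}
//	req.ChangesNode(existing) // false; true once any node-level field differs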

// DeregisterRequest is used for the Catalog.Deregister endpoint
// to deregister a node as providing a service. If no service is
// provided the entire node is deregistered.
type DeregisterRequest struct {
	Datacenter     string
	Node           string
	ServiceID      string
	CheckID        types.CheckID
	EnterpriseMeta `hcl:",squash" mapstructure:",squash"`
	WriteRequest
}

func (r *DeregisterRequest) RequestDatacenter() string {
	return r.Datacenter
}

func (r *DeregisterRequest) UnmarshalJSON(data []byte) error {
	type Alias DeregisterRequest
	aux := &struct {
		Address string // obsolete field - but we want to explicitly allow it
		*Alias
	}{
		Alias: (*Alias)(r),
	}

	if err := lib.UnmarshalJSON(data, &aux); err != nil {
		return err
	}
	return nil
}

// QuerySource is used to pass along information about the source node
// in queries so that we can adjust the response based on its network
// coordinates.
type QuerySource struct {
	Datacenter string
	Segment    string
	Node       string
	Ip         string
}

type DatacentersRequest struct {
	QueryOptions
}

func (r *DatacentersRequest) CacheInfo() cache.RequestInfo {
	return cache.RequestInfo{
		Token:          "",
		Datacenter:     "",
		MinIndex:       0,
		Timeout:        r.MaxQueryTime,
		MaxAge:         r.MaxAge,
		MustRevalidate: r.MustRevalidate,
		Key:            "catalog-datacenters", // must not be empty for cache to work
	}
}

// DCSpecificRequest is used to query about a specific DC
type DCSpecificRequest struct {
	Datacenter      string
	NodeMetaFilters map[string]string
	Source          QuerySource
	EnterpriseMeta  `hcl:",squash" mapstructure:",squash"`
	QueryOptions
}

func (r *DCSpecificRequest) RequestDatacenter() string {
	return r.Datacenter
}

func (r *DCSpecificRequest) CacheInfo() cache.RequestInfo {
	info := cache.RequestInfo{
		Token:          r.Token,
		Datacenter:     r.Datacenter,
		MinIndex:       r.MinQueryIndex,
		Timeout:        r.MaxQueryTime,
		MaxAge:         r.MaxAge,
		MustRevalidate: r.MustRevalidate,
	}

	// To calculate the cache key we only hash the node meta filters and the bexpr filter.
	// The datacenter is handled by the cache framework. The other fields are
	// not, but should not be used in any cache types.
	v, err := hashstructure.Hash([]interface{}{
		r.NodeMetaFilters,
		r.Filter,
		r.EnterpriseMeta,
	}, nil)
	if err == nil {
		// If there is an error, we don't set the key. A blank key forces
		// no cache for this request so the request is forwarded directly
		// to the server.
		info.Key = strconv.FormatUint(v, 10)
	}

	return info
}

func (r *DCSpecificRequest) CacheMinIndex() uint64 {
	return r.QueryOptions.MinQueryIndex
}
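
// Illustrative consequence of the hashing above (hypothetical requests): two
// DCSpecificRequests that differ only in blocking-query fields share the same
// cache key, because only NodeMetaFilters, Filter and EnterpriseMeta are hashed:
//
//	a := &DCSpecificRequest{Datacenter: "dc1", QueryOptions: QueryOptions{MinQueryIndex: 1}}
//	b := &DCSpecificRequest{Datacenter: "dc1", QueryOptions: QueryOptions{MinQueryIndex: 7}}
//	a.CacheInfo().Key == b.CacheInfo().Key // true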

type ServiceDumpRequest struct {
	Datacenter     string
	ServiceKind    ServiceKind
	UseServiceKind bool
	Source         QuerySource
	EnterpriseMeta `hcl:",squash" mapstructure:",squash"`
	QueryOptions
}

func (r *ServiceDumpRequest) RequestDatacenter() string {
	return r.Datacenter
}

func (r *ServiceDumpRequest) CacheInfo() cache.RequestInfo {
	info := cache.RequestInfo{
		Token:          r.Token,
		Datacenter:     r.Datacenter,
		MinIndex:       r.MinQueryIndex,
		Timeout:        r.MaxQueryTime,
		MaxAge:         r.MaxAge,
		MustRevalidate: r.MustRevalidate,
	}

	// When we are not using the service kind we want to normalize the ServiceKind
	keyKind := ServiceKindTypical
	if r.UseServiceKind {
		keyKind = r.ServiceKind
	}
	// To calculate the cache key we only hash the node meta filters and the bexpr filter.
	// The datacenter is handled by the cache framework. The other fields are
	// not, but should not be used in any cache types.
	v, err := hashstructure.Hash([]interface{}{
		keyKind,
		r.UseServiceKind,
		r.Filter,
		r.EnterpriseMeta,
	}, nil)
	if err == nil {
		// If there is an error, we don't set the key. A blank key forces
		// no cache for this request so the request is forwarded directly
		// to the server.
		info.Key = strconv.FormatUint(v, 10)
	}

	return info
}

func (r *ServiceDumpRequest) CacheMinIndex() uint64 {
	return r.QueryOptions.MinQueryIndex
}

// ServiceSpecificRequest is used to query about a specific service
type ServiceSpecificRequest struct {
	Datacenter      string
	NodeMetaFilters map[string]string
	ServiceName     string
	// DEPRECATED (singular-service-tag) - remove this when backwards RPC compat
	// with 1.2.x is not required.
	ServiceTag     string
	ServiceTags    []string
	ServiceAddress string
	TagFilter      bool // Controls tag filtering
	Source         QuerySource

	// Connect if true will only search for Connect-compatible services.
	Connect bool

	EnterpriseMeta `hcl:",squash" mapstructure:",squash"`
	QueryOptions
}

func (r *ServiceSpecificRequest) RequestDatacenter() string {
	return r.Datacenter
}

func (r *ServiceSpecificRequest) CacheInfo() cache.RequestInfo {
	info := cache.RequestInfo{
		Token:          r.Token,
		Datacenter:     r.Datacenter,
		MinIndex:       r.MinQueryIndex,
		Timeout:        r.MaxQueryTime,
		MaxAge:         r.MaxAge,
		MustRevalidate: r.MustRevalidate,
	}

	// To calculate the cache key we hash over all the fields that affect the
	// output other than Datacenter and Token which are dealt with in the cache
	// framework already. Note the order here is important for the outcome - if we
	// ever care about cache-invalidation on updates e.g. because we persist
	// cached results, we need to be careful we maintain the same order of fields
	// here. We could alternatively use `hash:set` struct tag on an anonymous
	// struct to make it more robust if it becomes significant.
	sort.Strings(r.ServiceTags)
	v, err := hashstructure.Hash([]interface{}{
		r.NodeMetaFilters,
		r.ServiceName,
		// DEPRECATED (singular-service-tag) - remove this when upgrade RPC compat
		// with 1.2.x is not required. We still need this in because <1.3 agents
		// might still send RPCs with singular tag set. In fact the only place we
		// use this method is in agent cache so if the agent is new enough to have
		// this code this should never be set, but it's safer to include it until we
		// completely remove this field just in case it's erroneously used anywhere
		// (e.g. until this change DNS still used it).
		r.ServiceTag,
		r.ServiceTags,
		r.ServiceAddress,
		r.TagFilter,
		r.Connect,
		r.Filter,
		r.EnterpriseMeta,
	}, nil)
	if err == nil {
		// If there is an error, we don't set the key. A blank key forces
		// no cache for this request so the request is forwarded directly
		// to the server.
		info.Key = strconv.FormatUint(v, 10)
	}

	return info
}

func (r *ServiceSpecificRequest) CacheMinIndex() uint64 {
	return r.QueryOptions.MinQueryIndex
}

// NodeSpecificRequest is used to request the information about a single node
type NodeSpecificRequest struct {
	Datacenter     string
	Node           string
	EnterpriseMeta `hcl:",squash" mapstructure:",squash"`
	QueryOptions
}

func (r *NodeSpecificRequest) RequestDatacenter() string {
	return r.Datacenter
}

func (r *NodeSpecificRequest) CacheInfo() cache.RequestInfo {
	info := cache.RequestInfo{
		Token:          r.Token,
		Datacenter:     r.Datacenter,
		MinIndex:       r.MinQueryIndex,
		Timeout:        r.MaxQueryTime,
		MaxAge:         r.MaxAge,
		MustRevalidate: r.MustRevalidate,
	}

	v, err := hashstructure.Hash([]interface{}{
		r.Node,
		r.Filter,
		r.EnterpriseMeta,
	}, nil)
	if err == nil {
		// If there is an error, we don't set the key. A blank key forces
		// no cache for this request so the request is forwarded directly
		// to the server.
		info.Key = strconv.FormatUint(v, 10)
	}

	return info
}

// ChecksInStateRequest is used to query for checks in a state
type ChecksInStateRequest struct {
	Datacenter      string
	NodeMetaFilters map[string]string
	State           string
	Source          QuerySource

	EnterpriseMeta `mapstructure:",squash"`
	QueryOptions
}

func (r *ChecksInStateRequest) RequestDatacenter() string {
	return r.Datacenter
}

// Used to return information about a node
type Node struct {
	ID              types.NodeID
	Node            string
	Address         string
	Datacenter      string
	TaggedAddresses map[string]string
	Meta            map[string]string

	RaftIndex `bexpr:"-"`
}

func (n *Node) BestAddress(wan bool) string {
	if wan {
		if addr, ok := n.TaggedAddresses[TaggedAddressWAN]; ok {
			return addr
		}
	}
	return n.Address
}
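
// Illustrative behaviour with hypothetical addresses: the WAN tagged address wins
// for WAN lookups, and everything falls back to Address otherwise:
//
//	n := &Node{
//		Address:         "10.0.0.1",
//		TaggedAddresses: map[string]string{TaggedAddressWAN: "198.51.100.1"},
//	}
//	n.BestAddress(true)  // "198.51.100.1"
//	n.BestAddress(false) // "10.0.0.1"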

type Nodes []*Node

// IsSame returns whether two nodes are the same, without taking the
// RaftIndex fields into account.
func (n *Node) IsSame(other *Node) bool {
	return n.ID == other.ID &&
		n.Node == other.Node &&
		n.Address == other.Address &&
		n.Datacenter == other.Datacenter &&
		reflect.DeepEqual(n.TaggedAddresses, other.TaggedAddresses) &&
		reflect.DeepEqual(n.Meta, other.Meta)
}

// ValidateNodeMetadata validates a set of key/value pairs from the agent
// config for use on a Node.
func ValidateNodeMetadata(meta map[string]string, allowConsulPrefix bool) error {
	return validateMetadata(meta, allowConsulPrefix, nil)
}

// ValidateServiceMetadata validates a set of key/value pairs from the agent
// config for use on a Service.
func ValidateServiceMetadata(kind ServiceKind, meta map[string]string, allowConsulPrefix bool) error {
	switch kind {
	case ServiceKindMeshGateway:
		return validateMetadata(meta, allowConsulPrefix, allowedConsulMetaKeysForMeshGateway)
	default:
		return validateMetadata(meta, allowConsulPrefix, nil)
	}
}

func validateMetadata(meta map[string]string, allowConsulPrefix bool, allowedConsulKeys map[string]struct{}) error {
	if len(meta) > metaMaxKeyPairs {
		return fmt.Errorf("Node metadata cannot contain more than %d key/value pairs", metaMaxKeyPairs)
	}

	for key, value := range meta {
		if err := validateMetaPair(key, value, allowConsulPrefix, allowedConsulKeys); err != nil {
			return fmt.Errorf("Couldn't load metadata pair ('%s', '%s'): %s", key, value, err)
		}
	}

	return nil
}

// ValidateWeights checks that the definition of DNS weights is valid
func ValidateWeights(weights *Weights) error {
	if weights == nil {
		return nil
	}
	if weights.Passing < 1 {
		return fmt.Errorf("Passing must be greater than 0")
	}
	if weights.Warning < 0 {
		return fmt.Errorf("Warning must be greater or equal than 0")
	}
	if weights.Passing > 65535 || weights.Warning > 65535 {
		return fmt.Errorf("DNS Weight must be between 0 and 65535")
	}
	return nil
}
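
// For example (illustrative values):
//
//	ValidateWeights(&Weights{Passing: 3, Warning: 1}) // nil
//	ValidateWeights(&Weights{Passing: 0, Warning: 1}) // error: Passing must be greater than 0
//	ValidateWeights(nil)                              // nil; a missing definition is allowed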

// validateMetaPair checks that the given key/value pair is in a valid format
func validateMetaPair(key, value string, allowConsulPrefix bool, allowedConsulKeys map[string]struct{}) error {
	if key == "" {
		return fmt.Errorf("Key cannot be blank")
	}
	if !metaKeyFormat(key) {
		return fmt.Errorf("Key contains invalid characters")
	}
	if len(key) > metaKeyMaxLength {
		return fmt.Errorf("Key is too long (limit: %d characters)", metaKeyMaxLength)
	}
	if strings.HasPrefix(key, metaKeyReservedPrefix) {
		if _, ok := allowedConsulKeys[key]; !allowConsulPrefix && !ok {
			return fmt.Errorf("Key prefix '%s' is reserved for internal use", metaKeyReservedPrefix)
		}
	}
	if len(value) > metaValueMaxLength {
		return fmt.Errorf("Value is too long (limit: %d characters)", metaValueMaxLength)
	}
	return nil
}
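
// Illustrative calls with hypothetical keys, showing how the reserved prefix is
// enforced unless a key is explicitly allowed:
//
//	validateMetaPair("rack", "r1", false, nil)        // nil
//	validateMetaPair("consul-owned", "x", false, nil) // error: reserved prefix
//	validateMetaPair(MetaWANFederationKey, "1", false,
//		allowedConsulMetaKeysForMeshGateway) // nil; key is in the allowed set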

// SatisfiesMetaFilters returns true if the metadata map contains the given filters
func SatisfiesMetaFilters(meta map[string]string, filters map[string]string) bool {
	for key, value := range filters {
		if v, ok := meta[key]; !ok || v != value {
			return false
		}
	}
	return true
}
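
// For example (hypothetical key/values), every filter key must be present with an
// exactly matching value:
//
//	meta := map[string]string{"rack": "r1", "env": "prod"}
//	SatisfiesMetaFilters(meta, map[string]string{"env": "prod"})            // true
//	SatisfiesMetaFilters(meta, map[string]string{"env": "prod", "az": "a"}) // false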

// Services is used to return information about provided services.
// It maps a service name to its available tags.
type Services map[string][]string

// ServiceNode represents a node that is part of a service. ID, Address,
// TaggedAddresses, and NodeMeta are node-related fields that are always empty
// in the state store and are filled in on the way out by parseServiceNodes().
// This is also why PartialClone() skips them, because we know they are blank
// already so it would be a waste of time to copy them.
type ServiceNode struct {
	ID                       types.NodeID
	Node                     string
	Address                  string
	Datacenter               string
	TaggedAddresses          map[string]string
	NodeMeta                 map[string]string
	ServiceKind              ServiceKind
	ServiceID                string
	ServiceName              string
	ServiceTags              []string
	ServiceAddress           string
	ServiceTaggedAddresses   map[string]ServiceAddress `json:",omitempty"`
	ServiceWeights           Weights
	ServiceMeta              map[string]string
	ServicePort              int
	ServiceEnableTagOverride bool
	ServiceProxy             ConnectProxyConfig
	ServiceConnect           ServiceConnect

	EnterpriseMeta `hcl:",squash" mapstructure:",squash" bexpr:"-"`

	RaftIndex `bexpr:"-"`
}

// PartialClone() returns a clone of the given service node, minus the node-
// related fields that get filled in later, Address and TaggedAddresses.
func (s *ServiceNode) PartialClone() *ServiceNode {
	tags := make([]string, len(s.ServiceTags))
	copy(tags, s.ServiceTags)
	nsmeta := make(map[string]string)
	for k, v := range s.ServiceMeta {
		nsmeta[k] = v
	}

	var svcTaggedAddrs map[string]ServiceAddress
	if len(s.ServiceTaggedAddresses) > 0 {
		svcTaggedAddrs = make(map[string]ServiceAddress)
		for k, v := range s.ServiceTaggedAddresses {
			svcTaggedAddrs[k] = v
		}
	}

	return &ServiceNode{
		// Skip ID, see above.
		Node: s.Node,
		// Skip Address, see above.
		// Skip TaggedAddresses, see above.
		ServiceKind:              s.ServiceKind,
		ServiceID:                s.ServiceID,
		ServiceName:              s.ServiceName,
		ServiceTags:              tags,
		ServiceAddress:           s.ServiceAddress,
		ServiceTaggedAddresses:   svcTaggedAddrs,
		ServicePort:              s.ServicePort,
		ServiceMeta:              nsmeta,
		ServiceWeights:           s.ServiceWeights,
		ServiceEnableTagOverride: s.ServiceEnableTagOverride,
		ServiceProxy:             s.ServiceProxy,
		ServiceConnect:           s.ServiceConnect,
		RaftIndex: RaftIndex{
			CreateIndex: s.CreateIndex,
			ModifyIndex: s.ModifyIndex,
		},
		EnterpriseMeta: s.EnterpriseMeta,
	}
}

// ToNodeService converts the given service node to a node service.
func (s *ServiceNode) ToNodeService() *NodeService {
	return &NodeService{
		Kind:              s.ServiceKind,
		ID:                s.ServiceID,
		Service:           s.ServiceName,
		Tags:              s.ServiceTags,
		Address:           s.ServiceAddress,
		TaggedAddresses:   s.ServiceTaggedAddresses,
		Port:              s.ServicePort,
		Meta:              s.ServiceMeta,
		Weights:           &s.ServiceWeights,
		EnableTagOverride: s.ServiceEnableTagOverride,
		Proxy:             s.ServiceProxy,
		Connect:           s.ServiceConnect,
		EnterpriseMeta:    s.EnterpriseMeta,
		RaftIndex: RaftIndex{
			CreateIndex: s.CreateIndex,
			ModifyIndex: s.ModifyIndex,
		},
	}
}

func (sn *ServiceNode) compoundID(preferName bool) ServiceID {
	var id string
	if sn.ServiceID == "" || (preferName && sn.ServiceName != "") {
		id = sn.ServiceName
	} else {
		id = sn.ServiceID
	}

	// copy the ent meta and normalize it
	entMeta := sn.EnterpriseMeta
	entMeta.Normalize()

	return ServiceID{
		ID:             id,
		EnterpriseMeta: entMeta,
	}
}

func (sn *ServiceNode) CompoundServiceID() ServiceID {
	return sn.compoundID(false)
}

func (sn *ServiceNode) CompoundServiceName() ServiceID {
	return sn.compoundID(true)
}
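
// Illustrative distinction (hypothetical registration): CompoundServiceID keys on
// the registered instance ID while CompoundServiceName prefers the logical name:
//
//	sn := &ServiceNode{ServiceID: "web-1", ServiceName: "web"}
//	sn.CompoundServiceID().ID   // "web-1"
//	sn.CompoundServiceName().ID // "web"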

// Weights represent the weight used by DNS for a given status
type Weights struct {
	Passing int
	Warning int
}

type ServiceNodes []*ServiceNode

// ServiceKind is the kind of service being registered.
type ServiceKind string

const (
	// ServiceKindTypical is a typical, classic Consul service. This is
	// represented by the absence of a value. This was chosen for ease of
	// backwards compatibility: existing services in the catalog would
	// default to the typical service.
	ServiceKindTypical ServiceKind = ""

	// ServiceKindConnectProxy is a proxy for the Connect feature. This
	// service proxies another service within Consul and speaks the connect
	// protocol.
	ServiceKindConnectProxy ServiceKind = "connect-proxy"

	// ServiceKindMeshGateway is a Mesh Gateway for the Connect feature. This
	// service will proxy connections based off the SNI header set by other
	// connect proxies
	ServiceKindMeshGateway ServiceKind = "mesh-gateway"

	// ServiceKindTerminatingGateway is a Terminating Gateway for the Connect
	// feature. This service will proxy connections to services outside the mesh.
	ServiceKindTerminatingGateway ServiceKind = "terminating-gateway"
)

// ServiceAddress holds an address and port of a service
type ServiceAddress struct {
	Address string
	Port    int
}

func (a ServiceAddress) ToAPIServiceAddress() api.ServiceAddress {
	return api.ServiceAddress{Address: a.Address, Port: a.Port}
}

// NodeService is a service provided by a node
type NodeService struct {
	// Kind is the kind of service this is. Different kinds of services may
	// have differing validation, DNS behavior, etc. An empty kind will default
	// to the Default kind. See ServiceKind for the full list of kinds.
	Kind ServiceKind `json:",omitempty"`

	ID                string
	Service           string
	Tags              []string
	Address           string
	TaggedAddresses   map[string]ServiceAddress `json:",omitempty"`
	Meta              map[string]string
	Port              int
	Weights           *Weights
	EnableTagOverride bool

	// Proxy is the configuration set for Kind = connect-proxy. It is mandatory in
	// that case and an error to be set for any other kind. This config is part of
	// a proxy service definition. ProxyConfig may be a more natural name here, but
	// it's confusing for the UX because one of the fields in ConnectProxyConfig is
	// also called just "Config"
	Proxy ConnectProxyConfig

	// Connect are the Connect settings for a service. This is purposely NOT
	// a pointer so that we never have to nil-check this.
	Connect ServiceConnect

	// LocallyRegisteredAsSidecar is private as it is only used by a local agent
	// state to track if the service was registered from a nested sidecar_service
	// block. We need to track that so we can know whether we need to deregister
	// it automatically too if it's removed from the service definition or if the
	// parent service is deregistered. Relying only on ID would cause us to
	// deregister regular services if they happen to be registered using the same
	// ID scheme as our sidecars do by default. We could use meta but that gets
	// unpleasant because we can't use the consul- prefix from an agent (reserved
	// for use internally but in practice that means within the state store or in
	// responses only), and it leaks the detail publicly which people might rely
	// on which is a bit unpleasant for something that is meant to be config-file
	// syntax sugar. Note this is not translated to ServiceNode and friends and
	// may not be set on a NodeService that isn't the one the agent registered and
	// keeps in its local state. We never want this rendered in JSON as it's
	// internal only. Right now our agent endpoints return api structs which don't
	// include it but this is a safety net in case we change that or there is
	// somewhere this is used in API output.
	LocallyRegisteredAsSidecar bool `json:"-" bexpr:"-"`

	EnterpriseMeta `hcl:",squash" mapstructure:",squash" bexpr:"-"`

	RaftIndex `bexpr:"-"`
}

func (ns *NodeService) BestAddress(wan bool) (string, int) {
	addr := ns.Address
	port := ns.Port

	if wan {
		if wan, ok := ns.TaggedAddresses[TaggedAddressWAN]; ok {
			addr = wan.Address
			if wan.Port != 0 {
				port = wan.Port
			}
		}
	}
	return addr, port
}
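
// Illustrative behaviour with hypothetical values: a WAN lookup uses the tagged
// address, and keeps the default port when the tagged port is zero:
//
//	ns := &NodeService{
//		Address: "10.0.0.1",
//		Port:    8080,
//		TaggedAddresses: map[string]ServiceAddress{
//			TaggedAddressWAN: {Address: "198.51.100.1", Port: 443},
//		},
//	}
//	ns.BestAddress(true)  // "198.51.100.1", 443
//	ns.BestAddress(false) // "10.0.0.1", 8080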

func (ns *NodeService) compoundID(preferName bool) ServiceID {
	var id string
	if ns.ID == "" || (preferName && ns.Service != "") {
		id = ns.Service
	} else {
		id = ns.ID
	}

	// copy the ent meta and normalize it
	entMeta := ns.EnterpriseMeta
	entMeta.Normalize()

	return ServiceID{
		ID:             id,
		EnterpriseMeta: entMeta,
	}
}

func (ns *NodeService) CompoundServiceID() ServiceID {
	return ns.compoundID(false)
}

func (ns *NodeService) CompoundServiceName() ServiceID {
	return ns.compoundID(true)
}

// ServiceConnect are the shared Connect settings between all service
// definitions from the agent to the state store.
type ServiceConnect struct {
	// Native is true when this service can natively understand Connect.
	Native bool `json:",omitempty"`

	// SidecarService is a nested Service Definition to register at the same time.
	// It's purely a convenience mechanism to allow specifying a sidecar service
	// along with the application service definition. Its nested nature allows
	// all of the fields to be defaulted which can reduce the amount of
	// boilerplate needed to register a sidecar service separately, but the end
	// result is identical to just making a second service registration via any
	// other means.
	SidecarService *ServiceDefinition `json:",omitempty" bexpr:"-"`
}

func (t *ServiceConnect) UnmarshalJSON(data []byte) (err error) {
	type Alias ServiceConnect
	aux := &struct {
		SidecarServiceSnake *ServiceDefinition `json:"sidecar_service"`

		*Alias
	}{
		Alias: (*Alias)(t),
	}
	if err = json.Unmarshal(data, &aux); err != nil {
		return err
	}
	if t.SidecarService == nil {
		t.SidecarService = aux.SidecarServiceSnake
	}
	return nil
}
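
// Illustrative payload (hypothetical values): both the canonical "SidecarService"
// key and the snake_case "sidecar_service" key end up populating SidecarService,
// with the canonical spelling taking precedence when both are present:
//
//	var sc ServiceConnect
//	_ = json.Unmarshal([]byte(`{"sidecar_service": {"Port": 20000}}`), &sc)
//	// sc.SidecarService is now non-nil with Port set to 20000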

// IsSidecarProxy returns true if the NodeService is a sidecar proxy.
func (s *NodeService) IsSidecarProxy() bool {
	return s.Kind == ServiceKindConnectProxy && s.Proxy.DestinationServiceID != ""
}

func (s *NodeService) IsGateway() bool {
	return s.Kind == ServiceKindMeshGateway || s.Kind == ServiceKindTerminatingGateway
}

// Validate validates the node service configuration.
//
// NOTE(mitchellh): This currently only validates fields for a ConnectProxy.
// Historically validation has been directly in the Catalog.Register RPC.
// ConnectProxy validation was moved here for easier table testing, but
// other validation still exists in Catalog.Register.
func (s *NodeService) Validate() error {
	var result error

	// ConnectProxy validation
	if s.Kind == ServiceKindConnectProxy {
		if strings.TrimSpace(s.Proxy.DestinationServiceName) == "" {
			result = multierror.Append(result, fmt.Errorf(
				"Proxy.DestinationServiceName must be non-empty for Connect proxy "+
					"services"))
		}

		if s.Port == 0 {
			result = multierror.Append(result, fmt.Errorf(
				"Port must be set for a Connect proxy"))
		}

		if s.Connect.Native {
			result = multierror.Append(result, fmt.Errorf(
				"A Proxy cannot also be Connect Native, only typical services"))
		}

		// ensure we don't have multiple upstreams for the same service
		var (
			upstreamKeys = make(map[UpstreamKey]struct{})
			bindAddrs    = make(map[string]struct{})
		)
		for _, u := range s.Proxy.Upstreams {
			if err := u.Validate(); err != nil {
				result = multierror.Append(result, err)
				continue
			}

			uk := u.ToKey()
			if _, ok := upstreamKeys[uk]; ok {
				result = multierror.Append(result, fmt.Errorf(
					"upstreams cannot contain duplicates of %s", uk))
				continue
			}
			upstreamKeys[uk] = struct{}{}

			addr := u.LocalBindAddress
			if addr == "" {
				addr = "127.0.0.1"
			}
			addr = net.JoinHostPort(addr, fmt.Sprintf("%d", u.LocalBindPort))

			if _, ok := bindAddrs[addr]; ok {
				result = multierror.Append(result, fmt.Errorf(
					"upstreams cannot contain duplicates by local bind address and port; %q is specified twice", addr))
				continue
			}
			bindAddrs[addr] = struct{}{}
		}

		var knownPaths = make(map[string]bool)
		var knownListeners = make(map[int]bool)
		for _, path := range s.Proxy.Expose.Paths {
			if path.Path == "" {
				result = multierror.Append(result, fmt.Errorf("expose.paths: empty path exposed"))
			}

			if seen := knownPaths[path.Path]; seen {
				result = multierror.Append(result, fmt.Errorf("expose.paths: duplicate paths exposed"))
			}
			knownPaths[path.Path] = true

			if seen := knownListeners[path.ListenerPort]; seen {
				result = multierror.Append(result, fmt.Errorf("expose.paths: duplicate listener ports exposed"))
			}
			knownListeners[path.ListenerPort] = true

			if path.ListenerPort <= 0 || path.ListenerPort > 65535 {
				result = multierror.Append(result, fmt.Errorf("expose.paths: invalid listener port: %d", path.ListenerPort))
			}

			path.Protocol = strings.ToLower(path.Protocol)
			if ok := allowedExposeProtocols[path.Protocol]; !ok && path.Protocol != "" {
				protocols := make([]string, 0)
				for p := range allowedExposeProtocols {
					protocols = append(protocols, p)
				}

				result = multierror.Append(result,
					fmt.Errorf("protocol '%s' not supported for path: %s, must be in: %v",
						path.Protocol, path.Path, protocols))
			}
		}
	}

	// Gateway validation
	if s.IsGateway() {
		// Gateways must have a port
		if s.Port == 0 {
			result = multierror.Append(result, fmt.Errorf("Port must be non-zero for a %s", s.Kind))
		}

		// Gateways cannot have sidecars
		if s.Connect.SidecarService != nil {
			result = multierror.Append(result, fmt.Errorf("A %s cannot have a sidecar service defined", s.Kind))
		}

		if s.Proxy.DestinationServiceName != "" {
			result = multierror.Append(result, fmt.Errorf("The Proxy.DestinationServiceName configuration is invalid for a %s", s.Kind))
		}

		if s.Proxy.DestinationServiceID != "" {
			result = multierror.Append(result, fmt.Errorf("The Proxy.DestinationServiceID configuration is invalid for a %s", s.Kind))
		}

		if s.Proxy.LocalServiceAddress != "" {
			result = multierror.Append(result, fmt.Errorf("The Proxy.LocalServiceAddress configuration is invalid for a %s", s.Kind))
		}

		if s.Proxy.LocalServicePort != 0 {
			result = multierror.Append(result, fmt.Errorf("The Proxy.LocalServicePort configuration is invalid for a %s", s.Kind))
		}

		if len(s.Proxy.Upstreams) != 0 {
			result = multierror.Append(result, fmt.Errorf("The Proxy.Upstreams configuration is invalid for a %s", s.Kind))
		}
	}

	// Nested sidecar validation
	if s.Connect.SidecarService != nil {
		if s.Connect.SidecarService.ID != "" {
			result = multierror.Append(result, fmt.Errorf(
				"A SidecarService cannot specify an ID as this is managed by the "+
					"agent"))
		}
		if s.Connect.SidecarService.Connect != nil {
			if s.Connect.SidecarService.Connect.SidecarService != nil {
				result = multierror.Append(result, fmt.Errorf(
					"A SidecarService cannot have a nested SidecarService"))
			}
		}
	}

	return result
}

// IsSame checks if one NodeService is the same as another, without looking
// at the Raft information (that's why we didn't call it IsEqual). This is
// useful for seeing if an update would be idempotent for all the functional
// parts of the structure.
func (s *NodeService) IsSame(other *NodeService) bool {
	if s.ID != other.ID ||
		s.Service != other.Service ||
		!reflect.DeepEqual(s.Tags, other.Tags) ||
		s.Address != other.Address ||
		s.Port != other.Port ||
		!reflect.DeepEqual(s.TaggedAddresses, other.TaggedAddresses) ||
		!reflect.DeepEqual(s.Weights, other.Weights) ||
		!reflect.DeepEqual(s.Meta, other.Meta) ||
		s.EnableTagOverride != other.EnableTagOverride ||
		s.Kind != other.Kind ||
		!reflect.DeepEqual(s.Proxy, other.Proxy) ||
		s.Connect != other.Connect ||
		!s.EnterpriseMeta.IsSame(&other.EnterpriseMeta) {
		return false
	}

	return true
}
2018-10-11 11:42:39 +00:00
|
|
|
// IsSameService checks if one Service of a ServiceNode is the same as another,
|
|
|
|
// without looking at the Raft information or Node information (that's why we
|
|
|
|
// didn't call it IsEqual).
|
|
|
|
// This is useful for seeing if an update would be idempotent for all the functional
|
|
|
|
// parts of the structure.
|
|
|
|
// In a similar fashion as ToNodeService(), fields related to Node are ignored
|
|
|
|
// see ServiceNode for more information.
|
|
|
|
func (s *ServiceNode) IsSameService(other *ServiceNode) bool {
|
|
|
|
// Skip the following fields, see ServiceNode definition
|
|
|
|
// Address string
|
|
|
|
// Datacenter string
|
|
|
|
// TaggedAddresses map[string]string
|
|
|
|
// NodeMeta map[string]string
|
|
|
|
if s.ID != other.ID ||
|
|
|
|
s.Node != other.Node ||
|
|
|
|
s.ServiceKind != other.ServiceKind ||
|
|
|
|
s.ServiceID != other.ServiceID ||
|
|
|
|
s.ServiceName != other.ServiceName ||
|
|
|
|
!reflect.DeepEqual(s.ServiceTags, other.ServiceTags) ||
|
|
|
|
s.ServiceAddress != other.ServiceAddress ||
|
2019-06-17 14:51:50 +00:00
|
|
|
!reflect.DeepEqual(s.ServiceTaggedAddresses, other.ServiceTaggedAddresses) ||
|
2018-10-11 11:42:39 +00:00
|
|
|
s.ServicePort != other.ServicePort ||
|
|
|
|
!reflect.DeepEqual(s.ServiceMeta, other.ServiceMeta) ||
|
|
|
|
!reflect.DeepEqual(s.ServiceWeights, other.ServiceWeights) ||
|
|
|
|
s.ServiceEnableTagOverride != other.ServiceEnableTagOverride ||
|
|
|
|
!reflect.DeepEqual(s.ServiceProxy, other.ServiceProxy) ||
|
2019-12-10 02:26:41 +00:00
|
|
|
!reflect.DeepEqual(s.ServiceConnect, other.ServiceConnect) ||
|
|
|
|
!s.EnterpriseMeta.IsSame(&other.EnterpriseMeta) {
|
2018-10-11 11:42:39 +00:00
|
|
|
return false
|
|
|
|
}
|
|
|
|
|
|
|
|
return true
|
|
|
|
}

// ToServiceNode converts the given node service to a service node.
func (s *NodeService) ToServiceNode(node string) *ServiceNode {
	theWeights := Weights{
		Passing: 1,
		Warning: 1,
	}
	if s.Weights != nil {
		if err := ValidateWeights(s.Weights); err == nil {
			theWeights = *s.Weights
		}
	}
	return &ServiceNode{
		// Skip ID, see ServiceNode definition.
		Node: node,
		// Skip Address, see ServiceNode definition.
		// Skip TaggedAddresses, see ServiceNode definition.
		ServiceKind:              s.Kind,
		ServiceID:                s.ID,
		ServiceName:              s.Service,
		ServiceTags:              s.Tags,
		ServiceAddress:           s.Address,
		ServiceTaggedAddresses:   s.TaggedAddresses,
		ServicePort:              s.Port,
		ServiceMeta:              s.Meta,
		ServiceWeights:           theWeights,
		ServiceEnableTagOverride: s.EnableTagOverride,
		ServiceProxy:             s.Proxy,
		ServiceConnect:           s.Connect,
		EnterpriseMeta:           s.EnterpriseMeta,
		RaftIndex: RaftIndex{
			CreateIndex: s.CreateIndex,
			ModifyIndex: s.ModifyIndex,
		},
	}
}
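
// Illustrative sketch (not part of the original source): the conversion drops only
// the skipped Node-level fields, so converting back with ToNodeService (defined
// elsewhere in this package) yields the same functional service definition:
//
//	sn := ns.ToServiceNode("node-1")
//	back := sn.ToNodeService() // same functional service fields as ns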

type NodeServices struct {
	Node     *Node
	Services map[string]*NodeService
}

type NodeServiceList struct {
	Node     *Node
	Services []*NodeService
}

// HealthCheck represents a single check on a given node.
type HealthCheck struct {
	Node        string
	CheckID     types.CheckID // Unique per-node ID
	Name        string        // Check name
	Status      string        // The current check status
	Notes       string        // Additional notes with the status
	Output      string        // Holds output of script runs
	ServiceID   string        // optional associated service
	ServiceName string        // optional service name
	ServiceTags []string      // optional service tags
	Type        string        // Check type: http/ttl/tcp/etc

	Definition HealthCheckDefinition `bexpr:"-"`

	EnterpriseMeta `hcl:",squash" mapstructure:",squash" bexpr:"-"`

	RaftIndex `bexpr:"-"`
}

func (hc *HealthCheck) CompoundServiceID() ServiceID {
	id := hc.ServiceID
	if id == "" {
		id = hc.ServiceName
	}

	entMeta := hc.EnterpriseMeta
	entMeta.Normalize()

	return ServiceID{
		ID:             id,
		EnterpriseMeta: entMeta,
	}
}

func (hc *HealthCheck) CompoundCheckID() CheckID {
	entMeta := hc.EnterpriseMeta
	entMeta.Normalize()

	return CheckID{
		ID:             hc.CheckID,
		EnterpriseMeta: entMeta,
	}
}

type HealthCheckDefinition struct {
	HTTP                           string              `json:",omitempty"`
	TLSSkipVerify                  bool                `json:",omitempty"`
	Header                         map[string][]string `json:",omitempty"`
	Method                         string              `json:",omitempty"`
	Body                           string              `json:",omitempty"`
	TCP                            string              `json:",omitempty"`
	Interval                       time.Duration       `json:",omitempty"`
	OutputMaxSize                  uint                `json:",omitempty"`
	Timeout                        time.Duration       `json:",omitempty"`
	DeregisterCriticalServiceAfter time.Duration       `json:",omitempty"`
	ScriptArgs                     []string            `json:",omitempty"`
	DockerContainerID              string              `json:",omitempty"`
	Shell                          string              `json:",omitempty"`
	GRPC                           string              `json:",omitempty"`
	GRPCUseTLS                     bool                `json:",omitempty"`
	AliasNode                      string              `json:",omitempty"`
	AliasService                   string              `json:",omitempty"`
	TTL                            time.Duration       `json:",omitempty"`
}

func (d *HealthCheckDefinition) MarshalJSON() ([]byte, error) {
	type Alias HealthCheckDefinition
	exported := &struct {
		Interval                       string `json:",omitempty"`
		OutputMaxSize                  uint   `json:",omitempty"`
		Timeout                        string `json:",omitempty"`
		DeregisterCriticalServiceAfter string `json:",omitempty"`
		*Alias
	}{
		Interval:                       d.Interval.String(),
		OutputMaxSize:                  d.OutputMaxSize,
		Timeout:                        d.Timeout.String(),
		DeregisterCriticalServiceAfter: d.DeregisterCriticalServiceAfter.String(),
		Alias:                          (*Alias)(d),
	}
	if d.Interval == 0 {
		exported.Interval = ""
	}
	if d.Timeout == 0 {
		exported.Timeout = ""
	}
	if d.DeregisterCriticalServiceAfter == 0 {
		exported.DeregisterCriticalServiceAfter = ""
	}

	return json.Marshal(exported)
}

func (t *HealthCheckDefinition) UnmarshalJSON(data []byte) (err error) {
	type Alias HealthCheckDefinition
	aux := &struct {
		Interval                       interface{}
		Timeout                        interface{}
		DeregisterCriticalServiceAfter interface{}
		TTL                            interface{}
		*Alias
	}{
		Alias: (*Alias)(t),
	}
	if err := json.Unmarshal(data, &aux); err != nil {
		return err
	}
	if aux.Interval != nil {
		switch v := aux.Interval.(type) {
		case string:
			if t.Interval, err = time.ParseDuration(v); err != nil {
				return err
			}
		case float64:
			t.Interval = time.Duration(v)
		}
	}
	if aux.Timeout != nil {
		switch v := aux.Timeout.(type) {
		case string:
			if t.Timeout, err = time.ParseDuration(v); err != nil {
				return err
			}
		case float64:
			t.Timeout = time.Duration(v)
		}
	}
	if aux.DeregisterCriticalServiceAfter != nil {
		switch v := aux.DeregisterCriticalServiceAfter.(type) {
		case string:
			if t.DeregisterCriticalServiceAfter, err = time.ParseDuration(v); err != nil {
				return err
			}
		case float64:
			t.DeregisterCriticalServiceAfter = time.Duration(v)
		}
	}
	if aux.TTL != nil {
		switch v := aux.TTL.(type) {
		case string:
			if t.TTL, err = time.ParseDuration(v); err != nil {
				return err
			}
		case float64:
			t.TTL = time.Duration(v)
		}
	}
	return nil
}
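
// Illustrative sketch (not part of the original source): the duration fields
// accept either a Go duration string or a raw nanosecond count, so both of these
// JSON payloads decode to the same HealthCheckDefinition:
//
//	{"HTTP": "http://localhost:8500/health", "Interval": "10s"}
//	{"HTTP": "http://localhost:8500/health", "Interval": 10000000000}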

// IsSame checks if one HealthCheck is the same as another, without looking
// at the Raft information (that's why we didn't call it IsEqual). This is
// useful for seeing if an update would be idempotent for all the functional
// parts of the structure.
func (c *HealthCheck) IsSame(other *HealthCheck) bool {
	if c.Node != other.Node ||
		c.CheckID != other.CheckID ||
		c.Name != other.Name ||
		c.Status != other.Status ||
		c.Notes != other.Notes ||
		c.Output != other.Output ||
		c.ServiceID != other.ServiceID ||
		c.ServiceName != other.ServiceName ||
		!reflect.DeepEqual(c.ServiceTags, other.ServiceTags) ||
		!reflect.DeepEqual(c.Definition, other.Definition) ||
		!c.EnterpriseMeta.IsSame(&other.EnterpriseMeta) {
		return false
	}

	return true
}

// Clone returns a distinct clone of the HealthCheck. Note that the
// "ServiceTags" and "Definition.Header" fields are not deep copied.
func (c *HealthCheck) Clone() *HealthCheck {
	clone := new(HealthCheck)
	*clone = *c
	return clone
}

func (c *HealthCheck) CheckType() *CheckType {
	return &CheckType{
		CheckID: c.CheckID,
		Name:    c.Name,
		Status:  c.Status,
		Notes:   c.Notes,

		ScriptArgs:                     c.Definition.ScriptArgs,
		AliasNode:                      c.Definition.AliasNode,
		AliasService:                   c.Definition.AliasService,
		HTTP:                           c.Definition.HTTP,
		GRPC:                           c.Definition.GRPC,
		GRPCUseTLS:                     c.Definition.GRPCUseTLS,
		Header:                         c.Definition.Header,
		Method:                         c.Definition.Method,
		Body:                           c.Definition.Body,
		TCP:                            c.Definition.TCP,
		Interval:                       c.Definition.Interval,
		DockerContainerID:              c.Definition.DockerContainerID,
		Shell:                          c.Definition.Shell,
		TLSSkipVerify:                  c.Definition.TLSSkipVerify,
		Timeout:                        c.Definition.Timeout,
		TTL:                            c.Definition.TTL,
		DeregisterCriticalServiceAfter: c.Definition.DeregisterCriticalServiceAfter,
	}
}

// HealthChecks is a collection of HealthCheck structs.
type HealthChecks []*HealthCheck

// CheckServiceNode is used to provide the node, its service
// definition, as well as a HealthCheck that is associated.
type CheckServiceNode struct {
	Node    *Node
	Service *NodeService
	Checks  HealthChecks
}

func (csn *CheckServiceNode) BestAddress(wan bool) (string, int) {
	// TODO (mesh-gateway) needs a test
	// best address
	// wan
	//   wan svc addr
	//   svc addr
	//   wan node addr
	//   node addr
	// lan
	//   svc addr
	//   node addr

	addr, port := csn.Service.BestAddress(wan)

	if addr == "" {
		addr = csn.Node.BestAddress(wan)
	}

	return addr, port
}
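
// Illustrative sketch (not part of the original source): a caller in another
// datacenter would ask for the WAN-reachable address first; the service address
// wins when present and the node address is the fallback:
//
//	addr, port := csn.BestAddress(true) // e.g. the service's "wan" tagged address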

type CheckServiceNodes []CheckServiceNode

// Shuffle does an in-place random shuffle using the Fisher-Yates algorithm.
func (nodes CheckServiceNodes) Shuffle() {
	for i := len(nodes) - 1; i > 0; i-- {
		j := rand.Int31n(int32(i + 1))
		nodes[i], nodes[j] = nodes[j], nodes[i]
	}
}

// ShallowClone duplicates the slice and underlying array.
func (nodes CheckServiceNodes) ShallowClone() CheckServiceNodes {
	dup := make(CheckServiceNodes, len(nodes))
	copy(dup, nodes)
	return dup
}

// Filter removes nodes that are failing health checks (and any non-passing
// check if that option is selected). Note that this returns the filtered
// results AND modifies the receiver for performance.
func (nodes CheckServiceNodes) Filter(onlyPassing bool) CheckServiceNodes {
	return nodes.FilterIgnore(onlyPassing, nil)
}

// FilterIgnore removes nodes that are failing health checks just like Filter.
// It also ignores the status of any check with an ID present in ignoreCheckIDs
// as if that check didn't exist. Note that this returns the filtered results
// AND modifies the receiver for performance.
func (nodes CheckServiceNodes) FilterIgnore(onlyPassing bool,
	ignoreCheckIDs []types.CheckID) CheckServiceNodes {
	n := len(nodes)
OUTER:
	for i := 0; i < n; i++ {
		node := nodes[i]
	INNER:
		for _, check := range node.Checks {
			for _, ignore := range ignoreCheckIDs {
				if check.CheckID == ignore {
					// Skip this _check_ but keep looking at other checks for this node.
					continue INNER
				}
			}
			if check.Status == api.HealthCritical ||
				(onlyPassing && check.Status != api.HealthPassing) {
				nodes[i], nodes[n-1] = nodes[n-1], CheckServiceNode{}
				n--
				i--
				// Skip this _node_ now we've swapped it off the end of the list.
				continue OUTER
			}
		}
	}
	return nodes[:n]
}
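
// Illustrative sketch (not part of the original source): a typical health query
// shuffles the instances and then drops the unhealthy ones before replying:
//
//	nodes.Shuffle()
//	healthy := nodes.Filter(true) // keep only instances whose checks all pass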

// NodeInfo is used to dump all associated information about
// a node. This is currently used for the UI only, as it is
// rather expensive to generate.
type NodeInfo struct {
	ID              types.NodeID
	Node            string
	Address         string
	TaggedAddresses map[string]string
	Meta            map[string]string
	Services        []*NodeService
	Checks          HealthChecks
}

// NodeDump is used to dump all the nodes with all their
// associated data. This is currently used for the UI only,
// as it is rather expensive to generate.
type NodeDump []*NodeInfo

type CheckID struct {
	ID types.CheckID
	EnterpriseMeta
}

func NewCheckID(id types.CheckID, entMeta *EnterpriseMeta) CheckID {
	var cid CheckID
	cid.Init(id, entMeta)
	return cid
}

func (cid *CheckID) Init(id types.CheckID, entMeta *EnterpriseMeta) {
	cid.ID = id
	if entMeta == nil {
		entMeta = DefaultEnterpriseMeta()
	}

	cid.EnterpriseMeta = *entMeta
	cid.EnterpriseMeta.Normalize()
}

// StringHash is used mainly to populate part of the filename of a check
// definition persisted on the local agent.
func (cid *CheckID) StringHash() string {
	hasher := md5.New()
	hasher.Write([]byte(cid.ID))
	cid.EnterpriseMeta.addToHash(hasher, true)
	return fmt.Sprintf("%x", hasher.Sum(nil))
}
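
// Illustrative sketch (not part of the original source): the agent can persist a
// check definition under a name derived from this hash, e.g.
//
//	cid := NewCheckID("mem-util", nil)
//	path := filepath.Join(checksDir, cid.StringHash()) // hypothetical checksDir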

type ServiceID struct {
	ID string
	EnterpriseMeta
}

func NewServiceID(id string, entMeta *EnterpriseMeta) ServiceID {
	var sid ServiceID
	sid.Init(id, entMeta)
	return sid
}

func (sid *ServiceID) Init(id string, entMeta *EnterpriseMeta) {
	sid.ID = id
	if entMeta == nil {
		entMeta = DefaultEnterpriseMeta()
	}

	sid.EnterpriseMeta = *entMeta
	sid.EnterpriseMeta.Normalize()
}

func (sid *ServiceID) Matches(other *ServiceID) bool {
	if sid == nil && other == nil {
		return true
	}

	if sid == nil || other == nil || sid.ID != other.ID || !sid.EnterpriseMeta.Matches(&other.EnterpriseMeta) {
		return false
	}

	return true
}

// StringHash is used mainly to populate part of the filename of a service
// definition persisted on the local agent.
func (sid *ServiceID) StringHash() string {
	hasher := md5.New()
	hasher.Write([]byte(sid.ID))
	sid.EnterpriseMeta.addToHash(hasher, true)
	return fmt.Sprintf("%x", hasher.Sum(nil))
}

func (sid *ServiceID) LessThan(other *ServiceID) bool {
	if sid.EnterpriseMeta.LessThan(&other.EnterpriseMeta) {
		return true
	}

	return sid.ID < other.ID
}
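
// Illustrative sketch (not part of the original source): LessThan gives a stable
// ordering that can be used directly with the sort package:
//
//	sort.Slice(ids, func(i, j int) bool { return ids[i].LessThan(&ids[j]) })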

type IndexedNodes struct {
	Nodes Nodes
	QueryMeta
}

type IndexedServices struct {
	Services Services
	// In various situations we need to know the meta that the services are for - in particular
	// this is needed to be able to properly filter the list based on ACLs
	EnterpriseMeta
	QueryMeta
}

type ServiceInfo struct {
	Name string
	EnterpriseMeta
}

func (si *ServiceInfo) ToServiceID() ServiceID {
	return ServiceID{ID: si.Name, EnterpriseMeta: si.EnterpriseMeta}
}

type ServiceList []ServiceInfo

type IndexedServiceList struct {
	Services ServiceList
	QueryMeta
}

type IndexedServiceNodes struct {
	ServiceNodes ServiceNodes
	QueryMeta
}

type IndexedNodeServices struct {
	// TODO: This should not be a pointer, see comments in
	// agent/catalog_endpoint.go.
	NodeServices *NodeServices
	QueryMeta
}

type IndexedNodeServiceList struct {
	NodeServices NodeServiceList
	QueryMeta
}

type IndexedHealthChecks struct {
	HealthChecks HealthChecks
	QueryMeta
}

type IndexedCheckServiceNodes struct {
	Nodes CheckServiceNodes
	QueryMeta
}

type DatacenterIndexedCheckServiceNodes struct {
	DatacenterNodes map[string]CheckServiceNodes
	QueryMeta
}

type IndexedNodeDump struct {
	Dump NodeDump
	QueryMeta
}

// IndexedConfigEntries has its own encoding logic which differs from
// ConfigEntryRequest as it has to send a slice of ConfigEntry.
type IndexedConfigEntries struct {
	Kind    string
	Entries []ConfigEntry
	QueryMeta
}

func (c *IndexedConfigEntries) MarshalBinary() (data []byte, err error) {
	// bs will grow if needed but allocate enough to avoid reallocation in common
	// case.
	bs := make([]byte, 128)
	enc := codec.NewEncoderBytes(&bs, MsgpackHandle)

	// Encode length.
	err = enc.Encode(len(c.Entries))
	if err != nil {
		return nil, err
	}

	// Encode kind.
	err = enc.Encode(c.Kind)
	if err != nil {
		return nil, err
	}

	// Then actual value using alias trick to avoid infinite recursion
	type Alias IndexedConfigEntries
	err = enc.Encode(struct {
		*Alias
	}{
		Alias: (*Alias)(c),
	})
	if err != nil {
		return nil, err
	}
	return bs, nil
}

func (c *IndexedConfigEntries) UnmarshalBinary(data []byte) error {
	// First decode the number of entries.
	var numEntries int
	dec := codec.NewDecoderBytes(data, MsgpackHandle)
	if err := dec.Decode(&numEntries); err != nil {
		return err
	}

	// Next decode the kind.
	var kind string
	if err := dec.Decode(&kind); err != nil {
		return err
	}

	// Then decode the slice of ConfigEntries
	c.Entries = make([]ConfigEntry, numEntries)
	for i := 0; i < numEntries; i++ {
		entry, err := MakeConfigEntry(kind, "")
		if err != nil {
			return err
		}
		c.Entries[i] = entry
	}

	// Alias juggling to prevent infinite recursive calls back to this decode
	// method.
	type Alias IndexedConfigEntries
	as := struct {
		*Alias
	}{
		Alias: (*Alias)(c),
	}
	if err := dec.Decode(&as); err != nil {
		return err
	}
	return nil
}
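
// Illustrative sketch (not part of the original source): the wire format is a
// count, then the kind, then the struct encoded through the Alias type, so a
// round trip restores concretely typed entries for that kind:
//
//	raw, _ := indexed.MarshalBinary()
//	var out IndexedConfigEntries
//	_ = out.UnmarshalBinary(raw) // out.Entries holds ConfigEntry values of out.Kind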

type IndexedGenericConfigEntries struct {
	Entries []ConfigEntry
	QueryMeta
}

func (c *IndexedGenericConfigEntries) MarshalBinary() (data []byte, err error) {
	// bs will grow if needed but allocate enough to avoid reallocation in common
	// case.
	bs := make([]byte, 128)
	enc := codec.NewEncoderBytes(&bs, MsgpackHandle)

	if err := enc.Encode(len(c.Entries)); err != nil {
		return nil, err
	}

	for _, entry := range c.Entries {
		if err := enc.Encode(entry.GetKind()); err != nil {
			return nil, err
		}
		if err := enc.Encode(entry); err != nil {
			return nil, err
		}
	}

	if err := enc.Encode(c.QueryMeta); err != nil {
		return nil, err
	}

	return bs, nil
}
func (c *IndexedGenericConfigEntries) UnmarshalBinary(data []byte) error {
	// First decode the number of entries.
	var numEntries int
	dec := codec.NewDecoderBytes(data, MsgpackHandle)
	if err := dec.Decode(&numEntries); err != nil {
		return err
	}

	// Then decode the slice of ConfigEntries
	c.Entries = make([]ConfigEntry, numEntries)
	for i := 0; i < numEntries; i++ {
		var kind string
		if err := dec.Decode(&kind); err != nil {
			return err
		}

		entry, err := MakeConfigEntry(kind, "")
		if err != nil {
			return err
		}

		if err := dec.Decode(entry); err != nil {
			return err
		}

		c.Entries[i] = entry
	}

	if err := dec.Decode(&c.QueryMeta); err != nil {
		return err
	}

	return nil
}

// DirEntry is used to represent a directory entry. This is
// used for values in our Key-Value store.
type DirEntry struct {
	LockIndex uint64
	Key       string
	Flags     uint64
	Value     []byte
	Session   string `json:",omitempty"`

	EnterpriseMeta `bexpr:"-"`
	RaftIndex
}

// Returns a clone of the given directory entry.
func (d *DirEntry) Clone() *DirEntry {
	return &DirEntry{
		LockIndex: d.LockIndex,
		Key:       d.Key,
		Flags:     d.Flags,
		Value:     d.Value,
		Session:   d.Session,
		RaftIndex: RaftIndex{
			CreateIndex: d.CreateIndex,
			ModifyIndex: d.ModifyIndex,
		},
		EnterpriseMeta: d.EnterpriseMeta,
	}
}

func (d *DirEntry) Equal(o *DirEntry) bool {
	return d.LockIndex == o.LockIndex &&
		d.Key == o.Key &&
		d.Flags == o.Flags &&
		bytes.Equal(d.Value, o.Value) &&
		d.Session == o.Session
}

type DirEntries []*DirEntry

// KVSRequest is used to operate on the Key-Value store
type KVSRequest struct {
	Datacenter string
	Op         api.KVOp // Which operation are we performing
	DirEnt     DirEntry // Which directory entry
	WriteRequest
}

func (r *KVSRequest) RequestDatacenter() string {
	return r.Datacenter
}

// KeyRequest is used to request a key, or key prefix
type KeyRequest struct {
	Datacenter string
	Key        string
	EnterpriseMeta
	QueryOptions
}

func (r *KeyRequest) RequestDatacenter() string {
	return r.Datacenter
}

// KeyListRequest is used to list keys
type KeyListRequest struct {
	Datacenter string
	Prefix     string
	Seperator  string
	QueryOptions
	EnterpriseMeta
}

func (r *KeyListRequest) RequestDatacenter() string {
	return r.Datacenter
}

type IndexedDirEntries struct {
	Entries DirEntries
	QueryMeta
}

type IndexedKeyList struct {
	Keys []string
	QueryMeta
}

type SessionBehavior string

const (
	SessionKeysRelease SessionBehavior = "release"
	SessionKeysDelete                  = "delete"
)

const (
	SessionTTLMax        = 24 * time.Hour
	SessionTTLMultiplier = 2
)

type Sessions []*Session

// Session is used to represent an open session in the KV store.
// This is used to associate node checks with acquired locks.
type Session struct {
	ID            string
	Name          string
	Node          string
	LockDelay     time.Duration
	Behavior      SessionBehavior // What to do when session is invalidated
	TTL           string
	NodeChecks    []string
	ServiceChecks []ServiceCheck

	// Deprecated v1.7.0.
	Checks []types.CheckID `json:",omitempty"`

	EnterpriseMeta
	RaftIndex
}

type ServiceCheck struct {
	ID        string
	Namespace string
}

func (s *Session) UnmarshalJSON(data []byte) (err error) {
	type Alias Session
	aux := &struct {
		LockDelay interface{}
		*Alias
	}{
		Alias: (*Alias)(s),
	}
	if err = json.Unmarshal(data, &aux); err != nil {
		return err
	}
	if aux.LockDelay != nil {
		var dur time.Duration
		switch v := aux.LockDelay.(type) {
		case string:
			if dur, err = time.ParseDuration(v); err != nil {
				return err
			}
		case float64:
			dur = time.Duration(v)
		}
		// Convert low value integers into seconds
		if dur < lockDelayMinThreshold {
			dur = dur * time.Second
		}
		s.LockDelay = dur
	}
	return nil
}
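
// Illustrative sketch (not part of the original source): because small numeric
// values are promoted to seconds, these payloads both decode to a 15s LockDelay:
//
//	{"Node": "node-1", "LockDelay": "15s"}
//	{"Node": "node-1", "LockDelay": 15}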

type SessionOp string

const (
	SessionCreate  SessionOp = "create"
	SessionDestroy           = "destroy"
)

// SessionRequest is used to operate on sessions
type SessionRequest struct {
	Datacenter string
	Op         SessionOp // Which operation are we performing
	Session    Session   // Which session
	WriteRequest
}

func (r *SessionRequest) RequestDatacenter() string {
	return r.Datacenter
}

// SessionSpecificRequest is used to request a session by ID
type SessionSpecificRequest struct {
	Datacenter string
	SessionID  string
	// DEPRECATED in 1.7.0
	Session string
	EnterpriseMeta
	QueryOptions
}

func (r *SessionSpecificRequest) RequestDatacenter() string {
	return r.Datacenter
}

type IndexedSessions struct {
	Sessions Sessions
	QueryMeta
}

// Coordinate stores a node name with its associated network coordinate.
type Coordinate struct {
	Node    string
	Segment string
	Coord   *coordinate.Coordinate
}

type Coordinates []*Coordinate

// IndexedCoordinate is used to represent a single node's coordinate from the state
// store.
type IndexedCoordinate struct {
	Coord *coordinate.Coordinate
	QueryMeta
}

// IndexedCoordinates is used to represent a list of nodes and their
// corresponding raw coordinates.
type IndexedCoordinates struct {
	Coordinates Coordinates
	QueryMeta
}

// DatacenterMap is used to represent a list of nodes with their raw coordinates,
// associated with a datacenter. Coordinates are only compatible between nodes in
// the same area.
type DatacenterMap struct {
	Datacenter  string
	AreaID      types.AreaID
	Coordinates Coordinates
}

// CoordinateUpdateRequest is used to update the network coordinate of a given
// node.
type CoordinateUpdateRequest struct {
	Datacenter string
	Node       string
	Segment    string
	Coord      *coordinate.Coordinate
	WriteRequest
}

// RequestDatacenter returns the datacenter for a given update request.
func (c *CoordinateUpdateRequest) RequestDatacenter() string {
	return c.Datacenter
}

// EventFireRequest is used to ask a server to fire
// a Serf event. It is a bit odd, since it doesn't depend on
// the catalog or leader. Any node can respond, so it's not quite
// like a standard write request. This is used only internally.
type EventFireRequest struct {
	Datacenter string
	Name       string
	Payload    []byte

	// Not using WriteRequest so that any server can process
	// the request. It is a bit unusual...
	QueryOptions
}

func (r *EventFireRequest) RequestDatacenter() string {
	return r.Datacenter
}

// EventFireResponse is used to respond to a fire request.
type EventFireResponse struct {
	QueryMeta
}
2014-12-15 23:26:46 +00:00
|
|
|
type TombstoneOp string
|
|
|
|
|
|
|
|
const (
|
|
|
|
TombstoneReap TombstoneOp = "reap"
|
|
|
|
)
|
|
|
|
|
|
|
|
// TombstoneRequest is used to trigger a reaping of the tombstones
|
|
|
|
type TombstoneRequest struct {
|
2014-12-15 23:01:04 +00:00
|
|
|
Datacenter string
|
2014-12-15 23:26:46 +00:00
|
|
|
Op TombstoneOp
|
2014-12-15 23:01:04 +00:00
|
|
|
ReapIndex uint64
|
|
|
|
WriteRequest
|
|
|
|
}
|
|
|
|
|
2014-12-15 23:26:46 +00:00
|
|
|
func (r *TombstoneRequest) RequestDatacenter() string {
|
2014-12-15 23:01:04 +00:00
|
|
|
return r.Datacenter
|
|
|
|
}
|
|
|
|

// MsgpackHandle is a shared handle for encoding/decoding msgpack payloads
var MsgpackHandle = &codec.MsgpackHandle{
	RawToString: true,
	BasicHandle: codec.BasicHandle{
		DecodeOptions: codec.DecodeOptions{
			MapType: reflect.TypeOf(map[string]interface{}{}),
		},
	},
}

// Decode is used to decode a MsgPack encoded object
func Decode(buf []byte, out interface{}) error {
	return codec.NewDecoder(bytes.NewReader(buf), MsgpackHandle).Decode(out)
}

// Encode is used to encode a MsgPack object with type prefix
func Encode(t MessageType, msg interface{}) ([]byte, error) {
	var buf bytes.Buffer
	buf.WriteByte(uint8(t))
	err := codec.NewEncoder(&buf, MsgpackHandle).Encode(msg)
	return buf.Bytes(), err
}
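
// Illustrative sketch (not part of the original source): the first byte carries
// the MessageType, so consumers strip it before decoding the payload; req here
// is a hypothetical request value:
//
//	buf, _ := Encode(RegisterRequestType, req)
//	_ = Decode(buf[1:], &out) // buf[0] is the MessageType prefix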

type ProtoMarshaller interface {
	Size() int
	MarshalTo([]byte) (int, error)
	Unmarshal([]byte) error
	ProtoMessage()
}

func EncodeProtoInterface(t MessageType, message interface{}) ([]byte, error) {
	if marshaller, ok := message.(ProtoMarshaller); ok {
		return EncodeProto(t, marshaller)
	}

	return nil, fmt.Errorf("message does not implement the ProtoMarshaller interface: %T", message)
}

func EncodeProto(t MessageType, message ProtoMarshaller) ([]byte, error) {
	data := make([]byte, message.Size()+1)
	data[0] = uint8(t)
	if _, err := message.MarshalTo(data[1:]); err != nil {
		return nil, err
	}
	return data, nil
}

func DecodeProto(buf []byte, out ProtoMarshaller) error {
	// Note that this assumes the leading byte indicating the type has already been stripped off.
	return out.Unmarshal(buf)
}

// CompoundResponse is an interface for gathering multiple responses. It is
// used in cross-datacenter RPC calls where more than 1 datacenter is
// expected to reply.
type CompoundResponse interface {
	// Add adds a new response to the compound response
	Add(interface{})

	// New returns an empty response object which can be passed around by
	// reference, and then passed to Add() later on.
	New() interface{}
}

type KeyringOp string

const (
	KeyringList    KeyringOp = "list"
	KeyringInstall           = "install"
	KeyringUse               = "use"
	KeyringRemove            = "remove"
)

// KeyringRequest encapsulates a request to modify an encryption keyring.
// It can be used for install, remove, or use key type operations.
type KeyringRequest struct {
	Operation   KeyringOp
	Key         string
	Datacenter  string
	Forwarded   bool
	RelayFactor uint8
	LocalOnly   bool
	QueryOptions
}

func (r *KeyringRequest) RequestDatacenter() string {
	return r.Datacenter
}

// KeyringResponse is a unified key response and can be used for install,
// remove, use, as well as listing key queries.
type KeyringResponse struct {
	WAN        bool
	Datacenter string
	Segment    string
	Messages   map[string]string `json:",omitempty"`
	Keys       map[string]int
	NumNodes   int
	Error      string `json:",omitempty"`
}

// KeyringResponses holds multiple responses to keyring queries. Each
// datacenter replies independently, and KeyringResponses is used as a
// container for the set of all responses.
type KeyringResponses struct {
	Responses []*KeyringResponse
	QueryMeta
}

func (r *KeyringResponses) Add(v interface{}) {
	val := v.(*KeyringResponses)
	r.Responses = append(r.Responses, val.Responses...)
}

func (r *KeyringResponses) New() interface{} {
	return new(KeyringResponses)
}
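
// Illustrative sketch (not part of the original source): the RPC layer can fan a
// keyring query out to every datacenter and merge the replies through the
// CompoundResponse interface:
//
//	total := new(KeyringResponses)
//	for _, dcReply := range replies { // hypothetical per-datacenter *KeyringResponses values
//		total.Add(dcReply)
//	}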