consul/agent/structs/structs.go

// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: BUSL-1.1
package structs
import (
"bytes"
"crypto/md5"
"crypto/sha256"
"encoding/json"
"fmt"
"math/rand"
"os"
"reflect"
"regexp"
"slices"
"sort"
"strconv"
"strings"
"time"
"github.com/hashicorp/go-multierror"
"github.com/hashicorp/serf/coordinate"
"github.com/mitchellh/hashstructure"
"google.golang.org/protobuf/proto"
"google.golang.org/protobuf/types/known/durationpb"
"google.golang.org/protobuf/types/known/timestamppb"
"github.com/hashicorp/consul-net-rpc/go-msgpack/codec"
"github.com/hashicorp/consul/acl"
"github.com/hashicorp/consul/agent/cache"
"github.com/hashicorp/consul/api"
"github.com/hashicorp/consul/lib"
"github.com/hashicorp/consul/types"
)
type MessageType uint8
// RaftIndex is used to track the index used while creating
// or modifying a given struct type.
type RaftIndex struct {
CreateIndex uint64 `bexpr:"-"`
ModifyIndex uint64 `bexpr:"-"`
}
// These are serialized between Consul servers and stored in Consul snapshots,
// so entries must only ever be added.
const (
RegisterRequestType MessageType = 0
DeregisterRequestType = 1
KVSRequestType = 2
SessionRequestType = 3
DeprecatedACLRequestType = 4 // Removed with the legacy ACL system
TombstoneRequestType = 5
CoordinateBatchUpdateType = 6
PreparedQueryRequestType = 7
TxnRequestType = 8
AutopilotRequestType = 9
AreaRequestType = 10
ACLBootstrapRequestType = 11
IntentionRequestType = 12
ConnectCARequestType = 13
ConnectCAProviderStateType = 14
ConnectCAConfigType = 15 // FSM snapshots only.
IndexRequestType = 16 // FSM snapshots only.
ACLTokenSetRequestType = 17
ACLTokenDeleteRequestType = 18
ACLPolicySetRequestType = 19
ACLPolicyDeleteRequestType = 20
ConnectCALeafRequestType = 21
ConfigEntryRequestType = 22
ACLRoleSetRequestType = 23
ACLRoleDeleteRequestType = 24
ACLBindingRuleSetRequestType = 25
ACLBindingRuleDeleteRequestType = 26
ACLAuthMethodSetRequestType = 27
ACLAuthMethodDeleteRequestType = 28
ChunkingStateType = 29
FederationStateRequestType = 30
SystemMetadataRequestType = 31
ServiceVirtualIPRequestType = 32
FreeVirtualIPRequestType = 33
KindServiceNamesType = 34
PeeringWriteType = 35
PeeringDeleteType = 36
PeeringTerminateByIDType = 37
PeeringTrustBundleWriteType = 38
PeeringTrustBundleDeleteType = 39
PeeringSecretsWriteType = 40
RaftLogVerifierCheckpoint = 41 // Only used for log verifier, no-op on FSM.
ResourceOperationType = 42
UpdateVirtualIPRequestType = 43
)
const (
// LocalPeerKeyword is a reserved keyword used for indexing in the state store for objects in the local peer.
LocalPeerKeyword = "~"
// DefaultPeerKeyword is the PeerName to use to refer to the local
// cluster's own data, rather than replicated peered data.
//
// This may internally be converted into LocalPeerKeyword, but external
// uses should not use that symbol directly in most cases.
DefaultPeerKeyword = ""
// TODOPeerKeyword is the peer keyword to use if you aren't sure if the
// usage SHOULD be peering-aware yet.
//
// TODO(peering): remove this in the future
TODOPeerKeyword = ""
)
// requestTypeStrings maps each request type above to a human-readable name,
// used for enhanced snapshot inspection. If a new request type is added above,
// it must also be added to this map.
var requestTypeStrings = map[MessageType]string{
RegisterRequestType: "Register",
DeregisterRequestType: "Deregister",
KVSRequestType: "KVS",
SessionRequestType: "Session",
DeprecatedACLRequestType: "ACL", // DEPRECATED (ACL-Legacy-Compat)
TombstoneRequestType: "Tombstone",
CoordinateBatchUpdateType: "CoordinateBatchUpdate",
PreparedQueryRequestType: "PreparedQuery",
TxnRequestType: "Txn",
AutopilotRequestType: "Autopilot",
AreaRequestType: "Area",
ACLBootstrapRequestType: "ACLBootstrap",
IntentionRequestType: "Intention",
ConnectCARequestType: "ConnectCA",
ConnectCAProviderStateType: "ConnectCAProviderState",
ConnectCAConfigType: "ConnectCAConfig", // FSM snapshots only.
IndexRequestType: "Index", // FSM snapshots only.
ACLTokenSetRequestType: "ACLToken",
ACLTokenDeleteRequestType: "ACLTokenDelete",
ACLPolicySetRequestType: "ACLPolicy",
ACLPolicyDeleteRequestType: "ACLPolicyDelete",
ConnectCALeafRequestType: "ConnectCALeaf",
ConfigEntryRequestType: "ConfigEntry",
ACLRoleSetRequestType: "ACLRole",
ACLRoleDeleteRequestType: "ACLRoleDelete",
ACLBindingRuleSetRequestType: "ACLBindingRule",
ACLBindingRuleDeleteRequestType: "ACLBindingRuleDelete",
ACLAuthMethodSetRequestType: "ACLAuthMethod",
ACLAuthMethodDeleteRequestType: "ACLAuthMethodDelete",
ChunkingStateType: "ChunkingState",
FederationStateRequestType: "FederationState",
SystemMetadataRequestType: "SystemMetadata",
ServiceVirtualIPRequestType: "ServiceVirtualIP",
FreeVirtualIPRequestType: "FreeVirtualIP",
KindServiceNamesType: "KindServiceName",
PeeringWriteType: "Peering",
PeeringDeleteType: "PeeringDelete",
PeeringTrustBundleWriteType: "PeeringTrustBundle",
PeeringTrustBundleDeleteType: "PeeringTrustBundleDelete",
PeeringSecretsWriteType: "PeeringSecret",
RaftLogVerifierCheckpoint: "RaftLogVerifierCheckpoint",
ResourceOperationType: "Resource",
UpdateVirtualIPRequestType: "UpdateManualVirtualIPRequestType",
}
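// Illustrative sketch (not part of the original file; the helper name is
// hypothetical): resolving a human-readable name for a message type via
// requestTypeStrings, falling back to the numeric value for unknown types.
func requestTypeNameSketch(t MessageType) string {
	if s, ok := requestTypeStrings[t]; ok {
		return s
	}
	return fmt.Sprintf("Unknown(%d)", t)
}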
const (
// IgnoreUnknownTypeFlag is set along with a MessageType
// to indicate that the message type can be safely ignored
// if it is not recognized. This is for future proofing, so
// that new commands can be added in a way that won't cause
// old servers to crash when the FSM attempts to process them.
IgnoreUnknownTypeFlag MessageType = 128
// NodeMaint is the special key set by a node in maintenance mode.
NodeMaint = "_node_maintenance"
// ServiceMaintPrefix is the prefix for a service in maintenance mode.
ServiceMaintPrefix = "_service_maintenance:"
// The meta key prefix reserved for Consul's internal use
MetaKeyReservedPrefix = "consul-"
// metaMaxKeyPairs is the maximum number of metadata key pairs allowed to be registered
metaMaxKeyPairs = 64
// metaKeyMaxLength is the maximum allowed length of a metadata key
metaKeyMaxLength = 128
// metaValueMaxLength is the maximum allowed length of a metadata value
metaValueMaxLength = 512
// MetaSegmentKey is the node metadata key used to store the node's network segment
MetaSegmentKey = "consul-network-segment"
// MetaWANFederationKey is the mesh gateway metadata key that indicates a
// mesh gateway is usable for wan federation.
MetaWANFederationKey = "consul-wan-federation"
// MetaExternalSource is the metadata key used when a resource is managed by a source outside Consul like nomad/k8s
MetaExternalSource = "external-source"
// TaggedAddressVirtualIP is the key used to store tagged virtual IPs generated by Consul.
TaggedAddressVirtualIP = "consul-virtual"
// MaxLockDelay provides a maximum LockDelay value for
// a session. Any value above this will not be respected.
MaxLockDelay = 60 * time.Second
// lockDelayMinThreshold is used in JSON decoding to convert a
// numeric lock delay value from nanoseconds to seconds if it is
// below this threshold. Users often send a value like 5, which they
// assume is seconds, but because Go uses nanosecond granularity it
// ends up being very small. If we see a value below this threshold,
// we multiply by time.Second.
lockDelayMinThreshold = 1000
// JitterFraction is the limit on the amount of jitter we apply
// to a user-specified MaxQueryTime. We divide the specified time by
// the fraction, so 16 == a 6.25% jitter limit. The same fraction
// is applied to the RPCHoldTimeout.
JitterFraction = 16
// WildcardSpecifier is the string used to specify a wildcard.
// The exact semantics of the wildcard are left up to the code where it's used.
WildcardSpecifier = "*"
// MetaConsulVersion is the node metadata key used to store the node's consul version
MetaConsulVersion = "consul-version"
)
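// Illustrative sketch (not part of the original file; the helper name and the
// clamping behavior are assumptions about how callers use these constants):
// normalizing a user-supplied lock delay with lockDelayMinThreshold and
// MaxLockDelay, so a raw value like 5 is treated as 5 seconds, not 5ns.
func normalizeLockDelaySketch(raw time.Duration) time.Duration {
	if raw < lockDelayMinThreshold {
		raw *= time.Second // e.g. 5 -> 5s instead of 5ns
	}
	if raw > MaxLockDelay {
		raw = MaxLockDelay // values above MaxLockDelay are not respected
	}
	return raw
}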
var allowedConsulMetaKeysForMeshGateway = map[string]struct{}{MetaWANFederationKey: {}}
// CEDowngrade indicates whether we are downgrading from Enterprise (ent) to Community Edition (ce)
var CEDowngrade = os.Getenv("CONSUL_ENTERPRISE_DOWNGRADE_TO_CE") == "true"
var (
NodeMaintCheckID = NewCheckID(NodeMaint, nil)
)
const (
TaggedAddressWAN = "wan"
TaggedAddressWANIPv4 = "wan_ipv4"
TaggedAddressWANIPv6 = "wan_ipv6"
TaggedAddressLAN = "lan"
TaggedAddressLANIPv4 = "lan_ipv4"
TaggedAddressLANIPv6 = "lan_ipv6"
)
// metaKeyFormat checks if a metadata key string is valid
var metaKeyFormat = regexp.MustCompile(`^[a-zA-Z0-9_-]+$`).MatchString
func ValidStatus(s string) bool {
return s == api.HealthPassing || s == api.HealthWarning || s == api.HealthCritical
}
// RPCInfo is used to describe common information about a query
type RPCInfo interface {
RequestDatacenter() string
IsRead() bool
AllowStaleRead() bool
TokenSecret() string
SetTokenSecret(string)
HasTimedOut(since time.Time, rpcHoldTimeout, maxQueryTime, defaultQueryTime time.Duration) (bool, error)
}
// QueryOptions is used to specify various flags for read queries
type QueryOptions struct {
// Token is the ACL token ID. If not provided, the 'anonymous'
// token is assumed for backwards compatibility.
Token string `mapstructure:"x-consul-token,omitempty"`
// If set, wait until query exceeds given index. Must be provided
// with MaxQueryTime.
MinQueryIndex uint64 `mapstructure:"min-query-index,omitempty"`
// Provided with MinQueryIndex to wait for change.
MaxQueryTime time.Duration `mapstructure:"max-query-time,omitempty"`
// If set, any follower can service the request. Results
// may be arbitrarily stale.
AllowStale bool `mapstructure:"allow-stale,omitempty"`
// If set, the leader must verify leadership prior to
// servicing the request. Prevents a stale read.
RequireConsistent bool `mapstructure:"require-consistent,omitempty"`
// If set, the local agent may respond with an arbitrarily stale locally
// cached response. The semantics differ from AllowStale since the agent may
// be entirely partitioned from the servers and still considered "healthy" by
// operators. Stale responses from Servers are also arbitrarily stale, but can
// provide additional bounds on the last contact time from the leader. It's
// expected that servers that are partitioned are noticed and replaced in a
// timely way by operators while the same may not be true for client agents.
UseCache bool `mapstructure:"use-cache,omitempty"`
// If set and AllowStale is true, will try first a stale
// read, and then will perform a consistent read if stale
// read is older than value.
MaxStaleDuration time.Duration `mapstructure:"max-stale-duration,omitempty"`
// MaxAge limits how old a cached value will be returned if UseCache is true.
// If there is a cached response that is older than the MaxAge, it is treated
// as a cache miss and a new fetch invoked. If the fetch fails, the error is
// returned. Clients that wish to allow for stale results on error can set
// StaleIfError to a longer duration to change this behavior. It is ignored
// if the endpoint supports background refresh caching. See
// https://www.consul.io/api/index.html#agent-caching for more details.
MaxAge time.Duration `mapstructure:"max-age,omitempty"`
// MustRevalidate forces the agent to fetch a fresh version of a cached
// resource or at least validate that the cached version is still fresh. It is
// implied by either max-age=0 or must-revalidate Cache-Control headers. It
// only makes sense when UseCache is true. We store it since MaxAge = 0 is the
// default unset value.
MustRevalidate bool `mapstructure:"must-revalidate,omitempty"`
// StaleIfError specifies how stale the client will accept a cached response
// if the servers are unavailable to fetch a fresh one. Only makes sense when
// UseCache is true and MaxAge is set to a lower, non-zero value. It is
// ignored if the endpoint supports background refresh caching. See
// https://www.consul.io/api/index.html#agent-caching for more details.
StaleIfError time.Duration `mapstructure:"stale-if-error,omitempty"`
// Filter specifies the go-bexpr filter expression to be used for
// filtering the data prior to returning a response
Filter string `mapstructure:"filter,omitempty"`
// AllowNotModifiedResponse indicates that if the MinIndex matches the
// QueryMeta.Index, the response can be left empty and QueryMeta.NotModified
// will be set to true to indicate the result of the query has not changed.
AllowNotModifiedResponse bool `mapstructure:"allow-not-modified-response,omitempty"`
}
// IsRead is always true for QueryOptions.
func (q QueryOptions) IsRead() bool {
return true
}
// ConsistencyLevel displays the consistency required by a request
func (q QueryOptions) ConsistencyLevel() string {
if q.RequireConsistent {
return "consistent"
} else if q.AllowStale {
return "stale"
} else {
return "leader"
}
}
func (q QueryOptions) AllowStaleRead() bool {
return q.AllowStale
}
func (q QueryOptions) TokenSecret() string {
return q.Token
}
func (q *QueryOptions) SetTokenSecret(s string) {
q.Token = s
}
// BlockingTimeout implements pool.BlockableQuery
func (q QueryOptions) BlockingTimeout(maxQueryTime, defaultQueryTime time.Duration) time.Duration {
// Match logic in Server.blockingQuery.
if q.MinQueryIndex > 0 {
if q.MaxQueryTime > maxQueryTime {
q.MaxQueryTime = maxQueryTime
} else if q.MaxQueryTime <= 0 {
q.MaxQueryTime = defaultQueryTime
}
// Timeout after maximum jitter has elapsed.
q.MaxQueryTime += q.MaxQueryTime / JitterFraction
return q.MaxQueryTime
}
return 0
}
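// Minimal sketch (not part of the original file; the 10m/5m limits are
// hypothetical server settings): BlockingTimeout clamps an unset MaxQueryTime
// to the default and pads it by 1/JitterFraction to cover server-side jitter.
func blockingTimeoutSketch() time.Duration {
	opts := QueryOptions{MinQueryIndex: 42} // blocking query, MaxQueryTime unset
	// With maxQueryTime=10m and defaultQueryTime=5m this yields 5m + 5m/16 = 5m18.75s.
	return opts.BlockingTimeout(10*time.Minute, 5*time.Minute)
}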
func (q QueryOptions) HasTimedOut(start time.Time, rpcHoldTimeout, maxQueryTime, defaultQueryTime time.Duration) (bool, error) {
// In addition to BlockingTimeout, allow for an additional rpcHoldTimeout buffer
// in case we need to wait for a leader election.
return time.Since(start) > rpcHoldTimeout+q.BlockingTimeout(maxQueryTime, defaultQueryTime), nil
}
type WriteRequest struct {
// Token is the ACL token ID. If not provided, the 'anonymous'
// token is assumed for backwards compatibility.
Token string
}
// IsRead is always false for WriteRequest, which only applies to writes.
func (w WriteRequest) IsRead() bool {
return false
}
func (w WriteRequest) AllowStaleRead() bool {
return false
}
func (w WriteRequest) TokenSecret() string {
return w.Token
}
func (w *WriteRequest) SetTokenSecret(s string) {
w.Token = s
}
func (w WriteRequest) HasTimedOut(start time.Time, rpcHoldTimeout, _, _ time.Duration) (bool, error) {
return time.Since(start) > rpcHoldTimeout, nil
}
type QueryBackend int
const (
QueryBackendBlocking QueryBackend = iota
QueryBackendStreaming
)
func (q QueryBackend) GoString() string { return q.String() }
func (q QueryBackend) String() string {
switch q {
case QueryBackendBlocking:
return "blocking-query"
case QueryBackendStreaming:
return "streaming"
default:
return ""
}
}
func QueryBackendFromString(s string) QueryBackend {
switch s {
case "blocking-query":
return QueryBackendBlocking
case "streaming":
return QueryBackendStreaming
default:
return QueryBackendBlocking
}
}
// QueryMeta allows a query response to include potentially
// useful metadata about a query
type QueryMeta struct {
// Index in the raft log of the latest item returned by the query. If the
// query did not return any results the Index will be a value that will
// change when a new item is added.
Index uint64
// If AllowStale is used, this is the time elapsed since the
// last contact between the follower and the leader. This
// can be used to gauge staleness.
LastContact time.Duration
// Used to indicate if there is a known leader node
KnownLeader bool
// ConsistencyLevel returns the consistency used to serve the query
// Having `discovery_max_stale` on the agent can affect whether
// the request was served by a leader.
ConsistencyLevel string
// NotModified is true when the Index of the query is the same value as the
// requested MinIndex. It indicates that the entity has not been modified.
// When NotModified is true, the response will not contain the result of
// the query.
NotModified bool
// Backend used to handle this query, either blocking-query or streaming.
Backend QueryBackend
// ResultsFilteredByACLs is true when some of the query's results were
// filtered out by enforcing ACLs. It may be false because nothing was
// removed, or because the endpoint does not yet support this flag.
ResultsFilteredByACLs bool
}
// RegisterRequest is used for the Catalog.Register endpoint
// to register a node as providing a service. If no service
// is provided, the node is registered.
type RegisterRequest struct {
Datacenter string
ID types.NodeID
Node string
Address string
TaggedAddresses map[string]string
NodeMeta map[string]string
Service *NodeService
Check *HealthCheck
Checks HealthChecks
Locality *Locality
// SkipNodeUpdate can be used when a register request is intended for
// updating a service and/or checks, but doesn't want to overwrite any
// node information if the node is already registered. If the node
// doesn't exist, it will still be created, but if the node exists, any
// node portion of this update will not apply.
SkipNodeUpdate bool
PeerName string
// EnterpriseMeta is the embedded enterprise metadata
acl.EnterpriseMeta `hcl:",squash" mapstructure:",squash"`
WriteRequest
RaftIndex `bexpr:"-"`
}
func (r *RegisterRequest) RequestDatacenter() string {
return r.Datacenter
}
// ChangesNode returns true if the given register request changes the given
// node, which can be nil. This only looks for changes to the node record itself,
// not any of the health checks.
func (r *RegisterRequest) ChangesNode(node *Node) bool {
// This means it's creating the node.
if node == nil {
return true
}
// If we've been asked to skip the node update, then say there are no
// changes.
if r.SkipNodeUpdate {
return false
}
// Check if any of the node-level fields are being changed.
if r.ID != node.ID ||
!strings.EqualFold(r.Node, node.Node) ||
r.PartitionOrDefault() != node.PartitionOrDefault() ||
r.PeerName != node.PeerName ||
r.Address != node.Address ||
r.Datacenter != node.Datacenter ||
!reflect.DeepEqual(r.TaggedAddresses, node.TaggedAddresses) ||
!reflect.DeepEqual(r.NodeMeta, node.Meta) ||
!reflect.DeepEqual(r.Locality, node.Locality) {
return true
}
return false
}
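// Illustrative sketch (not part of the original file; names and addresses are
// hypothetical): with SkipNodeUpdate set, ChangesNode reports no change even
// though the request's Address differs from the stored node's.
func skipNodeUpdateSketch() bool {
	existing := &Node{Node: "web-1", Address: "10.0.0.1"}
	req := &RegisterRequest{Node: "web-1", Address: "10.0.0.9", SkipNodeUpdate: true}
	return req.ChangesNode(existing) // false: the node portion of the update is ignored
}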
// DeregisterRequest is used for the Catalog.Deregister endpoint to
// deregister a service, check, or node (only one should be provided).
// If ServiceID or CheckID are not provided, the entire node is deregistered.
// If a ServiceID is provided, any associated Checks with that service
// are also deregistered.
type DeregisterRequest struct {
Datacenter string
Node string
ServiceID string
CheckID types.CheckID
PeerName string
acl.EnterpriseMeta `hcl:",squash" mapstructure:",squash"`
WriteRequest
}
func (r *DeregisterRequest) RequestDatacenter() string {
return r.Datacenter
}
func (r *DeregisterRequest) UnmarshalJSON(data []byte) error {
type Alias DeregisterRequest
aux := &struct {
Address string // obsolete field - but we want to explicitly allow it
*Alias
}{
Alias: (*Alias)(r),
}
if err := lib.UnmarshalJSON(data, &aux); err != nil {
return err
}
return nil
}
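// Illustrative sketch (not part of the original file; the payload is
// hypothetical): the Alias/aux pattern above tolerates the obsolete Address
// field in a deregister payload instead of rejecting it as an unknown field.
func deregisterJSONSketch() (*DeregisterRequest, error) {
	var req DeregisterRequest
	body := []byte(`{"Datacenter":"dc1","Node":"web-1","Address":"10.0.0.1"}`)
	if err := json.Unmarshal(body, &req); err != nil {
		return nil, err
	}
	return &req, nil // Address is accepted and silently dropped
}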
// QuerySource is used to pass along information about the source node
// in queries so that we can adjust the response based on its network
// coordinates.
type QuerySource struct {
Datacenter string
Segment string
Node string
NodePartition string `json:",omitempty"`
// DisableNode indicates that the Node and NodePartition fields should not be used
// for determining the flow of the RPC. This is needed for agentless + wanfed to
// utilize streaming RPCs.
DisableNode bool `json:",omitempty"`
Ip string
}
func (s QuerySource) NodeEnterpriseMeta() *acl.EnterpriseMeta {
return NodeEnterpriseMetaInPartition(s.NodePartition)
}
func (s QuerySource) NodePartitionOrDefault() string {
return acl.PartitionOrDefault(s.NodePartition)
}
type DatacentersRequest struct {
QueryOptions
}
func (r *DatacentersRequest) CacheInfo() cache.RequestInfo {
return cache.RequestInfo{
Token: "",
Datacenter: "",
MinIndex: 0,
Timeout: r.MaxQueryTime,
MaxAge: r.MaxAge,
MustRevalidate: r.MustRevalidate,
Key: "catalog-datacenters", // must not be empty for cache to work
}
}
// DCSpecificRequest is used to query about a specific DC
type DCSpecificRequest struct {
Datacenter string
NodeMetaFilters map[string]string
Source QuerySource
PeerName string
acl.EnterpriseMeta `hcl:",squash" mapstructure:",squash"`
QueryOptions
}
func (r *DCSpecificRequest) RequestDatacenter() string {
return r.Datacenter
}
func (r *DCSpecificRequest) CacheInfo() cache.RequestInfo {
info := cache.RequestInfo{
Token: r.Token,
Datacenter: r.Datacenter,
PeerName: r.PeerName,
MinIndex: r.MinQueryIndex,
Timeout: r.MaxQueryTime,
MaxAge: r.MaxAge,
MustRevalidate: r.MustRevalidate,
}
// To calculate the cache key we only hash the node meta filters, the bexpr
// filter, and the enterprise meta. The datacenter is handled by the cache
// framework. The other fields are not, but should not be used in any cache types.
v, err := hashstructure.Hash([]interface{}{
r.NodeMetaFilters,
r.Filter,
r.EnterpriseMeta,
}, nil)
if err == nil {
// If there is an error, we don't set the key. A blank key forces
// no cache for this request so the request is forwarded directly
// to the server.
info.Key = strconv.FormatUint(v, 10)
}
return info
}
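// Illustrative sketch (not part of the original file; datacenters, token, and
// filter are hypothetical): only the hashed fields feed CacheInfo.Key, while
// Token, Datacenter, and MinIndex are layered on separately by the cache
// framework, so these two requests derive the same key.
func dcCacheKeySketch() bool {
	a := &DCSpecificRequest{Datacenter: "dc1", QueryOptions: QueryOptions{Filter: `Meta.env == "prod"`}}
	b := &DCSpecificRequest{Datacenter: "dc2", QueryOptions: QueryOptions{Token: "secret", Filter: `Meta.env == "prod"`}}
	return a.CacheInfo().Key == b.CacheInfo().Key // true
}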
func (r *DCSpecificRequest) CacheMinIndex() uint64 {
return r.QueryOptions.MinQueryIndex
}
type OperatorUsageRequest struct {
DCSpecificRequest
Global bool
}
type ServiceDumpRequest struct {
Datacenter string
ServiceKind ServiceKind
UseServiceKind bool
NodesOnly bool
Source QuerySource
acl.EnterpriseMeta `hcl:",squash" mapstructure:",squash"`
PeerName string
QueryOptions
}
func (r *ServiceDumpRequest) RequestDatacenter() string {
return r.Datacenter
}
func (r *ServiceDumpRequest) CacheInfo() cache.RequestInfo {
info := cache.RequestInfo{
Token: r.Token,
Datacenter: r.Datacenter,
PeerName: r.PeerName,
MinIndex: r.MinQueryIndex,
Timeout: r.MaxQueryTime,
MaxAge: r.MaxAge,
MustRevalidate: r.MustRevalidate,
}
// When we are not using the service kind we want to normalize the ServiceKind
keyKind := ServiceKindTypical
if r.UseServiceKind {
keyKind = r.ServiceKind
}
// To calculate the cache key we only hash the normalized service kind, the
// UseServiceKind and NodesOnly flags, the bexpr filter, and the enterprise
// meta. The datacenter is handled by the cache framework. The other fields are
// not, but should not be used in any cache types.
v, err := hashstructure.Hash([]interface{}{
keyKind,
r.UseServiceKind,
r.NodesOnly,
r.Filter,
r.EnterpriseMeta,
}, nil)
if err == nil {
// If there is an error, we don't set the key. A blank key forces
// no cache for this request so the request is forwarded directly
// to the server.
info.Key = strconv.FormatUint(v, 10)
}
return info
}
func (r *ServiceDumpRequest) CacheMinIndex() uint64 {
return r.QueryOptions.MinQueryIndex
}
// PartitionSpecificRequest is used to query about a specific partition.
type PartitionSpecificRequest struct {
Datacenter string
acl.EnterpriseMeta
QueryOptions
}
func (r *PartitionSpecificRequest) RequestDatacenter() string {
return r.Datacenter
}
func (r *PartitionSpecificRequest) CacheInfo() cache.RequestInfo {
return cache.RequestInfo{
Token: r.Token,
Datacenter: r.Datacenter,
MinIndex: r.MinQueryIndex,
Timeout: r.MaxQueryTime,
MaxAge: r.MaxAge,
MustRevalidate: r.MustRevalidate,
Key: r.EnterpriseMeta.PartitionOrDefault(),
}
}
// ServiceSpecificRequest is used to query about a specific service
type ServiceSpecificRequest struct {
Datacenter string
// The name of the peer that the requested service was imported from.
PeerName string
// The name of the sameness group that should be the target of the query.
SamenessGroup string
NodeMetaFilters map[string]string
ServiceName string
ServiceKind ServiceKind
// DEPRECATED (singular-service-tag) - remove this when backwards RPC compat
// with 1.2.x is not required.
ServiceTag string
ServiceTags []string
ServiceAddress string
TagFilter bool // Controls tag filtering
HealthFilterType HealthFilterType
Source QuerySource
// Connect if true will only search for Connect-compatible services.
Connect bool
// Ingress if true will only search for Ingress gateways for the given service.
Ingress bool
// MergeCentralConfig when set to true returns a service definition merged with
// the proxy-defaults/global and service-defaults/:service config entries.
// This can be used to ensure a full service definition is returned in the response
// especially when the service might not be written into the catalog that way.
MergeCentralConfig bool
acl.EnterpriseMeta `hcl:",squash" mapstructure:",squash"`
QueryOptions
}
func (r *ServiceSpecificRequest) RequestDatacenter() string {
return r.Datacenter
}
func (r *ServiceSpecificRequest) CacheInfo() cache.RequestInfo {
info := cache.RequestInfo{
Token: r.Token,
Datacenter: r.Datacenter,
MinIndex: r.MinQueryIndex,
Timeout: r.MaxQueryTime,
MaxAge: r.MaxAge,
MustRevalidate: r.MustRevalidate,
}
// To calculate the cache key we hash over all the fields that affect the
// output other than Datacenter and Token which are dealt with in the cache
// framework already. Note the order here is important for the outcome - if we
// ever care about cache-invalidation on updates e.g. because we persist
// cached results, we need to be careful we maintain the same order of fields
// here. We could alternatively use `hash:set` struct tag on an anonymous
// struct to make it more robust if it becomes significant.
sort.Strings(r.ServiceTags)
v, err := hashstructure.Hash([]interface{}{
r.NodeMetaFilters,
strings.ToLower(r.ServiceName),
// DEPRECATED (singular-service-tag) - remove this when upgrade RPC compat
// with 1.2.x is not required. We still need this in because <1.3 agents
// might still send RPCs with singular tag set. In fact the only place we
// use this method is in agent cache so if the agent is new enough to have
// this code this should never be set, but it's safer to include it until we
// completely remove this field just in case it's erroneously used anywhere
// (e.g. until this change DNS still used it).
r.ServiceTag,
r.ServiceTags,
r.ServiceAddress,
r.TagFilter,
r.Connect,
r.Filter,
r.EnterpriseMeta,
r.PeerName,
r.SamenessGroup,
r.Ingress,
r.ServiceKind,
r.MergeCentralConfig,
r.HealthFilterType,
}, nil)
if err == nil {
// If there is an error, we don't set the key. A blank key forces
// no cache for this request so the request is forwarded directly
// to the server.
info.Key = strconv.FormatUint(v, 10)
}
return info
}
func (r *ServiceSpecificRequest) CacheMinIndex() uint64 {
return r.QueryOptions.MinQueryIndex
}
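// Illustrative sketch (not part of the original file; service name and tags are
// hypothetical): because CacheInfo sorts ServiceTags before hashing, requests
// that list the same tags in a different order derive the same cache key.
func serviceTagOrderSketch() bool {
	a := &ServiceSpecificRequest{ServiceName: "web", ServiceTags: []string{"v1", "primary"}}
	b := &ServiceSpecificRequest{ServiceName: "web", ServiceTags: []string{"primary", "v1"}}
	return a.CacheInfo().Key == b.CacheInfo().Key // true
}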
// NodeSpecificRequest is used to request the information about a single node
type NodeSpecificRequest struct {
Datacenter string
Node string
PeerName string
// MergeCentralConfig when set to true returns a service definition merged with
// the proxy-defaults/global and service-defaults/:service config entries.
// This can be used to ensure a full service definition is returned in the response
// especially when the service might not be written into the catalog that way.
MergeCentralConfig bool
acl.EnterpriseMeta `hcl:",squash" mapstructure:",squash"`
QueryOptions
}
func (r *NodeSpecificRequest) RequestDatacenter() string {
return r.Datacenter
}
func (r *NodeSpecificRequest) CacheInfo() cache.RequestInfo {
info := cache.RequestInfo{
Token: r.Token,
Datacenter: r.Datacenter,
MinIndex: r.MinQueryIndex,
Timeout: r.MaxQueryTime,
MaxAge: r.MaxAge,
MustRevalidate: r.MustRevalidate,
}
v, err := hashstructure.Hash([]interface{}{
r.Node,
r.Filter,
r.EnterpriseMeta,
r.MergeCentralConfig,
}, nil)
if err == nil {
// If there is an error, we don't set the key. A blank key forces
// no cache for this request so the request is forwarded directly
// to the server.
info.Key = strconv.FormatUint(v, 10)
}
return info
}
// ChecksInStateRequest is used to query for checks in a state
type ChecksInStateRequest struct {
Datacenter string
NodeMetaFilters map[string]string
State string
Source QuerySource
PeerName string
acl.EnterpriseMeta `mapstructure:",squash"`
QueryOptions
}
func (r *ChecksInStateRequest) RequestDatacenter() string {
return r.Datacenter
}
// Used to return information about a node
type Node struct {
ID types.NodeID
Node string
Address string
Datacenter string
Partition string `json:",omitempty"`
PeerName string `json:",omitempty"`
TaggedAddresses map[string]string
Meta map[string]string
Locality *Locality `json:",omitempty" bexpr:"-"`
RaftIndex `bexpr:"-"`
}
func (n *Node) PeerOrEmpty() string {
return n.PeerName
}
func (n *Node) GetEnterpriseMeta() *acl.EnterpriseMeta {
return NodeEnterpriseMetaInPartition(n.Partition)
}
func (n *Node) PartitionOrDefault() string {
return acl.PartitionOrDefault(n.Partition)
}
func (n *Node) BestAddress(wan bool) string {
if wan {
if addr, ok := n.TaggedAddresses[TaggedAddressWAN]; ok {
return addr
}
}
return n.Address
}
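// Illustrative sketch (not part of the original file; addresses are
// hypothetical): BestAddress prefers the "wan" tagged address for WAN callers
// and falls back to the plain Address otherwise.
func bestAddressSketch() (lan, wan string) {
	n := &Node{
		Address:         "10.0.0.1",
		TaggedAddresses: map[string]string{TaggedAddressWAN: "198.51.100.7"},
	}
	return n.BestAddress(false), n.BestAddress(true) // "10.0.0.1", "198.51.100.7"
}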
func (n *Node) ToRegisterRequest() RegisterRequest {
return RegisterRequest{
ID: n.ID,
Node: n.Node,
Datacenter: n.Datacenter,
Address: n.Address,
TaggedAddresses: n.TaggedAddresses,
NodeMeta: n.Meta,
RaftIndex: n.RaftIndex,
EnterpriseMeta: *n.GetEnterpriseMeta(),
PeerName: n.PeerName,
}
}
type Nodes []*Node
// IsSame returns whether nodes are similar without taking into account
// RaftIndex fields.
func (n *Node) IsSame(other *Node) bool {
return n.ID == other.ID &&
strings.EqualFold(n.Node, other.Node) &&
n.PartitionOrDefault() == other.PartitionOrDefault() &&
strings.EqualFold(n.PeerName, other.PeerName) &&
n.Address == other.Address &&
n.Datacenter == other.Datacenter &&
reflect.DeepEqual(n.TaggedAddresses, other.TaggedAddresses) &&
reflect.DeepEqual(n.Meta, other.Meta)
}
// ValidateNodeMetadata validates a set of key/value pairs from the agent
// config for use on a Node.
func ValidateNodeMetadata(meta map[string]string, allowConsulPrefix bool) error {
return validateMetadata(meta, allowConsulPrefix, nil)
}
// ValidateServiceMetadata validates a set of key/value pairs from the agent
// config for use on a Service.
func ValidateServiceMetadata(kind ServiceKind, meta map[string]string, allowConsulPrefix bool) error {
switch kind {
case ServiceKindMeshGateway:
return validateMetadata(meta, allowConsulPrefix, allowedConsulMetaKeysForMeshGateway)
default:
return validateMetadata(meta, allowConsulPrefix, nil)
}
}
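// Illustrative sketch, not part of the original source: a registration path
// might validate service meta before accepting it, e.g.
//
//	meta := map[string]string{"env": "prod"}
//	if err := ValidateServiceMetadata(ServiceKindTypical, meta, false); err != nil {
//		// reject the registration with the returned error
//	}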
// ValidateMetaTags validates arbitrary key/value pairs from the agent_endpoints
func ValidateMetaTags(metaTags map[string]string) error {
return validateMetadata(metaTags, false, nil)
}
func validateMetadata(meta map[string]string, allowConsulPrefix bool, allowedConsulKeys map[string]struct{}) error {
if len(meta) > metaMaxKeyPairs {
return fmt.Errorf("Node metadata cannot contain more than %d key/value pairs", metaMaxKeyPairs)
}
for key, value := range meta {
if err := validateMetaPair(key, value, allowConsulPrefix, allowedConsulKeys); err != nil {
return fmt.Errorf("Couldn't load metadata pair ('%s', '%s'): %s", key, value, err)
}
}
return nil
}
// ValidateWeights checks that the definition of DNS weights is valid
func ValidateWeights(weights *Weights) error {
if weights == nil {
return nil
}
if weights.Passing < 1 {
return fmt.Errorf("Passing must be greater than 0")
}
if weights.Warning < 0 {
return fmt.Errorf("Warning must be greater or equal than 0")
}
if weights.Passing > 65535 || weights.Warning > 65535 {
return fmt.Errorf("DNS Weight must be between 0 and 65535")
}
return nil
}
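// Illustrative sketch, not part of the original source: Passing must be at
// least 1, Warning may be 0, and both are capped at 65535, so
//
//	ValidateWeights(&Weights{Passing: 1, Warning: 0})     // nil
//	ValidateWeights(&Weights{Passing: 0, Warning: 0})     // error: Passing must be greater than 0
//	ValidateWeights(&Weights{Passing: 1, Warning: 70000}) // error: DNS Weight must be between 0 and 65535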
// validateMetaPair checks that the given key/value pair is in a valid format
func validateMetaPair(key, value string, allowConsulPrefix bool, allowedConsulKeys map[string]struct{}) error {
if key == "" {
return fmt.Errorf("Key cannot be blank")
}
if !metaKeyFormat(key) {
return fmt.Errorf("Key contains invalid characters")
}
if len(key) > metaKeyMaxLength {
return fmt.Errorf("Key is too long (limit: %d characters)", metaKeyMaxLength)
}
if strings.HasPrefix(key, MetaKeyReservedPrefix) {
if _, ok := allowedConsulKeys[key]; !allowConsulPrefix && !ok {
return fmt.Errorf("Key prefix '%s' is reserved for internal use", MetaKeyReservedPrefix)
}
}
if len(value) > metaValueMaxLength {
return fmt.Errorf("Value is too long (limit: %d characters)", metaValueMaxLength)
}
return nil
}
// SatisfiesMetaFilters returns true if the metadata map contains the given filters
func SatisfiesMetaFilters(meta map[string]string, filters map[string]string) bool {
for key, value := range filters {
if v, ok := meta[key]; !ok || v != value {
return false
}
}
return true
}
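// Illustrative sketch, not part of the original source: every filter key must
// be present in meta with an identical value; extra meta keys are ignored, so
//
//	meta := map[string]string{"env": "prod", "team": "core"}
//	SatisfiesMetaFilters(meta, map[string]string{"env": "prod"}) // true
//	SatisfiesMetaFilters(meta, map[string]string{"env": "dev"})  // false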
// Services is used to return information about the provided services.
// It maps service name to available tags.
type Services map[string][]string
// ServiceNode represents a node that is part of a service. ID, Address,
// TaggedAddresses, and NodeMeta are node-related fields that are always empty
// in the state store and are filled in on the way out by parseServiceNodes().
// This is also why PartialClone() skips them, because we know they are blank
// already so it would be a waste of time to copy them.
// This is somewhat complicated when the address is really a unix domain socket; technically that
// will override the address field, but in practice the two use cases should not overlap.
type ServiceNode struct {
ID types.NodeID
Node string
Address string
Datacenter string
TaggedAddresses map[string]string
NodeMeta map[string]string
ServiceKind ServiceKind
ServiceID string
ServiceName string
ServiceTags []string
ServiceAddress string
ServiceTaggedAddresses map[string]ServiceAddress `json:",omitempty"`
ServiceWeights Weights
ServiceMeta map[string]string
ServicePort int
ServiceSocketPath string
ServiceEnableTagOverride bool
ServiceProxy ConnectProxyConfig
ServiceConnect ServiceConnect
ServiceLocality *Locality `bexpr:"-"`
// If not empty, PeerName represents the peer that this ServiceNode was imported from.
PeerName string `json:",omitempty"`
acl.EnterpriseMeta `hcl:",squash" mapstructure:",squash" bexpr:"-"`
RaftIndex `bexpr:"-"`
}
func (s *ServiceNode) FillAuthzContext(ctx *acl.AuthorizerContext) {
if ctx == nil {
return
}
ctx.Peer = s.PeerName
s.EnterpriseMeta.FillAuthzContext(ctx)
}
func (s *ServiceNode) PeerOrEmpty() string {
return s.PeerName
}
// PartialClone() returns a clone of the given service node, minus the node-
// related fields that get filled in later, Address and TaggedAddresses.
func (s *ServiceNode) PartialClone() *ServiceNode {
tags := make([]string, len(s.ServiceTags))
copy(tags, s.ServiceTags)
nsmeta := make(map[string]string)
for k, v := range s.ServiceMeta {
nsmeta[k] = v
}
var svcTaggedAddrs map[string]ServiceAddress
if len(s.ServiceTaggedAddresses) > 0 {
svcTaggedAddrs = make(map[string]ServiceAddress)
for k, v := range s.ServiceTaggedAddresses {
svcTaggedAddrs[k] = v
}
}
return &ServiceNode{
// Skip ID, see above.
Node: s.Node,
// Skip Address, see above.
// Skip TaggedAddresses, see above.
ServiceKind: s.ServiceKind,
ServiceID: s.ServiceID,
ServiceName: s.ServiceName,
ServiceTags: tags,
ServiceAddress: s.ServiceAddress,
ServiceSocketPath: s.ServiceSocketPath,
ServiceTaggedAddresses: svcTaggedAddrs,
ServicePort: s.ServicePort,
ServiceMeta: nsmeta,
ServiceWeights: s.ServiceWeights,
ServiceEnableTagOverride: s.ServiceEnableTagOverride,
ServiceProxy: s.ServiceProxy,
ServiceConnect: s.ServiceConnect,
ServiceLocality: s.ServiceLocality,
RaftIndex: RaftIndex{
CreateIndex: s.CreateIndex,
ModifyIndex: s.ModifyIndex,
},
EnterpriseMeta: s.EnterpriseMeta,
PeerName: s.PeerName,
}
}
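// Illustrative note, not part of the original source: the clone above copies
// ServiceTags, ServiceMeta, and ServiceTaggedAddresses so they share no state
// with the receiver, while the node-related fields (ID, Address,
// TaggedAddresses, NodeMeta) are intentionally left at their zero values as
// described on ServiceNode.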
// ToNodeService converts the given service node to a node service.
func (s *ServiceNode) ToNodeService() *NodeService {
return &NodeService{
Kind: s.ServiceKind,
ID: s.ServiceID,
Service: s.ServiceName,
Tags: s.ServiceTags,
Address: s.ServiceAddress,
TaggedAddresses: s.ServiceTaggedAddresses,
Port: s.ServicePort,
SocketPath: s.ServiceSocketPath,
Meta: s.ServiceMeta,
Weights: &s.ServiceWeights,
EnableTagOverride: s.ServiceEnableTagOverride,
Proxy: s.ServiceProxy,
Connect: s.ServiceConnect,
PeerName: s.PeerName,
EnterpriseMeta: s.EnterpriseMeta,
Locality: s.ServiceLocality,
RaftIndex: RaftIndex{
CreateIndex: s.CreateIndex,
ModifyIndex: s.ModifyIndex,
},
}
}
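// Illustrative sketch, not part of the original source: ToNodeService keeps
// only the service-level data, so a catalog row can be converted back into the
// NodeService shape that was registered, e.g.
//
//	sn := &ServiceNode{Node: "node-1", ServiceID: "web-1", ServiceName: "web", ServicePort: 8080}
//	ns := sn.ToNodeService() // ns.ID == "web-1", ns.Service == "web", ns.Port == 8080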
func (sn *ServiceNode) CompoundServiceID() ServiceID {
id := sn.ServiceID
if id == "" {
id = sn.ServiceName
}
// copy the ent meta and normalize it
entMeta := sn.EnterpriseMeta
entMeta.Normalize()
return ServiceID{
ID: id,
EnterpriseMeta: entMeta,
}
}
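// Illustrative note, not part of the original source: CompoundServiceID falls
// back to the service name when ServiceID is empty, and CompoundServiceName
// below falls back the other way; both normalize the enterprise meta before
// embedding it.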
func (sn *ServiceNode) CompoundServiceName() PeeredServiceName {
name := sn.ServiceName
if name == "" {
name = sn.ServiceID
}
// copy the ent meta and normalize it
entMeta := sn.EnterpriseMeta
entMeta.Normalize()
return PeeredServiceName{
ServiceName: ServiceName{
Name: name,
EnterpriseMeta: entMeta,
},
Peer: sn.PeerName,
}
}
// Weights represents the weights used by DNS for a given status
type Weights struct {
Passing int
Warning int
}
type ServiceNodes []*ServiceNode
// ServiceKind is the kind of service being registered.
type ServiceKind string
func (k ServiceKind) Normalized() string {
if k == ServiceKindTypical {
return "typical"
}
return string(k)
}
// IsProxy returns whether the ServiceKind is a connect proxy or gateway.
func (k ServiceKind) IsProxy() bool {
switch k {
case ServiceKindConnectProxy,
ServiceKindMeshGateway,
ServiceKindTerminatingGateway,
ServiceKindIngressGateway,
ServiceKindAPIGateway:
return true
}
return false
}
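// Illustrative sketch, not part of the original source:
//
//	ServiceKindConnectProxy.IsProxy() // true
//	ServiceKindMeshGateway.IsProxy()  // true
//	ServiceKindTypical.IsProxy()      // false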
const (
// ServiceKindTypical is a typical, classic Consul service. This is
// represented by the absence of a value. This was chosen for ease of
// backwards compatibility: existing services in the catalog would
// default to the typical service.
ServiceKindTypical ServiceKind = ""
// ServiceKindConnectProxy is a proxy for the Consul Service Mesh. This
// service proxies another service within Consul and speaks the connect
// protocol.
ServiceKindConnectProxy ServiceKind = "connect-proxy"
// ServiceKindMeshGateway is a Mesh Gateway for the Consul Service Mesh.
// This service will proxy connections based off the SNI header set by other
// connect proxies
ServiceKindMeshGateway ServiceKind = "mesh-gateway"
// ServiceKindTerminatingGateway is a Terminating Gateway for the Consul Service
// Mesh feature. This service will proxy connections to services outside the mesh.
ServiceKindTerminatingGateway ServiceKind = "terminating-gateway"
// ServiceKindIngressGateway is an Ingress Gateway for the Consul Service Mesh.
// This service allows external traffic to enter the mesh based on
// centralized configuration.
ServiceKindIngressGateway ServiceKind = "ingress-gateway"
// ServiceKindAPIGateway is an API Gateway for the Consul Service Mesh.
// This service allows external traffic to enter the mesh based on
// centralized configuration.
ServiceKindAPIGateway ServiceKind = "api-gateway"
// ServiceKindDestination is a Destination for the Consul Service Mesh feature.
// This service allows external traffic to exit the mesh through a terminating gateway
// based on centralized configuration.
ServiceKindDestination ServiceKind = "destination"
// ServiceKindConnectEnabled is used to indicate whether a service is either
// connect-native or if the service has a corresponding sidecar. It is used for
// internal query purposes and should not be exposed to users as a valid Kind
// option.
ServiceKindConnectEnabled ServiceKind = "connect-enabled"
)
// ServiceAddress holds an address and port of a service
type ServiceAddress struct {
Address string
Port int
}
func (a ServiceAddress) ToAPIServiceAddress() api.ServiceAddress {
return api.ServiceAddress{Address: a.Address, Port: a.Port}
}
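// Illustrative sketch, not part of the original source: this is a plain
// field-for-field conversion into the api package's type, e.g.
//
//	addr := ServiceAddress{Address: "10.0.0.1", Port: 8443}
//	apiAddr := addr.ToAPIServiceAddress() // api.ServiceAddress{Address: "10.0.0.1", Port: 8443}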
const SidecarProxySuffix = "-sidecar-proxy"
// NodeService is a service provided by a node
type NodeService struct {
// Kind is the kind of service this is. Different kinds of services may
// have differing validation, DNS behavior, etc. An empty kind will default
// to the typical service kind (ServiceKindTypical). See ServiceKind for the full list of kinds.
Kind ServiceKind `json:",omitempty"`
ID string
Service string
Tags []string
Address string
TaggedAddresses map[string]ServiceAddress `json:",omitempty"`
Meta map[string]string
Port int `json:",omitempty"`
SocketPath string `json:",omitempty"` // TODO This might be integrated into Address somehow, but not sure about the ergonomics. Only one of (address,port) or socketpath can be defined.
Weights *Weights
EnableTagOverride bool
Locality *Locality `json:",omitempty" bexpr:"-"`
// Proxy is the configuration set for Kind = connect-proxy. It is mandatory in
// that case and an error to be set for any other kind. This config is part of
// a proxy service definition. ProxyConfig may be a more natural name here, but
// it's confusing for the UX because one of the fields in ConnectProxyConfig is
// also called just "Config"
Proxy ConnectProxyConfig
// Connect are the Connect settings for a service. This is purposely NOT
// a pointer so that we never have to nil-check this.
Connect ServiceConnect
// TODO: rename to reflect that this is used to express future intent to register.
// LocallyRegisteredAsSidecar is private as it is only used by a local agent
// state to track if the service was or will be registered from a nested sidecar_service
// block. We need to track that so we can know whether we need to deregister
// it automatically too if it's removed from the service definition or if the
// parent service is deregistered. Relying only on ID would cause us to
// deregister regular services if they happen to be registered using the same
// ID scheme as our sidecars do by default. We could use meta but that gets
// unpleasant because we can't use the consul- prefix from an agent (reserved
// for use internally but in practice that means within the state store or in
// responses only), and it leaks the detail publicly which people might rely
// on which is a bit unpleasant for something that is meant to be config-file
// syntax sugar. Note this is not translated to ServiceNode and friends and
// may not be set on a NodeService that isn't the one the agent registered and
// keeps in its local state. We never want this rendered in JSON as it's
// internal only. Right now our agent endpoints return api structs which don't
// include it, but this is a safety net in case we change that or there is
// somewhere this is used in API output.
LocallyRegisteredAsSidecar bool `json:"-" bexpr:"-"`
acl.EnterpriseMeta `hcl:",squash" mapstructure:",squash" bexpr:"-"`
// If not empty, PeerName represents the peer that the NodeService was imported from.
PeerName string
RaftIndex `bexpr:"-"`
}
// PeeringServiceMeta is read-only information provided from an exported peer.
type PeeringServiceMeta struct {
SNI []string `json:",omitempty"`
SpiffeID []string `json:",omitempty"`
Protocol string `json:",omitempty"`
}
func (m *PeeringServiceMeta) PrimarySNI() string {
if m == nil || len(m.SNI) == 0 {
return ""
}
return m.SNI[0]
}
func (ns *NodeService) FillAuthzContext(ctx *acl.AuthorizerContext) {
if ctx == nil {
return
}
ctx.Peer = ns.PeerName
ns.EnterpriseMeta.FillAuthzContext(ctx)
}
func (ns *NodeService) BestAddress(wan bool) (string, int) {
addr := ns.Address
port := ns.Port
if wan {
if wan, ok := ns.TaggedAddresses[TaggedAddressWAN]; ok {
addr = wan.Address
if wan.Port != 0 {
port = wan.Port
}
}
}
return addr, port
}
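// An illustrative sketch (not part of the original file; the addresses and
// ports below are made up) showing that a WAN tagged address, when present,
// overrides the plain service address and port:
//
//	ns := &NodeService{
//		Service: "web",
//		Address: "10.0.0.1",
//		Port:    8080,
//		TaggedAddresses: map[string]ServiceAddress{
//			TaggedAddressWAN: {Address: "198.51.100.10", Port: 443},
//		},
//	}
//	addr, port := ns.BestAddress(false) // "10.0.0.1", 8080
//	addr, port = ns.BestAddress(true)   // "198.51.100.10", 443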
func (ns *NodeService) CompoundServiceID() ServiceID {
id := ns.ID
if id == "" {
id = ns.Service
}
// copy the ent meta and normalize it
entMeta := ns.EnterpriseMeta
entMeta.Normalize()
return ServiceID{
ID: id,
EnterpriseMeta: entMeta,
}
}
func (ns *NodeService) CompoundServiceName() ServiceName {
name := ns.Service
if name == "" {
name = ns.ID
}
// copy the ent meta and normalize it
entMeta := ns.EnterpriseMeta
entMeta.Normalize()
return ServiceName{
Name: name,
EnterpriseMeta: entMeta,
}
}
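// An illustrative sketch (not part of the original file): CompoundServiceID
// falls back to the Service name when ID is empty, and CompoundServiceName
// falls back to the ID when Service is empty; both normalize the enterprise meta.
//
//	ns := &NodeService{Service: "web"}
//	ns.CompoundServiceID().ID     // "web"
//	ns.CompoundServiceName().Name // "web"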
// UniqueID is a unique identifier for a service instance within a datacenter by encoding:
// node/namespace/service_id
//
// Note: We do not have strict character restrictions in all node names, so this should NOT be split on / to retrieve components.
func UniqueID(node string, compoundID string) string {
// TODO(partitions)
return fmt.Sprintf("%s/%s", node, compoundID)
}
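// An illustrative sketch (not part of the original file): the identifier is a
// plain join of node name and compound service ID, which is why it cannot be
// split on "/" to recover its components.
//
//	UniqueID("node-1", "web-1") // "node-1/web-1"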
// ServiceConnect are the shared Connect settings between all service
// definitions from the agent to the state store.
type ServiceConnect struct {
// Native is true when this service can natively understand Connect.
Native bool `json:",omitempty"`
// SidecarService is a nested Service Definition to register at the same time.
// It's purely a convenience mechanism to allow specifying a sidecar service
// along with the application service definition. Its nested nature allows
// all of the fields to be defaulted which can reduce the amount of
// boilerplate needed to register a sidecar service separately, but the end
// result is identical to just making a second service registration via any
// other means.
SidecarService *ServiceDefinition `json:",omitempty" bexpr:"-"`
PeerMeta *PeeringServiceMeta `json:",omitempty" bexpr:"-"`
}
func (t *ServiceConnect) UnmarshalJSON(data []byte) (err error) {
type Alias ServiceConnect
aux := &struct {
SidecarServiceSnake *ServiceDefinition `json:"sidecar_service"`
*Alias
}{
Alias: (*Alias)(t),
}
if err = json.Unmarshal(data, &aux); err != nil {
return err
}
if t.SidecarService == nil && aux != nil {
t.SidecarService = aux.SidecarServiceSnake
}
return nil
}
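// An illustrative sketch (not part of the original file): both the CamelCase
// and snake_case keys for the nested sidecar service are accepted when decoding.
//
//	var sc ServiceConnect
//	_ = json.Unmarshal([]byte(`{"Native": true, "sidecar_service": {"Port": 9090}}`), &sc)
//	// sc.Native == true and sc.SidecarService != nil (taken from the snake_case key)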
// IsSidecarProxy returns true if the NodeService is a sidecar proxy.
func (s *NodeService) IsSidecarProxy() bool {
return s.Kind == ServiceKindConnectProxy &&
(s.Proxy.DestinationServiceID != "" || s.Proxy.DestinationServiceName != "")
}
func (s *NodeService) IsGateway() bool {
return s.Kind == ServiceKindMeshGateway ||
s.Kind == ServiceKindTerminatingGateway ||
s.Kind == ServiceKindIngressGateway ||
s.Kind == ServiceKindAPIGateway
}
// Validate validates the node service configuration.
//
// NOTE(mitchellh): This currently only validates fields for a ConnectProxy.
// Historically validation has been directly in the Catalog.Register RPC.
// ConnectProxy validation was moved here for easier table testing, but
// other validation still exists in Catalog.Register.
func (s *NodeService) Validate() error {
var result error
if err := s.Locality.Validate(); err != nil {
result = multierror.Append(result, err)
}
if s.Kind == ServiceKindConnectProxy {
if s.Port == 0 && s.SocketPath == "" {
result = multierror.Append(result, fmt.Errorf("Port or SocketPath must be set for a %s", s.Kind))
}
}
commonValidation := s.ValidateForAgent()
if commonValidation != nil {
result = multierror.Append(result, commonValidation)
}
return result
}
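// An illustrative sketch (not part of the original file; the names and port
// are made up): a connect-proxy definition without a Port or SocketPath fails
// this check, and setting a port clears that particular error.
//
//	ns := &NodeService{
//		Kind:    ServiceKindConnectProxy,
//		Service: "web-sidecar-proxy",
//		Proxy:   ConnectProxyConfig{DestinationServiceName: "web"},
//	}
//	err := ns.Validate() // includes "Port or SocketPath must be set ..."
//	ns.Port = 21000
//	err = ns.Validate() // the Port/SocketPath error is no longer returned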
// ValidateForAgent does a subset validation, with the assumption that a local agent can assist with missing values.
//
// I.e. in the catalog case, a local agent cannot be assumed to facilitate auto-assignment of port or socket path,
// so additional checks are needed.
func (s *NodeService) ValidateForAgent() error {
var result error
// TODO(partitions): remember to double check that this doesn't cross partition boundaries
// ConnectProxy validation
if s.Kind == ServiceKindConnectProxy {
if strings.TrimSpace(s.Proxy.DestinationServiceName) == "" {
result = multierror.Append(result, fmt.Errorf(
"Proxy.DestinationServiceName must be non-empty for Connect proxy "+
"services"))
}
if s.Proxy.DestinationServiceName == WildcardSpecifier {
result = multierror.Append(result, fmt.Errorf(
"Proxy.DestinationServiceName must not be a wildcard for Connect proxy "+
"services"))
}
if s.Connect.Native {
result = multierror.Append(result, fmt.Errorf(
"A Proxy cannot also be Connect Native, only typical services"))
}
if err := validateOpaqueProxyConfig(s.Proxy.Config); err != nil {
result = multierror.Append(result, fmt.Errorf("Proxy.Config: %w", err))
}
// ensure we don't have multiple upstreams for the same service
var (
upstreamKeys = make(map[UpstreamKey]struct{})
bindAddrs = make(map[string]struct{})
)
for _, u := range s.Proxy.Upstreams {
destinationPartition := u.DestinationPartition
if destinationPartition == "" {
// If we have a set DestinationPeer, then DestinationPartition
// must match the NodeService's Partition
if u.DestinationPeer != "" {
destinationPartition = s.PartitionOrDefault()
} else {
destinationPartition = acl.DefaultPartitionName
}
}
if u.DestinationPeer == "" {
// Cross-datacenter upstreams are only allowed when both the source and destination are in the default partition.
if u.Datacenter != "" && (destinationPartition != acl.DefaultPartitionName || s.PartitionOrDefault() != "default") {
result = multierror.Append(result, fmt.Errorf(
"upstreams cannot target another datacenter in non default partition"))
continue
}
} else {
if destinationPartition != s.PartitionOrDefault() {
result = multierror.Append(result, fmt.Errorf(
"upstreams must target peers in the same partition as the service"))
continue
}
}
if err := u.Validate(); err != nil {
result = multierror.Append(result, err)
continue
}
uk := u.ToKey()
if _, ok := upstreamKeys[uk]; ok {
result = multierror.Append(result, fmt.Errorf(
"upstreams cannot contain duplicates of %s", uk))
continue
}
upstreamKeys[uk] = struct{}{}
addr := u.UpstreamAddressToString()
// Centrally configured upstreams will fail this check if there are multiple because they do not have an address/port.
// Only consider non-centrally configured upstreams in this check since those are the ones we create listeners for.
if _, ok := bindAddrs[addr]; ok && !u.CentrallyConfigured {
result = multierror.Append(result, fmt.Errorf(
"upstreams cannot contain duplicates by local bind address and port or unix path; %q is specified twice", addr))
continue
}
bindAddrs[addr] = struct{}{}
}
var knownListeners = make(map[int]bool)
for _, path := range s.Proxy.Expose.Paths {
if path.Path == "" {
result = multierror.Append(result, fmt.Errorf("expose.paths: empty path exposed"))
}
if seen := knownListeners[path.ListenerPort]; seen {
result = multierror.Append(result, fmt.Errorf("expose.paths: duplicate listener ports exposed"))
}
knownListeners[path.ListenerPort] = true
if path.ListenerPort <= 0 || path.ListenerPort > 65535 {
result = multierror.Append(result, fmt.Errorf("expose.paths: invalid listener port: %d", path.ListenerPort))
}
path.Protocol = strings.ToLower(path.Protocol)
if ok := allowedExposeProtocols[path.Protocol]; !ok && path.Protocol != "" {
protocols := make([]string, 0)
for p := range allowedExposeProtocols {
protocols = append(protocols, p)
}
result = multierror.Append(result,
fmt.Errorf("protocol '%s' not supported for path: %s, must be in: %v",
path.Protocol, path.Path, protocols))
}
}
}
// Gateway validation
if s.IsGateway() {
// Non-ingress gateways must have a port
if s.Port == 0 && s.Kind != ServiceKindIngressGateway && s.Kind != ServiceKindAPIGateway {
result = multierror.Append(result, fmt.Errorf("Port must be non-zero for a %s", s.Kind))
}
// Gateways cannot have sidecars
if s.Connect.SidecarService != nil {
result = multierror.Append(result, fmt.Errorf("A %s cannot have a sidecar service defined", s.Kind))
}
if s.Proxy.DestinationServiceName != "" {
result = multierror.Append(result, fmt.Errorf("The Proxy.DestinationServiceName configuration is invalid for a %s", s.Kind))
}
if s.Proxy.DestinationServiceID != "" {
result = multierror.Append(result, fmt.Errorf("The Proxy.DestinationServiceID configuration is invalid for a %s", s.Kind))
}
if s.Proxy.LocalServiceAddress != "" {
result = multierror.Append(result, fmt.Errorf("The Proxy.LocalServiceAddress configuration is invalid for a %s", s.Kind))
}
if s.Proxy.LocalServicePort != 0 {
result = multierror.Append(result, fmt.Errorf("The Proxy.LocalServicePort configuration is invalid for a %s", s.Kind))
}
if s.Proxy.LocalServiceSocketPath != "" {
result = multierror.Append(result, fmt.Errorf("The Proxy.LocalServiceSocketPath configuration is invalid for a %s", s.Kind))
}
if len(s.Proxy.Upstreams) != 0 {
result = multierror.Append(result, fmt.Errorf("The Proxy.Upstreams configuration is invalid for a %s", s.Kind))
}
}
// Nested sidecar validation
if s.Connect.SidecarService != nil {
if s.Connect.SidecarService.ID != "" {
result = multierror.Append(result, fmt.Errorf(
"A SidecarService cannot specify an ID as this is managed by the "+
"agent"))
}
if s.Connect.SidecarService.Connect != nil {
if s.Connect.SidecarService.Connect.SidecarService != nil {
result = multierror.Append(result, fmt.Errorf(
"A SidecarService cannot have a nested SidecarService"))
}
}
}
if s.Connect.Native && s.Port == 0 && s.SocketPath == "" {
result = multierror.Append(result, fmt.Errorf(
"Port or SocketPath must be set for a Connect native service."))
}
return result
}
// IsSame checks if one NodeService is the same as another, without looking
// at the Raft information (that's why we didn't call it IsEqual). This is
// useful for seeing if an update would be idempotent for all the functional
// parts of the structure.
func (s *NodeService) IsSame(other *NodeService) bool {
if s.ID != other.ID ||
s.Service != other.Service ||
!reflect.DeepEqual(s.Tags, other.Tags) ||
s.Address != other.Address ||
s.Port != other.Port ||
s.SocketPath != other.SocketPath ||
!reflect.DeepEqual(s.TaggedAddresses, other.TaggedAddresses) ||
!reflect.DeepEqual(s.Weights, other.Weights) ||
!reflect.DeepEqual(s.Meta, other.Meta) ||
!reflect.DeepEqual(s.Locality, other.Locality) ||
s.EnableTagOverride != other.EnableTagOverride ||
s.Kind != other.Kind ||
!reflect.DeepEqual(s.Proxy, other.Proxy) ||
s.Connect != other.Connect ||
s.PeerName != other.PeerName ||
!s.EnterpriseMeta.IsSame(&other.EnterpriseMeta) {
return false
}
return true
}
// IsSameService checks if one Service of a ServiceNode is the same as another,
// without looking at the Raft information or Node information (that's why we
// didn't call it IsEqual).
// This is useful for seeing if an update would be idempotent for all the functional
// parts of the structure.
// In a similar fashion to ToNodeService(), fields related to Node are ignored;
// see ServiceNode for more information.
func (s *ServiceNode) IsSameService(other *ServiceNode) bool {
// Skip the following fields, see ServiceNode definition
// Address string
// Datacenter string
// TaggedAddresses map[string]string
// NodeMeta map[string]string
if s.ID != other.ID ||
!strings.EqualFold(s.Node, other.Node) ||
s.ServiceKind != other.ServiceKind ||
s.ServiceID != other.ServiceID ||
s.ServiceName != other.ServiceName ||
!reflect.DeepEqual(s.ServiceTags, other.ServiceTags) ||
s.ServiceAddress != other.ServiceAddress ||
!reflect.DeepEqual(s.ServiceTaggedAddresses, other.ServiceTaggedAddresses) ||
s.ServicePort != other.ServicePort ||
!reflect.DeepEqual(s.ServiceMeta, other.ServiceMeta) ||
!reflect.DeepEqual(s.ServiceWeights, other.ServiceWeights) ||
s.ServiceEnableTagOverride != other.ServiceEnableTagOverride ||
!reflect.DeepEqual(s.ServiceProxy, other.ServiceProxy) ||
!reflect.DeepEqual(s.ServiceConnect, other.ServiceConnect) ||
!s.EnterpriseMeta.IsSame(&other.EnterpriseMeta) {
return false
}
return true
}
// ToServiceNode converts the given node service to a service node.
func (s *NodeService) ToServiceNode(node string) *ServiceNode {
theWeights := Weights{
Passing: 1,
Warning: 1,
}
if s.Weights != nil {
if err := ValidateWeights(s.Weights); err == nil {
theWeights = *s.Weights
}
}
return &ServiceNode{
// Skip ID, see ServiceNode definition.
Node: node,
// Skip Address, see ServiceNode definition.
// Skip TaggedAddresses, see ServiceNode definition.
ServiceKind: s.Kind,
ServiceID: s.ID,
ServiceName: s.Service,
ServiceTags: s.Tags,
ServiceAddress: s.Address,
ServiceTaggedAddresses: s.TaggedAddresses,
ServicePort: s.Port,
ServiceSocketPath: s.SocketPath,
ServiceMeta: s.Meta,
ServiceWeights: theWeights,
ServiceEnableTagOverride: s.EnableTagOverride,
ServiceProxy: s.Proxy,
ServiceConnect: s.Connect,
ServiceLocality: s.Locality,
EnterpriseMeta: s.EnterpriseMeta,
PeerName: s.PeerName,
RaftIndex: RaftIndex{
CreateIndex: s.CreateIndex,
ModifyIndex: s.ModifyIndex,
},
}
}
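// An illustrative sketch (not part of the original file): when Weights is nil
// or fails validation, the resulting ServiceNode gets the default
// Passing=1/Warning=1 weights.
//
//	ns := &NodeService{ID: "web-1", Service: "web", Port: 8080}
//	sn := ns.ToServiceNode("node-1")
//	// sn.ServiceWeights == Weights{Passing: 1, Warning: 1}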
// NodeServices represents services provided by Node.
// Services is a map of service IDs to services.
type NodeServices struct {
Node *Node
Services map[string]*NodeService
}
// NodeServiceList represents services provided by Node.
// Services is a list of services.
type NodeServiceList struct {
Node *Node
Services []*NodeService
}
// HealthCheck represents a single check on a given node.
type HealthCheck struct {
Node string
CheckID types.CheckID // Unique per-node ID
Name string // Check name
Status string // The current check status
Notes string // Additional notes with the status
Output string // Holds output of script runs
ServiceID string // optional associated service
ServiceName string // optional service name
ServiceTags []string // optional service tags
Type string // Check type: http/ttl/tcp/udp/etc
Interval string // from definition
Timeout string // from definition
// ExposedPort is the port of the exposed Envoy listener representing the
// HTTP or GRPC health check of the service.
ExposedPort int
// PeerName is the name of the peer the check was imported from.
// It is empty if the check was registered locally.
PeerName string `json:",omitempty"`
Definition HealthCheckDefinition `bexpr:"-"`
acl.EnterpriseMeta `hcl:",squash" mapstructure:",squash" bexpr:"-"`
RaftIndex `bexpr:"-"`
}
func (hc *HealthCheck) FillAuthzContext(ctx *acl.AuthorizerContext) {
if ctx == nil {
return
}
ctx.Peer = hc.PeerName
hc.EnterpriseMeta.FillAuthzContext(ctx)
}
func (hc *HealthCheck) PeerOrEmpty() string {
return hc.PeerName
}
func (hc *HealthCheck) NodeIdentity() Identity {
return Identity{
ID: hc.Node,
EnterpriseMeta: *NodeEnterpriseMetaInPartition(hc.PartitionOrDefault()),
}
}
func (hc *HealthCheck) CompoundServiceID() ServiceID {
id := hc.ServiceID
if id == "" {
id = hc.ServiceName
}
entMeta := hc.EnterpriseMeta
entMeta.Normalize()
return ServiceID{
ID: id,
EnterpriseMeta: entMeta,
}
}
func (hc *HealthCheck) CompoundCheckID() CheckID {
entMeta := hc.EnterpriseMeta
entMeta.Normalize()
return CheckID{
ID: hc.CheckID,
EnterpriseMeta: entMeta,
}
}
type HealthCheckDefinition struct {
HTTP string `json:",omitempty"`
TLSServerName string `json:",omitempty"`
TLSSkipVerify bool `json:",omitempty"`
Header map[string][]string `json:",omitempty"`
Method string `json:",omitempty"`
Body string `json:",omitempty"`
DisableRedirects bool `json:",omitempty"`
TCP string `json:",omitempty"`
TCPUseTLS bool `json:",omitempty"`
UDP string `json:",omitempty"`
H2PING string `json:",omitempty"`
OSService string `json:",omitempty"`
H2PingUseTLS bool `json:",omitempty"`
Interval time.Duration `json:",omitempty"`
OutputMaxSize uint `json:",omitempty"`
Timeout time.Duration `json:",omitempty"`
DeregisterCriticalServiceAfter time.Duration `json:",omitempty"`
ScriptArgs []string `json:",omitempty"`
DockerContainerID string `json:",omitempty"`
Shell string `json:",omitempty"`
GRPC string `json:",omitempty"`
GRPCUseTLS bool `json:",omitempty"`
AliasNode string `json:",omitempty"`
AliasService string `json:",omitempty"`
TTL time.Duration `json:",omitempty"`
}
func (d *HealthCheckDefinition) MarshalJSON() ([]byte, error) {
type Alias HealthCheckDefinition
exported := &struct {
Interval string `json:",omitempty"`
OutputMaxSize uint `json:",omitempty"`
Timeout string `json:",omitempty"`
DeregisterCriticalServiceAfter string `json:",omitempty"`
*Alias
}{
Interval: d.Interval.String(),
OutputMaxSize: d.OutputMaxSize,
Timeout: d.Timeout.String(),
DeregisterCriticalServiceAfter: d.DeregisterCriticalServiceAfter.String(),
Alias: (*Alias)(d),
}
if d.Interval == 0 {
exported.Interval = ""
}
if d.Timeout == 0 {
exported.Timeout = ""
}
if d.DeregisterCriticalServiceAfter == 0 {
exported.DeregisterCriticalServiceAfter = ""
}
return json.Marshal(exported)
}
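// An illustrative sketch (not part of the original file): duration fields are
// rendered as strings, and zero durations are dropped from the output entirely.
//
//	d := &HealthCheckDefinition{HTTP: "http://localhost:8080/health", Interval: 10 * time.Second}
//	out, _ := json.Marshal(d)
//	// out contains "Interval":"10s"; Timeout and DeregisterCriticalServiceAfter are omitted.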
func (t *HealthCheckDefinition) UnmarshalJSON(data []byte) (err error) {
type Alias HealthCheckDefinition
aux := &struct {
Interval interface{}
Timeout interface{}
DeregisterCriticalServiceAfter interface{}
TTL interface{}
*Alias
}{
Alias: (*Alias)(t),
}
if err := json.Unmarshal(data, &aux); err != nil {
return err
}
if aux.Interval != nil {
switch v := aux.Interval.(type) {
case string:
if t.Interval, err = time.ParseDuration(v); err != nil {
return err
}
case float64:
t.Interval = time.Duration(v)
}
}
if aux.Timeout != nil {
switch v := aux.Timeout.(type) {
case string:
if t.Timeout, err = time.ParseDuration(v); err != nil {
return err
}
case float64:
t.Timeout = time.Duration(v)
}
}
if aux.DeregisterCriticalServiceAfter != nil {
switch v := aux.DeregisterCriticalServiceAfter.(type) {
case string:
if t.DeregisterCriticalServiceAfter, err = time.ParseDuration(v); err != nil {
return err
}
case float64:
t.DeregisterCriticalServiceAfter = time.Duration(v)
}
}
if aux.TTL != nil {
switch v := aux.TTL.(type) {
case string:
if t.TTL, err = time.ParseDuration(v); err != nil {
return err
}
case float64:
t.TTL = time.Duration(v)
}
}
return nil
}
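// An illustrative sketch (not part of the original file): the duration fields
// accept either Go duration strings or raw nanosecond numbers, so both payloads
// below decode to a 10s interval.
//
//	var a, b HealthCheckDefinition
//	_ = json.Unmarshal([]byte(`{"Interval": "10s"}`), &a)
//	_ = json.Unmarshal([]byte(`{"Interval": 10000000000}`), &b)
//	// a.Interval == b.Interval == 10*time.Second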
// IsSame checks if one HealthCheck is the same as another, without looking
// at the Raft information (that's why we didn't call it IsEqual). This is
// useful for seeing if an update would be idempotent for all the functional
// parts of the structure.
func (c *HealthCheck) IsSame(other *HealthCheck) bool {
if !strings.EqualFold(c.Node, other.Node) ||
c.CheckID != other.CheckID ||
c.Name != other.Name ||
c.Status != other.Status ||
c.Notes != other.Notes ||
c.Output != other.Output ||
c.ServiceID != other.ServiceID ||
c.ServiceName != other.ServiceName ||
!reflect.DeepEqual(c.ServiceTags, other.ServiceTags) ||
!reflect.DeepEqual(c.Definition, other.Definition) ||
c.PeerName != other.PeerName ||
!c.EnterpriseMeta.IsSame(&other.EnterpriseMeta) {
return false
}
return true
}
// Clone returns a distinct clone of the HealthCheck. Note that the
// "ServiceTags" and "Definition.Header" fields are not deep copied.
func (c *HealthCheck) Clone() *HealthCheck {
clone := new(HealthCheck)
*clone = *c
return clone
}
func (c *HealthCheck) CheckType() *CheckType {
return &CheckType{
CheckID: c.CheckID,
Name: c.Name,
Status: c.Status,
Notes: c.Notes,
ScriptArgs: c.Definition.ScriptArgs,
AliasNode: c.Definition.AliasNode,
AliasService: c.Definition.AliasService,
HTTP: c.Definition.HTTP,
GRPC: c.Definition.GRPC,
GRPCUseTLS: c.Definition.GRPCUseTLS,
Header: c.Definition.Header,
Method: c.Definition.Method,
Body: c.Definition.Body,
DisableRedirects: c.Definition.DisableRedirects,
TCP: c.Definition.TCP,
TCPUseTLS: c.Definition.TCPUseTLS,
UDP: c.Definition.UDP,
H2PING: c.Definition.H2PING,
OSService: c.Definition.OSService,
H2PingUseTLS: c.Definition.H2PingUseTLS,
Interval: c.Definition.Interval,
DockerContainerID: c.Definition.DockerContainerID,
Shell: c.Definition.Shell,
TLSServerName: c.Definition.TLSServerName,
TLSSkipVerify: c.Definition.TLSSkipVerify,
Timeout: c.Definition.Timeout,
TTL: c.Definition.TTL,
DeregisterCriticalServiceAfter: c.Definition.DeregisterCriticalServiceAfter,
}
}
// HealthChecks is a collection of HealthCheck structs.
type HealthChecks []*HealthCheck
// CheckServiceNode is used to provide the node, its service
// definition, as well as a HealthCheck that is associated.
type CheckServiceNode struct {
Node *Node
Service *NodeService
Checks HealthChecks
}
func (csn *CheckServiceNode) BestAddress(wan bool) (uint64, string, int) {
// TODO (mesh-gateway) needs a test
// best address
// wan
// wan svc addr
// svc addr
// wan node addr
// node addr
// lan
// svc addr
// node addr
addr, port := csn.Service.BestAddress(wan)
idx := csn.Service.ModifyIndex
if addr == "" {
addr = csn.Node.BestAddress(wan)
idx = csn.Node.ModifyIndex
}
return idx, addr, port
}
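// An illustrative sketch (not part of the original file; addresses are made
// up): when the service carries no address of its own, the node's address is
// used together with the service port, and the returned index tracks whichever
// record supplied the address.
//
//	csn := CheckServiceNode{
//		Node:    &Node{Node: "node-1", Address: "10.0.0.1"},
//		Service: &NodeService{Service: "web", Port: 8080},
//	}
//	_, addr, port := csn.BestAddress(false) // "10.0.0.1", 8080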
func (csn *CheckServiceNode) CanRead(authz acl.Authorizer) acl.EnforcementDecision {
if csn.Node == nil || csn.Service == nil {
return acl.Deny
}
var authzContext acl.AuthorizerContext
csn.Service.FillAuthzContext(&authzContext)
if authz.NodeRead(csn.Node.Node, &authzContext) != acl.Allow {
return acl.Deny
}
if authz.ServiceRead(csn.Service.Service, &authzContext) != acl.Allow {
return acl.Deny
}
return acl.Allow
}
func (csn *CheckServiceNode) Locality() *Locality {
if csn.Service != nil && csn.Service.Locality != nil {
return csn.Service.Locality
}
if csn.Node != nil && csn.Node.Locality != nil {
return csn.Node.Locality
}
return nil
}
func (csn *CheckServiceNode) ExcludeBasedOnChecks(opts CheckServiceNodeFilterOptions) bool {
for _, check := range csn.Checks {
if slices.Contains(opts.IgnoreCheckIDs, check.CheckID) {
// Skip this _check_ but keep looking at other checks for this node.
continue
}
if opts.FilterType.ExcludeBasedOnStatus(check.Status) {
return true
}
}
return false
}
type CheckServiceNodes []CheckServiceNode
func (csns CheckServiceNodes) DeepCopy() CheckServiceNodes {
dup := make(CheckServiceNodes, len(csns))
for idx, v := range csns {
dup[idx] = *v.DeepCopy()
}
return dup
}
// Shuffle does an in-place random shuffle using the Fisher-Yates algorithm.
func (nodes CheckServiceNodes) Shuffle() {
for i := len(nodes) - 1; i > 0; i-- {
j := rand.Int31n(int32(i + 1))
nodes[i], nodes[j] = nodes[j], nodes[i]
}
}
func (nodes CheckServiceNodes) ToServiceDump() ServiceDump {
var ret ServiceDump
for i := range nodes {
svc := ServiceInfo{
Node: nodes[i].Node,
Service: nodes[i].Service,
Checks: nodes[i].Checks,
GatewayService: nil,
}
ret = append(ret, &svc)
}
return ret
}
// ShallowClone duplicates the slice and underlying array.
func (nodes CheckServiceNodes) ShallowClone() CheckServiceNodes {
dup := make(CheckServiceNodes, len(nodes))
copy(dup, nodes)
return dup
}
// HealthFilterType is used to filter nodes based on their health status.
type HealthFilterType int32
func (h HealthFilterType) ExcludeBasedOnStatus(status string) bool {
switch {
case h == HealthFilterExcludeCritical && status == api.HealthCritical:
return true
case h == HealthFilterIncludeOnlyPassing && status != api.HealthPassing:
return true
}
return false
}
// These are listed from most to least inclusive.
const (
HealthFilterIncludeAll HealthFilterType = 0
HealthFilterExcludeCritical HealthFilterType = 1
HealthFilterIncludeOnlyPassing HealthFilterType = 2
)
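// An illustrative sketch (not part of the original file): ExcludeCritical only
// drops critical checks, while IncludeOnlyPassing drops anything that is not
// passing (warnings included).
//
//	HealthFilterExcludeCritical.ExcludeBasedOnStatus(api.HealthCritical)   // true
//	HealthFilterExcludeCritical.ExcludeBasedOnStatus(api.HealthWarning)    // false
//	HealthFilterIncludeOnlyPassing.ExcludeBasedOnStatus(api.HealthWarning) // true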
type CheckServiceNodeFilterOptions struct {
FilterType HealthFilterType
IgnoreCheckIDs []types.CheckID
disableReceiverModification bool
}
// Filter removes nodes that are failing health checks (and any non-passing
// check if that option is selected). Note that this returns the filtered
// results AND modifies the receiver for performance.
func (nodes CheckServiceNodes) Filter(opts CheckServiceNodeFilterOptions) CheckServiceNodes {
n := len(nodes)
for i := 0; i < n; i++ {
if nodes[i].ExcludeBasedOnChecks(opts) {
nodes[i], nodes[n-1] = nodes[n-1], CheckServiceNode{}
n--
i--
}
}
return nodes[:n]
}
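// An illustrative sketch (not part of the original file; the check ID below is
// made up): excluded entries are swapped to the end and trimmed off, so the
// returned slice reuses and reorders the receiver's backing array.
//
//	nodes = nodes.Filter(CheckServiceNodeFilterOptions{
//		FilterType:     HealthFilterExcludeCritical,
//		IgnoreCheckIDs: []types.CheckID{"ignored-maintenance-check"},
//	})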
// NodeInfo is used to dump all associated information about
// a node. This is currently used for the UI only, as it is
// rather expensive to generate.
type NodeInfo struct {
ID types.NodeID
Node string
Partition string `json:",omitempty"`
PeerName string `json:",omitempty"`
Address string
TaggedAddresses map[string]string
Meta map[string]string
Services []*NodeService
Checks HealthChecks
}
func (n *NodeInfo) GetEnterpriseMeta() *acl.EnterpriseMeta {
return NodeEnterpriseMetaInPartition(n.Partition)
}
func (n *NodeInfo) PartitionOrDefault() string {
return acl.PartitionOrDefault(n.Partition)
}
// NodeDump is used to dump all the nodes with all their
// associated data. This is currently used for the UI only,
// as it is rather expensive to generate.
type NodeDump []*NodeInfo
type ServiceInfo struct {
Node *Node
Service *NodeService
Checks HealthChecks
GatewayService *GatewayService
}
type ServiceDump []*ServiceInfo
type CheckID struct {
ID types.CheckID
acl.EnterpriseMeta
}
// NamespaceOrDefault exists because acl.EnterpriseMeta uses a pointer
// receiver for this method. Remove once that is fixed.
func (c CheckID) NamespaceOrDefault() string {
return c.EnterpriseMeta.NamespaceOrDefault()
}
// PartitionOrDefault exists because acl.EnterpriseMeta uses a pointer
// receiver for this method. Remove once that is fixed.
func (c CheckID) PartitionOrDefault() string {
return c.EnterpriseMeta.PartitionOrDefault()
}
func NewCheckID(id types.CheckID, entMeta *acl.EnterpriseMeta) CheckID {
var cid CheckID
cid.ID = id
if entMeta == nil {
entMeta = DefaultEnterpriseMetaInDefaultPartition()
}
cid.EnterpriseMeta = *entMeta
cid.EnterpriseMeta.Normalize()
return cid
}
// StringHashMD5 is used mainly to populate part of the filename of a check
// definition persisted on the local agent. It is deprecated in favor of
// StringHashSHA256 and kept around for backwards compatibility.
func (cid CheckID) StringHashMD5() string {
hasher := md5.New()
hasher.Write([]byte(cid.ID))
cid.EnterpriseMeta.AddToHash(hasher, true)
return fmt.Sprintf("%x", hasher.Sum(nil))
}
// StringHashSHA256 is used mainly to populate part of the filename of a check
// definition persisted on the local agent
func (cid CheckID) StringHashSHA256() string {
hasher := sha256.New()
hasher.Write([]byte(cid.ID))
cid.EnterpriseMeta.AddToHash(hasher, true)
return fmt.Sprintf("%x", hasher.Sum(nil))
}
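// An illustrative sketch (not part of the original file; the check ID is made
// up): the hash covers the check ID plus its normalized enterprise meta and is
// stable for the same inputs, which is what makes it usable in filenames.
//
//	cid := NewCheckID(types.CheckID("mem-usage"), nil)
//	name := cid.StringHashSHA256() // 64-character hex string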
type ServiceID struct {
ID string
acl.EnterpriseMeta
}
func NewServiceID(id string, entMeta *acl.EnterpriseMeta) ServiceID {
var sid ServiceID
sid.ID = id
if entMeta == nil {
entMeta = DefaultEnterpriseMetaInDefaultPartition()
}
sid.EnterpriseMeta = *entMeta
sid.EnterpriseMeta.Normalize()
return sid
}
func (sid ServiceID) Matches(other ServiceID) bool {
return sid.ID == other.ID && sid.EnterpriseMeta.Matches(&other.EnterpriseMeta)
}
// StringHashSHA256 is used mainly to populate part of the filename of a service
// definition persisted on the local agent
func (sid ServiceID) StringHashSHA256() string {
hasher := sha256.New()
hasher.Write([]byte(sid.ID))
sid.EnterpriseMeta.AddToHash(hasher, true)
return fmt.Sprintf("%x", hasher.Sum(nil))
}
type IndexedNodes struct {
Nodes Nodes
QueryMeta
}
type IndexedServices struct {
Services Services
	// In various situations we need to know the enterprise meta the services are
	// for, in particular so the list can be properly filtered based on ACLs.
acl.EnterpriseMeta
QueryMeta
}
type Usage struct {
Usage map[string]ServiceUsage
QueryMeta
}
// ServiceUsage contains all of the usage data related to services
type ServiceUsage struct {
Services int
ServiceInstances int
ConnectServiceInstances map[string]int
BillableServiceInstances int
Nodes int
EnterpriseServiceUsage
}
// PeeredServiceName is a basic tuple of ServiceName and peer
type PeeredServiceName struct {
ServiceName ServiceName
Peer string
}
func (psn PeeredServiceName) String() string {
return fmt.Sprintf("%v:%v", psn.ServiceName.String(), psn.Peer)
}
type ServiceNameWithSamenessGroup struct {
SamenessGroup string
ServiceName
}
type ServiceName struct {
Name string
acl.EnterpriseMeta `mapstructure:",squash"`
}
func NewServiceName(name string, entMeta *acl.EnterpriseMeta) ServiceName {
var ret ServiceName
ret.Name = name
if entMeta == nil {
entMeta = DefaultEnterpriseMetaInDefaultPartition()
}
ret.EnterpriseMeta = *entMeta
ret.EnterpriseMeta.Normalize()
return ret
}
func (n ServiceName) Matches(o ServiceName) bool {
return n.Name == o.Name && n.EnterpriseMeta.Matches(&o.EnterpriseMeta)
}
func (n ServiceName) ToServiceID() ServiceID {
return ServiceID{ID: n.Name, EnterpriseMeta: n.EnterpriseMeta}
}
func ServiceGatewayVirtualIPTag(sn ServiceName) string {
return fmt.Sprintf("%s:%s", TaggedAddressVirtualIP, sn.String())
}
type ServiceList []ServiceName
// Len implements sort.Interface.
func (s ServiceList) Len() int { return len(s) }
// Swap implements sort.Interface.
func (s ServiceList) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
func (s ServiceList) Sort() { sort.Sort(s) }
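// Illustrative sketch (not part of the original file); the Less method
// required by sort.Interface is defined elsewhere in the package.
//
//	list := ServiceList{NewServiceName("web", nil), NewServiceName("api", nil)}
//	list.Sort() // sorts in place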
type IndexedServiceList struct {
Services ServiceList
QueryMeta
}
type IndexedPeeredServiceList struct {
Services []PeeredServiceName
QueryMeta
}
type IndexedServiceNodes struct {
ServiceNodes ServiceNodes
QueryMeta
}
type IndexedNodeServices struct {
// TODO: This should not be a pointer, see comments in
// agent/catalog_endpoint.go.
NodeServices *NodeServices
QueryMeta
}
type IndexedNodeServiceList struct {
NodeServices NodeServiceList
QueryMeta
}
type IndexedHealthChecks struct {
HealthChecks HealthChecks
QueryMeta
}
type IndexedCheckServiceNodes struct {
Nodes CheckServiceNodes
QueryMeta
}
type IndexedNodesWithGateways struct {
ImportedNodes CheckServiceNodes
Nodes CheckServiceNodes
Gateways GatewayServices
QueryMeta
}
type DatacenterIndexedCheckServiceNodes struct {
DatacenterNodes map[string]CheckServiceNodes
QueryMeta
}
type IndexedNodeDump struct {
ImportedDump NodeDump
Dump NodeDump
QueryMeta
}
type IndexedServiceDump struct {
Dump ServiceDump
QueryMeta
}
type IndexedGatewayServices struct {
Services GatewayServices
QueryMeta
}
type IndexedServiceTopology struct {
ServiceTopology *ServiceTopology
FilteredByACLs bool
QueryMeta
}
type ServiceTopology struct {
Upstreams CheckServiceNodes
Downstreams CheckServiceNodes
UpstreamDecisions map[string]IntentionDecisionSummary
DownstreamDecisions map[string]IntentionDecisionSummary
// MetricsProtocol is the protocol of the service being queried
MetricsProtocol string
// TransparentProxy describes whether all instances of the proxy
// service are in transparent mode.
TransparentProxy bool
// (Up|Down)streamSources are maps with labels for why each service is being
// returned. Services can be upstreams or downstreams due to
// explicit upstream definition or various types of intention policies:
// specific, wildcard, or default allow.
UpstreamSources map[string]string
DownstreamSources map[string]string
}
// IndexedConfigEntries has its own encoding logic which differs from
// ConfigEntryRequest as it has to send a slice of ConfigEntry.
type IndexedConfigEntries struct {
Kind string
Entries []ConfigEntry
QueryMeta
}
func (c *IndexedConfigEntries) MarshalBinary() (data []byte, err error) {
// bs will grow if needed but allocate enough to avoid reallocation in common
// case.
bs := make([]byte, 128)
enc := codec.NewEncoderBytes(&bs, MsgpackHandle)
// Encode length.
err = enc.Encode(len(c.Entries))
if err != nil {
return nil, err
}
// Encode kind.
err = enc.Encode(c.Kind)
if err != nil {
return nil, err
}
	// Then encode the actual value, using the alias type trick to avoid infinite recursion.
type Alias IndexedConfigEntries
err = enc.Encode(struct {
*Alias
}{
Alias: (*Alias)(c),
})
if err != nil {
return nil, err
}
return bs, nil
}
func (c *IndexedConfigEntries) UnmarshalBinary(data []byte) error {
// First decode the number of entries.
var numEntries int
dec := codec.NewDecoderBytes(data, MsgpackHandle)
if err := dec.Decode(&numEntries); err != nil {
return err
}
// Next decode the kind.
var kind string
if err := dec.Decode(&kind); err != nil {
return err
}
// Then decode the slice of ConfigEntries
c.Entries = make([]ConfigEntry, numEntries)
for i := 0; i < numEntries; i++ {
entry, err := MakeConfigEntry(kind, "")
if err != nil {
return err
}
c.Entries[i] = entry
}
// Alias juggling to prevent infinite recursive calls back to this decode
// method.
type Alias IndexedConfigEntries
as := struct {
*Alias
}{
Alias: (*Alias)(c),
}
if err := dec.Decode(&as); err != nil {
return err
}
return nil
}
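// MarshalBinary and UnmarshalBinary above are symmetric: the entry count is
// written first, then the kind, then the struct itself via the Alias type so
// the custom methods are not re-entered. Illustrative round trip (sketch, not
// part of the original file; entries is an assumed []ConfigEntry):
//
//	in := &IndexedConfigEntries{Kind: "service-defaults", Entries: entries}
//	raw, err := in.MarshalBinary()
//	if err == nil {
//		var out IndexedConfigEntries
//		err = out.UnmarshalBinary(raw)
//	}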
type IndexedGenericConfigEntries struct {
Entries []ConfigEntry
QueryMeta
}
func (c *IndexedGenericConfigEntries) MarshalBinary() (data []byte, err error) {
// bs will grow if needed but allocate enough to avoid reallocation in common
// case.
bs := make([]byte, 128)
enc := codec.NewEncoderBytes(&bs, MsgpackHandle)
if err := enc.Encode(len(c.Entries)); err != nil {
return nil, err
}
for _, entry := range c.Entries {
if err := enc.Encode(entry.GetKind()); err != nil {
return nil, err
}
if err := enc.Encode(entry); err != nil {
return nil, err
}
}
if err := enc.Encode(c.QueryMeta); err != nil {
return nil, err
}
return bs, nil
}
func (c *IndexedGenericConfigEntries) UnmarshalBinary(data []byte) error {
// First decode the number of entries.
var numEntries int
dec := codec.NewDecoderBytes(data, MsgpackHandle)
if err := dec.Decode(&numEntries); err != nil {
return err
}
// Then decode the slice of ConfigEntries
c.Entries = make([]ConfigEntry, numEntries)
for i := 0; i < numEntries; i++ {
var kind string
if err := dec.Decode(&kind); err != nil {
return err
}
entry, err := MakeConfigEntry(kind, "")
if err != nil {
return err
}
if err := dec.Decode(entry); err != nil {
return err
}
c.Entries[i] = entry
}
if err := dec.Decode(&c.QueryMeta); err != nil {
return err
}
return nil
}
// DirEntry is used to represent a directory entry. This is
// used for values in our Key-Value store.
type DirEntry struct {
LockIndex uint64
Key string
Flags uint64
Value []byte
Session string `json:",omitempty"`
acl.EnterpriseMeta `bexpr:"-"`
RaftIndex
}
// Clone returns a copy of the directory entry.
func (d *DirEntry) Clone() *DirEntry {
return &DirEntry{
LockIndex: d.LockIndex,
Key: d.Key,
Flags: d.Flags,
Value: d.Value,
Session: d.Session,
RaftIndex: RaftIndex{
CreateIndex: d.CreateIndex,
ModifyIndex: d.ModifyIndex,
},
EnterpriseMeta: d.EnterpriseMeta,
}
}
func (d *DirEntry) Equal(o *DirEntry) bool {
return d.LockIndex == o.LockIndex &&
d.Key == o.Key &&
d.Flags == o.Flags &&
bytes.Equal(d.Value, o.Value) &&
d.Session == o.Session
}
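// Note that Equal compares only the user-facing fields; RaftIndex and
// EnterpriseMeta are intentionally ignored. Illustrative sketch (not part of
// the original file):
//
//	a := &DirEntry{Key: "config/db", Value: []byte("v1")}
//	b := a.Clone()
//	b.ModifyIndex = 42
//	same := a.Equal(b) // true: Raft indexes do not participate in equality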
// IDValue implements the state.singleValueID interface for indexing.
func (d *DirEntry) IDValue() string {
return d.Key
}
type DirEntries []*DirEntry
// KVSRequest is used to operate on the Key-Value store
type KVSRequest struct {
Datacenter string
Op api.KVOp // Which operation are we performing
DirEnt DirEntry // Which directory entry
WriteRequest
}
func (r *KVSRequest) RequestDatacenter() string {
return r.Datacenter
}
// KeyRequest is used to request a key, or key prefix
type KeyRequest struct {
Datacenter string
Key string
acl.EnterpriseMeta
QueryOptions
}
func (r *KeyRequest) RequestDatacenter() string {
return r.Datacenter
}
// KeyListRequest is used to list keys
type KeyListRequest struct {
Datacenter string
Prefix string
Seperator string
QueryOptions
acl.EnterpriseMeta
}
func (r *KeyListRequest) RequestDatacenter() string {
return r.Datacenter
}
type IndexedDirEntries struct {
Entries DirEntries
QueryMeta
}
type IndexedKeyList struct {
Keys []string
QueryMeta
}
type SessionBehavior string
const (
SessionKeysRelease SessionBehavior = "release"
SessionKeysDelete = "delete"
)
const (
SessionTTLMax = 24 * time.Hour
SessionTTLMultiplier = 2
)
type Sessions []*Session
// Session is used to represent an open session in the KV store.
// It is used to associate node checks with acquired locks.
type Session struct {
ID string
Name string
Node string // TODO(partitions): ensure that the entmeta interacts with this node field properly
LockDelay time.Duration
Behavior SessionBehavior // What to do when session is invalidated
TTL string
NodeChecks []string
ServiceChecks []ServiceCheck
// Deprecated v1.7.0.
Checks []types.CheckID `json:",omitempty"`
acl.EnterpriseMeta
RaftIndex
}
type ServiceCheck struct {
ID string
Namespace string
}
// IDValue implements the state.singleValueID interface for indexing.
func (s *Session) IDValue() string {
return s.ID
}
func (s *Session) UnmarshalJSON(data []byte) (err error) {
type Alias Session
aux := &struct {
LockDelay interface{}
*Alias
}{
Alias: (*Alias)(s),
}
if err = json.Unmarshal(data, &aux); err != nil {
return err
}
if aux.LockDelay != nil {
var dur time.Duration
switch v := aux.LockDelay.(type) {
case string:
if dur, err = time.ParseDuration(v); err != nil {
return err
}
case float64:
dur = time.Duration(v)
}
// Convert low value integers into seconds
if dur < lockDelayMinThreshold {
dur = dur * time.Second
}
s.LockDelay = dur
}
return nil
}
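// UnmarshalJSON accepts LockDelay either as a duration string or as a bare
// number, and small bare numbers (below lockDelayMinThreshold) are promoted
// to seconds. Illustrative sketch (not part of the original file):
//
//	var s Session
//	_ = json.Unmarshal([]byte(`{"ID":"a","LockDelay":"15s"}`), &s) // 15s
//	_ = json.Unmarshal([]byte(`{"ID":"b","LockDelay":15}`), &s)    // also 15s, promoted from the bare 15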
type SessionOp string
const (
SessionCreate SessionOp = "create"
SessionDestroy = "destroy"
)
// SessionRequest is used to operate on sessions
type SessionRequest struct {
Datacenter string
Op SessionOp // Which operation are we performing
Session Session // Which session
WriteRequest
}
func (r *SessionRequest) RequestDatacenter() string {
return r.Datacenter
}
// SessionSpecificRequest is used to request a session by ID
type SessionSpecificRequest struct {
Datacenter string
SessionID string
// DEPRECATED in 1.7.0
Session string
acl.EnterpriseMeta
QueryOptions
}
func (r *SessionSpecificRequest) RequestDatacenter() string {
return r.Datacenter
}
type IndexedSessions struct {
Sessions Sessions
QueryMeta
}
// Coordinate stores a node name with its associated network coordinate.
type Coordinate struct {
Node string
Segment string
Partition string `json:",omitempty"` // TODO(partitions): fully thread this needle
Coord *coordinate.Coordinate
}
func (c *Coordinate) GetEnterpriseMeta() *acl.EnterpriseMeta {
return NodeEnterpriseMetaInPartition(c.Partition)
}
func (c *Coordinate) PartitionOrDefault() string {
return acl.PartitionOrDefault(c.Partition)
}
type Coordinates []*Coordinate
// IndexedCoordinate is used to represent a single node's coordinate from the state
// store.
type IndexedCoordinate struct {
Coord *coordinate.Coordinate
QueryMeta
}
// IndexedCoordinates is used to represent a list of nodes and their
// corresponding raw coordinates.
type IndexedCoordinates struct {
Coordinates Coordinates
QueryMeta
}
// DatacenterMap is used to represent a list of nodes with their raw coordinates,
// associated with a datacenter. Coordinates are only compatible between nodes in
// the same area.
type DatacenterMap struct {
Datacenter string
AreaID types.AreaID
Coordinates Coordinates
}
// CoordinateUpdateRequest is used to update the network coordinate of a given
// node.
type CoordinateUpdateRequest struct {
Datacenter string
Node string
Segment string
Coord *coordinate.Coordinate
acl.EnterpriseMeta `hcl:",squash" mapstructure:",squash"`
WriteRequest
}
// RequestDatacenter returns the datacenter for a given update request.
func (c *CoordinateUpdateRequest) RequestDatacenter() string {
return c.Datacenter
}
// EventFireRequest is used to ask a server to fire
// a Serf event. It is a bit odd, since it doesn't depend on
// the catalog or leader. Any node can respond, so it's not quite
// like a standard write request. This is used only internally.
type EventFireRequest struct {
Datacenter string
Name string
Payload []byte
// Not using WriteRequest so that any server can process
// the request. It is a bit unusual...
QueryOptions
}
func (r *EventFireRequest) RequestDatacenter() string {
return r.Datacenter
}
// EventFireResponse is used to respond to a fire request.
type EventFireResponse struct {
QueryMeta
}
type TombstoneOp string
const (
TombstoneReap TombstoneOp = "reap"
)
// TombstoneRequest is used to trigger a reaping of the tombstones
type TombstoneRequest struct {
Datacenter string
Op TombstoneOp
ReapIndex uint64
WriteRequest
}
func (r *TombstoneRequest) RequestDatacenter() string {
return r.Datacenter
}
// MsgpackHandle is a shared handle for encoding/decoding msgpack payloads
var MsgpackHandle = &codec.MsgpackHandle{
RawToString: true,
BasicHandle: codec.BasicHandle{
DecodeOptions: codec.DecodeOptions{
MapType: reflect.TypeOf(map[string]interface{}{}),
},
},
}
// Decode is used to decode a MsgPack encoded object
func Decode(buf []byte, out interface{}) error {
return codec.NewDecoder(bytes.NewReader(buf), MsgpackHandle).Decode(out)
}
// Encode is used to encode a MsgPack object with type prefix
func Encode(t MessageType, msg interface{}) ([]byte, error) {
var buf bytes.Buffer
buf.WriteByte(uint8(t))
err := codec.NewEncoder(&buf, MsgpackHandle).Encode(msg)
return buf.Bytes(), err
}
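// Encode prefixes the payload with a single MessageType byte, while Decode
// expects that byte to have already been stripped. Illustrative round trip
// (sketch, not part of the original file; req is an assumed RegisterRequest):
//
//	buf, err := Encode(RegisterRequestType, req)
//	if err == nil {
//		t := MessageType(buf[0]) // routing byte
//		var out RegisterRequest
//		err = Decode(buf[1:], &out)
//		_ = t
//	}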
func EncodeProtoInterface(t MessageType, message interface{}) ([]byte, error) {
if marshaller, ok := message.(proto.Message); ok {
return EncodeProto(t, marshaller)
}
return nil, fmt.Errorf("message does not implement proto.Message: %T", message)
}
func EncodeProto(t MessageType, pb proto.Message) ([]byte, error) {
data := make([]byte, 0, proto.Size(pb)+1)
data = append(data, uint8(t))
var err error
data, err = proto.MarshalOptions{}.MarshalAppend(data, pb)
if err != nil {
return nil, err
}
return data, nil
}
func DecodeProto(buf []byte, pb proto.Message) error {
	// Note that this assumes the leading byte indicating the type has already been stripped off.
return proto.Unmarshal(buf, pb)
}
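// EncodeProto follows the same convention: the first byte is the MessageType
// and DecodeProto assumes it has been stripped. Illustrative sketch (not part
// of the original file; t and pb are an assumed MessageType and proto.Message):
//
//	buf, err := EncodeProto(t, pb)
//	if err == nil {
//		err = DecodeProto(buf[1:], pb) // skip the leading type byte
//	}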
// CompoundResponse is an interface for gathering multiple responses. It is
// used in cross-datacenter RPC calls where more than 1 datacenter is
// expected to reply.
type CompoundResponse interface {
// Add adds a new response to the compound response
Add(interface{})
// New returns an empty response object which can be passed around by
// reference, and then passed to Add() later on.
New() interface{}
}
type KeyringOp string
const (
KeyringList KeyringOp = "list"
KeyringInstall = "install"
KeyringUse = "use"
KeyringRemove = "remove"
)
// KeyringRequest encapsulates a request to modify an encryption keyring.
// It can be used for install, remove, or use key type operations.
type KeyringRequest struct {
Operation KeyringOp
Key string
Datacenter string
Forwarded bool
RelayFactor uint8
LocalOnly bool
QueryOptions
}
func (r *KeyringRequest) RequestDatacenter() string {
return r.Datacenter
}
// KeyringResponse is a unified key response and can be used for install,
// remove, use, as well as listing key queries.
type KeyringResponse struct {
WAN bool
Datacenter string
Segment string
Partition string `json:",omitempty"`
Messages map[string]string `json:",omitempty"`
Keys map[string]int
PrimaryKeys map[string]int
NumNodes int
Error string `json:",omitempty"`
}
func (r *KeyringResponse) PartitionOrDefault() string {
return acl.PartitionOrDefault(r.Partition)
}
// KeyringResponses holds multiple responses to keyring queries. Each
// datacenter replies independently, and KeyringResponses is used as a
// container for the set of all responses.
type KeyringResponses struct {
Responses []*KeyringResponse
QueryMeta
}
func (r *KeyringResponses) Add(v interface{}) {
val := v.(*KeyringResponses)
r.Responses = append(r.Responses, val.Responses...)
}
func (r *KeyringResponses) New() interface{} {
return new(KeyringResponses)
}
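// KeyringResponses satisfies CompoundResponse: New returns an empty container
// for each remote reply and Add folds it into the aggregate. Illustrative
// sketch (not part of the original file; perDC is an assumed slice with one
// *KeyringResponses per datacenter):
//
//	total := new(KeyringResponses)
//	for _, resp := range perDC {
//		total.Add(resp)
//	}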
// String converts message type int to string
func (m MessageType) String() string {
s, ok := requestTypeStrings[m]
if ok {
return s
}
s, ok = enterpriseRequestType(m)
if ok {
return s
}
return "Unknown(" + strconv.Itoa(int(m)) + ")"
}
// This should only be used for conversions generated by MOG
func DurationToProto(d time.Duration) *durationpb.Duration {
return durationpb.New(d)
}
// This should only be used for conversions generated by MOG
func DurationFromProto(d *durationpb.Duration) time.Duration {
return d.AsDuration()
}
// This should only be used for conversions generated by MOG
func DurationPointerToProto(d *time.Duration) *durationpb.Duration {
if d == nil {
return nil
}
return durationpb.New(*d)
}
// This should only be used for conversions generated by MOG
func DurationPointerFromProto(d *durationpb.Duration) *time.Duration {
if d == nil {
return nil
}
return DurationPointer(d.AsDuration())
}
func DurationPointer(d time.Duration) *time.Duration {
return &d
}
// This should only be used for conversions generated by MOG
func TimeFromProto(s *timestamppb.Timestamp) time.Time {
return s.AsTime()
}
// This should only be used for conversions generated by MOG
func TimeToProto(s time.Time) *timestamppb.Timestamp {
return timestamppb.New(s)
}
// IsZeroProtoTime returns true if the time is the minimum protobuf timestamp
// (the Unix epoch).
func IsZeroProtoTime(t *timestamppb.Timestamp) bool {
return t.Seconds == 0 && t.Nanos == 0
}
// Locality identifies where a given entity is running.
type Locality struct {
	// Region is the region the zone belongs to.
Region string `json:",omitempty"`
// Zone is the zone the entity is running in.
Zone string `json:",omitempty"`
}
// ToAPI converts a struct Locality to an API Locality.
func (l *Locality) ToAPI() *api.Locality {
if l == nil {
return nil
}
return &api.Locality{
Region: l.Region,
Zone: l.Zone,
}
}
func (l *Locality) GetRegion() string {
if l == nil {
return ""
}
return l.Region
}
func (l *Locality) Validate() error {
if l == nil {
return nil
}
if l.Region == "" && l.Zone != "" {
return fmt.Errorf("zone cannot be set without region")
}
return nil
}
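// Illustrative sketch (not part of the original file):
//
//	ok := &Locality{Region: "us-east-1", Zone: "us-east-1a"}
//	_ = ok.Validate() // nil
//	bad := &Locality{Zone: "us-east-1a"}
//	_ = bad.Validate() // error: zone cannot be set without region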