From 7c19c701e15dd1b40d63c2441149b3d32c3face8 Mon Sep 17 00:00:00 2001
From: Max Bowsher
Date: Sun, 19 Jun 2022 11:58:23 +0100
Subject: [PATCH 01/93] Fix incorrect name and doc for kv_entries metric

The name of the metric registered with the metrics library to provide
the help string did not match the name used by the code that actually
sets the metric value - bring the two into sync.

The help message was also incorrect. Rather than copying the help
message from telemetry.mdx, which was correct but somewhat unnaturally
worded, update both of them to a new wording.
---
 agent/consul/usagemetrics/usagemetrics.go | 4 ++--
 website/content/docs/agent/telemetry.mdx  | 2 +-
 2 files changed, 3 insertions(+), 3 deletions(-)

diff --git a/agent/consul/usagemetrics/usagemetrics.go b/agent/consul/usagemetrics/usagemetrics.go
index 6733ed3df0..0d74e0e72f 100644
--- a/agent/consul/usagemetrics/usagemetrics.go
+++ b/agent/consul/usagemetrics/usagemetrics.go
@@ -37,8 +37,8 @@ var Gauges = []prometheus.GaugeDefinition{
 		Help: "Measures the current number of server agents registered with Consul. It is only emitted by Consul servers. Added in v1.9.6.",
 	},
 	{
-		Name: []string{"consul", "kv", "entries"},
-		Help: "Measures the current number of server agents registered with Consul. It is only emitted by Consul servers. Added in v1.10.3.",
+		Name: []string{"consul", "state", "kv_entries"},
+		Help: "Measures the current number of entries in the Consul KV store. It is only emitted by Consul servers. Added in v1.10.3.",
 	},
 	{
 		Name: []string{"consul", "state", "connect_instances"},
diff --git a/website/content/docs/agent/telemetry.mdx b/website/content/docs/agent/telemetry.mdx
index 0435a42c84..01adb30a11 100644
--- a/website/content/docs/agent/telemetry.mdx
+++ b/website/content/docs/agent/telemetry.mdx
@@ -391,7 +391,7 @@ This is a full list of metrics emitted by Consul.
 | `consul.state.nodes` | Measures the current number of nodes registered with Consul. It is only emitted by Consul servers. Added in v1.9.0. | number of objects | gauge |
 | `consul.state.services` | Measures the current number of unique services registered with Consul, based on service name. It is only emitted by Consul servers. Added in v1.9.0. | number of objects | gauge |
 | `consul.state.service_instances` | Measures the current number of unique service instances registered with Consul. It is only emitted by Consul servers. Added in v1.9.0. | number of objects | gauge |
-| `consul.state.kv_entries` | Measures the current number of unique KV entries written in Consul. It is only emitted by Consul servers. Added in v1.10.3. | number of objects | gauge |
+| `consul.state.kv_entries` | Measures the current number of entries in the Consul KV store. It is only emitted by Consul servers. Added in v1.10.3. | number of objects | gauge |
 | `consul.state.connect_instances` | Measures the current number of unique connect service instances registered with Consul labeled by Kind (e.g. connect-proxy, connect-native, etc). Added in v1.10.4 | number of objects | gauge |
 | `consul.state.config_entries` | Measures the current number of configuration entries registered with Consul labeled by Kind (e.g. service-defaults, proxy-defaults, etc). See [Configuration Entries](/docs/connect/config-entries) for more information. Added in v1.10.4 | number of objects | gauge |
 | `consul.members.clients` | Measures the current number of client agents registered with Consul. It is only emitted by Consul servers. Added in v1.9.6.
| number of clients | gauge | From 22511ec491a2e5a06ff05dd7b02f69039fdd45a6 Mon Sep 17 00:00:00 2001 From: Jared Kirschner Date: Fri, 5 Aug 2022 10:45:24 -0700 Subject: [PATCH 02/93] Allow uppercase in proxy launch -sidecar-for arg Previously, when launching a sidecar proxy with one of the following commands: - consul connect envoy -sidecar-for=... - consul connect proxy -sidecar-for=... ... the -sidecar-for argument could only contain lowercase letters, even if the service was registered with some uppercase letters. Now, the -sidecar-for argument is treated as case-insensitive. --- .changelog/14034.txt | 3 +++ command/connect/proxy/proxy.go | 2 +- command/connect/proxy/proxy_test.go | 11 +++++++++++ 3 files changed, 15 insertions(+), 1 deletion(-) create mode 100644 .changelog/14034.txt diff --git a/.changelog/14034.txt b/.changelog/14034.txt new file mode 100644 index 0000000000..216c5406a3 --- /dev/null +++ b/.changelog/14034.txt @@ -0,0 +1,3 @@ +```release-note:bug +cli: When launching a sidecar proxy with `consul connect envoy` or `consul connect proxy`, the `-sidecar-for` service ID argument is now treated as case-insensitive. +``` diff --git a/command/connect/proxy/proxy.go b/command/connect/proxy/proxy.go index d2d0b90cf1..a0477a6a10 100644 --- a/command/connect/proxy/proxy.go +++ b/command/connect/proxy/proxy.go @@ -232,7 +232,7 @@ func LookupProxyIDForSidecar(client *api.Client, sidecarFor string) (string, err var proxyIDs []string for _, svc := range svcs { if svc.Kind == api.ServiceKindConnectProxy && svc.Proxy != nil && - strings.ToLower(svc.Proxy.DestinationServiceID) == sidecarFor { + strings.EqualFold(svc.Proxy.DestinationServiceID, sidecarFor) { proxyIDs = append(proxyIDs, svc.ID) } } diff --git a/command/connect/proxy/proxy_test.go b/command/connect/proxy/proxy_test.go index ae7b1cdfbc..28d5a9da21 100644 --- a/command/connect/proxy/proxy_test.go +++ b/command/connect/proxy/proxy_test.go @@ -110,6 +110,17 @@ func TestCommandConfigWatcher(t *testing.T) { require.Equal(t, 9999, cfg.PublicListener.BindPort) }, }, + + { + Name: "-sidecar-for, one sidecar case-insensitive", + Flags: []string{ + "-sidecar-for", "One-SideCar", + }, + Test: func(t *testing.T, cfg *proxy.Config) { + // Sanity check we got the right instance. + require.Equal(t, 9999, cfg.PublicListener.BindPort) + }, + }, } for _, tc := range cases { From 3c4fa9b4684dc62be1faa9da6a3a3c6cef3d0fb9 Mon Sep 17 00:00:00 2001 From: Daniel Kimsey Date: Wed, 10 Aug 2022 16:52:32 -0500 Subject: [PATCH 03/93] Add support for filtering the 'List Services' API 1. Create a bexpr filter for performing the filtering 2. Change the state store functions to return the raw (not aggregated) list of ServiceNodes. 3. Move the aggregate service tags by name logic out of the state store functions into a new function called from the RPC endpoint 4. Perform the filtering in the endpoint before aggregation. 
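As a rough illustration of the result (not part of this change set), the new
filter can be exercised through the Go API client's existing `Filter` query
option; the expression mirrors the one used in the endpoint tests below:

```go
package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/consul/api"
)

func main() {
	// Assumes a local Consul agent on the default address.
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}

	// v1/catalog/services now evaluates the filter against each raw
	// ServiceNode before tags are aggregated by service name.
	services, _, err := client.Catalog().Services(&api.QueryOptions{
		Filter: `ServiceName == "redis"`,
	})
	if err != nil {
		log.Fatal(err)
	}
	for name, tags := range services {
		fmt.Println(name, tags) // e.g. redis [v1 v2]
	}
}
```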
--- .changelog/11742.txt | 3 ++ agent/consul/catalog_endpoint.go | 42 ++++++++++++++++++- agent/consul/catalog_endpoint_test.go | 39 ++++++++++++++++++ agent/consul/state/catalog.go | 55 ++++--------------------- agent/consul/state/catalog_test.go | 56 +++++++++++++------------- agent/consul/state/state_store_test.go | 11 ++--- website/content/api-docs/catalog.mdx | 53 +++++++++++++++++++++++- 7 files changed, 177 insertions(+), 82 deletions(-) create mode 100644 .changelog/11742.txt diff --git a/.changelog/11742.txt b/.changelog/11742.txt new file mode 100644 index 0000000000..6c6d4c2498 --- /dev/null +++ b/.changelog/11742.txt @@ -0,0 +1,3 @@ +```release-note:improvement +api: Add filtering support to Catalog's List Services (v1/catalog/services) +``` diff --git a/agent/consul/catalog_endpoint.go b/agent/consul/catalog_endpoint.go index 111ee7b2ba..696ae314a7 100644 --- a/agent/consul/catalog_endpoint.go +++ b/agent/consul/catalog_endpoint.go @@ -565,6 +565,11 @@ func (c *Catalog) ListServices(args *structs.DCSpecificRequest, reply *structs.I return err } + filter, err := bexpr.CreateFilter(args.Filter, nil, []*structs.ServiceNode{}) + if err != nil { + return err + } + // Set reply enterprise metadata after resolving and validating the token so // that we can properly infer metadata from the token. reply.EnterpriseMeta = args.EnterpriseMeta @@ -574,10 +579,11 @@ func (c *Catalog) ListServices(args *structs.DCSpecificRequest, reply *structs.I &reply.QueryMeta, func(ws memdb.WatchSet, state *state.Store) error { var err error + var serviceNodes structs.ServiceNodes if len(args.NodeMetaFilters) > 0 { - reply.Index, reply.Services, err = state.ServicesByNodeMeta(ws, args.NodeMetaFilters, &args.EnterpriseMeta, args.PeerName) + reply.Index, serviceNodes, err = state.ServicesByNodeMeta(ws, args.NodeMetaFilters, &args.EnterpriseMeta, args.PeerName) } else { - reply.Index, reply.Services, err = state.Services(ws, &args.EnterpriseMeta, args.PeerName) + reply.Index, serviceNodes, err = state.Services(ws, &args.EnterpriseMeta, args.PeerName) } if err != nil { return err @@ -588,11 +594,43 @@ func (c *Catalog) ListServices(args *structs.DCSpecificRequest, reply *structs.I return nil } + raw, err := filter.Execute(serviceNodes) + if err != nil { + return err + } + + reply.Services = servicesTagsByName(raw.(structs.ServiceNodes)) + c.srv.filterACLWithAuthorizer(authz, reply) + return nil }) } +func servicesTagsByName(services []*structs.ServiceNode) structs.Services { + unique := make(map[string]map[string]struct{}) + for _, svc := range services { + tags, ok := unique[svc.ServiceName] + if !ok { + unique[svc.ServiceName] = make(map[string]struct{}) + tags = unique[svc.ServiceName] + } + for _, tag := range svc.ServiceTags { + tags[tag] = struct{}{} + } + } + + // Generate the output structure. + var results = make(structs.Services) + for service, tags := range unique { + results[service] = make([]string, 0, len(tags)) + for tag := range tags { + results[service] = append(results[service], tag) + } + } + return results +} + // ServiceList is used to query the services in a DC. // Returns services as a list of ServiceNames. 
func (c *Catalog) ServiceList(args *structs.DCSpecificRequest, reply *structs.IndexedServiceList) error { diff --git a/agent/consul/catalog_endpoint_test.go b/agent/consul/catalog_endpoint_test.go index ca00efaea2..daa22c90c1 100644 --- a/agent/consul/catalog_endpoint_test.go +++ b/agent/consul/catalog_endpoint_test.go @@ -1523,6 +1523,45 @@ func TestCatalog_ListServices_NodeMetaFilter(t *testing.T) { } } +func TestCatalog_ListServices_Filter(t *testing.T) { + t.Parallel() + _, s1 := testServer(t) + codec := rpcClient(t, s1) + + testrpc.WaitForTestAgent(t, s1.RPC, "dc1") + + // prep the cluster with some data we can use in our filters + registerTestCatalogEntries(t, codec) + + // Run the tests against the test server + + t.Run("ListServices", func(t *testing.T) { + args := structs.DCSpecificRequest{ + Datacenter: "dc1", + } + + args.Filter = "ServiceName == redis" + out := new(structs.IndexedServices) + require.NoError(t, msgpackrpc.CallWithCodec(codec, "Catalog.ListServices", &args, out)) + require.Contains(t, out.Services, "redis") + require.ElementsMatch(t, []string{"v1", "v2"}, out.Services["redis"]) + + args.Filter = "NodeMeta.os == NoSuchOS" + out = new(structs.IndexedServices) + require.NoError(t, msgpackrpc.CallWithCodec(codec, "Catalog.ListServices", &args, out)) + require.Len(t, out.Services, 0) + + args.Filter = "NodeMeta.NoSuchMetadata == linux" + out = new(structs.IndexedServices) + require.NoError(t, msgpackrpc.CallWithCodec(codec, "Catalog.ListServices", &args, out)) + require.Len(t, out.Services, 0) + + args.Filter = "InvalidField == linux" + out = new(structs.IndexedServices) + require.Error(t, msgpackrpc.CallWithCodec(codec, "Catalog.ListServices", &args, out)) + }) +} + func TestCatalog_ListServices_Blocking(t *testing.T) { if testing.Short() { t.Skip("too slow for testing.Short") diff --git a/agent/consul/state/catalog.go b/agent/consul/state/catalog.go index 258519d5ba..879c59f747 100644 --- a/agent/consul/state/catalog.go +++ b/agent/consul/state/catalog.go @@ -1134,7 +1134,7 @@ func terminatingGatewayVirtualIPsSupported(tx ReadTxn, ws memdb.WatchSet) (bool, } // Services returns all services along with a list of associated tags. -func (s *Store) Services(ws memdb.WatchSet, entMeta *acl.EnterpriseMeta, peerName string) (uint64, structs.Services, error) { +func (s *Store) Services(ws memdb.WatchSet, entMeta *acl.EnterpriseMeta, peerName string) (uint64, []*structs.ServiceNode, error) { tx := s.db.Txn(false) defer tx.Abort() @@ -1148,30 +1148,11 @@ func (s *Store) Services(ws memdb.WatchSet, entMeta *acl.EnterpriseMeta, peerNam } ws.Add(services.WatchCh()) - // Rip through the services and enumerate them and their unique set of - // tags. - unique := make(map[string]map[string]struct{}) + var result []*structs.ServiceNode for service := services.Next(); service != nil; service = services.Next() { - svc := service.(*structs.ServiceNode) - tags, ok := unique[svc.ServiceName] - if !ok { - unique[svc.ServiceName] = make(map[string]struct{}) - tags = unique[svc.ServiceName] - } - for _, tag := range svc.ServiceTags { - tags[tag] = struct{}{} - } + result = append(result, service.(*structs.ServiceNode)) } - - // Generate the output structure. 
- var results = make(structs.Services) - for service, tags := range unique { - results[service] = make([]string, 0, len(tags)) - for tag := range tags { - results[service] = append(results[service], tag) - } - } - return idx, results, nil + return idx, result, nil } func (s *Store) ServiceList(ws memdb.WatchSet, entMeta *acl.EnterpriseMeta, peerName string) (uint64, structs.ServiceList, error) { @@ -1212,7 +1193,7 @@ func serviceListTxn(tx ReadTxn, ws memdb.WatchSet, entMeta *acl.EnterpriseMeta, } // ServicesByNodeMeta returns all services, filtered by the given node metadata. -func (s *Store) ServicesByNodeMeta(ws memdb.WatchSet, filters map[string]string, entMeta *acl.EnterpriseMeta, peerName string) (uint64, structs.Services, error) { +func (s *Store) ServicesByNodeMeta(ws memdb.WatchSet, filters map[string]string, entMeta *acl.EnterpriseMeta, peerName string) (uint64, []*structs.ServiceNode, error) { tx := s.db.Txn(false) defer tx.Abort() @@ -1259,8 +1240,7 @@ func (s *Store) ServicesByNodeMeta(ws memdb.WatchSet, filters map[string]string, } allServicesCh := allServices.WatchCh() - // Populate the services map - unique := make(map[string]map[string]struct{}) + var result structs.ServiceNodes for node := nodes.Next(); node != nil; node = nodes.Next() { n := node.(*structs.Node) if len(filters) > 1 && !structs.SatisfiesMetaFilters(n.Meta, filters) { @@ -1274,30 +1254,11 @@ func (s *Store) ServicesByNodeMeta(ws memdb.WatchSet, filters map[string]string, } ws.AddWithLimit(watchLimit, services.WatchCh(), allServicesCh) - // Rip through the services and enumerate them and their unique set of - // tags. for service := services.Next(); service != nil; service = services.Next() { - svc := service.(*structs.ServiceNode) - tags, ok := unique[svc.ServiceName] - if !ok { - unique[svc.ServiceName] = make(map[string]struct{}) - tags = unique[svc.ServiceName] - } - for _, tag := range svc.ServiceTags { - tags[tag] = struct{}{} - } + result = append(result, service.(*structs.ServiceNode)) } } - - // Generate the output structure. - var results = make(structs.Services) - for service, tags := range unique { - results[service] = make([]string, 0, len(tags)) - for tag := range tags { - results[service] = append(results[service], tag) - } - } - return idx, results, nil + return idx, result, nil } // maxIndexForService return the maximum Raft Index for a service diff --git a/agent/consul/state/catalog_test.go b/agent/consul/state/catalog_test.go index 10e7af6dba..ca2bded03b 100644 --- a/agent/consul/state/catalog_test.go +++ b/agent/consul/state/catalog_test.go @@ -12,6 +12,8 @@ import ( "github.com/hashicorp/consul/acl" + "github.com/google/go-cmp/cmp" + "github.com/google/go-cmp/cmp/cmpopts" "github.com/hashicorp/go-memdb" "github.com/hashicorp/go-uuid" "github.com/stretchr/testify/assert" @@ -2105,7 +2107,7 @@ func TestStateStore_Services(t *testing.T) { if err := s.EnsureService(2, "node1", ns1); err != nil { t.Fatalf("err: %s", err) } - testRegisterService(t, s, 3, "node1", "dogs") + ns1Dogs := testRegisterService(t, s, 3, "node1", "dogs") testRegisterNode(t, s, 4, "node2") ns2 := &structs.NodeService{ ID: "service3", @@ -2131,19 +2133,13 @@ func TestStateStore_Services(t *testing.T) { t.Fatalf("bad index: %d", idx) } - // Verify the result. We sort the lists since the order is - // non-deterministic (it's built using a map internally). 
- expected := structs.Services{ - "redis": []string{"prod", "primary", "replica"}, - "dogs": []string{}, - } - sort.Strings(expected["redis"]) - for _, tags := range services { - sort.Strings(tags) - } - if !reflect.DeepEqual(expected, services) { - t.Fatalf("bad: %#v", services) + // Verify the result. + expected := []*structs.ServiceNode{ + ns1Dogs.ToServiceNode("node1"), + ns1.ToServiceNode("node1"), + ns2.ToServiceNode("node2"), } + assertDeepEqual(t, services, expected, cmpopts.IgnoreFields(structs.ServiceNode{}, "RaftIndex")) // Deleting a node with a service should fire the watch. if err := s.DeleteNode(6, "node1", nil, ""); err != nil { @@ -2206,11 +2202,10 @@ func TestStateStore_ServicesByNodeMeta(t *testing.T) { if err != nil { t.Fatalf("err: %s", err) } - expected := structs.Services{ - "redis": []string{"primary", "prod"}, + expected := []*structs.ServiceNode{ + ns1.ToServiceNode("node0"), } - sort.Strings(res["redis"]) - require.Equal(t, expected, res) + assertDeepEqual(t, res, expected, cmpopts.IgnoreFields(structs.ServiceNode{}, "RaftIndex")) }) t.Run("Get all services using the common meta value", func(t *testing.T) { @@ -2218,11 +2213,12 @@ func TestStateStore_ServicesByNodeMeta(t *testing.T) { if err != nil { t.Fatalf("err: %s", err) } - expected := structs.Services{ - "redis": []string{"primary", "prod", "replica"}, + require.Len(t, res, 2) + expected := []*structs.ServiceNode{ + ns1.ToServiceNode("node0"), + ns2.ToServiceNode("node1"), } - sort.Strings(res["redis"]) - require.Equal(t, expected, res) + assertDeepEqual(t, res, expected, cmpopts.IgnoreFields(structs.ServiceNode{}, "RaftIndex")) }) t.Run("Get an empty list for an invalid meta value", func(t *testing.T) { @@ -2230,8 +2226,8 @@ func TestStateStore_ServicesByNodeMeta(t *testing.T) { if err != nil { t.Fatalf("err: %s", err) } - expected := structs.Services{} - require.Equal(t, expected, res) + var expected []*structs.ServiceNode + assertDeepEqual(t, res, expected, cmpopts.IgnoreFields(structs.ServiceNode{}, "RaftIndex")) }) t.Run("Get the first node's service instance using multiple meta filters", func(t *testing.T) { @@ -2239,11 +2235,10 @@ func TestStateStore_ServicesByNodeMeta(t *testing.T) { if err != nil { t.Fatalf("err: %s", err) } - expected := structs.Services{ - "redis": []string{"primary", "prod"}, + expected := []*structs.ServiceNode{ + ns1.ToServiceNode("node0"), } - sort.Strings(res["redis"]) - require.Equal(t, expected, res) + assertDeepEqual(t, res, expected, cmpopts.IgnoreFields(structs.ServiceNode{}, "RaftIndex")) }) t.Run("Registering some unrelated node + service should not fire the watch.", func(t *testing.T) { @@ -8807,3 +8802,10 @@ func setVirtualIPFlags(t *testing.T, s *Store) { Value: "true", })) } + +func assertDeepEqual(t *testing.T, x, y interface{}, opts ...cmp.Option) { + t.Helper() + if diff := cmp.Diff(x, y, opts...); diff != "" { + t.Fatalf("assertion failed: values are not equal\n--- expected\n+++ actual\n%v", diff) + } +} diff --git a/agent/consul/state/state_store_test.go b/agent/consul/state/state_store_test.go index c8460ca821..88e5418c8d 100644 --- a/agent/consul/state/state_store_test.go +++ b/agent/consul/state/state_store_test.go @@ -146,13 +146,13 @@ func testRegisterServiceOpts(t *testing.T, s *Store, idx uint64, nodeID, service // testRegisterServiceWithChange registers a service and allow ensuring the consul index is updated // even if service already exists if using `modifyAccordingIndex`. 
 // This is done by setting the transaction ID in "version" meta so service will be updated if it already exists
-func testRegisterServiceWithChange(t *testing.T, s *Store, idx uint64, nodeID, serviceID string, modifyAccordingIndex bool) {
-	testRegisterServiceWithChangeOpts(t, s, idx, nodeID, serviceID, modifyAccordingIndex)
+func testRegisterServiceWithChange(t *testing.T, s *Store, idx uint64, nodeID, serviceID string, modifyAccordingIndex bool) *structs.NodeService {
+	return testRegisterServiceWithChangeOpts(t, s, idx, nodeID, serviceID, modifyAccordingIndex)
 }
 
 // testRegisterServiceWithChangeOpts is the same as testRegisterServiceWithChange with the addition of opts that can
 // modify the service prior to writing.
-func testRegisterServiceWithChangeOpts(t *testing.T, s *Store, idx uint64, nodeID, serviceID string, modifyAccordingIndex bool, opts ...func(service *structs.NodeService)) {
+func testRegisterServiceWithChangeOpts(t *testing.T, s *Store, idx uint64, nodeID, serviceID string, modifyAccordingIndex bool, opts ...func(service *structs.NodeService)) *structs.NodeService {
 	meta := make(map[string]string)
 	if modifyAccordingIndex {
 		meta["version"] = fmt.Sprint(idx)
@@ -183,14 +183,15 @@ func testRegisterServiceWithChangeOpts(t *testing.T, s *Store, idx uint64, nodeI
 		result.ServiceID != serviceID {
 		t.Fatalf("bad service: %#v", result)
 	}
+	return svc
 }
 
 // testRegisterService register a service with given transaction idx
 // If the service already exists, transaction number might not be increased
 // Use `testRegisterServiceWithChange()` if you want perform a registration that
 // ensures the transaction is updated by setting idx in Meta of Service
-func testRegisterService(t *testing.T, s *Store, idx uint64, nodeID, serviceID string) {
-	testRegisterServiceWithChange(t, s, idx, nodeID, serviceID, false)
+func testRegisterService(t *testing.T, s *Store, idx uint64, nodeID, serviceID string) *structs.NodeService {
+	return testRegisterServiceWithChange(t, s, idx, nodeID, serviceID, false)
 }
 
 func testRegisterConnectService(t *testing.T, s *Store, idx uint64, nodeID, serviceID string) {
diff --git a/website/content/api-docs/catalog.mdx b/website/content/api-docs/catalog.mdx
index b259176850..86480ab79b 100644
--- a/website/content/api-docs/catalog.mdx
+++ b/website/content/api-docs/catalog.mdx
@@ -410,13 +410,64 @@ The corresponding CLI command is [`consul catalog services`](/commands/catalog/s
 
 - `dc` `(string: "")` - Specifies the datacenter to query. This will default
   to the datacenter of the agent being queried.
 
-- `node-meta` `(string: "")` - Specifies a desired node metadata key/value pair
+- `node-meta` `(string: "")` **Deprecated** - Use `filter` with the `NodeMeta` selector instead.
+  This parameter will be removed in a future version of Consul.
+  Specifies a desired node metadata key/value pair
   of the form `key:value`. This parameter can be specified multiple times, and
   filters the results to nodes with the specified key/value pairs.
 
 - `ns` `(string: "")` - Specifies the namespace of the services you lookup.
   You can also [specify the namespace through other methods](#methods-to-specify-namespace).
 
+- `filter` `(string: "")` - Specifies the expression used to filter the
+  query results prior to returning the data.
+
+### Filtering
+
+The filter will be executed against each Service mapping within the catalog.
+The following selectors and filter operations are supported:
+
+| Selector                                             | Supported Operations                               |
+| ---------------------------------------------------- | -------------------------------------------------- |
+| `Address`                                            | Equal, Not Equal, In, Not In, Matches, Not Matches |
+| `Datacenter`                                         | Equal, Not Equal, In, Not In, Matches, Not Matches |
+| `ID`                                                 | Equal, Not Equal, In, Not In, Matches, Not Matches |
+| `Node`                                               | Equal, Not Equal, In, Not In, Matches, Not Matches |
+| `NodeMeta.<key>`                                     | Equal, Not Equal, In, Not In, Matches, Not Matches |
+| `NodeMeta`                                           | Is Empty, Is Not Empty, In, Not In                 |
+| `ServiceAddress`                                     | Equal, Not Equal, In, Not In, Matches, Not Matches |
+| `ServiceConnect.Native`                              | Equal, Not Equal                                   |
+| `ServiceEnableTagOverride`                           | Equal, Not Equal                                   |
+| `ServiceID`                                          | Equal, Not Equal, In, Not In, Matches, Not Matches |
+| `ServiceKind`                                        | Equal, Not Equal, In, Not In, Matches, Not Matches |
+| `ServiceMeta.<key>`                                  | Equal, Not Equal, In, Not In, Matches, Not Matches |
+| `ServiceMeta`                                        | Is Empty, Is Not Empty, In, Not In                 |
+| `ServiceName`                                        | Equal, Not Equal, In, Not In, Matches, Not Matches |
+| `ServicePort`                                        | Equal, Not Equal                                   |
+| `ServiceProxy.DestinationServiceID`                  | Equal, Not Equal, In, Not In, Matches, Not Matches |
+| `ServiceProxy.DestinationServiceName`                | Equal, Not Equal, In, Not In, Matches, Not Matches |
+| `ServiceProxy.LocalServiceAddress`                   | Equal, Not Equal, In, Not In, Matches, Not Matches |
+| `ServiceProxy.LocalServicePort`                      | Equal, Not Equal                                   |
+| `ServiceProxy.Mode`                                  | Equal, Not Equal, In, Not In, Matches, Not Matches |
+| `ServiceProxy.TransparentProxy.OutboundListenerPort` | Equal, Not Equal                                   |
+| `ServiceProxy.MeshGateway.Mode`                      | Equal, Not Equal, In, Not In, Matches, Not Matches |
+| `ServiceProxy.Upstreams.Datacenter`                  | Equal, Not Equal, In, Not In, Matches, Not Matches |
+| `ServiceProxy.Upstreams.DestinationName`             | Equal, Not Equal, In, Not In, Matches, Not Matches |
+| `ServiceProxy.Upstreams.DestinationNamespace`        | Equal, Not Equal, In, Not In, Matches, Not Matches |
+| `ServiceProxy.Upstreams.DestinationType`             | Equal, Not Equal, In, Not In, Matches, Not Matches |
+| `ServiceProxy.Upstreams.LocalBindAddress`            | Equal, Not Equal, In, Not In, Matches, Not Matches |
+| `ServiceProxy.Upstreams.LocalBindPort`               | Equal, Not Equal                                   |
+| `ServiceProxy.Upstreams.MeshGateway.Mode`            | Equal, Not Equal, In, Not In, Matches, Not Matches |
+| `ServiceProxy.Upstreams`                             | Is Empty, Is Not Empty                             |
+| `ServiceTaggedAddresses.<key>.Address`               | Equal, Not Equal, In, Not In, Matches, Not Matches |
+| `ServiceTaggedAddresses.<key>.Port`                  | Equal, Not Equal                                   |
+| `ServiceTaggedAddresses`                             | Is Empty, Is Not Empty, In, Not In                 |
+| `ServiceTags`                                        | In, Not In, Is Empty, Is Not Empty                 |
+| `ServiceWeights.Passing`                             | Equal, Not Equal                                   |
+| `ServiceWeights.Warning`                             | Equal, Not Equal                                   |
+| `TaggedAddresses.<key>`                              | Equal, Not Equal, In, Not In, Matches, Not Matches |
+| `TaggedAddresses`                                    | Is Empty, Is Not Empty, In, Not In                 |
+
 ### Sample Request
 
 ```shell-session

From 25675c8bc6227fe13f557479329856f58b121d36 Mon Sep 17 00:00:00 2001
From: Max Bowsher
Date: Sun, 14 Aug 2022 16:16:41 +0100
Subject: [PATCH 04/93] Correct problem with merge from master, including reformat of table

---
 website/content/docs/agent/telemetry.mdx | 110 +++++++++++------------
 1 file changed, 55 insertions(+), 55 deletions(-)

diff --git a/website/content/docs/agent/telemetry.mdx b/website/content/docs/agent/telemetry.mdx
index c297090575..12f04e8825 100644
--- a/website/content/docs/agent/telemetry.mdx
+++
b/website/content/docs/agent/telemetry.mdx @@ -349,59 +349,59 @@ populated free list structure. This is a full list of metrics emitted by Consul. -| Metric | Description | Unit | Type | -| -------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -------------------- | ------- | -| `consul.acl.blocked.{check,service}.deregistration` | Increments whenever a deregistration fails for an entity (check or service) is blocked by an ACL. | requests | counter | -| `consul.acl.blocked.{check,node,service}.registration` | Increments whenever a registration fails for an entity (check, node or service) is blocked by an ACL. | requests | counter | -| `consul.api.http` | This samples how long it takes to service the given HTTP request for the given verb and path. Includes labels for `path` and `method`. `path` does not include details like service or key names, for these an underscore will be present as a placeholder (eg. path=`v1.kv._`) | ms | timer | -| `consul.client.rpc` | Increments whenever a Consul agent in client mode makes an RPC request to a Consul server. This gives a measure of how much a given agent is loading the Consul servers. Currently, this is only generated by agents in client mode, not Consul servers. | requests | counter | -| `consul.client.rpc.exceeded` | Increments whenever a Consul agent in client mode makes an RPC request to a Consul server gets rate limited by that agent's [`limits`](/docs/agent/config/config-files#limits) configuration. This gives an indication that there's an abusive application making too many requests on the agent, or that the rate limit needs to be increased. Currently, this only applies to agents in client mode, not Consul servers. | rejected requests | counter | -| `consul.client.rpc.failed` | Increments whenever a Consul agent in client mode makes an RPC request to a Consul server and fails. | requests | counter | -| `consul.client.api.catalog_register.` | Increments whenever a Consul agent receives a catalog register request. | requests | counter | -| `consul.client.api.success.catalog_register.` | Increments whenever a Consul agent successfully responds to a catalog register request. | requests | counter | -| `consul.client.rpc.error.catalog_register.` | Increments whenever a Consul agent receives an RPC error for a catalog register request. | errors | counter | -| `consul.client.api.catalog_deregister.` | Increments whenever a Consul agent receives a catalog deregister request. | requests | counter | -| `consul.client.api.success.catalog_deregister.` | Increments whenever a Consul agent successfully responds to a catalog deregister request. | requests | counter | -| `consul.client.rpc.error.catalog_deregister.` | Increments whenever a Consul agent receives an RPC error for a catalog deregister request. | errors | counter | -| `consul.client.api.catalog_datacenters.` | Increments whenever a Consul agent receives a request to list datacenters in the catalog. | requests | counter | -| `consul.client.api.success.catalog_datacenters.` | Increments whenever a Consul agent successfully responds to a request to list datacenters. 
| requests | counter | -| `consul.client.rpc.error.catalog_datacenters.` | Increments whenever a Consul agent receives an RPC error for a request to list datacenters. | errors | counter | -| `consul.client.api.catalog_nodes.` | Increments whenever a Consul agent receives a request to list nodes from the catalog. | requests | counter | -| `consul.client.api.success.catalog_nodes.` | Increments whenever a Consul agent successfully responds to a request to list nodes. | requests | counter | -| `consul.client.rpc.error.catalog_nodes.` | Increments whenever a Consul agent receives an RPC error for a request to list nodes. | errors | counter | -| `consul.client.api.catalog_services.` | Increments whenever a Consul agent receives a request to list services from the catalog. | requests | counter | -| `consul.client.api.success.catalog_services.` | Increments whenever a Consul agent successfully responds to a request to list services. | requests | counter | -| `consul.client.rpc.error.catalog_services.` | Increments whenever a Consul agent receives an RPC error for a request to list services. | errors | counter | -| `consul.client.api.catalog_service_nodes.` | Increments whenever a Consul agent receives a request to list nodes offering a service. | requests | counter | -| `consul.client.api.success.catalog_service_nodes.` | Increments whenever a Consul agent successfully responds to a request to list nodes offering a service. | requests | counter | -| `consul.client.api.error.catalog_service_nodes.` | Increments whenever a Consul agent receives an RPC error for request to list nodes offering a service. | requests | counter | -| `consul.client.rpc.error.catalog_service_nodes.` | Increments whenever a Consul agent receives an RPC error for a request to list nodes offering a service.   | errors | counter | -| `consul.client.api.catalog_node_services.` | Increments whenever a Consul agent receives a request to list services registered in a node.   | requests | counter | -| `consul.client.api.success.catalog_node_services.` | Increments whenever a Consul agent successfully responds to a request to list services in a node.   | requests | counter | -| `consul.client.rpc.error.catalog_node_services.` | Increments whenever a Consul agent receives an RPC error for a request to list services in a node.   | errors | counter | -| `consul.client.api.catalog_node_service_list` | Increments whenever a Consul agent receives a request to list a node's registered services. | requests | counter | -| `consul.client.rpc.error.catalog_node_service_list` | Increments whenever a Consul agent receives an RPC error for request to list a node's registered services. | errors | counter | -| `consul.client.api.success.catalog_node_service_list` | Increments whenever a Consul agent successfully responds to a request to list a node's registered services. | requests | counter | -| `consul.client.api.catalog_gateway_services.` | Increments whenever a Consul agent receives a request to list services associated with a gateway. | requests | counter | -| `consul.client.api.success.catalog_gateway_services.` | Increments whenever a Consul agent successfully responds to a request to list services associated with a gateway. | requests | counter | -| `consul.client.rpc.error.catalog_gateway_services.` | Increments whenever a Consul agent receives an RPC error for a request to list services associated with a gateway. 
| errors | counter | -| `consul.runtime.num_goroutines` | Tracks the number of running goroutines and is a general load pressure indicator. This may burst from time to time but should return to a steady state value. | number of goroutines | gauge | -| `consul.runtime.alloc_bytes` | Measures the number of bytes allocated by the Consul process. This may burst from time to time but should return to a steady state value. | bytes | gauge | -| `consul.runtime.heap_objects` | Measures the number of objects allocated on the heap and is a general memory pressure indicator. This may burst from time to time but should return to a steady state value. | number of objects | gauge | -| `consul.state.nodes` | Measures the current number of nodes registered with Consul. It is only emitted by Consul servers. Added in v1.9.0. | number of objects | gauge | -| `consul.state.peerings` | Measures the current number of peerings registered with Consul. It is only emitted by Consul servers. Added in v1.13.0. | number of objects | gauge | -| `consul.state.services` | Measures the current number of unique services registered with Consul, based on service name. It is only emitted by Consul servers. Added in v1.9.0. | number of objects | gauge | -| `consul.state.service_instances` | Measures the current number of unique service instances registered with Consul. It is only emitted by Consul servers. Added in v1.9.0. | number of objects | gauge | -| `consul.state.kv_entries` | Measures the current number of entries in the Consul KV store. It is only emitted by Consul servers. Added in v1.10.3. | number of objects | gauge | -| `consul.state.connect_instances` | Measures the current number of unique connect service instances registered with Consul labeled by Kind (e.g. connect-proxy, connect-native, etc). Added in v1.10.4 | number of objects | gauge | -| `consul.state.config_entries` | Measures the current number of configuration entries registered with Consul labeled by Kind (e.g. service-defaults, proxy-defaults, etc). See [Configuration Entries](/docs/connect/config-entries) for more information. Added in v1.10.4 | number of objects | gauge | -| `consul.members.clients` | Measures the current number of client agents registered with Consul. It is only emitted by Consul servers. Added in v1.9.6. | number of clients | gauge | -| `consul.members.servers` | Measures the current number of server agents registered with Consul. It is only emitted by Consul servers. Added in v1.9.6. | number of servers | gauge | -| `consul.dns.stale_queries` | Increments when an agent serves a query within the allowed stale threshold. | queries | counter | -| `consul.dns.ptr_query.` | Measures the time spent handling a reverse DNS query for the given node. | ms | timer | -| `consul.dns.domain_query.` | Measures the time spent handling a domain query for the given node. | ms | timer | -| `consul.system.licenseExpiration` | This measures the number of hours remaining on the agents license. | hours | gauge | -| `consul.version` | Represents the Consul version. 
+| Metric | Description | Unit | Type | +|--------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|----------------------|---------| +| `consul.acl.blocked.{check,service}.deregistration` | Increments whenever a deregistration fails for an entity (check or service) is blocked by an ACL. | requests | counter | +| `consul.acl.blocked.{check,node,service}.registration` | Increments whenever a registration fails for an entity (check, node or service) is blocked by an ACL. | requests | counter | +| `consul.api.http` | This samples how long it takes to service the given HTTP request for the given verb and path. Includes labels for `path` and `method`. `path` does not include details like service or key names, for these an underscore will be present as a placeholder (eg. path=`v1.kv._`) | ms | timer | +| `consul.client.rpc` | Increments whenever a Consul agent in client mode makes an RPC request to a Consul server. This gives a measure of how much a given agent is loading the Consul servers. Currently, this is only generated by agents in client mode, not Consul servers. | requests | counter | +| `consul.client.rpc.exceeded` | Increments whenever a Consul agent in client mode makes an RPC request to a Consul server gets rate limited by that agent's [`limits`](/docs/agent/config/config-files#limits) configuration. This gives an indication that there's an abusive application making too many requests on the agent, or that the rate limit needs to be increased. Currently, this only applies to agents in client mode, not Consul servers. | rejected requests | counter | +| `consul.client.rpc.failed` | Increments whenever a Consul agent in client mode makes an RPC request to a Consul server and fails. | requests | counter | +| `consul.client.api.catalog_register.` | Increments whenever a Consul agent receives a catalog register request. | requests | counter | +| `consul.client.api.success.catalog_register.` | Increments whenever a Consul agent successfully responds to a catalog register request. | requests | counter | +| `consul.client.rpc.error.catalog_register.` | Increments whenever a Consul agent receives an RPC error for a catalog register request. | errors | counter | +| `consul.client.api.catalog_deregister.` | Increments whenever a Consul agent receives a catalog deregister request. | requests | counter | +| `consul.client.api.success.catalog_deregister.` | Increments whenever a Consul agent successfully responds to a catalog deregister request. | requests | counter | +| `consul.client.rpc.error.catalog_deregister.` | Increments whenever a Consul agent receives an RPC error for a catalog deregister request. | errors | counter | +| `consul.client.api.catalog_datacenters.` | Increments whenever a Consul agent receives a request to list datacenters in the catalog. | requests | counter | +| `consul.client.api.success.catalog_datacenters.` | Increments whenever a Consul agent successfully responds to a request to list datacenters. | requests | counter | +| `consul.client.rpc.error.catalog_datacenters.` | Increments whenever a Consul agent receives an RPC error for a request to list datacenters. 
| errors | counter | +| `consul.client.api.catalog_nodes.` | Increments whenever a Consul agent receives a request to list nodes from the catalog. | requests | counter | +| `consul.client.api.success.catalog_nodes.` | Increments whenever a Consul agent successfully responds to a request to list nodes. | requests | counter | +| `consul.client.rpc.error.catalog_nodes.` | Increments whenever a Consul agent receives an RPC error for a request to list nodes. | errors | counter | +| `consul.client.api.catalog_services.` | Increments whenever a Consul agent receives a request to list services from the catalog. | requests | counter | +| `consul.client.api.success.catalog_services.` | Increments whenever a Consul agent successfully responds to a request to list services. | requests | counter | +| `consul.client.rpc.error.catalog_services.` | Increments whenever a Consul agent receives an RPC error for a request to list services. | errors | counter | +| `consul.client.api.catalog_service_nodes.` | Increments whenever a Consul agent receives a request to list nodes offering a service. | requests | counter | +| `consul.client.api.success.catalog_service_nodes.` | Increments whenever a Consul agent successfully responds to a request to list nodes offering a service. | requests | counter | +| `consul.client.api.error.catalog_service_nodes.` | Increments whenever a Consul agent receives an RPC error for request to list nodes offering a service. | requests | counter | +| `consul.client.rpc.error.catalog_service_nodes.` | Increments whenever a Consul agent receives an RPC error for a request to list nodes offering a service.   | errors | counter | +| `consul.client.api.catalog_node_services.` | Increments whenever a Consul agent receives a request to list services registered in a node.   | requests | counter | +| `consul.client.api.success.catalog_node_services.` | Increments whenever a Consul agent successfully responds to a request to list services in a node.   | requests | counter | +| `consul.client.rpc.error.catalog_node_services.` | Increments whenever a Consul agent receives an RPC error for a request to list services in a node.   | errors | counter | +| `consul.client.api.catalog_node_service_list` | Increments whenever a Consul agent receives a request to list a node's registered services. | requests | counter | +| `consul.client.rpc.error.catalog_node_service_list` | Increments whenever a Consul agent receives an RPC error for request to list a node's registered services. | errors | counter | +| `consul.client.api.success.catalog_node_service_list` | Increments whenever a Consul agent successfully responds to a request to list a node's registered services. | requests | counter | +| `consul.client.api.catalog_gateway_services.` | Increments whenever a Consul agent receives a request to list services associated with a gateway. | requests | counter | +| `consul.client.api.success.catalog_gateway_services.` | Increments whenever a Consul agent successfully responds to a request to list services associated with a gateway. | requests | counter | +| `consul.client.rpc.error.catalog_gateway_services.` | Increments whenever a Consul agent receives an RPC error for a request to list services associated with a gateway. | errors | counter | +| `consul.runtime.num_goroutines` | Tracks the number of running goroutines and is a general load pressure indicator. This may burst from time to time but should return to a steady state value. 
| number of goroutines | gauge | +| `consul.runtime.alloc_bytes` | Measures the number of bytes allocated by the Consul process. This may burst from time to time but should return to a steady state value. | bytes | gauge | +| `consul.runtime.heap_objects` | Measures the number of objects allocated on the heap and is a general memory pressure indicator. This may burst from time to time but should return to a steady state value. | number of objects | gauge | +| `consul.state.nodes` | Measures the current number of nodes registered with Consul. It is only emitted by Consul servers. Added in v1.9.0. | number of objects | gauge | +| `consul.state.peerings` | Measures the current number of peerings registered with Consul. It is only emitted by Consul servers. Added in v1.13.0. | number of objects | gauge | +| `consul.state.services` | Measures the current number of unique services registered with Consul, based on service name. It is only emitted by Consul servers. Added in v1.9.0. | number of objects | gauge | +| `consul.state.service_instances` | Measures the current number of unique service instances registered with Consul. It is only emitted by Consul servers. Added in v1.9.0. | number of objects | gauge | +| `consul.state.kv_entries` | Measures the current number of entries in the Consul KV store. It is only emitted by Consul servers. Added in v1.10.3. | number of objects | gauge | +| `consul.state.connect_instances` | Measures the current number of unique connect service instances registered with Consul labeled by Kind (e.g. connect-proxy, connect-native, etc). Added in v1.10.4 | number of objects | gauge | +| `consul.state.config_entries` | Measures the current number of configuration entries registered with Consul labeled by Kind (e.g. service-defaults, proxy-defaults, etc). See [Configuration Entries](/docs/connect/config-entries) for more information. Added in v1.10.4 | number of objects | gauge | +| `consul.members.clients` | Measures the current number of client agents registered with Consul. It is only emitted by Consul servers. Added in v1.9.6. | number of clients | gauge | +| `consul.members.servers` | Measures the current number of server agents registered with Consul. It is only emitted by Consul servers. Added in v1.9.6. | number of servers | gauge | +| `consul.dns.stale_queries` | Increments when an agent serves a query within the allowed stale threshold. | queries | counter | +| `consul.dns.ptr_query.` | Measures the time spent handling a reverse DNS query for the given node. | ms | timer | +| `consul.dns.domain_query.` | Measures the time spent handling a domain query for the given node. | ms | timer | +| `consul.system.licenseExpiration` | This measures the number of hours remaining on the agents license. | hours | gauge | +| `consul.version` | Represents the Consul version. | agents | gauge | ## Server Health @@ -691,14 +691,14 @@ agent. The table below describes the additional metrics exported by the proxy. **Requirements:** - Consul 1.13.0+ -[Cluster peering](/docs/connect/cluster-peering) refers to enabling communication between Consul clusters through a peer connection, as opposed to a federated connection. Consul collects metrics that describe the number of services exported to a peered cluster. Peering metrics are only emitted by the leader server. +[Cluster peering](/docs/connect/cluster-peering) refers to enabling communication between Consul clusters through a peer connection, as opposed to a federated connection. 
Consul collects metrics that describe the number of services exported to a peered cluster. Peering metrics are only emitted by the leader server.

 | Metric                             | Description                                                | Unit  | Type  |
 | ------------------------------------- | ----------------------------------------------------------------------| ------ | ------- |
 | `consul.peering.exported_services` | Counts the number of services exported to a peer cluster. | count | gauge |

 ### Labels

-Consul attaches the following labels to metric values.
 | Label Name | Description | Possible values |
 | ------------------------------------- | ---------------------------------------------------------------------- | ------------------------------------------ |
 | `peer_name` | The name of the peering on the reporting cluster or leader. | Any defined peer name in the cluster |

From 4f920610bf5c210320a25b3113e4e9e484308afa Mon Sep 17 00:00:00 2001
From: Jared Kirschner <85913323+jkirschner-hashicorp@users.noreply.github.com>
Date: Tue, 16 Aug 2022 16:50:03 -0400
Subject: [PATCH 05/93] docs: update k8s vault connect ca config docs

- Add namespace to additionalConfig example
- Improve the link to additional configuration options available
---
 website/content/docs/k8s/helm.mdx | 11 ++++++-----
 1 file changed, 6 insertions(+), 5 deletions(-)

diff --git a/website/content/docs/k8s/helm.mdx b/website/content/docs/k8s/helm.mdx
index 837a03f562..be0c340800 100644
--- a/website/content/docs/k8s/helm.mdx
+++ b/website/content/docs/k8s/helm.mdx
@@ -270,14 +270,14 @@ Use these links to navigate to a particular top-level stanza.
 - `authMethodPath` ((#v-global-secretsbackend-vault-connectca-authmethodpath)) (`string: kubernetes`) - The mount path of the Kubernetes auth method in Vault.
 
 - `rootPKIPath` ((#v-global-secretsbackend-vault-connectca-rootpkipath)) (`string: ""`) - The path to a PKI secrets engine for the root certificate.
-  Please see https://www.consul.io/docs/connect/ca/vault#rootpkipath.
+  For more details, see [Vault Connect CA configuration](https://www.consul.io/docs/connect/ca/vault#rootpkipath).
 
 - `intermediatePKIPath` ((#v-global-secretsbackend-vault-connectca-intermediatepkipath)) (`string: ""`) - The path to a PKI secrets engine for the generated intermediate certificate.
-  Please see https://www.consul.io/docs/connect/ca/vault#intermediatepkipath.
+  For more details, see [Vault Connect CA configuration](https://www.consul.io/docs/connect/ca/vault#intermediatepkipath).
 
 - `additionalConfig` ((#v-global-secretsbackend-vault-connectca-additionalconfig)) (`string: {}`) - Additional Connect CA configuration in JSON format.
-  Please see https://www.consul.io/docs/connect/ca/vault#common-ca-config-options
-  for additional configuration options.
+  Please refer to [Vault Connect CA configuration](https://www.consul.io/docs/connect/ca/vault#configuration)
+  for all configuration options available for that provider.
 
   Example:
{ "connect": [{ "ca_config": [{ - "leaf_cert_ttl": "36h" + "leaf_cert_ttl": "36h", + "namespace": "my-vault-ns" }] }] } From b0ef7a667433f59d3e7d77775ed10af10b747a9d Mon Sep 17 00:00:00 2001 From: Jared Kirschner <85913323+jkirschner-hashicorp@users.noreply.github.com> Date: Fri, 29 Jul 2022 18:04:05 -0400 Subject: [PATCH 06/93] docs: link pq docs to relevant DNS lookup section --- website/content/api-docs/query.mdx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/content/api-docs/query.mdx b/website/content/api-docs/query.mdx index 54a148e9ad..4ad4e2e905 100644 --- a/website/content/api-docs/query.mdx +++ b/website/content/api-docs/query.mdx @@ -11,7 +11,7 @@ The `/query` endpoints create, update, destroy, and execute prepared queries. Prepared queries allow you to register a complex service query and then execute it later via its ID or name to get a set of healthy nodes that provide a given service. This is particularly useful in combination with Consul's -[DNS Interface](/docs/discovery/dns) as it allows for much richer queries than +[DNS Interface](/docs/discovery/dns#prepared-query-lookups) as it allows for much richer queries than would be possible given the limited entry points exposed by DNS. Check the [Geo Failover tutorial](https://learn.hashicorp.com/tutorials/consul/automate-geo-failover) for details and From 58901ad7df9b649c01eac59edec4bafb3056d48a Mon Sep 17 00:00:00 2001 From: Eric Haberkorn Date: Tue, 23 Aug 2022 09:13:43 -0400 Subject: [PATCH 07/93] Cluster peering failover disco chain changes (#14296) --- agent/connect/sni_test.go | 33 ++- agent/consul/discovery_chain_endpoint_test.go | 17 +- agent/consul/discoverychain/compile.go | 216 +++++++------- agent/consul/discoverychain/compile_test.go | 273 ++++++++++++++---- agent/consul/state/peering_test.go | 8 +- agent/discovery_chain_endpoint_test.go | 27 +- agent/proxycfg/naming.go | 33 ++- agent/proxycfg/naming_test.go | 7 + agent/structs/config_entry_discoverychain.go | 29 ++ agent/structs/discovery_chain.go | 56 +++- agent/xds/failover_math_test.go | 35 ++- 11 files changed, 527 insertions(+), 207 deletions(-) diff --git a/agent/connect/sni_test.go b/agent/connect/sni_test.go index 26fae1da72..59e9f41fcd 100644 --- a/agent/connect/sni_test.go +++ b/agent/connect/sni_test.go @@ -178,20 +178,43 @@ func TestQuerySNI(t *testing.T) { func TestTargetSNI(t *testing.T) { // empty namespace, empty subset require.Equal(t, "api.default.foo."+testTrustDomainSuffix1, - TargetSNI(structs.NewDiscoveryTarget("api", "", "", "default", "foo"), testTrustDomain1)) + TargetSNI(structs.NewDiscoveryTarget(structs.DiscoveryTargetOpts{ + Service: "api", + Partition: "default", + Datacenter: "foo", + }), testTrustDomain1)) require.Equal(t, "api.default.foo."+testTrustDomainSuffix1, - TargetSNI(structs.NewDiscoveryTarget("api", "", "", "", "foo"), testTrustDomain1)) + TargetSNI(structs.NewDiscoveryTarget(structs.DiscoveryTargetOpts{ + Service: "api", + Datacenter: "foo", + }), testTrustDomain1)) // set namespace, empty subset require.Equal(t, "api.neighbor.foo."+testTrustDomainSuffix2, - TargetSNI(structs.NewDiscoveryTarget("api", "", "neighbor", "default", "foo"), testTrustDomain2)) + TargetSNI(structs.NewDiscoveryTarget(structs.DiscoveryTargetOpts{ + Service: "api", + Namespace: "neighbor", + Partition: "default", + Datacenter: "foo", + }), testTrustDomain2)) // empty namespace, set subset require.Equal(t, "v2.api.default.foo."+testTrustDomainSuffix1, - TargetSNI(structs.NewDiscoveryTarget("api", "v2", "", "default", 
"foo"), testTrustDomain1)) + TargetSNI(structs.NewDiscoveryTarget(structs.DiscoveryTargetOpts{ + Service: "api", + ServiceSubset: "v2", + Partition: "default", + Datacenter: "foo", + }), testTrustDomain1)) // set namespace, set subset require.Equal(t, "canary.api.neighbor.foo."+testTrustDomainSuffix2, - TargetSNI(structs.NewDiscoveryTarget("api", "canary", "neighbor", "default", "foo"), testTrustDomain2)) + TargetSNI(structs.NewDiscoveryTarget(structs.DiscoveryTargetOpts{ + Service: "api", + ServiceSubset: "canary", + Namespace: "neighbor", + Partition: "default", + Datacenter: "foo", + }), testTrustDomain2)) } diff --git a/agent/consul/discovery_chain_endpoint_test.go b/agent/consul/discovery_chain_endpoint_test.go index 21c34aa864..c1ad0fef35 100644 --- a/agent/consul/discovery_chain_endpoint_test.go +++ b/agent/consul/discovery_chain_endpoint_test.go @@ -56,8 +56,17 @@ func TestDiscoveryChainEndpoint_Get(t *testing.T) { return &resp, nil } - newTarget := func(service, serviceSubset, namespace, partition, datacenter string) *structs.DiscoveryTarget { - t := structs.NewDiscoveryTarget(service, serviceSubset, namespace, partition, datacenter) + newTarget := func(opts structs.DiscoveryTargetOpts) *structs.DiscoveryTarget { + if opts.Namespace == "" { + opts.Namespace = "default" + } + if opts.Partition == "" { + opts.Partition = "default" + } + if opts.Datacenter == "" { + opts.Datacenter = "dc1" + } + t := structs.NewDiscoveryTarget(opts) t.SNI = connect.TargetSNI(t, connect.TestClusterID+".consul") t.Name = t.SNI t.ConnectTimeout = 5 * time.Second // default @@ -119,7 +128,7 @@ func TestDiscoveryChainEndpoint_Get(t *testing.T) { }, }, Targets: map[string]*structs.DiscoveryTarget{ - "web.default.default.dc1": newTarget("web", "", "default", "default", "dc1"), + "web.default.default.dc1": newTarget(structs.DiscoveryTargetOpts{Service: "web"}), }, }, } @@ -245,7 +254,7 @@ func TestDiscoveryChainEndpoint_Get(t *testing.T) { }, Targets: map[string]*structs.DiscoveryTarget{ "web.default.default.dc1": targetWithConnectTimeout( - newTarget("web", "", "default", "default", "dc1"), + newTarget(structs.DiscoveryTargetOpts{Service: "web"}), 33*time.Second, ), }, diff --git a/agent/consul/discoverychain/compile.go b/agent/consul/discoverychain/compile.go index ed664878b4..3a9a1f0ed7 100644 --- a/agent/consul/discoverychain/compile.go +++ b/agent/consul/discoverychain/compile.go @@ -8,6 +8,7 @@ import ( "github.com/mitchellh/hashstructure" "github.com/mitchellh/mapstructure" + "github.com/hashicorp/consul/acl" "github.com/hashicorp/consul/agent/configentry" "github.com/hashicorp/consul/agent/connect" "github.com/hashicorp/consul/agent/structs" @@ -576,7 +577,10 @@ func (c *compiler) assembleChain() error { if router == nil { // If no router is configured, move on down the line to the next hop of // the chain. 
- node, err := c.getSplitterOrResolverNode(c.newTarget(c.serviceName, "", "", "", "")) + node, err := c.getSplitterOrResolverNode(c.newTarget(structs.DiscoveryTargetOpts{ + Service: c.serviceName, + })) + if err != nil { return err } @@ -626,11 +630,20 @@ func (c *compiler) assembleChain() error { ) if dest.ServiceSubset == "" { node, err = c.getSplitterOrResolverNode( - c.newTarget(svc, "", destNamespace, destPartition, ""), - ) + c.newTarget(structs.DiscoveryTargetOpts{ + Service: svc, + Namespace: destNamespace, + Partition: destPartition, + }, + )) } else { node, err = c.getResolverNode( - c.newTarget(svc, dest.ServiceSubset, destNamespace, destPartition, ""), + c.newTarget(structs.DiscoveryTargetOpts{ + Service: svc, + ServiceSubset: dest.ServiceSubset, + Namespace: destNamespace, + Partition: destPartition, + }), false, ) } @@ -642,7 +655,12 @@ func (c *compiler) assembleChain() error { // If we have a router, we'll add a catch-all route at the end to send // unmatched traffic to the next hop in the chain. - defaultDestinationNode, err := c.getSplitterOrResolverNode(c.newTarget(router.Name, "", router.NamespaceOrDefault(), router.PartitionOrDefault(), "")) + opts := structs.DiscoveryTargetOpts{ + Service: router.Name, + Namespace: router.NamespaceOrDefault(), + Partition: router.PartitionOrDefault(), + } + defaultDestinationNode, err := c.getSplitterOrResolverNode(c.newTarget(opts)) if err != nil { return err } @@ -674,26 +692,36 @@ func newDefaultServiceRoute(serviceName, namespace, partition string) *structs.S } } -func (c *compiler) newTarget(service, serviceSubset, namespace, partition, datacenter string) *structs.DiscoveryTarget { - if service == "" { +func (c *compiler) newTarget(opts structs.DiscoveryTargetOpts) *structs.DiscoveryTarget { + if opts.Service == "" { panic("newTarget called with empty service which makes no sense") } - t := structs.NewDiscoveryTarget( - service, - serviceSubset, - defaultIfEmpty(namespace, c.evaluateInNamespace), - defaultIfEmpty(partition, c.evaluateInPartition), - defaultIfEmpty(datacenter, c.evaluateInDatacenter), - ) + if opts.Peer == "" { + opts.Datacenter = defaultIfEmpty(opts.Datacenter, c.evaluateInDatacenter) + opts.Namespace = defaultIfEmpty(opts.Namespace, c.evaluateInNamespace) + opts.Partition = defaultIfEmpty(opts.Partition, c.evaluateInPartition) + } else { + // Don't allow Peer and Datacenter. + opts.Datacenter = "" + // Peer and Partition cannot both be set. + opts.Partition = acl.PartitionOrDefault("") + // Default to "default" rather than c.evaluateInNamespace. + opts.Namespace = acl.PartitionOrDefault(opts.Namespace) + } - // Set default connect SNI. This will be overridden later if the service - // has an explicit SNI value configured in service-defaults. - t.SNI = connect.TargetSNI(t, c.evaluateInTrustDomain) + t := structs.NewDiscoveryTarget(opts) - // Use the same representation for the name. This will NOT be overridden - // later. - t.Name = t.SNI + // We don't have the peer's trust domain yet so we can't construct the SNI. + if opts.Peer == "" { + // Set default connect SNI. This will be overridden later if the service + // has an explicit SNI value configured in service-defaults. + t.SNI = connect.TargetSNI(t, c.evaluateInTrustDomain) + + // Use the same representation for the name. This will NOT be overridden + // later. 
+ t.Name = t.SNI + } prev, ok := c.loadedTargets[t.ID] if ok { @@ -703,34 +731,30 @@ func (c *compiler) newTarget(service, serviceSubset, namespace, partition, datac return t } -func (c *compiler) rewriteTarget(t *structs.DiscoveryTarget, service, serviceSubset, partition, namespace, datacenter string) *structs.DiscoveryTarget { - var ( - service2 = t.Service - serviceSubset2 = t.ServiceSubset - partition2 = t.Partition - namespace2 = t.Namespace - datacenter2 = t.Datacenter - ) +func (c *compiler) rewriteTarget(t *structs.DiscoveryTarget, opts structs.DiscoveryTargetOpts) *structs.DiscoveryTarget { + mergedOpts := t.ToDiscoveryTargetOpts() - if service != "" && service != service2 { - service2 = service + if opts.Service != "" && opts.Service != mergedOpts.Service { + mergedOpts.Service = opts.Service // Reset the chosen subset if we reference a service other than our own. - serviceSubset2 = "" + mergedOpts.ServiceSubset = "" } - if serviceSubset != "" { - serviceSubset2 = serviceSubset + if opts.ServiceSubset != "" { + mergedOpts.ServiceSubset = opts.ServiceSubset } - if partition != "" { - partition2 = partition + if opts.Partition != "" { + mergedOpts.Partition = opts.Partition } - if namespace != "" { - namespace2 = namespace + // Only use explicit Namespace with Peer + if opts.Namespace != "" || opts.Peer != "" { + mergedOpts.Namespace = opts.Namespace } - if datacenter != "" { - datacenter2 = datacenter + if opts.Datacenter != "" { + mergedOpts.Datacenter = opts.Datacenter } + mergedOpts.Peer = opts.Peer - return c.newTarget(service2, serviceSubset2, namespace2, partition2, datacenter2) + return c.newTarget(mergedOpts) } func (c *compiler) getSplitterOrResolverNode(target *structs.DiscoveryTarget) (*structs.DiscoveryGraphNode, error) { @@ -803,10 +827,13 @@ func (c *compiler) getSplitterNode(sid structs.ServiceID) (*structs.DiscoveryGra // fall through to group-resolver } - node, err := c.getResolverNode( - c.newTarget(splitID.ID, split.ServiceSubset, splitID.NamespaceOrDefault(), splitID.PartitionOrDefault(), ""), - false, - ) + opts := structs.DiscoveryTargetOpts{ + Service: splitID.ID, + ServiceSubset: split.ServiceSubset, + Namespace: splitID.NamespaceOrDefault(), + Partition: splitID.PartitionOrDefault(), + } + node, err := c.getResolverNode(c.newTarget(opts), false) if err != nil { return nil, err } @@ -881,11 +908,7 @@ RESOLVE_AGAIN: redirectedTarget := c.rewriteTarget( target, - redirect.Service, - redirect.ServiceSubset, - redirect.Partition, - redirect.Namespace, - redirect.Datacenter, + redirect.ToDiscoveryTargetOpts(), ) if redirectedTarget.ID != target.ID { target = redirectedTarget @@ -895,14 +918,9 @@ RESOLVE_AGAIN: // Handle default subset. if target.ServiceSubset == "" && resolver.DefaultSubset != "" { - target = c.rewriteTarget( - target, - "", - resolver.DefaultSubset, - "", - "", - "", - ) + target = c.rewriteTarget(target, structs.DiscoveryTargetOpts{ + ServiceSubset: resolver.DefaultSubset, + }) goto RESOLVE_AGAIN } @@ -1027,56 +1045,54 @@ RESOLVE_AGAIN: failover, ok = f["*"] } - if ok { - // Determine which failover definitions apply. - var failoverTargets []*structs.DiscoveryTarget - if len(failover.Datacenters) > 0 { - for _, dc := range failover.Datacenters { - // Rewrite the target as per the failover policy. 
- failoverTarget := c.rewriteTarget( - target, - failover.Service, - failover.ServiceSubset, - target.Partition, - failover.Namespace, - dc, - ) - if failoverTarget.ID != target.ID { // don't failover to yourself - failoverTargets = append(failoverTargets, failoverTarget) - } - } - } else { + if !ok { + return node, nil + } + + // Determine which failover definitions apply. + var failoverTargets []*structs.DiscoveryTarget + if len(failover.Datacenters) > 0 { + opts := failover.ToDiscoveryTargetOpts() + for _, dc := range failover.Datacenters { // Rewrite the target as per the failover policy. - failoverTarget := c.rewriteTarget( - target, - failover.Service, - failover.ServiceSubset, - target.Partition, - failover.Namespace, - "", - ) + opts.Datacenter = dc + failoverTarget := c.rewriteTarget(target, opts) if failoverTarget.ID != target.ID { // don't failover to yourself failoverTargets = append(failoverTargets, failoverTarget) } } - - // If we filtered everything out then no point in having a failover. - if len(failoverTargets) > 0 { - df := &structs.DiscoveryFailover{} - node.Resolver.Failover = df - - // Take care of doing any redirects or configuration loading - // related to targets by cheating a bit and recursing into - // ourselves. - for _, target := range failoverTargets { - failoverResolveNode, err := c.getResolverNode(target, true) - if err != nil { - return nil, err - } - failoverTarget := failoverResolveNode.Resolver.Target - df.Targets = append(df.Targets, failoverTarget) + } else if len(failover.Targets) > 0 { + for _, t := range failover.Targets { + // Rewrite the target as per the failover policy. + failoverTarget := c.rewriteTarget(target, t.ToDiscoveryTargetOpts()) + if failoverTarget.ID != target.ID { // don't failover to yourself + failoverTargets = append(failoverTargets, failoverTarget) } } + } else { + // Rewrite the target as per the failover policy. + failoverTarget := c.rewriteTarget(target, failover.ToDiscoveryTargetOpts()) + if failoverTarget.ID != target.ID { // don't failover to yourself + failoverTargets = append(failoverTargets, failoverTarget) + } + } + + // If we filtered everything out then no point in having a failover. + if len(failoverTargets) > 0 { + df := &structs.DiscoveryFailover{} + node.Resolver.Failover = df + + // Take care of doing any redirects or configuration loading + // related to targets by cheating a bit and recursing into + // ourselves. 
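+			// Illustrative sketch (assumed config) of the three failover
+			// shapes handled above: a resolver with
+			//   Failover: { "*": { Datacenters: ["dc3"] } }
+			// produces a "main.default.default.dc3" target, while
+			//   Failover: { "*": { Targets: [{Peer: "cluster-01"}] } }
+			// produces "main.default.default.external.cluster-01".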
+ for _, target := range failoverTargets { + failoverResolveNode, err := c.getResolverNode(target, true) + if err != nil { + return nil, err + } + failoverTarget := failoverResolveNode.Resolver.Target + df.Targets = append(df.Targets, failoverTarget) + } } } diff --git a/agent/consul/discoverychain/compile_test.go b/agent/consul/discoverychain/compile_test.go index 221ac757f9..6505fdb9ea 100644 --- a/agent/consul/discoverychain/compile_test.go +++ b/agent/consul/discoverychain/compile_test.go @@ -46,6 +46,7 @@ func TestCompile(t *testing.T) { "service and subset failover": testcase_ServiceAndSubsetFailover(), "datacenter failover": testcase_DatacenterFailover(), "datacenter failover with mesh gateways": testcase_DatacenterFailover_WithMeshGateways(), + "target failover": testcase_Failover_Targets(), "noop split to resolver with default subset": testcase_NoopSplit_WithDefaultSubset(), "resolver with default subset": testcase_Resolve_WithDefaultSubset(), "default resolver with external sni": testcase_DefaultResolver_ExternalSNI(), @@ -182,7 +183,7 @@ func testcase_JustRouterWithDefaults() compileTestCase { }, }, Targets: map[string]*structs.DiscoveryTarget{ - "main.default.default.dc1": newTarget("main", "", "default", "default", "dc1", nil), + "main.default.default.dc1": newTarget(structs.DiscoveryTargetOpts{Service: "main"}, nil), }, } @@ -244,7 +245,7 @@ func testcase_JustRouterWithNoDestination() compileTestCase { }, }, Targets: map[string]*structs.DiscoveryTarget{ - "main.default.default.dc1": newTarget("main", "", "default", "default", "dc1", nil), + "main.default.default.dc1": newTarget(structs.DiscoveryTargetOpts{Service: "main"}, nil), }, } @@ -294,7 +295,7 @@ func testcase_RouterWithDefaults_NoSplit_WithResolver() compileTestCase { }, Targets: map[string]*structs.DiscoveryTarget{ "main.default.default.dc1": targetWithConnectTimeout( - newTarget("main", "", "default", "default", "dc1", nil), + newTarget(structs.DiscoveryTargetOpts{Service: "main"}, nil), 33*time.Second, ), }, @@ -361,7 +362,7 @@ func testcase_RouterWithDefaults_WithNoopSplit_DefaultResolver() compileTestCase }, }, Targets: map[string]*structs.DiscoveryTarget{ - "main.default.default.dc1": newTarget("main", "", "default", "default", "dc1", nil), + "main.default.default.dc1": newTarget(structs.DiscoveryTargetOpts{Service: "main"}, nil), }, } @@ -426,7 +427,10 @@ func testcase_NoopSplit_DefaultResolver_ProtocolFromProxyDefaults() compileTestC }, }, Targets: map[string]*structs.DiscoveryTarget{ - "main.default.default.dc1": newTarget("main", "", "default", "default", "dc1", nil), + "main.default.default.dc1": newTarget(structs.DiscoveryTargetOpts{ + Service: "main", + Datacenter: "dc1", + }, nil), }, } @@ -498,7 +502,7 @@ func testcase_RouterWithDefaults_WithNoopSplit_WithResolver() compileTestCase { }, Targets: map[string]*structs.DiscoveryTarget{ "main.default.default.dc1": targetWithConnectTimeout( - newTarget("main", "", "default", "default", "dc1", nil), + newTarget(structs.DiscoveryTargetOpts{Service: "main"}, nil), 33*time.Second, ), }, @@ -584,8 +588,11 @@ func testcase_RouteBypassesSplit() compileTestCase { }, }, Targets: map[string]*structs.DiscoveryTarget{ - "main.default.default.dc1": newTarget("main", "", "default", "default", "dc1", nil), - "bypass.other.default.default.dc1": newTarget("other", "bypass", "default", "default", "dc1", func(t *structs.DiscoveryTarget) { + "main.default.default.dc1": newTarget(structs.DiscoveryTargetOpts{Service: "main"}, nil), + "bypass.other.default.default.dc1": 
newTarget(structs.DiscoveryTargetOpts{ + Service: "other", + ServiceSubset: "bypass", + }, func(t *structs.DiscoveryTarget) { t.Subset = structs.ServiceResolverSubset{ Filter: "Service.Meta.version == bypass", } @@ -638,7 +645,7 @@ func testcase_NoopSplit_DefaultResolver() compileTestCase { }, }, Targets: map[string]*structs.DiscoveryTarget{ - "main.default.default.dc1": newTarget("main", "", "default", "default", "dc1", nil), + "main.default.default.dc1": newTarget(structs.DiscoveryTargetOpts{Service: "main"}, nil), }, } @@ -694,7 +701,7 @@ func testcase_NoopSplit_WithResolver() compileTestCase { }, Targets: map[string]*structs.DiscoveryTarget{ "main.default.default.dc1": targetWithConnectTimeout( - newTarget("main", "", "default", "default", "dc1", nil), + newTarget(structs.DiscoveryTargetOpts{Service: "main"}, nil), 33*time.Second, ), }, @@ -776,12 +783,19 @@ func testcase_SubsetSplit() compileTestCase { }, }, Targets: map[string]*structs.DiscoveryTarget{ - "v2.main.default.default.dc1": newTarget("main", "v2", "default", "default", "dc1", func(t *structs.DiscoveryTarget) { + + "v2.main.default.default.dc1": newTarget(structs.DiscoveryTargetOpts{ + Service: "main", + ServiceSubset: "v2", + }, func(t *structs.DiscoveryTarget) { t.Subset = structs.ServiceResolverSubset{ Filter: "Service.Meta.version == 2", } }), - "v1.main.default.default.dc1": newTarget("main", "v1", "default", "default", "dc1", func(t *structs.DiscoveryTarget) { + "v1.main.default.default.dc1": newTarget(structs.DiscoveryTargetOpts{ + Service: "main", + ServiceSubset: "v1", + }, func(t *structs.DiscoveryTarget) { t.Subset = structs.ServiceResolverSubset{ Filter: "Service.Meta.version == 1", } @@ -855,8 +869,8 @@ func testcase_ServiceSplit() compileTestCase { }, }, Targets: map[string]*structs.DiscoveryTarget{ - "foo.default.default.dc1": newTarget("foo", "", "default", "default", "dc1", nil), - "bar.default.default.dc1": newTarget("bar", "", "default", "default", "dc1", nil), + "foo.default.default.dc1": newTarget(structs.DiscoveryTargetOpts{Service: "foo"}, nil), + "bar.default.default.dc1": newTarget(structs.DiscoveryTargetOpts{Service: "bar"}, nil), }, } @@ -935,7 +949,10 @@ func testcase_SplitBypassesSplit() compileTestCase { }, }, Targets: map[string]*structs.DiscoveryTarget{ - "bypassed.next.default.default.dc1": newTarget("next", "bypassed", "default", "default", "dc1", func(t *structs.DiscoveryTarget) { + "bypassed.next.default.default.dc1": newTarget(structs.DiscoveryTargetOpts{ + Service: "next", + ServiceSubset: "bypassed", + }, func(t *structs.DiscoveryTarget) { t.Subset = structs.ServiceResolverSubset{ Filter: "Service.Meta.version == bypass", } @@ -973,7 +990,7 @@ func testcase_ServiceRedirect() compileTestCase { }, }, Targets: map[string]*structs.DiscoveryTarget{ - "other.default.default.dc1": newTarget("other", "", "default", "default", "dc1", nil), + "other.default.default.dc1": newTarget(structs.DiscoveryTargetOpts{Service: "other"}, nil), }, } @@ -1019,7 +1036,10 @@ func testcase_ServiceAndSubsetRedirect() compileTestCase { }, }, Targets: map[string]*structs.DiscoveryTarget{ - "v2.other.default.default.dc1": newTarget("other", "v2", "default", "default", "dc1", func(t *structs.DiscoveryTarget) { + "v2.other.default.default.dc1": newTarget(structs.DiscoveryTargetOpts{ + Service: "other", + ServiceSubset: "v2", + }, func(t *structs.DiscoveryTarget) { t.Subset = structs.ServiceResolverSubset{ Filter: "Service.Meta.version == 2", } @@ -1055,7 +1075,10 @@ func testcase_DatacenterRedirect() compileTestCase 
{ }, }, Targets: map[string]*structs.DiscoveryTarget{ - "main.default.default.dc9": newTarget("main", "", "default", "default", "dc9", nil), + "main.default.default.dc9": newTarget(structs.DiscoveryTargetOpts{ + Service: "main", + Datacenter: "dc9", + }, nil), }, } return compileTestCase{entries: entries, expect: expect} @@ -1095,7 +1118,10 @@ func testcase_DatacenterRedirect_WithMeshGateways() compileTestCase { }, }, Targets: map[string]*structs.DiscoveryTarget{ - "main.default.default.dc9": newTarget("main", "", "default", "default", "dc9", func(t *structs.DiscoveryTarget) { + "main.default.default.dc9": newTarget(structs.DiscoveryTargetOpts{ + Service: "main", + Datacenter: "dc9", + }, func(t *structs.DiscoveryTarget) { t.MeshGateway = structs.MeshGatewayConfig{ Mode: structs.MeshGatewayModeRemote, } @@ -1134,8 +1160,8 @@ func testcase_ServiceFailover() compileTestCase { }, }, Targets: map[string]*structs.DiscoveryTarget{ - "main.default.default.dc1": newTarget("main", "", "default", "default", "dc1", nil), - "backup.default.default.dc1": newTarget("backup", "", "default", "default", "dc1", nil), + "main.default.default.dc1": newTarget(structs.DiscoveryTargetOpts{Service: "main"}, nil), + "backup.default.default.dc1": newTarget(structs.DiscoveryTargetOpts{Service: "backup"}, nil), }, } return compileTestCase{entries: entries, expect: expect} @@ -1177,8 +1203,8 @@ func testcase_ServiceFailoverThroughRedirect() compileTestCase { }, }, Targets: map[string]*structs.DiscoveryTarget{ - "main.default.default.dc1": newTarget("main", "", "default", "default", "dc1", nil), - "actual.default.default.dc1": newTarget("actual", "", "default", "default", "dc1", nil), + "main.default.default.dc1": newTarget(structs.DiscoveryTargetOpts{Service: "main"}, nil), + "actual.default.default.dc1": newTarget(structs.DiscoveryTargetOpts{Service: "actual"}, nil), }, } return compileTestCase{entries: entries, expect: expect} @@ -1220,8 +1246,8 @@ func testcase_Resolver_CircularFailover() compileTestCase { }, }, Targets: map[string]*structs.DiscoveryTarget{ - "main.default.default.dc1": newTarget("main", "", "default", "default", "dc1", nil), - "backup.default.default.dc1": newTarget("backup", "", "default", "default", "dc1", nil), + "main.default.default.dc1": newTarget(structs.DiscoveryTargetOpts{Service: "main"}, nil), + "backup.default.default.dc1": newTarget(structs.DiscoveryTargetOpts{Service: "backup"}, nil), }, } return compileTestCase{entries: entries, expect: expect} @@ -1261,8 +1287,11 @@ func testcase_ServiceAndSubsetFailover() compileTestCase { }, }, Targets: map[string]*structs.DiscoveryTarget{ - "main.default.default.dc1": newTarget("main", "", "default", "default", "dc1", nil), - "backup.main.default.default.dc1": newTarget("main", "backup", "default", "default", "dc1", func(t *structs.DiscoveryTarget) { + "main.default.default.dc1": newTarget(structs.DiscoveryTargetOpts{Service: "main"}, nil), + "backup.main.default.default.dc1": newTarget(structs.DiscoveryTargetOpts{ + Service: "main", + ServiceSubset: "backup", + }, func(t *structs.DiscoveryTarget) { t.Subset = structs.ServiceResolverSubset{ Filter: "Service.Meta.version == backup", } @@ -1301,9 +1330,15 @@ func testcase_DatacenterFailover() compileTestCase { }, }, Targets: map[string]*structs.DiscoveryTarget{ - "main.default.default.dc1": newTarget("main", "", "default", "default", "dc1", nil), - "main.default.default.dc2": newTarget("main", "", "default", "default", "dc2", nil), - "main.default.default.dc4": newTarget("main", "", "default", 
"default", "dc4", nil), + "main.default.default.dc1": newTarget(structs.DiscoveryTargetOpts{Service: "main"}, nil), + "main.default.default.dc2": newTarget(structs.DiscoveryTargetOpts{ + Service: "main", + Datacenter: "dc2", + }, nil), + "main.default.default.dc4": newTarget(structs.DiscoveryTargetOpts{ + Service: "main", + Datacenter: "dc4", + }, nil), }, } return compileTestCase{entries: entries, expect: expect} @@ -1350,17 +1385,105 @@ func testcase_DatacenterFailover_WithMeshGateways() compileTestCase { }, }, Targets: map[string]*structs.DiscoveryTarget{ - "main.default.default.dc1": newTarget("main", "", "default", "default", "dc1", func(t *structs.DiscoveryTarget) { + "main.default.default.dc1": newTarget(structs.DiscoveryTargetOpts{Service: "main"}, func(t *structs.DiscoveryTarget) { t.MeshGateway = structs.MeshGatewayConfig{ Mode: structs.MeshGatewayModeRemote, } }), - "main.default.default.dc2": newTarget("main", "", "default", "default", "dc2", func(t *structs.DiscoveryTarget) { + "main.default.default.dc2": newTarget(structs.DiscoveryTargetOpts{ + Service: "main", + Datacenter: "dc2", + }, func(t *structs.DiscoveryTarget) { t.MeshGateway = structs.MeshGatewayConfig{ Mode: structs.MeshGatewayModeRemote, } }), - "main.default.default.dc4": newTarget("main", "", "default", "default", "dc4", func(t *structs.DiscoveryTarget) { + "main.default.default.dc4": newTarget(structs.DiscoveryTargetOpts{ + Service: "main", + Datacenter: "dc4", + }, func(t *structs.DiscoveryTarget) { + t.MeshGateway = structs.MeshGatewayConfig{ + Mode: structs.MeshGatewayModeRemote, + } + }), + }, + } + return compileTestCase{entries: entries, expect: expect} +} + +func testcase_Failover_Targets() compileTestCase { + entries := newEntries() + + entries.AddProxyDefaults(&structs.ProxyConfigEntry{ + Kind: structs.ProxyDefaults, + Name: structs.ProxyConfigGlobal, + MeshGateway: structs.MeshGatewayConfig{ + Mode: structs.MeshGatewayModeRemote, + }, + }) + + entries.AddResolvers( + &structs.ServiceResolverConfigEntry{ + Kind: "service-resolver", + Name: "main", + Failover: map[string]structs.ServiceResolverFailover{ + "*": { + Targets: []structs.ServiceResolverFailoverTarget{ + {Datacenter: "dc3"}, + {Service: "new-main"}, + {Peer: "cluster-01"}, + }, + }, + }, + }, + ) + + expect := &structs.CompiledDiscoveryChain{ + Protocol: "tcp", + StartNode: "resolver:main.default.default.dc1", + Nodes: map[string]*structs.DiscoveryGraphNode{ + "resolver:main.default.default.dc1": { + Type: structs.DiscoveryGraphNodeTypeResolver, + Name: "main.default.default.dc1", + Resolver: &structs.DiscoveryResolver{ + ConnectTimeout: 5 * time.Second, + Target: "main.default.default.dc1", + Failover: &structs.DiscoveryFailover{ + Targets: []string{ + "main.default.default.dc3", + "new-main.default.default.dc1", + "main.default.default.external.cluster-01", + }, + }, + }, + }, + }, + Targets: map[string]*structs.DiscoveryTarget{ + "main.default.default.dc1": newTarget(structs.DiscoveryTargetOpts{Service: "main"}, func(t *structs.DiscoveryTarget) { + t.MeshGateway = structs.MeshGatewayConfig{ + Mode: structs.MeshGatewayModeRemote, + } + }), + "main.default.default.dc3": newTarget(structs.DiscoveryTargetOpts{ + Service: "main", + Datacenter: "dc3", + }, func(t *structs.DiscoveryTarget) { + t.MeshGateway = structs.MeshGatewayConfig{ + Mode: structs.MeshGatewayModeRemote, + } + }), + "new-main.default.default.dc1": newTarget(structs.DiscoveryTargetOpts{Service: "new-main"}, func(t *structs.DiscoveryTarget) { + t.MeshGateway = 
structs.MeshGatewayConfig{ + Mode: structs.MeshGatewayModeRemote, + } + }), + "main.default.default.external.cluster-01": newTarget(structs.DiscoveryTargetOpts{ + Service: "main", + Peer: "cluster-01", + }, func(t *structs.DiscoveryTarget) { + t.SNI = "" + t.Name = "" + t.Datacenter = "" t.MeshGateway = structs.MeshGatewayConfig{ Mode: structs.MeshGatewayModeRemote, } @@ -1422,7 +1545,10 @@ func testcase_NoopSplit_WithDefaultSubset() compileTestCase { }, }, Targets: map[string]*structs.DiscoveryTarget{ - "v2.main.default.default.dc1": newTarget("main", "v2", "default", "default", "dc1", func(t *structs.DiscoveryTarget) { + "v2.main.default.default.dc1": newTarget(structs.DiscoveryTargetOpts{ + Service: "main", + ServiceSubset: "v2", + }, func(t *structs.DiscoveryTarget) { t.Subset = structs.ServiceResolverSubset{ Filter: "Service.Meta.version == 2", } @@ -1452,7 +1578,7 @@ func testcase_DefaultResolver() compileTestCase { }, Targets: map[string]*structs.DiscoveryTarget{ // TODO-TARGET - "main.default.default.dc1": newTarget("main", "", "default", "default", "dc1", nil), + "main.default.default.dc1": newTarget(structs.DiscoveryTargetOpts{Service: "main"}, nil), }, } return compileTestCase{entries: entries, expect: expect} @@ -1488,7 +1614,7 @@ func testcase_DefaultResolver_WithProxyDefaults() compileTestCase { }, }, Targets: map[string]*structs.DiscoveryTarget{ - "main.default.default.dc1": newTarget("main", "", "default", "default", "dc1", func(t *structs.DiscoveryTarget) { + "main.default.default.dc1": newTarget(structs.DiscoveryTargetOpts{Service: "main"}, func(t *structs.DiscoveryTarget) { t.MeshGateway = structs.MeshGatewayConfig{ Mode: structs.MeshGatewayModeRemote, } @@ -1530,7 +1656,7 @@ func testcase_ServiceMetaProjection() compileTestCase { }, }, Targets: map[string]*structs.DiscoveryTarget{ - "main.default.default.dc1": newTarget("main", "", "default", "default", "dc1", nil), + "main.default.default.dc1": newTarget(structs.DiscoveryTargetOpts{Service: "main"}, nil), }, } @@ -1588,7 +1714,7 @@ func testcase_ServiceMetaProjectionWithRedirect() compileTestCase { }, }, Targets: map[string]*structs.DiscoveryTarget{ - "other.default.default.dc1": newTarget("other", "", "default", "default", "dc1", nil), + "other.default.default.dc1": newTarget(structs.DiscoveryTargetOpts{Service: "other"}, nil), }, } @@ -1623,7 +1749,7 @@ func testcase_RedirectToDefaultResolverIsNotDefaultChain() compileTestCase { }, }, Targets: map[string]*structs.DiscoveryTarget{ - "other.default.default.dc1": newTarget("other", "", "default", "default", "dc1", nil), + "other.default.default.dc1": newTarget(structs.DiscoveryTargetOpts{Service: "other"}, nil), }, } @@ -1658,7 +1784,10 @@ func testcase_Resolve_WithDefaultSubset() compileTestCase { }, }, Targets: map[string]*structs.DiscoveryTarget{ - "v2.main.default.default.dc1": newTarget("main", "v2", "default", "default", "dc1", func(t *structs.DiscoveryTarget) { + "v2.main.default.default.dc1": newTarget(structs.DiscoveryTargetOpts{ + Service: "main", + ServiceSubset: "v2", + }, func(t *structs.DiscoveryTarget) { t.Subset = structs.ServiceResolverSubset{ Filter: "Service.Meta.version == 2", } @@ -1692,7 +1821,7 @@ func testcase_DefaultResolver_ExternalSNI() compileTestCase { }, }, Targets: map[string]*structs.DiscoveryTarget{ - "main.default.default.dc1": newTarget("main", "", "default", "default", "dc1", func(t *structs.DiscoveryTarget) { + "main.default.default.dc1": newTarget(structs.DiscoveryTargetOpts{Service: "main"}, func(t *structs.DiscoveryTarget) { t.SNI 
= "main.some.other.service.mesh" t.External = true }), @@ -1857,11 +1986,17 @@ func testcase_MultiDatacenterCanary() compileTestCase { }, Targets: map[string]*structs.DiscoveryTarget{ "main.default.default.dc2": targetWithConnectTimeout( - newTarget("main", "", "default", "default", "dc2", nil), + newTarget(structs.DiscoveryTargetOpts{ + Service: "main", + Datacenter: "dc2", + }, nil), 33*time.Second, ), "main.default.default.dc3": targetWithConnectTimeout( - newTarget("main", "", "default", "default", "dc3", nil), + newTarget(structs.DiscoveryTargetOpts{ + Service: "main", + Datacenter: "dc3", + }, nil), 33*time.Second, ), }, @@ -2155,27 +2290,42 @@ func testcase_AllBellsAndWhistles() compileTestCase { }, }, Targets: map[string]*structs.DiscoveryTarget{ - "prod.redirected.default.default.dc1": newTarget("redirected", "prod", "default", "default", "dc1", func(t *structs.DiscoveryTarget) { + "prod.redirected.default.default.dc1": newTarget(structs.DiscoveryTargetOpts{ + Service: "redirected", + ServiceSubset: "prod", + }, func(t *structs.DiscoveryTarget) { t.Subset = structs.ServiceResolverSubset{ Filter: "ServiceMeta.env == prod", } }), - "v1.main.default.default.dc1": newTarget("main", "v1", "default", "default", "dc1", func(t *structs.DiscoveryTarget) { + "v1.main.default.default.dc1": newTarget(structs.DiscoveryTargetOpts{ + Service: "main", + ServiceSubset: "v1", + }, func(t *structs.DiscoveryTarget) { t.Subset = structs.ServiceResolverSubset{ Filter: "Service.Meta.version == 1", } }), - "v2.main.default.default.dc1": newTarget("main", "v2", "default", "default", "dc1", func(t *structs.DiscoveryTarget) { + "v2.main.default.default.dc1": newTarget(structs.DiscoveryTargetOpts{ + Service: "main", + ServiceSubset: "v2", + }, func(t *structs.DiscoveryTarget) { t.Subset = structs.ServiceResolverSubset{ Filter: "Service.Meta.version == 2", } }), - "v3.main.default.default.dc1": newTarget("main", "v3", "default", "default", "dc1", func(t *structs.DiscoveryTarget) { + "v3.main.default.default.dc1": newTarget(structs.DiscoveryTargetOpts{ + Service: "main", + ServiceSubset: "v3", + }, func(t *structs.DiscoveryTarget) { t.Subset = structs.ServiceResolverSubset{ Filter: "Service.Meta.version == 3", } }), - "default-subset.main.default.default.dc1": newTarget("main", "default-subset", "default", "default", "dc1", func(t *structs.DiscoveryTarget) { + "default-subset.main.default.default.dc1": newTarget(structs.DiscoveryTargetOpts{ + Service: "main", + ServiceSubset: "default-subset", + }, func(t *structs.DiscoveryTarget) { t.Subset = structs.ServiceResolverSubset{OnlyPassing: true} }), }, @@ -2379,7 +2529,7 @@ func testcase_ResolverProtocolOverride() compileTestCase { }, Targets: map[string]*structs.DiscoveryTarget{ // TODO-TARGET - "main.default.default.dc1": newTarget("main", "", "default", "default", "dc1", nil), + "main.default.default.dc1": newTarget(structs.DiscoveryTargetOpts{Service: "main"}, nil), }, } return compileTestCase{entries: entries, expect: expect, @@ -2413,7 +2563,7 @@ func testcase_ResolverProtocolOverrideIgnored() compileTestCase { }, Targets: map[string]*structs.DiscoveryTarget{ // TODO-TARGET - "main.default.default.dc1": newTarget("main", "", "default", "default", "dc1", nil), + "main.default.default.dc1": newTarget(structs.DiscoveryTargetOpts{Service: "main"}, nil), }, } return compileTestCase{entries: entries, expect: expect, @@ -2451,7 +2601,7 @@ func testcase_RouterIgnored_ResolverProtocolOverride() compileTestCase { }, Targets: map[string]*structs.DiscoveryTarget{ // 
TODO-TARGET - "main.default.default.dc1": newTarget("main", "", "default", "default", "dc1", nil), + "main.default.default.dc1": newTarget(structs.DiscoveryTargetOpts{Service: "main"}, nil), }, } return compileTestCase{entries: entries, expect: expect, @@ -2685,9 +2835,9 @@ func testcase_LBSplitterAndResolver() compileTestCase { }, }, Targets: map[string]*structs.DiscoveryTarget{ - "foo.default.default.dc1": newTarget("foo", "", "default", "default", "dc1", nil), - "bar.default.default.dc1": newTarget("bar", "", "default", "default", "dc1", nil), - "baz.default.default.dc1": newTarget("baz", "", "default", "default", "dc1", nil), + "foo.default.default.dc1": newTarget(structs.DiscoveryTargetOpts{Service: "foo"}, nil), + "bar.default.default.dc1": newTarget(structs.DiscoveryTargetOpts{Service: "bar"}, nil), + "baz.default.default.dc1": newTarget(structs.DiscoveryTargetOpts{Service: "baz"}, nil), }, } @@ -2743,7 +2893,7 @@ func testcase_LBResolver() compileTestCase { }, }, Targets: map[string]*structs.DiscoveryTarget{ - "main.default.default.dc1": newTarget("main", "", "default", "default", "dc1", nil), + "main.default.default.dc1": newTarget(structs.DiscoveryTargetOpts{Service: "main"}, nil), }, } @@ -2791,8 +2941,17 @@ func newEntries() *configentry.DiscoveryChainSet { } } -func newTarget(service, serviceSubset, namespace, partition, datacenter string, modFn func(t *structs.DiscoveryTarget)) *structs.DiscoveryTarget { - t := structs.NewDiscoveryTarget(service, serviceSubset, namespace, partition, datacenter) +func newTarget(opts structs.DiscoveryTargetOpts, modFn func(t *structs.DiscoveryTarget)) *structs.DiscoveryTarget { + if opts.Namespace == "" { + opts.Namespace = "default" + } + if opts.Partition == "" { + opts.Partition = "default" + } + if opts.Datacenter == "" { + opts.Datacenter = "dc1" + } + t := structs.NewDiscoveryTarget(opts) t.SNI = connect.TargetSNI(t, "trustdomain.consul") t.Name = t.SNI t.ConnectTimeout = 5 * time.Second // default diff --git a/agent/consul/state/peering_test.go b/agent/consul/state/peering_test.go index b48e4f80d9..bfce75295c 100644 --- a/agent/consul/state/peering_test.go +++ b/agent/consul/state/peering_test.go @@ -1461,7 +1461,13 @@ func TestStateStore_ExportedServicesForPeer(t *testing.T) { } newTarget := func(service, serviceSubset, datacenter string) *structs.DiscoveryTarget { - t := structs.NewDiscoveryTarget(service, serviceSubset, "default", "default", datacenter) + t := structs.NewDiscoveryTarget(structs.DiscoveryTargetOpts{ + Service: service, + ServiceSubset: serviceSubset, + Partition: "default", + Namespace: "default", + Datacenter: datacenter, + }) t.SNI = connect.TargetSNI(t, connect.TestTrustDomain) t.Name = t.SNI t.ConnectTimeout = 5 * time.Second // default diff --git a/agent/discovery_chain_endpoint_test.go b/agent/discovery_chain_endpoint_test.go index 8b4a7e2723..42c0825916 100644 --- a/agent/discovery_chain_endpoint_test.go +++ b/agent/discovery_chain_endpoint_test.go @@ -27,8 +27,17 @@ func TestDiscoveryChainRead(t *testing.T) { defer a.Shutdown() testrpc.WaitForTestAgent(t, a.RPC, "dc1") - newTarget := func(service, serviceSubset, namespace, partition, datacenter string) *structs.DiscoveryTarget { - t := structs.NewDiscoveryTarget(service, serviceSubset, namespace, partition, datacenter) + newTarget := func(opts structs.DiscoveryTargetOpts) *structs.DiscoveryTarget { + if opts.Namespace == "" { + opts.Namespace = "default" + } + if opts.Partition == "" { + opts.Partition = "default" + } + if opts.Datacenter == "" { + 
opts.Datacenter = "dc1" + } + t := structs.NewDiscoveryTarget(opts) t.SNI = connect.TargetSNI(t, connect.TestClusterID+".consul") t.Name = t.SNI t.ConnectTimeout = 5 * time.Second // default @@ -99,7 +108,7 @@ func TestDiscoveryChainRead(t *testing.T) { }, }, Targets: map[string]*structs.DiscoveryTarget{ - "web.default.default.dc1": newTarget("web", "", "default", "default", "dc1"), + "web.default.default.dc1": newTarget(structs.DiscoveryTargetOpts{Service: "web"}), }, } require.Equal(t, expect, value.Chain) @@ -144,7 +153,7 @@ func TestDiscoveryChainRead(t *testing.T) { }, }, Targets: map[string]*structs.DiscoveryTarget{ - "web.default.default.dc2": newTarget("web", "", "default", "default", "dc2"), + "web.default.default.dc2": newTarget(structs.DiscoveryTargetOpts{Service: "web", Datacenter: "dc2"}), }, } require.Equal(t, expect, value.Chain) @@ -198,7 +207,7 @@ func TestDiscoveryChainRead(t *testing.T) { }, }, Targets: map[string]*structs.DiscoveryTarget{ - "web.default.default.dc1": newTarget("web", "", "default", "default", "dc1"), + "web.default.default.dc1": newTarget(structs.DiscoveryTargetOpts{Service: "web"}), }, } require.Equal(t, expect, value.Chain) @@ -264,11 +273,11 @@ func TestDiscoveryChainRead(t *testing.T) { }, Targets: map[string]*structs.DiscoveryTarget{ "web.default.default.dc1": targetWithConnectTimeout( - newTarget("web", "", "default", "default", "dc1"), + newTarget(structs.DiscoveryTargetOpts{Service: "web"}), 33*time.Second, ), "web.default.default.dc2": targetWithConnectTimeout( - newTarget("web", "", "default", "default", "dc2"), + newTarget(structs.DiscoveryTargetOpts{Service: "web", Datacenter: "dc2"}), 33*time.Second, ), }, @@ -280,7 +289,7 @@ func TestDiscoveryChainRead(t *testing.T) { })) expectTarget_DC1 := targetWithConnectTimeout( - newTarget("web", "", "default", "default", "dc1"), + newTarget(structs.DiscoveryTargetOpts{Service: "web"}), 22*time.Second, ) expectTarget_DC1.MeshGateway = structs.MeshGatewayConfig{ @@ -288,7 +297,7 @@ func TestDiscoveryChainRead(t *testing.T) { } expectTarget_DC2 := targetWithConnectTimeout( - newTarget("web", "", "default", "default", "dc2"), + newTarget(structs.DiscoveryTargetOpts{Service: "web", Datacenter: "dc2"}), 22*time.Second, ) expectTarget_DC2.MeshGateway = structs.MeshGatewayConfig{ diff --git a/agent/proxycfg/naming.go b/agent/proxycfg/naming.go index 3bb0854b04..08ff216edf 100644 --- a/agent/proxycfg/naming.go +++ b/agent/proxycfg/naming.go @@ -63,22 +63,29 @@ func NewUpstreamIDFromServiceID(sid structs.ServiceID) UpstreamID { return id } -// TODO(peering): confirm we don't need peername here func NewUpstreamIDFromTargetID(tid string) UpstreamID { - // Drop the leading subset if one is present in the target ID. - separators := strings.Count(tid, ".") - if separators > 3 { - prefix := tid[:strings.Index(tid, ".")+1] - tid = strings.TrimPrefix(tid, prefix) + var id UpstreamID + split := strings.Split(tid, ".") + + switch { + case split[len(split)-2] == "external": + id = UpstreamID{ + Name: split[0], + EnterpriseMeta: acl.NewEnterpriseMetaWithPartition(split[2], split[1]), + Peer: split[4], + } + case len(split) == 5: + // Drop the leading subset if one is present in the target ID. 
+ split = split[1:] + fallthrough + default: + id = UpstreamID{ + Name: split[0], + EnterpriseMeta: acl.NewEnterpriseMetaWithPartition(split[2], split[1]), + Datacenter: split[3], + } } - split := strings.SplitN(tid, ".", 4) - - id := UpstreamID{ - Name: split[0], - EnterpriseMeta: acl.NewEnterpriseMetaWithPartition(split[2], split[1]), - Datacenter: split[3], - } id.normalize() return id } diff --git a/agent/proxycfg/naming_test.go b/agent/proxycfg/naming_test.go index 23ff241658..2c4f5173a8 100644 --- a/agent/proxycfg/naming_test.go +++ b/agent/proxycfg/naming_test.go @@ -35,6 +35,13 @@ func TestUpstreamIDFromTargetID(t *testing.T) { Datacenter: "dc2", }, }, + "peered": { + tid: "foo.default.default.external.cluster-01", + expect: UpstreamID{ + Name: "foo", + Peer: "cluster-01", + }, + }, } for name, tc := range cases { diff --git a/agent/structs/config_entry_discoverychain.go b/agent/structs/config_entry_discoverychain.go index 8bc0305b00..0ea2609551 100644 --- a/agent/structs/config_entry_discoverychain.go +++ b/agent/structs/config_entry_discoverychain.go @@ -1233,6 +1233,16 @@ type ServiceResolverRedirect struct { Datacenter string `json:",omitempty"` } +func (r *ServiceResolverRedirect) ToDiscoveryTargetOpts() DiscoveryTargetOpts { + return DiscoveryTargetOpts{ + Service: r.Service, + ServiceSubset: r.ServiceSubset, + Namespace: r.Namespace, + Partition: r.Partition, + Datacenter: r.Datacenter, + } +} + // There are some restrictions on what is allowed in here: // // - Service, ServiceSubset, Namespace, Datacenters, and Targets cannot all be @@ -1275,6 +1285,14 @@ type ServiceResolverFailover struct { Targets []ServiceResolverFailoverTarget `json:",omitempty"` } +func (t *ServiceResolverFailover) ToDiscoveryTargetOpts() DiscoveryTargetOpts { + return DiscoveryTargetOpts{ + Service: t.Service, + ServiceSubset: t.ServiceSubset, + Namespace: t.Namespace, + } +} + func (f *ServiceResolverFailover) isEmpty() bool { return f.Service == "" && f.ServiceSubset == "" && f.Namespace == "" && len(f.Datacenters) == 0 && len(f.Targets) == 0 } @@ -1299,6 +1317,17 @@ type ServiceResolverFailoverTarget struct { Peer string `json:",omitempty"` } +func (t *ServiceResolverFailoverTarget) ToDiscoveryTargetOpts() DiscoveryTargetOpts { + return DiscoveryTargetOpts{ + Service: t.Service, + ServiceSubset: t.ServiceSubset, + Namespace: t.Namespace, + Partition: t.Partition, + Datacenter: t.Datacenter, + Peer: t.Peer, + } +} + // LoadBalancer determines the load balancing policy and configuration for services // issuing requests to this upstream service. type LoadBalancer struct { diff --git a/agent/structs/discovery_chain.go b/agent/structs/discovery_chain.go index 2bbe88f9ed..ca64d070d6 100644 --- a/agent/structs/discovery_chain.go +++ b/agent/structs/discovery_chain.go @@ -56,7 +56,12 @@ type CompiledDiscoveryChain struct { // ID returns an ID that encodes the service, namespace, partition, and datacenter. // This ID allows us to compare a discovery chain target to the chain upstream itself. 
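// For example (illustrative): "web.default.default.dc1".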
func (c *CompiledDiscoveryChain) ID() string { - return chainID("", c.ServiceName, c.Namespace, c.Partition, c.Datacenter) + return chainID(DiscoveryTargetOpts{ + Service: c.ServiceName, + Namespace: c.Namespace, + Partition: c.Partition, + Datacenter: c.Datacenter, + }) } func (c *CompiledDiscoveryChain) CompoundServiceName() ServiceName { @@ -185,6 +190,7 @@ type DiscoveryTarget struct { Namespace string `json:",omitempty"` Partition string `json:",omitempty"` Datacenter string `json:",omitempty"` + Peer string `json:",omitempty"` MeshGateway MeshGatewayConfig `json:",omitempty"` Subset ServiceResolverSubset `json:",omitempty"` @@ -240,28 +246,52 @@ func (t *DiscoveryTarget) UnmarshalJSON(data []byte) error { return nil } -func NewDiscoveryTarget(service, serviceSubset, namespace, partition, datacenter string) *DiscoveryTarget { +type DiscoveryTargetOpts struct { + Service string + ServiceSubset string + Namespace string + Partition string + Datacenter string + Peer string +} + +func NewDiscoveryTarget(opts DiscoveryTargetOpts) *DiscoveryTarget { t := &DiscoveryTarget{ - Service: service, - ServiceSubset: serviceSubset, - Namespace: namespace, - Partition: partition, - Datacenter: datacenter, + Service: opts.Service, + ServiceSubset: opts.ServiceSubset, + Namespace: opts.Namespace, + Partition: opts.Partition, + Datacenter: opts.Datacenter, + Peer: opts.Peer, } t.setID() return t } -func chainID(subset, service, namespace, partition, dc string) string { - // NOTE: this format is similar to the SNI syntax for simplicity - if subset == "" { - return fmt.Sprintf("%s.%s.%s.%s", service, namespace, partition, dc) +func (t *DiscoveryTarget) ToDiscoveryTargetOpts() DiscoveryTargetOpts { + return DiscoveryTargetOpts{ + Service: t.Service, + ServiceSubset: t.ServiceSubset, + Namespace: t.Namespace, + Partition: t.Partition, + Datacenter: t.Datacenter, + Peer: t.Peer, } - return fmt.Sprintf("%s.%s.%s.%s.%s", subset, service, namespace, partition, dc) +} + +func chainID(opts DiscoveryTargetOpts) string { + // NOTE: this format is similar to the SNI syntax for simplicity + if opts.Peer != "" { + return fmt.Sprintf("%s.%s.default.external.%s", opts.Service, opts.Namespace, opts.Peer) + } + if opts.ServiceSubset == "" { + return fmt.Sprintf("%s.%s.%s.%s", opts.Service, opts.Namespace, opts.Partition, opts.Datacenter) + } + return fmt.Sprintf("%s.%s.%s.%s.%s", opts.ServiceSubset, opts.Service, opts.Namespace, opts.Partition, opts.Datacenter) } func (t *DiscoveryTarget) setID() { - t.ID = chainID(t.ServiceSubset, t.Service, t.Namespace, t.Partition, t.Datacenter) + t.ID = chainID(t.ToDiscoveryTargetOpts()) } func (t *DiscoveryTarget) String() string { diff --git a/agent/xds/failover_math_test.go b/agent/xds/failover_math_test.go index 29ac17ffe1..296d1cc77f 100644 --- a/agent/xds/failover_math_test.go +++ b/agent/xds/failover_math_test.go @@ -15,15 +15,40 @@ func TestFirstHealthyTarget(t *testing.T) { warning := proxycfg.TestUpstreamNodesInStatus(t, "warning") critical := proxycfg.TestUpstreamNodesInStatus(t, "critical") - warnOnlyPassingTarget := structs.NewDiscoveryTarget("all-warn", "", "default", "default", "dc1") + warnOnlyPassingTarget := structs.NewDiscoveryTarget(structs.DiscoveryTargetOpts{ + Service: "all-warn", + Namespace: "default", + Partition: "default", + Datacenter: "dc1", + }) warnOnlyPassingTarget.Subset.OnlyPassing = true - failOnlyPassingTarget := structs.NewDiscoveryTarget("all-fail", "", "default", "default", "dc1") + failOnlyPassingTarget := 
structs.NewDiscoveryTarget(structs.DiscoveryTargetOpts{ + Service: "all-fail", + Namespace: "default", + Partition: "default", + Datacenter: "dc1", + }) failOnlyPassingTarget.Subset.OnlyPassing = true targets := map[string]*structs.DiscoveryTarget{ - "all-ok.default.dc1": structs.NewDiscoveryTarget("all-ok", "", "default", "default", "dc1"), - "all-warn.default.dc1": structs.NewDiscoveryTarget("all-warn", "", "default", "default", "dc1"), - "all-fail.default.default.dc1": structs.NewDiscoveryTarget("all-fail", "", "default", "default", "dc1"), + "all-ok.default.dc1": structs.NewDiscoveryTarget(structs.DiscoveryTargetOpts{ + Service: "all-ok", + Namespace: "default", + Partition: "default", + Datacenter: "dc1", + }), + "all-warn.default.dc1": structs.NewDiscoveryTarget(structs.DiscoveryTargetOpts{ + Service: "all-warn", + Namespace: "default", + Partition: "default", + Datacenter: "dc1", + }), + "all-fail.default.default.dc1": structs.NewDiscoveryTarget(structs.DiscoveryTargetOpts{ + Service: "all-fail", + Namespace: "default", + Partition: "default", + Datacenter: "dc1", + }), "all-warn-onlypassing.default.dc1": warnOnlyPassingTarget, "all-fail-onlypassing.default.dc1": failOnlyPassingTarget, } From 589e7cfab4e8b4e214d1199919025fdbf360b285 Mon Sep 17 00:00:00 2001 From: Jared Kirschner Date: Tue, 2 Aug 2022 06:30:06 -0700 Subject: [PATCH 08/93] docs: improve consistency of DNS lookup variables Previously, some variables were wrapped in < > while others were not, creating ambiguity in whether some labels were a string literal or a variable. Now, all variables are wrapped in < >. --- website/content/docs/discovery/dns.mdx | 69 +++++++++++++++----------- 1 file changed, 40 insertions(+), 29 deletions(-) diff --git a/website/content/docs/discovery/dns.mdx b/website/content/docs/discovery/dns.mdx index f08a43c6ed..b50e8deeeb 100644 --- a/website/content/docs/discovery/dns.mdx +++ b/website/content/docs/discovery/dns.mdx @@ -52,7 +52,7 @@ There are fundamentally two types of queries: node lookups and service lookups. A node lookup, a simple query for the address of a named node, looks like this: ```text -.node[.datacenter]. +.node[.]. ``` For example, if we have a `foo` node with default settings, we could @@ -79,16 +79,16 @@ $ dig @127.0.0.1 -p 8600 foo.node.consul ANY ;; WARNING: recursion requested but not available ;; QUESTION SECTION: -;foo.node.consul. IN ANY +;foo.node.consul. IN ANY ;; ANSWER SECTION: -foo.node.consul. 0 IN A 10.1.10.12 -foo.node.consul. 0 IN TXT "meta_key=meta_value" -foo.node.consul. 0 IN TXT "value only" +foo.node.consul. 0 IN A 10.1.10.12 +foo.node.consul. 0 IN TXT "meta_key=meta_value" +foo.node.consul. 0 IN TXT "value only" ;; AUTHORITY SECTION: -consul. 0 IN SOA ns.consul. postmaster.consul. 1392836399 3600 600 86400 0 +consul. 0 IN SOA ns.consul. postmaster.consul. 1392836399 3600 600 86400 0 ``` By default the TXT records value will match the node's metadata key-value @@ -121,7 +121,7 @@ it is recommended to use the HTTP API to retrieve the list of nodes. The format of a standard service lookup is: ```text -[tag.].service[.datacenter]. +[.].service[.]. ``` The `tag` is optional, and, as with node lookups, the `datacenter` is as @@ -157,26 +157,37 @@ $ dig @127.0.0.1 -p 8600 consul.service.consul SRV ;; WARNING: recursion requested but not available ;; QUESTION SECTION: -;consul.service.consul. IN SRV +;consul.service.consul. IN SRV ;; ANSWER SECTION: -consul.service.consul. 0 IN SRV 1 1 8300 foobar.node.dc1.consul. +consul.service.consul. 
0 IN SRV 1 1 8300 foobar.node.dc1.consul. ;; ADDITIONAL SECTION: -foobar.node.dc1.consul. 0 IN A 10.1.10.12 +foobar.node.dc1.consul. 0 IN A 10.1.10.12 ``` ### RFC 2782 Lookup -The format for RFC 2782 SRV lookups is: +Valid formats for RFC 2782 SRV lookups depend on +whether you want to filter results based on a service tag: - _._[.service][.datacenter][.domain] +- No filtering on service tag -Per [RFC 2782](https://tools.ietf.org/html/rfc2782), SRV queries should use -underscores, `_`, as a prefix to the `service` and `protocol` values in a query to -prevent DNS collisions. The `protocol` value can be any of the tags for a -service. If the service has no tags, `tcp` should be used. If `tcp` -is specified as the protocol, the query will not perform any tag filtering. + ```text + _._tcp[.service][.]. + ``` + +- Filtering on service tag specified in the RFC 2782 protocol field + + ```text + _._[.service][.]. + ``` + +Per [RFC 2782](https://tools.ietf.org/html/rfc2782), SRV queries must +prepend an underscore (`_`) to the `service` and `protocol` values in a query to +prevent DNS collisions. +To perform no tag-based filtering, specify `tcp` in the RFC 2782 protocol field. +To filter results on a service tag, specify the tag in the RFC 2782 protocol field. Other than the query format and default `tcp` protocol/tag value, the behavior of the RFC style lookup is the same as the standard style of lookup. @@ -196,13 +207,13 @@ $ dig @127.0.0.1 -p 8600 _rabbitmq._amqp.service.consul SRV ;; WARNING: recursion requested but not available ;; QUESTION SECTION: -;_rabbitmq._amqp.service.consul. IN SRV +;_rabbitmq._amqp.service.consul. IN SRV ;; ANSWER SECTION: -_rabbitmq._amqp.service.consul. 0 IN SRV 1 1 5672 rabbitmq.node1.dc1.consul. +_rabbitmq._amqp.service.consul. 0 IN SRV 1 1 5672 rabbitmq.node1.dc1.consul. ;; ADDITIONAL SECTION: -rabbitmq.node1.dc1.consul. 0 IN A 10.1.11.20 +rabbitmq.node1.dc1.consul. 0 IN A 10.1.11.20 ``` Again, note that the SRV record returns the port of the service as well as its IP. @@ -328,7 +339,7 @@ $ echo -n "20010db800010002cafe000000001337" | perl -ne 'printf join(":", unpack The format of a prepared query lookup is: ```text -.query[.datacenter]. +.query[.]. ``` The `datacenter` is optional, and if not provided, the datacenter of this Consul @@ -376,7 +387,7 @@ If you need more complex behavior, please use the To find the unique virtual IP allocated for a service: ```text -.virtual[.peer]. +.virtual[.]. ``` This will return the unique virtual IP for any [Connect-capable](/docs/connect) @@ -439,14 +450,14 @@ The following responses are returned: ``` ;; QUESTION SECTION: -;consul.service.test-domain. IN SRV +;consul.service.test-domain. IN SRV ;; ANSWER SECTION: -consul.service.test-domain. 0 IN SRV 1 1 8300 machine.node.dc1.test-domain. +consul.service.test-domain. 0 IN SRV 1 1 8300 machine.node.dc1.test-domain. ;; ADDITIONAL SECTION: -machine.node.dc1.test-domain. 0 IN A 127.0.0.1 -machine.node.dc1.test-domain. 0 IN TXT "consul-network-segment=" +machine.node.dc1.test-domain. 0 IN A 127.0.0.1 +machine.node.dc1.test-domain. 0 IN TXT "consul-network-segment=" ``` -> **PTR queries:** Responses to PTR queries (`.in-addr.arpa.`) will always use the @@ -479,7 +490,7 @@ resolve services within the `default` namespace and partition. However, for reso services from other namespaces or partitions the following form can be used: ```text -[tag.].service..ns..ap..dc. +[.].service..ns..ap..dc. ``` This sequence is the canonical naming convention of a Consul Enterprise service. 
At least two of the following @@ -491,14 +502,14 @@ fields must be present: For imported lookups, only the namespace and peer need to be specified as the partition can be inferred from the peering: ```text -.virtual[.namespace][.peer]. +.virtual[.].. ``` For node lookups, only the partition and datacenter need to be specified as nodes cannot be namespaced. ```text -[tag.].node..ap..dc. +[.].node..ap..dc. ``` ## DNS with ACLs From cb1043d8ace7edfa26e8703e79903753f7370ad4 Mon Sep 17 00:00:00 2001 From: Tyler Wendlandt Date: Tue, 23 Aug 2022 13:02:40 -0600 Subject: [PATCH 09/93] ui: Update badge / pill icon sizing (#14282) * Update badge icon sizing to be 16x16 * Update icon sizing in pill component --- .../app/components/consul/external-source/index.scss | 9 +++++++-- .../consul-ui/app/components/consul/kind/index.scss | 1 + ui/packages/consul-ui/app/components/pill/index.scss | 3 +++ 3 files changed, 11 insertions(+), 2 deletions(-) diff --git a/ui/packages/consul-ui/app/components/consul/external-source/index.scss b/ui/packages/consul-ui/app/components/consul/external-source/index.scss index b05acb45b2..b876b48fd4 100644 --- a/ui/packages/consul-ui/app/components/consul/external-source/index.scss +++ b/ui/packages/consul-ui/app/components/consul/external-source/index.scss @@ -1,6 +1,11 @@ .consul-external-source { @extend %pill-200, %frame-gray-600, %p1; } + +.consul-external-source::before { + --icon-size: icon-300; +} + .consul-external-source.kubernetes::before { @extend %with-logo-kubernetes-color-icon, %as-pseudo; } @@ -15,10 +20,10 @@ @extend %with-logo-consul-color-icon, %as-pseudo; } .consul-external-source.vault::before { - @extend %with-vault-100; + @extend %with-vault-300; } .consul-external-source.aws::before { - @extend %with-aws-100; + @extend %with-aws-300; } .consul-external-source.leader::before { @extend %with-star-outline-mask, %as-pseudo; diff --git a/ui/packages/consul-ui/app/components/consul/kind/index.scss b/ui/packages/consul-ui/app/components/consul/kind/index.scss index 7467195f2c..0431ac3068 100644 --- a/ui/packages/consul-ui/app/components/consul/kind/index.scss +++ b/ui/packages/consul-ui/app/components/consul/kind/index.scss @@ -3,4 +3,5 @@ } .consul-kind::before { @extend %with-gateway-mask, %as-pseudo; + --icon-size: icon-300; } diff --git a/ui/packages/consul-ui/app/components/pill/index.scss b/ui/packages/consul-ui/app/components/pill/index.scss index d08626db8b..c528bd9ff3 100644 --- a/ui/packages/consul-ui/app/components/pill/index.scss +++ b/ui/packages/consul-ui/app/components/pill/index.scss @@ -18,6 +18,9 @@ span.policy-node-identity::before { span.policy-service-identity::before { content: 'Service Identity: '; } +%pill::before { + --icon-size: icon-300; +} %pill.leader::before { @extend %with-star-outline-mask, %as-pseudo; } From 24a3975494f98050dd09285103420dd7f3789e5a Mon Sep 17 00:00:00 2001 From: Ashwin Venkatesh Date: Tue, 23 Aug 2022 15:14:36 -0400 Subject: [PATCH 10/93] Updates docs for CRDs (#14267) Co-authored-by: NicoletaPopoviciu --- .../docs/connect/config-entries/ingress-gateway.mdx | 6 ------ website/content/docs/connect/config-entries/mesh.mdx | 11 +---------- .../docs/connect/config-entries/proxy-defaults.mdx | 8 +++----- .../docs/connect/config-entries/service-defaults.mdx | 2 -- 4 files changed, 4 insertions(+), 23 deletions(-) diff --git a/website/content/docs/connect/config-entries/ingress-gateway.mdx b/website/content/docs/connect/config-entries/ingress-gateway.mdx index 78773188de..fa95c5b197 100644 --- 
a/website/content/docs/connect/config-entries/ingress-gateway.mdx +++ b/website/content/docs/connect/config-entries/ingress-gateway.mdx @@ -991,14 +991,12 @@ You can specify the following parameters to configure ingress gateway configurat }, { name: 'TLSMinVersion', - yaml: false, type: 'string: ""', description: "Set the default minimum TLS version supported for the gateway's listeners. One of `TLS_AUTO`, `TLSv1_0`, `TLSv1_1`, `TLSv1_2`, or `TLSv1_3`. If unspecified, Envoy v1.22.0 and newer [will default to TLS 1.2 as a min version](https://github.com/envoyproxy/envoy/pull/19330), while older releases of Envoy default to TLS 1.0.", }, { name: 'TLSMaxVersion', - yaml: false, type: 'string: ""', description: { hcl: @@ -1009,7 +1007,6 @@ You can specify the following parameters to configure ingress gateway configurat }, { name: 'CipherSuites', - yaml: false, type: 'array: ', description: `Set the default list of TLS cipher suites for the gateway's listeners to support when negotiating connections using @@ -1179,21 +1176,18 @@ You can specify the following parameters to configure ingress gateway configurat }, { name: 'TLSMinVersion', - yaml: false, type: 'string: ""', description: 'Set the minimum TLS version supported for this listener. One of `TLS_AUTO`, `TLSv1_0`, `TLSv1_1`, `TLSv1_2`, or `TLSv1_3`. If unspecified, Envoy v1.22.0 and newer [will default to TLS 1.2 as a min version](https://github.com/envoyproxy/envoy/pull/19330), while older releases of Envoy default to TLS 1.0.', }, { name: 'TLSMaxVersion', - yaml: false, type: 'string: ""', description: 'Set the maximum TLS version supported for this listener. Must be greater than or equal to `TLSMinVersion`. One of `TLS_AUTO`, `TLSv1_0`, `TLSv1_1`, `TLSv1_2`, or `TLSv1_3`.', }, { name: 'CipherSuites', - yaml: false, type: 'array: ', description: `Set the list of TLS cipher suites to support when negotiating connections using TLS 1.2 or earlier. If unspecified, diff --git a/website/content/docs/connect/config-entries/mesh.mdx b/website/content/docs/connect/config-entries/mesh.mdx index 8c9f3e718e..e8d6b4de5f 100644 --- a/website/content/docs/connect/config-entries/mesh.mdx +++ b/website/content/docs/connect/config-entries/mesh.mdx @@ -271,7 +271,6 @@ Note that the Kubernetes example does not include a `partition` field. Configura children: [ { name: 'Incoming', - yaml: false, type: 'TLSDirectionConfig: ', description: `TLS configuration for inbound mTLS connections targeting the public listener on \`connect-proxy\` and \`terminating-gateway\` @@ -279,14 +278,12 @@ Note that the Kubernetes example does not include a `partition` field. Configura children: [ { name: 'TLSMinVersion', - yaml: false, type: 'string: ""', description: "Set the default minimum TLS version supported. One of `TLS_AUTO`, `TLSv1_0`, `TLSv1_1`, `TLSv1_2`, or `TLSv1_3`. If unspecified, Envoy v1.22.0 and newer [will default to TLS 1.2 as a min version](https://github.com/envoyproxy/envoy/pull/19330), while older releases of Envoy default to TLS 1.0.", }, { name: 'TLSMaxVersion', - yaml: false, type: 'string: ""', description: { hcl: @@ -297,7 +294,6 @@ Note that the Kubernetes example does not include a `partition` field. Configura }, { name: 'CipherSuites', - yaml: false, type: 'array: ', description: `Set the default list of TLS cipher suites to support when negotiating connections using @@ -315,7 +311,6 @@ Note that the Kubernetes example does not include a `partition` field. 
Configura }, { name: 'Outgoing', - yaml: false, type: 'TLSDirectionConfig: ', description: `TLS configuration for outbound mTLS connections dialing upstreams from \`connect-proxy\` and \`ingress-gateway\` @@ -323,14 +318,12 @@ Note that the Kubernetes example does not include a `partition` field. Configura children: [ { name: 'TLSMinVersion', - yaml: false, type: 'string: ""', description: "Set the default minimum TLS version supported. One of `TLS_AUTO`, `TLSv1_0`, `TLSv1_1`, `TLSv1_2`, or `TLSv1_3`. If unspecified, Envoy v1.22.0 and newer [will default to TLS 1.2 as a min version](https://github.com/envoyproxy/envoy/pull/19330), while older releases of Envoy default to TLS 1.0.", }, { name: 'TLSMaxVersion', - yaml: false, type: 'string: ""', description: { hcl: @@ -341,7 +334,6 @@ Note that the Kubernetes example does not include a `partition` field. Configura }, { name: 'CipherSuites', - yaml: false, type: 'array: ', description: `Set the default list of TLS cipher suites to support when negotiating connections using @@ -366,9 +358,8 @@ Note that the Kubernetes example does not include a `partition` field. Configura children: [ { name: 'SanitizeXForwardedClientCert', - yaml: false, type: 'bool: ', - description: `If configured to \`true\`, the \`forward_client_cert_details\` option will be set to \`SANITIZE\` + description: `If configured to \`true\`, the \`forward_client_cert_details\` option will be set to \`SANITIZE\` for all Envoy proxies. As a result, Consul will not include the \`x-forwarded-client-cert\` header in the next hop. If set to \`false\` (default), the XFCC header is propagated to upstream applications.`, }, diff --git a/website/content/docs/connect/config-entries/proxy-defaults.mdx b/website/content/docs/connect/config-entries/proxy-defaults.mdx index 3be5c850b1..c6f82d7835 100644 --- a/website/content/docs/connect/config-entries/proxy-defaults.mdx +++ b/website/content/docs/connect/config-entries/proxy-defaults.mdx @@ -10,7 +10,7 @@ description: >- # Proxy Defaults -The `proxy-defaults` configuration entry (`ProxyDefaults` on Kubernetes) allows you +The `proxy-defaults` configuration entry (`ProxyDefaults` on Kubernetes) allows you to configure global defaults across all services for Connect proxy configurations. Only one global entry is supported. @@ -28,8 +28,8 @@ service definitions](/docs/connect/registration/sidecar-service). ## Requirements The following Consul binaries are supported: -* Consul 1.8.4+ on Kubernetes. -* Consul 1.5.0+ on other platforms. +* Consul 1.8.4+ on Kubernetes. +* Consul 1.5.0+ on other platforms. ## Usage @@ -321,7 +321,6 @@ spec: \`direct\` represents that the proxy's listeners must be dialed directly by the local application and other proxies. Added in v1.10.0.`, - yaml: false, }, { name: 'TransparentProxy', @@ -333,7 +332,6 @@ spec: type: 'int: "15001"', description: `The port the proxy should listen on for outbound traffic. This must be the port where outbound application traffic is captured and redirected to.`, - yaml: false, }, { name: 'DialedDirectly', diff --git a/website/content/docs/connect/config-entries/service-defaults.mdx b/website/content/docs/connect/config-entries/service-defaults.mdx index 54aabfe8ef..b431e43459 100644 --- a/website/content/docs/connect/config-entries/service-defaults.mdx +++ b/website/content/docs/connect/config-entries/service-defaults.mdx @@ -366,7 +366,6 @@ represents a location outside the Consul cluster. 
They can be dialed directly wh \`direct\` represents that the proxy's listeners must be dialed directly by the local application and other proxies. Added in v1.10.0.`, - yaml: false, }, { name: 'UpstreamConfig', @@ -652,7 +651,6 @@ represents a location outside the Consul cluster. They can be dialed directly wh type: 'int: "15001"', description: `The port the proxy should listen on for outbound traffic. This must be the port where outbound application traffic is redirected to.`, - yaml: false, }, { name: 'DialedDirectly', From 13c04a13af70bcd8c00fcf55003212f584a92799 Mon Sep 17 00:00:00 2001 From: Daniel Upton Date: Thu, 11 Aug 2022 10:19:36 +0100 Subject: [PATCH 11/93] proxycfg: terminate stream on irrecoverable errors This is the OSS portion of enterprise PR 2339. It improves our handling of "irrecoverable" errors in proxycfg data sources. The canonical example of this is what happens when the ACL token presented by Envoy is deleted/revoked. Previously, the stream would get "stuck" until the xDS server re-checked the token (after 5 minutes) and terminated the stream. Materializers would also sit burning resources retrying something that could never succeed. Now, it is possible for data sources to mark errors as "terminal" which causes the xDS stream to be closed immediately. Similarly, the submatview.Store will evict materializers when it observes they have encountered such an error. --- agent/proxycfg-glue/glue.go | 20 +++--- agent/proxycfg-glue/intention_upstreams.go | 7 +-- agent/proxycfg-glue/intentions.go | 17 ++--- agent/proxycfg/data_sources.go | 23 +++++++ agent/proxycfg/manager.go | 39 +++++++++--- agent/proxycfg/state.go | 24 +++++++- agent/submatview/local_materializer.go | 12 ++++ agent/submatview/store.go | 60 +++++++++++++++++- agent/submatview/store_test.go | 72 ++++++++++++++++++++++ agent/xds/delta.go | 18 +++++- agent/xds/server.go | 22 ++++--- 11 files changed, 267 insertions(+), 47 deletions(-) diff --git a/agent/proxycfg-glue/glue.go b/agent/proxycfg-glue/glue.go index 86badf67e4..1b22b02bd4 100644 --- a/agent/proxycfg-glue/glue.go +++ b/agent/proxycfg-glue/glue.go @@ -124,15 +124,21 @@ func (c *cacheProxyDataSource[ReqType]) Notify( func dispatchCacheUpdate(ch chan<- proxycfg.UpdateEvent) cache.Callback { return func(ctx context.Context, e cache.UpdateEvent) { - u := proxycfg.UpdateEvent{ - CorrelationID: e.CorrelationID, - Result: e.Result, - Err: e.Err, - } - select { - case ch <- u: + case ch <- newUpdateEvent(e.CorrelationID, e.Result, e.Err): case <-ctx.Done(): } } } + +func newUpdateEvent(correlationID string, result any, err error) proxycfg.UpdateEvent { + // This roughly matches the logic in agent/submatview.LocalMaterializer.isTerminalError. 
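+	// A deleted or revoked ACL token surfaces as a not-found error. Retrying
+	// can never succeed once the token is gone, so the error is wrapped as
+	// terminal to close the watch and, with it, the xDS stream.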
+ if acl.IsErrNotFound(err) { + err = proxycfg.TerminalError(err) + } + return proxycfg.UpdateEvent{ + CorrelationID: correlationID, + Result: result, + Err: err, + } +} diff --git a/agent/proxycfg-glue/intention_upstreams.go b/agent/proxycfg-glue/intention_upstreams.go index 186d91b357..a694d033b4 100644 --- a/agent/proxycfg-glue/intention_upstreams.go +++ b/agent/proxycfg-glue/intention_upstreams.go @@ -54,13 +54,8 @@ func (s serverIntentionUpstreams) Notify(ctx context.Context, req *structs.Servi func dispatchBlockingQueryUpdate[ResultType any](ch chan<- proxycfg.UpdateEvent) func(context.Context, string, ResultType, error) { return func(ctx context.Context, correlationID string, result ResultType, err error) { - event := proxycfg.UpdateEvent{ - CorrelationID: correlationID, - Result: result, - Err: err, - } select { - case ch <- event: + case ch <- newUpdateEvent(correlationID, result, err): case <-ctx.Done(): } } diff --git a/agent/proxycfg-glue/intentions.go b/agent/proxycfg-glue/intentions.go index 57f48bdae9..69652d922d 100644 --- a/agent/proxycfg-glue/intentions.go +++ b/agent/proxycfg-glue/intentions.go @@ -39,12 +39,8 @@ func (c cacheIntentions) Notify(ctx context.Context, req *structs.ServiceSpecifi QueryOptions: structs.QueryOptions{Token: req.QueryOptions.Token}, } return c.c.NotifyCallback(ctx, cachetype.IntentionMatchName, query, correlationID, func(ctx context.Context, event cache.UpdateEvent) { - e := proxycfg.UpdateEvent{ - CorrelationID: correlationID, - Err: event.Err, - } - - if e.Err == nil { + var result any + if event.Err == nil { rsp, ok := event.Result.(*structs.IndexedIntentionMatches) if !ok { return @@ -54,11 +50,11 @@ func (c cacheIntentions) Notify(ctx context.Context, req *structs.ServiceSpecifi if len(rsp.Matches) != 0 { matches = rsp.Matches[0] } - e.Result = matches + result = matches } select { - case ch <- e: + case ch <- newUpdateEvent(correlationID, result, event.Err): case <-ctx.Done(): } }) @@ -110,10 +106,7 @@ func (s *serverIntentions) Notify(ctx context.Context, req *structs.ServiceSpeci sort.Sort(structs.IntentionPrecedenceSorter(intentions)) - return proxycfg.UpdateEvent{ - CorrelationID: correlationID, - Result: intentions, - }, true + return newUpdateEvent(correlationID, intentions, nil), true } for subjectIdx, subject := range subjects { diff --git a/agent/proxycfg/data_sources.go b/agent/proxycfg/data_sources.go index bda0226ffb..3649bed2d3 100644 --- a/agent/proxycfg/data_sources.go +++ b/agent/proxycfg/data_sources.go @@ -2,6 +2,7 @@ package proxycfg import ( "context" + "errors" cachetype "github.com/hashicorp/consul/agent/cache-types" "github.com/hashicorp/consul/agent/structs" @@ -15,6 +16,28 @@ type UpdateEvent struct { Err error } +// TerminalError wraps the given error to indicate that the data source is in +// an irrecoverably broken state (e.g. because the given ACL token has been +// deleted). +// +// Setting UpdateEvent.Err to a TerminalError causes all watches to be canceled +// which, in turn, terminates the xDS streams. +func TerminalError(err error) error { + return terminalError{err} +} + +// IsTerminalError returns whether the given error indicates that the data +// source is in an irrecoverably broken state so watches should be torn down +// and retried at a higher level. 
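+// errors.As walks the error's Unwrap chain, so a terminal error is still
+// detected even if an intermediate layer wraps it again.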
+func IsTerminalError(err error) bool { + return errors.As(err, &terminalError{}) +} + +type terminalError struct{ err error } + +func (e terminalError) Error() string { return e.err.Error() } +func (e terminalError) Unwrap() error { return e.err } + // DataSources contains the dependencies used to consume data used to configure // proxies. type DataSources struct { diff --git a/agent/proxycfg/manager.go b/agent/proxycfg/manager.go index 3de11b3f8a..efdfe4b724 100644 --- a/agent/proxycfg/manager.go +++ b/agent/proxycfg/manager.go @@ -127,7 +127,7 @@ func (m *Manager) Register(id ProxyID, ns *structs.NodeService, source ProxySour } // We are updating the proxy, close its old state - state.Close() + state.Close(false) } // TODO: move to a function that translates ManagerConfig->stateConfig @@ -148,14 +148,13 @@ func (m *Manager) Register(id ProxyID, ns *structs.NodeService, source ProxySour return err } - ch, err := state.Watch() - if err != nil { + if _, err = state.Watch(); err != nil { return err } m.proxies[id] = state // Start a goroutine that will wait for changes and broadcast them to watchers. - go m.notifyBroadcast(ch) + go m.notifyBroadcast(id, state) return nil } @@ -175,8 +174,8 @@ func (m *Manager) Deregister(id ProxyID, source ProxySource) { } // Closing state will let the goroutine we started in Register finish since - // watch chan is closed. - state.Close() + // watch chan is closed + state.Close(false) delete(m.proxies, id) // We intentionally leave potential watchers hanging here - there is no new @@ -186,11 +185,17 @@ func (m *Manager) Deregister(id ProxyID, source ProxySource) { // cleaned up naturally. } -func (m *Manager) notifyBroadcast(ch <-chan ConfigSnapshot) { - // Run until ch is closed - for snap := range ch { +func (m *Manager) notifyBroadcast(proxyID ProxyID, state *state) { + // Run until ch is closed (by a defer in state.run). + for snap := range state.snapCh { m.notify(&snap) } + + // If state.run exited because of an irrecoverable error, close all of the + // watchers so that the consumers reconnect/retry at a higher level. + if state.failed() { + m.closeAllWatchers(proxyID) + } } func (m *Manager) notify(snap *ConfigSnapshot) { @@ -281,6 +286,20 @@ func (m *Manager) Watch(id ProxyID) (<-chan *ConfigSnapshot, CancelFunc) { } } +func (m *Manager) closeAllWatchers(proxyID ProxyID) { + m.mu.Lock() + defer m.mu.Unlock() + + watchers, ok := m.watchers[proxyID] + if !ok { + return + } + + for watchID := range watchers { + m.closeWatchLocked(proxyID, watchID) + } +} + // closeWatchLocked cleans up state related to a single watcher. It assumes the // lock is held. func (m *Manager) closeWatchLocked(proxyID ProxyID, watchID uint64) { @@ -309,7 +328,7 @@ func (m *Manager) Close() error { // Then close all states for proxyID, state := range m.proxies { - state.Close() + state.Close(false) delete(m.proxies, proxyID) } return nil diff --git a/agent/proxycfg/state.go b/agent/proxycfg/state.go index 13b22c4fd2..34d3364356 100644 --- a/agent/proxycfg/state.go +++ b/agent/proxycfg/state.go @@ -6,6 +6,7 @@ import ( "fmt" "net" "reflect" + "sync/atomic" "time" "github.com/hashicorp/go-hclog" @@ -70,11 +71,21 @@ type state struct { // in Watch. cancel func() + // failedFlag is (atomically) set to 1 (by Close) when run exits because a data + // source is in an irrecoverable state. It can be read with failed. 
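+	// It is accessed via the sync/atomic package because Close and the
+	// manager's broadcast goroutine run concurrently.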
+ failedFlag int32 + ch chan UpdateEvent snapCh chan ConfigSnapshot reqCh chan chan *ConfigSnapshot } +// failed returns whether run exited because a data source is in an +// irrecoverable state. +func (s *state) failed() bool { + return atomic.LoadInt32(&s.failedFlag) == 1 +} + type DNSConfig struct { Domain string AltDomain string @@ -250,10 +261,13 @@ func (s *state) Watch() (<-chan ConfigSnapshot, error) { } // Close discards the state and stops any long-running watches. -func (s *state) Close() error { +func (s *state) Close(failed bool) error { if s.cancel != nil { s.cancel() } + if failed { + atomic.StoreInt32(&s.failedFlag, 1) + } return nil } @@ -300,7 +314,13 @@ func (s *state) run(ctx context.Context, snap *ConfigSnapshot) { case <-ctx.Done(): return case u := <-s.ch: - s.logger.Trace("A blocking query returned; handling snapshot update", "correlationID", u.CorrelationID) + s.logger.Trace("Data source returned; handling snapshot update", "correlationID", u.CorrelationID) + + if IsTerminalError(u.Err) { + s.logger.Error("Data source in an irrecoverable state; exiting", "error", u.Err, "correlationID", u.CorrelationID) + s.Close(true) + return + } if err := s.handler.handleUpdate(ctx, u, snap); err != nil { s.logger.Error("Failed to handle update from watch", diff --git a/agent/submatview/local_materializer.go b/agent/submatview/local_materializer.go index 6e32b36025..b3d4480bda 100644 --- a/agent/submatview/local_materializer.go +++ b/agent/submatview/local_materializer.go @@ -66,6 +66,10 @@ func (m *LocalMaterializer) Run(ctx context.Context) { if ctx.Err() != nil { return } + if m.isTerminalError(err) { + return + } + m.mat.handleError(req, err) if err := m.mat.retryWaiter.Wait(ctx); err != nil { @@ -74,6 +78,14 @@ func (m *LocalMaterializer) Run(ctx context.Context) { } } +// isTerminalError determines whether the given error cannot be recovered from +// and should cause the materializer to halt and be evicted from the view store. +// +// This roughly matches the logic in agent/proxycfg-glue.newUpdateEvent. +func (m *LocalMaterializer) isTerminalError(err error) bool { + return acl.IsErrNotFound(err) +} + // subscribeOnce opens a new subscription to a local backend and runs // for its lifetime or until the view is closed. func (m *LocalMaterializer) subscribeOnce(ctx context.Context, req *pbsubscribe.SubscribeRequest) error { diff --git a/agent/submatview/store.go b/agent/submatview/store.go index 242a0d70d7..dacf2d8bae 100644 --- a/agent/submatview/store.go +++ b/agent/submatview/store.go @@ -47,6 +47,9 @@ type entry struct { // requests is the count of active requests using this entry. This entry will // remain in the store as long as this count remains > 0. requests int + // evicting is used to mark an entry that will be evicted when the current in- + // flight requests finish. + evicting bool } // NewStore creates and returns a Store that is ready for use. The caller must @@ -89,6 +92,7 @@ func (s *Store) Run(ctx context.Context) { // Only stop the materializer if there are no active requests. 
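		// Entries with in-flight requests are left in place; releaseEntry
		// re-arms their expiry once the last request completes.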
if e.requests == 0 { + s.logger.Trace("evicting item from store", "key", he.Key()) e.stop() delete(s.byKey, he.Key()) } @@ -187,13 +191,13 @@ func (s *Store) NotifyCallback( "error", err, "request-type", req.Type(), "index", index) - continue } index = result.Index cb(ctx, cache.UpdateEvent{ CorrelationID: correlationID, Result: result.Value, + Err: err, Meta: cache.ResultMeta{Index: result.Index, Hit: result.Cached}, }) } @@ -211,6 +215,9 @@ func (s *Store) readEntry(req Request) (string, Materializer, error) { defer s.lock.Unlock() e, ok := s.byKey[key] if ok { + if e.evicting { + return "", nil, errors.New("item is marked for eviction") + } e.requests++ s.byKey[key] = e return key, e.materializer, nil @@ -222,7 +229,18 @@ func (s *Store) readEntry(req Request) (string, Materializer, error) { } ctx, cancel := context.WithCancel(context.Background()) - go mat.Run(ctx) + go func() { + mat.Run(ctx) + + // Materializers run until they either reach their TTL and are evicted (which + // cancels the given context) or encounter an irrecoverable error. + // + // If the context hasn't been canceled, we know it's the error case so we + // trigger an immediate eviction. + if ctx.Err() == nil { + s.evictNow(key) + } + }() e = entry{ materializer: mat, @@ -233,6 +251,28 @@ func (s *Store) readEntry(req Request) (string, Materializer, error) { return key, e.materializer, nil } +// evictNow causes the item with the given key to be evicted immediately. +// +// If there are requests in-flight, the item is marked for eviction such that +// once the requests have been served releaseEntry will move it to the top of +// the expiry heap. If there are no requests in-flight, evictNow will move the +// item to the top of the expiry heap itself. +// +// In either case, the entry's evicting flag prevents it from being served by +// readEntry (and thereby gaining new in-flight requests). +func (s *Store) evictNow(key string) { + s.lock.Lock() + defer s.lock.Unlock() + + e := s.byKey[key] + e.evicting = true + s.byKey[key] = e + + if e.requests == 0 { + s.expireNowLocked(key) + } +} + // releaseEntry decrements the request count and starts an expiry timer if the // count has reached 0. Must be called once for every call to readEntry. func (s *Store) releaseEntry(key string) { @@ -246,6 +286,11 @@ func (s *Store) releaseEntry(key string) { return } + if e.evicting { + s.expireNowLocked(key) + return + } + if e.expiry.Index() == ttlcache.NotIndexed { e.expiry = s.expiryHeap.Add(key, s.idleTTL) s.byKey[key] = e @@ -255,6 +300,17 @@ func (s *Store) releaseEntry(key string) { s.expiryHeap.Update(e.expiry.Index(), s.idleTTL) } +// expireNowLocked moves the item with the given key to the top of the expiry +// heap, causing it to be picked up by the expiry loop and evicted immediately. +func (s *Store) expireNowLocked(key string) { + e := s.byKey[key] + if idx := e.expiry.Index(); idx != ttlcache.NotIndexed { + s.expiryHeap.Remove(idx) + } + e.expiry = s.expiryHeap.Add(key, time.Duration(0)) + s.byKey[key] = e +} + // makeEntryKey matches agent/cache.makeEntryKey, but may change in the future. 
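// The ACL token is part of the key, so a materialized view is never shared
// between requests made with different tokens.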
func makeEntryKey(typ string, r cache.RequestInfo) string { return fmt.Sprintf("%s/%s/%s/%s", typ, r.Datacenter, r.Token, r.Key) diff --git a/agent/submatview/store_test.go b/agent/submatview/store_test.go index 1d5789c054..aab0995998 100644 --- a/agent/submatview/store_test.go +++ b/agent/submatview/store_test.go @@ -509,3 +509,75 @@ func TestStore_Run_ExpiresEntries(t *testing.T) { require.Len(t, store.byKey, 0) require.Equal(t, ttlcache.NotIndexed, e.expiry.Index()) } + +func TestStore_Run_FailingMaterializer(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(cancel) + + store := NewStore(hclog.NewNullLogger()) + store.idleTTL = 24 * time.Hour + go store.Run(ctx) + + t.Run("with an in-flight request", func(t *testing.T) { + req := &failingMaterializerRequest{ + doneCh: make(chan struct{}), + } + + ch := make(chan cache.UpdateEvent) + reqCtx, reqCancel := context.WithCancel(context.Background()) + t.Cleanup(reqCancel) + require.NoError(t, store.Notify(reqCtx, req, "", ch)) + + assertRequestCount(t, store, req, 1) + + // Cause the materializer to "fail" (exit before its context is canceled). + close(req.doneCh) + + // End the in-flight request. + reqCancel() + + // Check that the item was evicted. + retry.Run(t, func(r *retry.R) { + store.lock.Lock() + defer store.lock.Unlock() + + require.Len(r, store.byKey, 0) + }) + }) + + t.Run("with no in-flight requests", func(t *testing.T) { + req := &failingMaterializerRequest{ + doneCh: make(chan struct{}), + } + + // Cause the materializer to "fail" (exit before its context is canceled). + close(req.doneCh) + + // Check that the item was evicted. + retry.Run(t, func(r *retry.R) { + store.lock.Lock() + defer store.lock.Unlock() + + require.Len(r, store.byKey, 0) + }) + }) +} + +type failingMaterializerRequest struct { + doneCh chan struct{} +} + +func (failingMaterializerRequest) CacheInfo() cache.RequestInfo { return cache.RequestInfo{} } +func (failingMaterializerRequest) Type() string { return "test.FailingMaterializerRequest" } + +func (r *failingMaterializerRequest) NewMaterializer() (Materializer, error) { + return &failingMaterializer{doneCh: r.doneCh}, nil +} + +type failingMaterializer struct { + doneCh <-chan struct{} +} + +func (failingMaterializer) Query(context.Context, uint64) (Result, error) { return Result{}, nil } + +func (m *failingMaterializer) Run(context.Context) { <-m.doneCh } diff --git a/agent/xds/delta.go b/agent/xds/delta.go index 701c04f2ed..71c1edcb0f 100644 --- a/agent/xds/delta.go +++ b/agent/xds/delta.go @@ -81,6 +81,11 @@ const ( ) func (s *Server) processDelta(stream ADSDeltaStream, reqCh <-chan *envoy_discovery_v3.DeltaDiscoveryRequest) error { + // Handle invalid ACL tokens up-front. + if _, err := s.authenticate(stream.Context()); err != nil { + return err + } + // Loop state var ( cfgSnap *proxycfg.ConfigSnapshot @@ -200,7 +205,18 @@ func (s *Server) processDelta(stream ADSDeltaStream, reqCh <-chan *envoy_discove } } - case cfgSnap = <-stateCh: + case cs, ok := <-stateCh: + if !ok { + // stateCh is closed either when *we* cancel the watch (on-exit via defer) + // or by the proxycfg.Manager when an irrecoverable error is encountered + // such as the ACL token getting deleted. + // + // We know for sure that this is the latter case, because in the former we + // would've already exited this loop. 
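+			// Aborted lets the client retry; a reconnect re-runs the
+			// up-front token check at the top of processDelta.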
+ return status.Error(codes.Aborted, "xDS stream terminated due to an irrecoverable error, please try again") + } + cfgSnap = cs + newRes, err := generator.allResourcesFromSnapshot(cfgSnap) if err != nil { return status.Errorf(codes.Unavailable, "failed to generate all xDS resources from the snapshot: %v", err) diff --git a/agent/xds/server.go b/agent/xds/server.go index cc27f3fde7..3ee42e77b0 100644 --- a/agent/xds/server.go +++ b/agent/xds/server.go @@ -186,6 +186,18 @@ func (s *Server) Register(srv *grpc.Server) { envoy_discovery_v3.RegisterAggregatedDiscoveryServiceServer(srv, s) } +func (s *Server) authenticate(ctx context.Context) (acl.Authorizer, error) { + authz, err := s.ResolveToken(external.TokenFromContext(ctx)) + if acl.IsErrNotFound(err) { + return nil, status.Errorf(codes.Unauthenticated, "unauthenticated: %v", err) + } else if acl.IsErrPermissionDenied(err) { + return nil, status.Error(codes.PermissionDenied, err.Error()) + } else if err != nil { + return nil, status.Errorf(codes.Internal, "error resolving acl token: %v", err) + } + return authz, nil +} + // authorize the xDS request using the token stored in ctx. This authorization is // a bit different from most interfaces. Instead of explicitly authorizing or // filtering each piece of data in the response, the request is authorized @@ -201,13 +213,9 @@ func (s *Server) authorize(ctx context.Context, cfgSnap *proxycfg.ConfigSnapshot return status.Errorf(codes.Unauthenticated, "unauthenticated: no config snapshot") } - authz, err := s.ResolveToken(external.TokenFromContext(ctx)) - if acl.IsErrNotFound(err) { - return status.Errorf(codes.Unauthenticated, "unauthenticated: %v", err) - } else if acl.IsErrPermissionDenied(err) { - return status.Error(codes.PermissionDenied, err.Error()) - } else if err != nil { - return status.Errorf(codes.Internal, "error resolving acl token: %v", err) + authz, err := s.authenticate(ctx) + if err != nil { + return err } var authzContext acl.AuthorizerContext From 8d6b73aed0da7b2f181e5b97be88a44d4248bc60 Mon Sep 17 00:00:00 2001 From: Rosemary Wang <915624+joatmon08@users.noreply.github.com> Date: Tue, 23 Aug 2022 17:52:03 -0400 Subject: [PATCH 12/93] Clarify transparent proxy documentation (#14301) * Clarify transparent proxy documentation Some confusion over known limitations for transparent proxy, specifically over federation versus cluster peering. Updated `KubeDNS` to Kubernetes DNS for consistency with Kubernetes documentation. Co-authored-by: David Yu Co-authored-by: Jeff Boruszak <104028618+boruszak@users.noreply.github.com> --- .../docs/connect/cluster-peering/k8s.mdx | 9 ++-- .../docs/connect/transparent-proxy.mdx | 44 +++++++++++++------ 2 files changed, 33 insertions(+), 20 deletions(-) diff --git a/website/content/docs/connect/cluster-peering/k8s.mdx b/website/content/docs/connect/cluster-peering/k8s.mdx index 7471efed86..35f17959cb 100644 --- a/website/content/docs/connect/cluster-peering/k8s.mdx +++ b/website/content/docs/connect/cluster-peering/k8s.mdx @@ -132,7 +132,7 @@ To peer Kubernetes clusters running Consul, you need to create a peering token a ## Export services between clusters -1. For the service in "cluster-02" that you want to export, add the following [annotations](/docs/k8s/annotations-and-labels#consul-hashicorp-com-connect-service-upstreams) to your service's pods. +1. For the service in "cluster-02" that you want to export, add the following [annotation](/docs/k8s/annotations-and-labels) to your service's pods. 
@@ -140,7 +140,6 @@ To peer Kubernetes clusters running Consul, you need to create a peering token a ##… annotations: "consul.hashicorp.com/connect-inject": "true" - "consul.hashicorp.com/transparent-proxy": "false" ##… ``` @@ -207,8 +206,6 @@ To peer Kubernetes clusters running Consul, you need to create a peering token a ##… annotations: "consul.hashicorp.com/connect-inject": "true" - "consul.hashicorp.com/transparent-proxy": "false" - "consul.hashicorp.com/connect-service-upstreams": "backend-service.svc.cluster-02.peer:1234" ##… ``` @@ -220,10 +217,10 @@ To peer Kubernetes clusters running Consul, you need to create a peering token a $ kubectl apply --filename frontend-service.yml ``` -1. Run the following command and check the output to confirm that you peered your clusters successfully. +1. Run the following command in `frontend-service` and check the output to confirm that you peered your clusters successfully. ```shell-session - $ curl localhost:1234 + $ kubectl exec -it $(kubectl get pod -l app=frontend -o name) -- curl localhost:1234 { "name": "backend-service", ##… diff --git a/website/content/docs/connect/transparent-proxy.mdx b/website/content/docs/connect/transparent-proxy.mdx index 6e3353bbad..57ad48ba7a 100644 --- a/website/content/docs/connect/transparent-proxy.mdx +++ b/website/content/docs/connect/transparent-proxy.mdx @@ -31,7 +31,7 @@ With transparent proxy: 1. Local upstreams are inferred from service intentions and peered upstreams are inferred from imported services, so no explicit configuration is needed. -1. Outbound connections pointing to a KubeDNS name "just work" — network rules +1. Outbound connections pointing to a Kubernetes DNS record "just work" — network rules redirect them through the proxy. 1. Inbound traffic is forced to go through the proxy to prevent unauthorized direct access to the application. @@ -160,27 +160,43 @@ configure exceptions on a per-Pod basis. The following Pod annotations allow you - [`consul.hashicorp.com/transparent-proxy-exclude-uids`](/docs/k8s/annotations-and-labels#consul-hashicorp-com-transparent-proxy-exclude-uids) +### Dialing Services Across Kubernetes Clusters + +- You cannot use transparent proxy in a deployment configuration with [federation between Kubernetes clusters](/docs/k8s/installation/multi-cluster/kubernetes). + Instead, services in one Kubernetes cluster must explicitly dial a service to a Consul datacenter in another Kubernetes cluster using the + [consul.hashicorp.com/connect-service-upstreams](/docs/k8s/annotations-and-labels#consul-hashicorp-com-connect-service-upstreams) + annotation. For example, an annotation of + `"consul.hashicorp.com/connect-service-upstreams": "my-service:1234:dc2"` reaches an upstream service called `my-service` + in the datacenter `dc2` on port `1234`. + +- You cannot use transparent proxy in a deployment configuration with a + [single Consul datacenter spanning multiple Kubernetes clusters](/docs/k8s/installation/deployment-configurations/single-dc-multi-k8s). Instead, + services in one Kubernetes cluster must explicitly dial a service in another Kubernetes cluster using the + [consul.hashicorp.com/connect-service-upstreams](/docs/k8s/annotations-and-labels#consul-hashicorp-com-connect-service-upstreams) + annotation. For example, an annotation of + `"consul.hashicorp.com/connect-service-upstreams": "my-service:1234"`, + reaches an upstream service called `my-service` in another Kubernetes cluster and on port `1234`. 
+ Although transparent proxy is enabled, Kubernetes DNS is not utilized when communicating between services that exist on separate Kubernetes clusters. + +- In a deployment configuration with [cluster peering](/docs/connect/cluster-peering), + transparent proxy is fully supported and thus dialing services explicitly is not required. + + ## Known Limitations -* Traffic can only be transparently proxied when the address dialed corresponds to the address of a service in the -transparent proxy's datacenter. Services can also dial explicit upstreams in other datacenters without transparent proxy, for example, by adding an -[annotation](/docs/k8s/annotations-and-labels#consul-hashicorp-com-connect-service-upstreams) such as -`"consul.hashicorp.com/connect-service-upstreams": "my-service:1234:dc2"` to reach an upstream service called `my-service` -in the datacenter `dc2`. -* In the deployment configuration where a [single Consul datacenter spans multiple Kubernetes clusters](/docs/k8s/installation/deployment-configurations/single-dc-multi-k8s), services in one Kubernetes cluster must explicitly dial a service in another Kubernetes cluster using the [consul.hashicorp.com/connect-service-upstreams](/docs/k8s/annotations-and-labels#consul-hashicorp-com-connect-service-upstreams) annotation. An example would be -`"consul.hashicorp.com/connect-service-upstreams": "my-service:1234"`, where `my-service` is the service that exists in another Kubernetes cluster and is exposed on port `1234`. Although Transparent Proxy is enabled, KubeDNS is not utilized when communicating between services existing on separate Kubernetes clusters. +- Deployment configurations with federation across or a single datacenter spanning multiple clusters must explicitly dial a + service in another datacenter or cluster using annotations. -* When dialing headless services, the request will be proxied using a plain TCP - proxy. The upstream's protocol is not considered. +- When dialing headless services, the request is proxied using a plain TCP proxy. The upstream's protocol is not considered. ## Using Transparent Proxy In Kubernetes, services can reach other services via their -[KubeDNS](https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/) address or via Pod IPs, and that +[Kubernetes DNS](https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/) address or through Pod IPs, and that traffic will be transparently sent through the proxy. Connect services in Kubernetes are required to have a Kubernetes service selecting the Pods. -~> Note: In order to use KubeDNS, the Kubernetes service name will need to match the Consul service name. This will be the +~> **Note**: In order to use Kubernetes DNS, the Kubernetes service name needs to match the Consul service name. This is the case by default, unless the service Pods have the annotation `consul.hashicorp.com/connect-service` overriding the Consul service name. @@ -192,7 +208,7 @@ inbound and outbound listener on the sidecar proxy. The proxy will be configured appropriate upstream services based on [Service Intentions](/docs/connect/config-entries/service-intentions). This means Connect services no longer need to use the `consul.hashicorp.com/connect-service-upstreams` annotation to configure upstreams explicitly. Once the -Service Intentions are set, they can simply address the upstream services using KubeDNS. +Service Intentions are set, they can simply address the upstream services using Kubernetes DNS. 
As of Consul-k8s >= `0.26.0` and Consul-helm >= `0.32.0`, a Kubernetes service that selects application pods is required for Connect applications, i.e: @@ -213,7 +229,7 @@ spec: In the example above, if another service wants to reach `sample-app` via transparent proxying, it can dial `sample-app.default.svc.cluster.local`, using -[KubeDNS](https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/). +[Kubernetes DNS](https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/). If ACLs with default "deny" policy are enabled, it also needs a [ServiceIntention](/docs/connect/config-entries/service-intentions) allowing it to talk to `sample-app`. From bb35a8303ddcd2758de575326502f6a232a4cc97 Mon Sep 17 00:00:00 2001 From: twunderlich-grapl <88346193+twunderlich-grapl@users.noreply.github.com> Date: Tue, 23 Aug 2022 20:06:00 -0400 Subject: [PATCH 13/93] Clarify docs around using either Consul or Vault managed PKI paths (#13295) * Clarify docs around using either Consul or Vault managed PKI paths The current docs can be misread to indicate that you need both the Consul and Vault managed PKI Paths policies. The [Learning Tutorial](https://learn.hashicorp.com/tutorials/consul/vault-pki-consul-connect-ca?in=consul/vault-secure#create-vault-policies) is clearer. This tries to make the original docs as clear as the learning tutorial * Clarify that PKI secret engines are used to store certs Co-authored-by: Blake Covarrubias --- website/content/docs/connect/ca/vault.mdx | 2 ++ 1 file changed, 2 insertions(+) diff --git a/website/content/docs/connect/ca/vault.mdx b/website/content/docs/connect/ca/vault.mdx index e0a9daa6ea..e563a6d83d 100644 --- a/website/content/docs/connect/ca/vault.mdx +++ b/website/content/docs/connect/ca/vault.mdx @@ -201,6 +201,8 @@ If the paths already exist, Consul will use them as configured. ## Vault ACL Policies +Vault PKI can be managed by either Consul or by Vault. If you want to manually create and tune the PKI secret engines used to store the root and intermediate certificates, use Vault Managed PKI Paths. If you want to have the PKI automatically managed for you, use Consul Managed PKI Paths. + ### Vault Managed PKI Paths The following Vault policy allows Consul to use pre-existing PKI paths in Vault. From 3b993f2da77534c8b6463227bbfe5a0f382deb27 Mon Sep 17 00:00:00 2001 From: Dan Upton Date: Wed, 24 Aug 2022 12:03:15 +0100 Subject: [PATCH 14/93] dataplane: update envoy bootstrap params for consul-dataplane (#14017) Contains 2 changes to the GetEnvoyBootstrapParams response to support consul-dataplane. Exposing node_name and node_id: consul-dataplane will support providing either the node_id or node_name in its configuration. Unfortunately, supporting both in the xDS meta adds a fair amount of complexity (partly because most tables are currently indexed on node_name) so for now we're going to return them both from the bootstrap params endpoint, allowing consul-dataplane to exchange a node_id for a node_name (which it will supply in the xDS meta). Properly setting service for gateways: To avoid the need to special case gateways in consul-dataplane, service will now either be the destination service name for connect proxies, or the gateway service name. This means it can be used as-is in Envoy configuration (i.e. as a cluster name or in metric tags). 
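A minimal sketch of how a consumer such as consul-dataplane might use the two
new response fields. The helper name, the pre-built `req`, and the established
`*grpc.ClientConn` are assumptions for illustration; only the generated client,
the `Service` field, and the `GetNodeName` accessor come from this change.

```go
package dataplaneexample

import (
	"context"

	"google.golang.org/grpc"

	"github.com/hashicorp/consul/proto-public/pbdataplane"
)

// bootstrapIdentity (hypothetical) fetches the Envoy bootstrap params and
// returns the two identity values a dataplane needs.
func bootstrapIdentity(
	ctx context.Context,
	conn *grpc.ClientConn,
	req *pbdataplane.GetEnvoyBootstrapParamsRequest,
) (cluster, nodeName string, err error) {
	client := pbdataplane.NewDataplaneServiceClient(conn)

	resp, err := client.GetEnvoyBootstrapParams(ctx, req)
	if err != nil {
		return "", "", err
	}

	// Service needs no kind-specific handling: it is the destination service
	// name for connect proxies and the gateway service's own name for
	// gateways, so it can be used directly as the local cluster name and in
	// metric tags.
	//
	// NodeName lets a dataplane configured with only a node ID recover the
	// node name it must supply in the xDS node metadata.
	return resp.Service, resp.GetNodeName(), nil
}
```

Exchanging the ID for a name up front keeps the xDS metadata handling simple,
since most server-side tables are indexed on node name.
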
--- agent/consul/state/catalog.go | 3 + agent/consul/state/catalog_test.go | 7 +- .../dataplane/get_envoy_bootstrap_params.go | 10 +- ....go => get_envoy_bootstrap_params_test.go} | 10 +- proto-public/pbdataplane/dataplane.pb.go | 151 ++++++++++-------- proto-public/pbdataplane/dataplane.proto | 7 +- 6 files changed, 118 insertions(+), 70 deletions(-) rename agent/grpc-external/services/dataplane/{get_envoy_boostrap_params_test.go => get_envoy_bootstrap_params_test.go} (96%) diff --git a/agent/consul/state/catalog.go b/agent/consul/state/catalog.go index 258519d5ba..f9483a313f 100644 --- a/agent/consul/state/catalog.go +++ b/agent/consul/state/catalog.go @@ -1717,6 +1717,9 @@ func (s *Store) ServiceNode(nodeID, nodeName, serviceID string, entMeta *acl.Ent if err != nil { return 0, nil, fmt.Errorf("failed querying service for node %q: %w", node.Node, err) } + if service != nil { + service.ID = node.ID + } return idx, service, nil } diff --git a/agent/consul/state/catalog_test.go b/agent/consul/state/catalog_test.go index 10e7af6dba..1e096d136f 100644 --- a/agent/consul/state/catalog_test.go +++ b/agent/consul/state/catalog_test.go @@ -270,17 +270,20 @@ func TestStateStore_EnsureRegistration(t *testing.T) { require.Equal(t, uint64(2), idx) require.Equal(t, svcmap["redis1"], r) + exp := svcmap["redis1"].ToServiceNode("node1") + exp.ID = nodeID + // lookup service by node name idx, sn, err := s.ServiceNode("", "node1", "redis1", nil, peerName) require.NoError(t, err) require.Equal(t, uint64(2), idx) - require.Equal(t, svcmap["redis1"].ToServiceNode("node1"), sn) + require.Equal(t, exp, sn) // lookup service by node ID idx, sn, err = s.ServiceNode(string(nodeID), "", "redis1", nil, peerName) require.NoError(t, err) require.Equal(t, uint64(2), idx) - require.Equal(t, svcmap["redis1"].ToServiceNode("node1"), sn) + require.Equal(t, exp, sn) // lookup service by invalid node _, _, err = s.ServiceNode("", "invalid-node", "redis1", nil, peerName) diff --git a/agent/grpc-external/services/dataplane/get_envoy_bootstrap_params.go b/agent/grpc-external/services/dataplane/get_envoy_bootstrap_params.go index bed302d12b..b320559e98 100644 --- a/agent/grpc-external/services/dataplane/get_envoy_bootstrap_params.go +++ b/agent/grpc-external/services/dataplane/get_envoy_bootstrap_params.go @@ -52,13 +52,21 @@ func (s *Server) GetEnvoyBootstrapParams(ctx context.Context, req *pbdataplane.G } // Build out the response + var serviceName string + if svc.ServiceKind == structs.ServiceKindConnectProxy { + serviceName = svc.ServiceProxy.DestinationServiceName + } else { + serviceName = svc.ServiceName + } resp := &pbdataplane.GetEnvoyBootstrapParamsResponse{ - Service: svc.ServiceProxy.DestinationServiceName, + Service: serviceName, Partition: svc.EnterpriseMeta.PartitionOrDefault(), Namespace: svc.EnterpriseMeta.NamespaceOrDefault(), Datacenter: s.Datacenter, ServiceKind: convertToResponseServiceKind(svc.ServiceKind), + NodeName: svc.Node, + NodeId: string(svc.ID), } bootstrapConfig, err := structpb.NewStruct(svc.ServiceProxy.Config) diff --git a/agent/grpc-external/services/dataplane/get_envoy_boostrap_params_test.go b/agent/grpc-external/services/dataplane/get_envoy_bootstrap_params_test.go similarity index 96% rename from agent/grpc-external/services/dataplane/get_envoy_boostrap_params_test.go rename to agent/grpc-external/services/dataplane/get_envoy_bootstrap_params_test.go index c3b4fd1468..aa42b0bf13 100644 --- a/agent/grpc-external/services/dataplane/get_envoy_boostrap_params_test.go +++ 
b/agent/grpc-external/services/dataplane/get_envoy_bootstrap_params_test.go @@ -97,14 +97,20 @@ func TestGetEnvoyBootstrapParams_Success(t *testing.T) { resp, err := client.GetEnvoyBootstrapParams(ctx, req) require.NoError(t, err) - require.Equal(t, tc.registerReq.Service.Proxy.DestinationServiceName, resp.Service) + if tc.registerReq.Service.IsGateway() { + require.Equal(t, tc.registerReq.Service.Service, resp.Service) + } else { + require.Equal(t, tc.registerReq.Service.Proxy.DestinationServiceName, resp.Service) + } + require.Equal(t, serverDC, resp.Datacenter) require.Equal(t, tc.registerReq.EnterpriseMeta.PartitionOrDefault(), resp.Partition) require.Equal(t, tc.registerReq.EnterpriseMeta.NamespaceOrDefault(), resp.Namespace) require.Contains(t, resp.Config.Fields, proxyConfigKey) require.Equal(t, structpb.NewStringValue(proxyConfigValue), resp.Config.Fields[proxyConfigKey]) require.Equal(t, convertToResponseServiceKind(tc.registerReq.Service.Kind), resp.ServiceKind) - + require.Equal(t, tc.registerReq.Node, resp.NodeName) + require.Equal(t, string(tc.registerReq.ID), resp.NodeId) } testCases := []testCase{ diff --git a/proto-public/pbdataplane/dataplane.pb.go b/proto-public/pbdataplane/dataplane.pb.go index 1da1eea15a..8e8a1000f2 100644 --- a/proto-public/pbdataplane/dataplane.pb.go +++ b/proto-public/pbdataplane/dataplane.pb.go @@ -401,12 +401,17 @@ type GetEnvoyBootstrapParamsResponse struct { unknownFields protoimpl.UnknownFields ServiceKind ServiceKind `protobuf:"varint,1,opt,name=service_kind,json=serviceKind,proto3,enum=hashicorp.consul.dataplane.ServiceKind" json:"service_kind,omitempty"` - // The destination service name + // service is be used to identify the service (as the local cluster name and + // in metric tags). If the service is a connect proxy it will be the name of + // the proxy's destination service, for gateways it will be the gateway + // service's name. 
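+	// (Populated from the proxy's DestinationServiceName or the registered
+	// service name, depending on ServiceKind; see GetEnvoyBootstrapParams.)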
Service string `protobuf:"bytes,2,opt,name=service,proto3" json:"service,omitempty"` Namespace string `protobuf:"bytes,3,opt,name=namespace,proto3" json:"namespace,omitempty"` Partition string `protobuf:"bytes,4,opt,name=partition,proto3" json:"partition,omitempty"` Datacenter string `protobuf:"bytes,5,opt,name=datacenter,proto3" json:"datacenter,omitempty"` Config *structpb.Struct `protobuf:"bytes,6,opt,name=config,proto3" json:"config,omitempty"` + NodeId string `protobuf:"bytes,7,opt,name=node_id,json=nodeId,proto3" json:"node_id,omitempty"` + NodeName string `protobuf:"bytes,8,opt,name=node_name,json=nodeName,proto3" json:"node_name,omitempty"` } func (x *GetEnvoyBootstrapParamsResponse) Reset() { @@ -483,6 +488,20 @@ func (x *GetEnvoyBootstrapParamsResponse) GetConfig() *structpb.Struct { return nil } +func (x *GetEnvoyBootstrapParamsResponse) GetNodeId() string { + if x != nil { + return x.NodeId + } + return "" +} + +func (x *GetEnvoyBootstrapParamsResponse) GetNodeName() string { + if x != nil { + return x.NodeName + } + return "" +} + var File_proto_public_pbdataplane_dataplane_proto protoreflect.FileDescriptor var file_proto_public_pbdataplane_dataplane_proto_rawDesc = []byte{ @@ -525,7 +544,7 @@ var file_proto_public_pbdataplane_dataplane_proto_rawDesc = []byte{ 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x70, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1c, 0x0a, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, - 0x65, 0x42, 0x0b, 0x0a, 0x09, 0x6e, 0x6f, 0x64, 0x65, 0x5f, 0x73, 0x70, 0x65, 0x63, 0x22, 0x94, + 0x65, 0x42, 0x0b, 0x0a, 0x09, 0x6e, 0x6f, 0x64, 0x65, 0x5f, 0x73, 0x70, 0x65, 0x63, 0x22, 0xca, 0x02, 0x0a, 0x1f, 0x47, 0x65, 0x74, 0x45, 0x6e, 0x76, 0x6f, 0x79, 0x42, 0x6f, 0x6f, 0x74, 0x73, 0x74, 0x72, 0x61, 0x70, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x4a, 0x0a, 0x0c, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x6b, 0x69, @@ -543,69 +562,73 @@ var file_proto_public_pbdataplane_dataplane_proto_rawDesc = []byte{ 0x6e, 0x74, 0x65, 0x72, 0x12, 0x2f, 0x0a, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x52, 0x06, 0x63, - 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2a, 0xc7, 0x01, 0x0a, 0x11, 0x44, 0x61, 0x74, 0x61, 0x70, 0x6c, - 0x61, 0x6e, 0x65, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0x22, 0x0a, 0x1e, 0x44, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x17, 0x0a, 0x07, 0x6e, 0x6f, 0x64, 0x65, 0x5f, 0x69, 0x64, + 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x6e, 0x6f, 0x64, 0x65, 0x49, 0x64, 0x12, 0x1b, + 0x0a, 0x09, 0x6e, 0x6f, 0x64, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x08, 0x6e, 0x6f, 0x64, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x2a, 0xc7, 0x01, 0x0a, 0x11, + 0x44, 0x61, 0x74, 0x61, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, + 0x73, 0x12, 0x22, 0x0a, 0x1e, 0x44, 0x41, 0x54, 0x41, 0x50, 0x4c, 0x41, 0x4e, 0x45, 0x5f, 0x46, + 0x45, 0x41, 0x54, 0x55, 0x52, 0x45, 0x53, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, + 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x24, 0x0a, 0x20, 0x44, 0x41, 0x54, 0x41, 0x50, 0x4c, 0x41, + 0x4e, 0x45, 0x5f, 0x46, 0x45, 0x41, 0x54, 0x55, 0x52, 0x45, 0x53, 0x5f, 0x57, 0x41, 0x54, 0x43, + 0x48, 0x5f, 0x53, 0x45, 0x52, 
0x56, 0x45, 0x52, 0x53, 0x10, 0x01, 0x12, 0x32, 0x0a, 0x2e, 0x44, 0x41, 0x54, 0x41, 0x50, 0x4c, 0x41, 0x4e, 0x45, 0x5f, 0x46, 0x45, 0x41, 0x54, 0x55, 0x52, 0x45, - 0x53, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, - 0x24, 0x0a, 0x20, 0x44, 0x41, 0x54, 0x41, 0x50, 0x4c, 0x41, 0x4e, 0x45, 0x5f, 0x46, 0x45, 0x41, - 0x54, 0x55, 0x52, 0x45, 0x53, 0x5f, 0x57, 0x41, 0x54, 0x43, 0x48, 0x5f, 0x53, 0x45, 0x52, 0x56, - 0x45, 0x52, 0x53, 0x10, 0x01, 0x12, 0x32, 0x0a, 0x2e, 0x44, 0x41, 0x54, 0x41, 0x50, 0x4c, 0x41, - 0x4e, 0x45, 0x5f, 0x46, 0x45, 0x41, 0x54, 0x55, 0x52, 0x45, 0x53, 0x5f, 0x45, 0x44, 0x47, 0x45, - 0x5f, 0x43, 0x45, 0x52, 0x54, 0x49, 0x46, 0x49, 0x43, 0x41, 0x54, 0x45, 0x5f, 0x4d, 0x41, 0x4e, - 0x41, 0x47, 0x45, 0x4d, 0x45, 0x4e, 0x54, 0x10, 0x02, 0x12, 0x34, 0x0a, 0x30, 0x44, 0x41, 0x54, - 0x41, 0x50, 0x4c, 0x41, 0x4e, 0x45, 0x5f, 0x46, 0x45, 0x41, 0x54, 0x55, 0x52, 0x45, 0x53, 0x5f, - 0x45, 0x4e, 0x56, 0x4f, 0x59, 0x5f, 0x42, 0x4f, 0x4f, 0x54, 0x53, 0x54, 0x52, 0x41, 0x50, 0x5f, - 0x43, 0x4f, 0x4e, 0x46, 0x49, 0x47, 0x55, 0x52, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x10, 0x03, 0x2a, - 0xcc, 0x01, 0x0a, 0x0b, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4b, 0x69, 0x6e, 0x64, 0x12, - 0x1c, 0x0a, 0x18, 0x53, 0x45, 0x52, 0x56, 0x49, 0x43, 0x45, 0x5f, 0x4b, 0x49, 0x4e, 0x44, 0x5f, - 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x18, 0x0a, - 0x14, 0x53, 0x45, 0x52, 0x56, 0x49, 0x43, 0x45, 0x5f, 0x4b, 0x49, 0x4e, 0x44, 0x5f, 0x54, 0x59, - 0x50, 0x49, 0x43, 0x41, 0x4c, 0x10, 0x01, 0x12, 0x1e, 0x0a, 0x1a, 0x53, 0x45, 0x52, 0x56, 0x49, - 0x43, 0x45, 0x5f, 0x4b, 0x49, 0x4e, 0x44, 0x5f, 0x43, 0x4f, 0x4e, 0x4e, 0x45, 0x43, 0x54, 0x5f, - 0x50, 0x52, 0x4f, 0x58, 0x59, 0x10, 0x02, 0x12, 0x1d, 0x0a, 0x19, 0x53, 0x45, 0x52, 0x56, 0x49, - 0x43, 0x45, 0x5f, 0x4b, 0x49, 0x4e, 0x44, 0x5f, 0x4d, 0x45, 0x53, 0x48, 0x5f, 0x47, 0x41, 0x54, - 0x45, 0x57, 0x41, 0x59, 0x10, 0x03, 0x12, 0x24, 0x0a, 0x20, 0x53, 0x45, 0x52, 0x56, 0x49, 0x43, - 0x45, 0x5f, 0x4b, 0x49, 0x4e, 0x44, 0x5f, 0x54, 0x45, 0x52, 0x4d, 0x49, 0x4e, 0x41, 0x54, 0x49, - 0x4e, 0x47, 0x5f, 0x47, 0x41, 0x54, 0x45, 0x57, 0x41, 0x59, 0x10, 0x04, 0x12, 0x20, 0x0a, 0x1c, - 0x53, 0x45, 0x52, 0x56, 0x49, 0x43, 0x45, 0x5f, 0x4b, 0x49, 0x4e, 0x44, 0x5f, 0x49, 0x4e, 0x47, - 0x52, 0x45, 0x53, 0x53, 0x5f, 0x47, 0x41, 0x54, 0x45, 0x57, 0x41, 0x59, 0x10, 0x05, 0x32, 0xd2, - 0x02, 0x0a, 0x10, 0x44, 0x61, 0x74, 0x61, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x53, 0x65, 0x72, 0x76, - 0x69, 0x63, 0x65, 0x12, 0xa6, 0x01, 0x0a, 0x1d, 0x47, 0x65, 0x74, 0x53, 0x75, 0x70, 0x70, 0x6f, - 0x72, 0x74, 0x65, 0x64, 0x44, 0x61, 0x74, 0x61, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x46, 0x65, 0x61, - 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0x40, 0x2e, 0x68, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, - 0x70, 0x2e, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x70, 0x6c, 0x61, - 0x6e, 0x65, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, 0x65, 0x64, 0x44, - 0x61, 0x74, 0x61, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, - 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x41, 0x2e, 0x68, 0x61, 0x73, 0x68, 0x69, 0x63, - 0x6f, 0x72, 0x70, 0x2e, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x70, - 0x6c, 0x61, 0x6e, 0x65, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, 0x65, - 0x64, 0x44, 0x61, 0x74, 0x61, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, - 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 
0x65, 0x22, 0x00, 0x12, 0x94, 0x01, 0x0a, - 0x17, 0x47, 0x65, 0x74, 0x45, 0x6e, 0x76, 0x6f, 0x79, 0x42, 0x6f, 0x6f, 0x74, 0x73, 0x74, 0x72, - 0x61, 0x70, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x12, 0x3a, 0x2e, 0x68, 0x61, 0x73, 0x68, 0x69, - 0x63, 0x6f, 0x72, 0x70, 0x2e, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2e, 0x64, 0x61, 0x74, 0x61, - 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2e, 0x47, 0x65, 0x74, 0x45, 0x6e, 0x76, 0x6f, 0x79, 0x42, 0x6f, - 0x6f, 0x74, 0x73, 0x74, 0x72, 0x61, 0x70, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x52, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x1a, 0x3b, 0x2e, 0x68, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, - 0x2e, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x70, 0x6c, 0x61, 0x6e, - 0x65, 0x2e, 0x47, 0x65, 0x74, 0x45, 0x6e, 0x76, 0x6f, 0x79, 0x42, 0x6f, 0x6f, 0x74, 0x73, 0x74, - 0x72, 0x61, 0x70, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, - 0x65, 0x22, 0x00, 0x42, 0xf0, 0x01, 0x0a, 0x1e, 0x63, 0x6f, 0x6d, 0x2e, 0x68, 0x61, 0x73, 0x68, - 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2e, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2e, 0x64, 0x61, 0x74, - 0x61, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x42, 0x0e, 0x44, 0x61, 0x74, 0x61, 0x70, 0x6c, 0x61, 0x6e, - 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x34, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, - 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x68, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2f, 0x63, - 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2d, 0x70, 0x75, 0x62, 0x6c, - 0x69, 0x63, 0x2f, 0x70, 0x62, 0x64, 0x61, 0x74, 0x61, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0xa2, 0x02, - 0x03, 0x48, 0x43, 0x44, 0xaa, 0x02, 0x1a, 0x48, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, - 0x2e, 0x43, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2e, 0x44, 0x61, 0x74, 0x61, 0x70, 0x6c, 0x61, 0x6e, - 0x65, 0xca, 0x02, 0x1a, 0x48, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x5c, 0x43, 0x6f, - 0x6e, 0x73, 0x75, 0x6c, 0x5c, 0x44, 0x61, 0x74, 0x61, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0xe2, 0x02, - 0x26, 0x48, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x5c, 0x43, 0x6f, 0x6e, 0x73, 0x75, - 0x6c, 0x5c, 0x44, 0x61, 0x74, 0x61, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x5c, 0x47, 0x50, 0x42, 0x4d, - 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0xea, 0x02, 0x1c, 0x48, 0x61, 0x73, 0x68, 0x69, 0x63, - 0x6f, 0x72, 0x70, 0x3a, 0x3a, 0x43, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x3a, 0x3a, 0x44, 0x61, 0x74, - 0x61, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x53, 0x5f, 0x45, 0x44, 0x47, 0x45, 0x5f, 0x43, 0x45, 0x52, 0x54, 0x49, 0x46, 0x49, 0x43, 0x41, + 0x54, 0x45, 0x5f, 0x4d, 0x41, 0x4e, 0x41, 0x47, 0x45, 0x4d, 0x45, 0x4e, 0x54, 0x10, 0x02, 0x12, + 0x34, 0x0a, 0x30, 0x44, 0x41, 0x54, 0x41, 0x50, 0x4c, 0x41, 0x4e, 0x45, 0x5f, 0x46, 0x45, 0x41, + 0x54, 0x55, 0x52, 0x45, 0x53, 0x5f, 0x45, 0x4e, 0x56, 0x4f, 0x59, 0x5f, 0x42, 0x4f, 0x4f, 0x54, + 0x53, 0x54, 0x52, 0x41, 0x50, 0x5f, 0x43, 0x4f, 0x4e, 0x46, 0x49, 0x47, 0x55, 0x52, 0x41, 0x54, + 0x49, 0x4f, 0x4e, 0x10, 0x03, 0x2a, 0xcc, 0x01, 0x0a, 0x0b, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, + 0x65, 0x4b, 0x69, 0x6e, 0x64, 0x12, 0x1c, 0x0a, 0x18, 0x53, 0x45, 0x52, 0x56, 0x49, 0x43, 0x45, + 0x5f, 0x4b, 0x49, 0x4e, 0x44, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, + 0x44, 0x10, 0x00, 0x12, 0x18, 0x0a, 0x14, 0x53, 0x45, 0x52, 0x56, 0x49, 0x43, 0x45, 0x5f, 0x4b, + 0x49, 0x4e, 0x44, 0x5f, 0x54, 0x59, 0x50, 0x49, 0x43, 0x41, 0x4c, 0x10, 0x01, 0x12, 0x1e, 0x0a, + 0x1a, 0x53, 0x45, 0x52, 0x56, 0x49, 0x43, 0x45, 0x5f, 0x4b, 0x49, 0x4e, 0x44, 0x5f, 0x43, 
0x4f, + 0x4e, 0x4e, 0x45, 0x43, 0x54, 0x5f, 0x50, 0x52, 0x4f, 0x58, 0x59, 0x10, 0x02, 0x12, 0x1d, 0x0a, + 0x19, 0x53, 0x45, 0x52, 0x56, 0x49, 0x43, 0x45, 0x5f, 0x4b, 0x49, 0x4e, 0x44, 0x5f, 0x4d, 0x45, + 0x53, 0x48, 0x5f, 0x47, 0x41, 0x54, 0x45, 0x57, 0x41, 0x59, 0x10, 0x03, 0x12, 0x24, 0x0a, 0x20, + 0x53, 0x45, 0x52, 0x56, 0x49, 0x43, 0x45, 0x5f, 0x4b, 0x49, 0x4e, 0x44, 0x5f, 0x54, 0x45, 0x52, + 0x4d, 0x49, 0x4e, 0x41, 0x54, 0x49, 0x4e, 0x47, 0x5f, 0x47, 0x41, 0x54, 0x45, 0x57, 0x41, 0x59, + 0x10, 0x04, 0x12, 0x20, 0x0a, 0x1c, 0x53, 0x45, 0x52, 0x56, 0x49, 0x43, 0x45, 0x5f, 0x4b, 0x49, + 0x4e, 0x44, 0x5f, 0x49, 0x4e, 0x47, 0x52, 0x45, 0x53, 0x53, 0x5f, 0x47, 0x41, 0x54, 0x45, 0x57, + 0x41, 0x59, 0x10, 0x05, 0x32, 0xd2, 0x02, 0x0a, 0x10, 0x44, 0x61, 0x74, 0x61, 0x70, 0x6c, 0x61, + 0x6e, 0x65, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0xa6, 0x01, 0x0a, 0x1d, 0x47, 0x65, + 0x74, 0x53, 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, 0x65, 0x64, 0x44, 0x61, 0x74, 0x61, 0x70, 0x6c, + 0x61, 0x6e, 0x65, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0x40, 0x2e, 0x68, 0x61, + 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2e, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2e, 0x64, + 0x61, 0x74, 0x61, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x75, 0x70, 0x70, + 0x6f, 0x72, 0x74, 0x65, 0x64, 0x44, 0x61, 0x74, 0x61, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x46, 0x65, + 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x41, 0x2e, + 0x68, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2e, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6c, + 0x2e, 0x64, 0x61, 0x74, 0x61, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x75, + 0x70, 0x70, 0x6f, 0x72, 0x74, 0x65, 0x64, 0x44, 0x61, 0x74, 0x61, 0x70, 0x6c, 0x61, 0x6e, 0x65, + 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x22, 0x00, 0x12, 0x94, 0x01, 0x0a, 0x17, 0x47, 0x65, 0x74, 0x45, 0x6e, 0x76, 0x6f, 0x79, 0x42, + 0x6f, 0x6f, 0x74, 0x73, 0x74, 0x72, 0x61, 0x70, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x12, 0x3a, + 0x2e, 0x68, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2e, 0x63, 0x6f, 0x6e, 0x73, 0x75, + 0x6c, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2e, 0x47, 0x65, 0x74, 0x45, + 0x6e, 0x76, 0x6f, 0x79, 0x42, 0x6f, 0x6f, 0x74, 0x73, 0x74, 0x72, 0x61, 0x70, 0x50, 0x61, 0x72, + 0x61, 0x6d, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x3b, 0x2e, 0x68, 0x61, 0x73, + 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2e, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2e, 0x64, 0x61, + 0x74, 0x61, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2e, 0x47, 0x65, 0x74, 0x45, 0x6e, 0x76, 0x6f, 0x79, + 0x42, 0x6f, 0x6f, 0x74, 0x73, 0x74, 0x72, 0x61, 0x70, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x42, 0xf0, 0x01, 0x0a, 0x1e, 0x63, 0x6f, + 0x6d, 0x2e, 0x68, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2e, 0x63, 0x6f, 0x6e, 0x73, + 0x75, 0x6c, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x42, 0x0e, 0x44, 0x61, + 0x74, 0x61, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x34, + 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x68, 0x61, 0x73, 0x68, 0x69, + 0x63, 0x6f, 0x72, 0x70, 0x2f, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2f, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x2d, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x2f, 0x70, 0x62, 0x64, 0x61, 0x74, 0x61, 0x70, + 0x6c, 0x61, 0x6e, 0x65, 0xa2, 0x02, 0x03, 0x48, 0x43, 0x44, 0xaa, 0x02, 0x1a, 0x48, 0x61, 0x73, + 0x68, 0x69, 0x63, 
0x6f, 0x72, 0x70, 0x2e, 0x43, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2e, 0x44, 0x61, + 0x74, 0x61, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0xca, 0x02, 0x1a, 0x48, 0x61, 0x73, 0x68, 0x69, 0x63, + 0x6f, 0x72, 0x70, 0x5c, 0x43, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x5c, 0x44, 0x61, 0x74, 0x61, 0x70, + 0x6c, 0x61, 0x6e, 0x65, 0xe2, 0x02, 0x26, 0x48, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, + 0x5c, 0x43, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x5c, 0x44, 0x61, 0x74, 0x61, 0x70, 0x6c, 0x61, 0x6e, + 0x65, 0x5c, 0x47, 0x50, 0x42, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0xea, 0x02, 0x1c, + 0x48, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x3a, 0x3a, 0x43, 0x6f, 0x6e, 0x73, 0x75, + 0x6c, 0x3a, 0x3a, 0x44, 0x61, 0x74, 0x61, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x62, 0x06, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x33, } var ( diff --git a/proto-public/pbdataplane/dataplane.proto b/proto-public/pbdataplane/dataplane.proto index 0502dcd707..cc95f3a517 100644 --- a/proto-public/pbdataplane/dataplane.proto +++ b/proto-public/pbdataplane/dataplane.proto @@ -68,12 +68,17 @@ enum ServiceKind { message GetEnvoyBootstrapParamsResponse { ServiceKind service_kind = 1; - // The destination service name + // service is be used to identify the service (as the local cluster name and + // in metric tags). If the service is a connect proxy it will be the name of + // the proxy's destination service, for gateways it will be the gateway + // service's name. string service = 2; string namespace = 3; string partition = 4; string datacenter = 5; google.protobuf.Struct config = 6; + string node_id = 7; + string node_name = 8; } service DataplaneService { From cdc6fd89d3c3e21aef032df77e57a47051f308e8 Mon Sep 17 00:00:00 2001 From: Tyler Wendlandt Date: Wed, 24 Aug 2022 06:44:01 -0600 Subject: [PATCH 15/93] ui: Replace file-mask with file-text icon usage on policy list (#14275) --- ui/.gitignore | 1 + ui/packages/consul-ui/app/components/composite-row/index.scss | 2 +- ui/packages/consul-ui/app/styles/base/icons/icons/index.scss | 2 +- 3 files changed, 3 insertions(+), 2 deletions(-) diff --git a/ui/.gitignore b/ui/.gitignore index 08df27ddb9..6bd9a0135a 100644 --- a/ui/.gitignore +++ b/ui/.gitignore @@ -12,6 +12,7 @@ node_modules .pnp* .sass-cache .DS_Store +.tool-versions connect.lock coverage coverage_* diff --git a/ui/packages/consul-ui/app/components/composite-row/index.scss b/ui/packages/consul-ui/app/components/composite-row/index.scss index bd66491a7f..1dce70e4bb 100644 --- a/ui/packages/consul-ui/app/components/composite-row/index.scss +++ b/ui/packages/consul-ui/app/components/composite-row/index.scss @@ -95,7 +95,7 @@ } %composite-row-detail .policy::before { - @extend %with-file-fill-mask, %as-pseudo; + @extend %with-file-text-mask, %as-pseudo; margin-right: 3px; } %composite-row-detail .role::before { diff --git a/ui/packages/consul-ui/app/styles/base/icons/icons/index.scss b/ui/packages/consul-ui/app/styles/base/icons/icons/index.scss index 9d1a5efe3f..20f57edc7a 100644 --- a/ui/packages/consul-ui/app/styles/base/icons/icons/index.scss +++ b/ui/packages/consul-ui/app/styles/base/icons/icons/index.scss @@ -330,7 +330,7 @@ // @import './file-minus/index.scss'; // @import './file-plus/index.scss'; // @import './file-source/index.scss'; -// @import './file-text/index.scss'; +@import './file-text/index.scss'; // @import './file-x/index.scss'; // @import './files/index.scss'; // @import './film/index.scss'; From ca228aad8dbf246e57c90916853a792297d457b7 Mon Sep 17 00:00:00 2001 From: DanStough Date: Fri, 19 Aug 2022 16:51:11 -0400 Subject: [PATCH 
16/93] doc: tproxy destination fixes --- .../config-entries/terminating-gateway.mdx | 5 +-- .../docs/k8s/connect/terminating-gateways.mdx | 36 +++++++++++-------- 2 files changed, 24 insertions(+), 17 deletions(-) diff --git a/website/content/docs/connect/config-entries/terminating-gateway.mdx b/website/content/docs/connect/config-entries/terminating-gateway.mdx index 3692eff1ec..c406c5687d 100644 --- a/website/content/docs/connect/config-entries/terminating-gateway.mdx +++ b/website/content/docs/connect/config-entries/terminating-gateway.mdx @@ -153,8 +153,9 @@ spec: Link gateway named "us-west-gateway" with the billing service, and specify a CA file to be used for one-way TLS authentication. --> **Note**: The `CAFile` parameter must be specified _and_ point to a valid CA -bundle in order to properly initiate a TLS connection to the destination service. +-> **Note**: When not using destinations in transparent proxy mode, you must specify the `CAFile` parameter +and point to a valid CA bundle in order to properly initiate a TLS +connection to the destination service. For more information about configuring a gateway for destinations, refer to [Register an External Service as a Destination](/docs/k8s/connect/terminating-gateways#register-an-external-service-as-a-destination). diff --git a/website/content/docs/k8s/connect/terminating-gateways.mdx b/website/content/docs/k8s/connect/terminating-gateways.mdx index 13da908b4f..e82bd773fb 100644 --- a/website/content/docs/k8s/connect/terminating-gateways.mdx +++ b/website/content/docs/k8s/connect/terminating-gateways.mdx @@ -89,13 +89,13 @@ Registering the external services with Consul is a multi-step process: ### Register external services with Consul There are two ways to register an external service with Consul: -1. If [`TransparentProxy`](/docs/k8s/helm#v-connectinject-transparentproxy) is enabled, you can declare external endpoints in the [`Destination`](/docs/connect/config-entries/service-defaults#terminating-gateway-destination) field of `service-defaults`. +1. If [`TransparentProxy`](/docs/connect/transparent-proxy) is enabled, the preferred method is to declare external endpoints in the [`destination`](/docs/connect/config-entries/service-defaults#terminating-gateway-destination) field of `ServiceDefaults`. 1. You can add the service as a node in the Consul catalog. -#### Register an external service as a Destination +#### Register an external service as a destination -`Destination` fields allow clients to dial the external service directly and are valid only in [`TransparentProxy`](/docs/k8s/helm#v-connectinject-transparentproxy) mode. -The following table describes traffic behaviors when using `Destination`s to route traffic through a terminating gateway: +The [`destination`](/docs/connect/config-entries/service-defaults#terminating-gateway-destination) field of the `ServiceDefaults` Custom Resource Definition (CRD) allows clients to dial the external service directly. It is valid only in [`TransparentProxy`](/docs/connect/transparent-proxy)) mode. +The following table describes traffic behaviors when using `destination`s to route traffic through a terminating gateway: | External Services Layer | Client dials | Client uses TLS | Allowed | Notes | |---|---|---|---|---| @@ -109,11 +109,13 @@ The following table describes traffic behaviors when using `Destination`s to rou | L7 | IP | No | Allowed | There are no limitations on dialing IPs without TLS. 
| You can provide a `caFile` to secure traffic between unencrypted clients that connect to external services through the terminating gateway. -Refer to [Create the configuration entry for the terminating gateway](/docs/k8s/connect/terminating-gateways#create-the-configuration-entry-for-the-terminating-gateway) for details. +Refer to [Create the configuration entry for the terminating gateway](#create-the-configuration-entry-for-the-terminating-gateway) for details. -Create a `service-defaults` custom resource for the external service: +Also note that regardless of the `protocol` specified in the `ServiceDefaults`, [L7 intentions](/docs/connect/config-entries/service-intentions#permissions) are not currently supported with `ServiceDefaults` destinations. - +Create a `ServiceDefaults` custom resource for the external service: + + ```yaml apiVersion: consul.hashicorp.com/v1alpha1 @@ -133,10 +135,10 @@ Create a `service-defaults` custom resource for the external service: Apply the `ServiceDefaults` resource with `kubectl apply`: ```shell-session -$ kubectl apply --filename service-defaults.yaml +$ kubectl apply --filename serviceDefaults.yaml ``` -All other terminating gateway operations can use the name of the `service-defaults` in place of a typical Consul service name. +All other terminating gateway operations can use the name of the `ServiceDefaults` in place of a typical Consul service name. #### Register an external service as a Catalog Node @@ -261,11 +263,13 @@ spec: --> **NOTE**: If TLS is enabled for external services registered through the Consul catalog, you must include the `caFile` parameter that points to the system trust store of the terminating gateway container. +If TLS is enabled for external services registered through the Consul catalog and you are not using [transparent proxy `destination`](#register-an-external-service-as-a-destination), you must include the [`caFile`](/docs/connect/config-entries/terminating-gateway#cafile) parameter that points to the system trust store of the terminating gateway container. By default, the trust store is located in the `/etc/ssl/certs/ca-certificates.crt` directory. -Configure the `caFile` parameter to point to the `/etc/ssl/cert.pem` directory if TLS is enabled and you are using one of the following components: - * Consul Helm chart 0.43 or older - * Or an Envoy image with an alpine base image +Configure the [`caFile`](https://www.consul.io/docs/connect/config-entries/terminating-gateway#cafile) parameter in the `TerminatingGateway` config entry to point to the `/etc/ssl/cert.pem` directory if TLS is enabled and you are using one of the following components: +- Consul Helm chart 0.43 or older +- An Envoy image with an alpine base image + +For `ServiceDefaults` destinations, refer to [Register an external service as a destination](#register-an-external-service-as-a-destination). 
Apply the `TerminatingGateway` resource with `kubectl apply`: @@ -273,7 +277,7 @@ Apply the `TerminatingGateway` resource with `kubectl apply`: $ kubectl apply --filename terminating-gateway.yaml ``` -If using ACLs and TLS, create a [`ServiceIntentions`](/docs/connect/config-entries/service-intentions) resource to allow access from services in the mesh to the external service +If using ACLs and TLS, create a [`ServiceIntentions`](/docs/connect/config-entries/service-intentions) resource to allow access from services in the mesh to the external service: @@ -292,6 +296,8 @@ spec: +-> **NOTE**: [L7 Intentions](/docs/connect/config-entries/service-intentions#permissions) are not currently supported for `ServiceDefaults` destinations. + Apply the `ServiceIntentions` resource with `kubectl apply`: ```shell-session @@ -372,7 +378,7 @@ $ kubectl exec deploy/static-client -- curl -vvvs --header "Host: example-https. - + ```shell-session $ kubectl exec deploy/static-client -- curl -vvvs https://example.com/ From 1f293e52440942b5a78e0381a80e362b3df6b763 Mon Sep 17 00:00:00 2001 From: Pablo Ruiz García Date: Wed, 24 Aug 2022 18:31:38 +0200 Subject: [PATCH 17/93] Added new auto_encrypt.grpc_server_tls config option to control AutoTLS enabling of GRPC Server's TLS usage Fix for #14253 Co-authored-by: trujillo-adam <47586768+trujillo-adam@users.noreply.github.com> --- agent/config/builder.go | 8 +- agent/config/config.go | 1 + agent/config/runtime_test.go | 103 ++++++++++++++++-- .../TestRuntimeConfig_Sanitize.golden | 11 +- agent/config/testdata/full-config.hcl | 1 + agent/config/testdata/full-config.json | 3 +- agent/grpc-external/server.go | 5 +- tlsutil/config.go | 20 +++- tlsutil/config_test.go | 41 +++++-- .../docs/agent/config/config-files.mdx | 2 + 10 files changed, 162 insertions(+), 33 deletions(-) diff --git a/agent/config/builder.go b/agent/config/builder.go index 40389553d2..960d86ea43 100644 --- a/agent/config/builder.go +++ b/agent/config/builder.go @@ -2531,10 +2531,9 @@ func (b *builder) buildTLSConfig(rt RuntimeConfig, t TLS) (tlsutil.Config, error return c, errors.New("verify_server_hostname is only valid in the tls.internal_rpc stanza") } - // TLS is only enabled on the gRPC listener if there's an HTTPS port configured - // for historic and backwards-compatibility reasons. - if rt.HTTPSPort <= 0 && (t.GRPC != TLSProtocolConfig{} && t.GRPCModifiedByDeprecatedConfig == nil) { - b.warn("tls.grpc was provided but TLS will NOT be enabled on the gRPC listener without an HTTPS listener configured (e.g. via ports.https)") + // UseAutoCert currently applies only to the external gRPC interface. 
+ if t.Defaults.UseAutoCert != nil || t.HTTPS.UseAutoCert != nil || t.InternalRPC.UseAutoCert != nil { + return c, errors.New("use_auto_cert is only valid in the tls.grpc stanza") } defaultTLSMinVersion := b.tlsVersion("tls.defaults.tls_min_version", t.Defaults.TLSMinVersion) @@ -2591,6 +2590,7 @@ func (b *builder) buildTLSConfig(rt RuntimeConfig, t TLS) (tlsutil.Config, error mapCommon("https", t.HTTPS, &c.HTTPS) mapCommon("grpc", t.GRPC, &c.GRPC) + c.GRPC.UseAutoCert = boolValWithDefault(t.GRPC.UseAutoCert, false) c.ServerName = rt.ServerName c.NodeName = rt.NodeName diff --git a/agent/config/config.go b/agent/config/config.go index 145c74db7c..2d21e75dae 100644 --- a/agent/config/config.go +++ b/agent/config/config.go @@ -867,6 +867,7 @@ type TLSProtocolConfig struct { VerifyIncoming *bool `mapstructure:"verify_incoming"` VerifyOutgoing *bool `mapstructure:"verify_outgoing"` VerifyServerHostname *bool `mapstructure:"verify_server_hostname"` + UseAutoCert *bool `mapstructure:"use_auto_cert"` } type TLS struct { diff --git a/agent/config/runtime_test.go b/agent/config/runtime_test.go index e0266811e3..f5e9bd3352 100644 --- a/agent/config/runtime_test.go +++ b/agent/config/runtime_test.go @@ -5516,7 +5516,70 @@ func TestLoad_IntegrationWithFlags(t *testing.T) { }, }) run(t, testCase{ - desc: "tls.grpc without ports.https", + desc: "tls.grpc.use_auto_cert defaults to false", + args: []string{ + `-data-dir=` + dataDir, + }, + json: []string{` + { + "tls": { + "grpc": {} + } + } + `}, + hcl: []string{` + tls { + grpc {} + } + `}, + expected: func(rt *RuntimeConfig) { + rt.DataDir = dataDir + rt.TLS.Domain = "consul." + rt.TLS.NodeName = "thehostname" + rt.TLS.GRPC.UseAutoCert = false + }, + }) + run(t, testCase{ + desc: "tls.grpc.use_auto_cert defaults to false (II)", + args: []string{ + `-data-dir=` + dataDir, + }, + json: []string{` + { + "tls": {} + } + `}, + hcl: []string{` + tls { + } + `}, + expected: func(rt *RuntimeConfig) { + rt.DataDir = dataDir + rt.TLS.Domain = "consul." + rt.TLS.NodeName = "thehostname" + rt.TLS.GRPC.UseAutoCert = false + }, + }) + run(t, testCase{ + desc: "tls.grpc.use_auto_cert defaults to false (III)", + args: []string{ + `-data-dir=` + dataDir, + }, + json: []string{` + { + } + `}, + hcl: []string{` + `}, + expected: func(rt *RuntimeConfig) { + rt.DataDir = dataDir + rt.TLS.Domain = "consul." + rt.TLS.NodeName = "thehostname" + rt.TLS.GRPC.UseAutoCert = false + }, + }) + run(t, testCase{ + desc: "tls.grpc.use_auto_cert enabled when true", args: []string{ `-data-dir=` + dataDir, }, @@ -5524,7 +5587,7 @@ func TestLoad_IntegrationWithFlags(t *testing.T) { { "tls": { "grpc": { - "cert_file": "cert-1234" + "use_auto_cert": true } } } @@ -5532,20 +5595,43 @@ func TestLoad_IntegrationWithFlags(t *testing.T) { hcl: []string{` tls { grpc { - cert_file = "cert-1234" + use_auto_cert = true } } `}, expected: func(rt *RuntimeConfig) { rt.DataDir = dataDir - rt.TLS.Domain = "consul." rt.TLS.NodeName = "thehostname" - - rt.TLS.GRPC.CertFile = "cert-1234" + rt.TLS.GRPC.UseAutoCert = true }, - expectedWarnings: []string{ - "tls.grpc was provided but TLS will NOT be enabled on the gRPC listener without an HTTPS listener configured (e.g. 
via ports.https)", + }) + run(t, testCase{ + desc: "tls.grpc.use_auto_cert disabled when false", + args: []string{ + `-data-dir=` + dataDir, + }, + json: []string{` + { + "tls": { + "grpc": { + "use_auto_cert": false + } + } + } + `}, + hcl: []string{` + tls { + grpc { + use_auto_cert = false + } + } + `}, + expected: func(rt *RuntimeConfig) { + rt.DataDir = dataDir + rt.TLS.Domain = "consul." + rt.TLS.NodeName = "thehostname" + rt.TLS.GRPC.UseAutoCert = false }, }) } @@ -6340,6 +6426,7 @@ func TestLoad_FullConfig(t *testing.T) { TLSMinVersion: types.TLSv1_0, CipherSuites: []types.TLSCipherSuite{types.TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256, types.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA}, VerifyOutgoing: false, + UseAutoCert: true, }, HTTPS: tlsutil.ProtocolConfig{ VerifyIncoming: true, diff --git a/agent/config/testdata/TestRuntimeConfig_Sanitize.golden b/agent/config/testdata/TestRuntimeConfig_Sanitize.golden index 09ecd4cfeb..8f91743dba 100644 --- a/agent/config/testdata/TestRuntimeConfig_Sanitize.golden +++ b/agent/config/testdata/TestRuntimeConfig_Sanitize.golden @@ -374,7 +374,8 @@ "TLSMinVersion": "", "VerifyIncoming": false, "VerifyOutgoing": false, - "VerifyServerHostname": false + "VerifyServerHostname": false, + "UseAutoCert": false }, "HTTPS": { "CAFile": "", @@ -385,7 +386,8 @@ "TLSMinVersion": "", "VerifyIncoming": false, "VerifyOutgoing": false, - "VerifyServerHostname": false + "VerifyServerHostname": false, + "UseAutoCert": false }, "InternalRPC": { "CAFile": "", @@ -396,7 +398,8 @@ "TLSMinVersion": "", "VerifyIncoming": false, "VerifyOutgoing": false, - "VerifyServerHostname": false + "VerifyServerHostname": false, + "UseAutoCert": false }, "NodeName": "", "ServerName": "" @@ -466,4 +469,4 @@ "VersionMetadata": "", "VersionPrerelease": "", "Watches": [] -} \ No newline at end of file +} diff --git a/agent/config/testdata/full-config.hcl b/agent/config/testdata/full-config.hcl index ed8203296c..305df9b89e 100644 --- a/agent/config/testdata/full-config.hcl +++ b/agent/config/testdata/full-config.hcl @@ -697,6 +697,7 @@ tls { tls_cipher_suites = "TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256,TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA" tls_min_version = "TLSv1_0" verify_incoming = true + use_auto_cert = true } } tls_cipher_suites = "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256" diff --git a/agent/config/testdata/full-config.json b/agent/config/testdata/full-config.json index 8294a27b7c..bc72c2955e 100644 --- a/agent/config/testdata/full-config.json +++ b/agent/config/testdata/full-config.json @@ -692,7 +692,8 @@ "key_file": "1y4prKjl", "tls_cipher_suites": "TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256,TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA", "tls_min_version": "TLSv1_0", - "verify_incoming": true + "verify_incoming": true, + "use_auto_cert": true } }, "tls_cipher_suites": "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256", diff --git a/agent/grpc-external/server.go b/agent/grpc-external/server.go index 751cca91c8..4ae8c6d652 100644 --- a/agent/grpc-external/server.go +++ b/agent/grpc-external/server.go @@ -1,12 +1,13 @@ package external import ( + "time" + middleware "github.com/grpc-ecosystem/go-grpc-middleware" recovery "github.com/grpc-ecosystem/go-grpc-middleware/recovery" "google.golang.org/grpc" "google.golang.org/grpc/credentials" "google.golang.org/grpc/keepalive" - "time" agentmiddleware "github.com/hashicorp/consul/agent/grpc-middleware" "github.com/hashicorp/consul/tlsutil" @@ -34,7 +35,7 @@ func 
NewServer(logger agentmiddleware.Logger, tls *tlsutil.Configurator) *grpc.S MinTime: 15 * time.Second, }), } - if tls != nil && tls.GRPCTLSConfigured() { + if tls != nil && tls.GRPCServerUseTLS() { creds := credentials.NewTLS(tls.IncomingGRPCConfig()) opts = append(opts, grpc.Creds(creds)) } diff --git a/tlsutil/config.go b/tlsutil/config.go index 7c9e6d2ad6..2e1614165e 100644 --- a/tlsutil/config.go +++ b/tlsutil/config.go @@ -102,6 +102,10 @@ type ProtocolConfig struct { // // Note: this setting only applies to the Internal RPC configuration. VerifyServerHostname bool + + // UseAutoCert is used to enable usage of auto_encrypt/auto_config generated + // certificate & key material on the external gRPC listener. + UseAutoCert bool } // Config configures the Configurator. @@ -167,6 +171,10 @@ type protocolConfig struct { // combinedCAPool is a pool containing both manualCAPEMs and the certificates // received from auto-config/auto-encrypt. combinedCAPool *x509.CertPool + + // useAutoCert indicates whether we should use auto-encrypt/config data + // for the TLS server/listener. NOTE: Only applies to the external gRPC server. + useAutoCert bool } // Configurator provides tls.Config and net.Dial wrappers to enable TLS for @@ -323,6 +331,7 @@ func (c *Configurator) loadProtocolConfig(base Config, pc ProtocolConfig) (*prot manualCAPEMs: pems, manualCAPool: manualPool, combinedCAPool: combinedPool, + useAutoCert: pc.UseAutoCert, }, nil } @@ -620,16 +629,15 @@ func (c *Configurator) Cert() *tls.Certificate { return cert } -// GRPCTLSConfigured returns whether there's a TLS certificate configured for -// gRPC (either manually or by auto-config/auto-encrypt). It is checked, along -// with the presence of an HTTPS port, to determine whether to enable TLS on -// incoming gRPC connections. +// GRPCServerUseTLS returns whether there's a TLS certificate configured for +// (external) gRPC (either manually or by auto-config/auto-encrypt) and, for +// auto-encrypt certificates, whether their use is enabled via use_auto_cert. // // This function acquires a read lock because it reads from the config. 
-func (c *Configurator) GRPCTLSConfigured() bool { +func (c *Configurator) GRPCServerUseTLS() bool { c.lock.RLock() defer c.lock.RUnlock() - return c.grpc.cert != nil || c.autoTLS.cert != nil + return c.grpc.cert != nil || (c.grpc.useAutoCert && c.autoTLS.cert != nil) } // VerifyIncomingRPC returns true if we should verify incoming connections to diff --git a/tlsutil/config_test.go b/tlsutil/config_test.go index 75fa839458..fc817aec69 100644 --- a/tlsutil/config_test.go +++ b/tlsutil/config_test.go @@ -1465,7 +1465,7 @@ func TestConfigurator_AuthorizeInternalRPCServerConn(t *testing.T) { }) } -func TestConfigurator_GRPCTLSConfigured(t *testing.T) { +func TestConfigurator_GRPCServerUseTLS(t *testing.T) { t.Run("certificate manually configured", func(t *testing.T) { c := makeConfigurator(t, Config{ GRPC: ProtocolConfig{ @@ -1473,22 +1473,47 @@ func TestConfigurator_GRPCTLSConfigured(t *testing.T) { KeyFile: "../test/hostname/Alice.key", }, }) - require.True(t, c.GRPCTLSConfigured()) + require.True(t, c.GRPCServerUseTLS()) }) - t.Run("AutoTLS", func(t *testing.T) { + t.Run("no certificate", func(t *testing.T) { + c := makeConfigurator(t, Config{}) + require.False(t, c.GRPCServerUseTLS()) + }) + + t.Run("AutoTLS (default)", func(t *testing.T) { c := makeConfigurator(t, Config{}) bobCert := loadFile(t, "../test/hostname/Bob.crt") bobKey := loadFile(t, "../test/hostname/Bob.key") require.NoError(t, c.UpdateAutoTLSCert(bobCert, bobKey)) - - require.True(t, c.GRPCTLSConfigured()) + require.False(t, c.GRPCServerUseTLS()) }) - t.Run("no certificate", func(t *testing.T) { - c := makeConfigurator(t, Config{}) - require.False(t, c.GRPCTLSConfigured()) + t.Run("AutoTLS w/ UseAutoCert Disabled", func(t *testing.T) { + c := makeConfigurator(t, Config{ + GRPC: ProtocolConfig{ + UseAutoCert: false, + }, + }) + + bobCert := loadFile(t, "../test/hostname/Bob.crt") + bobKey := loadFile(t, "../test/hostname/Bob.key") + require.NoError(t, c.UpdateAutoTLSCert(bobCert, bobKey)) + require.False(t, c.GRPCServerUseTLS()) + }) + + t.Run("AutoTLS w/ UseAutoCert Enabled", func(t *testing.T) { + c := makeConfigurator(t, Config{ + GRPC: ProtocolConfig{ + UseAutoCert: true, + }, + }) + + bobCert := loadFile(t, "../test/hostname/Bob.crt") + bobKey := loadFile(t, "../test/hostname/Bob.key") + require.NoError(t, c.UpdateAutoTLSCert(bobCert, bobKey)) + require.True(t, c.GRPCServerUseTLS()) }) } diff --git a/website/content/docs/agent/config/config-files.mdx b/website/content/docs/agent/config/config-files.mdx index bf3e219bee..2631378731 100644 --- a/website/content/docs/agent/config/config-files.mdx +++ b/website/content/docs/agent/config/config-files.mdx @@ -2019,6 +2019,8 @@ specially crafted certificate signed by the CA can be used to gain full access t - `verify_incoming` - ((#tls_grpc_verify_incoming)) Overrides [`tls.defaults.verify_incoming`](#tls_defaults_verify_incoming). + - `use_auto_cert` - (Defaults to `false`) Enables or disables TLS on gRPC servers. Set to `true` to allow `auto_encrypt` TLS settings to apply to gRPC listeners. We recommend disabling TLS on gRPC servers if you are using `auto_encrypt` for other TLS purposes, such as enabling HTTPS. + - `https` ((#tls_https)) Provides settings for the HTTPS interface. To enable the HTTPS interface you must define a port via [`ports.https`](#https_port). 
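To make the behavior change in this patch concrete, here is a minimal, self-contained Go sketch of the decision now made for the external gRPC listener. It is illustrative only, not the Consul source: the `grpcTLSInputs` type and `serverUseTLS` function are invented for this example, and simply mirror the patched condition in `GRPCServerUseTLS` above. A manually configured certificate always enables TLS; an auto_encrypt/auto_config certificate enables it only when `use_auto_cert` is set.

```go
package main

import "fmt"

// grpcTLSInputs is an illustrative stand-in for the configurator state the
// patched check consults; the names are invented for this sketch.
type grpcTLSInputs struct {
	manualCert  bool // a cert/key pair was configured in tls.grpc
	autoTLSCert bool // a certificate was obtained via auto_encrypt/auto_config
	useAutoCert bool // tls.grpc.use_auto_cert
}

// serverUseTLS mirrors the patched condition: a manual certificate always
// enables TLS; an auto-TLS certificate only does so when use_auto_cert is set.
func serverUseTLS(in grpcTLSInputs) bool {
	return in.manualCert || (in.useAutoCert && in.autoTLSCert)
}

func main() {
	for _, in := range []grpcTLSInputs{
		{manualCert: true},                     // manual cert: TLS on
		{autoTLSCert: true},                    // auto cert, flag unset: TLS off
		{autoTLSCert: true, useAutoCert: true}, // auto cert, flag set: TLS on
		{},                                     // nothing configured: TLS off
	} {
		fmt.Printf("%+v => TLS enabled: %t\n", in, serverUseTLS(in))
	}
}
```

Under this scheme the old implicit coupling between gRPC TLS and the HTTPS port disappears; operators opt in to auto-encrypt certificates on the gRPC listener explicitly.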
From 919da333314df19faa8686b33a3348167e6baab3 Mon Sep 17 00:00:00 2001 From: skpratt Date: Wed, 24 Aug 2022 12:00:09 -0500 Subject: [PATCH 18/93] no-op: refactor usagemetrics tests for clarity and DRY cases (#14313) --- .../usagemetrics/usagemetrics_oss_test.go | 2067 +++++------------ 1 file changed, 560 insertions(+), 1507 deletions(-) diff --git a/agent/consul/usagemetrics/usagemetrics_oss_test.go b/agent/consul/usagemetrics/usagemetrics_oss_test.go index c860e5b741..8c37fe2695 100644 --- a/agent/consul/usagemetrics/usagemetrics_oss_test.go +++ b/agent/consul/usagemetrics/usagemetrics_oss_test.go @@ -8,10 +8,11 @@ import ( "time" "github.com/armon/go-metrics" - uuid "github.com/hashicorp/go-uuid" "github.com/hashicorp/serf/serf" "github.com/stretchr/testify/require" + "github.com/hashicorp/go-uuid" + "github.com/hashicorp/consul/acl" "github.com/hashicorp/consul/agent/consul/state" "github.com/hashicorp/consul/agent/structs" @@ -23,371 +24,368 @@ func newStateStore() (*state.Store, error) { return state.NewStateStore(nil), nil } +type testCase struct { + modfiyStateStore func(t *testing.T, s *state.Store) + getMembersFunc getMembersFunc + expectedGauges map[string]metrics.GaugeValue +} + +var baseCases = map[string]testCase{ + "empty-state": { + expectedGauges: map[string]metrics.GaugeValue{ + // --- node --- + "consul.usage.test.consul.state.nodes;datacenter=dc1": { + Name: "consul.usage.test.consul.state.nodes", + Value: 0, + Labels: []metrics.Label{{Name: "datacenter", Value: "dc1"}}, + }, + // --- peering --- + "consul.usage.test.consul.state.peerings;datacenter=dc1": { + Name: "consul.usage.test.consul.state.peerings", + Value: 0, + Labels: []metrics.Label{{Name: "datacenter", Value: "dc1"}}, + }, + // --- member --- + "consul.usage.test.consul.members.clients;datacenter=dc1": { + Name: "consul.usage.test.consul.members.clients", + Value: 0, + Labels: []metrics.Label{{Name: "datacenter", Value: "dc1"}}, + }, + "consul.usage.test.consul.members.servers;datacenter=dc1": { + Name: "consul.usage.test.consul.members.servers", + Value: 0, + Labels: []metrics.Label{{Name: "datacenter", Value: "dc1"}}, + }, + // --- service --- + "consul.usage.test.consul.state.services;datacenter=dc1": { + Name: "consul.usage.test.consul.state.services", + Value: 0, + Labels: []metrics.Label{{Name: "datacenter", Value: "dc1"}}, + }, + "consul.usage.test.consul.state.service_instances;datacenter=dc1": { + Name: "consul.usage.test.consul.state.service_instances", + Value: 0, + Labels: []metrics.Label{{Name: "datacenter", Value: "dc1"}}, + }, + // --- service mesh --- + "consul.usage.test.consul.state.connect_instances;datacenter=dc1;kind=connect-proxy": { + Name: "consul.usage.test.consul.state.connect_instances", + Value: 0, + Labels: []metrics.Label{ + {Name: "datacenter", Value: "dc1"}, + {Name: "kind", Value: "connect-proxy"}, + }, + }, + "consul.usage.test.consul.state.connect_instances;datacenter=dc1;kind=terminating-gateway": { + Name: "consul.usage.test.consul.state.connect_instances", + Value: 0, + Labels: []metrics.Label{ + {Name: "datacenter", Value: "dc1"}, + {Name: "kind", Value: "terminating-gateway"}, + }, + }, + "consul.usage.test.consul.state.connect_instances;datacenter=dc1;kind=ingress-gateway": { + Name: "consul.usage.test.consul.state.connect_instances", + Value: 0, + Labels: []metrics.Label{ + {Name: "datacenter", Value: "dc1"}, + {Name: "kind", Value: "ingress-gateway"}, + }, + }, + "consul.usage.test.consul.state.connect_instances;datacenter=dc1;kind=mesh-gateway": { + Name: 
"consul.usage.test.consul.state.connect_instances", + Value: 0, + Labels: []metrics.Label{ + {Name: "datacenter", Value: "dc1"}, + {Name: "kind", Value: "mesh-gateway"}, + }, + }, + "consul.usage.test.consul.state.connect_instances;datacenter=dc1;kind=connect-native": { + Name: "consul.usage.test.consul.state.connect_instances", + Value: 0, + Labels: []metrics.Label{ + {Name: "datacenter", Value: "dc1"}, + {Name: "kind", Value: "connect-native"}, + }, + }, + // --- kv --- + "consul.usage.test.consul.state.kv_entries;datacenter=dc1": { + Name: "consul.usage.test.consul.state.kv_entries", + Value: 0, + Labels: []metrics.Label{{Name: "datacenter", Value: "dc1"}}, + }, + // --- config entries --- + "consul.usage.test.consul.state.config_entries;datacenter=dc1;kind=service-intentions": { + Name: "consul.usage.test.consul.state.config_entries", + Value: 0, + Labels: []metrics.Label{ + {Name: "datacenter", Value: "dc1"}, + {Name: "kind", Value: "service-intentions"}, + }, + }, + "consul.usage.test.consul.state.config_entries;datacenter=dc1;kind=service-resolver": { + Name: "consul.usage.test.consul.state.config_entries", + Value: 0, + Labels: []metrics.Label{ + {Name: "datacenter", Value: "dc1"}, + {Name: "kind", Value: "service-resolver"}, + }, + }, + "consul.usage.test.consul.state.config_entries;datacenter=dc1;kind=service-router": { + Name: "consul.usage.test.consul.state.config_entries", + Value: 0, + Labels: []metrics.Label{ + {Name: "datacenter", Value: "dc1"}, + {Name: "kind", Value: "service-router"}, + }, + }, + "consul.usage.test.consul.state.config_entries;datacenter=dc1;kind=service-defaults": { + Name: "consul.usage.test.consul.state.config_entries", + Value: 0, + Labels: []metrics.Label{ + {Name: "datacenter", Value: "dc1"}, + {Name: "kind", Value: "service-defaults"}, + }, + }, + "consul.usage.test.consul.state.config_entries;datacenter=dc1;kind=ingress-gateway": { + Name: "consul.usage.test.consul.state.config_entries", + Value: 0, + Labels: []metrics.Label{ + {Name: "datacenter", Value: "dc1"}, + {Name: "kind", Value: "ingress-gateway"}, + }, + }, + "consul.usage.test.consul.state.config_entries;datacenter=dc1;kind=service-splitter": { + Name: "consul.usage.test.consul.state.config_entries", + Value: 0, + Labels: []metrics.Label{ + {Name: "datacenter", Value: "dc1"}, + {Name: "kind", Value: "service-splitter"}, + }, + }, + "consul.usage.test.consul.state.config_entries;datacenter=dc1;kind=mesh": { + Name: "consul.usage.test.consul.state.config_entries", + Value: 0, + Labels: []metrics.Label{ + {Name: "datacenter", Value: "dc1"}, + {Name: "kind", Value: "mesh"}, + }, + }, + "consul.usage.test.consul.state.config_entries;datacenter=dc1;kind=proxy-defaults": { + Name: "consul.usage.test.consul.state.config_entries", + Value: 0, + Labels: []metrics.Label{ + {Name: "datacenter", Value: "dc1"}, + {Name: "kind", Value: "proxy-defaults"}, + }, + }, + "consul.usage.test.consul.state.config_entries;datacenter=dc1;kind=terminating-gateway": { + Name: "consul.usage.test.consul.state.config_entries", + Value: 0, + Labels: []metrics.Label{ + {Name: "datacenter", Value: "dc1"}, + {Name: "kind", Value: "terminating-gateway"}, + }, + }, + "consul.usage.test.consul.state.config_entries;datacenter=dc1;kind=exported-services": { + Name: "consul.usage.test.consul.state.config_entries", + Value: 0, + Labels: []metrics.Label{ + {Name: "datacenter", Value: "dc1"}, + {Name: "kind", Value: "exported-services"}, + }, + }, + }, + getMembersFunc: func() []serf.Member { return []serf.Member{} }, + }, + 
"nodes": { + modfiyStateStore: func(t *testing.T, s *state.Store) { + require.NoError(t, s.EnsureNode(1, &structs.Node{Node: "foo", Address: "127.0.0.1"})) + require.NoError(t, s.EnsureNode(2, &structs.Node{Node: "bar", Address: "127.0.0.2"})) + }, + getMembersFunc: func() []serf.Member { + return []serf.Member{ + { + Name: "foo", + Tags: map[string]string{"role": "consul"}, + Status: serf.StatusAlive, + }, + { + Name: "bar", + Tags: map[string]string{"role": "consul"}, + Status: serf.StatusAlive, + }, + } + }, + expectedGauges: map[string]metrics.GaugeValue{ + // --- node --- + "consul.usage.test.consul.state.nodes;datacenter=dc1": { + Name: "consul.usage.test.consul.state.nodes", + Value: 2, + Labels: []metrics.Label{{Name: "datacenter", Value: "dc1"}}, + }, + // --- peering --- + "consul.usage.test.consul.state.peerings;datacenter=dc1": { + Name: "consul.usage.test.consul.state.peerings", + Value: 0, + Labels: []metrics.Label{{Name: "datacenter", Value: "dc1"}}, + }, + // --- member --- + "consul.usage.test.consul.members.servers;datacenter=dc1": { + Name: "consul.usage.test.consul.members.servers", + Value: 2, + Labels: []metrics.Label{{Name: "datacenter", Value: "dc1"}}, + }, + "consul.usage.test.consul.members.clients;datacenter=dc1": { + Name: "consul.usage.test.consul.members.clients", + Value: 0, + Labels: []metrics.Label{{Name: "datacenter", Value: "dc1"}}, + }, + // --- service --- + "consul.usage.test.consul.state.services;datacenter=dc1": { + Name: "consul.usage.test.consul.state.services", + Value: 0, + Labels: []metrics.Label{{Name: "datacenter", Value: "dc1"}}, + }, + "consul.usage.test.consul.state.service_instances;datacenter=dc1": { + Name: "consul.usage.test.consul.state.service_instances", + Value: 0, + Labels: []metrics.Label{{Name: "datacenter", Value: "dc1"}}, + }, + // --- service mesh --- + "consul.usage.test.consul.state.connect_instances;datacenter=dc1;kind=connect-proxy": { + Name: "consul.usage.test.consul.state.connect_instances", + Value: 0, + Labels: []metrics.Label{ + {Name: "datacenter", Value: "dc1"}, + {Name: "kind", Value: "connect-proxy"}, + }, + }, + "consul.usage.test.consul.state.connect_instances;datacenter=dc1;kind=terminating-gateway": { + Name: "consul.usage.test.consul.state.connect_instances", + Value: 0, + Labels: []metrics.Label{ + {Name: "datacenter", Value: "dc1"}, + {Name: "kind", Value: "terminating-gateway"}, + }, + }, + "consul.usage.test.consul.state.connect_instances;datacenter=dc1;kind=ingress-gateway": { + Name: "consul.usage.test.consul.state.connect_instances", + Value: 0, + Labels: []metrics.Label{ + {Name: "datacenter", Value: "dc1"}, + {Name: "kind", Value: "ingress-gateway"}, + }, + }, + "consul.usage.test.consul.state.connect_instances;datacenter=dc1;kind=mesh-gateway": { + Name: "consul.usage.test.consul.state.connect_instances", + Value: 0, + Labels: []metrics.Label{ + {Name: "datacenter", Value: "dc1"}, + {Name: "kind", Value: "mesh-gateway"}, + }, + }, + "consul.usage.test.consul.state.connect_instances;datacenter=dc1;kind=connect-native": { + Name: "consul.usage.test.consul.state.connect_instances", + Value: 0, + Labels: []metrics.Label{ + {Name: "datacenter", Value: "dc1"}, + {Name: "kind", Value: "connect-native"}, + }, + }, + // --- kv --- + "consul.usage.test.consul.state.kv_entries;datacenter=dc1": { + Name: "consul.usage.test.consul.state.kv_entries", + Value: 0, + Labels: []metrics.Label{{Name: "datacenter", Value: "dc1"}}, + }, + // --- config entries --- + 
"consul.usage.test.consul.state.config_entries;datacenter=dc1;kind=service-intentions": { + Name: "consul.usage.test.consul.state.config_entries", + Value: 0, + Labels: []metrics.Label{ + {Name: "datacenter", Value: "dc1"}, + {Name: "kind", Value: "service-intentions"}, + }, + }, + "consul.usage.test.consul.state.config_entries;datacenter=dc1;kind=service-resolver": { + Name: "consul.usage.test.consul.state.config_entries", + Value: 0, + Labels: []metrics.Label{ + {Name: "datacenter", Value: "dc1"}, + {Name: "kind", Value: "service-resolver"}, + }, + }, + "consul.usage.test.consul.state.config_entries;datacenter=dc1;kind=service-router": { + Name: "consul.usage.test.consul.state.config_entries", + Value: 0, + Labels: []metrics.Label{ + {Name: "datacenter", Value: "dc1"}, + {Name: "kind", Value: "service-router"}, + }, + }, + "consul.usage.test.consul.state.config_entries;datacenter=dc1;kind=service-defaults": { + Name: "consul.usage.test.consul.state.config_entries", + Value: 0, + Labels: []metrics.Label{ + {Name: "datacenter", Value: "dc1"}, + {Name: "kind", Value: "service-defaults"}, + }, + }, + "consul.usage.test.consul.state.config_entries;datacenter=dc1;kind=ingress-gateway": { + Name: "consul.usage.test.consul.state.config_entries", + Value: 0, + Labels: []metrics.Label{ + {Name: "datacenter", Value: "dc1"}, + {Name: "kind", Value: "ingress-gateway"}, + }, + }, + "consul.usage.test.consul.state.config_entries;datacenter=dc1;kind=service-splitter": { + Name: "consul.usage.test.consul.state.config_entries", + Value: 0, + Labels: []metrics.Label{ + {Name: "datacenter", Value: "dc1"}, + {Name: "kind", Value: "service-splitter"}, + }, + }, + "consul.usage.test.consul.state.config_entries;datacenter=dc1;kind=mesh": { + Name: "consul.usage.test.consul.state.config_entries", + Value: 0, + Labels: []metrics.Label{ + {Name: "datacenter", Value: "dc1"}, + {Name: "kind", Value: "mesh"}, + }, + }, + "consul.usage.test.consul.state.config_entries;datacenter=dc1;kind=proxy-defaults": { + Name: "consul.usage.test.consul.state.config_entries", + Value: 0, + Labels: []metrics.Label{ + {Name: "datacenter", Value: "dc1"}, + {Name: "kind", Value: "proxy-defaults"}, + }, + }, + "consul.usage.test.consul.state.config_entries;datacenter=dc1;kind=terminating-gateway": { + Name: "consul.usage.test.consul.state.config_entries", + Value: 0, + Labels: []metrics.Label{ + {Name: "datacenter", Value: "dc1"}, + {Name: "kind", Value: "terminating-gateway"}, + }, + }, + "consul.usage.test.consul.state.config_entries;datacenter=dc1;kind=exported-services": { + Name: "consul.usage.test.consul.state.config_entries", + Value: 0, + Labels: []metrics.Label{ + {Name: "datacenter", Value: "dc1"}, + {Name: "kind", Value: "exported-services"}, + }, + }, + }, + }, +} + func TestUsageReporter_emitNodeUsage_OSS(t *testing.T) { - type testCase struct { - modfiyStateStore func(t *testing.T, s *state.Store) - getMembersFunc getMembersFunc - expectedGauges map[string]metrics.GaugeValue - } - cases := map[string]testCase{ - "empty-state": { - expectedGauges: map[string]metrics.GaugeValue{ - // --- node --- - "consul.usage.test.consul.state.nodes;datacenter=dc1": { - Name: "consul.usage.test.consul.state.nodes", - Value: 0, - Labels: []metrics.Label{{Name: "datacenter", Value: "dc1"}}, - }, - // --- peering --- - "consul.usage.test.consul.state.peerings;datacenter=dc1": { - Name: "consul.usage.test.consul.state.peerings", - Value: 0, - Labels: []metrics.Label{{Name: "datacenter", Value: "dc1"}}, - }, - // --- member --- - 
"consul.usage.test.consul.members.clients;datacenter=dc1": { - Name: "consul.usage.test.consul.members.clients", - Value: 0, - Labels: []metrics.Label{{Name: "datacenter", Value: "dc1"}}, - }, - "consul.usage.test.consul.members.servers;datacenter=dc1": { - Name: "consul.usage.test.consul.members.servers", - Value: 0, - Labels: []metrics.Label{{Name: "datacenter", Value: "dc1"}}, - }, - // --- service --- - "consul.usage.test.consul.state.services;datacenter=dc1": { - Name: "consul.usage.test.consul.state.services", - Value: 0, - Labels: []metrics.Label{{Name: "datacenter", Value: "dc1"}}, - }, - "consul.usage.test.consul.state.service_instances;datacenter=dc1": { - Name: "consul.usage.test.consul.state.service_instances", - Value: 0, - Labels: []metrics.Label{{Name: "datacenter", Value: "dc1"}}, - }, - // --- service mesh --- - "consul.usage.test.consul.state.connect_instances;datacenter=dc1;kind=connect-proxy": { - Name: "consul.usage.test.consul.state.connect_instances", - Value: 0, - Labels: []metrics.Label{ - {Name: "datacenter", Value: "dc1"}, - {Name: "kind", Value: "connect-proxy"}, - }, - }, - "consul.usage.test.consul.state.connect_instances;datacenter=dc1;kind=terminating-gateway": { - Name: "consul.usage.test.consul.state.connect_instances", - Value: 0, - Labels: []metrics.Label{ - {Name: "datacenter", Value: "dc1"}, - {Name: "kind", Value: "terminating-gateway"}, - }, - }, - "consul.usage.test.consul.state.connect_instances;datacenter=dc1;kind=ingress-gateway": { - Name: "consul.usage.test.consul.state.connect_instances", - Value: 0, - Labels: []metrics.Label{ - {Name: "datacenter", Value: "dc1"}, - {Name: "kind", Value: "ingress-gateway"}, - }, - }, - "consul.usage.test.consul.state.connect_instances;datacenter=dc1;kind=mesh-gateway": { - Name: "consul.usage.test.consul.state.connect_instances", - Value: 0, - Labels: []metrics.Label{ - {Name: "datacenter", Value: "dc1"}, - {Name: "kind", Value: "mesh-gateway"}, - }, - }, - "consul.usage.test.consul.state.connect_instances;datacenter=dc1;kind=connect-native": { - Name: "consul.usage.test.consul.state.connect_instances", - Value: 0, - Labels: []metrics.Label{ - {Name: "datacenter", Value: "dc1"}, - {Name: "kind", Value: "connect-native"}, - }, - }, - // --- kv --- - "consul.usage.test.consul.state.kv_entries;datacenter=dc1": { - Name: "consul.usage.test.consul.state.kv_entries", - Value: 0, - Labels: []metrics.Label{{Name: "datacenter", Value: "dc1"}}, - }, - // --- config entries --- - "consul.usage.test.consul.state.config_entries;datacenter=dc1;kind=service-intentions": { - Name: "consul.usage.test.consul.state.config_entries", - Value: 0, - Labels: []metrics.Label{ - {Name: "datacenter", Value: "dc1"}, - {Name: "kind", Value: "service-intentions"}, - }, - }, - "consul.usage.test.consul.state.config_entries;datacenter=dc1;kind=service-resolver": { - Name: "consul.usage.test.consul.state.config_entries", - Value: 0, - Labels: []metrics.Label{ - {Name: "datacenter", Value: "dc1"}, - {Name: "kind", Value: "service-resolver"}, - }, - }, - "consul.usage.test.consul.state.config_entries;datacenter=dc1;kind=service-router": { - Name: "consul.usage.test.consul.state.config_entries", - Value: 0, - Labels: []metrics.Label{ - {Name: "datacenter", Value: "dc1"}, - {Name: "kind", Value: "service-router"}, - }, - }, - "consul.usage.test.consul.state.config_entries;datacenter=dc1;kind=service-defaults": { - Name: "consul.usage.test.consul.state.config_entries", - Value: 0, - Labels: []metrics.Label{ - {Name: "datacenter", Value: "dc1"}, - 
{Name: "kind", Value: "service-defaults"}, - }, - }, - "consul.usage.test.consul.state.config_entries;datacenter=dc1;kind=ingress-gateway": { - Name: "consul.usage.test.consul.state.config_entries", - Value: 0, - Labels: []metrics.Label{ - {Name: "datacenter", Value: "dc1"}, - {Name: "kind", Value: "ingress-gateway"}, - }, - }, - "consul.usage.test.consul.state.config_entries;datacenter=dc1;kind=service-splitter": { - Name: "consul.usage.test.consul.state.config_entries", - Value: 0, - Labels: []metrics.Label{ - {Name: "datacenter", Value: "dc1"}, - {Name: "kind", Value: "service-splitter"}, - }, - }, - "consul.usage.test.consul.state.config_entries;datacenter=dc1;kind=mesh": { - Name: "consul.usage.test.consul.state.config_entries", - Value: 0, - Labels: []metrics.Label{ - {Name: "datacenter", Value: "dc1"}, - {Name: "kind", Value: "mesh"}, - }, - }, - "consul.usage.test.consul.state.config_entries;datacenter=dc1;kind=proxy-defaults": { - Name: "consul.usage.test.consul.state.config_entries", - Value: 0, - Labels: []metrics.Label{ - {Name: "datacenter", Value: "dc1"}, - {Name: "kind", Value: "proxy-defaults"}, - }, - }, - "consul.usage.test.consul.state.config_entries;datacenter=dc1;kind=terminating-gateway": { - Name: "consul.usage.test.consul.state.config_entries", - Value: 0, - Labels: []metrics.Label{ - {Name: "datacenter", Value: "dc1"}, - {Name: "kind", Value: "terminating-gateway"}, - }, - }, - "consul.usage.test.consul.state.config_entries;datacenter=dc1;kind=exported-services": { - Name: "consul.usage.test.consul.state.config_entries", - Value: 0, - Labels: []metrics.Label{ - {Name: "datacenter", Value: "dc1"}, - {Name: "kind", Value: "exported-services"}, - }, - }, - }, - getMembersFunc: func() []serf.Member { return []serf.Member{} }, - }, - "nodes": { - modfiyStateStore: func(t *testing.T, s *state.Store) { - require.NoError(t, s.EnsureNode(1, &structs.Node{Node: "foo", Address: "127.0.0.1"})) - require.NoError(t, s.EnsureNode(2, &structs.Node{Node: "bar", Address: "127.0.0.2"})) - require.NoError(t, s.EnsureNode(3, &structs.Node{Node: "baz", Address: "127.0.0.2"})) - }, - getMembersFunc: func() []serf.Member { - return []serf.Member{ - { - Name: "foo", - Tags: map[string]string{"role": "consul"}, - Status: serf.StatusAlive, - }, - { - Name: "bar", - Tags: map[string]string{"role": "consul"}, - Status: serf.StatusAlive, - }, - { - Name: "baz", - Tags: map[string]string{"role": "node"}, - Status: serf.StatusAlive, - }, - } - }, - expectedGauges: map[string]metrics.GaugeValue{ - // --- node --- - "consul.usage.test.consul.state.nodes;datacenter=dc1": { - Name: "consul.usage.test.consul.state.nodes", - Value: 3, - Labels: []metrics.Label{{Name: "datacenter", Value: "dc1"}}, - }, - // --- peering --- - "consul.usage.test.consul.state.peerings;datacenter=dc1": { - Name: "consul.usage.test.consul.state.peerings", - Value: 0, - Labels: []metrics.Label{{Name: "datacenter", Value: "dc1"}}, - }, - // --- member --- - "consul.usage.test.consul.members.servers;datacenter=dc1": { - Name: "consul.usage.test.consul.members.servers", - Value: 2, - Labels: []metrics.Label{{Name: "datacenter", Value: "dc1"}}, - }, - "consul.usage.test.consul.members.clients;datacenter=dc1": { - Name: "consul.usage.test.consul.members.clients", - Value: 1, - Labels: []metrics.Label{{Name: "datacenter", Value: "dc1"}}, - }, - // --- service --- - "consul.usage.test.consul.state.services;datacenter=dc1": { - Name: "consul.usage.test.consul.state.services", - Value: 0, - Labels: []metrics.Label{{Name: "datacenter", 
Value: "dc1"}}, - }, - "consul.usage.test.consul.state.service_instances;datacenter=dc1": { - Name: "consul.usage.test.consul.state.service_instances", - Value: 0, - Labels: []metrics.Label{{Name: "datacenter", Value: "dc1"}}, - }, - // --- service mesh --- - "consul.usage.test.consul.state.connect_instances;datacenter=dc1;kind=connect-proxy": { - Name: "consul.usage.test.consul.state.connect_instances", - Value: 0, - Labels: []metrics.Label{ - {Name: "datacenter", Value: "dc1"}, - {Name: "kind", Value: "connect-proxy"}, - }, - }, - "consul.usage.test.consul.state.connect_instances;datacenter=dc1;kind=terminating-gateway": { - Name: "consul.usage.test.consul.state.connect_instances", - Value: 0, - Labels: []metrics.Label{ - {Name: "datacenter", Value: "dc1"}, - {Name: "kind", Value: "terminating-gateway"}, - }, - }, - "consul.usage.test.consul.state.connect_instances;datacenter=dc1;kind=ingress-gateway": { - Name: "consul.usage.test.consul.state.connect_instances", - Value: 0, - Labels: []metrics.Label{ - {Name: "datacenter", Value: "dc1"}, - {Name: "kind", Value: "ingress-gateway"}, - }, - }, - "consul.usage.test.consul.state.connect_instances;datacenter=dc1;kind=mesh-gateway": { - Name: "consul.usage.test.consul.state.connect_instances", - Value: 0, - Labels: []metrics.Label{ - {Name: "datacenter", Value: "dc1"}, - {Name: "kind", Value: "mesh-gateway"}, - }, - }, - "consul.usage.test.consul.state.connect_instances;datacenter=dc1;kind=connect-native": { - Name: "consul.usage.test.consul.state.connect_instances", - Value: 0, - Labels: []metrics.Label{ - {Name: "datacenter", Value: "dc1"}, - {Name: "kind", Value: "connect-native"}, - }, - }, - // --- kv --- - "consul.usage.test.consul.state.kv_entries;datacenter=dc1": { - Name: "consul.usage.test.consul.state.kv_entries", - Value: 0, - Labels: []metrics.Label{{Name: "datacenter", Value: "dc1"}}, - }, - // --- config entries --- - "consul.usage.test.consul.state.config_entries;datacenter=dc1;kind=service-intentions": { - Name: "consul.usage.test.consul.state.config_entries", - Value: 0, - Labels: []metrics.Label{ - {Name: "datacenter", Value: "dc1"}, - {Name: "kind", Value: "service-intentions"}, - }, - }, - "consul.usage.test.consul.state.config_entries;datacenter=dc1;kind=service-resolver": { - Name: "consul.usage.test.consul.state.config_entries", - Value: 0, - Labels: []metrics.Label{ - {Name: "datacenter", Value: "dc1"}, - {Name: "kind", Value: "service-resolver"}, - }, - }, - "consul.usage.test.consul.state.config_entries;datacenter=dc1;kind=service-router": { - Name: "consul.usage.test.consul.state.config_entries", - Value: 0, - Labels: []metrics.Label{ - {Name: "datacenter", Value: "dc1"}, - {Name: "kind", Value: "service-router"}, - }, - }, - "consul.usage.test.consul.state.config_entries;datacenter=dc1;kind=service-defaults": { - Name: "consul.usage.test.consul.state.config_entries", - Value: 0, - Labels: []metrics.Label{ - {Name: "datacenter", Value: "dc1"}, - {Name: "kind", Value: "service-defaults"}, - }, - }, - "consul.usage.test.consul.state.config_entries;datacenter=dc1;kind=ingress-gateway": { - Name: "consul.usage.test.consul.state.config_entries", - Value: 0, - Labels: []metrics.Label{ - {Name: "datacenter", Value: "dc1"}, - {Name: "kind", Value: "ingress-gateway"}, - }, - }, - "consul.usage.test.consul.state.config_entries;datacenter=dc1;kind=service-splitter": { - Name: "consul.usage.test.consul.state.config_entries", - Value: 0, - Labels: []metrics.Label{ - {Name: "datacenter", Value: "dc1"}, - {Name: "kind", Value: 
"service-splitter"}, - }, - }, - "consul.usage.test.consul.state.config_entries;datacenter=dc1;kind=mesh": { - Name: "consul.usage.test.consul.state.config_entries", - Value: 0, - Labels: []metrics.Label{ - {Name: "datacenter", Value: "dc1"}, - {Name: "kind", Value: "mesh"}, - }, - }, - "consul.usage.test.consul.state.config_entries;datacenter=dc1;kind=proxy-defaults": { - Name: "consul.usage.test.consul.state.config_entries", - Value: 0, - Labels: []metrics.Label{ - {Name: "datacenter", Value: "dc1"}, - {Name: "kind", Value: "proxy-defaults"}, - }, - }, - "consul.usage.test.consul.state.config_entries;datacenter=dc1;kind=terminating-gateway": { - Name: "consul.usage.test.consul.state.config_entries", - Value: 0, - Labels: []metrics.Label{ - {Name: "datacenter", Value: "dc1"}, - {Name: "kind", Value: "terminating-gateway"}, - }, - }, - "consul.usage.test.consul.state.config_entries;datacenter=dc1;kind=exported-services": { - Name: "consul.usage.test.consul.state.config_entries", - Value: 0, - Labels: []metrics.Label{ - {Name: "datacenter", Value: "dc1"}, - {Name: "kind", Value: "exported-services"}, - }, - }, - }, - }, - } + cases := baseCases for name, tcase := range cases { t.Run(name, func(t *testing.T) { @@ -426,371 +424,57 @@ func TestUsageReporter_emitNodeUsage_OSS(t *testing.T) { } func TestUsageReporter_emitPeeringUsage_OSS(t *testing.T) { - type testCase struct { - modfiyStateStore func(t *testing.T, s *state.Store) - getMembersFunc getMembersFunc - expectedGauges map[string]metrics.GaugeValue + cases := make(map[string]testCase) + for k, v := range baseCases { + eg := make(map[string]metrics.GaugeValue) + for k, v := range v.expectedGauges { + eg[k] = v + } + cases[k] = testCase{v.modfiyStateStore, v.getMembersFunc, eg} } - cases := map[string]testCase{ - "empty-state": { - expectedGauges: map[string]metrics.GaugeValue{ - // --- node --- - "consul.usage.test.consul.state.nodes;datacenter=dc1": { - Name: "consul.usage.test.consul.state.nodes", - Value: 0, - Labels: []metrics.Label{{Name: "datacenter", Value: "dc1"}}, - }, - // --- peering --- - "consul.usage.test.consul.state.peerings;datacenter=dc1": { - Name: "consul.usage.test.consul.state.peerings", - Value: 0, - Labels: []metrics.Label{{Name: "datacenter", Value: "dc1"}}, - }, - // --- member --- - "consul.usage.test.consul.members.clients;datacenter=dc1": { - Name: "consul.usage.test.consul.members.clients", - Value: 0, - Labels: []metrics.Label{{Name: "datacenter", Value: "dc1"}}, - }, - "consul.usage.test.consul.members.servers;datacenter=dc1": { - Name: "consul.usage.test.consul.members.servers", - Value: 0, - Labels: []metrics.Label{{Name: "datacenter", Value: "dc1"}}, - }, - // --- service --- - "consul.usage.test.consul.state.services;datacenter=dc1": { - Name: "consul.usage.test.consul.state.services", - Value: 0, - Labels: []metrics.Label{{Name: "datacenter", Value: "dc1"}}, - }, - "consul.usage.test.consul.state.service_instances;datacenter=dc1": { - Name: "consul.usage.test.consul.state.service_instances", - Value: 0, - Labels: []metrics.Label{{Name: "datacenter", Value: "dc1"}}, - }, - // --- service mesh --- - "consul.usage.test.consul.state.connect_instances;datacenter=dc1;kind=connect-proxy": { - Name: "consul.usage.test.consul.state.connect_instances", - Value: 0, - Labels: []metrics.Label{ - {Name: "datacenter", Value: "dc1"}, - {Name: "kind", Value: "connect-proxy"}, - }, - }, - "consul.usage.test.consul.state.connect_instances;datacenter=dc1;kind=terminating-gateway": { - Name: 
"consul.usage.test.consul.state.connect_instances", - Value: 0, - Labels: []metrics.Label{ - {Name: "datacenter", Value: "dc1"}, - {Name: "kind", Value: "terminating-gateway"}, - }, - }, - "consul.usage.test.consul.state.connect_instances;datacenter=dc1;kind=ingress-gateway": { - Name: "consul.usage.test.consul.state.connect_instances", - Value: 0, - Labels: []metrics.Label{ - {Name: "datacenter", Value: "dc1"}, - {Name: "kind", Value: "ingress-gateway"}, - }, - }, - "consul.usage.test.consul.state.connect_instances;datacenter=dc1;kind=mesh-gateway": { - Name: "consul.usage.test.consul.state.connect_instances", - Value: 0, - Labels: []metrics.Label{ - {Name: "datacenter", Value: "dc1"}, - {Name: "kind", Value: "mesh-gateway"}, - }, - }, - "consul.usage.test.consul.state.connect_instances;datacenter=dc1;kind=connect-native": { - Name: "consul.usage.test.consul.state.connect_instances", - Value: 0, - Labels: []metrics.Label{ - {Name: "datacenter", Value: "dc1"}, - {Name: "kind", Value: "connect-native"}, - }, - }, - // --- kv --- - "consul.usage.test.consul.state.kv_entries;datacenter=dc1": { - Name: "consul.usage.test.consul.state.kv_entries", - Value: 0, - Labels: []metrics.Label{{Name: "datacenter", Value: "dc1"}}, - }, - // --- config entries --- - "consul.usage.test.consul.state.config_entries;datacenter=dc1;kind=service-intentions": { - Name: "consul.usage.test.consul.state.config_entries", - Value: 0, - Labels: []metrics.Label{ - {Name: "datacenter", Value: "dc1"}, - {Name: "kind", Value: "service-intentions"}, - }, - }, - "consul.usage.test.consul.state.config_entries;datacenter=dc1;kind=service-resolver": { - Name: "consul.usage.test.consul.state.config_entries", - Value: 0, - Labels: []metrics.Label{ - {Name: "datacenter", Value: "dc1"}, - {Name: "kind", Value: "service-resolver"}, - }, - }, - "consul.usage.test.consul.state.config_entries;datacenter=dc1;kind=service-router": { - Name: "consul.usage.test.consul.state.config_entries", - Value: 0, - Labels: []metrics.Label{ - {Name: "datacenter", Value: "dc1"}, - {Name: "kind", Value: "service-router"}, - }, - }, - "consul.usage.test.consul.state.config_entries;datacenter=dc1;kind=service-defaults": { - Name: "consul.usage.test.consul.state.config_entries", - Value: 0, - Labels: []metrics.Label{ - {Name: "datacenter", Value: "dc1"}, - {Name: "kind", Value: "service-defaults"}, - }, - }, - "consul.usage.test.consul.state.config_entries;datacenter=dc1;kind=ingress-gateway": { - Name: "consul.usage.test.consul.state.config_entries", - Value: 0, - Labels: []metrics.Label{ - {Name: "datacenter", Value: "dc1"}, - {Name: "kind", Value: "ingress-gateway"}, - }, - }, - "consul.usage.test.consul.state.config_entries;datacenter=dc1;kind=service-splitter": { - Name: "consul.usage.test.consul.state.config_entries", - Value: 0, - Labels: []metrics.Label{ - {Name: "datacenter", Value: "dc1"}, - {Name: "kind", Value: "service-splitter"}, - }, - }, - "consul.usage.test.consul.state.config_entries;datacenter=dc1;kind=mesh": { - Name: "consul.usage.test.consul.state.config_entries", - Value: 0, - Labels: []metrics.Label{ - {Name: "datacenter", Value: "dc1"}, - {Name: "kind", Value: "mesh"}, - }, - }, - "consul.usage.test.consul.state.config_entries;datacenter=dc1;kind=proxy-defaults": { - Name: "consul.usage.test.consul.state.config_entries", - Value: 0, - Labels: []metrics.Label{ - {Name: "datacenter", Value: "dc1"}, - {Name: "kind", Value: "proxy-defaults"}, - }, - }, - 
"consul.usage.test.consul.state.config_entries;datacenter=dc1;kind=terminating-gateway": { - Name: "consul.usage.test.consul.state.config_entries", - Value: 0, - Labels: []metrics.Label{ - {Name: "datacenter", Value: "dc1"}, - {Name: "kind", Value: "terminating-gateway"}, - }, - }, - "consul.usage.test.consul.state.config_entries;datacenter=dc1;kind=exported-services": { - Name: "consul.usage.test.consul.state.config_entries", - Value: 0, - Labels: []metrics.Label{ - {Name: "datacenter", Value: "dc1"}, - {Name: "kind", Value: "exported-services"}, - }, - }, - }, - getMembersFunc: func() []serf.Member { return []serf.Member{} }, - }, - "peerings": { - modfiyStateStore: func(t *testing.T, s *state.Store) { - id, err := uuid.GenerateUUID() - require.NoError(t, err) - require.NoError(t, s.PeeringWrite(1, &pbpeering.PeeringWriteRequest{Peering: &pbpeering.Peering{Name: "foo", ID: id}})) - id, err = uuid.GenerateUUID() - require.NoError(t, err) - require.NoError(t, s.PeeringWrite(2, &pbpeering.PeeringWriteRequest{Peering: &pbpeering.Peering{Name: "bar", ID: id}})) - id, err = uuid.GenerateUUID() - require.NoError(t, err) - require.NoError(t, s.PeeringWrite(3, &pbpeering.PeeringWriteRequest{Peering: &pbpeering.Peering{Name: "baz", ID: id}})) - }, - getMembersFunc: func() []serf.Member { - return []serf.Member{ - { - Name: "foo", - Tags: map[string]string{"role": "consul"}, - Status: serf.StatusAlive, - }, - { - Name: "bar", - Tags: map[string]string{"role": "consul"}, - Status: serf.StatusAlive, - }, - } - }, - expectedGauges: map[string]metrics.GaugeValue{ - // --- node --- - "consul.usage.test.consul.state.nodes;datacenter=dc1": { - Name: "consul.usage.test.consul.state.nodes", - Value: 0, - Labels: []metrics.Label{{Name: "datacenter", Value: "dc1"}}, - }, - // --- peering --- - "consul.usage.test.consul.state.peerings;datacenter=dc1": { - Name: "consul.usage.test.consul.state.peerings", - Value: 3, - Labels: []metrics.Label{{Name: "datacenter", Value: "dc1"}}, - }, - // --- member --- - "consul.usage.test.consul.members.servers;datacenter=dc1": { - Name: "consul.usage.test.consul.members.servers", - Value: 2, - Labels: []metrics.Label{{Name: "datacenter", Value: "dc1"}}, - }, - "consul.usage.test.consul.members.clients;datacenter=dc1": { - Name: "consul.usage.test.consul.members.clients", - Value: 0, - Labels: []metrics.Label{{Name: "datacenter", Value: "dc1"}}, - }, - // --- service --- - "consul.usage.test.consul.state.services;datacenter=dc1": { - Name: "consul.usage.test.consul.state.services", - Value: 0, - Labels: []metrics.Label{{Name: "datacenter", Value: "dc1"}}, - }, - "consul.usage.test.consul.state.service_instances;datacenter=dc1": { - Name: "consul.usage.test.consul.state.service_instances", - Value: 0, - Labels: []metrics.Label{{Name: "datacenter", Value: "dc1"}}, - }, - // --- service mesh --- - "consul.usage.test.consul.state.connect_instances;datacenter=dc1;kind=connect-proxy": { - Name: "consul.usage.test.consul.state.connect_instances", - Value: 0, - Labels: []metrics.Label{ - {Name: "datacenter", Value: "dc1"}, - {Name: "kind", Value: "connect-proxy"}, - }, - }, - "consul.usage.test.consul.state.connect_instances;datacenter=dc1;kind=terminating-gateway": { - Name: "consul.usage.test.consul.state.connect_instances", - Value: 0, - Labels: []metrics.Label{ - {Name: "datacenter", Value: "dc1"}, - {Name: "kind", Value: "terminating-gateway"}, - }, - }, - "consul.usage.test.consul.state.connect_instances;datacenter=dc1;kind=ingress-gateway": { - Name: 
"consul.usage.test.consul.state.connect_instances", - Value: 0, - Labels: []metrics.Label{ - {Name: "datacenter", Value: "dc1"}, - {Name: "kind", Value: "ingress-gateway"}, - }, - }, - "consul.usage.test.consul.state.connect_instances;datacenter=dc1;kind=mesh-gateway": { - Name: "consul.usage.test.consul.state.connect_instances", - Value: 0, - Labels: []metrics.Label{ - {Name: "datacenter", Value: "dc1"}, - {Name: "kind", Value: "mesh-gateway"}, - }, - }, - "consul.usage.test.consul.state.connect_instances;datacenter=dc1;kind=connect-native": { - Name: "consul.usage.test.consul.state.connect_instances", - Value: 0, - Labels: []metrics.Label{ - {Name: "datacenter", Value: "dc1"}, - {Name: "kind", Value: "connect-native"}, - }, - }, - // --- kv --- - "consul.usage.test.consul.state.kv_entries;datacenter=dc1": { - Name: "consul.usage.test.consul.state.kv_entries", - Value: 0, - Labels: []metrics.Label{{Name: "datacenter", Value: "dc1"}}, - }, - // --- config entries --- - "consul.usage.test.consul.state.config_entries;datacenter=dc1;kind=service-intentions": { - Name: "consul.usage.test.consul.state.config_entries", - Value: 0, - Labels: []metrics.Label{ - {Name: "datacenter", Value: "dc1"}, - {Name: "kind", Value: "service-intentions"}, - }, - }, - "consul.usage.test.consul.state.config_entries;datacenter=dc1;kind=service-resolver": { - Name: "consul.usage.test.consul.state.config_entries", - Value: 0, - Labels: []metrics.Label{ - {Name: "datacenter", Value: "dc1"}, - {Name: "kind", Value: "service-resolver"}, - }, - }, - "consul.usage.test.consul.state.config_entries;datacenter=dc1;kind=service-router": { - Name: "consul.usage.test.consul.state.config_entries", - Value: 0, - Labels: []metrics.Label{ - {Name: "datacenter", Value: "dc1"}, - {Name: "kind", Value: "service-router"}, - }, - }, - "consul.usage.test.consul.state.config_entries;datacenter=dc1;kind=service-defaults": { - Name: "consul.usage.test.consul.state.config_entries", - Value: 0, - Labels: []metrics.Label{ - {Name: "datacenter", Value: "dc1"}, - {Name: "kind", Value: "service-defaults"}, - }, - }, - "consul.usage.test.consul.state.config_entries;datacenter=dc1;kind=ingress-gateway": { - Name: "consul.usage.test.consul.state.config_entries", - Value: 0, - Labels: []metrics.Label{ - {Name: "datacenter", Value: "dc1"}, - {Name: "kind", Value: "ingress-gateway"}, - }, - }, - "consul.usage.test.consul.state.config_entries;datacenter=dc1;kind=service-splitter": { - Name: "consul.usage.test.consul.state.config_entries", - Value: 0, - Labels: []metrics.Label{ - {Name: "datacenter", Value: "dc1"}, - {Name: "kind", Value: "service-splitter"}, - }, - }, - "consul.usage.test.consul.state.config_entries;datacenter=dc1;kind=mesh": { - Name: "consul.usage.test.consul.state.config_entries", - Value: 0, - Labels: []metrics.Label{ - {Name: "datacenter", Value: "dc1"}, - {Name: "kind", Value: "mesh"}, - }, - }, - "consul.usage.test.consul.state.config_entries;datacenter=dc1;kind=proxy-defaults": { - Name: "consul.usage.test.consul.state.config_entries", - Value: 0, - Labels: []metrics.Label{ - {Name: "datacenter", Value: "dc1"}, - {Name: "kind", Value: "proxy-defaults"}, - }, - }, - "consul.usage.test.consul.state.config_entries;datacenter=dc1;kind=terminating-gateway": { - Name: "consul.usage.test.consul.state.config_entries", - Value: 0, - Labels: []metrics.Label{ - {Name: "datacenter", Value: "dc1"}, - {Name: "kind", Value: "terminating-gateway"}, - }, - }, - 
"consul.usage.test.consul.state.config_entries;datacenter=dc1;kind=exported-services": { - Name: "consul.usage.test.consul.state.config_entries", - Value: 0, - Labels: []metrics.Label{ - {Name: "datacenter", Value: "dc1"}, - {Name: "kind", Value: "exported-services"}, - }, - }, - }, - }, + peeringsCase := cases["nodes"] + peeringsCase.modfiyStateStore = func(t *testing.T, s *state.Store) { + id, err := uuid.GenerateUUID() + require.NoError(t, err) + require.NoError(t, s.PeeringWrite(1, &pbpeering.PeeringWriteRequest{Peering: &pbpeering.Peering{Name: "foo", ID: id}})) + id, err = uuid.GenerateUUID() + require.NoError(t, err) + require.NoError(t, s.PeeringWrite(2, &pbpeering.PeeringWriteRequest{Peering: &pbpeering.Peering{Name: "bar", ID: id}})) + id, err = uuid.GenerateUUID() + require.NoError(t, err) + require.NoError(t, s.PeeringWrite(3, &pbpeering.PeeringWriteRequest{Peering: &pbpeering.Peering{Name: "baz", ID: id}})) } + peeringsCase.getMembersFunc = func() []serf.Member { + return []serf.Member{ + { + Name: "foo", + Tags: map[string]string{"role": "consul"}, + Status: serf.StatusAlive, + }, + { + Name: "bar", + Tags: map[string]string{"role": "consul"}, + Status: serf.StatusAlive, + }, + } + } + peeringsCase.expectedGauges["consul.usage.test.consul.state.nodes;datacenter=dc1"] = metrics.GaugeValue{ + Name: "consul.usage.test.consul.state.nodes", + Value: 0, + Labels: []metrics.Label{{Name: "datacenter", Value: "dc1"}}, + } + peeringsCase.expectedGauges["consul.usage.test.consul.state.peerings;datacenter=dc1"] = metrics.GaugeValue{ + Name: "consul.usage.test.consul.state.peerings", + Value: 3, + Labels: []metrics.Label{{Name: "datacenter", Value: "dc1"}}, + } + peeringsCase.expectedGauges["consul.usage.test.consul.members.clients;datacenter=dc1"] = metrics.GaugeValue{ + Name: "consul.usage.test.consul.members.clients", + Value: 0, + Labels: []metrics.Label{{Name: "datacenter", Value: "dc1"}}, + } + cases["peerings"] = peeringsCase + delete(cases, "nodes") for name, tcase := range cases { t.Run(name, func(t *testing.T) { @@ -829,420 +513,134 @@ func TestUsageReporter_emitPeeringUsage_OSS(t *testing.T) { } func TestUsageReporter_emitServiceUsage_OSS(t *testing.T) { - type testCase struct { - modfiyStateStore func(t *testing.T, s *state.Store) - getMembersFunc getMembersFunc - expectedGauges map[string]metrics.GaugeValue + cases := make(map[string]testCase) + for k, v := range baseCases { + eg := make(map[string]metrics.GaugeValue) + for k, v := range v.expectedGauges { + eg[k] = v + } + cases[k] = testCase{v.modfiyStateStore, v.getMembersFunc, eg} } - cases := map[string]testCase{ - "empty-state": { - expectedGauges: map[string]metrics.GaugeValue{ - // --- node --- - "consul.usage.test.consul.state.nodes;datacenter=dc1": { - Name: "consul.usage.test.consul.state.nodes", - Value: 0, - Labels: []metrics.Label{{Name: "datacenter", Value: "dc1"}}, - }, - // --- peering --- - "consul.usage.test.consul.state.peerings;datacenter=dc1": { - Name: "consul.usage.test.consul.state.peerings", - Value: 0, - Labels: []metrics.Label{{Name: "datacenter", Value: "dc1"}}, - }, - // --- member --- - "consul.usage.test.consul.members.servers;datacenter=dc1": { - Name: "consul.usage.test.consul.members.servers", - Value: 0, - Labels: []metrics.Label{ - {Name: "datacenter", Value: "dc1"}, - }, - }, - "consul.usage.test.consul.members.clients;datacenter=dc1": { - Name: "consul.usage.test.consul.members.clients", - Value: 0, - Labels: []metrics.Label{ - {Name: "datacenter", Value: "dc1"}, - }, - }, - // --- 
service --- - "consul.usage.test.consul.state.services;datacenter=dc1": { - Name: "consul.usage.test.consul.state.services", - Value: 0, - Labels: []metrics.Label{ - {Name: "datacenter", Value: "dc1"}, - }, - }, - "consul.usage.test.consul.state.service_instances;datacenter=dc1": { - Name: "consul.usage.test.consul.state.service_instances", - Value: 0, - Labels: []metrics.Label{ - {Name: "datacenter", Value: "dc1"}, - }, - }, - // --- service mesh --- - "consul.usage.test.consul.state.connect_instances;datacenter=dc1;kind=connect-proxy": { - Name: "consul.usage.test.consul.state.connect_instances", - Value: 0, - Labels: []metrics.Label{ - {Name: "datacenter", Value: "dc1"}, - {Name: "kind", Value: "connect-proxy"}, - }, - }, - "consul.usage.test.consul.state.connect_instances;datacenter=dc1;kind=terminating-gateway": { - Name: "consul.usage.test.consul.state.connect_instances", - Value: 0, - Labels: []metrics.Label{ - {Name: "datacenter", Value: "dc1"}, - {Name: "kind", Value: "terminating-gateway"}, - }, - }, - "consul.usage.test.consul.state.connect_instances;datacenter=dc1;kind=ingress-gateway": { - Name: "consul.usage.test.consul.state.connect_instances", - Value: 0, - Labels: []metrics.Label{ - {Name: "datacenter", Value: "dc1"}, - {Name: "kind", Value: "ingress-gateway"}, - }, - }, - "consul.usage.test.consul.state.connect_instances;datacenter=dc1;kind=mesh-gateway": { - Name: "consul.usage.test.consul.state.connect_instances", - Value: 0, - Labels: []metrics.Label{ - {Name: "datacenter", Value: "dc1"}, - {Name: "kind", Value: "mesh-gateway"}, - }, - }, - "consul.usage.test.consul.state.connect_instances;datacenter=dc1;kind=connect-native": { - Name: "consul.usage.test.consul.state.connect_instances", - Value: 0, - Labels: []metrics.Label{ - {Name: "datacenter", Value: "dc1"}, - {Name: "kind", Value: "connect-native"}, - }, - }, - // --- kv --- - "consul.usage.test.consul.state.kv_entries;datacenter=dc1": { - Name: "consul.usage.test.consul.state.kv_entries", - Value: 0, - Labels: []metrics.Label{{Name: "datacenter", Value: "dc1"}}, - }, - // --- config entries --- - "consul.usage.test.consul.state.config_entries;datacenter=dc1;kind=service-intentions": { - Name: "consul.usage.test.consul.state.config_entries", - Value: 0, - Labels: []metrics.Label{ - {Name: "datacenter", Value: "dc1"}, - {Name: "kind", Value: "service-intentions"}, - }, - }, - "consul.usage.test.consul.state.config_entries;datacenter=dc1;kind=service-resolver": { - Name: "consul.usage.test.consul.state.config_entries", - Value: 0, - Labels: []metrics.Label{ - {Name: "datacenter", Value: "dc1"}, - {Name: "kind", Value: "service-resolver"}, - }, - }, - "consul.usage.test.consul.state.config_entries;datacenter=dc1;kind=service-router": { - Name: "consul.usage.test.consul.state.config_entries", - Value: 0, - Labels: []metrics.Label{ - {Name: "datacenter", Value: "dc1"}, - {Name: "kind", Value: "service-router"}, - }, - }, - "consul.usage.test.consul.state.config_entries;datacenter=dc1;kind=service-defaults": { - Name: "consul.usage.test.consul.state.config_entries", - Value: 0, - Labels: []metrics.Label{ - {Name: "datacenter", Value: "dc1"}, - {Name: "kind", Value: "service-defaults"}, - }, - }, - "consul.usage.test.consul.state.config_entries;datacenter=dc1;kind=ingress-gateway": { - Name: "consul.usage.test.consul.state.config_entries", - Value: 0, - Labels: []metrics.Label{ - {Name: "datacenter", Value: "dc1"}, - {Name: "kind", Value: "ingress-gateway"}, - }, - }, - 
"consul.usage.test.consul.state.config_entries;datacenter=dc1;kind=service-splitter": { - Name: "consul.usage.test.consul.state.config_entries", - Value: 0, - Labels: []metrics.Label{ - {Name: "datacenter", Value: "dc1"}, - {Name: "kind", Value: "service-splitter"}, - }, - }, - "consul.usage.test.consul.state.config_entries;datacenter=dc1;kind=mesh": { - Name: "consul.usage.test.consul.state.config_entries", - Value: 0, - Labels: []metrics.Label{ - {Name: "datacenter", Value: "dc1"}, - {Name: "kind", Value: "mesh"}, - }, - }, - "consul.usage.test.consul.state.config_entries;datacenter=dc1;kind=proxy-defaults": { - Name: "consul.usage.test.consul.state.config_entries", - Value: 0, - Labels: []metrics.Label{ - {Name: "datacenter", Value: "dc1"}, - {Name: "kind", Value: "proxy-defaults"}, - }, - }, - "consul.usage.test.consul.state.config_entries;datacenter=dc1;kind=terminating-gateway": { - Name: "consul.usage.test.consul.state.config_entries", - Value: 0, - Labels: []metrics.Label{ - {Name: "datacenter", Value: "dc1"}, - {Name: "kind", Value: "terminating-gateway"}, - }, - }, - "consul.usage.test.consul.state.config_entries;datacenter=dc1;kind=exported-services": { - Name: "consul.usage.test.consul.state.config_entries", - Value: 0, - Labels: []metrics.Label{ - {Name: "datacenter", Value: "dc1"}, - {Name: "kind", Value: "exported-services"}, - }, - }, - }, - getMembersFunc: func() []serf.Member { return []serf.Member{} }, - }, - "nodes-and-services": { - modfiyStateStore: func(t *testing.T, s *state.Store) { - require.NoError(t, s.EnsureNode(1, &structs.Node{Node: "foo", Address: "127.0.0.1"})) - require.NoError(t, s.EnsureNode(2, &structs.Node{Node: "bar", Address: "127.0.0.2"})) - require.NoError(t, s.EnsureNode(3, &structs.Node{Node: "baz", Address: "127.0.0.2"})) - require.NoError(t, s.EnsureNode(4, &structs.Node{Node: "qux", Address: "127.0.0.3"})) - mgw := structs.TestNodeServiceMeshGateway(t) - mgw.ID = "mesh-gateway" + nodesAndSvcsCase := cases["nodes"] + nodesAndSvcsCase.modfiyStateStore = func(t *testing.T, s *state.Store) { + require.NoError(t, s.EnsureNode(1, &structs.Node{Node: "foo", Address: "127.0.0.1"})) + require.NoError(t, s.EnsureNode(2, &structs.Node{Node: "bar", Address: "127.0.0.2"})) + require.NoError(t, s.EnsureNode(3, &structs.Node{Node: "baz", Address: "127.0.0.2"})) + require.NoError(t, s.EnsureNode(4, &structs.Node{Node: "qux", Address: "127.0.0.3"})) - tgw := structs.TestNodeServiceTerminatingGateway(t, "1.1.1.1") - tgw.ID = "terminating-gateway" - // Typical services and some consul services spread across two nodes - require.NoError(t, s.EnsureService(5, "foo", &structs.NodeService{ID: "db", Service: "db", Tags: nil, Address: "", Port: 5000})) - require.NoError(t, s.EnsureService(6, "bar", &structs.NodeService{ID: "api", Service: "api", Tags: nil, Address: "", Port: 5000})) - require.NoError(t, s.EnsureService(7, "foo", &structs.NodeService{ID: "consul", Service: "consul", Tags: nil})) - require.NoError(t, s.EnsureService(8, "bar", &structs.NodeService{ID: "consul", Service: "consul", Tags: nil})) - require.NoError(t, s.EnsureService(9, "foo", &structs.NodeService{ID: "db-connect-proxy", Service: "db-connect-proxy", Tags: nil, Address: "", Port: 5000, Kind: structs.ServiceKindConnectProxy})) - require.NoError(t, s.EnsureRegistration(10, structs.TestRegisterIngressGateway(t))) - require.NoError(t, s.EnsureService(11, "foo", mgw)) - require.NoError(t, s.EnsureService(12, "foo", tgw)) - require.NoError(t, s.EnsureService(13, "bar", &structs.NodeService{ID: 
"db-native", Service: "db", Tags: nil, Address: "", Port: 5000, Connect: structs.ServiceConnect{Native: true}})) - require.NoError(t, s.EnsureConfigEntry(14, &structs.IngressGatewayConfigEntry{ - Kind: structs.IngressGateway, - Name: "foo", - })) - require.NoError(t, s.EnsureConfigEntry(15, &structs.IngressGatewayConfigEntry{ - Kind: structs.IngressGateway, - Name: "bar", - })) - require.NoError(t, s.EnsureConfigEntry(16, &structs.IngressGatewayConfigEntry{ - Kind: structs.IngressGateway, - Name: "baz", - })) - }, - getMembersFunc: func() []serf.Member { - return []serf.Member{ - { - Name: "foo", - Tags: map[string]string{"role": "consul"}, - Status: serf.StatusAlive, - }, - { - Name: "bar", - Tags: map[string]string{"role": "consul"}, - Status: serf.StatusAlive, - }, - { - Name: "baz", - Tags: map[string]string{"role": "node", "segment": "a"}, - Status: serf.StatusAlive, - }, - { - Name: "qux", - Tags: map[string]string{"role": "node", "segment": "b"}, - Status: serf.StatusAlive, - }, - } - }, - expectedGauges: map[string]metrics.GaugeValue{ - // --- node --- - "consul.usage.test.consul.state.nodes;datacenter=dc1": { - Name: "consul.usage.test.consul.state.nodes", - Value: 4, - Labels: []metrics.Label{{Name: "datacenter", Value: "dc1"}}, - }, - // --- peering --- - "consul.usage.test.consul.state.peerings;datacenter=dc1": { - Name: "consul.usage.test.consul.state.peerings", - Value: 0, - Labels: []metrics.Label{{Name: "datacenter", Value: "dc1"}}, - }, - // --- member --- - "consul.usage.test.consul.members.servers;datacenter=dc1": { - Name: "consul.usage.test.consul.members.servers", - Value: 2, - Labels: []metrics.Label{ - {Name: "datacenter", Value: "dc1"}, - }, - }, - "consul.usage.test.consul.members.clients;datacenter=dc1": { - Name: "consul.usage.test.consul.members.clients", - Value: 2, - Labels: []metrics.Label{ - {Name: "datacenter", Value: "dc1"}, - }, - }, - // --- service --- - "consul.usage.test.consul.state.services;datacenter=dc1": { - Name: "consul.usage.test.consul.state.services", - Value: 7, - Labels: []metrics.Label{ - {Name: "datacenter", Value: "dc1"}, - }, - }, - "consul.usage.test.consul.state.service_instances;datacenter=dc1": { - Name: "consul.usage.test.consul.state.service_instances", - Value: 9, - Labels: []metrics.Label{ - {Name: "datacenter", Value: "dc1"}, - }, - }, - // --- service mesh --- - "consul.usage.test.consul.state.connect_instances;datacenter=dc1;kind=connect-proxy": { - Name: "consul.usage.test.consul.state.connect_instances", - Value: 1, - Labels: []metrics.Label{ - {Name: "datacenter", Value: "dc1"}, - {Name: "kind", Value: "connect-proxy"}, - }, - }, - "consul.usage.test.consul.state.connect_instances;datacenter=dc1;kind=terminating-gateway": { - Name: "consul.usage.test.consul.state.connect_instances", - Value: 1, - Labels: []metrics.Label{ - {Name: "datacenter", Value: "dc1"}, - {Name: "kind", Value: "terminating-gateway"}, - }, - }, - "consul.usage.test.consul.state.connect_instances;datacenter=dc1;kind=ingress-gateway": { - Name: "consul.usage.test.consul.state.connect_instances", - Value: 1, - Labels: []metrics.Label{ - {Name: "datacenter", Value: "dc1"}, - {Name: "kind", Value: "ingress-gateway"}, - }, - }, - "consul.usage.test.consul.state.connect_instances;datacenter=dc1;kind=mesh-gateway": { - Name: "consul.usage.test.consul.state.connect_instances", - Value: 1, - Labels: []metrics.Label{ - {Name: "datacenter", Value: "dc1"}, - {Name: "kind", Value: "mesh-gateway"}, - }, - }, - 
"consul.usage.test.consul.state.connect_instances;datacenter=dc1;kind=connect-native": { - Name: "consul.usage.test.consul.state.connect_instances", - Value: 1, - Labels: []metrics.Label{ - {Name: "datacenter", Value: "dc1"}, - {Name: "kind", Value: "connect-native"}, - }, - }, - // --- kv --- - "consul.usage.test.consul.state.kv_entries;datacenter=dc1": { - Name: "consul.usage.test.consul.state.kv_entries", - Value: 0, - Labels: []metrics.Label{{Name: "datacenter", Value: "dc1"}}, - }, - // --- config entries --- - "consul.usage.test.consul.state.config_entries;datacenter=dc1;kind=service-intentions": { - Name: "consul.usage.test.consul.state.config_entries", - Value: 0, - Labels: []metrics.Label{ - {Name: "datacenter", Value: "dc1"}, - {Name: "kind", Value: "service-intentions"}, - }, - }, - "consul.usage.test.consul.state.config_entries;datacenter=dc1;kind=service-resolver": { - Name: "consul.usage.test.consul.state.config_entries", - Value: 0, - Labels: []metrics.Label{ - {Name: "datacenter", Value: "dc1"}, - {Name: "kind", Value: "service-resolver"}, - }, - }, - "consul.usage.test.consul.state.config_entries;datacenter=dc1;kind=service-router": { - Name: "consul.usage.test.consul.state.config_entries", - Value: 0, - Labels: []metrics.Label{ - {Name: "datacenter", Value: "dc1"}, - {Name: "kind", Value: "service-router"}, - }, - }, - "consul.usage.test.consul.state.config_entries;datacenter=dc1;kind=service-defaults": { - Name: "consul.usage.test.consul.state.config_entries", - Value: 0, - Labels: []metrics.Label{ - {Name: "datacenter", Value: "dc1"}, - {Name: "kind", Value: "service-defaults"}, - }, - }, - "consul.usage.test.consul.state.config_entries;datacenter=dc1;kind=ingress-gateway": { - Name: "consul.usage.test.consul.state.config_entries", - Value: 3, - Labels: []metrics.Label{ - {Name: "datacenter", Value: "dc1"}, - {Name: "kind", Value: "ingress-gateway"}, - }, - }, - "consul.usage.test.consul.state.config_entries;datacenter=dc1;kind=service-splitter": { - Name: "consul.usage.test.consul.state.config_entries", - Value: 0, - Labels: []metrics.Label{ - {Name: "datacenter", Value: "dc1"}, - {Name: "kind", Value: "service-splitter"}, - }, - }, - "consul.usage.test.consul.state.config_entries;datacenter=dc1;kind=mesh": { - Name: "consul.usage.test.consul.state.config_entries", - Value: 0, - Labels: []metrics.Label{ - {Name: "datacenter", Value: "dc1"}, - {Name: "kind", Value: "mesh"}, - }, - }, - "consul.usage.test.consul.state.config_entries;datacenter=dc1;kind=proxy-defaults": { - Name: "consul.usage.test.consul.state.config_entries", - Value: 0, - Labels: []metrics.Label{ - {Name: "datacenter", Value: "dc1"}, - {Name: "kind", Value: "proxy-defaults"}, - }, - }, - "consul.usage.test.consul.state.config_entries;datacenter=dc1;kind=terminating-gateway": { - Name: "consul.usage.test.consul.state.config_entries", - Value: 0, - Labels: []metrics.Label{ - {Name: "datacenter", Value: "dc1"}, - {Name: "kind", Value: "terminating-gateway"}, - }, - }, - "consul.usage.test.consul.state.config_entries;datacenter=dc1;kind=exported-services": { - Name: "consul.usage.test.consul.state.config_entries", - Value: 0, - Labels: []metrics.Label{ - {Name: "datacenter", Value: "dc1"}, - {Name: "kind", Value: "exported-services"}, - }, - }, - }, + mgw := structs.TestNodeServiceMeshGateway(t) + mgw.ID = "mesh-gateway" + + tgw := structs.TestNodeServiceTerminatingGateway(t, "1.1.1.1") + tgw.ID = "terminating-gateway" + // Typical services and some consul services spread across two nodes + 
require.NoError(t, s.EnsureService(5, "foo", &structs.NodeService{ID: "db", Service: "db", Tags: nil, Address: "", Port: 5000})) + require.NoError(t, s.EnsureService(6, "bar", &structs.NodeService{ID: "api", Service: "api", Tags: nil, Address: "", Port: 5000})) + require.NoError(t, s.EnsureService(7, "foo", &structs.NodeService{ID: "consul", Service: "consul", Tags: nil})) + require.NoError(t, s.EnsureService(8, "bar", &structs.NodeService{ID: "consul", Service: "consul", Tags: nil})) + require.NoError(t, s.EnsureService(9, "foo", &structs.NodeService{ID: "db-connect-proxy", Service: "db-connect-proxy", Tags: nil, Address: "", Port: 5000, Kind: structs.ServiceKindConnectProxy})) + require.NoError(t, s.EnsureRegistration(10, structs.TestRegisterIngressGateway(t))) + require.NoError(t, s.EnsureService(11, "foo", mgw)) + require.NoError(t, s.EnsureService(12, "foo", tgw)) + require.NoError(t, s.EnsureService(13, "bar", &structs.NodeService{ID: "db-native", Service: "db", Tags: nil, Address: "", Port: 5000, Connect: structs.ServiceConnect{Native: true}})) + require.NoError(t, s.EnsureConfigEntry(14, &structs.IngressGatewayConfigEntry{ + Kind: structs.IngressGateway, + Name: "foo", + })) + require.NoError(t, s.EnsureConfigEntry(15, &structs.IngressGatewayConfigEntry{ + Kind: structs.IngressGateway, + Name: "bar", + })) + require.NoError(t, s.EnsureConfigEntry(16, &structs.IngressGatewayConfigEntry{ + Kind: structs.IngressGateway, + Name: "baz", + })) + } + baseCaseMembers := nodesAndSvcsCase.getMembersFunc() + nodesAndSvcsCase.getMembersFunc = func() []serf.Member { + baseCaseMembers = append(baseCaseMembers, serf.Member{ + Name: "baz", + Tags: map[string]string{"role": "node", "segment": "a"}, + Status: serf.StatusAlive, + }) + baseCaseMembers = append(baseCaseMembers, serf.Member{ + Name: "qux", + Tags: map[string]string{"role": "node", "segment": "b"}, + Status: serf.StatusAlive, + }) + return baseCaseMembers + } + nodesAndSvcsCase.expectedGauges["consul.usage.test.consul.state.nodes;datacenter=dc1"] = metrics.GaugeValue{ + Name: "consul.usage.test.consul.state.nodes", + Value: 4, + Labels: []metrics.Label{{Name: "datacenter", Value: "dc1"}}, + } + nodesAndSvcsCase.expectedGauges["consul.usage.test.consul.members.clients;datacenter=dc1"] = metrics.GaugeValue{ + Name: "consul.usage.test.consul.members.clients", + Value: 2, + Labels: []metrics.Label{{Name: "datacenter", Value: "dc1"}}, + } + nodesAndSvcsCase.expectedGauges["consul.usage.test.consul.state.services;datacenter=dc1"] = metrics.GaugeValue{ + Name: "consul.usage.test.consul.state.services", + Value: 7, + Labels: []metrics.Label{{Name: "datacenter", Value: "dc1"}}, + } + nodesAndSvcsCase.expectedGauges["consul.usage.test.consul.state.service_instances;datacenter=dc1"] = metrics.GaugeValue{ + Name: "consul.usage.test.consul.state.service_instances", + Value: 9, + Labels: []metrics.Label{{Name: "datacenter", Value: "dc1"}}, + } + nodesAndSvcsCase.expectedGauges["consul.usage.test.consul.state.connect_instances;datacenter=dc1;kind=connect-proxy"] = metrics.GaugeValue{ + Name: "consul.usage.test.consul.state.connect_instances", + Value: 1, + Labels: []metrics.Label{ + {Name: "datacenter", Value: "dc1"}, + {Name: "kind", Value: "connect-proxy"}, }, } + nodesAndSvcsCase.expectedGauges["consul.usage.test.consul.state.connect_instances;datacenter=dc1;kind=terminating-gateway"] = metrics.GaugeValue{ + Name: "consul.usage.test.consul.state.connect_instances", + Value: 1, + Labels: []metrics.Label{ + {Name: "datacenter", Value: "dc1"}, + {Name: 
"kind", Value: "terminating-gateway"}, + }, + } + nodesAndSvcsCase.expectedGauges["consul.usage.test.consul.state.connect_instances;datacenter=dc1;kind=ingress-gateway"] = metrics.GaugeValue{ + Name: "consul.usage.test.consul.state.connect_instances", + Value: 1, + Labels: []metrics.Label{ + {Name: "datacenter", Value: "dc1"}, + {Name: "kind", Value: "ingress-gateway"}, + }, + } + nodesAndSvcsCase.expectedGauges["consul.usage.test.consul.state.connect_instances;datacenter=dc1;kind=mesh-gateway"] = metrics.GaugeValue{ + Name: "consul.usage.test.consul.state.connect_instances", + Value: 1, + Labels: []metrics.Label{ + {Name: "datacenter", Value: "dc1"}, + {Name: "kind", Value: "mesh-gateway"}, + }, + } + nodesAndSvcsCase.expectedGauges["consul.usage.test.consul.state.connect_instances;datacenter=dc1;kind=connect-native"] = metrics.GaugeValue{ + Name: "consul.usage.test.consul.state.connect_instances", + Value: 1, + Labels: []metrics.Label{ + {Name: "datacenter", Value: "dc1"}, + {Name: "kind", Value: "connect-native"}, + }, + } + nodesAndSvcsCase.expectedGauges["consul.usage.test.consul.state.config_entries;datacenter=dc1;kind=ingress-gateway"] = metrics.GaugeValue{ + Name: "consul.usage.test.consul.state.config_entries", + Value: 3, + Labels: []metrics.Label{ + {Name: "datacenter", Value: "dc1"}, + {Name: "kind", Value: "ingress-gateway"}, + }, + } + cases["nodes-and-services"] = nodesAndSvcsCase + delete(cases, "nodes") for name, tcase := range cases { t.Run(name, func(t *testing.T) { @@ -1280,379 +678,34 @@ func TestUsageReporter_emitServiceUsage_OSS(t *testing.T) { } func TestUsageReporter_emitKVUsage_OSS(t *testing.T) { - type testCase struct { - modfiyStateStore func(t *testing.T, s *state.Store) - getMembersFunc getMembersFunc - expectedGauges map[string]metrics.GaugeValue + cases := make(map[string]testCase) + for k, v := range baseCases { + eg := make(map[string]metrics.GaugeValue) + for k, v := range v.expectedGauges { + eg[k] = v + } + cases[k] = testCase{v.modfiyStateStore, v.getMembersFunc, eg} } - cases := map[string]testCase{ - "empty-state": { - expectedGauges: map[string]metrics.GaugeValue{ - // --- node --- - "consul.usage.test.consul.state.nodes;datacenter=dc1": { - Name: "consul.usage.test.consul.state.nodes", - Value: 0, - Labels: []metrics.Label{{Name: "datacenter", Value: "dc1"}}, - }, - // --- peering --- - "consul.usage.test.consul.state.peerings;datacenter=dc1": { - Name: "consul.usage.test.consul.state.peerings", - Value: 0, - Labels: []metrics.Label{{Name: "datacenter", Value: "dc1"}}, - }, - // --- member --- - "consul.usage.test.consul.members.clients;datacenter=dc1": { - Name: "consul.usage.test.consul.members.clients", - Value: 0, - Labels: []metrics.Label{{Name: "datacenter", Value: "dc1"}}, - }, - "consul.usage.test.consul.members.servers;datacenter=dc1": { - Name: "consul.usage.test.consul.members.servers", - Value: 0, - Labels: []metrics.Label{{Name: "datacenter", Value: "dc1"}}, - }, - // --- service --- - "consul.usage.test.consul.state.services;datacenter=dc1": { - Name: "consul.usage.test.consul.state.services", - Value: 0, - Labels: []metrics.Label{{Name: "datacenter", Value: "dc1"}}, - }, - "consul.usage.test.consul.state.service_instances;datacenter=dc1": { - Name: "consul.usage.test.consul.state.service_instances", - Value: 0, - Labels: []metrics.Label{{Name: "datacenter", Value: "dc1"}}, - }, - // --- service mesh --- - "consul.usage.test.consul.state.connect_instances;datacenter=dc1;kind=connect-proxy": { - Name: 
"consul.usage.test.consul.state.connect_instances", - Value: 0, - Labels: []metrics.Label{ - {Name: "datacenter", Value: "dc1"}, - {Name: "kind", Value: "connect-proxy"}, - }, - }, - "consul.usage.test.consul.state.connect_instances;datacenter=dc1;kind=terminating-gateway": { - Name: "consul.usage.test.consul.state.connect_instances", - Value: 0, - Labels: []metrics.Label{ - {Name: "datacenter", Value: "dc1"}, - {Name: "kind", Value: "terminating-gateway"}, - }, - }, - "consul.usage.test.consul.state.connect_instances;datacenter=dc1;kind=ingress-gateway": { - Name: "consul.usage.test.consul.state.connect_instances", - Value: 0, - Labels: []metrics.Label{ - {Name: "datacenter", Value: "dc1"}, - {Name: "kind", Value: "ingress-gateway"}, - }, - }, - "consul.usage.test.consul.state.connect_instances;datacenter=dc1;kind=mesh-gateway": { - Name: "consul.usage.test.consul.state.connect_instances", - Value: 0, - Labels: []metrics.Label{ - {Name: "datacenter", Value: "dc1"}, - {Name: "kind", Value: "mesh-gateway"}, - }, - }, - "consul.usage.test.consul.state.connect_instances;datacenter=dc1;kind=connect-native": { - Name: "consul.usage.test.consul.state.connect_instances", - Value: 0, - Labels: []metrics.Label{ - {Name: "datacenter", Value: "dc1"}, - {Name: "kind", Value: "connect-native"}, - }, - }, - // --- kv --- - "consul.usage.test.consul.state.kv_entries;datacenter=dc1": { - Name: "consul.usage.test.consul.state.kv_entries", - Value: 0, - Labels: []metrics.Label{{Name: "datacenter", Value: "dc1"}}, - }, - // --- config entries --- - "consul.usage.test.consul.state.config_entries;datacenter=dc1;kind=service-intentions": { - Name: "consul.usage.test.consul.state.config_entries", - Value: 0, - Labels: []metrics.Label{ - {Name: "datacenter", Value: "dc1"}, - {Name: "kind", Value: "service-intentions"}, - }, - }, - "consul.usage.test.consul.state.config_entries;datacenter=dc1;kind=service-resolver": { - Name: "consul.usage.test.consul.state.config_entries", - Value: 0, - Labels: []metrics.Label{ - {Name: "datacenter", Value: "dc1"}, - {Name: "kind", Value: "service-resolver"}, - }, - }, - "consul.usage.test.consul.state.config_entries;datacenter=dc1;kind=service-router": { - Name: "consul.usage.test.consul.state.config_entries", - Value: 0, - Labels: []metrics.Label{ - {Name: "datacenter", Value: "dc1"}, - {Name: "kind", Value: "service-router"}, - }, - }, - "consul.usage.test.consul.state.config_entries;datacenter=dc1;kind=service-defaults": { - Name: "consul.usage.test.consul.state.config_entries", - Value: 0, - Labels: []metrics.Label{ - {Name: "datacenter", Value: "dc1"}, - {Name: "kind", Value: "service-defaults"}, - }, - }, - "consul.usage.test.consul.state.config_entries;datacenter=dc1;kind=ingress-gateway": { - Name: "consul.usage.test.consul.state.config_entries", - Value: 0, - Labels: []metrics.Label{ - {Name: "datacenter", Value: "dc1"}, - {Name: "kind", Value: "ingress-gateway"}, - }, - }, - "consul.usage.test.consul.state.config_entries;datacenter=dc1;kind=service-splitter": { - Name: "consul.usage.test.consul.state.config_entries", - Value: 0, - Labels: []metrics.Label{ - {Name: "datacenter", Value: "dc1"}, - {Name: "kind", Value: "service-splitter"}, - }, - }, - "consul.usage.test.consul.state.config_entries;datacenter=dc1;kind=mesh": { - Name: "consul.usage.test.consul.state.config_entries", - Value: 0, - Labels: []metrics.Label{ - {Name: "datacenter", Value: "dc1"}, - {Name: "kind", Value: "mesh"}, - }, - }, - 
"consul.usage.test.consul.state.config_entries;datacenter=dc1;kind=proxy-defaults": { - Name: "consul.usage.test.consul.state.config_entries", - Value: 0, - Labels: []metrics.Label{ - {Name: "datacenter", Value: "dc1"}, - {Name: "kind", Value: "proxy-defaults"}, - }, - }, - "consul.usage.test.consul.state.config_entries;datacenter=dc1;kind=terminating-gateway": { - Name: "consul.usage.test.consul.state.config_entries", - Value: 0, - Labels: []metrics.Label{ - {Name: "datacenter", Value: "dc1"}, - {Name: "kind", Value: "terminating-gateway"}, - }, - }, - "consul.usage.test.consul.state.config_entries;datacenter=dc1;kind=exported-services": { - Name: "consul.usage.test.consul.state.config_entries", - Value: 0, - Labels: []metrics.Label{ - {Name: "datacenter", Value: "dc1"}, - {Name: "kind", Value: "exported-services"}, - }, - }, - }, - getMembersFunc: func() []serf.Member { return []serf.Member{} }, - }, - "nodes": { - modfiyStateStore: func(t *testing.T, s *state.Store) { - require.NoError(t, s.EnsureNode(1, &structs.Node{Node: "foo", Address: "127.0.0.1"})) - require.NoError(t, s.EnsureNode(2, &structs.Node{Node: "bar", Address: "127.0.0.2"})) - require.NoError(t, s.EnsureNode(3, &structs.Node{Node: "baz", Address: "127.0.0.2"})) - require.NoError(t, s.KVSSet(4, &structs.DirEntry{Key: "a", Value: []byte{1}})) - require.NoError(t, s.KVSSet(5, &structs.DirEntry{Key: "b", Value: []byte{1}})) - require.NoError(t, s.KVSSet(6, &structs.DirEntry{Key: "c", Value: []byte{1}})) - require.NoError(t, s.KVSSet(7, &structs.DirEntry{Key: "d", Value: []byte{1}})) - require.NoError(t, s.KVSDelete(8, "d", &acl.EnterpriseMeta{})) - require.NoError(t, s.KVSDelete(9, "c", &acl.EnterpriseMeta{})) - require.NoError(t, s.KVSSet(10, &structs.DirEntry{Key: "e", Value: []byte{1}})) - require.NoError(t, s.KVSSet(11, &structs.DirEntry{Key: "f", Value: []byte{1}})) - }, - getMembersFunc: func() []serf.Member { - return []serf.Member{ - { - Name: "foo", - Tags: map[string]string{"role": "consul"}, - Status: serf.StatusAlive, - }, - { - Name: "bar", - Tags: map[string]string{"role": "consul"}, - Status: serf.StatusAlive, - }, - { - Name: "baz", - Tags: map[string]string{"role": "node"}, - Status: serf.StatusAlive, - }, - } - }, - expectedGauges: map[string]metrics.GaugeValue{ - // --- node --- - "consul.usage.test.consul.state.nodes;datacenter=dc1": { - Name: "consul.usage.test.consul.state.nodes", - Value: 3, - Labels: []metrics.Label{{Name: "datacenter", Value: "dc1"}}, - }, - // --- peering --- - "consul.usage.test.consul.state.peerings;datacenter=dc1": { - Name: "consul.usage.test.consul.state.peerings", - Value: 0, - Labels: []metrics.Label{{Name: "datacenter", Value: "dc1"}}, - }, - // --- member --- - "consul.usage.test.consul.members.servers;datacenter=dc1": { - Name: "consul.usage.test.consul.members.servers", - Value: 2, - Labels: []metrics.Label{{Name: "datacenter", Value: "dc1"}}, - }, - "consul.usage.test.consul.members.clients;datacenter=dc1": { - Name: "consul.usage.test.consul.members.clients", - Value: 1, - Labels: []metrics.Label{{Name: "datacenter", Value: "dc1"}}, - }, - // --- service --- - "consul.usage.test.consul.state.services;datacenter=dc1": { - Name: "consul.usage.test.consul.state.services", - Value: 0, - Labels: []metrics.Label{{Name: "datacenter", Value: "dc1"}}, - }, - "consul.usage.test.consul.state.service_instances;datacenter=dc1": { - Name: "consul.usage.test.consul.state.service_instances", - Value: 0, - Labels: []metrics.Label{{Name: "datacenter", Value: "dc1"}}, - }, - // --- 
service mesh --- - "consul.usage.test.consul.state.connect_instances;datacenter=dc1;kind=connect-proxy": { - Name: "consul.usage.test.consul.state.connect_instances", - Value: 0, - Labels: []metrics.Label{ - {Name: "datacenter", Value: "dc1"}, - {Name: "kind", Value: "connect-proxy"}, - }, - }, - "consul.usage.test.consul.state.connect_instances;datacenter=dc1;kind=terminating-gateway": { - Name: "consul.usage.test.consul.state.connect_instances", - Value: 0, - Labels: []metrics.Label{ - {Name: "datacenter", Value: "dc1"}, - {Name: "kind", Value: "terminating-gateway"}, - }, - }, - "consul.usage.test.consul.state.connect_instances;datacenter=dc1;kind=ingress-gateway": { - Name: "consul.usage.test.consul.state.connect_instances", - Value: 0, - Labels: []metrics.Label{ - {Name: "datacenter", Value: "dc1"}, - {Name: "kind", Value: "ingress-gateway"}, - }, - }, - "consul.usage.test.consul.state.connect_instances;datacenter=dc1;kind=mesh-gateway": { - Name: "consul.usage.test.consul.state.connect_instances", - Value: 0, - Labels: []metrics.Label{ - {Name: "datacenter", Value: "dc1"}, - {Name: "kind", Value: "mesh-gateway"}, - }, - }, - "consul.usage.test.consul.state.connect_instances;datacenter=dc1;kind=connect-native": { - Name: "consul.usage.test.consul.state.connect_instances", - Value: 0, - Labels: []metrics.Label{ - {Name: "datacenter", Value: "dc1"}, - {Name: "kind", Value: "connect-native"}, - }, - }, - // --- kv --- - "consul.usage.test.consul.state.kv_entries;datacenter=dc1": { - Name: "consul.usage.test.consul.state.kv_entries", - Value: 4, - Labels: []metrics.Label{{Name: "datacenter", Value: "dc1"}}, - }, - // --- config entries --- - "consul.usage.test.consul.state.config_entries;datacenter=dc1;kind=service-intentions": { - Name: "consul.usage.test.consul.state.config_entries", - Value: 0, - Labels: []metrics.Label{ - {Name: "datacenter", Value: "dc1"}, - {Name: "kind", Value: "service-intentions"}, - }, - }, - "consul.usage.test.consul.state.config_entries;datacenter=dc1;kind=service-resolver": { - Name: "consul.usage.test.consul.state.config_entries", - Value: 0, - Labels: []metrics.Label{ - {Name: "datacenter", Value: "dc1"}, - {Name: "kind", Value: "service-resolver"}, - }, - }, - "consul.usage.test.consul.state.config_entries;datacenter=dc1;kind=service-router": { - Name: "consul.usage.test.consul.state.config_entries", - Value: 0, - Labels: []metrics.Label{ - {Name: "datacenter", Value: "dc1"}, - {Name: "kind", Value: "service-router"}, - }, - }, - "consul.usage.test.consul.state.config_entries;datacenter=dc1;kind=service-defaults": { - Name: "consul.usage.test.consul.state.config_entries", - Value: 0, - Labels: []metrics.Label{ - {Name: "datacenter", Value: "dc1"}, - {Name: "kind", Value: "service-defaults"}, - }, - }, - "consul.usage.test.consul.state.config_entries;datacenter=dc1;kind=ingress-gateway": { - Name: "consul.usage.test.consul.state.config_entries", - Value: 0, - Labels: []metrics.Label{ - {Name: "datacenter", Value: "dc1"}, - {Name: "kind", Value: "ingress-gateway"}, - }, - }, - "consul.usage.test.consul.state.config_entries;datacenter=dc1;kind=service-splitter": { - Name: "consul.usage.test.consul.state.config_entries", - Value: 0, - Labels: []metrics.Label{ - {Name: "datacenter", Value: "dc1"}, - {Name: "kind", Value: "service-splitter"}, - }, - }, - "consul.usage.test.consul.state.config_entries;datacenter=dc1;kind=mesh": { - Name: "consul.usage.test.consul.state.config_entries", - Value: 0, - Labels: []metrics.Label{ - {Name: "datacenter", Value: "dc1"}, - 
{Name: "kind", Value: "mesh"}, - }, - }, - "consul.usage.test.consul.state.config_entries;datacenter=dc1;kind=proxy-defaults": { - Name: "consul.usage.test.consul.state.config_entries", - Value: 0, - Labels: []metrics.Label{ - {Name: "datacenter", Value: "dc1"}, - {Name: "kind", Value: "proxy-defaults"}, - }, - }, - "consul.usage.test.consul.state.config_entries;datacenter=dc1;kind=terminating-gateway": { - Name: "consul.usage.test.consul.state.config_entries", - Value: 0, - Labels: []metrics.Label{ - {Name: "datacenter", Value: "dc1"}, - {Name: "kind", Value: "terminating-gateway"}, - }, - }, - "consul.usage.test.consul.state.config_entries;datacenter=dc1;kind=exported-services": { - Name: "consul.usage.test.consul.state.config_entries", - Value: 0, - Labels: []metrics.Label{ - {Name: "datacenter", Value: "dc1"}, - {Name: "kind", Value: "exported-services"}, - }, - }, - }, - }, + nodesCase := cases["nodes"] + mss := nodesCase.modfiyStateStore + nodesCase.modfiyStateStore = func(t *testing.T, s *state.Store) { + mss(t, s) + require.NoError(t, s.KVSSet(4, &structs.DirEntry{Key: "a", Value: []byte{1}})) + require.NoError(t, s.KVSSet(5, &structs.DirEntry{Key: "b", Value: []byte{1}})) + require.NoError(t, s.KVSSet(6, &structs.DirEntry{Key: "c", Value: []byte{1}})) + require.NoError(t, s.KVSSet(7, &structs.DirEntry{Key: "d", Value: []byte{1}})) + require.NoError(t, s.KVSDelete(8, "d", &acl.EnterpriseMeta{})) + require.NoError(t, s.KVSDelete(9, "c", &acl.EnterpriseMeta{})) + require.NoError(t, s.KVSSet(10, &structs.DirEntry{Key: "e", Value: []byte{1}})) + require.NoError(t, s.KVSSet(11, &structs.DirEntry{Key: "f", Value: []byte{1}})) } + nodesCase.expectedGauges["consul.usage.test.consul.state.kv_entries;datacenter=dc1"] = metrics.GaugeValue{ + Name: "consul.usage.test.consul.state.kv_entries", + Value: 4, + Labels: []metrics.Label{{Name: "datacenter", Value: "dc1"}}, + } + cases["nodes"] = nodesCase for name, tcase := range cases { t.Run(name, func(t *testing.T) { From 8f27a077cbd5b27edbeae962bcdf83ac30776059 Mon Sep 17 00:00:00 2001 From: Derek Menteer Date: Wed, 24 Aug 2022 12:39:15 -0500 Subject: [PATCH 19/93] Add 14269 changelog entry. --- .changelog/14269.txt | 3 +++ 1 file changed, 3 insertions(+) create mode 100644 .changelog/14269.txt diff --git a/.changelog/14269.txt b/.changelog/14269.txt new file mode 100644 index 0000000000..29eec6d5da --- /dev/null +++ b/.changelog/14269.txt @@ -0,0 +1,3 @@ +```release-note:bugfix +connect: Fix issue where `auto_config` and `auto_encrypt` could unintentionally enable TLS for gRPC xDS connections. 
+```
\ No newline at end of file

From 41aea6521496d8a86a7cef842660997348e345c0 Mon Sep 17 00:00:00 2001
From: cskh
Date: Wed, 24 Aug 2022 14:13:10 -0400
Subject: [PATCH 20/93] =?UTF-8?q?Fix:=20the=20inbound=20connection=20limit?=
 =?UTF-8?q?=20filter=20should=20be=20placed=20in=20front=20of=20http=20co?=
 =?UTF-8?q?=E2=80=A6=20(#14325)?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

* fix: the inbound connection limit should be placed in front of http
connection manager

Co-authored-by: Freddy
---
 agent/xds/listeners.go                        | 44 ++++++++++++-------
 ...ener-max-inbound-connections.latest.golden | 15 ++++---
 2 files changed, 36 insertions(+), 23 deletions(-)

diff --git a/agent/xds/listeners.go b/agent/xds/listeners.go
index 95b84c94ce..33c339c4d8 100644
--- a/agent/xds/listeners.go
+++ b/agent/xds/listeners.go
@@ -1214,16 +1214,38 @@ func (s *ResourceGenerator) makeInboundListener(cfgSnap *proxycfg.ConfigSnapshot
 			filterOpts.forwardClientPolicy = envoy_http_v3.HttpConnectionManager_APPEND_FORWARD
 		}
 	}
+
+	// If an inbound connect limit is set, inject a connection limit filter on each chain.
+	if cfg.MaxInboundConnections > 0 {
+		connectionLimitFilter, err := makeConnectionLimitFilter(cfg.MaxInboundConnections)
+		if err != nil {
+			return nil, err
+		}
+		l.FilterChains = []*envoy_listener_v3.FilterChain{
+			{
+				Filters: []*envoy_listener_v3.Filter{
+					connectionLimitFilter,
+				},
+			},
+		}
+	}
+
 	filter, err := makeListenerFilter(filterOpts)
 	if err != nil {
 		return nil, err
 	}
-	l.FilterChains = []*envoy_listener_v3.FilterChain{
-		{
-			Filters: []*envoy_listener_v3.Filter{
-				filter,
+
+	if len(l.FilterChains) > 0 {
+		// The list of FilterChains has already been initialized
+		l.FilterChains[0].Filters = append(l.FilterChains[0].Filters, filter)
+	} else {
+		l.FilterChains = []*envoy_listener_v3.FilterChain{
+			{
+				Filters: []*envoy_listener_v3.Filter{
+					filter,
+				},
 			},
-		},
+		}
 	}

 	err = s.finalizePublicListenerFromConfig(l, cfgSnap, cfg, useHTTPFilter)
@@ -1249,17 +1271,6 @@ func (s *ResourceGenerator) finalizePublicListenerFromConfig(l *envoy_listener_v
 		return nil
 	}

-	// If an inbound connect limit is set, inject a connection limit filter on each chain.
-	if proxyCfg.MaxInboundConnections > 0 {
-		filter, err := makeConnectionLimitFilter(proxyCfg.MaxInboundConnections)
-		if err != nil {
-			return nil
-		}
-		for idx := range l.FilterChains {
-			l.FilterChains[idx].Filters = append(l.FilterChains[idx].Filters, filter)
-		}
-	}
-
 	return nil
 }

@@ -1990,6 +2001,7 @@ func makeTCPProxyFilter(filterName, cluster, statPrefix string) (*envoy_listener
 func makeConnectionLimitFilter(limit int) (*envoy_listener_v3.Filter, error) {
 	cfg := &envoy_connection_limit_v3.ConnectionLimit{
+		StatPrefix:     "inbound_connection_limit",
 		MaxConnections: wrapperspb.UInt64(uint64(limit)),
 	}
 	return makeFilter("envoy.filters.network.connection_limit", cfg)

diff --git a/agent/xds/testdata/listeners/listener-max-inbound-connections.latest.golden b/agent/xds/testdata/listeners/listener-max-inbound-connections.latest.golden
index be3b83433a..cbfda69f56 100644
--- a/agent/xds/testdata/listeners/listener-max-inbound-connections.latest.golden
+++ b/agent/xds/testdata/listeners/listener-max-inbound-connections.latest.golden
@@ -73,6 +73,14 @@
               "statPrefix": "connect_authz"
             }
           },
+          {
+            "name": "envoy.filters.network.connection_limit",
+            "typedConfig": {
+              "@type": "type.googleapis.com/envoy.extensions.filters.network.connection_limit.v3.ConnectionLimit",
+              "statPrefix": "inbound_connection_limit",
+              "maxConnections": "222"
+            }
+          },
           {
             "name": "envoy.filters.network.tcp_proxy",
             "typedConfig": {
@@ -80,13 +88,6 @@
               "statPrefix": "public_listener",
               "cluster": "local_app"
             }
-          },
-          {
-            "name": "envoy.filters.network.connection_limit",
-            "typedConfig": {
-              "@type": "type.googleapis.com/envoy.extensions.filters.network.connection_limit.v3.ConnectionLimit",
-              "maxConnections": "222"
-            }
           }
         ],
         "transportSocket": {

From 8e6b6a49a2d473a8cc52cab2cd55a21073ee14e2 Mon Sep 17 00:00:00 2001
From: Evan Culver
Date: Wed, 24 Aug 2022 17:04:26 -0700
Subject: [PATCH 21/93] docs: Update Envoy support matrix to match the code
 (#14338)

---
 website/content/docs/connect/proxies/envoy.mdx | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/website/content/docs/connect/proxies/envoy.mdx b/website/content/docs/connect/proxies/envoy.mdx
index 526d642bc8..7ada5b6fd0 100644
--- a/website/content/docs/connect/proxies/envoy.mdx
+++ b/website/content/docs/connect/proxies/envoy.mdx
@@ -37,8 +37,8 @@ Consul supports **four major Envoy releases** at the beginning of each major Con
 | Consul Version      | Compatible Envoy Versions                                                           |
 | ------------------- | -----------------------------------------------------------------------------------|
 | 1.13.x              | 1.23.0, 1.22.2, 1.21.4, 1.20.6                                                      |
-| 1.12.x              | 1.22.2, 1.21.3, 1.20.4, 1.19.5                                                      |
-| 1.11.x              | 1.20.2, 1.19.3, 1.18.6, 1.17.4<sup>1</sup>                                          |
+| 1.12.x              | 1.22.2, 1.21.4, 1.20.6, 1.19.5                                                      |
+| 1.11.x              | 1.20.6, 1.19.5, 1.18.6, 1.17.4<sup>1</sup>                                          |

 1. Envoy 1.20.1 and earlier are vulnerable to [CVE-2022-21654](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2022-21654) and [CVE-2022-21655](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2022-21655). Both CVEs were patched in Envoy versions 1.18.6, 1.19.3, and 1.20.2.

 Envoy 1.16.x and older releases are no longer supported (see [HCSEC-2022-07](https://discuss.hashicorp.com/t/hcsec-2022-07-consul-s-connect-service-mesh-affected-by-recent-envoy-security-releases/36332)). Consul 1.9.x clusters should be upgraded to 1.10.x and Envoy upgraded to the latest supported Envoy version for that release, 1.18.6.

From 181063cd2399a8cf1243e489a9f345b0a91e7fa5 Mon Sep 17 00:00:00 2001
From: "Chris S.
Kim" Date: Thu, 25 Aug 2022 11:25:59 -0400 Subject: [PATCH 22/93] Exit loop when context is cancelled --- agent/consul/leader_peering.go | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/agent/consul/leader_peering.go b/agent/consul/leader_peering.go index bc5b669cdd..d1823b026b 100644 --- a/agent/consul/leader_peering.go +++ b/agent/consul/leader_peering.go @@ -391,6 +391,12 @@ func (s *Server) runPeeringDeletions(ctx context.Context) error { // process. This includes deletion of the peerings themselves in addition to any peering data raftLimiter := rate.NewLimiter(defaultDeletionApplyRate, int(defaultDeletionApplyRate)) for { + select { + case <-ctx.Done(): + return nil + default: + } + ws := memdb.NewWatchSet() state := s.fsm.State() _, peerings, err := s.fsm.State().PeeringListDeleted(ws) From 4d40d02c73d8daeff833f451e9e1714e6d0a7cde Mon Sep 17 00:00:00 2001 From: Thomas Eckert Date: Thu, 25 Aug 2022 12:45:57 -0400 Subject: [PATCH 23/93] Remove warning about 1.9 --- website/content/docs/k8s/connect/terminating-gateways.mdx | 2 -- 1 file changed, 2 deletions(-) diff --git a/website/content/docs/k8s/connect/terminating-gateways.mdx b/website/content/docs/k8s/connect/terminating-gateways.mdx index e82bd773fb..9ff16b5c64 100644 --- a/website/content/docs/k8s/connect/terminating-gateways.mdx +++ b/website/content/docs/k8s/connect/terminating-gateways.mdx @@ -6,8 +6,6 @@ description: Configuring Terminating Gateways on Kubernetes # Terminating Gateways on Kubernetes --> 1.9.0+: This feature is available in Consul versions 1.9.0 and higher - ~> This topic requires familiarity with [Terminating Gateways](/docs/connect/gateways/terminating-gateway). Adding a terminating gateway is a multi-step process: From ac129339f8ec9e3081b915c9e4cf4a575f23dad2 Mon Sep 17 00:00:00 2001 From: Thomas Eckert Date: Thu, 25 Aug 2022 12:49:54 -0400 Subject: [PATCH 24/93] Instruct users to use the CLI --- website/content/docs/k8s/connect/terminating-gateways.mdx | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/website/content/docs/k8s/connect/terminating-gateways.mdx b/website/content/docs/k8s/connect/terminating-gateways.mdx index 9ff16b5c64..c5607d4380 100644 --- a/website/content/docs/k8s/connect/terminating-gateways.mdx +++ b/website/content/docs/k8s/connect/terminating-gateways.mdx @@ -36,9 +36,11 @@ terminatingGateways: ## Deploying the Helm chart -Ensure you have the latest consul-helm chart and install Consul via helm using the following -[guide](/docs/k8s/installation/install#installing-consul) while being sure to provide the yaml configuration -as previously discussed. +The Helm chart may be deployed using the [Consul on Kubernetes CLI](/docs/k8s/k8s-cli). 
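+If the CLI is not already installed, it can be obtained through the
+HashiCorp Homebrew tap, one illustrative option among several:
+
+```shell-session
+$ brew tap hashicorp/tap
+$ brew install hashicorp/tap/consul-k8s
+```
+
+With the CLI available, deploy the chart using the values file from the
+previous step: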
+ +```shell-session +consul-k8s install -f config.yaml +``` ## Accessing the Consul agent From 884dda25c26caced01ae4a00ca89f5a277acf26e Mon Sep 17 00:00:00 2001 From: Thomas Eckert Date: Thu, 25 Aug 2022 13:02:55 -0400 Subject: [PATCH 25/93] Use tabs for with and without TLS --- .../docs/k8s/connect/terminating-gateways.mdx | 32 +++++++++++-------- 1 file changed, 19 insertions(+), 13 deletions(-) diff --git a/website/content/docs/k8s/connect/terminating-gateways.mdx b/website/content/docs/k8s/connect/terminating-gateways.mdx index c5607d4380..9fb2b149c4 100644 --- a/website/content/docs/k8s/connect/terminating-gateways.mdx +++ b/website/content/docs/k8s/connect/terminating-gateways.mdx @@ -6,8 +6,6 @@ description: Configuring Terminating Gateways on Kubernetes # Terminating Gateways on Kubernetes -~> This topic requires familiarity with [Terminating Gateways](/docs/connect/gateways/terminating-gateway). - Adding a terminating gateway is a multi-step process: - Update the Helm chart with terminating gateway config options @@ -15,6 +13,12 @@ Adding a terminating gateway is a multi-step process: - Access the Consul agent - Register external services with Consul +## Requirements + +- [Consul]() +- [Consul on Kubernetes CLI]() +- Familiarity with [Terminating Gateways](/docs/connect/gateways/terminating-gateway) + ## Update the helm chart with terminating gateway config options Minimum required Helm options: @@ -39,36 +43,38 @@ terminatingGateways: The Helm chart may be deployed using the [Consul on Kubernetes CLI](/docs/k8s/k8s-cli). ```shell-session -consul-k8s install -f config.yaml +$ consul-k8s install -f config.yaml ``` ## Accessing the Consul agent -You can access the Consul server directly from your host via `kubectl port-forward`. This is helpful for interacting with your Consul UI locally as well as to validate connectivity of the application. +You can access the Consul server directly from your host via `kubectl port-forward`. This is helpful for interacting with your Consul UI locally as well as for validating the connectivity of the application. + + + ```shell-session $ kubectl port-forward consul-server-0 8500 & ``` +```shell-session +$ export CONSUL_HTTP_ADDR=http://localhost:8500 +``` + + + If TLS is enabled use port 8501: ```shell-session $ kubectl port-forward consul-server-0 8501 & ``` --> Be sure the latest consul binary is installed locally on your host. -[https://releases.hashicorp.com/consul/](https://releases.hashicorp.com/consul/) - -```shell-session -$ export CONSUL_HTTP_ADDR=http://localhost:8500 -``` - -If TLS is enabled set: - ```shell-session $ export CONSUL_HTTP_ADDR=https://localhost:8501 $ export CONSUL_HTTP_SSL_VERIFY=false ``` + + If ACLs are enabled also set: From 65dce3476f5e5f3d1a6e55b8e2448c8b525071e8 Mon Sep 17 00:00:00 2001 From: Thomas Eckert Date: Thu, 25 Aug 2022 13:27:43 -0400 Subject: [PATCH 26/93] Clean up copy for registration --- .../docs/k8s/connect/terminating-gateways.mdx | 21 ++++++++++++------- 1 file changed, 13 insertions(+), 8 deletions(-) diff --git a/website/content/docs/k8s/connect/terminating-gateways.mdx b/website/content/docs/k8s/connect/terminating-gateways.mdx index 9fb2b149c4..188da110f0 100644 --- a/website/content/docs/k8s/connect/terminating-gateways.mdx +++ b/website/content/docs/k8s/connect/terminating-gateways.mdx @@ -94,14 +94,15 @@ Registering the external services with Consul is a multi-step process: ### Register external services with Consul -There are two ways to register an external service with Consul: -1. 
If [`TransparentProxy`](/docs/connect/transparent-proxy) is enabled, the preferred method is to declare external endpoints in the [`destination`](/docs/connect/config-entries/service-defaults#terminating-gateway-destination) field of `ServiceDefaults`. -1. You can add the service as a node in the Consul catalog. +You may register an external service with Consul using `ServiceDefaults` if +[`TransparentProxy`](/docs/connect/transparent-proxy) is enabled. Otherwise, +you may register the service as a node in the Consul catalog. -#### Register an external service as a destination + + -The [`destination`](/docs/connect/config-entries/service-defaults#terminating-gateway-destination) field of the `ServiceDefaults` Custom Resource Definition (CRD) allows clients to dial the external service directly. It is valid only in [`TransparentProxy`](/docs/connect/transparent-proxy)) mode. -The following table describes traffic behaviors when using `destination`s to route traffic through a terminating gateway: +The [`destination`](/docs/connect/config-entries/service-defaults#terminating-gateway-destination) field of the `ServiceDefaults` Custom Resource Definition (CRD) allows clients to dial an external service directly. For this method to work, [`TransparentProxy`](/docs/connect/transparent-proxy) must be enabled. +The following table describes traffic behaviors when using the `destination` field to route traffic through a terminating gateway: | External Services Layer | Client dials | Client uses TLS | Allowed | Notes | |---|---|---|---|---| @@ -145,8 +146,8 @@ $ kubectl apply --filename serviceDefaults.yaml ``` All other terminating gateway operations can use the name of the `ServiceDefaults` in place of a typical Consul service name. - -#### Register an external service as a Catalog Node + + -> **Note:** Normal Consul services are registered with the Consul client on the node that they're running on. Since this is an external service, there is no Consul node @@ -197,6 +198,10 @@ If ACLs and TLS are enabled : $ curl --request PUT --header "X-Consul-Token: $CONSUL_HTTP_TOKEN" --data @external.json --insecure $CONSUL_HTTP_ADDR/v1/catalog/register true ``` + + + + ### Update terminating gateway ACL role if ACLs are enabled From a2a7b56292b0f64d3e94d3fbc72562420c4589b4 Mon Sep 17 00:00:00 2001 From: Thomas Eckert Date: Thu, 25 Aug 2022 13:37:52 -0400 Subject: [PATCH 27/93] Format traffic behaviors table --- .../docs/k8s/connect/terminating-gateways.mdx | 20 +++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/website/content/docs/k8s/connect/terminating-gateways.mdx b/website/content/docs/k8s/connect/terminating-gateways.mdx index 188da110f0..c685c07c20 100644 --- a/website/content/docs/k8s/connect/terminating-gateways.mdx +++ b/website/content/docs/k8s/connect/terminating-gateways.mdx @@ -104,16 +104,16 @@ you may register the service as a node in the Consul catalog. The [`destination`](/docs/connect/config-entries/service-defaults#terminating-gateway-destination) field of the `ServiceDefaults` Custom Resource Definition (CRD) allows clients to dial an external service directly. For this method to work, [`TransparentProxy`](/docs/connect/transparent-proxy) must be enabled. 
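+For reference, transparent proxy can be enabled for the whole cluster
+through the Helm chart. A minimal sketch, assuming the consul-k8s chart's
+`connectInject.transparentProxy.defaultEnabled` value:
+
+```yaml
+connectInject:
+  enabled: true
+  transparentProxy:
+    defaultEnabled: true
+```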
The following table describes traffic behaviors when using the `destination` field to route traffic through a terminating gateway: -| External Services Layer | Client dials | Client uses TLS | Allowed | Notes | -|---|---|---|---|---| -| L4 | Hostname | Yes | Allowed | `CAFiles` are not allowed because traffic is already end-to-end encrypted by the client. | -| L4 | IP | Yes | Allowed | `CAFiles` are not allowed because traffic is already end-to-end encrypted by the client. | -| L4 | Hostname | No | Not allowed | The sidecar is not protocol aware and can not identify traffic going to the external service. | -| L4 | IP | No | Allowed | There are no limitations on dialing IPs without TLS. | -| L7 | Hostname | Yes | Not allowed | Because traffic is already encrypted before the sidecar, it cannot route as L7 traffic. | -| L7 | IP | Yes | Not allowed | Because traffic is already encrypted before the sidecar, it cannot route as L7 traffic. | -| L7 | Hostname | No | Allowed | A `Host` or `:authority` header is required. | -| L7 | IP | No | Allowed | There are no limitations on dialing IPs without TLS. | +| External Services Layer | Client dials | Client uses TLS | Allowed | Notes | +| ----------------------- | ------------ | --------------- | ----------- | --------------------------------------------------------------------------------------------- | +| L4 | Hostname | Yes | Allowed | `CAFiles` are not allowed because traffic is already end-to-end encrypted by the client. | +| L4 | IP | Yes | Allowed | `CAFiles` are not allowed because traffic is already end-to-end encrypted by the client. | +| L4 | Hostname | No | Not allowed | The sidecar is not protocol aware and can not identify traffic going to the external service. | +| L4 | IP | No | Allowed | There are no limitations on dialing IPs without TLS. | +| L7 | Hostname | Yes | Not allowed | Because traffic is already encrypted before the sidecar, it cannot route as L7 traffic. | +| L7 | IP | Yes | Not allowed | Because traffic is already encrypted before the sidecar, it cannot route as L7 traffic. | +| L7 | Hostname | No | Allowed | A `Host` or `:authority` header is required. | +| L7 | IP | No | Allowed | There are no limitations on dialing IPs without TLS. | You can provide a `caFile` to secure traffic between unencrypted clients that connect to external services through the terminating gateway. Refer to [Create the configuration entry for the terminating gateway](#create-the-configuration-entry-for-the-terminating-gateway) for details. From e990b03d5cd0af5136703070694a08ce6d8faa42 Mon Sep 17 00:00:00 2001 From: Thomas Eckert Date: Thu, 25 Aug 2022 13:56:13 -0400 Subject: [PATCH 28/93] Normalize table with nobrs --- .../docs/k8s/connect/terminating-gateways.mdx | 31 ++++++++++--------- 1 file changed, 16 insertions(+), 15 deletions(-) diff --git a/website/content/docs/k8s/connect/terminating-gateways.mdx b/website/content/docs/k8s/connect/terminating-gateways.mdx index c685c07c20..80654698a3 100644 --- a/website/content/docs/k8s/connect/terminating-gateways.mdx +++ b/website/content/docs/k8s/connect/terminating-gateways.mdx @@ -104,25 +104,25 @@ you may register the service as a node in the Consul catalog. The [`destination`](/docs/connect/config-entries/service-defaults#terminating-gateway-destination) field of the `ServiceDefaults` Custom Resource Definition (CRD) allows clients to dial an external service directly. For this method to work, [`TransparentProxy`](/docs/connect/transparent-proxy) must be enabled. 
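+Once transparent proxy is enabled, a client pod in the mesh can dial the
+external hostname directly. For example, using the `static-client`
+deployment that appears later in this guide:
+
+```shell-session
+$ kubectl exec deploy/static-client -- curl -vvvs https://example.com/
+```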
The following table describes traffic behaviors when using the `destination` field to route traffic through a terminating gateway: -| External Services Layer | Client dials | Client uses TLS | Allowed | Notes | -| ----------------------- | ------------ | --------------- | ----------- | --------------------------------------------------------------------------------------------- | -| L4 | Hostname | Yes | Allowed | `CAFiles` are not allowed because traffic is already end-to-end encrypted by the client. | -| L4 | IP | Yes | Allowed | `CAFiles` are not allowed because traffic is already end-to-end encrypted by the client. | -| L4 | Hostname | No | Not allowed | The sidecar is not protocol aware and can not identify traffic going to the external service. | -| L4 | IP | No | Allowed | There are no limitations on dialing IPs without TLS. | -| L7 | Hostname | Yes | Not allowed | Because traffic is already encrypted before the sidecar, it cannot route as L7 traffic. | -| L7 | IP | Yes | Not allowed | Because traffic is already encrypted before the sidecar, it cannot route as L7 traffic. | -| L7 | Hostname | No | Allowed | A `Host` or `:authority` header is required. | -| L7 | IP | No | Allowed | There are no limitations on dialing IPs without TLS. | +| External Services Layer | Client dials | Client uses TLS | Allowed | Notes | +|--------------------------------------|---------------------------|------------------------------|--------------------------|-----------------------------------------------------------------------------------------------| +| L4 | Hostname | Yes | Allowed | `CAFiles` are not allowed because traffic is already end-to-end encrypted by the client. | +| L4 | IP | Yes | Allowed | `CAFiles` are not allowed because traffic is already end-to-end encrypted by the client. | +| L4 | Hostname | No | Not allowed | The sidecar is not protocol aware and can not identify traffic going to the external service. | +| L4 | IP | No | Allowed | There are no limitations on dialing IPs without TLS. | +| L7 | Hostname | Yes | Not allowed | Because traffic is already encrypted before the sidecar, it cannot route as L7 traffic. | +| L7 | IP | Yes | Not allowed | Because traffic is already encrypted before the sidecar, it cannot route as L7 traffic. | +| L7 | Hostname | No | Allowed | A `Host` or `:authority` header is required. | +| L7 | IP | No | Allowed | There are no limitations on dialing IPs without TLS. | You can provide a `caFile` to secure traffic between unencrypted clients that connect to external services through the terminating gateway. Refer to [Create the configuration entry for the terminating gateway](#create-the-configuration-entry-for-the-terminating-gateway) for details. -Also note that regardless of the `protocol` specified in the `ServiceDefaults`, [L7 intentions](/docs/connect/config-entries/service-intentions#permissions) are not currently supported with `ServiceDefaults` destinations. +-> **Note:** Regardless of the `protocol` specified in the `ServiceDefaults`, [L7 intentions](/docs/connect/config-entries/service-intentions#permissions) are not currently supported with `ServiceDefaults` destinations. 
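+Identity-based (L4) intentions still apply to destinations. A hypothetical
+sketch of such an intention, assuming the `static-client` service used
+later in this guide (the full intentions step appears below):
+
+```yaml
+apiVersion: consul.hashicorp.com/v1alpha1
+kind: ServiceIntentions
+metadata:
+  name: example-https
+spec:
+  destination:
+    name: example-https
+  sources:
+    - name: static-client
+      action: allow
+```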
Create a `ServiceDefaults` custom resource for the external service: - + ```yaml apiVersion: consul.hashicorp.com/v1alpha1 @@ -142,14 +142,15 @@ Create a `ServiceDefaults` custom resource for the external service: Apply the `ServiceDefaults` resource with `kubectl apply`: ```shell-session -$ kubectl apply --filename serviceDefaults.yaml +$ kubectl apply --filename service-defaults.yaml ``` -All other terminating gateway operations can use the name of the `ServiceDefaults` in place of a typical Consul service name. +All other terminating gateway operations can use the name of the `ServiceDefaults` component, in this case "example-https", as a Consul service name. + --> **Note:** Normal Consul services are registered with the Consul client on the node that +Normally, Consul services are registered with the Consul client on the node that they're running on. Since this is an external service, there is no Consul node to register it onto. Instead, we will make up a node name and register the service to that node. From 6d9872388bcb4dc7a9b3f8f6cde67a70731f0037 Mon Sep 17 00:00:00 2001 From: Thomas Eckert Date: Thu, 25 Aug 2022 14:03:43 -0400 Subject: [PATCH 29/93] Clean up copy in ACL role update --- .../content/docs/k8s/connect/terminating-gateways.mdx | 9 +++------ 1 file changed, 3 insertions(+), 6 deletions(-) diff --git a/website/content/docs/k8s/connect/terminating-gateways.mdx b/website/content/docs/k8s/connect/terminating-gateways.mdx index 80654698a3..9ed9865a3a 100644 --- a/website/content/docs/k8s/connect/terminating-gateways.mdx +++ b/website/content/docs/k8s/connect/terminating-gateways.mdx @@ -202,15 +202,12 @@ true - - ### Update terminating gateway ACL role if ACLs are enabled If ACLs are enabled, update the terminating gateway acl role to have `service: write` permissions on all of the services -being represented by the gateway: +being represented by the gateway. -- Create a new policy that includes these permissions -- Update the existing role to include the new policy +Create a new policy that includes the write permission for the service you created. @@ -242,7 +239,7 @@ consul acl role list | grep -B 6 -- "- RELEASE_NAME-terminating-gateway-policy" ID: ``` -Update the terminating gateway acl token with the new policy +Update the terminating gateway ACL token with the new policy. ```shell-session $ consul acl role update -id -policy-name example-https-write-policy From 77c9995a8e1634c6bcb72e6f65e000f2c6252e5b Mon Sep 17 00:00:00 2001 From: Thomas Eckert Date: Thu, 25 Aug 2022 14:04:33 -0400 Subject: [PATCH 30/93] Lil' more cleanup --- website/content/docs/k8s/connect/terminating-gateways.mdx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/content/docs/k8s/connect/terminating-gateways.mdx b/website/content/docs/k8s/connect/terminating-gateways.mdx index 9ed9865a3a..01937f6aa4 100644 --- a/website/content/docs/k8s/connect/terminating-gateways.mdx +++ b/website/content/docs/k8s/connect/terminating-gateways.mdx @@ -231,7 +231,7 @@ service "example-https" { } ``` -Now fetch the ID of the terminating gateway token +Fetch the ID of the terminating gateway token. 
```shell-session consul acl role list | grep -B 6 -- "- RELEASE_NAME-terminating-gateway-policy" | grep ID From ed4a430b3ec17d7d70b671fb4e01deab63fbc8a4 Mon Sep 17 00:00:00 2001 From: Thomas Eckert Date: Thu, 25 Aug 2022 14:40:18 -0400 Subject: [PATCH 31/93] Use tabs for destinations --- .../docs/k8s/connect/terminating-gateways.mdx | 32 +++++++++---------- 1 file changed, 16 insertions(+), 16 deletions(-) diff --git a/website/content/docs/k8s/connect/terminating-gateways.mdx b/website/content/docs/k8s/connect/terminating-gateways.mdx index 01937f6aa4..2119e54e4f 100644 --- a/website/content/docs/k8s/connect/terminating-gateways.mdx +++ b/website/content/docs/k8s/connect/terminating-gateways.mdx @@ -278,8 +278,6 @@ Configure the [`caFile`](https://www.consul.io/docs/connect/config-entries/termi - Consul Helm chart 0.43 or older - An Envoy image with an alpine base image -For `ServiceDefaults` destinations, refer to [Register an external service as a destination](#register-an-external-service-as-a-destination). - Apply the `TerminatingGateway` resource with `kubectl apply`: ```shell-session @@ -315,7 +313,7 @@ $ kubectl apply --filename service-intentions.yaml ### Define the external services as upstreams for services in the mesh -Finally define and deploy the external services as upstreams for the internal mesh services that wish to talk to them. +As a final step, you may define and deploy the external services as upstreams for the internal mesh services that wish to talk to them. An example deployment is provided which will serve as a static client for the terminating gateway service. @@ -364,33 +362,35 @@ spec: -Run the service via `kubectl apply`: +Deploy the service with `kubectl apply`. ```shell-session $ kubectl apply --filename static-client.yaml ``` -Wait for the service to be ready: +Wait for the service to be ready. ```shell-session $ kubectl rollout status deploy static-client --watch deployment "static-client" successfully rolled out ``` -You can verify connectivity of the static-client and terminating gateway via a curl command: +You can verify connectivity of the static-client and terminating gateway via a curl command. 
- - -```shell-session -$ kubectl exec deploy/static-client -- curl -vvvs --header "Host: example-https.com" http://localhost:1234/ -``` - - - - + + ```shell-session $ kubectl exec deploy/static-client -- curl -vvvs https://example.com/ ``` - + + + +```shell-session +$ kubectl exec deploy/static-client -- curl -vvvs --header "Host: example-https.com" http://localhost:1234/ +``` + + + + From 5064fbc2545706dec7c4a8b87bf7a7f9ba05e354 Mon Sep 17 00:00:00 2001 From: Thomas Eckert Date: Thu, 25 Aug 2022 14:44:33 -0400 Subject: [PATCH 32/93] Add links to requirements --- website/content/docs/k8s/connect/terminating-gateways.mdx | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/website/content/docs/k8s/connect/terminating-gateways.mdx b/website/content/docs/k8s/connect/terminating-gateways.mdx index 2119e54e4f..ed1b11e8c7 100644 --- a/website/content/docs/k8s/connect/terminating-gateways.mdx +++ b/website/content/docs/k8s/connect/terminating-gateways.mdx @@ -15,8 +15,8 @@ Adding a terminating gateway is a multi-step process: ## Requirements -- [Consul]() -- [Consul on Kubernetes CLI]() +- [Consul](https://www.consul.io/docs/install#install-consul) +- [Consul on Kubernetes CLI](/docs/k8s/k8s-cli) - Familiarity with [Terminating Gateways](/docs/connect/gateways/terminating-gateway) ## Update the helm chart with terminating gateway config options From 70a1cbd8ea3a1efef2373cb15c4657d590c31615 Mon Sep 17 00:00:00 2001 From: Thomas Eckert Date: Thu, 25 Aug 2022 14:44:45 -0400 Subject: [PATCH 33/93] Capitalize Helm --- website/content/docs/k8s/connect/terminating-gateways.mdx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/content/docs/k8s/connect/terminating-gateways.mdx b/website/content/docs/k8s/connect/terminating-gateways.mdx index ed1b11e8c7..06316f5f51 100644 --- a/website/content/docs/k8s/connect/terminating-gateways.mdx +++ b/website/content/docs/k8s/connect/terminating-gateways.mdx @@ -19,7 +19,7 @@ Adding a terminating gateway is a multi-step process: - [Consul on Kubernetes CLI](/docs/k8s/k8s-cli) - Familiarity with [Terminating Gateways](/docs/connect/gateways/terminating-gateway) -## Update the helm chart with terminating gateway config options +## Update the Helm chart with terminating gateway config options Minimum required Helm options: From 20f291fa066efb58ed99b1972934807d7b8605ef Mon Sep 17 00:00:00 2001 From: Jared Kirschner Date: Wed, 27 Jul 2022 14:03:06 -0700 Subject: [PATCH 34/93] docs: improve health check related docs Includes: - Improved scannability and organization of checks overview - Checks overview includes more guidance on - How to register a health check - The options available for a health check definition - Contextual cross-references to maintenance mode --- website/content/api-docs/agent/check.mdx | 13 +- website/content/api-docs/health.mdx | 3 + website/content/docs/discovery/checks.mdx | 595 +++++++++++++--------- 3 files changed, 365 insertions(+), 246 deletions(-) diff --git a/website/content/api-docs/agent/check.mdx b/website/content/api-docs/agent/check.mdx index eafbb17c42..785fbce8b3 100644 --- a/website/content/api-docs/agent/check.mdx +++ b/website/content/api-docs/agent/check.mdx @@ -6,7 +6,10 @@ description: The /agent/check endpoints interact with checks on the local agent # Check - Agent HTTP API -The `/agent/check` endpoints interact with checks on the local agent in Consul. +Consul's health check capabilities are described in the +[health checks overview](/docs/discovery/checks). 
+The `/agent/check` endpoints interact with health checks +managed by the local agent in Consul. These should not be confused with checks in the catalog. ## List Checks @@ -418,6 +421,10 @@ $ curl \ This endpoint is used with a TTL type check to set the status of the check to `critical` and to reset the TTL clock. +If you want to manually mark a service as unhealthy, +use [maintenance mode](/api-docs/agent#enable-maintenance-mode) +instead of defining a TTL health check and using this endpoint. + | Method | Path | Produces | | ------ | ----------------------------- | ------------------ | | `PUT` | `/agent/check/fail/:check_id` | `application/json` | @@ -456,6 +463,10 @@ $ curl \ This endpoint is used with a TTL type check to set the status of the check and to reset the TTL clock. +If you want to manually mark a service as unhealthy, +use [maintenance mode](/api-docs/agent#enable-maintenance-mode) +instead of defining a TTL health check and using this endpoint. + | Method | Path | Produces | | ------ | ------------------------------- | ------------------ | | `PUT` | `/agent/check/update/:check_id` | `application/json` | diff --git a/website/content/api-docs/health.mdx b/website/content/api-docs/health.mdx index 898c8ffe41..cad74bbad2 100644 --- a/website/content/api-docs/health.mdx +++ b/website/content/api-docs/health.mdx @@ -14,6 +14,9 @@ optional health checking mechanisms. Additionally, some of the query results from the health endpoints are filtered while the catalog endpoints provide the raw entries. +To modify health check registration or information, +use the [`/agent/check`](/api-docs/agent/check) endpoints. + ## List Checks for Node This endpoint returns the checks specific to the node provided on the path. diff --git a/website/content/docs/discovery/checks.mdx b/website/content/docs/discovery/checks.mdx index 5a21495793..1b4c4faf4b 100644 --- a/website/content/docs/discovery/checks.mdx +++ b/website/content/docs/discovery/checks.mdx @@ -13,144 +13,72 @@ description: >- One of the primary roles of the agent is management of system-level and application-level health checks. A health check is considered to be application-level if it is associated with a service. If not associated with a service, the check monitors the health of the entire node. -Review the [health checks tutorial](https://learn.hashicorp.com/tutorials/consul/service-registration-health-checks) to get a more complete example on how to leverage health check capabilities in Consul. -A check is defined in a configuration file or added at runtime over the HTTP interface. Checks -created via the HTTP interface persist with that node. +Review the [service health checks tutorial](https://learn.hashicorp.com/tutorials/consul/service-registration-health-checks) +to get a more complete example on how to leverage health check capabilities in Consul. -There are several different kinds of checks: +## Registering a health check -- Script + Interval - These checks depend on invoking an external application - that performs the health check, exits with an appropriate exit code, and potentially - generates some output. A script is paired with an invocation interval (e.g. - every 30 seconds). This is similar to the Nagios plugin system. The output of - a script check is limited to 4KB. Output larger than this will be truncated. - By default, Script checks will be configured with a timeout equal to 30 seconds. - It is possible to configure a custom Script check timeout value by specifying the - `timeout` field in the check definition. 
When the timeout is reached on Windows, - Consul will wait for any child processes spawned by the script to finish. For any - other system, Consul will attempt to force-kill the script and any child processes - it has spawned once the timeout has passed. - In Consul 0.9.0 and later, script checks are not enabled by default. To use them you - can either use : +There are three ways to register a service with health checks: - - [`enable_local_script_checks`](/docs/agent/config/cli-flags#_enable_local_script_checks): - enable script checks defined in local config files. Script checks defined via the HTTP - API will not be allowed. - - [`enable_script_checks`](/docs/agent/config/cli-flags#_enable_script_checks): enable - script checks regardless of how they are defined. +1. Start or reload a Consul agent with a service definition file in the + [agent's configuration directory](/docs/agent#configuring-consul-agents). +1. Call the + [`/agent/service/register`](/api-docs/agent/service#register-service) + HTTP API endpoint to register the service. +1. Use the + [`consul services register`](/commands/services/register) + CLI command to register the service. - ~> **Security Warning:** Enabling script checks in some configurations may - introduce a remote execution vulnerability which is known to be targeted by - malware. We strongly recommend `enable_local_script_checks` instead. See [this - blog post](https://www.hashicorp.com/blog/protecting-consul-from-rce-risk-in-specific-configurations) - for more details. +When a service is registered using the HTTP API endpoint or CLI command, +the checks persist in the Consul data folder across Consul agent restarts. -- `HTTP + Interval` - These checks make an HTTP `GET` request to the specified URL, - waiting the specified `interval` amount of time between requests (eg. 30 seconds). - The status of the service depends on the HTTP response code: any `2xx` code is - considered passing, a `429 Too ManyRequests` is a warning, and anything else is - a failure. This type of check - should be preferred over a script that uses `curl` or another external process - to check a simple HTTP operation. By default, HTTP checks are `GET` requests - unless the `method` field specifies a different method. Additional header - fields can be set through the `header` field which is a map of lists of - strings, e.g. `{"x-foo": ["bar", "baz"]}`. By default, HTTP checks will be - configured with a request timeout equal to 10 seconds. +## Types of checks - It is possible to configure a custom HTTP check timeout value by - specifying the `timeout` field in the check definition. The output of the - check is limited to roughly 4KB. Responses larger than this will be truncated. - HTTP checks also support TLS. By default, a valid TLS certificate is expected. - Certificate verification can be turned off by setting the `tls_skip_verify` - field to `true` in the check definition. When using TLS, the SNI will be set - automatically from the URL if it uses a hostname (as opposed to an IP address); - the value can be overridden by setting `tls_server_name`. +This section describes the available types of health checks you can use to +automatically monitor the health of a service instance or node. - Consul follows HTTP redirects by default. Set the `disable_redirects` field to - `true` to disable redirects. 
+-> **To manually mark a service unhealthy:** Use the maintenance mode + [CLI command](/commands/maint) or + [HTTP API endpoint](/api-docs/agent#enable-maintenance-mode) + to temporarily remove one or all service instances on a node + from service discovery DNS and HTTP API query results. -- `TCP + Interval` - These checks make a TCP connection attempt to the specified - IP/hostname and port, waiting `interval` amount of time between attempts - (e.g. 30 seconds). If no hostname - is specified, it defaults to "localhost". The status of the service depends on - whether the connection attempt is successful (ie - the port is currently - accepting connections). If the connection is accepted, the status is - `success`, otherwise the status is `critical`. In the case of a hostname that - resolves to both IPv4 and IPv6 addresses, an attempt will be made to both - addresses, and the first successful connection attempt will result in a - successful check. This type of check should be preferred over a script that - uses `netcat` or another external process to check a simple socket operation. - By default, TCP checks will be configured with a request timeout of 10 seconds. - It is possible to configure a custom TCP check timeout value by specifying the - `timeout` field in the check definition. +### Script check ((#script-interval)) -- `UDP + Interval` - These checks direct the client to periodically send UDP datagrams - to the specified IP/hostname and port. The duration specified in the `interval` field sets the amount of time - between attempts, such as `30s` to indicate 30 seconds. The check is logged as healthy if any response from the UDP server is received. Any other result sets the status to `critical`. - The default interval for, UDP checks is `10s`, but you can configure a custom UDP check timeout value by specifying the - `timeout` field in the check definition. If any timeout on read exists, the check is still considered healthy. +Script checks periodically invoke an external application that performs the health check, +exits with an appropriate exit code, and potentially generates some output. +The specified `interval` determines the time between check invocations. +The output of a script check is limited to 4KB. +Larger outputs are truncated. -- `Time to Live (TTL)` ((#ttl)) - These checks retain their last known state - for a given TTL. The state of the check must be updated periodically over the HTTP - interface. If an external system fails to update the status within a given TTL, - the check is set to the failed state. This mechanism, conceptually similar to a - dead man's switch, relies on the application to directly report its health. For - example, a healthy app can periodically `PUT` a status update to the HTTP endpoint; - if the app fails, the TTL will expire and the health check enters a critical state. - The endpoints used to update health information for a given check are: [pass](/api-docs/agent/check#ttl-check-pass), - [warn](/api-docs/agent/check#ttl-check-warn), [fail](/api-docs/agent/check#ttl-check-fail), - and [update](/api-docs/agent/check#ttl-check-update). TTL checks also persist their - last known status to disk. This allows the Consul agent to restore the last known - status of the check across restarts. Persisted check status is valid through the - end of the TTL from the time of the last check. +By default, script checks are configured with a timeout equal to 30 seconds. 
+To configure a custom script check timeout value, +specify the `timeout` field in the check definition. +After reaching the timeout on a Windows system, +Consul waits for any child processes spawned by the script to finish. +After reaching the timeout on other systems, +Consul attempts to force-kill the script and any child processes it spawned. -- `Docker + Interval` - These checks depend on invoking an external application which - is packaged within a Docker Container. The application is triggered within the running - container via the Docker Exec API. We expect that the Consul agent user has access - to either the Docker HTTP API or the unix socket. Consul uses `$DOCKER_HOST` to - determine the Docker API endpoint. The application is expected to run, perform a health - check of the service running inside the container, and exit with an appropriate exit code. - The check should be paired with an invocation interval. The shell on which the check - has to be performed is configurable which makes it possible to run containers which - have different shells on the same host. Check output for Docker is limited to - 4KB. Any output larger than this will be truncated. In Consul 0.9.0 and later, the agent - must be configured with [`enable_script_checks`](/docs/agent/config/cli-flags#_enable_script_checks) - set to `true` in order to enable Docker health checks. +Script checks are not enabled by default. +To enable a Consul agent to perform script checks, +use one of the following agent configuration options: -- `gRPC + Interval` - These checks are intended for applications that support the standard - [gRPC health checking protocol](https://github.com/grpc/grpc/blob/master/doc/health-checking.md). - The state of the check will be updated by probing the configured endpoint, waiting `interval` - amount of time between probes (eg. 30 seconds). By default, gRPC checks will be configured - with a default timeout of 10 seconds. - It is possible to configure a custom timeout value by specifying the `timeout` field in - the check definition. gRPC checks will default to not using TLS, but TLS can be enabled by - setting `grpc_use_tls` in the check definition. If TLS is enabled, then by default, a valid - TLS certificate is expected. Certificate verification can be turned off by setting the - `tls_skip_verify` field to `true` in the check definition. - To check on a specific service instead of the whole gRPC server, add the service identifier after the `gRPC` check's endpoint in the following format `/:service_identifier`. +- [`enable_local_script_checks`](/docs/agent/config/cli-flags#_enable_local_script_checks): + Enable script checks defined in local config files. + Script checks registered using the HTTP API are not allowed. +- [`enable_script_checks`](/docs/agent/config/cli-flags#_enable_script_checks): + Enable script checks no matter how they are registered. -- `H2ping + Interval` - These checks test an endpoint that uses http2 - by connecting to the endpoint and sending a ping frame. TLS is assumed to be configured by default. - To disable TLS and use h2c, set `h2ping_use_tls` to `false`. If the ping is successful - within a specified timeout, then the check is updated as passing. - The timeout defaults to 10 seconds, but is configurable using the `timeout` field. If TLS is enabled a valid - certificate is required, unless `tls_skip_verify` is set to `true`. - The check will be run on the interval specified by the `interval` field. 
+  ~> **Security Warning:**
+  Enabling non-local script checks in some configurations may introduce
+  a remote execution vulnerability known to be targeted by malware.
+  We strongly recommend `enable_local_script_checks` instead.
+  For more information, refer to
+  [this blog post](https://www.hashicorp.com/blog/protecting-consul-from-rce-risk-in-specific-configurations).

-- `Alias` - These checks alias the health state of another registered
-  node or service. The state of the check will be updated asynchronously, but is
-  nearly instant. For aliased services on the same agent, the local state is monitored
-  and no additional network resources are consumed. For other services and nodes,
-  the check maintains a blocking query over the agent's connection with a current
-  server and allows stale requests. If there are any errors in watching the aliased
-  node or service, the check state will be critical. For the blocking query, the
-  check will use the ACL token set on the service or check definition or otherwise
-  will fall back to the default ACL token set with the agent (`acl_token`).
-
-## Check Definition
-
-A script check:
+The following service definition file snippet is an example
+of a script check definition:

 

@@ -162,7 +90,6 @@ check = {
   interval = "10s"
   timeout = "1s"
 }
-
 ```

 ```json
@@ -179,7 +106,47 @@ check = {

 

-A HTTP check:
+#### Check script conventions
+
+A check script's exit code is used to determine the health check status:
+
+- Exit code 0 - Check is passing
+- Exit code 1 - Check is warning
+- Any other code - Check is failing
+
+Any output of the script is captured and made available in the
+`Output` field of checks included in HTTP API responses,
+as in this example from the [local service health endpoint](/api-docs/agent/service#by-name-json).
+
+### HTTP check ((#http-interval))
+
+HTTP checks periodically make an HTTP `GET` request to the specified URL,
+waiting the specified `interval` amount of time between requests.
+The status of the service depends on the HTTP response code: any `2xx` code is
+considered passing, a `429 Too Many Requests` is a warning, and anything else is
+a failure. This type of check
+should be preferred over a script that uses `curl` or another external process
+to check a simple HTTP operation. By default, HTTP checks are `GET` requests
+unless the `method` field specifies a different method. Additional request
+headers can be set through the `header` field, which is a map of lists of
+strings, such as `{"x-foo": ["bar", "baz"]}`.
+
+By default, HTTP checks are configured with a request timeout equal to 10 seconds.
+To configure a custom HTTP check timeout value,
+specify the `timeout` field in the check definition.
+The output of an HTTP check is limited to approximately 4KB.
+Larger outputs are truncated.
+HTTP checks also support TLS. By default, a valid TLS certificate is expected.
+Certificate verification can be turned off by setting the `tls_skip_verify`
+field to `true` in the check definition. When using TLS, the SNI is implicitly
+determined from the URL if it uses a hostname instead of an IP address.
+You can explicitly set the SNI value by setting `tls_server_name`.
+
+Consul follows HTTP redirects by default.
+To disable redirects, set the `disable_redirects` field to `true`.
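+
+For illustration only (not part of the upstream page), the same HTTP check
+parameters can be supplied when registering a service through Consul's Go API
+client; the service name and health endpoint below are hypothetical:
+
+```go
+package main
+
+import (
+	"log"
+
+	"github.com/hashicorp/consul/api"
+)
+
+func main() {
+	client, err := api.NewClient(api.DefaultConfig())
+	if err != nil {
+		log.Fatal(err)
+	}
+
+	// Register a "web" service with an HTTP check that issues a GET against
+	// its health endpoint every 10s and fails the attempt after 1s.
+	reg := &api.AgentServiceRegistration{
+		Name: "web",
+		Port: 80,
+		Check: &api.AgentServiceCheck{
+			HTTP:     "http://localhost:80/health",
+			Method:   "GET",
+			Header:   map[string][]string{"x-foo": {"bar", "baz"}},
+			Interval: "10s",
+			Timeout:  "1s",
+		},
+	}
+	if err := client.Agent().ServiceRegister(reg); err != nil {
+		log.Fatalf("failed to register service: %v", err)
+	}
+}
+```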
+ +The following service definition file snippet is an example +of an HTTP check definition: @@ -220,7 +187,23 @@ check = { -A TCP check: +### TCP check ((#tcp-interval)) + +TCP checks periodically make a TCP connection attempt to the specified IP/hostname and port, waiting `interval` amount of time between attempts. +If no hostname is specified, it defaults to "localhost". +The health check status is `success` if the target host accepts the connection attempt, +otherwise the status is `critical`. In the case of a hostname that +resolves to both IPv4 and IPv6 addresses, an attempt is made to both +addresses, and the first successful connection attempt results in a +successful check. This type of check should be preferred over a script that +uses `netcat` or another external process to check a simple socket operation. + +By default, TCP checks are configured with a request timeout equal to 10 seconds. +To configure a custom TCP check timeout value, +specify the `timeout` field in the check definition. + +The following service definition file snippet is an example +of a TCP check definition: @@ -232,7 +215,6 @@ check = { interval = "10s" timeout = "1s" } - ``` ```json @@ -249,7 +231,21 @@ check = { -A UDP check: +### UDP check ((#udp-interval)) + +UDP checks periodically direct the Consul agent to send UDP datagrams +to the specified IP/hostname and port, +waiting `interval` amount of time between attempts. +The check status is set to `success` if any response is received from the targeted UDP server. +Any other result sets the status to `critical`. + +By default, UDP checks are configured with a request timeout equal to 10 seconds. +To configure a custom UDP check timeout value, +specify the `timeout` field in the check definition. +If any timeout on read exists, the check is still considered healthy. + +The following service definition file snippet is an example +of a UDP check definition: @@ -261,7 +257,6 @@ check = { interval = "10s" timeout = "1s" } - ``` ```json @@ -278,7 +273,32 @@ check = { -A TTL check: +### Time to live (TTL) check ((#ttl)) + +TTL checks retain their last known state for the specified `ttl` duration. +If the `ttl` duration elapses before a new check update +is provided over the HTTP interface, +the check is set to `critical` state. + +This mechanism relies on the application to directly report its health. +For example, a healthy app can periodically `PUT` a status update to the HTTP endpoint. +Then, if the app is disrupted and unable to perform this update +before the TTL expires, the health check enters the `critical` state. +The endpoints used to update health information for a given check are: [pass](/api-docs/agent/check#ttl-check-pass), +[warn](/api-docs/agent/check#ttl-check-warn), [fail](/api-docs/agent/check#ttl-check-fail), +and [update](/api-docs/agent/check#ttl-check-update). TTL checks also persist their +last known status to disk. This persistence allows the Consul agent to restore the last known +status of the check across agent restarts. Persisted check status is valid through the +end of the TTL from the time of the last check. + +To manually mark a service unhealthy, +it is far more convenient to use the maintenance mode +[CLI command](/commands/maint) or +[HTTP API endpoint](/api-docs/agent#enable-maintenance-mode) +rather than a TTL health check with arbitrarily high `ttl`. 
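+
+As a hedged editorial sketch (the check ID `web-ttl` and the update cadence are
+hypothetical), an application can keep a TTL check alive through the Go API
+client, which wraps the pass/warn/fail/update endpoints listed above:
+
+```go
+package main
+
+import (
+	"log"
+	"time"
+
+	"github.com/hashicorp/consul/api"
+)
+
+func main() {
+	client, err := api.NewClient(api.DefaultConfig())
+	if err != nil {
+		log.Fatal(err)
+	}
+
+	// Report a passing status before the TTL elapses; if the app stalls and
+	// misses an update, the TTL expires and the check turns critical.
+	for range time.Tick(15 * time.Second) {
+		if err := client.Agent().UpdateTTL("web-ttl", "app heartbeat OK", api.HealthPassing); err != nil {
+			log.Printf("failed to update TTL check: %v", err)
+		}
+	}
+}
+```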
+The following service definition file snippet is an example
+of a TTL check definition:

 

@@ -304,7 +324,24 @@ check = {

 

-A Docker check:
+### Docker check ((#docker-interval))
+
+These checks depend on periodically invoking an external application that
+is packaged within a Docker container. The application is triggered within the running
+container through the Docker Exec API. We expect that the Consul agent user has access
+to either the Docker HTTP API or the unix socket. Consul uses `$DOCKER_HOST` to
+determine the Docker API endpoint. The application is expected to run, perform a health
+check of the service running inside the container, and exit with an appropriate exit code.
+The check should be paired with an invocation interval. The shell on which the check
+has to be performed is configurable, making it possible to run containers which
+have different shells on the same host.
+The output of a Docker check is limited to 4KB.
+Larger outputs are truncated.
+The agent must be configured with [`enable_script_checks`](/docs/agent/config/cli-flags#_enable_script_checks)
+set to `true` in order to enable Docker health checks.
+
+The following service definition file snippet is an example
+of a Docker check definition:

 

@@ -334,7 +371,26 @@ check = {

 

-A gRPC check for the whole application:
+### gRPC check ((#grpc-interval))
+
+gRPC checks are intended for applications that support the standard
+[gRPC health checking protocol](https://github.com/grpc/grpc/blob/master/doc/health-checking.md).
+The state of the check will be updated by periodically probing the configured endpoint,
+waiting `interval` amount of time between attempts.
+
+By default, gRPC checks are configured with a timeout equal to 10 seconds.
+To configure a custom gRPC check timeout value,
+specify the `timeout` field in the check definition.
+
+gRPC checks default to not using TLS.
+To enable TLS, set `grpc_use_tls` in the check definition.
+If TLS is enabled, then by default, a valid TLS certificate is expected.
+Certificate verification can be turned off by setting the
+`tls_skip_verify` field to `true` in the check definition.
+To check on a specific service instead of the whole gRPC server, add the service identifier after the `gRPC` check's endpoint in the following format `/:service_identifier`.
+
+The following service definition file snippet is an example
+of a gRPC check for a whole application:

 

@@ -362,7 +418,8 @@ check = {

 

-A gRPC check for the specific `my_service` service:
+The following service definition file snippet is an example
+of a gRPC check for the specific `my_service` service:

 

@@ -390,7 +447,23 @@ check = {

 

-A h2ping check:
+### H2ping check ((#h2ping-interval))
+
+H2ping checks test an endpoint that uses HTTP/2 by connecting to the endpoint
+and sending a ping frame, waiting `interval` amount of time between attempts.
+If the ping is successful within a specified timeout,
+then the check status is set to `success`.
+
+By default, h2ping checks are configured with a request timeout equal to 10 seconds.
+To configure a custom h2ping check timeout value,
+specify the `timeout` field in the check definition.
+
+TLS is enabled by default.
+To disable TLS and use h2c, set `h2ping_use_tls` to `false`.
+If TLS is not disabled, a valid certificate is required unless `tls_skip_verify` is set to `true`.
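+
+For reference, an h2ping check can also be registered programmatically. This is
+an editorial sketch, assuming a Go API client recent enough to expose the
+h2ping fields, with a hypothetical endpoint:
+
+```go
+package main
+
+import (
+	"log"
+
+	"github.com/hashicorp/consul/api"
+)
+
+func main() {
+	client, err := api.NewClient(api.DefaultConfig())
+	if err != nil {
+		log.Fatal(err)
+	}
+
+	// A node-level h2ping check; TLS stays enabled, so a valid certificate
+	// is expected unless TLSSkipVerify is set to true.
+	reg := &api.AgentCheckRegistration{
+		Name: "h2ping-check",
+		AgentServiceCheck: api.AgentServiceCheck{
+			H2PING:       "localhost:22222",
+			H2PingUseTLS: true,
+			Interval:     "10s",
+			Timeout:      "5s",
+		},
+	}
+	if err := client.Agent().CheckRegister(reg); err != nil {
+		log.Fatalf("failed to register check: %v", err)
+	}
+}
+```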
+ +The following service definition file snippet is an example +of an h2ping check definition: @@ -418,7 +491,29 @@ check = { -An alias check for a local service: +### Alias check + +These checks alias the health state of another registered +node or service. The state of the check updates asynchronously, but is +nearly instant. For aliased services on the same agent, the local state is monitored +and no additional network resources are consumed. For other services and nodes, +the check maintains a blocking query over the agent's connection with a current +server and allows stale requests. If there are any errors in watching the aliased +node or service, the check state is set to `critical`. +For the blocking query, the check uses the ACL token set on the service or check definition. +If no ACL token is set in the service or check definition, +the blocking query uses the agent's default ACL token +([`acl.tokens.default`](/docs/agent/config/config-files#acl_tokens_default)). + +~> **Configuration info**: The alias check configuration expects the alias to be +registered on the same agent as the one you are aliasing. If the service is +not registered with the same agent, `"alias_node": ""` must also be +specified. When using `alias_node`, if no service is specified, the check will +alias the health of the node. If a service is specified, the check will alias +the specified service on this particular node. + +The following service definition file snippet is an example +of an alias check for a local service: @@ -440,72 +535,137 @@ check = { -~> Configuration info: The alias check configuration expects the alias to be -registered on the same agent as the one you are aliasing. If the service is -not registered with the same agent, `"alias_node": ""` must also be -specified. When using `alias_node`, if no service is specified, the check will -alias the health of the node. If a service is specified, the check will alias -the specified service on this particular node. +## Check definition -Each type of definition must include a `name` and may optionally provide an -`id` and `notes` field. The `id` must be unique per _agent_ otherwise only the -last defined check with that `id` will be registered. If the `id` is not set -and the check is embedded within a service definition a unique check id is -generated. Otherwise, `id` will be set to `name`. If names might conflict, -unique IDs should be provided. +This section covers some of the most common options for check definitions. +For a complete list of all check options, refer to the +[Register Check HTTP API endpoint documentation](/api-docs/agent/check#json-request-body-schema). -The `notes` field is opaque to Consul but can be used to provide a human-readable -description of the current state of the check. Similarly, an external process -updating a TTL check via the HTTP interface can set the `notes` value. +-> **Casing for check options:** + The correct casing for an option depends on whether the check is defined in + a service definition file or an HTTP API JSON request body. + For example, the option `deregister_critical_service_after` in a service + definition file is instead named `DeregisterCriticalServiceAfter` in an + HTTP API JSON request body. -Checks may also contain a `token` field to provide an ACL token. This token is -used for any interaction with the catalog for the check, including -[anti-entropy syncs](/docs/architecture/anti-entropy) and deregistration. 
-For Alias checks, this token is used if a remote blocking query is necessary -to watch the state of the aliased node or service. +#### General options -Script, TCP, UDP, HTTP, Docker, and gRPC checks must include an `interval` field. This -field is parsed by Go's `time` package, and has the following -[formatting specification](https://golang.org/pkg/time/#ParseDuration): +- `name` `(string: )` - Specifies the name of the check. -> A duration string is a possibly signed sequence of decimal numbers, each with -> optional fraction and a unit suffix, such as "300ms", "-1.5h" or "2h45m". -> Valid time units are "ns", "us" (or "µs"), "ms", "s", "m", "h". +- `id` `(string: "")` - Specifies a unique ID for this check on this node. + + If unspecified, Consul defines the check id by: + - If the check definition is embedded within a service definition file, + a unique check id is auto-generated. + - Otherwise, the `id` is set to the value of `name`. + If names might conflict, you must provide unique IDs to avoid + overwriting existing checks with the same id on this node. -In Consul 0.7 and later, checks that are associated with a service may also contain -an optional `deregister_critical_service_after` field, which is a timeout in the -same Go time format as `interval` and `ttl`. If a check is in the critical state -for more than this configured value, then its associated service (and all of its -associated checks) will automatically be deregistered. The minimum timeout is 1 -minute, and the process that reaps critical services runs every 30 seconds, so it -may take slightly longer than the configured timeout to trigger the deregistration. -This should generally be configured with a timeout that's much, much longer than -any expected recoverable outage for the given service. +- `interval` `(string: )` - Specifies + the frequency at which to run this check. + Required for all check types except TTL and alias checks. -To configure a check, either provide it as a `-config-file` option to the -agent or place it inside the `-config-dir` of the agent. The file must -end in a ".json" or ".hcl" extension to be loaded by Consul. Check definitions -can also be updated by sending a `SIGHUP` to the agent. Alternatively, the -check can be registered dynamically using the [HTTP API](/api). + The value is parsed by Go's `time` package, and has the following + [formatting specification](https://golang.org/pkg/time/#ParseDuration): -## Check Scripts + > A duration string is a possibly signed sequence of decimal numbers, each with + > optional fraction and a unit suffix, such as "300ms", "-1.5h" or "2h45m". + > Valid time units are "ns", "us" (or "µs"), "ms", "s", "m", "h". -A check script is generally free to do anything to determine the status -of the check. The only limitations placed are that the exit codes must obey -this convention: +- `service_id` `(string: )` - Specifies + the ID of a service instance to associate this check with. + That service instance must be on this node. + If not specified, this check is treated as a node-level check. + For more information, refer to the + [service-bound checks](#service-bound-checks) section. -- Exit code 0 - Check is passing -- Exit code 1 - Check is warning -- Any other code - Check is failing +- `status` `(string: "")` - Specifies the initial status of the health check as + "critical" (default), "warning", or "passing". For more details, refer to + the [initial health check status](#initial-health-check-status) section. 
+
+  -> **Health defaults to critical:** If health status is not initially specified,
+     it defaults to "critical" to protect against including a service
+     in discovery results before it is ready.

-This is the only convention that Consul depends on. Any output of the script
-will be captured and stored in the `output` field.
+- `deregister_critical_service_after` `(string: "")` - If specified,
+  the associated service and all its checks are deregistered
+  after this check is in the critical state for more than the specified value.
+  The value has the same formatting specification as the [`interval`](#interval) field.

-In Consul 0.9.0 and later, the agent must be configured with
-[`enable_script_checks`](/docs/agent/config/cli-flags#_enable_script_checks) set to `true`
-in order to enable script checks.
+  The minimum timeout is 1 minute,
+  and the process that reaps critical services runs every 30 seconds,
+  so it may take slightly longer than the configured timeout to trigger the deregistration.
+  This field should generally be configured with a timeout that's significantly longer than
+  any expected recoverable outage for the given service.

-## Initial Health Check Status
+- `notes` `(string: "")` - Provides a human-readable description of the check.
+  This field is opaque to Consul and can be used however the user finds useful.
+  For example, it could be used to describe the current state of the check.
+
+- `token` `(string: "")` - Specifies an ACL token used for any interaction
+  with the catalog for the check, including
+  [anti-entropy syncs](/docs/architecture/anti-entropy) and deregistration.
+
+  For alias checks, this token is used if a remote blocking query is necessary to watch the state of the aliased node or service.
+
+#### Success/failures before passing/warning/critical
+
+To prevent flapping health checks and limit the load they cause on the cluster,
+a health check may be configured to become passing/warning/critical only after a
+specified number of consecutive checks return passing/critical.
+The status does not transition states until the configured threshold is reached.
+
+- `success_before_passing` - Number of consecutive successful results required
+  before check status transitions to passing. Defaults to `0`. Added in Consul 1.7.0.
+
+- `failures_before_warning` - Number of consecutive unsuccessful results required
+  before check status transitions to warning. Defaults to the same value as that of
+  `failures_before_critical` to maintain the expected behavior of not changing the
+  status of service checks to `warning` before `critical` unless configured to do so.
+  Values higher than `failures_before_critical` are invalid. Added in Consul 1.11.0.
+
+- `failures_before_critical` - Number of consecutive unsuccessful results required
+  before check status transitions to critical. Defaults to `0`. Added in Consul 1.7.0.
+
+This feature is available for all check types except TTL and alias checks.
+By default, both passing and critical thresholds are set to 0 so the check
+status always reflects the last check result.
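+
+These thresholds map onto fields of the same names in the Go API client. The
+following is an editorial sketch (not upstream content), assuming a client
+recent enough to expose all three fields (Consul 1.11+):
+
+```go
+package main
+
+import (
+	"log"
+
+	"github.com/hashicorp/consul/api"
+)
+
+func main() {
+	client, err := api.NewClient(api.DefaultConfig())
+	if err != nil {
+		log.Fatal(err)
+	}
+
+	// A node-level TCP check that becomes passing only after 3 consecutive
+	// successes, warning after 1 failure, and critical after 3 failures.
+	reg := &api.AgentCheckRegistration{
+		Name: "HTTP TCP on port 80",
+		AgentServiceCheck: api.AgentServiceCheck{
+			TCP:                    "localhost:80",
+			Interval:               "10s",
+			Timeout:                "1s",
+			SuccessBeforePassing:   3,
+			FailuresBeforeWarning:  1,
+			FailuresBeforeCritical: 3,
+		},
+	}
+	if err := client.Agent().CheckRegister(reg); err != nil {
+		log.Fatalf("failed to register check: %v", err)
+	}
+}
+```
+
+The equivalent service definition file entries: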
+ + + +```hcl +checks = [ + { + name = "HTTP TCP on port 80" + tcp = "localhost:80" + interval = "10s" + timeout = "1s" + success_before_passing = 3 + failures_before_warning = 1 + failures_before_critical = 3 + } +] +``` + +```json +{ + "checks": [ + { + "name": "HTTP TCP on port 80", + "tcp": "localhost:80", + "interval": "10s", + "timeout": "1s", + "success_before_passing": 3, + "failures_before_warning": 1, + "failures_before_critical": 3 + } + ] +} +``` + + + +## Initial health check status By default, when checks are registered against a Consul agent, the state is set immediately to "critical". This is useful to prevent services from being @@ -576,13 +736,13 @@ In the above configuration, if the web-app health check begins failing, it will only affect the availability of the web-app service. All other services provided by the node will remain unchanged. -## Agent Certificates for TLS Checks +## Agent certificates for TLS checks The [enable_agent_tls_for_checks](/docs/agent/config/config-files#enable_agent_tls_for_checks) agent configuration option can be utilized to have HTTP or gRPC health checks to use the agent's credentials when configured for TLS. -## Multiple Check Definitions +## Multiple check definitions Multiple check definitions can be defined using the `checks` (plural) key in your configuration file. @@ -640,58 +800,3 @@ checks = [ ``` - -## Success/Failures before passing/warning/critical - -To prevent flapping health checks, and limit the load they cause on the cluster, -a health check may be configured to become passing/warning/critical only after a -specified number of consecutive checks return passing/critical. -The status will not transition states until the configured threshold is reached. - -- `success_before_passing` - Number of consecutive successful results required - before check status transitions to passing. Defaults to `0`. Added in Consul 1.7.0. -- `failures_before_warning` - Number of consecutive unsuccessful results required - before check status transitions to warning. Defaults to the same value as that of - `failures_before_critical` to maintain the expected behavior of not changing the - status of service checks to `warning` before `critical` unless configured to do so. - Values higher than `failures_before_critical` are invalid. Added in Consul 1.11.0. -- `failures_before_critical` - Number of consecutive unsuccessful results required - before check status transitions to critical. Defaults to `0`. Added in Consul 1.7.0. - -This feature is available for HTTP, TCP, gRPC, Docker & Monitor checks. -By default, both passing and critical thresholds will be set to 0 so the check -status will always reflect the last check result. 
- - - -```hcl -checks = [ - { - name = "HTTP TCP on port 80" - tcp = "localhost:80" - interval = "10s" - timeout = "1s" - success_before_passing = 3 - failures_before_warning = 1 - failures_before_critical = 3 - } -] -``` - -```json -{ - "checks": [ - { - "name": "HTTP TCP on port 80", - "tcp": "localhost:80", - "interval": "10s", - "timeout": "1s", - "success_before_passing": 3, - "failures_before_warning": 1, - "failures_before_critical": 3 - } - ] -} -``` - - From fead3c537b8f0a4325dec29661165d309f1cf3d0 Mon Sep 17 00:00:00 2001 From: Dao Thanh Tung Date: Fri, 26 Aug 2022 06:21:49 +0800 Subject: [PATCH 35/93] Fix Consul KV CLI 'GET' flags 'keys' and 'recurse' to be set together (#13493) allow flags -recurse and -keys to be run at the same time in consul kv get CLI --- .changelog/13493.txt | 3 ++ command/kv/get/kv_get.go | 36 +++++++++++-- command/kv/get/kv_get_test.go | 99 +++++++++++++++++++++++++++++++++++ 3 files changed, 133 insertions(+), 5 deletions(-) create mode 100644 .changelog/13493.txt diff --git a/.changelog/13493.txt b/.changelog/13493.txt new file mode 100644 index 0000000000..9c3eec605d --- /dev/null +++ b/.changelog/13493.txt @@ -0,0 +1,3 @@ +```release-note:bug +cli: Fix Consul kv CLI 'GET' flags 'keys' and 'recurse' to be set together +``` diff --git a/command/kv/get/kv_get.go b/command/kv/get/kv_get.go index 099aedb9fc..aa93ef963b 100644 --- a/command/kv/get/kv_get.go +++ b/command/kv/get/kv_get.go @@ -99,6 +99,32 @@ func (c *cmd) Run(args []string) int { } switch { + case c.keys && c.recurse: + pairs, _, err := client.KV().List(key, &api.QueryOptions{ + AllowStale: c.http.Stale(), + }) + if err != nil { + c.UI.Error(fmt.Sprintf("Error querying Consul agent: %s", err)) + return 1 + } + + for i, pair := range pairs { + if c.detailed { + var b bytes.Buffer + if err := prettyKVPair(&b, pair, false, true); err != nil { + c.UI.Error(fmt.Sprintf("Error rendering KV key: %s", err)) + return 1 + } + c.UI.Info(b.String()) + + if i < len(pairs)-1 { + c.UI.Info("") + } + } else { + c.UI.Info(fmt.Sprintf("%s", pair.Key)) + } + } + return 0 case c.keys: keys, _, err := client.KV().Keys(key, c.separator, &api.QueryOptions{ AllowStale: c.http.Stale(), @@ -125,7 +151,7 @@ func (c *cmd) Run(args []string) int { for i, pair := range pairs { if c.detailed { var b bytes.Buffer - if err := prettyKVPair(&b, pair, c.base64encode); err != nil { + if err := prettyKVPair(&b, pair, c.base64encode, false); err != nil { c.UI.Error(fmt.Sprintf("Error rendering KV pair: %s", err)) return 1 } @@ -161,7 +187,7 @@ func (c *cmd) Run(args []string) int { if c.detailed { var b bytes.Buffer - if err := prettyKVPair(&b, pair, c.base64encode); err != nil { + if err := prettyKVPair(&b, pair, c.base64encode, false); err != nil { c.UI.Error(fmt.Sprintf("Error rendering KV pair: %s", err)) return 1 } @@ -187,7 +213,7 @@ func (c *cmd) Help() string { return c.help } -func prettyKVPair(w io.Writer, pair *api.KVPair, base64EncodeValue bool) error { +func prettyKVPair(w io.Writer, pair *api.KVPair, base64EncodeValue bool, keysOnly bool) error { tw := tabwriter.NewWriter(w, 0, 2, 6, ' ', 0) fmt.Fprintf(tw, "CreateIndex\t%d\n", pair.CreateIndex) fmt.Fprintf(tw, "Flags\t%d\n", pair.Flags) @@ -205,9 +231,9 @@ func prettyKVPair(w io.Writer, pair *api.KVPair, base64EncodeValue bool) error { if pair.Namespace != "" { fmt.Fprintf(tw, "Namespace\t%s\n", pair.Namespace) } - if base64EncodeValue { + if !keysOnly && base64EncodeValue { fmt.Fprintf(tw, "Value\t%s", base64.StdEncoding.EncodeToString(pair.Value)) - } else { + } 
else if !keysOnly { fmt.Fprintf(tw, "Value\t%s", pair.Value) } return tw.Flush() diff --git a/command/kv/get/kv_get_test.go b/command/kv/get/kv_get_test.go index 3a7b12d8a7..5143391ef0 100644 --- a/command/kv/get/kv_get_test.go +++ b/command/kv/get/kv_get_test.go @@ -418,3 +418,102 @@ func TestKVGetCommand_DetailedBase64(t *testing.T) { t.Fatalf("bad %#v, value is not base64 encoded", output) } } + +func TestKVGetCommand_KeysRecurse(t *testing.T) { + if testing.Short() { + t.Skip("too slow for testing.Short") + } + + t.Parallel() + a := agent.NewTestAgent(t, ``) + defer a.Shutdown() + client := a.Client() + + ui := cli.NewMockUi() + c := New(ui) + keys := map[string]string{ + "foo/": "", + "foo/a": "Hello World 2", + "foo1/a": "Hello World 1", + } + for k, v := range keys { + var pair *api.KVPair + switch v { + case "": + pair = &api.KVPair{Key: k, Value: nil} + default: + pair = &api.KVPair{Key: k, Value: []byte(v)} + } + if _, err := client.KV().Put(pair, nil); err != nil { + t.Fatalf("err: %#v", err) + } + } + args := []string{ + "-http-addr=" + a.HTTPAddr(), + "-recurse", + "-keys", + "foo", + } + + code := c.Run(args) + if code != 0 { + t.Fatalf("bad: %d. %#v", code, ui.ErrorWriter.String()) + } + output := ui.OutputWriter.String() + for key, value := range keys { + if !strings.Contains(output, key) { + t.Fatalf("bad %#v missing %q", output, key) + } + if strings.Contains(output, key+":"+value) { + t.Fatalf("bad %#v expected no values for keys %q but received %q", output, key, value) + } + } +} +func TestKVGetCommand_DetailedKeysRecurse(t *testing.T) { + if testing.Short() { + t.Skip("too slow for testing.Short") + } + + t.Parallel() + a := agent.NewTestAgent(t, ``) + defer a.Shutdown() + client := a.Client() + + ui := cli.NewMockUi() + c := New(ui) + keys := map[string]string{ + "foo/": "", + "foo/a": "Hello World 2", + "foo1/a": "Hello World 1", + } + for k, v := range keys { + var pair *api.KVPair + switch v { + case "": + pair = &api.KVPair{Key: k, Value: nil} + default: + pair = &api.KVPair{Key: k, Value: []byte(v)} + } + if _, err := client.KV().Put(pair, nil); err != nil { + t.Fatalf("err: %#v", err) + } + } + args := []string{ + "-http-addr=" + a.HTTPAddr(), + "-recurse", + "-keys", + "-detailed", + "foo", + } + + code := c.Run(args) + if code != 0 { + t.Fatalf("bad: %d. 
%#v", code, ui.ErrorWriter.String()) + } + output := ui.OutputWriter.String() + for key, value := range keys { + if value != "" && strings.Contains(output, value) { + t.Fatalf("bad %#v expected no values for keys %q but received %q", output, key, value) + } + } +} From 30ff2e9a3596d79aaed0af1a322628996e5d109a Mon Sep 17 00:00:00 2001 From: alex <8968914+acpana@users.noreply.github.com> Date: Thu, 25 Aug 2022 16:32:59 -0700 Subject: [PATCH 36/93] peering: add peer health metric (#14004) Signed-off-by: acpana <8968914+acpana@users.noreply.github.com> --- agent/consul/leader_peering.go | 36 ++++-- agent/consul/leader_peering_test.go | 32 +++++ agent/consul/server.go | 1 + .../services/peerstream/server.go | 8 +- .../services/peerstream/stream_resources.go | 5 +- .../services/peerstream/stream_test.go | 52 +++++--- .../services/peerstream/stream_tracker.go | 62 +++++++++- .../peerstream/stream_tracker_test.go | 113 ++++++++++++++++-- 8 files changed, 262 insertions(+), 47 deletions(-) diff --git a/agent/consul/leader_peering.go b/agent/consul/leader_peering.go index d1823b026b..00128bcd87 100644 --- a/agent/consul/leader_peering.go +++ b/agent/consul/leader_peering.go @@ -31,11 +31,18 @@ import ( ) var leaderExportedServicesCountKey = []string{"consul", "peering", "exported_services"} +var leaderHealthyPeeringKey = []string{"consul", "peering", "healthy"} var LeaderPeeringMetrics = []prometheus.GaugeDefinition{ { Name: leaderExportedServicesCountKey, Help: "A gauge that tracks how many services are exported for the peering. " + - "The labels are \"peering\" and, for enterprise, \"partition\". " + + "The labels are \"peer_name\", \"peer_id\" and, for enterprise, \"partition\". " + + "We emit this metric every 9 seconds", + }, + { + Name: leaderHealthyPeeringKey, + Help: "A gauge that tracks how if a peering is healthy (1) or not (0). " + + "The labels are \"peer_name\", \"peer_id\" and, for enterprise, \"partition\". 
" + "We emit this metric every 9 seconds", }, } @@ -85,13 +92,6 @@ func (s *Server) emitPeeringMetricsOnce(logger hclog.Logger, metricsImpl *metric } for _, peer := range peers { - status, found := s.peerStreamServer.StreamStatus(peer.ID) - if !found { - logger.Trace("did not find status for", "peer_name", peer.Name) - continue - } - - esc := status.GetExportedServicesCount() part := peer.Partition labels := []metrics.Label{ {Name: "peer_name", Value: peer.Name}, @@ -101,7 +101,25 @@ func (s *Server) emitPeeringMetricsOnce(logger hclog.Logger, metricsImpl *metric labels = append(labels, metrics.Label{Name: "partition", Value: part}) } - metricsImpl.SetGaugeWithLabels(leaderExportedServicesCountKey, float32(esc), labels) + status, found := s.peerStreamServer.StreamStatus(peer.ID) + if found { + // exported services count metric + esc := status.GetExportedServicesCount() + metricsImpl.SetGaugeWithLabels(leaderExportedServicesCountKey, float32(esc), labels) + } + + // peering health metric + if status.NeverConnected { + metricsImpl.SetGaugeWithLabels(leaderHealthyPeeringKey, float32(math.NaN()), labels) + } else { + healthy := status.IsHealthy() + healthyInt := 0 + if healthy { + healthyInt = 1 + } + + metricsImpl.SetGaugeWithLabels(leaderHealthyPeeringKey, float32(healthyInt), labels) + } } return nil diff --git a/agent/consul/leader_peering_test.go b/agent/consul/leader_peering_test.go index 46a74b6ad3..d419303852 100644 --- a/agent/consul/leader_peering_test.go +++ b/agent/consul/leader_peering_test.go @@ -7,6 +7,7 @@ import ( "errors" "fmt" "io/ioutil" + "math" "testing" "time" @@ -974,6 +975,7 @@ func TestLeader_PeeringMetrics_emitPeeringMetrics(t *testing.T) { var ( s2PeerID1 = generateUUID() s2PeerID2 = generateUUID() + s2PeerID3 = generateUUID() testContextTimeout = 60 * time.Second lastIdx = uint64(0) ) @@ -1063,6 +1065,24 @@ func TestLeader_PeeringMetrics_emitPeeringMetrics(t *testing.T) { // mimic tracking exported services mst2.TrackExportedService(structs.ServiceName{Name: "d-service"}) mst2.TrackExportedService(structs.ServiceName{Name: "e-service"}) + + // pretend that the hearbeat happened + mst2.TrackRecvHeartbeat() + } + + // Simulate a peering that never connects + { + p3 := &pbpeering.Peering{ + ID: s2PeerID3, + Name: "my-peer-s4", + PeerID: token.PeerID, // doesn't much matter what these values are + PeerCAPems: token.CA, + PeerServerName: token.ServerName, + PeerServerAddresses: token.ServerAddresses, + } + require.True(t, p3.ShouldDial()) + lastIdx++ + require.NoError(t, s2.fsm.State().PeeringWrite(lastIdx, &pbpeering.PeeringWriteRequest{Peering: p3})) } // set up a metrics sink @@ -1092,6 +1112,18 @@ func TestLeader_PeeringMetrics_emitPeeringMetrics(t *testing.T) { require.True(r, ok, fmt.Sprintf("did not find the key %q", keyMetric2)) require.Equal(r, float32(2), metric2.Value) // for d, e services + + keyHealthyMetric2 := fmt.Sprintf("us-west.consul.peering.healthy;peer_name=my-peer-s3;peer_id=%s", s2PeerID2) + healthyMetric2, ok := intv.Gauges[keyHealthyMetric2] + require.True(r, ok, fmt.Sprintf("did not find the key %q", keyHealthyMetric2)) + + require.Equal(r, float32(1), healthyMetric2.Value) + + keyHealthyMetric3 := fmt.Sprintf("us-west.consul.peering.healthy;peer_name=my-peer-s4;peer_id=%s", s2PeerID3) + healthyMetric3, ok := intv.Gauges[keyHealthyMetric3] + require.True(r, ok, fmt.Sprintf("did not find the key %q", keyHealthyMetric3)) + + require.True(r, math.IsNaN(float64(healthyMetric3.Value))) }) } diff --git a/agent/consul/server.go 
b/agent/consul/server.go index 1afa74c91d..8f2986c3eb 100644 --- a/agent/consul/server.go +++ b/agent/consul/server.go @@ -742,6 +742,7 @@ func NewServer(config *Config, flat Deps, externalGRPCServer *grpc.Server) (*Ser return s.ForwardGRPC(s.grpcConnPool, info, fn) }, }) + s.peerStreamTracker.SetHeartbeatTimeout(s.peerStreamServer.Config.IncomingHeartbeatTimeout) s.peerStreamServer.Register(s.externalGRPCServer) // Initialize internal gRPC server. diff --git a/agent/grpc-external/services/peerstream/server.go b/agent/grpc-external/services/peerstream/server.go index 7254c60c7c..6568d7bf80 100644 --- a/agent/grpc-external/services/peerstream/server.go +++ b/agent/grpc-external/services/peerstream/server.go @@ -42,8 +42,8 @@ type Config struct { // outgoingHeartbeatInterval is how often we send a heartbeat. outgoingHeartbeatInterval time.Duration - // incomingHeartbeatTimeout is how long we'll wait between receiving heartbeats before we close the connection. - incomingHeartbeatTimeout time.Duration + // IncomingHeartbeatTimeout is how long we'll wait between receiving heartbeats before we close the connection. + IncomingHeartbeatTimeout time.Duration } //go:generate mockery --name ACLResolver --inpackage @@ -63,8 +63,8 @@ func NewServer(cfg Config) *Server { if cfg.outgoingHeartbeatInterval == 0 { cfg.outgoingHeartbeatInterval = defaultOutgoingHeartbeatInterval } - if cfg.incomingHeartbeatTimeout == 0 { - cfg.incomingHeartbeatTimeout = defaultIncomingHeartbeatTimeout + if cfg.IncomingHeartbeatTimeout == 0 { + cfg.IncomingHeartbeatTimeout = defaultIncomingHeartbeatTimeout } return &Server{ Config: cfg, diff --git a/agent/grpc-external/services/peerstream/stream_resources.go b/agent/grpc-external/services/peerstream/stream_resources.go index 657972b886..0e6b28f45a 100644 --- a/agent/grpc-external/services/peerstream/stream_resources.go +++ b/agent/grpc-external/services/peerstream/stream_resources.go @@ -406,7 +406,7 @@ func (s *Server) realHandleStream(streamReq HandleStreamRequest) error { // incomingHeartbeatCtx will complete if incoming heartbeats time out. incomingHeartbeatCtx, incomingHeartbeatCtxCancel := - context.WithTimeout(context.Background(), s.incomingHeartbeatTimeout) + context.WithTimeout(context.Background(), s.IncomingHeartbeatTimeout) // NOTE: It's important that we wrap the call to cancel in a wrapper func because during the loop we're // re-assigning the value of incomingHeartbeatCtxCancel and we want the defer to run on the last assigned // value, not the current value. @@ -605,7 +605,7 @@ func (s *Server) realHandleStream(streamReq HandleStreamRequest) error { // They just can't trace the execution properly for some reason (possibly golang/go#29587). 
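+			// Rebuilding the timeout context below gives the peer a fresh
+			// IncomingHeartbeatTimeout window before the stream is closed.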
//nolint:govet incomingHeartbeatCtx, incomingHeartbeatCtxCancel = - context.WithTimeout(context.Background(), s.incomingHeartbeatTimeout) + context.WithTimeout(context.Background(), s.IncomingHeartbeatTimeout) } case update := <-subCh: @@ -642,6 +642,7 @@ func (s *Server) realHandleStream(streamReq HandleStreamRequest) error { if err := streamSend(replResp); err != nil { return fmt.Errorf("failed to push data for %q: %w", update.CorrelationID, err) } + status.TrackSendSuccess() } } } diff --git a/agent/grpc-external/services/peerstream/stream_test.go b/agent/grpc-external/services/peerstream/stream_test.go index 49ba7be046..be4a44ec87 100644 --- a/agent/grpc-external/services/peerstream/stream_test.go +++ b/agent/grpc-external/services/peerstream/stream_test.go @@ -572,7 +572,7 @@ func TestStreamResources_Server_StreamTracker(t *testing.T) { }) }) - var lastSendSuccess time.Time + var lastSendAck, lastSendSuccess time.Time testutil.RunStep(t, "ack tracked as success", func(t *testing.T) { ack := &pbpeerstream.ReplicationMessage{ @@ -587,19 +587,22 @@ func TestStreamResources_Server_StreamTracker(t *testing.T) { }, } - lastSendSuccess = it.FutureNow(1) + lastSendAck = time.Date(2000, time.January, 1, 0, 0, 2, 0, time.UTC) + lastSendSuccess = time.Date(2000, time.January, 1, 0, 0, 3, 0, time.UTC) err := client.Send(ack) require.NoError(t, err) expect := Status{ - Connected: true, - LastAck: lastSendSuccess, + Connected: true, + LastAck: lastSendAck, + heartbeatTimeout: defaultIncomingHeartbeatTimeout, + LastSendSuccess: lastSendSuccess, } retry.Run(t, func(r *retry.R) { - status, ok := srv.StreamStatus(testPeerID) + rStatus, ok := srv.StreamStatus(testPeerID) require.True(r, ok) - require.Equal(r, expect, status) + require.Equal(r, expect, rStatus) }) }) @@ -621,23 +624,26 @@ func TestStreamResources_Server_StreamTracker(t *testing.T) { }, } - lastNack = it.FutureNow(1) + lastSendAck = time.Date(2000, time.January, 1, 0, 0, 4, 0, time.UTC) + lastNack = time.Date(2000, time.January, 1, 0, 0, 5, 0, time.UTC) err := client.Send(nack) require.NoError(t, err) lastNackMsg = "client peer was unable to apply resource: bad bad not good" expect := Status{ - Connected: true, - LastAck: lastSendSuccess, - LastNack: lastNack, - LastNackMessage: lastNackMsg, + Connected: true, + LastAck: lastSendAck, + LastNack: lastNack, + LastNackMessage: lastNackMsg, + heartbeatTimeout: defaultIncomingHeartbeatTimeout, + LastSendSuccess: lastSendSuccess, } retry.Run(t, func(r *retry.R) { - status, ok := srv.StreamStatus(testPeerID) + rStatus, ok := srv.StreamStatus(testPeerID) require.True(r, ok) - require.Equal(r, expect, status) + require.Equal(r, expect, rStatus) }) }) @@ -694,13 +700,15 @@ func TestStreamResources_Server_StreamTracker(t *testing.T) { expect := Status{ Connected: true, - LastAck: lastSendSuccess, + LastAck: lastSendAck, LastNack: lastNack, LastNackMessage: lastNackMsg, LastRecvResourceSuccess: lastRecvResourceSuccess, ImportedServices: map[string]struct{}{ api.String(): {}, }, + heartbeatTimeout: defaultIncomingHeartbeatTimeout, + LastSendSuccess: lastSendSuccess, } retry.Run(t, func(r *retry.R) { @@ -753,7 +761,7 @@ func TestStreamResources_Server_StreamTracker(t *testing.T) { expect := Status{ Connected: true, - LastAck: lastSendSuccess, + LastAck: lastSendAck, LastNack: lastNack, LastNackMessage: lastNackMsg, LastRecvResourceSuccess: lastRecvResourceSuccess, @@ -762,6 +770,8 @@ func TestStreamResources_Server_StreamTracker(t *testing.T) { ImportedServices: map[string]struct{}{ api.String(): {}, 
}, + heartbeatTimeout: defaultIncomingHeartbeatTimeout, + LastSendSuccess: lastSendSuccess, } retry.Run(t, func(r *retry.R) { @@ -785,7 +795,7 @@ func TestStreamResources_Server_StreamTracker(t *testing.T) { expect := Status{ Connected: true, - LastAck: lastSendSuccess, + LastAck: lastSendAck, LastNack: lastNack, LastNackMessage: lastNackMsg, LastRecvResourceSuccess: lastRecvResourceSuccess, @@ -795,6 +805,8 @@ func TestStreamResources_Server_StreamTracker(t *testing.T) { ImportedServices: map[string]struct{}{ api.String(): {}, }, + heartbeatTimeout: defaultIncomingHeartbeatTimeout, + LastSendSuccess: lastSendSuccess, } retry.Run(t, func(r *retry.R) { @@ -816,7 +828,7 @@ func TestStreamResources_Server_StreamTracker(t *testing.T) { expect := Status{ Connected: false, DisconnectErrorMessage: lastRecvErrorMsg, - LastAck: lastSendSuccess, + LastAck: lastSendAck, LastNack: lastNack, LastNackMessage: lastNackMsg, DisconnectTime: disconnectTime, @@ -827,6 +839,8 @@ func TestStreamResources_Server_StreamTracker(t *testing.T) { ImportedServices: map[string]struct{}{ api.String(): {}, }, + heartbeatTimeout: defaultIncomingHeartbeatTimeout, + LastSendSuccess: lastSendSuccess, } retry.Run(t, func(r *retry.R) { @@ -1129,7 +1143,7 @@ func TestStreamResources_Server_DisconnectsOnHeartbeatTimeout(t *testing.T) { srv, store := newTestServer(t, func(c *Config) { c.Tracker.SetClock(it.Now) - c.incomingHeartbeatTimeout = 5 * time.Millisecond + c.IncomingHeartbeatTimeout = 5 * time.Millisecond }) p := writePeeringToBeDialed(t, store, 1, "my-peer") @@ -1236,7 +1250,7 @@ func TestStreamResources_Server_KeepsConnectionOpenWithHeartbeat(t *testing.T) { srv, store := newTestServer(t, func(c *Config) { c.Tracker.SetClock(it.Now) - c.incomingHeartbeatTimeout = incomingHeartbeatTimeout + c.IncomingHeartbeatTimeout = incomingHeartbeatTimeout }) p := writePeeringToBeDialed(t, store, 1, "my-peer") diff --git a/agent/grpc-external/services/peerstream/stream_tracker.go b/agent/grpc-external/services/peerstream/stream_tracker.go index f7a451595d..ffde98ba32 100644 --- a/agent/grpc-external/services/peerstream/stream_tracker.go +++ b/agent/grpc-external/services/peerstream/stream_tracker.go @@ -16,6 +16,8 @@ type Tracker struct { // timeNow is a shim for testing. timeNow func() time.Time + + heartbeatTimeout time.Duration } func NewTracker() *Tracker { @@ -33,6 +35,12 @@ func (t *Tracker) SetClock(clock func() time.Time) { } } +func (t *Tracker) SetHeartbeatTimeout(heartbeatTimeout time.Duration) { + t.mu.Lock() + defer t.mu.Unlock() + t.heartbeatTimeout = heartbeatTimeout +} + // Register a stream for a given peer but do not mark it as connected. func (t *Tracker) Register(id string) (*MutableStatus, error) { t.mu.Lock() @@ -44,7 +52,7 @@ func (t *Tracker) Register(id string) (*MutableStatus, error) { func (t *Tracker) registerLocked(id string, initAsConnected bool) (*MutableStatus, bool, error) { status, ok := t.streams[id] if !ok { - status = newMutableStatus(t.timeNow, initAsConnected) + status = newMutableStatus(t.timeNow, t.heartbeatTimeout, initAsConnected) t.streams[id] = status return status, true, nil } @@ -101,7 +109,9 @@ func (t *Tracker) StreamStatus(id string) (resp Status, found bool) { s, ok := t.streams[id] if !ok { - return Status{}, false + return Status{ + NeverConnected: true, + }, false } return s.GetStatus(), true } @@ -142,9 +152,14 @@ type MutableStatus struct { // Status contains information about the replication stream to a peer cluster. // TODO(peering): There's a lot of fields here... 
type Status struct { + heartbeatTimeout time.Duration + // Connected is true when there is an open stream for the peer. Connected bool + // NeverConnected is true for peerings that have never connected, false otherwise. + NeverConnected bool + // DisconnectErrorMessage tracks the error that caused the stream to disconnect non-gracefully. // If the stream is connected or it disconnected gracefully it will be empty. DisconnectErrorMessage string @@ -167,6 +182,9 @@ type Status struct { // LastSendErrorMessage tracks the last error message when sending into the stream. LastSendErrorMessage string + // LastSendSuccess tracks the time of the last success response sent into the stream. + LastSendSuccess time.Time + // LastRecvHeartbeat tracks when we last received a heartbeat from our peer. LastRecvHeartbeat time.Time @@ -196,10 +214,40 @@ func (s *Status) GetExportedServicesCount() uint64 { return uint64(len(s.ExportedServices)) } -func newMutableStatus(now func() time.Time, connected bool) *MutableStatus { +// IsHealthy is a convenience func that returns true/ false for a peering status. +// We define a peering as unhealthy if its status satisfies one of the following: +// - If heartbeat hasn't been received within the IncomingHeartbeatTimeout +// - If the last sent error is newer than last sent success +// - If the last received error is newer than last received success +// If none of these conditions apply, we call the peering healthy. +func (s *Status) IsHealthy() bool { + if time.Now().Sub(s.LastRecvHeartbeat) > s.heartbeatTimeout { + // 1. If heartbeat hasn't been received for a while - report unhealthy + return false + } + + if s.LastSendError.After(s.LastSendSuccess) { + // 2. If last sent error is newer than last sent success - report unhealthy + return false + } + + if s.LastRecvError.After(s.LastRecvResourceSuccess) { + // 3. If last recv error is newer than last recv success - report unhealthy + return false + } + + return true +} + +func newMutableStatus(now func() time.Time, heartbeatTimeout time.Duration, connected bool) *MutableStatus { + if heartbeatTimeout.Microseconds() == 0 { + heartbeatTimeout = defaultIncomingHeartbeatTimeout + } return &MutableStatus{ Status: Status{ - Connected: connected, + Connected: connected, + heartbeatTimeout: heartbeatTimeout, + NeverConnected: !connected, }, timeNow: now, doneCh: make(chan struct{}), @@ -223,6 +271,12 @@ func (s *MutableStatus) TrackSendError(error string) { s.mu.Unlock() } +func (s *MutableStatus) TrackSendSuccess() { + s.mu.Lock() + s.LastSendSuccess = s.timeNow().UTC() + s.mu.Unlock() +} + // TrackRecvResourceSuccess tracks receiving a replicated resource. 
func (s *MutableStatus) TrackRecvResourceSuccess() { s.mu.Lock() diff --git a/agent/grpc-external/services/peerstream/stream_tracker_test.go b/agent/grpc-external/services/peerstream/stream_tracker_test.go index f7a9df321d..8cdcbc79a2 100644 --- a/agent/grpc-external/services/peerstream/stream_tracker_test.go +++ b/agent/grpc-external/services/peerstream/stream_tracker_test.go @@ -10,6 +10,97 @@ import ( "github.com/hashicorp/consul/sdk/testutil" ) +const ( + aPeerID = "63b60245-c475-426b-b314-4588d210859d" +) + +func TestStatus_IsHealthy(t *testing.T) { + type testcase struct { + name string + dontConnect bool + modifierFunc func(status *MutableStatus) + expectedVal bool + heartbeatTimeout time.Duration + } + + tcs := []testcase{ + { + name: "never connected, unhealthy", + expectedVal: false, + dontConnect: true, + }, + { + name: "no heartbeat, unhealthy", + expectedVal: false, + }, + { + name: "heartbeat is not received, unhealthy", + expectedVal: false, + modifierFunc: func(status *MutableStatus) { + // set heartbeat + status.LastRecvHeartbeat = time.Now().Add(-1 * time.Second) + }, + heartbeatTimeout: 1 * time.Second, + }, + { + name: "send error before send success", + expectedVal: false, + modifierFunc: func(status *MutableStatus) { + // set heartbeat + status.LastRecvHeartbeat = time.Now() + + status.LastSendSuccess = time.Now() + status.LastSendError = time.Now() + }, + }, + { + name: "received error before received success", + expectedVal: false, + modifierFunc: func(status *MutableStatus) { + // set heartbeat + status.LastRecvHeartbeat = time.Now() + + status.LastRecvResourceSuccess = time.Now() + status.LastRecvError = time.Now() + }, + }, + { + name: "healthy", + expectedVal: true, + modifierFunc: func(status *MutableStatus) { + // set heartbeat + status.LastRecvHeartbeat = time.Now() + }, + }, + } + + for _, tc := range tcs { + t.Run(tc.name, func(t *testing.T) { + tracker := NewTracker() + if tc.heartbeatTimeout.Microseconds() != 0 { + tracker.SetHeartbeatTimeout(tc.heartbeatTimeout) + } + + if !tc.dontConnect { + st, err := tracker.Connected(aPeerID) + require.NoError(t, err) + require.True(t, st.Connected) + + if tc.modifierFunc != nil { + tc.modifierFunc(st) + } + + require.Equal(t, tc.expectedVal, st.IsHealthy()) + + } else { + st, found := tracker.StreamStatus(aPeerID) + require.False(t, found) + require.Equal(t, tc.expectedVal, st.IsHealthy()) + } + }) + } +} + func TestTracker_EnsureConnectedDisconnected(t *testing.T) { tracker := NewTracker() peerID := "63b60245-c475-426b-b314-4588d210859d" @@ -29,7 +120,8 @@ func TestTracker_EnsureConnectedDisconnected(t *testing.T) { require.NoError(t, err) expect := Status{ - Connected: true, + Connected: true, + heartbeatTimeout: defaultIncomingHeartbeatTimeout, } status, ok := tracker.StreamStatus(peerID) @@ -55,8 +147,9 @@ func TestTracker_EnsureConnectedDisconnected(t *testing.T) { lastSuccess = it.base.Add(time.Duration(sequence) * time.Second).UTC() expect := Status{ - Connected: true, - LastAck: lastSuccess, + Connected: true, + LastAck: lastSuccess, + heartbeatTimeout: defaultIncomingHeartbeatTimeout, } require.Equal(t, expect, status) }) @@ -66,9 +159,10 @@ func TestTracker_EnsureConnectedDisconnected(t *testing.T) { sequence++ expect := Status{ - Connected: false, - DisconnectTime: it.base.Add(time.Duration(sequence) * time.Second).UTC(), - LastAck: lastSuccess, + Connected: false, + DisconnectTime: it.base.Add(time.Duration(sequence) * time.Second).UTC(), + LastAck: lastSuccess, + heartbeatTimeout: 
defaultIncomingHeartbeatTimeout, } status, ok := tracker.StreamStatus(peerID) require.True(t, ok) @@ -80,8 +174,9 @@ func TestTracker_EnsureConnectedDisconnected(t *testing.T) { require.NoError(t, err) expect := Status{ - Connected: true, - LastAck: lastSuccess, + Connected: true, + LastAck: lastSuccess, + heartbeatTimeout: defaultIncomingHeartbeatTimeout, // DisconnectTime gets cleared on re-connect. } @@ -96,7 +191,7 @@ func TestTracker_EnsureConnectedDisconnected(t *testing.T) { status, ok := tracker.StreamStatus(peerID) require.False(t, ok) - require.Zero(t, status) + require.Equal(t, Status{NeverConnected: true}, status) }) } From 6ddcc046136d4e5a6a5f05fb59afdac0e070f7d3 Mon Sep 17 00:00:00 2001 From: "Chris S. Kim" Date: Fri, 26 Aug 2022 10:27:13 -0400 Subject: [PATCH 37/93] Replace ring buffer with async version (#14314) We need to watch for changes to peerings and update the server addresses which get served by the ring buffer. Also, if there is an active connection for a peer, we are getting up-to-date server addresses from the replication stream and can safely ignore the token's addresses which may be stale. --- agent/consul/leader_peering.go | 100 ++++++++++++++++---- agent/consul/leader_peering_test.go | 137 ++++++++++++++++++++++++++++ agent/consul/state/peering.go | 5 +- agent/rpc/peering/service.go | 16 +++- proto/pbpeering/peering.go | 4 +- 5 files changed, 233 insertions(+), 29 deletions(-) diff --git a/agent/consul/leader_peering.go b/agent/consul/leader_peering.go index 00128bcd87..556f1b5bfc 100644 --- a/agent/consul/leader_peering.go +++ b/agent/consul/leader_peering.go @@ -295,13 +295,6 @@ func (s *Server) establishStream(ctx context.Context, logger hclog.Logger, ws me return fmt.Errorf("failed to build TLS dial option from peering: %w", err) } - // Create a ring buffer to cycle through peer addresses in the retry loop below. - buffer := ring.New(len(peer.PeerServerAddresses)) - for _, addr := range peer.PeerServerAddresses { - buffer.Value = addr - buffer = buffer.Next() - } - secret, err := s.fsm.State().PeeringSecretsRead(ws, peer.ID) if err != nil { return fmt.Errorf("failed to read secret for peering: %w", err) @@ -312,27 +305,26 @@ func (s *Server) establishStream(ctx context.Context, logger hclog.Logger, ws me logger.Trace("establishing stream to peer") - retryCtx, cancel := context.WithCancel(ctx) - cancelFns[peer.ID] = cancel - streamStatus, err := s.peerStreamTracker.Register(peer.ID) if err != nil { return fmt.Errorf("failed to register stream: %v", err) } + streamCtx, cancel := context.WithCancel(ctx) + cancelFns[peer.ID] = cancel + + // Start a goroutine to watch for updates to peer server addresses. + // The latest valid server address can be received from nextServerAddr. + nextServerAddr := make(chan string) + go s.watchPeerServerAddrs(streamCtx, peer, nextServerAddr) + // Establish a stream-specific retry so that retrying stream/conn errors isn't dependent on state store changes. - go retryLoopBackoffPeering(retryCtx, logger, func() error { + go retryLoopBackoffPeering(streamCtx, logger, func() error { // Try a new address on each iteration by advancing the ring buffer on errors. 
- defer func() { - buffer = buffer.Next() - }() - addr, ok := buffer.Value.(string) - if !ok { - return fmt.Errorf("peer server address type %T is not a string", buffer.Value) - } + addr := <-nextServerAddr logger.Trace("dialing peer", "addr", addr) - conn, err := grpc.DialContext(retryCtx, addr, + conn, err := grpc.DialContext(streamCtx, addr, // TODO(peering): use a grpc.WithStatsHandler here?) tlsOption, // For keep alive parameters there is a larger comment in ClientConnPool.dial about that. @@ -349,7 +341,7 @@ func (s *Server) establishStream(ctx context.Context, logger hclog.Logger, ws me defer conn.Close() client := pbpeerstream.NewPeerStreamServiceClient(conn) - stream, err := client.StreamResources(retryCtx) + stream, err := client.StreamResources(streamCtx) if err != nil { return err } @@ -397,6 +389,74 @@ func (s *Server) establishStream(ctx context.Context, logger hclog.Logger, ws me return nil } +// watchPeerServerAddrs sends an up-to-date peer server address to nextServerAddr. +// It loads the server addresses into a ring buffer and cycles through them until: +// 1. streamCtx is cancelled (peer is deleted) +// 2. the peer is modified and the watchset fires. +// +// In case (2) we refetch the peering and rebuild the ring buffer. +func (s *Server) watchPeerServerAddrs(ctx context.Context, peer *pbpeering.Peering, nextServerAddr chan<- string) { + defer close(nextServerAddr) + + // we initialize the ring buffer with the peer passed to `establishStream` + // because the caller has pre-checked `peer.ShouldDial`, guaranteeing + // at least one server address. + // + // IMPORTANT: ringbuf must always be length > 0 or else `<-nextServerAddr` may block. + ringbuf := ring.New(len(peer.PeerServerAddresses)) + for _, addr := range peer.PeerServerAddresses { + ringbuf.Value = addr + ringbuf = ringbuf.Next() + } + innerWs := memdb.NewWatchSet() + _, _, err := s.fsm.State().PeeringReadByID(innerWs, peer.ID) + if err != nil { + s.logger.Warn("failed to watch for changes to peer; server addresses may become stale over time.", + "peer_id", peer.ID, + "error", err) + } + + fetchAddrs := func() error { + // reinstantiate innerWs to prevent it from growing indefinitely + innerWs = memdb.NewWatchSet() + _, peering, err := s.fsm.State().PeeringReadByID(innerWs, peer.ID) + if err != nil { + return fmt.Errorf("failed to fetch peer %q: %w", peer.ID, err) + } + if !peering.IsActive() { + return fmt.Errorf("peer %q is no longer active", peer.ID) + } + if len(peering.PeerServerAddresses) == 0 { + return fmt.Errorf("peer %q has no addresses to dial", peer.ID) + } + + ringbuf = ring.New(len(peering.PeerServerAddresses)) + for _, addr := range peering.PeerServerAddresses { + ringbuf.Value = addr + ringbuf = ringbuf.Next() + } + return nil + } + + for { + select { + case nextServerAddr <- ringbuf.Value.(string): + ringbuf = ringbuf.Next() + case err := <-innerWs.WatchCh(ctx): + if err != nil { + // context was cancelled + return + } + // watch fired so we refetch the peering and rebuild the ring buffer + if err := fetchAddrs(); err != nil { + s.logger.Warn("watchset for peer was fired but failed to update server addresses", + "peer_id", peer.ID, + "error", err) + } + } + } +} + func (s *Server) startPeeringDeferredDeletion(ctx context.Context) { s.leaderRoutineManager.Start(ctx, peeringDeletionRoutineName, s.runPeeringDeletions) } diff --git a/agent/consul/leader_peering_test.go b/agent/consul/leader_peering_test.go index d419303852..b8b5166d8f 100644 --- a/agent/consul/leader_peering_test.go +++ 
b/agent/consul/leader_peering_test.go @@ -18,6 +18,7 @@ import ( "google.golang.org/grpc" "google.golang.org/grpc/codes" grpcstatus "google.golang.org/grpc/status" + "google.golang.org/protobuf/proto" "github.com/hashicorp/consul/acl" "github.com/hashicorp/consul/agent/consul/state" @@ -25,6 +26,7 @@ import ( "github.com/hashicorp/consul/api" "github.com/hashicorp/consul/proto/pbpeering" "github.com/hashicorp/consul/sdk/freeport" + "github.com/hashicorp/consul/sdk/testutil" "github.com/hashicorp/consul/sdk/testutil/retry" "github.com/hashicorp/consul/testrpc" "github.com/hashicorp/consul/types" @@ -1375,3 +1377,138 @@ func Test_isFailedPreconditionErr(t *testing.T) { werr := fmt.Errorf("wrapped: %w", err) assert.True(t, isFailedPreconditionErr(werr)) } + +func Test_Leader_PeeringSync_ServerAddressUpdates(t *testing.T) { + if testing.Short() { + t.Skip("too slow for testing.Short") + } + + // We want 1s retries for this test + orig := maxRetryBackoff + maxRetryBackoff = 1 + t.Cleanup(func() { maxRetryBackoff = orig }) + + _, acceptor := testServerWithConfig(t, func(c *Config) { + c.NodeName = "acceptor" + c.Datacenter = "dc1" + c.TLSConfig.Domain = "consul" + }) + testrpc.WaitForLeader(t, acceptor.RPC, "dc1") + + // Create a peering by generating a token + ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second) + t.Cleanup(cancel) + + conn, err := grpc.DialContext(ctx, acceptor.config.RPCAddr.String(), + grpc.WithContextDialer(newServerDialer(acceptor.config.RPCAddr.String())), + grpc.WithInsecure(), + grpc.WithBlock()) + require.NoError(t, err) + defer conn.Close() + + acceptorClient := pbpeering.NewPeeringServiceClient(conn) + + req := pbpeering.GenerateTokenRequest{ + PeerName: "my-peer-dialer", + } + resp, err := acceptorClient.GenerateToken(ctx, &req) + require.NoError(t, err) + + // Bring up dialer and establish a peering with acceptor's token so that it attempts to dial. 
+	_, dialer := testServerWithConfig(t, func(c *Config) {
+		c.NodeName = "dialer"
+		c.Datacenter = "dc2"
+		c.PrimaryDatacenter = "dc2"
+	})
+	testrpc.WaitForLeader(t, dialer.RPC, "dc2")
+
+	// Create a peering at dialer by establishing a peering with acceptor's token
+	ctx, cancel = context.WithTimeout(context.Background(), 3*time.Second)
+	t.Cleanup(cancel)
+
+	conn, err = grpc.DialContext(ctx, dialer.config.RPCAddr.String(),
+		grpc.WithContextDialer(newServerDialer(dialer.config.RPCAddr.String())),
+		grpc.WithInsecure(),
+		grpc.WithBlock())
+	require.NoError(t, err)
+	defer conn.Close()
+
+	dialerClient := pbpeering.NewPeeringServiceClient(conn)
+
+	establishReq := pbpeering.EstablishRequest{
+		PeerName:     "my-peer-acceptor",
+		PeeringToken: resp.PeeringToken,
+	}
+	_, err = dialerClient.Establish(ctx, &establishReq)
+	require.NoError(t, err)
+
+	p, err := dialerClient.PeeringRead(ctx, &pbpeering.PeeringReadRequest{Name: "my-peer-acceptor"})
+	require.NoError(t, err)
+
+	retry.Run(t, func(r *retry.R) {
+		status, found := dialer.peerStreamServer.StreamStatus(p.Peering.ID)
+		require.True(r, found)
+		require.True(r, status.Connected)
+	})
+
+	testutil.RunStep(t, "calling establish with active connection does not overwrite server addresses", func(t *testing.T) {
+		ctx, cancel = context.WithTimeout(context.Background(), 3*time.Second)
+		t.Cleanup(cancel)
+
+		// generate a new token from the acceptor
+		req := pbpeering.GenerateTokenRequest{
+			PeerName: "my-peer-dialer",
+		}
+		resp, err := acceptorClient.GenerateToken(ctx, &req)
+		require.NoError(t, err)
+
+		token, err := acceptor.peeringBackend.DecodeToken([]byte(resp.PeeringToken))
+		require.NoError(t, err)
+
+		// we will update the token with bad addresses to assert it doesn't clobber existing ones
+		token.ServerAddresses = []string{"1.2.3.4:1234"}
+
+		badToken, err := acceptor.peeringBackend.EncodeToken(token)
+		require.NoError(t, err)
+
+		ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
+		t.Cleanup(cancel)
+
+		// Try establishing.
+		// This call will only succeed if the bad address was not used in the calls to exchange the peering secret.
+		establishReq := pbpeering.EstablishRequest{
+			PeerName:     "my-peer-acceptor",
+			PeeringToken: string(badToken),
+		}
+		_, err = dialerClient.Establish(ctx, &establishReq)
+		require.NoError(t, err)
+
+		p, err := dialerClient.PeeringRead(ctx, &pbpeering.PeeringReadRequest{Name: "my-peer-acceptor"})
+		require.NoError(t, err)
+		require.NotContains(t, p.Peering.PeerServerAddresses, "1.2.3.4:1234")
+	})
+
+	testutil.RunStep(t, "updated server addresses are picked up by the leader", func(t *testing.T) {
+		// force close the acceptor's gRPC server so the dialer retries with a new address.
+		acceptor.externalGRPCServer.Stop()
+
+		clone := proto.Clone(p.Peering)
+		updated := clone.(*pbpeering.Peering)
+		// start with a bad address so we can assert for a specific error
+		updated.PeerServerAddresses = append([]string{
+			"bad",
+		}, p.Peering.PeerServerAddresses...)
+
+		// this write will wake up the watch on the leader to refetch server addresses
+		require.NoError(t, dialer.fsm.State().PeeringWrite(2000, &pbpeering.PeeringWriteRequest{Peering: updated}))
+
+		retry.Run(t, func(r *retry.R) {
+			status, found := dialer.peerStreamServer.StreamStatus(p.Peering.ID)
+			require.True(r, found)
+			// We assert for this error to be set which would indicate that we iterated
+			// through a bad address.
+ require.Contains(r, status.LastSendErrorMessage, "transport: Error while dialing dial tcp: address bad: missing port in address") + require.False(r, status.Connected) + }) + }) +} diff --git a/agent/consul/state/peering.go b/agent/consul/state/peering.go index f56fbe0e15..287e822919 100644 --- a/agent/consul/state/peering.go +++ b/agent/consul/state/peering.go @@ -7,12 +7,13 @@ import ( "strings" "github.com/golang/protobuf/proto" + "github.com/hashicorp/go-memdb" + "github.com/hashicorp/consul/acl" "github.com/hashicorp/consul/agent/configentry" "github.com/hashicorp/consul/agent/structs" "github.com/hashicorp/consul/lib/maps" "github.com/hashicorp/consul/proto/pbpeering" - "github.com/hashicorp/go-memdb" ) const ( @@ -981,7 +982,7 @@ func peeringsForServiceTxn(tx ReadTxn, ws memdb.WatchSet, serviceName string, en if idx > maxIdx { maxIdx = idx } - if peering == nil || !peering.IsActive() { + if !peering.IsActive() { continue } peerings = append(peerings, peering) diff --git a/agent/rpc/peering/service.go b/agent/rpc/peering/service.go index ed9cd9e4fa..20bbafc1cf 100644 --- a/agent/rpc/peering/service.go +++ b/agent/rpc/peering/service.go @@ -8,7 +8,6 @@ import ( "time" "github.com/armon/go-metrics" - "github.com/hashicorp/consul/proto/pbpeerstream" "github.com/hashicorp/go-hclog" "github.com/hashicorp/go-memdb" "github.com/hashicorp/go-multierror" @@ -27,6 +26,7 @@ import ( "github.com/hashicorp/consul/agent/structs" "github.com/hashicorp/consul/lib" "github.com/hashicorp/consul/proto/pbpeering" + "github.com/hashicorp/consul/proto/pbpeerstream" ) var ( @@ -379,6 +379,7 @@ func (s *Server) Establish( } var id string + serverAddrs := tok.ServerAddresses if existing == nil { id, err = lib.GenerateUUID(s.Backend.CheckPeeringUUID) if err != nil { @@ -386,6 +387,11 @@ func (s *Server) Establish( } } else { id = existing.ID + // If there is a connected stream, assume that the existing ServerAddresses + // are up to date and do not try to overwrite them with the token's addresses. + if status, ok := s.Tracker.StreamStatus(id); ok && status.Connected { + serverAddrs = existing.PeerServerAddresses + } } // validate that this peer name is not being used as an acceptor already @@ -397,7 +403,7 @@ func (s *Server) Establish( ID: id, Name: req.PeerName, PeerCAPems: tok.CA, - PeerServerAddresses: tok.ServerAddresses, + PeerServerAddresses: serverAddrs, PeerServerName: tok.ServerName, PeerID: tok.PeerID, Meta: req.Meta, @@ -418,9 +424,9 @@ func (s *Server) Establish( } var exchangeResp *pbpeerstream.ExchangeSecretResponse - // Loop through the token's addresses once, attempting to fetch the long-lived stream secret. + // Loop through the known server addresses once, attempting to fetch the long-lived stream secret. var dialErrors error - for _, addr := range peering.PeerServerAddresses { + for _, addr := range serverAddrs { exchangeResp, err = exchangeSecret(ctx, addr, tlsOption, &exchangeReq) if err != nil { dialErrors = multierror.Append(dialErrors, fmt.Errorf("failed to exchange peering secret with %q: %w", addr, err)) @@ -720,7 +726,7 @@ func (s *Server) PeeringDelete(ctx context.Context, req *pbpeering.PeeringDelete return nil, err } - if existing == nil || !existing.IsActive() { + if !existing.IsActive() { // Return early when the Peering doesn't exist or is already marked for deletion. // We don't return nil because the pb will fail to marshal. 
return &pbpeering.PeeringDeleteResponse{}, nil diff --git a/proto/pbpeering/peering.go b/proto/pbpeering/peering.go index d31328b589..74f5a52f08 100644 --- a/proto/pbpeering/peering.go +++ b/proto/pbpeering/peering.go @@ -143,10 +143,10 @@ func PeeringStateFromAPI(t api.PeeringState) PeeringState { } func (p *Peering) IsActive() bool { - if p != nil && p.State == PeeringState_TERMINATED { + if p == nil || p.State == PeeringState_TERMINATED { return false } - if p == nil || p.DeletedAt == nil { + if p.DeletedAt == nil { return true } From e2fe8b8d65638877ac4cc64c1957b69dac82ed7d Mon Sep 17 00:00:00 2001 From: "Chris S. Kim" Date: Fri, 26 Aug 2022 11:14:02 -0400 Subject: [PATCH 38/93] Fix tests for enterprise --- agent/consul/state/catalog_test.go | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/agent/consul/state/catalog_test.go b/agent/consul/state/catalog_test.go index ca2bded03b..e2310dbb7c 100644 --- a/agent/consul/state/catalog_test.go +++ b/agent/consul/state/catalog_test.go @@ -2104,10 +2104,13 @@ func TestStateStore_Services(t *testing.T) { Address: "1.1.1.1", Port: 1111, } + ns1.EnterpriseMeta.Normalize() if err := s.EnsureService(2, "node1", ns1); err != nil { t.Fatalf("err: %s", err) } ns1Dogs := testRegisterService(t, s, 3, "node1", "dogs") + ns1Dogs.EnterpriseMeta.Normalize() + testRegisterNode(t, s, 4, "node2") ns2 := &structs.NodeService{ ID: "service3", @@ -2116,6 +2119,7 @@ func TestStateStore_Services(t *testing.T) { Address: "1.1.1.1", Port: 1111, } + ns2.EnterpriseMeta.Normalize() if err := s.EnsureService(5, "node2", ns2); err != nil { t.Fatalf("err: %s", err) } @@ -2139,7 +2143,7 @@ func TestStateStore_Services(t *testing.T) { ns1.ToServiceNode("node1"), ns2.ToServiceNode("node2"), } - assertDeepEqual(t, services, expected, cmpopts.IgnoreFields(structs.ServiceNode{}, "RaftIndex")) + assertDeepEqual(t, expected, services, cmpopts.IgnoreFields(structs.ServiceNode{}, "RaftIndex")) // Deleting a node with a service should fire the watch. 
if err := s.DeleteNode(6, "node1", nil, ""); err != nil { @@ -2178,6 +2182,7 @@ func TestStateStore_ServicesByNodeMeta(t *testing.T) { Address: "1.1.1.1", Port: 1111, } + ns1.EnterpriseMeta.Normalize() if err := s.EnsureService(2, "node0", ns1); err != nil { t.Fatalf("err: %s", err) } @@ -2188,6 +2193,7 @@ func TestStateStore_ServicesByNodeMeta(t *testing.T) { Address: "1.1.1.1", Port: 1111, } + ns2.EnterpriseMeta.Normalize() if err := s.EnsureService(3, "node1", ns2); err != nil { t.Fatalf("err: %s", err) } From eb0c5bb9a17ea2a6643eaea240fc3526bc8f129c Mon Sep 17 00:00:00 2001 From: smamindla57 <106655516+smamindla57@users.noreply.github.com> Date: Fri, 26 Aug 2022 21:43:46 +0530 Subject: [PATCH 39/93] Updated consul monitoring with Newrelic APM (#14360) * added newrelic consul quickstart link * adding HCP Consul Co-authored-by: David Yu --- website/content/docs/integrate/partnerships.mdx | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/website/content/docs/integrate/partnerships.mdx b/website/content/docs/integrate/partnerships.mdx index e7c5bb2228..057e3464cf 100644 --- a/website/content/docs/integrate/partnerships.mdx +++ b/website/content/docs/integrate/partnerships.mdx @@ -99,12 +99,13 @@ Here are links to resources, documentation, examples and best practices to guide - [Consul Telemetry Documentation](/docs/agent/telemetry) - [Monitoring Consul with Datadog APM](https://www.datadoghq.com/blog/consul-datadog/) - [Monitoring Consul with Dynatrace APM](https://www.dynatrace.com/news/blog/automatic-intelligent-observability-into-your-hashicorp-consul-service-mesh/) +- [Monitoring Consul with New Relic APM](https://newrelic.com/instant-observability/consul/b65825cc-faee-47b5-8d7c-6d60d6ab3c59) +- [Monitoring HCP Consul with New Relic APM](https://newrelic.com/instant-observability/hcp-consul/bc99ad15-7aba-450e-8236-6ea667d50cae) **Logging** - [Monitor Consul with Logz.io](https://www.hashicorp.com/integrations/logz-io/consul) - [Monitor Consul with Splunk SignalFx](https://www.hashicorp.com/integrations/splunksignalfx/consul) -- [Consul Datacenter Monitoring with New Relic](https://www.hashicorp.com/integrations/new-relic/consul) #### Platform: From 650e48624df0d5f7dcc9dabf088709f87c2390a9 Mon Sep 17 00:00:00 2001 From: freddygv Date: Fri, 26 Aug 2022 10:52:47 -0600 Subject: [PATCH 40/93] Allow terminated peerings to be deleted Peerings are terminated when a peer decides to delete the peering from their end. Deleting a peering sends a termination message to the peer and triggers them to mark the peering as terminated but does NOT delete the peering itself. This is to prevent peerings from disappearing from both sides just because one side deleted them. Previously the Delete endpoint was skipping the deletion if the peering was not marked as active. However, terminated peerings are also inactive. This PR makes some updates so that peerings marked as terminated can be deleted by users. 
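To make the resulting rules easier to follow, here is a minimal sketch of the
write-time decision table described above. The state constants and error text
are illustrative stand-ins for the pbpeering types, not the actual store code:

```go
package main

import (
	"errors"
	"fmt"
)

// PeeringState is a stand-in for the pbpeering.PeeringState enum.
type PeeringState int

const (
	StateEstablishing PeeringState = iota
	StateTerminated
	StateDeleting
)

// validateWrite condenses the rules from this patch: terminating or
// re-deleting an already-deleting peering is a no-op, repeated terminations
// are no-ops, and any other write to a peering marked for deletion is
// rejected.
func validateWrite(existing, incoming PeeringState) (noop bool, err error) {
	switch {
	case existing == StateDeleting &&
		(incoming == StateTerminated || incoming == StateDeleting):
		return true, nil // nothing to do
	case existing == StateTerminated && incoming == StateTerminated:
		return true, nil // nothing to do
	case existing == StateDeleting:
		return false, errors.New("cannot write to peering that is marked for deletion")
	default:
		return false, nil // write proceeds
	}
}

func main() {
	noop, err := validateWrite(StateDeleting, StateTerminated)
	fmt.Println(noop, err) // true <nil>
}
```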
--- agent/consul/state/peering.go | 23 ++- agent/consul/state/peering_test.go | 228 +++++++++++++++++++++++++---- agent/rpc/peering/service.go | 3 +- agent/rpc/peering/service_test.go | 64 ++++---- 4 files changed, 258 insertions(+), 60 deletions(-) diff --git a/agent/consul/state/peering.go b/agent/consul/state/peering.go index 287e822919..9457dd811a 100644 --- a/agent/consul/state/peering.go +++ b/agent/consul/state/peering.go @@ -549,8 +549,25 @@ func (s *Store) PeeringWrite(idx uint64, req *pbpeering.PeeringWriteRequest) err if req.Peering.ID != existing.ID { return fmt.Errorf("A peering already exists with the name %q and a different ID %q", req.Peering.Name, existing.ID) } + + // Nothing to do if our peer wants to terminate the peering but the peering is already marked for deletion. + if existing.State == pbpeering.PeeringState_DELETING && req.Peering.State == pbpeering.PeeringState_TERMINATED { + return nil + } + + // No-op deletion + if existing.State == pbpeering.PeeringState_DELETING && req.Peering.State == pbpeering.PeeringState_DELETING { + return nil + } + + // No-op termination + if existing.State == pbpeering.PeeringState_TERMINATED && req.Peering.State == pbpeering.PeeringState_TERMINATED { + return nil + } + // Prevent modifications to Peering marked for deletion. - if !existing.IsActive() { + // This blocks generating new peering tokens or re-establishing the peering until the peering is done deleting. + if existing.State == pbpeering.PeeringState_DELETING { return fmt.Errorf("cannot write to peering that is marked for deletion") } @@ -582,8 +599,8 @@ func (s *Store) PeeringWrite(idx uint64, req *pbpeering.PeeringWriteRequest) err req.Peering.ModifyIndex = idx } - // Ensure associated secrets are cleaned up when a peering is marked for deletion. - if req.Peering.State == pbpeering.PeeringState_DELETING { + // Ensure associated secrets are cleaned up when a peering is marked for deletion or terminated. + if !req.Peering.IsActive() { if err := peeringSecretsDeleteTxn(tx, req.Peering.ID, req.Peering.ShouldDial()); err != nil { return fmt.Errorf("failed to delete peering secrets: %w", err) } diff --git a/agent/consul/state/peering_test.go b/agent/consul/state/peering_test.go index bfce75295c..1dc2446fe1 100644 --- a/agent/consul/state/peering_test.go +++ b/agent/consul/state/peering_test.go @@ -1112,16 +1112,22 @@ func TestStore_PeeringWrite(t *testing.T) { // Each case depends on the previous. 
s := NewStateStore(nil) + testTime := time.Now() + + type expectations struct { + peering *pbpeering.Peering + secrets *pbpeering.PeeringSecrets + err string + } type testcase struct { - name string - input *pbpeering.PeeringWriteRequest - expectSecrets *pbpeering.PeeringSecrets - expectErr string + name string + input *pbpeering.PeeringWriteRequest + expect expectations } run := func(t *testing.T, tc testcase) { err := s.PeeringWrite(10, tc.input) - if tc.expectErr != "" { - testutil.RequireErrorContains(t, err, tc.expectErr) + if tc.expect.err != "" { + testutil.RequireErrorContains(t, err, tc.expect.err) return } require.NoError(t, err) @@ -1133,57 +1139,185 @@ func TestStore_PeeringWrite(t *testing.T) { _, p, err := s.PeeringRead(nil, q) require.NoError(t, err) require.NotNil(t, p) - require.Equal(t, tc.input.Peering.State, p.State) - require.Equal(t, tc.input.Peering.Name, p.Name) + require.Equal(t, tc.expect.peering.State, p.State) + require.Equal(t, tc.expect.peering.Name, p.Name) + require.Equal(t, tc.expect.peering.Meta, p.Meta) + if tc.expect.peering.DeletedAt != nil { + require.Equal(t, tc.expect.peering.DeletedAt, p.DeletedAt) + } secrets, err := s.PeeringSecretsRead(nil, tc.input.Peering.ID) require.NoError(t, err) - prototest.AssertDeepEqual(t, tc.expectSecrets, secrets) + prototest.AssertDeepEqual(t, tc.expect.secrets, secrets) } tcs := []testcase{ { name: "create baz", input: &pbpeering.PeeringWriteRequest{ Peering: &pbpeering.Peering{ - ID: testBazPeerID, - Name: "baz", - Partition: structs.NodeEnterpriseMetaInDefaultPartition().PartitionOrEmpty(), + ID: testBazPeerID, + Name: "baz", + State: pbpeering.PeeringState_ESTABLISHING, + PeerServerAddresses: []string{"localhost:8502"}, + Partition: structs.NodeEnterpriseMetaInDefaultPartition().PartitionOrEmpty(), }, SecretsRequest: &pbpeering.SecretsWriteRequest{ PeerID: testBazPeerID, - Request: &pbpeering.SecretsWriteRequest_GenerateToken{ - GenerateToken: &pbpeering.SecretsWriteRequest_GenerateTokenRequest{ - EstablishmentSecret: testBazSecretID, + Request: &pbpeering.SecretsWriteRequest_Establish{ + Establish: &pbpeering.SecretsWriteRequest_EstablishRequest{ + ActiveStreamSecret: testBazSecretID, }, }, }, }, - expectSecrets: &pbpeering.PeeringSecrets{ - PeerID: testBazPeerID, - Establishment: &pbpeering.PeeringSecrets_Establishment{ - SecretID: testBazSecretID, + expect: expectations{ + peering: &pbpeering.Peering{ + ID: testBazPeerID, + Name: "baz", + State: pbpeering.PeeringState_ESTABLISHING, }, + secrets: &pbpeering.PeeringSecrets{ + PeerID: testBazPeerID, + Stream: &pbpeering.PeeringSecrets_Stream{ + ActiveSecretID: testBazSecretID, + }, + }, + }, + }, + { + name: "cannot change ID for baz", + input: &pbpeering.PeeringWriteRequest{ + Peering: &pbpeering.Peering{ + ID: "123", + Name: "baz", + State: pbpeering.PeeringState_FAILING, + PeerServerAddresses: []string{"localhost:8502"}, + Partition: structs.NodeEnterpriseMetaInDefaultPartition().PartitionOrEmpty(), + }, + }, + expect: expectations{ + err: `A peering already exists with the name "baz" and a different ID`, }, }, { name: "update baz", input: &pbpeering.PeeringWriteRequest{ Peering: &pbpeering.Peering{ - ID: testBazPeerID, - Name: "baz", - State: pbpeering.PeeringState_FAILING, + ID: testBazPeerID, + Name: "baz", + State: pbpeering.PeeringState_FAILING, + PeerServerAddresses: []string{"localhost:8502"}, + Partition: structs.NodeEnterpriseMetaInDefaultPartition().PartitionOrEmpty(), + }, + }, + expect: expectations{ + peering: &pbpeering.Peering{ + ID: 
testBazPeerID, + Name: "baz", + State: pbpeering.PeeringState_FAILING, + }, + secrets: &pbpeering.PeeringSecrets{ + PeerID: testBazPeerID, + Stream: &pbpeering.PeeringSecrets_Stream{ + ActiveSecretID: testBazSecretID, + }, + }, + }, + }, + { + name: "if no state was included in request it is inherited from existing", + input: &pbpeering.PeeringWriteRequest{ + Peering: &pbpeering.Peering{ + ID: testBazPeerID, + Name: "baz", + // Send undefined state. + // State: pbpeering.PeeringState_FAILING, + PeerServerAddresses: []string{"localhost:8502"}, + Partition: structs.NodeEnterpriseMetaInDefaultPartition().PartitionOrEmpty(), + }, + }, + expect: expectations{ + peering: &pbpeering.Peering{ + ID: testBazPeerID, + Name: "baz", + // Previous failing state is picked up. + State: pbpeering.PeeringState_FAILING, + }, + secrets: &pbpeering.PeeringSecrets{ + PeerID: testBazPeerID, + Stream: &pbpeering.PeeringSecrets_Stream{ + ActiveSecretID: testBazSecretID, + }, + }, + }, + }, + { + name: "mark baz as terminated", + input: &pbpeering.PeeringWriteRequest{ + Peering: &pbpeering.Peering{ + ID: testBazPeerID, + Name: "baz", + State: pbpeering.PeeringState_TERMINATED, + PeerServerAddresses: []string{"localhost:8502"}, + Partition: structs.NodeEnterpriseMetaInDefaultPartition().PartitionOrEmpty(), + }, + }, + expect: expectations{ + peering: &pbpeering.Peering{ + ID: testBazPeerID, + Name: "baz", + State: pbpeering.PeeringState_TERMINATED, + }, + // Secrets for baz should have been deleted + secrets: nil, + }, + }, + { + name: "cannot edit data during no-op termination", + input: &pbpeering.PeeringWriteRequest{ + Peering: &pbpeering.Peering{ + ID: testBazPeerID, + Name: "baz", + State: pbpeering.PeeringState_TERMINATED, + // Attempt to modify the addresses + Meta: map[string]string{"foo": "bar"}, Partition: structs.NodeEnterpriseMetaInDefaultPartition().PartitionOrEmpty(), }, }, - expectSecrets: &pbpeering.PeeringSecrets{ - PeerID: testBazPeerID, - Establishment: &pbpeering.PeeringSecrets_Establishment{ - SecretID: testBazSecretID, + expect: expectations{ + peering: &pbpeering.Peering{ + ID: testBazPeerID, + Name: "baz", + State: pbpeering.PeeringState_TERMINATED, + // Meta should be unchanged. 
+ Meta: nil, }, }, }, { name: "mark baz for deletion", + input: &pbpeering.PeeringWriteRequest{ + Peering: &pbpeering.Peering{ + ID: testBazPeerID, + Name: "baz", + State: pbpeering.PeeringState_DELETING, + PeerServerAddresses: []string{"localhost:8502"}, + DeletedAt: structs.TimeToProto(testTime), + Partition: structs.NodeEnterpriseMetaInDefaultPartition().PartitionOrEmpty(), + }, + }, + expect: expectations{ + peering: &pbpeering.Peering{ + ID: testBazPeerID, + Name: "baz", + State: pbpeering.PeeringState_DELETING, + DeletedAt: structs.TimeToProto(testTime), + }, + secrets: nil, + }, + }, + { + name: "deleting a deleted peering is a no-op", input: &pbpeering.PeeringWriteRequest{ Peering: &pbpeering.Peering{ ID: testBazPeerID, @@ -1193,8 +1327,38 @@ func TestStore_PeeringWrite(t *testing.T) { Partition: structs.NodeEnterpriseMetaInDefaultPartition().PartitionOrEmpty(), }, }, - // Secrets for baz should have been deleted - expectSecrets: nil, + expect: expectations{ + peering: &pbpeering.Peering{ + ID: testBazPeerID, + Name: "baz", + // Still marked as deleting at the original testTime + State: pbpeering.PeeringState_DELETING, + DeletedAt: structs.TimeToProto(testTime), + }, + // Secrets for baz should have been deleted + secrets: nil, + }, + }, + { + name: "terminating a peering marked for deletion is a no-op", + input: &pbpeering.PeeringWriteRequest{ + Peering: &pbpeering.Peering{ + ID: testBazPeerID, + Name: "baz", + State: pbpeering.PeeringState_TERMINATED, + Partition: structs.NodeEnterpriseMetaInDefaultPartition().PartitionOrEmpty(), + }, + }, + expect: expectations{ + peering: &pbpeering.Peering{ + ID: testBazPeerID, + Name: "baz", + // Still marked as deleting + State: pbpeering.PeeringState_DELETING, + }, + // Secrets for baz should have been deleted + secrets: nil, + }, }, { name: "cannot update peering marked for deletion", @@ -1209,7 +1373,9 @@ func TestStore_PeeringWrite(t *testing.T) { Partition: structs.NodeEnterpriseMetaInDefaultPartition().PartitionOrEmpty(), }, }, - expectErr: "cannot write to peering that is marked for deletion", + expect: expectations{ + err: "cannot write to peering that is marked for deletion", + }, }, { name: "cannot create peering marked for deletion", @@ -1221,7 +1387,9 @@ func TestStore_PeeringWrite(t *testing.T) { Partition: structs.NodeEnterpriseMetaInDefaultPartition().PartitionOrEmpty(), }, }, - expectErr: "cannot create a new peering marked for deletion", + expect: expectations{ + err: "cannot create a new peering marked for deletion", + }, }, } for _, tc := range tcs { diff --git a/agent/rpc/peering/service.go b/agent/rpc/peering/service.go index 20bbafc1cf..6c0950d9e9 100644 --- a/agent/rpc/peering/service.go +++ b/agent/rpc/peering/service.go @@ -726,11 +726,12 @@ func (s *Server) PeeringDelete(ctx context.Context, req *pbpeering.PeeringDelete return nil, err } - if !existing.IsActive() { + if existing == nil || existing.State == pbpeering.PeeringState_DELETING { // Return early when the Peering doesn't exist or is already marked for deletion. // We don't return nil because the pb will fail to marshal. return &pbpeering.PeeringDeleteResponse{}, nil } + // We are using a write request due to needing to perform a deferred deletion. // The peering gets marked for deletion by setting the DeletedAt field, // and a leader routine will handle deleting the peering. 
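As an aside, the deferred-deletion flow referenced in the comment above can be
modeled compactly. The types below are simplified stand-ins; the real code
works against the state store and also tears down secrets and streams:

```go
package main

import (
	"fmt"
	"time"
)

// Peering is a pared-down stand-in for pbpeering.Peering.
type Peering struct {
	Name      string
	DeletedAt *time.Time
}

// markForDeletion is what the delete RPC effectively does: it rewrites the
// peering with DeletedAt set instead of removing the record immediately.
func markForDeletion(p *Peering, now time.Time) {
	p.DeletedAt = &now
}

// reapDeleted models the leader routine that later removes marked records.
func reapDeleted(peerings map[string]*Peering) {
	for name, p := range peerings {
		if p.DeletedAt != nil {
			delete(peerings, name)
		}
	}
}

func main() {
	store := map[string]*Peering{"foo": {Name: "foo"}}
	markForDeletion(store["foo"], time.Now())
	reapDeleted(store)
	fmt.Println(len(store)) // 0
}
```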
diff --git a/agent/rpc/peering/service_test.go b/agent/rpc/peering/service_test.go index 54770d6a61..6b26db7d04 100644 --- a/agent/rpc/peering/service_test.go +++ b/agent/rpc/peering/service_test.go @@ -621,38 +621,50 @@ func TestPeeringService_Read_ACLEnforcement(t *testing.T) { } func TestPeeringService_Delete(t *testing.T) { - // TODO(peering): see note on newTestServer, refactor to not use this - s := newTestServer(t, nil) - - p := &pbpeering.Peering{ - ID: testUUID(t), - Name: "foo", - State: pbpeering.PeeringState_ESTABLISHING, - PeerCAPems: nil, - PeerServerName: "test", - PeerServerAddresses: []string{"addr1"}, + tt := map[string]pbpeering.PeeringState{ + "active peering": pbpeering.PeeringState_ACTIVE, + "terminated peering": pbpeering.PeeringState_TERMINATED, } - err := s.Server.FSM().State().PeeringWrite(10, &pbpeering.PeeringWriteRequest{Peering: p}) - require.NoError(t, err) - require.Nil(t, p.DeletedAt) - require.True(t, p.IsActive()) - client := pbpeering.NewPeeringServiceClient(s.ClientConn(t)) + for name, overrideState := range tt { + t.Run(name, func(t *testing.T) { + // TODO(peering): see note on newTestServer, refactor to not use this + s := newTestServer(t, nil) - ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) - t.Cleanup(cancel) + // A pointer is kept for the following peering so that we can modify the object without another PeeringWrite. + p := &pbpeering.Peering{ + ID: testUUID(t), + Name: "foo", + PeerCAPems: nil, + PeerServerName: "test", + PeerServerAddresses: []string{"addr1"}, + } + err := s.Server.FSM().State().PeeringWrite(10, &pbpeering.PeeringWriteRequest{Peering: p}) + require.NoError(t, err) + require.Nil(t, p.DeletedAt) + require.True(t, p.IsActive()) - _, err = client.PeeringDelete(ctx, &pbpeering.PeeringDeleteRequest{Name: "foo"}) - require.NoError(t, err) + // Overwrite the peering state to simulate deleting from a non-initial state. + p.State = overrideState - retry.Run(t, func(r *retry.R) { - _, resp, err := s.Server.FSM().State().PeeringRead(nil, state.Query{Value: "foo"}) - require.NoError(r, err) + client := pbpeering.NewPeeringServiceClient(s.ClientConn(t)) - // Initially the peering will be marked for deletion but eventually the leader - // routine will clean it up. - require.Nil(r, resp) - }) + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + t.Cleanup(cancel) + + _, err = client.PeeringDelete(ctx, &pbpeering.PeeringDeleteRequest{Name: "foo"}) + require.NoError(t, err) + + retry.Run(t, func(r *retry.R) { + _, resp, err := s.Server.FSM().State().PeeringRead(nil, state.Query{Value: "foo"}) + require.NoError(r, err) + + // Initially the peering will be marked for deletion but eventually the leader + // routine will clean it up. 
+ require.Nil(r, resp) + }) + }) + } } func TestPeeringService_Delete_ACLEnforcement(t *testing.T) { From b1ba0a89bc4c577fc65a21995ff7a25c199beaa0 Mon Sep 17 00:00:00 2001 From: David Yu Date: Fri, 26 Aug 2022 13:37:41 -0700 Subject: [PATCH 41/93] docs: Release notes for Consul 1.12, 1.13 and Consul K8s 0.47.0 (#14352) * consul 1.12, consul 1.13, and consul-k8s release notes Co-authored-by: Jeff Boruszak <104028618+boruszak@users.noreply.github.com> --- website/content/docs/lambda/invocation.mdx | 2 +- .../consul-api-gateway/v0_1_x.mdx | 2 +- .../docs/release-notes/consul-k8s/v0_47_x.mdx | 48 +++++++++++++++++ .../docs/release-notes/consul/v1_10_x.mdx | 2 + .../docs/release-notes/consul/v1_11_x.mdx | 2 + .../docs/release-notes/consul/v1_12_x.mdx | 54 +++++++++++++++++++ .../docs/release-notes/consul/v1_13_x.mdx | 44 +++++++++++++++ website/data/docs-nav-data.json | 17 ++++++ 8 files changed, 169 insertions(+), 2 deletions(-) create mode 100644 website/content/docs/release-notes/consul-k8s/v0_47_x.mdx create mode 100644 website/content/docs/release-notes/consul/v1_12_x.mdx create mode 100644 website/content/docs/release-notes/consul/v1_13_x.mdx diff --git a/website/content/docs/lambda/invocation.mdx b/website/content/docs/lambda/invocation.mdx index 991c8eb92d..4789c0adac 100644 --- a/website/content/docs/lambda/invocation.mdx +++ b/website/content/docs/lambda/invocation.mdx @@ -72,7 +72,7 @@ service mesh. } ``` 1. Issue the `consul services register` command to store the configuration: - ```shell-sesion + ```shell-session $ consul services register api-sidecar-proxy.hcl ``` 1. Call the upstream service to invoke the Lambda function. In the following example, the `api` service invokes the `authentication` service at `localhost:2345`: diff --git a/website/content/docs/release-notes/consul-api-gateway/v0_1_x.mdx b/website/content/docs/release-notes/consul-api-gateway/v0_1_x.mdx index 357fd7032d..b191e77fd2 100644 --- a/website/content/docs/release-notes/consul-api-gateway/v0_1_x.mdx +++ b/website/content/docs/release-notes/consul-api-gateway/v0_1_x.mdx @@ -8,7 +8,7 @@ description: >- # Consul API Gateway 0.1.0 -## OVerview +## Overview This is the first general availability (GA) release of Consul API Gateway. It provides controlled access for network traffic from outside a Consul service diff --git a/website/content/docs/release-notes/consul-k8s/v0_47_x.mdx b/website/content/docs/release-notes/consul-k8s/v0_47_x.mdx new file mode 100644 index 0000000000..b040787c5c --- /dev/null +++ b/website/content/docs/release-notes/consul-k8s/v0_47_x.mdx @@ -0,0 +1,48 @@ +--- +layout: docs +page_title: 0.47.x +description: >- + Consul on Kubernetes release notes for version 0.47.x +--- + +# Consul on Kubernetes 0.47.0 + +## Release Highlights + +- **Cluster Peering (Beta)**: This release introduces support for Cluster Peering, which allows service connectivity between two independent clusters. Enabling peering will deploy the peering controllers and PeeringAcceptor and PeeringDialer CRDs. The new CRDs are used to establish a peering connection between two clusters. Refer to [Cluster Peering on Kubernetes](/docs/connect/cluster-peering/k8s) for full instructions on using Cluster Peering on Kubernetes. + +- **Envoy Proxy Debugging CLI Commands**: This release introduces new commands to quickly identify proxies and troubleshoot Envoy proxies for sidecars and gateways. + * Add `consul-k8s proxy list` command for displaying pods running Envoy managed by Consul. 
+  * Add `consul-k8s proxy read podname` command for displaying Envoy configuration for a given pod.
+
+- **Transparent Proxy Egress**: Adds support for destinations on the Service Defaults CRD when using transparent proxy for terminating gateways.
+
+## Supported Software
+
+- Consul 1.11.x, Consul 1.12.x and Consul 1.13.1+
+- Kubernetes 1.19+
+  - Kubernetes 1.24 is not supported at this time.
+- Kubectl 1.21+
+- Envoy proxy support is determined by the Consul version deployed. Refer to
+  [Envoy Integration](/docs/connect/proxies/envoy) for details.
+
+## Upgrading
+
+For detailed information on upgrading, please refer to the [Upgrades page](/docs/k8s/upgrade).
+
+## Known Issues
+
+The following issues are known to exist in the v0.47.0 and v0.47.1 releases:
+
+- Kubernetes 1.24 is not supported because secret-based tokens are no longer autocreated by default for service accounts. Refer to GitHub issue
+  [[GH-1145](https://github.com/hashicorp/consul-k8s/issues/1145)] for
+  details.
+
+## Changelogs
+
+The changelogs for this major release version and any maintenance versions are listed below.
+
+~> **Note:** The following link takes you to the changelogs on the GitHub website.
+
+- [0.47.0](https://github.com/hashicorp/consul-k8s/releases/tag/v0.47.0)
+- [0.47.1](https://github.com/hashicorp/consul-k8s/releases/tag/v0.47.1)
diff --git a/website/content/docs/release-notes/consul/v1_10_x.mdx b/website/content/docs/release-notes/consul/v1_10_x.mdx
index e4531dec08..e55b2ce32c 100644
--- a/website/content/docs/release-notes/consul/v1_10_x.mdx
+++ b/website/content/docs/release-notes/consul/v1_10_x.mdx
@@ -24,6 +24,8 @@ description: >-
 - Drops support for Envoy version 1.13.x.
 - (Enterprise Only) Consul Enterprise has removed support for temporary licensing. All server agents must have a valid license at startup and client agents must have a license at startup or be able to retrieve one from the servers.
 
+## Upgrading
+
 For more detailed information, please refer to the [upgrade details page](/docs/upgrading/upgrade-specific#consul-1-10-0) and the changelogs.
 
 ## Changelogs
diff --git a/website/content/docs/release-notes/consul/v1_11_x.mdx b/website/content/docs/release-notes/consul/v1_11_x.mdx
index eeb468003d..d26cd6a804 100644
--- a/website/content/docs/release-notes/consul/v1_11_x.mdx
+++ b/website/content/docs/release-notes/consul/v1_11_x.mdx
@@ -27,6 +27,8 @@ description: >-
 
 - Drops support for Envoy versions 1.15.x and 1.16.x
 
+## Upgrading
+
 For more detailed information, please refer to the [upgrade details page](/docs/upgrading/upgrade-specific#consul-1-11-0) and the changelogs.
 
 ## Changelogs
diff --git a/website/content/docs/release-notes/consul/v1_12_x.mdx b/website/content/docs/release-notes/consul/v1_12_x.mdx
new file mode 100644
index 0000000000..842dfb31c8
--- /dev/null
+++ b/website/content/docs/release-notes/consul/v1_12_x.mdx
@@ -0,0 +1,54 @@
+---
+layout: docs
+page_title: 1.12.x
+description: >-
+  Consul release notes for version 1.12.x
+---
+
+# Consul 1.12.0
+
+## Release Highlights
+
+- **AWS IAM Auth Method**: Consul now provides an AWS IAM auth method that allows AWS IAM roles and users to authenticate with Consul to obtain ACL tokens. Refer to [AWS IAM Auth Method](/docs/security/acl/auth-methods/aws-iam) for detailed configuration information.
+
+- **Per listener TLS Config**: It is now possible to configure TLS differently for each of Consul's listeners, such as HTTPS, gRPC, and the internal multiplexed RPC listener, using the `tls` stanza.
Refer to [TLS Configuration Reference](/docs/agent/config/config-files#tls-configuration-reference) for more details.
+
+- **AWS Lambda**: Adds the ability to invoke AWS Lambdas through terminating gateways, which allows for cross-datacenter communication, transparent proxy, and intentions with Consul Service Mesh. Refer to [AWS Lambda](/docs/lambda) and [Invoke Lambda Functions](/docs/lambda/invocation) for more details.
+
+- **Mesh-wide TLS min/max versions and cipher suites:** Using the [Mesh](/docs/connect/config-entries/mesh#tls) Config Entry or CRD, it is now possible to set TLS min/max versions and cipher suites for both inbound and outbound mTLS connections.
+
+- **Expanded details for ACL Permission Denied errors**: Details are now provided when permission denied errors surface for RPC calls. Details include the accessor ID of the ACL token, the missing permission, and any namespace or partition that the error occurred on.
+
+- **ACL token read**: The `consul acl token read -rules` command now includes an `-expanded` option to display detailed info about any policies and rules affecting the token. Refer to [Consul ACL Token read](/commands/acl/token/read) for more details.
+
+- **Automatically reload agent config when watching agent config file changes**: When using the `auto-reload-config` CLI flag or `auto_reload_config` agent config option, Consul now automatically reloads the [reloadable configuration options](/docs/agent/config#reloadable-configuration) when configuration files change. Refer to [auto_reload_config](/docs/agent/config/cli-flags#_auto_reload_config) for more details.
+
+
+## What's Changed
+
+- Removes support for Envoy 1.17.x and Envoy 1.18.x, and adds support for Envoy 1.21.x and Envoy 1.22.x. Refer to the [Envoy Compatibility matrix](/docs/connect/proxies/envoy) for more details.
+
+- The `disable_compat_1.9` option now defaults to true. Metrics formatted in the style of version 1.9, such as `consul.http...`, can still be enabled by setting `disable_compat_1.9 = false`. However, these metrics will be removed in 1.13.
+
+- The `agent_master` ACL token has been renamed to `agent_recovery` ACL token. In addition, the `consul acl set-agent-token master` command has been replaced with `consul acl set-agent-token recovery`. Refer to [ACL Agent Recovery Token](/docs/security/acl/acl-tokens#acl-agent-recovery-token) and [Consul ACL Set Agent Token](/commands/acl/set-agent-token) for more information.
+
+- If TLS min versions and max versions are not specified, the TLS min/max versions default to the following values. For details on how to configure TLS min and max, refer to the [Mesh TLS config entry](/docs/connect/config-entries/mesh#tls) or CRD documentation.
+  - Incoming connections: TLS 1.2 for min version, TLS 1.3 for max version
+  - Outgoing connections: TLS 1.2 for both TLS min and TLS max versions.
+
+## Upgrading
+
+For more detailed information, please refer to the [upgrade details page](/docs/upgrading/upgrade-specific#consul-1-12-0) and the changelogs.
+
+## Changelogs
+
+The changelogs for this major release version and any maintenance versions are listed below.
+
+-> **Note**: These links take you to the changelogs on the GitHub website.
+
+- [1.12.0](https://github.com/hashicorp/consul/releases/tag/v1.12.0)
+- [1.12.1](https://github.com/hashicorp/consul/releases/tag/v1.12.1)
+- [1.12.2](https://github.com/hashicorp/consul/releases/tag/v1.12.2)
+- [1.12.3](https://github.com/hashicorp/consul/releases/tag/v1.12.3)
+- [1.12.4](https://github.com/hashicorp/consul/releases/tag/v1.12.4)
+
diff --git a/website/content/docs/release-notes/consul/v1_13_x.mdx b/website/content/docs/release-notes/consul/v1_13_x.mdx
new file mode 100644
index 0000000000..23b694a913
--- /dev/null
+++ b/website/content/docs/release-notes/consul/v1_13_x.mdx
@@ -0,0 +1,44 @@
+---
+layout: docs
+page_title: 1.13.x
+description: >-
+  Consul release notes for version 1.13.x
+---
+
+# Consul 1.13.0
+
+## Release Highlights
+
+- **Cluster Peering (Beta)**: This version adds a new model to federate Consul clusters for both service mesh and traditional service discovery. Cluster peering allows for service interconnectivity with looser coupling than the existing WAN federation. For more information, refer to the [cluster peering](/docs/connect/cluster-peering) documentation.
+
+- **Transparent proxying through terminating gateways**: This version adds egress traffic control to destinations outside of Consul's catalog, such as APIs on the public internet. Transparent proxies can dial [destinations defined in service-defaults](/docs/connect/config-entries/service-defaults#destination) and have the traffic routed through terminating gateways. For more information, refer to the [terminating gateway](/docs/connect/gateways/terminating-gateway#terminating-gateway-configuration) documentation.
+
+- **Enables TLS on the Envoy Prometheus endpoint**: The Envoy Prometheus endpoint can be enabled when `envoy_prometheus_bind_addr` is set and then secured over TLS using new CLI flags for the `consul connect envoy` command. These flags are: `-prometheus-ca-file`, `-prometheus-ca-path`, `-prometheus-cert-file` and `-prometheus-key-file`. The CA, cert, and key can be provided to Envoy by a Kubernetes mounted volume so that Envoy can watch the files and dynamically reload the certs when the volume is updated.
+
+- **UDP Health Checks**: Adds the ability to register service discovery health checks that periodically send UDP datagrams to the specified IP/hostname and port. Refer to [UDP checks](/docs/discovery/checks#udp-interval).
+
+## What's Changed
+
+- Removes support for Envoy 1.19.x and adds support for Envoy 1.23. Refer to the [Envoy Compatibility matrix](/docs/connect/proxies/envoy) for more details.
+
+- The [`disable_compat_19`](/docs/agent/options#telemetry-disable_compat_1.9) telemetry configuration option is now removed. In Consul versions 1.10.x through 1.11.x, the config defaulted to `false`. In version 1.12.x it defaulted to `true`. Before upgrading you should remove this flag from your config if the flag is being used.
+
+## Upgrading
+
+For more detailed information, please refer to the [upgrade details page](/docs/upgrading/upgrade-specific#consul-1-13-0) and the changelogs.
+
+## Known Issues
+
+The following issues are known to exist in the 1.13.0 release:
+
+- Consul 1.13.1 fixes a compatibility issue when restoring snapshots from pre-1.13.0 versions of Consul. Refer to GitHub issue [[GH-14149](https://github.com/hashicorp/consul/issues/14149)] for more details.
+- Consul 1.13.0 and Consul 1.13.1 default to requiring TLS for gRPC communication with Envoy proxies when auto-encrypt and auto-config are enabled.
In environments where Envoy proxies are not already configured to use TLS for gRPC, upgrading to Consul 1.13 will cause Envoy proxies to disconnect from the control plane (Consul agents). A future patch release will disable TLS by default for gRPC communication with Envoy proxies when using Service Mesh and auto-config or auto-encrypt. Refer to GitHub issue [GH-14253](https://github.com/hashicorp/consul/issues/14253) and [Service Mesh deployments using auto-config and auto-encrypt](https://www.consul.io/docs/upgrading/upgrade-specific#service-mesh-deployments-using-auto-encrypt-or-auto-config) for more details. + + +## Changelogs + +The changelogs for this major release version and any maintenance versions are listed below. + +-> **Note**: These links take you to the changelogs on the GitHub website. + +- [1.13.0](https://github.com/hashicorp/consul/releases/tag/v1.13.0) +- [1.13.1](https://github.com/hashicorp/consul/releases/tag/v1.13.1) diff --git a/website/data/docs-nav-data.json b/website/data/docs-nav-data.json index 7f64ed3b45..49b1d91100 100644 --- a/website/data/docs-nav-data.json +++ b/website/data/docs-nav-data.json @@ -1248,6 +1248,14 @@ { "title": "Consul", "routes": [ + { + "title": "v1.13.x", + "path": "release-notes/consul/v1_13_x" + }, + { + "title": "v1.12.x", + "path": "release-notes/consul/v1_12_x" + }, { "title": "v1.11.x", "path": "release-notes/consul/v1_11_x" @@ -1262,6 +1270,15 @@ } ] }, + { + "title": "Consul K8s", + "routes": [ + { + "title": "v0.47.x", + "path": "release-notes/consul-k8s/v0_47_x" + } + ] + }, { "title": "Consul API Gateway", "routes": [ From 4d97e2f9364bfdae39eda1297df3f47fbd7c2aca Mon Sep 17 00:00:00 2001 From: "Chris S. Kim" Date: Fri, 26 Aug 2022 16:49:03 -0400 Subject: [PATCH 42/93] Adjust metrics reporting for peering tracker --- agent/consul/server.go | 11 ++- .../services/peerstream/server.go | 8 +- .../services/peerstream/stream_resources.go | 6 +- .../services/peerstream/stream_test.go | 38 +++------- .../services/peerstream/stream_tracker.go | 51 +++++-------- .../peerstream/stream_tracker_test.go | 76 +++++++------------ 6 files changed, 68 insertions(+), 122 deletions(-) diff --git a/agent/consul/server.go b/agent/consul/server.go index 8f2986c3eb..f92a03ccd2 100644 --- a/agent/consul/server.go +++ b/agent/consul/server.go @@ -742,7 +742,6 @@ func NewServer(config *Config, flat Deps, externalGRPCServer *grpc.Server) (*Ser return s.ForwardGRPC(s.grpcConnPool, info, fn) }, }) - s.peerStreamTracker.SetHeartbeatTimeout(s.peerStreamServer.Config.IncomingHeartbeatTimeout) s.peerStreamServer.Register(s.externalGRPCServer) // Initialize internal gRPC server. @@ -1575,12 +1574,12 @@ func (s *Server) Stats() map[string]map[string]string { // GetLANCoordinate returns the coordinate of the node in the LAN gossip // pool. // -// - Clients return a single coordinate for the single gossip pool they are -// in (default, segment, or partition). +// - Clients return a single coordinate for the single gossip pool they are +// in (default, segment, or partition). // -// - Servers return one coordinate for their canonical gossip pool (i.e. -// default partition/segment) and one per segment they are also ancillary -// members of. +// - Servers return one coordinate for their canonical gossip pool (i.e. +// default partition/segment) and one per segment they are also ancillary +// members of. // // NOTE: servers do not emit coordinates for partitioned gossip pools they // are ancillary members of. 
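A reading aid for this patch: the `stream_tracker.go` hunks later in the patch remove the tracker-level heartbeat timeout (which is why the `server.go` hunk above drops the `SetHeartbeatTimeout` call) and redefine peering stream health purely in terms of tracked stream state. The following is a minimal, self-contained Go sketch of the new decision logic, condensed from the diff; the standalone `streamState` type and `isHealthy` function are illustrative only, not part of the Consul API:

```go
package main

import (
	"fmt"
	"time"
)

// streamState mirrors the Status fields consulted by the patched IsHealthy.
type streamState struct {
	Connected               bool
	DisconnectTime          time.Time
	LastAck                 time.Time
	LastNack                time.Time
	LastRecvResourceSuccess time.Time
	LastRecvError           time.Time
}

// isHealthy condenses the rewritten IsHealthy from stream_tracker.go: a
// peering is unhealthy if the stream is disconnected, if the peer's last
// NACK is newer than its last ACK, or if the last receive error is newer
// than the last successful resource receive.
func isHealthy(s streamState) bool {
	if !s.Connected {
		return false
	}
	// Logically equivalent to !s.Connected; kept for parity with the patch.
	if !s.DisconnectTime.IsZero() {
		return false
	}
	// The peer is unable to apply our replication messages.
	if s.LastNack.After(s.LastAck) {
		return false
	}
	// We are failing to process what the peer sends us.
	if s.LastRecvError.After(s.LastRecvResourceSuccess) {
		return false
	}
	return true
}

func main() {
	now := time.Now()
	fmt.Println(isHealthy(streamState{Connected: true, LastAck: now}))                                 // true
	fmt.Println(isHealthy(streamState{Connected: true, LastAck: now, LastNack: now.Add(time.Second)})) // false
	fmt.Println(isHealthy(streamState{Connected: false}))                                              // false
}
```

Heartbeat timeouts still matter, but only indirectly: per the `stream_resources.go` and `server.go` hunks, the now-unexported `incomingHeartbeatTimeout` closes the stream when heartbeats stop, which flips `Connected` and thereby makes the status unhealthy.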
diff --git a/agent/grpc-external/services/peerstream/server.go b/agent/grpc-external/services/peerstream/server.go index 6568d7bf80..7254c60c7c 100644 --- a/agent/grpc-external/services/peerstream/server.go +++ b/agent/grpc-external/services/peerstream/server.go @@ -42,8 +42,8 @@ type Config struct { // outgoingHeartbeatInterval is how often we send a heartbeat. outgoingHeartbeatInterval time.Duration - // IncomingHeartbeatTimeout is how long we'll wait between receiving heartbeats before we close the connection. - IncomingHeartbeatTimeout time.Duration + // incomingHeartbeatTimeout is how long we'll wait between receiving heartbeats before we close the connection. + incomingHeartbeatTimeout time.Duration } //go:generate mockery --name ACLResolver --inpackage @@ -63,8 +63,8 @@ func NewServer(cfg Config) *Server { if cfg.outgoingHeartbeatInterval == 0 { cfg.outgoingHeartbeatInterval = defaultOutgoingHeartbeatInterval } - if cfg.IncomingHeartbeatTimeout == 0 { - cfg.IncomingHeartbeatTimeout = defaultIncomingHeartbeatTimeout + if cfg.incomingHeartbeatTimeout == 0 { + cfg.incomingHeartbeatTimeout = defaultIncomingHeartbeatTimeout } return &Server{ Config: cfg, diff --git a/agent/grpc-external/services/peerstream/stream_resources.go b/agent/grpc-external/services/peerstream/stream_resources.go index 0e6b28f45a..ad5d9d4631 100644 --- a/agent/grpc-external/services/peerstream/stream_resources.go +++ b/agent/grpc-external/services/peerstream/stream_resources.go @@ -406,7 +406,7 @@ func (s *Server) realHandleStream(streamReq HandleStreamRequest) error { // incomingHeartbeatCtx will complete if incoming heartbeats time out. incomingHeartbeatCtx, incomingHeartbeatCtxCancel := - context.WithTimeout(context.Background(), s.IncomingHeartbeatTimeout) + context.WithTimeout(context.Background(), s.incomingHeartbeatTimeout) // NOTE: It's important that we wrap the call to cancel in a wrapper func because during the loop we're // re-assigning the value of incomingHeartbeatCtxCancel and we want the defer to run on the last assigned // value, not the current value. @@ -575,6 +575,7 @@ func (s *Server) realHandleStream(streamReq HandleStreamRequest) error { status.TrackRecvResourceSuccess() } + // We are replying ACK or NACK depending on whether we successfully processed the response. if err := streamSend(reply); err != nil { return fmt.Errorf("failed to send to stream: %v", err) } @@ -605,7 +606,7 @@ func (s *Server) realHandleStream(streamReq HandleStreamRequest) error { // They just can't trace the execution properly for some reason (possibly golang/go#29587). 
//nolint:govet incomingHeartbeatCtx, incomingHeartbeatCtxCancel = - context.WithTimeout(context.Background(), s.IncomingHeartbeatTimeout) + context.WithTimeout(context.Background(), s.incomingHeartbeatTimeout) } case update := <-subCh: @@ -642,7 +643,6 @@ func (s *Server) realHandleStream(streamReq HandleStreamRequest) error { if err := streamSend(replResp); err != nil { return fmt.Errorf("failed to push data for %q: %w", update.CorrelationID, err) } - status.TrackSendSuccess() } } } diff --git a/agent/grpc-external/services/peerstream/stream_test.go b/agent/grpc-external/services/peerstream/stream_test.go index be4a44ec87..fcdd07422b 100644 --- a/agent/grpc-external/services/peerstream/stream_test.go +++ b/agent/grpc-external/services/peerstream/stream_test.go @@ -572,7 +572,7 @@ func TestStreamResources_Server_StreamTracker(t *testing.T) { }) }) - var lastSendAck, lastSendSuccess time.Time + var lastSendAck time.Time testutil.RunStep(t, "ack tracked as success", func(t *testing.T) { ack := &pbpeerstream.ReplicationMessage{ @@ -587,16 +587,13 @@ func TestStreamResources_Server_StreamTracker(t *testing.T) { }, } - lastSendAck = time.Date(2000, time.January, 1, 0, 0, 2, 0, time.UTC) - lastSendSuccess = time.Date(2000, time.January, 1, 0, 0, 3, 0, time.UTC) + lastSendAck = it.FutureNow(1) err := client.Send(ack) require.NoError(t, err) expect := Status{ - Connected: true, - LastAck: lastSendAck, - heartbeatTimeout: defaultIncomingHeartbeatTimeout, - LastSendSuccess: lastSendSuccess, + Connected: true, + LastAck: lastSendAck, } retry.Run(t, func(r *retry.R) { @@ -624,20 +621,17 @@ func TestStreamResources_Server_StreamTracker(t *testing.T) { }, } - lastSendAck = time.Date(2000, time.January, 1, 0, 0, 4, 0, time.UTC) - lastNack = time.Date(2000, time.January, 1, 0, 0, 5, 0, time.UTC) + lastNack = it.FutureNow(1) err := client.Send(nack) require.NoError(t, err) lastNackMsg = "client peer was unable to apply resource: bad bad not good" expect := Status{ - Connected: true, - LastAck: lastSendAck, - LastNack: lastNack, - LastNackMessage: lastNackMsg, - heartbeatTimeout: defaultIncomingHeartbeatTimeout, - LastSendSuccess: lastSendSuccess, + Connected: true, + LastAck: lastSendAck, + LastNack: lastNack, + LastNackMessage: lastNackMsg, } retry.Run(t, func(r *retry.R) { @@ -661,7 +655,7 @@ func TestStreamResources_Server_StreamTracker(t *testing.T) { }, }, } - lastRecvResourceSuccess = it.FutureNow(1) + lastRecvResourceSuccess = it.FutureNow(2) err := client.Send(resp) require.NoError(t, err) @@ -707,8 +701,6 @@ func TestStreamResources_Server_StreamTracker(t *testing.T) { ImportedServices: map[string]struct{}{ api.String(): {}, }, - heartbeatTimeout: defaultIncomingHeartbeatTimeout, - LastSendSuccess: lastSendSuccess, } retry.Run(t, func(r *retry.R) { @@ -770,8 +762,6 @@ func TestStreamResources_Server_StreamTracker(t *testing.T) { ImportedServices: map[string]struct{}{ api.String(): {}, }, - heartbeatTimeout: defaultIncomingHeartbeatTimeout, - LastSendSuccess: lastSendSuccess, } retry.Run(t, func(r *retry.R) { @@ -805,8 +795,6 @@ func TestStreamResources_Server_StreamTracker(t *testing.T) { ImportedServices: map[string]struct{}{ api.String(): {}, }, - heartbeatTimeout: defaultIncomingHeartbeatTimeout, - LastSendSuccess: lastSendSuccess, } retry.Run(t, func(r *retry.R) { @@ -839,8 +827,6 @@ func TestStreamResources_Server_StreamTracker(t *testing.T) { ImportedServices: map[string]struct{}{ api.String(): {}, }, - heartbeatTimeout: defaultIncomingHeartbeatTimeout, - LastSendSuccess: lastSendSuccess, } 
retry.Run(t, func(r *retry.R) { @@ -1143,7 +1129,7 @@ func TestStreamResources_Server_DisconnectsOnHeartbeatTimeout(t *testing.T) { srv, store := newTestServer(t, func(c *Config) { c.Tracker.SetClock(it.Now) - c.IncomingHeartbeatTimeout = 5 * time.Millisecond + c.incomingHeartbeatTimeout = 5 * time.Millisecond }) p := writePeeringToBeDialed(t, store, 1, "my-peer") @@ -1250,7 +1236,7 @@ func TestStreamResources_Server_KeepsConnectionOpenWithHeartbeat(t *testing.T) { srv, store := newTestServer(t, func(c *Config) { c.Tracker.SetClock(it.Now) - c.IncomingHeartbeatTimeout = incomingHeartbeatTimeout + c.incomingHeartbeatTimeout = incomingHeartbeatTimeout }) p := writePeeringToBeDialed(t, store, 1, "my-peer") diff --git a/agent/grpc-external/services/peerstream/stream_tracker.go b/agent/grpc-external/services/peerstream/stream_tracker.go index ffde98ba32..d0dcd39770 100644 --- a/agent/grpc-external/services/peerstream/stream_tracker.go +++ b/agent/grpc-external/services/peerstream/stream_tracker.go @@ -16,8 +16,6 @@ type Tracker struct { // timeNow is a shim for testing. timeNow func() time.Time - - heartbeatTimeout time.Duration } func NewTracker() *Tracker { @@ -35,12 +33,6 @@ func (t *Tracker) SetClock(clock func() time.Time) { } } -func (t *Tracker) SetHeartbeatTimeout(heartbeatTimeout time.Duration) { - t.mu.Lock() - defer t.mu.Unlock() - t.heartbeatTimeout = heartbeatTimeout -} - // Register a stream for a given peer but do not mark it as connected. func (t *Tracker) Register(id string) (*MutableStatus, error) { t.mu.Lock() @@ -52,7 +44,7 @@ func (t *Tracker) registerLocked(id string, initAsConnected bool) (*MutableStatus, bool, error) { status, ok := t.streams[id] if !ok { - status = newMutableStatus(t.timeNow, t.heartbeatTimeout, initAsConnected) + status = newMutableStatus(t.timeNow, initAsConnected) t.streams[id] = status return status, true, nil } @@ -152,8 +144,6 @@ type MutableStatus struct { // Status contains information about the replication stream to a peer cluster. // TODO(peering): There's a lot of fields here... type Status struct { - heartbeatTimeout time.Duration - // Connected is true when there is an open stream for the peer. Connected bool @@ -182,9 +172,6 @@ type Status struct { // LastSendErrorMessage tracks the last error message when sending into the stream. LastSendErrorMessage string - // LastSendSuccess tracks the time of the last success response sent into the stream. - LastSendSuccess time.Time - // LastRecvHeartbeat tracks when we last received a heartbeat from our peer. LastRecvHeartbeat time.Time @@ -216,38 +203,40 @@ func (s *Status) GetExportedServicesCount() uint64 { // IsHealthy is a convenience func that returns true/false for a peering status. // We define a peering as unhealthy if its status satisfies one of the following: -// - If heartbeat hasn't been received within the IncomingHeartbeatTimeout -// - If the last sent error is newer than last sent success +// - If it is disconnected +// - If the last received Nack is newer than last received Ack // - If the last received error is newer than last received success // If none of these conditions apply, we call the peering healthy. func (s *Status) IsHealthy() bool { - if time.Now().Sub(s.LastRecvHeartbeat) > s.heartbeatTimeout { - // 1. If heartbeat hasn't been received for a while - report unhealthy + if !s.Connected { return false } - if s.LastSendError.After(s.LastSendSuccess) { - // 2. 
If last sent error is newer than last sent success - report unhealthy + // If stream is in a disconnected state, report unhealthy. + // This should be logically equivalent to s.Connected above. + if !s.DisconnectTime.IsZero() { return false } + // If last Nack is after last Ack, it means the peer is unable to + // handle our replication messages. + if s.LastNack.After(s.LastAck) { + return false + } + + // If last recv error is newer than last recv success - report unhealthy if s.LastRecvError.After(s.LastRecvResourceSuccess) { - // 3. If last recv error is newer than last recv success - report unhealthy return false } return true } -func newMutableStatus(now func() time.Time, heartbeatTimeout time.Duration, connected bool) *MutableStatus { - if heartbeatTimeout.Microseconds() == 0 { - heartbeatTimeout = defaultIncomingHeartbeatTimeout - } +func newMutableStatus(now func() time.Time, connected bool) *MutableStatus { return &MutableStatus{ Status: Status{ - Connected: connected, - heartbeatTimeout: heartbeatTimeout, - NeverConnected: !connected, + Connected: connected, + NeverConnected: !connected, }, timeNow: now, doneCh: make(chan struct{}), @@ -271,12 +260,6 @@ func (s *MutableStatus) TrackSendError(error string) { s.mu.Unlock() } -func (s *MutableStatus) TrackSendSuccess() { - s.mu.Lock() - s.LastSendSuccess = s.timeNow().UTC() - s.mu.Unlock() -} - // TrackRecvResourceSuccess tracks receiving a replicated resource. func (s *MutableStatus) TrackRecvResourceSuccess() { s.mu.Lock() diff --git a/agent/grpc-external/services/peerstream/stream_tracker_test.go b/agent/grpc-external/services/peerstream/stream_tracker_test.go index 8cdcbc79a2..7500ccd4b8 100644 --- a/agent/grpc-external/services/peerstream/stream_tracker_test.go +++ b/agent/grpc-external/services/peerstream/stream_tracker_test.go @@ -16,11 +16,10 @@ const ( func TestStatus_IsHealthy(t *testing.T) { type testcase struct { - name string - dontConnect bool - modifierFunc func(status *MutableStatus) - expectedVal bool - heartbeatTimeout time.Duration + name string + dontConnect bool + modifierFunc func(status *MutableStatus) + expectedVal bool } tcs := []testcase{ @@ -30,56 +29,39 @@ func TestStatus_IsHealthy(t *testing.T) { dontConnect: true, }, { - name: "no heartbeat, unhealthy", - expectedVal: false, - }, - { - name: "heartbeat is not received, unhealthy", + name: "disconnect time not zero", expectedVal: false, modifierFunc: func(status *MutableStatus) { - // set heartbeat - status.LastRecvHeartbeat = time.Now().Add(-1 * time.Second) - }, - heartbeatTimeout: 1 * time.Second, - }, - { - name: "send error before send success", - expectedVal: false, - modifierFunc: func(status *MutableStatus) { - // set heartbeat - status.LastRecvHeartbeat = time.Now() - - status.LastSendSuccess = time.Now() - status.LastSendError = time.Now() + status.DisconnectTime = time.Now() }, }, { - name: "received error before received success", + name: "receive error after receive success", expectedVal: false, modifierFunc: func(status *MutableStatus) { - // set heartbeat - status.LastRecvHeartbeat = time.Now() - - status.LastRecvResourceSuccess = time.Now() - status.LastRecvError = time.Now() + now := time.Now() + status.LastRecvResourceSuccess = now + status.LastRecvError = now.Add(1 * time.Second) + }, + }, + { + name: "nack after ack", + expectedVal: false, + modifierFunc: func(status *MutableStatus) { + now := time.Now() + status.LastAck = now + status.LastNack = now.Add(1 * time.Second) }, }, { name: "healthy", expectedVal: 
true, - modifierFunc: func(status *MutableStatus) { - // set heartbeat - status.LastRecvHeartbeat = time.Now() - }, }, } for _, tc := range tcs { t.Run(tc.name, func(t *testing.T) { tracker := NewTracker() - if tc.heartbeatTimeout.Microseconds() != 0 { - tracker.SetHeartbeatTimeout(tc.heartbeatTimeout) - } if !tc.dontConnect { st, err := tracker.Connected(aPeerID) @@ -120,8 +102,7 @@ func TestTracker_EnsureConnectedDisconnected(t *testing.T) { require.NoError(t, err) expect := Status{ - Connected: true, - heartbeatTimeout: defaultIncomingHeartbeatTimeout, + Connected: true, } status, ok := tracker.StreamStatus(peerID) @@ -147,9 +128,8 @@ func TestTracker_EnsureConnectedDisconnected(t *testing.T) { lastSuccess = it.base.Add(time.Duration(sequence) * time.Second).UTC() expect := Status{ - Connected: true, - LastAck: lastSuccess, - heartbeatTimeout: defaultIncomingHeartbeatTimeout, + Connected: true, + LastAck: lastSuccess, } require.Equal(t, expect, status) }) @@ -159,10 +139,9 @@ func TestTracker_EnsureConnectedDisconnected(t *testing.T) { sequence++ expect := Status{ - Connected: false, - DisconnectTime: it.base.Add(time.Duration(sequence) * time.Second).UTC(), - LastAck: lastSuccess, - heartbeatTimeout: defaultIncomingHeartbeatTimeout, + Connected: false, + DisconnectTime: it.base.Add(time.Duration(sequence) * time.Second).UTC(), + LastAck: lastSuccess, } status, ok := tracker.StreamStatus(peerID) require.True(t, ok) @@ -174,9 +153,8 @@ func TestTracker_EnsureConnectedDisconnected(t *testing.T) { require.NoError(t, err) expect := Status{ - Connected: true, - LastAck: lastSuccess, - heartbeatTimeout: defaultIncomingHeartbeatTimeout, + Connected: true, + LastAck: lastSuccess, // DisconnectTime gets cleared on re-connect. } From 10996654736710e57581443a1bceaef533c57c6a Mon Sep 17 00:00:00 2001 From: Eric Haberkorn Date: Mon, 29 Aug 2022 09:51:32 -0400 Subject: [PATCH 43/93] Update the structs and discovery chain for service resolver redirects to cluster peers. 
(#14366) --- agent/consul/discoverychain/compile_test.go | 42 ++ agent/structs/config_entry_discoverychain.go | 29 +- .../config_entry_discoverychain_oss_test.go | 22 + .../config_entry_discoverychain_test.go | 46 ++ api/config_entry_discoverychain.go | 1 + api/config_entry_discoverychain_test.go | 14 + proto/pbconfigentry/config_entry.gen.go | 2 + proto/pbconfigentry/config_entry.pb.go | 661 +++++++++--------- proto/pbconfigentry/config_entry.proto | 1 + 9 files changed, 487 insertions(+), 331 deletions(-) diff --git a/agent/consul/discoverychain/compile_test.go b/agent/consul/discoverychain/compile_test.go index 6505fdb9ea..a4c9c65ed7 100644 --- a/agent/consul/discoverychain/compile_test.go +++ b/agent/consul/discoverychain/compile_test.go @@ -39,6 +39,7 @@ func TestCompile(t *testing.T) { "service redirect": testcase_ServiceRedirect(), "service and subset redirect": testcase_ServiceAndSubsetRedirect(), "datacenter redirect": testcase_DatacenterRedirect(), + "redirect to cluster peer": testcase_PeerRedirect(), "datacenter redirect with mesh gateways": testcase_DatacenterRedirect_WithMeshGateways(), "service failover": testcase_ServiceFailover(), "service failover through redirect": testcase_ServiceFailoverThroughRedirect(), @@ -1084,6 +1085,47 @@ func testcase_DatacenterRedirect() compileTestCase { return compileTestCase{entries: entries, expect: expect} } +func testcase_PeerRedirect() compileTestCase { + entries := newEntries() + entries.AddResolvers( + &structs.ServiceResolverConfigEntry{ + Kind: "service-resolver", + Name: "main", + Redirect: &structs.ServiceResolverRedirect{ + Service: "other", + Peer: "cluster-01", + }, + }, + ) + + expect := &structs.CompiledDiscoveryChain{ + Protocol: "tcp", + StartNode: "resolver:other.default.default.external.cluster-01", + Nodes: map[string]*structs.DiscoveryGraphNode{ + "resolver:other.default.default.external.cluster-01": { + Type: structs.DiscoveryGraphNodeTypeResolver, + Name: "other.default.default.external.cluster-01", + Resolver: &structs.DiscoveryResolver{ + Default: true, + ConnectTimeout: 5 * time.Second, + Target: "other.default.default.external.cluster-01", + }, + }, + }, + Targets: map[string]*structs.DiscoveryTarget{ + "other.default.default.external.cluster-01": newTarget(structs.DiscoveryTargetOpts{ + Service: "other", + Peer: "cluster-01", + }, func(t *structs.DiscoveryTarget) { + t.SNI = "" + t.Name = "" + t.Datacenter = "" + }), + }, + } + return compileTestCase{entries: entries, expect: expect} +} + func testcase_DatacenterRedirect_WithMeshGateways() compileTestCase { entries := newEntries() entries.AddProxyDefaults(&structs.ProxyConfigEntry{ diff --git a/agent/structs/config_entry_discoverychain.go b/agent/structs/config_entry_discoverychain.go index 0ea2609551..7867602cae 100644 --- a/agent/structs/config_entry_discoverychain.go +++ b/agent/structs/config_entry_discoverychain.go @@ -964,11 +964,18 @@ func (e *ServiceResolverConfigEntry) Validate() error { // TODO(rb): prevent subsets and default subsets from being defined? 
- if r.Service == "" && r.ServiceSubset == "" && r.Namespace == "" && r.Partition == "" && r.Datacenter == "" { + if r.isEmpty() { return fmt.Errorf("Redirect is empty") } - if r.Service == "" { + switch { + case r.Peer != "" && r.ServiceSubset != "": + return fmt.Errorf("Redirect.Peer cannot be set with Redirect.ServiceSubset") + case r.Peer != "" && r.Partition != "": + return fmt.Errorf("Redirect.Partition cannot be set with Redirect.Peer") + case r.Peer != "" && r.Datacenter != "": + return fmt.Errorf("Redirect.Peer cannot be set with Redirect.Datacenter") + case r.Service == "": if r.ServiceSubset != "" { return fmt.Errorf("Redirect.ServiceSubset defined without Redirect.Service") } @@ -978,9 +985,12 @@ func (e *ServiceResolverConfigEntry) Validate() error { if r.Partition != "" { return fmt.Errorf("Redirect.Partition defined without Redirect.Service") } - } else if r.Service == e.Name { - if r.ServiceSubset != "" && !isSubset(r.ServiceSubset) { - return fmt.Errorf("Redirect.ServiceSubset %q is not a valid subset of %q", r.ServiceSubset, r.Service) + if r.Peer != "" { + return fmt.Errorf("Redirect.Peer defined without Redirect.Service") + } + case r.ServiceSubset != "" && (r.Service == "" || r.Service == e.Name): + if !isSubset(r.ServiceSubset) { + return fmt.Errorf("Redirect.ServiceSubset %q is not a valid subset of %q", r.ServiceSubset, e.Name) } } } @@ -1231,6 +1241,10 @@ type ServiceResolverRedirect struct { // Datacenter is the datacenter to resolve the service from instead of the // current one (optional). Datacenter string `json:",omitempty"` + + // Peer is the name of the cluster peer to resolve the service from instead + // of the current one (optional). + Peer string `json:",omitempty"` } func (r *ServiceResolverRedirect) ToDiscoveryTargetOpts() DiscoveryTargetOpts { @@ -1240,9 +1254,14 @@ func (r *ServiceResolverRedirect) ToDiscoveryTargetOpts() DiscoveryTargetOpts { Namespace: r.Namespace, Partition: r.Partition, Datacenter: r.Datacenter, + Peer: r.Peer, } } +func (r *ServiceResolverRedirect) isEmpty() bool { + return r.Service == "" && r.ServiceSubset == "" && r.Namespace == "" && r.Partition == "" && r.Datacenter == "" && r.Peer == "" +} + // There are some restrictions on what is allowed in here: // // - Service, ServiceSubset, Namespace, Datacenters, and Targets cannot all be diff --git a/agent/structs/config_entry_discoverychain_oss_test.go b/agent/structs/config_entry_discoverychain_oss_test.go index 81bf6541a1..9f962c8bd4 100644 --- a/agent/structs/config_entry_discoverychain_oss_test.go +++ b/agent/structs/config_entry_discoverychain_oss_test.go @@ -72,6 +72,28 @@ func TestServiceResolverConfigEntry_OSS(t *testing.T) { }, validateErr: `Bad Failover["*"]: Setting Namespace requires Consul Enterprise`, }, + { + name: "setting redirect Namespace on OSS", + entry: &ServiceResolverConfigEntry{ + Kind: ServiceResolver, + Name: "test", + Redirect: &ServiceResolverRedirect{ + Namespace: "ns1", + }, + }, + validateErr: `Redirect: Setting Namespace requires Consul Enterprise`, + }, + { + name: "setting redirect Partition on OSS", + entry: &ServiceResolverConfigEntry{ + Kind: ServiceResolver, + Name: "test", + Redirect: &ServiceResolverRedirect{ + Partition: "ap1", + }, + }, + validateErr: `Redirect: Setting Partition requires Consul Enterprise`, + }, } // Bulk add a bunch of similar validation cases. 
diff --git a/agent/structs/config_entry_discoverychain_test.go b/agent/structs/config_entry_discoverychain_test.go index 2580ed4c1b..0d6691fa02 100644 --- a/agent/structs/config_entry_discoverychain_test.go +++ b/agent/structs/config_entry_discoverychain_test.go @@ -655,6 +655,41 @@ func TestServiceResolverConfigEntry(t *testing.T) { }, validateErr: `Redirect.ServiceSubset "gone" is not a valid subset of "test"`, }, + { + name: "redirect with peer and subset", + entry: &ServiceResolverConfigEntry{ + Kind: ServiceResolver, + Name: "test", + Redirect: &ServiceResolverRedirect{ + Peer: "cluster-01", + ServiceSubset: "gone", + }, + }, + validateErr: `Redirect.Peer cannot be set with Redirect.ServiceSubset`, + }, + { + name: "redirect with peer and datacenter", + entry: &ServiceResolverConfigEntry{ + Kind: ServiceResolver, + Name: "test", + Redirect: &ServiceResolverRedirect{ + Peer: "cluster-01", + Datacenter: "dc2", + }, + }, + validateErr: `Redirect.Peer cannot be set with Redirect.Datacenter`, + }, + { + name: "redirect with peer but no service", + entry: &ServiceResolverConfigEntry{ + Kind: ServiceResolver, + Name: "test", + Redirect: &ServiceResolverRedirect{ + Peer: "cluster-01", + }, + }, + validateErr: `Redirect.Peer defined without Redirect.Service`, + }, { name: "self redirect with valid subset", entry: &ServiceResolverConfigEntry{ @@ -669,6 +704,17 @@ func TestServiceResolverConfigEntry(t *testing.T) { }, }, }, + { + name: "redirect to peer", + entry: &ServiceResolverConfigEntry{ + Kind: ServiceResolver, + Name: "test", + Redirect: &ServiceResolverRedirect{ + Service: "other", + Peer: "cluster-01", + }, + }, + }, { name: "simple wildcard failover", entry: &ServiceResolverConfigEntry{ diff --git a/api/config_entry_discoverychain.go b/api/config_entry_discoverychain.go index fb22baabdb..f827708ee2 100644 --- a/api/config_entry_discoverychain.go +++ b/api/config_entry_discoverychain.go @@ -219,6 +219,7 @@ type ServiceResolverRedirect struct { Namespace string `json:",omitempty"` Partition string `json:",omitempty"` Datacenter string `json:",omitempty"` + Peer string `json:",omitempty"` } type ServiceResolverFailover struct { diff --git a/api/config_entry_discoverychain_test.go b/api/config_entry_discoverychain_test.go index 672aae1af5..c990fa0c68 100644 --- a/api/config_entry_discoverychain_test.go +++ b/api/config_entry_discoverychain_test.go @@ -193,6 +193,20 @@ func TestAPI_ConfigEntry_DiscoveryChain(t *testing.T) { }, verify: verifyResolver, }, + { + name: "redirect to peer", + entry: &ServiceResolverConfigEntry{ + Kind: ServiceResolver, + Name: "test-redirect", + Partition: splitDefaultPartition, + Namespace: splitDefaultNamespace, + Redirect: &ServiceResolverRedirect{ + Service: "test-failover", + Peer: "cluster-01", + }, + }, + verify: verifyResolver, + }, { name: "mega splitter", // use one mega object to avoid multiple trips entry: &ServiceSplitterConfigEntry{ diff --git a/proto/pbconfigentry/config_entry.gen.go b/proto/pbconfigentry/config_entry.gen.go index 7c01387dfb..6bb8fdb0d0 100644 --- a/proto/pbconfigentry/config_entry.gen.go +++ b/proto/pbconfigentry/config_entry.gen.go @@ -689,6 +689,7 @@ func ServiceResolverRedirectToStructs(s *ServiceResolverRedirect, t *structs.Ser t.Namespace = s.Namespace t.Partition = s.Partition t.Datacenter = s.Datacenter + t.Peer = s.Peer } func ServiceResolverRedirectFromStructs(t 
*structs.ServiceResolverRedirect, s *S s.Namespace = t.Namespace s.Partition = t.Partition s.Datacenter = t.Datacenter + s.Peer = t.Peer } func ServiceResolverSubsetToStructs(s *ServiceResolverSubset, t *structs.ServiceResolverSubset) { if s == nil { diff --git a/proto/pbconfigentry/config_entry.pb.go b/proto/pbconfigentry/config_entry.pb.go index bfa51c8cc6..0a27021543 100644 --- a/proto/pbconfigentry/config_entry.pb.go +++ b/proto/pbconfigentry/config_entry.pb.go @@ -796,6 +796,7 @@ type ServiceResolverRedirect struct { Namespace string `protobuf:"bytes,3,opt,name=Namespace,proto3" json:"Namespace,omitempty"` Partition string `protobuf:"bytes,4,opt,name=Partition,proto3" json:"Partition,omitempty"` Datacenter string `protobuf:"bytes,5,opt,name=Datacenter,proto3" json:"Datacenter,omitempty"` + Peer string `protobuf:"bytes,6,opt,name=Peer,proto3" json:"Peer,omitempty"` } func (x *ServiceResolverRedirect) Reset() { @@ -865,6 +866,13 @@ func (x *ServiceResolverRedirect) GetDatacenter() string { return "" } +func (x *ServiceResolverRedirect) GetPeer() string { + if x != nil { + return x.Peer + } + return "" +} + // mog annotation: // // target=github.com/hashicorp/consul/agent/structs.ServiceResolverFailover @@ -2521,7 +2529,7 @@ var file_proto_pbconfigentry_config_entry_proto_rawDesc = []byte{ 0x65, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x20, 0x0a, 0x0b, 0x4f, 0x6e, 0x6c, 0x79, 0x50, 0x61, 0x73, 0x73, 0x69, 0x6e, 0x67, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, - 0x0b, 0x4f, 0x6e, 0x6c, 0x79, 0x50, 0x61, 0x73, 0x73, 0x69, 0x6e, 0x67, 0x22, 0xb5, 0x01, 0x0a, + 0x0b, 0x4f, 0x6e, 0x6c, 0x79, 0x50, 0x61, 0x73, 0x73, 0x69, 0x6e, 0x67, 0x22, 0xc9, 0x01, 0x0a, 0x17, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x52, 0x65, 0x73, 0x6f, 0x6c, 0x76, 0x65, 0x72, 0x52, 0x65, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x12, 0x18, 0x0a, 0x07, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x53, 0x65, 0x72, 0x76, 0x69, @@ -2533,341 +2541,342 @@ var file_proto_pbconfigentry_config_entry_proto_rawDesc = []byte{ 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x50, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1e, 0x0a, 0x0a, 0x44, 0x61, 0x74, 0x61, 0x63, 0x65, 0x6e, 0x74, 0x65, 0x72, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x44, 0x61, 0x74, 0x61, 0x63, 0x65, - 0x6e, 0x74, 0x65, 0x72, 0x22, 0xf9, 0x01, 0x0a, 0x17, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, - 0x52, 0x65, 0x73, 0x6f, 0x6c, 0x76, 0x65, 0x72, 0x46, 0x61, 0x69, 0x6c, 0x6f, 0x76, 0x65, 0x72, - 0x12, 0x18, 0x0a, 0x07, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x07, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x24, 0x0a, 0x0d, 0x53, 0x65, - 0x72, 0x76, 0x69, 0x63, 0x65, 0x53, 0x75, 0x62, 0x73, 0x65, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x0d, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x53, 0x75, 0x62, 0x73, 0x65, 0x74, - 0x12, 0x1c, 0x0a, 0x09, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x03, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x09, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x20, - 0x0a, 0x0b, 0x44, 0x61, 0x74, 0x61, 0x63, 0x65, 0x6e, 0x74, 0x65, 0x72, 0x73, 0x18, 0x04, 0x20, - 0x03, 0x28, 0x09, 0x52, 0x0b, 0x44, 0x61, 0x74, 0x61, 0x63, 0x65, 0x6e, 0x74, 0x65, 0x72, 0x73, - 0x12, 0x5e, 0x0a, 0x07, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, - 0x0b, 
0x32, 0x44, 0x2e, 0x68, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2e, 0x63, 0x6f, - 0x6e, 0x73, 0x75, 0x6c, 0x2e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2e, 0x63, 0x6f, - 0x6e, 0x66, 0x69, 0x67, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, - 0x65, 0x52, 0x65, 0x73, 0x6f, 0x6c, 0x76, 0x65, 0x72, 0x46, 0x61, 0x69, 0x6c, 0x6f, 0x76, 0x65, - 0x72, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x52, 0x07, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x73, - 0x22, 0xcf, 0x01, 0x0a, 0x1d, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x52, 0x65, 0x73, 0x6f, - 0x6c, 0x76, 0x65, 0x72, 0x46, 0x61, 0x69, 0x6c, 0x6f, 0x76, 0x65, 0x72, 0x54, 0x61, 0x72, 0x67, - 0x65, 0x74, 0x12, 0x18, 0x0a, 0x07, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x07, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x24, 0x0a, 0x0d, - 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x53, 0x75, 0x62, 0x73, 0x65, 0x74, 0x18, 0x02, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x0d, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x53, 0x75, 0x62, 0x73, - 0x65, 0x74, 0x12, 0x1c, 0x0a, 0x09, 0x50, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, - 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x50, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, - 0x12, 0x1c, 0x0a, 0x09, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x04, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x09, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x1e, - 0x0a, 0x0a, 0x44, 0x61, 0x74, 0x61, 0x63, 0x65, 0x6e, 0x74, 0x65, 0x72, 0x18, 0x05, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x0a, 0x44, 0x61, 0x74, 0x61, 0x63, 0x65, 0x6e, 0x74, 0x65, 0x72, 0x12, 0x12, - 0x0a, 0x04, 0x50, 0x65, 0x65, 0x72, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x50, 0x65, - 0x65, 0x72, 0x22, 0xc7, 0x02, 0x0a, 0x0c, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, - 0x63, 0x65, 0x72, 0x12, 0x16, 0x0a, 0x06, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x06, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x5d, 0x0a, 0x0e, 0x52, - 0x69, 0x6e, 0x67, 0x48, 0x61, 0x73, 0x68, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x02, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x35, 0x2e, 0x68, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2e, - 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2e, - 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x2e, 0x52, 0x69, 0x6e, 0x67, - 0x48, 0x61, 0x73, 0x68, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x0e, 0x52, 0x69, 0x6e, 0x67, - 0x48, 0x61, 0x73, 0x68, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x69, 0x0a, 0x12, 0x4c, 0x65, - 0x61, 0x73, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, - 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x39, 0x2e, 0x68, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, - 0x72, 0x70, 0x2e, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, - 0x61, 0x6c, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x2e, 0x4c, - 0x65, 0x61, 0x73, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, - 0x67, 0x52, 0x12, 0x4c, 0x65, 0x61, 0x73, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x43, - 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x55, 0x0a, 0x0c, 0x48, 0x61, 0x73, 0x68, 0x50, 0x6f, 0x6c, - 0x69, 0x63, 0x69, 0x65, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x31, 0x2e, 0x68, 0x61, - 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2e, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2e, 0x69, - 0x6e, 0x74, 0x65, 0x72, 0x6e, 
0x61, 0x6c, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x65, 0x6e, - 0x74, 0x72, 0x79, 0x2e, 0x48, 0x61, 0x73, 0x68, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x0c, - 0x48, 0x61, 0x73, 0x68, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, 0x22, 0x64, 0x0a, 0x0e, - 0x52, 0x69, 0x6e, 0x67, 0x48, 0x61, 0x73, 0x68, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x28, - 0x0a, 0x0f, 0x4d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x52, 0x69, 0x6e, 0x67, 0x53, 0x69, 0x7a, - 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0f, 0x4d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, - 0x52, 0x69, 0x6e, 0x67, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x28, 0x0a, 0x0f, 0x4d, 0x61, 0x78, 0x69, - 0x6d, 0x75, 0x6d, 0x52, 0x69, 0x6e, 0x67, 0x53, 0x69, 0x7a, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, - 0x04, 0x52, 0x0f, 0x4d, 0x61, 0x78, 0x69, 0x6d, 0x75, 0x6d, 0x52, 0x69, 0x6e, 0x67, 0x53, 0x69, - 0x7a, 0x65, 0x22, 0x36, 0x0a, 0x12, 0x4c, 0x65, 0x61, 0x73, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x20, 0x0a, 0x0b, 0x43, 0x68, 0x6f, 0x69, - 0x63, 0x65, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0b, 0x43, - 0x68, 0x6f, 0x69, 0x63, 0x65, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x22, 0xd3, 0x01, 0x0a, 0x0a, 0x48, - 0x61, 0x73, 0x68, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x46, 0x69, 0x65, - 0x6c, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x12, - 0x1e, 0x0a, 0x0a, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x0a, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, - 0x57, 0x0a, 0x0c, 0x43, 0x6f, 0x6f, 0x6b, 0x69, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, - 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x33, 0x2e, 0x68, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, + 0x6e, 0x74, 0x65, 0x72, 0x12, 0x12, 0x0a, 0x04, 0x50, 0x65, 0x65, 0x72, 0x18, 0x06, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x04, 0x50, 0x65, 0x65, 0x72, 0x22, 0xf9, 0x01, 0x0a, 0x17, 0x53, 0x65, 0x72, + 0x76, 0x69, 0x63, 0x65, 0x52, 0x65, 0x73, 0x6f, 0x6c, 0x76, 0x65, 0x72, 0x46, 0x61, 0x69, 0x6c, + 0x6f, 0x76, 0x65, 0x72, 0x12, 0x18, 0x0a, 0x07, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x24, + 0x0a, 0x0d, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x53, 0x75, 0x62, 0x73, 0x65, 0x74, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x53, 0x75, + 0x62, 0x73, 0x65, 0x74, 0x12, 0x1c, 0x0a, 0x09, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, + 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, + 0x63, 0x65, 0x12, 0x20, 0x0a, 0x0b, 0x44, 0x61, 0x74, 0x61, 0x63, 0x65, 0x6e, 0x74, 0x65, 0x72, + 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0b, 0x44, 0x61, 0x74, 0x61, 0x63, 0x65, 0x6e, + 0x74, 0x65, 0x72, 0x73, 0x12, 0x5e, 0x0a, 0x07, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x73, 0x18, + 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x44, 0x2e, 0x68, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2e, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, - 0x6c, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x2e, 0x43, 0x6f, - 0x6f, 0x6b, 0x69, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x0c, 0x43, 0x6f, 0x6f, 0x6b, - 0x69, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x1a, 0x0a, 0x08, 0x53, 0x6f, 0x75, 0x72, - 0x63, 0x65, 0x49, 0x50, 0x18, 0x04, 0x20, 0x01, 0x28, 
0x08, 0x52, 0x08, 0x53, 0x6f, 0x75, 0x72, - 0x63, 0x65, 0x49, 0x50, 0x12, 0x1a, 0x0a, 0x08, 0x54, 0x65, 0x72, 0x6d, 0x69, 0x6e, 0x61, 0x6c, - 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x54, 0x65, 0x72, 0x6d, 0x69, 0x6e, 0x61, 0x6c, - 0x22, 0x69, 0x0a, 0x0c, 0x43, 0x6f, 0x6f, 0x6b, 0x69, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, - 0x12, 0x18, 0x0a, 0x07, 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x08, 0x52, 0x07, 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x2b, 0x0a, 0x03, 0x54, 0x54, - 0x4c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x52, 0x03, 0x54, 0x54, 0x4c, 0x12, 0x12, 0x0a, 0x04, 0x50, 0x61, 0x74, 0x68, 0x18, - 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x50, 0x61, 0x74, 0x68, 0x22, 0xbf, 0x02, 0x0a, 0x0e, - 0x49, 0x6e, 0x67, 0x72, 0x65, 0x73, 0x73, 0x47, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x12, 0x49, - 0x0a, 0x03, 0x54, 0x4c, 0x53, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x37, 0x2e, 0x68, 0x61, - 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2e, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2e, 0x69, - 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x65, 0x6e, - 0x74, 0x72, 0x79, 0x2e, 0x47, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x54, 0x4c, 0x53, 0x43, 0x6f, - 0x6e, 0x66, 0x69, 0x67, 0x52, 0x03, 0x54, 0x4c, 0x53, 0x12, 0x54, 0x0a, 0x09, 0x4c, 0x69, 0x73, - 0x74, 0x65, 0x6e, 0x65, 0x72, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x36, 0x2e, 0x68, - 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2e, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2e, - 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x65, - 0x6e, 0x74, 0x72, 0x79, 0x2e, 0x49, 0x6e, 0x67, 0x72, 0x65, 0x73, 0x73, 0x4c, 0x69, 0x73, 0x74, - 0x65, 0x6e, 0x65, 0x72, 0x52, 0x09, 0x4c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x73, 0x12, - 0x53, 0x0a, 0x04, 0x4d, 0x65, 0x74, 0x61, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x3f, 0x2e, - 0x68, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2e, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6c, - 0x2e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, - 0x65, 0x6e, 0x74, 0x72, 0x79, 0x2e, 0x49, 0x6e, 0x67, 0x72, 0x65, 0x73, 0x73, 0x47, 0x61, 0x74, - 0x65, 0x77, 0x61, 0x79, 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x04, - 0x4d, 0x65, 0x74, 0x61, 0x1a, 0x37, 0x0a, 0x09, 0x4d, 0x65, 0x74, 0x61, 0x45, 0x6e, 0x74, 0x72, - 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, - 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0xea, 0x01, - 0x0a, 0x10, 0x47, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x54, 0x4c, 0x53, 0x43, 0x6f, 0x6e, 0x66, - 0x69, 0x67, 0x12, 0x18, 0x0a, 0x07, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x08, 0x52, 0x07, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x12, 0x4c, 0x0a, 0x03, - 0x53, 0x44, 0x53, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3a, 0x2e, 0x68, 0x61, 0x73, 0x68, + 0x6c, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x2e, 0x53, 0x65, + 0x72, 0x76, 0x69, 0x63, 0x65, 0x52, 0x65, 0x73, 0x6f, 0x6c, 0x76, 0x65, 0x72, 0x46, 0x61, 0x69, + 0x6c, 0x6f, 0x76, 0x65, 0x72, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x52, 0x07, 
0x54, 0x61, 0x72, + 0x67, 0x65, 0x74, 0x73, 0x22, 0xcf, 0x01, 0x0a, 0x1d, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, + 0x52, 0x65, 0x73, 0x6f, 0x6c, 0x76, 0x65, 0x72, 0x46, 0x61, 0x69, 0x6c, 0x6f, 0x76, 0x65, 0x72, + 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x12, 0x18, 0x0a, 0x07, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, + 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, + 0x12, 0x24, 0x0a, 0x0d, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x53, 0x75, 0x62, 0x73, 0x65, + 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, + 0x53, 0x75, 0x62, 0x73, 0x65, 0x74, 0x12, 0x1c, 0x0a, 0x09, 0x50, 0x61, 0x72, 0x74, 0x69, 0x74, + 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x50, 0x61, 0x72, 0x74, 0x69, + 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1c, 0x0a, 0x09, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, + 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, + 0x63, 0x65, 0x12, 0x1e, 0x0a, 0x0a, 0x44, 0x61, 0x74, 0x61, 0x63, 0x65, 0x6e, 0x74, 0x65, 0x72, + 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x44, 0x61, 0x74, 0x61, 0x63, 0x65, 0x6e, 0x74, + 0x65, 0x72, 0x12, 0x12, 0x0a, 0x04, 0x50, 0x65, 0x65, 0x72, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x04, 0x50, 0x65, 0x65, 0x72, 0x22, 0xc7, 0x02, 0x0a, 0x0c, 0x4c, 0x6f, 0x61, 0x64, 0x42, + 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x72, 0x12, 0x16, 0x0a, 0x06, 0x50, 0x6f, 0x6c, 0x69, 0x63, + 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, + 0x5d, 0x0a, 0x0e, 0x52, 0x69, 0x6e, 0x67, 0x48, 0x61, 0x73, 0x68, 0x43, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x35, 0x2e, 0x68, 0x61, 0x73, 0x68, 0x69, 0x63, + 0x6f, 0x72, 0x70, 0x2e, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2e, 0x69, 0x6e, 0x74, 0x65, 0x72, + 0x6e, 0x61, 0x6c, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x2e, + 0x52, 0x69, 0x6e, 0x67, 0x48, 0x61, 0x73, 0x68, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x0e, + 0x52, 0x69, 0x6e, 0x67, 0x48, 0x61, 0x73, 0x68, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x69, + 0x0a, 0x12, 0x4c, 0x65, 0x61, 0x73, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x43, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x39, 0x2e, 0x68, 0x61, 0x73, + 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2e, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2e, 0x69, 0x6e, + 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x65, 0x6e, 0x74, + 0x72, 0x79, 0x2e, 0x4c, 0x65, 0x61, 0x73, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x43, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x12, 0x4c, 0x65, 0x61, 0x73, 0x74, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x55, 0x0a, 0x0c, 0x48, 0x61, 0x73, + 0x68, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x31, 0x2e, 0x68, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2e, 0x63, 0x6f, 0x6e, 0x73, + 0x75, 0x6c, 0x2e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2e, 0x63, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x2e, 0x48, 0x61, 0x73, 0x68, 0x50, 0x6f, 0x6c, 0x69, + 0x63, 0x79, 0x52, 0x0c, 0x48, 0x61, 0x73, 0x68, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, + 0x22, 0x64, 0x0a, 0x0e, 0x52, 0x69, 0x6e, 0x67, 0x48, 0x61, 0x73, 0x68, 0x43, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x12, 0x28, 0x0a, 0x0f, 0x4d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x52, 0x69, 0x6e, + 0x67, 
0x53, 0x69, 0x7a, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0f, 0x4d, 0x69, 0x6e, + 0x69, 0x6d, 0x75, 0x6d, 0x52, 0x69, 0x6e, 0x67, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x28, 0x0a, 0x0f, + 0x4d, 0x61, 0x78, 0x69, 0x6d, 0x75, 0x6d, 0x52, 0x69, 0x6e, 0x67, 0x53, 0x69, 0x7a, 0x65, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0f, 0x4d, 0x61, 0x78, 0x69, 0x6d, 0x75, 0x6d, 0x52, 0x69, + 0x6e, 0x67, 0x53, 0x69, 0x7a, 0x65, 0x22, 0x36, 0x0a, 0x12, 0x4c, 0x65, 0x61, 0x73, 0x74, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x20, 0x0a, 0x0b, + 0x43, 0x68, 0x6f, 0x69, 0x63, 0x65, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x0d, 0x52, 0x0b, 0x43, 0x68, 0x6f, 0x69, 0x63, 0x65, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x22, 0xd3, + 0x01, 0x0a, 0x0a, 0x48, 0x61, 0x73, 0x68, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x14, 0x0a, + 0x05, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x46, 0x69, + 0x65, 0x6c, 0x64, 0x12, 0x1e, 0x0a, 0x0a, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x56, 0x61, 0x6c, 0x75, + 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x56, 0x61, + 0x6c, 0x75, 0x65, 0x12, 0x57, 0x0a, 0x0c, 0x43, 0x6f, 0x6f, 0x6b, 0x69, 0x65, 0x43, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x33, 0x2e, 0x68, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2e, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x65, 0x6e, 0x74, 0x72, - 0x79, 0x2e, 0x47, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x54, 0x4c, 0x53, 0x53, 0x44, 0x53, 0x43, - 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x03, 0x53, 0x44, 0x53, 0x12, 0x24, 0x0a, 0x0d, 0x54, 0x4c, - 0x53, 0x4d, 0x69, 0x6e, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x0d, 0x54, 0x4c, 0x53, 0x4d, 0x69, 0x6e, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, - 0x12, 0x24, 0x0a, 0x0d, 0x54, 0x4c, 0x53, 0x4d, 0x61, 0x78, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, - 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x54, 0x4c, 0x53, 0x4d, 0x61, 0x78, 0x56, - 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x22, 0x0a, 0x0c, 0x43, 0x69, 0x70, 0x68, 0x65, 0x72, - 0x53, 0x75, 0x69, 0x74, 0x65, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0c, 0x43, 0x69, - 0x70, 0x68, 0x65, 0x72, 0x53, 0x75, 0x69, 0x74, 0x65, 0x73, 0x22, 0x5b, 0x0a, 0x13, 0x47, 0x61, - 0x74, 0x65, 0x77, 0x61, 0x79, 0x54, 0x4c, 0x53, 0x53, 0x44, 0x53, 0x43, 0x6f, 0x6e, 0x66, 0x69, - 0x67, 0x12, 0x20, 0x0a, 0x0b, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x4e, 0x61, 0x6d, 0x65, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x4e, - 0x61, 0x6d, 0x65, 0x12, 0x22, 0x0a, 0x0c, 0x43, 0x65, 0x72, 0x74, 0x52, 0x65, 0x73, 0x6f, 0x75, - 0x72, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x43, 0x65, 0x72, 0x74, 0x52, - 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x22, 0xdf, 0x01, 0x0a, 0x0f, 0x49, 0x6e, 0x67, 0x72, - 0x65, 0x73, 0x73, 0x4c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x12, 0x12, 0x0a, 0x04, 0x50, - 0x6f, 0x72, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x04, 0x50, 0x6f, 0x72, 0x74, 0x12, - 0x1a, 0x0a, 0x08, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x08, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x12, 0x51, 0x0a, 0x08, 0x53, - 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x35, 0x2e, - 0x68, 0x61, 0x73, 0x68, 0x69, 0x63, 
0x6f, 0x72, 0x70, 0x2e, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6c, - 0x2e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, - 0x65, 0x6e, 0x74, 0x72, 0x79, 0x2e, 0x49, 0x6e, 0x67, 0x72, 0x65, 0x73, 0x73, 0x53, 0x65, 0x72, - 0x76, 0x69, 0x63, 0x65, 0x52, 0x08, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x12, 0x49, - 0x0a, 0x03, 0x54, 0x4c, 0x53, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x37, 0x2e, 0x68, 0x61, - 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2e, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2e, 0x69, - 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x65, 0x6e, - 0x74, 0x72, 0x79, 0x2e, 0x47, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x54, 0x4c, 0x53, 0x43, 0x6f, - 0x6e, 0x66, 0x69, 0x67, 0x52, 0x03, 0x54, 0x4c, 0x53, 0x22, 0xbe, 0x04, 0x0a, 0x0e, 0x49, 0x6e, - 0x67, 0x72, 0x65, 0x73, 0x73, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x12, 0x0a, 0x04, - 0x4e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x4e, 0x61, 0x6d, 0x65, - 0x12, 0x14, 0x0a, 0x05, 0x48, 0x6f, 0x73, 0x74, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, - 0x05, 0x48, 0x6f, 0x73, 0x74, 0x73, 0x12, 0x50, 0x0a, 0x03, 0x54, 0x4c, 0x53, 0x18, 0x03, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x3e, 0x2e, 0x68, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2e, - 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2e, - 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x2e, 0x47, 0x61, 0x74, 0x65, - 0x77, 0x61, 0x79, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x54, 0x4c, 0x53, 0x43, 0x6f, 0x6e, - 0x66, 0x69, 0x67, 0x52, 0x03, 0x54, 0x4c, 0x53, 0x12, 0x62, 0x0a, 0x0e, 0x52, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x3a, 0x2e, 0x68, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2e, 0x63, 0x6f, 0x6e, - 0x73, 0x75, 0x6c, 0x2e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2e, 0x63, 0x6f, 0x6e, - 0x66, 0x69, 0x67, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x2e, 0x48, 0x54, 0x54, 0x50, 0x48, 0x65, 0x61, - 0x64, 0x65, 0x72, 0x4d, 0x6f, 0x64, 0x69, 0x66, 0x69, 0x65, 0x72, 0x73, 0x52, 0x0e, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x12, 0x64, 0x0a, 0x0f, - 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x18, - 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3a, 0x2e, 0x68, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, - 0x70, 0x2e, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, - 0x6c, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x2e, 0x48, 0x54, - 0x54, 0x50, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x4d, 0x6f, 0x64, 0x69, 0x66, 0x69, 0x65, 0x72, - 0x73, 0x52, 0x0f, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x48, 0x65, 0x61, 0x64, 0x65, - 0x72, 0x73, 0x12, 0x53, 0x0a, 0x04, 0x4d, 0x65, 0x74, 0x61, 0x18, 0x06, 0x20, 0x03, 0x28, 0x0b, - 0x32, 0x3f, 0x2e, 0x68, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2e, 0x63, 0x6f, 0x6e, + 0x79, 0x2e, 0x43, 0x6f, 0x6f, 0x6b, 0x69, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x0c, + 0x43, 0x6f, 0x6f, 0x6b, 0x69, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x1a, 0x0a, 0x08, + 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x49, 0x50, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, + 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x49, 0x50, 0x12, 0x1a, 0x0a, 0x08, 0x54, 0x65, 0x72, 0x6d, + 0x69, 0x6e, 0x61, 0x6c, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 
0x52, 0x08, 0x54, 0x65, 0x72, 0x6d, + 0x69, 0x6e, 0x61, 0x6c, 0x22, 0x69, 0x0a, 0x0c, 0x43, 0x6f, 0x6f, 0x6b, 0x69, 0x65, 0x43, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x12, 0x18, 0x0a, 0x07, 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x2b, + 0x0a, 0x03, 0x54, 0x54, 0x4c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, + 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x03, 0x54, 0x54, 0x4c, 0x12, 0x12, 0x0a, 0x04, 0x50, + 0x61, 0x74, 0x68, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x50, 0x61, 0x74, 0x68, 0x22, + 0xbf, 0x02, 0x0a, 0x0e, 0x49, 0x6e, 0x67, 0x72, 0x65, 0x73, 0x73, 0x47, 0x61, 0x74, 0x65, 0x77, + 0x61, 0x79, 0x12, 0x49, 0x0a, 0x03, 0x54, 0x4c, 0x53, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x37, 0x2e, 0x68, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2e, 0x63, 0x6f, 0x6e, 0x73, + 0x75, 0x6c, 0x2e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2e, 0x63, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x2e, 0x47, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x54, + 0x4c, 0x53, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x03, 0x54, 0x4c, 0x53, 0x12, 0x54, 0x0a, + 0x09, 0x4c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x36, 0x2e, 0x68, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2e, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x2e, 0x49, 0x6e, 0x67, 0x72, 0x65, 0x73, 0x73, - 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x45, 0x6e, 0x74, 0x72, - 0x79, 0x52, 0x04, 0x4d, 0x65, 0x74, 0x61, 0x12, 0x58, 0x0a, 0x0e, 0x45, 0x6e, 0x74, 0x65, 0x72, - 0x70, 0x72, 0x69, 0x73, 0x65, 0x4d, 0x65, 0x74, 0x61, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x30, 0x2e, 0x68, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2e, 0x63, 0x6f, 0x6e, 0x73, - 0x75, 0x6c, 0x2e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, - 0x6f, 0x6e, 0x2e, 0x45, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x69, 0x73, 0x65, 0x4d, 0x65, 0x74, - 0x61, 0x52, 0x0e, 0x45, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x69, 0x73, 0x65, 0x4d, 0x65, 0x74, - 0x61, 0x1a, 0x37, 0x0a, 0x09, 0x4d, 0x65, 0x74, 0x61, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, - 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, - 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x67, 0x0a, 0x17, 0x47, 0x61, - 0x74, 0x65, 0x77, 0x61, 0x79, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x54, 0x4c, 0x53, 0x43, - 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x4c, 0x0a, 0x03, 0x53, 0x44, 0x53, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x3a, 0x2e, 0x68, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2e, 0x63, - 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2e, 0x63, - 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x2e, 0x47, 0x61, 0x74, 0x65, 0x77, - 0x61, 0x79, 0x54, 0x4c, 0x53, 0x53, 0x44, 0x53, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x03, - 0x53, 0x44, 0x53, 0x22, 0xcb, 0x02, 0x0a, 0x13, 0x48, 0x54, 0x54, 0x50, 0x48, 0x65, 0x61, 0x64, - 0x65, 0x72, 0x4d, 0x6f, 0x64, 0x69, 0x66, 0x69, 0x65, 0x72, 0x73, 0x12, 0x55, 0x0a, 0x03, 
0x41, - 0x64, 0x64, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x43, 0x2e, 0x68, 0x61, 0x73, 0x68, 0x69, - 0x63, 0x6f, 0x72, 0x70, 0x2e, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2e, 0x69, 0x6e, 0x74, 0x65, - 0x72, 0x6e, 0x61, 0x6c, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x65, 0x6e, 0x74, 0x72, 0x79, - 0x2e, 0x48, 0x54, 0x54, 0x50, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x4d, 0x6f, 0x64, 0x69, 0x66, - 0x69, 0x65, 0x72, 0x73, 0x2e, 0x41, 0x64, 0x64, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x03, 0x41, - 0x64, 0x64, 0x12, 0x55, 0x0a, 0x03, 0x53, 0x65, 0x74, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, - 0x43, 0x2e, 0x68, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2e, 0x63, 0x6f, 0x6e, 0x73, - 0x75, 0x6c, 0x2e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2e, 0x63, 0x6f, 0x6e, 0x66, - 0x69, 0x67, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x2e, 0x48, 0x54, 0x54, 0x50, 0x48, 0x65, 0x61, 0x64, - 0x65, 0x72, 0x4d, 0x6f, 0x64, 0x69, 0x66, 0x69, 0x65, 0x72, 0x73, 0x2e, 0x53, 0x65, 0x74, 0x45, - 0x6e, 0x74, 0x72, 0x79, 0x52, 0x03, 0x53, 0x65, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x52, 0x65, 0x6d, - 0x6f, 0x76, 0x65, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x06, 0x52, 0x65, 0x6d, 0x6f, 0x76, - 0x65, 0x1a, 0x36, 0x0a, 0x08, 0x41, 0x64, 0x64, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, - 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, - 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, - 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x36, 0x0a, 0x08, 0x53, 0x65, 0x74, + 0x4c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x52, 0x09, 0x4c, 0x69, 0x73, 0x74, 0x65, 0x6e, + 0x65, 0x72, 0x73, 0x12, 0x53, 0x0a, 0x04, 0x4d, 0x65, 0x74, 0x61, 0x18, 0x03, 0x20, 0x03, 0x28, + 0x0b, 0x32, 0x3f, 0x2e, 0x68, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2e, 0x63, 0x6f, + 0x6e, 0x73, 0x75, 0x6c, 0x2e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2e, 0x63, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x2e, 0x49, 0x6e, 0x67, 0x72, 0x65, 0x73, + 0x73, 0x47, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x45, 0x6e, 0x74, + 0x72, 0x79, 0x52, 0x04, 0x4d, 0x65, 0x74, 0x61, 0x1a, 0x37, 0x0a, 0x09, 0x4d, 0x65, 0x74, 0x61, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, - 0x01, 0x22, 0xf6, 0x01, 0x0a, 0x11, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x49, 0x6e, 0x74, - 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x50, 0x0a, 0x07, 0x53, 0x6f, 0x75, 0x72, 0x63, - 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x36, 0x2e, 0x68, 0x61, 0x73, 0x68, 0x69, - 0x63, 0x6f, 0x72, 0x70, 0x2e, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2e, 0x69, 0x6e, 0x74, 0x65, - 0x72, 0x6e, 0x61, 0x6c, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x65, 0x6e, 0x74, 0x72, 0x79, - 0x2e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x49, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, - 0x52, 0x07, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x12, 0x56, 0x0a, 0x04, 0x4d, 0x65, 0x74, - 0x61, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x42, 0x2e, 0x68, 0x61, 0x73, 0x68, 0x69, 0x63, - 0x6f, 0x72, 0x70, 0x2e, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2e, 0x69, 0x6e, 0x74, 0x65, 0x72, - 0x6e, 0x61, 0x6c, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x2e, - 0x53, 0x65, 0x72, 0x76, 
0x69, 0x63, 0x65, 0x49, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, - 0x73, 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x04, 0x4d, 0x65, 0x74, - 0x61, 0x1a, 0x37, 0x0a, 0x09, 0x4d, 0x65, 0x74, 0x61, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, - 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, - 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0xa6, 0x06, 0x0a, 0x0f, 0x53, - 0x6f, 0x75, 0x72, 0x63, 0x65, 0x49, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x12, - 0x0a, 0x04, 0x4e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x4e, 0x61, - 0x6d, 0x65, 0x12, 0x4e, 0x0a, 0x06, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x0e, 0x32, 0x36, 0x2e, 0x68, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2e, 0x63, - 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2e, 0x63, - 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x2e, 0x49, 0x6e, 0x74, 0x65, 0x6e, - 0x74, 0x69, 0x6f, 0x6e, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x06, 0x41, 0x63, 0x74, 0x69, - 0x6f, 0x6e, 0x12, 0x5c, 0x0a, 0x0b, 0x50, 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, - 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x3a, 0x2e, 0x68, 0x61, 0x73, 0x68, 0x69, 0x63, - 0x6f, 0x72, 0x70, 0x2e, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2e, 0x69, 0x6e, 0x74, 0x65, 0x72, - 0x6e, 0x61, 0x6c, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x2e, - 0x49, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, - 0x69, 0x6f, 0x6e, 0x52, 0x0b, 0x50, 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x73, - 0x12, 0x1e, 0x0a, 0x0a, 0x50, 0x72, 0x65, 0x63, 0x65, 0x64, 0x65, 0x6e, 0x63, 0x65, 0x18, 0x04, - 0x20, 0x01, 0x28, 0x05, 0x52, 0x0a, 0x50, 0x72, 0x65, 0x63, 0x65, 0x64, 0x65, 0x6e, 0x63, 0x65, - 0x12, 0x1a, 0x0a, 0x08, 0x4c, 0x65, 0x67, 0x61, 0x63, 0x79, 0x49, 0x44, 0x18, 0x05, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x08, 0x4c, 0x65, 0x67, 0x61, 0x63, 0x79, 0x49, 0x44, 0x12, 0x4e, 0x0a, 0x04, - 0x54, 0x79, 0x70, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x3a, 0x2e, 0x68, 0x61, 0x73, - 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2e, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2e, 0x69, 0x6e, - 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x65, 0x6e, 0x74, - 0x72, 0x79, 0x2e, 0x49, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x6f, 0x75, 0x72, - 0x63, 0x65, 0x54, 0x79, 0x70, 0x65, 0x52, 0x04, 0x54, 0x79, 0x70, 0x65, 0x12, 0x20, 0x0a, 0x0b, - 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x07, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x0b, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x66, - 0x0a, 0x0a, 0x4c, 0x65, 0x67, 0x61, 0x63, 0x79, 0x4d, 0x65, 0x74, 0x61, 0x18, 0x08, 0x20, 0x03, - 0x28, 0x0b, 0x32, 0x46, 0x2e, 0x68, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2e, 0x63, - 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2e, 0x63, - 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x2e, 0x53, 0x6f, 0x75, 0x72, 0x63, - 0x65, 0x49, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x4c, 0x65, 0x67, 0x61, 0x63, - 0x79, 0x4d, 0x65, 0x74, 0x61, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0a, 0x4c, 0x65, 0x67, 0x61, - 0x63, 0x79, 0x4d, 0x65, 0x74, 0x61, 0x12, 0x46, 
0x0a, 0x10, 0x4c, 0x65, 0x67, 0x61, 0x63, 0x79, - 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, - 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x10, 0x4c, 0x65, - 0x67, 0x61, 0x63, 0x79, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x46, - 0x0a, 0x10, 0x4c, 0x65, 0x67, 0x61, 0x63, 0x79, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x54, 0x69, - 0x6d, 0x65, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, - 0x74, 0x61, 0x6d, 0x70, 0x52, 0x10, 0x4c, 0x65, 0x67, 0x61, 0x63, 0x79, 0x55, 0x70, 0x64, 0x61, - 0x74, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x58, 0x0a, 0x0e, 0x45, 0x6e, 0x74, 0x65, 0x72, 0x70, - 0x72, 0x69, 0x73, 0x65, 0x4d, 0x65, 0x74, 0x61, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x30, - 0x2e, 0x68, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2e, 0x63, 0x6f, 0x6e, 0x73, 0x75, - 0x6c, 0x2e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, - 0x6e, 0x2e, 0x45, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x69, 0x73, 0x65, 0x4d, 0x65, 0x74, 0x61, - 0x52, 0x0e, 0x45, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x69, 0x73, 0x65, 0x4d, 0x65, 0x74, 0x61, - 0x12, 0x12, 0x0a, 0x04, 0x50, 0x65, 0x65, 0x72, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, - 0x50, 0x65, 0x65, 0x72, 0x1a, 0x3d, 0x0a, 0x0f, 0x4c, 0x65, 0x67, 0x61, 0x63, 0x79, 0x4d, 0x65, - 0x74, 0x61, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, - 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, - 0x02, 0x38, 0x01, 0x22, 0xb9, 0x01, 0x0a, 0x13, 0x49, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, - 0x6e, 0x50, 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x4e, 0x0a, 0x06, 0x41, - 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x36, 0x2e, 0x68, 0x61, - 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2e, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2e, 0x69, - 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x65, 0x6e, - 0x74, 0x72, 0x79, 0x2e, 0x49, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x41, 0x63, 0x74, - 0x69, 0x6f, 0x6e, 0x52, 0x06, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x52, 0x0a, 0x04, 0x48, - 0x54, 0x54, 0x50, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3e, 0x2e, 0x68, 0x61, 0x73, 0x68, - 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2e, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2e, 0x69, 0x6e, 0x74, - 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x65, 0x6e, 0x74, 0x72, - 0x79, 0x2e, 0x49, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x48, 0x54, 0x54, 0x50, 0x50, - 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x04, 0x48, 0x54, 0x54, 0x50, 0x22, - 0xed, 0x01, 0x0a, 0x17, 0x49, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x48, 0x54, 0x54, - 0x50, 0x50, 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x1c, 0x0a, 0x09, 0x50, - 0x61, 0x74, 0x68, 0x45, 0x78, 0x61, 0x63, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, - 0x50, 0x61, 0x74, 0x68, 0x45, 0x78, 0x61, 0x63, 0x74, 0x12, 0x1e, 0x0a, 0x0a, 0x50, 0x61, 0x74, - 0x68, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x02, 0x20, 0x01, 0x28, 
0x09, 0x52, 0x0a, 0x50, - 0x61, 0x74, 0x68, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, 0x1c, 0x0a, 0x09, 0x50, 0x61, 0x74, - 0x68, 0x52, 0x65, 0x67, 0x65, 0x78, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x50, 0x61, - 0x74, 0x68, 0x52, 0x65, 0x67, 0x65, 0x78, 0x12, 0x5c, 0x0a, 0x06, 0x48, 0x65, 0x61, 0x64, 0x65, - 0x72, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x44, 0x2e, 0x68, 0x61, 0x73, 0x68, 0x69, 0x63, - 0x6f, 0x72, 0x70, 0x2e, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2e, 0x69, 0x6e, 0x74, 0x65, 0x72, - 0x6e, 0x61, 0x6c, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x2e, - 0x49, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x48, 0x54, 0x54, 0x50, 0x48, 0x65, 0x61, - 0x64, 0x65, 0x72, 0x50, 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x06, 0x48, - 0x65, 0x61, 0x64, 0x65, 0x72, 0x12, 0x18, 0x0a, 0x07, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x73, - 0x18, 0x05, 0x20, 0x03, 0x28, 0x09, 0x52, 0x07, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x73, 0x22, - 0xc1, 0x01, 0x0a, 0x1d, 0x49, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x48, 0x54, 0x54, - 0x50, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x50, 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, - 0x6e, 0x12, 0x12, 0x0a, 0x04, 0x4e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x04, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x50, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x74, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x50, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x74, 0x12, - 0x14, 0x0a, 0x05, 0x45, 0x78, 0x61, 0x63, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, - 0x45, 0x78, 0x61, 0x63, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, - 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, 0x16, 0x0a, - 0x06, 0x53, 0x75, 0x66, 0x66, 0x69, 0x78, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x53, - 0x75, 0x66, 0x66, 0x69, 0x78, 0x12, 0x14, 0x0a, 0x05, 0x52, 0x65, 0x67, 0x65, 0x78, 0x18, 0x06, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x52, 0x65, 0x67, 0x65, 0x78, 0x12, 0x16, 0x0a, 0x06, 0x49, - 0x6e, 0x76, 0x65, 0x72, 0x74, 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, 0x52, 0x06, 0x49, 0x6e, 0x76, - 0x65, 0x72, 0x74, 0x2a, 0x77, 0x0a, 0x04, 0x4b, 0x69, 0x6e, 0x64, 0x12, 0x0f, 0x0a, 0x0b, 0x4b, - 0x69, 0x6e, 0x64, 0x55, 0x6e, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x10, 0x00, 0x12, 0x12, 0x0a, 0x0e, - 0x4b, 0x69, 0x6e, 0x64, 0x4d, 0x65, 0x73, 0x68, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x10, 0x01, - 0x12, 0x17, 0x0a, 0x13, 0x4b, 0x69, 0x6e, 0x64, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x52, - 0x65, 0x73, 0x6f, 0x6c, 0x76, 0x65, 0x72, 0x10, 0x02, 0x12, 0x16, 0x0a, 0x12, 0x4b, 0x69, 0x6e, - 0x64, 0x49, 0x6e, 0x67, 0x72, 0x65, 0x73, 0x73, 0x47, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x10, - 0x03, 0x12, 0x19, 0x0a, 0x15, 0x4b, 0x69, 0x6e, 0x64, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, - 0x49, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x10, 0x04, 0x2a, 0x26, 0x0a, 0x0f, - 0x49, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, - 0x08, 0x0a, 0x04, 0x44, 0x65, 0x6e, 0x79, 0x10, 0x00, 0x12, 0x09, 0x0a, 0x05, 0x41, 0x6c, 0x6c, - 0x6f, 0x77, 0x10, 0x01, 0x2a, 0x21, 0x0a, 0x13, 0x49, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, - 0x6e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0a, 0x0a, 0x06, 0x43, - 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x10, 0x00, 0x42, 0xa6, 0x02, 0x0a, 0x29, 0x63, 0x6f, 0x6d, 0x2e, + 0x01, 0x22, 0xea, 0x01, 0x0a, 0x10, 0x47, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x54, 0x4c, 0x53, + 
0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x18, 0x0a, 0x07, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, + 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, + 0x12, 0x4c, 0x0a, 0x03, 0x53, 0x44, 0x53, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3a, 0x2e, 0x68, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2e, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, - 0x65, 0x6e, 0x74, 0x72, 0x79, 0x42, 0x10, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x45, 0x6e, 0x74, - 0x72, 0x79, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x2f, 0x67, 0x69, 0x74, 0x68, 0x75, - 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x68, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2f, - 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x70, 0x62, 0x63, - 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x65, 0x6e, 0x74, 0x72, 0x79, 0xa2, 0x02, 0x04, 0x48, 0x43, 0x49, - 0x43, 0xaa, 0x02, 0x25, 0x48, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2e, 0x43, 0x6f, - 0x6e, 0x73, 0x75, 0x6c, 0x2e, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2e, 0x43, 0x6f, - 0x6e, 0x66, 0x69, 0x67, 0x65, 0x6e, 0x74, 0x72, 0x79, 0xca, 0x02, 0x25, 0x48, 0x61, 0x73, 0x68, - 0x69, 0x63, 0x6f, 0x72, 0x70, 0x5c, 0x43, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x5c, 0x49, 0x6e, 0x74, - 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x5c, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x65, 0x6e, 0x74, 0x72, - 0x79, 0xe2, 0x02, 0x31, 0x48, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x5c, 0x43, 0x6f, - 0x6e, 0x73, 0x75, 0x6c, 0x5c, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x5c, 0x43, 0x6f, - 0x6e, 0x66, 0x69, 0x67, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x5c, 0x47, 0x50, 0x42, 0x4d, 0x65, 0x74, - 0x61, 0x64, 0x61, 0x74, 0x61, 0xea, 0x02, 0x28, 0x48, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, - 0x70, 0x3a, 0x3a, 0x43, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x3a, 0x3a, 0x49, 0x6e, 0x74, 0x65, 0x72, - 0x6e, 0x61, 0x6c, 0x3a, 0x3a, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x65, 0x6e, 0x74, 0x72, 0x79, - 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x65, 0x6e, 0x74, 0x72, 0x79, 0x2e, 0x47, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x54, 0x4c, 0x53, + 0x53, 0x44, 0x53, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x03, 0x53, 0x44, 0x53, 0x12, 0x24, + 0x0a, 0x0d, 0x54, 0x4c, 0x53, 0x4d, 0x69, 0x6e, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x54, 0x4c, 0x53, 0x4d, 0x69, 0x6e, 0x56, 0x65, 0x72, + 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x24, 0x0a, 0x0d, 0x54, 0x4c, 0x53, 0x4d, 0x61, 0x78, 0x56, 0x65, + 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x54, 0x4c, 0x53, + 0x4d, 0x61, 0x78, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x22, 0x0a, 0x0c, 0x43, 0x69, + 0x70, 0x68, 0x65, 0x72, 0x53, 0x75, 0x69, 0x74, 0x65, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x09, + 0x52, 0x0c, 0x43, 0x69, 0x70, 0x68, 0x65, 0x72, 0x53, 0x75, 0x69, 0x74, 0x65, 0x73, 0x22, 0x5b, + 0x0a, 0x13, 0x47, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x54, 0x4c, 0x53, 0x53, 0x44, 0x53, 0x43, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x20, 0x0a, 0x0b, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, + 0x4e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x43, 0x6c, 0x75, 0x73, + 0x74, 0x65, 0x72, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x22, 0x0a, 0x0c, 0x43, 0x65, 0x72, 0x74, 0x52, + 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x43, + 0x65, 0x72, 0x74, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x22, 0xdf, 
0x01, 0x0a, 0x0f, + 0x49, 0x6e, 0x67, 0x72, 0x65, 0x73, 0x73, 0x4c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x12, + 0x12, 0x0a, 0x04, 0x50, 0x6f, 0x72, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x04, 0x50, + 0x6f, 0x72, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x12, + 0x51, 0x0a, 0x08, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, + 0x0b, 0x32, 0x35, 0x2e, 0x68, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2e, 0x63, 0x6f, + 0x6e, 0x73, 0x75, 0x6c, 0x2e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2e, 0x63, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x2e, 0x49, 0x6e, 0x67, 0x72, 0x65, 0x73, + 0x73, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x52, 0x08, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, + 0x65, 0x73, 0x12, 0x49, 0x0a, 0x03, 0x54, 0x4c, 0x53, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x37, 0x2e, 0x68, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2e, 0x63, 0x6f, 0x6e, 0x73, + 0x75, 0x6c, 0x2e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2e, 0x63, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x2e, 0x47, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x54, + 0x4c, 0x53, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x03, 0x54, 0x4c, 0x53, 0x22, 0xbe, 0x04, + 0x0a, 0x0e, 0x49, 0x6e, 0x67, 0x72, 0x65, 0x73, 0x73, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, + 0x12, 0x12, 0x0a, 0x04, 0x4e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, + 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x48, 0x6f, 0x73, 0x74, 0x73, 0x18, 0x02, 0x20, + 0x03, 0x28, 0x09, 0x52, 0x05, 0x48, 0x6f, 0x73, 0x74, 0x73, 0x12, 0x50, 0x0a, 0x03, 0x54, 0x4c, + 0x53, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3e, 0x2e, 0x68, 0x61, 0x73, 0x68, 0x69, 0x63, + 0x6f, 0x72, 0x70, 0x2e, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2e, 0x69, 0x6e, 0x74, 0x65, 0x72, + 0x6e, 0x61, 0x6c, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x2e, + 0x47, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x54, 0x4c, + 0x53, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x03, 0x54, 0x4c, 0x53, 0x12, 0x62, 0x0a, 0x0e, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x18, 0x04, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3a, 0x2e, 0x68, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, + 0x2e, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, + 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x2e, 0x48, 0x54, 0x54, + 0x50, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x4d, 0x6f, 0x64, 0x69, 0x66, 0x69, 0x65, 0x72, 0x73, + 0x52, 0x0e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, + 0x12, 0x64, 0x0a, 0x0f, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x48, 0x65, 0x61, 0x64, + 0x65, 0x72, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3a, 0x2e, 0x68, 0x61, 0x73, 0x68, + 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2e, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2e, 0x69, 0x6e, 0x74, + 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x65, 0x6e, 0x74, 0x72, + 0x79, 0x2e, 0x48, 0x54, 0x54, 0x50, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x4d, 0x6f, 0x64, 0x69, + 0x66, 0x69, 0x65, 0x72, 0x73, 0x52, 0x0f, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x48, + 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x12, 0x53, 0x0a, 0x04, 0x4d, 0x65, 0x74, 0x61, 0x18, 0x06, + 0x20, 
0x03, 0x28, 0x0b, 0x32, 0x3f, 0x2e, 0x68, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, + 0x2e, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, + 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x2e, 0x49, 0x6e, 0x67, + 0x72, 0x65, 0x73, 0x73, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x4d, 0x65, 0x74, 0x61, + 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x04, 0x4d, 0x65, 0x74, 0x61, 0x12, 0x58, 0x0a, 0x0e, 0x45, + 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x69, 0x73, 0x65, 0x4d, 0x65, 0x74, 0x61, 0x18, 0x07, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x30, 0x2e, 0x68, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2e, + 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2e, + 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x45, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x69, 0x73, + 0x65, 0x4d, 0x65, 0x74, 0x61, 0x52, 0x0e, 0x45, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x69, 0x73, + 0x65, 0x4d, 0x65, 0x74, 0x61, 0x1a, 0x37, 0x0a, 0x09, 0x4d, 0x65, 0x74, 0x61, 0x45, 0x6e, 0x74, + 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x67, + 0x0a, 0x17, 0x47, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, + 0x54, 0x4c, 0x53, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x4c, 0x0a, 0x03, 0x53, 0x44, 0x53, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3a, 0x2e, 0x68, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, + 0x72, 0x70, 0x2e, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, + 0x61, 0x6c, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x2e, 0x47, + 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x54, 0x4c, 0x53, 0x53, 0x44, 0x53, 0x43, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x52, 0x03, 0x53, 0x44, 0x53, 0x22, 0xcb, 0x02, 0x0a, 0x13, 0x48, 0x54, 0x54, 0x50, + 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x4d, 0x6f, 0x64, 0x69, 0x66, 0x69, 0x65, 0x72, 0x73, 0x12, + 0x55, 0x0a, 0x03, 0x41, 0x64, 0x64, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x43, 0x2e, 0x68, + 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2e, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2e, + 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x65, + 0x6e, 0x74, 0x72, 0x79, 0x2e, 0x48, 0x54, 0x54, 0x50, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x4d, + 0x6f, 0x64, 0x69, 0x66, 0x69, 0x65, 0x72, 0x73, 0x2e, 0x41, 0x64, 0x64, 0x45, 0x6e, 0x74, 0x72, + 0x79, 0x52, 0x03, 0x41, 0x64, 0x64, 0x12, 0x55, 0x0a, 0x03, 0x53, 0x65, 0x74, 0x18, 0x02, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x43, 0x2e, 0x68, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2e, + 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2e, + 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x2e, 0x48, 0x54, 0x54, 0x50, + 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x4d, 0x6f, 0x64, 0x69, 0x66, 0x69, 0x65, 0x72, 0x73, 0x2e, + 0x53, 0x65, 0x74, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x03, 0x53, 0x65, 0x74, 0x12, 0x16, 0x0a, + 0x06, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x06, 0x52, + 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x1a, 0x36, 0x0a, 0x08, 0x41, 0x64, 0x64, 0x45, 0x6e, 0x74, 0x72, + 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, + 0x6b, 0x65, 0x79, 0x12, 0x14, 
0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x36, 0x0a, + 0x08, 0x53, 0x65, 0x74, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, + 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0xf6, 0x01, 0x0a, 0x11, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, + 0x65, 0x49, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x50, 0x0a, 0x07, 0x53, + 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x36, 0x2e, 0x68, + 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2e, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2e, + 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x65, + 0x6e, 0x74, 0x72, 0x79, 0x2e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x49, 0x6e, 0x74, 0x65, 0x6e, + 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x07, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x12, 0x56, 0x0a, + 0x04, 0x4d, 0x65, 0x74, 0x61, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x42, 0x2e, 0x68, 0x61, + 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2e, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2e, 0x69, + 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x65, 0x6e, + 0x74, 0x72, 0x79, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x49, 0x6e, 0x74, 0x65, 0x6e, + 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, + 0x04, 0x4d, 0x65, 0x74, 0x61, 0x1a, 0x37, 0x0a, 0x09, 0x4d, 0x65, 0x74, 0x61, 0x45, 0x6e, 0x74, + 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0xa6, + 0x06, 0x0a, 0x0f, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x49, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x69, + 0x6f, 0x6e, 0x12, 0x12, 0x0a, 0x04, 0x4e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x04, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x4e, 0x0a, 0x06, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x36, 0x2e, 0x68, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, + 0x72, 0x70, 0x2e, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, + 0x61, 0x6c, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x2e, 0x49, + 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x06, + 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x5c, 0x0a, 0x0b, 0x50, 0x65, 0x72, 0x6d, 0x69, 0x73, + 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x3a, 0x2e, 0x68, 0x61, + 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2e, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2e, 0x69, + 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x65, 0x6e, + 0x74, 0x72, 0x79, 0x2e, 0x49, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x65, 0x72, + 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x0b, 0x50, 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, + 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x1e, 0x0a, 0x0a, 0x50, 0x72, 0x65, 0x63, 0x65, 0x64, 0x65, 0x6e, + 0x63, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0a, 0x50, 0x72, 0x65, 0x63, 0x65, 0x64, + 0x65, 0x6e, 0x63, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x4c, 
0x65, 0x67, 0x61, 0x63, 0x79, 0x49, 0x44, + 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x4c, 0x65, 0x67, 0x61, 0x63, 0x79, 0x49, 0x44, + 0x12, 0x4e, 0x0a, 0x04, 0x54, 0x79, 0x70, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x3a, + 0x2e, 0x68, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2e, 0x63, 0x6f, 0x6e, 0x73, 0x75, + 0x6c, 0x2e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x2e, 0x49, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, + 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x54, 0x79, 0x70, 0x65, 0x52, 0x04, 0x54, 0x79, 0x70, 0x65, + 0x12, 0x20, 0x0a, 0x0b, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, + 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, + 0x6f, 0x6e, 0x12, 0x66, 0x0a, 0x0a, 0x4c, 0x65, 0x67, 0x61, 0x63, 0x79, 0x4d, 0x65, 0x74, 0x61, + 0x18, 0x08, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x46, 0x2e, 0x68, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, + 0x72, 0x70, 0x2e, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, + 0x61, 0x6c, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x2e, 0x53, + 0x6f, 0x75, 0x72, 0x63, 0x65, 0x49, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x4c, + 0x65, 0x67, 0x61, 0x63, 0x79, 0x4d, 0x65, 0x74, 0x61, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0a, + 0x4c, 0x65, 0x67, 0x61, 0x63, 0x79, 0x4d, 0x65, 0x74, 0x61, 0x12, 0x46, 0x0a, 0x10, 0x4c, 0x65, + 0x67, 0x61, 0x63, 0x79, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x18, 0x09, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, + 0x52, 0x10, 0x4c, 0x65, 0x67, 0x61, 0x63, 0x79, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x54, 0x69, + 0x6d, 0x65, 0x12, 0x46, 0x0a, 0x10, 0x4c, 0x65, 0x67, 0x61, 0x63, 0x79, 0x55, 0x70, 0x64, 0x61, + 0x74, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, + 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x10, 0x4c, 0x65, 0x67, 0x61, 0x63, 0x79, + 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x58, 0x0a, 0x0e, 0x45, 0x6e, + 0x74, 0x65, 0x72, 0x70, 0x72, 0x69, 0x73, 0x65, 0x4d, 0x65, 0x74, 0x61, 0x18, 0x0b, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x30, 0x2e, 0x68, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2e, 0x63, + 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2e, 0x63, + 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x45, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x69, 0x73, 0x65, + 0x4d, 0x65, 0x74, 0x61, 0x52, 0x0e, 0x45, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x69, 0x73, 0x65, + 0x4d, 0x65, 0x74, 0x61, 0x12, 0x12, 0x0a, 0x04, 0x50, 0x65, 0x65, 0x72, 0x18, 0x0c, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x04, 0x50, 0x65, 0x65, 0x72, 0x1a, 0x3d, 0x0a, 0x0f, 0x4c, 0x65, 0x67, 0x61, + 0x63, 0x79, 0x4d, 0x65, 0x74, 0x61, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, + 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, + 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, + 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0xb9, 0x01, 0x0a, 0x13, 0x49, 0x6e, 0x74, 0x65, + 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, 
0x6f, 0x6e, 0x12, + 0x4e, 0x0a, 0x06, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, + 0x36, 0x2e, 0x68, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2e, 0x63, 0x6f, 0x6e, 0x73, + 0x75, 0x6c, 0x2e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2e, 0x63, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x2e, 0x49, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, + 0x6e, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x06, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, + 0x52, 0x0a, 0x04, 0x48, 0x54, 0x54, 0x50, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3e, 0x2e, + 0x68, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2e, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6c, + 0x2e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x65, 0x6e, 0x74, 0x72, 0x79, 0x2e, 0x49, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x48, + 0x54, 0x54, 0x50, 0x50, 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x04, 0x48, + 0x54, 0x54, 0x50, 0x22, 0xed, 0x01, 0x0a, 0x17, 0x49, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, + 0x6e, 0x48, 0x54, 0x54, 0x50, 0x50, 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x12, + 0x1c, 0x0a, 0x09, 0x50, 0x61, 0x74, 0x68, 0x45, 0x78, 0x61, 0x63, 0x74, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x09, 0x50, 0x61, 0x74, 0x68, 0x45, 0x78, 0x61, 0x63, 0x74, 0x12, 0x1e, 0x0a, + 0x0a, 0x50, 0x61, 0x74, 0x68, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x0a, 0x50, 0x61, 0x74, 0x68, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, 0x1c, 0x0a, + 0x09, 0x50, 0x61, 0x74, 0x68, 0x52, 0x65, 0x67, 0x65, 0x78, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x09, 0x50, 0x61, 0x74, 0x68, 0x52, 0x65, 0x67, 0x65, 0x78, 0x12, 0x5c, 0x0a, 0x06, 0x48, + 0x65, 0x61, 0x64, 0x65, 0x72, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x44, 0x2e, 0x68, 0x61, + 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2e, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2e, 0x69, + 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x65, 0x6e, + 0x74, 0x72, 0x79, 0x2e, 0x49, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x48, 0x54, 0x54, + 0x50, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x50, 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, + 0x6e, 0x52, 0x06, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x12, 0x18, 0x0a, 0x07, 0x4d, 0x65, 0x74, + 0x68, 0x6f, 0x64, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x09, 0x52, 0x07, 0x4d, 0x65, 0x74, 0x68, + 0x6f, 0x64, 0x73, 0x22, 0xc1, 0x01, 0x0a, 0x1d, 0x49, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, + 0x6e, 0x48, 0x54, 0x54, 0x50, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x50, 0x65, 0x72, 0x6d, 0x69, + 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x12, 0x0a, 0x04, 0x4e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x04, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x50, 0x72, 0x65, + 0x73, 0x65, 0x6e, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x50, 0x72, 0x65, 0x73, + 0x65, 0x6e, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x45, 0x78, 0x61, 0x63, 0x74, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x05, 0x45, 0x78, 0x61, 0x63, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x50, 0x72, 0x65, + 0x66, 0x69, 0x78, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x50, 0x72, 0x65, 0x66, 0x69, + 0x78, 0x12, 0x16, 0x0a, 0x06, 0x53, 0x75, 0x66, 0x66, 0x69, 0x78, 0x18, 0x05, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x06, 0x53, 0x75, 0x66, 0x66, 0x69, 0x78, 0x12, 0x14, 0x0a, 0x05, 0x52, 0x65, 0x67, + 0x65, 0x78, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x52, 0x65, 0x67, 0x65, 0x78, 0x12, + 0x16, 
0x0a, 0x06, 0x49, 0x6e, 0x76, 0x65, 0x72, 0x74, 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, 0x52, + 0x06, 0x49, 0x6e, 0x76, 0x65, 0x72, 0x74, 0x2a, 0x77, 0x0a, 0x04, 0x4b, 0x69, 0x6e, 0x64, 0x12, + 0x0f, 0x0a, 0x0b, 0x4b, 0x69, 0x6e, 0x64, 0x55, 0x6e, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x10, 0x00, + 0x12, 0x12, 0x0a, 0x0e, 0x4b, 0x69, 0x6e, 0x64, 0x4d, 0x65, 0x73, 0x68, 0x43, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x10, 0x01, 0x12, 0x17, 0x0a, 0x13, 0x4b, 0x69, 0x6e, 0x64, 0x53, 0x65, 0x72, 0x76, + 0x69, 0x63, 0x65, 0x52, 0x65, 0x73, 0x6f, 0x6c, 0x76, 0x65, 0x72, 0x10, 0x02, 0x12, 0x16, 0x0a, + 0x12, 0x4b, 0x69, 0x6e, 0x64, 0x49, 0x6e, 0x67, 0x72, 0x65, 0x73, 0x73, 0x47, 0x61, 0x74, 0x65, + 0x77, 0x61, 0x79, 0x10, 0x03, 0x12, 0x19, 0x0a, 0x15, 0x4b, 0x69, 0x6e, 0x64, 0x53, 0x65, 0x72, + 0x76, 0x69, 0x63, 0x65, 0x49, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x10, 0x04, + 0x2a, 0x26, 0x0a, 0x0f, 0x49, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x41, 0x63, 0x74, + 0x69, 0x6f, 0x6e, 0x12, 0x08, 0x0a, 0x04, 0x44, 0x65, 0x6e, 0x79, 0x10, 0x00, 0x12, 0x09, 0x0a, + 0x05, 0x41, 0x6c, 0x6c, 0x6f, 0x77, 0x10, 0x01, 0x2a, 0x21, 0x0a, 0x13, 0x49, 0x6e, 0x74, 0x65, + 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x54, 0x79, 0x70, 0x65, 0x12, + 0x0a, 0x0a, 0x06, 0x43, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x10, 0x00, 0x42, 0xa6, 0x02, 0x0a, 0x29, + 0x63, 0x6f, 0x6d, 0x2e, 0x68, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2e, 0x63, 0x6f, + 0x6e, 0x73, 0x75, 0x6c, 0x2e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2e, 0x63, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x42, 0x10, 0x43, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x2f, 0x67, + 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x68, 0x61, 0x73, 0x68, 0x69, 0x63, + 0x6f, 0x72, 0x70, 0x2f, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x2f, 0x70, 0x62, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x65, 0x6e, 0x74, 0x72, 0x79, 0xa2, 0x02, + 0x04, 0x48, 0x43, 0x49, 0x43, 0xaa, 0x02, 0x25, 0x48, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, + 0x70, 0x2e, 0x43, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2e, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, + 0x6c, 0x2e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x65, 0x6e, 0x74, 0x72, 0x79, 0xca, 0x02, 0x25, + 0x48, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x5c, 0x43, 0x6f, 0x6e, 0x73, 0x75, 0x6c, + 0x5c, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x5c, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x65, 0x6e, 0x74, 0x72, 0x79, 0xe2, 0x02, 0x31, 0x48, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, + 0x70, 0x5c, 0x43, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x5c, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, + 0x6c, 0x5c, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x5c, 0x47, 0x50, + 0x42, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0xea, 0x02, 0x28, 0x48, 0x61, 0x73, 0x68, + 0x69, 0x63, 0x6f, 0x72, 0x70, 0x3a, 0x3a, 0x43, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x3a, 0x3a, 0x49, + 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x3a, 0x3a, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x65, + 0x6e, 0x74, 0x72, 0x79, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( diff --git a/proto/pbconfigentry/config_entry.proto b/proto/pbconfigentry/config_entry.proto index 37d497a68b..f92efc18b1 100644 --- a/proto/pbconfigentry/config_entry.proto +++ b/proto/pbconfigentry/config_entry.proto @@ -122,6 +122,7 @@ message ServiceResolverRedirect { string Namespace = 3; string Partition = 4; string Datacenter = 5; + 
string Peer = 6;
 }
 
 // mog annotation:

From 93271f649c443f90db1631d012fdea9a3b98bf9d Mon Sep 17 00:00:00 2001
From: "Chris S. Kim"
Date: Mon, 29 Aug 2022 10:19:46 -0400
Subject: [PATCH 44/93] Fix test

---
 agent/grpc-external/services/peerstream/stream_test.go | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/agent/grpc-external/services/peerstream/stream_test.go b/agent/grpc-external/services/peerstream/stream_test.go
index fcdd07422b..bee02592a3 100644
--- a/agent/grpc-external/services/peerstream/stream_test.go
+++ b/agent/grpc-external/services/peerstream/stream_test.go
@@ -655,7 +655,7 @@ func TestStreamResources_Server_StreamTracker(t *testing.T) {
 				},
 			},
 		}
-		lastRecvResourceSuccess = it.FutureNow(2)
+		lastRecvResourceSuccess = it.FutureNow(1)
 		err := client.Send(resp)
 		require.NoError(t, err)

From def529edd3be9c42ab63cc0b92552d78bda1a041 Mon Sep 17 00:00:00 2001
From: "Chris S. Kim"
Date: Mon, 29 Aug 2022 10:34:50 -0400
Subject: [PATCH 45/93] Rename test

---
 agent/grpc-external/services/peerstream/stream_tracker_test.go | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/agent/grpc-external/services/peerstream/stream_tracker_test.go b/agent/grpc-external/services/peerstream/stream_tracker_test.go
index 7500ccd4b8..555e76258b 100644
--- a/agent/grpc-external/services/peerstream/stream_tracker_test.go
+++ b/agent/grpc-external/services/peerstream/stream_tracker_test.go
@@ -45,7 +45,7 @@ func TestStatus_IsHealthy(t *testing.T) {
 			},
 		},
 		{
-			name:        "receive error before receive success",
+			name:        "nack before ack",
 			expectedVal: false,
 			modifierFunc: func(status *MutableStatus) {
 				now := time.Now()

From d8cb7731dde161e395b07ccba3566b18c6fe18a5 Mon Sep 17 00:00:00 2001
From: DanStough
Date: Tue, 16 Aug 2022 16:47:39 -0400
Subject: [PATCH 46/93] chore: add multi-arch docker build for testing

---
 GNUmakefile                                   | 30 ++++++++++++++++---
 .../docker/Consul-Dev-Multiarch.dockerfile    |  5 ++++
 2 files changed, 31 insertions(+), 4 deletions(-)
 create mode 100644 build-support/docker/Consul-Dev-Multiarch.dockerfile

diff --git a/GNUmakefile b/GNUmakefile
index 6327ea579b..77d6a7ec21 100644
--- a/GNUmakefile
+++ b/GNUmakefile
@@ -16,6 +16,7 @@ PROTOC_GO_INJECT_TAG_VERSION='v1.3.0'
 
 GOTAGS ?=
 GOPATH=$(shell go env GOPATH)
+GOARCH?=$(shell go env GOARCH)
 MAIN_GOPATH=$(shell go env GOPATH | cut -d: -f1)
 
 export PATH := $(PWD)/bin:$(GOPATH)/bin:$(PATH)
@@ -152,7 +153,28 @@ dev-docker: linux
 	@docker pull consul:$(CONSUL_IMAGE_VERSION) >/dev/null
 	@echo "Building Consul Development container - $(CONSUL_DEV_IMAGE)"
 	# 'consul:local' tag is needed to run the integration tests
-	@DOCKER_DEFAULT_PLATFORM=linux/amd64 docker build $(NOCACHE) $(QUIET) -t '$(CONSUL_DEV_IMAGE)' -t 'consul:local' --build-arg CONSUL_IMAGE_VERSION=$(CONSUL_IMAGE_VERSION) $(CURDIR)/pkg/bin/linux_amd64 -f $(CURDIR)/build-support/docker/Consul-Dev.dockerfile
+	@docker buildx use default && docker buildx build -t 'consul:local' \
+		--platform linux/$(GOARCH) \
+		--build-arg CONSUL_IMAGE_VERSION=$(CONSUL_IMAGE_VERSION) \
+		--load \
+		-f $(CURDIR)/build-support/docker/Consul-Dev-Multiarch.dockerfile $(CURDIR)/pkg/bin/
+
+check-remote-dev-image-env:
+ifndef REMOTE_DEV_IMAGE
+	$(error REMOTE_DEV_IMAGE is undefined: set this image to <your_docker_repo>/<your_docker_image>:<image_tag>, e.g. hashicorp/consul-k8s-dev:latest)
+endif
+
+remote-docker: check-remote-dev-image-env
+	$(MAKE) GOARCH=amd64 linux
+	$(MAKE) GOARCH=arm64 linux
+	@echo "Pulling consul container image - $(CONSUL_IMAGE_VERSION)"
+	@docker pull consul:$(CONSUL_IMAGE_VERSION) >/dev/null
+	@echo "Building and Pushing Consul Development container - $(REMOTE_DEV_IMAGE)"
+	@docker buildx use default && docker buildx build -t '$(REMOTE_DEV_IMAGE)' \
+		--platform linux/amd64,linux/arm64 \
+		--build-arg CONSUL_IMAGE_VERSION=$(CONSUL_IMAGE_VERSION) \
+		--push \
+		-f $(CURDIR)/build-support/docker/Consul-Dev-Multiarch.dockerfile $(CURDIR)/pkg/bin/
 
 # In CircleCI, the linux binary will be attached from a previous step at bin/. This make target
 # should only run in CI and not locally.
@@ -174,10 +196,10 @@ ifeq ($(CIRCLE_BRANCH), main)
 	@docker push $(CI_DEV_DOCKER_NAMESPACE)/$(CI_DEV_DOCKER_IMAGE_NAME):latest
 endif
 
-# linux builds a linux binary independent of the source platform
+# linux builds a linux binary compatible with the source platform
 linux:
-	@mkdir -p ./pkg/bin/linux_amd64
-	CGO_ENABLED=0 GOOS=linux GOARCH=amd64 go build -o ./pkg/bin/linux_amd64 -ldflags "$(GOLDFLAGS)" -tags "$(GOTAGS)"
+	@mkdir -p ./pkg/bin/linux_$(GOARCH)
+	CGO_ENABLED=0 GOOS=linux GOARCH=$(GOARCH) go build -o ./pkg/bin/linux_$(GOARCH) -ldflags "$(GOLDFLAGS)" -tags "$(GOTAGS)"
 
 # dist builds binaries for all platforms and packages them for distribution
 dist:
diff --git a/build-support/docker/Consul-Dev-Multiarch.dockerfile b/build-support/docker/Consul-Dev-Multiarch.dockerfile
new file mode 100644
index 0000000000..a3069bd99c
--- /dev/null
+++ b/build-support/docker/Consul-Dev-Multiarch.dockerfile
@@ -0,0 +1,5 @@
+ARG CONSUL_IMAGE_VERSION=latest
+FROM consul:${CONSUL_IMAGE_VERSION}
+RUN apk update && apk add iptables
+ARG TARGETARCH
+COPY linux_${TARGETARCH}/consul /bin/consul

From 72f90754ae8c6dceeac08da4c1154b64e71dd184 Mon Sep 17 00:00:00 2001
From: Eric Haberkorn
Date: Mon, 29 Aug 2022 13:46:41 -0400
Subject: [PATCH 47/93] Update max_ejection_percent on outlier detection for peered clusters to 100% (#14373)

We can't trust health checks on peered services when service resolvers, splitters and routers are used.
---
 .changelog/14373.txt                                       | 3 +++
 agent/xds/clusters.go                                      | 9 ++++++++-
 .../connect-proxy-with-peered-upstreams.latest.golden      | 4 ++--
 ...transparent-proxy-with-peered-upstreams.latest.golden   | 6 +++---
 4 files changed, 16 insertions(+), 6 deletions(-)
 create mode 100644 .changelog/14373.txt

diff --git a/.changelog/14373.txt b/.changelog/14373.txt
new file mode 100644
index 0000000000..d9531b09ec
--- /dev/null
+++ b/.changelog/14373.txt
@@ -0,0 +1,3 @@
+```release-note:improvement
+xds: Set `max_ejection_percent` on Envoy's outlier detection to 100% for peered services.
+```
diff --git a/agent/xds/clusters.go b/agent/xds/clusters.go
index adde810d37..c3ac718472 100644
--- a/agent/xds/clusters.go
+++ b/agent/xds/clusters.go
@@ -772,6 +772,13 @@ func (s *ResourceGenerator) makeUpstreamClusterForPeerService(
 
 	clusterName := generatePeeredClusterName(uid, tbs)
 
+	outlierDetection := ToOutlierDetection(cfg.PassiveHealthCheck)
+	// We can't rely on health checks for services on cluster peers because they
+	// don't take into account service resolvers, splitters and routers. Setting
+	// MaxEjectionPercent to 100% gives outlier detection the power to eject the
+	// entire cluster.
+ outlierDetection.MaxEjectionPercent = &wrappers.UInt32Value{Value: 100} + s.Logger.Trace("generating cluster for", "cluster", clusterName) if c == nil { c = &envoy_cluster_v3.Cluster{ @@ -785,7 +792,7 @@ func (s *ResourceGenerator) makeUpstreamClusterForPeerService( CircuitBreakers: &envoy_cluster_v3.CircuitBreakers{ Thresholds: makeThresholdsIfNeeded(cfg.Limits), }, - OutlierDetection: ToOutlierDetection(cfg.PassiveHealthCheck), + OutlierDetection: outlierDetection, } if cfg.Protocol == "http2" || cfg.Protocol == "grpc" { if err := s.setHttp2ProtocolOptions(c); err != nil { diff --git a/agent/xds/testdata/clusters/connect-proxy-with-peered-upstreams.latest.golden b/agent/xds/testdata/clusters/connect-proxy-with-peered-upstreams.latest.golden index d7c23515fc..29059b1435 100644 --- a/agent/xds/testdata/clusters/connect-proxy-with-peered-upstreams.latest.golden +++ b/agent/xds/testdata/clusters/connect-proxy-with-peered-upstreams.latest.golden @@ -58,7 +58,7 @@ "dnsRefreshRate": "10s", "dnsLookupFamily": "V4_ONLY", "outlierDetection": { - + "maxEjectionPercent": 100 }, "commonLbConfig": { "healthyPanicThreshold": { @@ -115,7 +115,7 @@ }, "outlierDetection": { - + "maxEjectionPercent": 100 }, "commonLbConfig": { "healthyPanicThreshold": { diff --git a/agent/xds/testdata/clusters/transparent-proxy-with-peered-upstreams.latest.golden b/agent/xds/testdata/clusters/transparent-proxy-with-peered-upstreams.latest.golden index 0dbbf4277d..d1f6d0bb0e 100644 --- a/agent/xds/testdata/clusters/transparent-proxy-with-peered-upstreams.latest.golden +++ b/agent/xds/testdata/clusters/transparent-proxy-with-peered-upstreams.latest.golden @@ -18,7 +18,7 @@ }, "outlierDetection": { - + "maxEjectionPercent": 100 }, "commonLbConfig": { "healthyPanicThreshold": { @@ -75,7 +75,7 @@ }, "outlierDetection": { - + "maxEjectionPercent": 100 }, "commonLbConfig": { "healthyPanicThreshold": { @@ -157,7 +157,7 @@ }, "outlierDetection": { - + "maxEjectionPercent": 100 }, "commonLbConfig": { "healthyPanicThreshold": { From 310608fb191557a508bb3f7d2c8f0e657f87ea90 Mon Sep 17 00:00:00 2001 From: freddygv Date: Mon, 29 Aug 2022 12:00:30 -0600 Subject: [PATCH 48/93] Add validation to prevent switching dialing mode This prevents unexpected changes to the output of ShouldDial, which should never change unless a peering is deleted and recreated. --- agent/consul/leader_peering_test.go | 12 +++-- agent/consul/state/peering.go | 10 ++++ agent/consul/state/peering_test.go | 76 ++++++++++++++++++++--------- 3 files changed, 72 insertions(+), 26 deletions(-) diff --git a/agent/consul/leader_peering_test.go b/agent/consul/leader_peering_test.go index b8b5166d8f..206608ed4e 100644 --- a/agent/consul/leader_peering_test.go +++ b/agent/consul/leader_peering_test.go @@ -40,6 +40,7 @@ func TestLeader_PeeringSync_Lifecycle_ClientDeletion(t *testing.T) { testLeader_PeeringSync_Lifecycle_ClientDeletion(t, true) }) } + func testLeader_PeeringSync_Lifecycle_ClientDeletion(t *testing.T, enableTLS bool) { if testing.Short() { t.Skip("too slow for testing.Short") @@ -137,9 +138,11 @@ func testLeader_PeeringSync_Lifecycle_ClientDeletion(t *testing.T, enableTLS boo // Delete the peering to trigger the termination sequence. 
deleted := &pbpeering.Peering{ - ID: p.Peering.ID, - Name: "my-peer-acceptor", - DeletedAt: structs.TimeToProto(time.Now()), + ID: p.Peering.ID, + Name: "my-peer-acceptor", + State: pbpeering.PeeringState_DELETING, + PeerServerAddresses: p.Peering.PeerServerAddresses, + DeletedAt: structs.TimeToProto(time.Now()), } require.NoError(t, dialer.fsm.State().PeeringWrite(2000, &pbpeering.PeeringWriteRequest{Peering: deleted})) dialer.logger.Trace("deleted peering for my-peer-acceptor") @@ -262,6 +265,7 @@ func testLeader_PeeringSync_Lifecycle_AcceptorDeletion(t *testing.T, enableTLS b deleted := &pbpeering.Peering{ ID: p.Peering.PeerID, Name: "my-peer-dialer", + State: pbpeering.PeeringState_DELETING, DeletedAt: structs.TimeToProto(time.Now()), } @@ -431,6 +435,7 @@ func TestLeader_Peering_DeferredDeletion(t *testing.T) { Peering: &pbpeering.Peering{ ID: peerID, Name: peerName, + State: pbpeering.PeeringState_DELETING, DeletedAt: structs.TimeToProto(time.Now()), }, })) @@ -1165,6 +1170,7 @@ func TestLeader_Peering_NoDeletionWhenPeeringDisabled(t *testing.T) { Peering: &pbpeering.Peering{ ID: peerID, Name: peerName, + State: pbpeering.PeeringState_DELETING, DeletedAt: structs.TimeToProto(time.Now()), }, })) diff --git a/agent/consul/state/peering.go b/agent/consul/state/peering.go index 9457dd811a..eef76aa726 100644 --- a/agent/consul/state/peering.go +++ b/agent/consul/state/peering.go @@ -535,6 +535,12 @@ func (s *Store) PeeringWrite(idx uint64, req *pbpeering.PeeringWriteRequest) err if req.Peering.Name == "" { return errors.New("Missing Peering Name") } + if req.Peering.State == pbpeering.PeeringState_DELETING && (req.Peering.DeletedAt == nil || structs.IsZeroProtoTime(req.Peering.DeletedAt)) { + return errors.New("Missing deletion time for peering in deleting state") + } + if req.Peering.DeletedAt != nil && !structs.IsZeroProtoTime(req.Peering.DeletedAt) && req.Peering.State != pbpeering.PeeringState_DELETING { + return fmt.Errorf("Unexpected state for peering with deletion time: %s", pbpeering.PeeringStateToAPI(req.Peering.State)) + } // Ensure the name is unique (cannot conflict with another peering with a different ID). 
_, existing, err := peeringReadTxn(tx, nil, Query{ @@ -546,6 +552,10 @@ func (s *Store) PeeringWrite(idx uint64, req *pbpeering.PeeringWriteRequest) err } if existing != nil { + if req.Peering.ShouldDial() != existing.ShouldDial() { + return fmt.Errorf("Cannot switch peering dialing mode from %t to %t", existing.ShouldDial(), req.Peering.ShouldDial()) + } + if req.Peering.ID != existing.ID { return fmt.Errorf("A peering already exists with the name %q and a different ID %q", req.Peering.Name, existing.ID) } diff --git a/agent/consul/state/peering_test.go b/agent/consul/state/peering_test.go index 1dc2446fe1..a90727f0eb 100644 --- a/agent/consul/state/peering_test.go +++ b/agent/consul/state/peering_test.go @@ -950,6 +950,7 @@ func TestStore_Peering_Watch(t *testing.T) { Peering: &pbpeering.Peering{ ID: testFooPeerID, Name: "foo", + State: pbpeering.PeeringState_DELETING, DeletedAt: structs.TimeToProto(time.Now()), }, }) @@ -976,6 +977,7 @@ func TestStore_Peering_Watch(t *testing.T) { err := s.PeeringWrite(lastIdx, &pbpeering.PeeringWriteRequest{Peering: &pbpeering.Peering{ ID: testBarPeerID, Name: "bar", + State: pbpeering.PeeringState_DELETING, DeletedAt: structs.TimeToProto(time.Now()), }, }) @@ -1077,6 +1079,7 @@ func TestStore_PeeringList_Watch(t *testing.T) { Peering: &pbpeering.Peering{ ID: testFooPeerID, Name: "foo", + State: pbpeering.PeeringState_DELETING, DeletedAt: structs.TimeToProto(time.Now()), Partition: structs.NodeEnterpriseMetaInDefaultPartition().PartitionOrEmpty(), }, @@ -1199,6 +1202,22 @@ func TestStore_PeeringWrite(t *testing.T) { err: `A peering already exists with the name "baz" and a different ID`, }, }, + { + name: "cannot change dialer status for baz", + input: &pbpeering.PeeringWriteRequest{ + Peering: &pbpeering.Peering{ + ID: "123", + Name: "baz", + State: pbpeering.PeeringState_FAILING, + // Excluding the peer server addresses leads to baz not being considered a dialer. 
+ // PeerServerAddresses: []string{"localhost:8502"}, + Partition: structs.NodeEnterpriseMetaInDefaultPartition().PartitionOrEmpty(), + }, + }, + expect: expectations{ + err: "Cannot switch peering dialing mode from true to false", + }, + }, { name: "update baz", input: &pbpeering.PeeringWriteRequest{ @@ -1273,15 +1292,17 @@ func TestStore_PeeringWrite(t *testing.T) { }, }, { - name: "cannot edit data during no-op termination", + name: "cannot modify peering during no-op termination", input: &pbpeering.PeeringWriteRequest{ Peering: &pbpeering.Peering{ - ID: testBazPeerID, - Name: "baz", - State: pbpeering.PeeringState_TERMINATED, - // Attempt to modify the addresses - Meta: map[string]string{"foo": "bar"}, - Partition: structs.NodeEnterpriseMetaInDefaultPartition().PartitionOrEmpty(), + ID: testBazPeerID, + Name: "baz", + State: pbpeering.PeeringState_TERMINATED, + Partition: structs.NodeEnterpriseMetaInDefaultPartition().PartitionOrEmpty(), + PeerServerAddresses: []string{"localhost:8502"}, + + // Attempt to add metadata + Meta: map[string]string{"foo": "bar"}, }, }, expect: expectations{ @@ -1320,11 +1341,12 @@ func TestStore_PeeringWrite(t *testing.T) { name: "deleting a deleted peering is a no-op", input: &pbpeering.PeeringWriteRequest{ Peering: &pbpeering.Peering{ - ID: testBazPeerID, - Name: "baz", - State: pbpeering.PeeringState_DELETING, - DeletedAt: structs.TimeToProto(time.Now()), - Partition: structs.NodeEnterpriseMetaInDefaultPartition().PartitionOrEmpty(), + ID: testBazPeerID, + Name: "baz", + State: pbpeering.PeeringState_DELETING, + PeerServerAddresses: []string{"localhost:8502"}, + DeletedAt: structs.TimeToProto(time.Now()), + Partition: structs.NodeEnterpriseMetaInDefaultPartition().PartitionOrEmpty(), }, }, expect: expectations{ @@ -1343,10 +1365,11 @@ func TestStore_PeeringWrite(t *testing.T) { name: "terminating a peering marked for deletion is a no-op", input: &pbpeering.PeeringWriteRequest{ Peering: &pbpeering.Peering{ - ID: testBazPeerID, - Name: "baz", - State: pbpeering.PeeringState_TERMINATED, - Partition: structs.NodeEnterpriseMetaInDefaultPartition().PartitionOrEmpty(), + ID: testBazPeerID, + Name: "baz", + State: pbpeering.PeeringState_TERMINATED, + PeerServerAddresses: []string{"localhost:8502"}, + Partition: structs.NodeEnterpriseMetaInDefaultPartition().PartitionOrEmpty(), }, }, expect: expectations{ @@ -1364,13 +1387,15 @@ func TestStore_PeeringWrite(t *testing.T) { name: "cannot update peering marked for deletion", input: &pbpeering.PeeringWriteRequest{ Peering: &pbpeering.Peering{ - ID: testBazPeerID, - Name: "baz", + ID: testBazPeerID, + Name: "baz", + PeerServerAddresses: []string{"localhost:8502"}, + Partition: structs.NodeEnterpriseMetaInDefaultPartition().PartitionOrEmpty(), + // Attempt to add metadata Meta: map[string]string{ "source": "kubernetes", }, - Partition: structs.NodeEnterpriseMetaInDefaultPartition().PartitionOrEmpty(), }, }, expect: expectations{ @@ -1381,10 +1406,12 @@ func TestStore_PeeringWrite(t *testing.T) { name: "cannot create peering marked for deletion", input: &pbpeering.PeeringWriteRequest{ Peering: &pbpeering.Peering{ - ID: testFooPeerID, - Name: "foo", - DeletedAt: structs.TimeToProto(time.Now()), - Partition: structs.NodeEnterpriseMetaInDefaultPartition().PartitionOrEmpty(), + ID: testFooPeerID, + Name: "foo", + PeerServerAddresses: []string{"localhost:8502"}, + State: pbpeering.PeeringState_DELETING, + DeletedAt: structs.TimeToProto(time.Now()), + Partition: 
structs.NodeEnterpriseMetaInDefaultPartition().PartitionOrEmpty(), }, }, expect: expectations{ @@ -1414,6 +1441,7 @@ func TestStore_PeeringDelete(t *testing.T) { Peering: &pbpeering.Peering{ ID: testFooPeerID, Name: "foo", + State: pbpeering.PeeringState_DELETING, DeletedAt: structs.TimeToProto(time.Now()), }, })) @@ -1927,6 +1955,7 @@ func TestStateStore_PeeringsForService(t *testing.T) { copied := pbpeering.Peering{ ID: tp.peering.ID, Name: tp.peering.Name, + State: pbpeering.PeeringState_DELETING, DeletedAt: structs.TimeToProto(time.Now()), } require.NoError(t, s.PeeringWrite(lastIdx, &pbpeering.PeeringWriteRequest{Peering: &copied})) @@ -2369,6 +2398,7 @@ func TestStore_TrustBundleListByService(t *testing.T) { Peering: &pbpeering.Peering{ ID: peerID1, Name: "peer1", + State: pbpeering.PeeringState_DELETING, DeletedAt: structs.TimeToProto(time.Now()), }, })) From d3955bd84cf7fcb7f3cdc3feb21360c242dc9557 Mon Sep 17 00:00:00 2001 From: freddygv Date: Mon, 29 Aug 2022 12:07:18 -0600 Subject: [PATCH 49/93] Add changelog entry --- .changelog/14364.txt | 3 +++ 1 file changed, 3 insertions(+) create mode 100644 .changelog/14364.txt diff --git a/.changelog/14364.txt b/.changelog/14364.txt new file mode 100644 index 0000000000..d2f777af49 --- /dev/null +++ b/.changelog/14364.txt @@ -0,0 +1,3 @@ +```release-note:bugfix +peering: Fix issue preventing deletion and recreation of peerings in TERMINATED state. +``` \ No newline at end of file From daa30a03c37cc5d71038069d1b06d4a22d70f35a Mon Sep 17 00:00:00 2001 From: David Yu Date: Mon, 29 Aug 2022 11:34:39 -0700 Subject: [PATCH 50/93] docs: Update Consul K8s release notes (#14379) --- website/content/docs/release-notes/consul-k8s/v0_47_x.mdx | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/website/content/docs/release-notes/consul-k8s/v0_47_x.mdx b/website/content/docs/release-notes/consul-k8s/v0_47_x.mdx index b040787c5c..a9228e9984 100644 --- a/website/content/docs/release-notes/consul-k8s/v0_47_x.mdx +++ b/website/content/docs/release-notes/consul-k8s/v0_47_x.mdx @@ -20,8 +20,7 @@ description: >- ## Supported Software - Consul 1.11.x, Consul 1.12.x and Consul 1.13.1+ -- Kubernetes 1.19+ - - Kubernetes 1.24 is not supported at this time. +- Kubernetes 1.19-1.23 - Kubectl 1.21+ - Envoy proxy support is determined by the Consul version deployed. Refer to [Envoy Integration](/docs/connect/proxies/envoy) for details. From e6b63221eb403290ece5770ce1f68e44da68cb0f Mon Sep 17 00:00:00 2001 From: David Yu Date: Mon, 29 Aug 2022 13:07:08 -0700 Subject: [PATCH 51/93] docs: Cluster peering with Transparent Proxy updates (#14369) * Update Cluster Peering docs to show example with Transparent Proxy Co-authored-by: trujillo-adam <47586768+trujillo-adam@users.noreply.github.com> --- .../docs/connect/cluster-peering/k8s.mdx | 271 ++++++++++++++---- 1 file changed, 215 insertions(+), 56 deletions(-) diff --git a/website/content/docs/connect/cluster-peering/k8s.mdx b/website/content/docs/connect/cluster-peering/k8s.mdx index 35f17959cb..b18633f091 100644 --- a/website/content/docs/connect/cluster-peering/k8s.mdx +++ b/website/content/docs/connect/cluster-peering/k8s.mdx @@ -25,50 +25,82 @@ You must implement the following requirements to create and use cluster peering - At least two Kubernetes clusters - The installation must be running on Consul on Kubernetes version 0.47.1 or later -### Helm chart configuration +### Prepare for install -To establish cluster peering through Kubernetes, deploy clusters with the following Helm values. +1. 
After provisioning a Kubernetes cluster and setting up your kubeconfig file to manage access to multiple Kubernetes clusters, export the Kubernetes context names for future use with `kubectl`. For more information on how to use kubeconfig and contexts, refer to [Configure access to multiple clusters](https://kubernetes.io/docs/tasks/access-application-cluster/configure-access-multiple-clusters/) on the Kubernetes documentation website. - + You can use the following methods to get the context names for your clusters: + + * Issue the `kubectl config current-context` command to get the context for the cluster you are currently in. + * Issue the `kubectl config get-contexts` command to get all configured contexts in your kubeconfig file. + + ```shell-session + $ export CLUSTER1_CONTEXT= + $ export CLUSTER2_CONTEXT= + ``` - +1. To establish cluster peering through Kubernetes, create a `values.yaml` file with the following Helm values. + + With these values, + the servers in each cluster will be exposed over a Kubernetes load balancer service. This service can be customized + using [`server.exposeService`](/docs/k8s/helm#v-server-exposeservice). + + When generating a peering token from one of the clusters, Consul uses the address(es) of the load balancer in the peering token so that the peering stream goes through the load balancer in front of the servers. For customizing the addresses used in the peering token, refer to [`global.peering.tokenGeneration`](/docs/k8s/helm#v-global-peering-tokengeneration). + + + + ```yaml + global: + image: "hashicorp/consul:1.13.1" + peering: + enabled: true + connectInject: enabled: true - connectInject: - enabled: true - controller: - enabled: true - meshGateway: - enabled: true - replicas: 1 - ``` + dns: + enabled: true + enableRedirection: true + server: + exposeService: + enabled: true + controller: + enabled: true + meshGateway: + enabled: true + replicas: 1 + ``` - + + +### Install Consul on Kubernetes -Install Consul on Kubernetes on each Kubernetes cluster by applying `values.yaml` using the Helm CLI. With these values, -the servers in each cluster will be exposed over a Kubernetes Load balancer service. This service can be customized -using [`server.exposeService`](/docs/k8s/helm#v-server-exposeservice). When generating a peering token from one of the -clusters, the address(es) of the load balancer will be used in the peering token, so the peering stream will go through -the load balancer in front of the servers. For customizing the addresses used in the peering token, see -[`global.peering.tokenGeneration`](/docs/k8s/helm#v-global-peering-tokengeneration). +1. Install Consul on Kubernetes on each Kubernetes cluster by applying `values.yaml` using the Helm CLI. + + 1. Install Consul on Kubernetes on `cluster-01` + + ```shell-session + $ export HELM_RELEASE_NAME=cluster-01 + ``` -```shell-session -$ export HELM_RELEASE_NAME=cluster-name -``` + ```shell-session + $ helm install ${HELM_RELEASE_NAME} hashicorp/consul --create-namespace --namespace consul --version "0.47.1" --values values.yaml --kube-context $CLUSTER1_CONTEXT + ``` + 1. 
Install Consul on Kubernetes on `cluster-02` + + ```shell-session + $ export HELM_RELEASE_NAME=cluster-02 + ``` -```shell-session -$ helm install ${HELM_RELEASE_NAME} hashicorp/consul --version "0.47.1" --values values.yaml -``` + ```shell-session + $ helm install ${HELM_RELEASE_NAME} hashicorp/consul --create-namespace --namespace consul --version "0.47.1" --values values.yaml --kube-context $CLUSTER2_CONTEXT + ``` ## Create a peering token -To peer Kubernetes clusters running Consul, you need to create a peering token and share it with the other cluster. +To peer Kubernetes clusters running Consul, you need to create a peering token and share it with the other cluster. As part of the peering process, the peer names for each respective cluster within the peering are established by using the `metadata.name` values for the `PeeringAcceptor` and `PeeringDialer` CRDs. 1. In `cluster-01`, create the `PeeringAcceptor` custom resource. - + ```yaml apiVersion: consul.hashicorp.com/v1alpha1 @@ -88,13 +120,13 @@ To peer Kubernetes clusters running Consul, you need to create a peering token a 1. Apply the `PeeringAcceptor` resource to the first cluster. ```shell-session - $ kubectl apply --filename acceptor.yml + $ kubectl --context $CLUSTER1_CONTEXT apply --filename acceptor.yaml ```` 1. Save your peering token so that you can export it to the other cluster. ```shell-session - $ kubectl get secret peering-token --output yaml > peering-token.yml + $ kubectl --context $CLUSTER1_CONTEXT get secret peering-token --output yaml > peering-token.yaml ``` ## Establish a peering connection between clusters @@ -102,12 +134,12 @@ To peer Kubernetes clusters running Consul, you need to create a peering token a 1. Apply the peering token to the second cluster. ```shell-session - $ kubectl apply --filename peering-token.yml + $ kubectl --context $CLUSTER2_CONTEXT apply --filename peering-token.yaml ``` -1. In `cluster-02`, create the `PeeringDialer` custom resource. +1. In `cluster-02`, create the `PeeringDialer` custom resource. - + ```yaml apiVersion: consul.hashicorp.com/v1alpha1 @@ -127,27 +159,74 @@ To peer Kubernetes clusters running Consul, you need to create a peering token a 1. Apply the `PeeringDialer` resource to the second cluster. ```shell-session - $ kubectl apply --filename dialer.yml + $ kubectl --context $CLUSTER2_CONTEXT apply --filename dialer.yaml ``` ## Export services between clusters 1. For the service in "cluster-02" that you want to export, add the following [annotation](/docs/k8s/annotations-and-labels) to your service's pods. - + ```yaml - ##… - annotations: - "consul.hashicorp.com/connect-inject": "true" - ##… + # Service to expose backend + apiVersion: v1 + kind: Service + metadata: + name: backend-service + spec: + selector: + app: backend + ports: + - name: http + protocol: TCP + port: 80 + targetPort: 9090 + --- + apiVersion: v1 + kind: ServiceAccount + metadata: + name: backend + --- + # deployment for backend + apiVersion: apps/v1 + kind: Deployment + metadata: + name: backend + labels: + app: backend + spec: + replicas: 1 + selector: + matchLabels: + app: backend + template: + metadata: + labels: + app: backend + annotations: + "consul.hashicorp.com/connect-inject": "true" + spec: + serviceAccountName: backend + containers: + - name: backend + image: nicholasjackson/fake-service:v0.22.4 + ports: + - containerPort: 9090 + env: + - name: "LISTEN_ADDR" + value: "0.0.0.0:9090" + - name: "NAME" + value: "backend" + - name: "MESSAGE" + value: "Response from backend" ``` 1. 
In `cluster-02`, create an `ExportedServices` custom resource. - + ```yaml apiVersion: consul.hashicorp.com/v1alpha1 @@ -166,7 +245,7 @@ To peer Kubernetes clusters running Consul, you need to create a peering token a 1. Apply the service file and the `ExportedServices` resource for the second cluster. ```shell-session - $ kubectl apply --filename backend-service.yml --filename exportedsvc.yml + $ kubectl apply --context $CLUSTER2_CONTEXT --filename backend-service.yaml --filename exportedsvc.yaml ``` ## Authorize services for peers @@ -195,18 +274,71 @@ To peer Kubernetes clusters running Consul, you need to create a peering token a 1. Apply the intentions to the second cluster. ```shell-session - $ kubectl apply --filename intention.yml + $ kubectl --context $CLUSTER2_CONTEXT apply --filename intention.yml ``` -1. For the services in `cluster-01` that you want to access the "backend-service," add the following annotations to the service file. +1. For the services in `cluster-01` that you want to access the "backend-service," add the following annotations to the service file. To dial the upstream service from an application, ensure that the requests are sent to the correct DNS name as specified in [Service Virtual IP Lookups](/docs/discovery/dns#service-virtual-ip-lookups). - + ```yaml - ##… - annotations: - "consul.hashicorp.com/connect-inject": "true" - ##… + # Service to expose frontend + apiVersion: v1 + kind: Service + metadata: + name: frontend-service + spec: + selector: + app: frontend + ports: + - name: http + protocol: TCP + port: 9090 + targetPort: 9090 + --- + apiVersion: v1 + kind: ServiceAccount + metadata: + name: frontend + --- + apiVersion: apps/v1 + kind: Deployment + metadata: + name: frontend + labels: + app: frontend + spec: + replicas: 1 + selector: + matchLabels: + app: frontend + template: + metadata: + labels: + app: frontend + annotations: + "consul.hashicorp.com/connect-inject": "true" + spec: + serviceAccountName: frontend + containers: + - name: frontend + image: nicholasjackson/fake-service:v0.22.4 + securityContext: + capabilities: + add: ["NET_ADMIN"] + ports: + - containerPort: 9090 + env: + - name: "LISTEN_ADDR" + value: "0.0.0.0:9090" + - name: "UPSTREAM_URIS" + value: "http://backend-service.virtual.cluster-02.consul" + - name: "NAME" + value: "frontend" + - name: "MESSAGE" + value: "Hello World" + - name: "HTTP_CLIENT_KEEP_ALIVES" + value: "false" ``` @@ -214,18 +346,45 @@ To peer Kubernetes clusters running Consul, you need to create a peering token a 1. Apply the service file to the first cluster. ```shell-session - $ kubectl apply --filename frontend-service.yml + $ kubectl --context $CLUSTER1_CONTEXT apply --filename frontend-service.yaml ``` 1. Run the following command in `frontend-service` and check the output to confirm that you peered your clusters successfully. 
```shell-session - $ kubectl exec -it $(kubectl get pod -l app=frontend -o name) -- curl localhost:1234 + $ kubectl --context $CLUSTER1_CONTEXT exec -it $(kubectl --context $CLUSTER1_CONTEXT get pod -l app=frontend -o name) -- curl localhost:9090 { - "name": "backend-service", - ##… - "body": "Response from backend", - "code": 200 + "name": "frontend", + "uri": "/", + "type": "HTTP", + "ip_addresses": [ + "10.16.2.11" + ], + "start_time": "2022-08-26T23:40:01.167199", + "end_time": "2022-08-26T23:40:01.226951", + "duration": "59.752279ms", + "body": "Hello World", + "upstream_calls": { + "http://backend-service.virtual.cluster-02.consul": { + "name": "backend", + "uri": "http://backend-service.virtual.cluster-02.consul", + "type": "HTTP", + "ip_addresses": [ + "10.32.2.10" + ], + "start_time": "2022-08-26T23:40:01.223503", + "end_time": "2022-08-26T23:40:01.224653", + "duration": "1.149666ms", + "headers": { + "Content-Length": "266", + "Content-Type": "text/plain; charset=utf-8", + "Date": "Fri, 26 Aug 2022 23:40:01 GMT" + }, + "body": "Response from backend", + "code": 200 + } + }, + "code": 200 } ``` From 74ddf040dd1776aabcf5b4e81ced2dea89a39b29 Mon Sep 17 00:00:00 2001 From: "Chris S. Kim" Date: Mon, 29 Aug 2022 16:32:26 -0400 Subject: [PATCH 52/93] Add heartbeat timeout grace period when accounting for peering health --- agent/consul/leader_peering.go | 4 +- agent/consul/leader_peering_test.go | 2 +- agent/consul/server.go | 8 +- .../services/peerstream/server.go | 7 +- .../services/peerstream/stream_test.go | 17 ++-- .../services/peerstream/stream_tracker.go | 81 +++++++++-------- .../peerstream/stream_tracker_test.go | 87 +++++++++++++------ 7 files changed, 122 insertions(+), 84 deletions(-) diff --git a/agent/consul/leader_peering.go b/agent/consul/leader_peering.go index 556f1b5bfc..d80038397b 100644 --- a/agent/consul/leader_peering.go +++ b/agent/consul/leader_peering.go @@ -112,7 +112,7 @@ func (s *Server) emitPeeringMetricsOnce(logger hclog.Logger, metricsImpl *metric if status.NeverConnected { metricsImpl.SetGaugeWithLabels(leaderHealthyPeeringKey, float32(math.NaN()), labels) } else { - healthy := status.IsHealthy() + healthy := s.peerStreamServer.Tracker.IsHealthy(status) healthyInt := 0 if healthy { healthyInt = 1 @@ -305,7 +305,7 @@ func (s *Server) establishStream(ctx context.Context, logger hclog.Logger, ws me logger.Trace("establishing stream to peer") - streamStatus, err := s.peerStreamTracker.Register(peer.ID) + streamStatus, err := s.peerStreamServer.Tracker.Register(peer.ID) if err != nil { return fmt.Errorf("failed to register stream: %v", err) } diff --git a/agent/consul/leader_peering_test.go b/agent/consul/leader_peering_test.go index b8b5166d8f..61f060802a 100644 --- a/agent/consul/leader_peering_test.go +++ b/agent/consul/leader_peering_test.go @@ -1216,7 +1216,7 @@ func TestLeader_Peering_NoEstablishmentWhenPeeringDisabled(t *testing.T) { })) require.Never(t, func() bool { - _, found := s1.peerStreamTracker.StreamStatus(peerID) + _, found := s1.peerStreamServer.StreamStatus(peerID) return found }, 7*time.Second, 1*time.Second, "peering should not have been established") } diff --git a/agent/consul/server.go b/agent/consul/server.go index f92a03ccd2..94048d06f2 100644 --- a/agent/consul/server.go +++ b/agent/consul/server.go @@ -370,9 +370,9 @@ type Server struct { // peerStreamServer is a server used to handle peering streams from external clusters. 
peerStreamServer *peerstream.Server + // peeringServer handles peering RPC requests internal to this cluster, like generating peering tokens. - peeringServer *peering.Server - peerStreamTracker *peerstream.Tracker + peeringServer *peering.Server // embedded struct to hold all the enterprise specific data EnterpriseServer @@ -724,11 +724,9 @@ func NewServer(config *Config, flat Deps, externalGRPCServer *grpc.Server) (*Ser Logger: logger.Named("grpc-api.server-discovery"), }).Register(s.externalGRPCServer) - s.peerStreamTracker = peerstream.NewTracker() s.peeringBackend = NewPeeringBackend(s) s.peerStreamServer = peerstream.NewServer(peerstream.Config{ Backend: s.peeringBackend, - Tracker: s.peerStreamTracker, GetStore: func() peerstream.StateStore { return s.FSM().State() }, Logger: logger.Named("grpc-api.peerstream"), ACLResolver: s.ACLResolver, @@ -790,7 +788,7 @@ func newGRPCHandlerFromConfig(deps Deps, config *Config, s *Server) connHandler p := peering.NewServer(peering.Config{ Backend: s.peeringBackend, - Tracker: s.peerStreamTracker, + Tracker: s.peerStreamServer.Tracker, Logger: deps.Logger.Named("grpc-api.peering"), ForwardRPC: func(info structs.RPCInfo, fn func(*grpc.ClientConn) error) (bool, error) { // Only forward the request if the dc in the request matches the server's datacenter. diff --git a/agent/grpc-external/services/peerstream/server.go b/agent/grpc-external/services/peerstream/server.go index 7254c60c7c..17388f4a25 100644 --- a/agent/grpc-external/services/peerstream/server.go +++ b/agent/grpc-external/services/peerstream/server.go @@ -26,11 +26,12 @@ const ( type Server struct { Config + + Tracker *Tracker } type Config struct { Backend Backend - Tracker *Tracker GetStore func() StateStore Logger hclog.Logger ForwardRPC func(structs.RPCInfo, func(*grpc.ClientConn) error) (bool, error) @@ -53,7 +54,6 @@ type ACLResolver interface { func NewServer(cfg Config) *Server { requireNotNil(cfg.Backend, "Backend") - requireNotNil(cfg.Tracker, "Tracker") requireNotNil(cfg.GetStore, "GetStore") requireNotNil(cfg.Logger, "Logger") // requireNotNil(cfg.ACLResolver, "ACLResolver") // TODO(peering): reenable check when ACLs are required @@ -67,7 +67,8 @@ func NewServer(cfg Config) *Server { cfg.incomingHeartbeatTimeout = defaultIncomingHeartbeatTimeout } return &Server{ - Config: cfg, + Config: cfg, + Tracker: NewTracker(cfg.incomingHeartbeatTimeout), } } diff --git a/agent/grpc-external/services/peerstream/stream_test.go b/agent/grpc-external/services/peerstream/stream_test.go index bee02592a3..9116d7a313 100644 --- a/agent/grpc-external/services/peerstream/stream_test.go +++ b/agent/grpc-external/services/peerstream/stream_test.go @@ -499,9 +499,8 @@ func TestStreamResources_Server_Terminate(t *testing.T) { base: time.Date(2000, time.January, 1, 0, 0, 0, 0, time.UTC), } - srv, store := newTestServer(t, func(c *Config) { - c.Tracker.SetClock(it.Now) - }) + srv, store := newTestServer(t, nil) + srv.Tracker.setClock(it.Now) p := writePeeringToBeDialed(t, store, 1, "my-peer") require.Empty(t, p.PeerID, "should be empty if being dialed") @@ -552,9 +551,8 @@ func TestStreamResources_Server_StreamTracker(t *testing.T) { base: time.Date(2000, time.January, 1, 0, 0, 0, 0, time.UTC), } - srv, store := newTestServer(t, func(c *Config) { - c.Tracker.SetClock(it.Now) - }) + srv, store := newTestServer(t, nil) + srv.Tracker.setClock(it.Now) // Set the initial roots and CA configuration. 
_, rootA := writeInitialRootsAndCA(t, store) p := writePeeringToBeDialed(t, store, 1, "my-peer") require.Empty(t, p.PeerID, "should be empty if being dialed") @@ -1128,9 +1126,9 @@ func TestStreamResources_Server_DisconnectsOnHeartbeatTimeout(t *testing.T) { } srv, store := newTestServer(t, func(c *Config) { - c.Tracker.SetClock(it.Now) c.incomingHeartbeatTimeout = 5 * time.Millisecond }) + srv.Tracker.setClock(it.Now) p := writePeeringToBeDialed(t, store, 1, "my-peer") require.Empty(t, p.PeerID, "should be empty if being dialed") @@ -1176,9 +1174,9 @@ func TestStreamResources_Server_SendsHeartbeats(t *testing.T) { outgoingHeartbeatInterval := 5 * time.Millisecond srv, store := newTestServer(t, func(c *Config) { - c.Tracker.SetClock(it.Now) c.outgoingHeartbeatInterval = outgoingHeartbeatInterval }) + srv.Tracker.setClock(it.Now) p := writePeeringToBeDialed(t, store, 1, "my-peer") require.Empty(t, p.PeerID, "should be empty if being dialed") @@ -1235,9 +1233,9 @@ func TestStreamResources_Server_KeepsConnectionOpenWithHeartbeat(t *testing.T) { incomingHeartbeatTimeout := 10 * time.Millisecond srv, store := newTestServer(t, func(c *Config) { - c.Tracker.SetClock(it.Now) c.incomingHeartbeatTimeout = incomingHeartbeatTimeout }) + srv.Tracker.setClock(it.Now) p := writePeeringToBeDialed(t, store, 1, "my-peer") require.Empty(t, p.PeerID, "should be empty if being dialed") @@ -2746,7 +2744,6 @@ func newTestServer(t *testing.T, configFn func(c *Config)) (*testServer, *state. store: store, pub: publisher, }, - Tracker: NewTracker(), GetStore: func() StateStore { return store }, Logger: testutil.Logger(t), Datacenter: "dc1", diff --git a/agent/grpc-external/services/peerstream/stream_tracker.go b/agent/grpc-external/services/peerstream/stream_tracker.go index d0dcd39770..c3108e71e7 100644 --- a/agent/grpc-external/services/peerstream/stream_tracker.go +++ b/agent/grpc-external/services/peerstream/stream_tracker.go @@ -14,18 +14,27 @@ type Tracker struct { mu sync.RWMutex streams map[string]*MutableStatus + // heartbeatTimeout is the max duration a connection is allowed to be + // disconnected before the stream health is reported as non-healthy + heartbeatTimeout time.Duration + // timeNow is a shim for testing. timeNow func() time.Time } -func NewTracker() *Tracker { +func NewTracker(heartbeatTimeout time.Duration) *Tracker { + if heartbeatTimeout == 0 { + heartbeatTimeout = defaultIncomingHeartbeatTimeout + } return &Tracker{ - streams: make(map[string]*MutableStatus), - timeNow: time.Now, + streams: make(map[string]*MutableStatus), + timeNow: time.Now, + heartbeatTimeout: heartbeatTimeout, } } -func (t *Tracker) SetClock(clock func() time.Time) { +// setClock is used for debugging purposes only. +func (t *Tracker) setClock(clock func() time.Time) { if clock == nil { t.timeNow = time.Now } else { @@ -128,6 +137,39 @@ func (t *Tracker) DeleteStatus(id string) { delete(t.streams, id) } +// IsHealthy calculates the health of a peering status. +// We define a peering as unhealthy if its status has been in the following +// states for longer than the configured incomingHeartbeatTimeout. +// - If it is disconnected +// - If the last received Nack is newer than last received Ack +// - If the last received error is newer than last received success +// +// If none of these conditions apply, we call the peering healthy. +func (t *Tracker) IsHealthy(s Status) bool { + // If stream is in a disconnected state for longer than the configured + // heartbeat timeout, report as unhealthy. 
+ if !s.DisconnectTime.IsZero() && + t.timeNow().Sub(s.DisconnectTime) > t.heartbeatTimeout { + return false + } + + // If last Nack is after last Ack, it means the peer is unable to + // handle our replication message. + if s.LastNack.After(s.LastAck) && + t.timeNow().Sub(s.LastAck) > t.heartbeatTimeout { + return false + } + + // If last recv error is newer than last recv success, we were unable + // to handle the peer's replication message. + if s.LastRecvError.After(s.LastRecvResourceSuccess) && + t.timeNow().Sub(s.LastRecvError) > t.heartbeatTimeout { + return false + } + + return true +} + type MutableStatus struct { mu sync.RWMutex @@ -201,37 +243,6 @@ func (s *Status) GetExportedServicesCount() uint64 { return uint64(len(s.ExportedServices)) } -// IsHealthy is a convenience func that returns true/ false for a peering status. -// We define a peering as unhealthy if its status satisfies one of the following: -// - If it is disconnected -// - If the last received Nack is newer than last received Ack -// - If the last received error is newer than last received success -// If none of these conditions apply, we call the peering healthy. -func (s *Status) IsHealthy() bool { - if !s.Connected { - return false - } - - // If stream is in a disconnected state, report unhealthy. - // This should be logically equivalent to s.Connected above. - if !s.DisconnectTime.IsZero() { - return false - } - - // If last Nack is after last Ack, it means the peer is unable to - // handle our replication messages. - if s.LastNack.After(s.LastAck) { - return false - } - - // If last recv error is newer than last recv success - report unhealthy - if s.LastRecvError.After(s.LastRecvResourceSuccess) { - return false - } - - return true -} - func newMutableStatus(now func() time.Time, connected bool) *MutableStatus { return &MutableStatus{ Status: Status{ diff --git a/agent/grpc-external/services/peerstream/stream_tracker_test.go b/agent/grpc-external/services/peerstream/stream_tracker_test.go index 555e76258b..bb018b4b46 100644 --- a/agent/grpc-external/services/peerstream/stream_tracker_test.go +++ b/agent/grpc-external/services/peerstream/stream_tracker_test.go @@ -5,6 +5,7 @@ import ( "testing" "time" + "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "github.com/hashicorp/consul/sdk/testutil" @@ -14,30 +15,35 @@ const ( aPeerID = "63b60245-c475-426b-b314-4588d210859d" ) -func TestStatus_IsHealthy(t *testing.T) { +func TestTracker_IsHealthy(t *testing.T) { type testcase struct { name string - dontConnect bool + tracker *Tracker modifierFunc func(status *MutableStatus) expectedVal bool } tcs := []testcase{ { - name: "never connected, unhealthy", - expectedVal: false, - dontConnect: true, - }, - { - name: "disconnect time not zero", - expectedVal: false, + name: "disconnect time within timeout", + tracker: NewTracker(defaultIncomingHeartbeatTimeout), + expectedVal: true, modifierFunc: func(status *MutableStatus) { status.DisconnectTime = time.Now() }, }, { - name: "receive error before receive success", + name: "disconnect time past timeout", + tracker: NewTracker(1 * time.Millisecond), expectedVal: false, + modifierFunc: func(status *MutableStatus) { + status.DisconnectTime = time.Now().Add(-1 * time.Minute) + }, + }, + { + name: "receive error before receive success within timeout", + tracker: NewTracker(defaultIncomingHeartbeatTimeout), + expectedVal: true, modifierFunc: func(status *MutableStatus) { now := time.Now() status.LastRecvResourceSuccess = now @@ -45,46 +51,71 @@ func 
TestStatus_IsHealthy(t *testing.T) { }, }, { - name: "nack before ack", + name: "receive error before receive success within timeout", + tracker: NewTracker(defaultIncomingHeartbeatTimeout), + expectedVal: true, + modifierFunc: func(status *MutableStatus) { + now := time.Now() + status.LastRecvResourceSuccess = now + status.LastRecvError = now.Add(1 * time.Second) + }, + }, + { + name: "receive error before receive success past timeout", + tracker: NewTracker(1 * time.Millisecond), expectedVal: false, + modifierFunc: func(status *MutableStatus) { + now := time.Now().Add(-2 * time.Second) + status.LastRecvResourceSuccess = now + status.LastRecvError = now.Add(1 * time.Second) + }, + }, + { + name: "nack before ack within timeout", + tracker: NewTracker(defaultIncomingHeartbeatTimeout), + expectedVal: true, modifierFunc: func(status *MutableStatus) { now := time.Now() status.LastAck = now status.LastNack = now.Add(1 * time.Second) }, }, + { + name: "nack before ack past timeout", + tracker: NewTracker(1 * time.Millisecond), + expectedVal: false, + modifierFunc: func(status *MutableStatus) { + now := time.Now().Add(-2 * time.Second) + status.LastAck = now + status.LastNack = now.Add(1 * time.Second) + }, + }, { name: "healthy", + tracker: NewTracker(defaultIncomingHeartbeatTimeout), expectedVal: true, }, } for _, tc := range tcs { t.Run(tc.name, func(t *testing.T) { - tracker := NewTracker() + tracker := tc.tracker - if !tc.dontConnect { - st, err := tracker.Connected(aPeerID) - require.NoError(t, err) - require.True(t, st.Connected) + st, err := tracker.Connected(aPeerID) + require.NoError(t, err) + require.True(t, st.Connected) - if tc.modifierFunc != nil { - tc.modifierFunc(st) - } - - require.Equal(t, tc.expectedVal, st.IsHealthy()) - - } else { - st, found := tracker.StreamStatus(aPeerID) - require.False(t, found) - require.Equal(t, tc.expectedVal, st.IsHealthy()) + if tc.modifierFunc != nil { + tc.modifierFunc(st) } + + assert.Equal(t, tc.expectedVal, tracker.IsHealthy(st.GetStatus())) }) } } func TestTracker_EnsureConnectedDisconnected(t *testing.T) { - tracker := NewTracker() + tracker := NewTracker(defaultIncomingHeartbeatTimeout) peerID := "63b60245-c475-426b-b314-4588d210859d" it := incrementalTime{ @@ -181,7 +212,7 @@ func TestTracker_connectedStreams(t *testing.T) { } run := func(t *testing.T, tc testCase) { - tracker := NewTracker() + tracker := NewTracker(defaultIncomingHeartbeatTimeout) if tc.setup != nil { tc.setup(t, tracker) } From b6f2b6c94f4182a06373dcc544191a91681aef4b Mon Sep 17 00:00:00 2001 From: Austin Workman Date: Mon, 11 Jul 2022 16:06:34 -0500 Subject: [PATCH 53/93] Add support for S3 path based addressing --- .changelog/_2271.txt | 3 +++ website/content/commands/snapshot/agent.mdx | 7 ++++++- 2 files changed, 9 insertions(+), 1 deletion(-) create mode 100644 .changelog/_2271.txt diff --git a/.changelog/_2271.txt b/.changelog/_2271.txt new file mode 100644 index 0000000000..58dc78dfaf --- /dev/null +++ b/.changelog/_2271.txt @@ -0,0 +1,3 @@ +```release-note:improvement +snapshot agent: **(Enterprise only)** Add support for path-based addressing when using s3 backend. 
+``` \ No newline at end of file diff --git a/website/content/commands/snapshot/agent.mdx b/website/content/commands/snapshot/agent.mdx index c565a52e6d..7607fc2b51 100644 --- a/website/content/commands/snapshot/agent.mdx +++ b/website/content/commands/snapshot/agent.mdx @@ -168,7 +168,8 @@ Usage: `consul snapshot agent [options]` "s3_bucket": "", "s3_key_prefix": "consul-snapshot", "s3_server_side_encryption": false, - "s3_static_snapshot_name": "" + "s3_static_snapshot_name": "", + "s3_force_path_style": false }, "azure_blob_storage": { "account_name": "", @@ -275,6 +276,10 @@ Note that despite the AWS references, any S3-compatible endpoint can be specifie - `-aws-s3-static-snapshot-name` - If this is given, all snapshots are saved with the same file name. The agent will not rotate or version snapshots, and will save them with the same name each time. Use this if you want to rely on [S3's versioning capabilities](http://docs.aws.amazon.com/AmazonS3/latest/dev/Versioning.html) instead of the agent handling it for you. +- `-aws-s3-force-path-style` - Enables the use of legacy path-based addressing instead of virtual addressing. This flag is required by MinIO + and other third-party S3-compatible object storage platforms where DNS or TLS requirements for virtual addressing are prohibitive. +For more information, refer to the AWS documentation on [Methods for accessing a bucket](https://docs.aws.amazon.com/AmazonS3/latest/userguide/access-bucket-intro.html). + - `-aws-s3-enable-kms` - Enables using [Amazon KMS](https://aws.amazon.com/kms/) for encrypting snapshots. - `-aws-s3-kms-key` - Optional Amazon KMS key to use, if this is not set the default KMS master key will be used. Set this if you want to manage key rotation yourself. From 77918d9dea0830aae2261925a1bb727a4446f36c Mon Sep 17 00:00:00 2001 From: Eric Haberkorn Date: Mon, 29 Aug 2022 16:59:27 -0400 Subject: [PATCH 54/93] Fix a breaking change to the API package introduced in #13835 (#14378) `QueryDatacenterOptions` was renamed to `QueryFailoverOptions` without creating an alias. This adds `QueryDatacenterOptions` back as an alias to `QueryFailoverOptions` and marks it as deprecated. --- .changelog/14378.txt | 5 +++++ api/prepared_query.go | 3 +++ 2 files changed, 8 insertions(+) create mode 100644 .changelog/14378.txt diff --git a/.changelog/14378.txt b/.changelog/14378.txt new file mode 100644 index 0000000000..2ab1b8f138 --- /dev/null +++ b/.changelog/14378.txt @@ -0,0 +1,5 @@ +```release-note:bug +api: Fix a breaking change caused by renaming `QueryDatacenterOptions` to +`QueryFailoverOptions`. This adds `QueryDatacenterOptions` back as an alias to +`QueryFailoverOptions` and marks it as deprecated. +``` diff --git a/api/prepared_query.go b/api/prepared_query.go index 60cd437cb7..7e0518f580 100644 --- a/api/prepared_query.go +++ b/api/prepared_query.go @@ -17,6 +17,9 @@ type QueryFailoverOptions struct { Targets []QueryFailoverTarget } +// Deprecated: use QueryFailoverOptions instead. +type QueryDatacenterOptions = QueryFailoverOptions + type QueryFailoverTarget struct { // PeerName specifies a peer to try during failover. PeerName string From 70bb6a2abdbc5ed4a6e728e8da243c5394a631d1 Mon Sep 17 00:00:00 2001 From: Luke Kysow <1034429+lkysow@users.noreply.github.com> Date: Mon, 29 Aug 2022 16:13:49 -0700 Subject: [PATCH 55/93] Run integration tests locally using amd64 (#14365) Locally, always run integration tests using amd64, even if running on an arm mac. This ensures the architecture locally always matches the CI/CD environment. 
In addition: * Use consul:local for envoy integration and upgrade tests. Previously, consul:local was used for upgrade tests and consul-dev for integration tests. I didn't see a reason to use separate images, and having two was more confusing. * By default, disable the requirement that AWS credentials are set. They are only needed for the Lambda tests, and requiring them meant you couldn't run any tests locally, even if you weren't running the Lambda tests. Now the Lambda tests only run if the LAMBDA_TESTS_ENABLED env var is set. * Split out the building of the Docker image for integration tests into its own target from `dev-docker`. This allows us to always use an amd64 image without messing up the `dev-docker` target. * Add support for passing GO_TEST_FLAGS to the `test-envoy-integ` target. * Add a wait_for_leader function because tests were failing locally without it. --- .circleci/config.yml | 7 +++-- GNUmakefile | 18 +++++++++-- .../connect/envoy/Dockerfile-consul-envoy | 2 +- .../envoy/case-wanfed-gw/global-setup.sh | 2 +- test/integration/connect/envoy/helpers.bash | 13 ++++++-- test/integration/connect/envoy/run-tests.sh | 30 +++++++++++-------- 6 files changed, 49 insertions(+), 23 deletions(-) diff --git a/.circleci/config.yml b/.circleci/config.yml index 105666c661..053d50d256 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -816,7 +816,7 @@ jobs: # Get go binary from workspace - attach_workspace: at: . - # Build the consul-dev image from the already built binary + # Build the consul:local image from the already built binary - run: command: | sudo rm -rf /usr/local/go @@ -887,8 +887,8 @@ jobs: - attach_workspace: at: . - run: *install-gotestsum - # Build the consul-dev image from the already built binary - - run: docker build -t consul-dev -f ./build-support/docker/Consul-Dev.dockerfile . + # Build the consul:local image from the already built binary + - run: docker build -t consul:local -f ./build-support/docker/Consul-Dev.dockerfile . - run: name: Envoy Integration Tests command: | @@ -902,6 +902,7 @@ jobs: GOTESTSUM_JUNITFILE: /tmp/test-results/results.xml GOTESTSUM_FORMAT: standard-verbose COMPOSE_INTERACTIVE_NO_CLI: 1 + LAMBDA_TESTS_ENABLED: "true" # tput complains if this isn't set to something. TERM: ansi - store_artifacts: diff --git a/GNUmakefile b/GNUmakefile index 77d6a7ec21..f9dd160811 100644 --- a/GNUmakefile +++ b/GNUmakefile @@ -130,7 +130,7 @@ export GOLDFLAGS # Allow skipping docker build during integration tests in CI since we already # have a built binary -ENVOY_INTEG_DEPS?=dev-docker +ENVOY_INTEG_DEPS?=docker-envoy-integ ifdef SKIP_DOCKER_BUILD ENVOY_INTEG_DEPS=noop endif @@ -346,8 +346,22 @@ consul-docker: go-build-image ui-docker: ui-build-image @$(SHELL) $(CURDIR)/build-support/scripts/build-docker.sh ui +# Build image used to run integration tests locally. +docker-envoy-integ: + $(MAKE) GOARCH=amd64 linux + docker build \ --platform linux/amd64 $(NOCACHE) $(QUIET) \ -t 'consul:local' \ --build-arg CONSUL_IMAGE_VERSION=$(CONSUL_IMAGE_VERSION) \ $(CURDIR)/pkg/bin/linux_amd64 \ -f $(CURDIR)/build-support/docker/Consul-Dev.dockerfile + +# Run integration tests. +# Use GO_TEST_FLAGS to run specific tests: +# make test-envoy-integ GO_TEST_FLAGS="-run TestEnvoy/case-basic" +# NOTE: Always uses amd64 images, even when running on M1 macs, to match CI/CD environment. 
test-envoy-integ: $(ENVOY_INTEG_DEPS) - @go test -v -timeout=30m -tags integration ./test/integration/connect/envoy + @go test -v -timeout=30m -tags integration $(GO_TEST_FLAGS) ./test/integration/connect/envoy .PHONY: test-compat-integ test-compat-integ: dev-docker diff --git a/test/integration/connect/envoy/Dockerfile-consul-envoy b/test/integration/connect/envoy/Dockerfile-consul-envoy index b6d5b3e8e2..41941a3367 100644 --- a/test/integration/connect/envoy/Dockerfile-consul-envoy +++ b/test/integration/connect/envoy/Dockerfile-consul-envoy @@ -1,7 +1,7 @@ # Note this arg has to be before the first FROM ARG ENVOY_VERSION -FROM consul-dev as consul +FROM consul:local as consul FROM docker.mirror.hashicorp.services/envoyproxy/envoy:v${ENVOY_VERSION} COPY --from=consul /bin/consul /bin/consul diff --git a/test/integration/connect/envoy/case-wanfed-gw/global-setup.sh b/test/integration/connect/envoy/case-wanfed-gw/global-setup.sh index fee985addd..3e61204459 100755 --- a/test/integration/connect/envoy/case-wanfed-gw/global-setup.sh +++ b/test/integration/connect/envoy/case-wanfed-gw/global-setup.sh @@ -17,7 +17,7 @@ consul tls cert create -dc=secondary -server -node=sec " docker rm -f "$container" &>/dev/null || true -docker run -i --net=none --name="$container" consul-dev:latest sh -c "${scriptlet}" +docker run -i --net=none --name="$container" consul:local sh -c "${scriptlet}" # primary for f in \ diff --git a/test/integration/connect/envoy/helpers.bash b/test/integration/connect/envoy/helpers.bash index 2fd9be7e38..d7fe0ae024 100755 --- a/test/integration/connect/envoy/helpers.bash +++ b/test/integration/connect/envoy/helpers.bash @@ -562,14 +562,14 @@ function assert_intention_denied { function docker_consul { local DC=$1 shift 1 - docker run -i --rm --network container:envoy_consul-${DC}_1 consul-dev "$@" + docker run -i --rm --network container:envoy_consul-${DC}_1 consul:local "$@" } function docker_consul_for_proxy_bootstrap { local DC=$1 shift 1 - docker run -i --rm --network container:envoy_consul-${DC}_1 consul-dev "$@" + docker run -i --rm --network container:envoy_consul-${DC}_1 consul:local "$@" 2> /dev/null } function docker_wget { @@ -581,7 +581,7 @@ function docker_wget { function docker_curl { local DC=$1 shift 1 - docker run --rm --network container:envoy_consul-${DC}_1 --entrypoint curl consul-dev "$@" + docker run --rm --network container:envoy_consul-${DC}_1 --entrypoint curl consul:local "$@" } function docker_exec { @@ -806,9 +806,16 @@ function delete_config_entry { function register_services { local DC=${1:-primary} + wait_for_leader "$DC" docker_consul_exec ${DC} sh -c "consul services register /workdir/${DC}/register/service_*.hcl" } +# wait_for_leader waits until a leader is elected. +# Its first argument must be the datacenter name. +function wait_for_leader { + retry_default docker_consul_exec "$1" sh -c '[[ $(curl --fail -sS http://127.0.0.1:8500/v1/status/leader) ]]' +} + function setup_upsert_l4_intention { local SOURCE=$1 local DESTINATION=$2 diff --git a/test/integration/connect/envoy/run-tests.sh b/test/integration/connect/envoy/run-tests.sh index c8c392c34f..f0e6b165cb 100755 --- a/test/integration/connect/envoy/run-tests.sh +++ b/test/integration/connect/envoy/run-tests.sh @@ -16,6 +16,8 @@ ENVOY_VERSION=${ENVOY_VERSION:-"1.23.0"} export ENVOY_VERSION export DOCKER_BUILDKIT=1 +# Always run tests on amd64 because that's what the CI environment uses. +export DOCKER_DEFAULT_PLATFORM="linux/amd64" if [ ! 
-z "$DEBUG" ] ; then set -x @@ -44,17 +46,19 @@ function network_snippet { } function aws_snippet { - local snippet="" + if [[ ! -z "$LAMBDA_TESTS_ENABLED" ]]; then + local snippet="" - # The Lambda integration cases assume that a Lambda function exists in $AWS_REGION with an ARN of $AWS_LAMBDA_ARN. - # The AWS credentials must have permission to invoke the Lambda function. - [ -n "$(set | grep '^AWS_ACCESS_KEY_ID=')" ] && snippet="${snippet} -e AWS_ACCESS_KEY_ID=$AWS_ACCESS_KEY_ID" - [ -n "$(set | grep '^AWS_SECRET_ACCESS_KEY=')" ] && snippet="${snippet} -e AWS_SECRET_ACCESS_KEY=$AWS_SECRET_ACCESS_KEY" - [ -n "$(set | grep '^AWS_SESSION_TOKEN=')" ] && snippet="${snippet} -e AWS_SESSION_TOKEN=$AWS_SESSION_TOKEN" - [ -n "$(set | grep '^AWS_LAMBDA_REGION=')" ] && snippet="${snippet} -e AWS_LAMBDA_REGION=$AWS_LAMBDA_REGION" - [ -n "$(set | grep '^AWS_LAMBDA_ARN=')" ] && snippet="${snippet} -e AWS_LAMBDA_ARN=$AWS_LAMBDA_ARN" + # The Lambda integration cases assume that a Lambda function exists in $AWS_REGION with an ARN of $AWS_LAMBDA_ARN. + # The AWS credentials must have permission to invoke the Lambda function. + [ -n "$(set | grep '^AWS_ACCESS_KEY_ID=')" ] && snippet="${snippet} -e AWS_ACCESS_KEY_ID=$AWS_ACCESS_KEY_ID" + [ -n "$(set | grep '^AWS_SECRET_ACCESS_KEY=')" ] && snippet="${snippet} -e AWS_SECRET_ACCESS_KEY=$AWS_SECRET_ACCESS_KEY" + [ -n "$(set | grep '^AWS_SESSION_TOKEN=')" ] && snippet="${snippet} -e AWS_SESSION_TOKEN=$AWS_SESSION_TOKEN" + [ -n "$(set | grep '^AWS_LAMBDA_REGION=')" ] && snippet="${snippet} -e AWS_LAMBDA_REGION=$AWS_LAMBDA_REGION" + [ -n "$(set | grep '^AWS_LAMBDA_ARN=')" ] && snippet="${snippet} -e AWS_LAMBDA_ARN=$AWS_LAMBDA_ARN" - echo "$snippet" + echo "$snippet" + fi } function init_workdir { @@ -222,7 +226,7 @@ function start_consul { --hostname "consul-${DC}-server" \ --network-alias "consul-${DC}-server" \ -e "CONSUL_LICENSE=$license" \ - consul-dev \ + consul:local \ agent -dev -datacenter "${DC}" \ -config-dir "/workdir/${DC}/consul" \ -config-dir "/workdir/${DC}/consul-server" \ @@ -237,7 +241,7 @@ function start_consul { --network-alias "consul-${DC}-client" \ -e "CONSUL_LICENSE=$license" \ ${ports[@]} \ - consul-dev \ + consul:local \ agent -datacenter "${DC}" \ -config-dir "/workdir/${DC}/consul" \ -data-dir "/tmp/consul" \ @@ -256,7 +260,7 @@ function start_consul { --network-alias "consul-${DC}-server" \ -e "CONSUL_LICENSE=$license" \ ${ports[@]} \ - consul-dev \ + consul:local \ agent -dev -datacenter "${DC}" \ -config-dir "/workdir/${DC}/consul" \ -config-dir "/workdir/${DC}/consul-server" \ @@ -290,7 +294,7 @@ function start_partitioned_client { --hostname "consul-${PARTITION}-client" \ --network-alias "consul-${PARTITION}-client" \ -e "CONSUL_LICENSE=$license" \ - consul-dev agent \ + consul:local agent \ -datacenter "primary" \ -retry-join "consul-primary-server" \ -grpc-port 8502 \ From ed7b34128f258854085a14fd56a4262bea305ba5 Mon Sep 17 00:00:00 2001 From: Jorge Marey Date: Tue, 2 Aug 2022 08:52:48 +0200 Subject: [PATCH 56/93] Add new tracing configuration --- agent/xds/config.go | 6 ++++ agent/xds/listeners.go | 65 +++++++++++++++++++++++++++++++++++++++++- 2 files changed, 70 insertions(+), 1 deletion(-) diff --git a/agent/xds/config.go b/agent/xds/config.go index 2fdf9d115e..89e92106d9 100644 --- a/agent/xds/config.go +++ b/agent/xds/config.go @@ -27,6 +27,12 @@ type ProxyConfig struct { // Note: This escape hatch is compatible with the discovery chain. 
PublicListenerJSON string `mapstructure:"envoy_public_listener_json"` + // LstenerTracingJSON is a complete override ("escape hatch") for the + // listeners tracing configuration. + // + // Note: This escape hatch is compatible with the discovery chain. + LstenerTracingJSON string `mapstructure:"envoy_listener_tracing_json"` + // LocalClusterJSON is a complete override ("escape hatch") for the // local application cluster. // diff --git a/agent/xds/listeners.go b/agent/xds/listeners.go index 33c339c4d8..b3c9577e13 100644 --- a/agent/xds/listeners.go +++ b/agent/xds/listeners.go @@ -3,7 +3,6 @@ package xds import ( "errors" "fmt" - envoy_extensions_filters_listener_http_inspector_v3 "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/listener/http_inspector/v3" "net" "net/url" "regexp" @@ -12,6 +11,8 @@ import ( "strings" "time" + envoy_extensions_filters_listener_http_inspector_v3 "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/listener/http_inspector/v3" + envoy_core_v3 "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" envoy_listener_v3 "github.com/envoyproxy/go-control-plane/envoy/config/listener/v3" envoy_route_v3 "github.com/envoyproxy/go-control-plane/envoy/config/route/v3" @@ -107,6 +108,19 @@ func (s *ResourceGenerator) listenersFromSnapshotConnectProxy(cfgSnap *proxycfg. } } + proxyCfg, err := ParseProxyConfig(cfgSnap.Proxy.Config) + if err != nil { + // Don't hard fail on a config typo, just warn. The parse func returns + // default config if there is an error so it's safe to continue. + s.Logger.Warn("failed to parse Connect.Proxy.Config", "error", err) + } + var tracing *envoy_http_v3.HttpConnectionManager_Tracing + if proxyCfg.LstenerTracingJSON != "" { + if tracing, err = makeTracingFromUserConfig(proxyCfg.LstenerTracingJSON); err != nil { + s.Logger.Warn("failed to parse LstenerTracingJSON config", "error", err) + } + } + for uid, chain := range cfgSnap.ConnectProxy.DiscoveryChain { upstreamCfg := cfgSnap.ConnectProxy.UpstreamConfig[uid] @@ -153,6 +167,7 @@ func (s *ResourceGenerator) listenersFromSnapshotConnectProxy(cfgSnap *proxycfg. filterName: filterName, protocol: cfg.Protocol, useRDS: useRDS, + tracing: tracing, }) if err != nil { return nil, err @@ -178,6 +193,7 @@ func (s *ResourceGenerator) listenersFromSnapshotConnectProxy(cfgSnap *proxycfg. filterName: filterName, protocol: cfg.Protocol, useRDS: useRDS, + tracing: tracing, }) if err != nil { return nil, err @@ -249,6 +265,7 @@ func (s *ResourceGenerator) listenersFromSnapshotConnectProxy(cfgSnap *proxycfg. filterName: routeName, protocol: svcConfig.Protocol, useRDS: true, + tracing: tracing, }) if err != nil { return err @@ -265,6 +282,7 @@ func (s *ResourceGenerator) listenersFromSnapshotConnectProxy(cfgSnap *proxycfg. clusterName: clusterName, filterName: clusterName, protocol: svcConfig.Protocol, + tracing: tracing, }) if err != nil { return err @@ -376,6 +394,7 @@ func (s *ResourceGenerator) listenersFromSnapshotConnectProxy(cfgSnap *proxycfg. protocol: cfg.Protocol, useRDS: false, statPrefix: "upstream_peered.", + tracing: tracing, }) if err != nil { return nil, err @@ -533,6 +552,7 @@ func (s *ResourceGenerator) listenersFromSnapshotConnectProxy(cfgSnap *proxycfg. 
filterName: uid.EnvoyID(), routeName: uid.EnvoyID(), protocol: cfg.Protocol, + tracing: tracing, }) if err != nil { return nil, err @@ -1188,12 +1208,20 @@ func (s *ResourceGenerator) makeInboundListener(cfgSnap *proxycfg.ConfigSnapshot l = makePortListener(name, addr, port, envoy_core_v3.TrafficDirection_INBOUND) + var tracing *envoy_http_v3.HttpConnectionManager_Tracing + if cfg.LstenerTracingJSON != "" { + if tracing, err = makeTracingFromUserConfig(cfg.LstenerTracingJSON); err != nil { + s.Logger.Warn("failed to parse LstenerTracingJSON config", "error", err) + } + } + filterOpts := listenerFilterOpts{ protocol: cfg.Protocol, filterName: name, routeName: name, cluster: LocalAppClusterName, requestTimeoutMs: cfg.LocalRequestTimeoutMs, + tracing: tracing, } if useHTTPFilter { filterOpts.httpAuthzFilter, err = makeRBACHTTPFilter( @@ -1310,6 +1338,7 @@ func (s *ResourceGenerator) makeExposedCheckListener(cfgSnap *proxycfg.ConfigSna statPrefix: "", routePath: path.Path, httpAuthzFilter: nil, + // in the exposed check listener de don't set the tracing configuration } f, err := makeListenerFilter(opts) if err != nil { @@ -1542,6 +1571,19 @@ func (s *ResourceGenerator) makeFilterChainTerminatingGateway(cfgSnap *proxycfg. filterChain.Filters = append(filterChain.Filters, authFilter) } + proxyCfg, err := ParseProxyConfig(cfgSnap.Proxy.Config) + if err != nil { + // Don't hard fail on a config typo, just warn. The parse func returns + // default config if there is an error so it's safe to continue. + s.Logger.Warn("failed to parse Connect.Proxy.Config", "error", err) + } + var tracing *envoy_http_v3.HttpConnectionManager_Tracing + if proxyCfg.LstenerTracingJSON != "" { + if tracing, err = makeTracingFromUserConfig(proxyCfg.LstenerTracingJSON); err != nil { + s.Logger.Warn("failed to parse LstenerTracingJSON config", "error", err) + } + } + // Lastly we setup the actual proxying component. For L4 this is a straight // tcp proxy. For L7 this is a very hands-off HTTP proxy just to inject an // HTTP filter to do intention checks here instead. @@ -1552,6 +1594,7 @@ func (s *ResourceGenerator) makeFilterChainTerminatingGateway(cfgSnap *proxycfg. 
cluster: tgtwyOpts.cluster, statPrefix: "upstream.", routePath: "", + tracing: tracing, } if useHTTPFilter { @@ -1798,6 +1841,7 @@ type filterChainOpts struct { statPrefix string forwardClientDetails bool forwardClientPolicy envoy_http_v3.HttpConnectionManager_ForwardClientCertDetails + tracing *envoy_http_v3.HttpConnectionManager_Tracing } func (s *ResourceGenerator) makeUpstreamFilterChain(opts filterChainOpts) (*envoy_listener_v3.FilterChain, error) { @@ -1813,6 +1857,7 @@ func (s *ResourceGenerator) makeUpstreamFilterChain(opts filterChainOpts) (*envo statPrefix: opts.statPrefix, forwardClientDetails: opts.forwardClientDetails, forwardClientPolicy: opts.forwardClientPolicy, + tracing: opts.tracing, }) if err != nil { return nil, err @@ -1955,6 +2000,7 @@ type listenerFilterOpts struct { httpAuthzFilter *envoy_http_v3.HttpFilter forwardClientDetails bool forwardClientPolicy envoy_http_v3.HttpConnectionManager_ForwardClientCertDetails + tracing *envoy_http_v3.HttpConnectionManager_Tracing } func makeListenerFilter(opts listenerFilterOpts) (*envoy_listener_v3.Filter, error) { @@ -2014,6 +2060,19 @@ func makeStatPrefix(prefix, filterName string) string { return fmt.Sprintf("%s%s", prefix, strings.Replace(filterName, ":", "_", -1)) } +func makeTracingFromUserConfig(configJSON string) (*envoy_http_v3.HttpConnectionManager_Tracing, error) { + // Type field is present so decode it as a any.Any + var any any.Any + if err := jsonpb.UnmarshalString(configJSON, &any); err != nil { + return nil, err + } + var t envoy_http_v3.HttpConnectionManager_Tracing + if err := proto.Unmarshal(any.Value, &t); err != nil { + return nil, err + } + return &t, nil +} + func makeHTTPFilter(opts listenerFilterOpts) (*envoy_listener_v3.Filter, error) { router, err := makeEnvoyHTTPFilter("envoy.filters.http.router", &envoy_http_router_v3.Router{}) if err != nil { @@ -2034,6 +2093,10 @@ func makeHTTPFilter(opts listenerFilterOpts) (*envoy_listener_v3.Filter, error) }, } + if opts.tracing != nil { + cfg.Tracing = opts.tracing + } + if opts.useRDS { if opts.cluster != "" { return nil, fmt.Errorf("cannot specify cluster name when using RDS") From 7bf1eb9369e4c329376b4ada780193a2ffaebfd5 Mon Sep 17 00:00:00 2001 From: Jorge Marey Date: Tue, 2 Aug 2022 09:58:37 +0200 Subject: [PATCH 57/93] add changelog file --- .changelog/13998.txt | 3 +++ 1 file changed, 3 insertions(+) create mode 100644 .changelog/13998.txt diff --git a/.changelog/13998.txt b/.changelog/13998.txt new file mode 100644 index 0000000000..cced542dba --- /dev/null +++ b/.changelog/13998.txt @@ -0,0 +1,3 @@ +```release-note:improvement +connect: change deprecated tracing configuration on envoy +``` From 3f3bb8831eccf7fa27f7c067113f2b91f16c184a Mon Sep 17 00:00:00 2001 From: Jorge Marey Date: Tue, 30 Aug 2022 08:36:06 +0200 Subject: [PATCH 58/93] Fix typos. Add test. Add documentation --- agent/xds/config.go | 4 +- agent/xds/listeners.go | 20 +- agent/xds/listeners_test.go | 43 +++++ .../custom-trace-listener.latest.golden | 180 ++++++++++++++++++ .../content/docs/connect/proxies/envoy.mdx | 39 ++++ 5 files changed, 274 insertions(+), 12 deletions(-) create mode 100644 agent/xds/testdata/listeners/custom-trace-listener.latest.golden diff --git a/agent/xds/config.go b/agent/xds/config.go index 89e92106d9..cfbd23e070 100644 --- a/agent/xds/config.go +++ b/agent/xds/config.go @@ -27,11 +27,11 @@ type ProxyConfig struct { // Note: This escape hatch is compatible with the discovery chain. 
PublicListenerJSON string `mapstructure:"envoy_public_listener_json"` - // LstenerTracingJSON is a complete override ("escape hatch") for the + // ListenerTracingJSON is a complete override ("escape hatch") for the // listeners tracing configuration. // // Note: This escape hatch is compatible with the discovery chain. - LstenerTracingJSON string `mapstructure:"envoy_listener_tracing_json"` + ListenerTracingJSON string `mapstructure:"envoy_listener_tracing_json"` // LocalClusterJSON is a complete override ("escape hatch") for the // local application cluster. diff --git a/agent/xds/listeners.go b/agent/xds/listeners.go index b3c9577e13..488cc6eb85 100644 --- a/agent/xds/listeners.go +++ b/agent/xds/listeners.go @@ -115,9 +115,9 @@ func (s *ResourceGenerator) listenersFromSnapshotConnectProxy(cfgSnap *proxycfg. s.Logger.Warn("failed to parse Connect.Proxy.Config", "error", err) } var tracing *envoy_http_v3.HttpConnectionManager_Tracing - if proxyCfg.LstenerTracingJSON != "" { - if tracing, err = makeTracingFromUserConfig(proxyCfg.LstenerTracingJSON); err != nil { - s.Logger.Warn("failed to parse LstenerTracingJSON config", "error", err) + if proxyCfg.ListenerTracingJSON != "" { + if tracing, err = makeTracingFromUserConfig(proxyCfg.ListenerTracingJSON); err != nil { + s.Logger.Warn("failed to parse ListenerTracingJSON config", "error", err) } } @@ -1209,9 +1209,9 @@ func (s *ResourceGenerator) makeInboundListener(cfgSnap *proxycfg.ConfigSnapshot l = makePortListener(name, addr, port, envoy_core_v3.TrafficDirection_INBOUND) var tracing *envoy_http_v3.HttpConnectionManager_Tracing - if cfg.LstenerTracingJSON != "" { - if tracing, err = makeTracingFromUserConfig(cfg.LstenerTracingJSON); err != nil { - s.Logger.Warn("failed to parse LstenerTracingJSON config", "error", err) + if cfg.ListenerTracingJSON != "" { + if tracing, err = makeTracingFromUserConfig(cfg.ListenerTracingJSON); err != nil { + s.Logger.Warn("failed to parse ListenerTracingJSON config", "error", err) } } @@ -1338,7 +1338,7 @@ func (s *ResourceGenerator) makeExposedCheckListener(cfgSnap *proxycfg.ConfigSna statPrefix: "", routePath: path.Path, httpAuthzFilter: nil, - // in the exposed check listener de don't set the tracing configuration + // in the exposed check listener we don't set the tracing configuration } f, err := makeListenerFilter(opts) if err != nil { @@ -1578,9 +1578,9 @@ func (s *ResourceGenerator) makeFilterChainTerminatingGateway(cfgSnap *proxycfg. 
s.Logger.Warn("failed to parse Connect.Proxy.Config", "error", err) } var tracing *envoy_http_v3.HttpConnectionManager_Tracing - if proxyCfg.LstenerTracingJSON != "" { - if tracing, err = makeTracingFromUserConfig(proxyCfg.LstenerTracingJSON); err != nil { - s.Logger.Warn("failed to parse LstenerTracingJSON config", "error", err) + if proxyCfg.ListenerTracingJSON != "" { + if tracing, err = makeTracingFromUserConfig(proxyCfg.ListenerTracingJSON); err != nil { + s.Logger.Warn("failed to parse ListenerTracingJSON config", "error", err) } } diff --git a/agent/xds/listeners_test.go b/agent/xds/listeners_test.go index c51730074c..1112222f3f 100644 --- a/agent/xds/listeners_test.go +++ b/agent/xds/listeners_test.go @@ -772,6 +772,15 @@ func TestListenersFromSnapshot(t *testing.T) { name: "transparent-proxy-terminating-gateway", create: proxycfg.TestConfigSnapshotTransparentProxyTerminatingGatewayCatalogDestinationsOnly, }, + { + name: "custom-trace-listener", + create: func(t testinf.T) *proxycfg.ConfigSnapshot { + return proxycfg.TestConfigSnapshot(t, func(ns *structs.NodeService) { + ns.Proxy.Config["protocol"] = "http" + ns.Proxy.Config["envoy_listener_tracing_json"] = customTraceJSON(t) + }, nil) + }, + }, } latestEnvoyVersion := proxysupport.EnvoyVersions[0] @@ -947,6 +956,40 @@ func customHTTPListenerJSON(t testinf.T, opts customHTTPListenerJSONOptions) str return buf.String() } +func customTraceJSON(t testinf.T) string { + t.Helper() + return ` + { + "@type" : "type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.Tracing", + "provider" : { + "name" : "envoy.tracers.zipkin", + "typed_config" : { + "@type" : "type.googleapis.com/envoy.config.trace.v3.ZipkinConfig", + "collector_cluster" : "otelcolector", + "collector_endpoint" : "/api/v2/spans", + "collector_endpoint_version" : "HTTP_JSON", + "shared_span_context" : false + } + }, + "custom_tags" : [ + { + "tag" : "custom_header", + "request_header" : { + "name" : "x-custom-traceid", + "default_value" : "" + } + }, + { + "tag" : "alloc_id", + "environment" : { + "name" : "NOMAD_ALLOC_ID" + } + } + ] + } + ` +} + type configFetcherFunc func() string var _ ConfigFetcher = (configFetcherFunc)(nil) diff --git a/agent/xds/testdata/listeners/custom-trace-listener.latest.golden b/agent/xds/testdata/listeners/custom-trace-listener.latest.golden new file mode 100644 index 0000000000..5fce12bb73 --- /dev/null +++ b/agent/xds/testdata/listeners/custom-trace-listener.latest.golden @@ -0,0 +1,180 @@ +{ + "versionInfo": "00000001", + "resources": [ + { + "@type": "type.googleapis.com/envoy.config.listener.v3.Listener", + "name": "db:127.0.0.1:9191", + "address": { + "socketAddress": { + "address": "127.0.0.1", + "portValue": 9191 + } + }, + "filterChains": [ + { + "filters": [ + { + "name": "envoy.filters.network.tcp_proxy", + "typedConfig": { + "@type": "type.googleapis.com/envoy.extensions.filters.network.tcp_proxy.v3.TcpProxy", + "statPrefix": "upstream.db.default.default.dc1", + "cluster": "db.default.dc1.internal.11111111-2222-3333-4444-555555555555.consul" + } + } + ] + } + ], + "trafficDirection": "OUTBOUND" + }, + { + "@type": "type.googleapis.com/envoy.config.listener.v3.Listener", + "name": "prepared_query:geo-cache:127.10.10.10:8181", + "address": { + "socketAddress": { + "address": "127.10.10.10", + "portValue": 8181 + } + }, + "filterChains": [ + { + "filters": [ + { + "name": "envoy.filters.network.tcp_proxy", + "typedConfig": { + "@type": 
"type.googleapis.com/envoy.extensions.filters.network.tcp_proxy.v3.TcpProxy", + "statPrefix": "upstream.prepared_query_geo-cache", + "cluster": "geo-cache.default.dc1.query.11111111-2222-3333-4444-555555555555.consul" + } + } + ] + } + ], + "trafficDirection": "OUTBOUND" + }, + { + "@type": "type.googleapis.com/envoy.config.listener.v3.Listener", + "name": "public_listener:0.0.0.0:9999", + "address": { + "socketAddress": { + "address": "0.0.0.0", + "portValue": 9999 + } + }, + "filterChains": [ + { + "filters": [ + { + "name": "envoy.filters.network.http_connection_manager", + "typedConfig": { + "@type": "type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager", + "statPrefix": "public_listener", + "routeConfig": { + "name": "public_listener", + "virtualHosts": [ + { + "name": "public_listener", + "domains": [ + "*" + ], + "routes": [ + { + "match": { + "prefix": "/" + }, + "route": { + "cluster": "local_app" + } + } + ] + } + ] + }, + "httpFilters": [ + { + "name": "envoy.filters.http.rbac", + "typedConfig": { + "@type": "type.googleapis.com/envoy.extensions.filters.http.rbac.v3.RBAC", + "rules": { + + } + } + }, + { + "name": "envoy.filters.http.router", + "typedConfig": { + "@type": "type.googleapis.com/envoy.extensions.filters.http.router.v3.Router" + } + } + ], + "tracing": { + "customTags": [ + { + "tag": "custom_header", + "requestHeader": { + "name": "x-custom-traceid" + } + }, + { + "tag": "alloc_id", + "environment": { + "name": "NOMAD_ALLOC_ID" + } + } + ], + "provider": { + "name": "envoy.tracers.zipkin", + "typedConfig": { + "@type": "type.googleapis.com/envoy.config.trace.v3.ZipkinConfig", + "collectorCluster": "otelcolector", + "collectorEndpoint": "/api/v2/spans", + "sharedSpanContext": false, + "collectorEndpointVersion": "HTTP_JSON" + } + } + }, + "forwardClientCertDetails": "APPEND_FORWARD", + "setCurrentClientCertDetails": { + "subject": true, + "cert": true, + "chain": true, + "dns": true, + "uri": true + } + } + } + ], + "transportSocket": { + "name": "tls", + "typedConfig": { + "@type": "type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.DownstreamTlsContext", + "commonTlsContext": { + "tlsParams": { + + }, + "tlsCertificates": [ + { + "certificateChain": { + "inlineString": "-----BEGIN CERTIFICATE-----\nMIICjDCCAjKgAwIBAgIIC5llxGV1gB8wCgYIKoZIzj0EAwIwFDESMBAGA1UEAxMJ\nVGVzdCBDQSAyMB4XDTE5MDMyMjEzNTgyNloXDTI5MDMyMjEzNTgyNlowDjEMMAoG\nA1UEAxMDd2ViMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEADPv1RHVNRfa2VKR\nAB16b6rZnEt7tuhaxCFpQXPj7M2omb0B9Favq5E0ivpNtv1QnFhxtPd7d5k4e+T7\nSkW1TaOCAXIwggFuMA4GA1UdDwEB/wQEAwIDuDAdBgNVHSUEFjAUBggrBgEFBQcD\nAgYIKwYBBQUHAwEwDAYDVR0TAQH/BAIwADBoBgNVHQ4EYQRfN2Q6MDc6ODc6M2E6\nNDA6MTk6NDc6YzM6NWE6YzA6YmE6NjI6ZGY6YWY6NGI6ZDQ6MDU6MjU6NzY6M2Q6\nNWE6OGQ6MTY6OGQ6Njc6NWU6MmU6YTA6MzQ6N2Q6ZGM6ZmYwagYDVR0jBGMwYYBf\nZDE6MTE6MTE6YWM6MmE6YmE6OTc6YjI6M2Y6YWM6N2I6YmQ6ZGE6YmU6YjE6OGE6\nZmM6OWE6YmE6YjU6YmM6ODM6ZTc6NWU6NDE6NmY6ZjI6NzM6OTU6NTg6MGM6ZGIw\nWQYDVR0RBFIwUIZOc3BpZmZlOi8vMTExMTExMTEtMjIyMi0zMzMzLTQ0NDQtNTU1\nNTU1NTU1NTU1LmNvbnN1bC9ucy9kZWZhdWx0L2RjL2RjMS9zdmMvd2ViMAoGCCqG\nSM49BAMCA0gAMEUCIGC3TTvvjj76KMrguVyFf4tjOqaSCRie3nmHMRNNRav7AiEA\npY0heYeK9A6iOLrzqxSerkXXQyj5e9bE4VgUnxgPU6g=\n-----END CERTIFICATE-----\n" + }, + "privateKey": { + "inlineString": "-----BEGIN EC PRIVATE KEY-----\nMHcCAQEEIMoTkpRggp3fqZzFKh82yS4LjtJI+XY+qX/7DefHFrtdoAoGCCqGSM49\nAwEHoUQDQgAEADPv1RHVNRfa2VKRAB16b6rZnEt7tuhaxCFpQXPj7M2omb0B9Fav\nq5E0ivpNtv1QnFhxtPd7d5k4e+T7SkW1TQ==\n-----END EC PRIVATE KEY-----\n" + } + } + ], + 
"validationContext": { + "trustedCa": { + "inlineString": "-----BEGIN CERTIFICATE-----\nMIICXDCCAgKgAwIBAgIICpZq70Z9LyUwCgYIKoZIzj0EAwIwFDESMBAGA1UEAxMJ\nVGVzdCBDQSAyMB4XDTE5MDMyMjEzNTgyNloXDTI5MDMyMjEzNTgyNlowFDESMBAG\nA1UEAxMJVGVzdCBDQSAyMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEIhywH1gx\nAsMwuF3ukAI5YL2jFxH6Usnma1HFSfVyxbXX1/uoZEYrj8yCAtdU2yoHETyd+Zx2\nThhRLP79pYegCaOCATwwggE4MA4GA1UdDwEB/wQEAwIBhjAPBgNVHRMBAf8EBTAD\nAQH/MGgGA1UdDgRhBF9kMToxMToxMTphYzoyYTpiYTo5NzpiMjozZjphYzo3Yjpi\nZDpkYTpiZTpiMTo4YTpmYzo5YTpiYTpiNTpiYzo4MzplNzo1ZTo0MTo2ZjpmMjo3\nMzo5NTo1ODowYzpkYjBqBgNVHSMEYzBhgF9kMToxMToxMTphYzoyYTpiYTo5Nzpi\nMjozZjphYzo3YjpiZDpkYTpiZTpiMTo4YTpmYzo5YTpiYTpiNTpiYzo4MzplNzo1\nZTo0MTo2ZjpmMjo3Mzo5NTo1ODowYzpkYjA/BgNVHREEODA2hjRzcGlmZmU6Ly8x\nMTExMTExMS0yMjIyLTMzMzMtNDQ0NC01NTU1NTU1NTU1NTUuY29uc3VsMAoGCCqG\nSM49BAMCA0gAMEUCICOY0i246rQHJt8o8Oya0D5PLL1FnmsQmQqIGCi31RwnAiEA\noR5f6Ku+cig2Il8T8LJujOp2/2A72QcHZA57B13y+8o=\n-----END CERTIFICATE-----\n" + } + } + }, + "requireClientCertificate": true + } + } + } + ], + "trafficDirection": "INBOUND" + } + ], + "typeUrl": "type.googleapis.com/envoy.config.listener.v3.Listener", + "nonce": "00000001" +} \ No newline at end of file diff --git a/website/content/docs/connect/proxies/envoy.mdx b/website/content/docs/connect/proxies/envoy.mdx index 7ada5b6fd0..020a0510f0 100644 --- a/website/content/docs/connect/proxies/envoy.mdx +++ b/website/content/docs/connect/proxies/envoy.mdx @@ -759,6 +759,45 @@ definition](/docs/connect/registration/service-registration) or +- `envoy_listener_tracing_json` - Specifies a [tracing + configuration](https://www.envoyproxy.io/docs/envoy/latest/api-v3/extensions/filters/network/http_connection_manager/v3/http_connection_manager.proto#envoy-v3-api-msg-extensions-filters-network-http-connection-manager-v3-httpconnectionmanager-tracing) + to be inserter in the public and upstreams listeners of the proxy. + + + + ```json + { + "@type" : "type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.Tracing", + "provider" : { + "name" : "envoy.tracers.zipkin", + "typed_config" : { + "@type" : "type.googleapis.com/envoy.config.trace.v3.ZipkinConfig", + "collector_cluster" : "otelcolector", + "collector_endpoint" : "/api/v2/spans", + "collector_endpoint_version" : "HTTP_JSON", + "shared_span_context" : false + } + }, + "custom_tags" : [ + { + "tag" : "custom_header", + "request_header" : { + "name" : "x-custom-traceid", + "default_value" : "" + } + }, + { + "tag" : "alloc_id", + "environment" : { + "name" : "NOMAD_ALLOC_ID" + } + } + ] + } + ``` + + + - `envoy_local_cluster_json` - Specifies a complete [Envoy cluster][pb-cluster] to be delivered in place of the local application cluster. This allows customization of timeouts, rate limits, load balancing strategy etc. 
From 311a728836fff19ea7c5dfb5e99c96d2730b8f1c Mon Sep 17 00:00:00 2001 From: Jorge Marey Date: Tue, 30 Aug 2022 17:00:11 +0200 Subject: [PATCH 59/93] Change changelog message --- .changelog/13998.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.changelog/13998.txt b/.changelog/13998.txt index cced542dba..0fba03f22e 100644 --- a/.changelog/13998.txt +++ b/.changelog/13998.txt @@ -1,3 +1,3 @@ ```release-note:improvement -connect: change deprecated tracing configuration on envoy +connect: expose new tracing configuration on envoy ``` From 3726a0ab7aa5b60a1d4a12db08d8f1c137fa6072 Mon Sep 17 00:00:00 2001 From: Eric Haberkorn Date: Tue, 30 Aug 2022 11:46:34 -0400 Subject: [PATCH 60/93] Finish up cluster peering failover (#14396) --- .changelog/14396.txt | 3 + agent/proxycfg/connect_proxy.go | 34 ++- agent/proxycfg/ingress_gateway.go | 11 + agent/proxycfg/snapshot.go | 12 + agent/proxycfg/state_test.go | 62 ++++- agent/proxycfg/testing.go | 25 ++ agent/proxycfg/testing_upstreams.go | 34 +++ agent/proxycfg/upstreams.go | 92 ++++++-- agent/structs/testing_catalog.go | 22 ++ agent/xds/clusters.go | 165 +++++++------ agent/xds/clusters_test.go | 13 ++ agent/xds/endpoints.go | 216 ++++++++++------- agent/xds/endpoints_test.go | 13 ++ ...and-failover-to-cluster-peer.latest.golden | 219 ++++++++++++++++++ ...and-failover-to-cluster-peer.latest.golden | 139 +++++++++++ ...and-failover-to-cluster-peer.latest.golden | 109 +++++++++ ...and-failover-to-cluster-peer.latest.golden | 75 ++++++ .../alpha/base.hcl | 5 + .../alpha/config_entries.hcl | 26 +++ .../alpha/service_gateway.hcl | 5 + .../alpha/service_s1.hcl | 1 + .../alpha/service_s2.hcl | 7 + .../alpha/setup.sh | 11 + .../alpha/verify.bats | 27 +++ .../bind.hcl | 2 + .../capture.sh | 6 + .../primary/base.hcl | 3 + .../primary/config_entries.hcl | 21 ++ .../primary/service_s1.hcl | 16 ++ .../primary/service_s2.hcl | 7 + .../primary/setup.sh | 10 + .../primary/verify.bats | 87 +++++++ .../vars.sh | 4 + 33 files changed, 1298 insertions(+), 184 deletions(-) create mode 100644 .changelog/14396.txt create mode 100644 agent/xds/testdata/clusters/connect-proxy-with-chain-and-failover-to-cluster-peer.latest.golden create mode 100644 agent/xds/testdata/clusters/ingress-with-chain-and-failover-to-cluster-peer.latest.golden create mode 100644 agent/xds/testdata/endpoints/connect-proxy-with-chain-and-failover-to-cluster-peer.latest.golden create mode 100644 agent/xds/testdata/endpoints/ingress-with-chain-and-failover-to-cluster-peer.latest.golden create mode 100644 test/integration/connect/envoy/case-cfg-resolver-cluster-peering-failover/alpha/base.hcl create mode 100644 test/integration/connect/envoy/case-cfg-resolver-cluster-peering-failover/alpha/config_entries.hcl create mode 100644 test/integration/connect/envoy/case-cfg-resolver-cluster-peering-failover/alpha/service_gateway.hcl create mode 100644 test/integration/connect/envoy/case-cfg-resolver-cluster-peering-failover/alpha/service_s1.hcl create mode 100644 test/integration/connect/envoy/case-cfg-resolver-cluster-peering-failover/alpha/service_s2.hcl create mode 100644 test/integration/connect/envoy/case-cfg-resolver-cluster-peering-failover/alpha/setup.sh create mode 100644 test/integration/connect/envoy/case-cfg-resolver-cluster-peering-failover/alpha/verify.bats create mode 100644 test/integration/connect/envoy/case-cfg-resolver-cluster-peering-failover/bind.hcl create mode 100644 test/integration/connect/envoy/case-cfg-resolver-cluster-peering-failover/capture.sh create mode 
100644 test/integration/connect/envoy/case-cfg-resolver-cluster-peering-failover/primary/base.hcl create mode 100644 test/integration/connect/envoy/case-cfg-resolver-cluster-peering-failover/primary/config_entries.hcl create mode 100644 test/integration/connect/envoy/case-cfg-resolver-cluster-peering-failover/primary/service_s1.hcl create mode 100644 test/integration/connect/envoy/case-cfg-resolver-cluster-peering-failover/primary/service_s2.hcl create mode 100644 test/integration/connect/envoy/case-cfg-resolver-cluster-peering-failover/primary/setup.sh create mode 100644 test/integration/connect/envoy/case-cfg-resolver-cluster-peering-failover/primary/verify.bats create mode 100644 test/integration/connect/envoy/case-cfg-resolver-cluster-peering-failover/vars.sh diff --git a/.changelog/14396.txt b/.changelog/14396.txt new file mode 100644 index 0000000000..3905df9462 --- /dev/null +++ b/.changelog/14396.txt @@ -0,0 +1,3 @@ +```release-note:feature +peering: Add support to failover to services running on cluster peers. +``` diff --git a/agent/proxycfg/connect_proxy.go b/agent/proxycfg/connect_proxy.go index 15e3498f2f..2ff1f9ca9f 100644 --- a/agent/proxycfg/connect_proxy.go +++ b/agent/proxycfg/connect_proxy.go @@ -280,16 +280,6 @@ func (s *handlerConnectProxy) handleUpdate(ctx context.Context, u UpdateEvent, s } snap.Roots = roots - case strings.HasPrefix(u.CorrelationID, peerTrustBundleIDPrefix): - resp, ok := u.Result.(*pbpeering.TrustBundleReadResponse) - if !ok { - return fmt.Errorf("invalid type for response: %T", u.Result) - } - peer := strings.TrimPrefix(u.CorrelationID, peerTrustBundleIDPrefix) - if resp.Bundle != nil { - snap.ConnectProxy.UpstreamPeerTrustBundles.Set(peer, resp.Bundle) - } - case u.CorrelationID == peeringTrustBundlesWatchID: resp, ok := u.Result.(*pbpeering.TrustBundleListByServiceResponse) if !ok { @@ -369,6 +359,17 @@ func (s *handlerConnectProxy) handleUpdate(ctx context.Context, u UpdateEvent, s // Clean up data // + peeredChainTargets := make(map[UpstreamID]struct{}) + for _, discoChain := range snap.ConnectProxy.DiscoveryChain { + for _, target := range discoChain.Targets { + if target.Peer == "" { + continue + } + uid := NewUpstreamIDFromTargetID(target.ID) + peeredChainTargets[uid] = struct{}{} + } + } + validPeerNames := make(map[string]struct{}) // Iterate through all known endpoints and remove references to upstream IDs that weren't in the update @@ -383,6 +384,11 @@ func (s *handlerConnectProxy) handleUpdate(ctx context.Context, u UpdateEvent, s validPeerNames[uid.Peer] = struct{}{} return true } + // Peered upstream came from a discovery chain target + if _, ok := peeredChainTargets[uid]; ok { + validPeerNames[uid.Peer] = struct{}{} + return true + } snap.ConnectProxy.PeerUpstreamEndpoints.CancelWatch(uid) return true }) @@ -463,8 +469,14 @@ func (s *handlerConnectProxy) handleUpdate(ctx context.Context, u UpdateEvent, s continue } if _, ok := seenUpstreams[uid]; !ok { - for _, cancelFn := range targets { + for targetID, cancelFn := range targets { cancelFn() + + targetUID := NewUpstreamIDFromTargetID(targetID) + if targetUID.Peer != "" { + snap.ConnectProxy.PeerUpstreamEndpoints.CancelWatch(targetUID) + snap.ConnectProxy.UpstreamPeerTrustBundles.CancelWatch(targetUID.Peer) + } } delete(snap.ConnectProxy.WatchedUpstreams, uid) } diff --git a/agent/proxycfg/ingress_gateway.go b/agent/proxycfg/ingress_gateway.go index 8282298649..81a4928369 100644 --- a/agent/proxycfg/ingress_gateway.go +++ b/agent/proxycfg/ingress_gateway.go @@ -5,7 +5,9 @@ import ( 
"fmt" cachetype "github.com/hashicorp/consul/agent/cache-types" + "github.com/hashicorp/consul/agent/proxycfg/internal/watch" "github.com/hashicorp/consul/agent/structs" + "github.com/hashicorp/consul/proto/pbpeering" ) type handlerIngressGateway struct { @@ -66,6 +68,9 @@ func (s *handlerIngressGateway) initialize(ctx context.Context) (ConfigSnapshot, snap.IngressGateway.WatchedGateways = make(map[UpstreamID]map[string]context.CancelFunc) snap.IngressGateway.WatchedGatewayEndpoints = make(map[UpstreamID]map[string]structs.CheckServiceNodes) snap.IngressGateway.Listeners = make(map[IngressListenerKey]structs.IngressListener) + snap.IngressGateway.UpstreamPeerTrustBundles = watch.NewMap[string, *pbpeering.PeeringTrustBundle]() + snap.IngressGateway.PeerUpstreamEndpoints = watch.NewMap[UpstreamID, structs.CheckServiceNodes]() + snap.IngressGateway.PeerUpstreamEndpointsUseHostnames = make(map[UpstreamID]struct{}) return snap, nil } @@ -152,6 +157,12 @@ func (s *handlerIngressGateway) handleUpdate(ctx context.Context, u UpdateEvent, delete(snap.IngressGateway.WatchedUpstreams[uid], targetID) delete(snap.IngressGateway.WatchedUpstreamEndpoints[uid], targetID) cancelUpstreamFn() + + targetUID := NewUpstreamIDFromTargetID(targetID) + if targetUID.Peer != "" { + snap.IngressGateway.PeerUpstreamEndpoints.CancelWatch(targetUID) + snap.IngressGateway.UpstreamPeerTrustBundles.CancelWatch(targetUID.Peer) + } } cancelFn() diff --git a/agent/proxycfg/snapshot.go b/agent/proxycfg/snapshot.go index 8d2d81bedf..23cb8a9556 100644 --- a/agent/proxycfg/snapshot.go +++ b/agent/proxycfg/snapshot.go @@ -814,6 +814,18 @@ func (s *ConfigSnapshot) MeshConfigTLSOutgoing() *structs.MeshDirectionalTLSConf return mesh.TLS.Outgoing } +func (s *ConfigSnapshot) ToConfigSnapshotUpstreams() (*ConfigSnapshotUpstreams, error) { + switch s.Kind { + case structs.ServiceKindConnectProxy: + return &s.ConnectProxy.ConfigSnapshotUpstreams, nil + case structs.ServiceKindIngressGateway: + return &s.IngressGateway.ConfigSnapshotUpstreams, nil + default: + // This is a coherence check and should never fail + return nil, fmt.Errorf("No upstream snapshot for gateway mode %q", s.Kind) + } +} + func (u *ConfigSnapshotUpstreams) UpstreamPeerMeta(uid UpstreamID) structs.PeeringServiceMeta { nodes, _ := u.PeerUpstreamEndpoints.Get(uid) if len(nodes) == 0 { diff --git a/agent/proxycfg/state_test.go b/agent/proxycfg/state_test.go index 855ded03d5..825ac84fe6 100644 --- a/agent/proxycfg/state_test.go +++ b/agent/proxycfg/state_test.go @@ -493,6 +493,11 @@ func TestState_WatchesAndUpdates(t *testing.T) { Mode: structs.MeshGatewayModeNone, }, }, + structs.Upstream{ + DestinationType: structs.UpstreamDestTypeService, + DestinationName: "api-failover-to-peer", + LocalBindPort: 10007, + }, structs.Upstream{ DestinationType: structs.UpstreamDestTypeService, DestinationName: "api-dc2", @@ -552,6 +557,16 @@ func TestState_WatchesAndUpdates(t *testing.T) { Mode: structs.MeshGatewayModeNone, }, }), + fmt.Sprintf("discovery-chain:%s-failover-to-peer", apiUID.String()): genVerifyDiscoveryChainWatch(&structs.DiscoveryChainRequest{ + Name: "api-failover-to-peer", + EvaluateInDatacenter: "dc1", + EvaluateInNamespace: "default", + EvaluateInPartition: "default", + Datacenter: "dc1", + OverrideMeshGateway: structs.MeshGatewayConfig{ + Mode: meshGatewayProxyConfigValue, + }, + }), fmt.Sprintf("discovery-chain:%s-dc2", apiUID.String()): genVerifyDiscoveryChainWatch(&structs.DiscoveryChainRequest{ Name: "api-dc2", EvaluateInDatacenter: "dc1", @@ -639,6 +654,26 @@ 
func TestState_WatchesAndUpdates(t *testing.T) { }, Err: nil, }, + { + CorrelationID: fmt.Sprintf("discovery-chain:%s-failover-to-peer", apiUID.String()), + Result: &structs.DiscoveryChainResponse{ + Chain: discoverychain.TestCompileConfigEntries(t, "api-failover-to-peer", "default", "default", "dc1", "trustdomain.consul", + func(req *discoverychain.CompileRequest) { + req.OverrideMeshGateway.Mode = meshGatewayProxyConfigValue + }, &structs.ServiceResolverConfigEntry{ + Kind: structs.ServiceResolver, + Name: "api-failover-to-peer", + Failover: map[string]structs.ServiceResolverFailover{ + "*": { + Targets: []structs.ServiceResolverFailoverTarget{ + {Peer: "cluster-01"}, + }, + }, + }, + }), + }, + Err: nil, + }, }, verifySnapshot: func(t testing.TB, snap *ConfigSnapshot) { require.True(t, snap.Valid()) @@ -646,15 +681,18 @@ func TestState_WatchesAndUpdates(t *testing.T) { require.Equal(t, indexedRoots, snap.Roots) require.Equal(t, issuedCert, snap.ConnectProxy.Leaf) - require.Len(t, snap.ConnectProxy.DiscoveryChain, 5, "%+v", snap.ConnectProxy.DiscoveryChain) - require.Len(t, snap.ConnectProxy.WatchedUpstreams, 5, "%+v", snap.ConnectProxy.WatchedUpstreams) - require.Len(t, snap.ConnectProxy.WatchedUpstreamEndpoints, 5, "%+v", snap.ConnectProxy.WatchedUpstreamEndpoints) - require.Len(t, snap.ConnectProxy.WatchedGateways, 5, "%+v", snap.ConnectProxy.WatchedGateways) - require.Len(t, snap.ConnectProxy.WatchedGatewayEndpoints, 5, "%+v", snap.ConnectProxy.WatchedGatewayEndpoints) + require.Len(t, snap.ConnectProxy.DiscoveryChain, 6, "%+v", snap.ConnectProxy.DiscoveryChain) + require.Len(t, snap.ConnectProxy.WatchedUpstreams, 6, "%+v", snap.ConnectProxy.WatchedUpstreams) + require.Len(t, snap.ConnectProxy.WatchedUpstreamEndpoints, 6, "%+v", snap.ConnectProxy.WatchedUpstreamEndpoints) + require.Len(t, snap.ConnectProxy.WatchedGateways, 6, "%+v", snap.ConnectProxy.WatchedGateways) + require.Len(t, snap.ConnectProxy.WatchedGatewayEndpoints, 6, "%+v", snap.ConnectProxy.WatchedGatewayEndpoints) require.Len(t, snap.ConnectProxy.WatchedServiceChecks, 0, "%+v", snap.ConnectProxy.WatchedServiceChecks) require.Len(t, snap.ConnectProxy.PreparedQueryEndpoints, 0, "%+v", snap.ConnectProxy.PreparedQueryEndpoints) + require.Equal(t, 1, snap.ConnectProxy.ConfigSnapshotUpstreams.PeerUpstreamEndpoints.Len()) + require.Equal(t, 1, snap.ConnectProxy.ConfigSnapshotUpstreams.UpstreamPeerTrustBundles.Len()) + require.True(t, snap.ConnectProxy.IntentionsSet) require.Equal(t, ixnMatch, snap.ConnectProxy.Intentions) require.True(t, snap.ConnectProxy.MeshConfigSet) @@ -667,6 +705,7 @@ func TestState_WatchesAndUpdates(t *testing.T) { fmt.Sprintf("upstream-target:api-failover-remote.default.default.dc2:%s-failover-remote?dc=dc2", apiUID.String()): genVerifyServiceSpecificRequest("api-failover-remote", "", "dc2", true), fmt.Sprintf("upstream-target:api-failover-local.default.default.dc2:%s-failover-local?dc=dc2", apiUID.String()): genVerifyServiceSpecificRequest("api-failover-local", "", "dc2", true), fmt.Sprintf("upstream-target:api-failover-direct.default.default.dc2:%s-failover-direct?dc=dc2", apiUID.String()): genVerifyServiceSpecificRequest("api-failover-direct", "", "dc2", true), + upstreamPeerWatchIDPrefix + fmt.Sprintf("%s-failover-to-peer?peer=cluster-01", apiUID.String()): genVerifyServiceSpecificPeeredRequest("api-failover-to-peer", "", "", "cluster-01", true), fmt.Sprintf("mesh-gateway:dc2:%s-failover-remote?dc=dc2", apiUID.String()): genVerifyGatewayWatch("dc2"), 
fmt.Sprintf("mesh-gateway:dc1:%s-failover-local?dc=dc2", apiUID.String()): genVerifyGatewayWatch("dc1"), }, @@ -676,15 +715,18 @@ func TestState_WatchesAndUpdates(t *testing.T) { require.Equal(t, indexedRoots, snap.Roots) require.Equal(t, issuedCert, snap.ConnectProxy.Leaf) - require.Len(t, snap.ConnectProxy.DiscoveryChain, 5, "%+v", snap.ConnectProxy.DiscoveryChain) - require.Len(t, snap.ConnectProxy.WatchedUpstreams, 5, "%+v", snap.ConnectProxy.WatchedUpstreams) - require.Len(t, snap.ConnectProxy.WatchedUpstreamEndpoints, 5, "%+v", snap.ConnectProxy.WatchedUpstreamEndpoints) - require.Len(t, snap.ConnectProxy.WatchedGateways, 5, "%+v", snap.ConnectProxy.WatchedGateways) - require.Len(t, snap.ConnectProxy.WatchedGatewayEndpoints, 5, "%+v", snap.ConnectProxy.WatchedGatewayEndpoints) + require.Len(t, snap.ConnectProxy.DiscoveryChain, 6, "%+v", snap.ConnectProxy.DiscoveryChain) + require.Len(t, snap.ConnectProxy.WatchedUpstreams, 6, "%+v", snap.ConnectProxy.WatchedUpstreams) + require.Len(t, snap.ConnectProxy.WatchedUpstreamEndpoints, 6, "%+v", snap.ConnectProxy.WatchedUpstreamEndpoints) + require.Len(t, snap.ConnectProxy.WatchedGateways, 6, "%+v", snap.ConnectProxy.WatchedGateways) + require.Len(t, snap.ConnectProxy.WatchedGatewayEndpoints, 6, "%+v", snap.ConnectProxy.WatchedGatewayEndpoints) require.Len(t, snap.ConnectProxy.WatchedServiceChecks, 0, "%+v", snap.ConnectProxy.WatchedServiceChecks) require.Len(t, snap.ConnectProxy.PreparedQueryEndpoints, 0, "%+v", snap.ConnectProxy.PreparedQueryEndpoints) + require.Equal(t, 1, snap.ConnectProxy.ConfigSnapshotUpstreams.PeerUpstreamEndpoints.Len()) + require.Equal(t, 1, snap.ConnectProxy.ConfigSnapshotUpstreams.UpstreamPeerTrustBundles.Len()) + require.True(t, snap.ConnectProxy.IntentionsSet) require.Equal(t, ixnMatch, snap.ConnectProxy.Intentions) }, diff --git a/agent/proxycfg/testing.go b/agent/proxycfg/testing.go index 0493e30dae..d436589471 100644 --- a/agent/proxycfg/testing.go +++ b/agent/proxycfg/testing.go @@ -280,6 +280,31 @@ func TestUpstreamNodesDC2(t testing.T) structs.CheckServiceNodes { } } +func TestUpstreamNodesPeerCluster01(t testing.T) structs.CheckServiceNodes { + peer := "cluster-01" + service := structs.TestNodeServiceWithNameInPeer(t, "web", peer) + return structs.CheckServiceNodes{ + structs.CheckServiceNode{ + Node: &structs.Node{ + ID: "test1", + Node: "test1", + Address: "10.40.1.1", + PeerName: peer, + }, + Service: service, + }, + structs.CheckServiceNode{ + Node: &structs.Node{ + ID: "test2", + Node: "test2", + Address: "10.40.1.2", + PeerName: peer, + }, + Service: service, + }, + } +} + func TestUpstreamNodesInStatusDC2(t testing.T, status string) structs.CheckServiceNodes { return structs.CheckServiceNodes{ structs.CheckServiceNode{ diff --git a/agent/proxycfg/testing_upstreams.go b/agent/proxycfg/testing_upstreams.go index 2d80c0968d..5e131af4fb 100644 --- a/agent/proxycfg/testing_upstreams.go +++ b/agent/proxycfg/testing_upstreams.go @@ -8,6 +8,7 @@ import ( "github.com/hashicorp/consul/agent/connect" "github.com/hashicorp/consul/agent/consul/discoverychain" "github.com/hashicorp/consul/agent/structs" + "github.com/hashicorp/consul/proto/pbpeering" ) func setupTestVariationConfigEntriesAndSnapshot( @@ -72,6 +73,24 @@ func setupTestVariationConfigEntriesAndSnapshot( Nodes: TestGatewayNodesDC2(t), }, }) + case "failover-to-cluster-peer": + events = append(events, UpdateEvent{ + CorrelationID: "peer-trust-bundle:cluster-01", + Result: &pbpeering.TrustBundleReadResponse{ + Bundle: &pbpeering.PeeringTrustBundle{ + 
PeerName: "peer1", + TrustDomain: "peer1.domain", + ExportedPartition: "peer1ap", + RootPEMs: []string{"peer1-root-1"}, + }, + }, + }) + events = append(events, UpdateEvent{ + CorrelationID: "upstream-peer:db?peer=cluster-01", + Result: &structs.IndexedCheckServiceNodes{ + Nodes: TestUpstreamNodesPeerCluster01(t), + }, + }) case "failover-through-double-remote-gateway-triggered": events = append(events, UpdateEvent{ CorrelationID: "upstream-target:db.default.default.dc1:" + dbUID.String(), @@ -255,6 +274,21 @@ func setupTestVariationDiscoveryChain( }, }, ) + case "failover-to-cluster-peer": + entries = append(entries, + &structs.ServiceResolverConfigEntry{ + Kind: structs.ServiceResolver, + Name: "db", + ConnectTimeout: 33 * time.Second, + Failover: map[string]structs.ServiceResolverFailover{ + "*": { + Targets: []structs.ServiceResolverFailoverTarget{ + {Peer: "cluster-01"}, + }, + }, + }, + }, + ) case "failover-through-double-remote-gateway-triggered": fallthrough case "failover-through-double-remote-gateway": diff --git a/agent/proxycfg/upstreams.go b/agent/proxycfg/upstreams.go index 600a89e092..e8825e94c6 100644 --- a/agent/proxycfg/upstreams.go +++ b/agent/proxycfg/upstreams.go @@ -9,7 +9,9 @@ import ( "github.com/mitchellh/mapstructure" "github.com/hashicorp/consul/acl" + cachetype "github.com/hashicorp/consul/agent/cache-types" "github.com/hashicorp/consul/agent/structs" + "github.com/hashicorp/consul/proto/pbpeering" ) type handlerUpstreams struct { @@ -21,9 +23,10 @@ func (s *handlerUpstreams) handleUpdateUpstreams(ctx context.Context, u UpdateEv return fmt.Errorf("error filling agent cache: %v", u.Err) } - upstreamsSnapshot := &snap.ConnectProxy.ConfigSnapshotUpstreams - if snap.Kind == structs.ServiceKindIngressGateway { - upstreamsSnapshot = &snap.IngressGateway.ConfigSnapshotUpstreams + upstreamsSnapshot, err := snap.ToConfigSnapshotUpstreams() + + if err != nil { + return err } switch { @@ -98,19 +101,16 @@ func (s *handlerUpstreams) handleUpdateUpstreams(ctx context.Context, u UpdateEv uid := UpstreamIDFromString(uidString) - filteredNodes := hostnameEndpoints( - s.logger, - GatewayKey{ /*empty so it never matches*/ }, - resp.Nodes, - ) - if len(filteredNodes) > 0 { - if set := upstreamsSnapshot.PeerUpstreamEndpoints.Set(uid, filteredNodes); set { - upstreamsSnapshot.PeerUpstreamEndpointsUseHostnames[uid] = struct{}{} - } - } else { - if set := upstreamsSnapshot.PeerUpstreamEndpoints.Set(uid, resp.Nodes); set { - delete(upstreamsSnapshot.PeerUpstreamEndpointsUseHostnames, uid) - } + s.setPeerEndpoints(upstreamsSnapshot, uid, resp.Nodes) + + case strings.HasPrefix(u.CorrelationID, peerTrustBundleIDPrefix): + resp, ok := u.Result.(*pbpeering.TrustBundleReadResponse) + if !ok { + return fmt.Errorf("invalid type for response: %T", u.Result) + } + peer := strings.TrimPrefix(u.CorrelationID, peerTrustBundleIDPrefix) + if resp.Bundle != nil { + upstreamsSnapshot.UpstreamPeerTrustBundles.Set(peer, resp.Bundle) } case strings.HasPrefix(u.CorrelationID, "upstream-target:"): @@ -216,6 +216,23 @@ func removeColonPrefix(s string) (string, string, bool) { return s[0:idx], s[idx+1:], true } +func (s *handlerUpstreams) setPeerEndpoints(upstreamsSnapshot *ConfigSnapshotUpstreams, uid UpstreamID, nodes structs.CheckServiceNodes) { + filteredNodes := hostnameEndpoints( + s.logger, + GatewayKey{ /*empty so it never matches*/ }, + nodes, + ) + if len(filteredNodes) > 0 { + if set := upstreamsSnapshot.PeerUpstreamEndpoints.Set(uid, filteredNodes); set { + 
upstreamsSnapshot.PeerUpstreamEndpointsUseHostnames[uid] = struct{}{} + } + } else { + if set := upstreamsSnapshot.PeerUpstreamEndpoints.Set(uid, nodes); set { + delete(upstreamsSnapshot.PeerUpstreamEndpointsUseHostnames, uid) + } + } +} + func (s *handlerUpstreams) resetWatchesFromChain( ctx context.Context, uid UpstreamID, @@ -255,6 +272,12 @@ func (s *handlerUpstreams) resetWatchesFromChain( delete(snap.WatchedUpstreams[uid], targetID) delete(snap.WatchedUpstreamEndpoints[uid], targetID) cancelFn() + + targetUID := NewUpstreamIDFromTargetID(targetID) + if targetUID.Peer != "" { + snap.PeerUpstreamEndpoints.CancelWatch(targetUID) + snap.UpstreamPeerTrustBundles.CancelWatch(targetUID.Peer) + } } var ( @@ -274,6 +297,7 @@ func (s *handlerUpstreams) resetWatchesFromChain( service: target.Service, filter: target.Subset.Filter, datacenter: target.Datacenter, + peer: target.Peer, entMeta: target.GetEnterpriseMetadata(), } err := s.watchUpstreamTarget(ctx, snap, opts) @@ -384,6 +408,7 @@ type targetWatchOpts struct { service string filter string datacenter string + peer string entMeta *acl.EnterpriseMeta } @@ -397,11 +422,17 @@ func (s *handlerUpstreams) watchUpstreamTarget(ctx context.Context, snap *Config var finalMeta acl.EnterpriseMeta finalMeta.Merge(opts.entMeta) - correlationID := "upstream-target:" + opts.chainID + ":" + opts.upstreamID.String() + uid := opts.upstreamID + correlationID := "upstream-target:" + opts.chainID + ":" + uid.String() + + if opts.peer != "" { + uid = NewUpstreamIDFromTargetID(opts.chainID) + correlationID = upstreamPeerWatchIDPrefix + uid.String() + } ctx, cancel := context.WithCancel(ctx) err := s.dataSources.Health.Notify(ctx, &structs.ServiceSpecificRequest{ - PeerName: opts.upstreamID.Peer, + PeerName: opts.peer, Datacenter: opts.datacenter, QueryOptions: structs.QueryOptions{ Token: s.token, @@ -422,6 +453,31 @@ func (s *handlerUpstreams) watchUpstreamTarget(ctx context.Context, snap *Config } snap.WatchedUpstreams[opts.upstreamID][opts.chainID] = cancel + if uid.Peer == "" { + return nil + } + + if ok := snap.PeerUpstreamEndpoints.IsWatched(uid); !ok { + snap.PeerUpstreamEndpoints.InitWatch(uid, cancel) + } + + // Check whether a watch for this peer exists to avoid duplicates. + if ok := snap.UpstreamPeerTrustBundles.IsWatched(uid.Peer); !ok { + peerCtx, cancel := context.WithCancel(ctx) + if err := s.dataSources.TrustBundle.Notify(peerCtx, &cachetype.TrustBundleReadRequest{ + Request: &pbpeering.TrustBundleReadRequest{ + Name: uid.Peer, + Partition: uid.PartitionOrDefault(), + }, + QueryOptions: structs.QueryOptions{Token: s.token}, + }, peerTrustBundleIDPrefix+uid.Peer, s.ch); err != nil { + cancel() + return fmt.Errorf("error while watching trust bundle for peer %q: %w", uid.Peer, err) + } + + snap.UpstreamPeerTrustBundles.InitWatch(uid.Peer, cancel) + } + return nil } diff --git a/agent/structs/testing_catalog.go b/agent/structs/testing_catalog.go index c9fcf017d2..f026f6091e 100644 --- a/agent/structs/testing_catalog.go +++ b/agent/structs/testing_catalog.go @@ -53,6 +53,28 @@ func TestNodeServiceWithName(t testing.T, name string) *NodeService { } } +const peerTrustDomain = "1c053652-8512-4373-90cf-5a7f6263a994.consul" + +func TestNodeServiceWithNameInPeer(t testing.T, name string, peer string) *NodeService { + service := "payments" + return &NodeService{ + Kind: ServiceKindTypical, + Service: name, + Port: 8080, + Connect: ServiceConnect{ + PeerMeta: &PeeringServiceMeta{ + SNI: []string{ + service + ".default.default." + peer + ".external." 
+ peerTrustDomain, + }, + SpiffeID: []string{ + "spiffe://" + peerTrustDomain + "/ns/default/dc/" + peer + "-dc/svc/" + service, + }, + Protocol: "tcp", + }, + }, + } +} + // TestNodeServiceProxy returns a *NodeService representing a valid // Connect proxy. func TestNodeServiceProxy(t testing.T) *NodeService { diff --git a/agent/xds/clusters.go b/agent/xds/clusters.go index c3ac718472..6b171a27f4 100644 --- a/agent/xds/clusters.go +++ b/agent/xds/clusters.go @@ -88,29 +88,26 @@ func (s *ResourceGenerator) clustersFromSnapshotConnectProxy(cfgSnap *proxycfg.C clusters = append(clusters, passthroughs...) } - // NOTE: Any time we skip a chain below we MUST also skip that discovery chain in endpoints.go - // so that the sets of endpoints generated matches the sets of clusters. - for uid, chain := range cfgSnap.ConnectProxy.DiscoveryChain { + getUpstream := func(uid proxycfg.UpstreamID) (*structs.Upstream, bool) { upstream := cfgSnap.ConnectProxy.UpstreamConfig[uid] explicit := upstream.HasLocalPortOrSocket() implicit := cfgSnap.ConnectProxy.IsImplicitUpstream(uid) - if !implicit && !explicit { - // Discovery chain is not associated with a known explicit or implicit upstream so it is skipped. - continue - } + return upstream, !implicit && !explicit + } - chainEndpoints, ok := cfgSnap.ConnectProxy.WatchedUpstreamEndpoints[uid] - if !ok { - // this should not happen - return nil, fmt.Errorf("no endpoint map for upstream %q", uid) + // NOTE: Any time we skip a chain below we MUST also skip that discovery chain in endpoints.go + // so that the sets of endpoints generated matches the sets of clusters. + for uid, chain := range cfgSnap.ConnectProxy.DiscoveryChain { + upstream, skip := getUpstream(uid) + if skip { + continue } upstreamClusters, err := s.makeUpstreamClustersForDiscoveryChain( uid, upstream, chain, - chainEndpoints, cfgSnap, false, ) @@ -127,18 +124,15 @@ func (s *ResourceGenerator) clustersFromSnapshotConnectProxy(cfgSnap *proxycfg.C // upstream in endpoints.go so that the sets of endpoints generated matches // the sets of clusters. for _, uid := range cfgSnap.ConnectProxy.PeeredUpstreamIDs() { - upstreamCfg := cfgSnap.ConnectProxy.UpstreamConfig[uid] - - explicit := upstreamCfg.HasLocalPortOrSocket() - implicit := cfgSnap.ConnectProxy.IsImplicitUpstream(uid) - if !implicit && !explicit { - // Not associated with a known explicit or implicit upstream so it is skipped. 
+ upstream, skip := getUpstream(uid) + if skip { continue } peerMeta := cfgSnap.ConnectProxy.UpstreamPeerMeta(uid) + cfg := s.getAndModifyUpstreamConfigForPeeredListener(uid, upstream, peerMeta) - upstreamCluster, err := s.makeUpstreamClusterForPeerService(uid, upstreamCfg, peerMeta, cfgSnap) + upstreamCluster, err := s.makeUpstreamClusterForPeerService(uid, cfg, peerMeta, cfgSnap) if err != nil { return nil, err } @@ -652,17 +646,10 @@ func (s *ResourceGenerator) clustersFromSnapshotIngressGateway(cfgSnap *proxycfg return nil, fmt.Errorf("no discovery chain for upstream %q", uid) } - chainEndpoints, ok := cfgSnap.IngressGateway.WatchedUpstreamEndpoints[uid] - if !ok { - // this should not happen - return nil, fmt.Errorf("no endpoint map for upstream %q", uid) - } - upstreamClusters, err := s.makeUpstreamClustersForDiscoveryChain( uid, &u, chain, - chainEndpoints, cfgSnap, false, ) @@ -745,7 +732,7 @@ func (s *ResourceGenerator) makeAppCluster(cfgSnap *proxycfg.ConfigSnapshot, nam func (s *ResourceGenerator) makeUpstreamClusterForPeerService( uid proxycfg.UpstreamID, - upstream *structs.Upstream, + upstreamConfig structs.UpstreamConfig, peerMeta structs.PeeringServiceMeta, cfgSnap *proxycfg.ConfigSnapshot, ) (*envoy_cluster_v3.Cluster, error) { @@ -754,16 +741,21 @@ func (s *ResourceGenerator) makeUpstreamClusterForPeerService( err error ) - cfg := s.getAndModifyUpstreamConfigForPeeredListener(uid, upstream, peerMeta) - if cfg.EnvoyClusterJSON != "" { - c, err = makeClusterFromUserConfig(cfg.EnvoyClusterJSON) + if upstreamConfig.EnvoyClusterJSON != "" { + c, err = makeClusterFromUserConfig(upstreamConfig.EnvoyClusterJSON) if err != nil { return c, err } // In the happy path don't return yet as we need to inject TLS config still. } - tbs, ok := cfgSnap.ConnectProxy.UpstreamPeerTrustBundles.Get(uid.Peer) + upstreamsSnapshot, err := cfgSnap.ToConfigSnapshotUpstreams() + + if err != nil { + return c, err + } + + tbs, ok := upstreamsSnapshot.UpstreamPeerTrustBundles.Get(uid.Peer) if !ok { // this should never happen since we loop through upstreams with // set trust bundles @@ -772,7 +764,7 @@ func (s *ResourceGenerator) makeUpstreamClusterForPeerService( clusterName := generatePeeredClusterName(uid, tbs) - outlierDetection := ToOutlierDetection(cfg.PassiveHealthCheck) + outlierDetection := ToOutlierDetection(upstreamConfig.PassiveHealthCheck) // We can't rely on health checks for services on cluster peers because they // don't take into account service resolvers, splitters and routers. 
Setting
+ // MaxEjectionPercent to 100% gives outlier detection the power to eject the
@@ -783,18 +775,18 @@
 if c == nil {
 c = &envoy_cluster_v3.Cluster{
 Name: clusterName,
- ConnectTimeout: durationpb.New(time.Duration(cfg.ConnectTimeoutMs) * time.Millisecond),
+ ConnectTimeout: durationpb.New(time.Duration(upstreamConfig.ConnectTimeoutMs) * time.Millisecond),
 CommonLbConfig: &envoy_cluster_v3.Cluster_CommonLbConfig{
 HealthyPanicThreshold: &envoy_type_v3.Percent{
 Value: 0, // disable panic threshold
 },
 },
 CircuitBreakers: &envoy_cluster_v3.CircuitBreakers{
- Thresholds: makeThresholdsIfNeeded(cfg.Limits),
+ Thresholds: makeThresholdsIfNeeded(upstreamConfig.Limits),
 },
 OutlierDetection: outlierDetection,
 }
- if cfg.Protocol == "http2" || cfg.Protocol == "grpc" {
+ if upstreamConfig.Protocol == "http2" || upstreamConfig.Protocol == "grpc" {
 if err := s.setHttp2ProtocolOptions(c); err != nil {
 return c, err
 }
@@ -828,12 +820,11 @@ func (s *ResourceGenerator) makeUpstreamClusterForPeerService(
 false, /*onlyPassing*/
 )
 }
-
 }
 rootPEMs := cfgSnap.RootPEMs()
 if uid.Peer != "" {
- tbs, _ := cfgSnap.ConnectProxy.UpstreamPeerTrustBundles.Get(uid.Peer)
+ tbs, _ := upstreamsSnapshot.UpstreamPeerTrustBundles.Get(uid.Peer)
 rootPEMs = tbs.ConcatenatedRootPEMs()
 }
@@ -968,7 +959,6 @@ func (s *ResourceGenerator) makeUpstreamClustersForDiscoveryChain(
 uid proxycfg.UpstreamID,
 upstream *structs.Upstream,
 chain *structs.CompiledDiscoveryChain,
- chainEndpoints map[string]structs.CheckServiceNodes,
 cfgSnap *proxycfg.ConfigSnapshot,
 forMeshGateway bool,
) ([]*envoy_cluster_v3.Cluster, error) {
@@ -985,7 +975,15 @@
 upstreamConfigMap = upstream.Config
 }
- cfg, err := structs.ParseUpstreamConfigNoDefaults(upstreamConfigMap)
+ upstreamsSnapshot, err := cfgSnap.ToConfigSnapshotUpstreams()
+
+ // Mesh gateways are exempt because upstreamsSnapshot is only used for
+ // cluster peering targets and transitive failover/redirects are unsupported.
+ if err != nil && !forMeshGateway {
+ return nil, fmt.Errorf("No upstream snapshot for gateway mode %q", cfgSnap.Kind)
+ }
+
+ rawUpstreamConfig, err := structs.ParseUpstreamConfigNoDefaults(upstreamConfigMap)
 if err != nil {
 // Don't hard fail on a config typo, just warn. The parse func returns
 // default config if there is an error so it's safe to continue.
@@ -993,13 +991,28 @@
 "error", err)
 }
+ finalizeUpstreamConfig := func(cfg structs.UpstreamConfig, connectTimeout time.Duration) structs.UpstreamConfig {
+ if cfg.Protocol == "" {
+ cfg.Protocol = chain.Protocol
+ }
+
+ if cfg.Protocol == "" {
+ cfg.Protocol = "tcp"
+ }
+
+ if cfg.ConnectTimeoutMs == 0 {
+ cfg.ConnectTimeoutMs = int(connectTimeout / time.Millisecond)
+ }
+ return cfg
+ }
+
 var escapeHatchCluster *envoy_cluster_v3.Cluster
 if !forMeshGateway {
- if cfg.EnvoyClusterJSON != "" {
+ if rawUpstreamConfig.EnvoyClusterJSON != "" {
 if chain.Default {
 // If you haven't done anything to setup the discovery chain, then
 // you can use the envoy_cluster_json escape hatch.
- escapeHatchCluster, err = makeClusterFromUserConfig(cfg.EnvoyClusterJSON)
+ escapeHatchCluster, err = makeClusterFromUserConfig(rawUpstreamConfig.EnvoyClusterJSON)
 if err != nil {
 return nil, err
 }
@@ -1013,14 +1026,20 @@
 var out []*envoy_cluster_v3.Cluster
 for _, node := range chain.Nodes {
- if node.Type != structs.DiscoveryGraphNodeTypeResolver {
+ switch {
+ case node == nil:
+ return nil, fmt.Errorf("impossible to process a nil node")
+ case node.Type != structs.DiscoveryGraphNodeTypeResolver:
 continue
+ case node.Resolver == nil:
+ return nil, fmt.Errorf("impossible to process a non-resolver node")
 }
 failover := node.Resolver.Failover
 // These variables are prefixed with primary to avoid shadowing bugs.
 primaryTargetID := node.Resolver.Target
 primaryTarget := chain.Targets[primaryTargetID]
 primaryClusterName := CustomizeClusterName(primaryTarget.Name, chain)
+ upstreamConfig := finalizeUpstreamConfig(rawUpstreamConfig, node.Resolver.ConnectTimeout)
 if forMeshGateway {
 primaryClusterName = meshGatewayExportedClusterNamePrefix + primaryClusterName
 }
@@ -1033,22 +1052,38 @@
 continue
 }
- type targetClusterOptions struct {
+ type targetClusterOption struct {
 targetID string
 clusterName string
 }
 // Construct the information required to make target clusters. When
 // failover is configured, create the aggregate cluster.
- var targetClustersOptions []targetClusterOptions
+ var targetClustersOptions []targetClusterOption
 if failover != nil && !forMeshGateway {
 var failoverClusterNames []string
 for _, tid := range append([]string{primaryTargetID}, failover.Targets...) {
 target := chain.Targets[tid]
- clusterName := CustomizeClusterName(target.Name, chain)
+ clusterName := target.Name
+ targetUID := proxycfg.NewUpstreamIDFromTargetID(tid)
+ if targetUID.Peer != "" {
+ tbs, ok := upstreamsSnapshot.UpstreamPeerTrustBundles.Get(targetUID.Peer)
+ // We can't generate cluster on peers without the trust bundle. The
+ // trust bundle should be ready soon.
+ if !ok { + s.Logger.Debug("peer trust bundle not ready for discovery chain target", + "peer", targetUID.Peer, + "target", tid, + ) + continue + } + + clusterName = generatePeeredClusterName(targetUID, tbs) + } + clusterName = CustomizeClusterName(clusterName, chain) clusterName = failoverClusterNamePrefix + clusterName - targetClustersOptions = append(targetClustersOptions, targetClusterOptions{ + targetClustersOptions = append(targetClustersOptions, targetClusterOption{ targetID: tid, clusterName: clusterName, }) @@ -1077,7 +1112,7 @@ func (s *ResourceGenerator) makeUpstreamClustersForDiscoveryChain( out = append(out, c) } else { - targetClustersOptions = append(targetClustersOptions, targetClusterOptions{ + targetClustersOptions = append(targetClustersOptions, targetClusterOption{ targetID: primaryTargetID, clusterName: primaryClusterName, }) @@ -1096,11 +1131,20 @@ func (s *ResourceGenerator) makeUpstreamClustersForDiscoveryChain( Datacenter: target.Datacenter, Service: target.Service, }.URI().String() - if uid.Peer != "" { - return nil, fmt.Errorf("impossible to get a peer discovery chain") + targetUID := proxycfg.NewUpstreamIDFromTargetID(targetInfo.targetID) + s.Logger.Debug("generating cluster for", "cluster", targetInfo.clusterName) + if targetUID.Peer != "" { + peerMeta := upstreamsSnapshot.UpstreamPeerMeta(targetUID) + upstreamCluster, err := s.makeUpstreamClusterForPeerService(targetUID, upstreamConfig, peerMeta, cfgSnap) + if err != nil { + continue + } + // Override the cluster name to include the failover-target~ prefix. + upstreamCluster.Name = targetInfo.clusterName + out = append(out, upstreamCluster) + continue } - s.Logger.Trace("generating cluster for", "cluster", targetInfo.clusterName) c := &envoy_cluster_v3.Cluster{ Name: targetInfo.clusterName, AltStatName: targetInfo.clusterName, @@ -1121,9 +1165,9 @@ func (s *ResourceGenerator) makeUpstreamClustersForDiscoveryChain( }, // TODO(peering): make circuit breakers or outlier detection work? CircuitBreakers: &envoy_cluster_v3.CircuitBreakers{ - Thresholds: makeThresholdsIfNeeded(cfg.Limits), + Thresholds: makeThresholdsIfNeeded(upstreamConfig.Limits), }, - OutlierDetection: ToOutlierDetection(cfg.PassiveHealthCheck), + OutlierDetection: ToOutlierDetection(upstreamConfig.PassiveHealthCheck), } var lb *structs.LoadBalancer @@ -1134,19 +1178,7 @@ func (s *ResourceGenerator) makeUpstreamClustersForDiscoveryChain( return nil, fmt.Errorf("failed to apply load balancer configuration to cluster %q: %v", targetInfo.clusterName, err) } - var proto string - if !forMeshGateway { - proto = cfg.Protocol - } - if proto == "" { - proto = chain.Protocol - } - - if proto == "" { - proto = "tcp" - } - - if proto == "http2" || proto == "grpc" { + if upstreamConfig.Protocol == "http2" || upstreamConfig.Protocol == "grpc" { if err := s.setHttp2ProtocolOptions(c); err != nil { return nil, err } @@ -1155,7 +1187,7 @@ func (s *ResourceGenerator) makeUpstreamClustersForDiscoveryChain( configureTLS := true if forMeshGateway { // We only initiate TLS if we're doing an L7 proxy. 
- configureTLS = structs.IsProtocolHTTPLike(proto) + configureTLS = structs.IsProtocolHTTPLike(upstreamConfig.Protocol) } if configureTLS { @@ -1228,7 +1260,6 @@ func (s *ResourceGenerator) makeExportedUpstreamClustersForMeshGateway(cfgSnap * proxycfg.NewUpstreamIDFromServiceName(svc), nil, chain, - nil, cfgSnap, true, ) diff --git a/agent/xds/clusters_test.go b/agent/xds/clusters_test.go index a56853b816..26087dd1d0 100644 --- a/agent/xds/clusters_test.go +++ b/agent/xds/clusters_test.go @@ -257,6 +257,12 @@ func TestClustersFromSnapshot(t *testing.T) { return proxycfg.TestConfigSnapshotDiscoveryChain(t, "failover", nil, nil) }, }, + { + name: "connect-proxy-with-chain-and-failover-to-cluster-peer", + create: func(t testinf.T) *proxycfg.ConfigSnapshot { + return proxycfg.TestConfigSnapshotDiscoveryChain(t, "failover-to-cluster-peer", nil, nil) + }, + }, { name: "connect-proxy-with-tcp-chain-failover-through-remote-gateway", create: func(t testinf.T) *proxycfg.ConfigSnapshot { @@ -495,6 +501,13 @@ func TestClustersFromSnapshot(t *testing.T) { "failover", nil, nil, nil) }, }, + { + name: "ingress-with-chain-and-failover-to-cluster-peer", + create: func(t testinf.T) *proxycfg.ConfigSnapshot { + return proxycfg.TestConfigSnapshotIngressGateway(t, true, "tcp", + "failover-to-cluster-peer", nil, nil, nil) + }, + }, { name: "ingress-with-tcp-chain-failover-through-remote-gateway", create: func(t testinf.T) *proxycfg.ConfigSnapshot { diff --git a/agent/xds/endpoints.go b/agent/xds/endpoints.go index 6c0fca7f25..c1501f0f77 100644 --- a/agent/xds/endpoints.go +++ b/agent/xds/endpoints.go @@ -50,14 +50,19 @@ func (s *ResourceGenerator) endpointsFromSnapshotConnectProxy(cfgSnap *proxycfg. cfgSnap.ConnectProxy.PeerUpstreamEndpoints.Len()+ len(cfgSnap.ConnectProxy.WatchedUpstreamEndpoints)) - // NOTE: Any time we skip a chain below we MUST also skip that discovery chain in clusters.go - // so that the sets of endpoints generated matches the sets of clusters. - for uid, chain := range cfgSnap.ConnectProxy.DiscoveryChain { + getUpstream := func(uid proxycfg.UpstreamID) (*structs.Upstream, bool) { upstream := cfgSnap.ConnectProxy.UpstreamConfig[uid] explicit := upstream.HasLocalPortOrSocket() implicit := cfgSnap.ConnectProxy.IsImplicitUpstream(uid) - if !implicit && !explicit { + return upstream, !implicit && !explicit + } + + // NOTE: Any time we skip a chain below we MUST also skip that discovery chain in clusters.go + // so that the sets of endpoints generated matches the sets of clusters. + for uid, chain := range cfgSnap.ConnectProxy.DiscoveryChain { + upstream, skip := getUpstream(uid) + if skip { // Discovery chain is not associated with a known explicit or implicit upstream so it is skipped. continue } @@ -70,6 +75,7 @@ func (s *ResourceGenerator) endpointsFromSnapshotConnectProxy(cfgSnap *proxycfg. es, err := s.endpointsFromDiscoveryChain( uid, chain, + cfgSnap, cfgSnap.Locality, upstreamConfigMap, cfgSnap.ConnectProxy.WatchedUpstreamEndpoints[uid], @@ -86,12 +92,9 @@ func (s *ResourceGenerator) endpointsFromSnapshotConnectProxy(cfgSnap *proxycfg. // upstream in clusters.go so that the sets of endpoints generated matches // the sets of clusters. for _, uid := range cfgSnap.ConnectProxy.PeeredUpstreamIDs() { - upstreamCfg := cfgSnap.ConnectProxy.UpstreamConfig[uid] - - explicit := upstreamCfg.HasLocalPortOrSocket() - implicit := cfgSnap.ConnectProxy.IsImplicitUpstream(uid) - if !implicit && !explicit { - // Not associated with a known explicit or implicit upstream so it is skipped. 
+ _, skip := getUpstream(uid)
+ if skip {
+ // Discovery chain is not associated with a known explicit or implicit upstream so it is skipped.
 continue
 }
@@ -104,22 +107,14 @@
 clusterName := generatePeeredClusterName(uid, tbs)
- // Also skip peer instances with a hostname as their address. EDS
- // cannot resolve hostnames, so we provide them through CDS instead.
- if _, ok := cfgSnap.ConnectProxy.PeerUpstreamEndpointsUseHostnames[uid]; ok {
- continue
+ loadAssignment, err := s.makeUpstreamLoadAssignmentForPeerService(cfgSnap, clusterName, uid)
+
+ if err != nil {
+ return nil, err
 }
- endpoints, ok := cfgSnap.ConnectProxy.PeerUpstreamEndpoints.Get(uid)
- if ok {
- la := makeLoadAssignment(
- clusterName,
- []loadAssignmentEndpointGroup{
- {Endpoints: endpoints},
- },
- proxycfg.GatewayKey{ /*empty so it never matches*/ },
- )
- resources = append(resources, la)
+ if loadAssignment != nil {
+ resources = append(resources, loadAssignment)
 }
 }
@@ -375,6 +370,7 @@ func (s *ResourceGenerator) endpointsFromSnapshotIngressGateway(cfgSnap *proxycf
 es, err := s.endpointsFromDiscoveryChain(
 uid,
 cfgSnap.IngressGateway.DiscoveryChain[uid],
+ cfgSnap,
 proxycfg.GatewayKey{Datacenter: cfgSnap.Datacenter, Partition: u.DestinationPartition},
 u.Config,
 cfgSnap.IngressGateway.WatchedUpstreamEndpoints[uid],
@@ -412,9 +408,38 @@ func makePipeEndpoint(path string) *envoy_endpoint_v3.LbEndpoint {
 }
 }
+func (s *ResourceGenerator) makeUpstreamLoadAssignmentForPeerService(cfgSnap *proxycfg.ConfigSnapshot, clusterName string, uid proxycfg.UpstreamID) (*envoy_endpoint_v3.ClusterLoadAssignment, error) {
+ var la *envoy_endpoint_v3.ClusterLoadAssignment
+
+ upstreamsSnapshot, err := cfgSnap.ToConfigSnapshotUpstreams()
+ if err != nil {
+ return la, err
+ }
+
+ // Also skip peer instances with a hostname as their address. EDS
+ // cannot resolve hostnames, so we provide them through CDS instead.
+ if _, ok := upstreamsSnapshot.PeerUpstreamEndpointsUseHostnames[uid]; ok {
+ return la, nil
+ }
+
+ endpoints, ok := upstreamsSnapshot.PeerUpstreamEndpoints.Get(uid)
+ if !ok {
+ return nil, nil
+ }
+ la = makeLoadAssignment(
+ clusterName,
+ []loadAssignmentEndpointGroup{
+ {Endpoints: endpoints},
+ },
+ proxycfg.GatewayKey{ /*empty so it never matches*/ },
+ )
+ return la, nil
+}
+
 func (s *ResourceGenerator) endpointsFromDiscoveryChain(
 uid proxycfg.UpstreamID,
 chain *structs.CompiledDiscoveryChain,
+ cfgSnap *proxycfg.ConfigSnapshot,
 gatewayKey proxycfg.GatewayKey,
 upstreamConfigMap map[string]interface{},
 upstreamEndpoints map[string]structs.CheckServiceNodes,
@@ -432,6 +457,14 @@ func (s *ResourceGenerator) endpointsFromDiscoveryChain(
 upstreamConfigMap = make(map[string]interface{}) // TODO:needed?
 }
+ upstreamsSnapshot, err := cfgSnap.ToConfigSnapshotUpstreams()
+
+ // Mesh gateways are exempt because upstreamsSnapshot is only used for
+ // cluster peering targets and transitive failover/redirects are unsupported.
+ if err != nil && !forMeshGateway { + return nil, fmt.Errorf("No upstream snapshot for gateway mode %q", cfgSnap.Kind) + } + var resources []proto.Message var escapeHatchCluster *envoy_cluster_v3.Cluster @@ -465,8 +498,15 @@ func (s *ResourceGenerator) endpointsFromDiscoveryChain( if node.Type != structs.DiscoveryGraphNodeTypeResolver { continue } + primaryTargetID := node.Resolver.Target failover := node.Resolver.Failover + type targetLoadAssignmentOption struct { + targetID string + clusterName string + } + var targetLoadAssignmentOptions []targetLoadAssignmentOption + var numFailoverTargets int if failover != nil { numFailoverTargets = len(failover.Targets) @@ -474,66 +514,84 @@ func (s *ResourceGenerator) endpointsFromDiscoveryChain( clusterNamePrefix := "" if numFailoverTargets > 0 && !forMeshGateway { clusterNamePrefix = failoverClusterNamePrefix - for _, failTargetID := range failover.Targets { - target := chain.Targets[failTargetID] - endpointGroup, valid := makeLoadAssignmentEndpointGroup( - chain.Targets, - upstreamEndpoints, - gatewayEndpoints, - failTargetID, - gatewayKey, - forMeshGateway, - ) - if !valid { - continue // skip the failover target if we're still populating the snapshot - } + for _, targetID := range append([]string{primaryTargetID}, failover.Targets...) { + target := chain.Targets[targetID] + clusterName := target.Name + targetUID := proxycfg.NewUpstreamIDFromTargetID(targetID) + if targetUID.Peer != "" { + tbs, ok := upstreamsSnapshot.UpstreamPeerTrustBundles.Get(targetUID.Peer) + // We can't generate cluster on peers without the trust bundle. The + // trust bundle should be ready soon. + if !ok { + s.Logger.Debug("peer trust bundle not ready for discovery chain target", + "peer", targetUID.Peer, + "target", targetID, + ) + continue + } - clusterName := CustomizeClusterName(target.Name, chain) + clusterName = generatePeeredClusterName(targetUID, tbs) + } + clusterName = CustomizeClusterName(clusterName, chain) clusterName = failoverClusterNamePrefix + clusterName if escapeHatchCluster != nil { clusterName = escapeHatchCluster.Name } - s.Logger.Debug("generating endpoints for", "cluster", clusterName) - - la := makeLoadAssignment( - clusterName, - []loadAssignmentEndpointGroup{endpointGroup}, - gatewayKey, - ) - resources = append(resources, la) + targetLoadAssignmentOptions = append(targetLoadAssignmentOptions, targetLoadAssignmentOption{ + targetID: targetID, + clusterName: clusterName, + }) } - } - targetID := node.Resolver.Target - - target := chain.Targets[targetID] - clusterName := CustomizeClusterName(target.Name, chain) - clusterName = clusterNamePrefix + clusterName - if escapeHatchCluster != nil { - clusterName = escapeHatchCluster.Name - } - if forMeshGateway { - clusterName = meshGatewayExportedClusterNamePrefix + clusterName - } - s.Logger.Debug("generating endpoints for", "cluster", clusterName) - endpointGroup, valid := makeLoadAssignmentEndpointGroup( - chain.Targets, - upstreamEndpoints, - gatewayEndpoints, - targetID, - gatewayKey, - forMeshGateway, - ) - if !valid { - continue // skip the cluster if we're still populating the snapshot + } else { + target := chain.Targets[primaryTargetID] + clusterName := CustomizeClusterName(target.Name, chain) + clusterName = clusterNamePrefix + clusterName + if escapeHatchCluster != nil { + clusterName = escapeHatchCluster.Name + } + if forMeshGateway { + clusterName = meshGatewayExportedClusterNamePrefix + clusterName + } + targetLoadAssignmentOptions = append(targetLoadAssignmentOptions, 
targetLoadAssignmentOption{ + targetID: primaryTargetID, + clusterName: clusterName, + }) } - la := makeLoadAssignment( - clusterName, - []loadAssignmentEndpointGroup{endpointGroup}, - gatewayKey, - ) - resources = append(resources, la) + for _, targetInfo := range targetLoadAssignmentOptions { + s.Logger.Debug("generating endpoints for", "cluster", targetInfo.clusterName) + targetUID := proxycfg.NewUpstreamIDFromTargetID(targetInfo.targetID) + if targetUID.Peer != "" { + loadAssignment, err := s.makeUpstreamLoadAssignmentForPeerService(cfgSnap, targetInfo.clusterName, targetUID) + if err != nil { + return nil, err + } + if loadAssignment != nil { + resources = append(resources, loadAssignment) + } + continue + } + + endpointGroup, valid := makeLoadAssignmentEndpointGroup( + chain.Targets, + upstreamEndpoints, + gatewayEndpoints, + targetInfo.targetID, + gatewayKey, + forMeshGateway, + ) + if !valid { + continue // skip the cluster if we're still populating the snapshot + } + + la := makeLoadAssignment( + targetInfo.clusterName, + []loadAssignmentEndpointGroup{endpointGroup}, + gatewayKey, + ) + resources = append(resources, la) + } } return resources, nil @@ -586,6 +644,7 @@ func (s *ResourceGenerator) makeExportedUpstreamEndpointsForMeshGateway(cfgSnap clusterEndpoints, err := s.endpointsFromDiscoveryChain( proxycfg.NewUpstreamIDFromServiceName(svc), chain, + cfgSnap, cfgSnap.Locality, nil, chainEndpoints, @@ -640,11 +699,12 @@ func makeLoadAssignment(clusterName string, endpointGroups []loadAssignmentEndpo healthStatus = endpointGroup.OverrideHealth } + endpoint := &envoy_endpoint_v3.Endpoint{ + Address: makeAddress(addr, port), + } es = append(es, &envoy_endpoint_v3.LbEndpoint{ HostIdentifier: &envoy_endpoint_v3.LbEndpoint_Endpoint{ - Endpoint: &envoy_endpoint_v3.Endpoint{ - Address: makeAddress(addr, port), - }, + Endpoint: endpoint, }, HealthStatus: healthStatus, LoadBalancingWeight: makeUint32Value(weight), diff --git a/agent/xds/endpoints_test.go b/agent/xds/endpoints_test.go index b02bdd7258..90fad78e2f 100644 --- a/agent/xds/endpoints_test.go +++ b/agent/xds/endpoints_test.go @@ -284,6 +284,12 @@ func TestEndpointsFromSnapshot(t *testing.T) { return proxycfg.TestConfigSnapshotDiscoveryChain(t, "failover", nil, nil) }, }, + { + name: "connect-proxy-with-chain-and-failover-to-cluster-peer", + create: func(t testinf.T) *proxycfg.ConfigSnapshot { + return proxycfg.TestConfigSnapshotDiscoveryChain(t, "failover-to-cluster-peer", nil, nil) + }, + }, { name: "connect-proxy-with-tcp-chain-failover-through-remote-gateway", create: func(t testinf.T) *proxycfg.ConfigSnapshot { @@ -396,6 +402,13 @@ func TestEndpointsFromSnapshot(t *testing.T) { "failover", nil, nil, nil) }, }, + { + name: "ingress-with-chain-and-failover-to-cluster-peer", + create: func(t testinf.T) *proxycfg.ConfigSnapshot { + return proxycfg.TestConfigSnapshotIngressGateway(t, true, "tcp", + "failover-to-cluster-peer", nil, nil, nil) + }, + }, { name: "ingress-with-tcp-chain-failover-through-remote-gateway", create: func(t testinf.T) *proxycfg.ConfigSnapshot { diff --git a/agent/xds/testdata/clusters/connect-proxy-with-chain-and-failover-to-cluster-peer.latest.golden b/agent/xds/testdata/clusters/connect-proxy-with-chain-and-failover-to-cluster-peer.latest.golden new file mode 100644 index 0000000000..61de6b2e29 --- /dev/null +++ b/agent/xds/testdata/clusters/connect-proxy-with-chain-and-failover-to-cluster-peer.latest.golden @@ -0,0 +1,219 @@ +{ + "versionInfo": "00000001", + "resources": [ + { + "@type": 
"type.googleapis.com/envoy.config.cluster.v3.Cluster", + "name": "db.default.dc1.internal.11111111-2222-3333-4444-555555555555.consul", + "altStatName": "db.default.dc1.internal.11111111-2222-3333-4444-555555555555.consul", + "clusterType": { + "name": "envoy.clusters.aggregate", + "typedConfig": { + "@type": "type.googleapis.com/envoy.extensions.clusters.aggregate.v3.ClusterConfig", + "clusters": [ + "failover-target~db.default.dc1.internal.11111111-2222-3333-4444-555555555555.consul", + "failover-target~db.default.cluster-01.external.peer1.domain" + ] + } + }, + "connectTimeout": "33s", + "lbPolicy": "CLUSTER_PROVIDED" + }, + { + "@type": "type.googleapis.com/envoy.config.cluster.v3.Cluster", + "name": "failover-target~db.default.cluster-01.external.peer1.domain", + "type": "EDS", + "edsClusterConfig": { + "edsConfig": { + "ads": { + + }, + "resourceApiVersion": "V3" + } + }, + "connectTimeout": "1s", + "circuitBreakers": { + + }, + "outlierDetection": { + "maxEjectionPercent": 100 + }, + "commonLbConfig": { + "healthyPanicThreshold": { + + } + }, + "transportSocket": { + "name": "tls", + "typedConfig": { + "@type": "type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.UpstreamTlsContext", + "commonTlsContext": { + "tlsParams": { + + }, + "tlsCertificates": [ + { + "certificateChain": { + "inlineString": "-----BEGIN CERTIFICATE-----\nMIICjDCCAjKgAwIBAgIIC5llxGV1gB8wCgYIKoZIzj0EAwIwFDESMBAGA1UEAxMJ\nVGVzdCBDQSAyMB4XDTE5MDMyMjEzNTgyNloXDTI5MDMyMjEzNTgyNlowDjEMMAoG\nA1UEAxMDd2ViMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEADPv1RHVNRfa2VKR\nAB16b6rZnEt7tuhaxCFpQXPj7M2omb0B9Favq5E0ivpNtv1QnFhxtPd7d5k4e+T7\nSkW1TaOCAXIwggFuMA4GA1UdDwEB/wQEAwIDuDAdBgNVHSUEFjAUBggrBgEFBQcD\nAgYIKwYBBQUHAwEwDAYDVR0TAQH/BAIwADBoBgNVHQ4EYQRfN2Q6MDc6ODc6M2E6\nNDA6MTk6NDc6YzM6NWE6YzA6YmE6NjI6ZGY6YWY6NGI6ZDQ6MDU6MjU6NzY6M2Q6\nNWE6OGQ6MTY6OGQ6Njc6NWU6MmU6YTA6MzQ6N2Q6ZGM6ZmYwagYDVR0jBGMwYYBf\nZDE6MTE6MTE6YWM6MmE6YmE6OTc6YjI6M2Y6YWM6N2I6YmQ6ZGE6YmU6YjE6OGE6\nZmM6OWE6YmE6YjU6YmM6ODM6ZTc6NWU6NDE6NmY6ZjI6NzM6OTU6NTg6MGM6ZGIw\nWQYDVR0RBFIwUIZOc3BpZmZlOi8vMTExMTExMTEtMjIyMi0zMzMzLTQ0NDQtNTU1\nNTU1NTU1NTU1LmNvbnN1bC9ucy9kZWZhdWx0L2RjL2RjMS9zdmMvd2ViMAoGCCqG\nSM49BAMCA0gAMEUCIGC3TTvvjj76KMrguVyFf4tjOqaSCRie3nmHMRNNRav7AiEA\npY0heYeK9A6iOLrzqxSerkXXQyj5e9bE4VgUnxgPU6g=\n-----END CERTIFICATE-----\n" + }, + "privateKey": { + "inlineString": "-----BEGIN EC PRIVATE KEY-----\nMHcCAQEEIMoTkpRggp3fqZzFKh82yS4LjtJI+XY+qX/7DefHFrtdoAoGCCqGSM49\nAwEHoUQDQgAEADPv1RHVNRfa2VKRAB16b6rZnEt7tuhaxCFpQXPj7M2omb0B9Fav\nq5E0ivpNtv1QnFhxtPd7d5k4e+T7SkW1TQ==\n-----END EC PRIVATE KEY-----\n" + } + } + ], + "validationContext": { + "trustedCa": { + "inlineString": "peer1-root-1\n" + }, + "matchSubjectAltNames": [ + { + "exact": "spiffe://1c053652-8512-4373-90cf-5a7f6263a994.consul/ns/default/dc/cluster-01-dc/svc/payments" + } + ] + } + }, + "sni": "payments.default.default.cluster-01.external.1c053652-8512-4373-90cf-5a7f6263a994.consul" + } + } + }, + { + "@type": "type.googleapis.com/envoy.config.cluster.v3.Cluster", + "name": "failover-target~db.default.dc1.internal.11111111-2222-3333-4444-555555555555.consul", + "altStatName": "failover-target~db.default.dc1.internal.11111111-2222-3333-4444-555555555555.consul", + "type": "EDS", + "edsClusterConfig": { + "edsConfig": { + "ads": { + + }, + "resourceApiVersion": "V3" + } + }, + "connectTimeout": "33s", + "circuitBreakers": { + + }, + "outlierDetection": { + + }, + "commonLbConfig": { + "healthyPanicThreshold": { + + } + }, + "transportSocket": { + "name": "tls", + "typedConfig": { + "@type": 
"type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.UpstreamTlsContext", + "commonTlsContext": { + "tlsParams": { + + }, + "tlsCertificates": [ + { + "certificateChain": { + "inlineString": "-----BEGIN CERTIFICATE-----\nMIICjDCCAjKgAwIBAgIIC5llxGV1gB8wCgYIKoZIzj0EAwIwFDESMBAGA1UEAxMJ\nVGVzdCBDQSAyMB4XDTE5MDMyMjEzNTgyNloXDTI5MDMyMjEzNTgyNlowDjEMMAoG\nA1UEAxMDd2ViMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEADPv1RHVNRfa2VKR\nAB16b6rZnEt7tuhaxCFpQXPj7M2omb0B9Favq5E0ivpNtv1QnFhxtPd7d5k4e+T7\nSkW1TaOCAXIwggFuMA4GA1UdDwEB/wQEAwIDuDAdBgNVHSUEFjAUBggrBgEFBQcD\nAgYIKwYBBQUHAwEwDAYDVR0TAQH/BAIwADBoBgNVHQ4EYQRfN2Q6MDc6ODc6M2E6\nNDA6MTk6NDc6YzM6NWE6YzA6YmE6NjI6ZGY6YWY6NGI6ZDQ6MDU6MjU6NzY6M2Q6\nNWE6OGQ6MTY6OGQ6Njc6NWU6MmU6YTA6MzQ6N2Q6ZGM6ZmYwagYDVR0jBGMwYYBf\nZDE6MTE6MTE6YWM6MmE6YmE6OTc6YjI6M2Y6YWM6N2I6YmQ6ZGE6YmU6YjE6OGE6\nZmM6OWE6YmE6YjU6YmM6ODM6ZTc6NWU6NDE6NmY6ZjI6NzM6OTU6NTg6MGM6ZGIw\nWQYDVR0RBFIwUIZOc3BpZmZlOi8vMTExMTExMTEtMjIyMi0zMzMzLTQ0NDQtNTU1\nNTU1NTU1NTU1LmNvbnN1bC9ucy9kZWZhdWx0L2RjL2RjMS9zdmMvd2ViMAoGCCqG\nSM49BAMCA0gAMEUCIGC3TTvvjj76KMrguVyFf4tjOqaSCRie3nmHMRNNRav7AiEA\npY0heYeK9A6iOLrzqxSerkXXQyj5e9bE4VgUnxgPU6g=\n-----END CERTIFICATE-----\n" + }, + "privateKey": { + "inlineString": "-----BEGIN EC PRIVATE KEY-----\nMHcCAQEEIMoTkpRggp3fqZzFKh82yS4LjtJI+XY+qX/7DefHFrtdoAoGCCqGSM49\nAwEHoUQDQgAEADPv1RHVNRfa2VKRAB16b6rZnEt7tuhaxCFpQXPj7M2omb0B9Fav\nq5E0ivpNtv1QnFhxtPd7d5k4e+T7SkW1TQ==\n-----END EC PRIVATE KEY-----\n" + } + } + ], + "validationContext": { + "trustedCa": { + "inlineString": "-----BEGIN CERTIFICATE-----\nMIICXDCCAgKgAwIBAgIICpZq70Z9LyUwCgYIKoZIzj0EAwIwFDESMBAGA1UEAxMJ\nVGVzdCBDQSAyMB4XDTE5MDMyMjEzNTgyNloXDTI5MDMyMjEzNTgyNlowFDESMBAG\nA1UEAxMJVGVzdCBDQSAyMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEIhywH1gx\nAsMwuF3ukAI5YL2jFxH6Usnma1HFSfVyxbXX1/uoZEYrj8yCAtdU2yoHETyd+Zx2\nThhRLP79pYegCaOCATwwggE4MA4GA1UdDwEB/wQEAwIBhjAPBgNVHRMBAf8EBTAD\nAQH/MGgGA1UdDgRhBF9kMToxMToxMTphYzoyYTpiYTo5NzpiMjozZjphYzo3Yjpi\nZDpkYTpiZTpiMTo4YTpmYzo5YTpiYTpiNTpiYzo4MzplNzo1ZTo0MTo2ZjpmMjo3\nMzo5NTo1ODowYzpkYjBqBgNVHSMEYzBhgF9kMToxMToxMTphYzoyYTpiYTo5Nzpi\nMjozZjphYzo3YjpiZDpkYTpiZTpiMTo4YTpmYzo5YTpiYTpiNTpiYzo4MzplNzo1\nZTo0MTo2ZjpmMjo3Mzo5NTo1ODowYzpkYjA/BgNVHREEODA2hjRzcGlmZmU6Ly8x\nMTExMTExMS0yMjIyLTMzMzMtNDQ0NC01NTU1NTU1NTU1NTUuY29uc3VsMAoGCCqG\nSM49BAMCA0gAMEUCICOY0i246rQHJt8o8Oya0D5PLL1FnmsQmQqIGCi31RwnAiEA\noR5f6Ku+cig2Il8T8LJujOp2/2A72QcHZA57B13y+8o=\n-----END CERTIFICATE-----\n" + }, + "matchSubjectAltNames": [ + { + "exact": "spiffe://11111111-2222-3333-4444-555555555555.consul/ns/default/dc/dc1/svc/db" + } + ] + } + }, + "sni": "db.default.dc1.internal.11111111-2222-3333-4444-555555555555.consul" + } + } + }, + { + "@type": "type.googleapis.com/envoy.config.cluster.v3.Cluster", + "name": "geo-cache.default.dc1.query.11111111-2222-3333-4444-555555555555.consul", + "type": "EDS", + "edsClusterConfig": { + "edsConfig": { + "ads": { + + }, + "resourceApiVersion": "V3" + } + }, + "connectTimeout": "5s", + "circuitBreakers": { + + }, + "outlierDetection": { + + }, + "transportSocket": { + "name": "tls", + "typedConfig": { + "@type": "type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.UpstreamTlsContext", + "commonTlsContext": { + "tlsParams": { + + }, + "tlsCertificates": [ + { + "certificateChain": { + "inlineString": "-----BEGIN 
CERTIFICATE-----\nMIICjDCCAjKgAwIBAgIIC5llxGV1gB8wCgYIKoZIzj0EAwIwFDESMBAGA1UEAxMJ\nVGVzdCBDQSAyMB4XDTE5MDMyMjEzNTgyNloXDTI5MDMyMjEzNTgyNlowDjEMMAoG\nA1UEAxMDd2ViMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEADPv1RHVNRfa2VKR\nAB16b6rZnEt7tuhaxCFpQXPj7M2omb0B9Favq5E0ivpNtv1QnFhxtPd7d5k4e+T7\nSkW1TaOCAXIwggFuMA4GA1UdDwEB/wQEAwIDuDAdBgNVHSUEFjAUBggrBgEFBQcD\nAgYIKwYBBQUHAwEwDAYDVR0TAQH/BAIwADBoBgNVHQ4EYQRfN2Q6MDc6ODc6M2E6\nNDA6MTk6NDc6YzM6NWE6YzA6YmE6NjI6ZGY6YWY6NGI6ZDQ6MDU6MjU6NzY6M2Q6\nNWE6OGQ6MTY6OGQ6Njc6NWU6MmU6YTA6MzQ6N2Q6ZGM6ZmYwagYDVR0jBGMwYYBf\nZDE6MTE6MTE6YWM6MmE6YmE6OTc6YjI6M2Y6YWM6N2I6YmQ6ZGE6YmU6YjE6OGE6\nZmM6OWE6YmE6YjU6YmM6ODM6ZTc6NWU6NDE6NmY6ZjI6NzM6OTU6NTg6MGM6ZGIw\nWQYDVR0RBFIwUIZOc3BpZmZlOi8vMTExMTExMTEtMjIyMi0zMzMzLTQ0NDQtNTU1\nNTU1NTU1NTU1LmNvbnN1bC9ucy9kZWZhdWx0L2RjL2RjMS9zdmMvd2ViMAoGCCqG\nSM49BAMCA0gAMEUCIGC3TTvvjj76KMrguVyFf4tjOqaSCRie3nmHMRNNRav7AiEA\npY0heYeK9A6iOLrzqxSerkXXQyj5e9bE4VgUnxgPU6g=\n-----END CERTIFICATE-----\n" + }, + "privateKey": { + "inlineString": "-----BEGIN EC PRIVATE KEY-----\nMHcCAQEEIMoTkpRggp3fqZzFKh82yS4LjtJI+XY+qX/7DefHFrtdoAoGCCqGSM49\nAwEHoUQDQgAEADPv1RHVNRfa2VKRAB16b6rZnEt7tuhaxCFpQXPj7M2omb0B9Fav\nq5E0ivpNtv1QnFhxtPd7d5k4e+T7SkW1TQ==\n-----END EC PRIVATE KEY-----\n" + } + } + ], + "validationContext": { + "trustedCa": { + "inlineString": "-----BEGIN CERTIFICATE-----\nMIICXDCCAgKgAwIBAgIICpZq70Z9LyUwCgYIKoZIzj0EAwIwFDESMBAGA1UEAxMJ\nVGVzdCBDQSAyMB4XDTE5MDMyMjEzNTgyNloXDTI5MDMyMjEzNTgyNlowFDESMBAG\nA1UEAxMJVGVzdCBDQSAyMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEIhywH1gx\nAsMwuF3ukAI5YL2jFxH6Usnma1HFSfVyxbXX1/uoZEYrj8yCAtdU2yoHETyd+Zx2\nThhRLP79pYegCaOCATwwggE4MA4GA1UdDwEB/wQEAwIBhjAPBgNVHRMBAf8EBTAD\nAQH/MGgGA1UdDgRhBF9kMToxMToxMTphYzoyYTpiYTo5NzpiMjozZjphYzo3Yjpi\nZDpkYTpiZTpiMTo4YTpmYzo5YTpiYTpiNTpiYzo4MzplNzo1ZTo0MTo2ZjpmMjo3\nMzo5NTo1ODowYzpkYjBqBgNVHSMEYzBhgF9kMToxMToxMTphYzoyYTpiYTo5Nzpi\nMjozZjphYzo3YjpiZDpkYTpiZTpiMTo4YTpmYzo5YTpiYTpiNTpiYzo4MzplNzo1\nZTo0MTo2ZjpmMjo3Mzo5NTo1ODowYzpkYjA/BgNVHREEODA2hjRzcGlmZmU6Ly8x\nMTExMTExMS0yMjIyLTMzMzMtNDQ0NC01NTU1NTU1NTU1NTUuY29uc3VsMAoGCCqG\nSM49BAMCA0gAMEUCICOY0i246rQHJt8o8Oya0D5PLL1FnmsQmQqIGCi31RwnAiEA\noR5f6Ku+cig2Il8T8LJujOp2/2A72QcHZA57B13y+8o=\n-----END CERTIFICATE-----\n" + }, + "matchSubjectAltNames": [ + { + "exact": "spiffe://11111111-2222-3333-4444-555555555555.consul/ns/default/dc/dc1/svc/geo-cache-target" + }, + { + "exact": "spiffe://11111111-2222-3333-4444-555555555555.consul/ns/default/dc/dc2/svc/geo-cache-target" + } + ] + } + }, + "sni": "geo-cache.default.dc1.query.11111111-2222-3333-4444-555555555555.consul" + } + } + }, + { + "@type": "type.googleapis.com/envoy.config.cluster.v3.Cluster", + "name": "local_app", + "type": "STATIC", + "connectTimeout": "5s", + "loadAssignment": { + "clusterName": "local_app", + "endpoints": [ + { + "lbEndpoints": [ + { + "endpoint": { + "address": { + "socketAddress": { + "address": "127.0.0.1", + "portValue": 8080 + } + } + } + } + ] + } + ] + } + } + ], + "typeUrl": "type.googleapis.com/envoy.config.cluster.v3.Cluster", + "nonce": "00000001" +} \ No newline at end of file diff --git a/agent/xds/testdata/clusters/ingress-with-chain-and-failover-to-cluster-peer.latest.golden b/agent/xds/testdata/clusters/ingress-with-chain-and-failover-to-cluster-peer.latest.golden new file mode 100644 index 0000000000..94521dc8f6 --- /dev/null +++ b/agent/xds/testdata/clusters/ingress-with-chain-and-failover-to-cluster-peer.latest.golden @@ -0,0 +1,139 @@ +{ + "versionInfo": "00000001", + "resources": [ + { + "@type": "type.googleapis.com/envoy.config.cluster.v3.Cluster", + 
"name": "db.default.dc1.internal.11111111-2222-3333-4444-555555555555.consul", + "altStatName": "db.default.dc1.internal.11111111-2222-3333-4444-555555555555.consul", + "clusterType": { + "name": "envoy.clusters.aggregate", + "typedConfig": { + "@type": "type.googleapis.com/envoy.extensions.clusters.aggregate.v3.ClusterConfig", + "clusters": [ + "failover-target~db.default.dc1.internal.11111111-2222-3333-4444-555555555555.consul", + "failover-target~db.default.cluster-01.external.peer1.domain" + ] + } + }, + "connectTimeout": "33s", + "lbPolicy": "CLUSTER_PROVIDED" + }, + { + "@type": "type.googleapis.com/envoy.config.cluster.v3.Cluster", + "name": "failover-target~db.default.cluster-01.external.peer1.domain", + "type": "EDS", + "edsClusterConfig": { + "edsConfig": { + "ads": { + + }, + "resourceApiVersion": "V3" + } + }, + "connectTimeout": "33s", + "circuitBreakers": { + + }, + "outlierDetection": { + "maxEjectionPercent": 100 + }, + "commonLbConfig": { + "healthyPanicThreshold": { + + } + }, + "transportSocket": { + "name": "tls", + "typedConfig": { + "@type": "type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.UpstreamTlsContext", + "commonTlsContext": { + "tlsParams": { + + }, + "tlsCertificates": [ + { + "certificateChain": { + "inlineString": "-----BEGIN CERTIFICATE-----\nMIICjDCCAjKgAwIBAgIIC5llxGV1gB8wCgYIKoZIzj0EAwIwFDESMBAGA1UEAxMJ\nVGVzdCBDQSAyMB4XDTE5MDMyMjEzNTgyNloXDTI5MDMyMjEzNTgyNlowDjEMMAoG\nA1UEAxMDd2ViMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEADPv1RHVNRfa2VKR\nAB16b6rZnEt7tuhaxCFpQXPj7M2omb0B9Favq5E0ivpNtv1QnFhxtPd7d5k4e+T7\nSkW1TaOCAXIwggFuMA4GA1UdDwEB/wQEAwIDuDAdBgNVHSUEFjAUBggrBgEFBQcD\nAgYIKwYBBQUHAwEwDAYDVR0TAQH/BAIwADBoBgNVHQ4EYQRfN2Q6MDc6ODc6M2E6\nNDA6MTk6NDc6YzM6NWE6YzA6YmE6NjI6ZGY6YWY6NGI6ZDQ6MDU6MjU6NzY6M2Q6\nNWE6OGQ6MTY6OGQ6Njc6NWU6MmU6YTA6MzQ6N2Q6ZGM6ZmYwagYDVR0jBGMwYYBf\nZDE6MTE6MTE6YWM6MmE6YmE6OTc6YjI6M2Y6YWM6N2I6YmQ6ZGE6YmU6YjE6OGE6\nZmM6OWE6YmE6YjU6YmM6ODM6ZTc6NWU6NDE6NmY6ZjI6NzM6OTU6NTg6MGM6ZGIw\nWQYDVR0RBFIwUIZOc3BpZmZlOi8vMTExMTExMTEtMjIyMi0zMzMzLTQ0NDQtNTU1\nNTU1NTU1NTU1LmNvbnN1bC9ucy9kZWZhdWx0L2RjL2RjMS9zdmMvd2ViMAoGCCqG\nSM49BAMCA0gAMEUCIGC3TTvvjj76KMrguVyFf4tjOqaSCRie3nmHMRNNRav7AiEA\npY0heYeK9A6iOLrzqxSerkXXQyj5e9bE4VgUnxgPU6g=\n-----END CERTIFICATE-----\n" + }, + "privateKey": { + "inlineString": "-----BEGIN EC PRIVATE KEY-----\nMHcCAQEEIMoTkpRggp3fqZzFKh82yS4LjtJI+XY+qX/7DefHFrtdoAoGCCqGSM49\nAwEHoUQDQgAEADPv1RHVNRfa2VKRAB16b6rZnEt7tuhaxCFpQXPj7M2omb0B9Fav\nq5E0ivpNtv1QnFhxtPd7d5k4e+T7SkW1TQ==\n-----END EC PRIVATE KEY-----\n" + } + } + ], + "validationContext": { + "trustedCa": { + "inlineString": "peer1-root-1\n" + }, + "matchSubjectAltNames": [ + { + "exact": "spiffe://1c053652-8512-4373-90cf-5a7f6263a994.consul/ns/default/dc/cluster-01-dc/svc/payments" + } + ] + } + }, + "sni": "payments.default.default.cluster-01.external.1c053652-8512-4373-90cf-5a7f6263a994.consul" + } + } + }, + { + "@type": "type.googleapis.com/envoy.config.cluster.v3.Cluster", + "name": "failover-target~db.default.dc1.internal.11111111-2222-3333-4444-555555555555.consul", + "altStatName": "failover-target~db.default.dc1.internal.11111111-2222-3333-4444-555555555555.consul", + "type": "EDS", + "edsClusterConfig": { + "edsConfig": { + "ads": { + + }, + "resourceApiVersion": "V3" + } + }, + "connectTimeout": "33s", + "circuitBreakers": { + + }, + "outlierDetection": { + + }, + "commonLbConfig": { + "healthyPanicThreshold": { + + } + }, + "transportSocket": { + "name": "tls", + "typedConfig": { + "@type": 
"type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.UpstreamTlsContext", + "commonTlsContext": { + "tlsParams": { + + }, + "tlsCertificates": [ + { + "certificateChain": { + "inlineString": "-----BEGIN CERTIFICATE-----\nMIICjDCCAjKgAwIBAgIIC5llxGV1gB8wCgYIKoZIzj0EAwIwFDESMBAGA1UEAxMJ\nVGVzdCBDQSAyMB4XDTE5MDMyMjEzNTgyNloXDTI5MDMyMjEzNTgyNlowDjEMMAoG\nA1UEAxMDd2ViMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEADPv1RHVNRfa2VKR\nAB16b6rZnEt7tuhaxCFpQXPj7M2omb0B9Favq5E0ivpNtv1QnFhxtPd7d5k4e+T7\nSkW1TaOCAXIwggFuMA4GA1UdDwEB/wQEAwIDuDAdBgNVHSUEFjAUBggrBgEFBQcD\nAgYIKwYBBQUHAwEwDAYDVR0TAQH/BAIwADBoBgNVHQ4EYQRfN2Q6MDc6ODc6M2E6\nNDA6MTk6NDc6YzM6NWE6YzA6YmE6NjI6ZGY6YWY6NGI6ZDQ6MDU6MjU6NzY6M2Q6\nNWE6OGQ6MTY6OGQ6Njc6NWU6MmU6YTA6MzQ6N2Q6ZGM6ZmYwagYDVR0jBGMwYYBf\nZDE6MTE6MTE6YWM6MmE6YmE6OTc6YjI6M2Y6YWM6N2I6YmQ6ZGE6YmU6YjE6OGE6\nZmM6OWE6YmE6YjU6YmM6ODM6ZTc6NWU6NDE6NmY6ZjI6NzM6OTU6NTg6MGM6ZGIw\nWQYDVR0RBFIwUIZOc3BpZmZlOi8vMTExMTExMTEtMjIyMi0zMzMzLTQ0NDQtNTU1\nNTU1NTU1NTU1LmNvbnN1bC9ucy9kZWZhdWx0L2RjL2RjMS9zdmMvd2ViMAoGCCqG\nSM49BAMCA0gAMEUCIGC3TTvvjj76KMrguVyFf4tjOqaSCRie3nmHMRNNRav7AiEA\npY0heYeK9A6iOLrzqxSerkXXQyj5e9bE4VgUnxgPU6g=\n-----END CERTIFICATE-----\n" + }, + "privateKey": { + "inlineString": "-----BEGIN EC PRIVATE KEY-----\nMHcCAQEEIMoTkpRggp3fqZzFKh82yS4LjtJI+XY+qX/7DefHFrtdoAoGCCqGSM49\nAwEHoUQDQgAEADPv1RHVNRfa2VKRAB16b6rZnEt7tuhaxCFpQXPj7M2omb0B9Fav\nq5E0ivpNtv1QnFhxtPd7d5k4e+T7SkW1TQ==\n-----END EC PRIVATE KEY-----\n" + } + } + ], + "validationContext": { + "trustedCa": { + "inlineString": "-----BEGIN CERTIFICATE-----\nMIICXDCCAgKgAwIBAgIICpZq70Z9LyUwCgYIKoZIzj0EAwIwFDESMBAGA1UEAxMJ\nVGVzdCBDQSAyMB4XDTE5MDMyMjEzNTgyNloXDTI5MDMyMjEzNTgyNlowFDESMBAG\nA1UEAxMJVGVzdCBDQSAyMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEIhywH1gx\nAsMwuF3ukAI5YL2jFxH6Usnma1HFSfVyxbXX1/uoZEYrj8yCAtdU2yoHETyd+Zx2\nThhRLP79pYegCaOCATwwggE4MA4GA1UdDwEB/wQEAwIBhjAPBgNVHRMBAf8EBTAD\nAQH/MGgGA1UdDgRhBF9kMToxMToxMTphYzoyYTpiYTo5NzpiMjozZjphYzo3Yjpi\nZDpkYTpiZTpiMTo4YTpmYzo5YTpiYTpiNTpiYzo4MzplNzo1ZTo0MTo2ZjpmMjo3\nMzo5NTo1ODowYzpkYjBqBgNVHSMEYzBhgF9kMToxMToxMTphYzoyYTpiYTo5Nzpi\nMjozZjphYzo3YjpiZDpkYTpiZTpiMTo4YTpmYzo5YTpiYTpiNTpiYzo4MzplNzo1\nZTo0MTo2ZjpmMjo3Mzo5NTo1ODowYzpkYjA/BgNVHREEODA2hjRzcGlmZmU6Ly8x\nMTExMTExMS0yMjIyLTMzMzMtNDQ0NC01NTU1NTU1NTU1NTUuY29uc3VsMAoGCCqG\nSM49BAMCA0gAMEUCICOY0i246rQHJt8o8Oya0D5PLL1FnmsQmQqIGCi31RwnAiEA\noR5f6Ku+cig2Il8T8LJujOp2/2A72QcHZA57B13y+8o=\n-----END CERTIFICATE-----\n" + }, + "matchSubjectAltNames": [ + { + "exact": "spiffe://11111111-2222-3333-4444-555555555555.consul/ns/default/dc/dc1/svc/db" + } + ] + } + }, + "sni": "db.default.dc1.internal.11111111-2222-3333-4444-555555555555.consul" + } + } + } + ], + "typeUrl": "type.googleapis.com/envoy.config.cluster.v3.Cluster", + "nonce": "00000001" +} \ No newline at end of file diff --git a/agent/xds/testdata/endpoints/connect-proxy-with-chain-and-failover-to-cluster-peer.latest.golden b/agent/xds/testdata/endpoints/connect-proxy-with-chain-and-failover-to-cluster-peer.latest.golden new file mode 100644 index 0000000000..feaea90551 --- /dev/null +++ b/agent/xds/testdata/endpoints/connect-proxy-with-chain-and-failover-to-cluster-peer.latest.golden @@ -0,0 +1,109 @@ +{ + "versionInfo": "00000001", + "resources": [ + { + "@type": "type.googleapis.com/envoy.config.endpoint.v3.ClusterLoadAssignment", + "clusterName": "failover-target~db.default.cluster-01.external.peer1.domain", + "endpoints": [ + { + "lbEndpoints": [ + { + "endpoint": { + "address": { + "socketAddress": { + "address": "10.40.1.1", + "portValue": 8080 + } + } + }, + "healthStatus": 
"HEALTHY", + "loadBalancingWeight": 1 + }, + { + "endpoint": { + "address": { + "socketAddress": { + "address": "10.40.1.2", + "portValue": 8080 + } + } + }, + "healthStatus": "HEALTHY", + "loadBalancingWeight": 1 + } + ] + } + ] + }, + { + "@type": "type.googleapis.com/envoy.config.endpoint.v3.ClusterLoadAssignment", + "clusterName": "failover-target~db.default.dc1.internal.11111111-2222-3333-4444-555555555555.consul", + "endpoints": [ + { + "lbEndpoints": [ + { + "endpoint": { + "address": { + "socketAddress": { + "address": "10.10.1.1", + "portValue": 8080 + } + } + }, + "healthStatus": "HEALTHY", + "loadBalancingWeight": 1 + }, + { + "endpoint": { + "address": { + "socketAddress": { + "address": "10.10.1.2", + "portValue": 8080 + } + } + }, + "healthStatus": "HEALTHY", + "loadBalancingWeight": 1 + } + ] + } + ] + }, + { + "@type": "type.googleapis.com/envoy.config.endpoint.v3.ClusterLoadAssignment", + "clusterName": "geo-cache.default.dc1.query.11111111-2222-3333-4444-555555555555.consul", + "endpoints": [ + { + "lbEndpoints": [ + { + "endpoint": { + "address": { + "socketAddress": { + "address": "10.10.1.1", + "portValue": 8080 + } + } + }, + "healthStatus": "HEALTHY", + "loadBalancingWeight": 1 + }, + { + "endpoint": { + "address": { + "socketAddress": { + "address": "10.20.1.2", + "portValue": 8080 + } + } + }, + "healthStatus": "HEALTHY", + "loadBalancingWeight": 1 + } + ] + } + ] + } + ], + "typeUrl": "type.googleapis.com/envoy.config.endpoint.v3.ClusterLoadAssignment", + "nonce": "00000001" +} \ No newline at end of file diff --git a/agent/xds/testdata/endpoints/ingress-with-chain-and-failover-to-cluster-peer.latest.golden b/agent/xds/testdata/endpoints/ingress-with-chain-and-failover-to-cluster-peer.latest.golden new file mode 100644 index 0000000000..c799a5a0cc --- /dev/null +++ b/agent/xds/testdata/endpoints/ingress-with-chain-and-failover-to-cluster-peer.latest.golden @@ -0,0 +1,75 @@ +{ + "versionInfo": "00000001", + "resources": [ + { + "@type": "type.googleapis.com/envoy.config.endpoint.v3.ClusterLoadAssignment", + "clusterName": "failover-target~db.default.cluster-01.external.peer1.domain", + "endpoints": [ + { + "lbEndpoints": [ + { + "endpoint": { + "address": { + "socketAddress": { + "address": "10.40.1.1", + "portValue": 8080 + } + } + }, + "healthStatus": "HEALTHY", + "loadBalancingWeight": 1 + }, + { + "endpoint": { + "address": { + "socketAddress": { + "address": "10.40.1.2", + "portValue": 8080 + } + } + }, + "healthStatus": "HEALTHY", + "loadBalancingWeight": 1 + } + ] + } + ] + }, + { + "@type": "type.googleapis.com/envoy.config.endpoint.v3.ClusterLoadAssignment", + "clusterName": "failover-target~db.default.dc1.internal.11111111-2222-3333-4444-555555555555.consul", + "endpoints": [ + { + "lbEndpoints": [ + { + "endpoint": { + "address": { + "socketAddress": { + "address": "10.10.1.1", + "portValue": 8080 + } + } + }, + "healthStatus": "HEALTHY", + "loadBalancingWeight": 1 + }, + { + "endpoint": { + "address": { + "socketAddress": { + "address": "10.10.1.2", + "portValue": 8080 + } + } + }, + "healthStatus": "HEALTHY", + "loadBalancingWeight": 1 + } + ] + } + ] + } + ], + "typeUrl": "type.googleapis.com/envoy.config.endpoint.v3.ClusterLoadAssignment", + "nonce": "00000001" +} \ No newline at end of file diff --git a/test/integration/connect/envoy/case-cfg-resolver-cluster-peering-failover/alpha/base.hcl b/test/integration/connect/envoy/case-cfg-resolver-cluster-peering-failover/alpha/base.hcl new file mode 100644 index 0000000000..f81ab0edd6 --- /dev/null +++ 
b/test/integration/connect/envoy/case-cfg-resolver-cluster-peering-failover/alpha/base.hcl @@ -0,0 +1,5 @@ +primary_datacenter = "alpha" +log_level = "trace" +peering { + enabled = true +} diff --git a/test/integration/connect/envoy/case-cfg-resolver-cluster-peering-failover/alpha/config_entries.hcl b/test/integration/connect/envoy/case-cfg-resolver-cluster-peering-failover/alpha/config_entries.hcl new file mode 100644 index 0000000000..64d0117020 --- /dev/null +++ b/test/integration/connect/envoy/case-cfg-resolver-cluster-peering-failover/alpha/config_entries.hcl @@ -0,0 +1,26 @@ +config_entries { + bootstrap = [ + { + kind = "proxy-defaults" + name = "global" + + config { + protocol = "tcp" + } + }, + { + kind = "exported-services" + name = "default" + services = [ + { + name = "s2" + consumers = [ + { + peer_name = "alpha-to-primary" + } + ] + } + ] + } + ] +} diff --git a/test/integration/connect/envoy/case-cfg-resolver-cluster-peering-failover/alpha/service_gateway.hcl b/test/integration/connect/envoy/case-cfg-resolver-cluster-peering-failover/alpha/service_gateway.hcl new file mode 100644 index 0000000000..bcdcb2e8b3 --- /dev/null +++ b/test/integration/connect/envoy/case-cfg-resolver-cluster-peering-failover/alpha/service_gateway.hcl @@ -0,0 +1,5 @@ +services { + name = "mesh-gateway" + kind = "mesh-gateway" + port = 4432 +} diff --git a/test/integration/connect/envoy/case-cfg-resolver-cluster-peering-failover/alpha/service_s1.hcl b/test/integration/connect/envoy/case-cfg-resolver-cluster-peering-failover/alpha/service_s1.hcl new file mode 100644 index 0000000000..e97ec23666 --- /dev/null +++ b/test/integration/connect/envoy/case-cfg-resolver-cluster-peering-failover/alpha/service_s1.hcl @@ -0,0 +1 @@ +# We don't want an s1 service in this peer diff --git a/test/integration/connect/envoy/case-cfg-resolver-cluster-peering-failover/alpha/service_s2.hcl b/test/integration/connect/envoy/case-cfg-resolver-cluster-peering-failover/alpha/service_s2.hcl new file mode 100644 index 0000000000..01d4505c67 --- /dev/null +++ b/test/integration/connect/envoy/case-cfg-resolver-cluster-peering-failover/alpha/service_s2.hcl @@ -0,0 +1,7 @@ +services { + name = "s2" + port = 8181 + connect { + sidecar_service {} + } +} diff --git a/test/integration/connect/envoy/case-cfg-resolver-cluster-peering-failover/alpha/setup.sh b/test/integration/connect/envoy/case-cfg-resolver-cluster-peering-failover/alpha/setup.sh new file mode 100644 index 0000000000..820506ea9b --- /dev/null +++ b/test/integration/connect/envoy/case-cfg-resolver-cluster-peering-failover/alpha/setup.sh @@ -0,0 +1,11 @@ +#!/bin/bash + +set -euo pipefail + +register_services alpha + +gen_envoy_bootstrap s2 19002 alpha +gen_envoy_bootstrap mesh-gateway 19003 alpha true + +wait_for_config_entry proxy-defaults global alpha +wait_for_config_entry exported-services default alpha diff --git a/test/integration/connect/envoy/case-cfg-resolver-cluster-peering-failover/alpha/verify.bats b/test/integration/connect/envoy/case-cfg-resolver-cluster-peering-failover/alpha/verify.bats new file mode 100644 index 0000000000..d2229b2974 --- /dev/null +++ b/test/integration/connect/envoy/case-cfg-resolver-cluster-peering-failover/alpha/verify.bats @@ -0,0 +1,27 @@ +#!/usr/bin/env bats + +load helpers + +@test "s2 proxy is running correct version" { + assert_envoy_version 19002 +} + +@test "s2 proxy admin is up on :19002" { + retry_default curl -f -s localhost:19002/stats -o /dev/null +} + +@test "gateway-alpha proxy admin is up on :19003" { + retry_default 
curl -f -s localhost:19003/stats -o /dev/null +} + +@test "s2 proxy listener should be up and have right cert" { + assert_proxy_presents_cert_uri localhost:21000 s2 alpha +} + +@test "s2 proxy should be healthy" { + assert_service_has_healthy_instances s2 1 alpha +} + +@test "gateway-alpha should be up and listening" { + retry_long nc -z consul-alpha-client:4432 +} diff --git a/test/integration/connect/envoy/case-cfg-resolver-cluster-peering-failover/bind.hcl b/test/integration/connect/envoy/case-cfg-resolver-cluster-peering-failover/bind.hcl new file mode 100644 index 0000000000..f54393f03e --- /dev/null +++ b/test/integration/connect/envoy/case-cfg-resolver-cluster-peering-failover/bind.hcl @@ -0,0 +1,2 @@ +bind_addr = "0.0.0.0" +advertise_addr = "{{ GetInterfaceIP \"eth0\" }}" \ No newline at end of file diff --git a/test/integration/connect/envoy/case-cfg-resolver-cluster-peering-failover/capture.sh b/test/integration/connect/envoy/case-cfg-resolver-cluster-peering-failover/capture.sh new file mode 100644 index 0000000000..ab90eb425a --- /dev/null +++ b/test/integration/connect/envoy/case-cfg-resolver-cluster-peering-failover/capture.sh @@ -0,0 +1,6 @@ +#!/bin/bash + +snapshot_envoy_admin localhost:19000 s1 primary || true +snapshot_envoy_admin localhost:19001 s2 primary || true +snapshot_envoy_admin localhost:19002 s2 alpha || true +snapshot_envoy_admin localhost:19003 mesh-gateway alpha || true diff --git a/test/integration/connect/envoy/case-cfg-resolver-cluster-peering-failover/primary/base.hcl b/test/integration/connect/envoy/case-cfg-resolver-cluster-peering-failover/primary/base.hcl new file mode 100644 index 0000000000..c1e134d5a2 --- /dev/null +++ b/test/integration/connect/envoy/case-cfg-resolver-cluster-peering-failover/primary/base.hcl @@ -0,0 +1,3 @@ +peering { + enabled = true +} diff --git a/test/integration/connect/envoy/case-cfg-resolver-cluster-peering-failover/primary/config_entries.hcl b/test/integration/connect/envoy/case-cfg-resolver-cluster-peering-failover/primary/config_entries.hcl new file mode 100644 index 0000000000..d9b4ba03b5 --- /dev/null +++ b/test/integration/connect/envoy/case-cfg-resolver-cluster-peering-failover/primary/config_entries.hcl @@ -0,0 +1,21 @@ +config_entries { + bootstrap { + kind = "proxy-defaults" + name = "global" + + config { + protocol = "tcp" + } + } + + bootstrap { + kind = "service-resolver" + name = "s2" + + failover = { + "*" = { + targets = [{peer = "primary-to-alpha"}] + } + } + } +} diff --git a/test/integration/connect/envoy/case-cfg-resolver-cluster-peering-failover/primary/service_s1.hcl b/test/integration/connect/envoy/case-cfg-resolver-cluster-peering-failover/primary/service_s1.hcl new file mode 100644 index 0000000000..842490e63b --- /dev/null +++ b/test/integration/connect/envoy/case-cfg-resolver-cluster-peering-failover/primary/service_s1.hcl @@ -0,0 +1,16 @@ +services { + name = "s1" + port = 8080 + connect { + sidecar_service { + proxy { + upstreams = [ + { + destination_name = "s2" + local_bind_port = 5000 + } + ] + } + } + } +} diff --git a/test/integration/connect/envoy/case-cfg-resolver-cluster-peering-failover/primary/service_s2.hcl b/test/integration/connect/envoy/case-cfg-resolver-cluster-peering-failover/primary/service_s2.hcl new file mode 100644 index 0000000000..01d4505c67 --- /dev/null +++ b/test/integration/connect/envoy/case-cfg-resolver-cluster-peering-failover/primary/service_s2.hcl @@ -0,0 +1,7 @@ +services { + name = "s2" + port = 8181 + connect { + sidecar_service {} + } +} diff --git 
a/test/integration/connect/envoy/case-cfg-resolver-cluster-peering-failover/primary/setup.sh b/test/integration/connect/envoy/case-cfg-resolver-cluster-peering-failover/primary/setup.sh new file mode 100644 index 0000000000..c65cc31e49 --- /dev/null +++ b/test/integration/connect/envoy/case-cfg-resolver-cluster-peering-failover/primary/setup.sh @@ -0,0 +1,10 @@ +#!/bin/bash + +set -euo pipefail + +register_services primary + +gen_envoy_bootstrap s1 19000 primary +gen_envoy_bootstrap s2 19001 primary + +wait_for_config_entry proxy-defaults global diff --git a/test/integration/connect/envoy/case-cfg-resolver-cluster-peering-failover/primary/verify.bats b/test/integration/connect/envoy/case-cfg-resolver-cluster-peering-failover/primary/verify.bats new file mode 100644 index 0000000000..543459333d --- /dev/null +++ b/test/integration/connect/envoy/case-cfg-resolver-cluster-peering-failover/primary/verify.bats @@ -0,0 +1,87 @@ +#!/usr/bin/env bats + +load helpers + +@test "s1 proxy is running correct version" { + assert_envoy_version 19000 +} + +@test "s1 proxy admin is up on :19000" { + retry_default curl -f -s localhost:19000/stats -o /dev/null +} + +@test "s2 proxy admin is up on :19001" { + retry_default curl -f -s localhost:19001/stats -o /dev/null +} + +@test "gateway-primary proxy admin is up on :19001" { + retry_default curl localhost:19000/config_dump +} + +@test "s1 proxy listener should be up and have right cert" { + assert_proxy_presents_cert_uri localhost:21000 s1 +} + +@test "s2 proxies should be healthy in primary" { + assert_service_has_healthy_instances s2 1 primary +} + +@test "s2 proxies should be healthy in alpha" { + assert_service_has_healthy_instances s2 1 alpha +} + +@test "gateway-alpha should be up and listening" { + retry_long nc -z consul-alpha-client:4432 +} + +@test "peer the two clusters together" { + create_peering primary alpha +} + +@test "s2 alpha proxies should be healthy in primary" { + assert_service_has_healthy_instances s2 1 primary "" "" primary-to-alpha +} + +@test "s1 upstream should have healthy endpoints for s2 in both primary and failover" { + assert_upstream_has_endpoints_in_status 127.0.0.1:19000 failover-target~s2.default.primary.internal HEALTHY 1 + assert_upstream_has_endpoints_in_status 127.0.0.1:19000 failover-target~s2.default.primary-to-alpha.external HEALTHY 1 +} + + +@test "s1 upstream should be able to connect to s2" { + run retry_default curl -s -f -d hello localhost:5000 + [ "$status" -eq 0 ] + [ "$output" = "hello" ] +} + +@test "s1 upstream made 1 connection" { + assert_envoy_metric_at_least 127.0.0.1:19000 "cluster.failover-target~s2.default.primary.internal.*cx_total" 1 +} + +@test "terminate instance of s2 primary envoy which should trigger failover to s2 alpha when the tcp check fails" { + kill_envoy s2 primary +} + +@test "s2 proxies should be unhealthy in primary" { + assert_service_has_healthy_instances s2 0 primary +} + +@test "s1 upstream should have healthy endpoints for s2 in the failover cluster peer" { + assert_upstream_has_endpoints_in_status 127.0.0.1:19000 failover-target~s2.default.primary.internal UNHEALTHY 1 + assert_upstream_has_endpoints_in_status 127.0.0.1:19000 failover-target~s2.default.primary-to-alpha.external HEALTHY 1 +} + +@test "reset envoy statistics" { + reset_envoy_metrics 127.0.0.1:19000 +} + + +@test "s1 upstream should be able to connect to s2 in the failover cluster peer" { + run retry_default curl -s -f -d hello localhost:5000 + [ "$status" -eq 0 ] + [ "$output" = "hello" ] +} + +@test "s1 
upstream made 1 connection to s2 through the cluster peer" { + assert_envoy_metric_at_least 127.0.0.1:19000 "cluster.failover-target~s2.default.primary-to-alpha.external.*cx_total" 1 +} diff --git a/test/integration/connect/envoy/case-cfg-resolver-cluster-peering-failover/vars.sh b/test/integration/connect/envoy/case-cfg-resolver-cluster-peering-failover/vars.sh new file mode 100644 index 0000000000..8e9108a340 --- /dev/null +++ b/test/integration/connect/envoy/case-cfg-resolver-cluster-peering-failover/vars.sh @@ -0,0 +1,4 @@ +#!/bin/bash + +export REQUIRED_SERVICES="s1 s1-sidecar-proxy s2 s2-sidecar-proxy s2-alpha s2-sidecar-proxy-alpha gateway-alpha tcpdump-primary tcpdump-alpha" +export REQUIRE_PEERS=1 From 7345432c3bab5adf7685e1c6c5fdcd50e786ee63 Mon Sep 17 00:00:00 2001 From: Josh Roose <54345520+joshRooz@users.noreply.github.com> Date: Wed, 31 Aug 2022 02:53:18 +1000 Subject: [PATCH 61/93] events compiled to JSON sentence structure (#13717) --- website/content/docs/enterprise/audit-logging.mdx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/content/docs/enterprise/audit-logging.mdx b/website/content/docs/enterprise/audit-logging.mdx index a8d69fae26..93661d49c6 100644 --- a/website/content/docs/enterprise/audit-logging.mdx +++ b/website/content/docs/enterprise/audit-logging.mdx @@ -17,7 +17,7 @@ description: >- With Consul Enterprise v1.8.0+, audit logging can be used to capture a clear and actionable log of authenticated events (both attempted and committed) that Consul -processes via its HTTP API. These events are compiled them into a JSON format for easy export +processes via its HTTP API. These events are then compiled into a JSON format for easy export and contain a timestamp, the operation performed, and the user who initiated the action. Audit logging enables security and compliance teams within an organization to get From e56fd9a45a8a632d6f2f7e836b342531b9d5e267 Mon Sep 17 00:00:00 2001 From: Jeff Boruszak <104028618+boruszak@users.noreply.github.com> Date: Tue, 10 May 2022 21:40:39 -0700 Subject: [PATCH 62/93] docs: Additional feedback from PR #12971 This commit incorporates additional feedback received related to PR #12971. --- website/content/api-docs/snapshot.mdx | 6 +++--- website/content/commands/snapshot/restore.mdx | 6 +++--- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/website/content/api-docs/snapshot.mdx b/website/content/api-docs/snapshot.mdx index 81357464b4..2f3c094ab4 100644 --- a/website/content/api-docs/snapshot.mdx +++ b/website/content/api-docs/snapshot.mdx @@ -75,9 +75,9 @@ This endpoint restores a point-in-time snapshot of the Consul server state. Restores involve a potentially dangerous low-level Raft operation that is not designed to handle server failures during a restore. This operation is primarily -intended to be used when recovering from a disaster, restoring into a fresh -cluster of Consul servers running the same version as the cluster from where the -snapshot was taken. +intended to recover from a disaster. It restores your configuration into a fresh +cluster of Consul servers as long as your new cluster runs the same Consul +version as the cluster that originally took the snapshot. 
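For illustration only, outside the patch series: a minimal sketch of the save/restore flow this documentation change describes, using the official Go API client (`github.com/hashicorp/consul/api`). The file name and the nil query/write options are placeholder assumptions.

```go
package main

import (
	"io"
	"log"
	"os"

	"github.com/hashicorp/consul/api"
)

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}

	// Save a point-in-time snapshot of server state to a local file.
	snap, _, err := client.Snapshot().Save(nil)
	if err != nil {
		log.Fatal(err)
	}
	defer snap.Close()

	out, err := os.Create("backup.snap") // placeholder path
	if err != nil {
		log.Fatal(err)
	}
	defer out.Close()
	if _, err := io.Copy(out, snap); err != nil {
		log.Fatal(err)
	}

	// Restore the snapshot, e.g. into a fresh cluster running the
	// same Consul version, as the docs above require.
	in, err := os.Open("backup.snap")
	if err != nil {
		log.Fatal(err)
	}
	defer in.Close()
	if err := client.Snapshot().Restore(nil, in); err != nil {
		log.Fatal(err)
	}
}
```

The CLI equivalents are `consul snapshot save` and `consul snapshot restore`, covered by the `commands/snapshot/restore.mdx` change in this same patch.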
| Method | Path | Produces | | :----- | :---------- | ----------------------------- | diff --git a/website/content/commands/snapshot/restore.mdx b/website/content/commands/snapshot/restore.mdx index 2d7ec902ea..8bbe50fe13 100644 --- a/website/content/commands/snapshot/restore.mdx +++ b/website/content/commands/snapshot/restore.mdx @@ -16,9 +16,9 @@ from the given file. Restores involve a potentially dangerous low-level Raft operation that is not designed to handle server failures during a restore. This command is primarily -intended to be used when recovering from a disaster, restoring into a fresh -cluster of Consul servers running the same version as the cluster from where the -snapshot was taken. +intended to recover from a disaster. It restores your configuration into a fresh +cluster of Consul servers as long as your new cluster runs the same Consul +version as the cluster that originally took the snapshot. The table below shows this command's [required ACLs](/api#authentication). Configuration of [blocking queries](/api-docs/features/blocking) and [agent caching](/api-docs/features/caching) From 3c82d36a23968c3216bccc319521ebc19bc3e344 Mon Sep 17 00:00:00 2001 From: Mike Morris Date: Tue, 30 Aug 2022 15:44:06 -0400 Subject: [PATCH 63/93] ci: update backport-assistant to pick merge commit (#14408) --- .github/workflows/backport-assistant.yml | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/.github/workflows/backport-assistant.yml b/.github/workflows/backport-assistant.yml index f6738815b9..b68e41e612 100644 --- a/.github/workflows/backport-assistant.yml +++ b/.github/workflows/backport-assistant.yml @@ -16,7 +16,7 @@ jobs: backport: if: github.event.pull_request.merged runs-on: ubuntu-latest - container: hashicorpdev/backport-assistant:0.2.3 + container: hashicorpdev/backport-assistant:0.2.5 steps: - name: Run Backport Assistant for stable-website run: | @@ -24,6 +24,7 @@ jobs: env: BACKPORT_LABEL_REGEXP: "type/docs-(?Pcherrypick)" BACKPORT_TARGET_TEMPLATE: "stable-website" + BACKPORT_MERGE_COMMIT: true GITHUB_TOKEN: ${{ secrets.ELEVATED_GITHUB_TOKEN }} - name: Backport changes to latest release branch run: | From 9e726e769186bfac5dfd8df374baebdeefdc8265 Mon Sep 17 00:00:00 2001 From: David Yu Date: Tue, 30 Aug 2022 15:17:35 -0700 Subject: [PATCH 64/93] docs: re-organize service and node lookups for Consul Enterprise (#14389) * docs: re-organize service and node lookups for Consul Enterprise Co-authored-by: trujillo-adam <47586768+trujillo-adam@users.noreply.github.com> Co-authored-by: Jared Kirschner <85913323+jkirschner-hashicorp@users.noreply.github.com> --- website/content/docs/discovery/dns.mdx | 85 ++++++++++++------- .../docs/enterprise/admin-partitions.mdx | 2 +- 2 files changed, 54 insertions(+), 33 deletions(-) diff --git a/website/content/docs/discovery/dns.mdx b/website/content/docs/discovery/dns.mdx index b50e8deeeb..5643068ba3 100644 --- a/website/content/docs/discovery/dns.mdx +++ b/website/content/docs/discovery/dns.mdx @@ -96,6 +96,23 @@ pairs according to [RFC1464](https://www.ietf.org/rfc/rfc1464.txt). Alternatively, the TXT record will only include the node's metadata value when the node's metadata key starts with `rfc1035-`. + +### Node Lookups for Consul Enterprise + +Consul nodes exist at the admin partition level within a datacenter. +By default, the partition and datacenter used in a [node lookup](#node-lookups) are +the partition and datacenter of the Consul agent that received the DNS query. 
+
+Use the following query format to specify a partition for a node lookup:
+```text
+[<tag>.]<node>.node.<partition>.ap.<datacenter>.dc.<domain>
+```
+
+Consul server agents are in the `default` partition.
+If DNS queries are addressed to Consul server agents,
+node lookups to non-`default` partitions must explicitly specify
+the partition of the target node.
+
 ## Service Lookups
 
 A service lookup is used to query for service providers. Service queries support
@@ -334,6 +351,28 @@ $ echo -n "20010db800010002cafe000000001337" | perl -ne 'printf join(":", unpack
 
+### Service Lookups for Consul Enterprise
+
+By default, all service lookups use the `default` namespace
+within the partition and datacenter of the Consul agent that received the DNS query.
+
+Use the following query format to specify a namespace, partition, and/or datacenter
+for all service lookup types except `.query`,
+including `.service`, `.connect`, `.virtual`, and `.ingress`.
+At least two of those three fields (`namespace`, `partition`, `datacenter`)
+must be specified.
+```text
+[<tag>.]<service>.service.<namespace>.ns.<partition>.ap.<datacenter>.dc.<domain>
+```
+
+Consul server agents are in the `default` partition.
+If DNS queries are addressed to Consul server agents,
+service lookups to non-`default` partitions must explicitly specify
+the partition of the target service.
+
+To lookup services imported from a cluster peer,
+use a [service virtual IP lookup for Consul Enterprise](#service-virtual-ip-lookups-for-consul-enterprise) instead.
+
 ### Prepared Query Lookups
 
 The format of a prepared query lookup is:
@@ -398,7 +437,21 @@ of a service imported from that peer. The virtual IP is also added to the
 service's [Tagged Addresses](/docs/discovery/services#tagged-addresses) under
 the `consul-virtual` tag.
+
+#### Service Virtual IP Lookups for Consul Enterprise
+By default, a service virtual IP lookup uses the `default` namespace
+within the partition and datacenter of the Consul agent that received the DNS query.
+
+To lookup services imported from a cluster peered partition or open-source datacenter,
+specify the namespace and peer name in the lookup:
+```text
+<service>.virtual[.<namespace>].<peer>.<domain>
+```
+
+To lookup services not imported from a cluster peer,
+refer to [service lookups for Consul Enterprise](#service-lookups-for-consul-enterprise) instead.
+
 ### Ingress Service Lookups
 
 To find ingress-enabled services:
@@ -480,38 +533,6 @@ using the [`advertise-wan`](/docs/agent/config/cli-flags#_advertise-wan) and
 [`translate_wan_addrs`](/docs/agent/config/config-files#translate_wan_addrs)
 configuration options.
 
-## Namespaced/Partitioned Services and Nodes
-
-Consul Enterprise supports resolving namespaced and partitioned services via DNS.
-The DNS server in Consul Enterprise can resolve services assigned to namespaces and partitions.
-The DNS server can also resolve nodes assigned to partitions.
-To maintain backwards compatibility existing queries can be used and these will
-resolve services within the `default` namespace and partition. However, for resolving
-services from other namespaces or partitions the following form can be used:
-
-```text
-[<tag>.]<service>.service.<namespace>.ns.<partition>.ap.<datacenter>.dc.<domain>
-```
-
-This sequence is the canonical naming convention of a Consul Enterprise service. At least two of the following
-fields must be present:
-* `namespace`
-* `partition`
-* `datacenter`
-
-For imported lookups, only the namespace and peer need to be specified as the partition can be inferred from the peering:
-
-```text
-<service>.virtual[.<namespace>].<peer>.<domain>
-```
-
-For node lookups, only the partition and datacenter need to be specified as nodes cannot be
-namespaced.
-
-```text
-[<tag>.]<node>.node.<partition>.ap.<datacenter>.dc.<domain>
-```
-
 ## DNS with ACLs
 
 In order to use the DNS interface when
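For illustration only, outside the patch series: a minimal sketch of issuing one of the lookups above from Go, assuming a local agent serving DNS on the default 127.0.0.1:8600; the service, namespace, partition, and datacenter names are placeholders.

```go
package main

import (
	"context"
	"fmt"
	"log"
	"net"
	"time"
)

func main() {
	// Route queries to the Consul agent's DNS interface instead of the
	// system resolver.
	r := &net.Resolver{
		PreferGo: true,
		Dial: func(ctx context.Context, network, _ string) (net.Conn, error) {
			d := net.Dialer{Timeout: 2 * time.Second}
			return d.DialContext(ctx, network, "127.0.0.1:8600")
		},
	}

	// [<tag>.]<service>.service.<namespace>.ns.<partition>.ap.<datacenter>.dc.<domain>
	name := "web.service.default.ns.default.ap.dc1.dc.consul"
	addrs, err := r.LookupHost(context.Background(), name)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(addrs)
}
```

An equivalent ad-hoc check from a shell is `dig @127.0.0.1 -p 8600 web.service.default.ns.default.ap.dc1.dc.consul`.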
diff --git a/website/content/docs/enterprise/admin-partitions.mdx b/website/content/docs/enterprise/admin-partitions.mdx
index 089aac51db..da33eff193 100644
--- a/website/content/docs/enterprise/admin-partitions.mdx
+++ b/website/content/docs/enterprise/admin-partitions.mdx
@@ -58,7 +58,7 @@ The partition in which [`proxy-defaults`](/docs/connect/config-entries/proxy-def
 
 ### Cross-partition Networking
 
-You can configure services to be discoverable by downstream services in any partition within the datacenter. Specify the upstream services that you want to be available for discovery by configuring the `exported-services` configuration entry in the partition where the services are registered. Refer to the [`exported-services` documentation](/docs/connect/config-entries/exported-services) for details. Additionally, the `upstreams` configuration for proxies in the source partition must specify the name of the destination partition so that listeners can be created. Refer to the [Upstream Configuration Reference](/docs/connect/registration/service-registration#upstream-configuration-reference) for additional information.
+You can configure services to be discoverable by downstream services in any partition within the datacenter. Specify the upstream services that you want to be available for discovery by configuring the `exported-services` configuration entry in the partition where the services are registered. Refer to the [`exported-services` documentation](/docs/connect/config-entries/exported-services) for details. Additionally, the requests made by downstream applications must have the correct DNS name for the Virtual IP Service lookup to occur. Service Virtual IP lookups allow for communications across Admin Partitions when using Transparent Proxy. Refer to the [Service Virtual IP Lookups for Consul Enterprise](/docs/discovery/dns#service-virtual-ip-lookups-for-consul-enterprise) for additional information.
 
 ## Requirements
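For illustration only, outside the patch series: a sketch of writing the `exported-services` entry mentioned above with the Go API client. It assumes an Enterprise cluster where the entry is named after the exporting partition; the `store`/`web` partition names and the `s2` service are placeholders, and the `ExportedServicesConfigEntry` field names reflect the API package as of this era of the codebase.

```go
package main

import (
	"log"

	"github.com/hashicorp/consul/api"
)

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}

	// Export service "s2" from partition "store" to downstreams in "web".
	entry := &api.ExportedServicesConfigEntry{
		Name: "store",
		Services: []api.ExportedService{
			{
				Name:      "s2",
				Consumers: []api.ServiceConsumer{{Partition: "web"}},
			},
		},
	}

	if _, _, err := client.ConfigEntries().Set(entry, nil); err != nil {
		log.Fatal(err)
	}
}
```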
From 436bc8f3e3255997a3d37ddee57a006f8b577968 Mon Sep 17 00:00:00 2001
From: Thomas Kula
Date: Sat, 28 May 2022 15:22:01 -0400
Subject: [PATCH 65/93] Typo fix in service-splitter.mdx

---
 .../content/docs/connect/config-entries/service-splitter.mdx | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/website/content/docs/connect/config-entries/service-splitter.mdx b/website/content/docs/connect/config-entries/service-splitter.mdx
index bc5d709ce5..609cf818e6 100644
--- a/website/content/docs/connect/config-entries/service-splitter.mdx
+++ b/website/content/docs/connect/config-entries/service-splitter.mdx
@@ -302,7 +302,7 @@ spec:
     name: 'weight',
     type: 'float32: 0',
     description:
-      'A value between 0 and 100 reflecting what portion of traffic should be directed to this split. The smallest representable eight is 1/10000 or .01%',
+      'A value between 0 and 100 reflecting what portion of traffic should be directed to this split. The smallest representable weight is 1/10000 or .01%',
   },
   {
     name: 'Service',

From e12a13d2774ab30160013dafd72a5262bb49a71a Mon Sep 17 00:00:00 2001
From: "Chris S. Kim"
Date: Wed, 31 Aug 2022 12:11:15 -0400
Subject: [PATCH 66/93] Fix code example

---
 .../docs/connect/cluster-peering/create-manage-peering.mdx | 7 ++++---
 1 file changed, 4 insertions(+), 3 deletions(-)

diff --git a/website/content/docs/connect/cluster-peering/create-manage-peering.mdx b/website/content/docs/connect/cluster-peering/create-manage-peering.mdx
index 009c60f409..ee0a69a945 100644
--- a/website/content/docs/connect/cluster-peering/create-manage-peering.mdx
+++ b/website/content/docs/connect/cluster-peering/create-manage-peering.mdx
@@ -108,7 +108,7 @@ First, create a configuration entry and specify the `Kind` as `"exported-service
 
 ```hcl
 Kind = "exported-services"
-
+Name = "default"
 Services = [
   {
     ## The name and namespace of the service to export.
@@ -120,10 +120,11 @@ Services = [
       {
         ## The peer name to reference in config is the one set
        ## during the peering process.
-        Peer = "cluster-02"
+        PeerName = "cluster-02"
       }
-  } ]
+    }
+]
 ```

From a88ca9518db7630b1b3974edfd115b304277c678 Mon Sep 17 00:00:00 2001
From: Jared Kirschner <85913323+jkirschner-hashicorp@users.noreply.github.com>
Date: Wed, 31 Aug 2022 13:58:23 -0400
Subject: [PATCH 67/93] docs: node lookups don't support filtering on tag

---
 website/content/docs/discovery/dns.mdx | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/website/content/docs/discovery/dns.mdx b/website/content/docs/discovery/dns.mdx
index 5643068ba3..f052b0e27b 100644
--- a/website/content/docs/discovery/dns.mdx
+++ b/website/content/docs/discovery/dns.mdx
@@ -105,7 +105,7 @@ the partition and datacenter of the Consul agent that received the DNS query.
 
 Use the following query format to specify a partition for a node lookup:
 ```text
-[<tag>.]<node>.node.<partition>.ap.<datacenter>.dc.<domain>
+<node>.node.<partition>.ap.<datacenter>.dc.<domain>
 ```
 
 Consul server agents are in the `default` partition.

From 113454645dd572047cc9517a9990d66911018627 Mon Sep 17 00:00:00 2001
From: Kyle Havlovitz
Date: Wed, 31 Aug 2022 11:20:29 -0700
Subject: [PATCH 68/93] Prune old expired intermediate certs when appending a
 new one

---
 agent/consul/leader_connect_ca.go      | 22 ++++++++++++++++++++++
 agent/consul/leader_connect_ca_test.go |  1 -
 agent/consul/leader_connect_test.go    | 24 ++++++++++++++++++++++++
 3 files changed, 46 insertions(+), 1 deletion(-)

diff --git a/agent/consul/leader_connect_ca.go b/agent/consul/leader_connect_ca.go
index 5e22681645..d2cd021134 100644
--- a/agent/consul/leader_connect_ca.go
+++ b/agent/consul/leader_connect_ca.go
@@ -1100,6 +1100,28 @@ func setLeafSigningCert(caRoot *structs.CARoot, pem string) error {
 	caRoot.IntermediateCerts = append(caRoot.IntermediateCerts, pem)
 	caRoot.SigningKeyID = connect.EncodeSigningKeyID(cert.SubjectKeyId)
 
+	return pruneExpiredIntermediates(caRoot)
+}
+
+// pruneExpiredIntermediates removes expired intermediate certificates
+// from the given CARoot.
+func pruneExpiredIntermediates(caRoot *structs.CARoot) error {
+	var newIntermediates []string
+	now := time.Now()
+	for i, intermediatePEM := range caRoot.IntermediateCerts {
+		cert, err := connect.ParseCert(intermediatePEM)
+		if err != nil {
+			return fmt.Errorf("error parsing leaf signing cert: %w", err)
+		}
+
+		// Only keep the intermediate cert if it's still valid, or if it's the most
+		// recently added (and thus the active signing cert).
+		if cert.NotAfter.After(now) || i == len(caRoot.IntermediateCerts)-1 {
+			newIntermediates = append(newIntermediates, intermediatePEM)
+		}
+	}
+
+	caRoot.IntermediateCerts = newIntermediates
 	return nil
 }
 
diff --git a/agent/consul/leader_connect_ca_test.go b/agent/consul/leader_connect_ca_test.go
index 37756eb204..91095be8e6 100644
--- a/agent/consul/leader_connect_ca_test.go
+++ b/agent/consul/leader_connect_ca_test.go
@@ -435,7 +435,6 @@ func TestCAManager_SignCertificate_WithExpiredCert(t *testing.T) {
 		errorMsg           string
 	}{
 		{"intermediate valid", time.Now().AddDate(0, 0, -1), time.Now().AddDate(0, 0, 2), time.Now().AddDate(0, 0, -1), time.Now().AddDate(0, 0, 2), false, ""},
-		{"intermediate expired", time.Now().AddDate(0, 0, -1), time.Now().AddDate(0, 0, 2), time.Now().AddDate(-2, 0, 0), time.Now().AddDate(0, 0, -1), true, "intermediate expired: certificate expired, expiration date"},
 		{"root expired", time.Now().AddDate(-2, 0, 0), time.Now().AddDate(0, 0, -1), time.Now().AddDate(0, 0, -1), time.Now().AddDate(0, 0, 2), true, "root expired: certificate expired, expiration date"},
 		// a cert that is not yet valid is ok, assume it will be valid soon enough
 		{"intermediate in the future", time.Now().AddDate(0, 0, -1), time.Now().AddDate(0, 0, 2), time.Now().AddDate(0, 0, 1), time.Now().AddDate(0, 0, 2), false, ""},
diff --git a/agent/consul/leader_connect_test.go b/agent/consul/leader_connect_test.go
index d9b3863865..c8b361b03b 100644
--- a/agent/consul/leader_connect_test.go
+++ b/agent/consul/leader_connect_test.go
@@ -401,6 +401,18 @@ func TestCAManager_RenewIntermediate_Vault_Primary(t *testing.T) {
 	err = msgpackrpc.CallWithCodec(codec, "ConnectCA.Sign", &req, &cert)
 	require.NoError(t, err)
 	verifyLeafCert(t, activeRoot, cert.CertPEM)
+
+	// Wait for the primary's old intermediate to be pruned after expiring.
+	oldIntermediate := activeRoot.IntermediateCerts[0]
+	retry.Run(t, func(r *retry.R) {
+		store := s1.caManager.delegate.State()
+		_, storedRoot, err := store.CARootActive(nil)
+		r.Check(err)
+
+		if storedRoot.IntermediateCerts[0] == oldIntermediate {
+			r.Fatal("old intermediate should be gone")
+		}
+	})
 }
 
 func patchIntermediateCertRenewInterval(t *testing.T) {
@@ -516,6 +528,18 @@ func TestCAManager_RenewIntermediate_Secondary(t *testing.T) {
 	err = msgpackrpc.CallWithCodec(codec, "ConnectCA.Sign", &req, &cert)
 	require.NoError(t, err)
 	verifyLeafCert(t, activeRoot, cert.CertPEM)
+
+	// Wait for dc2's old intermediate to be pruned after expiring.
+	oldIntermediate := activeRoot.IntermediateCerts[0]
+	retry.Run(t, func(r *retry.R) {
+		store := s2.caManager.delegate.State()
+		_, storedRoot, err := store.CARootActive(nil)
+		r.Check(err)
+
+		if storedRoot.IntermediateCerts[0] == oldIntermediate {
+			r.Fatal("old intermediate should be gone")
+		}
+	})
 }
 
 func TestConnectCA_ConfigurationSet_RootRotation_Secondary(t *testing.T) {

From 7150ccad85e941e4ce56de1760526bc24cba0e0e Mon Sep 17 00:00:00 2001
From: Kyle Havlovitz
Date: Wed, 31 Aug 2022 11:43:21 -0700
Subject: [PATCH 69/93] Add changelog note

---
 .changelog/14429.txt | 3 +++
 1 file changed, 3 insertions(+)
 create mode 100644 .changelog/14429.txt

diff --git a/.changelog/14429.txt b/.changelog/14429.txt
new file mode 100644
index 0000000000..4387d1ed40
--- /dev/null
+++ b/.changelog/14429.txt
@@ -0,0 +1,3 @@
+```release-note:bug
+connect: Fixed an issue where intermediate certificates could build up in the root CA because they were never being pruned after expiring.
+```
\ No newline at end of file
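For illustration only, outside the patch series: the pruning condition added above reduces to a `NotAfter` comparison on each parsed certificate. A standalone sketch of that check using only the Go standard library (the PEM input is a placeholder):

```go
package caexample

import (
	"crypto/x509"
	"encoding/pem"
	"fmt"
	"time"
)

// expired reports whether a PEM-encoded certificate's NotAfter
// timestamp has already passed.
func expired(certPEM string) (bool, error) {
	block, _ := pem.Decode([]byte(certPEM))
	if block == nil {
		return false, fmt.Errorf("no PEM block found")
	}
	cert, err := x509.ParseCertificate(block.Bytes)
	if err != nil {
		return false, err
	}
	return !cert.NotAfter.After(time.Now()), nil
}
```

Note the design choice in the patch: the most recently appended intermediate is kept even when expired, because it is still the active signing certificate until a rotation replaces it.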
From a80e0bcd007b1b8549cefc5f33f1dfa4a92823b4 Mon Sep 17 00:00:00 2001
From: malizz
Date: Wed, 31 Aug 2022 13:03:38 -0700
Subject: [PATCH 70/93] validate args before deleting proxy defaults (#14290)

* validate args before deleting proxy defaults

* add changelog

* validate name when normalizing proxy defaults

* add test for proxyConfigEntry

* add comments
---
 .changelog/14290.txt               |  3 +++
 agent/structs/config_entry.go      | 10 +++++++++-
 agent/structs/config_entry_test.go | 21 +++++++++++++++++++++
 3 files changed, 33 insertions(+), 1 deletion(-)
 create mode 100644 .changelog/14290.txt

diff --git a/.changelog/14290.txt b/.changelog/14290.txt
new file mode 100644
index 0000000000..719bd67b3b
--- /dev/null
+++ b/.changelog/14290.txt
@@ -0,0 +1,3 @@
+```release-note:bugfix
+envoy: validate name before deleting proxy default configurations.
+```
\ No newline at end of file

diff --git a/agent/structs/config_entry.go b/agent/structs/config_entry.go
index 8b3b0a8d24..88c523a155 100644
--- a/agent/structs/config_entry.go
+++ b/agent/structs/config_entry.go
@@ -3,12 +3,13 @@ package structs
 import (
 	"errors"
 	"fmt"
-	"github.com/miekg/dns"
 	"net"
 	"strconv"
 	"strings"
 	"time"
 
+	"github.com/miekg/dns"
+
 	"github.com/hashicorp/go-multierror"
 	"github.com/mitchellh/hashstructure"
 	"github.com/mitchellh/mapstructure"
@@ -362,6 +363,13 @@ func (e *ProxyConfigEntry) Normalize() error {
 	}
 
 	e.Kind = ProxyDefaults
+
+	// proxy default config only accepts global configs
+	// this check is replicated in normalize() and validate(),
+	// since validate is not called by all the endpoints (e.g., delete)
+	if e.Name != "" && e.Name != ProxyConfigGlobal {
+		return fmt.Errorf("invalid name (%q), only %q is supported", e.Name, ProxyConfigGlobal)
+	}
 	e.Name = ProxyConfigGlobal
 
 	e.EnterpriseMeta.Normalize()
diff --git a/agent/structs/config_entry_test.go b/agent/structs/config_entry_test.go
index e462f6aa74..a9e113f21e 100644
--- a/agent/structs/config_entry_test.go
+++ b/agent/structs/config_entry_test.go
@@ -2944,6 +2944,27 @@ func TestParseUpstreamConfig(t *testing.T) {
 	}
 }
 
+func TestProxyConfigEntry(t *testing.T) {
+	cases := map[string]configEntryTestcase{
+		"proxy config name provided is not global": {
+			entry: &ProxyConfigEntry{
+				Name: "foo",
+			},
+			normalizeErr: `invalid name ("foo"), only "global" is supported`,
+		},
+		"proxy config has no name": {
+			entry: &ProxyConfigEntry{
+				Name: "",
+			},
+			expected: &ProxyConfigEntry{
+				Name: ProxyConfigGlobal,
+				Kind: ProxyDefaults,
+			},
+		},
+	}
+	testConfigEntryNormalizeAndValidate(t, cases)
+}
+
 func requireContainsLower(t *testing.T, haystack, needle string) {
 	t.Helper()
 	require.Contains(t, strings.ToLower(haystack), strings.ToLower(needle))
 }
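For illustration only, outside the patch series: a sketch of how the new `Normalize` guard behaves for callers of the `agent/structs` package changed above (assuming that package is importable from the calling module):

```go
package main

import (
	"fmt"

	"github.com/hashicorp/consul/agent/structs"
)

func main() {
	// A non-global name is now rejected before any delete can proceed.
	bad := &structs.ProxyConfigEntry{Name: "foo"}
	fmt.Println(bad.Normalize()) // invalid name ("foo"), only "global" is supported

	// An empty name is still normalized to the global entry.
	ok := &structs.ProxyConfigEntry{Name: ""}
	if err := ok.Normalize(); err == nil {
		fmt.Println(ok.Name) // global
	}
}
```

The check lives in `Normalize` rather than `Validate` because, as the patch comment notes, not all endpoints (such as delete) call `Validate`.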
From 095764a482afb9bfa90f619a2937c7d93112dcd4 Mon Sep 17 00:00:00 2001
From: Luke Kysow <1034429+lkysow@users.noreply.github.com>
Date: Wed, 31 Aug 2022 13:06:35 -0700
Subject: [PATCH 71/93] Suppress "unbound variable" error. (#14424)

Without this change, you'd see this error:

```
./run-tests.sh: line 49: LAMBDA_TESTS_ENABLED: unbound variable
./run-tests.sh: line 49: LAMBDA_TESTS_ENABLED: unbound variable
```
---
 test/integration/connect/envoy/run-tests.sh | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/test/integration/connect/envoy/run-tests.sh b/test/integration/connect/envoy/run-tests.sh
index f0e6b165cb..1de8ed3e50 100755
--- a/test/integration/connect/envoy/run-tests.sh
+++ b/test/integration/connect/envoy/run-tests.sh
@@ -46,7 +46,8 @@ function network_snippet {
 }
 
 function aws_snippet {
-  if [[ ! -z "$LAMBDA_TESTS_ENABLED" ]]; then
+  LAMBDA_TESTS_ENABLED=${LAMBDA_TESTS_ENABLED:-false}
+  if [ "$LAMBDA_TESTS_ENABLED" != false ]; then
     local snippet=""
 
     # The Lambda integration cases assume that a Lambda function exists in $AWS_REGION with an ARN of $AWS_LAMBDA_ARN.

From f476a80aa9408194d939f550a98fb1ef8efc7985 Mon Sep 17 00:00:00 2001
From: Jorge Marey
Date: Wed, 31 Aug 2022 23:14:25 +0200
Subject: [PATCH 72/93] Fix typo on documentation

---
 website/content/docs/connect/proxies/envoy.mdx | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/website/content/docs/connect/proxies/envoy.mdx b/website/content/docs/connect/proxies/envoy.mdx
index 020a0510f0..812adff17b 100644
--- a/website/content/docs/connect/proxies/envoy.mdx
+++ b/website/content/docs/connect/proxies/envoy.mdx
@@ -761,7 +761,7 @@ definition](/docs/connect/registration/service-registration) or
 
 - `envoy_listener_tracing_json` - Specifies a [tracing configuration](https://www.envoyproxy.io/docs/envoy/latest/api-v3/extensions/filters/network/http_connection_manager/v3/http_connection_manager.proto#envoy-v3-api-msg-extensions-filters-network-http-connection-manager-v3-httpconnectionmanager-tracing)
-  to be inserter in the public and upstreams listeners of the proxy.
+  to be inserted in the proxy's public and upstreams listeners.

From f2b147e57562f613edb59a6a004ac3cf9361b251 Mon Sep 17 00:00:00 2001
From: "Chris S. Kim"
Date: Thu, 1 Sep 2022 10:32:59 -0400
Subject: [PATCH 73/93] Add Internal.ServiceDump support for querying by
 PeerName

---
 agent/consul/internal_endpoint.go | 113 ++++++++++++++++++------------
 agent/ui_endpoint.go              |   4 +-
 2 files changed, 71 insertions(+), 46 deletions(-)

diff --git a/agent/consul/internal_endpoint.go b/agent/consul/internal_endpoint.go
index 28d7f365e0..534513c8ab 100644
--- a/agent/consul/internal_endpoint.go
+++ b/agent/consul/internal_endpoint.go
@@ -153,64 +153,87 @@ func (m *Internal) ServiceDump(args *structs.ServiceDumpRequest, reply *structs.
&args.QueryOptions, &reply.QueryMeta, func(ws memdb.WatchSet, state *state.Store) error { - // we don't support calling this endpoint for a specific peer - if args.PeerName != "" { - return fmt.Errorf("this endpoint does not support specifying a peer: %q", args.PeerName) - } - // this maxIndex will be the max of the ServiceDump calls and the PeeringList call var maxIndex uint64 - // get a local dump for services - index, nodes, err := state.ServiceDump(ws, args.ServiceKind, args.UseServiceKind, &args.EnterpriseMeta, structs.DefaultPeerKeyword) - if err != nil { - return fmt.Errorf("could not get a service dump for local nodes: %w", err) - } - - if index > maxIndex { - maxIndex = index - } - reply.Nodes = nodes - - // get a list of all peerings - index, listedPeerings, err := state.PeeringList(ws, args.EnterpriseMeta) - if err != nil { - return fmt.Errorf("could not list peers for service dump %w", err) - } - - if index > maxIndex { - maxIndex = index - } - - for _, p := range listedPeerings { - index, importedNodes, err := state.ServiceDump(ws, args.ServiceKind, args.UseServiceKind, &args.EnterpriseMeta, p.Name) + // If PeerName is not empty, we return only the imported services from that peer + if args.PeerName != "" { + // get a local dump for services + index, nodes, err := state.ServiceDump(ws, + args.ServiceKind, + args.UseServiceKind, + // Note we fetch imported services with wildcard namespace because imported services' namespaces + // are in a different locality; regardless of our local namespace, we return all imported services + // of the local partition. + args.EnterpriseMeta.WithWildcardNamespace(), + args.PeerName) if err != nil { - return fmt.Errorf("could not get a service dump for peer %q: %w", p.Name, err) + return fmt.Errorf("could not get a service dump for peer %q: %w", args.PeerName, err) } if index > maxIndex { maxIndex = index } - reply.ImportedNodes = append(reply.ImportedNodes, importedNodes...) - } + reply.Index = maxIndex + reply.ImportedNodes = nodes - // Get, store, and filter gateway services - idx, gatewayServices, err := state.DumpGatewayServices(ws) - if err != nil { - return err - } - reply.Gateways = gatewayServices + } else { + // otherwise return both local and all imported services - if idx > maxIndex { - maxIndex = idx - } - reply.Index = maxIndex + // get a local dump for services + index, nodes, err := state.ServiceDump(ws, args.ServiceKind, args.UseServiceKind, &args.EnterpriseMeta, structs.DefaultPeerKeyword) + if err != nil { + return fmt.Errorf("could not get a service dump for local nodes: %w", err) + } - raw, err := filter.Execute(reply.Nodes) - if err != nil { - return fmt.Errorf("could not filter local service dump: %w", err) + if index > maxIndex { + maxIndex = index + } + reply.Nodes = nodes + + // get a list of all peerings + index, listedPeerings, err := state.PeeringList(ws, args.EnterpriseMeta) + if err != nil { + return fmt.Errorf("could not list peers for service dump %w", err) + } + + if index > maxIndex { + maxIndex = index + } + + for _, p := range listedPeerings { + // Note we fetch imported services with wildcard namespace because imported services' namespaces + // are in a different locality; regardless of our local namespace, we return all imported services + // of the local partition. 
+ index, importedNodes, err := state.ServiceDump(ws, args.ServiceKind, args.UseServiceKind, args.EnterpriseMeta.WithWildcardNamespace(), p.Name) + if err != nil { + return fmt.Errorf("could not get a service dump for peer %q: %w", p.Name, err) + } + + if index > maxIndex { + maxIndex = index + } + reply.ImportedNodes = append(reply.ImportedNodes, importedNodes...) + } + + // Get, store, and filter gateway services + idx, gatewayServices, err := state.DumpGatewayServices(ws) + if err != nil { + return err + } + reply.Gateways = gatewayServices + + if idx > maxIndex { + maxIndex = idx + } + reply.Index = maxIndex + + raw, err := filter.Execute(reply.Nodes) + if err != nil { + return fmt.Errorf("could not filter local service dump: %w", err) + } + reply.Nodes = raw.(structs.CheckServiceNodes) } - reply.Nodes = raw.(structs.CheckServiceNodes) importedRaw, err := filter.Execute(reply.ImportedNodes) if err != nil { diff --git a/agent/ui_endpoint.go b/agent/ui_endpoint.go index 2f74d8e59e..df6f359de5 100644 --- a/agent/ui_endpoint.go +++ b/agent/ui_endpoint.go @@ -211,7 +211,9 @@ func (s *HTTPHandlers) UIServices(resp http.ResponseWriter, req *http.Request) ( if done := s.parse(resp, req, &args.Datacenter, &args.QueryOptions); done { return nil, nil } - + if peer := req.URL.Query().Get("peer"); peer != "" { + args.PeerName = peer + } if err := s.parseEntMeta(req, &args.EnterpriseMeta); err != nil { return nil, err } From 39c5b91272878c7352999455d83df59d1fa9fe2f Mon Sep 17 00:00:00 2001 From: Michael Klein Date: Thu, 1 Sep 2022 17:37:37 +0200 Subject: [PATCH 74/93] ui: chore upgrade to ember-qunit v5 (#14430) * Refactor remaining `moduleFor`-tests `moduleFor*` will be removed from ember-qunit v5 * Upgrade ember-qunit to v5 * Update how we use ember-sinon-qunit With ember-qunit v5 we need to use ember-sinon-qunit differently. * Fix submit-blank test We can't click on disabled buttons with new test-helpers. We need to adapt the test accordingly. * Make sure we await fill-in with form yaml step We need to await `fill-in`. This changes the reducer function in the step to create a proper await chain. * Fix show-routing test We need to await a tick before visiting again. 
* Remove redundant `wait one tick`-step * remove unneeded "next Tick" promise from form step * Increase timeout show-routing feature * Comment on pause hack for show-routing test --- ui/packages/consul-ui/package.json | 8 +- .../dc/services/show-routing.feature | 7 + .../tests/acceptance/submit-blank.feature | 2 +- ui/packages/consul-ui/tests/index.html | 7 + .../components/data-source-test.js | 16 +- .../services/repository/auth-method-test.js | 170 ++++----- .../services/repository/coordinate-test.js | 124 ++++--- .../services/repository/dc-test.js | 57 +-- .../repository/discovery-chain-test.js | 76 ++-- .../services/repository/kv-test.js | 171 +++++---- .../services/repository/node-test.js | 113 +++--- .../services/repository/policy-test.js | 179 ++++----- .../services/repository/role-test.js | 185 +++++----- .../services/repository/service-test.js | 129 +++---- .../services/repository/session-test.js | 181 ++++----- .../services/repository/token-test.js | 349 +++++++++--------- .../services/repository/topology-test.js | 78 ++-- .../integration/services/routlet-test.js | 63 ++-- .../utils/dom/event-source/callable-test.js | 12 +- .../tests/steps/interactions/form.js | 15 +- ui/packages/consul-ui/tests/test-helper.js | 7 + .../tests/unit/adapters/application-test.js | 3 +- .../unit/mixins/with-blocking-actions-test.js | 14 +- .../consul-ui/tests/unit/routes/dc-test.js | 3 +- .../unit/serializers/application-test.js | 3 +- .../tests/unit/serializers/kv-test.js | 6 +- .../consul-ui/tests/unit/utils/ascend-test.js | 3 +- .../consul-ui/tests/unit/utils/atob-test.js | 3 +- .../consul-ui/tests/unit/utils/btoa-test.js | 3 +- .../tests/unit/utils/dom/closest-test.js | 6 +- .../unit/utils/dom/create-listeners-test.js | 22 +- .../utils/dom/event-source/blocking-test.js | 8 +- .../unit/utils/dom/event-source/cache-test.js | 20 +- .../utils/dom/event-source/callable-test.js | 10 +- .../utils/dom/event-source/openable-test.js | 6 +- .../tests/unit/utils/http/create-url-test.js | 3 +- .../tests/unit/utils/isFolder-test.js | 3 +- .../tests/unit/utils/keyToArray-test.js | 3 +- .../tests/unit/utils/left-trim-test.js | 3 +- .../tests/unit/utils/promisedTimeout-test.js | 3 +- .../tests/unit/utils/right-trim-test.js | 3 +- .../tests/unit/utils/routing/walk-test.js | 6 +- .../tests/unit/utils/ucfirst-test.js | 3 +- ui/yarn.lock | 237 +++++++----- 44 files changed, 1214 insertions(+), 1109 deletions(-) diff --git a/ui/packages/consul-ui/package.json b/ui/packages/consul-ui/package.json index 7960af1a75..64e731b32c 100644 --- a/ui/packages/consul-ui/package.json +++ b/ui/packages/consul-ui/package.json @@ -60,6 +60,7 @@ "@docfy/ember": "^0.4.1", "@ember/optional-features": "^1.3.0", "@ember/render-modifiers": "^1.0.2", + "@ember/test-helpers": "^2.1.4", "@glimmer/component": "^1.0.0", "@glimmer/tracking": "^1.0.0", "@hashicorp/ember-cli-api-double": "^3.1.0", @@ -135,7 +136,7 @@ "ember-page-title": "^6.2.1", "ember-power-select": "^4.0.5", "ember-power-select-with-create": "^0.8.0", - "ember-qunit": "^4.6.0", + "ember-qunit": "^5.1.1", "ember-ref-modifier": "^1.0.0", "ember-render-helpers": "^0.2.0", "ember-resolver": "^8.0.0", @@ -166,7 +167,7 @@ "pretender": "^3.2.0", "prettier": "^1.10.2", "pretty-ms": "^7.0.1", - "qunit-dom": "^1.0.0", + "qunit-dom": "^1.6.0", "react-is": "^17.0.1", "refractor": "^3.5.0", "remark-autolink-headings": "^6.0.1", @@ -177,7 +178,8 @@ "tippy.js": "^6.2.7", "torii": "^0.10.1", "unist-util-visit": "^2.0.3", - "wayfarer": "^7.0.1" + "wayfarer": "^7.0.1", + "qunit": "^2.13.0" }, 
"engines": { "node": ">=10 <=14" diff --git a/ui/packages/consul-ui/tests/acceptance/dc/services/show-routing.feature b/ui/packages/consul-ui/tests/acceptance/dc/services/show-routing.feature index 8befb868f8..3cbf392f73 100644 --- a/ui/packages/consul-ui/tests/acceptance/dc/services/show-routing.feature +++ b/ui/packages/consul-ui/tests/acceptance/dc/services/show-routing.feature @@ -52,6 +52,13 @@ Feature: dc / services / show-routing: Show Routing for Service service: service-1 --- And I see routing on the tabs + # something weird is going on with this test + # without waiting we issue a url reload that + # will make the test timeout. + # waiting will "fix" this - we should look into + # the underlying reason for this soon. This is + # only a quick-fix to land ember-qunit v5. + And pause for 1000 And I visit the service page for yaml --- dc: dc1 diff --git a/ui/packages/consul-ui/tests/acceptance/submit-blank.feature b/ui/packages/consul-ui/tests/acceptance/submit-blank.feature index 9062810120..f18cdbbc31 100644 --- a/ui/packages/consul-ui/tests/acceptance/submit-blank.feature +++ b/ui/packages/consul-ui/tests/acceptance/submit-blank.feature @@ -10,7 +10,7 @@ Feature: submit-blank dc: datacenter --- Then the url should be /datacenter/[Slug]/create - And I submit + Then I don't see submitIsEnabled Then the url should be /datacenter/[Slug]/create Where: -------------------------- diff --git a/ui/packages/consul-ui/tests/index.html b/ui/packages/consul-ui/tests/index.html index f841d34b91..4ee572ab4b 100644 --- a/ui/packages/consul-ui/tests/index.html +++ b/ui/packages/consul-ui/tests/index.html @@ -16,6 +16,13 @@ {{content-for "body"}} {{content-for "test-body"}} +
+    <!-- qunit and ember-testing root elements; assumed to match the standard ember-qunit v5 blueprint -->
+    <div id="qunit"></div>
+    <div id="qunit-fixture">
+      <div id="ember-testing-container">
+        <div id="ember-testing"></div>
+      </div>
+    </div>
+ {{content-for "body-footer"}} diff --git a/ui/packages/consul-ui/tests/integration/components/data-source-test.js b/ui/packages/consul-ui/tests/integration/components/data-source-test.js index 10702240a5..6c343d6177 100644 --- a/ui/packages/consul-ui/tests/integration/components/data-source-test.js +++ b/ui/packages/consul-ui/tests/integration/components/data-source-test.js @@ -1,13 +1,13 @@ -import { module } from 'qunit'; +import { module, test } from 'qunit'; import { setupRenderingTest } from 'ember-qunit'; import { clearRender, render, waitUntil } from '@ember/test-helpers'; import hbs from 'htmlbars-inline-precompile'; -import test from 'ember-sinon-qunit/test-support/test'; import Service, { inject as service } from '@ember/service'; import DataSourceComponent from 'consul-ui/components/data-source/index'; import { BlockingEventSource as RealEventSource } from 'consul-ui/utils/dom/event-source'; +import sinon from 'sinon'; const createFakeBlockingEventSource = function() { const EventSource = function(cb) { @@ -39,10 +39,10 @@ module('Integration | Component | data-source', function(hooks) { // Set any properties with this.set('myProperty', 'value'); // Handle any actions with this.set('myAction', function(val) { ... }); assert.expect(9); - const close = this.stub(); - const open = this.stub(); - const addEventListener = this.stub(); - const removeEventListener = this.stub(); + const close = sinon.stub(); + const open = sinon.stub(); + const addEventListener = sinon.stub(); + const removeEventListener = sinon.stub(); let count = 0; const fakeService = class extends Service { close = close; @@ -98,8 +98,8 @@ module('Integration | Component | data-source', function(hooks) { }); test('error actions are triggered when errors are dispatched', async function(assert) { const source = new RealEventSource(); - const error = this.stub(); - const close = this.stub(); + const error = sinon.stub(); + const close = sinon.stub(); const fakeService = class extends Service { close = close; open(uri, obj) { diff --git a/ui/packages/consul-ui/tests/integration/services/repository/auth-method-test.js b/ui/packages/consul-ui/tests/integration/services/repository/auth-method-test.js index ce9ab231c4..93de5150ff 100644 --- a/ui/packages/consul-ui/tests/integration/services/repository/auth-method-test.js +++ b/ui/packages/consul-ui/tests/integration/services/repository/auth-method-test.js @@ -1,92 +1,94 @@ -import { moduleFor, test } from 'ember-qunit'; +import { setupTest } from 'ember-qunit'; import repo from 'consul-ui/tests/helpers/repo'; -import { skip } from 'qunit'; +import { module, skip, test } from 'qunit'; -const NAME = 'auth-method'; -moduleFor(`service:repository/${NAME}`, `Integration | Service | ${NAME}`, { - // Specify the other units that are required for this test. - integration: true, -}); -const dc = 'dc-1'; -const id = 'auth-method-name'; -const undefinedNspace = 'default'; -const undefinedPartition = 'default'; -const partition = 'default'; -[undefinedNspace, 'team-1', undefined].forEach(nspace => { - test(`findAllByDatacenter returns the correct data for list endpoint when nspace is ${nspace}`, function(assert) { - return repo( - 'auth-method', - 'findAllByDatacenter', - this.subject(), - function retrieveStub(stub) { - return stub( - `/v1/acl/auth-methods?dc=${dc}${typeof nspace !== 'undefined' ? `&ns=${nspace}` : ``}${ - typeof partition !== 'undefined' ? 
`&partition=${partition}` : `` - }`, - { - CONSUL_AUTH_METHOD_COUNT: '3', - } - ); - }, - function performTest(service) { - return service.findAllByDatacenter({ - dc: dc, - nspace: nspace || undefinedNspace, - partition: partition || undefinedPartition, - }); - }, - function performAssertion(actual, expected) { - assert.deepEqual( - actual, - expected(function(payload) { - return payload.map(function(item) { +module(`Integration | Service | auth-method`, function(hooks) { + setupTest(hooks); + const dc = 'dc-1'; + const id = 'auth-method-name'; + const undefinedNspace = 'default'; + const undefinedPartition = 'default'; + const partition = 'default'; + [undefinedNspace, 'team-1', undefined].forEach(nspace => { + test(`findAllByDatacenter returns the correct data for list endpoint when nspace is ${nspace}`, function(assert) { + const subject = this.owner.lookup('service:repository/auth-method'); + + return repo( + 'auth-method', + 'findAllByDatacenter', + subject, + function retrieveStub(stub) { + return stub( + `/v1/acl/auth-methods?dc=${dc}${typeof nspace !== 'undefined' ? `&ns=${nspace}` : ``}${ + typeof partition !== 'undefined' ? `&partition=${partition}` : `` + }`, + { + CONSUL_AUTH_METHOD_COUNT: '3', + } + ); + }, + function performTest(service) { + return service.findAllByDatacenter({ + dc: dc, + nspace: nspace || undefinedNspace, + partition: partition || undefinedPartition, + }); + }, + function performAssertion(actual, expected) { + assert.deepEqual( + actual, + expected(function(payload) { + return payload.map(function(item) { + return Object.assign({}, item, { + Datacenter: dc, + Namespace: item.Namespace || undefinedNspace, + Partition: item.Partition || undefinedPartition, + uid: `["${item.Partition || undefinedPartition}","${item.Namespace || + undefinedNspace}","${dc}","${item.Name}"]`, + }); + }); + }) + ); + } + ); + }); + skip(`findBySlug returns the correct data for item endpoint when the nspace is ${nspace}`, function(assert) { + const subject = this.owner.lookup('service:repository/auth-method'); + + return repo( + 'AuthMethod', + 'findBySlug', + subject, + function retrieveStub(stub) { + return stub( + `/v1/acl/auth-method/${id}?dc=${dc}${ + typeof nspace !== 'undefined' ? `&ns=${nspace}` : `` + }` + ); + }, + function performTest(service) { + return service.findBySlug(id, dc, nspace || undefinedNspace); + }, + function performAssertion(actual, expected) { + assert.deepEqual( + actual, + expected(function(payload) { + const item = payload; return Object.assign({}, item, { Datacenter: dc, Namespace: item.Namespace || undefinedNspace, - Partition: item.Partition || undefinedPartition, - uid: `["${item.Partition || undefinedPartition}","${item.Namespace || - undefinedNspace}","${dc}","${item.Name}"]`, + uid: `["${item.Namespace || undefinedNspace}","${dc}","${item.Name}"]`, + meta: { + cacheControl: undefined, + cursor: undefined, + dc: dc, + nspace: item.Namespace || undefinedNspace, + }, }); - }); - }) - ); - } - ); - }); - skip(`findBySlug returns the correct data for item endpoint when the nspace is ${nspace}`, function(assert) { - return repo( - 'AuthMethod', - 'findBySlug', - this.subject(), - function retrieveStub(stub) { - return stub( - `/v1/acl/auth-method/${id}?dc=${dc}${ - typeof nspace !== 'undefined' ? 
`&ns=${nspace}` : `` - }` - ); - }, - function performTest(service) { - return service.findBySlug(id, dc, nspace || undefinedNspace); - }, - function performAssertion(actual, expected) { - assert.deepEqual( - actual, - expected(function(payload) { - const item = payload; - return Object.assign({}, item, { - Datacenter: dc, - Namespace: item.Namespace || undefinedNspace, - uid: `["${item.Namespace || undefinedNspace}","${dc}","${item.Name}"]`, - meta: { - cacheControl: undefined, - cursor: undefined, - dc: dc, - nspace: item.Namespace || undefinedNspace, - }, - }); - }) - ); - } - ); + }) + ); + } + ); + }); }); }); diff --git a/ui/packages/consul-ui/tests/integration/services/repository/coordinate-test.js b/ui/packages/consul-ui/tests/integration/services/repository/coordinate-test.js index de96343c2e..1e19a84ca5 100644 --- a/ui/packages/consul-ui/tests/integration/services/repository/coordinate-test.js +++ b/ui/packages/consul-ui/tests/integration/services/repository/coordinate-test.js @@ -1,68 +1,74 @@ -import { moduleFor, test } from 'ember-qunit'; +import { setupTest } from 'ember-qunit'; +import { module, test } from 'qunit'; import repo from 'consul-ui/tests/helpers/repo'; import { get } from '@ember/object'; -const NAME = 'coordinate'; -moduleFor(`service:repository/${NAME}`, `Integration | Service | ${NAME}`, { - // Specify the other units that are required for this test. - integration: true, -}); const dc = 'dc-1'; const nspace = 'default'; const partition = 'default'; const now = new Date().getTime(); -test('findAllByDatacenter returns the correct data for list endpoint', function(assert) { - get(this.subject(), 'store').serializerFor(NAME).timestamp = function() { - return now; - }; - return repo( - 'Coordinate', - 'findAllByDatacenter', - this.subject(), - function retrieveStub(stub) { - return stub( - `/v1/coordinate/nodes?dc=${dc}${ - typeof partition !== 'undefined' ? `&partition=${partition}` : `` - }`, - { - CONSUL_NODE_COUNT: '100', - } +module(`Integration | Service | coordinate`, function(hooks) { + setupTest(hooks); + + test('findAllByDatacenter returns the correct data for list endpoint', function(assert) { + const subject = this.owner.lookup('service:repository/coordinate'); + + get(subject, 'store').serializerFor('coordinate').timestamp = function() { + return now; + }; + return repo( + 'Coordinate', + 'findAllByDatacenter', + subject, + function retrieveStub(stub) { + return stub( + `/v1/coordinate/nodes?dc=${dc}${ + typeof partition !== 'undefined' ? 
`&partition=${partition}` : `` + }`, + { + CONSUL_NODE_COUNT: '100', + } + ); + }, + function performTest(service) { + return service.findAllByDatacenter({ dc, partition }); + }, + function performAssertion(actual, expected) { + assert.deepEqual( + actual, + expected(function(payload) { + return payload.map(item => + Object.assign({}, item, { + SyncTime: now, + Datacenter: dc, + Partition: partition, + // TODO: nspace isn't required here, once we've + // refactored out our Serializer this can go + uid: `["${partition}","${nspace}","${dc}","${item.Node}"]`, + }) + ); + }) + ); + } + ); + }); + test('findAllByNode calls findAllByDatacenter with the correct arguments', function(assert) { + assert.expect(3); + const datacenter = 'dc-1'; + const conf = { + cursor: 1, + }; + const service = this.owner.lookup('service:repository/coordinate'); + service.findAllByDatacenter = function(params, configuration) { + assert.equal( + arguments.length, + 2, + 'Expected to be called with the correct number of arguments' ); - }, - function performTest(service) { - return service.findAllByDatacenter({ dc, partition }); - }, - function performAssertion(actual, expected) { - assert.deepEqual( - actual, - expected(function(payload) { - return payload.map(item => - Object.assign({}, item, { - SyncTime: now, - Datacenter: dc, - Partition: partition, - // TODO: nspace isn't required here, once we've - // refactored out our Serializer this can go - uid: `["${partition}","${nspace}","${dc}","${item.Node}"]`, - }) - ); - }) - ); - } - ); -}); -test('findAllByNode calls findAllByDatacenter with the correct arguments', function(assert) { - assert.expect(3); - const datacenter = 'dc-1'; - const conf = { - cursor: 1, - }; - const service = this.subject(); - service.findAllByDatacenter = function(params, configuration) { - assert.equal(arguments.length, 2, 'Expected to be called with the correct number of arguments'); - assert.equal(params.dc, datacenter); - assert.deepEqual(configuration, conf); - return Promise.resolve([]); - }; - return service.findAllByNode({ node: 'node-name', dc: datacenter }, conf); + assert.equal(params.dc, datacenter); + assert.deepEqual(configuration, conf); + return Promise.resolve([]); + }; + return service.findAllByNode({ node: 'node-name', dc: datacenter }, conf); + }); }); diff --git a/ui/packages/consul-ui/tests/integration/services/repository/dc-test.js b/ui/packages/consul-ui/tests/integration/services/repository/dc-test.js index a491c98e0c..6763d54bed 100644 --- a/ui/packages/consul-ui/tests/integration/services/repository/dc-test.js +++ b/ui/packages/consul-ui/tests/integration/services/repository/dc-test.js @@ -1,30 +1,31 @@ -import { moduleFor } from 'ember-qunit'; -import { skip } from 'qunit'; +import { setupTest } from 'ember-qunit'; +import { module, skip } from 'qunit'; import repo from 'consul-ui/tests/helpers/repo'; -const NAME = 'dc'; -moduleFor(`service:repository/${NAME}`, `Integration | Service | ${NAME}`, { - // Specify the other units that are required for this test. 
- integration: true, -}); -skip("findBySlug (doesn't interact with the API) but still needs an int test"); -skip('findAll returns the correct data for list endpoint', function(assert) { - return repo( - 'Dc', - 'findAll', - this.subject(), - function retrieveStub(stub) { - return stub(`/v1/catalog/datacenters`, { - CONSUL_DATACENTER_COUNT: '100', - }); - }, - function performTest(service) { - return service.findAll(); - }, - function performAssertion(actual, expected) { - actual.forEach((item, i) => { - assert.equal(actual[i].Name, item.Name); - assert.equal(item.Local, i === 0); - }); - } - ); + +module(`Integration | Service | dc`, function(hooks) { + setupTest(hooks); + skip("findBySlug (doesn't interact with the API) but still needs an int test"); + skip('findAll returns the correct data for list endpoint', function(assert) { + const subject = this.owner.lookup('service:repository/dc'); + + return repo( + 'Dc', + 'findAll', + subject, + function retrieveStub(stub) { + return stub(`/v1/catalog/datacenters`, { + CONSUL_DATACENTER_COUNT: '100', + }); + }, + function performTest(service) { + return service.findAll(); + }, + function performAssertion(actual, expected) { + actual.forEach((item, i) => { + assert.equal(actual[i].Name, item.Name); + assert.equal(item.Local, i === 0); + }); + } + ); + }); }); diff --git a/ui/packages/consul-ui/tests/integration/services/repository/discovery-chain-test.js b/ui/packages/consul-ui/tests/integration/services/repository/discovery-chain-test.js index b289fd75c5..e7d3da2ba4 100644 --- a/ui/packages/consul-ui/tests/integration/services/repository/discovery-chain-test.js +++ b/ui/packages/consul-ui/tests/integration/services/repository/discovery-chain-test.js @@ -1,42 +1,42 @@ -import { moduleFor, test } from 'ember-qunit'; +import { module, test } from 'qunit'; +import { setupTest } from 'ember-qunit'; import repo from 'consul-ui/tests/helpers/repo'; -moduleFor('service:repository/discovery-chain', 'Integration | Service | discovery-chain', { - // Specify the other units that are required for this test. 
- integration: true, -}); -const dc = 'dc-1'; -const id = 'slug'; -test('findBySlug returns the correct data for item endpoint', function(assert) { - return repo( - 'Service', - 'findBySlug', - this.subject(), - function retrieveStub(stub) { - return stub(`/v1/discovery-chain/${id}?dc=${dc}`, { - CONSUL_DISCOVERY_CHAIN_COUNT: 1, - }); - }, - function performTest(service) { - return service.findBySlug({ id, dc }); - }, - function performAssertion(actual, expected) { - const result = expected(function(payload) { - return Object.assign( - {}, - { - Datacenter: dc, - uid: `["default","default","${dc}","${id}"]`, - meta: { - cacheControl: undefined, - cursor: undefined, +module('Integration | Service | discovery-chain', function(hooks) { + setupTest(hooks); + const dc = 'dc-1'; + const id = 'slug'; + test('findBySlug returns the correct data for item endpoint', function(assert) { + return repo( + 'Service', + 'findBySlug', + this.owner.lookup('service:repository/discovery-chain'), + function retrieveStub(stub) { + return stub(`/v1/discovery-chain/${id}?dc=${dc}`, { + CONSUL_DISCOVERY_CHAIN_COUNT: 1, + }); + }, + function performTest(service) { + return service.findBySlug({ id, dc }); + }, + function performAssertion(actual, expected) { + const result = expected(function(payload) { + return Object.assign( + {}, + { + Datacenter: dc, + uid: `["default","default","${dc}","${id}"]`, + meta: { + cacheControl: undefined, + cursor: undefined, + }, }, - }, - payload - ); - }); - assert.equal(actual.Datacenter, result.Datacenter); - assert.equal(actual.uid, result.uid); - } - ); + payload + ); + }); + assert.equal(actual.Datacenter, result.Datacenter); + assert.equal(actual.uid, result.uid); + } + ); + }); }); diff --git a/ui/packages/consul-ui/tests/integration/services/repository/kv-test.js b/ui/packages/consul-ui/tests/integration/services/repository/kv-test.js index ee7f5a085f..bde99eb3f4 100644 --- a/ui/packages/consul-ui/tests/integration/services/repository/kv-test.js +++ b/ui/packages/consul-ui/tests/integration/services/repository/kv-test.js @@ -1,90 +1,97 @@ -import { moduleFor, test } from 'ember-qunit'; +import { module, test } from 'qunit'; +import { setupTest } from 'ember-qunit'; import repo from 'consul-ui/tests/helpers/repo'; import { env } from '../../../../env'; import { get } from '@ember/object'; -const NAME = 'kv'; -moduleFor(`service:repository/${NAME}`, `Integration | Service | ${NAME}`, { - // Specify the other units that are required for this test. - integration: true, -}); -const dc = 'dc-1'; -const id = 'key-name'; -const now = new Date().getTime(); -const undefinedNspace = 'default'; -const undefinedPartition = 'default'; -const partition = 'default'; -[undefinedNspace, 'team-1', undefined].forEach(nspace => { - test(`findAllBySlug returns the correct data for list endpoint when nspace is ${nspace}`, function(assert) { - get(this.subject(), 'store').serializerFor(NAME).timestamp = function() { - return now; - }; - return repo( - 'Kv', - 'findAllBySlug', - this.subject(), - function retrieveTest(stub) { - return stub( - `/v1/kv/${id}?keys&dc=${dc}${typeof nspace !== 'undefined' ? `&ns=${nspace}` : ``}${ - typeof partition !== 'undefined' ? 
`&partition=${partition}` : `` - }`, - { - CONSUL_KV_COUNT: '1', - } - ); - }, - function performTest(service) { - return service.findAllBySlug({ - id, - dc, - ns: nspace || undefinedNspace, - partition: partition || undefinedPartition, - }); - }, - function performAssertion(actual, expected) { - const expectedNspace = env('CONSUL_NSPACES_ENABLED') - ? nspace || undefinedNspace - : 'default'; - const expectedPartition = env('CONSUL_PARTITIONS_ENABLED') - ? partition || undefinedPartition - : 'default'; - actual.forEach(item => { - assert.equal(item.uid, `["${expectedPartition}","${expectedNspace}","${dc}","${item.Key}"]`); - assert.equal(item.Datacenter, dc); - }); - } - ); - }); - test(`findBySlug returns the correct data for item endpoint when nspace is ${nspace}`, function(assert) { - return repo( - 'Kv', - 'findBySlug', - this.subject(), - function(stub) { - return stub( - `/v1/kv/${id}?dc=${dc}${typeof nspace !== 'undefined' ? `&ns=${nspace}` : ``}${ - typeof partition !== 'undefined' ? `&partition=${partition}` : `` - }` - ); - }, - function(service) { - return service.findBySlug({ - id, - dc, - ns: nspace || undefinedNspace, - partition: partition || undefinedPartition, - }); - }, - function(actual, expected) { - expected( - function(payload) { +module(`Integration | Service | kv`, function(hooks) { + setupTest(hooks); + const dc = 'dc-1'; + const id = 'key-name'; + const now = new Date().getTime(); + const undefinedNspace = 'default'; + const undefinedPartition = 'default'; + const partition = 'default'; + [undefinedNspace, 'team-1', undefined].forEach(nspace => { + test(`findAllBySlug returns the correct data for list endpoint when nspace is ${nspace}`, function(assert) { + const subject = this.owner.lookup('service:repository/kv'); + + get(subject, 'store').serializerFor('kv').timestamp = function() { + return now; + }; + return repo( + 'Kv', + 'findAllBySlug', + subject, + function retrieveTest(stub) { + return stub( + `/v1/kv/${id}?keys&dc=${dc}${typeof nspace !== 'undefined' ? `&ns=${nspace}` : ``}${ + typeof partition !== 'undefined' ? `&partition=${partition}` : `` + }`, + { + CONSUL_KV_COUNT: '1', + } + ); + }, + function performTest(service) { + return service.findAllBySlug({ + id, + dc, + ns: nspace || undefinedNspace, + partition: partition || undefinedPartition, + }); + }, + function performAssertion(actual, expected) { + const expectedNspace = env('CONSUL_NSPACES_ENABLED') + ? nspace || undefinedNspace + : 'default'; + const expectedPartition = env('CONSUL_PARTITIONS_ENABLED') + ? partition || undefinedPartition + : 'default'; + actual.forEach(item => { + assert.equal( + item.uid, + `["${expectedPartition}","${expectedNspace}","${dc}","${item.Key}"]` + ); + assert.equal(item.Datacenter, dc); + }); + } + ); + }); + test(`findBySlug returns the correct data for item endpoint when nspace is ${nspace}`, function(assert) { + const subject = this.owner.lookup('service:repository/kv'); + + return repo( + 'Kv', + 'findBySlug', + subject, + function(stub) { + return stub( + `/v1/kv/${id}?dc=${dc}${typeof nspace !== 'undefined' ? `&ns=${nspace}` : ``}${ + typeof partition !== 'undefined' ? 
`&partition=${partition}` : `` + }` + ); + }, + function(service) { + return service.findBySlug({ + id, + dc, + ns: nspace || undefinedNspace, + partition: partition || undefinedPartition, + }); + }, + function(actual, expected) { + expected(function(payload) { const item = payload[0]; - assert.equal(actual.uid, `["${item.Partition || undefinedPartition}","${item.Namespace || - undefinedNspace}","${dc}","${item.Key}"]`); + assert.equal( + actual.uid, + `["${item.Partition || undefinedPartition}","${item.Namespace || + undefinedNspace}","${dc}","${item.Key}"]` + ); assert.equal(actual.Datacenter, dc); - } - ); - } - ); + }); + } + ); + }); }); }); diff --git a/ui/packages/consul-ui/tests/integration/services/repository/node-test.js b/ui/packages/consul-ui/tests/integration/services/repository/node-test.js index 0735c57d62..2e43465ccd 100644 --- a/ui/packages/consul-ui/tests/integration/services/repository/node-test.js +++ b/ui/packages/consul-ui/tests/integration/services/repository/node-test.js @@ -1,64 +1,67 @@ -import { moduleFor, test } from 'ember-qunit'; +import { setupTest } from 'ember-qunit'; +import { module, test } from 'qunit'; import repo from 'consul-ui/tests/helpers/repo'; import { get } from '@ember/object'; -const NAME = 'node'; -moduleFor(`service:repository/${NAME}`, `Integration | Service | ${NAME}`, { - // Specify the other units that are required for this test. - integration: true, -}); const dc = 'dc-1'; const id = 'token-name'; const now = new Date().getTime(); const nspace = 'default'; const partition = 'default'; -test('findByDatacenter returns the correct data for list endpoint', function(assert) { - get(this.subject(), 'store').serializerFor(NAME).timestamp = function() { - return now; - }; - return repo( - 'Node', - 'findAllByDatacenter', - this.subject(), - function retrieveStub(stub) { - return stub( - `/v1/internal/ui/nodes?dc=${dc}${ - typeof partition !== 'undefined' ? `&partition=${partition}` : `` - }`, - { - CONSUL_NODE_COUNT: '100', - } - ); - }, - function performTest(service) { - return service.findAllByDatacenter({ dc, partition }); - }, - function performAssertion(actual, expected) { - actual.forEach(item => { - assert.equal(item.uid, `["${partition}","${nspace}","${dc}","${item.ID}"]`); - assert.equal(item.Datacenter, dc); - }); - } - ); -}); -test('findBySlug returns the correct data for item endpoint', function(assert) { - return repo( - 'Node', - 'findBySlug', - this.subject(), - function(stub) { - return stub( - `/v1/internal/ui/node/${id}?dc=${dc}${ - typeof partition !== 'undefined' ? `&partition=${partition}` : `` - }` - ); - }, - function(service) { - return service.findBySlug({ id, dc, partition }); - }, - function(actual, expected) { - assert.equal(actual.uid, `["${partition}","${nspace}","${dc}","${actual.ID}"]`); - assert.equal(actual.Datacenter, dc); - } - ); +module(`Integration | Service | node`, function(hooks) { + setupTest(hooks); + + test('findByDatacenter returns the correct data for list endpoint', function(assert) { + const subject = this.owner.lookup('service:repository/node'); + get(subject, 'store').serializerFor('node').timestamp = function() { + return now; + }; + return repo( + 'Node', + 'findAllByDatacenter', + subject, + function retrieveStub(stub) { + return stub( + `/v1/internal/ui/nodes?dc=${dc}${ + typeof partition !== 'undefined' ? 
`&partition=${partition}` : `` + }`, + { + CONSUL_NODE_COUNT: '100', + } + ); + }, + function performTest(service) { + return service.findAllByDatacenter({ dc, partition }); + }, + function performAssertion(actual, expected) { + actual.forEach(item => { + assert.equal(item.uid, `["${partition}","${nspace}","${dc}","${item.ID}"]`); + assert.equal(item.Datacenter, dc); + }); + } + ); + }); + test('findBySlug returns the correct data for item endpoint', function(assert) { + const subject = this.owner.lookup('service:repository/node'); + + return repo( + 'Node', + 'findBySlug', + subject, + function(stub) { + return stub( + `/v1/internal/ui/node/${id}?dc=${dc}${ + typeof partition !== 'undefined' ? `&partition=${partition}` : `` + }` + ); + }, + function(service) { + return service.findBySlug({ id, dc, partition }); + }, + function(actual, expected) { + assert.equal(actual.uid, `["${partition}","${nspace}","${dc}","${actual.ID}"]`); + assert.equal(actual.Datacenter, dc); + } + ); + }); }); diff --git a/ui/packages/consul-ui/tests/integration/services/repository/policy-test.js b/ui/packages/consul-ui/tests/integration/services/repository/policy-test.js index b7fe07ed1d..662ff8715b 100644 --- a/ui/packages/consul-ui/tests/integration/services/repository/policy-test.js +++ b/ui/packages/consul-ui/tests/integration/services/repository/policy-test.js @@ -1,92 +1,95 @@ -import { moduleFor, test, skip } from 'ember-qunit'; +import { module, skip, test } from 'qunit'; +import { setupTest } from 'ember-qunit'; import { get } from '@ember/object'; import repo from 'consul-ui/tests/helpers/repo'; -const NAME = 'policy'; -moduleFor(`service:repository/${NAME}`, `Integration | Service | ${NAME}`, { - // Specify the other units that are required for this test. - integration: true, -}); -skip('translate returns the correct data for the translate endpoint'); -const now = new Date().getTime(); -const dc = 'dc-1'; -const id = 'policy-name'; -const undefinedNspace = 'default'; -const undefinedPartition = 'default'; -const partition = 'default'; -[undefinedNspace, 'team-1', undefined].forEach(nspace => { - test(`findByDatacenter returns the correct data for list endpoint when nspace is ${nspace}`, function(assert) { - get(this.subject(), 'store').serializerFor(NAME).timestamp = function() { - return now; - }; - return repo( - 'Policy', - 'findAllByDatacenter', - this.subject(), - function retrieveStub(stub) { - return stub( - `/v1/acl/policies?dc=${dc}${typeof nspace !== 'undefined' ? `&ns=${nspace}` : ``}${ - typeof partition !== 'undefined' ? 
`&partition=${partition}` : `` - }`, - { - CONSUL_POLICY_COUNT: '10', - } - ); - }, - function performTest(service) { - return service.findAllByDatacenter({ - dc, - ns: nspace || undefinedNspace, - partition: partition || undefinedPartition, - }); - }, - function performAssertion(actual, expected) { - assert.deepEqual( - actual, - expected(function(payload) { - return payload.map(item => - Object.assign({}, item, { - SyncTime: now, - Datacenter: dc, - Namespace: item.Namespace || undefinedNspace, - Partition: item.Partition || undefinedPartition, - uid: `["${item.Partition || undefinedPartition}","${item.Namespace || - undefinedNspace}","${dc}","${item.ID}"]`, - }) - ); - }) - ); - } - ); - }); - test(`findBySlug returns the correct data for item endpoint when the nspace is ${nspace}`, function(assert) { - return repo( - 'Policy', - 'findBySlug', - this.subject(), - function retrieveStub(stub) { - return stub( - `/v1/acl/policy/${id}?dc=${dc}${typeof nspace !== 'undefined' ? `&ns=${nspace}` : ``}${ - typeof partition !== 'undefined' ? `&partition=${partition}` : `` - }` - ); - }, - function performTest(service) { - return service.findBySlug({ - id, - dc, - ns: nspace || undefinedNspace, - partition: partition || undefinedPartition, - }); - }, - function performAssertion(actual, expected) { - assert.equal( - actual.uid, - `["${partition || undefinedPartition}","${nspace || undefinedNspace}","${dc}","${ - actual.ID - }"]` - ); - assert.equal(actual.Datacenter, dc); - } - ); + +module(`Integration | Service | policy`, function(hooks) { + setupTest(hooks); + skip('translate returns the correct data for the translate endpoint'); + const now = new Date().getTime(); + const dc = 'dc-1'; + const id = 'policy-name'; + const undefinedNspace = 'default'; + const undefinedPartition = 'default'; + const partition = 'default'; + [undefinedNspace, 'team-1', undefined].forEach(nspace => { + test(`findByDatacenter returns the correct data for list endpoint when nspace is ${nspace}`, function(assert) { + const subject = this.owner.lookup('service:repository/policy'); + + get(subject, 'store').serializerFor('policy').timestamp = function() { + return now; + }; + return repo( + 'Policy', + 'findAllByDatacenter', + subject, + function retrieveStub(stub) { + return stub( + `/v1/acl/policies?dc=${dc}${typeof nspace !== 'undefined' ? `&ns=${nspace}` : ``}${ + typeof partition !== 'undefined' ? `&partition=${partition}` : `` + }`, + { + CONSUL_POLICY_COUNT: '10', + } + ); + }, + function performTest(service) { + return service.findAllByDatacenter({ + dc, + ns: nspace || undefinedNspace, + partition: partition || undefinedPartition, + }); + }, + function performAssertion(actual, expected) { + assert.deepEqual( + actual, + expected(function(payload) { + return payload.map(item => + Object.assign({}, item, { + SyncTime: now, + Datacenter: dc, + Namespace: item.Namespace || undefinedNspace, + Partition: item.Partition || undefinedPartition, + uid: `["${item.Partition || undefinedPartition}","${item.Namespace || + undefinedNspace}","${dc}","${item.ID}"]`, + }) + ); + }) + ); + } + ); + }); + test(`findBySlug returns the correct data for item endpoint when the nspace is ${nspace}`, function(assert) { + const subject = this.owner.lookup('service:repository/policy'); + return repo( + 'Policy', + 'findBySlug', + subject, + function retrieveStub(stub) { + return stub( + `/v1/acl/policy/${id}?dc=${dc}${typeof nspace !== 'undefined' ? `&ns=${nspace}` : ``}${ + typeof partition !== 'undefined' ? 
`&partition=${partition}` : `` + }` + ); + }, + function performTest(service) { + return service.findBySlug({ + id, + dc, + ns: nspace || undefinedNspace, + partition: partition || undefinedPartition, + }); + }, + function performAssertion(actual, expected) { + assert.equal( + actual.uid, + `["${partition || undefinedPartition}","${nspace || undefinedNspace}","${dc}","${ + actual.ID + }"]` + ); + assert.equal(actual.Datacenter, dc); + } + ); + }); }); }); diff --git a/ui/packages/consul-ui/tests/integration/services/repository/role-test.js b/ui/packages/consul-ui/tests/integration/services/repository/role-test.js index 474eeaf65a..9241b4323b 100644 --- a/ui/packages/consul-ui/tests/integration/services/repository/role-test.js +++ b/ui/packages/consul-ui/tests/integration/services/repository/role-test.js @@ -1,102 +1,105 @@ -import { moduleFor, test, skip } from 'ember-qunit'; +import { module, skip, test } from 'qunit'; +import { setupTest } from 'ember-qunit'; import { get } from '@ember/object'; import repo from 'consul-ui/tests/helpers/repo'; import { createPolicies } from 'consul-ui/tests/helpers/normalizers'; -const NAME = 'role'; -moduleFor(`service:repository/${NAME}`, `Integration | Service | ${NAME}`, { - // Specify the other units that are required for this test. - integration: true, -}); -const now = new Date().getTime(); -const dc = 'dc-1'; -const id = 'role-name'; -const undefinedNspace = 'default'; -const undefinedPartition = 'default'; -const partition = 'default'; -[undefinedNspace, 'team-1', undefined].forEach(nspace => { - test(`findByDatacenter returns the correct data for list endpoint when nspace is ${nspace}`, function(assert) { - get(this.subject(), 'store').serializerFor(NAME).timestamp = function() { - return now; - }; - return repo( - 'Role', - 'findAllByDatacenter', - this.subject(), - function retrieveStub(stub) { - return stub( - `/v1/acl/roles?dc=${dc}${typeof nspace !== 'undefined' ? `&ns=${nspace}` : ``}${ - typeof partition !== 'undefined' ? `&partition=${partition}` : `` - }`, - { - CONSUL_ROLE_COUNT: '100', - } - ); - }, - function performTest(service) { - return service.findAllByDatacenter({ - dc: dc, - nspace: nspace || undefinedNspace, - partition: partition || undefinedPartition, - }); - }, - function performAssertion(actual, expected) { - assert.deepEqual( - actual, - expected(function(payload) { - return payload.map(item => - Object.assign({}, item, { - SyncTime: now, +module(`Integration | Service | role`, function(hooks) { + setupTest(hooks); + const now = new Date().getTime(); + const dc = 'dc-1'; + const id = 'role-name'; + const undefinedNspace = 'default'; + const undefinedPartition = 'default'; + const partition = 'default'; + [undefinedNspace, 'team-1', undefined].forEach(nspace => { + test(`findByDatacenter returns the correct data for list endpoint when nspace is ${nspace}`, function(assert) { + const subject = this.owner.lookup('service:repository/role'); + + get(subject, 'store').serializerFor('role').timestamp = function() { + return now; + }; + return repo( + 'Role', + 'findAllByDatacenter', + subject, + function retrieveStub(stub) { + return stub( + `/v1/acl/roles?dc=${dc}${typeof nspace !== 'undefined' ? `&ns=${nspace}` : ``}${ + typeof partition !== 'undefined' ? 
`&partition=${partition}` : `` + }`, + { + CONSUL_ROLE_COUNT: '100', + } + ); + }, + function performTest(service) { + return service.findAllByDatacenter({ + dc: dc, + nspace: nspace || undefinedNspace, + partition: partition || undefinedPartition, + }); + }, + function performAssertion(actual, expected) { + assert.deepEqual( + actual, + expected(function(payload) { + return payload.map(item => + Object.assign({}, item, { + SyncTime: now, + Datacenter: dc, + Namespace: item.Namespace || undefinedNspace, + Partition: item.Partition || undefinedPartition, + uid: `["${item.Partition || undefinedPartition}","${item.Namespace || + undefinedNspace}","${dc}","${item.ID}"]`, + Policies: createPolicies(item), + }) + ); + }) + ); + } + ); + }); + // FIXME: For some reason this tries to initialize the metrics service? + skip(`findBySlug returns the correct data for item endpoint when the nspace is ${nspace}`, function(assert) { + const subject = this.owner.lookup('service:repository/role'); + + return repo( + 'Role', + 'findBySlug', + subject, + function retrieveStub(stub) { + return stub( + `/v1/acl/role/${id}?dc=${dc}${typeof nspace !== 'undefined' ? `&ns=${nspace}` : ``}${ + typeof partition !== 'undefined' ? `&partition=${partition}` : `` + }` + ); + }, + function performTest(service) { + return service.findBySlug({ + id, + dc, + ns: nspace || undefinedNspace, + partition: partition || undefinedPartition, + }); + }, + function performAssertion(actual, expected) { + assert.deepEqual( + actual, + expected(function(payload) { + const item = payload; + return Object.assign({}, item, { Datacenter: dc, Namespace: item.Namespace || undefinedNspace, Partition: item.Partition || undefinedPartition, - uid: `["${item.Partition || undefinedPartition}","${item.Namespace || + uid: `["${partition || undefinedPartition}","${item.Namespace || undefinedNspace}","${dc}","${item.ID}"]`, Policies: createPolicies(item), - }) - ); - }) - ); - } - ); - }); - // FIXME: For some reason this tries to initialize the metrics service? - skip(`findBySlug returns the correct data for item endpoint when the nspace is ${nspace}`, function(assert) { - return repo( - 'Role', - 'findBySlug', - this.subject(), - function retrieveStub(stub) { - return stub( - `/v1/acl/role/${id}?dc=${dc}${typeof nspace !== 'undefined' ? `&ns=${nspace}` : ``}${ - typeof partition !== 'undefined' ? 
`&partition=${partition}` : `` - }` - ); - }, - function performTest(service) { - return service.findBySlug({ - id, - dc, - ns: nspace || undefinedNspace, - partition: partition || undefinedPartition, - }); - }, - function performAssertion(actual, expected) { - assert.deepEqual( - actual, - expected(function(payload) { - const item = payload; - return Object.assign({}, item, { - Datacenter: dc, - Namespace: item.Namespace || undefinedNspace, - Partition: item.Partition || undefinedPartition, - uid: `["${partition || undefinedPartition}","${item.Namespace || - undefinedNspace}","${dc}","${item.ID}"]`, - Policies: createPolicies(item), - }); - }) - ); - } - ); + }); + }) + ); + } + ); + }); }); }); diff --git a/ui/packages/consul-ui/tests/integration/services/repository/service-test.js b/ui/packages/consul-ui/tests/integration/services/repository/service-test.js index 77b7cf92fc..40174005f7 100644 --- a/ui/packages/consul-ui/tests/integration/services/repository/service-test.js +++ b/ui/packages/consul-ui/tests/integration/services/repository/service-test.js @@ -1,69 +1,70 @@ -import { moduleFor, test } from 'ember-qunit'; +import { module, test } from 'qunit'; +import { setupTest } from 'ember-qunit'; import repo from 'consul-ui/tests/helpers/repo'; import { get } from '@ember/object'; -const NAME = 'service'; -moduleFor(`service:repository/${NAME}`, `Integration | Service | ${NAME}`, { - // Specify the other units that are required for this test. - integration: true, -}); -const dc = 'dc-1'; -const now = new Date().getTime(); -const undefinedNspace = 'default'; -const undefinedPartition = 'default'; -const partition = 'default'; -[undefinedNspace, 'team-1', undefined].forEach(nspace => { - test(`findGatewayBySlug returns the correct data for list endpoint when nspace is ${nspace}`, function(assert) { - get(this.subject(), 'store').serializerFor(NAME).timestamp = function() { - return now; - }; - const gateway = 'gateway'; - const conf = { - cursor: 1, - }; - return repo( - 'Service', - 'findGatewayBySlug', - this.subject(), - function retrieveStub(stub) { - return stub( - `/v1/internal/ui/gateway-services-nodes/${gateway}?dc=${dc}${ - typeof nspace !== 'undefined' ? `&ns=${nspace}` : `` - }${typeof partition !== 'undefined' ? 
`&partition=${partition}` : ``}`, - { - CONSUL_SERVICE_COUNT: '100', - } - ); - }, - function performTest(service) { - return service.findGatewayBySlug( - { - gateway, - dc, - ns: nspace || undefinedNspace, - partition: partition || undefinedPartition, - }, - conf - ); - }, - function performAssertion(actual, expected) { - const result = expected(function(payload) { - return payload.map(item => - Object.assign({}, item, { - SyncTime: now, - Datacenter: dc, - Namespace: item.Namespace || undefinedNspace, - Partition: item.Partition || undefinedPartition, - uid: `["${item.Partition || undefinedPartition}","${item.Namespace || - undefinedNspace}","${dc}","${item.Name}"]`, - }) + +module(`Integration | Service | service`, function(hooks) { + setupTest(hooks); + const dc = 'dc-1'; + const now = new Date().getTime(); + const undefinedNspace = 'default'; + const undefinedPartition = 'default'; + const partition = 'default'; + [undefinedNspace, 'team-1', undefined].forEach(nspace => { + test(`findGatewayBySlug returns the correct data for list endpoint when nspace is ${nspace}`, function(assert) { + const subject = this.owner.lookup('service:repository/service'); + get(subject, 'store').serializerFor('service').timestamp = function() { + return now; + }; + const gateway = 'gateway'; + const conf = { + cursor: 1, + }; + return repo( + 'Service', + 'findGatewayBySlug', + subject, + function retrieveStub(stub) { + return stub( + `/v1/internal/ui/gateway-services-nodes/${gateway}?dc=${dc}${ + typeof nspace !== 'undefined' ? `&ns=${nspace}` : `` + }${typeof partition !== 'undefined' ? `&partition=${partition}` : ``}`, + { + CONSUL_SERVICE_COUNT: '100', + } ); - }); - assert.equal(actual[0].SyncTime, result[0].SyncTime); - assert.equal(actual[0].Datacenter, result[0].Datacenter); - assert.equal(actual[0].Namespace, result[0].Namespace); - assert.equal(actual[0].Partition, result[0].Partition); - assert.equal(actual[0].uid, result[0].uid); - } - ); + }, + function performTest(service) { + return service.findGatewayBySlug( + { + gateway, + dc, + ns: nspace || undefinedNspace, + partition: partition || undefinedPartition, + }, + conf + ); + }, + function performAssertion(actual, expected) { + const result = expected(function(payload) { + return payload.map(item => + Object.assign({}, item, { + SyncTime: now, + Datacenter: dc, + Namespace: item.Namespace || undefinedNspace, + Partition: item.Partition || undefinedPartition, + uid: `["${item.Partition || undefinedPartition}","${item.Namespace || + undefinedNspace}","${dc}","${item.Name}"]`, + }) + ); + }); + assert.equal(actual[0].SyncTime, result[0].SyncTime); + assert.equal(actual[0].Datacenter, result[0].Datacenter); + assert.equal(actual[0].Namespace, result[0].Namespace); + assert.equal(actual[0].Partition, result[0].Partition); + assert.equal(actual[0].uid, result[0].uid); + } + ); + }); }); }); diff --git a/ui/packages/consul-ui/tests/integration/services/repository/session-test.js b/ui/packages/consul-ui/tests/integration/services/repository/session-test.js index c99742fbe2..eee607e1ba 100644 --- a/ui/packages/consul-ui/tests/integration/services/repository/session-test.js +++ b/ui/packages/consul-ui/tests/integration/services/repository/session-test.js @@ -1,99 +1,102 @@ -import { moduleFor, test } from 'ember-qunit'; +import { module, test } from 'qunit'; +import { setupTest } from 'ember-qunit'; import repo from 'consul-ui/tests/helpers/repo'; import { get } from '@ember/object'; -const NAME = 'session'; -moduleFor(`service:repository/${NAME}`, 
`Integration | Service | ${NAME}`, { - // Specify the other units that are required for this test. - integration: true, -}); -const dc = 'dc-1'; -const id = 'node-name'; -const now = new Date().getTime(); -const undefinedNspace = 'default'; -const undefinedPartition = 'default'; -const partition = 'default'; -[undefinedNspace, 'team-1', undefined].forEach(nspace => { - test(`findByNode returns the correct data for list endpoint when the nspace is ${nspace}`, function(assert) { - get(this.subject(), 'store').serializerFor(NAME).timestamp = function() { - return now; - }; - return repo( - 'Session', - 'findByNode', - this.subject(), - function retrieveStub(stub) { - return stub( - `/v1/session/node/${id}?dc=${dc}${typeof nspace !== 'undefined' ? `&ns=${nspace}` : ``}${ - typeof partition !== 'undefined' ? `&partition=${partition}` : `` - }`, - { - CONSUL_SESSION_COUNT: '100', - } - ); - }, - function performTest(service) { - return service.findByNode({ - id, - dc, - ns: nspace || undefinedNspace, - partition: partition || undefinedPartition, - }); - }, - function performAssertion(actual, expected) { - assert.deepEqual( - actual, - expected(function(payload) { - return payload.map(item => - Object.assign({}, item, { - SyncTime: now, +module(`Integration | Service | session`, function(hooks) { + setupTest(hooks); + + const dc = 'dc-1'; + const id = 'node-name'; + const now = new Date().getTime(); + const undefinedNspace = 'default'; + const undefinedPartition = 'default'; + const partition = 'default'; + [undefinedNspace, 'team-1', undefined].forEach(nspace => { + test(`findByNode returns the correct data for list endpoint when the nspace is ${nspace}`, function(assert) { + const subject = this.owner.lookup('service:repository/session'); + + get(subject, 'store').serializerFor('session').timestamp = function() { + return now; + }; + return repo( + 'Session', + 'findByNode', + subject, + function retrieveStub(stub) { + return stub( + `/v1/session/node/${id}?dc=${dc}${ + typeof nspace !== 'undefined' ? `&ns=${nspace}` : `` + }${typeof partition !== 'undefined' ? `&partition=${partition}` : ``}`, + { + CONSUL_SESSION_COUNT: '100', + } + ); + }, + function performTest(service) { + return service.findByNode({ + id, + dc, + ns: nspace || undefinedNspace, + partition: partition || undefinedPartition, + }); + }, + function performAssertion(actual, expected) { + assert.deepEqual( + actual, + expected(function(payload) { + return payload.map(item => + Object.assign({}, item, { + SyncTime: now, + Datacenter: dc, + Namespace: item.Namespace || undefinedNspace, + Partition: item.Partition || undefinedPartition, + uid: `["${item.Partition || undefinedPartition}","${item.Namespace || + undefinedNspace}","${dc}","${item.ID}"]`, + }) + ); + }) + ); + } + ); + }); + test(`findByKey returns the correct data for item endpoint when the nspace is ${nspace}`, function(assert) { + const subject = this.owner.lookup('service:repository/session'); + return repo( + 'Session', + 'findByKey', + subject, + function(stub) { + return stub( + `/v1/session/info/${id}?dc=${dc}${ + typeof nspace !== 'undefined' ? `&ns=${nspace}` : `` + }${typeof partition !== 'undefined' ? 
`&partition=${partition}` : ``}` + ); + }, + function(service) { + return service.findByKey({ + id, + dc, + ns: nspace || undefinedNspace, + partition: partition || undefinedPartition, + }); + }, + function(actual, expected) { + assert.deepEqual( + actual, + expected(function(payload) { + const item = payload[0]; + return Object.assign({}, item, { Datacenter: dc, Namespace: item.Namespace || undefinedNspace, Partition: item.Partition || undefinedPartition, uid: `["${item.Partition || undefinedPartition}","${item.Namespace || undefinedNspace}","${dc}","${item.ID}"]`, - }) - ); - }) - ); - } - ); - }); - test(`findByKey returns the correct data for item endpoint when the nspace is ${nspace}`, function(assert) { - return repo( - 'Session', - 'findByKey', - this.subject(), - function(stub) { - return stub( - `/v1/session/info/${id}?dc=${dc}${typeof nspace !== 'undefined' ? `&ns=${nspace}` : ``}${ - typeof partition !== 'undefined' ? `&partition=${partition}` : `` - }` - ); - }, - function(service) { - return service.findByKey({ - id, - dc, - ns: nspace || undefinedNspace, - partition: partition || undefinedPartition, - }); - }, - function(actual, expected) { - assert.deepEqual( - actual, - expected(function(payload) { - const item = payload[0]; - return Object.assign({}, item, { - Datacenter: dc, - Namespace: item.Namespace || undefinedNspace, - Partition: item.Partition || undefinedPartition, - uid: `["${item.Partition || undefinedPartition}","${item.Namespace || - undefinedNspace}","${dc}","${item.ID}"]`, - }); - }) - ); - } - ); + }); + }) + ); + } + ); + }); }); }); diff --git a/ui/packages/consul-ui/tests/integration/services/repository/token-test.js b/ui/packages/consul-ui/tests/integration/services/repository/token-test.js index 2142f031bb..2af8d73d7f 100644 --- a/ui/packages/consul-ui/tests/integration/services/repository/token-test.js +++ b/ui/packages/consul-ui/tests/integration/services/repository/token-test.js @@ -1,181 +1,184 @@ -import { moduleFor, test, skip } from 'ember-qunit'; +import { module, skip, test } from 'qunit'; +import { setupTest } from 'ember-qunit'; import repo from 'consul-ui/tests/helpers/repo'; import { createPolicies } from 'consul-ui/tests/helpers/normalizers'; -const NAME = 'token'; -moduleFor(`service:repository/${NAME}`, `Integration | Service | ${NAME}`, { - // Specify the other units that are required for this test. - integration: true, -}); -skip('clone returns the correct data for the clone endpoint'); -const dc = 'dc-1'; -const id = 'token-id'; -const undefinedNspace = 'default'; -const undefinedPartition = 'default'; -const partition = 'default'; -[undefinedNspace, 'team-1', undefined].forEach(nspace => { - test(`findByDatacenter returns the correct data for list endpoint when nspace is ${nspace}`, function(assert) { - return repo( - 'Token', - 'findAllByDatacenter', - this.subject(), - function retrieveStub(stub) { - return stub( - `/v1/acl/tokens?dc=${dc}${typeof nspace !== 'undefined' ? `&ns=${nspace}` : ``}${ - typeof partition !== 'undefined' ? 
`&partition=${partition}` : `` - }`, - { - CONSUL_TOKEN_COUNT: '100', - } - ); - }, - function performTest(service) { - return service.findAllByDatacenter({ - dc, - ns: nspace || undefinedNspace, - partition: partition || undefinedPartition, - }); - }, - function performAssertion(actual, expected) { - assert.deepEqual( - actual, - expected(function(payload) { - return payload.map(function(item) { - return Object.assign({}, item, { - Datacenter: dc, - CreateTime: new Date(item.CreateTime), - Namespace: item.Namespace || undefinedNspace, - Partition: item.Partition || undefinedPartition, - uid: `["${item.Partition || undefinedPartition}","${item.Namespace || - undefinedNspace}","${dc}","${item.AccessorID}"]`, - Policies: createPolicies(item), - }); - }); - }) - ); - } - ); - }); - test(`findBySlug returns the correct data for item endpoint when nspace is ${nspace}`, function(assert) { - return repo( - 'Token', - 'findBySlug', - this.subject(), - function retrieveStub(stub) { - return stub( - `/v1/acl/token/${id}?dc=${dc}${typeof nspace !== 'undefined' ? `&ns=${nspace}` : ``}${ - typeof partition !== 'undefined' ? `&partition=${partition}` : `` - }` - ); - }, - function performTest(service) { - return service.findBySlug({ - id, - dc, - ns: nspace || undefinedNspace, - partition: partition || undefinedPartition, - }); - }, - function performAssertion(actual, expected) { - expected(function(item) { - assert.equal( - actual.uid, - `["${partition || undefinedPartition}","${nspace || undefinedNspace}","${dc}","${ - item.AccessorID - }"]` +module(`Integration | Service | token`, function(hooks) { + setupTest(hooks); + skip('clone returns the correct data for the clone endpoint'); + const dc = 'dc-1'; + const id = 'token-id'; + const undefinedNspace = 'default'; + const undefinedPartition = 'default'; + const partition = 'default'; + [undefinedNspace, 'team-1', undefined].forEach(nspace => { + test(`findByDatacenter returns the correct data for list endpoint when nspace is ${nspace}`, function(assert) { + const subject = this.owner.lookup('service:repository/token'); + return repo( + 'Token', + 'findAllByDatacenter', + subject, + function retrieveStub(stub) { + return stub( + `/v1/acl/tokens?dc=${dc}${typeof nspace !== 'undefined' ? `&ns=${nspace}` : ``}${ + typeof partition !== 'undefined' ? `&partition=${partition}` : `` + }`, + { + CONSUL_TOKEN_COUNT: '100', + } ); - assert.equal(actual.Datacenter, dc); - assert.deepEqual(actual.Policies, createPolicies(item)); - }); - } - ); - }); - test(`findByPolicy returns the correct data when nspace is ${nspace}`, function(assert) { - const policy = 'policy-1'; - return repo( - 'Token', - 'findByPolicy', - this.subject(), - function retrieveStub(stub) { - return stub( - `/v1/acl/tokens?dc=${dc}&policy=${policy}${ - typeof nspace !== 'undefined' ? `&ns=${nspace}` : `` - }${typeof partition !== 'undefined' ? 
`&partition=${partition}` : ``}`, - { - CONSUL_TOKEN_COUNT: '100', - } - ); - }, - function performTest(service) { - return service.findByPolicy({ - id: policy, - dc, - ns: nspace || undefinedNspace, - partition: partition || undefinedPartition, - }); - }, - function performAssertion(actual, expected) { - assert.deepEqual( - actual, - expected(function(payload) { - return payload.map(function(item) { - return Object.assign({}, item, { - Datacenter: dc, - CreateTime: new Date(item.CreateTime), - Namespace: item.Namespace || undefinedNspace, - Partition: item.Partition || undefinedPartition, - uid: `["${item.Partition || undefinedPartition}","${item.Namespace || - undefinedNspace}","${dc}","${item.AccessorID}"]`, - Policies: createPolicies(item), + }, + function performTest(service) { + return service.findAllByDatacenter({ + dc, + ns: nspace || undefinedNspace, + partition: partition || undefinedPartition, + }); + }, + function performAssertion(actual, expected) { + assert.deepEqual( + actual, + expected(function(payload) { + return payload.map(function(item) { + return Object.assign({}, item, { + Datacenter: dc, + CreateTime: new Date(item.CreateTime), + Namespace: item.Namespace || undefinedNspace, + Partition: item.Partition || undefinedPartition, + uid: `["${item.Partition || undefinedPartition}","${item.Namespace || + undefinedNspace}","${dc}","${item.AccessorID}"]`, + Policies: createPolicies(item), + }); }); - }); - }) - ); - } - ); - }); - test(`findByRole returns the correct data when nspace is ${nspace}`, function(assert) { - const role = 'role-1'; - return repo( - 'Token', - 'findByPolicy', - this.subject(), - function retrieveStub(stub) { - return stub( - `/v1/acl/tokens?dc=${dc}&role=${role}${ - typeof nspace !== 'undefined' ? `&ns=${nspace}` : `` - }${typeof partition !== 'undefined' ? `&partition=${partition}` : ``}`, - { - CONSUL_TOKEN_COUNT: '100', - } - ); - }, - function performTest(service) { - return service.findByRole({ - id: role, - dc, - ns: nspace || undefinedNspace, - partition: partition || undefinedPartition, - }); - }, - function performAssertion(actual, expected) { - assert.deepEqual( - actual, - expected(function(payload) { - return payload.map(function(item) { - return Object.assign({}, item, { - Datacenter: dc, - CreateTime: new Date(item.CreateTime), - Namespace: item.Namespace || undefinedNspace, - Partition: item.Partition || undefinedPartition, - uid: `["${item.Partition || undefinedPartition}","${item.Namespace || - undefinedNspace}","${dc}","${item.AccessorID}"]`, - Policies: createPolicies(item), + }) + ); + } + ); + }); + test(`findBySlug returns the correct data for item endpoint when nspace is ${nspace}`, function(assert) { + const subject = this.owner.lookup('service:repository/token'); + return repo( + 'Token', + 'findBySlug', + subject, + function retrieveStub(stub) { + return stub( + `/v1/acl/token/${id}?dc=${dc}${typeof nspace !== 'undefined' ? `&ns=${nspace}` : ``}${ + typeof partition !== 'undefined' ? 
`&partition=${partition}` : `` + }` + ); + }, + function performTest(service) { + return service.findBySlug({ + id, + dc, + ns: nspace || undefinedNspace, + partition: partition || undefinedPartition, + }); + }, + function performAssertion(actual, expected) { + expected(function(item) { + assert.equal( + actual.uid, + `["${partition || undefinedPartition}","${nspace || undefinedNspace}","${dc}","${ + item.AccessorID + }"]` + ); + assert.equal(actual.Datacenter, dc); + assert.deepEqual(actual.Policies, createPolicies(item)); + }); + } + ); + }); + test(`findByPolicy returns the correct data when nspace is ${nspace}`, function(assert) { + const subject = this.owner.lookup('service:repository/token'); + const policy = 'policy-1'; + return repo( + 'Token', + 'findByPolicy', + subject, + function retrieveStub(stub) { + return stub( + `/v1/acl/tokens?dc=${dc}&policy=${policy}${ + typeof nspace !== 'undefined' ? `&ns=${nspace}` : `` + }${typeof partition !== 'undefined' ? `&partition=${partition}` : ``}`, + { + CONSUL_TOKEN_COUNT: '100', + } + ); + }, + function performTest(service) { + return service.findByPolicy({ + id: policy, + dc, + ns: nspace || undefinedNspace, + partition: partition || undefinedPartition, + }); + }, + function performAssertion(actual, expected) { + assert.deepEqual( + actual, + expected(function(payload) { + return payload.map(function(item) { + return Object.assign({}, item, { + Datacenter: dc, + CreateTime: new Date(item.CreateTime), + Namespace: item.Namespace || undefinedNspace, + Partition: item.Partition || undefinedPartition, + uid: `["${item.Partition || undefinedPartition}","${item.Namespace || + undefinedNspace}","${dc}","${item.AccessorID}"]`, + Policies: createPolicies(item), + }); }); - }); - }) - ); - } - ); + }) + ); + } + ); + }); + test(`findByRole returns the correct data when nspace is ${nspace}`, function(assert) { + const subject = this.owner.lookup('service:repository/token'); + const role = 'role-1'; + return repo( + 'Token', + 'findByPolicy', + subject, + function retrieveStub(stub) { + return stub( + `/v1/acl/tokens?dc=${dc}&role=${role}${ + typeof nspace !== 'undefined' ? `&ns=${nspace}` : `` + }${typeof partition !== 'undefined' ? 
`&partition=${partition}` : ``}`, + { + CONSUL_TOKEN_COUNT: '100', + } + ); + }, + function performTest(service) { + return service.findByRole({ + id: role, + dc, + ns: nspace || undefinedNspace, + partition: partition || undefinedPartition, + }); + }, + function performAssertion(actual, expected) { + assert.deepEqual( + actual, + expected(function(payload) { + return payload.map(function(item) { + return Object.assign({}, item, { + Datacenter: dc, + CreateTime: new Date(item.CreateTime), + Namespace: item.Namespace || undefinedNspace, + Partition: item.Partition || undefinedPartition, + uid: `["${item.Partition || undefinedPartition}","${item.Namespace || + undefinedNspace}","${dc}","${item.AccessorID}"]`, + Policies: createPolicies(item), + }); + }); + }) + ); + } + ); + }); }); }); diff --git a/ui/packages/consul-ui/tests/integration/services/repository/topology-test.js b/ui/packages/consul-ui/tests/integration/services/repository/topology-test.js index 928da30fcb..e42eb5c518 100644 --- a/ui/packages/consul-ui/tests/integration/services/repository/topology-test.js +++ b/ui/packages/consul-ui/tests/integration/services/repository/topology-test.js @@ -1,43 +1,43 @@ -import { moduleFor, test } from 'ember-qunit'; +import { module, test } from 'qunit'; +import { setupTest } from 'ember-qunit'; import repo from 'consul-ui/tests/helpers/repo'; -moduleFor('service:repository/topology', 'Integration | Service | topology', { - // Specify the other units that are required for this test. - integration: true, -}); -const dc = 'dc-1'; -const id = 'slug'; -const kind = ''; -test('findBySlug returns the correct data for item endpoint', function(assert) { - return repo( - 'Service', - 'findBySlug', - this.subject(), - function retrieveStub(stub) { - return stub(`/v1/internal/ui/service-topology/${id}?dc=${dc}&${kind}`, { - CONSUL_DISCOVERY_CHAIN_COUNT: 1, - }); - }, - function performTest(service) { - return service.findBySlug({ id, kind, dc }); - }, - function performAssertion(actual, expected) { - const result = expected(function(payload) { - return Object.assign( - {}, - { - Datacenter: dc, - uid: `["default","default","${dc}","${id}"]`, - meta: { - cacheControl: undefined, - cursor: undefined, +module('Integration | Service | topology', function(hooks) { + setupTest(hooks); + const dc = 'dc-1'; + const id = 'slug'; + const kind = ''; + test('findBySlug returns the correct data for item endpoint', function(assert) { + return repo( + 'Service', + 'findBySlug', + this.owner.lookup('service:repository/topology'), + function retrieveStub(stub) { + return stub(`/v1/internal/ui/service-topology/${id}?dc=${dc}&${kind}`, { + CONSUL_DISCOVERY_CHAIN_COUNT: 1, + }); + }, + function performTest(service) { + return service.findBySlug({ id, kind, dc }); + }, + function performAssertion(actual, expected) { + const result = expected(function(payload) { + return Object.assign( + {}, + { + Datacenter: dc, + uid: `["default","default","${dc}","${id}"]`, + meta: { + cacheControl: undefined, + cursor: undefined, + }, }, - }, - payload - ); - }); - assert.equal(actual.Datacenter, result.Datacenter); - assert.equal(actual.uid, result.uid); - } - ); + payload + ); + }); + assert.equal(actual.Datacenter, result.Datacenter); + assert.equal(actual.uid, result.uid); + } + ); + }); }); diff --git a/ui/packages/consul-ui/tests/integration/services/routlet-test.js b/ui/packages/consul-ui/tests/integration/services/routlet-test.js index c0faa5f408..0fd40846e5 100644 --- 
a/ui/packages/consul-ui/tests/integration/services/routlet-test.js +++ b/ui/packages/consul-ui/tests/integration/services/routlet-test.js @@ -1,32 +1,33 @@ -import { moduleFor, test } from 'ember-qunit'; -moduleFor('service:routlet', 'Integration | Routlet', { - // Specify the other units that are required for this test. - integration: true, -}); -test('outletFor works', function(assert) { - const routlet = this.subject(); - routlet.addOutlet('application', { - name: 'application' - }); - routlet.addRoute('dc', {}); - routlet.addOutlet('dc', { - name: 'dc' - }); - routlet.addRoute('dc.services', {}); - routlet.addOutlet('dc.services', { - name: 'dc.services' - }); - routlet.addRoute('dc.services.instances', {}); - - let actual = routlet.outletFor('dc.services'); - let expected = 'dc'; - assert.equal(actual.name, expected); - - actual = routlet.outletFor('dc'); - expected = 'application'; - assert.equal(actual.name, expected); - - actual = routlet.outletFor('application'); - expected = undefined; - assert.equal(actual, expected); +import { module, test } from 'qunit'; +import { setupTest } from 'ember-qunit'; + +module('Integration | Routlet', function(hooks) { + setupTest(hooks); + test('outletFor works', function(assert) { + const routlet = this.owner.lookup('service:routlet'); + routlet.addOutlet('application', { + name: 'application', + }); + routlet.addRoute('dc', {}); + routlet.addOutlet('dc', { + name: 'dc', + }); + routlet.addRoute('dc.services', {}); + routlet.addOutlet('dc.services', { + name: 'dc.services', + }); + routlet.addRoute('dc.services.instances', {}); + + let actual = routlet.outletFor('dc.services'); + let expected = 'dc'; + assert.equal(actual.name, expected); + + actual = routlet.outletFor('dc'); + expected = 'application'; + assert.equal(actual.name, expected); + + actual = routlet.outletFor('application'); + expected = undefined; + assert.equal(actual, expected); + }); }); diff --git a/ui/packages/consul-ui/tests/integration/utils/dom/event-source/callable-test.js b/ui/packages/consul-ui/tests/integration/utils/dom/event-source/callable-test.js index 71d322388e..527b1c7359 100644 --- a/ui/packages/consul-ui/tests/integration/utils/dom/event-source/callable-test.js +++ b/ui/packages/consul-ui/tests/integration/utils/dom/event-source/callable-test.js @@ -1,16 +1,16 @@ import domEventSourceCallable from 'consul-ui/utils/dom/event-source/callable'; import EventTarget from 'consul-ui/utils/dom/event-target/rsvp'; -import { module, skip } from 'qunit'; +import { module, test, skip } from 'qunit'; import { setupTest } from 'ember-qunit'; -import test from 'ember-sinon-qunit/test-support/test'; +import sinon from 'sinon'; module('Integration | Utility | dom/event-source/callable', function(hooks) { setupTest(hooks); test('it dispatches messages', function(assert) { assert.expect(1); const EventSource = domEventSourceCallable(EventTarget); - const listener = this.stub(); + const listener = sinon.stub(); const source = new EventSource( function(configuration) { return new Promise(resolve => { @@ -45,7 +45,7 @@ module('Integration | Utility | dom/event-source/callable', function(hooks) { skip('it dispatches a single open event and closes when called with no callable', function(assert) { assert.expect(4); const EventSource = domEventSourceCallable(EventTarget, Promise); - const listener = this.stub(); + const listener = sinon.stub(); const source = new EventSource(); source.addEventListener('open', function(e) { assert.deepEqual(e.target, this); @@ -60,7 +60,7 @@ 
module('Integration | Utility | dom/event-source/callable', function(hooks) { test('it dispatches a single open event, and calls the specified callable that can dispatch an event', function(assert) { assert.expect(1); const EventSource = domEventSourceCallable(EventTarget); - const listener = this.stub(); + const listener = sinon.stub(); const source = new EventSource(function() { return new Promise(resolve => { setTimeout(() => { @@ -87,7 +87,7 @@ module('Integration | Utility | dom/event-source/callable', function(hooks) { test("it can be closed before the first tick, and therefore doesn't run", function(assert) { assert.expect(4); const EventSource = domEventSourceCallable(EventTarget); - const listener = this.stub(); + const listener = sinon.stub(); const source = new EventSource(); assert.equal(source.readyState, 0); source.close(); diff --git a/ui/packages/consul-ui/tests/steps/interactions/form.js b/ui/packages/consul-ui/tests/steps/interactions/form.js index 5ac5cc1f06..122aa24885 100644 --- a/ui/packages/consul-ui/tests/steps/interactions/form.js +++ b/ui/packages/consul-ui/tests/steps/interactions/form.js @@ -1,6 +1,6 @@ export default function(scenario, find, fillIn, triggerKeyEvent, currentPage) { const dont = `( don't| shouldn't| can't)?`; - const fillInElement = function(page, name, value) { + const fillInElement = async function(page, name, value) { const cm = document.querySelector(`textarea[name="${name}"] + .CodeMirror`); if (cm) { if (!cm.CodeMirror.options.readOnly) { @@ -11,7 +11,7 @@ export default function(scenario, find, fillIn, triggerKeyEvent, currentPage) { return page; } else { const $el = document.querySelector(`[name="${name}"]`); - fillIn($el, value); + await fillIn($el, value); return page; } }; @@ -57,11 +57,13 @@ export default function(scenario, find, fillIn, triggerKeyEvent, currentPage) { } catch (e) { obj = currentPage(); } - const res = Object.keys(data).reduce(function(prev, item, i, arr) { + const res = Object.keys(data).reduce(async function(prev, item, i, arr) { + await prev; + const name = `${obj.prefix || property}[${item}]`; if (negative) { try { - fillInElement(prev, name, data[item]); + await fillInElement(obj, name, data[item]); throw new TypeError(`${item} is editable`); } catch (e) { if (e instanceof TypeError) { @@ -69,10 +71,9 @@ export default function(scenario, find, fillIn, triggerKeyEvent, currentPage) { } } } else { - return fillInElement(prev, name, data[item]); + return await fillInElement(obj, name, data[item]); } - }, obj); - await new Promise(resolve => setTimeout(resolve, 0)); + }, Promise.resolve()); return res; } ) diff --git a/ui/packages/consul-ui/tests/test-helper.js b/ui/packages/consul-ui/tests/test-helper.js index 58bc7a64a5..2d638ea150 100644 --- a/ui/packages/consul-ui/tests/test-helper.js +++ b/ui/packages/consul-ui/tests/test-helper.js @@ -1,9 +1,12 @@ import Application from '../app'; import config from '../config/environment'; +import * as QUnit from 'qunit'; import { setApplication } from '@ember/test-helpers'; +import { setup } from 'qunit-dom'; import { registerWaiter } from '@ember/test'; import './helpers/flash-message'; import start from 'ember-exam/test-support/start'; +import setupSinon from 'ember-sinon-qunit'; import ClientConnections from 'consul-ui/services/client/connections'; @@ -33,6 +36,10 @@ ClientConnections.reopen({ }); const application = Application.create(config.APP); application.inject('component:copy-button', 'clipboard', 'service:clipboard/local-storage'); + setApplication(application); 
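+// qunit-dom's setup() registers the assert.dom() helper on QUnit's assert
+// object, and ember-sinon-qunit's setupSinon() wires up a sinon sandbox that
+// is restored after each test; both must run before start() begins the suite.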
+setup(QUnit.assert); +setupSinon(); + start(); diff --git a/ui/packages/consul-ui/tests/unit/adapters/application-test.js b/ui/packages/consul-ui/tests/unit/adapters/application-test.js index 4c6d52515d..b8dc15e8c8 100644 --- a/ui/packages/consul-ui/tests/unit/adapters/application-test.js +++ b/ui/packages/consul-ui/tests/unit/adapters/application-test.js @@ -1,5 +1,4 @@ -import { module } from 'qunit'; -import test from 'ember-sinon-qunit/test-support/test'; +import { module, test } from 'qunit'; import { setupTest } from 'ember-qunit'; module('Unit | Adapter | application', function(hooks) { diff --git a/ui/packages/consul-ui/tests/unit/mixins/with-blocking-actions-test.js b/ui/packages/consul-ui/tests/unit/mixins/with-blocking-actions-test.js index 436a44dbe9..6f72a7887c 100644 --- a/ui/packages/consul-ui/tests/unit/mixins/with-blocking-actions-test.js +++ b/ui/packages/consul-ui/tests/unit/mixins/with-blocking-actions-test.js @@ -1,8 +1,8 @@ -import { module, skip } from 'qunit'; +import { module, test, skip } from 'qunit'; import { setupTest } from 'ember-qunit'; -import test from 'ember-sinon-qunit/test-support/test'; import Route from '@ember/routing/route'; import Mixin from 'consul-ui/mixins/with-blocking-actions'; +import sinon from 'sinon'; module('Unit | Mixin | with blocking actions', function(hooks) { setupTest(hooks); @@ -24,7 +24,7 @@ module('Unit | Mixin | with blocking actions', function(hooks) { test('afterCreate just calls afterUpdate', function(assert) { const subject = this.subject(); const expected = [1, 2, 3, 4]; - const afterUpdate = this.stub(subject, 'afterUpdate').returns(expected); + const afterUpdate = sinon.stub(subject, 'afterUpdate').returns(expected); const actual = subject.afterCreate(expected); assert.deepEqual(actual, expected); assert.ok(afterUpdate.calledOnce); @@ -33,7 +33,7 @@ module('Unit | Mixin | with blocking actions', function(hooks) { const subject = this.subject(); const expected = 'dc.kv'; subject.routeName = expected + '.edit'; - const transitionTo = this.stub(subject, 'transitionTo').returnsArg(0); + const transitionTo = sinon.stub(subject, 'transitionTo').returnsArg(0); const actual = subject.afterUpdate(); assert.equal(actual, expected); assert.ok(transitionTo.calledOnce); @@ -42,7 +42,7 @@ module('Unit | Mixin | with blocking actions', function(hooks) { const subject = this.subject(); const expected = 'dc.kv'; subject.routeName = expected + '.edit'; - const transitionTo = this.stub(subject, 'transitionTo').returnsArg(0); + const transitionTo = sinon.stub(subject, 'transitionTo').returnsArg(0); const actual = subject.afterDelete(); assert.equal(actual, expected); assert.ok(transitionTo.calledOnce); @@ -51,7 +51,7 @@ module('Unit | Mixin | with blocking actions', function(hooks) { const subject = this.subject(); subject.routeName = 'dc.kv.index'; const expected = 'refresh'; - const refresh = this.stub(subject, 'refresh').returns(expected); + const refresh = sinon.stub(subject, 'refresh').returns(expected); const actual = subject.afterDelete(); assert.equal(actual, expected); assert.ok(refresh.calledOnce); @@ -67,7 +67,7 @@ module('Unit | Mixin | with blocking actions', function(hooks) { test('action cancel just calls afterUpdate', function(assert) { const subject = this.subject(); const expected = [1, 2, 3, 4]; - const afterUpdate = this.stub(subject, 'afterUpdate').returns(expected); + const afterUpdate = sinon.stub(subject, 'afterUpdate').returns(expected); // TODO: unsure as to whether ember testing should actually bind this for you? 
const actual = subject.actions.cancel.bind(subject)(expected); assert.deepEqual(actual, expected); diff --git a/ui/packages/consul-ui/tests/unit/routes/dc-test.js b/ui/packages/consul-ui/tests/unit/routes/dc-test.js index 79bc3ca394..b8cba11f28 100644 --- a/ui/packages/consul-ui/tests/unit/routes/dc-test.js +++ b/ui/packages/consul-ui/tests/unit/routes/dc-test.js @@ -1,6 +1,5 @@ -import { module } from 'qunit'; +import { module, test } from 'qunit'; import { setupTest } from 'ember-qunit'; -import test from 'ember-sinon-qunit/test-support/test'; module('Unit | Route | dc', function(hooks) { setupTest(hooks); diff --git a/ui/packages/consul-ui/tests/unit/serializers/application-test.js b/ui/packages/consul-ui/tests/unit/serializers/application-test.js index d6e69e6fc2..19648227db 100644 --- a/ui/packages/consul-ui/tests/unit/serializers/application-test.js +++ b/ui/packages/consul-ui/tests/unit/serializers/application-test.js @@ -1,5 +1,4 @@ -import { module } from 'qunit'; -import test from 'ember-sinon-qunit/test-support/test'; +import { module, test } from 'qunit'; import { setupTest } from 'ember-qunit'; import { HEADERS_SYMBOL as META } from 'consul-ui/utils/http/consul'; diff --git a/ui/packages/consul-ui/tests/unit/serializers/kv-test.js b/ui/packages/consul-ui/tests/unit/serializers/kv-test.js index b1a4421725..ed9c00b594 100644 --- a/ui/packages/consul-ui/tests/unit/serializers/kv-test.js +++ b/ui/packages/consul-ui/tests/unit/serializers/kv-test.js @@ -1,8 +1,8 @@ -import { module, skip } from 'qunit'; -import test from 'ember-sinon-qunit/test-support/test'; +import { module, skip, test } from 'qunit'; import { setupTest } from 'ember-qunit'; import { run } from '@ember/runloop'; import { set } from '@ember/object'; +import sinon from 'sinon'; module('Unit | Serializer | kv', function(hooks) { setupTest(hooks); @@ -101,7 +101,7 @@ module('Unit | Serializer | kv', function(hooks) { test('serialize decodes Value if its a string', function(assert) { const serializer = this.owner.lookup('serializer:kv'); set(serializer, 'decoder', { - execute: this.stub().returnsArg(0), + execute: sinon.stub().returnsArg(0), }); // const expected = 'value'; diff --git a/ui/packages/consul-ui/tests/unit/utils/ascend-test.js b/ui/packages/consul-ui/tests/unit/utils/ascend-test.js index 9d8c9ac7c0..9ec0c9afb9 100644 --- a/ui/packages/consul-ui/tests/unit/utils/ascend-test.js +++ b/ui/packages/consul-ui/tests/unit/utils/ascend-test.js @@ -1,5 +1,4 @@ -import { module } from 'qunit'; -import test from 'ember-sinon-qunit/test-support/test'; +import { module, test } from 'qunit'; import ascend from 'consul-ui/utils/ascend'; module('Unit | Utils | ascend', function() { diff --git a/ui/packages/consul-ui/tests/unit/utils/atob-test.js b/ui/packages/consul-ui/tests/unit/utils/atob-test.js index b84abaec89..10cc764e94 100644 --- a/ui/packages/consul-ui/tests/unit/utils/atob-test.js +++ b/ui/packages/consul-ui/tests/unit/utils/atob-test.js @@ -1,5 +1,4 @@ -import test from 'ember-sinon-qunit/test-support/test'; -import { module, skip } from 'qunit'; +import { module, skip, test } from 'qunit'; import atob from 'consul-ui/utils/atob'; module('Unit | Utils | atob', function() { diff --git a/ui/packages/consul-ui/tests/unit/utils/btoa-test.js b/ui/packages/consul-ui/tests/unit/utils/btoa-test.js index 776a3b57f5..1fe10c5844 100644 --- a/ui/packages/consul-ui/tests/unit/utils/btoa-test.js +++ b/ui/packages/consul-ui/tests/unit/utils/btoa-test.js @@ -1,5 +1,4 @@ -import { module } from 'qunit'; -import test from 
'ember-sinon-qunit/test-support/test'; +import { module, test } from 'qunit'; import btoa from 'consul-ui/utils/btoa'; module('Unit | Utils | btoa', function() { diff --git a/ui/packages/consul-ui/tests/unit/utils/dom/closest-test.js b/ui/packages/consul-ui/tests/unit/utils/dom/closest-test.js index 5c72e21773..ccbb2bf951 100644 --- a/ui/packages/consul-ui/tests/unit/utils/dom/closest-test.js +++ b/ui/packages/consul-ui/tests/unit/utils/dom/closest-test.js @@ -1,11 +1,11 @@ import domClosest from 'consul-ui/utils/dom/closest'; -import test from 'ember-sinon-qunit/test-support/test'; -import { module, skip } from 'qunit'; +import { module, skip, test } from 'qunit'; +import sinon from 'sinon'; module('Unit | Utility | dom/closest', function() { test('it calls Element.closest with the specified selector', function(assert) { const el = { - closest: this.stub().returnsArg(0), + closest: sinon.stub().returnsArg(0), }; const expected = 'selector'; const actual = domClosest(expected, el); diff --git a/ui/packages/consul-ui/tests/unit/utils/dom/create-listeners-test.js b/ui/packages/consul-ui/tests/unit/utils/dom/create-listeners-test.js index 9cb2391e86..1297ee3629 100644 --- a/ui/packages/consul-ui/tests/unit/utils/dom/create-listeners-test.js +++ b/ui/packages/consul-ui/tests/unit/utils/dom/create-listeners-test.js @@ -1,6 +1,6 @@ import createListeners from 'consul-ui/utils/dom/create-listeners'; -import { module } from 'qunit'; -import test from 'ember-sinon-qunit/test-support/test'; +import { module, test } from 'qunit'; +import sinon from 'sinon'; module('Unit | Utility | dom/create listeners', function() { test('it has add and remove methods', function(assert) { @@ -33,7 +33,7 @@ module('Unit | Utility | dom/create listeners', function() { assert.equal(handlers.length, 0); }); test('remove calls the remove functions', function(assert) { - const expected = this.stub(); + const expected = sinon.stub(); const arr = [expected]; const listeners = createListeners(arr); listeners.remove(); @@ -42,7 +42,7 @@ module('Unit | Utility | dom/create listeners', function() { }); test('listeners are added on add', function(assert) { const listeners = createListeners(); - const stub = this.stub(); + const stub = sinon.stub(); const target = { addEventListener: stub, }; @@ -54,8 +54,8 @@ module('Unit | Utility | dom/create listeners', function() { }); test('listeners as objects are added on add and removed on remove', function(assert) { const listeners = createListeners(); - const addStub = this.stub(); - const removeStub = this.stub(); + const addStub = sinon.stub(); + const removeStub = sinon.stub(); const target = { addEventListener: addStub, removeEventListener: removeStub, @@ -77,7 +77,7 @@ module('Unit | Utility | dom/create listeners', function() { }); test('listeners are removed on remove', function(assert) { const listeners = createListeners(); - const stub = this.stub(); + const stub = sinon.stub(); const target = { addEventListener: function() {}, removeEventListener: stub, @@ -91,7 +91,7 @@ module('Unit | Utility | dom/create listeners', function() { }); test('listeners as functions are removed on remove', function(assert) { const listeners = createListeners(); - const stub = this.stub(); + const stub = sinon.stub(); const remove = listeners.add(stub); remove(); assert.ok(stub.calledOnce); @@ -99,7 +99,7 @@ module('Unit | Utility | dom/create listeners', function() { test('listeners as other listeners are removed on remove', function(assert) { const listeners = createListeners(); const 
listeners2 = createListeners(); - const stub = this.stub(); + const stub = sinon.stub(); listeners2.add(stub); const remove = listeners.add(listeners2); remove(); @@ -108,7 +108,7 @@ module('Unit | Utility | dom/create listeners', function() { test('listeners as functions of other listeners are removed on remove', function(assert) { const listeners = createListeners(); const listeners2 = createListeners(); - const stub = this.stub(); + const stub = sinon.stub(); const remove = listeners.add(listeners2.add(stub)); remove(); assert.ok(stub.calledOnce); @@ -120,7 +120,7 @@ module('Unit | Utility | dom/create listeners', function() { removeEventListener: function() {}, }; const name = 'test'; - const expected = this.stub(); + const expected = sinon.stub(); const remove = listeners.add(target, name, expected); const actual = remove(); actual[0](); diff --git a/ui/packages/consul-ui/tests/unit/utils/dom/event-source/blocking-test.js b/ui/packages/consul-ui/tests/unit/utils/dom/event-source/blocking-test.js index 49d6dac984..e81cf89b80 100644 --- a/ui/packages/consul-ui/tests/unit/utils/dom/event-source/blocking-test.js +++ b/ui/packages/consul-ui/tests/unit/utils/dom/event-source/blocking-test.js @@ -2,8 +2,8 @@ import domEventSourceBlocking, { validateCursor, createErrorBackoff, } from 'consul-ui/utils/dom/event-source/blocking'; -import { module } from 'qunit'; -import test from 'ember-sinon-qunit/test-support/test'; +import { module, test } from 'qunit'; +import sinon from 'sinon'; module('Unit | Utility | dom/event-source/blocking', function() { const createEventSource = function() { @@ -74,8 +74,8 @@ module('Unit | Utility | dom/event-source/blocking', function() { { errors: [{ status: '504' }] }, { errors: [{ status: '524' }] }, ].forEach(item => { - const timeout = this.stub().callsArg(0); - const resolve = this.stub().withArgs(item); + const timeout = sinon.stub().callsArg(0); + const resolve = sinon.stub().withArgs(item); const Promise = createPromise(resolve); const backoff = createErrorBackoff(undefined, Promise, timeout); const promise = backoff(item); diff --git a/ui/packages/consul-ui/tests/unit/utils/dom/event-source/cache-test.js b/ui/packages/consul-ui/tests/unit/utils/dom/event-source/cache-test.js index 2a82d59ce7..8561479ea8 100644 --- a/ui/packages/consul-ui/tests/unit/utils/dom/event-source/cache-test.js +++ b/ui/packages/consul-ui/tests/unit/utils/dom/event-source/cache-test.js @@ -1,6 +1,6 @@ import domEventSourceCache from 'consul-ui/utils/dom/event-source/cache'; -import { module } from 'qunit'; -import test from 'ember-sinon-qunit/test-support/test'; +import { module, test } from 'qunit'; +import sinon from 'sinon'; module('Unit | Utility | dom/event-source/cache', function() { const createEventSource = function() { @@ -66,8 +66,8 @@ module('Unit | Utility | dom/event-source/cache', function() { const Promise = createPromise(function() { return stub; }); - const source = this.stub().returns(Promise.resolve()); - const cb = this.stub(); + const source = sinon.stub().returns(Promise.resolve()); + const cb = sinon.stub(); const getCache = domEventSourceCache(source, EventSource, Promise); const obj = {}; const cache = getCache(obj); @@ -92,14 +92,14 @@ module('Unit | Utility | dom/event-source/cache', function() { test('cache creates the default EventSource and keeps it open when there is a cursor', function(assert) { const EventSource = createEventSource(); const stub = { - close: this.stub(), + close: sinon.stub(), configuration: { cursor: 1 }, }; const Promise = 
createPromise(function() { return stub; }); - const source = this.stub().returns(Promise.resolve()); - const cb = this.stub(); + const source = sinon.stub().returns(Promise.resolve()); + const cb = sinon.stub(); const getCache = domEventSourceCache(source, EventSource, Promise); const obj = {}; const cache = getCache(obj); @@ -120,14 +120,14 @@ module('Unit | Utility | dom/event-source/cache', function() { test("cache creates the default EventSource and closes it when there isn't a cursor", function(assert) { const EventSource = createEventSource(); const stub = { - close: this.stub(), + close: sinon.stub(), configuration: {}, }; const Promise = createPromise(function() { return stub; }); - const source = this.stub().returns(Promise.resolve()); - const cb = this.stub(); + const source = sinon.stub().returns(Promise.resolve()); + const cb = sinon.stub(); const getCache = domEventSourceCache(source, EventSource, Promise); const obj = {}; const cache = getCache(obj); diff --git a/ui/packages/consul-ui/tests/unit/utils/dom/event-source/callable-test.js b/ui/packages/consul-ui/tests/unit/utils/dom/event-source/callable-test.js index 823af58f59..a4b2f9cb1c 100644 --- a/ui/packages/consul-ui/tests/unit/utils/dom/event-source/callable-test.js +++ b/ui/packages/consul-ui/tests/unit/utils/dom/event-source/callable-test.js @@ -1,6 +1,6 @@ import domEventSourceCallable, { defaultRunner } from 'consul-ui/utils/dom/event-source/callable'; -import { module } from 'qunit'; -import test from 'ember-sinon-qunit/test-support/test'; +import { module, test } from 'qunit'; +import sinon from 'sinon'; module('Unit | Utility | dom/event-source/callable', function() { const createEventTarget = function() { @@ -43,14 +43,14 @@ module('Unit | Utility | dom/event-source/callable', function() { return count === 11; }; const configuration = {}; - const then = this.stub().callsArg(0); + const then = sinon.stub().callsArg(0); const target = { source: function(configuration) { return { then: then, }; }, - dispatchEvent: this.stub(), + dispatchEvent: sinon.stub(), }; defaultRunner(target, configuration, isClosed); assert.ok(then.callCount == 10); @@ -59,7 +59,7 @@ module('Unit | Utility | dom/event-source/callable', function() { test('it calls the defaultRunner', function(assert) { const Promise = createPromise(); const EventTarget = createEventTarget(); - const run = this.stub(); + const run = sinon.stub(); const EventSource = domEventSourceCallable(EventTarget, Promise, run); const source = new EventSource(); assert.ok(run.calledOnce); diff --git a/ui/packages/consul-ui/tests/unit/utils/dom/event-source/openable-test.js b/ui/packages/consul-ui/tests/unit/utils/dom/event-source/openable-test.js index 9ab9dc3d58..16fabfa3de 100644 --- a/ui/packages/consul-ui/tests/unit/utils/dom/event-source/openable-test.js +++ b/ui/packages/consul-ui/tests/unit/utils/dom/event-source/openable-test.js @@ -1,6 +1,6 @@ import domEventSourceOpenable from 'consul-ui/utils/dom/event-source/openable'; -import { module } from 'qunit'; -import test from 'ember-sinon-qunit/test-support/test'; +import { module, test } from 'qunit'; +import sinon from 'sinon'; module('Unit | Utility | dom/event-source/openable', function() { const createEventSource = function() { @@ -23,7 +23,7 @@ module('Unit | Utility | dom/event-source/openable', function() { assert.ok(source instanceof EventSource); }); test('it reopens the event source when open is called', function(assert) { - const callable = this.stub(); + const callable = sinon.stub(); const EventSource = 
createEventSource(); const OpenableEventSource = domEventSourceOpenable(EventSource); const source = new OpenableEventSource(callable); diff --git a/ui/packages/consul-ui/tests/unit/utils/http/create-url-test.js b/ui/packages/consul-ui/tests/unit/utils/http/create-url-test.js index e996bb3462..c9ba8eb5f1 100644 --- a/ui/packages/consul-ui/tests/unit/utils/http/create-url-test.js +++ b/ui/packages/consul-ui/tests/unit/utils/http/create-url-test.js @@ -1,5 +1,4 @@ -import { module, skip } from 'qunit'; -import test from 'ember-sinon-qunit/test-support/test'; +import { module, skip, test } from 'qunit'; import createURL from 'consul-ui/utils/http/create-url'; import createQueryParams from 'consul-ui/utils/http/create-query-params'; diff --git a/ui/packages/consul-ui/tests/unit/utils/isFolder-test.js b/ui/packages/consul-ui/tests/unit/utils/isFolder-test.js index ab536257aa..a9fab45846 100644 --- a/ui/packages/consul-ui/tests/unit/utils/isFolder-test.js +++ b/ui/packages/consul-ui/tests/unit/utils/isFolder-test.js @@ -1,5 +1,4 @@ -import { module } from 'qunit'; -import test from 'ember-sinon-qunit/test-support/test'; +import { module, test } from 'qunit'; import isFolder from 'consul-ui/utils/isFolder'; module('Unit | Utils | isFolder', function() { diff --git a/ui/packages/consul-ui/tests/unit/utils/keyToArray-test.js b/ui/packages/consul-ui/tests/unit/utils/keyToArray-test.js index 5cd33c322c..120939cb49 100644 --- a/ui/packages/consul-ui/tests/unit/utils/keyToArray-test.js +++ b/ui/packages/consul-ui/tests/unit/utils/keyToArray-test.js @@ -1,5 +1,4 @@ -import { module } from 'qunit'; -import test from 'ember-sinon-qunit/test-support/test'; +import { module, test } from 'qunit'; import keyToArray from 'consul-ui/utils/keyToArray'; module('Unit | Utils | keyToArray', function() { diff --git a/ui/packages/consul-ui/tests/unit/utils/left-trim-test.js b/ui/packages/consul-ui/tests/unit/utils/left-trim-test.js index 3f0f1c31d8..9bdefcdf27 100644 --- a/ui/packages/consul-ui/tests/unit/utils/left-trim-test.js +++ b/ui/packages/consul-ui/tests/unit/utils/left-trim-test.js @@ -1,5 +1,4 @@ -import { module } from 'qunit'; -import test from 'ember-sinon-qunit/test-support/test'; +import { module, test } from 'qunit'; import leftTrim from 'consul-ui/utils/left-trim'; module('Unit | Utility | left trim', function() { diff --git a/ui/packages/consul-ui/tests/unit/utils/promisedTimeout-test.js b/ui/packages/consul-ui/tests/unit/utils/promisedTimeout-test.js index 7719f6c08a..b080f51192 100644 --- a/ui/packages/consul-ui/tests/unit/utils/promisedTimeout-test.js +++ b/ui/packages/consul-ui/tests/unit/utils/promisedTimeout-test.js @@ -1,5 +1,4 @@ -import { module, skip } from 'qunit'; -import test from 'ember-sinon-qunit/test-support/test'; +import { module, skip, test } from 'qunit'; import promisedTimeout from 'consul-ui/utils/promisedTimeout'; module('Unit | Utils | promisedTimeout', function() { diff --git a/ui/packages/consul-ui/tests/unit/utils/right-trim-test.js b/ui/packages/consul-ui/tests/unit/utils/right-trim-test.js index 6898de8e9c..ac988322d3 100644 --- a/ui/packages/consul-ui/tests/unit/utils/right-trim-test.js +++ b/ui/packages/consul-ui/tests/unit/utils/right-trim-test.js @@ -1,5 +1,4 @@ -import { module } from 'qunit'; -import test from 'ember-sinon-qunit/test-support/test'; +import { module, test } from 'qunit'; import rightTrim from 'consul-ui/utils/right-trim'; module('Unit | Utility | right trim', function() { diff --git a/ui/packages/consul-ui/tests/unit/utils/routing/walk-test.js 
b/ui/packages/consul-ui/tests/unit/utils/routing/walk-test.js index 9f520e57c2..01eb1d487c 100644 --- a/ui/packages/consul-ui/tests/unit/utils/routing/walk-test.js +++ b/ui/packages/consul-ui/tests/unit/utils/routing/walk-test.js @@ -1,10 +1,10 @@ import { walk } from 'consul-ui/utils/routing/walk'; -import { module } from 'qunit'; -import test from 'ember-sinon-qunit/test-support/test'; +import { module, test } from 'qunit'; +import sinon from 'sinon'; module('Unit | Utility | routing/walk', function() { test('it walks down deep routes', function(assert) { - const route = this.stub(); + const route = sinon.stub(); const Router = { route: function(name, options, cb) { route(); diff --git a/ui/packages/consul-ui/tests/unit/utils/ucfirst-test.js b/ui/packages/consul-ui/tests/unit/utils/ucfirst-test.js index b911ac04fa..3c45b51d6b 100644 --- a/ui/packages/consul-ui/tests/unit/utils/ucfirst-test.js +++ b/ui/packages/consul-ui/tests/unit/utils/ucfirst-test.js @@ -1,5 +1,4 @@ -import { module } from 'qunit'; -import test from 'ember-sinon-qunit/test-support/test'; +import { module, test } from 'qunit'; import ucfirst from 'consul-ui/utils/ucfirst'; module('Unit | Utils | ucfirst', function() { diff --git a/ui/yarn.lock b/ui/yarn.lock index 1f3f24f352..76bbc42af1 100644 --- a/ui/yarn.lock +++ b/ui/yarn.lock @@ -1948,17 +1948,29 @@ ember-compatibility-helpers "^1.2.5" ember-modifier-manager-polyfill "^1.2.0" -"@ember/test-helpers@^1.7.1": - version "1.7.2" - resolved "https://registry.yarnpkg.com/@ember/test-helpers/-/test-helpers-1.7.2.tgz#5b128dc5f6524c3850abf52668e6bd4fda401194" - integrity sha512-FEJBpbFNIaWAsCSnataiNwYFvmcpoymL/B7fXLruuJ/46BnJjzLaRPtpUIZ91w4GNTK6knxbHWXW76aVb3Aezg== +"@ember/test-helpers@^2.1.4": + version "2.8.1" + resolved "https://registry.yarnpkg.com/@ember/test-helpers/-/test-helpers-2.8.1.tgz#20f2e30d48172c2ff713e1db7fbec5352f918d4e" + integrity sha512-jbsYwWyAdhL/pdPu7Gb3SG1gvIXY70FWMtC/Us0Kmvk82Y+5YUQ1SOC0io75qmOGYQmH7eQrd/bquEVd+4XtdQ== dependencies: + "@ember/test-waiters" "^3.0.0" + "@embroider/macros" "^1.6.0" + "@embroider/util" "^1.6.0" broccoli-debug "^0.6.5" - broccoli-funnel "^2.0.2" - ember-assign-polyfill "^2.6.0" - ember-cli-babel "^7.7.3" - ember-cli-htmlbars-inline-precompile "^2.1.0" - ember-test-waiters "^1.1.1" + broccoli-funnel "^3.0.8" + ember-cli-babel "^7.26.6" + ember-cli-htmlbars "^5.7.1" + ember-destroyable-polyfill "^2.0.3" + +"@ember/test-waiters@^3.0.0": + version "3.0.2" + resolved "https://registry.yarnpkg.com/@ember/test-waiters/-/test-waiters-3.0.2.tgz#5b950c580a1891ed1d4ee64f9c6bacf49a15ea6f" + integrity sha512-H8Q3Xy9rlqhDKnQpwt2pzAYDouww4TZIGSI1pZJhM7mQIGufQKuB0ijzn/yugA6Z+bNdjYp1HioP8Y4hn2zazQ== + dependencies: + calculate-cache-key-for-tree "^2.0.0" + ember-cli-babel "^7.26.6" + ember-cli-version-checker "^5.1.2" + semver "^7.3.5" "@embroider/core@0.33.0", "@embroider/core@^0.33.0": version "0.33.0" @@ -2039,6 +2051,20 @@ resolve "^1.20.0" semver "^7.3.2" +"@embroider/macros@1.8.3", "@embroider/macros@^1.6.0": + version "1.8.3" + resolved "https://registry.yarnpkg.com/@embroider/macros/-/macros-1.8.3.tgz#2f0961ab8871f6ad819630208031d705b357757e" + integrity sha512-gnIOfTL/pUkoD6oI7JyWOqXlVIUgZM+CnbH10/YNtZr2K0hij9eZQMdgjOZZVgN0rKOFw9dIREqc1ygrJHRYQA== + dependencies: + "@embroider/shared-internals" "1.8.3" + assert-never "^1.2.1" + babel-import-util "^1.1.0" + ember-cli-babel "^7.26.6" + find-up "^5.0.0" + lodash "^4.17.21" + resolve "^1.20.0" + semver "^7.3.2" + "@embroider/shared-internals@0.41.0": version "0.41.0" resolved 
"https://registry.yarnpkg.com/@embroider/shared-internals/-/shared-internals-0.41.0.tgz#2553f026d4f48ea1fd11235501feb63bf49fa306" @@ -2065,6 +2091,20 @@ semver "^7.3.5" typescript-memoize "^1.0.1" +"@embroider/shared-internals@1.8.3", "@embroider/shared-internals@^1.0.0": + version "1.8.3" + resolved "https://registry.yarnpkg.com/@embroider/shared-internals/-/shared-internals-1.8.3.tgz#52d868dc80016e9fe983552c0e516f437bf9b9f9" + integrity sha512-N5Gho6Qk8z5u+mxLCcMYAoQMbN4MmH+z2jXwQHVs859bxuZTxwF6kKtsybDAASCtd2YGxEmzcc1Ja/wM28824w== + dependencies: + babel-import-util "^1.1.0" + ember-rfc176-data "^0.3.17" + fs-extra "^9.1.0" + js-string-escape "^1.0.1" + lodash "^4.17.21" + resolve-package-path "^4.0.1" + semver "^7.3.5" + typescript-memoize "^1.0.1" + "@embroider/util@^0.39.1 || ^0.40.0 || ^0.41.0": version "0.41.0" resolved "https://registry.yarnpkg.com/@embroider/util/-/util-0.41.0.tgz#5324cb4742aa4ed8d613c4f88a466f73e4e6acc1" @@ -2083,6 +2123,15 @@ broccoli-funnel "^3.0.5" ember-cli-babel "^7.23.1" +"@embroider/util@^1.6.0": + version "1.8.3" + resolved "https://registry.yarnpkg.com/@embroider/util/-/util-1.8.3.tgz#7267a2b6fcbf3e56712711441159ab373f9bee7a" + integrity sha512-FvsPzsb9rNeveSnIGnsfLkWWBdSM5QIA9lDVtckUktRnRnBWZHm5jDxU/ST//pWMhZ8F0DucRlFWE149MTLtuQ== + dependencies: + "@embroider/macros" "1.8.3" + broccoli-funnel "^3.0.5" + ember-cli-babel "^7.23.1" + "@eslint/eslintrc@^0.4.0": version "0.4.0" resolved "https://registry.yarnpkg.com/@eslint/eslintrc/-/eslintrc-0.4.0.tgz#99cc0a0584d72f1df38b900fb062ba995f395547" @@ -3387,6 +3436,11 @@ babel-import-util@^0.2.0: resolved "https://registry.yarnpkg.com/babel-import-util/-/babel-import-util-0.2.0.tgz#b468bb679919601a3570f9e317536c54f2862e23" integrity sha512-CtWYYHU/MgK88rxMrLfkD356dApswtR/kWZ/c6JifG1m10e7tBBrs/366dFzWMAoqYmG5/JSh+94tUSpIwh+ag== +babel-import-util@^1.1.0: + version "1.2.2" + resolved "https://registry.yarnpkg.com/babel-import-util/-/babel-import-util-1.2.2.tgz#1027560e143a4a68b1758e71d4fadc661614e495" + integrity sha512-8HgkHWt5WawRFukO30TuaL9EiDUOdvyKtDwLma4uBNeUSDbOO0/hiPfavrOWxSS6J6TKXfukWHZ3wiqZhJ8ONQ== + babel-loader@^8.0.6, babel-loader@^8.1.0: version "8.2.2" resolved "https://registry.yarnpkg.com/babel-loader/-/babel-loader-8.2.2.tgz#9363ce84c10c9a40e6c753748e1441b60c8a0b81" @@ -3471,11 +3525,6 @@ babel-plugin-filter-imports@^4.0.0: "@babel/types" "^7.7.2" lodash "^4.17.15" -babel-plugin-htmlbars-inline-precompile@^1.0.0: - version "1.0.0" - resolved "https://registry.yarnpkg.com/babel-plugin-htmlbars-inline-precompile/-/babel-plugin-htmlbars-inline-precompile-1.0.0.tgz#a9d2f6eaad8a3f3d361602de593a8cbef8179c22" - integrity sha512-4jvKEHR1bAX03hBDZ94IXsYCj3bwk9vYsn6ux6JZNL2U5pvzCWjqyrGahfsGNrhERyxw8IqcirOi9Q6WCo3dkQ== - babel-plugin-htmlbars-inline-precompile@^3.2.0: version "3.2.0" resolved "https://registry.yarnpkg.com/babel-plugin-htmlbars-inline-precompile/-/babel-plugin-htmlbars-inline-precompile-3.2.0.tgz#c4882ea875d0f5683f0d91c1f72e29a4f14b5606" @@ -4427,7 +4476,7 @@ broccoli-funnel@^3.0.1, broccoli-funnel@^3.0.2, broccoli-funnel@^3.0.3: path-posix "^1.0.0" walk-sync "^2.0.2" -broccoli-funnel@^3.0.5: +broccoli-funnel@^3.0.5, broccoli-funnel@^3.0.8: version "3.0.8" resolved "https://registry.yarnpkg.com/broccoli-funnel/-/broccoli-funnel-3.0.8.tgz#f5b62e2763c3918026a15a3c833edc889971279b" integrity sha512-ng4eIhPYiXqMw6SyGoxPHR3YAwEd2lr9FgBI1CyTbspl4txZovOsmzFkMkGAlu88xyvYXJqHiM2crfLa65T1BQ== @@ -5444,10 +5493,10 @@ commander@2.8.x: dependencies: graceful-readlink ">= 1.0.0" -commander@7.1.0: - 
version "7.1.0" - resolved "https://registry.yarnpkg.com/commander/-/commander-7.1.0.tgz#f2eaecf131f10e36e07d894698226e36ae0eb5ff" - integrity sha512-pRxBna3MJe6HKnBGsDyMv8ETbptw3axEdYHoqNh7gu5oDcew8fs0xnivZGm06Ogk8zGAJ9VX+OPEr2GXEQK4dg== +commander@7.2.0: + version "7.2.0" + resolved "https://registry.yarnpkg.com/commander/-/commander-7.2.0.tgz#a36cb57d0b501ce108e4d20559a150a391d97ab7" + integrity sha512-QrWXB+ZQSVPmIWIhtEO9H+gwHaMGYiF5ChvoJ+K9ZGHG/sVsa6yiesAD1GC/x46sET00Xlwo1u49RVVVzvcSkw== commander@^2.20.0, commander@^2.6.0: version "2.20.3" @@ -5464,7 +5513,7 @@ commander@^6.2.0: resolved "https://registry.yarnpkg.com/commander/-/commander-6.2.1.tgz#0792eb682dfbc325999bb2b84fddddba110ac73c" integrity sha512-U7VdrJFnJgo4xjrHpTzu0yrHPGImdsmD95ZlgYSEajAn2JKzDhDTPG9kBTefmObL2w/ngeZnilk+OV9CG3d7UA== -common-tags@^1.4.0, common-tags@^1.8.0: +common-tags@^1.8.0: version "1.8.0" resolved "https://registry.yarnpkg.com/common-tags/-/common-tags-1.8.0.tgz#8e3153e542d4a39e9b10554434afaaf98956a937" integrity sha512-6P6g0uetGpW/sdyUy/iQQCbFF0kWVMSIVSyYz7Zgjcgh8mgw8PQzDNZeyZ5DQ2gM7LBoZPHmnjz8rUthkBG5tw== @@ -6292,13 +6341,40 @@ ember-assign-helper@^0.3.0: ember-cli-babel "^7.19.0" ember-cli-htmlbars "^4.3.1" -ember-assign-polyfill@^2.6.0: - version "2.7.2" - resolved "https://registry.yarnpkg.com/ember-assign-polyfill/-/ember-assign-polyfill-2.7.2.tgz#58f6f60235126cb23df248c846008fa9a3245fc1" - integrity sha512-hDSaKIZyFS0WRQsWzxUgO6pJPFfmcpfdM7CbGoMgYGriYbvkKn+k8zTXSKpTFVGehhSmsLE9YPqisQ9QpPisfA== +ember-auto-import@^1.11.3: + version "1.12.2" + resolved "https://registry.yarnpkg.com/ember-auto-import/-/ember-auto-import-1.12.2.tgz#cc7298ee5c0654b0249267de68fb27a2861c3579" + integrity sha512-gLqML2k77AuUiXxWNon1FSzuG1DV7PEPpCLCU5aJvf6fdL6rmFfElsZRh+8ELEB/qP9dT+LHjNEunVzd2dYc8A== dependencies: - ember-cli-babel "^7.20.5" - ember-cli-version-checker "^2.0.0" + "@babel/core" "^7.1.6" + "@babel/preset-env" "^7.10.2" + "@babel/traverse" "^7.1.6" + "@babel/types" "^7.1.6" + "@embroider/shared-internals" "^1.0.0" + babel-core "^6.26.3" + babel-loader "^8.0.6" + babel-plugin-syntax-dynamic-import "^6.18.0" + babylon "^6.18.0" + broccoli-debug "^0.6.4" + broccoli-node-api "^1.7.0" + broccoli-plugin "^4.0.0" + broccoli-source "^3.0.0" + debug "^3.1.0" + ember-cli-babel "^7.0.0" + enhanced-resolve "^4.0.0" + fs-extra "^6.0.1" + fs-tree-diff "^2.0.0" + handlebars "^4.3.1" + js-string-escape "^1.0.1" + lodash "^4.17.19" + mkdirp "^0.5.1" + resolve-package-path "^3.1.0" + rimraf "^2.6.2" + semver "^7.3.4" + symlink-or-copy "^1.2.0" + typescript-memoize "^1.0.0-alpha.3" + walk-sync "^0.3.3" + webpack "^4.43.0" ember-auto-import@^1.5.2, ember-auto-import@^1.5.3, ember-auto-import@^1.6.0: version "1.10.1" @@ -6402,7 +6478,7 @@ ember-cli-babel-plugin-helpers@^1.0.0, ember-cli-babel-plugin-helpers@^1.1.0, em resolved "https://registry.yarnpkg.com/ember-cli-babel-plugin-helpers/-/ember-cli-babel-plugin-helpers-1.1.1.tgz#5016b80cdef37036c4282eef2d863e1d73576879" integrity sha512-sKvOiPNHr5F/60NLd7SFzMpYPte/nnGkq/tMIfXejfKHIhaiIkYFqX8Z9UFTKWLLn+V7NOaby6niNPZUdvKCRw== -ember-cli-babel@7, ember-cli-babel@^7.0.0, ember-cli-babel@^7.1.3, ember-cli-babel@^7.10.0, ember-cli-babel@^7.11.0, ember-cli-babel@^7.12.0, ember-cli-babel@^7.13.0, ember-cli-babel@^7.17.2, ember-cli-babel@^7.18.0, ember-cli-babel@^7.19.0, ember-cli-babel@^7.20.0, ember-cli-babel@^7.20.5, ember-cli-babel@^7.21.0, ember-cli-babel@^7.22.1, ember-cli-babel@^7.23.0, ember-cli-babel@^7.23.1, ember-cli-babel@^7.7.3, ember-cli-babel@^7.8.0: +ember-cli-babel@7, 
ember-cli-babel@^7.0.0, ember-cli-babel@^7.1.3, ember-cli-babel@^7.10.0, ember-cli-babel@^7.13.0, ember-cli-babel@^7.17.2, ember-cli-babel@^7.18.0, ember-cli-babel@^7.19.0, ember-cli-babel@^7.20.0, ember-cli-babel@^7.20.5, ember-cli-babel@^7.21.0, ember-cli-babel@^7.22.1, ember-cli-babel@^7.23.0, ember-cli-babel@^7.23.1, ember-cli-babel@^7.7.3, ember-cli-babel@^7.8.0: version "7.26.1" resolved "https://registry.yarnpkg.com/ember-cli-babel/-/ember-cli-babel-7.26.1.tgz#d3f06bd9aec8aac9197c5ff4d0b87ff1e4f0d62a" integrity sha512-WEWP3hJSe9CWL22gEWQ+Y3uKMGk1vLoIREUQfJNKrgUUh3l49bnfAamh3ywcAQz31IgzvkLPO8ZTXO4rxnuP4Q== @@ -6454,7 +6530,7 @@ ember-cli-babel@^6.0.0, ember-cli-babel@^6.0.0-beta.4, ember-cli-babel@^6.11.0, ember-cli-version-checker "^2.1.2" semver "^5.5.0" -ember-cli-babel@^7.26.3, ember-cli-babel@^7.26.5: +ember-cli-babel@^7.13.2, ember-cli-babel@^7.26.3, ember-cli-babel@^7.26.5: version "7.26.11" resolved "https://registry.yarnpkg.com/ember-cli-babel/-/ember-cli-babel-7.26.11.tgz#50da0fe4dcd99aada499843940fec75076249a9f" integrity sha512-JJYeYjiz/JTn34q7F5DSOjkkZqy8qwFOOxXfE6pe9yEJqWGu4qErKxlz8I22JoVEQ/aBUO+OcKTpmctvykM9YA== @@ -6573,17 +6649,6 @@ ember-cli-get-component-path-option@^1.0.0: resolved "https://registry.yarnpkg.com/ember-cli-get-component-path-option/-/ember-cli-get-component-path-option-1.0.0.tgz#0d7b595559e2f9050abed804f1d8eff1b08bc771" integrity sha1-DXtZVVni+QUKvtgE8djv8bCLx3E= -ember-cli-htmlbars-inline-precompile@^2.1.0: - version "2.1.0" - resolved "https://registry.yarnpkg.com/ember-cli-htmlbars-inline-precompile/-/ember-cli-htmlbars-inline-precompile-2.1.0.tgz#61b91ff1879d44ae504cadb46fb1f2604995ae08" - integrity sha512-BylIHduwQkncPhnj0ZyorBuljXbTzLgRo6kuHf1W+IHFxThFl2xG+r87BVwsqx4Mn9MTgW9SE0XWjwBJcSWd6Q== - dependencies: - babel-plugin-htmlbars-inline-precompile "^1.0.0" - ember-cli-version-checker "^2.1.2" - hash-for-dep "^1.2.3" - heimdalljs-logger "^0.1.9" - silent-error "^1.1.0" - ember-cli-htmlbars@^3.0.1: version "3.1.0" resolved "https://registry.yarnpkg.com/ember-cli-htmlbars/-/ember-cli-htmlbars-3.1.0.tgz#87806c2a0bca2ab52d4fb8af8e2215c1ca718a99" @@ -6636,7 +6701,7 @@ ember-cli-htmlbars@^5.0.0, ember-cli-htmlbars@^5.1.0, ember-cli-htmlbars@^5.1.2, strip-bom "^4.0.0" walk-sync "^2.2.0" -ember-cli-htmlbars@^5.3.2: +ember-cli-htmlbars@^5.3.2, ember-cli-htmlbars@^5.7.1: version "5.7.2" resolved "https://registry.yarnpkg.com/ember-cli-htmlbars/-/ember-cli-htmlbars-5.7.2.tgz#e0cd2fb3c20d85fe4c3e228e6f0590ee1c645ba8" integrity sha512-Uj6R+3TtBV5RZoJY14oZn/sNPnc+UgmC8nb5rI4P3fR/gYoyTFIZSXiIM7zl++IpMoIrocxOrgt+mhonKphgGg== @@ -6799,12 +6864,12 @@ ember-cli-test-info@^1.0.0: dependencies: ember-cli-string-utils "^1.0.0" -ember-cli-test-loader@^2.2.0: - version "2.2.0" - resolved "https://registry.yarnpkg.com/ember-cli-test-loader/-/ember-cli-test-loader-2.2.0.tgz#3fb8d5d1357e4460d3f0a092f5375e71b6f7c243" - integrity sha512-mlSXX9SciIRwGkFTX6XGyJYp4ry6oCFZRxh5jJ7VH8UXLTNx2ZACtDTwaWtNhYrWXgKyiDUvmD8enD56aePWRA== +ember-cli-test-loader@^3.0.0: + version "3.0.0" + resolved "https://registry.yarnpkg.com/ember-cli-test-loader/-/ember-cli-test-loader-3.0.0.tgz#1c036fc48de36155355fcda3266af63f977826f1" + integrity sha512-wfFRBrfO9gaKScYcdQxTfklx9yp1lWK6zv1rZRpkas9z2SHyJojF7NOQRWQgSB3ypm7vfpiF8VsFFVVr7VBzAQ== dependencies: - ember-cli-babel "^6.8.1" + ember-cli-babel "^7.13.2" ember-cli-typescript@3.0.0: version "3.0.0" @@ -6885,7 +6950,7 @@ ember-cli-uglify@^3.0.0: broccoli-uglify-sourcemap "^3.1.0" lodash.defaultsdeep "^4.6.0" -ember-cli-version-checker@^2.0.0, 
ember-cli-version-checker@^2.1.0, ember-cli-version-checker@^2.1.2: +ember-cli-version-checker@^2.1.0, ember-cli-version-checker@^2.1.2: version "2.2.0" resolved "https://registry.yarnpkg.com/ember-cli-version-checker/-/ember-cli-version-checker-2.2.0.tgz#47771b731fe0962705e27c8199a9e3825709f3b3" integrity sha512-G+KtYIVlSOWGcNaTFHk76xR4GdzDLzAS4uxZUKdASuFX0KJE43C6DaqL+y3VTpUFLI2FIkAS6HZ4I1YBi+S3hg== @@ -7135,7 +7200,7 @@ ember-decorators@^6.1.1: "@ember-decorators/object" "^6.1.1" ember-cli-babel "^7.7.3" -ember-destroyable-polyfill@^2.0.2: +ember-destroyable-polyfill@^2.0.2, ember-destroyable-polyfill@^2.0.3: version "2.0.3" resolved "https://registry.yarnpkg.com/ember-destroyable-polyfill/-/ember-destroyable-polyfill-2.0.3.tgz#1673ed66609a82268ef270a7d917ebd3647f11e1" integrity sha512-TovtNqCumzyAiW0/OisSkkVK93xnVF4NRU6+FN0ubpfwEOpRrmM2RqDwXI6YAChCgSHON1cz0DfQStpA1Gjuuw== @@ -7403,18 +7468,20 @@ ember-power-select@^4.0.0, ember-power-select@^4.0.5: ember-text-measurer "^0.6.0" ember-truth-helpers "^2.1.0 || ^3.0.0" -ember-qunit@^4.6.0: - version "4.6.0" - resolved "https://registry.yarnpkg.com/ember-qunit/-/ember-qunit-4.6.0.tgz#ad79fd3ff00073a8779400cc5a4b44829517590f" - integrity sha512-i5VOGn0RP8XH+5qkYDOZshbqAvO6lHgF65D0gz8vRx4DszCIvJMJO+bbftBTfYMxp6rqG85etAA6pfNxE0DqsQ== +ember-qunit@^5.1.1: + version "5.1.5" + resolved "https://registry.yarnpkg.com/ember-qunit/-/ember-qunit-5.1.5.tgz#24a7850f052be24189ff597dfc31b923e684c444" + integrity sha512-2cFA4oMygh43RtVcMaBrr086Tpdhgbn3fVZ2awLkzF/rnSN0D0PSRpd7hAD7OdBPerC/ZYRwzVyGXLoW/Zes4A== dependencies: - "@ember/test-helpers" "^1.7.1" - broccoli-funnel "^2.0.2" + broccoli-funnel "^3.0.8" broccoli-merge-trees "^3.0.2" - common-tags "^1.4.0" - ember-cli-babel "^7.12.0" - ember-cli-test-loader "^2.2.0" - qunit "^2.9.3" + common-tags "^1.8.0" + ember-auto-import "^1.11.3" + ember-cli-babel "^7.26.6" + ember-cli-test-loader "^3.0.0" + resolve-package-path "^3.1.0" + silent-error "^1.1.1" + validate-peer-dependencies "^1.2.0" ember-ref-modifier@^1.0.0: version "1.0.1" @@ -7629,14 +7696,6 @@ ember-test-selectors@^5.0.0: ember-cli-babel "^7.22.1" ember-cli-version-checker "^5.1.1" -ember-test-waiters@^1.1.1: - version "1.2.0" - resolved "https://registry.yarnpkg.com/ember-test-waiters/-/ember-test-waiters-1.2.0.tgz#c12ead4313934c24cff41857020cacdbf8e6effe" - integrity sha512-aEw7YuutLuJT4NUuPTNiGFwgTYl23ThqmBxSkfFimQAn+keWjAftykk3dlFELuhsJhYW/S8YoVjN0bSAQRLNtw== - dependencies: - ember-cli-babel "^7.11.0" - semver "^6.3.0" - ember-text-measurer@^0.6.0: version "0.6.0" resolved "https://registry.yarnpkg.com/ember-text-measurer/-/ember-text-measurer-0.6.0.tgz#140eda044fd7d4d7f60f654dd30da79c06922b2e" @@ -10290,11 +10349,6 @@ jquery@^3.4.1, jquery@^3.5.0: resolved "https://registry.yarnpkg.com/jquery/-/jquery-3.6.0.tgz#c72a09f15c1bdce142f49dbf1170bdf8adac2470" integrity sha512-JVzAR/AjBvVt2BmYhxRCSYysDsPcssdmTFnzyLEts9qNwmjmu4JTAMYubEfwVOSwpQ1I1sKKFcxhZCI2buerfw== -js-reporters@1.2.3: - version "1.2.3" - resolved "https://registry.yarnpkg.com/js-reporters/-/js-reporters-1.2.3.tgz#8febcab370539df62e09b95da133da04b11f6168" - integrity sha512-2YzWkHbbRu6LueEs5ZP3P1LqbECvAeUJYrjw3H4y1ofW06hqCS0AbzBtLwbr+Hke51bt9CUepJ/Fj1hlCRIF6A== - js-string-escape@^1.0.1: version "1.0.1" resolved "https://registry.yarnpkg.com/js-string-escape/-/js-string-escape-1.0.1.tgz#e2625badbc0d67c7533e9edc1068c587ae4137ef" @@ -11734,10 +11788,10 @@ node-releases@^2.0.2: resolved 
"https://registry.yarnpkg.com/node-releases/-/node-releases-2.0.2.tgz#7139fe71e2f4f11b47d4d2986aaf8c48699e0c01" integrity sha512-XxYDdcQ6eKqp/YjI+tb2C5WM2LgjnZrfYg4vgQt49EK268b6gYCHsBLrK2qvJo4FmCtqmKezb0WZFK4fkrZNsg== -node-watch@0.7.1: - version "0.7.1" - resolved "https://registry.yarnpkg.com/node-watch/-/node-watch-0.7.1.tgz#0caaa6a6833b0d533487f953c52a6c787769ba7c" - integrity sha512-UWblPYuZYrkCQCW5PxAwYSxaELNBLUckrTBBk8xr1/bUgyOkYYTsUcV4e3ytcazFEOyiRyiUrsG37pu6I0I05g== +node-watch@0.7.3: + version "0.7.3" + resolved "https://registry.yarnpkg.com/node-watch/-/node-watch-0.7.3.tgz#6d4db88e39c8d09d3ea61d6568d80e5975abc7ab" + integrity sha512-3l4E8uMPY1HdMMryPRUAl+oIHtXtyiTlIiESNSVSNxcPfzAFzeTbXFQkZfAwBbo0B1qMSG8nUABx+Gd+YrbKrQ== nomnom@^1.5.x: version "1.8.1" @@ -12705,7 +12759,7 @@ quick-temp@^0.1.2, quick-temp@^0.1.3, quick-temp@^0.1.5, quick-temp@^0.1.8: rimraf "^2.5.4" underscore.string "~3.3.4" -qunit-dom@^1.0.0: +qunit-dom@^1.6.0: version "1.6.0" resolved "https://registry.yarnpkg.com/qunit-dom/-/qunit-dom-1.6.0.tgz#a4bea6a46329d221e4a317d712cb40709107b977" integrity sha512-YwSqcLjQcRI0fUFpaSWwU10KIJPFW5Qh+d3cT5DOgx81dypRuUSiPkKFmBY/CDs/R1KdHRadthkcXg2rqAon8Q== @@ -12715,15 +12769,14 @@ qunit-dom@^1.0.0: ember-cli-babel "^7.23.0" ember-cli-version-checker "^5.1.1" -qunit@^2.9.3: - version "2.14.1" - resolved "https://registry.yarnpkg.com/qunit/-/qunit-2.14.1.tgz#02ba25c108f0845fda411a42b5cbfca0f0319943" - integrity sha512-jtFw8bf8+GjzY8UpnwbjqTOdK/rvrjcafUFTNpRc6/9N4q5dBwcwSMlcC76kAn5BRiSFj5Ssn2dfHtEYvtsXSw== +qunit@^2.13.0: + version "2.19.1" + resolved "https://registry.yarnpkg.com/qunit/-/qunit-2.19.1.tgz#eb1afd188da9e47f07c13aa70461a1d9c4505490" + integrity sha512-gSGuw0vErE/rNjnlBW/JmE7NNubBlGrDPQvsug32ejYhcVFuZec9yoU0+C30+UgeCGwq6Ap89K65dMGo+kDGZQ== dependencies: - commander "7.1.0" - js-reporters "1.2.3" - node-watch "0.7.1" - tiny-glob "0.2.8" + commander "7.2.0" + node-watch "0.7.3" + tiny-glob "0.2.9" raf-pool@~0.1.4: version "0.1.4" @@ -13686,7 +13739,7 @@ signal-exit@^3.0.0, signal-exit@^3.0.2: resolved "https://registry.yarnpkg.com/signal-exit/-/signal-exit-3.0.3.tgz#a1410c2edd8f077b08b4e253c8eacfcaf057461c" integrity sha512-VUJ49FC8U1OxwZLxIbTTrDvLnf/6TDgxZcK8wxR8zs13xpx7xbG60ndBlhNrFi2EMuFRoeDoJO7wthSLq42EjA== -silent-error@^1.0.0, silent-error@^1.0.1, silent-error@^1.1.0, silent-error@^1.1.1: +silent-error@^1.0.0, silent-error@^1.0.1, silent-error@^1.1.1: version "1.1.1" resolved "https://registry.yarnpkg.com/silent-error/-/silent-error-1.1.1.tgz#f72af5b0d73682a2ba1778b7e32cd8aa7c2d8662" integrity sha512-n4iEKyNcg4v6/jpb3c0/iyH2G1nzUNl7Gpqtn/mHIJK9S/q/7MCfoO4rwVOoO59qPFIc0hVHvMbiOJ0NdtxKKw== @@ -14531,10 +14584,10 @@ tiny-emitter@^2.0.0: resolved "https://registry.yarnpkg.com/tiny-emitter/-/tiny-emitter-2.1.0.tgz#1d1a56edfc51c43e863cbb5382a72330e3555423" integrity sha512-NB6Dk1A9xgQPMoGqC5CVXn123gWyte215ONT5Pp5a0yt4nlEoO1ZWeCwpncaekPHXO60i47ihFnZPiRPjRMq4Q== -tiny-glob@0.2.8: - version "0.2.8" - resolved "https://registry.yarnpkg.com/tiny-glob/-/tiny-glob-0.2.8.tgz#b2792c396cc62db891ffa161fe8b33e76123e531" - integrity sha512-vkQP7qOslq63XRX9kMswlby99kyO5OvKptw7AMwBVMjXEI7Tb61eoI5DydyEMOseyGS5anDN1VPoVxEvH01q8w== +tiny-glob@0.2.9: + version "0.2.9" + resolved "https://registry.yarnpkg.com/tiny-glob/-/tiny-glob-0.2.9.tgz#2212d441ac17928033b110f8b3640683129d31e2" + integrity sha512-g/55ssRPUjShh+xkfx9UPDXqhckHEsHr4Vd9zX55oSdGZc/MD0m3sferOkwWtp98bv+kcVfEHtRJgBVJzelrzg== dependencies: globalyzer "0.1.0" globrex "^0.1.2" @@ -15235,6 +15288,14 @@ 
validate-npm-package-name@^3.0.0: dependencies: builtins "^1.0.3" +validate-peer-dependencies@^1.2.0: + version "1.2.0" + resolved "https://registry.yarnpkg.com/validate-peer-dependencies/-/validate-peer-dependencies-1.2.0.tgz#22aab93c514f4fda457d36c80685e8b1160d2036" + integrity sha512-nd2HUpKc6RWblPZQ2GDuI65sxJ2n/UqZwSBVtj64xlWjMx0m7ZB2m9b2JS3v1f+n9VWH/dd1CMhkHfP6pIdckA== + dependencies: + resolve-package-path "^3.1.0" + semver "^7.3.2" + validated-changeset@0.10.0, validated-changeset@~0.10.0: version "0.10.0" resolved "https://registry.yarnpkg.com/validated-changeset/-/validated-changeset-0.10.0.tgz#2e8188c089ab282c1b51fba3c289073f6bd14c8b" From b3ac8f48ca18f195b311430781bdba8163ed5a48 Mon Sep 17 00:00:00 2001 From: malizz Date: Thu, 1 Sep 2022 09:59:11 -0700 Subject: [PATCH 75/93] Add additional parameters to envoy passive health check config (#14238) * draft commit * add changelog, update test * remove extra param * fix test * update type to account for nil value * add test for custom passive health check * update comments and tests * update description in docs * fix missing commas --- .changelog/14238.txt | 3 + agent/consul/config_endpoint_test.go | 19 ++- agent/structs/config_entry.go | 5 + agent/structs/config_entry_test.go | 14 +- agent/structs/testing_connect_proxy_config.go | 2 +- agent/xds/clusters_test.go | 12 ++ agent/xds/config.go | 5 + .../custom-passive-healthcheck.latest.golden | 147 ++++++++++++++++++ api/config_entry.go | 5 + .../config-entries/service-defaults.mdx | 20 +++ .../content/docs/connect/proxies/envoy.mdx | 2 + 11 files changed, 223 insertions(+), 11 deletions(-) create mode 100644 .changelog/14238.txt create mode 100644 agent/xds/testdata/clusters/custom-passive-healthcheck.latest.golden diff --git a/.changelog/14238.txt b/.changelog/14238.txt new file mode 100644 index 0000000000..43c570915f --- /dev/null +++ b/.changelog/14238.txt @@ -0,0 +1,3 @@ +```release-note:improvement +envoy: adds additional Envoy outlier ejection parameters to passive health check configurations. 
+``` \ No newline at end of file diff --git a/agent/consul/config_endpoint_test.go b/agent/consul/config_endpoint_test.go index 3f79b3d1b4..aaf7cba94d 100644 --- a/agent/consul/config_endpoint_test.go +++ b/agent/consul/config_endpoint_test.go @@ -1399,8 +1399,9 @@ func TestConfigEntry_ResolveServiceConfig_Upstreams(t *testing.T) { Protocol: "http", MeshGateway: structs.MeshGatewayConfig{Mode: structs.MeshGatewayModeRemote}, PassiveHealthCheck: &structs.PassiveHealthCheck{ - Interval: 10, - MaxFailures: 2, + Interval: 10, + MaxFailures: 2, + EnforcingConsecutive5xx: uintPointer(60), }, }, Overrides: []*structs.UpstreamConfig{ @@ -1432,8 +1433,9 @@ func TestConfigEntry_ResolveServiceConfig_Upstreams(t *testing.T) { Upstream: wildcard, Config: map[string]interface{}{ "passive_health_check": map[string]interface{}{ - "Interval": int64(10), - "MaxFailures": int64(2), + "Interval": int64(10), + "MaxFailures": int64(2), + "EnforcingConsecutive5xx": int64(60), }, "mesh_gateway": map[string]interface{}{ "Mode": "remote", @@ -1445,8 +1447,9 @@ func TestConfigEntry_ResolveServiceConfig_Upstreams(t *testing.T) { Upstream: mysql, Config: map[string]interface{}{ "passive_health_check": map[string]interface{}{ - "Interval": int64(10), - "MaxFailures": int64(2), + "Interval": int64(10), + "MaxFailures": int64(2), + "EnforcingConsecutive5xx": int64(60), }, "mesh_gateway": map[string]interface{}{ "Mode": "local", @@ -2507,3 +2510,7 @@ func Test_gateWriteToSecondary_AllowedKinds(t *testing.T) { }) } } + +func uintPointer(v uint32) *uint32 { + return &v +} diff --git a/agent/structs/config_entry.go b/agent/structs/config_entry.go index 88c523a155..23c5c8e47f 100644 --- a/agent/structs/config_entry.go +++ b/agent/structs/config_entry.go @@ -969,6 +969,11 @@ type PassiveHealthCheck struct { // MaxFailures is the count of consecutive failures that results in a host // being removed from the pool. MaxFailures uint32 `json:",omitempty" alias:"max_failures"` + + // EnforcingConsecutive5xx is the % chance that a host will be actually ejected + // when an outlier status is detected through consecutive 5xx. + // This setting can be used to disable ejection or to ramp it up slowly. Defaults to 100. 
+ EnforcingConsecutive5xx *uint32 `json:",omitempty" alias:"enforcing_consecutive_5xx"` } func (chk *PassiveHealthCheck) Clone() *PassiveHealthCheck { diff --git a/agent/structs/config_entry_test.go b/agent/structs/config_entry_test.go index a9e113f21e..c08e823996 100644 --- a/agent/structs/config_entry_test.go +++ b/agent/structs/config_entry_test.go @@ -2754,8 +2754,9 @@ func TestUpstreamConfig_MergeInto(t *testing.T) { MaxConcurrentRequests: intPointer(12), }, "passive_health_check": &PassiveHealthCheck{ - MaxFailures: 13, - Interval: 14 * time.Second, + MaxFailures: 13, + Interval: 14 * time.Second, + EnforcingConsecutive5xx: uintPointer(80), }, "mesh_gateway": MeshGatewayConfig{Mode: MeshGatewayModeLocal}, }, @@ -2770,8 +2771,9 @@ func TestUpstreamConfig_MergeInto(t *testing.T) { MaxConcurrentRequests: intPointer(12), }, "passive_health_check": &PassiveHealthCheck{ - MaxFailures: 13, - Interval: 14 * time.Second, + MaxFailures: 13, + Interval: 14 * time.Second, + EnforcingConsecutive5xx: uintPointer(80), }, "mesh_gateway": MeshGatewayConfig{Mode: MeshGatewayModeLocal}, }, @@ -3067,3 +3069,7 @@ func testConfigEntryNormalizeAndValidate(t *testing.T, cases map[string]configEn }) } } + +func uintPointer(v uint32) *uint32 { + return &v +} diff --git a/agent/structs/testing_connect_proxy_config.go b/agent/structs/testing_connect_proxy_config.go index ad918927ae..fdee3f6937 100644 --- a/agent/structs/testing_connect_proxy_config.go +++ b/agent/structs/testing_connect_proxy_config.go @@ -26,7 +26,7 @@ func TestUpstreams(t testing.T) Upstreams { Config: map[string]interface{}{ // Float because this is how it is decoded by JSON decoder so this // enables the value returned to be compared directly to a decoded JSON - // response without spurios type loss. + // response without spurious type loss. 
"connect_timeout_ms": float64(1000), }, }, diff --git a/agent/xds/clusters_test.go b/agent/xds/clusters_test.go index 26087dd1d0..5efd5029c8 100644 --- a/agent/xds/clusters_test.go +++ b/agent/xds/clusters_test.go @@ -169,6 +169,18 @@ func TestClustersFromSnapshot(t *testing.T) { }, nil) }, }, + { + name: "custom-passive-healthcheck", + create: func(t testinf.T) *proxycfg.ConfigSnapshot { + return proxycfg.TestConfigSnapshot(t, func(ns *structs.NodeService) { + ns.Proxy.Upstreams[0].Config["passive_health_check"] = map[string]interface{}{ + "enforcing_consecutive_5xx": float64(80), + "max_failures": float64(5), + "interval": float64(10), + } + }, nil) + }, + }, { name: "custom-max-inbound-connections", create: func(t testinf.T) *proxycfg.ConfigSnapshot { diff --git a/agent/xds/config.go b/agent/xds/config.go index cfbd23e070..0736fb44ca 100644 --- a/agent/xds/config.go +++ b/agent/xds/config.go @@ -174,5 +174,10 @@ func ToOutlierDetection(p *structs.PassiveHealthCheck) *envoy_cluster_v3.Outlier if p.MaxFailures != 0 { od.Consecutive_5Xx = &wrappers.UInt32Value{Value: p.MaxFailures} } + + if p.EnforcingConsecutive5xx != nil { + od.EnforcingConsecutive_5Xx = &wrappers.UInt32Value{Value: *p.EnforcingConsecutive5xx} + } + return od } diff --git a/agent/xds/testdata/clusters/custom-passive-healthcheck.latest.golden b/agent/xds/testdata/clusters/custom-passive-healthcheck.latest.golden new file mode 100644 index 0000000000..41bc16f6ef --- /dev/null +++ b/agent/xds/testdata/clusters/custom-passive-healthcheck.latest.golden @@ -0,0 +1,147 @@ +{ + "versionInfo": "00000001", + "resources": [ + { + "@type": "type.googleapis.com/envoy.config.cluster.v3.Cluster", + "name": "db.default.dc1.internal.11111111-2222-3333-4444-555555555555.consul", + "altStatName": "db.default.dc1.internal.11111111-2222-3333-4444-555555555555.consul", + "type": "EDS", + "edsClusterConfig": { + "edsConfig": { + "ads": { + + }, + "resourceApiVersion": "V3" + } + }, + "connectTimeout": "5s", + "circuitBreakers": { + + }, + "outlierDetection": { + "consecutive5xx": 5, + "interval": "0.000000010s", + "enforcingConsecutive5xx": 80 + }, + "commonLbConfig": { + "healthyPanicThreshold": { + + } + }, + "transportSocket": { + "name": "tls", + "typedConfig": { + "@type": "type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.UpstreamTlsContext", + "commonTlsContext": { + "tlsParams": { + + }, + "tlsCertificates": [ + { + "certificateChain": { + "inlineString": "-----BEGIN CERTIFICATE-----\nMIICjDCCAjKgAwIBAgIIC5llxGV1gB8wCgYIKoZIzj0EAwIwFDESMBAGA1UEAxMJ\nVGVzdCBDQSAyMB4XDTE5MDMyMjEzNTgyNloXDTI5MDMyMjEzNTgyNlowDjEMMAoG\nA1UEAxMDd2ViMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEADPv1RHVNRfa2VKR\nAB16b6rZnEt7tuhaxCFpQXPj7M2omb0B9Favq5E0ivpNtv1QnFhxtPd7d5k4e+T7\nSkW1TaOCAXIwggFuMA4GA1UdDwEB/wQEAwIDuDAdBgNVHSUEFjAUBggrBgEFBQcD\nAgYIKwYBBQUHAwEwDAYDVR0TAQH/BAIwADBoBgNVHQ4EYQRfN2Q6MDc6ODc6M2E6\nNDA6MTk6NDc6YzM6NWE6YzA6YmE6NjI6ZGY6YWY6NGI6ZDQ6MDU6MjU6NzY6M2Q6\nNWE6OGQ6MTY6OGQ6Njc6NWU6MmU6YTA6MzQ6N2Q6ZGM6ZmYwagYDVR0jBGMwYYBf\nZDE6MTE6MTE6YWM6MmE6YmE6OTc6YjI6M2Y6YWM6N2I6YmQ6ZGE6YmU6YjE6OGE6\nZmM6OWE6YmE6YjU6YmM6ODM6ZTc6NWU6NDE6NmY6ZjI6NzM6OTU6NTg6MGM6ZGIw\nWQYDVR0RBFIwUIZOc3BpZmZlOi8vMTExMTExMTEtMjIyMi0zMzMzLTQ0NDQtNTU1\nNTU1NTU1NTU1LmNvbnN1bC9ucy9kZWZhdWx0L2RjL2RjMS9zdmMvd2ViMAoGCCqG\nSM49BAMCA0gAMEUCIGC3TTvvjj76KMrguVyFf4tjOqaSCRie3nmHMRNNRav7AiEA\npY0heYeK9A6iOLrzqxSerkXXQyj5e9bE4VgUnxgPU6g=\n-----END CERTIFICATE-----\n" + }, + "privateKey": { + "inlineString": "-----BEGIN EC PRIVATE 
KEY-----\nMHcCAQEEIMoTkpRggp3fqZzFKh82yS4LjtJI+XY+qX/7DefHFrtdoAoGCCqGSM49\nAwEHoUQDQgAEADPv1RHVNRfa2VKRAB16b6rZnEt7tuhaxCFpQXPj7M2omb0B9Fav\nq5E0ivpNtv1QnFhxtPd7d5k4e+T7SkW1TQ==\n-----END EC PRIVATE KEY-----\n" + } + } + ], + "validationContext": { + "trustedCa": { + "inlineString": "-----BEGIN CERTIFICATE-----\nMIICXDCCAgKgAwIBAgIICpZq70Z9LyUwCgYIKoZIzj0EAwIwFDESMBAGA1UEAxMJ\nVGVzdCBDQSAyMB4XDTE5MDMyMjEzNTgyNloXDTI5MDMyMjEzNTgyNlowFDESMBAG\nA1UEAxMJVGVzdCBDQSAyMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEIhywH1gx\nAsMwuF3ukAI5YL2jFxH6Usnma1HFSfVyxbXX1/uoZEYrj8yCAtdU2yoHETyd+Zx2\nThhRLP79pYegCaOCATwwggE4MA4GA1UdDwEB/wQEAwIBhjAPBgNVHRMBAf8EBTAD\nAQH/MGgGA1UdDgRhBF9kMToxMToxMTphYzoyYTpiYTo5NzpiMjozZjphYzo3Yjpi\nZDpkYTpiZTpiMTo4YTpmYzo5YTpiYTpiNTpiYzo4MzplNzo1ZTo0MTo2ZjpmMjo3\nMzo5NTo1ODowYzpkYjBqBgNVHSMEYzBhgF9kMToxMToxMTphYzoyYTpiYTo5Nzpi\nMjozZjphYzo3YjpiZDpkYTpiZTpiMTo4YTpmYzo5YTpiYTpiNTpiYzo4MzplNzo1\nZTo0MTo2ZjpmMjo3Mzo5NTo1ODowYzpkYjA/BgNVHREEODA2hjRzcGlmZmU6Ly8x\nMTExMTExMS0yMjIyLTMzMzMtNDQ0NC01NTU1NTU1NTU1NTUuY29uc3VsMAoGCCqG\nSM49BAMCA0gAMEUCICOY0i246rQHJt8o8Oya0D5PLL1FnmsQmQqIGCi31RwnAiEA\noR5f6Ku+cig2Il8T8LJujOp2/2A72QcHZA57B13y+8o=\n-----END CERTIFICATE-----\n" + }, + "matchSubjectAltNames": [ + { + "exact": "spiffe://11111111-2222-3333-4444-555555555555.consul/ns/default/dc/dc1/svc/db" + } + ] + } + }, + "sni": "db.default.dc1.internal.11111111-2222-3333-4444-555555555555.consul" + } + } + }, + { + "@type": "type.googleapis.com/envoy.config.cluster.v3.Cluster", + "name": "geo-cache.default.dc1.query.11111111-2222-3333-4444-555555555555.consul", + "type": "EDS", + "edsClusterConfig": { + "edsConfig": { + "ads": { + + }, + "resourceApiVersion": "V3" + } + }, + "connectTimeout": "5s", + "circuitBreakers": { + + }, + "outlierDetection": { + + }, + "transportSocket": { + "name": "tls", + "typedConfig": { + "@type": "type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.UpstreamTlsContext", + "commonTlsContext": { + "tlsParams": { + + }, + "tlsCertificates": [ + { + "certificateChain": { + "inlineString": "-----BEGIN CERTIFICATE-----\nMIICjDCCAjKgAwIBAgIIC5llxGV1gB8wCgYIKoZIzj0EAwIwFDESMBAGA1UEAxMJ\nVGVzdCBDQSAyMB4XDTE5MDMyMjEzNTgyNloXDTI5MDMyMjEzNTgyNlowDjEMMAoG\nA1UEAxMDd2ViMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEADPv1RHVNRfa2VKR\nAB16b6rZnEt7tuhaxCFpQXPj7M2omb0B9Favq5E0ivpNtv1QnFhxtPd7d5k4e+T7\nSkW1TaOCAXIwggFuMA4GA1UdDwEB/wQEAwIDuDAdBgNVHSUEFjAUBggrBgEFBQcD\nAgYIKwYBBQUHAwEwDAYDVR0TAQH/BAIwADBoBgNVHQ4EYQRfN2Q6MDc6ODc6M2E6\nNDA6MTk6NDc6YzM6NWE6YzA6YmE6NjI6ZGY6YWY6NGI6ZDQ6MDU6MjU6NzY6M2Q6\nNWE6OGQ6MTY6OGQ6Njc6NWU6MmU6YTA6MzQ6N2Q6ZGM6ZmYwagYDVR0jBGMwYYBf\nZDE6MTE6MTE6YWM6MmE6YmE6OTc6YjI6M2Y6YWM6N2I6YmQ6ZGE6YmU6YjE6OGE6\nZmM6OWE6YmE6YjU6YmM6ODM6ZTc6NWU6NDE6NmY6ZjI6NzM6OTU6NTg6MGM6ZGIw\nWQYDVR0RBFIwUIZOc3BpZmZlOi8vMTExMTExMTEtMjIyMi0zMzMzLTQ0NDQtNTU1\nNTU1NTU1NTU1LmNvbnN1bC9ucy9kZWZhdWx0L2RjL2RjMS9zdmMvd2ViMAoGCCqG\nSM49BAMCA0gAMEUCIGC3TTvvjj76KMrguVyFf4tjOqaSCRie3nmHMRNNRav7AiEA\npY0heYeK9A6iOLrzqxSerkXXQyj5e9bE4VgUnxgPU6g=\n-----END CERTIFICATE-----\n" + }, + "privateKey": { + "inlineString": "-----BEGIN EC PRIVATE KEY-----\nMHcCAQEEIMoTkpRggp3fqZzFKh82yS4LjtJI+XY+qX/7DefHFrtdoAoGCCqGSM49\nAwEHoUQDQgAEADPv1RHVNRfa2VKRAB16b6rZnEt7tuhaxCFpQXPj7M2omb0B9Fav\nq5E0ivpNtv1QnFhxtPd7d5k4e+T7SkW1TQ==\n-----END EC PRIVATE KEY-----\n" + } + } + ], + "validationContext": { + "trustedCa": { + "inlineString": "-----BEGIN 
CERTIFICATE-----\nMIICXDCCAgKgAwIBAgIICpZq70Z9LyUwCgYIKoZIzj0EAwIwFDESMBAGA1UEAxMJ\nVGVzdCBDQSAyMB4XDTE5MDMyMjEzNTgyNloXDTI5MDMyMjEzNTgyNlowFDESMBAG\nA1UEAxMJVGVzdCBDQSAyMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEIhywH1gx\nAsMwuF3ukAI5YL2jFxH6Usnma1HFSfVyxbXX1/uoZEYrj8yCAtdU2yoHETyd+Zx2\nThhRLP79pYegCaOCATwwggE4MA4GA1UdDwEB/wQEAwIBhjAPBgNVHRMBAf8EBTAD\nAQH/MGgGA1UdDgRhBF9kMToxMToxMTphYzoyYTpiYTo5NzpiMjozZjphYzo3Yjpi\nZDpkYTpiZTpiMTo4YTpmYzo5YTpiYTpiNTpiYzo4MzplNzo1ZTo0MTo2ZjpmMjo3\nMzo5NTo1ODowYzpkYjBqBgNVHSMEYzBhgF9kMToxMToxMTphYzoyYTpiYTo5Nzpi\nMjozZjphYzo3YjpiZDpkYTpiZTpiMTo4YTpmYzo5YTpiYTpiNTpiYzo4MzplNzo1\nZTo0MTo2ZjpmMjo3Mzo5NTo1ODowYzpkYjA/BgNVHREEODA2hjRzcGlmZmU6Ly8x\nMTExMTExMS0yMjIyLTMzMzMtNDQ0NC01NTU1NTU1NTU1NTUuY29uc3VsMAoGCCqG\nSM49BAMCA0gAMEUCICOY0i246rQHJt8o8Oya0D5PLL1FnmsQmQqIGCi31RwnAiEA\noR5f6Ku+cig2Il8T8LJujOp2/2A72QcHZA57B13y+8o=\n-----END CERTIFICATE-----\n" + }, + "matchSubjectAltNames": [ + { + "exact": "spiffe://11111111-2222-3333-4444-555555555555.consul/ns/default/dc/dc1/svc/geo-cache-target" + }, + { + "exact": "spiffe://11111111-2222-3333-4444-555555555555.consul/ns/default/dc/dc2/svc/geo-cache-target" + } + ] + } + }, + "sni": "geo-cache.default.dc1.query.11111111-2222-3333-4444-555555555555.consul" + } + } + }, + { + "@type": "type.googleapis.com/envoy.config.cluster.v3.Cluster", + "name": "local_app", + "type": "STATIC", + "connectTimeout": "5s", + "loadAssignment": { + "clusterName": "local_app", + "endpoints": [ + { + "lbEndpoints": [ + { + "endpoint": { + "address": { + "socketAddress": { + "address": "127.0.0.1", + "portValue": 8080 + } + } + } + } + ] + } + ] + } + } + ], + "typeUrl": "type.googleapis.com/envoy.config.cluster.v3.Cluster", + "nonce": "00000001" +} \ No newline at end of file diff --git a/api/config_entry.go b/api/config_entry.go index da685b7867..7fe128958c 100644 --- a/api/config_entry.go +++ b/api/config_entry.go @@ -196,6 +196,11 @@ type PassiveHealthCheck struct { // MaxFailures is the count of consecutive failures that results in a host // being removed from the pool. MaxFailures uint32 `alias:"max_failures"` + + // EnforcingConsecutive5xx is the % chance that a host will be actually ejected + // when an outlier status is detected through consecutive 5xx. + // This setting can be used to disable ejection or to ramp it up slowly. + EnforcingConsecutive5xx uint32 `json:",omitempty" alias:"enforcing_consecutive_5xx"` } // UpstreamLimits describes the limits that are associated with a specific diff --git a/website/content/docs/connect/config-entries/service-defaults.mdx b/website/content/docs/connect/config-entries/service-defaults.mdx index b431e43459..2ce58c2716 100644 --- a/website/content/docs/connect/config-entries/service-defaults.mdx +++ b/website/content/docs/connect/config-entries/service-defaults.mdx @@ -503,6 +503,16 @@ represents a location outside the Consul cluster. They can be dialed directly wh description: `The number of consecutive failures which cause a host to be removed from the load balancer.`, }, + { + name: 'EnforcingConsecutive5xx', + type: 'int: 100', + description: { + hcl: `The % chance that a host will be actually ejected + when an outlier status is detected through consecutive 5xx.`, + yaml: `The % chance that a host will be actually ejected + when an outlier status is detected through consecutive 5xx.`, + }, + }, ], }, ], @@ -635,6 +645,16 @@ represents a location outside the Consul cluster. 
They can be dialed directly wh description: `The number of consecutive failures which cause a host to be removed from the load balancer.`, }, + { + name: 'EnforcingConsecutive5xx', + type: 'int: 100', + description: { + hcl: `The % chance that a host will be actually ejected + when an outlier status is detected through consecutive 5xx.`, + yaml: `The % chance that a host will be actually ejected + when an outlier status is detected through consecutive 5xx.`, + }, + }, ], }, ], diff --git a/website/content/docs/connect/proxies/envoy.mdx b/website/content/docs/connect/proxies/envoy.mdx index 812adff17b..d6222e898b 100644 --- a/website/content/docs/connect/proxies/envoy.mdx +++ b/website/content/docs/connect/proxies/envoy.mdx @@ -309,6 +309,8 @@ definition](/docs/connect/registration/service-registration) or load balancer. - `max_failures` - The number of consecutive failures which cause a host to be removed from the load balancer. + - `enforcing_consecutive_5xx` - The % chance that a host will be actually ejected + when an outlier status is detected through consecutive 5xx. ### Gateway Options From e36650c49c03cba8d0e6e8df6a1fd73a5537b964 Mon Sep 17 00:00:00 2001 From: David Yu Date: Thu, 1 Sep 2022 10:10:32 -0700 Subject: [PATCH 76/93] docs: minor changes to cluster peering k8s docs and typos (#14442) * docs: minor changes to cluster peering k8s docs and typos --- .../docs/connect/cluster-peering/k8s.mdx | 30 +++++++++---------- 1 file changed, 15 insertions(+), 15 deletions(-) diff --git a/website/content/docs/connect/cluster-peering/k8s.mdx b/website/content/docs/connect/cluster-peering/k8s.mdx index b18633f091..0263e87949 100644 --- a/website/content/docs/connect/cluster-peering/k8s.mdx +++ b/website/content/docs/connect/cluster-peering/k8s.mdx @@ -61,7 +61,7 @@ You must implement the following requirements to create and use cluster peering enableRedirection: true server: exposeService: - enabeld: true + enabled: true controller: enabled: true meshGateway: @@ -166,14 +166,14 @@ To peer Kubernetes clusters running Consul, you need to create a peering token a 1. For the service in "cluster-02" that you want to export, add the following [annotation](/docs/k8s/annotations-and-labels) to your service's pods. - + ```yaml # Service to expose backend apiVersion: v1 kind: Service metadata: - name: backend-service + name: backend spec: selector: app: backend @@ -235,7 +235,7 @@ To peer Kubernetes clusters running Consul, you need to create a peering token a name: default ## The name of the partition containing the service spec: services: - - name: backend-service ## The name of the service you want to export + - name: backend ## The name of the service you want to export consumers: - peer: cluster-01 ## The name of the peer that receives the service ``` @@ -245,7 +245,7 @@ To peer Kubernetes clusters running Consul, you need to create a peering token a 1. Apply the service file and the `ExportedServices` resource for the second cluster. 
```shell-session - $ kubectl apply --context $CLUSTER2_CONTEXT --filename backend-service.yaml --filename exportedsvc.yaml + $ kubectl apply --context $CLUSTER2_CONTEXT --filename backend.yaml --filename exportedsvc.yaml ``` ## Authorize services for peers @@ -261,11 +261,11 @@ To peer Kubernetes clusters running Consul, you need to create a peering token a name: backend-deny spec: destination: - name: backend-service + name: backend sources: - name: "*" action: deny - - name: frontend-service + - name: frontend action: allow ``` @@ -277,16 +277,16 @@ To peer Kubernetes clusters running Consul, you need to create a peering token a $ kubectl --context $CLUSTER2_CONTEXT apply --filename intention.yml ``` -1. For the services in `cluster-01` that you want to access the "backend-service," add the following annotations to the service file. To dial the upstream service from an application, ensure that the requests are sent to the correct DNS name as specified in [Service Virtual IP Lookups](/docs/discovery/dns#service-virtual-ip-lookups). +1. For the services in `cluster-01` that you want to access the "backend," add the following annotations to the service file. To dial the upstream service from an application, ensure that the requests are sent to the correct DNS name as specified in [Service Virtual IP Lookups](/docs/discovery/dns#service-virtual-ip-lookups). - + ```yaml # Service to expose frontend apiVersion: v1 kind: Service metadata: - name: frontend-service + name: frontend spec: selector: app: frontend @@ -332,7 +332,7 @@ To peer Kubernetes clusters running Consul, you need to create a peering token a - name: "LISTEN_ADDR" value: "0.0.0.0:9090" - name: "UPSTREAM_URIS" - value: "http://backend-service.virtual.cluster-02.consul" + value: "http://backend.virtual.cluster-02.consul" - name: "NAME" value: "frontend" - name: "MESSAGE" @@ -346,10 +346,10 @@ To peer Kubernetes clusters running Consul, you need to create a peering token a 1. Apply the service file to the first cluster. ```shell-session - $ kubectl --context $CLUSTER1_CONTEXT apply --filename frontend-service.yaml + $ kubectl --context $CLUSTER1_CONTEXT apply --filename frontend.yaml ``` -1. Run the following command in `frontend-service` and check the output to confirm that you peered your clusters successfully. +1. Run the following command in `frontend` and check the output to confirm that you peered your clusters successfully. ```shell-session $ kubectl --context $CLUSTER1_CONTEXT exec -it $(kubectl --context $CLUSTER1_CONTEXT get pod -l app=frontend -o name) -- curl localhost:9090 @@ -365,9 +365,9 @@ To peer Kubernetes clusters running Consul, you need to create a peering token a "duration": "59.752279ms", "body": "Hello World", "upstream_calls": { - "http://backend-service.virtual.cluster-02.consul": { + "http://backend.virtual.cluster-02.consul": { "name": "backend", - "uri": "http://backend-service.virtual.cluster-02.consul", + "uri": "http://backend.virtual.cluster-02.consul", "type": "HTTP", "ip_addresses": [ "10.32.2.10" From c06cc60b9045fb8eceeaa79c13a2db0c82309d86 Mon Sep 17 00:00:00 2001 From: John Cowen Date: Thu, 1 Sep 2022 18:15:06 +0100 Subject: [PATCH 77/93] ui: Use credentials for all HTTP API requests (#14343) Adds withCredentials/credentials to all HTTP API requests. 
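For illustration, the two browser mechanisms this change enables look roughly like this (a minimal sketch, not the UI's actual request layer; the endpoint and header below are placeholders):

```js
// fetch: opt in to sending cookies and other credentials with the request.
// Without `credentials: 'include'`, cross-origin requests omit cookies.
fetch('/v1/internal/ui/services', {
  credentials: 'include',
  headers: { 'X-Consul-Token': '' },
}).then((res) => res.json());

// XMLHttpRequest: the equivalent flag, set before send().
const xhr = new XMLHttpRequest();
xhr.open('GET', '/v1/internal/ui/services');
xhr.withCredentials = true;
xhr.send();
```

Sending credentials lets cookie-based authentication (for example, an SSO or reverse proxy in front of the HTTP API) work for UI requests.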
--- .changelog/14343.txt | 4 ++++ ui/packages/consul-ui/app/services/client/http.js | 1 + ui/packages/consul-ui/app/utils/http/xhr.js | 1 + 3 files changed, 6 insertions(+) create mode 100644 .changelog/14343.txt diff --git a/.changelog/14343.txt b/.changelog/14343.txt new file mode 100644 index 0000000000..94e7432b44 --- /dev/null +++ b/.changelog/14343.txt @@ -0,0 +1,4 @@ +```release-note:feature +ui: Use withCredentials for all HTTP API requests +``` + diff --git a/ui/packages/consul-ui/app/services/client/http.js b/ui/packages/consul-ui/app/services/client/http.js index 6d3659c22c..9b77365019 100644 --- a/ui/packages/consul-ui/app/services/client/http.js +++ b/ui/packages/consul-ui/app/services/client/http.js @@ -210,6 +210,7 @@ export default class HttpService extends Service { return this.settings.findBySlug('token').then(token => { return fetch(`${path}`, { ...params, + credentials: 'include', headers: { 'X-Consul-Token': typeof token.SecretID === 'undefined' ? '' : token.SecretID, ...params.headers, diff --git a/ui/packages/consul-ui/app/utils/http/xhr.js b/ui/packages/consul-ui/app/utils/http/xhr.js index cbdea6411f..8ef24a0194 100644 --- a/ui/packages/consul-ui/app/utils/http/xhr.js +++ b/ui/packages/consul-ui/app/utils/http/xhr.js @@ -27,6 +27,7 @@ export default function(parseHeaders, XHR) { }; Object.entries(headers).forEach(([key, value]) => xhr.setRequestHeader(key, value)); options.beforeSend(xhr); + xhr.withCredentials = true; xhr.send(options.body); return xhr; }; From 50380861d0253d6826f91ff7bfcf3b30ba4dd355 Mon Sep 17 00:00:00 2001 From: John Cowen Date: Thu, 1 Sep 2022 18:26:12 +0100 Subject: [PATCH 78/93] ui: Adds a HCP home link when in HCP (#14417) --- .../app/components/consul/hcp/home/index.hbs | 8 ++++++++ .../app/components/consul/hcp/home/index.scss | 11 +++++++++++ .../consul-hcp/vendor/consul-hcp/services.js | 4 +++- .../app/components/hashicorp-consul/index.hbs | 13 +++++++------ .../app/components/main-nav-vertical/index.scss | 3 +-- .../app/styles/base/icons/icons/index.scss | 2 +- ui/packages/consul-ui/app/styles/components.scss | 1 + ui/packages/consul-ui/vendor/consul-ui/services.js | 3 +++ 8 files changed, 35 insertions(+), 10 deletions(-) create mode 100644 ui/packages/consul-hcp/app/components/consul/hcp/home/index.hbs create mode 100644 ui/packages/consul-hcp/app/components/consul/hcp/home/index.scss diff --git a/ui/packages/consul-hcp/app/components/consul/hcp/home/index.hbs b/ui/packages/consul-hcp/app/components/consul/hcp/home/index.hbs new file mode 100644 index 0000000000..053f235da0 --- /dev/null +++ b/ui/packages/consul-hcp/app/components/consul/hcp/home/index.hbs @@ -0,0 +1,8 @@ + diff --git a/ui/packages/consul-hcp/app/components/consul/hcp/home/index.scss b/ui/packages/consul-hcp/app/components/consul/hcp/home/index.scss new file mode 100644 index 0000000000..7ae65f2416 --- /dev/null +++ b/ui/packages/consul-hcp/app/components/consul/hcp/home/index.scss @@ -0,0 +1,11 @@ +.consul-hcp-home { + position: relative; + top: -22px; +} +.consul-hcp-home a::before { + content: ''; + --icon-name: icon-arrow-left; + --icon-size: icon-300; + margin-right: 8px; +} + diff --git a/ui/packages/consul-hcp/vendor/consul-hcp/services.js b/ui/packages/consul-hcp/vendor/consul-hcp/services.js index 159a7a96ec..27f9d4a742 100644 --- a/ui/packages/consul-hcp/vendor/consul-hcp/services.js +++ b/ui/packages/consul-hcp/vendor/consul-hcp/services.js @@ -1,5 +1,7 @@ (services => services({ - + 'component:consul/hcp/home': { + class: 
'consul-ui/components/consul/hcp/home', + }, }))( (json, data = (typeof document !== 'undefined' ? document.currentScript.dataset : module.exports)) => { data[`services`] = JSON.stringify(json); diff --git a/ui/packages/consul-ui/app/components/hashicorp-consul/index.hbs b/ui/packages/consul-ui/app/components/hashicorp-consul/index.hbs index 4d7a040ff9..672985310d 100644 --- a/ui/packages/consul-ui/app/components/hashicorp-consul/index.hbs +++ b/ui/packages/consul-ui/app/components/hashicorp-consul/index.hbs @@ -86,13 +86,14 @@ <:main-nav> +
    - + ul > li > a { +%main-nav-vertical a { @extend %main-nav-vertical-action; } %main-nav-vertical > ul > li.is-active > a { diff --git a/ui/packages/consul-ui/app/styles/base/icons/icons/index.scss b/ui/packages/consul-ui/app/styles/base/icons/icons/index.scss index 20f57edc7a..8f8663c4ca 100644 --- a/ui/packages/consul-ui/app/styles/base/icons/icons/index.scss +++ b/ui/packages/consul-ui/app/styles/base/icons/icons/index.scss @@ -2,7 +2,7 @@ @import './alert-circle-outline/index.scss'; @import './alert-triangle/index.scss'; // @import './arrow-down/index.scss'; -// @import './arrow-left/index.scss'; +@import './arrow-left/index.scss'; @import './arrow-right/index.scss'; // @import './arrow-up/index.scss'; // @import './bolt/index.scss'; diff --git a/ui/packages/consul-ui/app/styles/components.scss b/ui/packages/consul-ui/app/styles/components.scss index f94f14d448..4a7e7b9e05 100644 --- a/ui/packages/consul-ui/app/styles/components.scss +++ b/ui/packages/consul-ui/app/styles/components.scss @@ -109,3 +109,4 @@ @import 'consul-ui/components/consul/node/peer-info'; @import 'consul-ui/components/consul/peer/info'; @import 'consul-ui/components/consul/peer/form'; +@import 'consul-ui/components/consul/hcp/home'; diff --git a/ui/packages/consul-ui/vendor/consul-ui/services.js b/ui/packages/consul-ui/vendor/consul-ui/services.js index 13f2f054b3..2b2258d52c 100644 --- a/ui/packages/consul-ui/vendor/consul-ui/services.js +++ b/ui/packages/consul-ui/vendor/consul-ui/services.js @@ -18,6 +18,9 @@ 'component:consul/peer/selector': { class: '@glimmer/component', }, + 'component:consul/hcp/home': { + class: '@glimmer/component', + }, }))( ( json, From c02b841a3f022d373a35c2d3eb3d5a676481d3a6 Mon Sep 17 00:00:00 2001 From: Kyle Schochenmaier Date: Thu, 1 Sep 2022 13:33:37 -0500 Subject: [PATCH 79/93] [docs] update docs for kube-1.24 support (#14339) * update docs for kube-1.24 support. Co-authored-by: Jeff Boruszak <104028618+boruszak@users.noreply.github.com> --- website/content/docs/k8s/connect/index.mdx | 26 ++++++++++++++++++++-- 1 file changed, 24 insertions(+), 2 deletions(-) diff --git a/website/content/docs/k8s/connect/index.mdx b/website/content/docs/k8s/connect/index.mdx index 7a3c472cab..c84729fe9b 100644 --- a/website/content/docs/k8s/connect/index.mdx +++ b/website/content/docs/k8s/connect/index.mdx @@ -13,8 +13,8 @@ description: >- [Consul Service Mesh](/docs/connect) is a feature built into to Consul that enables automatic service-to-service authorization and connection encryption across your Consul services. Consul Service Mesh can be used with Kubernetes to secure pod -communication with other pods and external Kubernetes services. Consul Connect is used interchangeably with the name -Consul Service Mesh and is what will be used to refer to for Service Mesh functionality within Consul. +communication with other pods and external Kubernetes services. "Consul Connect" refers to the service mesh functionality within Consul and is used interchangeably with the name +"Consul Service Mesh." The Connect sidecar running Envoy can be automatically injected into pods in your cluster, making configuration for Kubernetes automatic. @@ -273,6 +273,27 @@ spec: `web` will target `containerPort` `8080` and select pods labeled `app: web`. `web-admin` will target `containerPort` `9090` and will also select the same pods. 
+~> Kubernetes 1.24+ only +In Kubernetes 1.24+ you need to [create a Kubernetes secret](https://kubernetes.io/docs/concepts/configuration/secret/#service-account-token-secrets) for each multi-port service that references the ServiceAccount, and the Kubernetes secret must have the same name as the ServiceAccount: + +```yaml +apiVersion: v1 +kind: Secret +metadata: + name: web + annotations: + kubernetes.io/service-account.name: web + type: kubernetes.io/service-account-token +--- +apiVersion: v1 +kind: Secret +metadata: + name: web-admin + annotations: + kubernetes.io/service-account.name: web-admin + type: kubernetes.io/service-account-token +``` + Create a Deployment with any chosen name, and use the following annotations: ```yaml consul.hashicorp.com/connect-inject: true @@ -355,6 +376,7 @@ The way this works is that a Consul service instance is being registered per por services in this case. An additional Envoy sidecar proxy and `connect-init` init container are also deployed per port in the Pod. So the upstream configuration can use the individual service names to reach each port as seen in the example. + #### Caveats for Multi-port Pods * Transparent proxy is not supported for multi-port Pods. * Metrics and metrics merging is not supported for multi-port Pods. From f1054dada9aecf2efb9a2d68b21719561200d5fd Mon Sep 17 00:00:00 2001 From: malizz Date: Thu, 1 Sep 2022 11:37:47 -0700 Subject: [PATCH 80/93] fix TestProxyConfigEntry (#14435) --- agent/structs/config_entry_test.go | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/agent/structs/config_entry_test.go b/agent/structs/config_entry_test.go index c08e823996..004f8b6fe1 100644 --- a/agent/structs/config_entry_test.go +++ b/agent/structs/config_entry_test.go @@ -2959,8 +2959,9 @@ func TestProxyConfigEntry(t *testing.T) { Name: "", }, expected: &ProxyConfigEntry{ - Name: ProxyConfigGlobal, - Kind: ProxyDefaults, + Name: ProxyConfigGlobal, + Kind: ProxyDefaults, + EnterpriseMeta: *acl.DefaultEnterpriseMeta(), }, }, } From 81d7cc41dcebd30a59e5c92c77728e5a3d9b2f9d Mon Sep 17 00:00:00 2001 From: Luke Kysow <1034429+lkysow@users.noreply.github.com> Date: Thu, 1 Sep 2022 14:03:35 -0700 Subject: [PATCH 81/93] Use proxy address for default check (#14433) When a sidecar proxy is registered, a check is automatically added. Previously, the address this check used was the underlying service's address instead of the proxy's address, even though the check is testing if the proxy is up. This worked in most cases because the proxy ran on the same IP as the underlying service but it's not guaranteed and so the proper default address should be the proxy's address. --- .changelog/14433.txt | 3 + agent/agent_endpoint_test.go | 2 +- agent/sidecar_service.go | 24 ++++-- agent/sidecar_service_test.go | 135 ++++++++++++++++++++++++++++++++++ 4 files changed, 155 insertions(+), 9 deletions(-) create mode 100644 .changelog/14433.txt diff --git a/.changelog/14433.txt b/.changelog/14433.txt new file mode 100644 index 0000000000..25167320c6 --- /dev/null +++ b/.changelog/14433.txt @@ -0,0 +1,3 @@ +```release-note:bug +checks: If set, use proxy address for automatically added sidecar check instead of service address. 
+``` diff --git a/agent/agent_endpoint_test.go b/agent/agent_endpoint_test.go index 67850f9ebd..d380d0d939 100644 --- a/agent/agent_endpoint_test.go +++ b/agent/agent_endpoint_test.go @@ -3764,7 +3764,7 @@ func testAgent_RegisterService_TranslateKeys(t *testing.T, extraHCL string) { fmt.Println("TCP Check:= ", v) } if hasNoCorrectTCPCheck { - t.Fatalf("Did not find the expected TCP Healtcheck '%s' in %#v ", tt.expectedTCPCheckStart, a.checkTCPs) + t.Fatalf("Did not find the expected TCP Healthcheck '%s' in %#v ", tt.expectedTCPCheckStart, a.checkTCPs) } require.Equal(t, sidecarSvc, gotSidecar) }) diff --git a/agent/sidecar_service.go b/agent/sidecar_service.go index e0cb24a0ea..a41d73d80a 100644 --- a/agent/sidecar_service.go +++ b/agent/sidecar_service.go @@ -127,9 +127,20 @@ func (a *Agent) sidecarServiceFromNodeService(ns *structs.NodeService, token str if err != nil { return nil, nil, "", err } - // Setup default check if none given + // Setup default check if none given. if len(checks) < 1 { - checks = sidecarDefaultChecks(ns.ID, sidecar.Proxy.LocalServiceAddress, sidecar.Port) + // The check should use the sidecar's address because it makes a request to the sidecar. + // If the sidecar's address is empty, we fall back to the address of the local service, as set in + // sidecar.Proxy.LocalServiceAddress, in the hope that the proxy is also accessible on that address + // (which in most cases it is because it's running as a sidecar in the same network). + // We could instead fall back to the address of the service as set by (ns.Address), but I've kept it using + // sidecar.Proxy.LocalServiceAddress so as to not change things too much in the + // process of fixing #14433. + checkAddress := sidecar.Address + if checkAddress == "" { + checkAddress = sidecar.Proxy.LocalServiceAddress + } + checks = sidecarDefaultChecks(ns.ID, checkAddress, sidecar.Port) } return sidecar, checks, token, nil @@ -202,14 +213,11 @@ func (a *Agent) sidecarPortFromServiceID(sidecarCompoundServiceID structs.Servic return sidecarPort, nil } -func sidecarDefaultChecks(serviceID string, localServiceAddress string, port int) []*structs.CheckType { - // Setup default check if none given +func sidecarDefaultChecks(serviceID string, address string, port int) []*structs.CheckType { return []*structs.CheckType{ { - Name: "Connect Sidecar Listening", - // Default to localhost rather than agent/service public IP. The checks - // can always be overridden if a non-loopback IP is needed. 
- TCP: ipaddr.FormatAddressPort(localServiceAddress, port), + Name: "Connect Sidecar Listening", + TCP: ipaddr.FormatAddressPort(address, port), Interval: 10 * time.Second, }, { diff --git a/agent/sidecar_service_test.go b/agent/sidecar_service_test.go index f095670ff5..39ab854a6b 100644 --- a/agent/sidecar_service_test.go +++ b/agent/sidecar_service_test.go @@ -215,6 +215,141 @@ func TestAgent_sidecarServiceFromNodeService(t *testing.T) { token: "foo", wantErr: "reserved for internal use", }, + { + name: "uses proxy address for check", + sd: &structs.ServiceDefinition{ + ID: "web1", + Name: "web", + Port: 1111, + Connect: &structs.ServiceConnect{ + SidecarService: &structs.ServiceDefinition{ + Address: "123.123.123.123", + Proxy: &structs.ConnectProxyConfig{ + LocalServiceAddress: "255.255.255.255", + }, + }, + }, + Address: "255.255.255.255", + }, + token: "foo", + wantNS: &structs.NodeService{ + EnterpriseMeta: *structs.DefaultEnterpriseMetaInDefaultPartition(), + Kind: structs.ServiceKindConnectProxy, + ID: "web1-sidecar-proxy", + Service: "web-sidecar-proxy", + Port: 2222, + Address: "123.123.123.123", + LocallyRegisteredAsSidecar: true, + Proxy: structs.ConnectProxyConfig{ + DestinationServiceName: "web", + DestinationServiceID: "web1", + LocalServiceAddress: "255.255.255.255", + LocalServicePort: 1111, + }, + }, + wantChecks: []*structs.CheckType{ + { + Name: "Connect Sidecar Listening", + TCP: "123.123.123.123:2222", + Interval: 10 * time.Second, + }, + { + Name: "Connect Sidecar Aliasing web1", + AliasService: "web1", + }, + }, + wantToken: "foo", + }, + { + name: "uses proxy.local_service_address for check if proxy address is empty", + sd: &structs.ServiceDefinition{ + ID: "web1", + Name: "web", + Port: 1111, + Connect: &structs.ServiceConnect{ + SidecarService: &structs.ServiceDefinition{ + Address: "", // Proxy address empty. + Proxy: &structs.ConnectProxyConfig{ + LocalServiceAddress: "1.2.3.4", + }, + }, + }, + Address: "", // Service address empty. 
+ }, + token: "foo", + wantNS: &structs.NodeService{ + EnterpriseMeta: *structs.DefaultEnterpriseMetaInDefaultPartition(), + Kind: structs.ServiceKindConnectProxy, + ID: "web1-sidecar-proxy", + Service: "web-sidecar-proxy", + Port: 2222, + Address: "", + LocallyRegisteredAsSidecar: true, + Proxy: structs.ConnectProxyConfig{ + DestinationServiceName: "web", + DestinationServiceID: "web1", + LocalServiceAddress: "1.2.3.4", + LocalServicePort: 1111, + }, + }, + wantChecks: []*structs.CheckType{ + { + Name: "Connect Sidecar Listening", + TCP: "1.2.3.4:2222", + Interval: 10 * time.Second, + }, + { + Name: "Connect Sidecar Aliasing web1", + AliasService: "web1", + }, + }, + wantToken: "foo", + }, + { + name: "uses 127.0.0.1 for check if proxy and proxy.local_service_address are empty", + sd: &structs.ServiceDefinition{ + ID: "web1", + Name: "web", + Port: 1111, + Connect: &structs.ServiceConnect{ + SidecarService: &structs.ServiceDefinition{ + Address: "", + Proxy: &structs.ConnectProxyConfig{ + LocalServiceAddress: "", + }, + }, + }, + Address: "", + }, + token: "foo", + wantNS: &structs.NodeService{ + EnterpriseMeta: *structs.DefaultEnterpriseMetaInDefaultPartition(), + Kind: structs.ServiceKindConnectProxy, + ID: "web1-sidecar-proxy", + Service: "web-sidecar-proxy", + Port: 2222, + Address: "", + LocallyRegisteredAsSidecar: true, + Proxy: structs.ConnectProxyConfig{ + DestinationServiceName: "web", + DestinationServiceID: "web1", + LocalServiceAddress: "127.0.0.1", + LocalServicePort: 1111, + }, + }, + wantChecks: []*structs.CheckType{ + { + Name: "Connect Sidecar Listening", + TCP: "127.0.0.1:2222", + Interval: 10 * time.Second, + }, + { + Name: "Connect Sidecar Aliasing web1", + AliasService: "web1", + }, + }, + wantToken: "foo", + }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { From e617e7df3e7b92b75ce76eba09ddbcf73f76ea95 Mon Sep 17 00:00:00 2001 From: DanStough Date: Wed, 31 Aug 2022 12:58:41 -0400 Subject: [PATCH 82/93] feat(cli): add initial peering cli commands --- .changelog/14423.txt | 3 + command/flags/http.go | 4 + command/peering/delete/delete.go | 91 ++++++++++ command/peering/delete/delete_test.go | 70 ++++++++ command/peering/establish/establish.go | 109 ++++++++++++ command/peering/establish/establish_test.go | 127 ++++++++++++++ command/peering/generate/generate.go | 139 +++++++++++++++ command/peering/generate/generate_test.go | 141 +++++++++++++++ command/peering/list/list.go | 139 +++++++++++++++ command/peering/list/list_test.go | 133 ++++++++++++++ command/peering/peering.go | 69 ++++++++ command/peering/read/read.go | 164 ++++++++++++++++++ command/peering/read/read_test.go | 135 ++++++++++++++ command/registry.go | 12 ++ testrpc/wait.go | 7 +- website/content/api-docs/peering.mdx | 18 +- website/content/commands/index.mdx | 1 + website/content/commands/peering/delete.mdx | 50 ++++++ .../content/commands/peering/establish.mdx | 52 ++++++ .../commands/peering/generate-token.mdx | 68 ++++++++ website/content/commands/peering/index.mdx | 40 +++++ website/content/commands/peering/list.mdx | 47 +++++ website/content/commands/peering/read.mdx | 62 +++++++ .../cluster-peering/create-manage-peering.mdx | 86 +++++++++ website/data/commands-nav-data.json | 29 ++++ 25 files changed, 1780 insertions(+), 16 deletions(-) create mode 100644 .changelog/14423.txt create mode 100644 command/peering/delete/delete.go create mode 100644 command/peering/delete/delete_test.go create mode 100644 command/peering/establish/establish.go create mode 100644 
command/peering/establish/establish_test.go create mode 100644 command/peering/generate/generate.go create mode 100644 command/peering/generate/generate_test.go create mode 100644 command/peering/list/list.go create mode 100644 command/peering/list/list_test.go create mode 100644 command/peering/peering.go create mode 100644 command/peering/read/read.go create mode 100644 command/peering/read/read_test.go create mode 100644 website/content/commands/peering/delete.mdx create mode 100644 website/content/commands/peering/establish.mdx create mode 100644 website/content/commands/peering/generate-token.mdx create mode 100644 website/content/commands/peering/index.mdx create mode 100644 website/content/commands/peering/list.mdx create mode 100644 website/content/commands/peering/read.mdx diff --git a/.changelog/14423.txt b/.changelog/14423.txt new file mode 100644 index 0000000000..fd40339458 --- /dev/null +++ b/.changelog/14423.txt @@ -0,0 +1,3 @@ +```release-note:feature +cli: Adds new subcommands for `peering` workflows. Refer to the [CLI docs](https://www.consul.io/commands/peering) for more information. +``` diff --git a/command/flags/http.go b/command/flags/http.go index 139ab7ed08..e82e024fbb 100644 --- a/command/flags/http.go +++ b/command/flags/http.go @@ -98,6 +98,10 @@ func (f *HTTPFlags) Datacenter() string { return f.datacenter.String() } +func (f *HTTPFlags) Partition() string { + return f.partition.String() +} + func (f *HTTPFlags) Stale() bool { if f.stale.v == nil { return false diff --git a/command/peering/delete/delete.go b/command/peering/delete/delete.go new file mode 100644 index 0000000000..cb98189006 --- /dev/null +++ b/command/peering/delete/delete.go @@ -0,0 +1,91 @@ +package delete + +import ( + "context" + "flag" + "fmt" + + "github.com/mitchellh/cli" + + "github.com/hashicorp/consul/api" + "github.com/hashicorp/consul/command/flags" +) + +func New(ui cli.Ui) *cmd { + c := &cmd{UI: ui} + c.init() + return c +} + +type cmd struct { + UI cli.Ui + flags *flag.FlagSet + http *flags.HTTPFlags + help string + + name string +} + +func (c *cmd) init() { + c.flags = flag.NewFlagSet("", flag.ContinueOnError) + + c.flags.StringVar(&c.name, "name", "", "(Required) The local name assigned to the peer cluster.") + + c.http = &flags.HTTPFlags{} + flags.Merge(c.flags, c.http.ClientFlags()) + flags.Merge(c.flags, c.http.PartitionFlag()) + c.help = flags.Usage(help, c.flags) +} + +func (c *cmd) Run(args []string) int { + if err := c.flags.Parse(args); err != nil { + return 1 + } + + if c.name == "" { + c.UI.Error("Missing the required -name flag") + return 1 + } + + client, err := c.http.APIClient() + if err != nil { + c.UI.Error(fmt.Sprintf("Error connecting to Consul agent: %s", err)) + return 1 + } + + peerings := client.Peerings() + + _, err = peerings.Delete(context.Background(), c.name, &api.WriteOptions{}) + if err != nil { + c.UI.Error(fmt.Sprintf("Error deleting peering for %s: %v", c.name, err)) + return 1 + } + + c.UI.Info(fmt.Sprintf("Successfully submitted peering connection, %s, for deletion", c.name)) + return 0 +} + +func (c *cmd) Synopsis() string { + return synopsis +} + +func (c *cmd) Help() string { + return flags.Usage(c.help, nil) +} + +const ( + synopsis = "Delete a peering connection" + help = ` +Usage: consul peering delete [options] -name + + Delete a peering connection. Consul deletes all data imported from the peer + in the background. The peering connection is removed after all associated + data has been deleted. 
Operators can still read the peering connections + while the data is being removed. A 'DeletedAt' field will be populated with + the timestamp of when the peering was marked for deletion. + + Example: + + $ consul peering delete -name west-dc +` +) diff --git a/command/peering/delete/delete_test.go b/command/peering/delete/delete_test.go new file mode 100644 index 0000000000..984e773f57 --- /dev/null +++ b/command/peering/delete/delete_test.go @@ -0,0 +1,70 @@ +package delete + +import ( + "context" + "strings" + "testing" + + "github.com/mitchellh/cli" + "github.com/stretchr/testify/require" + + "github.com/hashicorp/consul/agent" + "github.com/hashicorp/consul/api" + "github.com/hashicorp/consul/testrpc" +) + +func TestDeleteCommand_noTabs(t *testing.T) { + t.Parallel() + + if strings.ContainsRune(New(cli.NewMockUi()).Help(), '\t') { + t.Fatal("help has tabs") + } +} + +func TestDeleteCommand(t *testing.T) { + if testing.Short() { + t.Skip("too slow for testing.Short") + } + + t.Parallel() + + acceptor := agent.NewTestAgent(t, ``) + t.Cleanup(func() { _ = acceptor.Shutdown() }) + + testrpc.WaitForTestAgent(t, acceptor.RPC, "dc1") + + acceptingClient := acceptor.Client() + + t.Run("name is required", func(t *testing.T) { + ui := cli.NewMockUi() + cmd := New(ui) + + args := []string{ + "-http-addr=" + acceptor.HTTPAddr(), + } + + code := cmd.Run(args) + require.Equal(t, 1, code, "err: %s", ui.ErrorWriter.String()) + require.Contains(t, ui.ErrorWriter.String(), "Missing the required -name flag") + }) + + t.Run("delete connection", func(t *testing.T) { + + req := api.PeeringGenerateTokenRequest{PeerName: "foo"} + _, _, err := acceptingClient.Peerings().GenerateToken(context.Background(), req, &api.WriteOptions{}) + require.NoError(t, err, "Could not generate peering token at acceptor") + + ui := cli.NewMockUi() + cmd := New(ui) + + args := []string{ + "-http-addr=" + acceptor.HTTPAddr(), + "-name=foo", + } + + code := cmd.Run(args) + require.Equal(t, 0, code) + output := ui.OutputWriter.String() + require.Contains(t, output, "Success") + }) +} diff --git a/command/peering/establish/establish.go b/command/peering/establish/establish.go new file mode 100644 index 0000000000..14cd0e310e --- /dev/null +++ b/command/peering/establish/establish.go @@ -0,0 +1,109 @@ +package establish + +import ( + "context" + "flag" + "fmt" + + "github.com/mitchellh/cli" + + "github.com/hashicorp/consul/api" + "github.com/hashicorp/consul/command/flags" +) + +func New(ui cli.Ui) *cmd { + c := &cmd{UI: ui} + c.init() + return c +} + +type cmd struct { + UI cli.Ui + flags *flag.FlagSet + http *flags.HTTPFlags + help string + + name string + peeringToken string + meta map[string]string +} + +func (c *cmd) init() { + c.flags = flag.NewFlagSet("", flag.ContinueOnError) + + c.flags.StringVar(&c.name, "name", "", "(Required) The local name assigned to the peer cluster.") + + c.flags.StringVar(&c.peeringToken, "peering-token", "", "(Required) The peering token from the accepting cluster.") + + c.flags.Var((*flags.FlagMapValue)(&c.meta), "meta", + "Metadata to associate with the peering, formatted as key=value. 
This flag "+ "may be specified multiple times to set multiple meta fields.") + + c.http = &flags.HTTPFlags{} + flags.Merge(c.flags, c.http.ClientFlags()) + flags.Merge(c.flags, c.http.PartitionFlag()) + c.help = flags.Usage(help, c.flags) +} + +func (c *cmd) Run(args []string) int { + if err := c.flags.Parse(args); err != nil { + return 1 + } + + if c.name == "" { + c.UI.Error("Missing the required -name flag") + return 1 + } + + if c.peeringToken == "" { + c.UI.Error("Missing the required -peering-token flag") + return 1 + } + + client, err := c.http.APIClient() + if err != nil { + c.UI.Error(fmt.Sprintf("Error connecting to Consul agent: %s", err)) + return 1 + } + + peerings := client.Peerings() + + req := api.PeeringEstablishRequest{ + PeerName: c.name, + PeeringToken: c.peeringToken, + Partition: c.http.Partition(), + Meta: c.meta, + } + + _, _, err = peerings.Establish(context.Background(), req, &api.WriteOptions{}) + if err != nil { + c.UI.Error(fmt.Sprintf("Error establishing peering for %s: %v", req.PeerName, err)) + return 1 + } + + c.UI.Info(fmt.Sprintf("Successfully established peering connection with %s", req.PeerName)) + return 0 +} + +func (c *cmd) Synopsis() string { + return synopsis +} + +func (c *cmd) Help() string { + return flags.Usage(c.help, nil) +} + +const ( + synopsis = "Establish a peering connection" + help = ` +Usage: consul peering establish [options] -name <name> -peering-token <token> + + Establish a peering connection. The name provided will be used locally by + this cluster to refer to the peering connection. The peering token can + only be used once to establish the connection. + + Example: + + $ consul peering establish -name west-dc -peering-token <token> +` +)
diff --git a/command/peering/establish/establish_test.go b/command/peering/establish/establish_test.go new file mode 100644 index 0000000000..95e7da5052 --- /dev/null +++ b/command/peering/establish/establish_test.go @@ -0,0 +1,127 @@ +package establish + +import ( + "context" + "fmt" + "strings" + "testing" + + "github.com/mitchellh/cli" + "github.com/stretchr/testify/require" + + "github.com/hashicorp/consul/agent" + "github.com/hashicorp/consul/api" + "github.com/hashicorp/consul/testrpc" +) + +func TestEstablishCommand_noTabs(t *testing.T) { + t.Parallel() + + if strings.ContainsRune(New(cli.NewMockUi()).Help(), '\t') { + t.Fatal("help has tabs") + } +} + +func TestEstablishCommand(t *testing.T) { + if testing.Short() { + t.Skip("too slow for testing.Short") + } + + t.Parallel() + + acceptor := agent.NewTestAgent(t, ``) + t.Cleanup(func() { _ = acceptor.Shutdown() }) + + dialer := agent.NewTestAgent(t, ``) + t.Cleanup(func() { _ = dialer.Shutdown() }) + + testrpc.WaitForTestAgent(t, acceptor.RPC, "dc1") + testrpc.WaitForTestAgent(t, dialer.RPC, "dc1") + + acceptingClient := acceptor.Client() + dialingClient := dialer.Client() + + t.Run("name is required", func(t *testing.T) { + ui := cli.NewMockUi() + cmd := New(ui) + + args := []string{ + "-http-addr=" + dialer.HTTPAddr(), + "-peering-token=1234abcde", + } + + code := cmd.Run(args) + require.Equal(t, 1, code, "err: %s", ui.ErrorWriter.String()) + require.Contains(t, ui.ErrorWriter.String(), "Missing the required -name flag") + }) + + t.Run("peering token is required", func(t *testing.T) { + ui := cli.NewMockUi() + cmd := New(ui) + + args := []string{ + "-http-addr=" + dialer.HTTPAddr(), + "-name=bar", + } + + code := cmd.Run(args) + require.Equal(t, 1, code, "err: %s", ui.ErrorWriter.String()) + require.Contains(t, ui.ErrorWriter.String(), "Missing the required
-peering-token flag") + }) + + t.Run("establish connection", func(t *testing.T) { + // Grab the token from the acceptor + req := api.PeeringGenerateTokenRequest{PeerName: "foo"} + res, _, err := acceptingClient.Peerings().GenerateToken(context.Background(), req, &api.WriteOptions{}) + require.NoError(t, err, "Could not generate peering token at acceptor") + + ui := cli.NewMockUi() + cmd := New(ui) + + args := []string{ + "-http-addr=" + dialer.HTTPAddr(), + "-name=bar", + fmt.Sprintf("-peering-token=%s", res.PeeringToken), + } + + code := cmd.Run(args) + require.Equal(t, 0, code) + output := ui.OutputWriter.String() + require.Contains(t, output, "Success") + }) + + t.Run("establish connection with options", func(t *testing.T) { + // Grab the token from the acceptor + req := api.PeeringGenerateTokenRequest{PeerName: "foo"} + res, _, err := acceptingClient.Peerings().GenerateToken(context.Background(), req, &api.WriteOptions{}) + require.NoError(t, err, "Could not generate peering token at acceptor") + + ui := cli.NewMockUi() + cmd := New(ui) + + args := []string{ + "-http-addr=" + dialer.HTTPAddr(), + "-name=bar", + fmt.Sprintf("-peering-token=%s", res.PeeringToken), + "-meta=env=production", + "-meta=region=us-west-1", + } + + code := cmd.Run(args) + require.Equal(t, 0, code) + output := ui.OutputWriter.String() + require.Contains(t, output, "Success") + + // Meta + peering, _, err := dialingClient.Peerings().Read(context.Background(), "bar", &api.QueryOptions{}) + require.NoError(t, err) + + actual, ok := peering.Meta["env"] + require.True(t, ok) + require.Equal(t, "production", actual) + + actual, ok = peering.Meta["region"] + require.True(t, ok) + require.Equal(t, "us-west-1", actual) + }) +}
diff --git a/command/peering/generate/generate.go b/command/peering/generate/generate.go new file mode 100644 index 0000000000..cbbb230098 --- /dev/null +++ b/command/peering/generate/generate.go @@ -0,0 +1,139 @@ +package generate + +import ( + "context" + "encoding/json" + "flag" + "fmt" + "strings" + + "github.com/mitchellh/cli" + + "github.com/hashicorp/consul/api" + "github.com/hashicorp/consul/command/flags" + "github.com/hashicorp/consul/command/peering" +) + +func New(ui cli.Ui) *cmd { + c := &cmd{UI: ui} + c.init() + return c +} + +type cmd struct { + UI cli.Ui + flags *flag.FlagSet + http *flags.HTTPFlags + help string + + name string + externalAddresses []string + meta map[string]string + format string +} + +func (c *cmd) init() { + c.flags = flag.NewFlagSet("", flag.ContinueOnError) + + c.flags.StringVar(&c.name, "name", "", "(Required) The local name assigned to the peer cluster.") + + c.flags.Var((*flags.FlagMapValue)(&c.meta), "meta", + "Metadata to associate with the peering, formatted as key=value. This flag "+ "may be specified multiple times to set multiple metadata fields.") + + c.flags.Var((*flags.AppendSliceValue)(&c.externalAddresses), "server-external-addresses", + "A list of addresses to put into the generated token, formatted as a comma-separated list. "+ "Addresses are of the form <host or IP>:port. "+ "This could be used to specify load balancer(s) or external IPs to reach the servers from "+ "the dialing side, and will override any server addresses obtained from the \"consul\" service.") + + c.flags.StringVar( + &c.format, + "format", + peering.PeeringFormatPretty, + fmt.Sprintf("Output format {%s} (default: %s)", strings.Join(peering.GetSupportedFormats(), "|"), peering.PeeringFormatPretty), + ) + + c.http = &flags.HTTPFlags{} + flags.Merge(c.flags, c.http.ClientFlags()) + flags.Merge(c.flags, c.http.PartitionFlag()) + c.help = flags.Usage(help, c.flags) +} + +func (c *cmd) Run(args []string) int { + if err := c.flags.Parse(args); err != nil { + return 1 + } + + if c.name == "" { + c.UI.Error("Missing the required -name flag") + return 1 + } + + if !peering.FormatIsValid(c.format) { + c.UI.Error(fmt.Sprintf("Invalid format, valid formats are {%s}", strings.Join(peering.GetSupportedFormats(), "|"))) + return 1 + } + + client, err := c.http.APIClient() + if err != nil { + c.UI.Error(fmt.Sprintf("Error connecting to Consul agent: %s", err)) + return 1 + } + + peerings := client.Peerings() + + req := api.PeeringGenerateTokenRequest{ + PeerName: c.name, + Partition: c.http.Partition(), + Meta: c.meta, + ServerExternalAddresses: c.externalAddresses, + } + + res, _, err := peerings.GenerateToken(context.Background(), req, &api.WriteOptions{}) + if err != nil { + c.UI.Error(fmt.Sprintf("Error generating peering token for %s: %v", req.PeerName, err)) + return 1 + } + + if c.format == peering.PeeringFormatJSON { + output, err := json.Marshal(res) + if err != nil { + c.UI.Error(fmt.Sprintf("Error marshalling JSON: %s", err)) + return 1 + } + c.UI.Output(string(output)) + return 0 + } + + c.UI.Info(res.PeeringToken) + return 0 +} + +func (c *cmd) Synopsis() string { + return synopsis +} + +func (c *cmd) Help() string { + return flags.Usage(c.help, nil) +} + +const ( + synopsis = "Generate a peering token" + help = ` +Usage: consul peering generate-token [options] -name <name> + + Generate a peering token. The name provided will be used locally by + this cluster to refer to the peering connection. Re-generating a token + for a given name will not interrupt any active connection, but will + invalidate any unused token for that name.
+ + Example: + + $ consul peering generate-token -name west-dc + + Example using a load balancer in front of Consul servers: + + $ consul peering generate-token -name west-dc -server-external-addresses load-balancer.elb.us-west-1.amazonaws.com:8502 +` +) diff --git a/command/peering/generate/generate_test.go b/command/peering/generate/generate_test.go new file mode 100644 index 0000000000..c745976104 --- /dev/null +++ b/command/peering/generate/generate_test.go @@ -0,0 +1,141 @@ +package generate + +import ( + "context" + "encoding/base64" + "encoding/json" + "strings" + "testing" + + "github.com/mitchellh/cli" + "github.com/stretchr/testify/require" + + "github.com/hashicorp/consul/agent" + "github.com/hashicorp/consul/api" + "github.com/hashicorp/consul/testrpc" +) + +func TestGenerateCommand_noTabs(t *testing.T) { + t.Parallel() + + if strings.ContainsRune(New(cli.NewMockUi()).Help(), '\t') { + t.Fatal("help has tabs") + } +} + +func TestGenerateCommand(t *testing.T) { + if testing.Short() { + t.Skip("too slow for testing.Short") + } + + t.Parallel() + + a := agent.NewTestAgent(t, ``) + t.Cleanup(func() { _ = a.Shutdown() }) + testrpc.WaitForTestAgent(t, a.RPC, "dc1") + + client := a.Client() + + t.Run("name is required", func(t *testing.T) { + ui := cli.NewMockUi() + cmd := New(ui) + + args := []string{ + "-http-addr=" + a.HTTPAddr(), + } + + code := cmd.Run(args) + require.Equal(t, 1, code, "err: %s", ui.ErrorWriter.String()) + require.Contains(t, ui.ErrorWriter.String(), "Missing the required -name flag") + }) + + t.Run("invalid format", func(t *testing.T) { + ui := cli.NewMockUi() + cmd := New(ui) + + args := []string{ + "-http-addr=" + a.HTTPAddr(), + "-name=foo", + "-format=toml", + } + + code := cmd.Run(args) + require.Equal(t, 1, code, "exited successfully when it should have failed") + output := ui.ErrorWriter.String() + require.Contains(t, output, "Invalid format") + }) + + t.Run("generate token", func(t *testing.T) { + ui := cli.NewMockUi() + cmd := New(ui) + + args := []string{ + "-http-addr=" + a.HTTPAddr(), + "-name=foo", + } + + code := cmd.Run(args) + require.Equal(t, 0, code) + token, err := base64.StdEncoding.DecodeString(ui.OutputWriter.String()) + require.NoError(t, err, "error decoding token") + require.Contains(t, string(token), "\"ServerName\":\"server.dc1.consul\"") + }) + + t.Run("generate token with options", func(t *testing.T) { + ui := cli.NewMockUi() + cmd := New(ui) + + args := []string{ + "-http-addr=" + a.HTTPAddr(), + "-name=bar", + "-server-external-addresses=1.2.3.4,5.6.7.8", + "-meta=env=production", + "-meta=region=us-east-1", + } + + code := cmd.Run(args) + require.Equal(t, 0, code) + token, err := base64.StdEncoding.DecodeString(ui.OutputWriter.String()) + require.NoError(t, err, "error decoding token") + require.Contains(t, string(token), "\"ServerName\":\"server.dc1.consul\"") + + //ServerExternalAddresses + require.Contains(t, string(token), "1.2.3.4") + require.Contains(t, string(token), "5.6.7.8") + + //Meta + peering, _, err := client.Peerings().Read(context.Background(), "bar", &api.QueryOptions{}) + require.NoError(t, err) + + actual, ok := peering.Meta["env"] + require.True(t, ok) + require.Equal(t, "production", actual) + + actual, ok = peering.Meta["region"] + require.True(t, ok) + require.Equal(t, "us-east-1", actual) + }) + + t.Run("read with json", func(t *testing.T) { + + ui := cli.NewMockUi() + cmd := New(ui) + + args := []string{ + "-http-addr=" + a.HTTPAddr(), + "-name=baz", + "-format=json", + } + + code := cmd.Run(args) + 
require.Equal(t, 0, code) + output := ui.OutputWriter.Bytes() + + var outputRes api.PeeringGenerateTokenResponse + require.NoError(t, json.Unmarshal(output, &outputRes)) + + token, err := base64.StdEncoding.DecodeString(outputRes.PeeringToken) + require.NoError(t, err, "error decoding token") + require.Contains(t, string(token), "\"ServerName\":\"server.dc1.consul\"") + }) +} diff --git a/command/peering/list/list.go b/command/peering/list/list.go new file mode 100644 index 0000000000..c445e3d57a --- /dev/null +++ b/command/peering/list/list.go @@ -0,0 +1,139 @@ +package list + +import ( + "context" + "encoding/json" + "flag" + "fmt" + "sort" + "strings" + + "github.com/mitchellh/cli" + "github.com/ryanuber/columnize" + + "github.com/hashicorp/consul/api" + "github.com/hashicorp/consul/command/flags" + "github.com/hashicorp/consul/command/peering" +) + +func New(ui cli.Ui) *cmd { + c := &cmd{UI: ui} + c.init() + return c +} + +type cmd struct { + UI cli.Ui + flags *flag.FlagSet + http *flags.HTTPFlags + help string + + format string +} + +func (c *cmd) init() { + c.flags = flag.NewFlagSet("", flag.ContinueOnError) + + c.flags.StringVar( + &c.format, + "format", + peering.PeeringFormatPretty, + fmt.Sprintf("Output format {%s} (default: %s)", strings.Join(peering.GetSupportedFormats(), "|"), peering.PeeringFormatPretty), + ) + + c.http = &flags.HTTPFlags{} + flags.Merge(c.flags, c.http.ClientFlags()) + flags.Merge(c.flags, c.http.PartitionFlag()) + c.help = flags.Usage(help, c.flags) +} + +func (c *cmd) Run(args []string) int { + if err := c.flags.Parse(args); err != nil { + return 1 + } + + if !peering.FormatIsValid(c.format) { + c.UI.Error(fmt.Sprintf("Invalid format, valid formats are {%s}", strings.Join(peering.GetSupportedFormats(), "|"))) + return 1 + } + + client, err := c.http.APIClient() + if err != nil { + c.UI.Error(fmt.Sprintf("Error connecting to Consul agent: %s", err)) + return 1 + } + + peerings := client.Peerings() + + res, _, err := peerings.List(context.Background(), &api.QueryOptions{}) + if err != nil { + c.UI.Error(fmt.Sprintf("Error listing peerings: %s", err)) + return 1 + } + + list := peeringList(res) + sort.Sort(list) + + if c.format == peering.PeeringFormatJSON { + output, err := json.Marshal(list) + if err != nil { + c.UI.Error(fmt.Sprintf("Error marshalling JSON: %s", err)) + return 1 + } + c.UI.Output(string(output)) + return 0 + } + + if len(res) == 0 { + c.UI.Info("There are no peering connections.") + return 0 + } + + result := make([]string, 0, len(list)) + header := "Name\x1fState\x1fImported Svcs\x1fExported Svcs\x1fMeta" + result = append(result, header) + for _, peer := range list { + metaPairs := make([]string, 0, len(peer.Meta)) + for k, v := range peer.Meta { + metaPairs = append(metaPairs, fmt.Sprintf("%s=%s", k, v)) + } + meta := strings.Join(metaPairs, ",") + line := fmt.Sprintf("%s\x1f%s\x1f%d\x1f%d\x1f%s", + peer.Name, peer.State, peer.ImportedServiceCount, peer.ExportedServiceCount, meta) + result = append(result, line) + } + + output := columnize.Format(result, &columnize.Config{Delim: string([]byte{0x1f})}) + c.UI.Output(output) + + return 0 +} + +func (c *cmd) Synopsis() string { + return synopsis +} + +func (c *cmd) Help() string { + return flags.Usage(c.help, nil) +} + +const ( + synopsis = "List peering connections" + help = ` +Usage: consul peering list [options] + + List all peering connections. The results will be filtered according + to ACL policy configuration.
+ + Example: + + $ consul peering list +` +) + +// peeringList applies sort.Interface to a list of peering connections for sorting by name. +type peeringList []*api.Peering + +func (d peeringList) Len() int { return len(d) } +func (d peeringList) Less(i, j int) bool { return d[i].Name < d[j].Name } +func (d peeringList) Swap(i, j int) { d[i], d[j] = d[j], d[i] } diff --git a/command/peering/list/list_test.go b/command/peering/list/list_test.go new file mode 100644 index 0000000000..06f9248b08 --- /dev/null +++ b/command/peering/list/list_test.go @@ -0,0 +1,133 @@ +package list + +import ( + "context" + "encoding/json" + "strings" + "testing" + + "github.com/mitchellh/cli" + "github.com/stretchr/testify/require" + + "github.com/hashicorp/consul/agent" + "github.com/hashicorp/consul/api" + "github.com/hashicorp/consul/testrpc" +) + +func TestListCommand_noTabs(t *testing.T) { + t.Parallel() + + if strings.ContainsRune(New(cli.NewMockUi()).Help(), '\t') { + t.Fatal("help has tabs") + } +} + +func TestListCommand(t *testing.T) { + if testing.Short() { + t.Skip("too slow for testing.Short") + } + + t.Parallel() + + acceptor := agent.NewTestAgent(t, ``) + t.Cleanup(func() { _ = acceptor.Shutdown() }) + + testrpc.WaitForTestAgent(t, acceptor.RPC, "dc1") + + acceptingClient := acceptor.Client() + + t.Run("invalid format", func(t *testing.T) { + ui := cli.NewMockUi() + cmd := New(ui) + + args := []string{ + "-http-addr=" + acceptor.HTTPAddr(), + "-format=toml", + } + + code := cmd.Run(args) + require.Equal(t, 1, code, "exited successfully when it should have failed") + output := ui.ErrorWriter.String() + require.Contains(t, output, "Invalid format") + }) + + t.Run("no results - pretty", func(t *testing.T) { + ui := cli.NewMockUi() + cmd := New(ui) + + args := []string{ + "-http-addr=" + acceptor.HTTPAddr(), + } + + code := cmd.Run(args) + require.Equal(t, 0, code) + output := ui.OutputWriter.String() + require.Contains(t, output, "no peering connections") + }) + + t.Run("two results for pretty print", func(t *testing.T) { + + generateReq := api.PeeringGenerateTokenRequest{PeerName: "foo"} + _, _, err := acceptingClient.Peerings().GenerateToken(context.Background(), generateReq, &api.WriteOptions{}) + require.NoError(t, err, "Could not generate peering token at acceptor for \"foo\"") + + generateReq = api.PeeringGenerateTokenRequest{PeerName: "bar"} + _, _, err = acceptingClient.Peerings().GenerateToken(context.Background(), generateReq, &api.WriteOptions{}) + require.NoError(t, err, "Could not generate peering token at acceptor for \"bar\"") + + ui := cli.NewMockUi() + cmd := New(ui) + + args := []string{ + "-http-addr=" + acceptor.HTTPAddr(), + } + + code := cmd.Run(args) + require.Equal(t, 0, code) + output := ui.OutputWriter.String() + require.Equal(t, 3, strings.Count(output, "\n")) // There should be three lines including the header + + lines := strings.Split(output, "\n") + + require.Contains(t, lines[0], "Name") + require.Contains(t, lines[1], "bar") + require.Contains(t, lines[2], "foo") + }) + + t.Run("no results - json", func(t *testing.T) { + ui := cli.NewMockUi() + cmd := New(ui) + + args := []string{ + "-http-addr=" + acceptor.HTTPAddr(), + "-format=json", + } + + code := cmd.Run(args) + require.Equal(t, 0, code) + output := ui.OutputWriter.String() + require.Contains(t, output, "[]") + }) + + t.Run("two results for JSON print", func(t *testing.T) { + + ui := cli.NewMockUi() + cmd := New(ui) + + args := []string{ + "-http-addr=" + acceptor.HTTPAddr(), + "-format=json", + } + + code := 
cmd.Run(args) + require.Equal(t, 0, code) + output := ui.OutputWriter.Bytes() + + var outputList []*api.Peering + require.NoError(t, json.Unmarshal(output, &outputList)) + + require.Len(t, outputList, 2) + require.Equal(t, "bar", outputList[0].Name) + require.Equal(t, "foo", outputList[1].Name) + }) +} diff --git a/command/peering/peering.go b/command/peering/peering.go new file mode 100644 index 0000000000..1872f37387 --- /dev/null +++ b/command/peering/peering.go @@ -0,0 +1,69 @@ +package peering + +import ( + "github.com/mitchellh/cli" + + "github.com/hashicorp/consul/command/flags" +) + +const ( + PeeringFormatJSON = "json" + PeeringFormatPretty = "pretty" +) + +func GetSupportedFormats() []string { + return []string{PeeringFormatJSON, PeeringFormatPretty} +} + +func FormatIsValid(f string) bool { + return f == PeeringFormatPretty || f == PeeringFormatJSON +} + +func New() *cmd { + return &cmd{} +} + +type cmd struct{} + +func (c *cmd) Run(args []string) int { + return cli.RunResultHelp +} + +func (c *cmd) Synopsis() string { + return synopsis +} + +func (c *cmd) Help() string { + return flags.Usage(help, nil) +} + +const synopsis = "Create and manage peering connections between Consul clusters" +const help = ` +Usage: consul peering [options] [args] + + This command has subcommands for interacting with Cluster Peering + connections. Here are some simple examples, and more detailed + examples are available in the subcommands or the documentation. + + Generate a peering token: + + $ consul peering generate-token -name west-dc + + Establish a peering connection: + + $ consul peering establish -name east-dc -peering-token + + List all the local peering connections: + + $ consul peering list + + Print the status of a peering connection: + + $ consul peering read -name west-dc + + Delete and close a peering connection: + + $ consul peering delete -name west-dc + + For more examples, ask for subcommand help or view the documentation. 
+` } diff --git a/command/peering/read/read.go b/command/peering/read/read.go new file mode 100644 index 0000000000..c8340e19bc --- /dev/null +++ b/command/peering/read/read.go @@ -0,0 +1,164 @@ +package read + +import ( + "bytes" + "context" + "encoding/json" + "flag" + "fmt" + "strings" + "time" + + "github.com/mitchellh/cli" + + "github.com/hashicorp/consul/api" + "github.com/hashicorp/consul/command/flags" + "github.com/hashicorp/consul/command/peering" +) + +func New(ui cli.Ui) *cmd { + c := &cmd{UI: ui} + c.init() + return c +} + +type cmd struct { + UI cli.Ui + flags *flag.FlagSet + http *flags.HTTPFlags + help string + + name string + format string +} + +func (c *cmd) init() { + c.flags = flag.NewFlagSet("", flag.ContinueOnError) + + c.flags.StringVar(&c.name, "name", "", "(Required) The local name assigned to the peer cluster.") + + c.flags.StringVar( + &c.format, + "format", + peering.PeeringFormatPretty, + fmt.Sprintf("Output format {%s} (default: %s)", strings.Join(peering.GetSupportedFormats(), "|"), peering.PeeringFormatPretty), + ) + + c.http = &flags.HTTPFlags{} + flags.Merge(c.flags, c.http.ClientFlags()) + flags.Merge(c.flags, c.http.PartitionFlag()) + c.help = flags.Usage(help, c.flags) +} + +func (c *cmd) Run(args []string) int { + if err := c.flags.Parse(args); err != nil { + return 1 + } + + if c.name == "" { + c.UI.Error("Missing the required -name flag") + return 1 + } + + if !peering.FormatIsValid(c.format) { + c.UI.Error(fmt.Sprintf("Invalid format, valid formats are {%s}", strings.Join(peering.GetSupportedFormats(), "|"))) + return 1 + } + + client, err := c.http.APIClient() + if err != nil { + c.UI.Error(fmt.Sprintf("Error connecting to Consul agent: %s", err)) + return 1 + } + + peerings := client.Peerings() + + res, _, err := peerings.Read(context.Background(), c.name, &api.QueryOptions{}) + if err != nil { + c.UI.Error(fmt.Sprintf("Error reading peering: %s", err)) + return 1 + } + + if res == nil { + c.UI.Error(fmt.Sprintf("No peering with name %s found.", c.name)) + return 1 + } + + if c.format == peering.PeeringFormatJSON { + output, err := json.Marshal(res) + if err != nil { + c.UI.Error(fmt.Sprintf("Error marshalling JSON: %s", err)) + return 1 + } + c.UI.Output(string(output)) + return 0 + } + + c.UI.Output(formatPeering(res)) + + return 0 +} + +func formatPeering(peering *api.Peering) string { + var buffer bytes.Buffer + + buffer.WriteString(fmt.Sprintf("Name: %s\n", peering.Name)) + buffer.WriteString(fmt.Sprintf("ID: %s\n", peering.ID)) + if peering.Partition != "" { + buffer.WriteString(fmt.Sprintf("Partition: %s\n", peering.Partition)) + } + if peering.DeletedAt != nil { + buffer.WriteString(fmt.Sprintf("DeletedAt: %s\n", peering.DeletedAt.Format(time.RFC3339))) + } + buffer.WriteString(fmt.Sprintf("State: %s\n", peering.State)) + if len(peering.Meta) > 0 { + buffer.WriteString("Meta:\n") + for k, v := range peering.Meta { + buffer.WriteString(fmt.Sprintf(" %s=%s\n", k, v)) + } + } + + buffer.WriteString("\n") + buffer.WriteString(fmt.Sprintf("Peer ID: %s\n", peering.PeerID)) + buffer.WriteString(fmt.Sprintf("Peer Server Name: %s\n", peering.PeerServerName)) + buffer.WriteString(fmt.Sprintf("Peer CA Pems: %d\n", len(peering.PeerCAPems))) + if len(peering.PeerServerAddresses) > 0 { + buffer.WriteString("Peer Server Addresses:\n") + for _, v := range peering.PeerServerAddresses { + buffer.WriteString(fmt.Sprintf(" %s", v)) + } + } + + buffer.WriteString("\n") + buffer.WriteString(fmt.Sprintf("Imported Services: %d\n", peering.ImportedServiceCount)) + buffer.WriteString(fmt.Sprintf("Exported Services: %d\n", peering.ExportedServiceCount)) + + buffer.WriteString("\n") + buffer.WriteString(fmt.Sprintf("Create Index: %d\n", peering.CreateIndex)) + buffer.WriteString(fmt.Sprintf("Modify Index: %d\n", peering.ModifyIndex)) + + return buffer.String() +} + +func (c *cmd) Synopsis() string { + return synopsis +} + +func (c *cmd) Help() string { + return flags.Usage(c.help, nil) +} + +const ( + synopsis = "Read a peering connection" + help = ` +Usage: consul peering read [options] -name <name> + + Read a peering connection with the provided name. If one is not found, + the command will exit with a non-zero code. The result will be filtered according + to ACL policy configuration. + + Example: + + $ consul peering read -name west-dc +` +) diff --git a/command/peering/read/read_test.go b/command/peering/read/read_test.go new file mode 100644 index 0000000000..fe19e11000 --- /dev/null +++ b/command/peering/read/read_test.go @@ -0,0 +1,135 @@ +package read + +import ( + "context" + "encoding/json" + "strings" + "testing" + + "github.com/mitchellh/cli" + "github.com/stretchr/testify/require" + + "github.com/hashicorp/consul/agent" + "github.com/hashicorp/consul/api" + "github.com/hashicorp/consul/testrpc" +) + +func TestReadCommand_noTabs(t *testing.T) { + t.Parallel() + + if strings.ContainsRune(New(cli.NewMockUi()).Help(), '\t') { + t.Fatal("help has tabs") + } +} + +func TestReadCommand(t *testing.T) { + if testing.Short() { + t.Skip("too slow for testing.Short") + } + + t.Parallel() + + acceptor := agent.NewTestAgent(t, ``) + t.Cleanup(func() { _ = acceptor.Shutdown() }) + + testrpc.WaitForTestAgent(t, acceptor.RPC, "dc1") + + acceptingClient := acceptor.Client() + + t.Run("no name flag", func(t *testing.T) { + ui := cli.NewMockUi() + cmd := New(ui) + + args := []string{ + "-http-addr=" + acceptor.HTTPAddr(), + } + + code := cmd.Run(args) + require.Equal(t, 1, code, "err: %s", ui.ErrorWriter.String()) + require.Contains(t, ui.ErrorWriter.String(), "Missing the required -name flag") + }) + + t.Run("invalid format", func(t *testing.T) { + ui := cli.NewMockUi() + cmd := New(ui) + + args := []string{ + "-http-addr=" + acceptor.HTTPAddr(), + "-name=foo", + "-format=toml", + } + + code := cmd.Run(args) + require.Equal(t, 1, code, "exited successfully when it should have failed") + output := ui.ErrorWriter.String() + require.Contains(t, output, "Invalid format") + }) + + t.Run("peering does not exist", func(t *testing.T) { + ui := cli.NewMockUi() + cmd := New(ui) + + args := []string{ + "-http-addr=" + acceptor.HTTPAddr(), + "-name=foo", + } + + code := cmd.Run(args) + require.Equal(t, 1, code, "err: %s", ui.ErrorWriter.String()) + require.Contains(t, ui.ErrorWriter.String(), "No peering with name") + }) + + t.Run("read with pretty print", func(t *testing.T) { + + generateReq := api.PeeringGenerateTokenRequest{ + PeerName: "foo", + Meta: map[string]string{ + "env": "production", + }, + } + _, _, err := acceptingClient.Peerings().GenerateToken(context.Background(), generateReq, &api.WriteOptions{}) + require.NoError(t, err, "Could not generate peering token at acceptor for \"foo\"") + + ui := cli.NewMockUi() + cmd := New(ui) + + args := []string{ + "-http-addr=" + acceptor.HTTPAddr(), + "-name=foo", + } + + code := cmd.Run(args) + require.Equal(t, 0, code) + output := ui.OutputWriter.String() + require.Greater(t, strings.Count(output, "\n"), 0) // Sanity check that the output is not empty + + // Spot check some fields and
values + require.Contains(t, output, "foo") + require.Contains(t, output, api.PeeringStatePending) + require.Contains(t, output, "env=production") + require.Contains(t, output, "Imported Services") + require.Contains(t, output, "Exported Services") + }) + + t.Run("read with json", func(t *testing.T) { + + ui := cli.NewMockUi() + cmd := New(ui) + + args := []string{ + "-http-addr=" + acceptor.HTTPAddr(), + "-name=foo", + "-format=json", + } + + code := cmd.Run(args) + require.Equal(t, 0, code) + output := ui.OutputWriter.Bytes() + + var outputPeering api.Peering + require.NoError(t, json.Unmarshal(output, &outputPeering)) + + require.Equal(t, "foo", outputPeering.Name) + require.Equal(t, "production", outputPeering.Meta["env"]) + }) +} diff --git a/command/registry.go b/command/registry.go index 28e441e87f..b35ac2e424 100644 --- a/command/registry.go +++ b/command/registry.go @@ -96,6 +96,12 @@ import ( operraft "github.com/hashicorp/consul/command/operator/raft" operraftlist "github.com/hashicorp/consul/command/operator/raft/listpeers" operraftremove "github.com/hashicorp/consul/command/operator/raft/removepeer" + "github.com/hashicorp/consul/command/peering" + peerdelete "github.com/hashicorp/consul/command/peering/delete" + peerestablish "github.com/hashicorp/consul/command/peering/establish" + peergenerate "github.com/hashicorp/consul/command/peering/generate" + peerlist "github.com/hashicorp/consul/command/peering/list" + peerread "github.com/hashicorp/consul/command/peering/read" "github.com/hashicorp/consul/command/reload" "github.com/hashicorp/consul/command/rtt" "github.com/hashicorp/consul/command/services" @@ -214,6 +220,12 @@ func RegisteredCommands(ui cli.Ui) map[string]mcli.CommandFactory { entry{"operator raft", func(cli.Ui) (cli.Command, error) { return operraft.New(), nil }}, entry{"operator raft list-peers", func(ui cli.Ui) (cli.Command, error) { return operraftlist.New(ui), nil }}, entry{"operator raft remove-peer", func(ui cli.Ui) (cli.Command, error) { return operraftremove.New(ui), nil }}, + entry{"peering", func(cli.Ui) (cli.Command, error) { return peering.New(), nil }}, + entry{"peering delete", func(ui cli.Ui) (cli.Command, error) { return peerdelete.New(ui), nil }}, + entry{"peering generate-token", func(ui cli.Ui) (cli.Command, error) { return peergenerate.New(ui), nil }}, + entry{"peering establish", func(ui cli.Ui) (cli.Command, error) { return peerestablish.New(ui), nil }}, + entry{"peering list", func(ui cli.Ui) (cli.Command, error) { return peerlist.New(ui), nil }}, + entry{"peering read", func(ui cli.Ui) (cli.Command, error) { return peerread.New(ui), nil }}, entry{"reload", func(ui cli.Ui) (cli.Command, error) { return reload.New(ui), nil }}, entry{"rtt", func(ui cli.Ui) (cli.Command, error) { return rtt.New(ui), nil }}, entry{"services", func(cli.Ui) (cli.Command, error) { return services.New(), nil }}, diff --git a/testrpc/wait.go b/testrpc/wait.go index d6b72749e2..39e3d65922 100644 --- a/testrpc/wait.go +++ b/testrpc/wait.go @@ -11,7 +11,9 @@ import ( type rpcFn func(string, interface{}, interface{}) error -// WaitForLeader ensures we have a leader and a node registration. +// WaitForLeader ensures we have a leader and a node registration. It +// does not wait for the Consul (node) service to be ready. Use `WaitForTestAgent` +// to make sure the Consul service is ready. // // Most uses of this would be better served in the agent/consul package by // using waitForLeaderEstablishment() instead. 
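The WaitForLeader/WaitForTestAgent distinction these comment changes draw shows up in the peering command tests added earlier in this series; a minimal sketch of the intended pattern (assuming the same `agent` and `testrpc` packages those tests import):

```go
func TestPeeringCommandSetup(t *testing.T) {
	// Start an in-process test agent, as the peering command tests above do.
	a := agent.NewTestAgent(t, ``)
	t.Cleanup(func() { _ = a.Shutdown() })

	// WaitForLeader would only guarantee leadership and a node registration.
	// WaitForTestAgent additionally waits for the serfHealth check, so the
	// Consul (node) service is ready before the test exercises the agent.
	testrpc.WaitForTestAgent(t, a.RPC, "dc1")
}
```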
@@ -91,7 +93,8 @@ func flattenOptions(options []waitOption) waitOption { return flat } -// WaitForTestAgent ensures we have a node with serfHealth check registered +// WaitForTestAgent ensures we have a node with serfHealth check registered. +// You'll want to use this if you expect the Consul (node) service to be ready. func WaitForTestAgent(t *testing.T, rpc rpcFn, dc string, options ...waitOption) { t.Helper() diff --git a/website/content/api-docs/peering.mdx b/website/content/api-docs/peering.mdx index ef50fcb87b..102161319c 100644 --- a/website/content/api-docs/peering.mdx +++ b/website/content/api-docs/peering.mdx @@ -42,12 +42,9 @@ The table below shows this endpoint's support for - `Partition` `(string: "")` - The admin partition that the peering token is generated from. Uses `default` when not specified. -- `Datacenter` `(string: "")` - Specifies the datacenter where the peering token is generated. Defaults to the - agent's datacenter when not specified. - -- `Token` `(string: "")` - Specifies the ACL token to use in the request. Takes precedence - over the token specified in the `token` query parameter, `X-Consul-Token` request header, - and `CONSUL_HTTP_TOKEN` environment variable. +- `ServerExternalAddresses` `([]string: <optional>)` - A list of addresses to put +into the generated token. Addresses are in the form `{host or IP}:port`. +You can specify one or more load balancers or external IPs that route external traffic to this cluster's Consul servers. - `Meta` `(map<string|string>: <optional>)` - Specifies KV metadata to associate with the peering. This parameter is not required and does not directly impact the cluster @@ -116,13 +113,6 @@ The table below shows this endpoint's support for - `PeeringToken` `(string: <required>)` - The peering token fetched from the peer cluster. -- `Datacenter` `(string: "")` - Specifies the datacenter where the peering token is generated. Defaults to the - agent's datacenter when not specified. - -- `Token` `(string: "")` - Specifies the ACL token to use in the request. Takes precedence - over the token specified in the `token` query parameter, `X-Consul-Token` request header, - and `CONSUL_HTTP_TOKEN` environment variable. - - `Meta` `(map<string|string>: <optional>)` - Specifies KV metadata to associate with the peering. This parameter is not required and does not directly impact the cluster peering process. @@ -314,6 +304,6 @@ $ curl --header "X-Consul-Token: 0137db51-5895-4c25-b6cd-d9ed992f4a52" \ }, "CreateIndex": 109, "ModifyIndex": 119 - }, + } ] ``` diff --git a/website/content/commands/index.mdx b/website/content/commands/index.mdx index 7b0b9c2b03..d805d4eb22 100644 --- a/website/content/commands/index.mdx +++ b/website/content/commands/index.mdx @@ -50,6 +50,7 @@ Available commands are: members Lists the members of a Consul cluster monitor Stream logs from a Consul agent operator Provides cluster-level tools for Consul operators + peering Create and manage peering connections between Consul clusters reload Triggers the agent to reload configuration files rtt Estimates network round trip time between nodes services Interact with services diff --git a/website/content/commands/peering/delete.mdx b/website/content/commands/peering/delete.mdx new file mode 100644 index 0000000000..04a7e16ba4 --- /dev/null +++ b/website/content/commands/peering/delete.mdx @@ -0,0 +1,50 @@ +--- +layout: commands +page_title: 'Commands: Peering Delete' +description: Learn how to use the consul peering delete command to remove a peering connection between Consul clusters.
+--- + +# Consul Peering Delete + +Command: `consul peering delete` + +Corresponding HTTP API Endpoint: [\[DELETE\] /v1/peering/:name](/api-docs/peering#delete-a-peering-connection) + +The `peering delete` command removes a peering connection with another cluster. +Consul deletes all data imported from the peer in the background. +The peering connection is removed after all associated data has been deleted. +Operators can still read the peering connections while the data is being removed. +The command adds a `DeletedAt` field to the peering connection object with the timestamp of when the peering was marked for deletion. +You can only use a peering token to establish the connection once. If you need to reestablish a peering connection, you must generate a new token. + +The table below shows this command's [required ACLs](/api#authentication). + +| ACL Required | +| ------------ | +| `peering:write` | + +## Usage + +Usage: `consul peering delete [options] -name <name>` + +#### Command Options + +- `-name=<name>` - (Required) The name of the peer. + +#### Enterprise Options + +@include 'http_api_partition_options.mdx' + +#### API Options + +@include 'http_api_options_client.mdx' + +## Examples + +The following example deletes a peering connection to a cluster locally referred to as "cluster-02": + +```shell-session hideClipboard +$ consul peering delete -name cluster-02 +Successfully submitted peering connection, cluster-02, for deletion +``` + diff --git a/website/content/commands/peering/establish.mdx b/website/content/commands/peering/establish.mdx new file mode 100644 index 0000000000..d9906e0681 --- /dev/null +++ b/website/content/commands/peering/establish.mdx @@ -0,0 +1,52 @@ +--- +layout: commands +page_title: 'Commands: Peering Establish' +description: Learn how to use the consul peering establish command to establish a peering connection between Consul clusters. +--- + +# Consul Peering Establish + +Command: `consul peering establish` + +Corresponding HTTP API Endpoint: [\[POST\] /v1/peering/establish](/api-docs/peering#establish-a-peering-connection) + +The `peering establish` command starts a peering connection with the cluster that generated the peering token. +You can generate cluster peering tokens using the [`consul peering generate-token`](/commands/peering/generate-token) command or the [HTTP API](https://www.consul.io/api-docs/peering#generate-a-peering-token). + +You can only use a peering token to establish the connection once. If you need to reestablish a peering connection, you must generate a new token. + +The table below shows this command's [required ACLs](/api#authentication). + +| ACL Required | +| ------------ | +| `peering:write` | + +## Usage + +Usage: `consul peering establish [options] -name <name> -peering-token <token>` + +#### Command Options + +- `-name=<name>` - (Required) Specifies a local name for the cluster you are establishing a connection with. The `name` is only used to identify the connection with the peer. + +- `-peering-token=<token>` - (Required) Specifies the peering token from the cluster that generated the token. + +- `-meta=<key>=<value>` - Specifies key/value pairs to associate with the peering connection in `-meta="key"="value"` format. You can use the flag multiple times to set multiple metadata fields.
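The same operation can be scripted against the Go API client that this command wraps. The following is a minimal sketch, not the command's implementation; the `Establish` method and `api.PeeringEstablishRequest` shape are assumptions inferred from the `/v1/peering/establish` endpoint referenced above, and the token value is a placeholder:

```go
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/hashicorp/consul/api"
)

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}

	// Equivalent of `consul peering establish -name cluster-01 -peering-token <token>`.
	req := api.PeeringEstablishRequest{
		PeerName:     "cluster-01",      // local name for the accepting cluster
		PeeringToken: "eyJDQSI6bnVs...", // token produced by the accepting cluster (placeholder)
		Meta:         map[string]string{"env": "production"},
	}
	if _, _, err := client.Peerings().Establish(context.Background(), req, &api.WriteOptions{}); err != nil {
		log.Fatalf("error establishing peering: %s", err)
	}
	fmt.Println("Successfully established peering connection with cluster-01")
}
```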
+ +#### Enterprise Options + +@include 'http_api_partition_options.mdx' + +#### API Options + +@include 'http_api_options_client.mdx' + +## Examples + +The following example establishes a peering connection with a cluster locally referred to as "cluster-01": + +```shell-session hideClipboard +$ consul peering establish -name cluster-01 -peering-token eyJDQSI6bnVs...5Yi0wNzk5NTA1YTRmYjYifQ== +Successfully established peering connection with cluster-01 +``` + diff --git a/website/content/commands/peering/generate-token.mdx b/website/content/commands/peering/generate-token.mdx new file mode 100644 index 0000000000..961122fc66 --- /dev/null +++ b/website/content/commands/peering/generate-token.mdx @@ -0,0 +1,68 @@ +--- +layout: commands +page_title: 'Commands: Peering Generate Token' +description: Learn how to use the consul peering generate-token command to generate a token that enables you to peer Consul clusters. +--- + +# Consul Peering Generate Token + +Command: `consul peering generate-token` + +Corresponding HTTP API Endpoint: [\[POST\] /v1/peering/token](/api-docs/peering#generate-a-peering-token) + +The `peering generate-token` command generates a peering token. The token is a base64-encoded string containing the token details. +This token should be transferred to the other cluster being peered and consumed using [`consul peering establish`](/commands/peering/establish). + +Generating a token and specifying the same local name associated with a previously-generated token does not affect active connections established with the original token. If the previously-generated token is not actively being used for a peer connection, however, it will become invalid when the new token with the same local name is generated. + +The table below shows this command's [required ACLs](/api#authentication). + +| ACL Required | +| ------------ | +| `peering:write` | + +## Usage + +Usage: `consul peering generate-token [options] -name <name>` + +#### Command Options + +- `-name=<name>` - (Required) Specifies a local name for the cluster that the token is intended for. +The `name` is only used to identify the connection with the peer. +Generating a token and specifying the same local name associated with a previously-generated token does not affect active connections established with the original token. +If the previously-generated token is not actively being used for a peer connection, however, it will become invalid when the new token with the same local name is generated. + +- `-meta=<key>=<value>` - Specifies key/value pairs to associate with the peering connection token in `-meta="key"="value"` format. You can use the flag multiple times to set multiple metadata fields. + +- `-server-external-addresses=<string>[,string,...]` - Specifies a comma-separated list of addresses +to put into the generated token. Addresses are of the form `{host or IP}:port`. +You can specify one or more load balancers or external IPs that route external traffic to this cluster's Consul servers. + +- `-format={pretty|json}` - Command output format. The default value is `pretty`.
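These flags map one-to-one onto the `api.PeeringGenerateTokenRequest` fields used by the command implementation earlier in this patch. Below is a minimal sketch of the equivalent Go client call; the load-balancer address is hypothetical, and `client` is an `*api.Client` as in the previous sketch:

```go
// Equivalent of:
//   consul peering generate-token -name cluster-02 \
//     -server-external-addresses lb.example.com:8502 -meta env=production
func generateToken(client *api.Client) (string, error) {
	req := api.PeeringGenerateTokenRequest{
		PeerName:                "cluster-02",
		Meta:                    map[string]string{"env": "production"},
		ServerExternalAddresses: []string{"lb.example.com:8502"},
	}
	res, _, err := client.Peerings().GenerateToken(context.Background(), req, &api.WriteOptions{})
	if err != nil {
		return "", err
	}
	// res.PeeringToken is the base64-encoded token to hand to the dialing cluster.
	return res.PeeringToken, nil
}
```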
+ +#### Enterprise Options + +@include 'http_api_partition_options.mdx' + +#### API Options + +@include 'http_api_options_client.mdx' + +## Examples + +The following example generates a peering token for a cluster called "cluster-02": + +```shell-session hideClipboard +$ consul peering generate-token -name cluster-02 +eyJDQSI6bnVs...5Yi0wNzk5NTA1YTRmYjYifQ== +``` + +### Using a Load Balancer for Consul Servers + +The following example generates a token for a cluster where servers are proxied by a load balancer: + +```shell-session hideClipboard +$ consul peering generate-token -server-external-addresses my-load-balancer-1234567890abcdef.elb.us-east-2.amazonaws.com -name cluster-02 +eyJDQSI6bnVs...5Yi0wNzk5NTA1YTRmYjYifQ== +``` diff --git a/website/content/commands/peering/index.mdx b/website/content/commands/peering/index.mdx new file mode 100644 index 0000000000..47311a444a --- /dev/null +++ b/website/content/commands/peering/index.mdx @@ -0,0 +1,40 @@ +--- +layout: commands +page_title: 'Commands: Peering' +--- + +# Consul Peering + +Command: `consul peering` + +Use the `peering` command to create and manage peering connections between Consul clusters, including token generation and consumption. Refer to +[Create and Manage Peering Connections](/docs/connect/cluster-peering/create-manage-peering) for an +overview of the CLI workflow for cluster peering. + +## Usage + +```text +Usage: consul peering [options] + + # ... + +Subcommands: + + delete Close and delete a peering connection + establish Consume a peering token and establish a connection with the accepting cluster + generate-token Generate a peering token for use by a dialing cluster + list List the local cluster's peering connections + read Read detailed information on a peering connection +``` + +For more information, examples, and usage about a subcommand, click on the name +of the subcommand in the sidebar or one of the links below: + +- [delete](/commands/peering/delete) +- [establish](/commands/peering/establish) +- [generate-token](/commands/peering/generate-token) +- [list](/commands/peering/list) +- [read](/commands/peering/read) + + + diff --git a/website/content/commands/peering/list.mdx b/website/content/commands/peering/list.mdx new file mode 100644 index 0000000000..27f9f748f9 --- /dev/null +++ b/website/content/commands/peering/list.mdx @@ -0,0 +1,47 @@ +--- +layout: commands +page_title: 'Commands: Peering List' +--- + +# Consul Peering List + +Command: `consul peering list` + +Corresponding HTTP API Endpoint: [\[GET\] /v1/peerings](/api-docs/peering#list-all-peerings) + +The `peering list` command lists all peering connections. +The results are filtered according to ACL policy configuration. + +The table below shows this command's [required ACLs](/api#authentication). + +| ACL Required | +| ------------ | +| `peering:read` | + +## Usage + +Usage: `consul peering list [options]` + +#### Command Options + +- `-format={pretty|json}` - Command output format. The default value is `pretty`.
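For completeness, the listing this command renders can also be reproduced through the Go client, using the same `Peerings().List` call and fields shown in `list.go` earlier in this patch (`client` is again an `*api.Client`):

```go
// listPeerings mirrors the pretty output of `consul peering list`,
// sorted by name like the command's peeringList sort.
func listPeerings(client *api.Client) error {
	res, _, err := client.Peerings().List(context.Background(), &api.QueryOptions{})
	if err != nil {
		return err
	}
	sort.Slice(res, func(i, j int) bool { return res[i].Name < res[j].Name })
	for _, p := range res {
		fmt.Printf("%s\t%s\t%d\t%d\n",
			p.Name, p.State, p.ImportedServiceCount, p.ExportedServiceCount)
	}
	return nil
}
```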
+ +#### Enterprise Options + +@include 'http_api_partition_options.mdx' + +#### API Options + +@include 'http_api_options_client.mdx' + +## Examples + +The following example lists all peering connections associated with the cluster: + +```shell-session hideClipboard +$ consul peering list +Name State Imported Svcs Exported Svcs Meta +cluster-02 ACTIVE 0 2 env=production +cluster-03 PENDING 0 0 +``` + diff --git a/website/content/commands/peering/read.mdx b/website/content/commands/peering/read.mdx new file mode 100644 index 0000000000..59d3cc74f8 --- /dev/null +++ b/website/content/commands/peering/read.mdx @@ -0,0 +1,62 @@ +--- +layout: commands +page_title: 'Commands: Peering Read' +--- + +# Consul Peering Read + +Command: `consul peering read` + +Corresponding HTTP API Endpoint: [\[GET\] /v1/peering/:name](/api-docs/peering#read-a-peering-connection) + +The `peering read` command displays information on the status of a peering connection. + +The table below shows this command's [required ACLs](/api#authentication). + +| ACL Required | +| ------------ | +| `peering:read` | + +## Usage + +Usage: `consul peering read [options] -name <name>` + +#### Command Options + +- `-name=<name>` - (Required) The name of the peer associated with a connection that you want to read. + +- `-format={pretty|json}` - Command output format. The default value is `pretty`. + +#### Enterprise Options + +@include 'http_api_partition_options.mdx' + +#### API Options + +@include 'http_api_options_client.mdx' + +## Examples + +The following example outputs information about a peering connection locally referred to as "cluster-02": + +```shell-session hideClipboard +$ consul peering read -name cluster-02 +Name: cluster-02 +ID: 3b001063-8079-b1a6-764c-738af5a39a97 +State: ACTIVE +Meta: + env=production + +Peer ID: e83a315c-027e-bcb1-7c0c-a46650904a05 +Peer Server Name: server.dc1.consul +Peer CA Pems: 0 +Peer Server Addresses: + 10.0.0.1:8300 + +Imported Services: 0 +Exported Services: 2 + +Create Index: 89 +Modify Index: 89 +``` + diff --git a/website/content/docs/connect/cluster-peering/create-manage-peering.mdx b/website/content/docs/connect/cluster-peering/create-manage-peering.mdx index ee0a69a945..5af8324e2f 100644 --- a/website/content/docs/connect/cluster-peering/create-manage-peering.mdx +++ b/website/content/docs/connect/cluster-peering/create-manage-peering.mdx @@ -57,6 +57,19 @@ Create a JSON file that contains the first cluster's name and the peering token. + + +In `cluster-01`, use the [`consul peering generate-token` command](/commands/peering/generate-token) to issue a request for a peering token. + +```shell-session +$ consul peering generate-token -name cluster-02 +``` + +The CLI outputs the peering token, which is a base64-encoded string containing the token details. +Save this value to a file or clipboard to be used in the next step on `cluster-02`. + + + 1. In the Consul UI for the datacenter associated with `cluster-01`, click **Peers**. @@ -88,6 +101,25 @@ You can dial the `peering/establish` endpoint once per peering token. Peering to + + +In one of the client agents in "cluster-02," issue the [`consul peering establish` command](/commands/peering/establish) and specify the token generated in the previous step. The command establishes the peering connection. +The command prints "Successfully established peering connection with cluster-01" after the connection is established.
+ +```shell-session +$ consul peering establish -name cluster-01 -peering-token token-from-generate +``` + +When you connect server agents through cluster peering, they peer their default partitions. +To establish peering connections for other partitions through server agents, you must add the `-partition` flag to the `establish` command and specify the partitions you want to peer. +For additional configuration information, refer to the [`consul peering establish` command](/commands/peering/establish). + +You can run the `peering establish` command once per peering token. +Peering tokens cannot be reused after being used to establish a connection. +If you need to re-establish a connection, you must generate a new peering token. + + + 1. In the Consul UI for the datacenter associated with `cluster 02`, click **Peers** and then **Add peer connection**. @@ -213,6 +245,20 @@ $ curl http://127.0.0.1:8500/v1/peerings ``` + + +After you establish a peering connection, run the [`consul peering list`](/commands/peering/list) command to get a list of all peering connections. +For example, the following command requests a list of all peering connections and returns the information in a table: + +```shell-session +$ consul peering list + +Name State Imported Svcs Exported Svcs Meta +cluster-02 ACTIVE 0 2 env=production +cluster-03 PENDING 0 0 + ``` + + In the Consul UI, click **Peers**. The UI lists peering connections you created for clusters in a datacenter. @@ -248,6 +294,35 @@ $ curl http://127.0.0.1:8500/v1/peering/cluster-02 ``` + + +After you establish a peering connection, run the [`consul peering read`](/commands/peering/read) command to get peering information for a specific cluster. +For example, the following command requests peering connection information for "cluster-02": + +```shell-session +$ consul peering read -name cluster-02 + +Name: cluster-02 +ID: 3b001063-8079-b1a6-764c-738af5a39a97 +State: ACTIVE +Meta: + env=production + +Peer ID: e83a315c-027e-bcb1-7c0c-a46650904a05 +Peer Server Name: server.dc1.consul +Peer CA Pems: 0 +Peer Server Addresses: + 10.0.0.1:8300 + +Imported Services: 0 +Exported Services: 2 + +Create Index: 89 +Modify Index: 89 + +``` + + In the Consul UI, click **Peers**. The UI lists peering connections you created for clusters in that datacenter. Click the name of a peered cluster to view additional details about the peering connection. @@ -281,6 +356,17 @@ $ curl --request DELETE http://127.0.0.1:8500/v1/peering/cluster-02 ``` + + +In "cluster-01," request the deletion through the [`consul peering delete`](/commands/peering/delete) command. + +```shell-session +$ consul peering delete -name cluster-02 + +Successfully submitted peering connection, cluster-02, for deletion +``` + + In the Consul UI, click **Peers**. The UI lists peering connections you created for clusters in that datacenter.
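The read and delete steps of the workflow above can likewise be scripted with the Go client. A minimal sketch follows; `Read` matches the usage in this patch's tests, while the `Delete` signature is an assumption based on the `[DELETE] /v1/peering/:name` endpoint referenced earlier:

```go
// inspectAndRemovePeering mirrors `consul peering read` followed by
// `consul peering delete` for a peer locally named "cluster-02".
func inspectAndRemovePeering(client *api.Client) error {
	p, _, err := client.Peerings().Read(context.Background(), "cluster-02", &api.QueryOptions{})
	if err != nil {
		return err
	}
	if p == nil {
		return fmt.Errorf("no peering with name cluster-02 found")
	}
	fmt.Printf("Name: %s\nState: %s\n", p.Name, p.State)

	// Marks the peering for deletion; Consul removes the imported data in the
	// background and drops the peering once that completes.
	_, err = client.Peerings().Delete(context.Background(), "cluster-02", &api.WriteOptions{})
	return err
}
```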
diff --git a/website/data/commands-nav-data.json b/website/data/commands-nav-data.json index 5658512238..3a3bb0609b 100644 --- a/website/data/commands-nav-data.json +++ b/website/data/commands-nav-data.json @@ -436,6 +436,35 @@ "title": "partition", "path": "partition" }, + { + "title": "peering", + "routes": [ + { + "title": "Overview", + "path": "peering" + }, + { + "title": "delete", + "path": "peering/delete" + }, + { + "title": "establish", + "path": "peering/establish" + }, + { + "title": "generate-token", + "path": "peering/generate-token" + }, + { + "title": "list", + "path": "peering/list" + }, + { + "title": "read", + "path": "peering/read" + } + ] + }, { "title": "reload", "path": "reload" From 0c2fb7252dd28b5108f8e9d544a893256729064f Mon Sep 17 00:00:00 2001 From: Kyle Havlovitz Date: Thu, 1 Sep 2022 14:24:30 -0700 Subject: [PATCH 83/93] Prune intermediates before appending new one --- agent/consul/leader_connect_ca.go | 13 ++++++++----- 1 file changed, 8 insertions(+), 5 deletions(-) diff --git a/agent/consul/leader_connect_ca.go b/agent/consul/leader_connect_ca.go index d2cd021134..fe780f7f22 100644 --- a/agent/consul/leader_connect_ca.go +++ b/agent/consul/leader_connect_ca.go @@ -1098,9 +1098,13 @@ func setLeafSigningCert(caRoot *structs.CARoot, pem string) error { return fmt.Errorf("error parsing leaf signing cert: %w", err) } + if err := pruneExpiredIntermediates(caRoot); err != nil { + return err + } + caRoot.IntermediateCerts = append(caRoot.IntermediateCerts, pem) caRoot.SigningKeyID = connect.EncodeSigningKeyID(cert.SubjectKeyId) - return pruneExpiredIntermediates(caRoot) + return nil } // pruneExpiredIntermediates removes expired intermediate certificates @@ -1108,15 +1112,14 @@ func setLeafSigningCert(caRoot *structs.CARoot, pem string) error { func pruneExpiredIntermediates(caRoot *structs.CARoot) error { var newIntermediates []string now := time.Now() - for i, intermediatePEM := range caRoot.IntermediateCerts { + for _, intermediatePEM := range caRoot.IntermediateCerts { cert, err := connect.ParseCert(intermediatePEM) if err != nil { return fmt.Errorf("error parsing leaf signing cert: %w", err) } - // Only keep the intermediate cert if it's still valid, or if it's the most - // recently added (and thus the active signing cert). - if cert.NotAfter.After(now) || i == len(caRoot.IntermediateCerts) { + // Only keep the intermediate cert if it's still valid. 
+ if cert.NotAfter.After(now) { newIntermediates = append(newIntermediates, intermediatePEM) } } From 16e8179a9a11a281c24d5fc0fc71497716f7e4da Mon Sep 17 00:00:00 2001 From: David Yu Date: Thu, 1 Sep 2022 16:21:36 -0700 Subject: [PATCH 84/93] docs: Consul K8s 0.48.0 release notes (#14414) Co-authored-by: Thomas Eckert --- .../docs/release-notes/consul-k8s/v0_47_x.mdx | 2 +- .../docs/release-notes/consul-k8s/v0_48_x.mdx | 66 +++++++++++++++++++ .../docs/release-notes/consul/v1_11_x.mdx | 10 +-- .../docs/release-notes/consul/v1_12_x.mdx | 2 +- .../docs/release-notes/consul/v1_13_x.mdx | 2 +- website/data/docs-nav-data.json | 4 ++ 6 files changed, 78 insertions(+), 8 deletions(-) create mode 100644 website/content/docs/release-notes/consul-k8s/v0_48_x.mdx diff --git a/website/content/docs/release-notes/consul-k8s/v0_47_x.mdx b/website/content/docs/release-notes/consul-k8s/v0_47_x.mdx index a9228e9984..b13d858fd7 100644 --- a/website/content/docs/release-notes/consul-k8s/v0_47_x.mdx +++ b/website/content/docs/release-notes/consul-k8s/v0_47_x.mdx @@ -21,7 +21,7 @@ description: >- - Consul 1.11.x, Consul 1.12.x and Consul 1.13.1+ - Kubernetes 1.19-1.23 -- Kubectl 1.21+ +- Kubectl 1.19+ - Envoy proxy support is determined by the Consul version deployed. Refer to [Envoy Integration](/docs/connect/proxies/envoy) for details. diff --git a/website/content/docs/release-notes/consul-k8s/v0_48_x.mdx b/website/content/docs/release-notes/consul-k8s/v0_48_x.mdx new file mode 100644 index 0000000000..38c6732bf4 --- /dev/null +++ b/website/content/docs/release-notes/consul-k8s/v0_48_x.mdx @@ -0,0 +1,66 @@ +--- +layout: docs +page_title: 0.48.x +description: >- + Consul on Kubernetes release notes for version 0.48.x +--- + +# Consul on Kubernetes 0.48.0 + +## Release Highlights + +- **Consul CNI Plugin**: This release introduces the Consul CNI Plugin for Consul on Kubernetes, to allow for configuring traffic redirection rules without escalated container privileges such as `CAP_NET_ADMIN`. Refer to [Enable the Consul CNI Plugin](/docs/k8s/installation/install#enable-the-consul-cni-plugin) for more details. The Consul CNI Plugin is supported for Consul K8s 0.48.0+ and Consul 1.13.1+. + +- **Kubernetes 1.24 Support**: Add support for Kubernetes 1.24 where ServiceAccounts no longer have long-term JWT tokens. [[GH-1431](https://github.com/hashicorp/consul-k8s/pull/1431)] + +- **MaxInboundConnections in service-defaults CRD**: Add support for MaxInboundConnections on the Service Defaults CRD. [[GH-1437](https://github.com/hashicorp/consul-k8s/pull/1437)] + +- **API Gateway: ACL auth when using WAN Federation**: Configure ACL auth for controller correctly when deployed in secondary datacenter with federation enabled [[GH-1462](https://github.com/hashicorp/consul-k8s/pull/1462)] + +## What has Changed + +- **Kubernetes 1.24 Support for multiport applications require Kubernetes secrets**: Users deploying multiple services to the same Pod (multiport) on Kubernetes 1.24+ must also deploy a Kubernetes secret for each ServiceAccount associated with the Consul service. 
The name of the Secret must match the ServiceAccount name and be of type `kubernetes.io/service-account-token`. +Example: + + ```yaml + apiVersion: v1 + kind: Secret + metadata: + name: svc1 + annotations: + kubernetes.io/service-account.name: svc1 + type: kubernetes.io/service-account-token + --- + apiVersion: v1 + kind: Secret + metadata: + name: svc2 + annotations: + kubernetes.io/service-account.name: svc2 + type: kubernetes.io/service-account-token + ``` + +## Supported Software + +- Consul 1.11.x, Consul 1.12.x and Consul 1.13.1+ +- Kubernetes 1.19-1.24 +- Kubectl 1.19+ +- Envoy proxy support is determined by the Consul version deployed. Refer to + [Envoy Integration](/docs/connect/proxies/envoy) for details. + +## Upgrading + +For detailed information on upgrading, please refer to the [Upgrades page](/docs/k8s/upgrade) + +## Known Issues +The following issues are known to exist in the v0.48.0 release: + +- Consul CNI Plugin currently does not support RedHat OpenShift as the CNI Plugin Daemonset requires additional SecurityContextConstraint objects to run on OpenShift. Support for OpenShift will be added in an upcoming release. + +## Changelogs + +The changelogs for this major release version and any maintenance versions are listed below. + +~> **Note:** The following link takes you to the changelogs on the GitHub website. + +- [0.48.0](https://github.com/hashicorp/consul-k8s/releases/tag/v0.48.0) diff --git a/website/content/docs/release-notes/consul/v1_11_x.mdx b/website/content/docs/release-notes/consul/v1_11_x.mdx index d26cd6a804..aa5e68f80c 100644 --- a/website/content/docs/release-notes/consul/v1_11_x.mdx +++ b/website/content/docs/release-notes/consul/v1_11_x.mdx @@ -9,15 +9,15 @@ description: >- ## Release Highlights -- **Admin Partitions (Enterprise):** Consul 1.11.0 Enterprise introduces a new entity for defining administrative and networking boundaries within a Consul deployment. This feature also enables servers to communicate with clients over a specific gossip segment created for each partition. This release also enables cross partition communication between services across partitions, using Mesh Gateways. For more information refer to the [Admin Partitions](/docs/enterprise/admin-partitions) documentation. +- **Admin Partitions (Enterprise)**: Consul 1.11.0 Enterprise introduces a new entity for defining administrative and networking boundaries within a Consul deployment. This feature also enables servers to communicate with clients over a specific gossip segment created for each partition. This release also enables cross partition communication between services across partitions, using Mesh Gateways. For more information refer to the [Admin Partitions](/docs/enterprise/admin-partitions) documentation. -- **Virtual IPs for services deployed with Consul Service Mesh:** Consul will now generate a unique virtual IP for each service deployed within Consul Service Mesh, allowing transparent proxy to route to services within a data center that exist in different clusters or outside the service mesh. +- **Virtual IPs for services deployed with Consul Service Mesh**: Consul will now generate a unique virtual IP for each service deployed within Consul Service Mesh, allowing transparent proxy to route to services within a data center that exist in different clusters or outside the service mesh.
-- **Replace [boltdb](https://github.com/boltdb/bolt) with [etcd-io/bbolt](https://github.com/etcd-io/bbolt) for raft log store:** Consul now leverages `etcd-io/bbolt` as the default implementation of `boltdb` instead of `boltdb/bolt`. This change also exposes a configuration to allow for disabling boltdb freelist syncing. In addition, Consul now emits metrics for the raft boltdb store to provide insights into boltdb performance. +- **Replace [boltdb](https://github.com/boltdb/bolt) with [etcd-io/bbolt](https://github.com/etcd-io/bbolt) for raft log store**: Consul now leverages `etcd-io/bbolt` as the default implementation of `boltdb` instead of `boltdb/bolt`. This change also exposes a configuration to allow for disabling boltdb freelist syncing. In addition, Consul now emits metrics for the raft boltdb store to provide insights into boltdb performance. -- **TLS Certificates for Ingress Gateways via an SDS source:**: Ingress Gateways can now be configured to retrieve TLS certificates from an external SDS Service and load the TLS certificates for Ingress listeners. This configuration is set using the `ingress-gateway` configuration entry via the [SDS](/docs/connect/config-entries/ingress-gateway#sds) stanza within the Ingress Gateway TLS configuration. +- **TLS Certificates for Ingress Gateways via an SDS source**: Ingress Gateways can now be configured to retrieve TLS certificates from an external SDS Service and load the TLS certificates for Ingress listeners. This configuration is set using the `ingress-gateway` configuration entry via the [SDS](/docs/connect/config-entries/ingress-gateway#sds) stanza within the Ingress Gateway TLS configuration. -- **Vault Auth Method support for Connect CA Vault Provider:** Consul now supports configuring the Connect CA Vault provider to use auth methods for authentication to Vault. Consul supports using any non-deprecated auth method that is available in Vault v1.8.5, including AppRole, AliCloud, AWS, Azure, Cloud Foundry, GitHub, Google Cloud, JWT/OIDC, Kerberos, Kubernetes, LDAP, Oracle Cloud Infrastructure, Okta, Radius, TLS Certificates, and Username & Password. The Vault Auth Method for Connect CA Provider is utilized by default for the [Vault Secrets Backend](/docs/k8s/installation/vault) feature on Consul on Kubernetes. Utilizing a Vault Auth method would no longer require a Vault token to be managed or provisioned ahead of time to be used for authentication to Vault. +- **Vault Auth Method support for Connect CA Vault Provider**: Consul now supports configuring the Connect CA Vault provider to use auth methods for authentication to Vault. Consul supports using any non-deprecated auth method that is available in Vault v1.8.5, including AppRole, AliCloud, AWS, Azure, Cloud Foundry, GitHub, Google Cloud, JWT/OIDC, Kerberos, Kubernetes, LDAP, Oracle Cloud Infrastructure, Okta, Radius, TLS Certificates, and Username & Password. The Vault Auth Method for Connect CA Provider is utilized by default for the [Vault Secrets Backend](/docs/k8s/installation/vault) feature on Consul on Kubernetes. Utilizing a Vault Auth method would no longer require a Vault token to be managed or provisioned ahead of time to be used for authentication to Vault. 
## What's Changed diff --git a/website/content/docs/release-notes/consul/v1_12_x.mdx b/website/content/docs/release-notes/consul/v1_12_x.mdx index 842dfb31c8..dd354d60b4 100644 --- a/website/content/docs/release-notes/consul/v1_12_x.mdx +++ b/website/content/docs/release-notes/consul/v1_12_x.mdx @@ -15,7 +15,7 @@ description: >- - **AWS Lambda**: Adds the ability to invoke AWS Lambdas through terminating gateways, which allows for cross-datacenter communication, transparent proxy, and intentions with Consul Service Mesh. Refer to [AWS Lambda](/docs/lambda) and [Invoke Lambda Functions](/docs/lambda/invocation) for more details. -- **Mesh-wide TLS min/max versions and cipher suites:** Using the [Mesh](/docs/connect/config-entries/mesh#tls) Config Entry or CRD, it is now possible to set TLS min/max versions and cipher suites for both inbound and outbound mTLS connections. +- **Mesh-wide TLS min/max versions and cipher suites**: Using the [Mesh](/docs/connect/config-entries/mesh#tls) Config Entry or CRD, it is now possible to set TLS min/max versions and cipher suites for both inbound and outbound mTLS connections. - **Expanded details for ACL Permission Denied errors**: Details are now provided when a permission denied errors surface for RPC calls. Details include the accessor ID of the ACL token, the missing permission, and any namespace or partition that the error occurred on. diff --git a/website/content/docs/release-notes/consul/v1_13_x.mdx b/website/content/docs/release-notes/consul/v1_13_x.mdx index 23b694a913..2687126675 100644 --- a/website/content/docs/release-notes/consul/v1_13_x.mdx +++ b/website/content/docs/release-notes/consul/v1_13_x.mdx @@ -31,7 +31,7 @@ For more detailed information, please refer to the [upgrade details page](/docs/ The following issues are known to exist in the 1.13.0 release: - Consul 1.13.1 fixes a compatibility issue when restoring snapshots from pre-1.13.0 versions of Consul. Refer to GitHub issue [[GH-14149](https://github.com/hashicorp/consul/issues/14149)] for more details. -- Consul 1.13.0 and Consul 1.13.1 default to requiring TLS for gRPC communication with Envoy proxies when auto-encrypt and auto-config are enabled. In environments where Envoy proxies are not already configured to use TLS for gRPC, upgrading Consul 1.13 will cause Envoy proxies to disconnect from the control plane (Consul agents). A future patch release will default to disabling TLS by default for GRPC communication with Envoy proxies when using Service Mesh and auto-config or auto-encrypt. Refer to GitHub issue [GH-14253](https://github.com/hashicorp/consul/issues/14253) and [Service Mesh deployments using auto-config and auto-enrypt](https://www.consul.io/docs/upgrading/upgrade-specific#service-mesh-deployments-using-auto-encrypt-or-auto-config) for more details. +- Consul 1.13.0 and Consul 1.13.1 default to requiring TLS for gRPC communication with Envoy proxies when auto-encrypt and auto-config are enabled. In environments where Envoy proxies are not already configured to use TLS for gRPC, upgrading Consul 1.13 will cause Envoy proxies to disconnect from the control plane (Consul agents). A future patch release will default to disabling TLS by default for GRPC communication with Envoy proxies when using Service Mesh and auto-config or auto-encrypt.
Refer to GitHub issue [[GH-14253](https://github.com/hashicorp/consul/issues/14253)] and [Service Mesh deployments using auto-config and auto-enrypt](https://www.consul.io/docs/upgrading/upgrade-specific#service-mesh-deployments-using-auto-encrypt-or-auto-config) for more details. ## Changelogs diff --git a/website/data/docs-nav-data.json b/website/data/docs-nav-data.json index 49b1d91100..cb33486b7a 100644 --- a/website/data/docs-nav-data.json +++ b/website/data/docs-nav-data.json @@ -1273,6 +1273,10 @@ { "title": "Consul K8s", "routes": [ + { + "title": "v0.48.x", + "path": "release-notes/consul-k8s/v0_48_x" + }, { "title": "v0.47.x", "path": "release-notes/consul-k8s/v0_47_x" From 61291ef9c5aad9744a912b0d64de5b7e4811446c Mon Sep 17 00:00:00 2001 From: trujillo-adam <47586768+trujillo-adam@users.noreply.github.com> Date: Thu, 1 Sep 2022 16:22:11 -0700 Subject: [PATCH 85/93] Docs cni plugin (#14009) Co-authored-by: Jeff Boruszak <104028618+boruszak@users.noreply.github.com> --- .../docs/connect/transparent-proxy.mdx | 287 ++++++++++-------- .../clients-outside-kubernetes.mdx | 0 .../consul-enterprise.mdx | 0 .../multi-cluster/index.mdx | 0 .../multi-cluster/kubernetes.mdx | 0 .../multi-cluster/vms-and-kubernetes.mdx | 0 .../servers-outside-kubernetes.mdx | 0 .../single-dc-multi-k8s.mdx | 0 .../data-integration/bootstrap-token.mdx | 0 .../vault/data-integration/connect-ca.mdx | 0 .../data-integration/enterprise-license.mdx | 0 .../vault/data-integration/gossip.mdx | 0 .../vault/data-integration/index.mdx | 0 .../data-integration/partition-token.mdx | 0 .../data-integration/replication-token.mdx | 0 .../vault/data-integration/server-tls.mdx | 0 .../snapshot-agent-config.mdx | 0 .../vault/data-integration/webhook-certs.mdx | 0 .../vault/index.mdx | 0 .../vault/systems-integration.mdx | 0 .../vault/wan-federation.mdx | 0 .../docs/k8s/installation/install-cli.mdx | 98 +++++- .../content/docs/k8s/installation/install.mdx | 202 +++++------- .../platforms/self-hosted-kubernetes.mdx | 0 website/data/docs-nav-data.json | 146 ++++----- website/redirects.js | 246 +++++++++++++++ 26 files changed, 652 insertions(+), 327 deletions(-) rename website/content/docs/k8s/{installation => }/deployment-configurations/clients-outside-kubernetes.mdx (100%) rename website/content/docs/k8s/{installation => }/deployment-configurations/consul-enterprise.mdx (100%) rename website/content/docs/k8s/{installation => deployment-configurations}/multi-cluster/index.mdx (100%) rename website/content/docs/k8s/{installation => deployment-configurations}/multi-cluster/kubernetes.mdx (100%) rename website/content/docs/k8s/{installation => deployment-configurations}/multi-cluster/vms-and-kubernetes.mdx (100%) rename website/content/docs/k8s/{installation => }/deployment-configurations/servers-outside-kubernetes.mdx (100%) rename website/content/docs/k8s/{installation => }/deployment-configurations/single-dc-multi-k8s.mdx (100%) rename website/content/docs/k8s/{installation => deployment-configurations}/vault/data-integration/bootstrap-token.mdx (100%) rename website/content/docs/k8s/{installation => deployment-configurations}/vault/data-integration/connect-ca.mdx (100%) rename website/content/docs/k8s/{installation => deployment-configurations}/vault/data-integration/enterprise-license.mdx (100%) rename website/content/docs/k8s/{installation => deployment-configurations}/vault/data-integration/gossip.mdx (100%) rename website/content/docs/k8s/{installation => deployment-configurations}/vault/data-integration/index.mdx (100%) 
rename website/content/docs/k8s/{installation => deployment-configurations}/vault/data-integration/partition-token.mdx (100%) rename website/content/docs/k8s/{installation => deployment-configurations}/vault/data-integration/replication-token.mdx (100%) rename website/content/docs/k8s/{installation => deployment-configurations}/vault/data-integration/server-tls.mdx (100%) rename website/content/docs/k8s/{installation => deployment-configurations}/vault/data-integration/snapshot-agent-config.mdx (100%) rename website/content/docs/k8s/{installation => deployment-configurations}/vault/data-integration/webhook-certs.mdx (100%) rename website/content/docs/k8s/{installation => deployment-configurations}/vault/index.mdx (100%) rename website/content/docs/k8s/{installation => deployment-configurations}/vault/systems-integration.mdx (100%) rename website/content/docs/k8s/{installation => deployment-configurations}/vault/wan-federation.mdx (100%) rename website/content/docs/k8s/{installation => }/platforms/self-hosted-kubernetes.mdx (100%) diff --git a/website/content/docs/connect/transparent-proxy.mdx b/website/content/docs/connect/transparent-proxy.mdx index 57ad48ba7a..a60bb72d00 100644 --- a/website/content/docs/connect/transparent-proxy.mdx +++ b/website/content/docs/connect/transparent-proxy.mdx @@ -9,72 +9,59 @@ description: |- # Transparent Proxy -Transparent proxy allows applications to communicate through the mesh without changing their configuration. -Transparent proxy also hardens application security by preventing direct inbound connections that bypass the mesh. +This topic describes how to use Consul’s transparent proxy feature, which allows applications to communicate through the service mesh without modifying their configurations. Transparent proxy also hardens application security by preventing direct inbound connections that bypass the mesh. -#### Without Transparent Proxy +## Introduction -![Diagram demonstrating that without transparent proxy, applications must "opt in" to connecting to their dependencies through the mesh](/img/consul-connect/without-transparent-proxy.png) +When transparent proxy is enabled, Consul is able to perform the following actions automatically: -Without transparent proxy, application owners need to: +- Infer the location of upstream services using service intentions. +- Redirect outbound connections that point to KubeDNS through the proxy. +- Force traffic through the proxy to prevent unauthorized direct access to the application. -1. Explicitly configure upstream services, choosing a local port to access them. -1. Change application to access `localhost:`. -1. Configure application to listen only on the loopback interface to prevent unauthorized - traffic from bypassing the mesh. - -#### With Transparent Proxy +The following diagram shows how transparent proxy routes traffic: ![Diagram demonstrating that with transparent proxy, connections are automatically routed through the mesh](/img/consul-connect/with-transparent-proxy.png) -With transparent proxy: +When transparent proxy is disabled, you must manually specify the following configurations so that your applications can communicate with other services in the mesh: -1. Local upstreams are inferred from service intentions and peered upstreams are - inferred from imported services, so no explicit configuration is needed. -1. Outbound connections pointing to a Kubernetes DNS record "just work" — network rules - redirect them through the proxy. -1. 
Inbound traffic is forced to go through the proxy to prevent unauthorized - direct access to the application. +* Explicitly configure upstream services by specifying a local port to access them. +* Change applications to access `localhost:<port>`. +* Configure applications to only listen on the loopback interface to prevent unauthorized traffic from bypassing the mesh. + +The following diagram shows how traffic flows through the mesh without transparent proxy enabled: -#### Overview ![Diagram demonstrating that without transparent proxy, applications must "opt in" to connecting to their dependencies through the mesh](/img/consul-connect/without-transparent-proxy.png) -Transparent proxy allows users to reach other services in the service mesh while ensuring that inbound and outbound -traffic for services in the mesh are directed through the sidecar proxy. Traffic is secured -and only reaches intended destinations since the proxy can enforce security and policy like TLS and Service Intentions. +Transparent proxy is available for Kubernetes environments. As part of the integration with Kubernetes, Consul registers Kubernetes Services, injects sidecar proxies, and enables traffic redirection. -Previously, service mesh users would need to explicitly define upstreams for a service as a local listener on the sidecar -proxy, and dial the local listener to reach the appropriate upstream. Users would also have to set intentions to allow -specific services to talk to one another. Transparent proxying reduces this duplication, by determining upstreams -implicitly from Service Intentions and imported services from a peer. Explicit upstreams are still supported in the [proxy service -registration](/docs/connect/registration/service-registration) on VMs and via the -[annotation](/docs/k8s/annotations-and-labels#consul-hashicorp-com-connect-service-upstreams) in Kubernetes. +## Requirements -To support transparent proxying, Consul's CLI now has a command -[`consul connect redirect-traffic`](/commands/connect/redirect-traffic) to redirect traffic through an inbound and -outbound listener on the sidecar. Consul also watches Service Intentions and imported services then configures the Envoy -proxy with the appropriate upstream IPs. If the default ACL policy is "allow", then Service Intentions are not required. -In Consul on Kubernetes, the traffic redirection command is automatically set up via an init container. +Your network must meet the following environment and software requirements to use transparent proxy. -## Prerequisites +* Transparent proxy is available for Kubernetes environments. +* Consul 1.10.0+ +* Consul Helm chart 0.32.0+. If you want to use the Consul CNI plugin to redirect traffic, Helm chart 0.48.0+ is required. Refer to [Enable the Consul CNI plugin](#enable-the-consul-cni-plugin) for additional information. +* [Service intentions](/docs/connect/intentions) must be configured to allow communication between intended services. +* The `ip_tables` kernel module must be running on all worker nodes within a Kubernetes cluster. If you are using the `modprobe` Linux utility, for example, issue the following command: -### Kubernetes + `$ modprobe ip_tables` -* To use transparent proxy on Kubernetes, Consul-helm >= `0.32.0` and Consul-k8s >= `0.26.0` are required in addition to Consul >= `1.10.0`. -* If the default policy for ACLs is "deny", then Service Intentions should be set up to allow intended services to connect to each other. -Otherwise, all Connect services can talk to all other services. 
-* If using Transparent Proxy, all worker nodes within a Kubernetes cluster must have the `ip_tables` kernel module running, e.g. `modprobe ip_tables`. - -The Kubernetes integration takes care of registering Kubernetes services with Consul, injecting a sidecar proxy, and -enabling traffic redirection. - -## Upgrading to Transparent Proxy - -~> When upgrading from older versions (i.e Consul-k8s < `0.26.0` or Consul-helm < `0.32.0`) to Consul-k8s >= `0.26.0` and Consul-helm >= `0.32.0`, please make sure to follow the upgrade steps [here](/docs/upgrading/upgrade-specific/#transparent-proxy-on-kubernetes). +~> **Upgrading to a supported version**: Always follow the [proper upgrade path](/docs/upgrading/upgrade-specific/#transparent-proxy-on-kubernetes) when upgrading to a supported version of Consul, Consul on Kubernetes (`consul-k8s`), and the Consul Helm chart. ## Configuration -### Enabling Transparent Proxy -Transparent proxy can be enabled in Kubernetes on the whole cluster via the Helm value: +This section describes how to configure the transparent proxy. + +### Enable transparent proxy + +You can enable the transparent proxy for an entire cluster, individual Kubernetes namespaces, and individual services. + +When you install Consul using the Helm chart, transparent proxy is enabled for the entire cluster by default. + +#### Entire cluster + +Use the `connectInject.transparentProxy.defaultEnabled` Helm value to enable or disable transparent proxy for the entire cluster: ```yaml connectInject: @@ -82,15 +69,16 @@ connectInject: defaultEnabled: true ``` -It can also be enabled on a per namespace basis by setting the label `consul.hashicorp.com/transparent-proxy=true` on the -Kubernetes namespace. This will override the Helm value `connectInject.transparentProxy.defaultEnabled` and define the -default behavior of Pods in the namespace. For example: +#### Kubernetes namespace + +Apply the `consul.hashicorp.com/transparent-proxy=true` label to enable transparent proxy for a Kubernetes namespace. The label overrides the `connectInject.transparentProxy.defaultEnabled` Helm value and defines the default behavior of Pods in the namespace. The following example enables transparent proxy for Pods in the `my-app` namespace: + ```bash kubectl label namespaces my-app "consul.hashicorp.com/transparent-proxy=true" ``` +#### Individual service -It can also be enabled on a per service basis via the annotation `consul.hashicorp.com/transparent-proxy=true` on the -Pod for each service, which will override both the Helm value and the namespace label: +Apply the `consul.hashicorp.com/transparent-proxy=true` annotation to enable transparent proxy on the Pod for each service. The annotation overrides the Helm value and the namespace label. The following example enables transparent proxy for the `static-server` service: ```yaml apiVersion: v1 @@ -140,78 +128,136 @@ spec: serviceAccountName: static-server ``` -### Kubernetes HTTP Health Probes Configuration -Traffic redirection interferes with [Kubernetes HTTP health -probes](https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/) since the -probes expect that kubelet can directly reach the application container on the probe's endpoint, but that traffic will -be redirected through the sidecar proxy, causing errors because kubelet itself is not encrypting that traffic using a -mesh proxy. 
For this reason, Consul allows you to [overwrite Kubernetes HTTP health probes](/docs/k8s/connect/health) to point to the proxy instead. -This can be done using the Helm value `connectInject.transparentProxy.defaultOverwriteProbes` -or the Pod annotation `consul.hashicorp.com/transparent-proxy-overwrite-probes`. +### Enable the Consul CNI plugin -### Traffic Redirection Configuration -Pods with transparent proxy enabled will have an init container injected that sets up traffic redirection for all -inbound and outbound traffic through the sidecar proxies. This will include all traffic by default, with the ability to -configure exceptions on a per-Pod basis. The following Pod annotations allow you to exclude certain traffic from redirection to the sidecar proxies: +By default, Consul generates a `connect-inject init` container as part of the Kubernetes Pod startup process. The container configures traffic redirection in the service mesh through the sidecar proxy. To configure redirection, the container requires elevated CAP_NET_ADMIN privileges, which may not be compatible with security policies in your organization. -- [`consul.hashicorp.com/transparent-proxy-exclude-inbound-ports`](/docs/k8s/annotations-and-labels#consul-hashicorp-com-transparent-proxy-exclude-inbound-ports) -- [`consul.hashicorp.com/transparent-proxy-exclude-outbound-ports`](/docs/k8s/annotations-and-labels#consul-hashicorp-com-transparent-proxy-exclude-outbound-ports) -- [`consul.hashicorp.com/transparent-proxy-exclude-outbound-cidrs`](/docs/k8s/annotations-and-labels#consul-hashicorp-com-transparent-proxy-exclude-outbound-cidrs) -- [`consul.hashicorp.com/transparent-proxy-exclude-uids`](/docs/k8s/annotations-and-labels#consul-hashicorp-com-transparent-proxy-exclude-uids) +Instead, you can enable the Consul container network interface (CNI) plugin to perform traffic redirection. Because the plugin is executed by the Kubernetes kubelet, it already has the elevated privileges necessary to configure the network. Additionally, you do not need to specify annotations that automatically overwrite Kubernetes HTTP health probes when the plugin is enabled (see [Overwrite Kubernetes HTTP health probes](#overwrite-kubernetes-http-health-probes)). +The Consul Helm chart installs the CNI plugin, but it is disabled by default. Refer to the [instructions for enabling the CNI plugin](/docs/k8s/installation/install#enable-the-consul-cni-plugin) in the Consul on Kubernetes installation documentation for additional information. -### Dialing Services Across Kubernetes Clusters +### Traffic redirection -- You cannot use transparent proxy in a deployment configuration with [federation between Kubernetes clusters](/docs/k8s/installation/multi-cluster/kubernetes). - Instead, services in one Kubernetes cluster must explicitly dial a service to a Consul datacenter in another Kubernetes cluster using the - [consul.hashicorp.com/connect-service-upstreams](/docs/k8s/annotations-and-labels#consul-hashicorp-com-connect-service-upstreams) - annotation. For example, an annotation of - `"consul.hashicorp.com/connect-service-upstreams": "my-service:1234:dc2"` reaches an upstream service called `my-service` - in the datacenter `dc2` on port `1234`. +There are two mechanisms for redirecting traffic through the sidecar proxies. By default, Consul injects an init container that redirects all inbound and outbound traffic. The default mechanism requires elevated permissions (CAP_NET_ADMIN) in order to redirect traffic to the service mesh. 
-
Alternatively, you can enable the Consul CNI plugin to handle traffic redirection. Because the Kubernetes kubelet runs CNI plugins, the Consul CNI plugin has the necessary privileges to apply routing tables in the network. -- You cannot use transparent proxy in a deployment configuration with a - [single Consul datacenter spanning multiple Kubernetes clusters](/docs/k8s/installation/deployment-configurations/single-dc-multi-k8s). Instead, - services in one Kubernetes cluster must explicitly dial a service in another Kubernetes cluster using the - [consul.hashicorp.com/connect-service-upstreams](/docs/k8s/annotations-and-labels#consul-hashicorp-com-connect-service-upstreams) - annotation. For example, an annotation of - `"consul.hashicorp.com/connect-service-upstreams": "my-service:1234"`, - reaches an upstream service called `my-service` in another Kubernetes cluster and on port `1234`. - Although transparent proxy is enabled, Kubernetes DNS is not utilized when communicating between services that exist on separate Kubernetes clusters. +Both mechanisms redirect all inbound and outbound traffic, but you can configure exceptions for specific Pods or groups of Pods. The following annotations enable you to exclude certain traffic from being redirected to sidecar proxies. -- In a deployment configuration with [cluster peering](/docs/connect/cluster-peering), - transparent proxy is fully supported and thus dialing services explicitly is not required. +#### Exclude inbound ports -## Known Limitations +The [`consul.hashicorp.com/transparent-proxy-exclude-inbound-ports`](/docs/k8s/annotations-and-labels#consul-hashicorp-com-transparent-proxy-exclude-inbound-ports) annotation defines a comma-separated list of inbound ports to exclude from traffic redirection when running in transparent proxy mode. The port numbers are string data values. In the following example, services in the pod at ports `8200` and `8201` are not redirected through the transparent proxy: -- Deployment configurations with federation across or a single datacenter spanning multiple clusters must explicitly dial a - service in another datacenter or cluster using annotations. + -- When dialing headless services, the request is proxied using a plain TCP proxy. The upstream's protocol is not considered. +```yaml +"metadata": { + "annotations": { + "consul.hashicorp.com/transparent-proxy-exclude-inbound-ports" : "8200, 8201" + } +} +``` + -## Using Transparent Proxy +#### Exclude outbound ports -In Kubernetes, services can reach other services via their -[Kubernetes DNS](https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/) address or through Pod IPs, and that -traffic will be transparently sent through the proxy. Connect services in Kubernetes are required to have a Kubernetes -service selecting the Pods. +The [`consul.hashicorp.com/transparent-proxy-exclude-outbound-ports`](/docs/k8s/annotations-and-labels#consul-hashicorp-com-transparent-proxy-exclude-outbound-ports) annotation defines a comma-separated list of outbound ports to exclude from traffic redirection when running in transparent proxy mode. The port numbers are string data values. In the following example, services in the pod at ports `8200` and `8201` are not redirected through the transparent proxy: -~> **Note**: In order to use Kubernetes DNS, the Kubernetes service name needs to match the Consul service name. 
This is the -case by default, unless the service Pods have the annotation `consul.hashicorp.com/connect-service` overriding the -Consul service name. + -Transparent proxy is enabled by default in Consul-helm >=`0.32.0`. The Helm value used to enable/disable transparent -proxy for all applications in a Kubernetes cluster is `connectInject.transparentProxy.defaultEnabled`. +```yaml +"metadata": { + "annotations": { + "consul.hashicorp.com/transparent-proxy-exclude-outbound-ports" : "8200, 8201" + } +} +``` -Each Pod for the service will be configured with iptables rules to direct all inbound and outbound traffic through an -inbound and outbound listener on the sidecar proxy. The proxy will be configured to know how to route traffic to the -appropriate upstream services based on [Service -Intentions](/docs/connect/config-entries/service-intentions). This means Connect services no longer -need to use the `consul.hashicorp.com/connect-service-upstreams` annotation to configure upstreams explicitly. Once the -Service Intentions are set, they can simply address the upstream services using Kubernetes DNS. + -As of Consul-k8s >= `0.26.0` and Consul-helm >= `0.32.0`, a Kubernetes service that selects application pods is required -for Connect applications, i.e: +#### Exclude outbound CIDR blocks + +The [`consul.hashicorp.com/transparent-proxy-exclude-outbound-cidrs`](/docs/k8s/annotations-and-labels#consul-hashicorp-com-transparent-proxy-exclude-outbound-cidrs) annotation +defines a comma-separated list of outbound CIDR blocks to exclude from traffic redirection when running in transparent proxy mode. The CIDR blocks are string data values. +In the following example, services at the `3.3.3.3` IP address and in the `3.3.3.3/24` CIDR block are not redirected through the transparent proxy: + + + +```yaml +"metadata": { + "annotations": { + "consul.hashicorp.com/transparent-proxy-exclude-outbound-cidrs" : "3.3.3.3,3.3.3.3/24" + } +} +``` + + +#### Exclude user IDs + +The [`consul.hashicorp.com/transparent-proxy-exclude-uids`](/docs/k8s/annotations-and-labels#consul-hashicorp-com-transparent-proxy-exclude-uids) annotation +defines a comma-separated list of additional user IDs to exclude from traffic redirection when running in transparent proxy mode. The user IDs are string data values. +In the following example, services with the user IDs `4444` and `44444` are not redirected through the transparent proxy: + + + +```yaml +"metadata": { + "annotations": { + "consul.hashicorp.com/transparent-proxy-exclude-uids" : "4444,44444" + } +} +``` + + + +### Kubernetes HTTP health probes configuration + +By default, the Consul CNI plugin is disabled. As a result, Consul on Kubernetes uses a mechanism for traffic redirection that interferes with [Kubernetes HTTP health +probes](https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/). This is because probes expect the kubelet to reach the application container on the probe's endpoint. Instead, traffic is redirected through the sidecar proxy. As a result, health probes return errors because the kubelet does not encrypt that traffic using a mesh proxy. + +There are two methods for solving this issue. The first method is to set the `connectInject.transparentProxy.defaultOverwriteProbes` Helm value to overwrite the Kubernetes HTTP health probes so that they point to the proxy. The second method is to [enable the Consul container network interface (CNI) plugin](#enable-the-consul-cni-plugin) to perform traffic redirection; a minimal values sketch follows. 
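A minimal sketch of the second method in Helm values form, assuming you only want to switch the redirection mechanism and keep every other setting at its chart default; the `connectInject.cni` options are the same ones described under [Enable the Consul CNI plugin](#enable-the-consul-cni-plugin):

```yaml
connectInject:
  enabled: true
  cni:
    # Let the CNI plugin, rather than the injected init container,
    # configure traffic redirection for each Pod.
    enabled: true
```
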
Refer to the [Consul on Kubernetes installation instructions](/docs/k8s/installation/install) for additional information. + +#### Overwrite Kubernetes HTTP health probes + +You can either include the `connectInject.transparentProxy.defaultOverwriteProbes` Helm value in your installation or add the `consul.hashicorp.com/transparent-proxy-overwrite-probes` Kubernetes annotation to your pod configuration to overwrite health probes. + +Refer to [Kubernetes Health Checks in Consul on Kubernetes](/docs/k8s/connect/health) for additional information. + +### Dial services across Kubernetes clusters + +If your [Consul servers are federated between Kubernetes clusters](/docs/k8s/installation/multi-cluster/kubernetes), +then you must configure services in one Kubernetes cluster to explicitly dial a service in the datacenter of another Kubernetes cluster using the +[consul.hashicorp.com/connect-service-upstreams](/docs/k8s/annotations-and-labels#consul-hashicorp-com-connect-service-upstreams) annotation. +The following example configures the service to dial an upstream service called `my-service` in datacenter `dc2` on port `1234`: + +```yaml + "consul.hashicorp.com/connect-service-upstreams": "my-service:1234:dc2" +``` + +If your Consul cluster is deployed to a [single datacenter spanning multiple Kubernetes clusters](/docs/k8s/installation/deployment-configurations/single-dc-multi-k8s), +then you must configure services in one Kubernetes cluster to explicitly dial a service in another Kubernetes cluster using the +[consul.hashicorp.com/connect-service-upstreams](/docs/k8s/annotations-and-labels#consul-hashicorp-com-connect-service-upstreams) annotation. +The following example configures the service to dial an upstream service called `my-service` in another Kubernetes cluster on port `1234`: + +```yaml +"consul.hashicorp.com/connect-service-upstreams": "my-service:1234" +``` + +You do not need to configure services to explicitly dial upstream services if your Consul clusters are connected with a [peering connection](/docs/connect/cluster-peering). + +## Usage + +When transparent proxy is enabled, traffic sent to [KubeDNS](https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/) +or Pod IP addresses is redirected through the proxy. You must use a selector to bind Kubernetes Services to Pods as you define Kubernetes Services in the mesh. +The Kubernetes Service name must match the Consul service name to use KubeDNS. This is the default behavior unless you have applied the `consul.hashicorp.com/connect-service` +Kubernetes annotation to the service pods. The annotation overrides the Consul service name. + +Consul configures redirection for each Pod bound to the Kubernetes Service using `iptables` rules. The rules redirect all inbound and outbound traffic through an inbound and outbound listener on the sidecar proxy. Consul configures the proxy to route traffic to the appropriate upstream services based on [service +intentions](/docs/connect/config-entries/service-intentions), which address the upstream services using KubeDNS. + +In the following example, the Kubernetes service selects `sample-app` application Pods so that they can be reached within the mesh. + + ```yaml apiVersion: v1 @@ -227,22 +273,17 @@ spec: port: 80 ``` -In the example above, if another service wants to reach `sample-app` via transparent proxying, -it can dial `sample-app.default.svc.cluster.local`, using -[Kubernetes DNS](https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/). 
-If ACLs with default "deny" policy are enabled, it also needs a -[ServiceIntention](/docs/connect/config-entries/service-intentions) allowing it to talk to -`sample-app`. + + +Additional services can query the [KubeDNS](https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/) at `sample-app.default.svc.cluster.local` to reach `sample-app`. If ACLs are enabled and configured with default `deny` policies, the configuration also requires a [`ServiceIntention`](/docs/connect/config-entries/service-intentions) that allows them to communicate with `sample-app`. ### Headless Services -For services that are not addressed using a virtual cluster IP, the upstream service must be -configured using the [DialedDirectly](/docs/connect/config-entries/service-defaults#dialeddirectly) -option. +For services that are not addressed using a virtual cluster IP, you must configure the upstream service using the [DialedDirectly](/docs/connect/config-entries/service-defaults#dialeddirectly) option; a configuration sketch appears at the end of this topic. Then, use DNS to discover individual instance addresses and dial them through the transparent proxy. When this mode is enabled on the upstream, services present connect certificates for mTLS and intentions are enforced at the destination. -Individual instance addresses can then be discovered using DNS, and dialed through the transparent proxy. -When this mode is enabled on the upstream, connect certificates will be presented for mTLS and -intentions will be enforced at the destination. +Note that when dialing individual instances, Consul ignores the HTTP routing rules configured with configuration entries. The transparent proxy acts as a TCP proxy to the original destination IP address. -Note that when dialing individual instances HTTP routing rules configured with config entries -will **not** be considered. The transparent proxy acts as a TCP proxy to the original -destination IP address. +## Known Limitations + +- Deployment configurations with federation across or a single datacenter spanning multiple clusters must explicitly dial a service in another datacenter or cluster using annotations. + +- When dialing headless services, the request is proxied using a plain TCP proxy. Consul does not take into consideration the upstream's protocol. 
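A minimal sketch of the headless-services configuration described above, expressed as a `ServiceDefaults` custom resource; the `sample-app` name is illustrative, and the field names follow the [service-defaults](/docs/connect/config-entries/service-defaults#dialeddirectly) reference:

```yaml
apiVersion: consul.hashicorp.com/v1alpha1
kind: ServiceDefaults
metadata:
  name: sample-app  # illustrative upstream service name
spec:
  transparentProxy:
    # Allow instances of this service to be dialed directly
    # instead of through a virtual cluster IP.
    dialedDirectly: true
```
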
diff --git a/website/content/docs/k8s/installation/deployment-configurations/clients-outside-kubernetes.mdx b/website/content/docs/k8s/deployment-configurations/clients-outside-kubernetes.mdx similarity index 100% rename from website/content/docs/k8s/installation/deployment-configurations/clients-outside-kubernetes.mdx rename to website/content/docs/k8s/deployment-configurations/clients-outside-kubernetes.mdx diff --git a/website/content/docs/k8s/installation/deployment-configurations/consul-enterprise.mdx b/website/content/docs/k8s/deployment-configurations/consul-enterprise.mdx similarity index 100% rename from website/content/docs/k8s/installation/deployment-configurations/consul-enterprise.mdx rename to website/content/docs/k8s/deployment-configurations/consul-enterprise.mdx diff --git a/website/content/docs/k8s/installation/multi-cluster/index.mdx b/website/content/docs/k8s/deployment-configurations/multi-cluster/index.mdx similarity index 100% rename from website/content/docs/k8s/installation/multi-cluster/index.mdx rename to website/content/docs/k8s/deployment-configurations/multi-cluster/index.mdx diff --git a/website/content/docs/k8s/installation/multi-cluster/kubernetes.mdx b/website/content/docs/k8s/deployment-configurations/multi-cluster/kubernetes.mdx similarity index 100% rename from website/content/docs/k8s/installation/multi-cluster/kubernetes.mdx rename to website/content/docs/k8s/deployment-configurations/multi-cluster/kubernetes.mdx diff --git a/website/content/docs/k8s/installation/multi-cluster/vms-and-kubernetes.mdx b/website/content/docs/k8s/deployment-configurations/multi-cluster/vms-and-kubernetes.mdx similarity index 100% rename from website/content/docs/k8s/installation/multi-cluster/vms-and-kubernetes.mdx rename to website/content/docs/k8s/deployment-configurations/multi-cluster/vms-and-kubernetes.mdx diff --git a/website/content/docs/k8s/installation/deployment-configurations/servers-outside-kubernetes.mdx b/website/content/docs/k8s/deployment-configurations/servers-outside-kubernetes.mdx similarity index 100% rename from website/content/docs/k8s/installation/deployment-configurations/servers-outside-kubernetes.mdx rename to website/content/docs/k8s/deployment-configurations/servers-outside-kubernetes.mdx diff --git a/website/content/docs/k8s/installation/deployment-configurations/single-dc-multi-k8s.mdx b/website/content/docs/k8s/deployment-configurations/single-dc-multi-k8s.mdx similarity index 100% rename from website/content/docs/k8s/installation/deployment-configurations/single-dc-multi-k8s.mdx rename to website/content/docs/k8s/deployment-configurations/single-dc-multi-k8s.mdx diff --git a/website/content/docs/k8s/installation/vault/data-integration/bootstrap-token.mdx b/website/content/docs/k8s/deployment-configurations/vault/data-integration/bootstrap-token.mdx similarity index 100% rename from website/content/docs/k8s/installation/vault/data-integration/bootstrap-token.mdx rename to website/content/docs/k8s/deployment-configurations/vault/data-integration/bootstrap-token.mdx diff --git a/website/content/docs/k8s/installation/vault/data-integration/connect-ca.mdx b/website/content/docs/k8s/deployment-configurations/vault/data-integration/connect-ca.mdx similarity index 100% rename from website/content/docs/k8s/installation/vault/data-integration/connect-ca.mdx rename to website/content/docs/k8s/deployment-configurations/vault/data-integration/connect-ca.mdx diff --git a/website/content/docs/k8s/installation/vault/data-integration/enterprise-license.mdx 
b/website/content/docs/k8s/deployment-configurations/vault/data-integration/enterprise-license.mdx similarity index 100% rename from website/content/docs/k8s/installation/vault/data-integration/enterprise-license.mdx rename to website/content/docs/k8s/deployment-configurations/vault/data-integration/enterprise-license.mdx diff --git a/website/content/docs/k8s/installation/vault/data-integration/gossip.mdx b/website/content/docs/k8s/deployment-configurations/vault/data-integration/gossip.mdx similarity index 100% rename from website/content/docs/k8s/installation/vault/data-integration/gossip.mdx rename to website/content/docs/k8s/deployment-configurations/vault/data-integration/gossip.mdx diff --git a/website/content/docs/k8s/installation/vault/data-integration/index.mdx b/website/content/docs/k8s/deployment-configurations/vault/data-integration/index.mdx similarity index 100% rename from website/content/docs/k8s/installation/vault/data-integration/index.mdx rename to website/content/docs/k8s/deployment-configurations/vault/data-integration/index.mdx diff --git a/website/content/docs/k8s/installation/vault/data-integration/partition-token.mdx b/website/content/docs/k8s/deployment-configurations/vault/data-integration/partition-token.mdx similarity index 100% rename from website/content/docs/k8s/installation/vault/data-integration/partition-token.mdx rename to website/content/docs/k8s/deployment-configurations/vault/data-integration/partition-token.mdx diff --git a/website/content/docs/k8s/installation/vault/data-integration/replication-token.mdx b/website/content/docs/k8s/deployment-configurations/vault/data-integration/replication-token.mdx similarity index 100% rename from website/content/docs/k8s/installation/vault/data-integration/replication-token.mdx rename to website/content/docs/k8s/deployment-configurations/vault/data-integration/replication-token.mdx diff --git a/website/content/docs/k8s/installation/vault/data-integration/server-tls.mdx b/website/content/docs/k8s/deployment-configurations/vault/data-integration/server-tls.mdx similarity index 100% rename from website/content/docs/k8s/installation/vault/data-integration/server-tls.mdx rename to website/content/docs/k8s/deployment-configurations/vault/data-integration/server-tls.mdx diff --git a/website/content/docs/k8s/installation/vault/data-integration/snapshot-agent-config.mdx b/website/content/docs/k8s/deployment-configurations/vault/data-integration/snapshot-agent-config.mdx similarity index 100% rename from website/content/docs/k8s/installation/vault/data-integration/snapshot-agent-config.mdx rename to website/content/docs/k8s/deployment-configurations/vault/data-integration/snapshot-agent-config.mdx diff --git a/website/content/docs/k8s/installation/vault/data-integration/webhook-certs.mdx b/website/content/docs/k8s/deployment-configurations/vault/data-integration/webhook-certs.mdx similarity index 100% rename from website/content/docs/k8s/installation/vault/data-integration/webhook-certs.mdx rename to website/content/docs/k8s/deployment-configurations/vault/data-integration/webhook-certs.mdx diff --git a/website/content/docs/k8s/installation/vault/index.mdx b/website/content/docs/k8s/deployment-configurations/vault/index.mdx similarity index 100% rename from website/content/docs/k8s/installation/vault/index.mdx rename to website/content/docs/k8s/deployment-configurations/vault/index.mdx diff --git a/website/content/docs/k8s/installation/vault/systems-integration.mdx 
b/website/content/docs/k8s/deployment-configurations/vault/systems-integration.mdx similarity index 100% rename from website/content/docs/k8s/installation/vault/systems-integration.mdx rename to website/content/docs/k8s/deployment-configurations/vault/systems-integration.mdx diff --git a/website/content/docs/k8s/installation/vault/wan-federation.mdx b/website/content/docs/k8s/deployment-configurations/vault/wan-federation.mdx similarity index 100% rename from website/content/docs/k8s/installation/vault/wan-federation.mdx rename to website/content/docs/k8s/deployment-configurations/vault/wan-federation.mdx diff --git a/website/content/docs/k8s/installation/install-cli.mdx b/website/content/docs/k8s/installation/install-cli.mdx index 4cd3ea9f55..ae6de69bf7 100644 --- a/website/content/docs/k8s/installation/install-cli.mdx +++ b/website/content/docs/k8s/installation/install-cli.mdx @@ -1,23 +1,43 @@ --- layout: docs -page_title: Installing the Consul K8s CLI +page_title: Install Consul from the Consul K8s CLI description: >- - Consul K8s CLI is a tool for quickly installing and interacting with Consul on Kubernetes. + This topic describes how to install Consul on Kubernetes using the Consul K8s CLI tool. --- -# Installing the Consul K8s CLI -Consul K8s CLI is a tool for quickly installing and interacting with Consul on Kubernetes. Ensure that you are installing the correct version of the CLI for your Consul on Kubernetes deployment, as the CLI and the control plane are version dependent. +# Install Consul on Kubernetes from Consul K8s CLI + +This topic describes how to install Consul on Kubernetes using the Consul K8s CLI tool. The Consul K8s CLI tool enables you to quickly install and interact with Consul on Kubernetes. Use the Consul K8s CLI tool to install Consul on Kubernetes if you are deploying a single cluster. We recommend using the [Helm chart installation method](/docs/k8s/installation/install) if you are installing Consul on Kubernetes for multi-cluster deployments that involve cross-partition or cross datacenter communication. + +## Introduction + +If it is your first time installing Consul on Kubernetes, then you must first install the Consul K8s CLI tool. After you install the CLI, you can use it to install Consul on Kubernetes. + +## Requirements + +- The `kubectl` client must already be configured to authenticate to the Kubernetes cluster using a valid `kubeconfig` file. +- Install one of the following package managers so that you can install the Consul K8s CLI tool. The installation instructions also provide commands for installing and using the package managers: - MacOS: [Homebrew](https://brew.sh) - Ubuntu/Debian: apt - CentOS/RHEL: yum + +You must install the correct version of the CLI for your Consul on Kubernetes deployment. To deploy a previous version of Consul on Kubernetes, download the specific version of the CLI that matches the version of the control plane that you would like to deploy. Refer to the [compatibility matrix](/docs/k8s/compatibility) for details. + ## Install the CLI -These instructions describe how to install the latest version of the CLI depending on your Operating System, and are suited for fresh installations of Consul on Kubernetes. +The following instructions describe how to install the latest version of the Consul K8s CLI tool, as well as earlier versions, so that you can install an appropriate version of the tool for your control plane. 
+ +### Install the latest version + +Complete the following instructions for a fresh installation of Consul on Kubernetes. -The [Homebrew](https://brew.sh) package manager is required to complete the following installation instructions. The Homebrew formulae will always install the latest version of a binary. If you are looking to install a specific version of the CLI please follow [Install a specific version of Consul K8s CLI](#install-a-specific-version-of-the-cli). +The [Homebrew](https://brew.sh) package manager is required to complete the following installation instructions. The Homebrew formula always installs the latest version of a binary. 1. Install the HashiCorp `tap`, which is a repository of all Homebrew packages for HashiCorp: ```shell-session $ brew tap hashicorp/tap ``` @@ -104,17 +124,15 @@ The [Homebrew](https://brew.sh) package manager is required to complete the foll -## Install a specific version of the CLI +### Install a previous version -These instructions describe how to install a specific version of the CLI and are best suited for installing or managing specific versions of the Consul on Kubernetes control plane. +Complete the following instructions to install a specific version of the CLI so that your tool is compatible with your Consul on Kubernetes control plane. Refer to the [compatibility matrix](/docs/k8s/compatibility) for additional information. -Homebrew does not provide a method to install previous versions of a package. The Consul K8s CLI will need to be installed manually. Previous versions of the Consul K8s CLI could be used to install a specific version of Consul on the Kubernetes control plane. Manual upgrades to the Consul K8s CLI is also performed in the same manner, provided that the Consul K8s CLI was manually installed before. - -1. Download the desired Consul K8s CLI using the following `curl` command. Enter the appropriate version for your deployment via the `$VERSION` environment variable. +1. Download the appropriate version of the Consul K8s CLI using the following `curl` command. Set the `$VERSION` environment variable to the appropriate version for your deployment. ```shell-session $ export VERSION=0.39.0 && \ @@ -203,3 +221,61 @@ Homebrew does not provide a method to install previous versions of a package. Th + +## Install Consul on Kubernetes + +After installing the Consul K8s CLI tool (`consul-k8s`), issue the `install` subcommand and any additional options to install Consul on Kubernetes. Refer to the [Consul K8s CLI reference](/docs/k8s/k8s-cli) for details about all commands and available options. If you do not include any additional options, the `consul-k8s` CLI installs Consul on Kubernetes using the default settings from the Consul Helm chart values. The following example installs Consul on Kubernetes with service mesh and CRDs enabled. + +```shell-session +$ consul-k8s install -set connectInject.enabled=true -set controller.enabled=true + +==> Pre-Install Checks No existing installations found. ✓ No previous persistent volume claims found ✓ No previous secrets found ==> Consul Installation Summary Installation name: consul Namespace: consul Overrides: connectInject: enabled: true controller: enabled: true Proceed with installation? 
(y/N) y + +==> Running Installation + ✓ Downloaded charts +--> creating 1 resource(s) +--> creating 45 resource(s) +--> beginning wait for 45 resources with timeout of 10m0s + ✓ Consul installed into namespace "consul" +``` + +You can include the `-auto-approve` option set to `true` to proceed with the installation if the pre-install checks pass. + +The pre-install checks may fail if existing `PersistentVolumeClaims` (PVC) are detected. Refer to the [uninstall instructions](/docs/k8s/operations/uninstall#uninstall-consul) for information about removing PVCs. + +## Check the Consul cluster status + +Issue the `consul-k8s status` command to view the status of the installed Consul cluster. + +```shell-session +$ consul-k8s status + +==> Consul-K8s Status Summary NAME | NAMESPACE | STATUS | CHARTVERSION | APPVERSION | REVISION | LAST UPDATED ---------+-----------+----------+--------------+------------+----------+-------------------------- consul | consul | deployed | 0.40.0 | 1.11.2 | 1 | 2022/01/31 16:58:51 PST + +==> Config: connectInject: enabled: true controller: enabled: true global: name: consul + +✓ Consul servers healthy (3/3) +✓ Consul clients healthy (3/3) +``` \ No newline at end of file diff --git a/website/content/docs/k8s/installation/install.mdx b/website/content/docs/k8s/installation/install.mdx index ffed47349c..7247013d67 100644 --- a/website/content/docs/k8s/installation/install.mdx +++ b/website/content/docs/k8s/installation/install.mdx @@ -1,133 +1,37 @@ --- layout: docs -page_title: Installing Consul on Kubernetes +page_title: Install Consul on Kubernetes from the Helm Chart description: >- - Consul can run directly on Kubernetes, both in server or client mode. For - pure-Kubernetes workloads, this enables Consul to also exist purely within - Kubernetes. For heterogeneous workloads, Consul agents can join a server - running inside or outside of Kubernetes. + This topic describes how to install Consul on Kubernetes using the official Consul Helm chart. --- -# Installing Consul on Kubernetes +# Install Consul on Kubernetes from the Helm Chart -Consul can run directly on Kubernetes, both in server or client mode. -For pure-Kubernetes workloads, this enables Consul to also exist purely -within Kubernetes. For heterogeneous workloads, Consul agents can join -a server running inside or outside of Kubernetes. +This topic describes how to install Consul on Kubernetes using the official Consul Helm chart. For instructions on how to install Consul on Kubernetes using the Consul K8s CLI, refer to [Installing the Consul K8s CLI](/docs/k8s/installation/install-cli). -You can install Consul on Kubernetes using the following methods: ## Introduction -1. [Consul K8s CLI install](#consul-k8s-cli-installation) -1. [Helm chart install](#helm-chart-installation) +We recommend using the Consul Helm chart to install Consul on Kubernetes for multi-cluster installations that involve cross-partition or cross datacenter communication. The Helm chart installs and configures all necessary components to run Consul. The configuration enables you to run a server cluster, a client cluster, or both. + +Consul can run directly on Kubernetes in server or client mode so that you can leverage Consul functionality if your workloads are fully deployed to Kubernetes. For heterogeneous workloads, Consul agents can join a server running inside or outside of Kubernetes; a minimal values sketch of this case follows. Refer to the [architecture section](/docs/k8s/architecture) to learn more about the general architecture of Consul on Kubernetes. 
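As an illustration of the heterogeneous case, the following values sketch deploys only client agents on Kubernetes and joins them to Consul servers running elsewhere. The `global.enabled`, `client.enabled`, and `client.join` keys are from the [Helm chart reference](/docs/k8s/helm); the server addresses are placeholders, and a real deployment typically needs additional settings such as gossip port exposure:

```yaml
global:
  enabled: false   # disable all components by default
client:
  enabled: true    # run only client agents on the Kubernetes nodes
  join:
    # Placeholder addresses of Consul servers running outside the cluster.
    - "10.0.0.10"
    - "10.0.0.11"
    - "10.0.0.12"
```
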
+ +The Helm chart exposes several useful configurations and automatically sets up complex resources, but it does not automatically operate Consul. You must still become familiar with how to monitor, backup, and upgrade the Consul cluster. + +The Helm chart has no required configuration, so it installs a Consul cluster with default configurations. We strongly recommend that you [learn about the configuration options](/docs/k8s/helm#configuration-values) prior to going to production. + +-> **Security warning**: By default, Helm installs Consul with security configurations disabled so that the out-of-box experience is optimized for new users. We strongly recommend using a properly-secured Kubernetes cluster or making sure that you understand and enable [Consul’s security features](/docs/security) before going into production. Some security features are not supported in the Helm chart and require additional manual configuration. + For a hands-on experience with Consul as a service mesh for Kubernetes, follow the [Getting Started with Consul service mesh](https://learn.hashicorp.com/tutorials/consul/service-mesh-deploy?utm_source=WEBSITE&utm_medium=WEB_IO&utm_offer=ARTICLE_PAGE&utm_content=DOCS) tutorial. -## Consul K8s CLI Installation +## Requirements -We recommend using the [Consul K8s CLI](/docs/k8s/k8s-cli) to install Consul on Kubernetes for single-cluster deployments. You can install Consul on Kubernetes using the Consul K8s CLI tool after installing the CLI. +- Helm version 3.2+. Visit the [Helm website](https://helm.sh/docs/intro/install/) to download the latest version. -Before beginning the installation process, verify that `kubectl` is already configured to authenticate to the Kubernetes cluster using a valid `kubeconfig` file. - -The [Homebrew](https://brew.sh) package manager is required to complete the following installation instructions. - --> ** NOTE:** To deploy a previous version of Consul on Kubernetes via the CLI, you will need to first download the specific version of the CLI that matches the version of the control plane that you would like to deploy. Please follow [Install a specific version of Consul K8s CLI](/docs/k8s/installation/install-cli#install-a-specific-version-of-the-cli). - -1. Install the HashiCorp `tap`, which is a repository of all Homebrew packages for HashiCorp: - ```shell-session - $ brew tap hashicorp/tap - ``` - -1. Install the Consul K8s CLI with the `hashicorp/tap/consul` formula. - ```shell-session - $ brew install hashicorp/tap/consul-k8s - ``` - -1. Issue the `install` subcommand to install Consul on Kubernetes. Refer to the [Consul K8s CLI reference](/docs/k8s/k8s-cli) for details about all commands and available options. Without any additional options passed, the `consul-k8s` CLI will install Consul on Kubernetes by using the Consul Helm chart's default values. Below is an example that installs Consul on Kubernetes with Service Mesh and CRDs enabled. If you did not set the `-auto-approve` option to `true`, you will be prompted to proceed with the installation if the pre-install checks pass. - - -> The pre-install checks may fail if existing `PersistentVolumeClaims` (PVC) are detected. Refer to the [uninstall instructions](/docs/k8s/operations/uninstall#uninstall-consul) for information about removing PVCs. 
- - ```shell-session - $ consul-k8s install -set connectInject.enabled=true -set controller.enabled=true - - ==> Pre-Install Checks - No existing installations found. - ✓ No previous persistent volume claims found - ✓ No previous secrets found - - ==> Consul Installation Summary - Installation name: consul - Namespace: consul - Overrides: - connectInject: - enabled: true - controller: - enabled: true - - Proceed with installation? (y/N) y - - ==> Running Installation - ✓ Downloaded charts - --> creating 1 resource(s) - --> creating 45 resource(s) - --> beginning wait for 45 resources with timeout of 10m0s - ✓ Consul installed into namespace "consul" - ``` - -1. (Optional) Issue the `consul-k8s status` command to quickly glance at the status of the installed Consul cluster. - - ```shell-session - $ consul-k8s status - - ==> Consul-K8s Status Summary - NAME | NAMESPACE | STATUS | CHARTVERSION | APPVERSION | REVISION | LAST UPDATED - ---------+-----------+----------+--------------+------------+----------+-------------------------- - consul | consul | deployed | 0.40.0 | 1.11.2 | 1 | 2022/01/31 16:58:51 PST - - ==> Config: - connectInject: - enabled: true - controller: - enabled: true - global: - name: consul - - ✓ Consul servers healthy (3/3) - ✓ Consul clients healthy (3/3) - ``` - -## Helm Chart Installation - -We recommend using the Consul Helm chart to install Consul on Kubernetes for multi-cluster installations that involve cross-partition of cross datacenter communication. The Helm chart installs and configures all necessary components to run Consul. The configuration enables you to run a server cluster, a client cluster, or both. - -Step-by-step tutorials for how to deploy Consul to Kubernetes, please see -our [Deploy to Kubernetes](https://learn.hashicorp.com/collections/consul/kubernetes-deploy) -collection. This collection includes configuration caveats for single-node deployments. - -The Helm chart exposes several useful configurations and automatically -sets up complex resources, but it **does not automatically operate Consul.** -You must still become familiar with how to monitor, backup, -upgrade, etc. the Consul cluster. - -The Helm chart has no required configuration and will install a Consul -cluster with default configurations. We strongly recommend [learning about the configuration options](/docs/k8s/helm#configuration-values) prior to going to production. - -~> **Security Warning:** By default, the chart will install an insecure configuration -of Consul. This provides a less complicated out-of-box experience for new users, -but is not appropriate for a production setup. We strongly recommend using -a properly-secured Kubernetes cluster or making sure that you understand and enable -the [recommended security features](/docs/security). Currently, -some of these features are not supported in the Helm chart and require additional -manual configuration. - -### Prerequisites - -The Consul Helm only supports Helm 3.2+. Install the latest version of the Helm CLI here: -[Installing Helm](https://helm.sh/docs/intro/install/). - -### Installing Consul +## Install Consul 1. Add the HashiCorp Helm Repository: @@ -171,14 +75,14 @@ The Consul Helm only supports Helm 3.2+. Install the latest version of the Helm ``` -### Customizing Your Installation +## Custom installation If you want to customize your installation, create a `config.yaml` file to override the default settings. 
You can learn what settings are available by running `helm inspect values hashicorp/consul` or by reading the [Helm Chart Reference](/docs/k8s/helm). -#### Minimal `config.yaml` for Consul Service Mesh +### Minimal `config.yaml` for Consul service mesh The minimal settings to enable [Consul Service Mesh](/docs/k8s/connect) would be captured in the following `config.yaml` config file: @@ -203,7 +107,59 @@ NAME: consul ... -#### Enable Consul Service Mesh on select namespaces +### Enable the Consul CNI plugin + +By default, Consul generates a `connect-inject init` container as part of the Kubernetes pod startup process when Consul is in [transparent proxy mode](/docs/connect/transparent-proxy). The container configures traffic redirection in the service mesh through the sidecar proxy. To configure redirection, the container requires elevated CAP_NET_ADMIN privileges, which may not be compatible with security policies in your organization. + +Instead, you can enable the Consul container network interface (CNI) plugin to perform traffic redirection. Because the plugin is executed by the Kubernetes kubelet, the plugin already has the elevated privileges necessary to configure the network. + +Add the following configuration to your `config.yaml` file to enable the Consul CNI plugin: + + + + + +```yaml +global: + name: consul +connectInject: + enabled: true + cni: + enabled: true + logLevel: info + cniBinDir: "/opt/cni/bin" + cniNetDir: "/etc/cni/net.d" +``` + + + + +```yaml +global: + name: consul +connectInject: + enabled: true + cni: + enabled: true + logLevel: info + cniBinDir: "/home/kubernetes/bin" + cniNetDir: "/etc/cni/net.d" +``` + + + + + +The following table describes the available CNI plugin options: + +| Option | Description | Default | +| --- | --- | --- | +| `cni.enabled` | Boolean value that enables or disables the CNI plugin. If `true`, the plugin is responsible for redirecting traffic in the service mesh. If `false`, redirection is handled by the `connect-inject init` container. | `false` | +| `cni.logLevel` | String value that specifies the log level for the installer and plugin. You can specify the following values: `info`, `debug`, `error`. | `info` | +| `cni.cniBinDir` | String value that specifies the location on the Kubernetes node where the CNI plugin is installed. | `/opt/cni/bin` | +| `cni.cniNetDir` | String value that specifies the location on the Kubernetes node for storing the CNI configuration. | `/etc/cni/net.d` | + +### Enable Consul service mesh on select namespaces By default, Consul Service Mesh is enabled on almost all namespaces (with the exception of `kube-system` and `local-path-storage`) within a Kubernetes cluster. You can restrict this to a subset of namespaces by specifying a `namespaceSelector` that matches a label attached to each namespace denoting whether to enable Consul service mesh. In order to default to enabling service mesh on select namespaces by label, the `connectInject.default` value must be set to `true`. @@ -239,12 +195,16 @@ NAME: consul ... -#### Updating your Consul on Kubernetes configuration +### Update your Consul on Kubernetes configuration If you've already installed Consul and want to make changes, you'll need to run `helm upgrade`. See [Upgrading](/docs/k8s/upgrade) for more details. -## Viewing the Consul UI +## Usage + +You can view the Consul UI and access the Consul HTTP API after installation. + +### Viewing the Consul UI The Consul UI is enabled by default when using the Helm chart. 
For security reasons, it isn't exposed via a `LoadBalancer` Service by default so you must @@ -293,14 +253,14 @@ Then paste the token into the UI under the ACLs tab (without the `%`). to retrieve the bootstrap token since secondary datacenters use a separate token with less permissions. -### Exposing the UI via a service +#### Exposing the UI via a service If you want to expose the UI via a Kubernetes Service, configure the [`ui.service` chart values](/docs/k8s/helm#v-ui-service). This service will allow requests to the Consul servers so it should not be open to the world. -## Accessing the Consul HTTP API +### Accessing the Consul HTTP API The Consul HTTP API should be accessed by communicating to the local agent running on the same node. While technically any listening agent (client or diff --git a/website/content/docs/k8s/installation/platforms/self-hosted-kubernetes.mdx b/website/content/docs/k8s/platforms/self-hosted-kubernetes.mdx similarity index 100% rename from website/content/docs/k8s/installation/platforms/self-hosted-kubernetes.mdx rename to website/content/docs/k8s/platforms/self-hosted-kubernetes.mdx diff --git a/website/data/docs-nav-data.json b/website/data/docs-nav-data.json index cb33486b7a..b89ec40984 100644 --- a/website/data/docs-nav-data.json +++ b/website/data/docs-nav-data.json @@ -419,85 +419,53 @@ "title": "Architecture", "path": "k8s/architecture" }, + { - "title": "Get Started", + "title": "Installation", "routes": [ { - "title": "Installing Consul on Kubernetes", - "path": "k8s/installation/install" - }, - { - "title": "Installing Consul K8s CLI", + "title": "Install from Consul K8s CLI", "path": "k8s/installation/install-cli" }, { - "title": "Platform Guides", - "routes": [ - { - "title": "Minikube", - "href": "https://learn.hashicorp.com/tutorials/consul/kubernetes-minikube?utm_source=consul.io&utm_medium=docs&utm_content=k8s&utm_term=mk" - }, - { - "title": "Kind", - "href": "https://learn.hashicorp.com/tutorials/consul/kubernetes-kind?utm_source=consul.io&utm_medium=docs&utm_content=k8s&utm_term=kind" - }, - { - "title": "AKS (Azure)", - "href": "https://learn.hashicorp.com/tutorials/consul/kubernetes-aks-azure?utm_source=consul.io&utm_medium=docs&utm_content=k8s&utm_term=aks" - }, - { - "title": "EKS (AWS)", - "href": "https://learn.hashicorp.com/tutorials/consul/kubernetes-eks-aws?utm_source=consul.io&utm_medium=docs&utm_content=k8s&utm_term=eks" - }, - { - "title": "GKE (Google Cloud)", - "href": "https://learn.hashicorp.com/tutorials/consul/kubernetes-gke-google?utm_source=consul.io&utm_medium=docs&utm_content=k8s&utm_term=gke" - }, - { - "title": "Red Hat OpenShift", - "href": "https://learn.hashicorp.com/tutorials/consul/kubernetes-openshift-red-hat?utm_source=consul.io&utm_medium=docs&utm_content=k8s&utm_term=openshift" - }, - { - "title": "Self Hosted Kubernetes", - "path": "k8s/installation/platforms/self-hosted-kubernetes" - } - ] + "title": "Install from Helm Chart", + "path": "k8s/installation/install" + } + ] + }, + { + "title": "Deployment Configurations", + "routes": [ + { + "title": "Consul Clients Outside Kubernetes", + "path": "k8s/deployment-configurations/clients-outside-kubernetes" }, { - "title": "Deployment Configurations", - "routes": [ - { - "title": "Consul Clients Outside Kubernetes", - "path": "k8s/installation/deployment-configurations/clients-outside-kubernetes" - }, - { - "title": "Consul Servers Outside Kubernetes", - "path": "k8s/installation/deployment-configurations/servers-outside-kubernetes" - }, - { - "title": "Single 
Consul Datacenter in Multiple Kubernetes Clusters", - "path": "k8s/installation/deployment-configurations/single-dc-multi-k8s" - }, - { - "title": "Consul Enterprise", - "path": "k8s/installation/deployment-configurations/consul-enterprise" - } - ] + "title": "Consul Servers Outside Kubernetes", + "path": "k8s/deployment-configurations/servers-outside-kubernetes" + }, + { + "title": "Single Consul Datacenter in Multiple Kubernetes Clusters", + "path": "k8s/deployment-configurations/single-dc-multi-k8s" + }, + { + "title": "Consul Enterprise", + "path": "k8s/deployment-configurations/consul-enterprise" }, { "title": "Multi-Cluster Federation", "routes": [ { "title": "Overview", - "path": "k8s/installation/multi-cluster" + "path": "k8s/deployment-configurations/multi-cluster" }, { "title": "Federation Between Kubernetes Clusters", - "path": "k8s/installation/multi-cluster/kubernetes" + "path": "k8s/deployment-configurations/multi-cluster/kubernetes" }, { "title": "Federation Between VMs and Kubernetes", - "path": "k8s/installation/multi-cluster/vms-and-kubernetes" + "path": "k8s/deployment-configurations/multi-cluster/vms-and-kubernetes" } ] }, @@ -506,65 +474,98 @@ "routes": [ { "title": "Overview", - "path": "k8s/installation/vault" + "path": "k8s/deployment-configurations/vault" }, { "title": "Systems Integration", - "path": "k8s/installation/vault/systems-integration" + "path": "k8s/deployment-configurations/vault/systems-integration" }, { "title": "Data Integration", "routes": [ { "title": "Overview", - "path": "k8s/installation/vault/data-integration" + "path": "k8s/deployment-configurations/vault/data-integration" }, { "title": "Bootstrap Token", - "path": "k8s/installation/vault/data-integration/bootstrap-token" + "path": "k8s/deployment-configurations/vault/data-integration/bootstrap-token" }, { "title": "Enterprise License", - "path": "k8s/installation/vault/data-integration/enterprise-license" + "path": "k8s/deployment-configurations/vault/data-integration/enterprise-license" }, { "title": "Gossip Encryption Key", - "path": "k8s/installation/vault/data-integration/gossip" + "path": "k8s/deployment-configurations/vault/data-integration/gossip" }, { "title": "Partition Token", - "path": "k8s/installation/vault/data-integration/partition-token" + "path": "k8s/deployment-configurations/vault/data-integration/partition-token" }, { "title": "Replication Token", - "path": "k8s/installation/vault/data-integration/replication-token" + "path": "k8s/deployment-configurations/vault/data-integration/replication-token" }, { "title": "Server TLS", - "path": "k8s/installation/vault/data-integration/server-tls" + "path": "k8s/deployment-configurations/vault/data-integration/server-tls" }, { "title": "Service Mesh Certificates", - "path": "k8s/installation/vault/data-integration/connect-ca" + "path": "k8s/deployment-configurations/vault/data-integration/connect-ca" }, { "title": "Snapshot Agent Config", - "path": "k8s/installation/vault/data-integration/snapshot-agent-config" + "path": "k8s/deployment-configurations/vault/data-integration/snapshot-agent-config" }, { "title": "Webhook Certificates", - "path": "k8s/installation/vault/data-integration/webhook-certs" + "path": "k8s/deployment-configurations/vault/data-integration/webhook-certs" } ] }, { "title": "WAN Federation", - "path": "k8s/installation/vault/wan-federation" + "path": "k8s/deployment-configurations/vault/wan-federation" } ] } ] }, + { + "title": "Platform Guides", + "routes": [ + { + "title": "Minikube", + "href": 
"https://learn.hashicorp.com/tutorials/consul/kubernetes-minikube?utm_source=consul.io&utm_medium=docs&utm_content=k8s&utm_term=mk" + }, + { + "title": "Kind", + "href": "https://learn.hashicorp.com/tutorials/consul/kubernetes-kind?utm_source=consul.io&utm_medium=docs&utm_content=k8s&utm_term=kind" + }, + { + "title": "AKS (Azure)", + "href": "https://learn.hashicorp.com/tutorials/consul/kubernetes-aks-azure?utm_source=consul.io&utm_medium=docs&utm_content=k8s&utm_term=aks" + }, + { + "title": "EKS (AWS)", + "href": "https://learn.hashicorp.com/tutorials/consul/kubernetes-eks-aws?utm_source=consul.io&utm_medium=docs&utm_content=k8s&utm_term=eks" + }, + { + "title": "GKE (Google Cloud)", + "href": "https://learn.hashicorp.com/tutorials/consul/kubernetes-gke-google?utm_source=consul.io&utm_medium=docs&utm_content=k8s&utm_term=gke" + }, + { + "title": "Red Hat OpenShift", + "href": "https://learn.hashicorp.com/tutorials/consul/kubernetes-openshift-red-hat?utm_source=consul.io&utm_medium=docs&utm_content=k8s&utm_term=openshift" + }, + { + "title": "Self Hosted Kubernetes", + "path": "k8s/platforms/self-hosted-kubernetes" + } + ] + }, { "title": "Service Mesh", "routes": [ @@ -693,6 +694,7 @@ } ] }, + { "title": "AWS ECS", "routes": [ diff --git a/website/redirects.js b/website/redirects.js index a4b3f272bb..c9c5b668b0 100644 --- a/website/redirects.js +++ b/website/redirects.js @@ -1278,6 +1278,129 @@ module.exports = [ destination: '/docs/k8s/installation/vault/data-integration/connect-ca', permanent: true, }, + { + source: '/docs/k8s/installation/install#consul-k8s-cli-installation', + destination: '/docs/k8s/installation/install-cli', + permanent: true, + }, + { + source: + '/docs/k8s/installation/deployment-configurations/clients-outside-kubernetes', + destination: + '/docs/k8s/deployment-configurations/clients-outside-kubernetes', + permanent: true, + }, + { + source: + '/docs/k8s/installation/deployment-configurations/servers-outside-kubernetes', + destination: + '/docs/k8s/deployment-configurations/servers-outside-kubernetes', + permanent: true, + }, + { + source: + '/docs/k8s/installation/deployment-configurations/single-dc-multi-k8s', + destination: '/docs/k8s/deployment-configurations/single-dc-multi-k8s', + permanent: true, + }, + { + source: + '/docs/k8s/installation/deployment-configurations/consul-enterprise', + destination: '/docs/k8s/deployment-configurations/consul-enterprise', + permanent: true, + }, + { + source: '/docs/k8s/installation/multi-cluster', + destination: '/docs/k8s/deployment-configurations/multi-cluster', + permanent: true, + }, + { + source: '/docs/k8s/installation/multi-cluster/kubernetes', + destination: '/docs/k8s/deployment-configurations/multi-cluster/kubernetes', + permanent: true, + }, + { + source: '/docs/k8s/installation/multi-cluster/vms-and-kubernetes', + destination: + '/docs/k8s/deployent-configurations/multi-cluster/vms-and-kubernetes', + permanent: true, + }, + { + source: '/docs/k8s/installation/vault', + destination: '/docs/k8s/deployment-configurations/vault', + permanent: true, + }, + { + source: '/docs/k8s/installation/vault/systems-integration', + destination: + '/docs/k8s/deployment-configurations/vault/systems-integration', + permanent: true, + }, + { + source: '/docs/k8s/installation/vault/data-integration', + destination: '/docs/k8s/deployment-configurations/vault/data-integration', + permanent: true, + }, + { + source: '/docs/k8s/installation/vault/data-integration/bootstrap-token', + destination: + 
'/docs/k8s/deployment-configurations/vault/data-integration/bootstrap-token', + permanent: true, + }, + { + source: '/docs/k8s/installation/vault/data-integration/enterprise-license', + destination: + '/docs/k8s/deployment-configurations/vault/data-integration/enterprise-license', + permanent: true, + }, + { + source: '/docs/k8s/installation/vault/data-integration/gossip', + destination: + '/docs/k8s/deployment-configurations/vault/data-integration/gossip', + permanent: true, + }, + { + source: '/docs/k8s/installation/vault/data-integration/partition-token', + destination: + '/docs/k8s/deployment-configurations/vault/data-integration/partition-token', + permanent: true, + }, + { + source: '/docs/k8s/installation/vault/data-integration/replication-token', + destination: + '/docs/k8s/deployment-configurations/vault/data-integration/replication-token', + permanent: true, + }, + { + source: '/docs/k8s/installation/vault/data-integration/server-tls', + destination: + '/docs/k8s/deployment-configurations/vault/data-integration/server-tls', + permanent: true, + }, + { + source: '/docs/k8s/installation/vault/data-integration/connect-ca', + destination: + '/docs/k8s/deployment-configurations/vault/data-integration/connect-ca', + permanent: true, + }, + { + source: + '/docs/k8s/installation/vault/data-integration/snapshot-agent-config', + destination: + '/docs/k8s/deployment-configurations/vault/data-integration/snapshot-agent-config', + permanent: true, + }, + { + source: '/docs/k8s/installation/vault/data-integration/webhook-certs', + destination: + '/docs/k8s/deployment-configurations/vault/data-integration/webhook-certs', + permanent: true, + }, + { + source: '/docs/k8s/installation/vault/wan-federation', + destination: '/docs/k8s/deployment-configurations/vault/wan-federation', + permanent: true, + }, { source: '/docs/api-gateway/common-errors', destination: '/docs/api-gateway/usage#error-messages', permanent: true, @@ -1288,4 +1411,127 @@ module.exports = [ destination: '/docs/api-gateway/upgrades', permanent: true, }, + { + source: '/docs/k8s/installation/install#consul-k8s-cli-installation', + destination: '/docs/k8s/installation/install-cli', + permanent: true, + }, + { + source: + '/docs/k8s/installation/deployment-configurations/clients-outside-kubernetes', + destination: + '/docs/k8s/deployment-configurations/clients-outside-kubernetes', + permanent: true, + }, + { + source: + '/docs/k8s/installation/deployment-configurations/servers-outside-kubernetes', + destination: + '/docs/k8s/deployment-configurations/servers-outside-kubernetes', + permanent: true, + }, + { + source: + '/docs/k8s/installation/deployment-configurations/single-dc-multi-k8s', + destination: '/docs/k8s/deployment-configurations/single-dc-multi-k8s', + permanent: true, + }, + { + source: + '/docs/k8s/installation/deployment-configurations/consul-enterprise', + destination: '/docs/k8s/deployment-configurations/consul-enterprise', + permanent: true, + }, + { + source: '/docs/k8s/installation/multi-cluster', + destination: '/docs/k8s/deployment-configurations/multi-cluster', + permanent: true, + }, + { + source: '/docs/k8s/installation/multi-cluster/kubernetes', + destination: '/docs/k8s/deployment-configurations/multi-cluster/kubernetes', + permanent: true, + }, + { + source: '/docs/k8s/installation/multi-cluster/vms-and-kubernetes', + destination: + '/docs/k8s/deployment-configurations/multi-cluster/vms-and-kubernetes', + permanent: true, + }, + { + source: '/docs/k8s/installation/vault', + destination: 
'/docs/k8s/deployment-configurations/vault', + permanent: true, + }, + { + source: '/docs/k8s/installation/vault/systems-integration', + destination: + '/docs/k8s/deployment-configurations/vault/systems-integration', + permanent: true, + }, + { + source: '/docs/k8s/installation/vault/data-integration', + destination: '/docs/k8s/deployment-configurations/vault/data-integration', + permanent: true, + }, + { + source: '/docs/k8s/installation/vault/data-integration/bootstrap-token', + destination: + '/docs/k8s/deployment-configurations/vault/data-integration/bootstrap-token', + permanent: true, + }, + { + source: '/docs/k8s/installation/vault/data-integration/enterprise-license', + destination: + '/docs/k8s/deployment-configurations/vault/data-integration/enterprise-license', + permanent: true, + }, + { + source: '/docs/k8s/installation/vault/data-integration/gossip', + destination: + '/docs/k8s/deployment-configurations/vault/data-integration/gossip', + permanent: true, + }, + { + source: '/docs/k8s/installation/vault/data-integration/partition-token', + destination: + '/docs/k8s/deployment-configurations/vault/data-integration/partition-token', + permanent: true, + }, + { + source: '/docs/k8s/installation/vault/data-integration/replication-token', + destination: + '/docs/k8s/deployment-configurations/vault/data-integration/replication-token', + permanent: true, + }, + { + source: '/docs/k8s/installation/vault/data-integration/server-tls', + destination: + '/docs/k8s/deployment-configurations/vault/data-integration/server-tls', + permanent: true, + }, + { + source: '/docs/k8s/installation/vault/data-integration/connect-ca', + destination: + '/docs/k8s/deployment-configurations/vault/data-integration/connect-ca', + permanent: true, + }, + { + source: + '/docs/k8s/installation/vault/data-integration/snapshot-agent-config', + destination: + '/docs/k8s/deployment-configurations/vault/data-integration/snapshot-agent-config', + permanent: true, + }, + { + source: '/docs/k8s/installation/vault/data-integration/webhook-certs', + destination: + '/docs/k8s/deployment-configurations/vault/data-integration/webhook-certs', + permanent: true, + }, + { + source: '/docs/k8s/installation/vault/wan-federation', + destination: '/docs/k8s/deployment-configurations/vault/wan-federation', + permanent: true, + }, ] From 25d272a67ab560d495575defaa311fbde9d7ab81 Mon Sep 17 00:00:00 2001 From: Kyle Schochenmaier Date: Thu, 1 Sep 2022 19:21:27 -0500 Subject: [PATCH 86/93] update helm docs for release 0.48.0 (#14459) --- website/content/docs/k8s/helm.mdx | 52 +++++++++++++++++++++++++++---- 1 file changed, 46 insertions(+), 6 deletions(-) diff --git a/website/content/docs/k8s/helm.mdx b/website/content/docs/k8s/helm.mdx index be0c340800..c39bfaac27 100644 --- a/website/content/docs/k8s/helm.mdx +++ b/website/content/docs/k8s/helm.mdx @@ -270,10 +270,10 @@ Use these links to navigate to a particular top-level stanza. - `authMethodPath` ((#v-global-secretsbackend-vault-connectca-authmethodpath)) (`string: kubernetes`) - The mount path of the Kubernetes auth method in Vault. - `rootPKIPath` ((#v-global-secretsbackend-vault-connectca-rootpkipath)) (`string: ""`) - The path to a PKI secrets engine for the root certificate. - For more details, [Vault Connect CA configuration](https://www.consul.io/docs/connect/ca/vault#rootpkipath). + For more details, please refer to [Vault Connect CA configuration](https://www.consul.io/docs/connect/ca/vault#rootpkipath). 
- `intermediatePKIPath` ((#v-global-secretsbackend-vault-connectca-intermediatepkipath)) (`string: ""`) - The path to a PKI secrets engine for the generated intermediate certificate. - For more details, [Vault Connect CA configuration](https://www.consul.io/docs/connect/ca/vault#intermediatepkipath). + For more details, please refer to [Vault Connect CA configuration](https://www.consul.io/docs/connect/ca/vault#intermediatepkipath). - `additionalConfig` ((#v-global-secretsbackend-vault-connectca-additionalconfig)) (`string: {}`) - Additional Connect CA configuration in JSON format. Please refer to [Vault Connect CA configuration](https://www.consul.io/docs/connect/ca/vault#configuration) @@ -286,8 +286,8 @@ Use these links to navigate to a particular top-level stanza. { "connect": [{ "ca_config": [{ - "leaf_cert_ttl": "36h", - "namespace": "my-vault-ns" + "namespace": "my-vault-ns", + "leaf_cert_ttl": "36h" }] }] } @@ -505,8 +505,7 @@ Use these links to navigate to a particular top-level stanza. `-federation` (if setting `global.name`), otherwise `-consul-federation`. - - `primaryDatacenter` ((#v-global-federation-primarydatacenter)) (`string: null`) - The name of the primary datacenter. This should only be set for datacenters - that are not the primary datacenter. + - `primaryDatacenter` ((#v-global-federation-primarydatacenter)) (`string: null`) - The name of the primary datacenter. - `primaryGateways` ((#v-global-federation-primarygateways)) (`array: []`) - A list of addresses of the primary mesh gateways in the form `:`. (e.g. ["1.1.1.1:443", "2.3.4.5:443"] @@ -1577,6 +1576,47 @@ Use these links to navigate to a particular top-level stanza. --set 'connectInject.disruptionBudget.maxUnavailable=0'` flag to the helm chart installation command because of a limitation in the Helm templating language. + - `cni` ((#v-connectinject-cni)) - Configures the consul-cni plugin for Consul service mesh services + + - `enabled` ((#v-connectinject-cni-enabled)) (`boolean: false`) - If true, then all traffic redirection setup will use the consul-cni plugin. + Requires connectInject.enabled to also be true. + + - `logLevel` ((#v-connectinject-cni-loglevel)) (`string: null`) - Log level for the installer and plugin. Overrides global.logLevel + + - `cniBinDir` ((#v-connectinject-cni-cnibindir)) (`string: /opt/cni/bin`) - Location on the Kubernetes node where the CNI plugin is installed. Should be the absolute path and start with a '/' + Example on GKE: + + ```yaml + cniBinDir: "/home/kubernetes/bin" + ``` + + - `cniNetDir` ((#v-connectinject-cni-cninetdir)) (`string: /etc/cni/net.d`) - Location on the Kubernetes node of all CNI configuration. Should be the absolute path and start with a '/' + + - `resources` ((#v-connectinject-cni-resources)) (`map`) - The resource settings for the CNI installer daemonset. + + - `resourceQuota` ((#v-connectinject-cni-resourcequota)) - Resource quotas for running the daemonset as system critical pods + + - `pods` ((#v-connectinject-cni-resourcequota-pods)) (`integer: 5000`) + + - `securityContext` ((#v-connectinject-cni-securitycontext)) (`map`) - The security context for the CNI installer daemonset. This should be a YAML map corresponding to a + Kubernetes [SecurityContext](https://kubernetes.io/docs/tasks/configure-pod-container/security-context/) object. + By default, servers will run as root, with user ID `0` and group ID `0`. + Note: if running on OpenShift, this setting is ignored because the user and group are set automatically + by the OpenShift platform. 
+ + - `updateStrategy` ((#v-connectinject-cni-updatestrategy)) (`string: null`) - updateStrategy for the CNI installer DaemonSet. + See https://kubernetes.io/docs/tasks/manage-daemon/update-daemon-set/#daemonset-update-strategy. + This should be a multi-line string mapping directly to the updateStrategy + + Example: + + ```yaml + updateStrategy: | + rollingUpdate: + maxUnavailable: 5 + type: RollingUpdate + ``` + - `metrics` ((#v-connectinject-metrics)) - Configures metrics for Consul Connect services. All values are overridable via annotations on a per-pod basis. From ecc43db7746a32649eb063f05332788428eeba22 Mon Sep 17 00:00:00 2001 From: "Chris S. Kim" Date: Fri, 2 Sep 2022 11:57:28 -0400 Subject: [PATCH 87/93] Fix early return in prototest.AssertElementsMatch (#14467) --- proto/prototest/testing.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/proto/prototest/testing.go b/proto/prototest/testing.go index 275d8502b3..bf25fb0a10 100644 --- a/proto/prototest/testing.go +++ b/proto/prototest/testing.go @@ -57,7 +57,7 @@ func AssertElementsMatch[V any]( } } - if len(outX) == len(outY) && len(outX) == len(listX) { + if len(outX) == len(outY) && len(listX) == len(listY) { return // matches } From 08f7e52d824a05dc73fd9dd14f1eae87ec06b9c2 Mon Sep 17 00:00:00 2001 From: DanStough Date: Wed, 31 Aug 2022 17:15:32 -0400 Subject: [PATCH 88/93] fix(api): OSS<->ENT exported service incompatibility --- api/agent_test.go | 16 +++++----- api/catalog_test.go | 18 +++++------ api/config_entry_discoverychain_test.go | 40 ++++++++++++------------- api/config_entry_exports.go | 2 +- api/config_entry_exports_test.go | 6 ++-- api/config_entry_test.go | 4 +-- api/coordinate_test.go | 2 +- api/health_test.go | 4 +-- api/{oss.go => oss_test.go} | 4 +-- api/txn_test.go | 30 +++++++++---------- 10 files changed, 63 insertions(+), 63 deletions(-) rename api/{oss.go => oss_test.go} (79%) diff --git a/api/agent_test.go b/api/agent_test.go index d67aba7b8a..0c1660b1e9 100644 --- a/api/agent_test.go +++ b/api/agent_test.go @@ -363,7 +363,7 @@ func TestAPI_AgentServicesWithFilterOpts(t *testing.T) { } require.NoError(t, agent.ServiceRegister(reg)) - opts := &QueryOptions{Namespace: splitDefaultNamespace} + opts := &QueryOptions{Namespace: defaultNamespace} services, err := agent.ServicesWithFilterOpts("foo in Tags", opts) require.NoError(t, err) require.Len(t, services, 1) @@ -791,8 +791,8 @@ func TestAPI_AgentService(t *testing.T) { Warning: 1, }, Meta: map[string]string{}, - Namespace: splitDefaultNamespace, - Partition: splitDefaultPartition, + Namespace: defaultNamespace, + Partition: defaultPartition, Datacenter: "dc1", } require.Equal(t, expect, got) @@ -932,7 +932,7 @@ func TestAPI_AgentUpdateTTLOpts(t *testing.T) { } } - opts := &QueryOptions{Namespace: splitDefaultNamespace} + opts := &QueryOptions{Namespace: defaultNamespace} if err := agent.UpdateTTLOpts("service:foo", "foo", HealthWarning, opts); err != nil { t.Fatalf("err: %v", err) @@ -1007,7 +1007,7 @@ func TestAPI_AgentChecksWithFilterOpts(t *testing.T) { reg.TTL = "15s" require.NoError(t, agent.CheckRegister(reg)) - opts := &QueryOptions{Namespace: splitDefaultNamespace} + opts := &QueryOptions{Namespace: defaultNamespace} checks, err := agent.ChecksWithFilterOpts("Name == foo", opts) require.NoError(t, err) require.Len(t, checks, 1) @@ -1382,7 +1382,7 @@ func TestAPI_ServiceMaintenanceOpts(t *testing.T) { } // Specify namespace in query option - opts := &QueryOptions{Namespace: splitDefaultNamespace} + opts := &QueryOptions{Namespace: 
defaultNamespace} // Enable maintenance mode if err := agent.EnableServiceMaintenanceOpts("redis", "broken", opts); err != nil { @@ -1701,7 +1701,7 @@ func TestAPI_AgentHealthServiceOpts(t *testing.T) { requireServiceHealthID := func(t *testing.T, serviceID, expected string, shouldExist bool) { msg := fmt.Sprintf("service id:%s, shouldExist:%v, expectedStatus:%s : bad %%s", serviceID, shouldExist, expected) - opts := &QueryOptions{Namespace: splitDefaultNamespace} + opts := &QueryOptions{Namespace: defaultNamespace} state, out, err := agent.AgentHealthServiceByIDOpts(serviceID, opts) require.Nil(t, err, msg, "err") require.Equal(t, expected, state, msg, "state") @@ -1715,7 +1715,7 @@ func TestAPI_AgentHealthServiceOpts(t *testing.T) { requireServiceHealthName := func(t *testing.T, serviceName, expected string, shouldExist bool) { msg := fmt.Sprintf("service name:%s, shouldExist:%v, expectedStatus:%s : bad %%s", serviceName, shouldExist, expected) - opts := &QueryOptions{Namespace: splitDefaultNamespace} + opts := &QueryOptions{Namespace: defaultNamespace} state, outs, err := agent.AgentHealthServiceByNameOpts(serviceName, opts) require.Nil(t, err, msg, "err") require.Equal(t, expected, state, msg, "state") diff --git a/api/catalog_test.go b/api/catalog_test.go index 2c926d1999..b0071e87f4 100644 --- a/api/catalog_test.go +++ b/api/catalog_test.go @@ -51,7 +51,7 @@ func TestAPI_CatalogNodes(t *testing.T) { want := &Node{ ID: s.Config.NodeID, Node: s.Config.NodeName, - Partition: splitDefaultPartition, + Partition: defaultPartition, Address: "127.0.0.1", Datacenter: "dc1", TaggedAddresses: map[string]string{ @@ -1144,8 +1144,8 @@ func TestAPI_CatalogGatewayServices_Terminating(t *testing.T) { expect := []*GatewayService{ { - Service: CompoundServiceName{Name: "api", Namespace: splitDefaultNamespace, Partition: splitDefaultPartition}, - Gateway: CompoundServiceName{Name: "terminating", Namespace: splitDefaultNamespace, Partition: splitDefaultPartition}, + Service: CompoundServiceName{Name: "api", Namespace: defaultNamespace, Partition: defaultPartition}, + Gateway: CompoundServiceName{Name: "terminating", Namespace: defaultNamespace, Partition: defaultPartition}, GatewayKind: ServiceKindTerminatingGateway, CAFile: "api/ca.crt", CertFile: "api/client.crt", @@ -1153,8 +1153,8 @@ func TestAPI_CatalogGatewayServices_Terminating(t *testing.T) { SNI: "my-domain", }, { - Service: CompoundServiceName{Name: "redis", Namespace: splitDefaultNamespace, Partition: splitDefaultPartition}, - Gateway: CompoundServiceName{Name: "terminating", Namespace: splitDefaultNamespace, Partition: splitDefaultPartition}, + Service: CompoundServiceName{Name: "redis", Namespace: defaultNamespace, Partition: defaultPartition}, + Gateway: CompoundServiceName{Name: "terminating", Namespace: defaultNamespace, Partition: defaultPartition}, GatewayKind: ServiceKindTerminatingGateway, CAFile: "ca.crt", CertFile: "client.crt", @@ -1212,15 +1212,15 @@ func TestAPI_CatalogGatewayServices_Ingress(t *testing.T) { expect := []*GatewayService{ { - Service: CompoundServiceName{Name: "api", Namespace: splitDefaultNamespace, Partition: splitDefaultPartition}, - Gateway: CompoundServiceName{Name: "ingress", Namespace: splitDefaultNamespace, Partition: splitDefaultPartition}, + Service: CompoundServiceName{Name: "api", Namespace: defaultNamespace, Partition: defaultPartition}, + Gateway: CompoundServiceName{Name: "ingress", Namespace: defaultNamespace, Partition: defaultPartition}, GatewayKind: ServiceKindIngressGateway, Protocol: "tcp", 
Port: 8888, }, { - Service: CompoundServiceName{Name: "redis", Namespace: splitDefaultNamespace, Partition: splitDefaultPartition}, - Gateway: CompoundServiceName{Name: "ingress", Namespace: splitDefaultNamespace, Partition: splitDefaultPartition}, + Service: CompoundServiceName{Name: "redis", Namespace: defaultNamespace, Partition: defaultPartition}, + Gateway: CompoundServiceName{Name: "ingress", Namespace: defaultNamespace, Partition: defaultPartition}, GatewayKind: ServiceKindIngressGateway, Protocol: "tcp", Port: 9999, diff --git a/api/config_entry_discoverychain_test.go b/api/config_entry_discoverychain_test.go index c990fa0c68..8facb72e13 100644 --- a/api/config_entry_discoverychain_test.go +++ b/api/config_entry_discoverychain_test.go @@ -139,8 +139,8 @@ func TestAPI_ConfigEntry_DiscoveryChain(t *testing.T) { entry: &ServiceResolverConfigEntry{ Kind: ServiceResolver, Name: "test-failover", - Partition: splitDefaultPartition, - Namespace: splitDefaultNamespace, + Partition: defaultPartition, + Namespace: defaultNamespace, DefaultSubset: "v1", Subsets: map[string]ServiceResolverSubset{ "v1": { @@ -159,7 +159,7 @@ func TestAPI_ConfigEntry_DiscoveryChain(t *testing.T) { }, "v1": { Service: "alternate", - Namespace: splitDefaultNamespace, + Namespace: defaultNamespace, }, "v3": { Targets: []ServiceResolverFailoverTarget{ @@ -182,12 +182,12 @@ func TestAPI_ConfigEntry_DiscoveryChain(t *testing.T) { entry: &ServiceResolverConfigEntry{ Kind: ServiceResolver, Name: "test-redirect", - Partition: splitDefaultPartition, - Namespace: splitDefaultNamespace, + Partition: defaultPartition, + Namespace: defaultNamespace, Redirect: &ServiceResolverRedirect{ Service: "test-failover", ServiceSubset: "v2", - Namespace: splitDefaultNamespace, + Namespace: defaultNamespace, Datacenter: "d", }, }, @@ -198,8 +198,8 @@ func TestAPI_ConfigEntry_DiscoveryChain(t *testing.T) { entry: &ServiceResolverConfigEntry{ Kind: ServiceResolver, Name: "test-redirect", - Partition: splitDefaultPartition, - Namespace: splitDefaultNamespace, + Partition: defaultPartition, + Namespace: defaultNamespace, Redirect: &ServiceResolverRedirect{ Service: "test-failover", Peer: "cluster-01", @@ -212,14 +212,14 @@ func TestAPI_ConfigEntry_DiscoveryChain(t *testing.T) { entry: &ServiceSplitterConfigEntry{ Kind: ServiceSplitter, Name: "test-split", - Partition: splitDefaultPartition, - Namespace: splitDefaultNamespace, + Partition: defaultPartition, + Namespace: defaultNamespace, Splits: []ServiceSplit{ { Weight: 90, Service: "test-failover", ServiceSubset: "v1", - Namespace: splitDefaultNamespace, + Namespace: defaultNamespace, RequestHeaders: &HTTPHeaderModifiers{ Set: map[string]string{ "x-foo": "bar", @@ -232,7 +232,7 @@ func TestAPI_ConfigEntry_DiscoveryChain(t *testing.T) { { Weight: 10, Service: "test-redirect", - Namespace: splitDefaultNamespace, + Namespace: defaultNamespace, }, }, Meta: map[string]string{ @@ -247,8 +247,8 @@ func TestAPI_ConfigEntry_DiscoveryChain(t *testing.T) { entry: &ServiceRouterConfigEntry{ Kind: ServiceRouter, Name: "test-route", - Partition: splitDefaultPartition, - Namespace: splitDefaultNamespace, + Partition: defaultPartition, + Namespace: defaultNamespace, Routes: []ServiceRoute{ { Match: &ServiceRouteMatch{ @@ -265,8 +265,8 @@ func TestAPI_ConfigEntry_DiscoveryChain(t *testing.T) { Destination: &ServiceRouteDestination{ Service: "test-failover", ServiceSubset: "v2", - Namespace: splitDefaultNamespace, - Partition: splitDefaultPartition, + Namespace: defaultNamespace, + Partition: 
defaultPartition, PrefixRewrite: "/", RequestTimeout: 5 * time.Second, NumRetries: 5, @@ -358,8 +358,8 @@ func TestAPI_ConfigEntry_ServiceResolver_LoadBalancer(t *testing.T) { entry: &ServiceResolverConfigEntry{ Kind: ServiceResolver, Name: "test-least-req", - Partition: splitDefaultPartition, - Namespace: splitDefaultNamespace, + Partition: defaultPartition, + Namespace: defaultNamespace, LoadBalancer: &LoadBalancer{ Policy: "least_request", LeastRequestConfig: &LeastRequestConfig{ChoiceCount: 10}, @@ -372,8 +372,8 @@ func TestAPI_ConfigEntry_ServiceResolver_LoadBalancer(t *testing.T) { entry: &ServiceResolverConfigEntry{ Kind: ServiceResolver, Name: "test-ring-hash", - Namespace: splitDefaultNamespace, - Partition: splitDefaultPartition, + Namespace: defaultNamespace, + Partition: defaultPartition, LoadBalancer: &LoadBalancer{ Policy: "ring_hash", RingHashConfig: &RingHashConfig{ diff --git a/api/config_entry_exports.go b/api/config_entry_exports.go index e162b5fa60..0827e5816b 100644 --- a/api/config_entry_exports.go +++ b/api/config_entry_exports.go @@ -57,7 +57,7 @@ type ServiceConsumer struct { func (e *ExportedServicesConfigEntry) GetKind() string { return ExportedServices } func (e *ExportedServicesConfigEntry) GetName() string { return e.Name } func (e *ExportedServicesConfigEntry) GetPartition() string { return e.Name } -func (e *ExportedServicesConfigEntry) GetNamespace() string { return splitDefaultNamespace } +func (e *ExportedServicesConfigEntry) GetNamespace() string { return "" } func (e *ExportedServicesConfigEntry) GetMeta() map[string]string { return e.Meta } func (e *ExportedServicesConfigEntry) GetCreateIndex() uint64 { return e.CreateIndex } func (e *ExportedServicesConfigEntry) GetModifyIndex() uint64 { return e.ModifyIndex } diff --git a/api/config_entry_exports_test.go b/api/config_entry_exports_test.go index 8d56be0abb..4a6f3c7a25 100644 --- a/api/config_entry_exports_test.go +++ b/api/config_entry_exports_test.go @@ -17,7 +17,7 @@ func TestAPI_ConfigEntries_ExportedServices(t *testing.T) { testutil.RunStep(t, "set and get", func(t *testing.T) { exports := &ExportedServicesConfigEntry{ Name: PartitionDefaultName, - Partition: splitDefaultPartition, + Partition: defaultPartition, Meta: map[string]string{ "gir": "zim", }, @@ -48,7 +48,7 @@ func TestAPI_ConfigEntries_ExportedServices(t *testing.T) { Services: []ExportedService{ { Name: "db", - Namespace: splitDefaultNamespace, + Namespace: defaultNamespace, Consumers: []ServiceConsumer{ { PeerName: "alpha", @@ -60,7 +60,7 @@ func TestAPI_ConfigEntries_ExportedServices(t *testing.T) { "foo": "bar", "gir": "zim", }, - Partition: splitDefaultPartition, + Partition: defaultPartition, } _, wm, err := entries.Set(updated, nil) diff --git a/api/config_entry_test.go b/api/config_entry_test.go index 63aba11b8a..a897cdeb0c 100644 --- a/api/config_entry_test.go +++ b/api/config_entry_test.go @@ -215,8 +215,8 @@ func TestAPI_ConfigEntries(t *testing.T) { "foo": "bar", "gir": "zim", }, - Partition: splitDefaultPartition, - Namespace: splitDefaultNamespace, + Partition: defaultPartition, + Namespace: defaultNamespace, } ce := c.ConfigEntries() diff --git a/api/coordinate_test.go b/api/coordinate_test.go index 984167e177..071b1f99e4 100644 --- a/api/coordinate_test.go +++ b/api/coordinate_test.go @@ -87,7 +87,7 @@ func TestAPI_CoordinateUpdate(t *testing.T) { newCoord.Height = 0.5 entry := &CoordinateEntry{ Node: node, - Partition: splitDefaultPartition, + Partition: defaultPartition, Coord: newCoord, } _, err = coord.Update(entry, 
nil) diff --git a/api/health_test.go b/api/health_test.go index 7fc7a3f127..b69e9275fd 100644 --- a/api/health_test.go +++ b/api/health_test.go @@ -223,8 +223,8 @@ func TestAPI_HealthChecks(t *testing.T) { ServiceName: "foo", ServiceTags: []string{"bar"}, Type: "ttl", - Partition: splitDefaultPartition, - Namespace: splitDefaultNamespace, + Partition: defaultPartition, + Namespace: defaultNamespace, }, } diff --git a/api/oss.go b/api/oss_test.go similarity index 79% rename from api/oss.go rename to api/oss_test.go index 93d639e692..e4e266a389 100644 --- a/api/oss.go +++ b/api/oss_test.go @@ -6,5 +6,5 @@ package api // The following defaults return "default" in enterprise and "" in OSS. // This constant is useful when a default value is needed for an // operation that will reject non-empty values in OSS. -const splitDefaultNamespace = "" -const splitDefaultPartition = "" +const defaultNamespace = "" +const defaultPartition = "" diff --git a/api/txn_test.go b/api/txn_test.go index bf69b7bc8c..81348a8c27 100644 --- a/api/txn_test.go +++ b/api/txn_test.go @@ -187,7 +187,7 @@ func TestAPI_ClientTxn(t *testing.T) { CreateIndex: ret.Results[0].KV.CreateIndex, ModifyIndex: ret.Results[0].KV.ModifyIndex, Namespace: ret.Results[0].KV.Namespace, - Partition: splitDefaultPartition, + Partition: defaultPartition, }, }, &TxnResult{ @@ -199,14 +199,14 @@ func TestAPI_ClientTxn(t *testing.T) { CreateIndex: ret.Results[1].KV.CreateIndex, ModifyIndex: ret.Results[1].KV.ModifyIndex, Namespace: ret.Results[0].KV.Namespace, - Partition: splitDefaultPartition, + Partition: defaultPartition, }, }, &TxnResult{ Node: &Node{ ID: nodeID, Node: "foo", - Partition: splitDefaultPartition, + Partition: defaultPartition, Address: "2.2.2.2", Datacenter: "dc1", CreateIndex: ret.Results[2].Node.CreateIndex, @@ -218,8 +218,8 @@ func TestAPI_ClientTxn(t *testing.T) { ID: "foo1", CreateIndex: ret.Results[3].Service.CreateIndex, ModifyIndex: ret.Results[3].Service.CreateIndex, - Partition: splitDefaultPartition, - Namespace: splitDefaultNamespace, + Partition: defaultPartition, + Namespace: defaultNamespace, }, }, &TxnResult{ @@ -237,8 +237,8 @@ func TestAPI_ClientTxn(t *testing.T) { DeregisterCriticalServiceAfterDuration: 20 * time.Second, }, Type: "tcp", - Partition: splitDefaultPartition, - Namespace: splitDefaultNamespace, + Partition: defaultPartition, + Namespace: defaultNamespace, CreateIndex: ret.Results[4].Check.CreateIndex, ModifyIndex: ret.Results[4].Check.CreateIndex, }, @@ -258,8 +258,8 @@ func TestAPI_ClientTxn(t *testing.T) { DeregisterCriticalServiceAfterDuration: 160 * time.Second, }, Type: "tcp", - Partition: splitDefaultPartition, - Namespace: splitDefaultNamespace, + Partition: defaultPartition, + Namespace: defaultNamespace, CreateIndex: ret.Results[4].Check.CreateIndex, ModifyIndex: ret.Results[4].Check.CreateIndex, }, @@ -279,8 +279,8 @@ func TestAPI_ClientTxn(t *testing.T) { DeregisterCriticalServiceAfterDuration: 20 * time.Second, }, Type: "udp", - Partition: splitDefaultPartition, - Namespace: splitDefaultNamespace, + Partition: defaultPartition, + Namespace: defaultNamespace, CreateIndex: ret.Results[4].Check.CreateIndex, ModifyIndex: ret.Results[4].Check.CreateIndex, }, @@ -300,8 +300,8 @@ func TestAPI_ClientTxn(t *testing.T) { DeregisterCriticalServiceAfterDuration: 20 * time.Second, }, Type: "udp", - Partition: splitDefaultPartition, - Namespace: splitDefaultNamespace, + Partition: defaultPartition, + Namespace: defaultNamespace, CreateIndex: ret.Results[4].Check.CreateIndex, ModifyIndex: 
ret.Results[4].Check.CreateIndex, }, @@ -342,14 +342,14 @@ func TestAPI_ClientTxn(t *testing.T) { CreateIndex: ret.Results[0].KV.CreateIndex, ModifyIndex: ret.Results[0].KV.ModifyIndex, Namespace: ret.Results[0].KV.Namespace, - Partition: splitDefaultPartition, + Partition: defaultPartition, }, }, &TxnResult{ Node: &Node{ ID: s.Config.NodeID, Node: s.Config.NodeName, - Partition: splitDefaultPartition, + Partition: defaultPartition, Address: "127.0.0.1", Datacenter: "dc1", TaggedAddresses: map[string]string{ From 8f5ddec17c8258c84cf5a99a2774bddff2a4db05 Mon Sep 17 00:00:00 2001 From: alex <8968914+acpana@users.noreply.github.com> Date: Fri, 2 Sep 2022 09:56:40 -0700 Subject: [PATCH 89/93] lint net/rpc usage (#12816) Signed-off-by: acpana <8968914+acpana@users.noreply.github.com> Co-authored-by: R.B. Boyer --- .golangci.yml | 26 ++++++++++++++++++++++++++ 1 file changed, 26 insertions(+) diff --git a/.golangci.yml b/.golangci.yml index 5dd9235837..d71c93d163 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -8,6 +8,8 @@ linters: - ineffassign - unparam - forbidigo + - gomodguard + - depguard issues: # Disable the default exclude list so that all excludes are explicitly @@ -75,6 +77,30 @@ linters-settings: # Exclude godoc examples from forbidigo checks. # Default: true exclude_godoc_examples: false + gomodguard: + blocked: + # List of blocked modules. + modules: + # Blocked module. + - github.com/hashicorp/net-rpc-msgpackrpc: + recommendations: + - github.com/hashicorp/consul-net-rpc/net-rpc-msgpackrpc + - github.com/hashicorp/go-msgpack: + recommendations: + - github.com/hashicorp/consul-net-rpc/go-msgpack + + depguard: + list-type: denylist + include-go-root: true + # A list of packages for the list type specified. + # Default: [] + packages: + - net/rpc + # A list of packages for the list type specified. + # Specify an error message to output when a denied package is used. 
+ # Default: [] + packages-with-error-message: + - net/rpc: 'only use forked copy in github.com/hashicorp/consul-net-rpc/net/rpc' run: timeout: 10m From 0f7d4efac3c69bfc172f629f211c4cca619f6a3d Mon Sep 17 00:00:00 2001 From: cskh Date: Fri, 2 Sep 2022 14:28:05 -0400 Subject: [PATCH 90/93] fix(txn api): missing proxy config in registering proxy service (#14471) * fix(txn api): missing proxy config in registering proxy service --- agent/txn_endpoint.go | 37 ++++++++++ agent/txn_endpoint_test.go | 136 +++++++++++++++++++++++++++++++++---- api/catalog.go | 1 + api/health.go | 5 +- 4 files changed, 165 insertions(+), 14 deletions(-) diff --git a/agent/txn_endpoint.go b/agent/txn_endpoint.go index 4e898bfce8..f528bbda4d 100644 --- a/agent/txn_endpoint.go +++ b/agent/txn_endpoint.go @@ -185,6 +185,7 @@ func (s *HTTPHandlers) convertOps(resp http.ResponseWriter, req *http.Request) ( Address: node.Address, Datacenter: node.Datacenter, TaggedAddresses: node.TaggedAddresses, + PeerName: node.PeerName, Meta: node.Meta, RaftIndex: structs.RaftIndex{ ModifyIndex: node.ModifyIndex, @@ -207,6 +208,7 @@ func (s *HTTPHandlers) convertOps(resp http.ResponseWriter, req *http.Request) ( Service: structs.NodeService{ ID: svc.ID, Service: svc.Service, + Kind: structs.ServiceKind(svc.Kind), Tags: svc.Tags, Address: svc.Address, Meta: svc.Meta, @@ -226,6 +228,39 @@ func (s *HTTPHandlers) convertOps(resp http.ResponseWriter, req *http.Request) ( }, }, } + + if svc.Proxy != nil { + out.Service.Service.Proxy = structs.ConnectProxyConfig{} + t := &out.Service.Service.Proxy + if svc.Proxy.DestinationServiceName != "" { + t.DestinationServiceName = svc.Proxy.DestinationServiceName + } + if svc.Proxy.DestinationServiceID != "" { + t.DestinationServiceID = svc.Proxy.DestinationServiceID + } + if svc.Proxy.LocalServiceAddress != "" { + t.LocalServiceAddress = svc.Proxy.LocalServiceAddress + } + if svc.Proxy.LocalServicePort != 0 { + t.LocalServicePort = svc.Proxy.LocalServicePort + } + if svc.Proxy.LocalServiceSocketPath != "" { + t.LocalServiceSocketPath = svc.Proxy.LocalServiceSocketPath + } + if svc.Proxy.MeshGateway.Mode != "" { + t.MeshGateway.Mode = structs.MeshGatewayMode(svc.Proxy.MeshGateway.Mode) + } + + if svc.Proxy.TransparentProxy != nil { + if svc.Proxy.TransparentProxy.DialedDirectly { + t.TransparentProxy.DialedDirectly = svc.Proxy.TransparentProxy.DialedDirectly + } + + if svc.Proxy.TransparentProxy.OutboundListenerPort != 0 { + t.TransparentProxy.OutboundListenerPort = svc.Proxy.TransparentProxy.OutboundListenerPort + } + } + } opsRPC = append(opsRPC, out) case in.Check != nil: @@ -265,6 +300,8 @@ func (s *HTTPHandlers) convertOps(resp http.ResponseWriter, req *http.Request) ( ServiceID: check.ServiceID, ServiceName: check.ServiceName, ServiceTags: check.ServiceTags, + PeerName: check.PeerName, + ExposedPort: check.ExposedPort, Definition: structs.HealthCheckDefinition{ HTTP: check.Definition.HTTP, TLSServerName: check.Definition.TLSServerName, diff --git a/agent/txn_endpoint_test.go b/agent/txn_endpoint_test.go index 4b529d5dee..f6f47b8fab 100644 --- a/agent/txn_endpoint_test.go +++ b/agent/txn_endpoint_test.go @@ -585,6 +585,7 @@ func TestTxnEndpoint_UpdateCheck(t *testing.T) { "Output": "success", "ServiceID": "", "ServiceName": "", + "ExposedPort": 5678, "Definition": { "IntervalDuration": "15s", "TimeoutDuration": "15s", @@ -600,12 +601,8 @@ func TestTxnEndpoint_UpdateCheck(t *testing.T) { req, _ := http.NewRequest("PUT", "/v1/txn", buf) resp := httptest.NewRecorder() obj, err := a.srv.Txn(resp, req) 
- if err != nil { - t.Fatalf("err: %v", err) - } - if resp.Code != 200 { - t.Fatalf("expected 200, got %d", resp.Code) - } + require.NoError(t, err) + require.Equal(t, 200, resp.Code, resp.Body) txnResp, ok := obj.(structs.TxnResponse) if !ok { @@ -662,12 +659,13 @@ func TestTxnEndpoint_UpdateCheck(t *testing.T) { }, &structs.TxnResult{ Check: &structs.HealthCheck{ - Node: a.config.NodeName, - CheckID: "nodecheck", - Name: "Node http check", - Status: api.HealthPassing, - Notes: "Http based health check", - Output: "success", + Node: a.config.NodeName, + CheckID: "nodecheck", + Name: "Node http check", + Status: api.HealthPassing, + Notes: "Http based health check", + Output: "success", + ExposedPort: 5678, Definition: structs.HealthCheckDefinition{ Interval: 15 * time.Second, Timeout: 15 * time.Second, @@ -686,3 +684,117 @@ func TestTxnEndpoint_UpdateCheck(t *testing.T) { } assert.Equal(t, expected, txnResp) } + +func TestTxnEndpoint_NodeService(t *testing.T) { + if testing.Short() { + t.Skip("too slow for testing.Short") + } + + t.Parallel() + a := NewTestAgent(t, "") + defer a.Shutdown() + testrpc.WaitForTestAgent(t, a.RPC, "dc1") + + // Make sure the fields of a check are handled correctly when both creating and + // updating, and test both sets of duration fields to ensure backwards compatibility. + buf := bytes.NewBuffer([]byte(fmt.Sprintf(` +[ + { + "Service": { + "Verb": "set", + "Node": "%s", + "Service": { + "Service": "test", + "Port": 4444 + } + } + }, + { + "Service": { + "Verb": "set", + "Node": "%s", + "Service": { + "Service": "test-sidecar-proxy", + "Port": 20000, + "Kind": "connect-proxy", + "Proxy": { + "DestinationServiceName": "test", + "DestinationServiceID": "test", + "LocalServiceAddress": "127.0.0.1", + "LocalServicePort": 4444, + "upstreams": [ + { + "DestinationName": "fake-backend", + "LocalBindPort": 25001 + } + ] + } + } + } + } +] +`, a.config.NodeName, a.config.NodeName))) + req, _ := http.NewRequest("PUT", "/v1/txn", buf) + resp := httptest.NewRecorder() + obj, err := a.srv.Txn(resp, req) + require.NoError(t, err) + require.Equal(t, 200, resp.Code) + + txnResp, ok := obj.(structs.TxnResponse) + if !ok { + t.Fatalf("bad type: %T", obj) + } + require.Equal(t, 2, len(txnResp.Results)) + + index := txnResp.Results[0].Service.ModifyIndex + expected := structs.TxnResponse{ + Results: structs.TxnResults{ + &structs.TxnResult{ + Service: &structs.NodeService{ + Service: "test", + ID: "test", + Port: 4444, + Weights: &structs.Weights{ + Passing: 1, + Warning: 1, + }, + RaftIndex: structs.RaftIndex{ + CreateIndex: index, + ModifyIndex: index, + }, + EnterpriseMeta: *structs.DefaultEnterpriseMetaInDefaultPartition(), + }, + }, + &structs.TxnResult{ + Service: &structs.NodeService{ + Service: "test-sidecar-proxy", + ID: "test-sidecar-proxy", + Port: 20000, + Kind: "connect-proxy", + Weights: &structs.Weights{ + Passing: 1, + Warning: 1, + }, + Proxy: structs.ConnectProxyConfig{ + DestinationServiceName: "test", + DestinationServiceID: "test", + LocalServiceAddress: "127.0.0.1", + LocalServicePort: 4444, + }, + TaggedAddresses: map[string]structs.ServiceAddress{ + "consul-virtual": { + Address: "240.0.0.1", + Port: 20000, + }, + }, + RaftIndex: structs.RaftIndex{ + CreateIndex: index, + ModifyIndex: index, + }, + EnterpriseMeta: *structs.DefaultEnterpriseMetaInDefaultPartition(), + }, + }, + }, + } + assert.Equal(t, expected, txnResp) +} diff --git a/api/catalog.go b/api/catalog.go index 80ae325eac..84a2bdbc65 100644 --- a/api/catalog.go +++ b/api/catalog.go @@ -20,6 
+20,7 @@ type Node struct { CreateIndex uint64 ModifyIndex uint64 Partition string `json:",omitempty"` + PeerName string `json:",omitempty"` } type ServiceAddress struct { diff --git a/api/health.go b/api/health.go index 2bcb3cb52e..0886bb12ac 100644 --- a/api/health.go +++ b/api/health.go @@ -45,6 +45,8 @@ type HealthCheck struct { Type string Namespace string `json:",omitempty"` Partition string `json:",omitempty"` + ExposedPort int + PeerName string `json:",omitempty"` Definition HealthCheckDefinition @@ -176,8 +178,7 @@ type HealthChecks []*HealthCheck // attached, this function determines the best representative of the status as // as single string using the following heuristic: // -// maintenance > critical > warning > passing -// +// maintenance > critical > warning > passing func (c HealthChecks) AggregatedStatus() string { var passing, warning, critical, maintenance bool for _, check := range c { From 97606d94a30b7880ea3db8951bf8a80bba43781f Mon Sep 17 00:00:00 2001 From: David Yu Date: Fri, 2 Sep 2022 15:34:15 -0700 Subject: [PATCH 91/93] docs: Update single dc multiple k8s clusters doc (#14476) Co-authored-by: Jona Apelbaum --- .../deployment-configurations/single-dc-multi-k8s.mdx | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/website/content/docs/k8s/deployment-configurations/single-dc-multi-k8s.mdx b/website/content/docs/k8s/deployment-configurations/single-dc-multi-k8s.mdx index d27c23fed2..b854ebc3e0 100644 --- a/website/content/docs/k8s/deployment-configurations/single-dc-multi-k8s.mdx +++ b/website/content/docs/k8s/deployment-configurations/single-dc-multi-k8s.mdx @@ -6,6 +6,8 @@ description: Single Consul Datacenter deployed in multiple Kubernetes clusters # Single Consul Datacenter in Multiple Kubernetes Clusters +~> **Note:** For running Consul across multiple Kubernetes clusters, it is generally recommended to utilize [Admin Partitions](/docs/enterprise/admin-partitions) for production environments. This Consul Enterprise feature allows for the ability to accommodate multiple tenants without concerns of resource collisions when administering a cluster at scale, and for the ability to run Consul on Kubernetes clusters across a non-flat network. + This page describes deploying a single Consul datacenter in multiple Kubernetes clusters, with servers and clients running in one cluster and only clients in the rest of the clusters. This example uses two Kubernetes clusters, but this approach could be extended to using more than two. @@ -19,16 +21,13 @@ to pods or nodes in another. In many hosted Kubernetes environments, this may ha * [Azure AKS CNI](https://docs.microsoft.com/en-us/azure/aks/concepts-network#azure-cni-advanced-networking) * [AWS EKS CNI](https://docs.aws.amazon.com/eks/latest/userguide/pod-networking.html) * [GKE VPC-native clusters](https://cloud.google.com/kubernetes-engine/docs/concepts/alias-ips). - -If a flat network is unavailable across all Kubernetes clusters, follow the instructions for using [Admin Partitions](/docs/enterprise/admin-partitions), which is a Consul Enterprise feature. - +* Either the Helm release name for each Kubernetes cluster must be unique, or `global.name` for each Kubernetes cluster must be unique to prevent collisions of ACL resources with the same prefix. ## Prepare Helm release name ahead of installs The Helm release name must be unique for each Kubernetes cluster. The Helm chart uses the Helm release name as a prefix for the -ACL resources that it creates, such as tokens and auth methods. 
If the names of the Helm releases -are identical, subsequent Consul on Kubernetes clusters overwrite existing ACL resources and cause the clusters to fail. +ACL resources that it creates, such as tokens and auth methods. If the names of the Helm releases are identical, or if `global.name` for each cluster is identical, subsequent Consul on Kubernetes clusters will overwrite existing ACL resources and cause the clusters to fail. Before proceeding with installation, prepare the Helm release names as environment variables for both the server and client install. From 4f41eaf88fcb657176827e7eb6fbe06431cc3da9 Mon Sep 17 00:00:00 2001 From: John Cowen Date: Mon, 5 Sep 2022 19:17:33 +0100 Subject: [PATCH 92/93] ui: Additionally use message for displaying errors in DataWriter (#14074) --- ui/packages/consul-ui/app/components/data-writer/index.hbs | 2 ++ 1 file changed, 2 insertions(+) diff --git a/ui/packages/consul-ui/app/components/data-writer/index.hbs b/ui/packages/consul-ui/app/components/data-writer/index.hbs index 5f0ecdfcdb..36f418459b 100644 --- a/ui/packages/consul-ui/app/components/data-writer/index.hbs +++ b/ui/packages/consul-ui/app/components/data-writer/index.hbs @@ -108,6 +108,8 @@ as |after|}} There was an error saving your {{or label type}}. {{#if (and api.error.status api.error.detail)}}
    {{api.error.status}}: {{api.error.detail}} + {{else if api.error.message}} +
    {{api.error.message}} {{/if}}

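The template change above falls back from HTTP-style error fields to a plain error `message`. As a minimal JavaScript sketch of that fallback order (the error shapes here are illustrative assumptions, not the UI's actual error types):

```js
// Sketch of the fallback the DataWriter template implements, assuming two
// hypothetical error shapes: HTTP-style { status, detail } and a plain Error.
function errorText(error) {
  if (error.status && error.detail) {
    // HTTP-style API error, e.g. { status: '403', detail: 'Permission denied' }
    return `${error.status}: ${error.detail}`;
  } else if (error.message) {
    // Plain JavaScript Error, which only carries a message
    return error.message;
  }
  return ''; // neither shape present: render nothing extra
}

console.log(errorText({ status: '500', detail: 'rpc error' })); // '500: rpc error'
console.log(errorText(new Error('Network request failed')));    // 'Network request failed'
```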
    From 9675faeab570f28daa6b284cccb6657211367195 Mon Sep 17 00:00:00 2001 From: John Cowen Date: Tue, 6 Sep 2022 11:13:51 +0100 Subject: [PATCH 93/93] ui: Add support for prefixing the API path (#14342) --- .../consul-ui/app/services/client/http.js | 3 ++- .../consul-ui/app/utils/get-environment.js | 7 ++++++ ui/packages/consul-ui/config/environment.js | 8 ++++++- .../mock-api/prefixed-api/v1/catalog/.config | 7 ++++++ .../prefixed-api/v1/catalog/datacenters | 13 +++++++++++ .../prefixed-api/v1/internal/acl/authorize | 14 ++++++++++++ .../prefixed-api/v1/internal/ui/services | 22 +++++++++++++++++++ .../node-tests/config/environment.js | 8 ++++++- .../tests/acceptance/api-prefix.feature | 7 ++++++ .../acceptance/steps/api-prefix-steps.js | 11 ++++++++++ ui/packages/consul-ui/tests/steps.js | 1 + .../consul-ui/tests/steps/doubles/http.js | 3 +++ 12 files changed, 101 insertions(+), 3 deletions(-) create mode 100644 ui/packages/consul-ui/mock-api/prefixed-api/v1/catalog/.config create mode 100644 ui/packages/consul-ui/mock-api/prefixed-api/v1/catalog/datacenters create mode 100644 ui/packages/consul-ui/mock-api/prefixed-api/v1/internal/acl/authorize create mode 100644 ui/packages/consul-ui/mock-api/prefixed-api/v1/internal/ui/services create mode 100644 ui/packages/consul-ui/tests/acceptance/api-prefix.feature create mode 100644 ui/packages/consul-ui/tests/acceptance/steps/api-prefix-steps.js diff --git a/ui/packages/consul-ui/app/services/client/http.js b/ui/packages/consul-ui/app/services/client/http.js index 9b77365019..708b64d2a5 100644 --- a/ui/packages/consul-ui/app/services/client/http.js +++ b/ui/packages/consul-ui/app/services/client/http.js @@ -203,12 +203,13 @@ export default class HttpService extends Service { // also see adapters/kv content-types in requestForCreate/UpdateRecord // also see https://github.com/hashicorp/consul/issues/3804 params.headers[CONTENT_TYPE] = 'application/json; charset=utf-8'; + params.url = `${this.env.var('CONSUL_API_PREFIX')}${params.url}`; return params; } fetchWithToken(path, params) { return this.settings.findBySlug('token').then(token => { - return fetch(`${path}`, { + return fetch(`${this.env.var('CONSUL_API_PREFIX')}${path}`, { ...params, credentials: 'include', headers: { diff --git a/ui/packages/consul-ui/app/utils/get-environment.js b/ui/packages/consul-ui/app/utils/get-environment.js index fd3757fbff..9fabe8b7ac 100644 --- a/ui/packages/consul-ui/app/utils/get-environment.js +++ b/ui/packages/consul-ui/app/utils/get-environment.js @@ -132,6 +132,12 @@ export default function(config = {}, win = window, doc = document) { return operatorConfig.LocalDatacenter; case 'CONSUL_DATACENTER_PRIMARY': return operatorConfig.PrimaryDatacenter; + case 'CONSUL_API_PREFIX': + // we want API prefix to look like an env var for if we ever change + // operator config to be an API request, we need this variable before we + // make and API request so this specific variable should never be be + // retrived via an API request + return operatorConfig.APIPrefix; case 'CONSUL_UI_CONFIG': dashboards = { service: undefined, @@ -246,6 +252,7 @@ export default function(config = {}, win = window, doc = document) { case 'CONSUL_UI_CONFIG': case 'CONSUL_DATACENTER_LOCAL': case 'CONSUL_DATACENTER_PRIMARY': + case 'CONSUL_API_PREFIX': case 'CONSUL_ACLS_ENABLED': case 'CONSUL_NSPACES_ENABLED': case 'CONSUL_PEERINGS_ENABLED': diff --git a/ui/packages/consul-ui/config/environment.js b/ui/packages/consul-ui/config/environment.js index a85b0093d8..4bd0967996 100644 --- 
a/ui/packages/consul-ui/config/environment.js +++ b/ui/packages/consul-ui/config/environment.js @@ -86,6 +86,7 @@ module.exports = function(environment, $ = process.env) { PartitionsEnabled: false, LocalDatacenter: env('CONSUL_DATACENTER_LOCAL', 'dc1'), PrimaryDatacenter: env('CONSUL_DATACENTER_PRIMARY', 'dc1'), + APIPrefix: env('CONSUL_API_PREFIX', '') }, // Static variables used in multiple places throughout the UI @@ -111,6 +112,7 @@ module.exports = function(environment, $ = process.env) { PartitionsEnabled: env('CONSUL_PARTITIONS_ENABLED', false), LocalDatacenter: env('CONSUL_DATACENTER_LOCAL', 'dc1'), PrimaryDatacenter: env('CONSUL_DATACENTER_PRIMARY', 'dc1'), + APIPrefix: env('CONSUL_API_PREFIX', '') }, '@hashicorp/ember-cli-api-double': { @@ -118,6 +120,7 @@ module.exports = function(environment, $ = process.env) { enabled: true, endpoints: { '/v1': '/mock-api/v1', + '/prefixed-api': '/mock-api/prefixed-api', }, }, APP: Object.assign({}, ENV.APP, { @@ -162,6 +165,7 @@ module.exports = function(environment, $ = process.env) { PartitionsEnabled: env('CONSUL_PARTITIONS_ENABLED', true), LocalDatacenter: env('CONSUL_DATACENTER_LOCAL', 'dc1'), PrimaryDatacenter: env('CONSUL_DATACENTER_PRIMARY', 'dc1'), + APIPrefix: env('CONSUL_API_PREFIX', '') }, '@hashicorp/ember-cli-api-double': { @@ -176,7 +180,9 @@ module.exports = function(environment, $ = process.env) { ENV = Object.assign({}, ENV, { // in production operatorConfig is populated at consul runtime from // operator configuration - operatorConfig: {}, + operatorConfig: { + APIPrefix: '' + }, }); break; } diff --git a/ui/packages/consul-ui/mock-api/prefixed-api/v1/catalog/.config b/ui/packages/consul-ui/mock-api/prefixed-api/v1/catalog/.config new file mode 100644 index 0000000000..93326539bf --- /dev/null +++ b/ui/packages/consul-ui/mock-api/prefixed-api/v1/catalog/.config @@ -0,0 +1,7 @@ +--- +"*": + GET: + "*": + headers: + response: + X-Consul-Default-Acl-Policy: ${env('CONSUL_ACL_POLICY', fake.helpers.randomize(['allow', 'deny']))} diff --git a/ui/packages/consul-ui/mock-api/prefixed-api/v1/catalog/datacenters b/ui/packages/consul-ui/mock-api/prefixed-api/v1/catalog/datacenters new file mode 100644 index 0000000000..5cfa3ee6b2 --- /dev/null +++ b/ui/packages/consul-ui/mock-api/prefixed-api/v1/catalog/datacenters @@ -0,0 +1,13 @@ +[ + ${ + range(env('CONSUL_DATACENTER_COUNT', 10)).map((item, i) => { + if(i === 0) { + return `"${env('CONSUL_DATACENTER_LOCAL', 'dc1')}"`; + } + return ` + "${fake.address.countryCode().toLowerCase()}_${ i % 2 ? 
"west" : "east"}-${i}" +`; + } + ) + } +] diff --git a/ui/packages/consul-ui/mock-api/prefixed-api/v1/internal/acl/authorize b/ui/packages/consul-ui/mock-api/prefixed-api/v1/internal/acl/authorize new file mode 100644 index 0000000000..11d84ed6e4 --- /dev/null +++ b/ui/packages/consul-ui/mock-api/prefixed-api/v1/internal/acl/authorize @@ -0,0 +1,14 @@ +[ +${ + http.body.map(item => { + return JSON.stringify( + Object.assign( + item, + { + Allow: !!JSON.parse(env(`CONSUL_RESOURCE_${item.Resource.toUpperCase()}_${item.Access.toUpperCase()}`, 'true')) + } + ) + ); + }) +} +] diff --git a/ui/packages/consul-ui/mock-api/prefixed-api/v1/internal/ui/services b/ui/packages/consul-ui/mock-api/prefixed-api/v1/internal/ui/services new file mode 100644 index 0000000000..ccf51a864a --- /dev/null +++ b/ui/packages/consul-ui/mock-api/prefixed-api/v1/internal/ui/services @@ -0,0 +1,22 @@ +[ + { + "Name": "consul", + "Datacenter": "dc1", + "Tags": null, + "Nodes": [ + "node" + ], + "ExternalSources": null, + "InstanceCount": 1, + "ChecksPassing": 1, + "ChecksWarning": 0, + "ChecksCritical": 0, + "GatewayConfig": {}, + "TransparentProxy": false, + "ConnectNative": false, + "Partition": "default", + "Namespace": "default", + "ConnectedWithProxy": false, + "ConnectedWithGateway": false + } +] diff --git a/ui/packages/consul-ui/node-tests/config/environment.js b/ui/packages/consul-ui/node-tests/config/environment.js index fc64faf944..286fb741db 100644 --- a/ui/packages/consul-ui/node-tests/config/environment.js +++ b/ui/packages/consul-ui/node-tests/config/environment.js @@ -11,7 +11,9 @@ test( { environment: 'production', CONSUL_BINARY_TYPE: 'oss', - operatorConfig: {} + operatorConfig: { + APIPrefix: '', + } }, { environment: 'test', @@ -24,6 +26,7 @@ test( PeeringEnabled: true, LocalDatacenter: 'dc1', PrimaryDatacenter: 'dc1', + APIPrefix: '', } }, { @@ -40,6 +43,7 @@ test( PeeringEnabled: true, LocalDatacenter: 'dc1', PrimaryDatacenter: 'dc1', + APIPrefix: '', } }, { @@ -56,6 +60,7 @@ test( PeeringEnabled: true, LocalDatacenter: 'dc1', PrimaryDatacenter: 'dc1', + APIPrefix: '', } }, { @@ -69,6 +74,7 @@ test( PeeringEnabled: true, LocalDatacenter: 'dc1', PrimaryDatacenter: 'dc1', + APIPrefix: '', } } ].forEach( diff --git a/ui/packages/consul-ui/tests/acceptance/api-prefix.feature b/ui/packages/consul-ui/tests/acceptance/api-prefix.feature new file mode 100644 index 0000000000..bc4a9d87e3 --- /dev/null +++ b/ui/packages/consul-ui/tests/acceptance/api-prefix.feature @@ -0,0 +1,7 @@ +@setupApplicationTest +Feature: api-prefix + Scenario: + Given 1 datacenter model with the value "dc1" + And an API prefix of "/prefixed-api" + When I visit the index page + Then a GET request was made to "/prefixed-api/v1/catalog/datacenters" diff --git a/ui/packages/consul-ui/tests/acceptance/steps/api-prefix-steps.js b/ui/packages/consul-ui/tests/acceptance/steps/api-prefix-steps.js new file mode 100644 index 0000000000..f40e979108 --- /dev/null +++ b/ui/packages/consul-ui/tests/acceptance/steps/api-prefix-steps.js @@ -0,0 +1,11 @@ +import steps from './steps'; + +// step definitions that are shared between features should be moved to the +// tests/acceptance/steps/steps.js file + +export default function(assert) { + return steps(assert) + .then('I should find a file', function() { + assert.ok(true, this.step); + }); +} diff --git a/ui/packages/consul-ui/tests/steps.js b/ui/packages/consul-ui/tests/steps.js index 43b1c0c553..973098dae8 100644 --- a/ui/packages/consul-ui/tests/steps.js +++ b/ui/packages/consul-ui/tests/steps.js 
@@ -87,6 +87,7 @@ export default function({ api.server.respondWith(url.split('?')[0], data); }; const setCookie = function(key, value) { + document.cookie = `${key}=${value}`; api.server.setCookie(key, value); }; diff --git a/ui/packages/consul-ui/tests/steps/doubles/http.js b/ui/packages/consul-ui/tests/steps/doubles/http.js index b92ec8ae7d..3624cc1aa2 100644 --- a/ui/packages/consul-ui/tests/steps/doubles/http.js +++ b/ui/packages/consul-ui/tests/steps/doubles/http.js @@ -17,5 +17,8 @@ export default function(scenario, respondWith, set, oidc) { }) .given('a network latency of $number', function(number) { set('CONSUL_LATENCY', number); + }) + .given('an API prefix of "$prefix"', function(prefix) { + set('CONSUL_API_PREFIX', prefix); }); }
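With this change, every request URL the UI builds is prepended with the `CONSUL_API_PREFIX` operator setting, which defaults to an empty string. A minimal sketch of that behavior, assuming a plain object lookup in place of the UI's operator-config-backed `env()` accessor:

```js
// Sketch of the URL prefixing added above; makeEnv stands in for the UI's
// env()/this.env.var() service and is an assumption for illustration only.
const makeEnv = (vars) => (name) => vars[name] ?? '';

function prefixedUrl(envVar, path) {
  // An empty CONSUL_API_PREFIX (the default) leaves URLs unchanged.
  return `${envVar('CONSUL_API_PREFIX')}${path}`;
}

const envVar = makeEnv({ CONSUL_API_PREFIX: '/prefixed-api' });
console.log(prefixedUrl(envVar, '/v1/catalog/datacenters'));
// '/prefixed-api/v1/catalog/datacenters', matching the acceptance test above
```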