2014-01-10 23:25:37 +00:00
|
|
|
package agent
|
|
|
|
|
|
|
|
import (
|
|
|
|
"fmt"
|
|
|
|
"net/http"
|
2014-04-21 20:11:05 +00:00
|
|
|
"net/http/httptest"
|
2019-04-16 16:00:15 +00:00
|
|
|
"net/url"
|
2014-07-05 16:49:10 +00:00
|
|
|
"reflect"
|
2021-02-08 16:53:18 +00:00
|
|
|
"strconv"
|
2022-01-31 16:17:35 +00:00
|
|
|
"strings"
|
2014-01-10 23:25:37 +00:00
|
|
|
"testing"
|
2020-11-13 20:05:16 +00:00
|
|
|
"time"
|
|
|
|
|
|
|
|
"github.com/armon/go-metrics"
|
|
|
|
"github.com/hashicorp/serf/coordinate"
|
|
|
|
"github.com/stretchr/testify/assert"
|
|
|
|
"github.com/stretchr/testify/require"
|
2015-07-27 21:41:46 +00:00
|
|
|
|
2017-07-06 10:34:00 +00:00
|
|
|
"github.com/hashicorp/consul/agent/structs"
|
2017-04-19 23:00:11 +00:00
|
|
|
"github.com/hashicorp/consul/api"
|
2022-05-10 20:25:51 +00:00
|
|
|
"github.com/hashicorp/consul/sdk/testutil"
|
2019-03-27 12:54:56 +00:00
|
|
|
"github.com/hashicorp/consul/sdk/testutil/retry"
|
2019-04-16 16:00:15 +00:00
|
|
|
"github.com/hashicorp/consul/testrpc"
|
2022-04-27 15:39:45 +00:00
|
|
|
"github.com/hashicorp/consul/types"
|
2014-01-10 23:25:37 +00:00
|
|
|
)
|
|
|
|
|
|
|
|
func TestHealthChecksInState(t *testing.T) {
|
2020-12-07 18:42:55 +00:00
|
|
|
if testing.Short() {
|
|
|
|
t.Skip("too slow for testing.Short")
|
|
|
|
}
|
|
|
|
|
2017-05-21 07:54:40 +00:00
|
|
|
t.Parallel()
|
2017-06-09 18:36:00 +00:00
|
|
|
t.Run("warning", func(t *testing.T) {
|
2020-03-31 19:59:56 +00:00
|
|
|
a := NewTestAgent(t, "")
|
2017-05-21 18:31:20 +00:00
|
|
|
defer a.Shutdown()
|
|
|
|
|
2017-05-09 11:38:05 +00:00
|
|
|
req, _ := http.NewRequest("GET", "/v1/health/state/warning?dc=dc1", nil)
|
2017-05-04 22:52:53 +00:00
|
|
|
retry.Run(t, func(r *retry.R) {
|
2015-11-15 05:05:37 +00:00
|
|
|
resp := httptest.NewRecorder()
|
2017-05-21 18:31:20 +00:00
|
|
|
obj, err := a.srv.HealthChecksInState(resp, req)
|
2015-11-15 05:05:37 +00:00
|
|
|
if err != nil {
|
2017-04-29 16:34:02 +00:00
|
|
|
r.Fatal(err)
|
2015-11-15 05:05:37 +00:00
|
|
|
}
|
|
|
|
if err := checkIndex(resp); err != nil {
|
2017-04-29 16:34:02 +00:00
|
|
|
r.Fatal(err)
|
2015-11-15 05:05:37 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
// Should be a non-nil empty list
|
|
|
|
nodes := obj.(structs.HealthChecks)
|
|
|
|
if nodes == nil || len(nodes) != 0 {
|
2017-04-29 16:34:02 +00:00
|
|
|
r.Fatalf("bad: %v", obj)
|
2015-11-15 05:05:37 +00:00
|
|
|
}
|
2017-04-29 16:34:02 +00:00
|
|
|
})
|
2015-11-15 05:05:37 +00:00
|
|
|
})
|
|
|
|
|
2017-06-09 18:36:00 +00:00
|
|
|
t.Run("passing", func(t *testing.T) {
|
2020-03-31 19:59:56 +00:00
|
|
|
a := NewTestAgent(t, "")
|
2017-05-21 18:31:20 +00:00
|
|
|
defer a.Shutdown()
|
|
|
|
|
2017-05-09 11:38:05 +00:00
|
|
|
req, _ := http.NewRequest("GET", "/v1/health/state/passing?dc=dc1", nil)
|
2017-05-04 22:52:53 +00:00
|
|
|
retry.Run(t, func(r *retry.R) {
|
2014-05-21 19:31:22 +00:00
|
|
|
resp := httptest.NewRecorder()
|
2017-05-21 18:31:20 +00:00
|
|
|
obj, err := a.srv.HealthChecksInState(resp, req)
|
2014-05-21 19:31:22 +00:00
|
|
|
if err != nil {
|
2017-04-29 16:34:02 +00:00
|
|
|
r.Fatal(err)
|
2014-05-21 19:31:22 +00:00
|
|
|
}
|
|
|
|
if err := checkIndex(resp); err != nil {
|
2017-04-29 16:34:02 +00:00
|
|
|
r.Fatal(err)
|
2014-05-21 19:31:22 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
// Should be 1 health check for the server
|
|
|
|
nodes := obj.(structs.HealthChecks)
|
|
|
|
if len(nodes) != 1 {
|
2017-04-29 16:34:02 +00:00
|
|
|
r.Fatalf("bad: %v", obj)
|
2014-05-21 19:31:22 +00:00
|
|
|
}
|
2017-04-29 16:34:02 +00:00
|
|
|
})
|
2014-05-21 19:31:22 +00:00
|
|
|
})
|
2014-01-10 23:25:37 +00:00
|
|
|
}
|
|
|
|
|
2017-01-14 01:08:43 +00:00
|
|
|
func TestHealthChecksInState_NodeMetaFilter(t *testing.T) {
|
2020-12-07 18:42:55 +00:00
|
|
|
if testing.Short() {
|
|
|
|
t.Skip("too slow for testing.Short")
|
|
|
|
}
|
|
|
|
|
2017-05-21 07:54:40 +00:00
|
|
|
t.Parallel()
|
2020-03-31 19:59:56 +00:00
|
|
|
a := NewTestAgent(t, "")
|
2017-05-21 18:31:20 +00:00
|
|
|
defer a.Shutdown()
|
|
|
|
|
|
|
|
args := &structs.RegisterRequest{
|
|
|
|
Datacenter: "dc1",
|
|
|
|
Node: "bar",
|
|
|
|
Address: "127.0.0.1",
|
|
|
|
NodeMeta: map[string]string{"somekey": "somevalue"},
|
|
|
|
Check: &structs.HealthCheck{
|
|
|
|
Node: "bar",
|
|
|
|
Name: "node check",
|
|
|
|
Status: api.HealthCritical,
|
|
|
|
},
|
|
|
|
}
|
|
|
|
var out struct{}
|
|
|
|
if err := a.RPC("Catalog.Register", args, &out); err != nil {
|
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
|
|
|
|
|
|
|
req, _ := http.NewRequest("GET", "/v1/health/state/critical?node-meta=somekey:somevalue", nil)
|
|
|
|
retry.Run(t, func(r *retry.R) {
|
|
|
|
resp := httptest.NewRecorder()
|
|
|
|
obj, err := a.srv.HealthChecksInState(resp, req)
|
|
|
|
if err != nil {
|
|
|
|
r.Fatal(err)
|
2017-01-14 01:08:43 +00:00
|
|
|
}
|
2017-05-21 18:31:20 +00:00
|
|
|
if err := checkIndex(resp); err != nil {
|
|
|
|
r.Fatal(err)
|
2017-01-14 01:08:43 +00:00
|
|
|
}
|
|
|
|
|
2017-05-21 18:31:20 +00:00
|
|
|
// Should be 1 health check for the server
|
|
|
|
nodes := obj.(structs.HealthChecks)
|
|
|
|
if len(nodes) != 1 {
|
|
|
|
r.Fatalf("bad: %v", obj)
|
|
|
|
}
|
2017-01-14 01:08:43 +00:00
|
|
|
})
|
|
|
|
}
|
|
|
|
|
2019-04-16 16:00:15 +00:00
|
|
|
func TestHealthChecksInState_Filter(t *testing.T) {
|
2020-12-07 18:42:55 +00:00
|
|
|
if testing.Short() {
|
|
|
|
t.Skip("too slow for testing.Short")
|
|
|
|
}
|
|
|
|
|
2019-04-16 16:00:15 +00:00
|
|
|
t.Parallel()
|
2020-03-31 19:59:56 +00:00
|
|
|
a := NewTestAgent(t, "")
|
2019-04-16 16:00:15 +00:00
|
|
|
defer a.Shutdown()
|
|
|
|
|
|
|
|
args := &structs.RegisterRequest{
|
|
|
|
Datacenter: "dc1",
|
|
|
|
Node: "bar",
|
|
|
|
Address: "127.0.0.1",
|
|
|
|
NodeMeta: map[string]string{"somekey": "somevalue"},
|
|
|
|
Check: &structs.HealthCheck{
|
|
|
|
Node: "bar",
|
|
|
|
Name: "node check",
|
|
|
|
Status: api.HealthCritical,
|
|
|
|
},
|
|
|
|
}
|
|
|
|
var out struct{}
|
|
|
|
require.NoError(t, a.RPC("Catalog.Register", args, &out))
|
|
|
|
|
|
|
|
args = &structs.RegisterRequest{
|
|
|
|
Datacenter: "dc1",
|
|
|
|
Node: "bar",
|
|
|
|
Address: "127.0.0.1",
|
|
|
|
NodeMeta: map[string]string{"somekey": "somevalue"},
|
|
|
|
Check: &structs.HealthCheck{
|
|
|
|
Node: "bar",
|
|
|
|
Name: "node check 2",
|
|
|
|
Status: api.HealthCritical,
|
|
|
|
},
|
|
|
|
SkipNodeUpdate: true,
|
|
|
|
}
|
|
|
|
require.NoError(t, a.RPC("Catalog.Register", args, &out))
|
|
|
|
|
|
|
|
req, _ := http.NewRequest("GET", "/v1/health/state/critical?filter="+url.QueryEscape("Name == `node check 2`"), nil)
|
|
|
|
retry.Run(t, func(r *retry.R) {
|
|
|
|
resp := httptest.NewRecorder()
|
|
|
|
obj, err := a.srv.HealthChecksInState(resp, req)
|
|
|
|
require.NoError(r, err)
|
|
|
|
require.NoError(r, checkIndex(resp))
|
|
|
|
|
|
|
|
// Should be 1 health check for the server
|
|
|
|
nodes := obj.(structs.HealthChecks)
|
|
|
|
require.Len(r, nodes, 1)
|
|
|
|
})
|
|
|
|
}
|
|
|
|
|
2015-07-27 21:41:46 +00:00
|
|
|
func TestHealthChecksInState_DistanceSort(t *testing.T) {
|
2020-12-07 18:42:55 +00:00
|
|
|
if testing.Short() {
|
|
|
|
t.Skip("too slow for testing.Short")
|
|
|
|
}
|
|
|
|
|
2017-05-21 07:54:40 +00:00
|
|
|
t.Parallel()
|
2020-03-31 19:59:56 +00:00
|
|
|
a := NewTestAgent(t, "")
|
2017-05-21 07:11:09 +00:00
|
|
|
defer a.Shutdown()
|
2015-07-27 21:41:46 +00:00
|
|
|
|
|
|
|
args := &structs.RegisterRequest{
|
|
|
|
Datacenter: "dc1",
|
|
|
|
Node: "bar",
|
|
|
|
Address: "127.0.0.1",
|
|
|
|
Check: &structs.HealthCheck{
|
2015-07-29 23:33:25 +00:00
|
|
|
Node: "bar",
|
|
|
|
Name: "node check",
|
2017-04-19 23:00:11 +00:00
|
|
|
Status: api.HealthCritical,
|
2015-07-27 21:41:46 +00:00
|
|
|
},
|
|
|
|
}
|
|
|
|
|
|
|
|
var out struct{}
|
2017-05-21 07:11:09 +00:00
|
|
|
if err := a.RPC("Catalog.Register", args, &out); err != nil {
|
2015-07-27 21:41:46 +00:00
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
|
|
|
|
|
|
|
args.Node, args.Check.Node = "foo", "foo"
|
2017-05-21 07:11:09 +00:00
|
|
|
if err := a.RPC("Catalog.Register", args, &out); err != nil {
|
2015-07-27 21:41:46 +00:00
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
|
|
|
|
2017-05-09 11:38:05 +00:00
|
|
|
req, _ := http.NewRequest("GET", "/v1/health/state/critical?dc=dc1&near=foo", nil)
|
2015-07-27 21:41:46 +00:00
|
|
|
resp := httptest.NewRecorder()
|
2017-05-21 07:11:09 +00:00
|
|
|
obj, err := a.srv.HealthChecksInState(resp, req)
|
2015-07-27 21:41:46 +00:00
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
|
|
|
assertIndex(t, resp)
|
|
|
|
nodes := obj.(structs.HealthChecks)
|
|
|
|
if len(nodes) != 2 {
|
|
|
|
t.Fatalf("bad: %v", nodes)
|
|
|
|
}
|
|
|
|
if nodes[0].Node != "bar" {
|
|
|
|
t.Fatalf("bad: %v", nodes)
|
|
|
|
}
|
|
|
|
if nodes[1].Node != "foo" {
|
|
|
|
t.Fatalf("bad: %v", nodes)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Send an update for the node and wait for it to get applied.
|
|
|
|
arg := structs.CoordinateUpdateRequest{
|
|
|
|
Datacenter: "dc1",
|
|
|
|
Node: "foo",
|
|
|
|
Coord: coordinate.NewCoordinate(coordinate.DefaultConfig()),
|
|
|
|
}
|
2017-05-21 07:11:09 +00:00
|
|
|
if err := a.RPC("Coordinate.Update", &arg, &out); err != nil {
|
2015-07-27 21:41:46 +00:00
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
2017-05-05 10:14:43 +00:00
|
|
|
// Retry until foo moves to the front of the line.
|
|
|
|
retry.Run(t, func(r *retry.R) {
|
|
|
|
resp = httptest.NewRecorder()
|
2017-05-21 07:11:09 +00:00
|
|
|
obj, err = a.srv.HealthChecksInState(resp, req)
|
2017-05-05 10:14:43 +00:00
|
|
|
if err != nil {
|
|
|
|
r.Fatalf("err: %v", err)
|
|
|
|
}
|
|
|
|
assertIndex(t, resp)
|
|
|
|
nodes = obj.(structs.HealthChecks)
|
|
|
|
if len(nodes) != 2 {
|
|
|
|
r.Fatalf("bad: %v", nodes)
|
|
|
|
}
|
|
|
|
if nodes[0].Node != "foo" {
|
|
|
|
r.Fatalf("bad: %v", nodes)
|
|
|
|
}
|
|
|
|
if nodes[1].Node != "bar" {
|
|
|
|
r.Fatalf("bad: %v", nodes)
|
|
|
|
}
|
|
|
|
})
|
2015-07-27 21:41:46 +00:00
|
|
|
}
|
|
|
|
|
2014-01-10 23:25:37 +00:00
|
|
|
func TestHealthNodeChecks(t *testing.T) {
|
2020-12-07 18:42:55 +00:00
|
|
|
if testing.Short() {
|
|
|
|
t.Skip("too slow for testing.Short")
|
|
|
|
}
|
|
|
|
|
2017-05-21 07:54:40 +00:00
|
|
|
t.Parallel()
|
2020-03-31 19:59:56 +00:00
|
|
|
a := NewTestAgent(t, "")
|
2017-05-21 07:11:09 +00:00
|
|
|
defer a.Shutdown()
|
2018-09-10 15:58:53 +00:00
|
|
|
testrpc.WaitForTestAgent(t, a.RPC, "dc1")
|
2014-01-10 23:25:37 +00:00
|
|
|
|
2017-05-09 11:38:05 +00:00
|
|
|
req, _ := http.NewRequest("GET", "/v1/health/node/nope?dc=dc1", nil)
|
2014-04-21 20:11:05 +00:00
|
|
|
resp := httptest.NewRecorder()
|
2017-05-21 07:11:09 +00:00
|
|
|
obj, err := a.srv.HealthNodeChecks(resp, req)
|
2014-01-10 23:25:37 +00:00
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
2014-04-21 20:11:05 +00:00
|
|
|
assertIndex(t, resp)
|
2014-02-05 22:36:13 +00:00
|
|
|
|
2015-11-15 05:05:37 +00:00
|
|
|
// Should be a non-nil empty list
|
2014-01-10 23:25:37 +00:00
|
|
|
nodes := obj.(structs.HealthChecks)
|
2015-11-15 05:05:37 +00:00
|
|
|
if nodes == nil || len(nodes) != 0 {
|
|
|
|
t.Fatalf("bad: %v", obj)
|
|
|
|
}
|
|
|
|
|
2017-05-21 07:11:09 +00:00
|
|
|
req, _ = http.NewRequest("GET", fmt.Sprintf("/v1/health/node/%s?dc=dc1", a.Config.NodeName), nil)
|
2015-11-15 05:05:37 +00:00
|
|
|
resp = httptest.NewRecorder()
|
2017-05-21 07:11:09 +00:00
|
|
|
obj, err = a.srv.HealthNodeChecks(resp, req)
|
2015-11-15 05:05:37 +00:00
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
|
|
|
assertIndex(t, resp)
|
|
|
|
|
|
|
|
// Should be 1 health check for the server
|
|
|
|
nodes = obj.(structs.HealthChecks)
|
2014-01-10 23:25:37 +00:00
|
|
|
if len(nodes) != 1 {
|
|
|
|
t.Fatalf("bad: %v", obj)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2019-04-16 16:00:15 +00:00
|
|
|
func TestHealthNodeChecks_Filtering(t *testing.T) {
|
2020-12-07 18:42:55 +00:00
|
|
|
if testing.Short() {
|
|
|
|
t.Skip("too slow for testing.Short")
|
|
|
|
}
|
|
|
|
|
2019-04-16 16:00:15 +00:00
|
|
|
t.Parallel()
|
2020-03-31 19:59:56 +00:00
|
|
|
a := NewTestAgent(t, "")
|
2019-04-16 16:00:15 +00:00
|
|
|
defer a.Shutdown()
|
|
|
|
testrpc.WaitForTestAgent(t, a.RPC, "dc1")
|
|
|
|
|
|
|
|
// Create a node check
|
|
|
|
args := &structs.RegisterRequest{
|
|
|
|
Datacenter: "dc1",
|
|
|
|
Node: "test-health-node",
|
|
|
|
Address: "127.0.0.2",
|
|
|
|
Check: &structs.HealthCheck{
|
|
|
|
Node: "test-health-node",
|
|
|
|
Name: "check1",
|
|
|
|
},
|
|
|
|
}
|
|
|
|
|
|
|
|
var out struct{}
|
|
|
|
require.NoError(t, a.RPC("Catalog.Register", args, &out))
|
|
|
|
|
|
|
|
// Create a second check
|
|
|
|
args = &structs.RegisterRequest{
|
|
|
|
Datacenter: "dc1",
|
|
|
|
Node: "test-health-node",
|
|
|
|
Address: "127.0.0.2",
|
|
|
|
Check: &structs.HealthCheck{
|
|
|
|
Node: "test-health-node",
|
|
|
|
Name: "check2",
|
|
|
|
},
|
|
|
|
SkipNodeUpdate: true,
|
|
|
|
}
|
|
|
|
require.NoError(t, a.RPC("Catalog.Register", args, &out))
|
|
|
|
|
|
|
|
req, _ := http.NewRequest("GET", "/v1/health/node/test-health-node?filter="+url.QueryEscape("Name == check2"), nil)
|
|
|
|
resp := httptest.NewRecorder()
|
|
|
|
obj, err := a.srv.HealthNodeChecks(resp, req)
|
|
|
|
require.NoError(t, err)
|
|
|
|
assertIndex(t, resp)
|
|
|
|
|
|
|
|
// Should be 1 health check for the server
|
|
|
|
nodes := obj.(structs.HealthChecks)
|
|
|
|
require.Len(t, nodes, 1)
|
|
|
|
}
|
|
|
|
|
2014-01-10 23:25:37 +00:00
|
|
|
func TestHealthServiceChecks(t *testing.T) {
|
2020-12-07 18:42:55 +00:00
|
|
|
if testing.Short() {
|
|
|
|
t.Skip("too slow for testing.Short")
|
|
|
|
}
|
|
|
|
|
2017-05-21 07:54:40 +00:00
|
|
|
t.Parallel()
|
2020-03-31 19:59:56 +00:00
|
|
|
a := NewTestAgent(t, "")
|
2017-05-21 07:11:09 +00:00
|
|
|
defer a.Shutdown()
|
2018-09-12 13:49:27 +00:00
|
|
|
testrpc.WaitForTestAgent(t, a.RPC, "dc1")
|
2014-05-07 21:47:16 +00:00
|
|
|
|
2017-05-09 11:38:05 +00:00
|
|
|
req, _ := http.NewRequest("GET", "/v1/health/checks/consul?dc=dc1", nil)
|
2015-11-15 05:05:37 +00:00
|
|
|
resp := httptest.NewRecorder()
|
2017-05-21 07:11:09 +00:00
|
|
|
obj, err := a.srv.HealthServiceChecks(resp, req)
|
2015-11-15 05:05:37 +00:00
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
|
|
|
assertIndex(t, resp)
|
|
|
|
|
|
|
|
// Should be a non-nil empty list
|
|
|
|
nodes := obj.(structs.HealthChecks)
|
|
|
|
if nodes == nil || len(nodes) != 0 {
|
|
|
|
t.Fatalf("bad: %v", obj)
|
|
|
|
}
|
|
|
|
|
2014-05-07 21:47:16 +00:00
|
|
|
// Create a service check
|
2014-01-10 23:25:37 +00:00
|
|
|
args := &structs.RegisterRequest{
|
|
|
|
Datacenter: "dc1",
|
2017-05-21 07:11:09 +00:00
|
|
|
Node: a.Config.NodeName,
|
2014-01-10 23:25:37 +00:00
|
|
|
Address: "127.0.0.1",
|
|
|
|
Check: &structs.HealthCheck{
|
2017-05-21 07:11:09 +00:00
|
|
|
Node: a.Config.NodeName,
|
2014-01-10 23:25:37 +00:00
|
|
|
Name: "consul check",
|
|
|
|
ServiceID: "consul",
|
2019-10-17 18:33:11 +00:00
|
|
|
Type: "grpc",
|
2014-01-10 23:25:37 +00:00
|
|
|
},
|
|
|
|
}
|
2014-05-07 21:12:53 +00:00
|
|
|
|
2014-01-10 23:25:37 +00:00
|
|
|
var out struct{}
|
2017-05-21 07:11:09 +00:00
|
|
|
if err = a.RPC("Catalog.Register", args, &out); err != nil {
|
2014-01-10 23:25:37 +00:00
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
|
|
|
|
2017-05-09 11:38:05 +00:00
|
|
|
req, _ = http.NewRequest("GET", "/v1/health/checks/consul?dc=dc1", nil)
|
2015-11-15 05:05:37 +00:00
|
|
|
resp = httptest.NewRecorder()
|
2017-05-21 07:11:09 +00:00
|
|
|
obj, err = a.srv.HealthServiceChecks(resp, req)
|
2014-01-10 23:25:37 +00:00
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
2014-04-21 20:11:05 +00:00
|
|
|
assertIndex(t, resp)
|
2014-02-05 22:36:13 +00:00
|
|
|
|
2014-01-10 23:25:37 +00:00
|
|
|
// Should be 1 health check for consul
|
2015-11-15 05:05:37 +00:00
|
|
|
nodes = obj.(structs.HealthChecks)
|
2014-01-10 23:25:37 +00:00
|
|
|
if len(nodes) != 1 {
|
|
|
|
t.Fatalf("bad: %v", obj)
|
|
|
|
}
|
2019-10-17 18:33:11 +00:00
|
|
|
if nodes[0].Type != "grpc" {
|
|
|
|
t.Fatalf("expected grpc check type, got %s", nodes[0].Type)
|
|
|
|
}
|
2014-01-10 23:25:37 +00:00
|
|
|
}
|
|
|
|
|
2017-01-14 01:08:43 +00:00
|
|
|
func TestHealthServiceChecks_NodeMetaFilter(t *testing.T) {
|
2020-12-07 18:42:55 +00:00
|
|
|
if testing.Short() {
|
|
|
|
t.Skip("too slow for testing.Short")
|
|
|
|
}
|
|
|
|
|
2017-05-21 07:54:40 +00:00
|
|
|
t.Parallel()
|
2020-03-31 19:59:56 +00:00
|
|
|
a := NewTestAgent(t, "")
|
2017-05-21 07:11:09 +00:00
|
|
|
defer a.Shutdown()
|
2018-09-10 15:58:53 +00:00
|
|
|
testrpc.WaitForTestAgent(t, a.RPC, "dc1")
|
2017-01-14 01:08:43 +00:00
|
|
|
|
2017-05-09 11:38:05 +00:00
|
|
|
req, _ := http.NewRequest("GET", "/v1/health/checks/consul?dc=dc1&node-meta=somekey:somevalue", nil)
|
2017-01-14 01:08:43 +00:00
|
|
|
resp := httptest.NewRecorder()
|
2017-05-21 07:11:09 +00:00
|
|
|
obj, err := a.srv.HealthServiceChecks(resp, req)
|
2017-01-14 01:08:43 +00:00
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
|
|
|
assertIndex(t, resp)
|
|
|
|
|
|
|
|
// Should be a non-nil empty list
|
|
|
|
nodes := obj.(structs.HealthChecks)
|
|
|
|
if nodes == nil || len(nodes) != 0 {
|
|
|
|
t.Fatalf("bad: %v", obj)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Create a service check
|
|
|
|
args := &structs.RegisterRequest{
|
|
|
|
Datacenter: "dc1",
|
2017-05-21 07:11:09 +00:00
|
|
|
Node: a.Config.NodeName,
|
2017-01-14 01:08:43 +00:00
|
|
|
Address: "127.0.0.1",
|
|
|
|
NodeMeta: map[string]string{"somekey": "somevalue"},
|
|
|
|
Check: &structs.HealthCheck{
|
2017-05-21 07:11:09 +00:00
|
|
|
Node: a.Config.NodeName,
|
2017-01-14 01:08:43 +00:00
|
|
|
Name: "consul check",
|
|
|
|
ServiceID: "consul",
|
|
|
|
},
|
|
|
|
}
|
|
|
|
|
|
|
|
var out struct{}
|
2017-05-21 07:11:09 +00:00
|
|
|
if err = a.RPC("Catalog.Register", args, &out); err != nil {
|
2017-01-14 01:08:43 +00:00
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
|
|
|
|
2017-05-09 11:38:05 +00:00
|
|
|
req, _ = http.NewRequest("GET", "/v1/health/checks/consul?dc=dc1&node-meta=somekey:somevalue", nil)
|
2017-01-14 01:08:43 +00:00
|
|
|
resp = httptest.NewRecorder()
|
2017-05-21 07:11:09 +00:00
|
|
|
obj, err = a.srv.HealthServiceChecks(resp, req)
|
2017-01-14 01:08:43 +00:00
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
|
|
|
assertIndex(t, resp)
|
|
|
|
|
|
|
|
// Should be 1 health check for consul
|
|
|
|
nodes = obj.(structs.HealthChecks)
|
|
|
|
if len(nodes) != 1 {
|
|
|
|
t.Fatalf("bad: %v", obj)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2019-04-16 16:00:15 +00:00
|
|
|
func TestHealthServiceChecks_Filtering(t *testing.T) {
|
2020-12-07 18:42:55 +00:00
|
|
|
if testing.Short() {
|
|
|
|
t.Skip("too slow for testing.Short")
|
|
|
|
}
|
|
|
|
|
2019-04-16 16:00:15 +00:00
|
|
|
t.Parallel()
|
2020-03-31 19:59:56 +00:00
|
|
|
a := NewTestAgent(t, "")
|
2019-04-16 16:00:15 +00:00
|
|
|
defer a.Shutdown()
|
|
|
|
testrpc.WaitForTestAgent(t, a.RPC, "dc1")
|
|
|
|
|
|
|
|
req, _ := http.NewRequest("GET", "/v1/health/checks/consul?dc=dc1&node-meta=somekey:somevalue", nil)
|
|
|
|
resp := httptest.NewRecorder()
|
|
|
|
obj, err := a.srv.HealthServiceChecks(resp, req)
|
|
|
|
require.NoError(t, err)
|
|
|
|
assertIndex(t, resp)
|
|
|
|
|
|
|
|
// Should be a non-nil empty list
|
|
|
|
nodes := obj.(structs.HealthChecks)
|
|
|
|
require.Empty(t, nodes)
|
|
|
|
|
|
|
|
args := &structs.RegisterRequest{
|
|
|
|
Datacenter: "dc1",
|
|
|
|
Node: a.Config.NodeName,
|
|
|
|
Address: "127.0.0.1",
|
|
|
|
NodeMeta: map[string]string{"somekey": "somevalue"},
|
|
|
|
Check: &structs.HealthCheck{
|
|
|
|
Node: a.Config.NodeName,
|
|
|
|
Name: "consul check",
|
|
|
|
ServiceID: "consul",
|
|
|
|
},
|
|
|
|
SkipNodeUpdate: true,
|
|
|
|
}
|
|
|
|
|
|
|
|
var out struct{}
|
|
|
|
require.NoError(t, a.RPC("Catalog.Register", args, &out))
|
|
|
|
|
|
|
|
// Create a new node, service and check
|
|
|
|
args = &structs.RegisterRequest{
|
|
|
|
Datacenter: "dc1",
|
|
|
|
Node: "test-health-node",
|
|
|
|
Address: "127.0.0.2",
|
|
|
|
NodeMeta: map[string]string{"somekey": "somevalue"},
|
|
|
|
Service: &structs.NodeService{
|
|
|
|
ID: "consul",
|
|
|
|
Service: "consul",
|
|
|
|
},
|
|
|
|
Check: &structs.HealthCheck{
|
|
|
|
Node: "test-health-node",
|
|
|
|
Name: "consul check",
|
|
|
|
ServiceID: "consul",
|
|
|
|
},
|
|
|
|
}
|
|
|
|
require.NoError(t, a.RPC("Catalog.Register", args, &out))
|
|
|
|
|
|
|
|
req, _ = http.NewRequest("GET", "/v1/health/checks/consul?dc=dc1&filter="+url.QueryEscape("Node == `test-health-node`"), nil)
|
|
|
|
resp = httptest.NewRecorder()
|
|
|
|
obj, err = a.srv.HealthServiceChecks(resp, req)
|
|
|
|
require.NoError(t, err)
|
|
|
|
assertIndex(t, resp)
|
|
|
|
|
|
|
|
// Should be 1 health check for consul
|
|
|
|
nodes = obj.(structs.HealthChecks)
|
|
|
|
require.Len(t, nodes, 1)
|
|
|
|
}
|
|
|
|
|
2015-07-27 21:41:46 +00:00
|
|
|
func TestHealthServiceChecks_DistanceSort(t *testing.T) {
|
2020-12-07 18:42:55 +00:00
|
|
|
if testing.Short() {
|
|
|
|
t.Skip("too slow for testing.Short")
|
|
|
|
}
|
|
|
|
|
2017-05-21 07:54:40 +00:00
|
|
|
t.Parallel()
|
2020-03-31 19:59:56 +00:00
|
|
|
a := NewTestAgent(t, "")
|
2017-05-21 07:11:09 +00:00
|
|
|
defer a.Shutdown()
|
2018-09-12 13:49:27 +00:00
|
|
|
testrpc.WaitForTestAgent(t, a.RPC, "dc1")
|
2015-07-27 21:41:46 +00:00
|
|
|
|
|
|
|
// Create a service check
|
|
|
|
args := &structs.RegisterRequest{
|
|
|
|
Datacenter: "dc1",
|
|
|
|
Node: "bar",
|
|
|
|
Address: "127.0.0.1",
|
2015-07-29 23:33:25 +00:00
|
|
|
Service: &structs.NodeService{
|
2015-07-27 21:41:46 +00:00
|
|
|
ID: "test",
|
|
|
|
Service: "test",
|
|
|
|
},
|
2015-07-29 23:33:25 +00:00
|
|
|
Check: &structs.HealthCheck{
|
2015-07-27 21:41:46 +00:00
|
|
|
Node: "bar",
|
|
|
|
Name: "test check",
|
|
|
|
ServiceID: "test",
|
|
|
|
},
|
|
|
|
}
|
|
|
|
|
|
|
|
var out struct{}
|
2017-05-21 07:11:09 +00:00
|
|
|
if err := a.RPC("Catalog.Register", args, &out); err != nil {
|
2015-07-27 21:41:46 +00:00
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
|
|
|
|
|
|
|
args.Node, args.Check.Node = "foo", "foo"
|
2017-05-21 07:11:09 +00:00
|
|
|
if err := a.RPC("Catalog.Register", args, &out); err != nil {
|
2015-07-27 21:41:46 +00:00
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
|
|
|
|
2017-05-09 11:38:05 +00:00
|
|
|
req, _ := http.NewRequest("GET", "/v1/health/checks/test?dc=dc1&near=foo", nil)
|
2015-07-27 21:41:46 +00:00
|
|
|
resp := httptest.NewRecorder()
|
2017-05-21 07:11:09 +00:00
|
|
|
obj, err := a.srv.HealthServiceChecks(resp, req)
|
2015-07-27 21:41:46 +00:00
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
|
|
|
assertIndex(t, resp)
|
|
|
|
nodes := obj.(structs.HealthChecks)
|
|
|
|
if len(nodes) != 2 {
|
|
|
|
t.Fatalf("bad: %v", obj)
|
|
|
|
}
|
|
|
|
if nodes[0].Node != "bar" {
|
|
|
|
t.Fatalf("bad: %v", nodes)
|
|
|
|
}
|
|
|
|
if nodes[1].Node != "foo" {
|
|
|
|
t.Fatalf("bad: %v", nodes)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Send an update for the node and wait for it to get applied.
|
|
|
|
arg := structs.CoordinateUpdateRequest{
|
|
|
|
Datacenter: "dc1",
|
|
|
|
Node: "foo",
|
|
|
|
Coord: coordinate.NewCoordinate(coordinate.DefaultConfig()),
|
|
|
|
}
|
2017-05-21 07:11:09 +00:00
|
|
|
if err := a.RPC("Coordinate.Update", &arg, &out); err != nil {
|
2015-07-27 21:41:46 +00:00
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
2017-05-05 10:14:43 +00:00
|
|
|
// Retry until foo has moved to the front of the line.
|
|
|
|
retry.Run(t, func(r *retry.R) {
|
|
|
|
resp = httptest.NewRecorder()
|
2017-05-21 07:11:09 +00:00
|
|
|
obj, err = a.srv.HealthServiceChecks(resp, req)
|
2017-05-05 10:14:43 +00:00
|
|
|
if err != nil {
|
|
|
|
r.Fatalf("err: %v", err)
|
|
|
|
}
|
|
|
|
assertIndex(t, resp)
|
|
|
|
nodes = obj.(structs.HealthChecks)
|
|
|
|
if len(nodes) != 2 {
|
|
|
|
r.Fatalf("bad: %v", obj)
|
|
|
|
}
|
|
|
|
if nodes[0].Node != "foo" {
|
|
|
|
r.Fatalf("bad: %v", nodes)
|
|
|
|
}
|
|
|
|
if nodes[1].Node != "bar" {
|
|
|
|
r.Fatalf("bad: %v", nodes)
|
|
|
|
}
|
|
|
|
})
|
2015-07-27 21:41:46 +00:00
|
|
|
}
|
|
|
|
|
2014-01-10 23:25:37 +00:00
|
|
|
func TestHealthServiceNodes(t *testing.T) {
|
2020-12-07 18:42:55 +00:00
|
|
|
if testing.Short() {
|
|
|
|
t.Skip("too slow for testing.Short")
|
|
|
|
}
|
|
|
|
|
2017-05-21 07:54:40 +00:00
|
|
|
t.Parallel()
|
2020-03-31 19:59:56 +00:00
|
|
|
a := NewTestAgent(t, "")
|
2018-09-10 15:58:53 +00:00
|
|
|
testrpc.WaitForTestAgent(t, a.RPC, "dc1")
|
2014-01-10 23:25:37 +00:00
|
|
|
|
peering: initial sync (#12842)
- Add endpoints related to peering: read, list, generate token, initiate peering
- Update node/service/check table indexing to account for peers
- Foundational changes for pushing service updates to a peer
- Plumb peer name through Health.ServiceNodes path
see: ENT-1765, ENT-1280, ENT-1283, ENT-1283, ENT-1756, ENT-1739, ENT-1750, ENT-1679,
ENT-1709, ENT-1704, ENT-1690, ENT-1689, ENT-1702, ENT-1701, ENT-1683, ENT-1663,
ENT-1650, ENT-1678, ENT-1628, ENT-1658, ENT-1640, ENT-1637, ENT-1597, ENT-1634,
ENT-1613, ENT-1616, ENT-1617, ENT-1591, ENT-1588, ENT-1596, ENT-1572, ENT-1555
Co-authored-by: R.B. Boyer <rb@hashicorp.com>
Co-authored-by: freddygv <freddy@hashicorp.com>
Co-authored-by: Chris S. Kim <ckim@hashicorp.com>
Co-authored-by: Evan Culver <eculver@hashicorp.com>
Co-authored-by: Nitya Dhanushkodi <nitya@hashicorp.com>
2022-04-21 22:34:40 +00:00
|
|
|
testingPeerNames := []string{"", "my-peer"}
|
2014-05-09 09:38:29 +00:00
|
|
|
|
peering: initial sync (#12842)
- Add endpoints related to peering: read, list, generate token, initiate peering
- Update node/service/check table indexing to account for peers
- Foundational changes for pushing service updates to a peer
- Plumb peer name through Health.ServiceNodes path
see: ENT-1765, ENT-1280, ENT-1283, ENT-1283, ENT-1756, ENT-1739, ENT-1750, ENT-1679,
ENT-1709, ENT-1704, ENT-1690, ENT-1689, ENT-1702, ENT-1701, ENT-1683, ENT-1663,
ENT-1650, ENT-1678, ENT-1628, ENT-1658, ENT-1640, ENT-1637, ENT-1597, ENT-1634,
ENT-1613, ENT-1616, ENT-1617, ENT-1591, ENT-1588, ENT-1596, ENT-1572, ENT-1555
Co-authored-by: R.B. Boyer <rb@hashicorp.com>
Co-authored-by: freddygv <freddy@hashicorp.com>
Co-authored-by: Chris S. Kim <ckim@hashicorp.com>
Co-authored-by: Evan Culver <eculver@hashicorp.com>
Co-authored-by: Nitya Dhanushkodi <nitya@hashicorp.com>
2022-04-21 22:34:40 +00:00
|
|
|
suffix := func(peerName string) string {
|
|
|
|
if peerName == "" {
|
|
|
|
return ""
|
|
|
|
}
|
|
|
|
// TODO(peering): after streaming works, remove the "&near=_agent" part
|
|
|
|
return "&peer=" + peerName + "&near=_agent"
|
2014-01-10 23:25:37 +00:00
|
|
|
}
|
2015-11-15 05:05:37 +00:00
|
|
|
|
peering: initial sync (#12842)
- Add endpoints related to peering: read, list, generate token, initiate peering
- Update node/service/check table indexing to account for peers
- Foundational changes for pushing service updates to a peer
- Plumb peer name through Health.ServiceNodes path
see: ENT-1765, ENT-1280, ENT-1283, ENT-1283, ENT-1756, ENT-1739, ENT-1750, ENT-1679,
ENT-1709, ENT-1704, ENT-1690, ENT-1689, ENT-1702, ENT-1701, ENT-1683, ENT-1663,
ENT-1650, ENT-1678, ENT-1628, ENT-1658, ENT-1640, ENT-1637, ENT-1597, ENT-1634,
ENT-1613, ENT-1616, ENT-1617, ENT-1591, ENT-1588, ENT-1596, ENT-1572, ENT-1555
Co-authored-by: R.B. Boyer <rb@hashicorp.com>
Co-authored-by: freddygv <freddy@hashicorp.com>
Co-authored-by: Chris S. Kim <ckim@hashicorp.com>
Co-authored-by: Evan Culver <eculver@hashicorp.com>
Co-authored-by: Nitya Dhanushkodi <nitya@hashicorp.com>
2022-04-21 22:34:40 +00:00
|
|
|
for _, peerName := range testingPeerNames {
|
|
|
|
req, err := http.NewRequest("GET", "/v1/health/service/consul?dc=dc1"+suffix(peerName), nil)
|
|
|
|
require.NoError(t, err)
|
|
|
|
resp := httptest.NewRecorder()
|
|
|
|
obj, err := a.srv.HealthServiceNodes(resp, req)
|
|
|
|
require.NoError(t, err)
|
2015-11-15 05:05:37 +00:00
|
|
|
|
peering: initial sync (#12842)
- Add endpoints related to peering: read, list, generate token, initiate peering
- Update node/service/check table indexing to account for peers
- Foundational changes for pushing service updates to a peer
- Plumb peer name through Health.ServiceNodes path
see: ENT-1765, ENT-1280, ENT-1283, ENT-1283, ENT-1756, ENT-1739, ENT-1750, ENT-1679,
ENT-1709, ENT-1704, ENT-1690, ENT-1689, ENT-1702, ENT-1701, ENT-1683, ENT-1663,
ENT-1650, ENT-1678, ENT-1628, ENT-1658, ENT-1640, ENT-1637, ENT-1597, ENT-1634,
ENT-1613, ENT-1616, ENT-1617, ENT-1591, ENT-1588, ENT-1596, ENT-1572, ENT-1555
Co-authored-by: R.B. Boyer <rb@hashicorp.com>
Co-authored-by: freddygv <freddy@hashicorp.com>
Co-authored-by: Chris S. Kim <ckim@hashicorp.com>
Co-authored-by: Evan Culver <eculver@hashicorp.com>
Co-authored-by: Nitya Dhanushkodi <nitya@hashicorp.com>
2022-04-21 22:34:40 +00:00
|
|
|
assertIndex(t, resp)
|
2015-11-15 05:05:37 +00:00
|
|
|
|
peering: initial sync (#12842)
- Add endpoints related to peering: read, list, generate token, initiate peering
- Update node/service/check table indexing to account for peers
- Foundational changes for pushing service updates to a peer
- Plumb peer name through Health.ServiceNodes path
see: ENT-1765, ENT-1280, ENT-1283, ENT-1283, ENT-1756, ENT-1739, ENT-1750, ENT-1679,
ENT-1709, ENT-1704, ENT-1690, ENT-1689, ENT-1702, ENT-1701, ENT-1683, ENT-1663,
ENT-1650, ENT-1678, ENT-1628, ENT-1658, ENT-1640, ENT-1637, ENT-1597, ENT-1634,
ENT-1613, ENT-1616, ENT-1617, ENT-1591, ENT-1588, ENT-1596, ENT-1572, ENT-1555
Co-authored-by: R.B. Boyer <rb@hashicorp.com>
Co-authored-by: freddygv <freddy@hashicorp.com>
Co-authored-by: Chris S. Kim <ckim@hashicorp.com>
Co-authored-by: Evan Culver <eculver@hashicorp.com>
Co-authored-by: Nitya Dhanushkodi <nitya@hashicorp.com>
2022-04-21 22:34:40 +00:00
|
|
|
nodes := obj.(structs.CheckServiceNodes)
|
|
|
|
if peerName == "" {
|
|
|
|
// Should be 1 health check for consul
|
|
|
|
require.Len(t, nodes, 1)
|
|
|
|
} else {
|
|
|
|
require.NotNil(t, nodes)
|
|
|
|
require.Len(t, nodes, 0)
|
|
|
|
}
|
2015-11-15 05:05:37 +00:00
|
|
|
|
peering: initial sync (#12842)
- Add endpoints related to peering: read, list, generate token, initiate peering
- Update node/service/check table indexing to account for peers
- Foundational changes for pushing service updates to a peer
- Plumb peer name through Health.ServiceNodes path
see: ENT-1765, ENT-1280, ENT-1283, ENT-1283, ENT-1756, ENT-1739, ENT-1750, ENT-1679,
ENT-1709, ENT-1704, ENT-1690, ENT-1689, ENT-1702, ENT-1701, ENT-1683, ENT-1663,
ENT-1650, ENT-1678, ENT-1628, ENT-1658, ENT-1640, ENT-1637, ENT-1597, ENT-1634,
ENT-1613, ENT-1616, ENT-1617, ENT-1591, ENT-1588, ENT-1596, ENT-1572, ENT-1555
Co-authored-by: R.B. Boyer <rb@hashicorp.com>
Co-authored-by: freddygv <freddy@hashicorp.com>
Co-authored-by: Chris S. Kim <ckim@hashicorp.com>
Co-authored-by: Evan Culver <eculver@hashicorp.com>
Co-authored-by: Nitya Dhanushkodi <nitya@hashicorp.com>
2022-04-21 22:34:40 +00:00
|
|
|
req, err = http.NewRequest("GET", "/v1/health/service/nope?dc=dc1"+suffix(peerName), nil)
|
|
|
|
require.NoError(t, err)
|
|
|
|
resp = httptest.NewRecorder()
|
|
|
|
obj, err = a.srv.HealthServiceNodes(resp, req)
|
|
|
|
require.NoError(t, err)
|
2015-11-15 05:05:37 +00:00
|
|
|
|
peering: initial sync (#12842)
- Add endpoints related to peering: read, list, generate token, initiate peering
- Update node/service/check table indexing to account for peers
- Foundational changes for pushing service updates to a peer
- Plumb peer name through Health.ServiceNodes path
see: ENT-1765, ENT-1280, ENT-1283, ENT-1283, ENT-1756, ENT-1739, ENT-1750, ENT-1679,
ENT-1709, ENT-1704, ENT-1690, ENT-1689, ENT-1702, ENT-1701, ENT-1683, ENT-1663,
ENT-1650, ENT-1678, ENT-1628, ENT-1658, ENT-1640, ENT-1637, ENT-1597, ENT-1634,
ENT-1613, ENT-1616, ENT-1617, ENT-1591, ENT-1588, ENT-1596, ENT-1572, ENT-1555
Co-authored-by: R.B. Boyer <rb@hashicorp.com>
Co-authored-by: freddygv <freddy@hashicorp.com>
Co-authored-by: Chris S. Kim <ckim@hashicorp.com>
Co-authored-by: Evan Culver <eculver@hashicorp.com>
Co-authored-by: Nitya Dhanushkodi <nitya@hashicorp.com>
2022-04-21 22:34:40 +00:00
|
|
|
assertIndex(t, resp)
|
2015-11-15 05:05:37 +00:00
|
|
|
|
peering: initial sync (#12842)
- Add endpoints related to peering: read, list, generate token, initiate peering
- Update node/service/check table indexing to account for peers
- Foundational changes for pushing service updates to a peer
- Plumb peer name through Health.ServiceNodes path
see: ENT-1765, ENT-1280, ENT-1283, ENT-1283, ENT-1756, ENT-1739, ENT-1750, ENT-1679,
ENT-1709, ENT-1704, ENT-1690, ENT-1689, ENT-1702, ENT-1701, ENT-1683, ENT-1663,
ENT-1650, ENT-1678, ENT-1628, ENT-1658, ENT-1640, ENT-1637, ENT-1597, ENT-1634,
ENT-1613, ENT-1616, ENT-1617, ENT-1591, ENT-1588, ENT-1596, ENT-1572, ENT-1555
Co-authored-by: R.B. Boyer <rb@hashicorp.com>
Co-authored-by: freddygv <freddy@hashicorp.com>
Co-authored-by: Chris S. Kim <ckim@hashicorp.com>
Co-authored-by: Evan Culver <eculver@hashicorp.com>
Co-authored-by: Nitya Dhanushkodi <nitya@hashicorp.com>
2022-04-21 22:34:40 +00:00
|
|
|
// Should be a non-nil empty list
|
|
|
|
nodes = obj.(structs.CheckServiceNodes)
|
|
|
|
require.NotNil(t, nodes)
|
|
|
|
require.Len(t, nodes, 0)
|
|
|
|
}
|
|
|
|
|
|
|
|
// TODO(peering): will have to seed this data differently in the future
|
|
|
|
originalRegister := make(map[string]*structs.RegisterRequest)
|
|
|
|
for _, peerName := range testingPeerNames {
|
|
|
|
args := &structs.RegisterRequest{
|
|
|
|
Datacenter: "dc1",
|
|
|
|
Node: "bar",
|
|
|
|
Address: "127.0.0.1",
|
|
|
|
PeerName: peerName,
|
|
|
|
Service: &structs.NodeService{
|
|
|
|
ID: "test",
|
|
|
|
Service: "test",
|
|
|
|
PeerName: peerName,
|
|
|
|
},
|
|
|
|
}
|
2015-11-15 05:05:37 +00:00
|
|
|
|
peering: initial sync (#12842)
- Add endpoints related to peering: read, list, generate token, initiate peering
- Update node/service/check table indexing to account for peers
- Foundational changes for pushing service updates to a peer
- Plumb peer name through Health.ServiceNodes path
see: ENT-1765, ENT-1280, ENT-1283, ENT-1283, ENT-1756, ENT-1739, ENT-1750, ENT-1679,
ENT-1709, ENT-1704, ENT-1690, ENT-1689, ENT-1702, ENT-1701, ENT-1683, ENT-1663,
ENT-1650, ENT-1678, ENT-1628, ENT-1658, ENT-1640, ENT-1637, ENT-1597, ENT-1634,
ENT-1613, ENT-1616, ENT-1617, ENT-1591, ENT-1588, ENT-1596, ENT-1572, ENT-1555
Co-authored-by: R.B. Boyer <rb@hashicorp.com>
Co-authored-by: freddygv <freddy@hashicorp.com>
Co-authored-by: Chris S. Kim <ckim@hashicorp.com>
Co-authored-by: Evan Culver <eculver@hashicorp.com>
Co-authored-by: Nitya Dhanushkodi <nitya@hashicorp.com>
2022-04-21 22:34:40 +00:00
|
|
|
var out struct{}
|
|
|
|
require.NoError(t, a.RPC("Catalog.Register", args, &out))
|
|
|
|
originalRegister[peerName] = args
|
|
|
|
}
|
2015-11-15 05:05:37 +00:00
|
|
|
|
peering: initial sync (#12842)
- Add endpoints related to peering: read, list, generate token, initiate peering
- Update node/service/check table indexing to account for peers
- Foundational changes for pushing service updates to a peer
- Plumb peer name through Health.ServiceNodes path
see: ENT-1765, ENT-1280, ENT-1283, ENT-1283, ENT-1756, ENT-1739, ENT-1750, ENT-1679,
ENT-1709, ENT-1704, ENT-1690, ENT-1689, ENT-1702, ENT-1701, ENT-1683, ENT-1663,
ENT-1650, ENT-1678, ENT-1628, ENT-1658, ENT-1640, ENT-1637, ENT-1597, ENT-1634,
ENT-1613, ENT-1616, ENT-1617, ENT-1591, ENT-1588, ENT-1596, ENT-1572, ENT-1555
Co-authored-by: R.B. Boyer <rb@hashicorp.com>
Co-authored-by: freddygv <freddy@hashicorp.com>
Co-authored-by: Chris S. Kim <ckim@hashicorp.com>
Co-authored-by: Evan Culver <eculver@hashicorp.com>
Co-authored-by: Nitya Dhanushkodi <nitya@hashicorp.com>
2022-04-21 22:34:40 +00:00
|
|
|
verify := func(t *testing.T, peerName string, nodes structs.CheckServiceNodes) {
|
|
|
|
require.Len(t, nodes, 1)
|
|
|
|
require.Equal(t, peerName, nodes[0].Node.PeerName)
|
|
|
|
require.Equal(t, "bar", nodes[0].Node.Node)
|
|
|
|
require.Equal(t, peerName, nodes[0].Service.PeerName)
|
|
|
|
require.Equal(t, "test", nodes[0].Service.Service)
|
|
|
|
require.NotNil(t, nodes[0].Checks)
|
|
|
|
require.Len(t, nodes[0].Checks, 0)
|
2015-11-15 05:05:37 +00:00
|
|
|
}
|
2018-09-06 10:34:28 +00:00
|
|
|
|
peering: initial sync (#12842)
- Add endpoints related to peering: read, list, generate token, initiate peering
- Update node/service/check table indexing to account for peers
- Foundational changes for pushing service updates to a peer
- Plumb peer name through Health.ServiceNodes path
see: ENT-1765, ENT-1280, ENT-1283, ENT-1283, ENT-1756, ENT-1739, ENT-1750, ENT-1679,
ENT-1709, ENT-1704, ENT-1690, ENT-1689, ENT-1702, ENT-1701, ENT-1683, ENT-1663,
ENT-1650, ENT-1678, ENT-1628, ENT-1658, ENT-1640, ENT-1637, ENT-1597, ENT-1634,
ENT-1613, ENT-1616, ENT-1617, ENT-1591, ENT-1588, ENT-1596, ENT-1572, ENT-1555
Co-authored-by: R.B. Boyer <rb@hashicorp.com>
Co-authored-by: freddygv <freddy@hashicorp.com>
Co-authored-by: Chris S. Kim <ckim@hashicorp.com>
Co-authored-by: Evan Culver <eculver@hashicorp.com>
Co-authored-by: Nitya Dhanushkodi <nitya@hashicorp.com>
2022-04-21 22:34:40 +00:00
|
|
|
for _, peerName := range testingPeerNames {
|
|
|
|
req, err := http.NewRequest("GET", "/v1/health/service/test?dc=dc1"+suffix(peerName), nil)
|
|
|
|
require.NoError(t, err)
|
2018-09-06 10:34:28 +00:00
|
|
|
resp := httptest.NewRecorder()
|
|
|
|
obj, err := a.srv.HealthServiceNodes(resp, req)
|
bulk rewrite using this script
set -euo pipefail
unset CDPATH
cd "$(dirname "$0")"
for f in $(git grep '\brequire := require\.New(' | cut -d':' -f1 | sort -u); do
echo "=== require: $f ==="
sed -i '/require := require.New(t)/d' $f
# require.XXX(blah) but not require.XXX(tblah) or require.XXX(rblah)
sed -i 's/\brequire\.\([a-zA-Z0-9_]*\)(\([^tr]\)/require.\1(t,\2/g' $f
# require.XXX(tblah) but not require.XXX(t, blah)
sed -i 's/\brequire\.\([a-zA-Z0-9_]*\)(\(t[^,]\)/require.\1(t,\2/g' $f
# require.XXX(rblah) but not require.XXX(r, blah)
sed -i 's/\brequire\.\([a-zA-Z0-9_]*\)(\(r[^,]\)/require.\1(t,\2/g' $f
gofmt -s -w $f
done
for f in $(git grep '\bassert := assert\.New(' | cut -d':' -f1 | sort -u); do
echo "=== assert: $f ==="
sed -i '/assert := assert.New(t)/d' $f
# assert.XXX(blah) but not assert.XXX(tblah) or assert.XXX(rblah)
sed -i 's/\bassert\.\([a-zA-Z0-9_]*\)(\([^tr]\)/assert.\1(t,\2/g' $f
# assert.XXX(tblah) but not assert.XXX(t, blah)
sed -i 's/\bassert\.\([a-zA-Z0-9_]*\)(\(t[^,]\)/assert.\1(t,\2/g' $f
# assert.XXX(rblah) but not assert.XXX(r, blah)
sed -i 's/\bassert\.\([a-zA-Z0-9_]*\)(\(r[^,]\)/assert.\1(t,\2/g' $f
gofmt -s -w $f
done
2022-01-20 16:46:23 +00:00
|
|
|
require.NoError(t, err)
|
2018-09-06 10:34:28 +00:00
|
|
|
|
peering: initial sync (#12842)
- Add endpoints related to peering: read, list, generate token, initiate peering
- Update node/service/check table indexing to account for peers
- Foundational changes for pushing service updates to a peer
- Plumb peer name through Health.ServiceNodes path
see: ENT-1765, ENT-1280, ENT-1283, ENT-1283, ENT-1756, ENT-1739, ENT-1750, ENT-1679,
ENT-1709, ENT-1704, ENT-1690, ENT-1689, ENT-1702, ENT-1701, ENT-1683, ENT-1663,
ENT-1650, ENT-1678, ENT-1628, ENT-1658, ENT-1640, ENT-1637, ENT-1597, ENT-1634,
ENT-1613, ENT-1616, ENT-1617, ENT-1591, ENT-1588, ENT-1596, ENT-1572, ENT-1555
Co-authored-by: R.B. Boyer <rb@hashicorp.com>
Co-authored-by: freddygv <freddy@hashicorp.com>
Co-authored-by: Chris S. Kim <ckim@hashicorp.com>
Co-authored-by: Evan Culver <eculver@hashicorp.com>
Co-authored-by: Nitya Dhanushkodi <nitya@hashicorp.com>
2022-04-21 22:34:40 +00:00
|
|
|
assertIndex(t, resp)
|
2018-09-06 10:34:28 +00:00
|
|
|
|
peering: initial sync (#12842)
- Add endpoints related to peering: read, list, generate token, initiate peering
- Update node/service/check table indexing to account for peers
- Foundational changes for pushing service updates to a peer
- Plumb peer name through Health.ServiceNodes path
see: ENT-1765, ENT-1280, ENT-1283, ENT-1283, ENT-1756, ENT-1739, ENT-1750, ENT-1679,
ENT-1709, ENT-1704, ENT-1690, ENT-1689, ENT-1702, ENT-1701, ENT-1683, ENT-1663,
ENT-1650, ENT-1678, ENT-1628, ENT-1658, ENT-1640, ENT-1637, ENT-1597, ENT-1634,
ENT-1613, ENT-1616, ENT-1617, ENT-1591, ENT-1588, ENT-1596, ENT-1572, ENT-1555
Co-authored-by: R.B. Boyer <rb@hashicorp.com>
Co-authored-by: freddygv <freddy@hashicorp.com>
Co-authored-by: Chris S. Kim <ckim@hashicorp.com>
Co-authored-by: Evan Culver <eculver@hashicorp.com>
Co-authored-by: Nitya Dhanushkodi <nitya@hashicorp.com>
2022-04-21 22:34:40 +00:00
|
|
|
// Should be a non-nil empty list for checks
|
2018-09-06 10:34:28 +00:00
|
|
|
nodes := obj.(structs.CheckServiceNodes)
|
peering: initial sync (#12842)
- Add endpoints related to peering: read, list, generate token, initiate peering
- Update node/service/check table indexing to account for peers
- Foundational changes for pushing service updates to a peer
- Plumb peer name through Health.ServiceNodes path
see: ENT-1765, ENT-1280, ENT-1283, ENT-1283, ENT-1756, ENT-1739, ENT-1750, ENT-1679,
ENT-1709, ENT-1704, ENT-1690, ENT-1689, ENT-1702, ENT-1701, ENT-1683, ENT-1663,
ENT-1650, ENT-1678, ENT-1628, ENT-1658, ENT-1640, ENT-1637, ENT-1597, ENT-1634,
ENT-1613, ENT-1616, ENT-1617, ENT-1591, ENT-1588, ENT-1596, ENT-1572, ENT-1555
Co-authored-by: R.B. Boyer <rb@hashicorp.com>
Co-authored-by: freddygv <freddy@hashicorp.com>
Co-authored-by: Chris S. Kim <ckim@hashicorp.com>
Co-authored-by: Evan Culver <eculver@hashicorp.com>
Co-authored-by: Nitya Dhanushkodi <nitya@hashicorp.com>
2022-04-21 22:34:40 +00:00
|
|
|
verify(t, peerName, nodes)
|
2018-09-06 10:34:28 +00:00
|
|
|
|
peering: initial sync (#12842)
- Add endpoints related to peering: read, list, generate token, initiate peering
- Update node/service/check table indexing to account for peers
- Foundational changes for pushing service updates to a peer
- Plumb peer name through Health.ServiceNodes path
see: ENT-1765, ENT-1280, ENT-1283, ENT-1283, ENT-1756, ENT-1739, ENT-1750, ENT-1679,
ENT-1709, ENT-1704, ENT-1690, ENT-1689, ENT-1702, ENT-1701, ENT-1683, ENT-1663,
ENT-1650, ENT-1678, ENT-1628, ENT-1658, ENT-1640, ENT-1637, ENT-1597, ENT-1634,
ENT-1613, ENT-1616, ENT-1617, ENT-1591, ENT-1588, ENT-1596, ENT-1572, ENT-1555
Co-authored-by: R.B. Boyer <rb@hashicorp.com>
Co-authored-by: freddygv <freddy@hashicorp.com>
Co-authored-by: Chris S. Kim <ckim@hashicorp.com>
Co-authored-by: Evan Culver <eculver@hashicorp.com>
Co-authored-by: Nitya Dhanushkodi <nitya@hashicorp.com>
2022-04-21 22:34:40 +00:00
|
|
|
// Test caching
|
|
|
|
{
|
|
|
|
// List instances with cache enabled
|
|
|
|
req, err := http.NewRequest("GET", "/v1/health/service/test?cached"+suffix(peerName), nil)
|
|
|
|
require.NoError(t, err)
|
|
|
|
resp := httptest.NewRecorder()
|
|
|
|
obj, err := a.srv.HealthServiceNodes(resp, req)
|
|
|
|
require.NoError(t, err)
|
|
|
|
nodes := obj.(structs.CheckServiceNodes)
|
|
|
|
verify(t, peerName, nodes)
|
2018-09-06 10:34:28 +00:00
|
|
|
|
peering: initial sync (#12842)
- Add endpoints related to peering: read, list, generate token, initiate peering
- Update node/service/check table indexing to account for peers
- Foundational changes for pushing service updates to a peer
- Plumb peer name through Health.ServiceNodes path
see: ENT-1765, ENT-1280, ENT-1283, ENT-1283, ENT-1756, ENT-1739, ENT-1750, ENT-1679,
ENT-1709, ENT-1704, ENT-1690, ENT-1689, ENT-1702, ENT-1701, ENT-1683, ENT-1663,
ENT-1650, ENT-1678, ENT-1628, ENT-1658, ENT-1640, ENT-1637, ENT-1597, ENT-1634,
ENT-1613, ENT-1616, ENT-1617, ENT-1591, ENT-1588, ENT-1596, ENT-1572, ENT-1555
Co-authored-by: R.B. Boyer <rb@hashicorp.com>
Co-authored-by: freddygv <freddy@hashicorp.com>
Co-authored-by: Chris S. Kim <ckim@hashicorp.com>
Co-authored-by: Evan Culver <eculver@hashicorp.com>
Co-authored-by: Nitya Dhanushkodi <nitya@hashicorp.com>
2022-04-21 22:34:40 +00:00
|
|
|
// Should be a cache miss
|
|
|
|
require.Equal(t, "MISS", resp.Header().Get("X-Cache"))
|
|
|
|
}
|
2018-09-06 10:34:28 +00:00
|
|
|
|
peering: initial sync (#12842)
- Add endpoints related to peering: read, list, generate token, initiate peering
- Update node/service/check table indexing to account for peers
- Foundational changes for pushing service updates to a peer
- Plumb peer name through Health.ServiceNodes path
see: ENT-1765, ENT-1280, ENT-1283, ENT-1283, ENT-1756, ENT-1739, ENT-1750, ENT-1679,
ENT-1709, ENT-1704, ENT-1690, ENT-1689, ENT-1702, ENT-1701, ENT-1683, ENT-1663,
ENT-1650, ENT-1678, ENT-1628, ENT-1658, ENT-1640, ENT-1637, ENT-1597, ENT-1634,
ENT-1613, ENT-1616, ENT-1617, ENT-1591, ENT-1588, ENT-1596, ENT-1572, ENT-1555
Co-authored-by: R.B. Boyer <rb@hashicorp.com>
Co-authored-by: freddygv <freddy@hashicorp.com>
Co-authored-by: Chris S. Kim <ckim@hashicorp.com>
Co-authored-by: Evan Culver <eculver@hashicorp.com>
Co-authored-by: Nitya Dhanushkodi <nitya@hashicorp.com>
2022-04-21 22:34:40 +00:00
|
|
|
{
|
|
|
|
// List instances with cache enabled
|
|
|
|
req, err := http.NewRequest("GET", "/v1/health/service/test?cached"+suffix(peerName), nil)
|
|
|
|
require.NoError(t, err)
|
2018-09-06 10:34:28 +00:00
|
|
|
resp := httptest.NewRecorder()
|
|
|
|
obj, err := a.srv.HealthServiceNodes(resp, req)
|
peering: initial sync (#12842)
- Add endpoints related to peering: read, list, generate token, initiate peering
- Update node/service/check table indexing to account for peers
- Foundational changes for pushing service updates to a peer
- Plumb peer name through Health.ServiceNodes path
see: ENT-1765, ENT-1280, ENT-1283, ENT-1283, ENT-1756, ENT-1739, ENT-1750, ENT-1679,
ENT-1709, ENT-1704, ENT-1690, ENT-1689, ENT-1702, ENT-1701, ENT-1683, ENT-1663,
ENT-1650, ENT-1678, ENT-1628, ENT-1658, ENT-1640, ENT-1637, ENT-1597, ENT-1634,
ENT-1613, ENT-1616, ENT-1617, ENT-1591, ENT-1588, ENT-1596, ENT-1572, ENT-1555
Co-authored-by: R.B. Boyer <rb@hashicorp.com>
Co-authored-by: freddygv <freddy@hashicorp.com>
Co-authored-by: Chris S. Kim <ckim@hashicorp.com>
Co-authored-by: Evan Culver <eculver@hashicorp.com>
Co-authored-by: Nitya Dhanushkodi <nitya@hashicorp.com>
2022-04-21 22:34:40 +00:00
|
|
|
require.NoError(t, err)
|
2018-09-06 10:34:28 +00:00
|
|
|
nodes := obj.(structs.CheckServiceNodes)
|
peering: initial sync (#12842)
- Add endpoints related to peering: read, list, generate token, initiate peering
- Update node/service/check table indexing to account for peers
- Foundational changes for pushing service updates to a peer
- Plumb peer name through Health.ServiceNodes path
see: ENT-1765, ENT-1280, ENT-1283, ENT-1283, ENT-1756, ENT-1739, ENT-1750, ENT-1679,
ENT-1709, ENT-1704, ENT-1690, ENT-1689, ENT-1702, ENT-1701, ENT-1683, ENT-1663,
ENT-1650, ENT-1678, ENT-1628, ENT-1658, ENT-1640, ENT-1637, ENT-1597, ENT-1634,
ENT-1613, ENT-1616, ENT-1617, ENT-1591, ENT-1588, ENT-1596, ENT-1572, ENT-1555
Co-authored-by: R.B. Boyer <rb@hashicorp.com>
Co-authored-by: freddygv <freddy@hashicorp.com>
Co-authored-by: Chris S. Kim <ckim@hashicorp.com>
Co-authored-by: Evan Culver <eculver@hashicorp.com>
Co-authored-by: Nitya Dhanushkodi <nitya@hashicorp.com>
2022-04-21 22:34:40 +00:00
|
|
|
verify(t, peerName, nodes)
|
|
|
|
|
|
|
|
// Should be a cache HIT now!
|
|
|
|
require.Equal(t, "HIT", resp.Header().Get("X-Cache"))
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// Ensure background refresh works
|
|
|
|
{
|
|
|
|
// TODO(peering): will have to seed this data differently in the future
|
|
|
|
for _, peerName := range testingPeerNames {
|
|
|
|
args := originalRegister[peerName]
|
|
|
|
// Register a new instance of the service
|
|
|
|
args2 := *args
|
|
|
|
args2.Node = "baz"
|
|
|
|
args2.Address = "127.0.0.2"
|
|
|
|
var out struct{}
|
|
|
|
require.NoError(t, a.RPC("Catalog.Register", &args2, &out))
|
|
|
|
}
|
|
|
|
|
|
|
|
for _, peerName := range testingPeerNames {
|
|
|
|
retry.Run(t, func(r *retry.R) {
|
|
|
|
// List it again
|
|
|
|
req, err := http.NewRequest("GET", "/v1/health/service/test?cached"+suffix(peerName), nil)
|
|
|
|
require.NoError(r, err)
|
|
|
|
resp := httptest.NewRecorder()
|
|
|
|
obj, err := a.srv.HealthServiceNodes(resp, req)
|
|
|
|
require.NoError(r, err)
|
|
|
|
|
|
|
|
nodes := obj.(structs.CheckServiceNodes)
|
|
|
|
require.Len(r, nodes, 2)
|
|
|
|
|
|
|
|
header := resp.Header().Get("X-Consul-Index")
|
|
|
|
if header == "" || header == "0" {
|
|
|
|
r.Fatalf("Want non-zero header: %q", header)
|
|
|
|
}
|
|
|
|
_, err = strconv.ParseUint(header, 10, 64)
|
|
|
|
require.NoError(r, err)
|
|
|
|
|
|
|
|
// Should be a cache hit! The data should've updated in the cache
|
|
|
|
// in the background so this should've been fetched directly from
|
|
|
|
// the cache.
|
|
|
|
if resp.Header().Get("X-Cache") != "HIT" {
|
|
|
|
r.Fatalf("should be a cache hit")
|
|
|
|
}
|
|
|
|
})
|
|
|
|
}
|
2018-09-06 10:34:28 +00:00
|
|
|
}
|
2014-01-10 23:25:37 +00:00
|
|
|
}
|
2014-04-21 23:08:26 +00:00
|
|
|
|
2020-11-13 20:05:16 +00:00
|
|
|
func TestHealthServiceNodes_Blocking(t *testing.T) {
|
|
|
|
cases := []struct {
|
2021-06-28 20:48:10 +00:00
|
|
|
name string
|
|
|
|
hcl string
|
|
|
|
grpcMetrics bool
|
|
|
|
queryBackend string
|
2020-11-13 20:05:16 +00:00
|
|
|
}{
|
2021-06-28 20:48:10 +00:00
|
|
|
{
|
|
|
|
name: "no streaming",
|
|
|
|
queryBackend: "blocking-query",
|
|
|
|
hcl: `use_streaming_backend = false`,
|
|
|
|
},
|
2020-11-13 20:05:16 +00:00
|
|
|
{
|
|
|
|
name: "streaming",
|
|
|
|
grpcMetrics: true,
|
|
|
|
hcl: `
|
|
|
|
rpc { enable_streaming = true }
|
|
|
|
use_streaming_backend = true
|
|
|
|
`,
|
2021-06-28 20:48:10 +00:00
|
|
|
queryBackend: "streaming",
|
2020-11-13 20:05:16 +00:00
|
|
|
},
|
|
|
|
}
|
|
|
|
|
|
|
|
for _, tc := range cases {
|
|
|
|
tc := tc
|
|
|
|
t.Run(tc.name, func(t *testing.T) {
|
|
|
|
|
|
|
|
sink := metrics.NewInmemSink(5*time.Second, time.Minute)
|
|
|
|
metrics.NewGlobal(&metrics.Config{
|
|
|
|
ServiceName: "testing",
|
|
|
|
AllowedPrefixes: []string{"testing.grpc."},
|
|
|
|
}, sink)
|
|
|
|
|
|
|
|
a := NewTestAgent(t, tc.hcl)
|
|
|
|
defer a.Shutdown()
|
|
|
|
testrpc.WaitForTestAgent(t, a.RPC, "dc1")
|
|
|
|
|
|
|
|
// Register some initial service instances
|
|
|
|
for i := 0; i < 2; i++ {
|
|
|
|
args := &structs.RegisterRequest{
|
|
|
|
Datacenter: "dc1",
|
|
|
|
Node: "bar",
|
|
|
|
Address: "127.0.0.1",
|
|
|
|
Service: &structs.NodeService{
|
|
|
|
ID: fmt.Sprintf("test%03d", i),
|
|
|
|
Service: "test",
|
|
|
|
},
|
|
|
|
}
|
|
|
|
|
|
|
|
var out struct{}
|
|
|
|
require.NoError(t, a.RPC("Catalog.Register", args, &out))
|
|
|
|
}
|
|
|
|
|
|
|
|
// Initial request should return two instances
|
|
|
|
req, _ := http.NewRequest("GET", "/v1/health/service/test?dc=dc1", nil)
|
|
|
|
resp := httptest.NewRecorder()
|
|
|
|
obj, err := a.srv.HealthServiceNodes(resp, req)
|
|
|
|
require.NoError(t, err)
|
|
|
|
|
|
|
|
nodes := obj.(structs.CheckServiceNodes)
|
|
|
|
require.Len(t, nodes, 2)
|
|
|
|
|
|
|
|
idx := getIndex(t, resp)
|
|
|
|
require.True(t, idx > 0)
|
|
|
|
|
|
|
|
// errCh collects errors from goroutines since it's unsafe for them to use
|
|
|
|
// t to fail tests directly.
|
|
|
|
errCh := make(chan error, 1)
|
|
|
|
|
|
|
|
checkErrs := func() {
|
|
|
|
// Ensure no errors were sent on errCh and drain any nils we have
|
|
|
|
for {
|
|
|
|
select {
|
|
|
|
case err := <-errCh:
|
|
|
|
require.NoError(t, err)
|
|
|
|
default:
|
|
|
|
return
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// Blocking on that index should block. We test that by launching another
|
|
|
|
// goroutine that will wait a while before updating the registration and
|
|
|
|
// make sure that we unblock before timeout and see the update but that it
|
|
|
|
// takes at least as long as the sleep time.
|
|
|
|
sleep := 200 * time.Millisecond
|
|
|
|
start := time.Now()
|
|
|
|
go func() {
|
|
|
|
time.Sleep(sleep)
|
|
|
|
|
|
|
|
args := &structs.RegisterRequest{
|
|
|
|
Datacenter: "dc1",
|
|
|
|
Node: "zoo",
|
|
|
|
Address: "127.0.0.3",
|
|
|
|
Service: &structs.NodeService{
|
|
|
|
ID: "test",
|
|
|
|
Service: "test",
|
|
|
|
},
|
|
|
|
}
|
|
|
|
|
|
|
|
var out struct{}
|
|
|
|
errCh <- a.RPC("Catalog.Register", args, &out)
|
|
|
|
}()
|
|
|
|
|
|
|
|
{
|
|
|
|
timeout := 30 * time.Second
|
|
|
|
url := fmt.Sprintf("/v1/health/service/test?dc=dc1&index=%d&wait=%s", idx, timeout)
|
|
|
|
req, _ := http.NewRequest("GET", url, nil)
|
|
|
|
resp := httptest.NewRecorder()
|
|
|
|
obj, err := a.srv.HealthServiceNodes(resp, req)
|
|
|
|
require.NoError(t, err)
|
|
|
|
elapsed := time.Since(start)
|
|
|
|
require.True(t, elapsed > sleep, "request should block for at "+
|
|
|
|
" least as long as sleep. sleep=%s, elapsed=%s", sleep, elapsed)
|
|
|
|
|
|
|
|
require.True(t, elapsed < timeout, "request should unblock before"+
|
|
|
|
" it timed out. timeout=%s, elapsed=%s", timeout, elapsed)
|
|
|
|
|
|
|
|
nodes := obj.(structs.CheckServiceNodes)
|
|
|
|
require.Len(t, nodes, 3)
|
|
|
|
|
|
|
|
newIdx := getIndex(t, resp)
|
|
|
|
require.True(t, idx < newIdx, "index should have increased."+
|
|
|
|
"idx=%d, newIdx=%d", idx, newIdx)
|
|
|
|
|
2021-06-28 20:48:10 +00:00
|
|
|
require.Equal(t, tc.queryBackend, resp.Header().Get("X-Consul-Query-Backend"))
|
|
|
|
|
2020-11-13 20:05:16 +00:00
|
|
|
idx = newIdx
|
|
|
|
|
|
|
|
checkErrs()
|
|
|
|
}
|
|
|
|
|
|
|
|
// Blocking should last until timeout in absence of updates
|
|
|
|
start = time.Now()
|
|
|
|
{
|
|
|
|
timeout := 200 * time.Millisecond
|
|
|
|
url := fmt.Sprintf("/v1/health/service/test?dc=dc1&index=%d&wait=%s",
|
|
|
|
idx, timeout)
|
|
|
|
req, _ := http.NewRequest("GET", url, nil)
|
|
|
|
resp := httptest.NewRecorder()
|
|
|
|
obj, err := a.srv.HealthServiceNodes(resp, req)
|
|
|
|
require.NoError(t, err)
|
|
|
|
elapsed := time.Since(start)
|
|
|
|
// Note that servers add jitter to timeout requested but don't remove it
|
|
|
|
// so this should always be true.
|
|
|
|
require.True(t, elapsed > timeout, "request should block for at "+
|
|
|
|
" least as long as timeout. timeout=%s, elapsed=%s", timeout, elapsed)
|
|
|
|
|
|
|
|
nodes := obj.(structs.CheckServiceNodes)
|
|
|
|
require.Len(t, nodes, 3)
|
|
|
|
|
|
|
|
newIdx := getIndex(t, resp)
|
|
|
|
require.Equal(t, idx, newIdx)
|
2021-06-28 20:48:10 +00:00
|
|
|
require.Equal(t, tc.queryBackend, resp.Header().Get("X-Consul-Query-Backend"))
|
2020-11-13 20:05:16 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
if tc.grpcMetrics {
|
|
|
|
data := sink.Data()
|
|
|
|
if l := len(data); l < 1 {
|
|
|
|
t.Errorf("expected at least 1 metrics interval, got :%v", l)
|
|
|
|
}
|
|
|
|
if count := len(data[0].Gauges); count < 2 {
|
|
|
|
t.Errorf("expected at least 2 grpc gauge metrics, got: %v", count)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
})
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2022-04-27 15:39:45 +00:00
|
|
|
func TestHealthServiceNodes_Blocking_withFilter(t *testing.T) {
|
|
|
|
cases := []struct {
|
|
|
|
name string
|
|
|
|
hcl string
|
|
|
|
queryBackend string
|
|
|
|
}{
|
|
|
|
{
|
|
|
|
name: "no streaming",
|
|
|
|
queryBackend: "blocking-query",
|
|
|
|
hcl: `use_streaming_backend = false`,
|
|
|
|
},
|
|
|
|
{
|
|
|
|
name: "streaming",
|
|
|
|
hcl: `
|
|
|
|
rpc { enable_streaming = true }
|
|
|
|
use_streaming_backend = true
|
|
|
|
`,
|
|
|
|
queryBackend: "streaming",
|
|
|
|
},
|
|
|
|
}
|
|
|
|
|
|
|
|
register := func(t *testing.T, a *TestAgent, name, tag string) {
|
|
|
|
args := &structs.RegisterRequest{
|
|
|
|
Datacenter: "dc1",
|
|
|
|
ID: types.NodeID("43d419c0-433b-42c3-bf8a-193eba0b41a3"),
|
|
|
|
Node: "node1",
|
|
|
|
Address: "127.0.0.1",
|
|
|
|
Service: &structs.NodeService{
|
|
|
|
ID: name,
|
|
|
|
Service: name,
|
|
|
|
Tags: []string{tag},
|
|
|
|
},
|
|
|
|
}
|
|
|
|
|
|
|
|
var out struct{}
|
|
|
|
require.NoError(t, a.RPC("Catalog.Register", args, &out))
|
|
|
|
}
|
|
|
|
|
|
|
|
for _, tc := range cases {
|
|
|
|
tc := tc
|
|
|
|
t.Run(tc.name, func(t *testing.T) {
|
|
|
|
a := NewTestAgent(t, tc.hcl)
|
|
|
|
defer a.Shutdown()
|
|
|
|
testrpc.WaitForTestAgent(t, a.RPC, "dc1")
|
|
|
|
|
|
|
|
// Register one with a tag.
|
|
|
|
register(t, a, "web", "foo")
|
|
|
|
|
|
|
|
filterUrlPart := "filter=" + url.QueryEscape("foo in Service.Tags")
|
|
|
|
|
|
|
|
// TODO: use other call format
|
|
|
|
|
|
|
|
// Initial request with a filter should return one.
|
|
|
|
var lastIndex uint64
|
2022-05-10 20:25:51 +00:00
|
|
|
testutil.RunStep(t, "read original", func(t *testing.T) {
|
2022-04-27 15:39:45 +00:00
|
|
|
req, err := http.NewRequest("GET", "/v1/health/service/web?dc=dc1&"+filterUrlPart, nil)
|
|
|
|
require.NoError(t, err)
|
|
|
|
|
|
|
|
resp := httptest.NewRecorder()
|
|
|
|
obj, err := a.srv.HealthServiceNodes(resp, req)
|
|
|
|
require.NoError(t, err)
|
|
|
|
|
|
|
|
nodes := obj.(structs.CheckServiceNodes)
|
|
|
|
|
|
|
|
require.Len(t, nodes, 1)
|
|
|
|
|
|
|
|
node := nodes[0]
|
|
|
|
require.Equal(t, "node1", node.Node.Node)
|
|
|
|
require.Equal(t, "web", node.Service.Service)
|
|
|
|
require.Equal(t, []string{"foo"}, node.Service.Tags)
|
|
|
|
|
|
|
|
require.Equal(t, "blocking-query", resp.Header().Get("X-Consul-Query-Backend"))
|
|
|
|
|
|
|
|
idx := getIndex(t, resp)
|
|
|
|
require.True(t, idx > 0)
|
|
|
|
|
|
|
|
lastIndex = idx
|
|
|
|
})
|
|
|
|
|
|
|
|
const timeout = 30 * time.Second
|
2022-05-10 20:25:51 +00:00
|
|
|
testutil.RunStep(t, "read blocking query result", func(t *testing.T) {
|
2022-04-27 15:39:45 +00:00
|
|
|
var (
|
|
|
|
// out and resp are not safe to read until reading from errCh
|
|
|
|
out structs.CheckServiceNodes
|
|
|
|
resp = httptest.NewRecorder()
|
|
|
|
errCh = make(chan error, 1)
|
|
|
|
)
|
|
|
|
go func() {
|
|
|
|
url := fmt.Sprintf("/v1/health/service/web?dc=dc1&index=%d&wait=%s&%s", lastIndex, timeout, filterUrlPart)
|
|
|
|
req, err := http.NewRequest("GET", url, nil)
|
|
|
|
if err != nil {
|
|
|
|
errCh <- err
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
|
|
|
obj, err := a.srv.HealthServiceNodes(resp, req)
|
|
|
|
if err != nil {
|
|
|
|
errCh <- err
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
|
|
|
nodes := obj.(structs.CheckServiceNodes)
|
|
|
|
out = nodes
|
|
|
|
errCh <- nil
|
|
|
|
}()
|
|
|
|
|
|
|
|
time.Sleep(200 * time.Millisecond)
|
|
|
|
|
|
|
|
// Change the tags.
|
|
|
|
register(t, a, "web", "bar")
|
|
|
|
|
|
|
|
if err := <-errCh; err != nil {
|
|
|
|
require.NoError(t, err)
|
|
|
|
}
|
|
|
|
|
|
|
|
require.Len(t, out, 0)
|
|
|
|
require.Equal(t, tc.queryBackend, resp.Header().Get("X-Consul-Query-Backend"))
|
|
|
|
})
|
|
|
|
})
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2017-01-14 01:08:43 +00:00
|
|
|
func TestHealthServiceNodes_NodeMetaFilter(t *testing.T) {
|
2020-12-07 18:42:55 +00:00
|
|
|
if testing.Short() {
|
|
|
|
t.Skip("too slow for testing.Short")
|
|
|
|
}
|
|
|
|
|
2017-05-21 07:54:40 +00:00
|
|
|
t.Parallel()
|
2017-01-14 01:08:43 +00:00
|
|
|
|
2021-02-08 16:53:18 +00:00
|
|
|
tests := []struct {
|
2021-06-28 20:48:10 +00:00
|
|
|
name string
|
|
|
|
config string
|
|
|
|
queryBackend string
|
2021-02-08 16:53:18 +00:00
|
|
|
}{
|
2021-06-28 20:48:10 +00:00
|
|
|
{
|
|
|
|
name: "blocking-query",
|
|
|
|
config: `use_streaming_backend=false`,
|
|
|
|
queryBackend: "blocking-query",
|
|
|
|
},
|
|
|
|
{
|
|
|
|
name: "cache-with-streaming",
|
|
|
|
config: `
|
2021-02-08 16:53:18 +00:00
|
|
|
rpc{
|
|
|
|
enable_streaming=true
|
|
|
|
}
|
|
|
|
use_streaming_backend=true
|
2021-06-28 20:48:10 +00:00
|
|
|
`,
|
|
|
|
queryBackend: "streaming",
|
|
|
|
},
|
2017-01-14 01:08:43 +00:00
|
|
|
}
|
2021-02-08 16:53:18 +00:00
|
|
|
for _, tst := range tests {
|
|
|
|
t.Run(tst.name, func(t *testing.T) {
|
2017-01-14 01:08:43 +00:00
|
|
|
|
2021-02-08 16:53:18 +00:00
|
|
|
a := NewTestAgent(t, tst.config)
|
|
|
|
defer a.Shutdown()
|
|
|
|
testrpc.WaitForLeader(t, a.RPC, "dc1")
|
2017-01-14 01:08:43 +00:00
|
|
|
|
2021-02-08 16:53:18 +00:00
|
|
|
req, _ := http.NewRequest("GET", "/v1/health/service/consul?dc=dc1&node-meta=somekey:somevalue", nil)
|
|
|
|
resp := httptest.NewRecorder()
|
|
|
|
obj, err := a.srv.HealthServiceNodes(resp, req)
|
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
2017-01-14 01:08:43 +00:00
|
|
|
|
2021-02-08 16:53:18 +00:00
|
|
|
assertIndex(t, resp)
|
2017-01-14 01:08:43 +00:00
|
|
|
|
2021-02-08 16:53:18 +00:00
|
|
|
cIndex, err := strconv.ParseUint(resp.Header().Get("X-Consul-Index"), 10, 64)
|
|
|
|
require.NoError(t, err)
|
2017-01-14 01:08:43 +00:00
|
|
|
|
2021-02-08 16:53:18 +00:00
|
|
|
// Should be a non-nil empty list
|
|
|
|
nodes := obj.(structs.CheckServiceNodes)
|
|
|
|
if nodes == nil || len(nodes) != 0 {
|
|
|
|
t.Fatalf("bad: %v", obj)
|
|
|
|
}
|
2017-01-14 01:08:43 +00:00
|
|
|
|
2021-02-08 16:53:18 +00:00
|
|
|
args := &structs.RegisterRequest{
|
|
|
|
Datacenter: "dc1",
|
|
|
|
Node: "bar",
|
|
|
|
Address: "127.0.0.1",
|
|
|
|
NodeMeta: map[string]string{"somekey": "somevalue"},
|
|
|
|
Service: &structs.NodeService{
|
|
|
|
ID: "test",
|
|
|
|
Service: "test",
|
|
|
|
},
|
|
|
|
}
|
2017-01-14 01:08:43 +00:00
|
|
|
|
2021-02-08 16:53:18 +00:00
|
|
|
var out struct{}
|
|
|
|
if err := a.RPC("Catalog.Register", args, &out); err != nil {
|
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
|
|
|
|
|
|
|
args = &structs.RegisterRequest{
|
|
|
|
Datacenter: "dc1",
|
|
|
|
Node: "bar2",
|
|
|
|
Address: "127.0.0.1",
|
|
|
|
NodeMeta: map[string]string{"somekey": "othervalue"},
|
|
|
|
Service: &structs.NodeService{
|
|
|
|
ID: "test2",
|
|
|
|
Service: "test",
|
|
|
|
},
|
|
|
|
}
|
|
|
|
|
|
|
|
if err := a.RPC("Catalog.Register", args, &out); err != nil {
|
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
|
|
|
|
|
|
|
req, _ = http.NewRequest("GET", fmt.Sprintf("/v1/health/service/test?dc=dc1&node-meta=somekey:somevalue&index=%d&wait=10ms", cIndex), nil)
|
|
|
|
resp = httptest.NewRecorder()
|
|
|
|
obj, err = a.srv.HealthServiceNodes(resp, req)
|
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
|
|
|
|
|
|
|
assertIndex(t, resp)
|
|
|
|
|
|
|
|
// Should be a non-nil empty list for checks
|
|
|
|
nodes = obj.(structs.CheckServiceNodes)
|
|
|
|
if len(nodes) != 1 || nodes[0].Checks == nil || len(nodes[0].Checks) != 0 {
|
|
|
|
t.Fatalf("bad: %v", obj)
|
|
|
|
}
|
2021-06-28 20:48:10 +00:00
|
|
|
|
|
|
|
require.Equal(t, tst.queryBackend, resp.Header().Get("X-Consul-Query-Backend"))
|
2021-02-08 16:53:18 +00:00
|
|
|
})
|
2017-01-14 01:08:43 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2019-04-16 16:00:15 +00:00
|
|
|
func TestHealthServiceNodes_Filter(t *testing.T) {
|
2020-12-07 18:42:55 +00:00
|
|
|
if testing.Short() {
|
|
|
|
t.Skip("too slow for testing.Short")
|
|
|
|
}
|
|
|
|
|
2019-04-16 16:00:15 +00:00
|
|
|
t.Parallel()
|
2020-03-31 19:59:56 +00:00
|
|
|
a := NewTestAgent(t, "")
|
2019-04-16 16:00:15 +00:00
|
|
|
defer a.Shutdown()
|
2019-11-07 20:45:30 +00:00
|
|
|
testrpc.WaitForTestAgent(t, a.RPC, "dc1")
|
2019-04-16 16:00:15 +00:00
|
|
|
|
|
|
|
req, _ := http.NewRequest("GET", "/v1/health/service/consul?dc=dc1&filter="+url.QueryEscape("Node.Node == `test-health-node`"), nil)
|
|
|
|
resp := httptest.NewRecorder()
|
|
|
|
obj, err := a.srv.HealthServiceNodes(resp, req)
|
|
|
|
require.NoError(t, err)
|
|
|
|
assertIndex(t, resp)
|
|
|
|
|
|
|
|
// Should be a non-nil empty list
|
|
|
|
nodes := obj.(structs.CheckServiceNodes)
|
|
|
|
require.Empty(t, nodes)
|
|
|
|
|
|
|
|
args := &structs.RegisterRequest{
|
|
|
|
Datacenter: "dc1",
|
|
|
|
Node: a.Config.NodeName,
|
|
|
|
Address: "127.0.0.1",
|
|
|
|
NodeMeta: map[string]string{"somekey": "somevalue"},
|
|
|
|
Check: &structs.HealthCheck{
|
|
|
|
Node: a.Config.NodeName,
|
|
|
|
Name: "consul check",
|
|
|
|
ServiceID: "consul",
|
|
|
|
},
|
|
|
|
}
|
|
|
|
|
|
|
|
var out struct{}
|
|
|
|
require.NoError(t, a.RPC("Catalog.Register", args, &out))
|
|
|
|
|
|
|
|
// Create a new node, service and check
|
|
|
|
args = &structs.RegisterRequest{
|
|
|
|
Datacenter: "dc1",
|
|
|
|
Node: "test-health-node",
|
|
|
|
Address: "127.0.0.2",
|
|
|
|
NodeMeta: map[string]string{"somekey": "somevalue"},
|
|
|
|
Service: &structs.NodeService{
|
|
|
|
ID: "consul",
|
|
|
|
Service: "consul",
|
|
|
|
},
|
|
|
|
Check: &structs.HealthCheck{
|
|
|
|
Node: "test-health-node",
|
|
|
|
Name: "consul check",
|
|
|
|
ServiceID: "consul",
|
|
|
|
},
|
|
|
|
}
|
|
|
|
require.NoError(t, a.RPC("Catalog.Register", args, &out))
|
|
|
|
|
|
|
|
req, _ = http.NewRequest("GET", "/v1/health/service/consul?dc=dc1&filter="+url.QueryEscape("Node.Node == `test-health-node`"), nil)
|
|
|
|
resp = httptest.NewRecorder()
|
|
|
|
obj, err = a.srv.HealthServiceNodes(resp, req)
|
|
|
|
require.NoError(t, err)
|
|
|
|
|
|
|
|
assertIndex(t, resp)
|
|
|
|
|
2019-11-07 20:45:30 +00:00
|
|
|
// Should be a list of checks with 1 element
|
2019-04-16 16:00:15 +00:00
|
|
|
nodes = obj.(structs.CheckServiceNodes)
|
|
|
|
require.Len(t, nodes, 1)
|
|
|
|
require.Len(t, nodes[0].Checks, 1)
|
|
|
|
}
|
|
|
|
|
2015-07-27 21:41:46 +00:00
|
|
|
func TestHealthServiceNodes_DistanceSort(t *testing.T) {
|
2020-12-07 18:42:55 +00:00
|
|
|
if testing.Short() {
|
|
|
|
t.Skip("too slow for testing.Short")
|
|
|
|
}
|
|
|
|
|
2017-05-21 07:54:40 +00:00
|
|
|
t.Parallel()
|
2020-03-31 19:59:56 +00:00
|
|
|
a := NewTestAgent(t, "")
|
2017-05-21 07:11:09 +00:00
|
|
|
defer a.Shutdown()
|
2018-08-06 23:46:09 +00:00
|
|
|
dc := "dc1"
|
2015-07-27 21:41:46 +00:00
|
|
|
// Create a service check
|
|
|
|
args := &structs.RegisterRequest{
|
2018-08-06 23:46:09 +00:00
|
|
|
Datacenter: dc,
|
2015-07-27 21:41:46 +00:00
|
|
|
Node: "bar",
|
|
|
|
Address: "127.0.0.1",
|
2015-07-29 23:33:25 +00:00
|
|
|
Service: &structs.NodeService{
|
2015-07-27 21:41:46 +00:00
|
|
|
ID: "test",
|
|
|
|
Service: "test",
|
|
|
|
},
|
2015-07-29 23:33:25 +00:00
|
|
|
Check: &structs.HealthCheck{
|
2015-07-27 21:41:46 +00:00
|
|
|
Node: "bar",
|
|
|
|
Name: "test check",
|
|
|
|
ServiceID: "test",
|
|
|
|
},
|
|
|
|
}
|
2018-08-06 23:46:09 +00:00
|
|
|
testrpc.WaitForLeader(t, a.RPC, dc)
|
2015-07-27 21:41:46 +00:00
|
|
|
var out struct{}
|
2017-05-21 07:11:09 +00:00
|
|
|
if err := a.RPC("Catalog.Register", args, &out); err != nil {
|
2015-07-27 21:41:46 +00:00
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
|
|
|
|
|
|
|
args.Node, args.Check.Node = "foo", "foo"
|
2017-05-21 07:11:09 +00:00
|
|
|
if err := a.RPC("Catalog.Register", args, &out); err != nil {
|
2015-07-27 21:41:46 +00:00
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
|
|
|
|
2017-05-09 11:38:05 +00:00
|
|
|
req, _ := http.NewRequest("GET", "/v1/health/service/test?dc=dc1&near=foo", nil)
|
2015-07-27 21:41:46 +00:00
|
|
|
resp := httptest.NewRecorder()
|
2017-05-21 07:11:09 +00:00
|
|
|
obj, err := a.srv.HealthServiceNodes(resp, req)
|
2015-07-27 21:41:46 +00:00
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
|
|
|
assertIndex(t, resp)
|
|
|
|
nodes := obj.(structs.CheckServiceNodes)
|
|
|
|
if len(nodes) != 2 {
|
|
|
|
t.Fatalf("bad: %v", obj)
|
|
|
|
}
|
|
|
|
if nodes[0].Node.Node != "bar" {
|
|
|
|
t.Fatalf("bad: %v", nodes)
|
|
|
|
}
|
|
|
|
if nodes[1].Node.Node != "foo" {
|
|
|
|
t.Fatalf("bad: %v", nodes)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Send an update for the node and wait for it to get applied.
|
|
|
|
arg := structs.CoordinateUpdateRequest{
|
|
|
|
Datacenter: "dc1",
|
|
|
|
Node: "foo",
|
|
|
|
Coord: coordinate.NewCoordinate(coordinate.DefaultConfig()),
|
|
|
|
}
|
2017-05-21 07:11:09 +00:00
|
|
|
if err := a.RPC("Coordinate.Update", &arg, &out); err != nil {
|
2015-07-27 21:41:46 +00:00
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
2017-05-05 10:14:43 +00:00
|
|
|
// Retry until foo has moved to the front of the line.
|
|
|
|
retry.Run(t, func(r *retry.R) {
|
|
|
|
resp = httptest.NewRecorder()
|
2017-05-21 07:11:09 +00:00
|
|
|
obj, err = a.srv.HealthServiceNodes(resp, req)
|
2017-05-05 10:14:43 +00:00
|
|
|
if err != nil {
|
|
|
|
r.Fatalf("err: %v", err)
|
|
|
|
}
|
|
|
|
assertIndex(t, resp)
|
|
|
|
nodes = obj.(structs.CheckServiceNodes)
|
|
|
|
if len(nodes) != 2 {
|
|
|
|
r.Fatalf("bad: %v", obj)
|
|
|
|
}
|
|
|
|
if nodes[0].Node.Node != "foo" {
|
|
|
|
r.Fatalf("bad: %v", nodes)
|
|
|
|
}
|
|
|
|
if nodes[1].Node.Node != "bar" {
|
|
|
|
r.Fatalf("bad: %v", nodes)
|
|
|
|
}
|
|
|
|
})
|
2015-07-27 21:41:46 +00:00
|
|
|
}
|
|
|
|
|
2014-04-21 23:08:26 +00:00
|
|
|
func TestHealthServiceNodes_PassingFilter(t *testing.T) {
|
2020-12-07 18:42:55 +00:00
|
|
|
if testing.Short() {
|
|
|
|
t.Skip("too slow for testing.Short")
|
|
|
|
}
|
|
|
|
|
2017-05-21 07:54:40 +00:00
|
|
|
t.Parallel()
|
2020-03-31 19:59:56 +00:00
|
|
|
a := NewTestAgent(t, "")
|
2017-05-21 07:11:09 +00:00
|
|
|
defer a.Shutdown()
|
2014-05-07 21:47:16 +00:00
|
|
|
|
2018-08-06 23:46:09 +00:00
|
|
|
dc := "dc1"
|
2014-05-07 21:47:16 +00:00
|
|
|
// Create a failing service check
|
2014-04-21 23:08:26 +00:00
|
|
|
args := &structs.RegisterRequest{
|
2018-08-06 23:46:09 +00:00
|
|
|
Datacenter: dc,
|
2017-05-21 07:11:09 +00:00
|
|
|
Node: a.Config.NodeName,
|
2014-04-21 23:08:26 +00:00
|
|
|
Address: "127.0.0.1",
|
|
|
|
Check: &structs.HealthCheck{
|
2017-05-21 07:11:09 +00:00
|
|
|
Node: a.Config.NodeName,
|
2014-04-21 23:08:26 +00:00
|
|
|
Name: "consul check",
|
|
|
|
ServiceID: "consul",
|
2017-04-19 23:00:11 +00:00
|
|
|
Status: api.HealthCritical,
|
2014-04-21 23:08:26 +00:00
|
|
|
},
|
|
|
|
}
|
2014-05-07 21:12:53 +00:00
|
|
|
|
2019-07-12 15:52:26 +00:00
|
|
|
retry.Run(t, func(r *retry.R) {
|
|
|
|
var out struct{}
|
|
|
|
if err := a.RPC("Catalog.Register", args, &out); err != nil {
|
|
|
|
r.Fatalf("err: %v", err)
|
|
|
|
}
|
|
|
|
})
|
2014-04-21 23:08:26 +00:00
|
|
|
|
2017-06-09 18:36:00 +00:00
|
|
|
t.Run("bc_no_query_value", func(t *testing.T) {
|
|
|
|
req, _ := http.NewRequest("GET", "/v1/health/service/consul?passing", nil)
|
|
|
|
resp := httptest.NewRecorder()
|
|
|
|
obj, err := a.srv.HealthServiceNodes(resp, req)
|
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
2014-05-09 09:38:29 +00:00
|
|
|
|
2017-06-09 18:36:00 +00:00
|
|
|
assertIndex(t, resp)
|
2014-04-21 23:08:26 +00:00
|
|
|
|
2017-06-09 18:36:00 +00:00
|
|
|
// Should be 0 health check for consul
|
|
|
|
nodes := obj.(structs.CheckServiceNodes)
|
|
|
|
if len(nodes) != 0 {
|
|
|
|
t.Fatalf("bad: %v", obj)
|
|
|
|
}
|
|
|
|
})
|
|
|
|
|
|
|
|
t.Run("passing_true", func(t *testing.T) {
|
|
|
|
req, _ := http.NewRequest("GET", "/v1/health/service/consul?passing=true", nil)
|
|
|
|
resp := httptest.NewRecorder()
|
|
|
|
obj, err := a.srv.HealthServiceNodes(resp, req)
|
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
|
|
|
|
|
|
|
assertIndex(t, resp)
|
|
|
|
|
|
|
|
// Should be 0 health check for consul
|
|
|
|
nodes := obj.(structs.CheckServiceNodes)
|
|
|
|
if len(nodes) != 0 {
|
|
|
|
t.Fatalf("bad: %v", obj)
|
|
|
|
}
|
|
|
|
})
|
|
|
|
|
|
|
|
t.Run("passing_false", func(t *testing.T) {
|
|
|
|
req, _ := http.NewRequest("GET", "/v1/health/service/consul?passing=false", nil)
|
|
|
|
resp := httptest.NewRecorder()
|
|
|
|
obj, err := a.srv.HealthServiceNodes(resp, req)
|
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
|
|
|
|
|
|
|
assertIndex(t, resp)
|
|
|
|
|
2017-06-09 18:51:34 +00:00
|
|
|
// Should be 1 consul, it's unhealthy, but we specifically asked for
|
|
|
|
// everything.
|
2017-06-09 18:36:00 +00:00
|
|
|
nodes := obj.(structs.CheckServiceNodes)
|
|
|
|
if len(nodes) != 1 {
|
|
|
|
t.Fatalf("bad: %v", obj)
|
|
|
|
}
|
|
|
|
})
|
|
|
|
|
|
|
|
t.Run("passing_bad", func(t *testing.T) {
|
|
|
|
req, _ := http.NewRequest("GET", "/v1/health/service/consul?passing=nope-nope-nope", nil)
|
|
|
|
resp := httptest.NewRecorder()
|
2022-01-31 16:17:35 +00:00
|
|
|
_, err := a.srv.HealthServiceNodes(resp, req)
|
2022-04-29 17:42:49 +00:00
|
|
|
require.True(t, isHTTPBadRequest(err), fmt.Sprintf("Expected bad request HTTP error but got %v", err))
|
2022-01-31 16:17:35 +00:00
|
|
|
if !strings.Contains(err.Error(), "Invalid value for ?passing") {
|
|
|
|
t.Errorf("bad %s", err.Error())
|
2017-06-09 18:36:00 +00:00
|
|
|
}
|
|
|
|
})
|
2014-04-21 23:08:26 +00:00
|
|
|
}
|
2014-07-05 16:49:10 +00:00
|
|
|
|
2019-10-17 18:33:11 +00:00
|
|
|
func TestHealthServiceNodes_CheckType(t *testing.T) {
|
2020-12-07 18:42:55 +00:00
|
|
|
if testing.Short() {
|
|
|
|
t.Skip("too slow for testing.Short")
|
|
|
|
}
|
|
|
|
|
2019-10-17 18:33:11 +00:00
|
|
|
t.Parallel()
|
2020-03-31 19:59:56 +00:00
|
|
|
a := NewTestAgent(t, "")
|
2019-10-17 18:33:11 +00:00
|
|
|
defer a.Shutdown()
|
2019-11-07 20:45:30 +00:00
|
|
|
testrpc.WaitForTestAgent(t, a.RPC, "dc1")
|
2019-10-17 18:33:11 +00:00
|
|
|
|
|
|
|
req, _ := http.NewRequest("GET", "/v1/health/service/consul?dc=dc1", nil)
|
|
|
|
resp := httptest.NewRecorder()
|
|
|
|
obj, err := a.srv.HealthServiceNodes(resp, req)
|
|
|
|
require.NoError(t, err)
|
|
|
|
assertIndex(t, resp)
|
|
|
|
|
|
|
|
// Should be 1 health check for consul
|
|
|
|
nodes := obj.(structs.CheckServiceNodes)
|
|
|
|
if len(nodes) != 1 {
|
|
|
|
t.Fatalf("expected 1 node, got %d", len(nodes))
|
|
|
|
}
|
|
|
|
|
|
|
|
args := &structs.RegisterRequest{
|
|
|
|
Datacenter: "dc1",
|
|
|
|
Node: a.Config.NodeName,
|
|
|
|
Address: "127.0.0.1",
|
|
|
|
NodeMeta: map[string]string{"somekey": "somevalue"},
|
|
|
|
Check: &structs.HealthCheck{
|
|
|
|
Node: a.Config.NodeName,
|
|
|
|
Name: "consul check",
|
|
|
|
ServiceID: "consul",
|
|
|
|
Type: "grpc",
|
|
|
|
},
|
|
|
|
}
|
|
|
|
|
|
|
|
var out struct{}
|
|
|
|
require.NoError(t, a.RPC("Catalog.Register", args, &out))
|
|
|
|
|
|
|
|
req, _ = http.NewRequest("GET", "/v1/health/service/consul?dc=dc1", nil)
|
|
|
|
resp = httptest.NewRecorder()
|
|
|
|
obj, err = a.srv.HealthServiceNodes(resp, req)
|
|
|
|
require.NoError(t, err)
|
|
|
|
|
|
|
|
assertIndex(t, resp)
|
|
|
|
|
|
|
|
// Should be a non-nil empty list for checks
|
|
|
|
nodes = obj.(structs.CheckServiceNodes)
|
|
|
|
require.Len(t, nodes, 1)
|
|
|
|
require.Len(t, nodes[0].Checks, 2)
|
|
|
|
|
|
|
|
for _, check := range nodes[0].Checks {
|
|
|
|
if check.Name == "consul check" && check.Type != "grpc" {
|
|
|
|
t.Fatalf("exptected grpc check type, got %s", check.Type)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2016-06-15 18:02:51 +00:00
|
|
|
func TestHealthServiceNodes_WanTranslation(t *testing.T) {
|
2020-12-07 18:42:55 +00:00
|
|
|
if testing.Short() {
|
|
|
|
t.Skip("too slow for testing.Short")
|
|
|
|
}
|
|
|
|
|
2017-05-21 07:54:40 +00:00
|
|
|
t.Parallel()
|
2020-03-31 19:59:56 +00:00
|
|
|
a1 := NewTestAgent(t, `
|
2017-09-25 18:40:42 +00:00
|
|
|
datacenter = "dc1"
|
|
|
|
translate_wan_addrs = true
|
|
|
|
acl_datacenter = ""
|
|
|
|
`)
|
2017-05-21 07:11:09 +00:00
|
|
|
defer a1.Shutdown()
|
|
|
|
|
2020-03-31 19:59:56 +00:00
|
|
|
a2 := NewTestAgent(t, `
|
2017-09-25 18:40:42 +00:00
|
|
|
datacenter = "dc2"
|
|
|
|
translate_wan_addrs = true
|
|
|
|
acl_datacenter = ""
|
|
|
|
`)
|
2017-05-21 07:11:09 +00:00
|
|
|
defer a2.Shutdown()
|
2016-08-15 22:34:11 +00:00
|
|
|
|
|
|
|
// Wait for the WAN join.
|
2017-09-25 18:40:42 +00:00
|
|
|
addr := fmt.Sprintf("127.0.0.1:%d", a1.Config.SerfPortWAN)
|
2019-06-17 14:51:50 +00:00
|
|
|
_, err := a2.srv.agent.JoinWAN([]string{addr})
|
|
|
|
require.NoError(t, err)
|
2017-05-04 22:52:53 +00:00
|
|
|
retry.Run(t, func(r *retry.R) {
|
2019-06-17 14:51:50 +00:00
|
|
|
require.Len(r, a1.WANMembers(), 2)
|
2017-04-29 16:34:02 +00:00
|
|
|
})
|
2016-06-15 18:02:51 +00:00
|
|
|
|
2016-08-15 22:34:11 +00:00
|
|
|
// Register a node with DC2.
|
2016-06-15 18:02:51 +00:00
|
|
|
{
|
|
|
|
args := &structs.RegisterRequest{
|
|
|
|
Datacenter: "dc2",
|
|
|
|
Node: "foo",
|
|
|
|
Address: "127.0.0.1",
|
|
|
|
TaggedAddresses: map[string]string{
|
|
|
|
"wan": "127.0.0.2",
|
|
|
|
},
|
|
|
|
Service: &structs.NodeService{
|
|
|
|
Service: "http_wan_translation_test",
|
2019-06-17 14:51:50 +00:00
|
|
|
Address: "127.0.0.1",
|
|
|
|
Port: 8080,
|
|
|
|
TaggedAddresses: map[string]structs.ServiceAddress{
|
2020-06-16 17:19:31 +00:00
|
|
|
"wan": {
|
2019-06-17 14:51:50 +00:00
|
|
|
Address: "1.2.3.4",
|
|
|
|
Port: 80,
|
|
|
|
},
|
|
|
|
},
|
2016-06-15 18:02:51 +00:00
|
|
|
},
|
|
|
|
}
|
|
|
|
|
|
|
|
var out struct{}
|
2019-06-17 14:51:50 +00:00
|
|
|
require.NoError(t, a2.RPC("Catalog.Register", args, &out))
|
2016-06-15 18:02:51 +00:00
|
|
|
}
|
|
|
|
|
2016-08-15 22:34:11 +00:00
|
|
|
// Query for a service in DC2 from DC1.
|
2017-05-09 11:38:05 +00:00
|
|
|
req, _ := http.NewRequest("GET", "/v1/health/service/http_wan_translation_test?dc=dc2", nil)
|
2016-06-15 18:02:51 +00:00
|
|
|
resp1 := httptest.NewRecorder()
|
2017-05-21 07:11:09 +00:00
|
|
|
obj1, err1 := a1.srv.HealthServiceNodes(resp1, req)
|
2019-06-17 14:51:50 +00:00
|
|
|
require.NoError(t, err1)
|
|
|
|
require.NoError(t, checkIndex(resp1))
|
2016-06-15 18:02:51 +00:00
|
|
|
|
2016-08-15 22:34:11 +00:00
|
|
|
// Expect that DC1 gives us a WAN address (since the node is in DC2).
|
2019-06-17 14:51:50 +00:00
|
|
|
nodes1, ok := obj1.(structs.CheckServiceNodes)
|
|
|
|
require.True(t, ok, "obj1 is not a structs.CheckServiceNodes")
|
|
|
|
require.Len(t, nodes1, 1)
|
|
|
|
node1 := nodes1[0]
|
|
|
|
require.NotNil(t, node1.Node)
|
|
|
|
require.Equal(t, node1.Node.Address, "127.0.0.2")
|
|
|
|
require.NotNil(t, node1.Service)
|
|
|
|
require.Equal(t, node1.Service.Address, "1.2.3.4")
|
|
|
|
require.Equal(t, node1.Service.Port, 80)
|
2016-06-15 18:02:51 +00:00
|
|
|
|
2016-08-15 22:34:11 +00:00
|
|
|
// Query DC2 from DC2.
|
2016-06-15 18:02:51 +00:00
|
|
|
resp2 := httptest.NewRecorder()
|
2017-05-21 07:11:09 +00:00
|
|
|
obj2, err2 := a2.srv.HealthServiceNodes(resp2, req)
|
2019-06-17 14:51:50 +00:00
|
|
|
require.NoError(t, err2)
|
|
|
|
require.NoError(t, checkIndex(resp2))
|
|
|
|
|
|
|
|
// Expect that DC2 gives us a local address (since the node is in DC2).
|
|
|
|
nodes2, ok := obj2.(structs.CheckServiceNodes)
|
|
|
|
require.True(t, ok, "obj2 is not a structs.ServiceNodes")
|
|
|
|
require.Len(t, nodes2, 1)
|
|
|
|
node2 := nodes2[0]
|
|
|
|
require.NotNil(t, node2.Node)
|
|
|
|
require.Equal(t, node2.Node.Address, "127.0.0.1")
|
|
|
|
require.NotNil(t, node2.Service)
|
|
|
|
require.Equal(t, node2.Service.Address, "127.0.0.1")
|
|
|
|
require.Equal(t, node2.Service.Port, 8080)
|
2016-06-15 18:02:51 +00:00
|
|
|
}
|
|
|
|
|
2018-03-09 17:52:32 +00:00
|
|
|
func TestHealthConnectServiceNodes(t *testing.T) {
|
2020-12-07 18:42:55 +00:00
|
|
|
if testing.Short() {
|
|
|
|
t.Skip("too slow for testing.Short")
|
|
|
|
}
|
|
|
|
|
2018-03-09 17:52:32 +00:00
|
|
|
t.Parallel()
|
|
|
|
|
2020-03-31 19:59:56 +00:00
|
|
|
a := NewTestAgent(t, "")
|
2018-03-09 17:52:32 +00:00
|
|
|
defer a.Shutdown()
|
|
|
|
|
|
|
|
// Register
|
|
|
|
args := structs.TestRegisterRequestProxy(t)
|
|
|
|
var out struct{}
|
bulk rewrite using this script
set -euo pipefail
unset CDPATH
cd "$(dirname "$0")"
for f in $(git grep '\brequire := require\.New(' | cut -d':' -f1 | sort -u); do
echo "=== require: $f ==="
sed -i '/require := require.New(t)/d' $f
# require.XXX(blah) but not require.XXX(tblah) or require.XXX(rblah)
sed -i 's/\brequire\.\([a-zA-Z0-9_]*\)(\([^tr]\)/require.\1(t,\2/g' $f
# require.XXX(tblah) but not require.XXX(t, blah)
sed -i 's/\brequire\.\([a-zA-Z0-9_]*\)(\(t[^,]\)/require.\1(t,\2/g' $f
# require.XXX(rblah) but not require.XXX(r, blah)
sed -i 's/\brequire\.\([a-zA-Z0-9_]*\)(\(r[^,]\)/require.\1(t,\2/g' $f
gofmt -s -w $f
done
for f in $(git grep '\bassert := assert\.New(' | cut -d':' -f1 | sort -u); do
echo "=== assert: $f ==="
sed -i '/assert := assert.New(t)/d' $f
# assert.XXX(blah) but not assert.XXX(tblah) or assert.XXX(rblah)
sed -i 's/\bassert\.\([a-zA-Z0-9_]*\)(\([^tr]\)/assert.\1(t,\2/g' $f
# assert.XXX(tblah) but not assert.XXX(t, blah)
sed -i 's/\bassert\.\([a-zA-Z0-9_]*\)(\(t[^,]\)/assert.\1(t,\2/g' $f
# assert.XXX(rblah) but not assert.XXX(r, blah)
sed -i 's/\bassert\.\([a-zA-Z0-9_]*\)(\(r[^,]\)/assert.\1(t,\2/g' $f
gofmt -s -w $f
done
2022-01-20 16:46:23 +00:00
|
|
|
assert.Nil(t, a.RPC("Catalog.Register", args, &out))
|
2018-03-09 17:52:32 +00:00
|
|
|
|
|
|
|
// Request
|
|
|
|
req, _ := http.NewRequest("GET", fmt.Sprintf(
|
2018-09-12 16:07:47 +00:00
|
|
|
"/v1/health/connect/%s?dc=dc1", args.Service.Proxy.DestinationServiceName), nil)
|
2018-03-09 17:52:32 +00:00
|
|
|
resp := httptest.NewRecorder()
|
|
|
|
obj, err := a.srv.HealthConnectServiceNodes(resp, req)
|
bulk rewrite using this script
set -euo pipefail
unset CDPATH
cd "$(dirname "$0")"
for f in $(git grep '\brequire := require\.New(' | cut -d':' -f1 | sort -u); do
echo "=== require: $f ==="
sed -i '/require := require.New(t)/d' $f
# require.XXX(blah) but not require.XXX(tblah) or require.XXX(rblah)
sed -i 's/\brequire\.\([a-zA-Z0-9_]*\)(\([^tr]\)/require.\1(t,\2/g' $f
# require.XXX(tblah) but not require.XXX(t, blah)
sed -i 's/\brequire\.\([a-zA-Z0-9_]*\)(\(t[^,]\)/require.\1(t,\2/g' $f
# require.XXX(rblah) but not require.XXX(r, blah)
sed -i 's/\brequire\.\([a-zA-Z0-9_]*\)(\(r[^,]\)/require.\1(t,\2/g' $f
gofmt -s -w $f
done
for f in $(git grep '\bassert := assert\.New(' | cut -d':' -f1 | sort -u); do
echo "=== assert: $f ==="
sed -i '/assert := assert.New(t)/d' $f
# assert.XXX(blah) but not assert.XXX(tblah) or assert.XXX(rblah)
sed -i 's/\bassert\.\([a-zA-Z0-9_]*\)(\([^tr]\)/assert.\1(t,\2/g' $f
# assert.XXX(tblah) but not assert.XXX(t, blah)
sed -i 's/\bassert\.\([a-zA-Z0-9_]*\)(\(t[^,]\)/assert.\1(t,\2/g' $f
# assert.XXX(rblah) but not assert.XXX(r, blah)
sed -i 's/\bassert\.\([a-zA-Z0-9_]*\)(\(r[^,]\)/assert.\1(t,\2/g' $f
gofmt -s -w $f
done
2022-01-20 16:46:23 +00:00
|
|
|
assert.Nil(t, err)
|
2018-03-09 17:52:32 +00:00
|
|
|
assertIndex(t, resp)
|
|
|
|
|
|
|
|
// Should be a non-nil empty list for checks
|
|
|
|
nodes := obj.(structs.CheckServiceNodes)
|
bulk rewrite using this script
set -euo pipefail
unset CDPATH
cd "$(dirname "$0")"
for f in $(git grep '\brequire := require\.New(' | cut -d':' -f1 | sort -u); do
echo "=== require: $f ==="
sed -i '/require := require.New(t)/d' $f
# require.XXX(blah) but not require.XXX(tblah) or require.XXX(rblah)
sed -i 's/\brequire\.\([a-zA-Z0-9_]*\)(\([^tr]\)/require.\1(t,\2/g' $f
# require.XXX(tblah) but not require.XXX(t, blah)
sed -i 's/\brequire\.\([a-zA-Z0-9_]*\)(\(t[^,]\)/require.\1(t,\2/g' $f
# require.XXX(rblah) but not require.XXX(r, blah)
sed -i 's/\brequire\.\([a-zA-Z0-9_]*\)(\(r[^,]\)/require.\1(t,\2/g' $f
gofmt -s -w $f
done
for f in $(git grep '\bassert := assert\.New(' | cut -d':' -f1 | sort -u); do
echo "=== assert: $f ==="
sed -i '/assert := assert.New(t)/d' $f
# assert.XXX(blah) but not assert.XXX(tblah) or assert.XXX(rblah)
sed -i 's/\bassert\.\([a-zA-Z0-9_]*\)(\([^tr]\)/assert.\1(t,\2/g' $f
# assert.XXX(tblah) but not assert.XXX(t, blah)
sed -i 's/\bassert\.\([a-zA-Z0-9_]*\)(\(t[^,]\)/assert.\1(t,\2/g' $f
# assert.XXX(rblah) but not assert.XXX(r, blah)
sed -i 's/\bassert\.\([a-zA-Z0-9_]*\)(\(r[^,]\)/assert.\1(t,\2/g' $f
gofmt -s -w $f
done
2022-01-20 16:46:23 +00:00
|
|
|
assert.Len(t, nodes, 1)
|
|
|
|
assert.Len(t, nodes[0].Checks, 0)
|
2018-03-09 17:52:32 +00:00
|
|
|
}
|
|
|
|
|
2020-06-10 18:07:15 +00:00
|
|
|
func TestHealthIngressServiceNodes(t *testing.T) {
|
2021-04-05 18:23:00 +00:00
|
|
|
t.Run("no streaming", func(t *testing.T) {
|
|
|
|
testHealthIngressServiceNodes(t, ` rpc { enable_streaming = false } use_streaming_backend = false `)
|
|
|
|
})
|
|
|
|
t.Run("cache with streaming", func(t *testing.T) {
|
|
|
|
testHealthIngressServiceNodes(t, ` rpc { enable_streaming = true } use_streaming_backend = true `)
|
|
|
|
})
|
|
|
|
}
|
|
|
|
|
|
|
|
// testHealthIngressServiceNodes exercises /v1/health/ingress/<service> with
// the given agent configuration: it registers an ingress gateway and a plain
// service, links them via an ingress-gateway config entry, then checks the
// endpoint for associated and non-associated services and the agent-cache
// MISS/HIT behavior. Subtests are wrapped in require.True so a failing step
// stops the ordered sequence (the cache-hit subtest depends on the miss one).
func testHealthIngressServiceNodes(t *testing.T, agentHCL string) {
	if testing.Short() {
		t.Skip("too slow for testing.Short")
	}

	a := NewTestAgent(t, agentHCL)
	defer a.Shutdown()
	testrpc.WaitForLeader(t, a.RPC, "dc1")

	// Register gateway
	gatewayArgs := structs.TestRegisterIngressGateway(t)
	gatewayArgs.Service.Address = "127.0.0.27"
	var out struct{}
	require.NoError(t, a.RPC("Catalog.Register", gatewayArgs, &out))

	// Register the backing service the gateway will expose.
	args := structs.TestRegisterRequest(t)
	require.NoError(t, a.RPC("Catalog.Register", args, &out))

	// Associate service to gateway
	cfgArgs := &structs.IngressGatewayConfigEntry{
		Name: "ingress-gateway",
		Kind: structs.IngressGateway,
		Listeners: []structs.IngressListener{
			{
				Port:     8888,
				Protocol: "tcp",
				Services: []structs.IngressService{
					{Name: args.Service.Service},
				},
			},
		},
	}

	req := structs.ConfigEntryRequest{
		Op:         structs.ConfigEntryUpsert,
		Datacenter: "dc1",
		Entry:      cfgArgs,
	}
	var outB bool
	require.Nil(t, a.RPC("ConfigEntry.Apply", req, &outB))
	require.True(t, outB)

	// checkResults asserts the endpoint returned exactly the gateway
	// instance registered above (kind, address and proxy config).
	checkResults := func(t *testing.T, obj interface{}) {
		nodes := obj.(structs.CheckServiceNodes)
		require.Len(t, nodes, 1)
		require.Equal(t, structs.ServiceKindIngressGateway, nodes[0].Service.Kind)
		require.Equal(t, gatewayArgs.Service.Address, nodes[0].Service.Address)
		require.Equal(t, gatewayArgs.Service.Proxy, nodes[0].Service.Proxy)
	}

	// Querying the associated service must return the gateway instance.
	require.True(t, t.Run("associated service", func(t *testing.T) {
		req, _ := http.NewRequest("GET", fmt.Sprintf(
			"/v1/health/ingress/%s", args.Service.Service), nil)
		resp := httptest.NewRecorder()
		obj, err := a.srv.HealthIngressServiceNodes(resp, req)
		require.NoError(t, err)
		assertIndex(t, resp)

		checkResults(t, obj)
	}))

	// A service with no gateway association yields an empty result, not an
	// error.
	require.True(t, t.Run("non-associated service", func(t *testing.T) {
		req, _ := http.NewRequest("GET",
			"/v1/health/connect/notexist", nil)
		resp := httptest.NewRecorder()
		obj, err := a.srv.HealthIngressServiceNodes(resp, req)
		require.NoError(t, err)
		assertIndex(t, resp)

		nodes := obj.(structs.CheckServiceNodes)
		require.Len(t, nodes, 0)
	}))

	require.True(t, t.Run("test caching miss", func(t *testing.T) {
		// List instances with cache enabled
		req, _ := http.NewRequest("GET", fmt.Sprintf(
			"/v1/health/ingress/%s?cached", args.Service.Service), nil)
		resp := httptest.NewRecorder()
		obj, err := a.srv.HealthIngressServiceNodes(resp, req)
		require.NoError(t, err)

		checkResults(t, obj)

		// Should be a cache miss
		require.Equal(t, "MISS", resp.Header().Get("X-Cache"))
		// always a blocking query, because the ingress endpoint does not yet support streaming.
		require.Equal(t, "blocking-query", resp.Header().Get("X-Consul-Query-Backend"))
	}))

	require.True(t, t.Run("test caching hit", func(t *testing.T) {
		// List instances with cache enabled
		req, _ := http.NewRequest("GET", fmt.Sprintf(
			"/v1/health/ingress/%s?cached", args.Service.Service), nil)
		resp := httptest.NewRecorder()
		obj, err := a.srv.HealthIngressServiceNodes(resp, req)
		require.NoError(t, err)

		checkResults(t, obj)

		// Should be a cache HIT now!
		require.Equal(t, "HIT", resp.Header().Get("X-Cache"))
		// always a blocking query, because the ingress endpoint does not yet support streaming.
		require.Equal(t, "blocking-query", resp.Header().Get("X-Consul-Query-Backend"))
	}))
}
|
|
|
|
|
2019-04-16 16:00:15 +00:00
|
|
|
func TestHealthConnectServiceNodes_Filter(t *testing.T) {
|
2020-12-07 18:42:55 +00:00
|
|
|
if testing.Short() {
|
|
|
|
t.Skip("too slow for testing.Short")
|
|
|
|
}
|
|
|
|
|
2019-04-16 16:00:15 +00:00
|
|
|
t.Parallel()
|
|
|
|
|
2020-03-31 19:59:56 +00:00
|
|
|
a := NewTestAgent(t, "")
|
2019-04-16 16:00:15 +00:00
|
|
|
defer a.Shutdown()
|
|
|
|
testrpc.WaitForLeader(t, a.RPC, "dc1")
|
|
|
|
|
|
|
|
// Register
|
|
|
|
args := structs.TestRegisterRequestProxy(t)
|
|
|
|
args.Service.Address = "127.0.0.55"
|
|
|
|
var out struct{}
|
|
|
|
require.NoError(t, a.RPC("Catalog.Register", args, &out))
|
|
|
|
|
|
|
|
args = structs.TestRegisterRequestProxy(t)
|
|
|
|
args.Service.Address = "127.0.0.55"
|
|
|
|
args.Service.Meta = map[string]string{
|
|
|
|
"version": "2",
|
|
|
|
}
|
|
|
|
args.Service.ID = "web-proxy2"
|
|
|
|
args.SkipNodeUpdate = true
|
|
|
|
require.NoError(t, a.RPC("Catalog.Register", args, &out))
|
|
|
|
|
|
|
|
req, _ := http.NewRequest("GET", fmt.Sprintf(
|
|
|
|
"/v1/health/connect/%s?filter=%s",
|
|
|
|
args.Service.Proxy.DestinationServiceName,
|
|
|
|
url.QueryEscape("Service.Meta.version == 2")), nil)
|
|
|
|
resp := httptest.NewRecorder()
|
|
|
|
obj, err := a.srv.HealthConnectServiceNodes(resp, req)
|
|
|
|
require.NoError(t, err)
|
|
|
|
assertIndex(t, resp)
|
|
|
|
|
|
|
|
nodes := obj.(structs.CheckServiceNodes)
|
|
|
|
require.Len(t, nodes, 1)
|
|
|
|
require.Equal(t, structs.ServiceKindConnectProxy, nodes[0].Service.Kind)
|
|
|
|
require.Equal(t, args.Service.Address, nodes[0].Service.Address)
|
|
|
|
require.Equal(t, args.Service.Proxy, nodes[0].Service.Proxy)
|
|
|
|
}
|
|
|
|
|
2018-03-09 17:52:32 +00:00
|
|
|
func TestHealthConnectServiceNodes_PassingFilter(t *testing.T) {
|
2020-12-07 18:42:55 +00:00
|
|
|
if testing.Short() {
|
|
|
|
t.Skip("too slow for testing.Short")
|
|
|
|
}
|
|
|
|
|
2018-03-09 17:52:32 +00:00
|
|
|
t.Parallel()
|
|
|
|
|
2020-03-31 19:59:56 +00:00
|
|
|
a := NewTestAgent(t, "")
|
2018-03-09 17:52:32 +00:00
|
|
|
defer a.Shutdown()
|
|
|
|
|
|
|
|
// Register
|
|
|
|
args := structs.TestRegisterRequestProxy(t)
|
|
|
|
args.Check = &structs.HealthCheck{
|
|
|
|
Node: args.Node,
|
|
|
|
Name: "check",
|
|
|
|
ServiceID: args.Service.Service,
|
|
|
|
Status: api.HealthCritical,
|
|
|
|
}
|
|
|
|
var out struct{}
|
|
|
|
assert.Nil(t, a.RPC("Catalog.Register", args, &out))
|
|
|
|
|
|
|
|
t.Run("bc_no_query_value", func(t *testing.T) {
|
|
|
|
req, _ := http.NewRequest("GET", fmt.Sprintf(
|
2018-09-12 16:07:47 +00:00
|
|
|
"/v1/health/connect/%s?passing", args.Service.Proxy.DestinationServiceName), nil)
|
2018-03-09 17:52:32 +00:00
|
|
|
resp := httptest.NewRecorder()
|
|
|
|
obj, err := a.srv.HealthConnectServiceNodes(resp, req)
|
bulk rewrite using this script
set -euo pipefail
unset CDPATH
cd "$(dirname "$0")"
for f in $(git grep '\brequire := require\.New(' | cut -d':' -f1 | sort -u); do
echo "=== require: $f ==="
sed -i '/require := require.New(t)/d' $f
# require.XXX(blah) but not require.XXX(tblah) or require.XXX(rblah)
sed -i 's/\brequire\.\([a-zA-Z0-9_]*\)(\([^tr]\)/require.\1(t,\2/g' $f
# require.XXX(tblah) but not require.XXX(t, blah)
sed -i 's/\brequire\.\([a-zA-Z0-9_]*\)(\(t[^,]\)/require.\1(t,\2/g' $f
# require.XXX(rblah) but not require.XXX(r, blah)
sed -i 's/\brequire\.\([a-zA-Z0-9_]*\)(\(r[^,]\)/require.\1(t,\2/g' $f
gofmt -s -w $f
done
for f in $(git grep '\bassert := assert\.New(' | cut -d':' -f1 | sort -u); do
echo "=== assert: $f ==="
sed -i '/assert := assert.New(t)/d' $f
# assert.XXX(blah) but not assert.XXX(tblah) or assert.XXX(rblah)
sed -i 's/\bassert\.\([a-zA-Z0-9_]*\)(\([^tr]\)/assert.\1(t,\2/g' $f
# assert.XXX(tblah) but not assert.XXX(t, blah)
sed -i 's/\bassert\.\([a-zA-Z0-9_]*\)(\(t[^,]\)/assert.\1(t,\2/g' $f
# assert.XXX(rblah) but not assert.XXX(r, blah)
sed -i 's/\bassert\.\([a-zA-Z0-9_]*\)(\(r[^,]\)/assert.\1(t,\2/g' $f
gofmt -s -w $f
done
2022-01-20 16:46:23 +00:00
|
|
|
assert.Nil(t, err)
|
2018-03-09 17:52:32 +00:00
|
|
|
assertIndex(t, resp)
|
|
|
|
|
|
|
|
// Should be 0 health check for consul
|
|
|
|
nodes := obj.(structs.CheckServiceNodes)
|
bulk rewrite using this script
set -euo pipefail
unset CDPATH
cd "$(dirname "$0")"
for f in $(git grep '\brequire := require\.New(' | cut -d':' -f1 | sort -u); do
echo "=== require: $f ==="
sed -i '/require := require.New(t)/d' $f
# require.XXX(blah) but not require.XXX(tblah) or require.XXX(rblah)
sed -i 's/\brequire\.\([a-zA-Z0-9_]*\)(\([^tr]\)/require.\1(t,\2/g' $f
# require.XXX(tblah) but not require.XXX(t, blah)
sed -i 's/\brequire\.\([a-zA-Z0-9_]*\)(\(t[^,]\)/require.\1(t,\2/g' $f
# require.XXX(rblah) but not require.XXX(r, blah)
sed -i 's/\brequire\.\([a-zA-Z0-9_]*\)(\(r[^,]\)/require.\1(t,\2/g' $f
gofmt -s -w $f
done
for f in $(git grep '\bassert := assert\.New(' | cut -d':' -f1 | sort -u); do
echo "=== assert: $f ==="
sed -i '/assert := assert.New(t)/d' $f
# assert.XXX(blah) but not assert.XXX(tblah) or assert.XXX(rblah)
sed -i 's/\bassert\.\([a-zA-Z0-9_]*\)(\([^tr]\)/assert.\1(t,\2/g' $f
# assert.XXX(tblah) but not assert.XXX(t, blah)
sed -i 's/\bassert\.\([a-zA-Z0-9_]*\)(\(t[^,]\)/assert.\1(t,\2/g' $f
# assert.XXX(rblah) but not assert.XXX(r, blah)
sed -i 's/\bassert\.\([a-zA-Z0-9_]*\)(\(r[^,]\)/assert.\1(t,\2/g' $f
gofmt -s -w $f
done
2022-01-20 16:46:23 +00:00
|
|
|
assert.Len(t, nodes, 0)
|
2018-03-09 17:52:32 +00:00
|
|
|
})
|
|
|
|
|
|
|
|
t.Run("passing_true", func(t *testing.T) {
|
|
|
|
req, _ := http.NewRequest("GET", fmt.Sprintf(
|
2018-09-12 16:07:47 +00:00
|
|
|
"/v1/health/connect/%s?passing=true", args.Service.Proxy.DestinationServiceName), nil)
|
2018-03-09 17:52:32 +00:00
|
|
|
resp := httptest.NewRecorder()
|
|
|
|
obj, err := a.srv.HealthConnectServiceNodes(resp, req)
|
bulk rewrite using this script
set -euo pipefail
unset CDPATH
cd "$(dirname "$0")"
for f in $(git grep '\brequire := require\.New(' | cut -d':' -f1 | sort -u); do
echo "=== require: $f ==="
sed -i '/require := require.New(t)/d' $f
# require.XXX(blah) but not require.XXX(tblah) or require.XXX(rblah)
sed -i 's/\brequire\.\([a-zA-Z0-9_]*\)(\([^tr]\)/require.\1(t,\2/g' $f
# require.XXX(tblah) but not require.XXX(t, blah)
sed -i 's/\brequire\.\([a-zA-Z0-9_]*\)(\(t[^,]\)/require.\1(t,\2/g' $f
# require.XXX(rblah) but not require.XXX(r, blah)
sed -i 's/\brequire\.\([a-zA-Z0-9_]*\)(\(r[^,]\)/require.\1(t,\2/g' $f
gofmt -s -w $f
done
for f in $(git grep '\bassert := assert\.New(' | cut -d':' -f1 | sort -u); do
echo "=== assert: $f ==="
sed -i '/assert := assert.New(t)/d' $f
# assert.XXX(blah) but not assert.XXX(tblah) or assert.XXX(rblah)
sed -i 's/\bassert\.\([a-zA-Z0-9_]*\)(\([^tr]\)/assert.\1(t,\2/g' $f
# assert.XXX(tblah) but not assert.XXX(t, blah)
sed -i 's/\bassert\.\([a-zA-Z0-9_]*\)(\(t[^,]\)/assert.\1(t,\2/g' $f
# assert.XXX(rblah) but not assert.XXX(r, blah)
sed -i 's/\bassert\.\([a-zA-Z0-9_]*\)(\(r[^,]\)/assert.\1(t,\2/g' $f
gofmt -s -w $f
done
2022-01-20 16:46:23 +00:00
|
|
|
assert.Nil(t, err)
|
2018-03-09 17:52:32 +00:00
|
|
|
assertIndex(t, resp)
|
|
|
|
|
|
|
|
// Should be 0 health check for consul
|
|
|
|
nodes := obj.(structs.CheckServiceNodes)
|
bulk rewrite using this script
set -euo pipefail
unset CDPATH
cd "$(dirname "$0")"
for f in $(git grep '\brequire := require\.New(' | cut -d':' -f1 | sort -u); do
echo "=== require: $f ==="
sed -i '/require := require.New(t)/d' $f
# require.XXX(blah) but not require.XXX(tblah) or require.XXX(rblah)
sed -i 's/\brequire\.\([a-zA-Z0-9_]*\)(\([^tr]\)/require.\1(t,\2/g' $f
# require.XXX(tblah) but not require.XXX(t, blah)
sed -i 's/\brequire\.\([a-zA-Z0-9_]*\)(\(t[^,]\)/require.\1(t,\2/g' $f
# require.XXX(rblah) but not require.XXX(r, blah)
sed -i 's/\brequire\.\([a-zA-Z0-9_]*\)(\(r[^,]\)/require.\1(t,\2/g' $f
gofmt -s -w $f
done
for f in $(git grep '\bassert := assert\.New(' | cut -d':' -f1 | sort -u); do
echo "=== assert: $f ==="
sed -i '/assert := assert.New(t)/d' $f
# assert.XXX(blah) but not assert.XXX(tblah) or assert.XXX(rblah)
sed -i 's/\bassert\.\([a-zA-Z0-9_]*\)(\([^tr]\)/assert.\1(t,\2/g' $f
# assert.XXX(tblah) but not assert.XXX(t, blah)
sed -i 's/\bassert\.\([a-zA-Z0-9_]*\)(\(t[^,]\)/assert.\1(t,\2/g' $f
# assert.XXX(rblah) but not assert.XXX(r, blah)
sed -i 's/\bassert\.\([a-zA-Z0-9_]*\)(\(r[^,]\)/assert.\1(t,\2/g' $f
gofmt -s -w $f
done
2022-01-20 16:46:23 +00:00
|
|
|
assert.Len(t, nodes, 0)
|
2018-03-09 17:52:32 +00:00
|
|
|
})
|
|
|
|
|
|
|
|
t.Run("passing_false", func(t *testing.T) {
|
|
|
|
req, _ := http.NewRequest("GET", fmt.Sprintf(
|
2018-09-12 16:07:47 +00:00
|
|
|
"/v1/health/connect/%s?passing=false", args.Service.Proxy.DestinationServiceName), nil)
|
2018-03-09 17:52:32 +00:00
|
|
|
resp := httptest.NewRecorder()
|
|
|
|
obj, err := a.srv.HealthConnectServiceNodes(resp, req)
|
bulk rewrite using this script
set -euo pipefail
unset CDPATH
cd "$(dirname "$0")"
for f in $(git grep '\brequire := require\.New(' | cut -d':' -f1 | sort -u); do
echo "=== require: $f ==="
sed -i '/require := require.New(t)/d' $f
# require.XXX(blah) but not require.XXX(tblah) or require.XXX(rblah)
sed -i 's/\brequire\.\([a-zA-Z0-9_]*\)(\([^tr]\)/require.\1(t,\2/g' $f
# require.XXX(tblah) but not require.XXX(t, blah)
sed -i 's/\brequire\.\([a-zA-Z0-9_]*\)(\(t[^,]\)/require.\1(t,\2/g' $f
# require.XXX(rblah) but not require.XXX(r, blah)
sed -i 's/\brequire\.\([a-zA-Z0-9_]*\)(\(r[^,]\)/require.\1(t,\2/g' $f
gofmt -s -w $f
done
for f in $(git grep '\bassert := assert\.New(' | cut -d':' -f1 | sort -u); do
echo "=== assert: $f ==="
sed -i '/assert := assert.New(t)/d' $f
# assert.XXX(blah) but not assert.XXX(tblah) or assert.XXX(rblah)
sed -i 's/\bassert\.\([a-zA-Z0-9_]*\)(\([^tr]\)/assert.\1(t,\2/g' $f
# assert.XXX(tblah) but not assert.XXX(t, blah)
sed -i 's/\bassert\.\([a-zA-Z0-9_]*\)(\(t[^,]\)/assert.\1(t,\2/g' $f
# assert.XXX(rblah) but not assert.XXX(r, blah)
sed -i 's/\bassert\.\([a-zA-Z0-9_]*\)(\(r[^,]\)/assert.\1(t,\2/g' $f
gofmt -s -w $f
done
2022-01-20 16:46:23 +00:00
|
|
|
assert.Nil(t, err)
|
2018-03-09 17:52:32 +00:00
|
|
|
assertIndex(t, resp)
|
|
|
|
|
2018-03-22 02:54:44 +00:00
|
|
|
// Should be 1
|
2018-03-09 17:52:32 +00:00
|
|
|
nodes := obj.(structs.CheckServiceNodes)
|
bulk rewrite using this script
set -euo pipefail
unset CDPATH
cd "$(dirname "$0")"
for f in $(git grep '\brequire := require\.New(' | cut -d':' -f1 | sort -u); do
echo "=== require: $f ==="
sed -i '/require := require.New(t)/d' $f
# require.XXX(blah) but not require.XXX(tblah) or require.XXX(rblah)
sed -i 's/\brequire\.\([a-zA-Z0-9_]*\)(\([^tr]\)/require.\1(t,\2/g' $f
# require.XXX(tblah) but not require.XXX(t, blah)
sed -i 's/\brequire\.\([a-zA-Z0-9_]*\)(\(t[^,]\)/require.\1(t,\2/g' $f
# require.XXX(rblah) but not require.XXX(r, blah)
sed -i 's/\brequire\.\([a-zA-Z0-9_]*\)(\(r[^,]\)/require.\1(t,\2/g' $f
gofmt -s -w $f
done
for f in $(git grep '\bassert := assert\.New(' | cut -d':' -f1 | sort -u); do
echo "=== assert: $f ==="
sed -i '/assert := assert.New(t)/d' $f
# assert.XXX(blah) but not assert.XXX(tblah) or assert.XXX(rblah)
sed -i 's/\bassert\.\([a-zA-Z0-9_]*\)(\([^tr]\)/assert.\1(t,\2/g' $f
# assert.XXX(tblah) but not assert.XXX(t, blah)
sed -i 's/\bassert\.\([a-zA-Z0-9_]*\)(\(t[^,]\)/assert.\1(t,\2/g' $f
# assert.XXX(rblah) but not assert.XXX(r, blah)
sed -i 's/\bassert\.\([a-zA-Z0-9_]*\)(\(r[^,]\)/assert.\1(t,\2/g' $f
gofmt -s -w $f
done
2022-01-20 16:46:23 +00:00
|
|
|
assert.Len(t, nodes, 1)
|
2018-03-09 17:52:32 +00:00
|
|
|
})
|
|
|
|
|
|
|
|
t.Run("passing_bad", func(t *testing.T) {
|
|
|
|
req, _ := http.NewRequest("GET", fmt.Sprintf(
|
2018-09-12 16:07:47 +00:00
|
|
|
"/v1/health/connect/%s?passing=nope-nope", args.Service.Proxy.DestinationServiceName), nil)
|
2018-03-09 17:52:32 +00:00
|
|
|
resp := httptest.NewRecorder()
|
2022-01-31 16:17:35 +00:00
|
|
|
_, err := a.srv.HealthConnectServiceNodes(resp, req)
|
|
|
|
assert.NotNil(t, err)
|
2022-04-29 17:42:49 +00:00
|
|
|
assert.True(t, isHTTPBadRequest(err))
|
2018-03-09 17:52:32 +00:00
|
|
|
|
2022-01-31 16:17:35 +00:00
|
|
|
assert.True(t, strings.Contains(err.Error(), "Invalid value for ?passing"))
|
2018-03-09 17:52:32 +00:00
|
|
|
})
|
|
|
|
}
|
|
|
|
|
2014-07-05 16:49:10 +00:00
|
|
|
func TestFilterNonPassing(t *testing.T) {
|
2017-05-21 07:54:40 +00:00
|
|
|
t.Parallel()
|
2014-07-05 16:49:10 +00:00
|
|
|
nodes := structs.CheckServiceNodes{
|
|
|
|
structs.CheckServiceNode{
|
|
|
|
Checks: structs.HealthChecks{
|
|
|
|
&structs.HealthCheck{
|
2017-04-19 23:00:11 +00:00
|
|
|
Status: api.HealthCritical,
|
2014-07-05 16:49:10 +00:00
|
|
|
},
|
|
|
|
&structs.HealthCheck{
|
2017-04-19 23:00:11 +00:00
|
|
|
Status: api.HealthCritical,
|
2014-07-05 16:49:10 +00:00
|
|
|
},
|
|
|
|
},
|
|
|
|
},
|
|
|
|
structs.CheckServiceNode{
|
|
|
|
Checks: structs.HealthChecks{
|
|
|
|
&structs.HealthCheck{
|
2017-04-19 23:00:11 +00:00
|
|
|
Status: api.HealthCritical,
|
2014-07-05 16:49:10 +00:00
|
|
|
},
|
|
|
|
&structs.HealthCheck{
|
2017-04-19 23:00:11 +00:00
|
|
|
Status: api.HealthCritical,
|
2014-07-05 16:49:10 +00:00
|
|
|
},
|
|
|
|
},
|
|
|
|
},
|
|
|
|
structs.CheckServiceNode{
|
|
|
|
Checks: structs.HealthChecks{
|
|
|
|
&structs.HealthCheck{
|
2017-04-19 23:00:11 +00:00
|
|
|
Status: api.HealthPassing,
|
2014-07-05 16:49:10 +00:00
|
|
|
},
|
|
|
|
},
|
|
|
|
},
|
|
|
|
}
|
|
|
|
out := filterNonPassing(nodes)
|
|
|
|
if len(out) != 1 && reflect.DeepEqual(out[0], nodes[2]) {
|
|
|
|
t.Fatalf("bad: %v", out)
|
|
|
|
}
|
|
|
|
}
|