mirror of https://github.com/status-im/consul.git
catalog: compare node names case insensitively in more places (#12444)
Many places in consul already treated node names case insensitively. The state store indexes already do it, but there are a few places that did a direct byte comparison which have now been corrected. One place of particular consideration is ensureCheckIfNodeMatches which is executed during snapshot restore (among other places). If a node check used a slightly different casing than the casing of the node during register then the snapshot restore here would deterministically fail. This has been fixed. Primary approach: git grep -i "node.*[!=]=.*node" -- ':!*_test.go' ':!docs' git grep -i '\[[^]]*member[^]]*\] git grep -i '\[[^]]*\(member\|name\|node\)[^]]*\]' -- ':!*_test.go' ':!website' ':!ui' ':!agent/proxycfg/testing.go:' ':!*.md'
This commit is contained in:
parent
263357f7d5
commit
957146401e
@ -0,0 +1,7 @@
```release-note:bug
catalog: compare node names case insensitively in more places
```

```release-note:bug
state: fix bug blocking snapshot restore when a node check and node differed in casing of the node string
```
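The recurring change in the diff below replaces direct `==`/`!=` byte comparisons of node names with case-insensitive ones. A minimal sketch of the two comparison styles, assuming only the Go standard library (the helper name is made up for illustration):

```go
package main

import (
	"fmt"
	"strings"
)

// nodeNamesEqual is a hypothetical helper showing the comparison this commit
// standardizes on: strings.EqualFold treats "node1" and "NoDe1" as the same
// node, matching how the state store already indexes node names.
func nodeNamesEqual(a, b string) bool {
	return strings.EqualFold(a, b)
}

func main() {
	fmt.Println("node1" == "NoDe1")               // false: direct byte comparison (old behavior)
	fmt.Println(nodeNamesEqual("node1", "NoDe1")) // true: case-insensitive (new behavior)
}
```

Where a node name is used as a Go map key rather than compared directly (for example the known-members map in the reconcile loop), the diff normalizes it with strings.ToLower instead; see the sketch after the state store event changes further down.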
|
@ -2,19 +2,21 @@ package agent
|
|||
|
||||
import (
|
||||
"fmt"
|
||||
"github.com/hashicorp/consul/api"
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"net/url"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/hashicorp/consul/agent/structs"
|
||||
"github.com/hashicorp/consul/sdk/testutil/retry"
|
||||
"github.com/hashicorp/consul/testrpc"
|
||||
"github.com/hashicorp/consul/api"
|
||||
|
||||
"github.com/hashicorp/serf/coordinate"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
"github.com/hashicorp/consul/agent/structs"
|
||||
"github.com/hashicorp/consul/sdk/testutil/retry"
|
||||
"github.com/hashicorp/consul/testrpc"
|
||||
)
|
||||
|
||||
func TestCatalogRegister_Service_InvalidAddress(t *testing.T) {
|
||||
|
@ -412,42 +414,28 @@ func TestCatalogNodes_DistanceSort(t *testing.T) {
|
|||
Address: "127.0.0.1",
|
||||
}
|
||||
var out struct{}
|
||||
if err := a.RPC("Catalog.Register", args, &out); err != nil {
|
||||
t.Fatalf("err: %v", err)
|
||||
}
|
||||
require.NoError(t, a.RPC("Catalog.Register", args, &out))
|
||||
|
||||
args = &structs.RegisterRequest{
|
||||
Datacenter: "dc1",
|
||||
Node: "bar",
|
||||
Address: "127.0.0.2",
|
||||
}
|
||||
if err := a.RPC("Catalog.Register", args, &out); err != nil {
|
||||
t.Fatalf("err: %v", err)
|
||||
}
|
||||
require.NoError(t, a.RPC("Catalog.Register", args, &out))
|
||||
|
||||
// Nobody has coordinates set so this will still return them in the
|
||||
// order they are indexed.
|
||||
req, _ := http.NewRequest("GET", "/v1/catalog/nodes?dc=dc1&near=foo", nil)
|
||||
resp := httptest.NewRecorder()
|
||||
obj, err := a.srv.CatalogNodes(resp, req)
|
||||
if err != nil {
|
||||
t.Fatalf("err: %v", err)
|
||||
}
|
||||
require.NoError(t, err)
|
||||
|
||||
assertIndex(t, resp)
|
||||
nodes := obj.(structs.Nodes)
|
||||
if len(nodes) != 3 {
|
||||
t.Fatalf("bad: %v", obj)
|
||||
}
|
||||
if nodes[0].Node != "bar" {
|
||||
t.Fatalf("bad: %v", nodes)
|
||||
}
|
||||
if nodes[1].Node != "foo" {
|
||||
t.Fatalf("bad: %v", nodes)
|
||||
}
|
||||
if nodes[2].Node != a.Config.NodeName {
|
||||
t.Fatalf("bad: %v", nodes)
|
||||
}
|
||||
require.Len(t, nodes, 3)
|
||||
require.Equal(t, "bar", nodes[0].Node)
|
||||
require.Equal(t, "foo", nodes[1].Node)
|
||||
require.Equal(t, a.Config.NodeName, nodes[2].Node)
|
||||
|
||||
// Send an update for the node and wait for it to get applied.
|
||||
arg := structs.CoordinateUpdateRequest{
|
||||
|
@ -455,33 +443,21 @@ func TestCatalogNodes_DistanceSort(t *testing.T) {
|
|||
Node: "foo",
|
||||
Coord: coordinate.NewCoordinate(coordinate.DefaultConfig()),
|
||||
}
|
||||
if err := a.RPC("Coordinate.Update", &arg, &out); err != nil {
|
||||
t.Fatalf("err: %v", err)
|
||||
}
|
||||
require.NoError(t, a.RPC("Coordinate.Update", &arg, &out))
|
||||
time.Sleep(300 * time.Millisecond)
|
||||
|
||||
// Query again and now foo should have moved to the front of the line.
|
||||
req, _ = http.NewRequest("GET", "/v1/catalog/nodes?dc=dc1&near=foo", nil)
|
||||
resp = httptest.NewRecorder()
|
||||
obj, err = a.srv.CatalogNodes(resp, req)
|
||||
if err != nil {
|
||||
t.Fatalf("err: %v", err)
|
||||
}
|
||||
require.NoError(t, err)
|
||||
|
||||
assertIndex(t, resp)
|
||||
nodes = obj.(structs.Nodes)
|
||||
if len(nodes) != 3 {
|
||||
t.Fatalf("bad: %v", obj)
|
||||
}
|
||||
if nodes[0].Node != "foo" {
|
||||
t.Fatalf("bad: %v", nodes)
|
||||
}
|
||||
if nodes[1].Node != "bar" {
|
||||
t.Fatalf("bad: %v", nodes)
|
||||
}
|
||||
if nodes[2].Node != a.Config.NodeName {
|
||||
t.Fatalf("bad: %v", nodes)
|
||||
}
|
||||
require.Len(t, nodes, 3)
|
||||
require.Equal(t, "foo", nodes[0].Node)
|
||||
require.Equal(t, "bar", nodes[1].Node)
|
||||
require.Equal(t, a.Config.NodeName, nodes[2].Node)
|
||||
}
|
||||
|
||||
func TestCatalogServices(t *testing.T) {
|
||||
|
|
|
@ -2,6 +2,7 @@ package checks

import (
"fmt"
"strings"
"sync"
"time"

@ -246,7 +247,7 @@ func (c *CheckAlias) processChecks(checks []*structs.HealthCheck, CheckIfService
msg := "No checks found."
serviceFound := false
for _, chk := range checks {
if c.Node != "" && c.Node != chk.Node {
if c.Node != "" && !strings.EqualFold(c.Node, chk.Node) {
continue
}
serviceMatch := c.ServiceID.Matches(chk.CompoundServiceID())
@ -454,46 +454,55 @@ func TestCheckAlias_remoteNodeOnlyPassing(t *testing.T) {
|
|||
func TestCheckAlias_remoteNodeOnlyCritical(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
notify := newMockAliasNotify()
|
||||
chkID := structs.NewCheckID(types.CheckID("foo"), nil)
|
||||
rpc := &mockRPC{}
|
||||
chk := &CheckAlias{
|
||||
Node: "remote",
|
||||
CheckID: chkID,
|
||||
Notify: notify,
|
||||
RPC: rpc,
|
||||
run := func(t *testing.T, responseNodeName string) {
|
||||
notify := newMockAliasNotify()
|
||||
chkID := structs.NewCheckID(types.CheckID("foo"), nil)
|
||||
rpc := &mockRPC{}
|
||||
chk := &CheckAlias{
|
||||
Node: "remote",
|
||||
CheckID: chkID,
|
||||
Notify: notify,
|
||||
RPC: rpc,
|
||||
}
|
||||
|
||||
rpc.AddReply("Health.NodeChecks", structs.IndexedHealthChecks{
|
||||
HealthChecks: []*structs.HealthCheck{
|
||||
// Should ignore non-matching node
|
||||
{
|
||||
Node: "A",
|
||||
ServiceID: "web",
|
||||
Status: api.HealthCritical,
|
||||
},
|
||||
|
||||
// Should ignore any services
|
||||
{
|
||||
Node: responseNodeName,
|
||||
ServiceID: "db",
|
||||
Status: api.HealthCritical,
|
||||
},
|
||||
|
||||
// Match
|
||||
{
|
||||
Node: responseNodeName,
|
||||
Status: api.HealthCritical,
|
||||
},
|
||||
},
|
||||
})
|
||||
|
||||
chk.Start()
|
||||
defer chk.Stop()
|
||||
retry.Run(t, func(r *retry.R) {
|
||||
if got, want := notify.State(chkID), api.HealthCritical; got != want {
|
||||
r.Fatalf("got state %q want %q", got, want)
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
rpc.AddReply("Health.NodeChecks", structs.IndexedHealthChecks{
|
||||
HealthChecks: []*structs.HealthCheck{
|
||||
// Should ignore non-matching node
|
||||
{
|
||||
Node: "A",
|
||||
ServiceID: "web",
|
||||
Status: api.HealthCritical,
|
||||
},
|
||||
|
||||
// Should ignore any services
|
||||
{
|
||||
Node: "remote",
|
||||
ServiceID: "db",
|
||||
Status: api.HealthCritical,
|
||||
},
|
||||
|
||||
// Match
|
||||
{
|
||||
Node: "remote",
|
||||
Status: api.HealthCritical,
|
||||
},
|
||||
},
|
||||
t.Run("same case node name", func(t *testing.T) {
|
||||
run(t, "remote")
|
||||
})
|
||||
|
||||
chk.Start()
|
||||
defer chk.Stop()
|
||||
retry.Run(t, func(r *retry.R) {
|
||||
if got, want := notify.State(chkID), api.HealthCritical; got != want {
|
||||
r.Fatalf("got state %q want %q", got, want)
|
||||
}
|
||||
t.Run("lowercase node name", func(t *testing.T) {
|
||||
run(t, "ReMoTe")
|
||||
})
|
||||
}
|
||||
|
||||
|
|
|
@ -3,6 +3,7 @@ package consul
import (
"fmt"
"sort"
"strings"
"time"

"github.com/armon/go-metrics"

@ -285,7 +286,7 @@ func vetRegisterWithACL(
// note in state_store.go to ban this down there in Consul 0.8,
// but it's good to leave this here because it's required for
// correctness wrt. ACLs.
if check.Node != subj.Node {
if !strings.EqualFold(check.Node, subj.Node) {
return fmt.Errorf("Node '%s' for check '%s' doesn't match register request node '%s'",
check.Node, check.CheckID, subj.Node)
}
@ -19,6 +19,7 @@ import (
|
|||
"github.com/hashicorp/consul/api"
|
||||
"github.com/hashicorp/consul/lib"
|
||||
"github.com/hashicorp/consul/lib/stringslice"
|
||||
"github.com/hashicorp/consul/sdk/testutil"
|
||||
"github.com/hashicorp/consul/sdk/testutil/retry"
|
||||
"github.com/hashicorp/consul/testrpc"
|
||||
"github.com/hashicorp/consul/types"
|
||||
|
@ -3431,42 +3432,49 @@ service "gateway" {
|
|||
}
|
||||
|
||||
func TestVetRegisterWithACL(t *testing.T) {
|
||||
t.Parallel()
|
||||
appendAuthz := func(t *testing.T, defaultAuthz acl.Authorizer, rules string) acl.Authorizer {
|
||||
policy, err := acl.NewPolicyFromSource(rules, acl.SyntaxCurrent, nil, nil)
|
||||
require.NoError(t, err)
|
||||
|
||||
authz, err := acl.NewPolicyAuthorizerWithDefaults(defaultAuthz, []*acl.Policy{policy}, nil)
|
||||
require.NoError(t, err)
|
||||
|
||||
return authz
|
||||
}
|
||||
|
||||
t.Run("With an 'allow all' authorizer the update should be allowed", func(t *testing.T) {
|
||||
args := &structs.RegisterRequest{
|
||||
Node: "nope",
|
||||
Address: "127.0.0.1",
|
||||
}
|
||||
|
||||
// With an "allow all" authorizer the update should be allowed.
|
||||
require.NoError(t, vetRegisterWithACL(acl.ManageAll(), args, nil))
|
||||
})
|
||||
|
||||
var perms acl.Authorizer = acl.DenyAll()
|
||||
|
||||
args := &structs.RegisterRequest{
|
||||
Node: "nope",
|
||||
Address: "127.0.0.1",
|
||||
}
|
||||
|
||||
// With an "allow all" authorizer the update should be allowed.
|
||||
if err := vetRegisterWithACL(acl.ManageAll(), args, nil); err != nil {
|
||||
t.Fatalf("err: %v", err)
|
||||
}
|
||||
|
||||
// Create a basic node policy.
|
||||
policy, err := acl.NewPolicyFromSource(`
|
||||
node "node" {
|
||||
policy = "write"
|
||||
}
|
||||
`, acl.SyntaxLegacy, nil, nil)
|
||||
if err != nil {
|
||||
t.Fatalf("err %v", err)
|
||||
}
|
||||
perms, err := acl.NewPolicyAuthorizerWithDefaults(acl.DenyAll(), []*acl.Policy{policy}, nil)
|
||||
if err != nil {
|
||||
t.Fatalf("err: %v", err)
|
||||
}
|
||||
perms = appendAuthz(t, perms, `
|
||||
node "node" {
|
||||
policy = "write"
|
||||
} `)
|
||||
|
||||
// With that policy, the update should now be blocked for node reasons.
|
||||
err = vetRegisterWithACL(perms, args, nil)
|
||||
if !acl.IsErrPermissionDenied(err) {
|
||||
t.Fatalf("bad: %v", err)
|
||||
}
|
||||
err := vetRegisterWithACL(perms, args, nil)
|
||||
require.True(t, acl.IsErrPermissionDenied(err))
|
||||
|
||||
// Now use a permitted node name.
|
||||
args.Node = "node"
|
||||
if err := vetRegisterWithACL(perms, args, nil); err != nil {
|
||||
t.Fatalf("err: %v", err)
|
||||
args = &structs.RegisterRequest{
|
||||
Node: "node",
|
||||
Address: "127.0.0.1",
|
||||
}
|
||||
require.NoError(t, vetRegisterWithACL(perms, args, nil))
|
||||
|
||||
// Build some node info that matches what we have now.
|
||||
ns := &structs.NodeServices{
|
||||
|
@ -3478,183 +3486,220 @@ node "node" {
|
|||
}
|
||||
|
||||
// Try to register a service, which should be blocked.
|
||||
args.Service = &structs.NodeService{
|
||||
Service: "service",
|
||||
ID: "my-id",
|
||||
args = &structs.RegisterRequest{
|
||||
Node: "node",
|
||||
Address: "127.0.0.1",
|
||||
Service: &structs.NodeService{
|
||||
Service: "service",
|
||||
ID: "my-id",
|
||||
},
|
||||
}
|
||||
err = vetRegisterWithACL(perms, args, ns)
|
||||
if !acl.IsErrPermissionDenied(err) {
|
||||
t.Fatalf("bad: %v", err)
|
||||
}
|
||||
require.True(t, acl.IsErrPermissionDenied(err))
|
||||
|
||||
// Chain on a basic service policy.
|
||||
policy, err = acl.NewPolicyFromSource(`
|
||||
service "service" {
|
||||
policy = "write"
|
||||
}
|
||||
`, acl.SyntaxLegacy, nil, nil)
|
||||
if err != nil {
|
||||
t.Fatalf("err %v", err)
|
||||
}
|
||||
perms, err = acl.NewPolicyAuthorizerWithDefaults(perms, []*acl.Policy{policy}, nil)
|
||||
if err != nil {
|
||||
t.Fatalf("err: %v", err)
|
||||
}
|
||||
perms = appendAuthz(t, perms, `
|
||||
service "service" {
|
||||
policy = "write"
|
||||
} `)
|
||||
|
||||
// With the service ACL, the update should go through.
|
||||
if err := vetRegisterWithACL(perms, args, ns); err != nil {
|
||||
t.Fatalf("err: %v", err)
|
||||
}
|
||||
require.NoError(t, vetRegisterWithACL(perms, args, ns))
|
||||
|
||||
// Add an existing service that they are clobbering and aren't allowed
|
||||
// to write to.
|
||||
ns.Services["my-id"] = &structs.NodeService{
|
||||
Service: "other",
|
||||
ID: "my-id",
|
||||
ns = &structs.NodeServices{
|
||||
Node: &structs.Node{
|
||||
Node: "node",
|
||||
Address: "127.0.0.1",
|
||||
},
|
||||
Services: map[string]*structs.NodeService{
|
||||
"my-id": {
|
||||
Service: "other",
|
||||
ID: "my-id",
|
||||
},
|
||||
},
|
||||
}
|
||||
err = vetRegisterWithACL(perms, args, ns)
|
||||
if !acl.IsErrPermissionDenied(err) {
|
||||
t.Fatalf("bad: %v", err)
|
||||
}
|
||||
require.True(t, acl.IsErrPermissionDenied(err))
|
||||
|
||||
// Chain on a policy that allows them to write to the other service.
|
||||
policy, err = acl.NewPolicyFromSource(`
|
||||
service "other" {
|
||||
policy = "write"
|
||||
}
|
||||
`, acl.SyntaxLegacy, nil, nil)
|
||||
if err != nil {
|
||||
t.Fatalf("err %v", err)
|
||||
}
|
||||
perms, err = acl.NewPolicyAuthorizerWithDefaults(perms, []*acl.Policy{policy}, nil)
|
||||
if err != nil {
|
||||
t.Fatalf("err: %v", err)
|
||||
}
|
||||
perms = appendAuthz(t, perms, `
|
||||
service "other" {
|
||||
policy = "write"
|
||||
} `)
|
||||
|
||||
// Now it should go through.
|
||||
if err := vetRegisterWithACL(perms, args, ns); err != nil {
|
||||
t.Fatalf("err: %v", err)
|
||||
}
|
||||
require.NoError(t, vetRegisterWithACL(perms, args, ns))
|
||||
|
||||
// Try creating the node and the service at once by having no existing
|
||||
// node record. This should be ok since we have node and service
|
||||
// permissions.
|
||||
if err := vetRegisterWithACL(perms, args, nil); err != nil {
|
||||
t.Fatalf("err: %v", err)
|
||||
}
|
||||
require.NoError(t, vetRegisterWithACL(perms, args, nil))
|
||||
|
||||
// Add a node-level check to the member, which should be rejected.
|
||||
args.Check = &structs.HealthCheck{
|
||||
Node: "node",
|
||||
args = &structs.RegisterRequest{
|
||||
Node: "node",
|
||||
Address: "127.0.0.1",
|
||||
Service: &structs.NodeService{
|
||||
Service: "service",
|
||||
ID: "my-id",
|
||||
},
|
||||
Check: &structs.HealthCheck{
|
||||
Node: "node",
|
||||
},
|
||||
}
|
||||
err = vetRegisterWithACL(perms, args, ns)
|
||||
if err == nil || !strings.Contains(err.Error(), "check member must be nil") {
|
||||
t.Fatalf("bad: %v", err)
|
||||
}
|
||||
testutil.RequireErrorContains(t, err, "check member must be nil")
|
||||
|
||||
// Move the check into the slice, but give a bad node name.
|
||||
args.Check.Node = "nope"
|
||||
args.Checks = append(args.Checks, args.Check)
|
||||
args.Check = nil
|
||||
err = vetRegisterWithACL(perms, args, ns)
|
||||
if err == nil || !strings.Contains(err.Error(), "doesn't match register request node") {
|
||||
t.Fatalf("bad: %v", err)
|
||||
args = &structs.RegisterRequest{
|
||||
Node: "node",
|
||||
Address: "127.0.0.1",
|
||||
Service: &structs.NodeService{
|
||||
Service: "service",
|
||||
ID: "my-id",
|
||||
},
|
||||
Checks: []*structs.HealthCheck{
|
||||
{
|
||||
Node: "nope",
|
||||
},
|
||||
},
|
||||
}
|
||||
err = vetRegisterWithACL(perms, args, ns)
|
||||
testutil.RequireErrorContains(t, err, "doesn't match register request node")
|
||||
|
||||
// Fix the node name, which should now go through.
|
||||
args.Checks[0].Node = "node"
|
||||
if err := vetRegisterWithACL(perms, args, ns); err != nil {
|
||||
t.Fatalf("err: %v", err)
|
||||
args = &structs.RegisterRequest{
|
||||
Node: "node",
|
||||
Address: "127.0.0.1",
|
||||
Service: &structs.NodeService{
|
||||
Service: "service",
|
||||
ID: "my-id",
|
||||
},
|
||||
Checks: []*structs.HealthCheck{
|
||||
{
|
||||
Node: "node",
|
||||
},
|
||||
},
|
||||
}
|
||||
require.NoError(t, vetRegisterWithACL(perms, args, ns))
|
||||
|
||||
// Add a service-level check.
|
||||
args.Checks = append(args.Checks, &structs.HealthCheck{
|
||||
Node: "node",
|
||||
ServiceID: "my-id",
|
||||
})
|
||||
if err := vetRegisterWithACL(perms, args, ns); err != nil {
|
||||
t.Fatalf("err: %v", err)
|
||||
args = &structs.RegisterRequest{
|
||||
Node: "node",
|
||||
Address: "127.0.0.1",
|
||||
Service: &structs.NodeService{
|
||||
Service: "service",
|
||||
ID: "my-id",
|
||||
},
|
||||
Checks: []*structs.HealthCheck{
|
||||
{
|
||||
Node: "node",
|
||||
},
|
||||
{
|
||||
Node: "node",
|
||||
ServiceID: "my-id",
|
||||
},
|
||||
},
|
||||
}
|
||||
require.NoError(t, vetRegisterWithACL(perms, args, ns))
|
||||
|
||||
// Try creating everything at once. This should be ok since we have all
|
||||
// the permissions we need. It also makes sure that we can register a
|
||||
// new node, service, and associated checks.
|
||||
if err := vetRegisterWithACL(perms, args, nil); err != nil {
|
||||
t.Fatalf("err: %v", err)
|
||||
}
|
||||
require.NoError(t, vetRegisterWithACL(perms, args, nil))
|
||||
|
||||
// Nil out the service registration, which'll skip the special case
|
||||
// and force us to look at the ns data (it will look like we are
|
||||
// writing to the "other" service which also has "my-id").
|
||||
args.Service = nil
|
||||
if err := vetRegisterWithACL(perms, args, ns); err != nil {
|
||||
t.Fatalf("err: %v", err)
|
||||
args = &structs.RegisterRequest{
|
||||
Node: "node",
|
||||
Address: "127.0.0.1",
|
||||
Checks: []*structs.HealthCheck{
|
||||
{
|
||||
Node: "node",
|
||||
},
|
||||
{
|
||||
Node: "node",
|
||||
ServiceID: "my-id",
|
||||
},
|
||||
},
|
||||
}
|
||||
require.NoError(t, vetRegisterWithACL(perms, args, ns))
|
||||
|
||||
// Chain on a policy that forbids them to write to the other service.
|
||||
policy, err = acl.NewPolicyFromSource(`
|
||||
service "other" {
|
||||
policy = "deny"
|
||||
}
|
||||
`, acl.SyntaxLegacy, nil, nil)
|
||||
if err != nil {
|
||||
t.Fatalf("err %v", err)
|
||||
}
|
||||
perms, err = acl.NewPolicyAuthorizerWithDefaults(perms, []*acl.Policy{policy}, nil)
|
||||
if err != nil {
|
||||
t.Fatalf("err: %v", err)
|
||||
}
|
||||
perms = appendAuthz(t, perms, `
|
||||
service "other" {
|
||||
policy = "deny"
|
||||
} `)
|
||||
|
||||
// This should get rejected.
|
||||
err = vetRegisterWithACL(perms, args, ns)
|
||||
if !acl.IsErrPermissionDenied(err) {
|
||||
t.Fatalf("bad: %v", err)
|
||||
}
|
||||
require.True(t, acl.IsErrPermissionDenied(err))
|
||||
|
||||
// Change the existing service data to point to a service name they
|
||||
// car write to. This should go through.
|
||||
ns.Services["my-id"] = &structs.NodeService{
|
||||
Service: "service",
|
||||
ID: "my-id",
|
||||
}
|
||||
if err := vetRegisterWithACL(perms, args, ns); err != nil {
|
||||
t.Fatalf("err: %v", err)
|
||||
// can write to. This should go through.
|
||||
ns = &structs.NodeServices{
|
||||
Node: &structs.Node{
|
||||
Node: "node",
|
||||
Address: "127.0.0.1",
|
||||
},
|
||||
Services: map[string]*structs.NodeService{
|
||||
"my-id": {
|
||||
Service: "service",
|
||||
ID: "my-id",
|
||||
},
|
||||
},
|
||||
}
|
||||
require.NoError(t, vetRegisterWithACL(perms, args, ns))
|
||||
|
||||
// Chain on a policy that forbids them to write to the node.
|
||||
policy, err = acl.NewPolicyFromSource(`
|
||||
node "node" {
|
||||
policy = "deny"
|
||||
}
|
||||
`, acl.SyntaxLegacy, nil, nil)
|
||||
if err != nil {
|
||||
t.Fatalf("err %v", err)
|
||||
}
|
||||
perms, err = acl.NewPolicyAuthorizerWithDefaults(perms, []*acl.Policy{policy}, nil)
|
||||
if err != nil {
|
||||
t.Fatalf("err: %v", err)
|
||||
}
|
||||
perms = appendAuthz(t, perms, `
|
||||
node "node" {
|
||||
policy = "deny"
|
||||
} `)
|
||||
|
||||
// This should get rejected because there's a node-level check in here.
|
||||
err = vetRegisterWithACL(perms, args, ns)
|
||||
if !acl.IsErrPermissionDenied(err) {
|
||||
t.Fatalf("bad: %v", err)
|
||||
}
|
||||
require.True(t, acl.IsErrPermissionDenied(err))
|
||||
|
||||
// Change the node-level check into a service check, and then it should
|
||||
// go through.
|
||||
args.Checks[0].ServiceID = "my-id"
|
||||
if err := vetRegisterWithACL(perms, args, ns); err != nil {
|
||||
t.Fatalf("err: %v", err)
|
||||
args = &structs.RegisterRequest{
|
||||
Node: "node",
|
||||
Address: "127.0.0.1",
|
||||
Checks: []*structs.HealthCheck{
|
||||
{
|
||||
Node: "node",
|
||||
ServiceID: "my-id",
|
||||
},
|
||||
{
|
||||
Node: "node",
|
||||
ServiceID: "my-id",
|
||||
},
|
||||
},
|
||||
}
|
||||
require.NoError(t, vetRegisterWithACL(perms, args, ns))
|
||||
|
||||
// Finally, attempt to update the node part of the data and make sure
|
||||
// that gets rejected since they no longer have permissions.
|
||||
args.Address = "127.0.0.2"
|
||||
err = vetRegisterWithACL(perms, args, ns)
|
||||
if !acl.IsErrPermissionDenied(err) {
|
||||
t.Fatalf("bad: %v", err)
|
||||
args = &structs.RegisterRequest{
|
||||
Node: "node",
|
||||
Address: "127.0.0.2",
|
||||
Checks: []*structs.HealthCheck{
|
||||
{
|
||||
Node: "node",
|
||||
ServiceID: "my-id",
|
||||
},
|
||||
{
|
||||
Node: "node",
|
||||
ServiceID: "my-id",
|
||||
},
|
||||
},
|
||||
}
|
||||
err = vetRegisterWithACL(perms, args, ns)
|
||||
require.True(t, acl.IsErrPermissionDenied(err))
|
||||
}
|
||||
|
||||
func TestVetDeregisterWithACL(t *testing.T) {
|
||||
|
|
|
@ -6,6 +6,7 @@ import (
"net"
"reflect"
"strconv"
"strings"
"sync"
"sync/atomic"
"time"

@ -899,7 +900,7 @@ func (s *Server) reconcileReaped(known map[string]struct{}, nodeEntMeta *structs
}

// Check if this node is "known" by serf
if _, ok := known[check.Node]; ok {
if _, ok := known[strings.ToLower(check.Node)]; ok {
continue
}

@ -1204,7 +1205,7 @@ func (s *Server) handleDeregisterMember(reason string, member serf.Member, nodeE
// deregister us later.
//
// TODO(partitions): check partitions here too? server names should be unique in general though
if member.Name == s.config.NodeName {
if strings.EqualFold(member.Name, s.config.NodeName) {
s.logger.Warn("deregistering self should be done by follower",
"name", s.config.NodeName,
"partition", getSerfMemberEnterpriseMeta(member).PartitionOrDefault(),
@ -189,12 +189,8 @@ func TestLeader_LeftMember(t *testing.T) {
|
|||
// Should be registered
|
||||
retry.Run(t, func(r *retry.R) {
|
||||
_, node, err := state.GetNode(c1.config.NodeName, nil)
|
||||
if err != nil {
|
||||
r.Fatalf("err: %v", err)
|
||||
}
|
||||
if node == nil {
|
||||
r.Fatal("client not registered")
|
||||
}
|
||||
require.NoError(r, err)
|
||||
require.NotNil(r, node, "client not registered")
|
||||
})
|
||||
|
||||
// Node should leave
|
||||
|
@ -204,14 +200,11 @@ func TestLeader_LeftMember(t *testing.T) {
|
|||
// Should be deregistered
|
||||
retry.Run(t, func(r *retry.R) {
|
||||
_, node, err := state.GetNode(c1.config.NodeName, nil)
|
||||
if err != nil {
|
||||
r.Fatalf("err: %v", err)
|
||||
}
|
||||
if node != nil {
|
||||
r.Fatal("client still registered")
|
||||
}
|
||||
require.NoError(r, err)
|
||||
require.Nil(r, node, "client still registered")
|
||||
})
|
||||
}
|
||||
|
||||
func TestLeader_ReapMember(t *testing.T) {
|
||||
if testing.Short() {
|
||||
t.Skip("too slow for testing.Short")
|
||||
|
@ -239,12 +232,8 @@ func TestLeader_ReapMember(t *testing.T) {
|
|||
// Should be registered
|
||||
retry.Run(t, func(r *retry.R) {
|
||||
_, node, err := state.GetNode(c1.config.NodeName, nil)
|
||||
if err != nil {
|
||||
r.Fatalf("err: %v", err)
|
||||
}
|
||||
if node == nil {
|
||||
r.Fatal("client not registered")
|
||||
}
|
||||
require.NoError(r, err)
|
||||
require.NotNil(r, node, "client not registered")
|
||||
})
|
||||
|
||||
// Simulate a node reaping
|
||||
|
@ -264,9 +253,7 @@ func TestLeader_ReapMember(t *testing.T) {
|
|||
reaped := false
|
||||
for start := time.Now(); time.Since(start) < 5*time.Second; {
|
||||
_, node, err := state.GetNode(c1.config.NodeName, nil)
|
||||
if err != nil {
|
||||
t.Fatalf("err: %v", err)
|
||||
}
|
||||
require.NoError(t, err)
|
||||
if node == nil {
|
||||
reaped = true
|
||||
break
|
||||
|
@ -277,6 +264,85 @@ func TestLeader_ReapMember(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
func TestLeader_ReapOrLeftMember_IgnoreSelf(t *testing.T) {
|
||||
if testing.Short() {
|
||||
t.Skip("too slow for testing.Short")
|
||||
}
|
||||
|
||||
t.Parallel()
|
||||
|
||||
run := func(t *testing.T, status serf.MemberStatus, nameFn func(string) string) {
|
||||
dir1, s1 := testServerWithConfig(t, func(c *Config) {
|
||||
c.PrimaryDatacenter = "dc1"
|
||||
c.ACLsEnabled = true
|
||||
c.ACLInitialManagementToken = "root"
|
||||
c.ACLResolverSettings.ACLDefaultPolicy = "deny"
|
||||
})
|
||||
defer os.RemoveAll(dir1)
|
||||
defer s1.Shutdown()
|
||||
|
||||
nodeName := s1.config.NodeName
|
||||
if nameFn != nil {
|
||||
nodeName = nameFn(nodeName)
|
||||
}
|
||||
|
||||
state := s1.fsm.State()
|
||||
|
||||
// Should be registered
|
||||
retry.Run(t, func(r *retry.R) {
|
||||
_, node, err := state.GetNode(nodeName, nil)
|
||||
require.NoError(r, err)
|
||||
require.NotNil(r, node, "server not registered")
|
||||
})
|
||||
|
||||
// Simulate THIS node reaping or leaving
|
||||
mems := s1.LANMembersInAgentPartition()
|
||||
var s1mem serf.Member
|
||||
for _, m := range mems {
|
||||
if strings.EqualFold(m.Name, nodeName) {
|
||||
s1mem = m
|
||||
s1mem.Status = status
|
||||
s1mem.Name = nodeName
|
||||
break
|
||||
}
|
||||
}
|
||||
s1.reconcileCh <- s1mem
|
||||
|
||||
// Should NOT be deregistered; we have to poll quickly here because
|
||||
// anti-entropy will put it back if it did get deleted.
|
||||
reaped := false
|
||||
for start := time.Now(); time.Since(start) < 5*time.Second; {
|
||||
_, node, err := state.GetNode(nodeName, nil)
|
||||
require.NoError(t, err)
|
||||
if node == nil {
|
||||
reaped = true
|
||||
break
|
||||
}
|
||||
}
|
||||
if reaped {
|
||||
t.Fatalf("server should still be registered")
|
||||
}
|
||||
}
|
||||
|
||||
t.Run("original name", func(t *testing.T) {
|
||||
t.Run("left", func(t *testing.T) {
|
||||
run(t, serf.StatusLeft, nil)
|
||||
})
|
||||
t.Run("reap", func(t *testing.T) {
|
||||
run(t, StatusReap, nil)
|
||||
})
|
||||
})
|
||||
|
||||
t.Run("uppercased name", func(t *testing.T) {
|
||||
t.Run("left", func(t *testing.T) {
|
||||
run(t, serf.StatusLeft, strings.ToUpper)
|
||||
})
|
||||
t.Run("reap", func(t *testing.T) {
|
||||
run(t, StatusReap, strings.ToUpper)
|
||||
})
|
||||
})
|
||||
}
|
||||
|
||||
func TestLeader_CheckServersMeta(t *testing.T) {
|
||||
if testing.Short() {
|
||||
t.Skip("too slow for testing.Short")
|
||||
|
|
|
@ -2,6 +2,7 @@ package consul

import (
"fmt"
"strings"
"sync"

"github.com/hashicorp/go-version"

@ -38,7 +39,7 @@ func (md *lanMergeDelegate) NotifyMerge(members []*serf.Member) error {
nodeID := types.NodeID(rawID)

// See if there's another node that conflicts with us.
if (nodeID == md.nodeID) && (m.Name != md.nodeName) {
if (nodeID == md.nodeID) && !strings.EqualFold(m.Name, md.nodeName) {
return fmt.Errorf("Member '%s' has conflicting node ID '%s' with this agent's ID",
m.Name, nodeID)
}
@ -58,6 +58,30 @@ func TestMerge_LAN(t *testing.T) {
|
|||
},
|
||||
expect: "wrong datacenter",
|
||||
},
|
||||
"node ID conflict with delegate's ID but same node name with same casing": {
|
||||
members: []*serf.Member{
|
||||
makeTestNode(t, testMember{
|
||||
dc: "dc1",
|
||||
name: "node0",
|
||||
id: thisNodeID,
|
||||
server: true,
|
||||
build: "0.7.5",
|
||||
}),
|
||||
},
|
||||
expect: "",
|
||||
},
|
||||
"node ID conflict with delegate's ID but same node name with different casing": {
|
||||
members: []*serf.Member{
|
||||
makeTestNode(t, testMember{
|
||||
dc: "dc1",
|
||||
name: "NoDe0",
|
||||
id: thisNodeID,
|
||||
server: true,
|
||||
build: "0.7.5",
|
||||
}),
|
||||
},
|
||||
expect: "",
|
||||
},
|
||||
"node ID conflict with delegate's ID": {
|
||||
members: []*serf.Member{
|
||||
makeTestNode(t, testMember{
|
||||
|
|
|
@ -439,7 +439,7 @@ func (p *PreparedQuery) Execute(args *structs.PreparedQueryExecuteRequest,
// position 0, provided the results are from the same datacenter.
if qs.Node != "" && reply.Datacenter == qs.Datacenter {
for i, node := range reply.Nodes {
if node.Node.Node == qs.Node {
if strings.EqualFold(node.Node.Node, qs.Node) {
reply.Nodes[0], reply.Nodes[i] = reply.Nodes[i], reply.Nodes[0]
break
}
@ -5,6 +5,7 @@ package consul

import (
"fmt"
"strings"
"time"

"github.com/armon/go-metrics"

@ -138,10 +139,11 @@ func (s *Server) reconcile() (err error) {
members := s.serfLAN.Members()
knownMembers := make(map[string]struct{})
for _, member := range members {
memberName := strings.ToLower(member.Name)
if err := s.reconcileMember(member); err != nil {
return err
}
knownMembers[member.Name] = struct{}{}
knownMembers[memberName] = struct{}{}
}

// Reconcile any members that have been reaped while we were not the
@ -136,7 +136,7 @@ func (s *Store) ensureCheckIfNodeMatches(
nodePartition string,
check *structs.HealthCheck,
) error {
if check.Node != node || !structs.EqualPartitions(nodePartition, check.PartitionOrDefault()) {
if !strings.EqualFold(check.Node, node) || !structs.EqualPartitions(nodePartition, check.PartitionOrDefault()) {
return fmt.Errorf("check node %q does not match node %q",
printNodeName(check.Node, check.PartitionOrDefault()),
printNodeName(node, nodePartition),

@ -330,7 +330,7 @@ func (s *Store) ensureNodeTxn(tx WriteTxn, idx uint64, preserveIndexes bool, nod
}
if existing != nil {
n = existing
if n.Node != node.Node {
if !strings.EqualFold(n.Node, node.Node) {
// Lets first get all nodes and check whether name do match, we do not allow clash on nodes without ID
dupNameError := ensureNoNodeWithSimilarNameTxn(tx, node, false)
if dupNameError != nil {
@ -105,7 +105,7 @@ type nodeServiceTuple struct {

func newNodeServiceTupleFromServiceNode(sn *structs.ServiceNode) nodeServiceTuple {
return nodeServiceTuple{
Node: sn.Node,
Node: strings.ToLower(sn.Node),
ServiceID: sn.ServiceID,
EntMeta: sn.EnterpriseMeta,
}

@ -113,7 +113,7 @@ func newNodeServiceTupleFromServiceNode(sn *structs.ServiceNode) nodeServiceTupl

func newNodeServiceTupleFromServiceHealthCheck(hc *structs.HealthCheck) nodeServiceTuple {
return nodeServiceTuple{
Node: hc.Node,
Node: strings.ToLower(hc.Node),
ServiceID: hc.ServiceID,
EntMeta: hc.EnterpriseMeta,
}
@ -3,22 +3,29 @@

package state

import "github.com/hashicorp/consul/agent/structs"
import (
"strings"

"github.com/hashicorp/consul/agent/structs"
)

func (nst nodeServiceTuple) nodeTuple() nodeTuple {
return nodeTuple{Node: nst.Node, Partition: ""}
return nodeTuple{
Node: strings.ToLower(nst.Node),
Partition: "",
}
}

func newNodeTupleFromNode(node *structs.Node) nodeTuple {
return nodeTuple{
Node: node.Node,
Node: strings.ToLower(node.Node),
Partition: "",
}
}

func newNodeTupleFromHealthCheck(hc *structs.HealthCheck) nodeTuple {
return nodeTuple{
Node: hc.Node,
Node: strings.ToLower(hc.Node),
Partition: "",
}
}
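The nodeTuple constructors above (and the knownMembers map in the reconcile loop earlier in the diff) lowercase node names before using them as map keys, because Go map lookups compare keys byte-for-byte. A small illustrative sketch of that pattern, using a throwaway map rather than the real state store index:

```go
package main

import (
	"fmt"
	"strings"
)

func main() {
	// Hypothetical stand-in for an index keyed by node name. Normalizing on
	// both insert and lookup makes the index behave case insensitively.
	known := map[string]struct{}{}
	known[strings.ToLower("Node1")] = struct{}{}

	_, okRaw := known["Node1"]                   // false: raw key differs from the stored "node1"
	_, okNorm := known[strings.ToLower("nOdE1")] // true: normalized lookup matches
	fmt.Println(okRaw, okNorm)
}
```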
@ -471,8 +471,11 @@ func TestStateStore_EnsureRegistration_Restore(t *testing.T) {
|
|||
}
|
||||
|
||||
// Add in a top-level check.
|
||||
//
|
||||
// Verify that node name references in checks are case-insensitive during
|
||||
// restore.
|
||||
req.Check = &structs.HealthCheck{
|
||||
Node: nodeName,
|
||||
Node: strings.ToUpper(nodeName),
|
||||
CheckID: "check1",
|
||||
Name: "check",
|
||||
RaftIndex: structs.RaftIndex{
|
||||
|
@ -499,7 +502,7 @@ func TestStateStore_EnsureRegistration_Restore(t *testing.T) {
|
|||
t.Fatalf("bad: %#v", out)
|
||||
}
|
||||
c := out[0]
|
||||
if c.Node != nodeName || c.CheckID != "check1" || c.Name != "check" ||
|
||||
if c.Node != strings.ToUpper(nodeName) || c.CheckID != "check1" || c.Name != "check" ||
|
||||
c.CreateIndex != 3 || c.ModifyIndex != 3 {
|
||||
t.Fatalf("bad check returned: %#v", c)
|
||||
}
|
||||
|
@ -545,7 +548,7 @@ func TestStateStore_EnsureRegistration_Restore(t *testing.T) {
|
|||
t.Fatalf("bad: %#v", out)
|
||||
}
|
||||
c1 := out[0]
|
||||
if c1.Node != nodeName || c1.CheckID != "check1" || c1.Name != "check" ||
|
||||
if c1.Node != strings.ToUpper(nodeName) || c1.CheckID != "check1" || c1.Name != "check" ||
|
||||
c1.CreateIndex != 3 || c1.ModifyIndex != 3 {
|
||||
t.Fatalf("bad check returned, should not be modified: %#v", c1)
|
||||
}
|
||||
|
|
|
@ -3,6 +3,7 @@ package consul
import (
"runtime"
"strconv"
"strings"

"github.com/hashicorp/go-version"
"github.com/hashicorp/serf/serf"

@ -161,7 +162,7 @@ func (c *Client) CheckServers(datacenter string, fn func(*metadata.Server) bool)

func isSerfMember(s *serf.Serf, nodeName string) bool {
for _, m := range s.Members() {
if m.Name == nodeName {
if strings.EqualFold(m.Name, nodeName) {
return true
}
}
@ -452,7 +452,7 @@ func (r *RegisterRequest) ChangesNode(node *Node) bool {

// Check if any of the node-level fields are being changed.
if r.ID != node.ID ||
r.Node != node.Node ||
!strings.EqualFold(r.Node, node.Node) ||
r.PartitionOrDefault() != node.PartitionOrDefault() ||
r.Address != node.Address ||
r.Datacenter != node.Datacenter ||

@ -796,7 +796,7 @@ type Nodes []*Node
// RaftIndex fields.
func (n *Node) IsSame(other *Node) bool {
return n.ID == other.ID &&
n.Node == other.Node &&
strings.EqualFold(n.Node, other.Node) &&
n.PartitionOrDefault() == other.PartitionOrDefault() &&
n.Address == other.Address &&
n.Datacenter == other.Datacenter &&

@ -1431,7 +1431,7 @@ func (s *ServiceNode) IsSameService(other *ServiceNode) bool {
// TaggedAddresses map[string]string
// NodeMeta map[string]string
if s.ID != other.ID ||
s.Node != other.Node ||
!strings.EqualFold(s.Node, other.Node) ||
s.ServiceKind != other.ServiceKind ||
s.ServiceID != other.ServiceID ||
s.ServiceName != other.ServiceName ||

@ -1675,7 +1675,7 @@ func (t *HealthCheckDefinition) UnmarshalJSON(data []byte) (err error) {
// useful for seeing if an update would be idempotent for all the functional
// parts of the structure.
func (c *HealthCheck) IsSame(other *HealthCheck) bool {
if c.Node != other.Node ||
if !strings.EqualFold(c.Node, other.Node) ||
c.CheckID != other.CheckID ||
c.Name != other.Name ||
c.Status != other.Status ||
@ -73,16 +73,6 @@ func TestStructs_Implements(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestStructs_RegisterRequest_ChangesNode(t *testing.T) {
|
||||
req := &RegisterRequest{
|
||||
ID: types.NodeID("40e4a748-2192-161a-0510-9bf59fe950b5"),
|
||||
Node: "test",
|
||||
Address: "127.0.0.1",
|
||||
Datacenter: "dc1",
|
||||
TaggedAddresses: make(map[string]string),
|
||||
NodeMeta: map[string]string{
|
||||
"role": "server",
|
||||
},
|
||||
}
|
||||
|
||||
node := &Node{
|
||||
ID: types.NodeID("40e4a748-2192-161a-0510-9bf59fe950b5"),
|
||||
|
@ -95,41 +85,104 @@ func TestStructs_RegisterRequest_ChangesNode(t *testing.T) {
|
|||
},
|
||||
}
|
||||
|
||||
check := func(twiddle, restore func()) {
|
||||
if req.ChangesNode(node) {
|
||||
t.Fatalf("should not change")
|
||||
}
|
||||
|
||||
twiddle()
|
||||
if !req.ChangesNode(node) {
|
||||
t.Fatalf("should change")
|
||||
}
|
||||
|
||||
req.SkipNodeUpdate = true
|
||||
if req.ChangesNode(node) {
|
||||
t.Fatalf("should skip")
|
||||
}
|
||||
|
||||
req.SkipNodeUpdate = false
|
||||
if !req.ChangesNode(node) {
|
||||
t.Fatalf("should change")
|
||||
}
|
||||
|
||||
restore()
|
||||
if req.ChangesNode(node) {
|
||||
t.Fatalf("should not change")
|
||||
}
|
||||
type testcase struct {
|
||||
name string
|
||||
setup func(*RegisterRequest)
|
||||
expect bool
|
||||
}
|
||||
|
||||
check(func() { req.ID = "nope" }, func() { req.ID = types.NodeID("40e4a748-2192-161a-0510-9bf59fe950b5") })
|
||||
check(func() { req.Node = "nope" }, func() { req.Node = "test" })
|
||||
check(func() { req.Address = "127.0.0.2" }, func() { req.Address = "127.0.0.1" })
|
||||
check(func() { req.Datacenter = "dc2" }, func() { req.Datacenter = "dc1" })
|
||||
check(func() { req.TaggedAddresses["wan"] = "nope" }, func() { delete(req.TaggedAddresses, "wan") })
|
||||
check(func() { req.NodeMeta["invalid"] = "nope" }, func() { delete(req.NodeMeta, "invalid") })
|
||||
cases := []testcase{
|
||||
{
|
||||
name: "id",
|
||||
setup: func(r *RegisterRequest) {
|
||||
r.ID = "nope"
|
||||
},
|
||||
expect: true,
|
||||
},
|
||||
{
|
||||
name: "name",
|
||||
setup: func(r *RegisterRequest) {
|
||||
r.Node = "nope"
|
||||
},
|
||||
expect: true,
|
||||
},
|
||||
{
|
||||
name: "name casing",
|
||||
setup: func(r *RegisterRequest) {
|
||||
r.Node = "TeSt"
|
||||
},
|
||||
expect: false,
|
||||
},
|
||||
{
|
||||
name: "address",
|
||||
setup: func(r *RegisterRequest) {
|
||||
r.Address = "127.0.0.2"
|
||||
},
|
||||
expect: true,
|
||||
},
|
||||
{
|
||||
name: "dc",
|
||||
setup: func(r *RegisterRequest) {
|
||||
r.Datacenter = "dc2"
|
||||
},
|
||||
expect: true,
|
||||
},
|
||||
{
|
||||
name: "tagged addresses",
|
||||
setup: func(r *RegisterRequest) {
|
||||
r.TaggedAddresses["wan"] = "nope"
|
||||
},
|
||||
expect: true,
|
||||
},
|
||||
{
|
||||
name: "node meta",
|
||||
setup: func(r *RegisterRequest) {
|
||||
r.NodeMeta["invalid"] = "nope"
|
||||
},
|
||||
expect: true,
|
||||
},
|
||||
}
|
||||
|
||||
if !req.ChangesNode(nil) {
|
||||
t.Fatalf("should change")
|
||||
run := func(t *testing.T, tc testcase) {
|
||||
req := &RegisterRequest{
|
||||
ID: types.NodeID("40e4a748-2192-161a-0510-9bf59fe950b5"),
|
||||
Node: "test",
|
||||
Address: "127.0.0.1",
|
||||
Datacenter: "dc1",
|
||||
TaggedAddresses: make(map[string]string),
|
||||
NodeMeta: map[string]string{
|
||||
"role": "server",
|
||||
},
|
||||
}
|
||||
|
||||
if req.ChangesNode(node) {
|
||||
t.Fatalf("should not change")
|
||||
}
|
||||
|
||||
tc.setup(req)
|
||||
|
||||
if tc.expect {
|
||||
if !req.ChangesNode(node) {
|
||||
t.Fatalf("should change")
|
||||
}
|
||||
} else {
|
||||
if req.ChangesNode(node) {
|
||||
t.Fatalf("should not change")
|
||||
}
|
||||
}
|
||||
|
||||
t.Run("skip node update", func(t *testing.T) {
|
||||
req.SkipNodeUpdate = true
|
||||
if req.ChangesNode(node) {
|
||||
t.Fatalf("should skip")
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
for _, tc := range cases {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
run(t, tc)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -234,98 +287,228 @@ func TestNode_IsSame(t *testing.T) {
|
|||
ModifyIndex: 2,
|
||||
},
|
||||
}
|
||||
other := &Node{
|
||||
ID: id,
|
||||
Node: node,
|
||||
Datacenter: datacenter,
|
||||
Address: address,
|
||||
TaggedAddresses: make(map[string]string),
|
||||
Meta: make(map[string]string),
|
||||
RaftIndex: RaftIndex{
|
||||
CreateIndex: 1,
|
||||
ModifyIndex: 3,
|
||||
|
||||
type testcase struct {
|
||||
name string
|
||||
setup func(*Node)
|
||||
expect bool
|
||||
}
|
||||
cases := []testcase{
|
||||
{
|
||||
name: "id",
|
||||
setup: func(n *Node) {
|
||||
n.ID = types.NodeID("")
|
||||
},
|
||||
expect: false,
|
||||
},
|
||||
{
|
||||
name: "node",
|
||||
setup: func(n *Node) {
|
||||
n.Node = "other"
|
||||
},
|
||||
expect: false,
|
||||
},
|
||||
{
|
||||
name: "node casing",
|
||||
setup: func(n *Node) {
|
||||
n.Node = "MyNoDe1"
|
||||
},
|
||||
expect: true,
|
||||
},
|
||||
{
|
||||
name: "dc",
|
||||
setup: func(n *Node) {
|
||||
n.Datacenter = "dcX"
|
||||
},
|
||||
expect: false,
|
||||
},
|
||||
{
|
||||
name: "address",
|
||||
setup: func(n *Node) {
|
||||
n.Address = "127.0.0.1"
|
||||
},
|
||||
expect: false,
|
||||
},
|
||||
{
|
||||
name: "tagged addresses",
|
||||
setup: func(n *Node) {
|
||||
n.TaggedAddresses = map[string]string{"my": "address"}
|
||||
},
|
||||
expect: false,
|
||||
},
|
||||
{
|
||||
name: "meta",
|
||||
setup: func(n *Node) {
|
||||
n.Meta = map[string]string{"my": "meta"}
|
||||
},
|
||||
expect: false,
|
||||
},
|
||||
}
|
||||
check := func(twiddle, restore func()) {
|
||||
t.Helper()
|
||||
|
||||
run := func(t *testing.T, tc testcase) {
|
||||
other := &Node{
|
||||
ID: id,
|
||||
Node: node,
|
||||
Datacenter: datacenter,
|
||||
Address: address,
|
||||
TaggedAddresses: make(map[string]string),
|
||||
Meta: make(map[string]string),
|
||||
RaftIndex: RaftIndex{
|
||||
CreateIndex: 1,
|
||||
ModifyIndex: 3,
|
||||
},
|
||||
}
|
||||
|
||||
if !n.IsSame(other) || !other.IsSame(n) {
|
||||
t.Fatalf("should be the same")
|
||||
}
|
||||
|
||||
twiddle()
|
||||
if n.IsSame(other) || other.IsSame(n) {
|
||||
t.Fatalf("should be different, was %#v VS %#v", n, other)
|
||||
}
|
||||
tc.setup(other)
|
||||
|
||||
restore()
|
||||
if !n.IsSame(other) || !other.IsSame(n) {
|
||||
t.Fatalf("should be the same")
|
||||
if tc.expect {
|
||||
if !n.IsSame(other) || !other.IsSame(n) {
|
||||
t.Fatalf("should be the same")
|
||||
}
|
||||
} else {
|
||||
if n.IsSame(other) || other.IsSame(n) {
|
||||
t.Fatalf("should be different, was %#v VS %#v", n, other)
|
||||
}
|
||||
}
|
||||
}
|
||||
check(func() { other.ID = types.NodeID("") }, func() { other.ID = id })
|
||||
check(func() { other.Node = "other" }, func() { other.Node = node })
|
||||
check(func() { other.Datacenter = "dcX" }, func() { other.Datacenter = datacenter })
|
||||
check(func() { other.Address = "127.0.0.1" }, func() { other.Address = address })
|
||||
check(func() { other.TaggedAddresses = map[string]string{"my": "address"} }, func() { other.TaggedAddresses = map[string]string{} })
|
||||
check(func() { other.Meta = map[string]string{"my": "meta"} }, func() { other.Meta = map[string]string{} })
|
||||
|
||||
if !n.IsSame(other) {
|
||||
t.Fatalf("should be equal, was %#v VS %#v", n, other)
|
||||
for _, tc := range cases {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
run(t, tc)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestStructs_ServiceNode_IsSameService(t *testing.T) {
|
||||
sn := testServiceNode(t)
|
||||
node := "node1"
|
||||
serviceID := sn.ServiceID
|
||||
serviceAddress := sn.ServiceAddress
|
||||
serviceEnableTagOverride := sn.ServiceEnableTagOverride
|
||||
serviceMeta := make(map[string]string)
|
||||
for k, v := range sn.ServiceMeta {
|
||||
serviceMeta[k] = v
|
||||
const (
|
||||
nodeName = "node1"
|
||||
)
|
||||
|
||||
type testcase struct {
|
||||
name string
|
||||
setup func(*ServiceNode)
|
||||
expect bool
|
||||
}
|
||||
cases := []testcase{
|
||||
{
|
||||
name: "ServiceID",
|
||||
setup: func(sn *ServiceNode) {
|
||||
sn.ServiceID = "66fb695a-c782-472f-8d36-4f3edd754b37"
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "Node",
|
||||
setup: func(sn *ServiceNode) {
|
||||
sn.Node = "other"
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "Node casing",
|
||||
setup: func(sn *ServiceNode) {
|
||||
sn.Node = "NoDe1"
|
||||
},
|
||||
expect: true,
|
||||
},
|
||||
{
|
||||
name: "ServiceAddress",
|
||||
setup: func(sn *ServiceNode) {
|
||||
sn.ServiceAddress = "1.2.3.4"
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "ServiceEnableTagOverride",
|
||||
setup: func(sn *ServiceNode) {
|
||||
sn.ServiceEnableTagOverride = !sn.ServiceEnableTagOverride
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "ServiceKind",
|
||||
setup: func(sn *ServiceNode) {
|
||||
sn.ServiceKind = "newKind"
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "ServiceMeta",
|
||||
setup: func(sn *ServiceNode) {
|
||||
sn.ServiceMeta = map[string]string{"my": "meta"}
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "ServiceName",
|
||||
setup: func(sn *ServiceNode) {
|
||||
sn.ServiceName = "duck"
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "ServicePort",
|
||||
setup: func(sn *ServiceNode) {
|
||||
sn.ServicePort = 65534
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "ServiceTags",
|
||||
setup: func(sn *ServiceNode) {
|
||||
sn.ServiceTags = []string{"new", "tags"}
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "ServiceWeights",
|
||||
setup: func(sn *ServiceNode) {
|
||||
sn.ServiceWeights = Weights{Passing: 42, Warning: 41}
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "ServiceProxy",
|
||||
setup: func(sn *ServiceNode) {
|
||||
sn.ServiceProxy = ConnectProxyConfig{}
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "ServiceConnect",
|
||||
setup: func(sn *ServiceNode) {
|
||||
sn.ServiceConnect = ServiceConnect{}
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "ServiceTaggedAddresses",
|
||||
setup: func(sn *ServiceNode) {
|
||||
sn.ServiceTaggedAddresses = nil
|
||||
},
|
||||
},
|
||||
}
|
||||
serviceName := sn.ServiceName
|
||||
servicePort := sn.ServicePort
|
||||
serviceTags := sn.ServiceTags
|
||||
serviceWeights := Weights{Passing: 2, Warning: 1}
|
||||
sn.ServiceWeights = serviceWeights
|
||||
serviceProxy := sn.ServiceProxy
|
||||
serviceConnect := sn.ServiceConnect
|
||||
serviceTaggedAddresses := sn.ServiceTaggedAddresses
|
||||
|
||||
n := sn.ToNodeService().ToServiceNode(node)
|
||||
other := sn.ToNodeService().ToServiceNode(node)
|
||||
run := func(t *testing.T, tc testcase) {
|
||||
sn := testServiceNode(t)
|
||||
sn.ServiceWeights = Weights{Passing: 2, Warning: 1}
|
||||
n := sn.ToNodeService().ToServiceNode(nodeName)
|
||||
other := sn.ToNodeService().ToServiceNode(nodeName)
|
||||
|
||||
check := func(twiddle, restore func()) {
|
||||
t.Helper()
|
||||
if !n.IsSameService(other) || !other.IsSameService(n) {
|
||||
t.Fatalf("should be the same")
|
||||
}
|
||||
|
||||
twiddle()
|
||||
if n.IsSameService(other) || other.IsSameService(n) {
|
||||
t.Fatalf("should be different, was %#v VS %#v", n, other)
|
||||
}
|
||||
tc.setup(other)
|
||||
|
||||
restore()
|
||||
if !n.IsSameService(other) || !other.IsSameService(n) {
|
||||
t.Fatalf("should be the same after restore, was:\n %#v VS\n %#v", n, other)
|
||||
if tc.expect {
|
||||
if !n.IsSameService(other) || !other.IsSameService(n) {
|
||||
t.Fatalf("should be the same")
|
||||
}
|
||||
} else {
|
||||
if n.IsSameService(other) || other.IsSameService(n) {
|
||||
t.Fatalf("should be different, was %#v VS %#v", n, other)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
check(func() { other.ServiceID = "66fb695a-c782-472f-8d36-4f3edd754b37" }, func() { other.ServiceID = serviceID })
|
||||
check(func() { other.Node = "other" }, func() { other.Node = node })
|
||||
check(func() { other.ServiceAddress = "1.2.3.4" }, func() { other.ServiceAddress = serviceAddress })
|
||||
check(func() { other.ServiceEnableTagOverride = !serviceEnableTagOverride }, func() { other.ServiceEnableTagOverride = serviceEnableTagOverride })
|
||||
check(func() { other.ServiceKind = "newKind" }, func() { other.ServiceKind = "" })
|
||||
check(func() { other.ServiceMeta = map[string]string{"my": "meta"} }, func() { other.ServiceMeta = serviceMeta })
|
||||
check(func() { other.ServiceName = "duck" }, func() { other.ServiceName = serviceName })
|
||||
check(func() { other.ServicePort = 65534 }, func() { other.ServicePort = servicePort })
|
||||
check(func() { other.ServiceTags = []string{"new", "tags"} }, func() { other.ServiceTags = serviceTags })
|
||||
check(func() { other.ServiceWeights = Weights{Passing: 42, Warning: 41} }, func() { other.ServiceWeights = serviceWeights })
|
||||
check(func() { other.ServiceProxy = ConnectProxyConfig{} }, func() { other.ServiceProxy = serviceProxy })
|
||||
check(func() { other.ServiceConnect = ServiceConnect{} }, func() { other.ServiceConnect = serviceConnect })
|
||||
check(func() { other.ServiceTaggedAddresses = nil }, func() { other.ServiceTaggedAddresses = serviceTaggedAddresses })
|
||||
for _, tc := range cases {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
run(t, tc)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestStructs_ServiceNode_PartialClone(t *testing.T) {
|
||||
|
@ -1175,82 +1358,125 @@ func TestStructs_NodeService_IsSame(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestStructs_HealthCheck_IsSame(t *testing.T) {
|
||||
hc := &HealthCheck{
|
||||
Node: "node1",
|
||||
CheckID: "check1",
|
||||
Name: "thecheck",
|
||||
Status: api.HealthPassing,
|
||||
Notes: "it's all good",
|
||||
Output: "lgtm",
|
||||
ServiceID: "service1",
|
||||
ServiceName: "theservice",
|
||||
ServiceTags: []string{"foo"},
|
||||
}
|
||||
if !hc.IsSame(hc) {
|
||||
t.Fatalf("should be equal to itself")
|
||||
type testcase struct {
|
||||
name string
|
||||
setup func(*HealthCheck)
|
||||
expect bool
|
||||
}
|
||||
|
||||
other := &HealthCheck{
|
||||
Node: "node1",
|
||||
CheckID: "check1",
|
||||
Name: "thecheck",
|
||||
Status: api.HealthPassing,
|
||||
Notes: "it's all good",
|
||||
Output: "lgtm",
|
||||
ServiceID: "service1",
|
||||
ServiceName: "theservice",
|
||||
ServiceTags: []string{"foo"},
|
||||
RaftIndex: RaftIndex{
|
||||
CreateIndex: 1,
|
||||
ModifyIndex: 2,
|
||||
cases := []testcase{
|
||||
{
|
||||
name: "Node",
|
||||
setup: func(hc *HealthCheck) {
|
||||
hc.Node = "XXX"
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "Node casing",
|
||||
setup: func(hc *HealthCheck) {
|
||||
hc.Node = "NoDe1"
|
||||
},
|
||||
expect: true,
|
||||
},
|
||||
{
|
||||
name: "CheckID",
|
||||
setup: func(hc *HealthCheck) {
|
||||
hc.CheckID = "XXX"
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "Name",
|
||||
setup: func(hc *HealthCheck) {
|
||||
hc.Name = "XXX"
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "Status",
|
||||
setup: func(hc *HealthCheck) {
|
||||
hc.Status = "XXX"
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "Notes",
|
||||
setup: func(hc *HealthCheck) {
|
||||
hc.Notes = "XXX"
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "Output",
|
||||
setup: func(hc *HealthCheck) {
|
||||
hc.Output = "XXX"
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "ServiceID",
|
||||
setup: func(hc *HealthCheck) {
|
||||
hc.ServiceID = "XXX"
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "ServiceName",
|
||||
setup: func(hc *HealthCheck) {
|
||||
hc.ServiceName = "XXX"
|
||||
},
|
||||
},
|
||||
}
|
||||
if !hc.IsSame(other) || !other.IsSame(hc) {
|
||||
t.Fatalf("should not care about Raft fields")
|
||||
}
|
||||
|
||||
checkCheckIDField := func(field *types.CheckID) {
|
||||
if !hc.IsSame(other) || !other.IsSame(hc) {
|
||||
t.Fatalf("should be the same")
|
||||
run := func(t *testing.T, tc testcase) {
|
||||
hc := &HealthCheck{
|
||||
Node: "node1",
|
||||
CheckID: "check1",
|
||||
Name: "thecheck",
|
||||
Status: api.HealthPassing,
|
||||
Notes: "it's all good",
|
||||
Output: "lgtm",
|
||||
ServiceID: "service1",
|
||||
ServiceName: "theservice",
|
||||
ServiceTags: []string{"foo"},
|
||||
}
|
||||
|
||||
old := *field
|
||||
*field = "XXX"
|
||||
if hc.IsSame(other) || other.IsSame(hc) {
|
||||
t.Fatalf("should not be the same")
|
||||
if !hc.IsSame(hc) {
|
||||
t.Fatalf("should be equal to itself")
|
||||
}
|
||||
|
||||
other := &HealthCheck{
|
||||
Node: "node1",
|
||||
CheckID: "check1",
|
||||
Name: "thecheck",
|
||||
Status: api.HealthPassing,
|
||||
Notes: "it's all good",
|
||||
Output: "lgtm",
|
||||
ServiceID: "service1",
|
||||
ServiceName: "theservice",
|
||||
ServiceTags: []string{"foo"},
|
||||
RaftIndex: RaftIndex{
|
||||
CreateIndex: 1,
|
||||
ModifyIndex: 2,
|
||||
},
|
||||
}
|
||||
*field = old
|
||||
|
||||
if !hc.IsSame(other) || !other.IsSame(hc) {
|
||||
t.Fatalf("should be the same")
|
||||
t.Fatalf("should not care about Raft fields")
|
||||
}
|
||||
|
||||
tc.setup(hc)
|
||||
|
||||
if tc.expect {
|
||||
if !hc.IsSame(other) || !other.IsSame(hc) {
|
||||
t.Fatalf("should be the same")
|
||||
}
|
||||
} else {
|
||||
if hc.IsSame(other) || other.IsSame(hc) {
|
||||
t.Fatalf("should not be the same")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
checkStringField := func(field *string) {
|
||||
if !hc.IsSame(other) || !other.IsSame(hc) {
|
||||
t.Fatalf("should be the same")
|
||||
}
|
||||
|
||||
old := *field
|
||||
*field = "XXX"
|
||||
if hc.IsSame(other) || other.IsSame(hc) {
|
||||
t.Fatalf("should not be the same")
|
||||
}
|
||||
*field = old
|
||||
|
||||
if !hc.IsSame(other) || !other.IsSame(hc) {
|
||||
t.Fatalf("should be the same")
|
||||
}
|
||||
for _, tc := range cases {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
run(t, tc)
|
||||
})
|
||||
}
|
||||
|
||||
checkStringField(&other.Node)
|
||||
checkCheckIDField(&other.CheckID)
|
||||
checkStringField(&other.Name)
|
||||
checkStringField(&other.Status)
|
||||
checkStringField(&other.Notes)
|
||||
checkStringField(&other.Output)
|
||||
checkStringField(&other.ServiceID)
|
||||
checkStringField(&other.ServiceName)
|
||||
}
|
||||
|
||||
func TestStructs_HealthCheck_Marshalling(t *testing.T) {
|
||||
|
|
|
@ -103,11 +103,11 @@ func (c *cmd) Run(args []string) int {
var area1, area2 string
for _, dc := range dcs {
for _, entry := range dc.Coordinates {
if dc.Datacenter == dc1 && entry.Node == node1 {
if dc.Datacenter == dc1 && strings.EqualFold(entry.Node, node1) {
area1 = dc.AreaID
coord1 = entry.Coord
}
if dc.Datacenter == dc2 && entry.Node == node2 {
if dc.Datacenter == dc2 && strings.EqualFold(entry.Node, node2) {
area2 = dc.AreaID
coord2 = entry.Coord
}

@ -145,10 +145,10 @@ func (c *cmd) Run(args []string) int {
// Index all the coordinates by segment.
cs1, cs2 := make(lib.CoordinateSet), make(lib.CoordinateSet)
for _, entry := range entries {
if entry.Node == nodes[0] {
if strings.EqualFold(entry.Node, nodes[0]) {
cs1[entry.Segment] = entry.Coord
}
if entry.Node == nodes[1] {
if strings.EqualFold(entry.Node, nodes[1]) {
cs2[entry.Segment] = entry.Coord
}
}
@ -7,11 +7,12 @@ import (
|
|||
|
||||
"github.com/hashicorp/consul/testrpc"
|
||||
|
||||
"github.com/hashicorp/serf/coordinate"
|
||||
"github.com/mitchellh/cli"
|
||||
|
||||
"github.com/hashicorp/consul/agent"
|
||||
"github.com/hashicorp/consul/agent/structs"
|
||||
"github.com/hashicorp/consul/sdk/testutil/retry"
|
||||
"github.com/hashicorp/serf/coordinate"
|
||||
"github.com/mitchellh/cli"
|
||||
)
|
||||
|
||||
func TestRTTCommand_noTabs(t *testing.T) {
|
||||
|
@ -167,11 +168,15 @@ func TestRTTCommand_WAN(t *testing.T) {
|
|||
defer a.Shutdown()
|
||||
testrpc.WaitForLeader(t, a.RPC, "dc1")
|
||||
|
||||
node := fmt.Sprintf("%s.%s", a.Config.NodeName, a.Config.Datacenter)
|
||||
var (
|
||||
node = fmt.Sprintf("%s.%s", a.Config.NodeName, a.Config.Datacenter)
|
||||
nodeUpper = fmt.Sprintf("%s.%s", strings.ToUpper(a.Config.NodeName), a.Config.Datacenter)
|
||||
nodeLower = fmt.Sprintf("%s.%s", strings.ToLower(a.Config.NodeName), a.Config.Datacenter)
|
||||
)
|
||||
|
||||
// We can't easily inject WAN coordinates, so we will just query the
|
||||
// node with itself.
|
||||
{
|
||||
t.Run("query self", func(t *testing.T) {
|
||||
ui := cli.NewMockUi()
|
||||
c := New(ui)
|
||||
args := []string{
|
||||
|
@ -189,10 +194,30 @@ func TestRTTCommand_WAN(t *testing.T) {
|
|||
if !strings.Contains(ui.OutputWriter.String(), "rtt: ") {
|
||||
t.Fatalf("bad: %#v", ui.OutputWriter.String())
|
||||
}
|
||||
}
|
||||
})
|
||||
|
||||
// Default to the agent's node.
|
||||
{
|
||||
t.Run("query self with case differences", func(t *testing.T) {
|
||||
ui := cli.NewMockUi()
|
||||
c := New(ui)
|
||||
args := []string{
|
||||
"-wan",
|
||||
"-http-addr=" + a.HTTPAddr(),
|
||||
nodeLower,
|
||||
nodeUpper,
|
||||
}
|
||||
code := c.Run(args)
|
||||
if code != 0 {
|
||||
t.Log(ui.OutputWriter.String())
|
||||
t.Fatalf("bad: %d: %#v", code, ui.ErrorWriter.String())
|
||||
}
|
||||
|
||||
// Make sure there was some kind of RTT reported in the output.
|
||||
if !strings.Contains(ui.OutputWriter.String(), "rtt: ") {
|
||||
t.Fatalf("bad: %#v", ui.OutputWriter.String())
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("default to the agent's node", func(t *testing.T) {
|
||||
ui := cli.NewMockUi()
|
||||
c := New(ui)
|
||||
args := []string{
|
||||
|
@ -209,10 +234,9 @@ func TestRTTCommand_WAN(t *testing.T) {
|
|||
if !strings.Contains(ui.OutputWriter.String(), "rtt: ") {
|
||||
t.Fatalf("bad: %#v", ui.OutputWriter.String())
|
||||
}
|
||||
}
|
||||
})
|
||||
|
||||
// Try an unknown node.
|
||||
{
|
||||
t.Run("try an unknown node", func(t *testing.T) {
|
||||
ui := cli.NewMockUi()
|
||||
c := New(ui)
|
||||
args := []string{
|
||||
|
@ -225,5 +249,5 @@ func TestRTTCommand_WAN(t *testing.T) {
|
|||
if code != 1 {
|
||||
t.Fatalf("bad: %d: %#v", code, ui.ErrorWriter.String())
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
|
|