mirror of https://github.com/status-im/consul.git
Merge pull request #8365 from hashicorp/dnephin/fix-service-by-node-meta-flake
state: speed up tests that use watchLimit
commit 5b37efd91b
@@ -1627,12 +1627,14 @@ func TestStateStore_Services(t *testing.T) {
 func TestStateStore_ServicesByNodeMeta(t *testing.T) {
 	s := testStateStore(t)
 
-	// Listing with no results returns nil.
 	ws := memdb.NewWatchSet()
+
+	t.Run("Listing with no results returns nil", func(t *testing.T) {
 		idx, res, err := s.ServicesByNodeMeta(ws, map[string]string{"somekey": "somevalue"}, nil)
 		if idx != 0 || len(res) != 0 || err != nil {
 			t.Fatalf("expected (0, nil, nil), got: (%d, %#v, %#v)", idx, res, err)
 		}
+	})
 
 	// Create some nodes and services in the state store.
 	node0 := &structs.Node{Node: "node0", Address: "127.0.0.1", Meta: map[string]string{"role": "client", "common": "1"}}
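Each assertion block moves into a named t.Run subtest, so a failure reports which scenario broke and a single case can be rerun in isolation. A minimal self-contained sketch of the pattern (the store type and names are illustrative, not consul's):

	package example

	import "testing"

	type store struct{ data map[string]string }

	func (s *store) lookup(key string) (string, bool) {
		v, ok := s.data[key]
		return v, ok
	}

	func TestStore(t *testing.T) {
		// The fixture is built once and shared by every subtest below,
		// just as the state store s is shared in the diff above.
		s := &store{data: map[string]string{"role": "client"}}

		// Each subtest is addressable on its own, e.g.:
		//   go test -run 'TestStore/missing_key_returns_no_result'
		t.Run("missing key returns no result", func(t *testing.T) {
			if _, ok := s.lookup("nope"); ok {
				t.Fatal("expected no result for an unknown key")
			}
		})

		t.Run("present key returns its value", func(t *testing.T) {
			v, ok := s.lookup("role")
			if !ok || v != "client" {
				t.Fatalf(`expected ("client", true), got (%q, %v)`, v, ok)
			}
		})
	}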
@@ -1664,12 +1666,13 @@ func TestStateStore_ServicesByNodeMeta(t *testing.T) {
 		t.Fatalf("err: %s", err)
 	}
 	if !watchFired(ws) {
-		t.Fatalf("bad")
+		t.Fatalf("expected the watch to be triggered by the queries")
 	}
 
-	// Filter the services by the first node's meta value.
 	ws = memdb.NewWatchSet()
-	_, res, err = s.ServicesByNodeMeta(ws, map[string]string{"role": "client"}, nil)
+
+	t.Run("Filter the services by the first node's meta value", func(t *testing.T) {
+		_, res, err := s.ServicesByNodeMeta(ws, map[string]string{"role": "client"}, nil)
 		if err != nil {
 			t.Fatalf("err: %s", err)
 		}
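watchFired is a helper defined elsewhere in this test file; only its call sites appear in the diff. A rough sketch of what such a helper looks like with go-memdb (an assumption for illustration, not consul's exact source): it blocks on the watch set with a short timeout and reports whether any watched channel fired.

	package example

	import (
		"time"

		memdb "github.com/hashicorp/go-memdb"
	)

	// watchFired reports whether any channel in the watch set fired before
	// a short timeout. WatchSet.Watch returns true when the timeout wins,
	// so the result is inverted. (Sketch only; the real helper may differ.)
	func watchFired(ws memdb.WatchSet) bool {
		timedOut := ws.Watch(time.After(50 * time.Millisecond))
		return !timedOut
	}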
@@ -1677,61 +1680,55 @@ func TestStateStore_ServicesByNodeMeta(t *testing.T) {
 			"redis": []string{"master", "prod"},
 		}
 		sort.Strings(res["redis"])
-	if !reflect.DeepEqual(res, expected) {
-		t.Fatalf("bad: %v %v", res, expected)
-	}
+		require.Equal(t, expected, res)
+	})
 
-	// Get all services using the common meta value
-	_, res, err = s.ServicesByNodeMeta(ws, map[string]string{"common": "1"}, nil)
+	t.Run("Get all services using the common meta value", func(t *testing.T) {
+		_, res, err := s.ServicesByNodeMeta(ws, map[string]string{"common": "1"}, nil)
 		if err != nil {
 			t.Fatalf("err: %s", err)
 		}
-	expected = structs.Services{
+		expected := structs.Services{
 			"redis": []string{"master", "prod", "slave"},
 		}
 		sort.Strings(res["redis"])
-	if !reflect.DeepEqual(res, expected) {
-		t.Fatalf("bad: %v %v", res, expected)
-	}
+		require.Equal(t, expected, res)
+	})
 
-	// Get an empty list for an invalid meta value
-	_, res, err = s.ServicesByNodeMeta(ws, map[string]string{"invalid": "nope"}, nil)
+	t.Run("Get an empty list for an invalid meta value", func(t *testing.T) {
+		_, res, err := s.ServicesByNodeMeta(ws, map[string]string{"invalid": "nope"}, nil)
 		if err != nil {
 			t.Fatalf("err: %s", err)
 		}
-	expected = structs.Services{}
-	if !reflect.DeepEqual(res, expected) {
-		t.Fatalf("bad: %v %v", res, expected)
-	}
+		expected := structs.Services{}
+		require.Equal(t, expected, res)
+	})
 
-	// Get the first node's service instance using multiple meta filters
-	_, res, err = s.ServicesByNodeMeta(ws, map[string]string{"role": "client", "common": "1"}, nil)
+	t.Run("Get the first node's service instance using multiple meta filters", func(t *testing.T) {
+		_, res, err := s.ServicesByNodeMeta(ws, map[string]string{"role": "client", "common": "1"}, nil)
 		if err != nil {
 			t.Fatalf("err: %s", err)
 		}
-	expected = structs.Services{
+		expected := structs.Services{
 			"redis": []string{"master", "prod"},
 		}
 		sort.Strings(res["redis"])
-	if !reflect.DeepEqual(res, expected) {
-		t.Fatalf("bad: %v %v", res, expected)
-	}
+		require.Equal(t, expected, res)
+	})
 
-	// Sanity check the watch before we proceed.
-	if watchFired(ws) {
-		t.Fatalf("bad")
-	}
-
-	// Registering some unrelated node + service should not fire the watch.
+	t.Run("Registering some unrelated node + service should not fire the watch.", func(t *testing.T) {
 		testRegisterNode(t, s, 4, "nope")
 		testRegisterService(t, s, 5, "nope", "nope")
 		if watchFired(ws) {
-		t.Fatalf("bad")
+			t.Fatalf("expected the watch to timeout and not be triggered")
 		}
+	})
 
-	// Overwhelm the service tracking.
-	idx = 6
-	for i := 0; i < 2*watchLimit; i++ {
+	t.Run("Uses watchLimit to limit the number of watches", func(t *testing.T) {
+		patchWatchLimit(t, 10)
+
+		var idx uint64 = 6
+		for i := 0; i < watchLimit+2; i++ {
 			node := fmt.Sprintf("many%d", i)
 			testRegisterNodeWithMeta(t, s, idx, node, map[string]string{"common": "1"})
 			idx++
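The hand-rolled reflect.DeepEqual checks become require.Equal from github.com/stretchr/testify/require, which on mismatch fails with a readable expected/actual diff instead of the opaque "bad: %v %v" line. The two styles side by side, in a self-contained sketch using the same libraries the test file already imports:

	package example

	import (
		"reflect"
		"testing"

		"github.com/stretchr/testify/require"
	)

	func TestCompare(t *testing.T) {
		expected := map[string][]string{"redis": {"master", "prod"}}
		res := map[string][]string{"redis": {"master", "prod"}}

		// Before: on mismatch both values print on one line, with no hint
		// as to which element differs.
		if !reflect.DeepEqual(res, expected) {
			t.Fatalf("bad: %v %v", res, expected)
		}

		// After: require.Equal stops the test on mismatch and prints an
		// expected/actual diff, matching the t.Fatalf behavior above.
		require.Equal(t, expected, res)
	}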
@@ -1741,17 +1738,25 @@ func TestStateStore_ServicesByNodeMeta(t *testing.T) {
 
 		// Now get a fresh watch, which will be forced to watch the whole
 		// service table.
-	ws = memdb.NewWatchSet()
-	_, _, err = s.ServicesByNodeMeta(ws, map[string]string{"common": "1"}, nil)
-	if err != nil {
-		t.Fatalf("err: %s", err)
-	}
+		ws := memdb.NewWatchSet()
+		_, _, err := s.ServicesByNodeMeta(ws, map[string]string{"common": "1"}, nil)
+		require.NoError(t, err)
 
 		// Registering some unrelated node + service should not fire the watch.
 		testRegisterService(t, s, idx, "nope", "more-nope")
 		if !watchFired(ws) {
-		t.Fatalf("bad")
+			t.Fatalf("expected the watch to timeout and not be triggered")
 		}
+	})
 }
+
+// patchWatchLimit package variable. Not safe for concurrent use. Do not use
+// with t.Parallel.
+func patchWatchLimit(t *testing.T, limit int) {
+	oldLimit := watchLimit
+	watchLimit = limit
+	t.Cleanup(func() {
+		watchLimit = oldLimit
+	})
+}
 
 func TestStateStore_ServiceNodes(t *testing.T) {
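patchWatchLimit works because t.Cleanup registers a function that runs once the test and all of its subtests finish, so the package variable is restored even when an assertion fails midway. The same save/patch/restore shape applies to any package-level knob; a self-contained sketch with illustrative names (not consul's identifiers):

	package example

	import "testing"

	// limit stands in for a package-level tuning knob like watchLimit.
	var limit = 2048

	// patchLimit lowers the knob for one test and restores it afterwards
	// via t.Cleanup. Mutating shared package state like this is not safe
	// with t.Parallel, hence the warning on the real helper.
	func patchLimit(t *testing.T, n int) {
		old := limit
		limit = n
		t.Cleanup(func() { limit = old })
	}

	func TestWithSmallLimit(t *testing.T) {
		patchLimit(t, 10)
		// Exceeding the cap now takes 12 iterations instead of a loop
		// sized by the real limit, which is where the speed-up comes from.
		for i := 0; i < limit+2; i++ {
			_ = i // ... register one node/service per iteration ...
		}
	}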
@@ -1870,7 +1875,8 @@ func TestStateStore_ServiceNodes(t *testing.T) {
 
 	// Overwhelm the node tracking.
 	idx = 19
-	for i := 0; i < 2*watchLimit; i++ {
+	patchWatchLimit(t, 10)
+	for i := 0; i < watchLimit+2; i++ {
 		node := fmt.Sprintf("many%d", i)
 		if err := s.EnsureNode(idx, &structs.Node{Node: node, Address: "127.0.0.1"}); err != nil {
 			t.Fatalf("err: %v", err)
@@ -2639,7 +2645,8 @@ func TestStateStore_ServiceChecksByNodeMeta(t *testing.T) {
 	}
 
 	// Overwhelm the node tracking.
-	for i := 0; i < 2*watchLimit; i++ {
+	patchWatchLimit(t, 10)
+	for i := 0; i < watchLimit+2; i++ {
 		node := fmt.Sprintf("many%d", idx)
 		testRegisterNodeWithMeta(t, s, idx, node, map[string]string{"common": "1"})
 		idx++
@@ -2819,7 +2826,8 @@ func TestStateStore_ChecksInStateByNodeMeta(t *testing.T) {
 	}
 
 	// Overwhelm the node tracking.
-	for i := 0; i < 2*watchLimit; i++ {
+	patchWatchLimit(t, 10)
+	for i := 0; i < watchLimit+2; i++ {
 		node := fmt.Sprintf("many%d", idx)
 		testRegisterNodeWithMeta(t, s, idx, node, map[string]string{"common": "1"})
 		idx++
@@ -82,7 +82,7 @@ var (
 	ErrMissingIntentionID = errors.New("Missing Intention ID")
 )
 
-const (
+var (
 	// watchLimit is used as a soft limit to cap how many watches we allow
 	// for a given blocking query. If this is exceeded, then we will use a
 	// higher-level watch that's less fine-grained. Choosing the perfect
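The const block becomes a var block solely so the new patchWatchLimit helper can assign to watchLimit from tests. The comment (truncated at this hunk boundary) describes a soft cap: watch each result row's channel while the result set is small, and fall back to a single coarse table-level channel once the cap is exceeded. A simplified sketch of that fallback shape, with assumed types rather than consul's actual code:

	package example

	import memdb "github.com/hashicorp/go-memdb"

	var watchLimit = 2048

	// addWatches wires fine-grained per-row channels into the watch set,
	// unless there are too many, in which case it watches the whole table.
	func addWatches(ws memdb.WatchSet, rowChans []<-chan struct{}, tableChan <-chan struct{}) {
		if len(rowChans) > watchLimit {
			// Fall back to one coarse channel for the entire table.
			ws.Add(tableChan)
			return
		}
		for _, ch := range rowChans {
			ws.Add(ch)
		}
	}

The trade-off is spurious wake-ups: a table-level watch fires on any write to the table, but it costs one channel to track instead of thousands. That fallback is exactly what the "Uses watchLimit" subtest above exercises: after overwhelming the tracking, a fresh watch fires even for an unrelated service registration.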