From 0402dd7ac5be10e57d40443d3b0812430c8d7e45 Mon Sep 17 00:00:00 2001 From: Daniel Nephin Date: Wed, 22 Jul 2020 16:26:49 -0400 Subject: [PATCH 1/2] state: Use subtests in TestStateStore_ServicesByNodeMeta These subtests make it much easier to identify the slow part of the test, but they also help enumerate all the different cases which are being tested. --- agent/consul/state/catalog_test.go | 157 ++++++++++++++--------------- 1 file changed, 75 insertions(+), 82 deletions(-) diff --git a/agent/consul/state/catalog_test.go b/agent/consul/state/catalog_test.go index c080f2a0d7..c21b203db4 100644 --- a/agent/consul/state/catalog_test.go +++ b/agent/consul/state/catalog_test.go @@ -1611,12 +1611,14 @@ func TestStateStore_Services(t *testing.T) { func TestStateStore_ServicesByNodeMeta(t *testing.T) { s := testStateStore(t) - // Listing with no results returns nil. ws := memdb.NewWatchSet() - idx, res, err := s.ServicesByNodeMeta(ws, map[string]string{"somekey": "somevalue"}, nil) - if idx != 0 || len(res) != 0 || err != nil { - t.Fatalf("expected (0, nil, nil), got: (%d, %#v, %#v)", idx, res, err) - } + + t.Run("Listing with no results returns nil", func(t *testing.T) { + idx, res, err := s.ServicesByNodeMeta(ws, map[string]string{"somekey": "somevalue"}, nil) + if idx != 0 || len(res) != 0 || err != nil { + t.Fatalf("expected (0, nil, nil), got: (%d, %#v, %#v)", idx, res, err) + } + }) // Create some nodes and services in the state store. node0 := &structs.Node{Node: "node0", Address: "127.0.0.1", Meta: map[string]string{"role": "client", "common": "1"}} @@ -1648,94 +1650,85 @@ func TestStateStore_ServicesByNodeMeta(t *testing.T) { t.Fatalf("err: %s", err) } if !watchFired(ws) { - t.Fatalf("bad") + t.Fatalf("expected the watch to be triggered by the queries") } - // Filter the services by the first node's meta value. 
ws = memdb.NewWatchSet() - _, res, err = s.ServicesByNodeMeta(ws, map[string]string{"role": "client"}, nil) - if err != nil { - t.Fatalf("err: %s", err) - } - expected := structs.Services{ - "redis": []string{"master", "prod"}, - } - sort.Strings(res["redis"]) - if !reflect.DeepEqual(res, expected) { - t.Fatalf("bad: %v %v", res, expected) - } - // Get all services using the common meta value - _, res, err = s.ServicesByNodeMeta(ws, map[string]string{"common": "1"}, nil) - if err != nil { - t.Fatalf("err: %s", err) - } - expected = structs.Services{ - "redis": []string{"master", "prod", "slave"}, - } - sort.Strings(res["redis"]) - if !reflect.DeepEqual(res, expected) { - t.Fatalf("bad: %v %v", res, expected) - } + t.Run("Filter the services by the first node's meta value", func(t *testing.T) { + _, res, err := s.ServicesByNodeMeta(ws, map[string]string{"role": "client"}, nil) + if err != nil { + t.Fatalf("err: %s", err) + } + expected := structs.Services{ + "redis": []string{"master", "prod"}, + } + sort.Strings(res["redis"]) + require.Equal(t, expected, res) + }) - // Get an empty list for an invalid meta value - _, res, err = s.ServicesByNodeMeta(ws, map[string]string{"invalid": "nope"}, nil) - if err != nil { - t.Fatalf("err: %s", err) - } - expected = structs.Services{} - if !reflect.DeepEqual(res, expected) { - t.Fatalf("bad: %v %v", res, expected) - } + t.Run("Get all services using the common meta value", func(t *testing.T) { + _, res, err := s.ServicesByNodeMeta(ws, map[string]string{"common": "1"}, nil) + if err != nil { + t.Fatalf("err: %s", err) + } + expected := structs.Services{ + "redis": []string{"master", "prod", "slave"}, + } + sort.Strings(res["redis"]) + require.Equal(t, expected, res) + }) - // Get the first node's service instance using multiple meta filters - _, res, err = s.ServicesByNodeMeta(ws, map[string]string{"role": "client", "common": "1"}, nil) - if err != nil { - t.Fatalf("err: %s", err) - } - expected = structs.Services{ - "redis": 
[]string{"master", "prod"}, - } - sort.Strings(res["redis"]) - if !reflect.DeepEqual(res, expected) { - t.Fatalf("bad: %v %v", res, expected) - } + t.Run("Get an empty list for an invalid meta value", func(t *testing.T) { + _, res, err := s.ServicesByNodeMeta(ws, map[string]string{"invalid": "nope"}, nil) + if err != nil { + t.Fatalf("err: %s", err) + } + expected := structs.Services{} + require.Equal(t, expected, res) + }) - // Sanity check the watch before we proceed. - if watchFired(ws) { - t.Fatalf("bad") - } + t.Run("Get the first node's service instance using multiple meta filters", func(t *testing.T) { + _, res, err := s.ServicesByNodeMeta(ws, map[string]string{"role": "client", "common": "1"}, nil) + if err != nil { + t.Fatalf("err: %s", err) + } + expected := structs.Services{ + "redis": []string{"master", "prod"}, + } + sort.Strings(res["redis"]) + require.Equal(t, expected, res) + }) - // Registering some unrelated node + service should not fire the watch. - testRegisterNode(t, s, 4, "nope") - testRegisterService(t, s, 5, "nope", "nope") - if watchFired(ws) { - t.Fatalf("bad") - } + t.Run("Registering some unrelated node + service should not fire the watch.", func(t *testing.T) { + testRegisterNode(t, s, 4, "nope") + testRegisterService(t, s, 5, "nope", "nope") + if watchFired(ws) { + t.Fatalf("expected the watch to timeout and not be triggered") + } + }) - // Overwhelm the service tracking. 
- idx = 6 - for i := 0; i < 2*watchLimit; i++ { - node := fmt.Sprintf("many%d", i) - testRegisterNodeWithMeta(t, s, idx, node, map[string]string{"common": "1"}) - idx++ - testRegisterService(t, s, idx, node, "nope") - idx++ - } + t.Run("Uses watchLimit to limit the number of watches", func(t *testing.T) { + var idx uint64 = 6 + for i := 0; i < 2*watchLimit; i++ { + node := fmt.Sprintf("many%d", i) + testRegisterNodeWithMeta(t, s, idx, node, map[string]string{"common": "1"}) + idx++ + testRegisterService(t, s, idx, node, "nope") + idx++ + } - // Now get a fresh watch, which will be forced to watch the whole - // service table. - ws = memdb.NewWatchSet() - _, _, err = s.ServicesByNodeMeta(ws, map[string]string{"common": "1"}, nil) - if err != nil { - t.Fatalf("err: %s", err) - } + // Now get a fresh watch, which will be forced to watch the whole + // service table. + ws := memdb.NewWatchSet() + _, _, err := s.ServicesByNodeMeta(ws, map[string]string{"common": "1"}, nil) + require.NoError(t, err) - // Registering some unrelated node + service should not fire the watch. - testRegisterService(t, s, idx, "nope", "more-nope") - if !watchFired(ws) { - t.Fatalf("bad") - } + testRegisterService(t, s, idx, "nope", "more-nope") + if !watchFired(ws) { + t.Fatalf("expected the watch to be triggered") + } + }) } func TestStateStore_ServiceNodes(t *testing.T) { From 9ed61fd160c93cb006a6f206329660731fbbdd12 Mon Sep 17 00:00:00 2001 From: Daniel Nephin Date: Wed, 22 Jul 2020 16:38:39 -0400 Subject: [PATCH 2/2] state: speed up TestStateStore_ServicesByNodeMeta Make watchLimit a var so that we can patch it in tests and reduce the time spent creating state.
--- agent/consul/state/catalog_test.go | 23 +++++++++++++++++++---- agent/consul/state/state_store.go | 2 +- 2 files changed, 20 insertions(+), 5 deletions(-) diff --git a/agent/consul/state/catalog_test.go b/agent/consul/state/catalog_test.go index c21b203db4..07bcc49377 100644 --- a/agent/consul/state/catalog_test.go +++ b/agent/consul/state/catalog_test.go @@ -1709,8 +1709,10 @@ func TestStateStore_ServicesByNodeMeta(t *testing.T) { }) t.Run("Uses watchLimit to limit the number of watches", func(t *testing.T) { + patchWatchLimit(t, 10) + var idx uint64 = 6 - for i := 0; i < 2*watchLimit; i++ { + for i := 0; i < watchLimit+2; i++ { node := fmt.Sprintf("many%d", i) testRegisterNodeWithMeta(t, s, idx, node, map[string]string{"common": "1"}) idx++ @@ -1731,6 +1733,16 @@ func TestStateStore_ServicesByNodeMeta(t *testing.T) { }) } +// patchWatchLimit patches the watchLimit package variable. Not safe for concurrent use. Do not use +// with t.Parallel. +func patchWatchLimit(t *testing.T, limit int) { + oldLimit := watchLimit + watchLimit = limit + t.Cleanup(func() { + watchLimit = oldLimit + }) +} + func TestStateStore_ServiceNodes(t *testing.T) { s := testStateStore(t) @@ -1847,7 +1859,8 @@ func TestStateStore_ServiceNodes(t *testing.T) { // Overwhelm the node tracking. idx = 19 - for i := 0; i < 2*watchLimit; i++ { + patchWatchLimit(t, 10) + for i := 0; i < watchLimit+2; i++ { node := fmt.Sprintf("many%d", i) if err := s.EnsureNode(idx, &structs.Node{Node: node, Address: "127.0.0.1"}); err != nil { t.Fatalf("err: %v", err) } @@ -2616,7 +2629,8 @@ func TestStateStore_ServiceChecksByNodeMeta(t *testing.T) { } // Overwhelm the node tracking. - for i := 0; i < 2*watchLimit; i++ { + patchWatchLimit(t, 10) + for i := 0; i < watchLimit+2; i++ { node := fmt.Sprintf("many%d", idx) testRegisterNodeWithMeta(t, s, idx, node, map[string]string{"common": "1"}) idx++ @@ -2796,7 +2810,8 @@ func TestStateStore_ChecksInStateByNodeMeta(t *testing.T) { } // Overwhelm the node tracking.
- for i := 0; i < 2*watchLimit; i++ { + patchWatchLimit(t, 10) + for i := 0; i < watchLimit+2; i++ { node := fmt.Sprintf("many%d", idx) testRegisterNodeWithMeta(t, s, idx, node, map[string]string{"common": "1"}) idx++ diff --git a/agent/consul/state/state_store.go b/agent/consul/state/state_store.go index 3692deb9dc..bab348fefa 100644 --- a/agent/consul/state/state_store.go +++ b/agent/consul/state/state_store.go @@ -82,7 +82,7 @@ var ( ErrMissingIntentionID = errors.New("Missing Intention ID") ) -const ( +var ( // watchLimit is used as a soft limit to cap how many watches we allow // for a given blocking query. If this is exceeded, then we will use a // higher-level watch that's less fine-grained. Choosing the perfect