mirror of https://github.com/status-im/consul.git

retry: Removes the description parameter.

This commit is contained in:
parent 55cf5e1ba0
commit ddfa57765c
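The change is mechanical across the test suite: every call site drops the leading description string from retry.Run and retry.RunWith, and the helpers themselves lose the desc parameter. A minimal before/after sketch of a call site follows; the test body and the import path are illustrative assumptions, not lines from this commit.

package example

import (
	"testing"

	// Assumed import path for the retry helpers touched by this commit.
	"github.com/hashicorp/consul/testutil/retry"
)

func TestRetryCallSite(t *testing.T) {
	// Old style, removed by this commit:
	//   retry.Run("no datacenters", t, func(r *retry.R) { ... })
	// New style: no description; the Failer (here *testing.T) comes first.
	retry.Run(t, func(r *retry.R) {
		// Hypothetical condition; real call sites re-check API or RPC results.
		if got, want := len([]string{"dc1"}), 1; got != want {
			r.Fatalf("got %d datacenters want %d", got, want)
		}
	})
}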

@@ -14,7 +14,7 @@ func TestCatalog_Datacenters(t *testing.T) {
 defer s.Stop()

 catalog := c.Catalog()
-retry.Run("no datacenters", t, func(r *retry.R) {
+retry.Run(t, func(r *retry.R) {
 datacenters, err := catalog.Datacenters()
 if err != nil {
 r.Fatalf("catalog.Datacenters: ", err)
@@ -30,7 +30,7 @@ func TestCatalog_Nodes(t *testing.T) {
 defer s.Stop()

 catalog := c.Catalog()
-retry.RunWith(retry.ThreeTimes(), "no nodes", t, func(r *retry.R) {
+retry.RunWith(retry.ThreeTimes(), t, func(r *retry.R) {
 nodes, meta, err := catalog.Nodes(nil)
 if err != nil {
 r.Fatalf("catalog.Nodes: ", err)
@@ -68,7 +68,7 @@ func TestCatalog_Nodes_MetaFilter(t *testing.T) {

 catalog := c.Catalog()
 // Make sure we get the node back when filtering by its metadata
-retry.Run("", t, func(r *retry.R) {
+retry.Run(t, func(r *retry.R) {
 nodes, meta, err := catalog.Nodes(&QueryOptions{NodeMeta: meta})
 if err != nil {
 r.Fatal(err)
@@ -95,7 +95,7 @@ func TestCatalog_Nodes_MetaFilter(t *testing.T) {
 }
 })

-retry.Run("", t, func(r *retry.R) {
+retry.Run(t, func(r *retry.R) {
 // Get nothing back when we use an invalid filter
 nodes, meta, err := catalog.Nodes(&QueryOptions{NodeMeta: map[string]string{"nope": "nope"}})
 if err != nil {
@@ -118,7 +118,7 @@ func TestCatalog_Services(t *testing.T) {
 defer s.Stop()

 catalog := c.Catalog()
-retry.Run("", t, func(r *retry.R) {
+retry.Run(t, func(r *retry.R) {
 services, meta, err := catalog.Services(nil)
 if err != nil {
 r.Fatal(err)
@@ -143,7 +143,7 @@ func TestCatalog_Services_NodeMetaFilter(t *testing.T) {

 catalog := c.Catalog()
 // Make sure we get the service back when filtering by the node's metadata
-retry.Run("", t, func(r *retry.R) {
+retry.Run(t, func(r *retry.R) {
 services, meta, err := catalog.Services(&QueryOptions{NodeMeta: meta})
 if err != nil {
 r.Fatal(err)
@@ -158,7 +158,7 @@ func TestCatalog_Services_NodeMetaFilter(t *testing.T) {
 }
 })

-retry.Run("", t, func(r *retry.R) {
+retry.Run(t, func(r *retry.R) {
 // Get nothing back when using an invalid filter
 services, meta, err := catalog.Services(&QueryOptions{NodeMeta: map[string]string{"nope": "nope"}})
 if err != nil {
@@ -181,7 +181,7 @@ func TestCatalog_Service(t *testing.T) {
 defer s.Stop()

 catalog := c.Catalog()
-retry.Run("", t, func(r *retry.R) {
+retry.Run(t, func(r *retry.R) {
 services, meta, err := catalog.Service("consul", "", nil)
 if err != nil {
 r.Fatal(err)
@@ -210,7 +210,7 @@ func TestCatalog_Service_NodeMetaFilter(t *testing.T) {
 defer s.Stop()

 catalog := c.Catalog()
-retry.Run("", t, func(r *retry.R) {
+retry.Run(t, func(r *retry.R) {
 services, meta, err := catalog.Service("consul", "", &QueryOptions{NodeMeta: meta})
 if err != nil {
 r.Fatal(err)
@@ -237,7 +237,7 @@ func TestCatalog_Node(t *testing.T) {

 catalog := c.Catalog()
 name, _ := c.Agent().NodeName()
-retry.Run("", t, func(r *retry.R) {
+retry.Run(t, func(r *retry.R) {
 info, meta, err := catalog.Node(name, nil)
 if err != nil {
 r.Fatal(err)
@@ -292,7 +292,7 @@ func TestCatalog_Registration(t *testing.T) {
 Service: service,
 Check: check,
 }
-retry.Run("", t, func(r *retry.R) {
+retry.Run(t, func(r *retry.R) {
 if _, err := catalog.Register(reg, nil); err != nil {
 r.Fatal(err)
 }
@@ -332,7 +332,7 @@ func TestCatalog_Registration(t *testing.T) {
 t.Fatalf("err: %v", err)
 }

-retry.Run("", t, func(r *retry.R) {
+retry.Run(t, func(r *retry.R) {
 node, _, err := catalog.Node("foobar", nil)
 if err != nil {
 r.Fatal(err)
@@ -355,7 +355,7 @@ func TestCatalog_Registration(t *testing.T) {
 t.Fatalf("err: %v", err)
 }

-retry.Run("", t, func(r *retry.R) {
+retry.Run(t, func(r *retry.R) {
 health, _, err := c.Health().Node("foobar", nil)
 if err != nil {
 r.Fatal(err)
@@ -377,7 +377,7 @@ func TestCatalog_Registration(t *testing.T) {
 t.Fatalf("err: %v", err)
 }

-retry.Run("", t, func(r *retry.R) {
+retry.Run(t, func(r *retry.R) {
 node, _, err := catalog.Node("foobar", nil)
 if err != nil {
 r.Fatal(err)
@@ -410,7 +410,7 @@ func TestCatalog_EnableTagOverride(t *testing.T) {
 Service: service,
 }

-retry.Run("", t, func(r *retry.R) {
+retry.Run(t, func(r *retry.R) {
 if _, err := catalog.Register(reg, nil); err != nil {
 r.Fatal(err)
 }
@@ -442,7 +442,7 @@ func TestCatalog_EnableTagOverride(t *testing.T) {

 service.EnableTagOverride = true

-retry.Run("", t, func(r *retry.R) {
+retry.Run(t, func(r *retry.R) {
 if _, err := catalog.Register(reg, nil); err != nil {
 r.Fatal(err)
 }

@@ -12,7 +12,7 @@ func TestCoordinate_Datacenters(t *testing.T) {
 defer s.Stop()

 coordinate := c.Coordinate()
-retry.Run("", t, func(r *retry.R) {
+retry.Run(t, func(r *retry.R) {
 datacenters, err := coordinate.Datacenters()
 if err != nil {
 r.Fatal(err)
@@ -30,7 +30,7 @@ func TestCoordinate_Nodes(t *testing.T) {
 defer s.Stop()

 coordinate := c.Coordinate()
-retry.Run("", t, func(r *retry.R) {
+retry.Run(t, func(r *retry.R) {
 _, _, err := coordinate.Nodes(nil)
 if err != nil {
 r.Fatal(err)

@@ -30,7 +30,7 @@ func TestEvent_FireList(t *testing.T) {
 var events []*UserEvent
 var qm *QueryMeta

-retry.Run("", t, func(r *retry.R) {
+retry.Run(t, func(r *retry.R) {
 events, qm, err = event.List("", nil)
 if err != nil {
 r.Fatalf("err: %v", err)

@@ -22,7 +22,7 @@ func TestHealth_Node(t *testing.T) {
 t.Fatalf("err: %v", err)
 }
 name := info["Config"]["NodeName"].(string)
-retry.Run("", t, func(r *retry.R) {
+retry.Run(t, func(r *retry.R) {
 checks, meta, err := health.Node(name, nil)
 if err != nil {
 r.Fatal(err)
@@ -192,7 +192,7 @@ func TestHealth_Checks(t *testing.T) {
 }
 defer agent.ServiceDeregister("foo")

-retry.Run("", t, func(r *retry.R) {
+retry.Run(t, func(r *retry.R) {
 checks := HealthChecks{
 &HealthCheck{
 Node: "node123",
@@ -241,7 +241,7 @@ func TestHealth_Checks_NodeMetaFilter(t *testing.T) {
 }
 defer agent.ServiceDeregister("foo")

-retry.Run("", t, func(r *retry.R) {
+retry.Run(t, func(r *retry.R) {
 checks, meta, err := health.Checks("foo", &QueryOptions{NodeMeta: meta})
 if err != nil {
 r.Fatal(err)
@@ -260,7 +260,7 @@ func TestHealth_Service(t *testing.T) {
 defer s.Stop()

 health := c.Health()
-retry.Run("", t, func(r *retry.R) {
+retry.Run(t, func(r *retry.R) {
 // consul service should always exist...
 checks, meta, err := health.Service("consul", "", true, nil)
 if err != nil {
@@ -289,7 +289,7 @@ func TestHealth_Service_NodeMetaFilter(t *testing.T) {
 defer s.Stop()

 health := c.Health()
-retry.Run("", t, func(r *retry.R) {
+retry.Run(t, func(r *retry.R) {
 // consul service should always exist...
 checks, meta, err := health.Service("consul", "", true, &QueryOptions{NodeMeta: meta})
 if err != nil {
@@ -316,7 +316,7 @@ func TestHealth_State(t *testing.T) {
 defer s.Stop()

 health := c.Health()
-retry.Run("", t, func(r *retry.R) {
+retry.Run(t, func(r *retry.R) {
 checks, meta, err := health.State("any", nil)
 if err != nil {
 r.Fatal(err)
@@ -339,7 +339,7 @@ func TestHealth_State_NodeMetaFilter(t *testing.T) {
 defer s.Stop()

 health := c.Health()
-retry.Run("", t, func(r *retry.R) {
+retry.Run(t, func(r *retry.R) {
 checks, meta, err := health.State("any", &QueryOptions{NodeMeta: meta})
 if err != nil {
 r.Fatal(err)

@@ -89,7 +89,7 @@ func TestOperator_AutopilotServerHealth(t *testing.T) {
 defer s.Stop()

 operator := c.Operator()
-retry.Run("", t, func(r *retry.R) {
+retry.Run(t, func(r *retry.R) {
 out, err := operator.AutopilotServerHealth(nil)
 if err != nil {
 r.Fatalf("err: %v", err)

@@ -30,7 +30,7 @@ func TestPreparedQuery(t *testing.T) {
 }

 catalog := c.Catalog()
-retry.Run("", t, func(r *retry.R) {
+retry.Run(t, func(r *retry.R) {
 if _, err := catalog.Register(reg, nil); err != nil {
 r.Fatal(err)
 }

@@ -347,7 +347,7 @@ func TestAgent_Reload(t *testing.T) {
 close(doneCh)
 }()

-retry.Run("", t, func(r *retry.R) {
+retry.Run(t, func(r *retry.R) {
 if got, want := len(cmd.httpServers), 1; got != want {
 r.Fatalf("got %d servers want %d", got, want)
 }
@@ -536,7 +536,7 @@ func TestAgent_Join(t *testing.T) {
 t.Fatalf("should have 2 members")
 }

-retry.Run("", t, func(r *retry.R) {
+retry.Run(t, func(r *retry.R) {
 if got, want := len(a2.LANMembers()), 2; got != want {
 r.Fatalf("got %d LAN members want %d", got, want)
 }
@@ -571,7 +571,7 @@ func TestAgent_Join_WAN(t *testing.T) {
 t.Fatalf("should have 2 members")
 }

-retry.Run("", t, func(r *retry.R) {
+retry.Run(t, func(r *retry.R) {
 if got, want := len(a2.WANMembers()), 2; got != want {
 r.Fatalf("got %d WAN members want %d", got, want)
 }
@@ -663,7 +663,7 @@ func TestAgent_Leave(t *testing.T) {
 if obj != nil {
 t.Fatalf("Err: %v", obj)
 }
-retry.Run("", t, func(r *retry.R) {
+retry.Run(t, func(r *retry.R) {
 m := srv.agent.LANMembers()
 if got, want := m[1].Status, serf.StatusLeft; got != want {
 r.Fatalf("got status %q want %q", got, want)
@@ -760,7 +760,7 @@ func TestAgent_ForceLeave(t *testing.T) {
 if obj != nil {
 t.Fatalf("Err: %v", obj)
 }
-retry.Run("", t, func(r *retry.R) {
+retry.Run(t, func(r *retry.R) {
 m := srv.agent.LANMembers()
 if got, want := m[1].Status, serf.StatusLeft; got != want {
 r.Fatalf("got status %q want %q", got, want)
@@ -1928,7 +1928,7 @@ func TestAgent_Monitor(t *testing.T) {
 }

 // Try to stream logs until we see the expected log line
-retry.Run("", t, func(r *retry.R) {
+retry.Run(t, func(r *retry.R) {
 req, _ = http.NewRequest("GET", "/v1/agent/monitor?loglevel=debug", nil)
 resp = newClosableRecorder()
 done := make(chan struct{})

@@ -90,7 +90,7 @@ func TestCatalogDatacenters(t *testing.T) {
 defer srv.Shutdown()
 defer srv.agent.Shutdown()

-retry.Run("", t, func(r *retry.R) {
+retry.Run(t, func(r *retry.R) {
 obj, err := srv.CatalogDatacenters(nil, nil)
 if err != nil {
 r.Fatal(err)
@@ -218,7 +218,7 @@ func TestCatalogNodes_WanTranslation(t *testing.T) {
 if _, err := srv2.agent.JoinWAN([]string{addr}); err != nil {
 t.Fatalf("err: %v", err)
 }
-retry.Run("", t, func(r *retry.R) {
+retry.Run(t, func(r *retry.R) {
 if got, want := len(srv1.agent.WANMembers()), 2; got < want {
 r.Fatalf("got %d WAN members want at least %d", got, want)
 }
@@ -700,7 +700,7 @@ func TestCatalogServiceNodes_WanTranslation(t *testing.T) {
 if _, err := srv2.agent.JoinWAN([]string{addr}); err != nil {
 t.Fatalf("err: %v", err)
 }
-retry.Run("", t, func(r *retry.R) {
+retry.Run(t, func(r *retry.R) {
 if got, want := len(srv1.agent.WANMembers()), 2; got < want {
 r.Fatalf("got %d WAN members want at least %d", got, want)
 }
@@ -941,7 +941,7 @@ func TestCatalogNodeServices_WanTranslation(t *testing.T) {
 if _, err := srv2.agent.JoinWAN([]string{addr}); err != nil {
 t.Fatalf("err: %v", err)
 }
-retry.Run("", t, func(r *retry.R) {
+retry.Run(t, func(r *retry.R) {
 if got, want := len(srv1.agent.WANMembers()), 2; got < want {
 r.Fatalf("got %d WAN members want at least %d", got, want)
 }

@@ -78,7 +78,7 @@ func expectStatus(t *testing.T, script, status string) {
 }
 check.Start()
 defer check.Stop()
-retry.Run("", t, func(r *retry.R) {
+retry.Run(t, func(r *retry.R) {
 if got, want := mock.Updates("foo"), 2; got < want {
 r.Fatalf("got %d updates want at least %d", got, want)
 }
@@ -274,7 +274,7 @@ func expectHTTPStatus(t *testing.T, url string, status string) {
 }
 check.Start()
 defer check.Stop()
-retry.Run("", t, func(r *retry.R) {
+retry.Run(t, func(r *retry.R) {
 if got, want := mock.Updates("foo"), 2; got < want {
 r.Fatalf("got %d updates want at least %d", got, want)
 }
@@ -373,7 +373,7 @@ func TestCheckHTTPTimeout(t *testing.T) {

 check.Start()
 defer check.Stop()
-retry.Run("", t, func(r *retry.R) {
+retry.Run(t, func(r *retry.R) {
 if got, want := mock.Updates("bar"), 2; got < want {
 r.Fatalf("got %d updates want at least %d", got, want)
 }
@@ -440,7 +440,7 @@ func TestCheckHTTP_TLSSkipVerify_true_pass(t *testing.T) {
 if !check.httpClient.Transport.(*http.Transport).TLSClientConfig.InsecureSkipVerify {
 t.Fatalf("should be true")
 }
-retry.Run("", t, func(r *retry.R) {
+retry.Run(t, func(r *retry.R) {
 if got, want := mock.state["skipverify_true"], api.HealthPassing; got != want {
 r.Fatalf("got state %q want %q", got, want)
 }
@@ -471,7 +471,7 @@ func TestCheckHTTP_TLSSkipVerify_true_fail(t *testing.T) {
 if !check.httpClient.Transport.(*http.Transport).TLSClientConfig.InsecureSkipVerify {
 t.Fatalf("should be true")
 }
-retry.Run("", t, func(r *retry.R) {
+retry.Run(t, func(r *retry.R) {
 if got, want := mock.state["skipverify_true"], api.HealthCritical; got != want {
 r.Fatalf("got state %q want %q", got, want)
 }
@@ -503,7 +503,7 @@ func TestCheckHTTP_TLSSkipVerify_false(t *testing.T) {
 if check.httpClient.Transport.(*http.Transport).TLSClientConfig.InsecureSkipVerify {
 t.Fatalf("should be false")
 }
-retry.Run("", t, func(r *retry.R) {
+retry.Run(t, func(r *retry.R) {
 // This should fail due to an invalid SSL cert
 if got, want := mock.state["skipverify_false"], api.HealthCritical; got != want {
 r.Fatalf("got state %q want %q", got, want)
@@ -548,7 +548,7 @@ func expectTCPStatus(t *testing.T, tcp string, status string) {
 }
 check.Start()
 defer check.Stop()
-retry.Run("", t, func(r *retry.R) {
+retry.Run(t, func(r *retry.R) {
 if got, want := mock.Updates("foo"), 2; got < want {
 r.Fatalf("got %d updates want at least %d", got, want)
 }

@@ -103,7 +103,7 @@ func TestRetryJoin(t *testing.T) {
 }
 close(doneCh)
 }()
-retry.Run("", t, func(r *retry.R) {
+retry.Run(t, func(r *retry.R) {
 if got, want := len(agent.LANMembers()), 2; got != want {
 r.Fatalf("got %d LAN members want %d", got, want)
 }

@@ -1302,7 +1302,7 @@ func TestDNS_ServiceLookup_WanAddress(t *testing.T) {
 if _, err := srv2.agent.JoinWAN([]string{addr}); err != nil {
 t.Fatalf("err: %v", err)
 }
-retry.Run("", t, func(r *retry.R) {
+retry.Run(t, func(r *retry.R) {
 if got, want := len(srv1.agent.WANMembers()), 2; got < want {
 r.Fatalf("got %d WAN members want at least %d", got, want)
 }
@@ -3377,7 +3377,7 @@ func TestDNS_PreparedQuery_Failover(t *testing.T) {
 if _, err := srv2.agent.JoinWAN([]string{addr}); err != nil {
 t.Fatalf("err: %v", err)
 }
-retry.Run("", t, func(r *retry.R) {
+retry.Run(t, func(r *retry.R) {
 if got, want := len(srv1.agent.WANMembers()), 2; got < want {
 r.Fatalf("got %d WAN members want at least %d", got, want)
 }

@@ -123,7 +123,7 @@ func TestEventList(t *testing.T) {
 t.Fatalf("err: %v", err)
 }

-retry.Run("", t, func(r *retry.R) {
+retry.Run(t, func(r *retry.R) {
 req, err := http.NewRequest("GET", "/v1/event/list", nil)
 if err != nil {
 r.Fatal(err)
@@ -161,7 +161,7 @@ func TestEventList_Filter(t *testing.T) {
 t.Fatalf("err: %v", err)
 }

-retry.Run("", t, func(r *retry.R) {
+retry.Run(t, func(r *retry.R) {
 req, err := http.NewRequest("GET", "/v1/event/list?name=foo", nil)
 if err != nil {
 r.Fatal(err)
@@ -201,7 +201,7 @@ func TestEventList_ACLFilter(t *testing.T) {

 // Try no token.
 {
-retry.Run("", t, func(r *retry.R) {
+retry.Run(t, func(r *retry.R) {
 req, err := http.NewRequest("GET", "/v1/event/list", nil)
 if err != nil {
 r.Fatal(err)
@@ -224,7 +224,7 @@ func TestEventList_ACLFilter(t *testing.T) {

 // Try the root token.
 {
-retry.Run("", t, func(r *retry.R) {
+retry.Run(t, func(r *retry.R) {
 req, err := http.NewRequest("GET", "/v1/event/list?token=root", nil)
 if err != nil {
 r.Fatal(err)
@@ -254,7 +254,7 @@ func TestEventList_Blocking(t *testing.T) {
 }

 var index string
-retry.Run("", t, func(r *retry.R) {
+retry.Run(t, func(r *retry.R) {
 req, err := http.NewRequest("GET", "/v1/event/list", nil)
 if err != nil {
 r.Fatal(err)
@@ -279,7 +279,7 @@ func TestEventList_Blocking(t *testing.T) {
 }
 }()

-retry.Run("", t, func(r *retry.R) {
+retry.Run(t, func(r *retry.R) {
 url := "/v1/event/list?index=" + index
 req, err := http.NewRequest("GET", url, nil)
 if err != nil {
@@ -322,7 +322,7 @@ func TestEventList_EventBufOrder(t *testing.T) {

 // Test that the event order is preserved when name
 // filtering on a list of > 1 matching event.
-Run("", t, func(r *retry.R) {
+Run(t, func(r *retry.R) {

 url := "/v1/event/list?name=foo"
 req, err := http.NewRequest("GET", url, nil)

@@ -22,7 +22,7 @@ func TestHealthChecksInState(t *testing.T) {
 t.Fatalf("err: %v", err)
 }

-retry.Run("", t, func(r *retry.R) {
+retry.Run(t, func(r *retry.R) {
 resp := httptest.NewRecorder()
 obj, err := srv.HealthChecksInState(resp, req)
 if err != nil {
@@ -46,7 +46,7 @@ func TestHealthChecksInState(t *testing.T) {
 t.Fatalf("err: %v", err)
 }

-retry.Run("", t, func(r *retry.R) {
+retry.Run(t, func(r *retry.R) {
 resp := httptest.NewRecorder()
 obj, err := srv.HealthChecksInState(resp, req)
 if err != nil {
@@ -88,7 +88,7 @@ func TestHealthChecksInState_NodeMetaFilter(t *testing.T) {
 t.Fatalf("err: %v", err)
 }

-retry.Run("", t, func(r *retry.R) {
+retry.Run(t, func(r *retry.R) {
 resp := httptest.NewRecorder()
 obj, err := srv.HealthChecksInState(resp, req)
 if err != nil {
@@ -170,7 +170,7 @@ func TestHealthChecksInState_DistanceSort(t *testing.T) {
 retry.

 // Retry until foo moves to the front of the line.
-Run("", t, func(r *retry.R) {
+Run(t, func(r *retry.R) {

 resp = httptest.NewRecorder()
 obj, err = srv.HealthChecksInState(resp, req)
@@ -431,7 +431,7 @@ func TestHealthServiceChecks_DistanceSort(t *testing.T) {
 retry.

 // Retry until foo has moved to the front of the line.
-Run("", t, func(r *retry.R) {
+Run(t, func(r *retry.R) {

 resp = httptest.NewRecorder()
 obj, err = srv.HealthServiceChecks(resp, req)
@@ -665,7 +665,7 @@ func TestHealthServiceNodes_DistanceSort(t *testing.T) {
 retry.

 // Retry until foo has moved to the front of the line.
-Run("", t, func(r *retry.R) {
+Run(t, func(r *retry.R) {

 resp = httptest.NewRecorder()
 obj, err = srv.HealthServiceNodes(resp, req)
@@ -761,7 +761,7 @@ func TestHealthServiceNodes_WanTranslation(t *testing.T) {
 if _, err := srv2.agent.JoinWAN([]string{addr}); err != nil {
 t.Fatalf("err: %v", err)
 }
-retry.Run("", t, func(r *retry.R) {
+retry.Run(t, func(r *retry.R) {
 if got, want := len(srv1.agent.WANMembers()), 2; got < want {
 r.Fatalf("got %d WAN members want at least %d", got, want)
 }

@@ -117,7 +117,7 @@ func TestAgentAntiEntropy_Services(t *testing.T) {
 Node: agent.config.NodeName,
 }

-retry.Run("", t, func(r *retry.R) {
+retry.Run(t, func(r *retry.R) {
 if err := agent.RPC("Catalog.NodeServices", &req, &services); err != nil {
 r.Fatalf("err: %v", err)
 }
@@ -188,7 +188,7 @@ func TestAgentAntiEntropy_Services(t *testing.T) {
 // Trigger anti-entropy run and wait
 agent.StartSync()

-retry.Run("", t, func(r *retry.R) {
+retry.Run(t, func(r *retry.R) {
 if err := agent.RPC("Catalog.NodeServices", &req, &services); err != nil {
 r.Fatalf("err: %v", err)
 }
@@ -300,7 +300,7 @@ func TestAgentAntiEntropy_EnableTagOverride(t *testing.T) {
 }
 var services structs.IndexedNodeServices

-retry.Run("", t, func(r *retry.R) {
+retry.Run(t, func(r *retry.R) {
 if err := agent.RPC("Catalog.NodeServices", &req, &services); err != nil {
 r.Fatalf("err: %v", err)
 }
@@ -720,7 +720,7 @@ func TestAgentAntiEntropy_Checks(t *testing.T) {
 var checks structs.IndexedHealthChecks

 // Verify that we are in sync
-retry.Run("", t, func(r *retry.R) {
+retry.Run(t, func(r *retry.R) {
 if err := agent.RPC("Health.NodeChecks", &req, &checks); err != nil {
 r.Fatalf("err: %v", err)
 }
@@ -799,7 +799,7 @@ func TestAgentAntiEntropy_Checks(t *testing.T) {
 agent.StartSync()

 // Verify that we are in sync
-retry.Run("", t, func(r *retry.R) {
+retry.Run(t, func(r *retry.R) {
 if err := agent.RPC("Health.NodeChecks", &req, &checks); err != nil {
 r.Fatalf("err: %v", err)
 }
@@ -978,7 +978,7 @@ func TestAgentAntiEntropy_Checks_ACLDeny(t *testing.T) {
 time.Sleep(200 * time.Millisecond)

 // Verify that we are in sync
-retry.Run("", t, func(r *retry.R) {
+retry.Run(t, func(r *retry.R) {
 req := structs.NodeSpecificRequest{
 Datacenter: "dc1",
 Node: agent.config.NodeName,
@@ -1032,7 +1032,7 @@ func TestAgentAntiEntropy_Checks_ACLDeny(t *testing.T) {
 agent.StartSync()
 time.Sleep(200 * time.Millisecond)
 // Verify that we are in sync
-retry.Run("", t, func(r *retry.R) {
+retry.Run(t, func(r *retry.R) {
 req := structs.NodeSpecificRequest{
 Datacenter: "dc1",
 Node: agent.config.NodeName,
@@ -1113,7 +1113,7 @@ func TestAgentAntiEntropy_Check_DeferSync(t *testing.T) {
 Node: agent.config.NodeName,
 }
 var checks structs.IndexedHealthChecks
-retry.Run("", t, func(r *retry.R) {
+retry.Run(t, func(r *retry.R) {
 if err := agent.RPC("Health.NodeChecks", &req, &checks); err != nil {
 r.Fatalf("err: %v", err)
 }
@@ -1141,7 +1141,7 @@ func TestAgentAntiEntropy_Check_DeferSync(t *testing.T) {
 }
 }
 // Wait for a deferred update
-retry.Run("", t, func(r *retry.R) {
+retry.Run(t, func(r *retry.R) {
 if err := agent.RPC("Health.NodeChecks", &req, &checks); err != nil {
 r.Fatal(err)
 }
@@ -1242,7 +1242,7 @@ func TestAgentAntiEntropy_Check_DeferSync(t *testing.T) {
 }
 }
 // Wait for the deferred update.
-retry.Run("", t, func(r *retry.R) {
+retry.Run(t, func(r *retry.R) {
 if err := agent.RPC("Health.NodeChecks", &req, &checks); err != nil {
 r.Fatal(err)
 }
@@ -1292,7 +1292,7 @@ func TestAgentAntiEntropy_NodeInfo(t *testing.T) {
 retry.

 // Wait for the sync
-Run("", t, func(r *retry.R) {
+Run(t, func(r *retry.R) {

 if err := agent.RPC("Catalog.NodeServices", &req, &services); err != nil {
 r.Fatalf("err: %v", err)
@@ -1321,7 +1321,7 @@ func TestAgentAntiEntropy_NodeInfo(t *testing.T) {

 // Wait for the sync - this should have been a sync of just the
 // node info
-Run("", t, func(r *retry.R) {
+Run(t, func(r *retry.R) {

 if err := agent.RPC("Catalog.NodeServices", &req, &services); err != nil {
 r.Fatalf("err: %v", err)
@@ -1514,7 +1514,7 @@ func TestAgent_sendCoordinate(t *testing.T) {
 Datacenter: agent.config.Datacenter,
 }
 var reply structs.IndexedCoordinates
-retry.Run("", t, func(r *retry.R) {
+retry.Run(t, func(r *retry.R) {
 if err := agent.RPC("Coordinate.ListNodes", &req, &reply); err != nil {
 r.Fatalf("err: %s", err)
 }

@@ -451,7 +451,7 @@ func TestOperator_ServerHealth(t *testing.T) {
 if err != nil {
 t.Fatalf("err: %v", err)
 }
-retry.Run("", t, func(r *retry.R) {
+retry.Run(t, func(r *retry.R) {
 resp := httptest.NewRecorder()
 obj, err := srv.OperatorServerHealth(resp, req)
 if err != nil {
@@ -488,7 +488,7 @@ func TestOperator_ServerHealth_Unhealthy(t *testing.T) {
 if err != nil {
 t.Fatalf("err: %v", err)
 }
-retry.Run("", t, func(r *retry.R) {
+retry.Run(t, func(r *retry.R) {
 resp := httptest.NewRecorder()
 obj, err := srv.OperatorServerHealth(resp, req)
 if err != nil {

@@ -175,7 +175,7 @@ func TestFireReceiveEvent(t *testing.T) {
 if err != nil {
 t.Fatalf("err: %v", err)
 }
-retry.Run("", t, func(r *retry.R) {
+retry.Run(t, func(r *retry.R) {
 if got, want := len(agent.UserEvents()), 1; got != want {
 r.Fatalf("got %d events want %d", got, want)
 }

@@ -55,7 +55,7 @@ func TestForceLeaveCommandRun(t *testing.T) {
 if len(m) != 2 {
 t.Fatalf("should have 2 members: %#v", m)
 }
-retry.Run("", t, func(r *retry.R) {
+retry.Run(t, func(r *retry.R) {
 m = a1.agent.LANMembers()
 if got, want := m[1].Status, serf.StatusLeft; got != want {
 r.Fatalf("got status %q want %q", got, want)

@@ -107,7 +107,7 @@ func TestRTTCommand_Run_LAN(t *testing.T) {
 "dogs",
 }
 // Wait for the updates to get flushed to the data store.
-retry.Run("", t, func(r *retry.R) {
+retry.Run(t, func(r *retry.R) {
 code := c.Run(args)
 if code != 0 {
 r.Fatalf("bad: %d: %#v", code, ui.ErrorWriter.String())

@@ -485,7 +485,7 @@ func TestACLEndpoint_ReplicationStatus(t *testing.T) {
 Datacenter: "dc1",
 }

-retry.Run("", t, func(r *retry.R) {
+retry.Run(t, func(r *retry.R) {
 var status structs.ACLReplicationStatus
 err := msgpackrpc.CallWithCodec(codec, "ACL.ReplicationStatus", &getR, &status)
 if err != nil {

@@ -395,7 +395,7 @@ func TestACLReplication(t *testing.T) {
 return nil
 }
 // Wait for the replica to converge.
-retry.Run("", t, func(r *retry.R) {
+retry.Run(t, func(r *retry.R) {
 if err := checkSame(); err != nil {
 r.Fatal(err)
 }
@@ -419,7 +419,7 @@ func TestACLReplication(t *testing.T) {
 }
 }
 // Wait for the replica to converge.
-retry.Run("", t, func(r *retry.R) {
+retry.Run(t, func(r *retry.R) {
 if err := checkSame(); err != nil {
 r.Fatal(err)
 }
@@ -439,7 +439,7 @@ func TestACLReplication(t *testing.T) {
 t.Fatalf("err: %v", err)
 }
 // Wait for the replica to converge.
-retry.Run("", t, func(r *retry.R) {
+retry.Run(t, func(r *retry.R) {
 if err := checkSame(); err != nil {
 r.Fatal(err)
 }

@@ -231,7 +231,7 @@ func TestACL_NonAuthority_NotFound(t *testing.T) {
 if _, err := s2.JoinLAN([]string{addr}); err != nil {
 t.Fatalf("err: %v", err)
 }
-retry.Run("", t, func(r *retry.R) {
+retry.Run(t, func(r *retry.R) {
 if got, want := numPeers(s1), 2; got != want {
 r.Fatalf("got %d peers want %d", got, want)
 }
@@ -281,7 +281,7 @@ func TestACL_NonAuthority_Found(t *testing.T) {
 if _, err := s2.JoinLAN([]string{addr}); err != nil {
 t.Fatalf("err: %v", err)
 }
-retry.Run("", t, func(r *retry.R) {
+retry.Run(t, func(r *retry.R) {
 if got, want := numPeers(s1), 2; got != want {
 r.Fatalf("got %d peers want %d", got, want)
 }
@@ -356,7 +356,7 @@ func TestACL_NonAuthority_Management(t *testing.T) {
 if _, err := s2.JoinLAN([]string{addr}); err != nil {
 t.Fatalf("err: %v", err)
 }
-retry.Run("", t, func(r *retry.R) {
+retry.Run(t, func(r *retry.R) {
 if got, want := numPeers(s1), 2; got != want {
 r.Fatalf("got %d peers want %d", got, want)
 }
@@ -412,7 +412,7 @@ func TestACL_DownPolicy_Deny(t *testing.T) {
 if _, err := s2.JoinLAN([]string{addr}); err != nil {
 t.Fatalf("err: %v", err)
 }
-retry.Run("", t, func(r *retry.R) {
+retry.Run(t, func(r *retry.R) {
 if got, want := numPeers(s1), 2; got != want {
 r.Fatalf("got %d peers want %d", got, want)
 }
@@ -485,7 +485,7 @@ func TestACL_DownPolicy_Allow(t *testing.T) {
 if _, err := s2.JoinLAN([]string{addr}); err != nil {
 t.Fatalf("err: %v", err)
 }
-retry.Run("", t, func(r *retry.R) {
+retry.Run(t, func(r *retry.R) {
 if got, want := numPeers(s1), 2; got != want {
 r.Fatalf("got %d peers want %d", got, want)
 }
@@ -560,7 +560,7 @@ func TestACL_DownPolicy_ExtendCache(t *testing.T) {
 if _, err := s2.JoinLAN([]string{addr}); err != nil {
 t.Fatalf("err: %v", err)
 }
-retry.Run("", t, func(r *retry.R) {
+retry.Run(t, func(r *retry.R) {
 if got, want := numPeers(s1), 2; got != want {
 r.Fatalf("got %d peers want %d", got, want)
 }
@@ -679,7 +679,7 @@ func TestACL_Replication(t *testing.T) {
 t.Fatalf("err: %v", err)
 }
 // Wait for replication to occur.
-retry.Run("", t, func(r *retry.R) {
+retry.Run(t, func(r *retry.R) {
 _, acl, err := s2.fsm.State().ACLGet(nil, id)
 if err != nil {
 r.Fatal(err)

@@ -50,7 +50,7 @@ func testCleanupDeadServer(t *testing.T, raftVersion int) {
 }

 for _, s := range servers {
-retry.Run("", t, func(r *retry.R) {
+retry.Run(t, func(r *retry.R) {
 if got, want := numPeers(s), 3; got != want {
 r.Fatalf("got %d peers want %d", got, want)
 }
@@ -64,7 +64,7 @@ func testCleanupDeadServer(t *testing.T, raftVersion int) {

 // Kill a non-leader server
 s3.Shutdown()
-retry.Run("", t, func(r *retry.R) {
+retry.Run(t, func(r *retry.R) {
 alive := 0
 for _, m := range s1.LANMembers() {
 if m.Status == serf.StatusAlive {
@@ -84,7 +84,7 @@ func testCleanupDeadServer(t *testing.T, raftVersion int) {

 // Make sure the dead server is removed and we're back to 3 total peers
 for _, s := range servers {
-retry.Run("", t, func(r *retry.R) {
+retry.Run(t, func(r *retry.R) {
 if got, want := numPeers(s), 3; got != want {
 r.Fatalf("got %d peers want %d", got, want)
 }
@@ -128,7 +128,7 @@ func TestAutopilot_CleanupDeadServerPeriodic(t *testing.T) {
 }

 for _, s := range servers {
-retry.Run("", t, func(r *retry.R) {
+retry.Run(t, func(r *retry.R) {
 if got, want := numPeers(s), 4; got != want {
 r.Fatalf("got %d peers want %d", got, want)
 }
@@ -140,7 +140,7 @@ func TestAutopilot_CleanupDeadServerPeriodic(t *testing.T) {

 // Should be removed from the peers automatically
 for _, s := range []*Server{s1, s2, s3} {
-retry.Run("", t, func(r *retry.R) {
+retry.Run(t, func(r *retry.R) {
 if got, want := numPeers(s), 3; got != want {
 r.Fatalf("got %d peers want %d", got, want)
 }
@@ -178,7 +178,7 @@ func TestAutopilot_CleanupStaleRaftServer(t *testing.T) {
 }

 for _, s := range servers {
-retry.Run("", t, func(r *retry.R) {
+retry.Run(t, func(r *retry.R) {
 if got, want := numPeers(s), 3; got != want {
 r.Fatalf("got %d peers want %d", got, want)
 }
@@ -203,7 +203,7 @@ func TestAutopilot_CleanupStaleRaftServer(t *testing.T) {

 // Wait for s4 to be removed
 for _, s := range []*Server{s1, s2, s3} {
-retry.Run("", t, func(r *retry.R) {
+retry.Run(t, func(r *retry.R) {
 if got, want := numPeers(s), 3; got != want {
 r.Fatalf("got %d peers want %d", got, want)
 }
@@ -244,7 +244,7 @@ func TestAutopilot_PromoteNonVoter(t *testing.T) {
 // Wait for the new server to be added as a non-voter, but make sure
 // it doesn't get promoted to a voter even after ServerStabilizationTime,
 // because that would result in an even-numbered quorum count.
-Run("", t, func(r *retry.R) {
+Run(t, func(r *retry.R) {

 future := s1.raft.GetConfiguration()
 if err := future.Error(); err != nil {
@@ -282,7 +282,7 @@ func TestAutopilot_PromoteNonVoter(t *testing.T) {
 if _, err := s3.JoinLAN([]string{addr}); err != nil {
 t.Fatalf("err: %v", err)
 }
-retry.Run("", t, func(r *retry.R) {
+retry.Run(t, func(r *retry.R) {
 future := s1.raft.GetConfiguration()
 if err := future.Error(); err != nil {
 r.Fatal(err)

@@ -604,7 +604,7 @@ func TestCatalog_ListNodes(t *testing.T) {
 if err := s1.fsm.State().EnsureNode(1, &structs.Node{Node: "foo", Address: "127.0.0.1"}); err != nil {
 t.Fatalf("err: %v", err)
 }
-retry.Run("", t, func(r *retry.R) {
+retry.Run(t, func(r *retry.R) {
 msgpackrpc.CallWithCodec(codec, "Catalog.ListNodes", &args, &out)
 if got, want := len(out.Nodes), 2; got != want {
 r.Fatalf("got %d nodes want %d", got, want)
@@ -646,7 +646,7 @@ func TestCatalog_ListNodes_NodeMetaFilter(t *testing.T) {
 },
 }
 var out structs.IndexedNodes
-retry.Run("", t, func(r *retry.R) {
+retry.Run(t, func(r *retry.R) {
 msgpackrpc.CallWithCodec(codec, "Catalog.ListNodes", &args, &out)
 if got, want := len(out.Nodes), 1; got != want {
 r.Fatalf("got %d nodes want %d", got, want)
@@ -679,7 +679,7 @@ func TestCatalog_ListNodes_NodeMetaFilter(t *testing.T) {
 retry.

 // Should get an empty list of nodes back
-Run("", t, func(r *retry.R) {
+Run(t, func(r *retry.R) {

 msgpackrpc.CallWithCodec(codec, "Catalog.ListNodes", &args, &out)
 if len(out.Nodes) != 0 {
@@ -892,7 +892,7 @@ func TestCatalog_ListNodes_DistanceSort(t *testing.T) {
 Datacenter: "dc1",
 }
 var out structs.IndexedNodes
-retry.Run("", t, func(r *retry.R) {
+retry.Run(t, func(r *retry.R) {
 msgpackrpc.CallWithCodec(codec, "Catalog.ListNodes", &args, &out)
 if got, want := len(out.Nodes), 5; got != want {
 r.Fatalf("got %d nodes want %d", got, want)
@@ -921,7 +921,7 @@ func TestCatalog_ListNodes_DistanceSort(t *testing.T) {
 Datacenter: "dc1",
 Source: structs.QuerySource{Datacenter: "dc1", Node: "foo"},
 }
-retry.Run("", t, func(r *retry.R) {
+retry.Run(t, func(r *retry.R) {
 msgpackrpc.CallWithCodec(codec, "Catalog.ListNodes", &args, &out)
 if got, want := len(out.Nodes), 5; got != want {
 r.Fatalf("got %d nodes want %d", got, want)

@@ -351,7 +351,7 @@ func TestCoordinate_ListNodes(t *testing.T) {
 t.Fatalf("err: %v", err)
 }
 // Now query back for all the nodes.
-retry.Run("", t, func(r *retry.R) {
+retry.Run(t, func(r *retry.R) {
 arg := structs.DCSpecificRequest{
 Datacenter: "dc1",
 }
@@ -442,7 +442,7 @@ func TestCoordinate_ListNodes_ACLFilter(t *testing.T) {
 // Wait for all the coordinate updates to apply. Since we aren't
 // enforcing version 8 ACLs, this should also allow us to read
 // everything back without a token.
-retry.Run("", t, func(r *retry.R) {
+retry.Run(t, func(r *retry.R) {
 arg := structs.DCSpecificRequest{
 Datacenter: "dc1",
 }

@@ -229,7 +229,7 @@ func TestOperator_ServerHealth(t *testing.T) {
 }

 testrpc.WaitForLeader(t, s1.RPC, "dc1")
-retry.Run("", t, func(r *retry.R) {
+retry.Run(t, func(r *retry.R) {
 arg := structs.DCSpecificRequest{
 Datacenter: "dc1",
 }

@@ -1455,7 +1455,7 @@ func TestPreparedQuery_Execute(t *testing.T) {
 if _, err := s2.JoinWAN([]string{addr}); err != nil {
 t.Fatalf("err: %v", err)
 }
-retry.Run("", t, func(r *retry.R) {
+retry.Run(t, func(r *retry.R) {
 if got, want := len(s1.WANMembers()), 2; got != want {
 r.Fatalf("got %d WAN members want %d", got, want)
 }
@@ -2704,7 +2704,7 @@ func TestPreparedQuery_Wrapper(t *testing.T) {
 if _, err := s2.JoinWAN([]string{addr}); err != nil {
 t.Fatalf("err: %v", err)
 }
-retry.Run("", t, func(r *retry.R) {
+retry.Run(t, func(r *retry.R) {
 if got, want := len(s1.WANMembers()), 2; got != want {
 r.Fatalf("got %d WAN members want %d", got, want)
 }

@@ -157,7 +157,7 @@ func TestServer_JoinLAN(t *testing.T) {
 if _, err := s2.JoinLAN([]string{addr}); err != nil {
 t.Fatalf("err: %v", err)
 }
-retry.Run("", t, func(r *retry.R) {
+retry.Run(t, func(r *retry.R) {
 if got, want := len(s1.LANMembers()), 2; got != want {
 r.Fatalf("got %d s1 LAN members want %d", got, want)
 }
@@ -182,7 +182,7 @@ func TestServer_JoinWAN(t *testing.T) {
 if _, err := s2.JoinWAN([]string{addr}); err != nil {
 t.Fatalf("err: %v", err)
 }
-retry.Run("", t, func(r *retry.R) {
+retry.Run(t, func(r *retry.R) {
 if got, want := len(s1.WANMembers()), 2; got != want {
 r.Fatalf("got %d s1 WAN members want %d", got, want)
 }
@@ -195,7 +195,7 @@ func TestServer_JoinWAN(t *testing.T) {
 if len(s1.router.GetDatacenters()) != 2 {
 t.Fatalf("remote consul missing")
 }
-retry.Run("", t, func(r *retry.R) {
+retry.Run(t, func(r *retry.R) {
 if got, want := len(s2.router.GetDatacenters()), 2; got != want {
 r.Fatalf("got %d data centers want %d", got, want)
 }
@@ -219,7 +219,7 @@ func TestServer_JoinWAN_Flood(t *testing.T) {
 }

 for _, s := range []*Server{s1, s2} {
-retry.Run("", t, func(r *retry.R) {
+retry.Run(t, func(r *retry.R) {
 if got, want := len(s.WANMembers()), 2; got != want {
 r.Fatalf("got %d WAN members want %d", got, want)
 }
@@ -239,7 +239,7 @@ func TestServer_JoinWAN_Flood(t *testing.T) {
 }

 for _, s := range []*Server{s1, s2, s3} {
-retry.Run("", t, func(r *retry.R) {
+retry.Run(t, func(r *retry.R) {
 if got, want := len(s.WANMembers()), 3; got != want {
 r.Fatalf("got %d WAN members want %d", got, want)
 }
@@ -281,7 +281,7 @@ func TestServer_JoinSeparateLanAndWanAddresses(t *testing.T) {
 if _, err := s3.JoinLAN([]string{addrs2}); err != nil {
 t.Fatalf("err: %v", err)
 }
-retry.Run("", t, func(r *retry.R) {
+retry.Run(t, func(r *retry.R) {
 if got, want := len(s1.WANMembers()), 2; got != want {
 r.Fatalf("got %d s1 WAN members want %d", got, want)
 }
@@ -349,7 +349,7 @@ func TestServer_LeaveLeader(t *testing.T) {
 t.Fatalf("err: %v", err)
 }

-retry.Run("", t, func(r *retry.R) {
+retry.Run(t, func(r *retry.R) {
 if got, want := numPeers(s1), 2; got != want {
 r.Fatalf("got %d s1 peers want %d", got, want)
 }
@@ -369,7 +369,7 @@ func TestServer_LeaveLeader(t *testing.T) {
 }

 // Should lose a peer
-retry.Run("", t, func(r *retry.R) {
+retry.Run(t, func(r *retry.R) {
 if got, want := numPeers(s1), 1; got != want {
 r.Fatalf("got %d s1 peers want %d", got, want)
 }
@@ -396,7 +396,7 @@ func TestServer_Leave(t *testing.T) {
 t.Fatalf("err: %v", err)
 }

-retry.Run("", t, func(r *retry.R) {
+retry.Run(t, func(r *retry.R) {
 if got, want := numPeers(s1), 2; got != want {
 r.Fatalf("got %d s1 peers want %d", got, want)
 }
@@ -416,7 +416,7 @@ func TestServer_Leave(t *testing.T) {
 }

 // Should lose a peer
-retry.Run("", t, func(r *retry.R) {
+retry.Run(t, func(r *retry.R) {
 if got, want := numPeers(s1), 1; got != want {
 r.Fatalf("got %d s1 peers want %d", got, want)
 }
@@ -467,7 +467,7 @@ func TestServer_JoinLAN_TLS(t *testing.T) {
 if _, err := s2.JoinLAN([]string{addr}); err != nil {
 t.Fatalf("err: %v", err)
 }
-retry.Run("", t, func(r *retry.R) {
+retry.Run(t, func(r *retry.R) {
 if got, want := len(s1.LANMembers()), 2; got != want {
 r.Fatalf("got %d s1 LAN members want %d", got, want)
 }
@@ -476,7 +476,7 @@ func TestServer_JoinLAN_TLS(t *testing.T) {
 }
 })
 // Verify Raft has established a peer
-retry.Run("", t, func(r *retry.R) {
+retry.Run(t, func(r *retry.R) {
 if got, want := numPeers(s1), 2; got != want {
 r.Fatalf("got %d s1 peers want %d", got, want)
 }
@@ -514,7 +514,7 @@ func TestServer_Expect(t *testing.T) {
 }

 // Should have no peers yet since the bootstrap didn't occur.
-retry.Run("", t, func(r *retry.R) {
+retry.Run(t, func(r *retry.R) {
 if got, want := numPeers(s1), 0; got != want {
 r.Fatalf("got %d s1 peers want %d", got, want)
 }
@@ -529,7 +529,7 @@ func TestServer_Expect(t *testing.T) {
 }

 // Now we have three servers so we should bootstrap.
-retry.Run("", t, func(r *retry.R) {
+retry.Run(t, func(r *retry.R) {
 if got, want := numPeers(s1), 3; got != want {
 r.Fatalf("got %d s1 peers want %d", got, want)
 }
@@ -550,7 +550,7 @@ func TestServer_Expect(t *testing.T) {
 }

 // Wait for the new server to see itself added to the cluster.
-retry.Run("", t, func(r *retry.R) {
+retry.Run(t, func(r *retry.R) {
 if got, want := numPeers(s1), 4; got != want {
 r.Fatalf("got %d s1 peers want %d", got, want)
 }
@@ -602,14 +602,14 @@ func TestServer_BadExpect(t *testing.T) {
 retry.

 // should have no peers yet
-Run("", t, func(r *retry.R) {
+Run(t, func(r *retry.R) {

 p1, _ = s1.numPeers()
 if p1 != 0 {
 r.Fatalf("%d", p1)
 }
 })
-retry.Run("", t, func(r *retry.R) {
+retry.Run(t, func(r *retry.R) {

 p2, _ = s2.numPeers()
 if p2 != 0 {
@@ -626,21 +626,21 @@ func TestServer_BadExpect(t *testing.T) {
 retry.

 // should still have no peers (because s2 is in expect=2 mode)
-Run("", t, func(r *retry.R) {
+Run(t, func(r *retry.R) {

 p1, _ = s1.numPeers()
 if p1 != 0 {
 r.Fatalf("%d", p1)
 }
 })
-retry.Run("", t, func(r *retry.R) {
+retry.Run(t, func(r *retry.R) {

 p2, _ = s2.numPeers()
 if p2 != 0 {
 r.Fatalf("%d", p2)
 }
 })
-retry.Run("", t, func(r *retry.R) {
+retry.Run(t, func(r *retry.R) {

 p3, _ = s3.numPeers()
 if p3 != 0 {
@@ -664,7 +664,7 @@ func TestServer_globalRPCErrors(t *testing.T) {
 dir1, s1 := testServerDC(t, "dc1")
 defer os.RemoveAll(dir1)
 defer s1.Shutdown()
-retry.Run("", t, func(r *retry.R) {
+retry.Run(t, func(r *retry.R) {
 if len(s1.router.GetDatacenters()) != 1 {
 r.Fatal(nil)
 }

@@ -298,7 +298,7 @@ func TestServer_SessionTTL_Failover(t *testing.T) {
 if _, err := s3.JoinLAN([]string{addr}); err != nil {
 t.Fatalf("err: %v", err)
 }
-retry.Run("", t, func(r *retry.R) {
+retry.Run(t, func(r *retry.R) {
 if got, want := numPeers(s1), 3; got != want {
 r.Fatalf("got %d s1 peers want %d", got, want)
 }
@@ -361,7 +361,7 @@ func TestServer_SessionTTL_Failover(t *testing.T) {
 t.Fatalf("session timers should be empty on the shutdown leader")
 }
 // Find the new leader
-retry.Run("", t, func(r *retry.R) {
+retry.Run(t, func(r *retry.R) {

 leader = nil
 for _, s := range servers {

@@ -331,7 +331,7 @@ func TestSnapshot_Forward_Datacenter(t *testing.T) {
 if _, err := s2.JoinWAN([]string{addr}); err != nil {
 t.Fatalf("err: %v", err)
 }
-retry.Run("", t, func(r *retry.R) {
+retry.Run(t, func(r *retry.R) {
 if got, want := len(s1.WANMembers()), 2; got < want {
 r.Fatalf("got %d WAN members want at least %d", got, want)
 }

@@ -74,12 +74,12 @@ func decorate(s string) string {
 return fmt.Sprintf("%s:%d: %s", file, line, s)
 }

-func Run(desc string, t Failer, f func(r *R)) {
-run(OneSec(), desc, t, f)
+func Run(t Failer, f func(r *R)) {
+run(OneSec(), t, f)
 }

-func RunWith(r Retryer, desc string, t Failer, f func(r *R)) {
-run(r, desc, t, f)
+func RunWith(r Retryer, t Failer, f func(r *R)) {
+run(r, t, f)
 }

 func dedup(a []string) string {
@@ -101,10 +101,10 @@ func dedup(a []string) string {
 return string(b.Bytes())
 }

-func run(r Retryer, desc string, t Failer, f func(r *R)) {
+func run(r Retryer, t Failer, f func(r *R)) {
 rr := &R{}
 fail := func() {
-out := desc + "\n" + dedup(rr.output)
+out := dedup(rr.output)
 if out != "" {
 t.Log(out)
 }
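For completeness, a hedged usage sketch of the updated signatures, pairing RunWith with the ThreeTimes retryer that appears earlier in this diff; the surrounding test and the import path are illustrative assumptions. Note that with desc gone, the failure output produced by run is now just the deduplicated messages collected from r, with nothing prepended.

package example

import (
	"testing"

	"github.com/hashicorp/consul/testutil/retry" // assumed import path
)

func TestRunWithCustomRetryer(t *testing.T) {
	// RunWith(Retryer, Failer, func(*R)): retry the closure up to three times.
	retry.RunWith(retry.ThreeTimes(), t, func(r *retry.R) {
		// Hypothetical readiness check; a real test would poll cluster state.
		members := []string{"a1", "a2"}
		if got, want := len(members), 2; got != want {
			r.Fatalf("got %d members want %d", got, want)
		}
	})
}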