From 3d830b36768204b82d646964fa84923aad9f8abc Mon Sep 17 00:00:00 2001 From: Seth Vargo Date: Thu, 23 Mar 2017 16:26:05 -0400 Subject: [PATCH 1/4] Reduce coupling with testing.T This reduces the coupling with testing.T, allowing many of the server's startup-related functions to return an error. This makes them more re-usable. --- testutil/README.md | 28 ++-- testutil/server.go | 287 +++++++------------------------------ testutil/server_methods.go | 237 ++++++++++++++++++++++++++++++ testutil/server_wrapper.go | 96 +++++++++++++ testutil/wait.go | 13 +- 5 files changed, 412 insertions(+), 249 deletions(-) create mode 100644 testutil/server_methods.go create mode 100644 testutil/server_wrapper.go diff --git a/testutil/README.md b/testutil/README.md index 21eb01d2a7..da5d682ca3 100644 --- a/testutil/README.md +++ b/testutil/README.md @@ -25,41 +25,51 @@ import ( "github.com/hashicorp/consul/testutil" ) -func TestMain(t *testing.T) { +func TestFoo_bar(t *testing.T) { // Create a test Consul server - srv1 := testutil.NewTestServer(t) + srv1, err := testutil.NewTestServer() + if err != nil { + t.Fatal(err) + } defer srv1.Stop() // Create a secondary server, passing in configuration // to avoid bootstrapping as we are forming a cluster. - srv2 := testutil.NewTestServerConfig(t, func(c *testutil.TestServerConfig) { + srv2, err := testutil.NewTestServerConfig(t, func(c *testutil.TestServerConfig) { c.Bootstrap = false }) + if err != nil { + t.Fatal(err) + } defer srv2.Stop() // Join the servers together - srv1.JoinLAN(srv2.LANAddr) + srv1.JoinLAN(t, srv2.LANAddr) // Create a test key/value pair - srv1.SetKV("foo", []byte("bar")) + srv1.SetKV(t, "foo", []byte("bar")) // Create lots of test key/value pairs - srv1.PopulateKV(map[string][]byte{ + srv1.PopulateKV(t, map[string][]byte{ "bar": []byte("123"), "baz": []byte("456"), }) // Create a service - srv1.AddService("redis", structs.HealthPassing, []string{"master"}) + srv1.AddService(t, "redis", structs.HealthPassing, []string{"master"}) // Create a service check - srv1.AddCheck("service:redis", "redis", structs.HealthPassing) + srv1.AddCheck(t, "service:redis", "redis", structs.HealthPassing) // Create a node check - srv1.AddCheck("mem", "", structs.HealthCritical) + srv1.AddCheck(t, "mem", "", structs.HealthCritical) // The HTTPAddr field contains the address of the Consul // API on the new test server instance. println(srv1.HTTPAddr) + + // All functions also have a wrapper method to limit the passing of "t" + wrap := srv1.Wrap(t) + wrap.SetKV("foo", []byte("bar")) } ``` diff --git a/testutil/server.go b/testutil/server.go index ad350c01f0..00bdc08cfd 100644 --- a/testutil/server.go +++ b/testutil/server.go @@ -12,9 +12,7 @@ package testutil // otherwise cause an import cycle. import ( - "bytes" "context" - "encoding/base64" "encoding/json" "fmt" "io" @@ -26,8 +24,8 @@ import ( "strconv" "strings" - "github.com/hashicorp/consul/consul/structs" "github.com/hashicorp/go-cleanhttp" + "github.com/pkg/errors" ) // TestPerformanceConfig configures the performance parameters. @@ -129,15 +127,6 @@ type TestCheck struct { TTL string `json:",omitempty"` } -// TestingT is an interface wrapper around TestingT -type TestingT interface { - Logf(format string, args ...interface{}) - Errorf(format string, args ...interface{}) - Fatalf(format string, args ...interface{}) - Fatal(args ...interface{}) - Skip(args ...interface{}) -} - // TestKVResponse is what we use to decode KV data. 
type TestKVResponse struct { Value string @@ -147,7 +136,6 @@ type TestKVResponse struct { type TestServer struct { cmd *exec.Cmd Config *TestServerConfig - t TestingT HTTPAddr string LANAddr string @@ -158,27 +146,29 @@ type TestServer struct { // NewTestServer is an easy helper method to create a new Consul // test server with the most basic configuration. -func NewTestServer(t TestingT) *TestServer { - return NewTestServerConfig(t, nil) +func NewTestServer() (*TestServer, error) { + return NewTestServerConfig(nil) } -// NewTestServerConfig creates a new TestServer, and makes a call to -// an optional callback function to modify the configuration. -func NewTestServerConfig(t TestingT, cb ServerConfigCallback) *TestServer { +// NewTestServerConfig creates a new TestServer, and makes a call to an optional +// callback function to modify the configuration. If there is an error +// configuring or starting the server, the server will NOT be running when the +// function returns (thus you do not need to stop it). +func NewTestServerConfig(cb ServerConfigCallback) (*TestServer, error) { if path, err := exec.LookPath("consul"); err != nil || path == "" { - t.Fatal("consul not found on $PATH - download and install " + + return nil, fmt.Errorf("consul not found on $PATH - download and install " + "consul or skip this test") } dataDir, err := ioutil.TempDir("", "consul") if err != nil { - t.Fatalf("err: %s", err) + return nil, errors.Wrap(err, "failed creating tempdir") } configFile, err := ioutil.TempFile(dataDir, "config") if err != nil { defer os.RemoveAll(dataDir) - t.Fatalf("err: %s", err) + return nil, errors.Wrap(err, "failed creating temp config") } consulConfig := defaultServerConfig() @@ -190,11 +180,13 @@ func NewTestServerConfig(t TestingT, cb ServerConfigCallback) *TestServer { configContent, err := json.Marshal(consulConfig) if err != nil { - t.Fatalf("err: %s", err) + return nil, errors.Wrap(err, "failed marshaling json") } if _, err := configFile.Write(configContent); err != nil { - t.Fatalf("err: %s", err) + defer configFile.Close() + defer os.RemoveAll(dataDir) + return nil, errors.Wrap(err, "failed writing config content") } configFile.Close() @@ -215,7 +207,7 @@ func NewTestServerConfig(t TestingT, cb ServerConfigCallback) *TestServer { cmd.Stdout = stdout cmd.Stderr = stderr if err := cmd.Start(); err != nil { - t.Fatalf("err: %s", err) + return nil, errors.Wrap(err, "failed starting command") } var httpAddr string @@ -237,7 +229,6 @@ func NewTestServerConfig(t TestingT, cb ServerConfigCallback) *TestServer { server := &TestServer{ Config: consulConfig, cmd: cmd, - t: t, HTTPAddr: httpAddr, LANAddr: fmt.Sprintf("127.0.0.1:%d", consulConfig.Ports.SerfLan), @@ -247,65 +238,77 @@ func NewTestServerConfig(t TestingT, cb ServerConfigCallback) *TestServer { } // Wait for the server to be ready + var startErr error if consulConfig.Bootstrap { - server.waitForLeader() + startErr = server.waitForLeader() } else { - server.waitForAPI() + startErr = server.waitForAPI() + } + if startErr != nil { + defer server.Stop() + return nil, errors.Wrap(err, "failed waiting for server to start") } - return server + return server, nil } // Stop stops the test Consul server, and removes the Consul data // directory once we are done. 
-func (s *TestServer) Stop() { +func (s *TestServer) Stop() error { defer os.RemoveAll(s.Config.DataDir) - if err := s.cmd.Process.Kill(); err != nil { - s.t.Errorf("err: %s", err) + if s.cmd != nil { + if s.cmd.Process != nil { + if err := s.cmd.Process.Kill(); err != nil { + return errors.Wrap(err, "failed to kill consul server") + } + } + + // wait for the process to exit to be sure that the data dir can be + // deleted on all platforms. + return s.cmd.Wait() } - // wait for the process to exit to be sure that the data dir can be - // deleted on all platforms. - s.cmd.Wait() + // There was no process + return nil } // waitForAPI waits for only the agent HTTP endpoint to start // responding. This is an indication that the agent has started, // but will likely return before a leader is elected. -func (s *TestServer) waitForAPI() { - WaitForResult(func() (bool, error) { +func (s *TestServer) waitForAPI() error { + if err := WaitForResult(func() (bool, error) { resp, err := s.HttpClient.Get(s.url("/v1/agent/self")) if err != nil { - return false, err + return false, errors.Wrap(err, "failed http get") } defer resp.Body.Close() if err := s.requireOK(resp); err != nil { - return false, err + return false, errors.Wrap(err, "failed OK response") } return true, nil - }, func(err error) { - defer s.Stop() - s.t.Fatalf("err: %s", err) - }) + }); err != nil { + return errors.Wrap(err, "failed waiting for API") + } + return nil } // waitForLeader waits for the Consul server's HTTP API to become // available, and then waits for a known leader and an index of // 1 or more to be observed to confirm leader election is done. // It then waits to ensure the anti-entropy sync has completed. -func (s *TestServer) waitForLeader() { +func (s *TestServer) waitForLeader() error { var index int64 - WaitForResult(func() (bool, error) { + if err := WaitForResult(func() (bool, error) { // Query the API and check the status code. url := s.url(fmt.Sprintf("/v1/catalog/nodes?index=%d&wait=2s", index)) resp, err := s.HttpClient.Get(url) if err != nil { - return false, err + return false, errors.Wrap(err, "failed http get") } defer resp.Body.Close() if err := s.requireOK(resp); err != nil { - return false, err + return false, errors.Wrap(err, "failed OK response") } // Ensure we have a leader and a node registration. @@ -314,10 +317,10 @@ func (s *TestServer) waitForLeader() { } index, err = strconv.ParseInt(resp.Header.Get("X-Consul-Index"), 10, 64) if err != nil { - return false, fmt.Errorf("Consul index was bad: %v", err) + return false, errors.Wrap(err, "bad consul index") } if index == 0 { - return false, fmt.Errorf("Consul index is 0") + return false, fmt.Errorf("consul index is 0") } // Watch for the anti-entropy sync to finish. @@ -337,192 +340,8 @@ func (s *TestServer) waitForLeader() { return false, fmt.Errorf("No lan tagged addresses") } return true, nil - }, func(err error) { - defer s.Stop() - s.t.Fatalf("err: %s", err) - }) -} - -// url is a helper function which takes a relative URL and -// makes it into a proper URL against the local Consul server. -func (s *TestServer) url(path string) string { - return fmt.Sprintf("http://127.0.0.1:%d%s", s.Config.Ports.HTTP, path) -} - -// requireOK checks the HTTP response code and ensures it is acceptable. 
-func (s *TestServer) requireOK(resp *http.Response) error { - if resp.StatusCode != 200 { - return fmt.Errorf("Bad status code: %d", resp.StatusCode) + }); err != nil { + return errors.Wrap(err, "failed waiting for leader") } return nil } - -// put performs a new HTTP PUT request. -func (s *TestServer) put(path string, body io.Reader) *http.Response { - req, err := http.NewRequest("PUT", s.url(path), body) - if err != nil { - s.t.Fatalf("err: %s", err) - } - resp, err := s.HttpClient.Do(req) - if err != nil { - s.t.Fatalf("err: %s", err) - } - if err := s.requireOK(resp); err != nil { - defer resp.Body.Close() - s.t.Fatal(err) - } - return resp -} - -// get performs a new HTTP GET request. -func (s *TestServer) get(path string) *http.Response { - resp, err := s.HttpClient.Get(s.url(path)) - if err != nil { - s.t.Fatalf("err: %s", err) - } - if err := s.requireOK(resp); err != nil { - defer resp.Body.Close() - s.t.Fatal(err) - } - return resp -} - -// encodePayload returns a new io.Reader wrapping the encoded contents -// of the payload, suitable for passing directly to a new request. -func (s *TestServer) encodePayload(payload interface{}) io.Reader { - var encoded bytes.Buffer - enc := json.NewEncoder(&encoded) - if err := enc.Encode(payload); err != nil { - s.t.Fatalf("err: %s", err) - } - return &encoded -} - -// JoinLAN is used to join nodes within the same datacenter. -func (s *TestServer) JoinLAN(addr string) { - resp := s.get("/v1/agent/join/" + addr) - resp.Body.Close() -} - -// JoinWAN is used to join remote datacenters together. -func (s *TestServer) JoinWAN(addr string) { - resp := s.get("/v1/agent/join/" + addr + "?wan=1") - resp.Body.Close() -} - -// SetKV sets an individual key in the K/V store. -func (s *TestServer) SetKV(key string, val []byte) { - resp := s.put("/v1/kv/"+key, bytes.NewBuffer(val)) - resp.Body.Close() -} - -// GetKV retrieves a single key and returns its value -func (s *TestServer) GetKV(key string) []byte { - resp := s.get("/v1/kv/" + key) - defer resp.Body.Close() - - raw, err := ioutil.ReadAll(resp.Body) - if err != nil { - s.t.Fatalf("err: %s", err) - } - - var result []*TestKVResponse - if err := json.Unmarshal(raw, &result); err != nil { - s.t.Fatalf("err: %s", err) - } - if len(result) < 1 { - s.t.Fatalf("key does not exist: %s", key) - } - - v, err := base64.StdEncoding.DecodeString(result[0].Value) - if err != nil { - s.t.Fatalf("err: %s", err) - } - - return v -} - -// PopulateKV fills the Consul KV with data from a generic map. -func (s *TestServer) PopulateKV(data map[string][]byte) { - for k, v := range data { - s.SetKV(k, v) - } -} - -// ListKV returns a list of keys present in the KV store. This will list all -// keys under the given prefix recursively and return them as a slice. -func (s *TestServer) ListKV(prefix string) []string { - resp := s.get("/v1/kv/" + prefix + "?keys") - defer resp.Body.Close() - - raw, err := ioutil.ReadAll(resp.Body) - if err != nil { - s.t.Fatalf("err: %s", err) - } - - var result []string - if err := json.Unmarshal(raw, &result); err != nil { - s.t.Fatalf("err: %s", err) - } - return result -} - -// AddService adds a new service to the Consul instance. It also -// automatically adds a health check with the given status, which -// can be one of "passing", "warning", or "critical". 
-func (s *TestServer) AddService(name, status string, tags []string) { - svc := &TestService{ - Name: name, - Tags: tags, - } - payload := s.encodePayload(svc) - s.put("/v1/agent/service/register", payload) - - chkName := "service:" + name - chk := &TestCheck{ - Name: chkName, - ServiceID: name, - TTL: "10m", - } - payload = s.encodePayload(chk) - s.put("/v1/agent/check/register", payload) - - switch status { - case structs.HealthPassing: - s.put("/v1/agent/check/pass/"+chkName, nil) - case structs.HealthWarning: - s.put("/v1/agent/check/warn/"+chkName, nil) - case structs.HealthCritical: - s.put("/v1/agent/check/fail/"+chkName, nil) - default: - s.t.Fatalf("Unrecognized status: %s", status) - } -} - -// AddCheck adds a check to the Consul instance. If the serviceID is -// left empty (""), then the check will be associated with the node. -// The check status may be "passing", "warning", or "critical". -func (s *TestServer) AddCheck(name, serviceID, status string) { - chk := &TestCheck{ - ID: name, - Name: name, - TTL: "10m", - } - if serviceID != "" { - chk.ServiceID = serviceID - } - - payload := s.encodePayload(chk) - s.put("/v1/agent/check/register", payload) - - switch status { - case structs.HealthPassing: - s.put("/v1/agent/check/pass/"+name, nil) - case structs.HealthWarning: - s.put("/v1/agent/check/warn/"+name, nil) - case structs.HealthCritical: - s.put("/v1/agent/check/fail/"+name, nil) - default: - s.t.Fatalf("Unrecognized status: %s", status) - } -} diff --git a/testutil/server_methods.go b/testutil/server_methods.go new file mode 100644 index 0000000000..3c74e140cb --- /dev/null +++ b/testutil/server_methods.go @@ -0,0 +1,237 @@ +package testutil + +import ( + "bytes" + "encoding/base64" + "encoding/json" + "fmt" + "io" + "io/ioutil" + "log" + "net/http" + "testing" + + "github.com/hashicorp/consul/consul/structs" + "github.com/pkg/errors" +) + +// JoinLAN is used to join local datacenters together. +func (s *TestServer) JoinLAN(t *testing.T, addr string) { + resp := s.get(t, "/v1/agent/join/"+addr) + defer resp.Body.Close() +} + +// JoinWAN is used to join remote datacenters together. +func (s *TestServer) JoinWAN(t *testing.T, addr string) { + resp := s.get(t, "/v1/agent/join/"+addr+"?wan=1") + resp.Body.Close() +} + +// SetKV sets an individual key in the K/V store. +func (s *TestServer) SetKV(t *testing.T, key string, val []byte) { + resp := s.put(t, "/v1/kv/"+key, bytes.NewBuffer(val)) + resp.Body.Close() +} + +// SetKVString sets an individual key in the K/V store, but accepts a string +// instead of []byte. +func (s *TestServer) SetKVString(t *testing.T, key string, val string) { + resp := s.put(t, "/v1/kv/"+key, bytes.NewBufferString(val)) + resp.Body.Close() +} + +// GetKV retrieves a single key and returns its value +func (s *TestServer) GetKV(t *testing.T, key string) []byte { + resp := s.get(t, "/v1/kv/"+key) + defer resp.Body.Close() + + raw, err := ioutil.ReadAll(resp.Body) + if err != nil { + t.Fatalf("failed to read body: %s", err) + } + + var result []*TestKVResponse + if err := json.Unmarshal(raw, &result); err != nil { + t.Fatalf("failed to unmarshal: %s", err) + } + if len(result) < 1 { + t.Fatalf("key does not exist: %s", key) + } + + v, err := base64.StdEncoding.DecodeString(result[0].Value) + if err != nil { + t.Fatalf("failed to base64 decode: %s", err) + } + + return v +} + +// GetKVString retrieves a value from the store, but returns as a string instead +// of []byte. 
+func (s *TestServer) GetKVString(t *testing.T, key string) string { + return string(s.GetKV(t, key)) +} + +// PopulateKV fills the Consul KV with data from a generic map. +func (s *TestServer) PopulateKV(t *testing.T, data map[string][]byte) { + for k, v := range data { + s.SetKV(t, k, v) + } +} + +// ListKV returns a list of keys present in the KV store. This will list all +// keys under the given prefix recursively and return them as a slice. +func (s *TestServer) ListKV(t *testing.T, prefix string) []string { + resp := s.get(t, "/v1/kv/"+prefix+"?keys") + defer resp.Body.Close() + + raw, err := ioutil.ReadAll(resp.Body) + if err != nil { + t.Fatalf("failed to read body: %s", err) + } + + var result []string + if err := json.Unmarshal(raw, &result); err != nil { + t.Fatalf("failed to unmarshal: %s", err) + } + return result +} + +// AddService adds a new service to the Consul instance. It also +// automatically adds a health check with the given status, which +// can be one of "passing", "warning", or "critical". +func (s *TestServer) AddService(t *testing.T, name, status string, tags []string) { + svc := &TestService{ + Name: name, + Tags: tags, + } + payload, err := s.encodePayload(svc) + if err != nil { + t.Fatal(err) + } + s.put(t, "/v1/agent/service/register", payload) + + chkName := "service:" + name + chk := &TestCheck{ + Name: chkName, + ServiceID: name, + TTL: "10m", + } + payload, err = s.encodePayload(chk) + if err != nil { + t.Fatal(err) + } + s.put(t, "/v1/agent/check/register", payload) + + switch status { + case structs.HealthPassing: + s.put(t, "/v1/agent/check/pass/"+chkName, nil) + case structs.HealthWarning: + s.put(t, "/v1/agent/check/warn/"+chkName, nil) + case structs.HealthCritical: + s.put(t, "/v1/agent/check/fail/"+chkName, nil) + default: + t.Fatalf("Unrecognized status: %s", status) + } +} + +// AddCheck adds a check to the Consul instance. If the serviceID is +// left empty (""), then the check will be associated with the node. +// The check status may be "passing", "warning", or "critical". +func (s *TestServer) AddCheck(t *testing.T, name, serviceID, status string) { + chk := &TestCheck{ + ID: name, + Name: name, + TTL: "10m", + } + if serviceID != "" { + chk.ServiceID = serviceID + } + + payload, err := s.encodePayload(chk) + if err != nil { + t.Fatal(err) + } + s.put(t, "/v1/agent/check/register", payload) + + switch status { + case structs.HealthPassing: + s.put(t, "/v1/agent/check/pass/"+name, nil) + case structs.HealthWarning: + s.put(t, "/v1/agent/check/warn/"+name, nil) + case structs.HealthCritical: + s.put(t, "/v1/agent/check/fail/"+name, nil) + default: + t.Fatalf("Unrecognized status: %s", status) + } +} + +// put performs a new HTTP PUT request. +func (s *TestServer) put(t *testing.T, path string, body io.Reader) *http.Response { + req, err := http.NewRequest("PUT", s.url(path), body) + if err != nil { + t.Fatalf("failed to create PUT request: %s", err) + } + resp, err := s.HttpClient.Do(req) + if err != nil { + t.Fatalf("failed to make PUT request: %s", err) + } + if err := s.requireOK(resp); err != nil { + defer resp.Body.Close() + t.Fatalf("not OK PUT: %s", err) + } + return resp +} + +// get performs a new HTTP GET request. 
+func (s *TestServer) get(t *testing.T, path string) *http.Response { + resp, err := s.HttpClient.Get(s.url(path)) + if err != nil { + t.Fatalf("failed to create GET request: %s", err) + } + if err := s.requireOK(resp); err != nil { + defer resp.Body.Close() + t.Fatalf("not OK GET: %s", err) + } + return resp +} + +// encodePayload returns a new io.Reader wrapping the encoded contents +// of the payload, suitable for passing directly to a new request. +func (s *TestServer) encodePayload(payload interface{}) (io.Reader, error) { + var encoded bytes.Buffer + enc := json.NewEncoder(&encoded) + if err := enc.Encode(payload); err != nil { + return nil, errors.Wrap(err, "failed to encode payload") + } + return &encoded, nil +} + +// url is a helper function which takes a relative URL and +// makes it into a proper URL against the local Consul server. +func (s *TestServer) url(path string) string { + if s == nil { + log.Fatal("s is nil") + } + if s.Config == nil { + log.Fatal("s.Config is nil") + } + if s.Config.Ports == nil { + log.Fatal("s.Config.Ports is nil") + } + if s.Config.Ports.HTTP == 0 { + log.Fatal("s.Config.Ports.HTTP is 0") + } + if path == "" { + log.Fatal("path is empty") + } + return fmt.Sprintf("http://127.0.0.1:%d%s", s.Config.Ports.HTTP, path) +} + +// requireOK checks the HTTP response code and ensures it is acceptable. +func (s *TestServer) requireOK(resp *http.Response) error { + if resp.StatusCode != 200 { + return fmt.Errorf("Bad status code: %d", resp.StatusCode) + } + return nil +} diff --git a/testutil/server_wrapper.go b/testutil/server_wrapper.go new file mode 100644 index 0000000000..0402f17a22 --- /dev/null +++ b/testutil/server_wrapper.go @@ -0,0 +1,96 @@ +package testutil + +import ( + "testing" +) + +type WrappedServer struct { + s *TestServer + t *testing.T +} + +// Wrap wraps the test server in a `testing.t` for convenience. +// +// For example, the following code snippets are equivalent. +// +// server.JoinLAN(t, "1.2.3.4") +// server.Wrap(t).JoinLAN("1.2.3.4") +// +// This is useful when you are calling multiple functions and save the wrapped +// value as another variable to reduce the inclusion of "t". 
+func (s *TestServer) Wrap(t *testing.T) *WrappedServer { + return &WrappedServer{ + s: s, + t: t, + } +} + +// See Also +// +// TestServer.JoinLAN() +func (w *WrappedServer) JoinLAN(addr string) { + w.s.JoinLAN(w.t, addr) +} + +// See Also +// +// TestServer.JoinWAN() +func (w *WrappedServer) JoinWAN(addr string) { + w.s.JoinWAN(w.t, addr) +} + +// See Also +// +// TestServer.SetKV() +func (w *WrappedServer) SetKV(key string, val []byte) { + w.s.SetKV(w.t, key, val) +} + +// See Also +// +// TestServer.SetKVString() +func (w *WrappedServer) SetKVString(key string, val string) { + w.s.SetKVString(w.t, key, val) +} + +// See Also +// +// TestServer.GetKV() +func (w *WrappedServer) GetKV(key string) []byte { + return w.s.GetKV(w.t, key) +} + +// See Also +// +// TestServer.GetKVString() +func (w *WrappedServer) GetKVString(key string) string { + return w.s.GetKVString(w.t, key) +} + +// See Also +// +// TestServer.PopulateKV() +func (w *WrappedServer) PopulateKV(data map[string][]byte) { + w.s.PopulateKV(w.t, data) +} + +// See Also +// +// TestServer.ListKV() +func (w *WrappedServer) ListKV(prefix string) []string { + return w.s.ListKV(w.t, prefix) +} + +// See Also +// +// TestServer.AddService() +func (w *WrappedServer) AddService(name, status string, tags []string) { + w.s.AddService(w.t, name, status, tags) +} + +// See Also +// +// TestServer.AddCheck() +func (w *WrappedServer) AddCheck(name, serviceID, status string) { + w.s.AddCheck(w.t, name, serviceID, status) +} diff --git a/testutil/wait.go b/testutil/wait.go index bd240796ff..7ecb74f180 100644 --- a/testutil/wait.go +++ b/testutil/wait.go @@ -16,7 +16,7 @@ const ( maxWait = 100 * time.Millisecond ) -func WaitForResult(try testFn, fail errorFn) { +func WaitForResult(try testFn) error { var err error wait := baseWait for retries := 100; retries > 0; retries-- { @@ -24,7 +24,7 @@ func WaitForResult(try testFn, fail errorFn) { success, err = try() if success { time.Sleep(25 * time.Millisecond) - return + return nil } time.Sleep(wait) @@ -33,14 +33,15 @@ func WaitForResult(try testFn, fail errorFn) { wait = maxWait } } - fail(err) + + return err } type rpcFn func(string, interface{}, interface{}) error func WaitForLeader(t *testing.T, rpc rpcFn, dc string) structs.IndexedNodes { var out structs.IndexedNodes - WaitForResult(func() (bool, error) { + if err := WaitForResult(func() (bool, error) { // Ensure we have a leader and a node registration. 
args := &structs.DCSpecificRequest{ Datacenter: dc, @@ -55,8 +56,8 @@ func WaitForLeader(t *testing.T, rpc rpcFn, dc string) structs.IndexedNodes { return false, fmt.Errorf("Consul index is 0") } return true, nil - }, func(err error) { + }); err != nil { t.Fatalf("failed to find leader: %v", err) - }) + } return out } From 592de8919194a67439930458c6ad44de3780f12e Mon Sep 17 00:00:00 2001 From: Seth Vargo Date: Thu, 23 Mar 2017 17:26:12 -0400 Subject: [PATCH 2/4] Add pkg/errors --- vendor/github.com/pkg/errors/LICENSE | 23 ++ vendor/github.com/pkg/errors/README.md | 52 +++++ vendor/github.com/pkg/errors/appveyor.yml | 32 +++ vendor/github.com/pkg/errors/errors.go | 269 ++++++++++++++++++++++ vendor/github.com/pkg/errors/stack.go | 178 ++++++++++++++ vendor/vendor.json | 7 + 6 files changed, 561 insertions(+) create mode 100644 vendor/github.com/pkg/errors/LICENSE create mode 100644 vendor/github.com/pkg/errors/README.md create mode 100644 vendor/github.com/pkg/errors/appveyor.yml create mode 100644 vendor/github.com/pkg/errors/errors.go create mode 100644 vendor/github.com/pkg/errors/stack.go diff --git a/vendor/github.com/pkg/errors/LICENSE b/vendor/github.com/pkg/errors/LICENSE new file mode 100644 index 0000000000..835ba3e755 --- /dev/null +++ b/vendor/github.com/pkg/errors/LICENSE @@ -0,0 +1,23 @@ +Copyright (c) 2015, Dave Cheney +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + +* Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/pkg/errors/README.md b/vendor/github.com/pkg/errors/README.md new file mode 100644 index 0000000000..273db3c98a --- /dev/null +++ b/vendor/github.com/pkg/errors/README.md @@ -0,0 +1,52 @@ +# errors [![Travis-CI](https://travis-ci.org/pkg/errors.svg)](https://travis-ci.org/pkg/errors) [![AppVeyor](https://ci.appveyor.com/api/projects/status/b98mptawhudj53ep/branch/master?svg=true)](https://ci.appveyor.com/project/davecheney/errors/branch/master) [![GoDoc](https://godoc.org/github.com/pkg/errors?status.svg)](http://godoc.org/github.com/pkg/errors) [![Report card](https://goreportcard.com/badge/github.com/pkg/errors)](https://goreportcard.com/report/github.com/pkg/errors) + +Package errors provides simple error handling primitives. 
+ +`go get github.com/pkg/errors` + +The traditional error handling idiom in Go is roughly akin to +```go +if err != nil { + return err +} +``` +which applied recursively up the call stack results in error reports without context or debugging information. The errors package allows programmers to add context to the failure path in their code in a way that does not destroy the original value of the error. + +## Adding context to an error + +The errors.Wrap function returns a new error that adds context to the original error. For example +```go +_, err := ioutil.ReadAll(r) +if err != nil { + return errors.Wrap(err, "read failed") +} +``` +## Retrieving the cause of an error + +Using `errors.Wrap` constructs a stack of errors, adding context to the preceding error. Depending on the nature of the error it may be necessary to reverse the operation of errors.Wrap to retrieve the original error for inspection. Any error value which implements this interface can be inspected by `errors.Cause`. +```go +type causer interface { + Cause() error +} +``` +`errors.Cause` will recursively retrieve the topmost error which does not implement `causer`, which is assumed to be the original cause. For example: +```go +switch err := errors.Cause(err).(type) { +case *MyError: + // handle specifically +default: + // unknown error +} +``` + +[Read the package documentation for more information](https://godoc.org/github.com/pkg/errors). + +## Contributing + +We welcome pull requests, bug fixes and issue reports. With that said, the bar for adding new symbols to this package is intentionally set high. + +Before proposing a change, please discuss your change by raising an issue. + +## Licence + +BSD-2-Clause diff --git a/vendor/github.com/pkg/errors/appveyor.yml b/vendor/github.com/pkg/errors/appveyor.yml new file mode 100644 index 0000000000..a932eade02 --- /dev/null +++ b/vendor/github.com/pkg/errors/appveyor.yml @@ -0,0 +1,32 @@ +version: build-{build}.{branch} + +clone_folder: C:\gopath\src\github.com\pkg\errors +shallow_clone: true # for startup speed + +environment: + GOPATH: C:\gopath + +platform: + - x64 + +# http://www.appveyor.com/docs/installed-software +install: + # some helpful output for debugging builds + - go version + - go env + # pre-installed MinGW at C:\MinGW is 32bit only + # but MSYS2 at C:\msys64 has mingw64 + - set PATH=C:\msys64\mingw64\bin;%PATH% + - gcc --version + - g++ --version + +build_script: + - go install -v ./... + +test_script: + - set PATH=C:\gopath\bin;%PATH% + - go test -v ./... + +#artifacts: +# - path: '%GOPATH%\bin\*.exe' +deploy: off diff --git a/vendor/github.com/pkg/errors/errors.go b/vendor/github.com/pkg/errors/errors.go new file mode 100644 index 0000000000..842ee80456 --- /dev/null +++ b/vendor/github.com/pkg/errors/errors.go @@ -0,0 +1,269 @@ +// Package errors provides simple error handling primitives. +// +// The traditional error handling idiom in Go is roughly akin to +// +// if err != nil { +// return err +// } +// +// which applied recursively up the call stack results in error reports +// without context or debugging information. The errors package allows +// programmers to add context to the failure path in their code in a way +// that does not destroy the original value of the error. +// +// Adding context to an error +// +// The errors.Wrap function returns a new error that adds context to the +// original error by recording a stack trace at the point Wrap is called, +// and the supplied message. 
For example +// +// _, err := ioutil.ReadAll(r) +// if err != nil { +// return errors.Wrap(err, "read failed") +// } +// +// If additional control is required the errors.WithStack and errors.WithMessage +// functions destructure errors.Wrap into its component operations of annotating +// an error with a stack trace and an a message, respectively. +// +// Retrieving the cause of an error +// +// Using errors.Wrap constructs a stack of errors, adding context to the +// preceding error. Depending on the nature of the error it may be necessary +// to reverse the operation of errors.Wrap to retrieve the original error +// for inspection. Any error value which implements this interface +// +// type causer interface { +// Cause() error +// } +// +// can be inspected by errors.Cause. errors.Cause will recursively retrieve +// the topmost error which does not implement causer, which is assumed to be +// the original cause. For example: +// +// switch err := errors.Cause(err).(type) { +// case *MyError: +// // handle specifically +// default: +// // unknown error +// } +// +// causer interface is not exported by this package, but is considered a part +// of stable public API. +// +// Formatted printing of errors +// +// All error values returned from this package implement fmt.Formatter and can +// be formatted by the fmt package. The following verbs are supported +// +// %s print the error. If the error has a Cause it will be +// printed recursively +// %v see %s +// %+v extended format. Each Frame of the error's StackTrace will +// be printed in detail. +// +// Retrieving the stack trace of an error or wrapper +// +// New, Errorf, Wrap, and Wrapf record a stack trace at the point they are +// invoked. This information can be retrieved with the following interface. +// +// type stackTracer interface { +// StackTrace() errors.StackTrace +// } +// +// Where errors.StackTrace is defined as +// +// type StackTrace []Frame +// +// The Frame type represents a call site in the stack trace. Frame supports +// the fmt.Formatter interface that can be used for printing information about +// the stack trace of this error. For example: +// +// if err, ok := err.(stackTracer); ok { +// for _, f := range err.StackTrace() { +// fmt.Printf("%+s:%d", f) +// } +// } +// +// stackTracer interface is not exported by this package, but is considered a part +// of stable public API. +// +// See the documentation for Frame.Format for more details. +package errors + +import ( + "fmt" + "io" +) + +// New returns an error with the supplied message. +// New also records the stack trace at the point it was called. +func New(message string) error { + return &fundamental{ + msg: message, + stack: callers(), + } +} + +// Errorf formats according to a format specifier and returns the string +// as a value that satisfies error. +// Errorf also records the stack trace at the point it was called. +func Errorf(format string, args ...interface{}) error { + return &fundamental{ + msg: fmt.Sprintf(format, args...), + stack: callers(), + } +} + +// fundamental is an error that has a message and a stack, but no caller. 
+type fundamental struct { + msg string + *stack +} + +func (f *fundamental) Error() string { return f.msg } + +func (f *fundamental) Format(s fmt.State, verb rune) { + switch verb { + case 'v': + if s.Flag('+') { + io.WriteString(s, f.msg) + f.stack.Format(s, verb) + return + } + fallthrough + case 's': + io.WriteString(s, f.msg) + case 'q': + fmt.Fprintf(s, "%q", f.msg) + } +} + +// WithStack annotates err with a stack trace at the point WithStack was called. +// If err is nil, WithStack returns nil. +func WithStack(err error) error { + if err == nil { + return nil + } + return &withStack{ + err, + callers(), + } +} + +type withStack struct { + error + *stack +} + +func (w *withStack) Cause() error { return w.error } + +func (w *withStack) Format(s fmt.State, verb rune) { + switch verb { + case 'v': + if s.Flag('+') { + fmt.Fprintf(s, "%+v", w.Cause()) + w.stack.Format(s, verb) + return + } + fallthrough + case 's': + io.WriteString(s, w.Error()) + case 'q': + fmt.Fprintf(s, "%q", w.Error()) + } +} + +// Wrap returns an error annotating err with a stack trace +// at the point Wrap is called, and the supplied message. +// If err is nil, Wrap returns nil. +func Wrap(err error, message string) error { + if err == nil { + return nil + } + err = &withMessage{ + cause: err, + msg: message, + } + return &withStack{ + err, + callers(), + } +} + +// Wrapf returns an error annotating err with a stack trace +// at the point Wrapf is call, and the format specifier. +// If err is nil, Wrapf returns nil. +func Wrapf(err error, format string, args ...interface{}) error { + if err == nil { + return nil + } + err = &withMessage{ + cause: err, + msg: fmt.Sprintf(format, args...), + } + return &withStack{ + err, + callers(), + } +} + +// WithMessage annotates err with a new message. +// If err is nil, WithMessage returns nil. +func WithMessage(err error, message string) error { + if err == nil { + return nil + } + return &withMessage{ + cause: err, + msg: message, + } +} + +type withMessage struct { + cause error + msg string +} + +func (w *withMessage) Error() string { return w.msg + ": " + w.cause.Error() } +func (w *withMessage) Cause() error { return w.cause } + +func (w *withMessage) Format(s fmt.State, verb rune) { + switch verb { + case 'v': + if s.Flag('+') { + fmt.Fprintf(s, "%+v\n", w.Cause()) + io.WriteString(s, w.msg) + return + } + fallthrough + case 's', 'q': + io.WriteString(s, w.Error()) + } +} + +// Cause returns the underlying cause of the error, if possible. +// An error value has a cause if it implements the following +// interface: +// +// type causer interface { +// Cause() error +// } +// +// If the error does not implement Cause, the original error will +// be returned. If the error is nil, nil will be returned without further +// investigation. +func Cause(err error) error { + type causer interface { + Cause() error + } + + for err != nil { + cause, ok := err.(causer) + if !ok { + break + } + err = cause.Cause() + } + return err +} diff --git a/vendor/github.com/pkg/errors/stack.go b/vendor/github.com/pkg/errors/stack.go new file mode 100644 index 0000000000..6b1f2891a5 --- /dev/null +++ b/vendor/github.com/pkg/errors/stack.go @@ -0,0 +1,178 @@ +package errors + +import ( + "fmt" + "io" + "path" + "runtime" + "strings" +) + +// Frame represents a program counter inside a stack frame. +type Frame uintptr + +// pc returns the program counter for this frame; +// multiple frames may have the same PC value. 
+func (f Frame) pc() uintptr { return uintptr(f) - 1 } + +// file returns the full path to the file that contains the +// function for this Frame's pc. +func (f Frame) file() string { + fn := runtime.FuncForPC(f.pc()) + if fn == nil { + return "unknown" + } + file, _ := fn.FileLine(f.pc()) + return file +} + +// line returns the line number of source code of the +// function for this Frame's pc. +func (f Frame) line() int { + fn := runtime.FuncForPC(f.pc()) + if fn == nil { + return 0 + } + _, line := fn.FileLine(f.pc()) + return line +} + +// Format formats the frame according to the fmt.Formatter interface. +// +// %s source file +// %d source line +// %n function name +// %v equivalent to %s:%d +// +// Format accepts flags that alter the printing of some verbs, as follows: +// +// %+s path of source file relative to the compile time GOPATH +// %+v equivalent to %+s:%d +func (f Frame) Format(s fmt.State, verb rune) { + switch verb { + case 's': + switch { + case s.Flag('+'): + pc := f.pc() + fn := runtime.FuncForPC(pc) + if fn == nil { + io.WriteString(s, "unknown") + } else { + file, _ := fn.FileLine(pc) + fmt.Fprintf(s, "%s\n\t%s", fn.Name(), file) + } + default: + io.WriteString(s, path.Base(f.file())) + } + case 'd': + fmt.Fprintf(s, "%d", f.line()) + case 'n': + name := runtime.FuncForPC(f.pc()).Name() + io.WriteString(s, funcname(name)) + case 'v': + f.Format(s, 's') + io.WriteString(s, ":") + f.Format(s, 'd') + } +} + +// StackTrace is stack of Frames from innermost (newest) to outermost (oldest). +type StackTrace []Frame + +func (st StackTrace) Format(s fmt.State, verb rune) { + switch verb { + case 'v': + switch { + case s.Flag('+'): + for _, f := range st { + fmt.Fprintf(s, "\n%+v", f) + } + case s.Flag('#'): + fmt.Fprintf(s, "%#v", []Frame(st)) + default: + fmt.Fprintf(s, "%v", []Frame(st)) + } + case 's': + fmt.Fprintf(s, "%s", []Frame(st)) + } +} + +// stack represents a stack of program counters. +type stack []uintptr + +func (s *stack) Format(st fmt.State, verb rune) { + switch verb { + case 'v': + switch { + case st.Flag('+'): + for _, pc := range *s { + f := Frame(pc) + fmt.Fprintf(st, "\n%+v", f) + } + } + } +} + +func (s *stack) StackTrace() StackTrace { + f := make([]Frame, len(*s)) + for i := 0; i < len(f); i++ { + f[i] = Frame((*s)[i]) + } + return f +} + +func callers() *stack { + const depth = 32 + var pcs [depth]uintptr + n := runtime.Callers(3, pcs[:]) + var st stack = pcs[0:n] + return &st +} + +// funcname removes the path prefix component of a function's name reported by func.Name(). +func funcname(name string) string { + i := strings.LastIndex(name, "/") + name = name[i+1:] + i = strings.Index(name, ".") + return name[i+1:] +} + +func trimGOPATH(name, file string) string { + // Here we want to get the source file path relative to the compile time + // GOPATH. As of Go 1.6.x there is no direct way to know the compiled + // GOPATH at runtime, but we can infer the number of path segments in the + // GOPATH. We note that fn.Name() returns the function name qualified by + // the import path, which does not include the GOPATH. Thus we can trim + // segments from the beginning of the file path until the number of path + // separators remaining is one more than the number of path separators in + // the function name. 
For example, given: + // + // GOPATH /home/user + // file /home/user/src/pkg/sub/file.go + // fn.Name() pkg/sub.Type.Method + // + // We want to produce: + // + // pkg/sub/file.go + // + // From this we can easily see that fn.Name() has one less path separator + // than our desired output. We count separators from the end of the file + // path until it finds two more than in the function name and then move + // one character forward to preserve the initial path segment without a + // leading separator. + const sep = "/" + goal := strings.Count(name, sep) + 2 + i := len(file) + for n := 0; n < goal; n++ { + i = strings.LastIndex(file[:i], sep) + if i == -1 { + // not enough separators found, set i so that the slice expression + // below leaves file unmodified + i = -len(sep) + break + } + } + // get back to 0 or trim the leading separator + file = file[i+len(sep):] + return file +} diff --git a/vendor/vendor.json b/vendor/vendor.json index 27151b1c4c..d9b4ca3ead 100644 --- a/vendor/vendor.json +++ b/vendor/vendor.json @@ -699,6 +699,13 @@ "revision": "0f764571384a3ff16c6fed25ace5b7c83f0f0379", "revisionTime": "2016-08-09T12:22:04Z" }, + { + "checksumSHA1": "ynJSWoF6v+3zMnh9R0QmmG6iGV8=", + "path": "github.com/pkg/errors", + "revision": "ff09b135c25aae272398c51a07235b90a75aa4f0", + "revisionTime": "2017-03-16T20:15:38Z", + "tree": true + }, { "checksumSHA1": "ExnVEVNT8APpFTm26cUb5T09yR4=", "comment": "v2.0.1-8-g983d3a5", From 1dc1f72fb6beac44ff15963476ff3e9da9629606 Mon Sep 17 00:00:00 2001 From: Seth Vargo Date: Thu, 23 Mar 2017 17:48:45 -0400 Subject: [PATCH 3/4] Add RPC --- testutil/server.go | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/testutil/server.go b/testutil/server.go index 00bdc08cfd..befc1d5ae7 100644 --- a/testutil/server.go +++ b/testutil/server.go @@ -41,6 +41,9 @@ type TestPortConfig struct { SerfLan int `json:"serf_lan,omitempty"` SerfWan int `json:"serf_wan,omitempty"` Server int `json:"server,omitempty"` + + // Deprecated + RPC int `json:"rpc,omitempty"` } // TestAddressConfig contains the bind addresses for various @@ -96,6 +99,7 @@ func defaultServerConfig() *TestServerConfig { SerfLan: randomPort(), SerfWan: randomPort(), Server: randomPort(), + RPC: randomPort(), }, } } From 0170a28ec9c29fbcfebd19065eebddccccb46db5 Mon Sep 17 00:00:00 2001 From: Seth Vargo Date: Thu, 23 Mar 2017 18:27:16 -0400 Subject: [PATCH 4/4] Use new APIs --- api/api_test.go | 5 +- api/catalog_test.go | 128 ++++++------ api/coordinate_test.go | 16 +- api/event_test.go | 8 +- api/health_test.go | 56 ++--- api/operator_test.go | 6 +- api/prepared_query_test.go | 8 +- command/agent/agent_endpoint.go | 2 - command/agent/agent_endpoint_test.go | 40 ++-- command/agent/catalog_endpoint_test.go | 44 ++-- command/agent/check_test.go | 56 ++--- command/agent/command_test.go | 8 +- command/agent/dns_test.go | 24 +-- command/agent/event_endpoint_test.go | 56 ++--- command/agent/health_endpoint_test.go | 54 ++--- command/agent/local_test.go | 74 +++---- command/agent/operator_endpoint_test.go | 12 +- command/agent/user_event_test.go | 12 +- command/exec_test.go | 16 +- command/force_leave_test.go | 11 +- command/rtt_test.go | 8 +- consul/acl_replication_test.go | 12 +- consul/acl_test.go | 56 ++--- consul/autopilot_test.go | 52 ++--- consul/catalog_endpoint_test.go | 40 ++-- consul/client_test.go | 78 +++---- consul/coordinate_endpoint_test.go | 12 +- consul/leader_test.go | 202 +++++++++--------- consul/operator_endpoint_test.go | 9 +- consul/prepared_query_endpoint_test.go | 24 +-- 
consul/server_test.go | 260 ++++++++++++------------ consul/session_ttl_test.go | 16 +- consul/snapshot_endpoint_test.go | 12 +- 33 files changed, 705 insertions(+), 712 deletions(-) diff --git a/api/api_test.go b/api/api_test.go index 51a2068957..85a2f9e626 100644 --- a/api/api_test.go +++ b/api/api_test.go @@ -43,7 +43,10 @@ func makeClientWithConfig( cb1(conf) } // Create server - server := testutil.NewTestServerConfig(t, cb2) + server, err := testutil.NewTestServerConfig(cb2) + if err != nil { + t.Fatal(err) + } conf.Address = server.HTTPAddr // Create client diff --git a/api/catalog_test.go b/api/catalog_test.go index ed70e38586..c418f85486 100644 --- a/api/catalog_test.go +++ b/api/catalog_test.go @@ -14,7 +14,7 @@ func TestCatalog_Datacenters(t *testing.T) { catalog := c.Catalog() - testutil.WaitForResult(func() (bool, error) { + if err := testutil.WaitForResult(func() (bool, error) { datacenters, err := catalog.Datacenters() if err != nil { return false, err @@ -25,9 +25,9 @@ func TestCatalog_Datacenters(t *testing.T) { } return true, nil - }, func(err error) { - t.Fatalf("err: %s", err) - }) + }); err != nil { + t.Fatal(err) + } } func TestCatalog_Nodes(t *testing.T) { @@ -36,7 +36,7 @@ func TestCatalog_Nodes(t *testing.T) { catalog := c.Catalog() - testutil.WaitForResult(func() (bool, error) { + if err := testutil.WaitForResult(func() (bool, error) { nodes, meta, err := catalog.Nodes(nil) if err != nil { return false, err @@ -55,9 +55,9 @@ func TestCatalog_Nodes(t *testing.T) { } return true, nil - }, func(err error) { - t.Fatalf("err: %s", err) - }) + }); err != nil { + t.Fatal(err) + } } func TestCatalog_Nodes_MetaFilter(t *testing.T) { @@ -70,7 +70,7 @@ func TestCatalog_Nodes_MetaFilter(t *testing.T) { catalog := c.Catalog() // Make sure we get the node back when filtering by its metadata - testutil.WaitForResult(func() (bool, error) { + if err := testutil.WaitForResult(func() (bool, error) { nodes, meta, err := catalog.Nodes(&QueryOptions{NodeMeta: meta}) if err != nil { return false, err @@ -93,12 +93,12 @@ func TestCatalog_Nodes_MetaFilter(t *testing.T) { } return true, nil - }, func(err error) { - t.Fatalf("err: %s", err) - }) + }); err != nil { + t.Fatal(err) + } // Get nothing back when we use an invalid filter - testutil.WaitForResult(func() (bool, error) { + if err := testutil.WaitForResult(func() (bool, error) { nodes, meta, err := catalog.Nodes(&QueryOptions{NodeMeta: map[string]string{"nope": "nope"}}) if err != nil { return false, err @@ -113,9 +113,9 @@ func TestCatalog_Nodes_MetaFilter(t *testing.T) { } return true, nil - }, func(err error) { - t.Fatalf("err: %s", err) - }) + }); err != nil { + t.Fatal(err) + } } func TestCatalog_Services(t *testing.T) { @@ -125,7 +125,7 @@ func TestCatalog_Services(t *testing.T) { catalog := c.Catalog() - testutil.WaitForResult(func() (bool, error) { + if err := testutil.WaitForResult(func() (bool, error) { services, meta, err := catalog.Services(nil) if err != nil { return false, err @@ -140,9 +140,9 @@ func TestCatalog_Services(t *testing.T) { } return true, nil - }, func(err error) { - t.Fatalf("err: %s", err) - }) + }); err != nil { + t.Fatal(err) + } } func TestCatalog_Services_NodeMetaFilter(t *testing.T) { @@ -155,7 +155,7 @@ func TestCatalog_Services_NodeMetaFilter(t *testing.T) { catalog := c.Catalog() // Make sure we get the service back when filtering by the node's metadata - testutil.WaitForResult(func() (bool, error) { + if err := testutil.WaitForResult(func() (bool, error) { services, meta, err := 
catalog.Services(&QueryOptions{NodeMeta: meta}) if err != nil { return false, err @@ -170,12 +170,12 @@ func TestCatalog_Services_NodeMetaFilter(t *testing.T) { } return true, nil - }, func(err error) { - t.Fatalf("err: %s", err) - }) + }); err != nil { + t.Fatal(err) + } // Get nothing back when using an invalid filter - testutil.WaitForResult(func() (bool, error) { + if err := testutil.WaitForResult(func() (bool, error) { services, meta, err := catalog.Services(&QueryOptions{NodeMeta: map[string]string{"nope": "nope"}}) if err != nil { return false, err @@ -190,9 +190,9 @@ func TestCatalog_Services_NodeMetaFilter(t *testing.T) { } return true, nil - }, func(err error) { - t.Fatalf("err: %s", err) - }) + }); err != nil { + t.Fatal(err) + } } func TestCatalog_Service(t *testing.T) { @@ -202,7 +202,7 @@ func TestCatalog_Service(t *testing.T) { catalog := c.Catalog() - testutil.WaitForResult(func() (bool, error) { + if err := testutil.WaitForResult(func() (bool, error) { services, meta, err := catalog.Service("consul", "", nil) if err != nil { return false, err @@ -217,9 +217,9 @@ func TestCatalog_Service(t *testing.T) { } return true, nil - }, func(err error) { - t.Fatalf("err: %s", err) - }) + }); err != nil { + t.Fatal(err) + } } func TestCatalog_Service_NodeMetaFilter(t *testing.T) { @@ -232,7 +232,7 @@ func TestCatalog_Service_NodeMetaFilter(t *testing.T) { catalog := c.Catalog() - testutil.WaitForResult(func() (bool, error) { + if err := testutil.WaitForResult(func() (bool, error) { services, meta, err := catalog.Service("consul", "", &QueryOptions{NodeMeta: meta}) if err != nil { return false, err @@ -247,9 +247,9 @@ func TestCatalog_Service_NodeMetaFilter(t *testing.T) { } return true, nil - }, func(err error) { - t.Fatalf("err: %s", err) - }) + }); err != nil { + t.Fatal(err) + } } func TestCatalog_Node(t *testing.T) { @@ -260,7 +260,7 @@ func TestCatalog_Node(t *testing.T) { catalog := c.Catalog() name, _ := c.Agent().NodeName() - testutil.WaitForResult(func() (bool, error) { + if err := testutil.WaitForResult(func() (bool, error) { info, meta, err := catalog.Node(name, nil) if err != nil { return false, err @@ -279,9 +279,9 @@ func TestCatalog_Node(t *testing.T) { } return true, nil - }, func(err error) { - t.Fatalf("err: %s", err) - }) + }); err != nil { + t.Fatal(err) + } } func TestCatalog_Registration(t *testing.T) { @@ -316,7 +316,7 @@ func TestCatalog_Registration(t *testing.T) { Check: check, } - testutil.WaitForResult(func() (bool, error) { + if err := testutil.WaitForResult(func() (bool, error) { if _, err := catalog.Register(reg, nil); err != nil { return false, err } @@ -344,9 +344,9 @@ func TestCatalog_Registration(t *testing.T) { } return true, nil - }, func(err error) { - t.Fatalf("err: %s", err) - }) + }); err != nil { + t.Fatal(err) + } // Test catalog deregistration of the previously registered service dereg := &CatalogDeregistration{ @@ -360,7 +360,7 @@ func TestCatalog_Registration(t *testing.T) { t.Fatalf("err: %v", err) } - testutil.WaitForResult(func() (bool, error) { + if err := testutil.WaitForResult(func() (bool, error) { node, _, err := catalog.Node("foobar", nil) if err != nil { return false, err @@ -371,9 +371,9 @@ func TestCatalog_Registration(t *testing.T) { } return true, nil - }, func(err error) { - t.Fatalf("err: %s", err) - }) + }); err != nil { + t.Fatal(err) + } // Test deregistration of the previously registered check dereg = &CatalogDeregistration{ @@ -387,7 +387,7 @@ func TestCatalog_Registration(t *testing.T) { t.Fatalf("err: %v", err) } - 
testutil.WaitForResult(func() (bool, error) { + if err := testutil.WaitForResult(func() (bool, error) { health, _, err := c.Health().Node("foobar", nil) if err != nil { return false, err @@ -398,9 +398,9 @@ func TestCatalog_Registration(t *testing.T) { } return true, nil - }, func(err error) { - t.Fatalf("err: %s", err) - }) + }); err != nil { + t.Fatal(err) + } // Test node deregistration of the previously registered node dereg = &CatalogDeregistration{ @@ -413,7 +413,7 @@ func TestCatalog_Registration(t *testing.T) { t.Fatalf("err: %v", err) } - testutil.WaitForResult(func() (bool, error) { + if err := testutil.WaitForResult(func() (bool, error) { node, _, err := catalog.Node("foobar", nil) if err != nil { return false, err @@ -424,9 +424,9 @@ func TestCatalog_Registration(t *testing.T) { } return true, nil - }, func(err error) { - t.Fatalf("err: %s", err) - }) + }); err != nil { + t.Fatal(err) + } } func TestCatalog_EnableTagOverride(t *testing.T) { @@ -450,7 +450,7 @@ func TestCatalog_EnableTagOverride(t *testing.T) { Service: service, } - testutil.WaitForResult(func() (bool, error) { + if err := testutil.WaitForResult(func() (bool, error) { if _, err := catalog.Register(reg, nil); err != nil { return false, err } @@ -480,12 +480,12 @@ func TestCatalog_EnableTagOverride(t *testing.T) { } return true, nil - }, func(err error) { - t.Fatalf("err: %s", err) - }) + }); err != nil { + t.Fatal(err) + } service.EnableTagOverride = true - testutil.WaitForResult(func() (bool, error) { + if err := testutil.WaitForResult(func() (bool, error) { if _, err := catalog.Register(reg, nil); err != nil { return false, err } @@ -515,7 +515,7 @@ func TestCatalog_EnableTagOverride(t *testing.T) { } return true, nil - }, func(err error) { - t.Fatalf("err: %s", err) - }) + }); err != nil { + t.Fatal(err) + } } diff --git a/api/coordinate_test.go b/api/coordinate_test.go index 9d13d1c39d..6353949276 100644 --- a/api/coordinate_test.go +++ b/api/coordinate_test.go @@ -14,7 +14,7 @@ func TestCoordinate_Datacenters(t *testing.T) { coordinate := c.Coordinate() - testutil.WaitForResult(func() (bool, error) { + if err := testutil.WaitForResult(func() (bool, error) { datacenters, err := coordinate.Datacenters() if err != nil { return false, err @@ -25,9 +25,9 @@ func TestCoordinate_Datacenters(t *testing.T) { } return true, nil - }, func(err error) { - t.Fatalf("err: %s", err) - }) + }); err != nil { + t.Fatal(err) + } } func TestCoordinate_Nodes(t *testing.T) { @@ -37,7 +37,7 @@ func TestCoordinate_Nodes(t *testing.T) { coordinate := c.Coordinate() - testutil.WaitForResult(func() (bool, error) { + if err := testutil.WaitForResult(func() (bool, error) { _, _, err := coordinate.Nodes(nil) if err != nil { return false, err @@ -48,7 +48,7 @@ func TestCoordinate_Nodes(t *testing.T) { // we can do is call the endpoint and make sure we don't // get an error. 
return true, nil - }, func(err error) { - t.Fatalf("err: %s", err) - }) + }); err != nil { + t.Fatal(err) + } } diff --git a/api/event_test.go b/api/event_test.go index 1ca92e2331..c5b2314e3f 100644 --- a/api/event_test.go +++ b/api/event_test.go @@ -29,15 +29,15 @@ func TestEvent_FireList(t *testing.T) { var events []*UserEvent var qm *QueryMeta - testutil.WaitForResult(func() (bool, error) { + if err := testutil.WaitForResult(func() (bool, error) { events, qm, err = event.List("", nil) if err != nil { t.Fatalf("err: %v", err) } return len(events) > 0, err - }, func(err error) { - t.Fatalf("err: %#v", err) - }) + }); err != nil { + t.Fatal(err) + } if events[len(events)-1].ID != id { t.Fatalf("bad: %#v", events) diff --git a/api/health_test.go b/api/health_test.go index 4140a2811a..c697d4e569 100644 --- a/api/health_test.go +++ b/api/health_test.go @@ -21,7 +21,7 @@ func TestHealth_Node(t *testing.T) { } name := info["Config"]["NodeName"].(string) - testutil.WaitForResult(func() (bool, error) { + if err := testutil.WaitForResult(func() (bool, error) { checks, meta, err := health.Node(name, nil) if err != nil { return false, err @@ -33,9 +33,9 @@ func TestHealth_Node(t *testing.T) { return false, fmt.Errorf("bad: %v", checks) } return true, nil - }, func(err error) { - t.Fatalf("err: %s", err) - }) + }); err != nil { + t.Fatal(err) + } } func TestHealthChecks_AggregatedStatus(t *testing.T) { @@ -191,7 +191,7 @@ func TestHealth_Checks(t *testing.T) { } defer agent.ServiceDeregister("foo") - testutil.WaitForResult(func() (bool, error) { + if err := testutil.WaitForResult(func() (bool, error) { checks, meta, err := health.Checks("foo", nil) if err != nil { return false, err @@ -203,9 +203,9 @@ func TestHealth_Checks(t *testing.T) { return false, fmt.Errorf("Bad: %v", checks) } return true, nil - }, func(err error) { - t.Fatalf("err: %s", err) - }) + }); err != nil { + t.Fatal(err) + } } func TestHealth_Checks_NodeMetaFilter(t *testing.T) { @@ -231,7 +231,7 @@ func TestHealth_Checks_NodeMetaFilter(t *testing.T) { } defer agent.ServiceDeregister("foo") - testutil.WaitForResult(func() (bool, error) { + if err := testutil.WaitForResult(func() (bool, error) { checks, meta, err := health.Checks("foo", &QueryOptions{NodeMeta: meta}) if err != nil { return false, err @@ -243,9 +243,9 @@ func TestHealth_Checks_NodeMetaFilter(t *testing.T) { return false, fmt.Errorf("Bad: %v", checks) } return true, nil - }, func(err error) { - t.Fatalf("err: %s", err) - }) + }); err != nil { + t.Fatal(err) + } } func TestHealth_Service(t *testing.T) { @@ -254,7 +254,7 @@ func TestHealth_Service(t *testing.T) { health := c.Health() - testutil.WaitForResult(func() (bool, error) { + if err := testutil.WaitForResult(func() (bool, error) { // consul service should always exist... checks, meta, err := health.Service("consul", "", true, nil) if err != nil { @@ -270,9 +270,9 @@ func TestHealth_Service(t *testing.T) { return false, fmt.Errorf("Bad: %v", checks[0].Node) } return true, nil - }, func(err error) { - t.Fatalf("err: %s", err) - }) + }); err != nil { + t.Fatal(err) + } } func TestHealth_Service_NodeMetaFilter(t *testing.T) { @@ -284,7 +284,7 @@ func TestHealth_Service_NodeMetaFilter(t *testing.T) { health := c.Health() - testutil.WaitForResult(func() (bool, error) { + if err := testutil.WaitForResult(func() (bool, error) { // consul service should always exist... 
checks, meta, err := health.Service("consul", "", true, &QueryOptions{NodeMeta: meta}) if err != nil { @@ -300,9 +300,9 @@ func TestHealth_Service_NodeMetaFilter(t *testing.T) { return false, fmt.Errorf("Bad: %v", checks[0].Node) } return true, nil - }, func(err error) { - t.Fatalf("err: %s", err) - }) + }); err != nil { + t.Fatal(err) + } } func TestHealth_State(t *testing.T) { @@ -312,7 +312,7 @@ func TestHealth_State(t *testing.T) { health := c.Health() - testutil.WaitForResult(func() (bool, error) { + if err := testutil.WaitForResult(func() (bool, error) { checks, meta, err := health.State("any", nil) if err != nil { return false, err @@ -324,9 +324,9 @@ func TestHealth_State(t *testing.T) { return false, fmt.Errorf("Bad: %v", checks) } return true, nil - }, func(err error) { - t.Fatalf("err: %s", err) - }) + }); err != nil { + t.Fatal(err) + } } func TestHealth_State_NodeMetaFilter(t *testing.T) { @@ -339,7 +339,7 @@ func TestHealth_State_NodeMetaFilter(t *testing.T) { health := c.Health() - testutil.WaitForResult(func() (bool, error) { + if err := testutil.WaitForResult(func() (bool, error) { checks, meta, err := health.State("any", &QueryOptions{NodeMeta: meta}) if err != nil { return false, err @@ -351,7 +351,7 @@ func TestHealth_State_NodeMetaFilter(t *testing.T) { return false, fmt.Errorf("Bad: %v", checks) } return true, nil - }, func(err error) { - t.Fatalf("err: %s", err) - }) + }); err != nil { + t.Fatal(err) + } } diff --git a/api/operator_test.go b/api/operator_test.go index 38768c6881..717f04e8f8 100644 --- a/api/operator_test.go +++ b/api/operator_test.go @@ -188,7 +188,7 @@ func TestOperator_ServerHealth(t *testing.T) { defer s.Stop() operator := c.Operator() - testutil.WaitForResult(func() (bool, error) { + if err := testutil.WaitForResult(func() (bool, error) { out, err := operator.AutopilotServerHealth(nil) if err != nil { return false, fmt.Errorf("err: %v", err) @@ -200,7 +200,7 @@ func TestOperator_ServerHealth(t *testing.T) { } return true, nil - }, func(err error) { + }); err != nil { t.Fatal(err) - }) + } } diff --git a/api/prepared_query_test.go b/api/prepared_query_test.go index 667e1dea38..84ec1f822b 100644 --- a/api/prepared_query_test.go +++ b/api/prepared_query_test.go @@ -30,7 +30,7 @@ func TestPreparedQuery(t *testing.T) { } catalog := c.Catalog() - testutil.WaitForResult(func() (bool, error) { + if err := testutil.WaitForResult(func() (bool, error) { if _, err := catalog.Register(reg, nil); err != nil { return false, err } @@ -40,9 +40,9 @@ func TestPreparedQuery(t *testing.T) { } return true, nil - }, func(err error) { - t.Fatalf("err: %s", err) - }) + }); err != nil { + t.Fatal(err) + } // Create a simple prepared query. 
def := &PreparedQueryDefinition{ diff --git a/command/agent/agent_endpoint.go b/command/agent/agent_endpoint.go index 48c0b27f4d..47a20369e2 100644 --- a/command/agent/agent_endpoint.go +++ b/command/agent/agent_endpoint.go @@ -649,8 +649,6 @@ func (s *HTTPServer) AgentMonitor(resp http.ResponseWriter, req *http.Request) ( flusher.Flush() } } - - return nil, nil } type httpLogHandler struct { diff --git a/command/agent/agent_endpoint_test.go b/command/agent/agent_endpoint_test.go index cfd3200c33..fcb2c67a52 100644 --- a/command/agent/agent_endpoint_test.go +++ b/command/agent/agent_endpoint_test.go @@ -346,11 +346,11 @@ func TestAgent_Reload(t *testing.T) { close(doneCh) }() - testutil.WaitForResult(func() (bool, error) { + if err := testutil.WaitForResult(func() (bool, error) { return len(cmd.httpServers) == 1, nil - }, func(err error) { + }); err != nil { t.Fatalf("should have an http server") - }) + } if _, ok := cmd.agent.state.services["redis"]; !ok { t.Fatalf("missing redis service") @@ -535,11 +535,11 @@ func TestAgent_Join(t *testing.T) { t.Fatalf("should have 2 members") } - testutil.WaitForResult(func() (bool, error) { + if err := testutil.WaitForResult(func() (bool, error) { return len(a2.LANMembers()) == 2, nil - }, func(err error) { - t.Fatalf("should have 2 members") - }) + }); err != nil { + t.Fatal("should have 2 members") + } } func TestAgent_Join_WAN(t *testing.T) { @@ -570,11 +570,11 @@ func TestAgent_Join_WAN(t *testing.T) { t.Fatalf("should have 2 members") } - testutil.WaitForResult(func() (bool, error) { + if err := testutil.WaitForResult(func() (bool, error) { return len(a2.WANMembers()) == 2, nil - }, func(err error) { - t.Fatalf("should have 2 members") - }) + }); err != nil { + t.Fatal("should have 2 members") + } } func TestAgent_Join_ACLDeny(t *testing.T) { @@ -663,13 +663,13 @@ func TestAgent_Leave(t *testing.T) { t.Fatalf("Err: %v", obj) } - testutil.WaitForResult(func() (bool, error) { + if err := testutil.WaitForResult(func() (bool, error) { m := srv.agent.LANMembers() success := m[1].Status == serf.StatusLeft return success, errors.New(m[1].Status.String()) - }, func(err error) { + }); err != nil { t.Fatalf("member status is %v, should be left", err) - }) + } } func TestAgent_Leave_ACLDeny(t *testing.T) { @@ -762,13 +762,13 @@ func TestAgent_ForceLeave(t *testing.T) { t.Fatalf("Err: %v", obj) } - testutil.WaitForResult(func() (bool, error) { + if err := testutil.WaitForResult(func() (bool, error) { m := srv.agent.LANMembers() success := m[1].Status == serf.StatusLeft return success, errors.New(m[1].Status.String()) - }, func(err error) { + }); err != nil { t.Fatalf("member status is %v, should be left", err) - }) + } } func TestAgent_ForceLeave_ACLDeny(t *testing.T) { @@ -1931,7 +1931,7 @@ func TestAgent_Monitor(t *testing.T) { // Try to stream logs until we see the expected log line expected := []byte("raft: Initial configuration (index=1)") - testutil.WaitForResult(func() (bool, error) { + if err := testutil.WaitForResult(func() (bool, error) { req, _ = http.NewRequest("GET", "/v1/agent/monitor?loglevel=debug", nil) resp = newClosableRecorder() done := make(chan struct{}) @@ -1950,9 +1950,9 @@ func TestAgent_Monitor(t *testing.T) { } else { return false, fmt.Errorf("didn't see expected") } - }, func(err error) { + }); err != nil { t.Fatalf("err: %v", err) - }) + } } type closableRecorder struct { diff --git a/command/agent/catalog_endpoint_test.go b/command/agent/catalog_endpoint_test.go index d0d636fc85..34271756c1 100644 --- 
a/command/agent/catalog_endpoint_test.go +++ b/command/agent/catalog_endpoint_test.go @@ -89,7 +89,7 @@ func TestCatalogDatacenters(t *testing.T) { defer srv.Shutdown() defer srv.agent.Shutdown() - testutil.WaitForResult(func() (bool, error) { + if err := testutil.WaitForResult(func() (bool, error) { obj, err := srv.CatalogDatacenters(nil, nil) if err != nil { return false, err @@ -100,9 +100,9 @@ func TestCatalogDatacenters(t *testing.T) { return false, fmt.Errorf("missing dc: %v", dcs) } return true, nil - }, func(err error) { - t.Fatalf("bad: %v", err) - }) + }); err != nil { + t.Fatal(err) + } } func TestCatalogNodes(t *testing.T) { @@ -219,13 +219,11 @@ func TestCatalogNodes_WanTranslation(t *testing.T) { if _, err := srv2.agent.JoinWAN([]string{addr}); err != nil { t.Fatalf("err: %v", err) } - testutil.WaitForResult( - func() (bool, error) { - return len(srv1.agent.WANMembers()) > 1, nil - }, - func(err error) { - t.Fatalf("Failed waiting for WAN join: %v", err) - }) + if err := testutil.WaitForResult(func() (bool, error) { + return len(srv1.agent.WANMembers()) > 1, nil + }); err != nil { + t.Fatalf("Failed waiting for WAN join: %v", err) + } // Register a node with DC2. { @@ -701,13 +699,11 @@ func TestCatalogServiceNodes_WanTranslation(t *testing.T) { if _, err := srv2.agent.JoinWAN([]string{addr}); err != nil { t.Fatalf("err: %v", err) } - testutil.WaitForResult( - func() (bool, error) { - return len(srv1.agent.WANMembers()) > 1, nil - }, - func(err error) { - t.Fatalf("Failed waiting for WAN join: %v", err) - }) + if err := testutil.WaitForResult(func() (bool, error) { + return len(srv1.agent.WANMembers()) > 1, nil + }); err != nil { + t.Fatalf("Failed waiting for WAN join: %v", err) + } // Register a node with DC2. { @@ -942,13 +938,11 @@ func TestCatalogNodeServices_WanTranslation(t *testing.T) { if _, err := srv2.agent.JoinWAN([]string{addr}); err != nil { t.Fatalf("err: %v", err) } - testutil.WaitForResult( - func() (bool, error) { - return len(srv1.agent.WANMembers()) > 1, nil - }, - func(err error) { - t.Fatalf("Failed waiting for WAN join: %v", err) - }) + if err := testutil.WaitForResult(func() (bool, error) { + return len(srv1.agent.WANMembers()) > 1, nil + }); err != nil { + t.Fatalf("Failed waiting for WAN join: %v", err) + } // Register a node with DC2. 
{ diff --git a/command/agent/check_test.go b/command/agent/check_test.go index b7381bbc5a..59db2eb59f 100644 --- a/command/agent/check_test.go +++ b/command/agent/check_test.go @@ -79,7 +79,7 @@ func expectStatus(t *testing.T, script, status string) { check.Start() defer check.Stop() - testutil.WaitForResult(func() (bool, error) { + if err := testutil.WaitForResult(func() (bool, error) { // Should have at least 2 updates if mock.Updates("foo") < 2 { return false, fmt.Errorf("should have 2 updates %v", mock.updates) @@ -90,9 +90,9 @@ func expectStatus(t *testing.T, script, status string) { } return true, nil - }, func(err error) { - t.Fatalf("err: %s", err) - }) + }); err != nil { + t.Fatal(err) + } } func TestCheckMonitor_Passing(t *testing.T) { @@ -282,7 +282,7 @@ func expectHTTPStatus(t *testing.T, url string, status string) { check.Start() defer check.Stop() - testutil.WaitForResult(func() (bool, error) { + if err := testutil.WaitForResult(func() (bool, error) { // Should have at least 2 updates if mock.Updates("foo") < 2 { return false, fmt.Errorf("should have 2 updates %v", mock.updates) @@ -297,9 +297,9 @@ func expectHTTPStatus(t *testing.T, url string, status string) { return false, fmt.Errorf("output too long: %d (%d-byte limit)", n, CheckBufSize) } return true, nil - }, func(err error) { - t.Fatalf("err: %s", err) - }) + }); err != nil { + t.Fatal(err) + } } func TestCheckHTTPCritical(t *testing.T) { @@ -388,7 +388,7 @@ func TestCheckHTTPTimeout(t *testing.T) { check.Start() defer check.Stop() - testutil.WaitForResult(func() (bool, error) { + if err := testutil.WaitForResult(func() (bool, error) { // Should have at least 2 updates if mock.updates["bar"] < 2 { return false, fmt.Errorf("should have at least 2 updates %v", mock.updates) @@ -398,9 +398,9 @@ func TestCheckHTTPTimeout(t *testing.T) { return false, fmt.Errorf("should be critical %v", mock.state) } return true, nil - }, func(err error) { - t.Fatalf("err: %s", err) - }) + }); err != nil { + t.Fatal(err) + } } func TestCheckHTTP_disablesKeepAlives(t *testing.T) { @@ -461,14 +461,14 @@ func TestCheckHTTP_TLSSkipVerify_true_pass(t *testing.T) { t.Fatalf("should be true") } - testutil.WaitForResult(func() (bool, error) { + if err := testutil.WaitForResult(func() (bool, error) { if mock.state["skipverify_true"] != structs.HealthPassing { return false, fmt.Errorf("should be passing %v", mock.state) } return true, nil - }, func(err error) { - t.Fatalf("err: %s", err) - }) + }); err != nil { + t.Fatal(err) + } } func TestCheckHTTP_TLSSkipVerify_true_fail(t *testing.T) { @@ -496,14 +496,14 @@ func TestCheckHTTP_TLSSkipVerify_true_fail(t *testing.T) { t.Fatalf("should be true") } - testutil.WaitForResult(func() (bool, error) { + if err := testutil.WaitForResult(func() (bool, error) { if mock.state["skipverify_true"] != structs.HealthCritical { return false, fmt.Errorf("should be critical %v", mock.state) } return true, nil - }, func(err error) { - t.Fatalf("err: %s", err) - }) + }); err != nil { + t.Fatal(err) + } } func TestCheckHTTP_TLSSkipVerify_false(t *testing.T) { @@ -532,7 +532,7 @@ func TestCheckHTTP_TLSSkipVerify_false(t *testing.T) { t.Fatalf("should be false") } - testutil.WaitForResult(func() (bool, error) { + if err := testutil.WaitForResult(func() (bool, error) { // This should fail due to an invalid SSL cert if mock.state["skipverify_false"] != structs.HealthCritical { return false, fmt.Errorf("should be critical %v", mock.state) @@ -542,9 +542,9 @@ func TestCheckHTTP_TLSSkipVerify_false(t *testing.T) { return false, 
fmt.Errorf("should fail with certificate error %v", mock.output) } return true, nil - }, func(err error) { - t.Fatalf("err: %s", err) - }) + }); err != nil { + t.Fatal(err) + } } func mockTCPServer(network string) net.Listener { @@ -582,7 +582,7 @@ func expectTCPStatus(t *testing.T, tcp string, status string) { check.Start() defer check.Stop() - testutil.WaitForResult(func() (bool, error) { + if err := testutil.WaitForResult(func() (bool, error) { // Should have at least 2 updates if mock.Updates("foo") < 2 { return false, fmt.Errorf("should have 2 updates %v", mock.updates) @@ -592,9 +592,9 @@ func expectTCPStatus(t *testing.T, tcp string, status string) { return false, fmt.Errorf("should be %v %v", status, mock.state) } return true, nil - }, func(err error) { - t.Fatalf("err: %s", err) - }) + }); err != nil { + t.Fatal(err) + } } func TestCheckTCPCritical(t *testing.T) { diff --git a/command/agent/command_test.go b/command/agent/command_test.go index 4459985c7b..ad32eafabb 100644 --- a/command/agent/command_test.go +++ b/command/agent/command_test.go @@ -104,7 +104,7 @@ func TestRetryJoin(t *testing.T) { close(doneCh) }() - testutil.WaitForResult(func() (bool, error) { + if err := testutil.WaitForResult(func() (bool, error) { mem := agent.LANMembers() if len(mem) != 2 { return false, fmt.Errorf("bad: %#v", mem) @@ -114,9 +114,9 @@ func TestRetryJoin(t *testing.T) { return false, fmt.Errorf("bad (wan): %#v", mem) } return true, nil - }, func(err error) { - t.Fatalf(err.Error()) - }) + }); err != nil { + t.Fatal(err) + } } func TestReadCliConfig(t *testing.T) { diff --git a/command/agent/dns_test.go b/command/agent/dns_test.go index 115578cfa2..e7ef4cc377 100644 --- a/command/agent/dns_test.go +++ b/command/agent/dns_test.go @@ -1299,13 +1299,11 @@ func TestDNS_ServiceLookup_WanAddress(t *testing.T) { t.Fatalf("err: %v", err) } - testutil.WaitForResult( - func() (bool, error) { - return len(srv1.agent.WANMembers()) > 1, nil - }, - func(err error) { - t.Fatalf("Failed waiting for WAN join: %v", err) - }) + if err := testutil.WaitForResult(func() (bool, error) { + return len(srv1.agent.WANMembers()) > 1, nil + }); err != nil { + t.Fatalf("Failed waiting for WAN join: %v", err) + } // Register a remote node with a service. { @@ -3376,13 +3374,11 @@ func TestDNS_PreparedQuery_Failover(t *testing.T) { if _, err := srv2.agent.JoinWAN([]string{addr}); err != nil { t.Fatalf("err: %v", err) } - testutil.WaitForResult( - func() (bool, error) { - return len(srv1.agent.WANMembers()) > 1, nil - }, - func(err error) { - t.Fatalf("Failed waiting for WAN join: %v", err) - }) + if err := testutil.WaitForResult(func() (bool, error) { + return len(srv1.agent.WANMembers()) > 1, nil + }); err != nil { + t.Fatalf("Failed waiting for WAN join: %v", err) + } // Register a remote node with a service. 
{ diff --git a/command/agent/event_endpoint_test.go b/command/agent/event_endpoint_test.go index 53927daebf..b1632623ac 100644 --- a/command/agent/event_endpoint_test.go +++ b/command/agent/event_endpoint_test.go @@ -123,7 +123,7 @@ func TestEventList(t *testing.T) { t.Fatalf("err: %v", err) } - testutil.WaitForResult(func() (bool, error) { + if err := testutil.WaitForResult(func() (bool, error) { req, err := http.NewRequest("GET", "/v1/event/list", nil) if err != nil { return false, err @@ -146,9 +146,9 @@ func TestEventList(t *testing.T) { return false, fmt.Errorf("bad: %#v", header) } return true, nil - }, func(err error) { - t.Fatalf("err: %v", err) - }) + }); err != nil { + t.Fatal(err) + } }) } @@ -164,7 +164,7 @@ func TestEventList_Filter(t *testing.T) { t.Fatalf("err: %v", err) } - testutil.WaitForResult(func() (bool, error) { + if err := testutil.WaitForResult(func() (bool, error) { req, err := http.NewRequest("GET", "/v1/event/list?name=foo", nil) if err != nil { return false, err @@ -187,9 +187,9 @@ func TestEventList_Filter(t *testing.T) { return false, fmt.Errorf("bad: %#v", header) } return true, nil - }, func(err error) { - t.Fatalf("err: %v", err) - }) + }); err != nil { + t.Fatal(err) + } }) } @@ -207,7 +207,7 @@ func TestEventList_ACLFilter(t *testing.T) { // Try no token. { - testutil.WaitForResult(func() (bool, error) { + if err := testutil.WaitForResult(func() (bool, error) { req, err := http.NewRequest("GET", "/v1/event/list", nil) if err != nil { return false, err @@ -226,14 +226,14 @@ func TestEventList_ACLFilter(t *testing.T) { return false, fmt.Errorf("bad: %#v", list) } return true, nil - }, func(err error) { - t.Fatalf("err: %v", err) - }) + }); err != nil { + t.Fatal(err) + } } // Try the root token. { - testutil.WaitForResult(func() (bool, error) { + if err := testutil.WaitForResult(func() (bool, error) { req, err := http.NewRequest("GET", "/v1/event/list?token=root", nil) if err != nil { return false, err @@ -252,9 +252,9 @@ func TestEventList_ACLFilter(t *testing.T) { return false, fmt.Errorf("bad: %#v", list) } return true, nil - }, func(err error) { - t.Fatalf("err: %v", err) - }) + }); err != nil { + t.Fatal(err) + } } } @@ -266,7 +266,7 @@ func TestEventList_Blocking(t *testing.T) { } var index string - testutil.WaitForResult(func() (bool, error) { + if err := testutil.WaitForResult(func() (bool, error) { req, err := http.NewRequest("GET", "/v1/event/list", nil) if err != nil { return false, err @@ -282,9 +282,9 @@ func TestEventList_Blocking(t *testing.T) { } index = header return true, nil - }, func(err error) { - t.Fatalf("err: %v", err) - }) + }); err != nil { + t.Fatal(err) + } go func() { time.Sleep(50 * time.Millisecond) @@ -294,7 +294,7 @@ func TestEventList_Blocking(t *testing.T) { } }() - testutil.WaitForResult(func() (bool, error) { + if err := testutil.WaitForResult(func() (bool, error) { url := "/v1/event/list?index=" + index req, err := http.NewRequest("GET", url, nil) if err != nil { @@ -314,9 +314,9 @@ func TestEventList_Blocking(t *testing.T) { return false, fmt.Errorf("bad: %#v", list) } return true, nil - }, func(err error) { - t.Fatalf("err: %v", err) - }) + }); err != nil { + t.Fatal(err) + } }) } @@ -339,7 +339,7 @@ func TestEventList_EventBufOrder(t *testing.T) { // Test that the event order is preserved when name // filtering on a list of > 1 matching event. 
- testutil.WaitForResult(func() (bool, error) { + if err := testutil.WaitForResult(func() (bool, error) { url := "/v1/event/list?name=foo" req, err := http.NewRequest("GET", url, nil) if err != nil { @@ -358,9 +358,9 @@ func TestEventList_EventBufOrder(t *testing.T) { return false, fmt.Errorf("bad: %#v", list) } return true, nil - }, func(err error) { - t.Fatalf("err: %v", err) - }) + }); err != nil { + t.Fatal(err) + } }) } diff --git a/command/agent/health_endpoint_test.go b/command/agent/health_endpoint_test.go index a0f2a62e6e..3b006395cd 100644 --- a/command/agent/health_endpoint_test.go +++ b/command/agent/health_endpoint_test.go @@ -20,7 +20,7 @@ func TestHealthChecksInState(t *testing.T) { t.Fatalf("err: %v", err) } - testutil.WaitForResult(func() (bool, error) { + if err := testutil.WaitForResult(func() (bool, error) { resp := httptest.NewRecorder() obj, err := srv.HealthChecksInState(resp, req) if err != nil { @@ -36,7 +36,9 @@ func TestHealthChecksInState(t *testing.T) { return false, fmt.Errorf("bad: %v", obj) } return true, nil - }, func(err error) { t.Fatalf("err: %v", err) }) + }); err != nil { + t.Fatal(err) + } }) httpTest(t, func(srv *HTTPServer) { @@ -45,7 +47,7 @@ func TestHealthChecksInState(t *testing.T) { t.Fatalf("err: %v", err) } - testutil.WaitForResult(func() (bool, error) { + if err := testutil.WaitForResult(func() (bool, error) { resp := httptest.NewRecorder() obj, err := srv.HealthChecksInState(resp, req) if err != nil { @@ -61,7 +63,9 @@ func TestHealthChecksInState(t *testing.T) { return false, fmt.Errorf("bad: %v", obj) } return true, nil - }, func(err error) { t.Fatalf("err: %v", err) }) + }); err != nil { + t.Fatal(err) + } }) } @@ -88,7 +92,7 @@ func TestHealthChecksInState_NodeMetaFilter(t *testing.T) { t.Fatalf("err: %v", err) } - testutil.WaitForResult(func() (bool, error) { + if err := testutil.WaitForResult(func() (bool, error) { resp := httptest.NewRecorder() obj, err := srv.HealthChecksInState(resp, req) if err != nil { @@ -104,7 +108,9 @@ func TestHealthChecksInState_NodeMetaFilter(t *testing.T) { return false, fmt.Errorf("bad: %v", obj) } return true, nil - }, func(err error) { t.Fatalf("err: %v", err) }) + }); err != nil { + t.Fatal(err) + } }) } @@ -170,7 +176,7 @@ func TestHealthChecksInState_DistanceSort(t *testing.T) { } // Retry until foo moves to the front of the line. - testutil.WaitForResult(func() (bool, error) { + if err := testutil.WaitForResult(func() (bool, error) { resp = httptest.NewRecorder() obj, err = srv.HealthChecksInState(resp, req) if err != nil { @@ -188,9 +194,9 @@ func TestHealthChecksInState_DistanceSort(t *testing.T) { return false, fmt.Errorf("bad: %v", nodes) } return true, nil - }, func(err error) { - t.Fatalf("failed to get sorted service nodes: %v", err) - }) + }); err != nil { + t.Fatal(err) + } } func TestHealthNodeChecks(t *testing.T) { @@ -431,7 +437,7 @@ func TestHealthServiceChecks_DistanceSort(t *testing.T) { } // Retry until foo has moved to the front of the line. 
- testutil.WaitForResult(func() (bool, error) { + if err := testutil.WaitForResult(func() (bool, error) { resp = httptest.NewRecorder() obj, err = srv.HealthServiceChecks(resp, req) if err != nil { @@ -449,9 +455,9 @@ func TestHealthServiceChecks_DistanceSort(t *testing.T) { return false, fmt.Errorf("bad: %v", nodes) } return true, nil - }, func(err error) { - t.Fatalf("failed to get sorted service checks: %v", err) - }) + }); err != nil { + t.Fatal(err) + } } func TestHealthServiceNodes(t *testing.T) { @@ -665,7 +671,7 @@ func TestHealthServiceNodes_DistanceSort(t *testing.T) { } // Retry until foo has moved to the front of the line. - testutil.WaitForResult(func() (bool, error) { + if err := testutil.WaitForResult(func() (bool, error) { resp = httptest.NewRecorder() obj, err = srv.HealthServiceNodes(resp, req) if err != nil { @@ -683,9 +689,9 @@ func TestHealthServiceNodes_DistanceSort(t *testing.T) { return false, fmt.Errorf("bad: %v", nodes) } return true, nil - }, func(err error) { - t.Fatalf("failed to get sorted service nodes: %v", err) - }) + }); err != nil { + t.Fatal(err) + } } func TestHealthServiceNodes_PassingFilter(t *testing.T) { @@ -761,13 +767,11 @@ func TestHealthServiceNodes_WanTranslation(t *testing.T) { if _, err := srv2.agent.JoinWAN([]string{addr}); err != nil { t.Fatalf("err: %v", err) } - testutil.WaitForResult( - func() (bool, error) { - return len(srv1.agent.WANMembers()) > 1, nil - }, - func(err error) { - t.Fatalf("Failed waiting for WAN join: %v", err) - }) + if err := testutil.WaitForResult(func() (bool, error) { + return len(srv1.agent.WANMembers()) > 1, nil + }); err != nil { + t.Fatal(err) + } // Register a node with DC2. { diff --git a/command/agent/local_test.go b/command/agent/local_test.go index 53fc726a36..e1e8d5796c 100644 --- a/command/agent/local_test.go +++ b/command/agent/local_test.go @@ -183,9 +183,9 @@ func TestAgentAntiEntropy_Services(t *testing.T) { return true, nil } - testutil.WaitForResult(verifyServices, func(err error) { + if err := testutil.WaitForResult(verifyServices); err != nil { t.Fatal(err) - }) + } // Remove one of the services agent.state.RemoveService("api") @@ -246,9 +246,9 @@ func TestAgentAntiEntropy_Services(t *testing.T) { return true, nil } - testutil.WaitForResult(verifyServicesAfterRemove, func(err error) { + if err := testutil.WaitForResult(verifyServicesAfterRemove); err != nil { t.Fatal(err) - }) + } } func TestAgentAntiEntropy_EnableTagOverride(t *testing.T) { @@ -350,9 +350,9 @@ func TestAgentAntiEntropy_EnableTagOverride(t *testing.T) { return true, nil } - testutil.WaitForResult(verifyServices, func(err error) { + if err := testutil.WaitForResult(verifyServices); err != nil { t.Fatal(err) - }) + } } func TestAgentAntiEntropy_Services_WithChecks(t *testing.T) { @@ -667,7 +667,7 @@ func TestAgentAntiEntropy_Checks(t *testing.T) { var checks structs.IndexedHealthChecks // Verify that we are in sync - testutil.WaitForResult(func() (bool, error) { + if err := testutil.WaitForResult(func() (bool, error) { if err := agent.RPC("Health.NodeChecks", &req, &checks); err != nil { return false, fmt.Errorf("err: %v", err) } @@ -704,9 +704,9 @@ func TestAgentAntiEntropy_Checks(t *testing.T) { } } return true, nil - }, func(err error) { - t.Fatalf("err: %s", err) - }) + }); err != nil { + t.Fatal(err) + } // Check the local state if len(agent.state.checks) != 4 { @@ -749,7 +749,7 @@ func TestAgentAntiEntropy_Checks(t *testing.T) { agent.StartSync() // Verify that we are in sync - testutil.WaitForResult(func() (bool, error) { + 
if err := testutil.WaitForResult(func() (bool, error) { if err := agent.RPC("Health.NodeChecks", &req, &checks); err != nil { return false, fmt.Errorf("err: %v", err) } @@ -782,9 +782,9 @@ func TestAgentAntiEntropy_Checks(t *testing.T) { } } return true, nil - }, func(err error) { - t.Fatalf("err: %s", err) - }) + }); err != nil { + t.Fatal(err) + } // Check the local state if len(agent.state.checks) != 3 { @@ -829,7 +829,7 @@ func TestAgentAntiEntropy_Check_DeferSync(t *testing.T) { } var checks structs.IndexedHealthChecks - testutil.WaitForResult(func() (bool, error) { + if err := testutil.WaitForResult(func() (bool, error) { if err := agent.RPC("Health.NodeChecks", &req, &checks); err != nil { return false, fmt.Errorf("err: %v", err) } @@ -840,9 +840,9 @@ func TestAgentAntiEntropy_Check_DeferSync(t *testing.T) { } return true, nil - }, func(err error) { + }); err != nil { t.Fatal(err) - }) + } // Update the check output! Should be deferred agent.state.UpdateCheck("web", structs.HealthPassing, "output") @@ -864,7 +864,7 @@ func TestAgentAntiEntropy_Check_DeferSync(t *testing.T) { } // Wait for a deferred update - testutil.WaitForResult(func() (bool, error) { + if err := testutil.WaitForResult(func() (bool, error) { if err := agent.RPC("Health.NodeChecks", &req, &checks); err != nil { return false, err } @@ -880,9 +880,9 @@ func TestAgentAntiEntropy_Check_DeferSync(t *testing.T) { } return true, nil - }, func(err error) { - t.Fatalf("err: %s", err) - }) + }); err != nil { + t.Fatal(err) + } // Change the output in the catalog to force it out of sync. eCopy := check.Clone() @@ -970,7 +970,7 @@ func TestAgentAntiEntropy_Check_DeferSync(t *testing.T) { } // Wait for the deferred update. - testutil.WaitForResult(func() (bool, error) { + if err := testutil.WaitForResult(func() (bool, error) { if err := agent.RPC("Health.NodeChecks", &req, &checks); err != nil { return false, err } @@ -986,9 +986,9 @@ func TestAgentAntiEntropy_Check_DeferSync(t *testing.T) { } return true, nil - }, func(err error) { - t.Fatalf("err: %s", err) - }) + }); err != nil { + t.Fatal(err) + } } func TestAgentAntiEntropy_NodeInfo(t *testing.T) { @@ -1022,7 +1022,7 @@ func TestAgentAntiEntropy_NodeInfo(t *testing.T) { var services structs.IndexedNodeServices // Wait for the sync - testutil.WaitForResult(func() (bool, error) { + if err := testutil.WaitForResult(func() (bool, error) { if err := agent.RPC("Catalog.NodeServices", &req, &services); err != nil { return false, fmt.Errorf("err: %v", err) } @@ -1038,9 +1038,9 @@ func TestAgentAntiEntropy_NodeInfo(t *testing.T) { return false, fmt.Errorf("bad: %v", services.NodeServices.Node) } return true, nil - }, func(err error) { - t.Fatalf("err: %s", err) - }) + }); err != nil { + t.Fatal(err) + } // Blow away the catalog version of the node info if err := agent.RPC("Catalog.Register", args, &out); err != nil { @@ -1052,7 +1052,7 @@ func TestAgentAntiEntropy_NodeInfo(t *testing.T) { // Wait for the sync - this should have been a sync of just the // node info - testutil.WaitForResult(func() (bool, error) { + if err := testutil.WaitForResult(func() (bool, error) { if err := agent.RPC("Catalog.NodeServices", &req, &services); err != nil { return false, fmt.Errorf("err: %v", err) } @@ -1066,9 +1066,9 @@ func TestAgentAntiEntropy_NodeInfo(t *testing.T) { return false, fmt.Errorf("bad: %v", services.NodeServices.Node) } return true, nil - }, func(err error) { - t.Fatalf("err: %s", err) - }) + }); err != nil { + t.Fatal(err) + } } func TestAgentAntiEntropy_deleteService_fails(t 
*testing.T) { @@ -1247,7 +1247,7 @@ func TestAgent_sendCoordinate(t *testing.T) { Datacenter: agent.config.Datacenter, } var reply structs.IndexedCoordinates - testutil.WaitForResult(func() (bool, error) { + if err := testutil.WaitForResult(func() (bool, error) { if err := agent.RPC("Coordinate.ListNodes", &req, &reply); err != nil { return false, fmt.Errorf("err: %s", err) } @@ -1259,7 +1259,7 @@ func TestAgent_sendCoordinate(t *testing.T) { return false, fmt.Errorf("bad: %v", coord) } return true, nil - }, func(err error) { - t.Fatalf("err: %s", err) - }) + }); err != nil { + t.Fatal(err) + } } diff --git a/command/agent/operator_endpoint_test.go b/command/agent/operator_endpoint_test.go index f1ed6b122d..0d976eaef8 100644 --- a/command/agent/operator_endpoint_test.go +++ b/command/agent/operator_endpoint_test.go @@ -435,7 +435,7 @@ func TestOperator_ServerHealth(t *testing.T) { t.Fatalf("err: %v", err) } - testutil.WaitForResult(func() (bool, error) { + if err := testutil.WaitForResult(func() (bool, error) { resp := httptest.NewRecorder() obj, err := srv.OperatorServerHealth(resp, req) if err != nil { @@ -457,9 +457,9 @@ func TestOperator_ServerHealth(t *testing.T) { } return true, nil - }, func(err error) { + }); err != nil { t.Fatal(err) - }) + } }, cb) } @@ -477,7 +477,7 @@ func TestOperator_ServerHealth_Unhealthy(t *testing.T) { t.Fatalf("err: %v", err) } - testutil.WaitForResult(func() (bool, error) { + if err := testutil.WaitForResult(func() (bool, error) { resp := httptest.NewRecorder() obj, err := srv.OperatorServerHealth(resp, req) if err != nil { @@ -497,9 +497,9 @@ func TestOperator_ServerHealth_Unhealthy(t *testing.T) { } return true, nil - }, func(err error) { + }); err != nil { t.Fatal(err) - }) + } }, cb) } diff --git a/command/agent/user_event_test.go b/command/agent/user_event_test.go index 6a4c9919c9..4286fed2be 100644 --- a/command/agent/user_event_test.go +++ b/command/agent/user_event_test.go @@ -175,13 +175,11 @@ func TestFireReceiveEvent(t *testing.T) { t.Fatalf("err: %v", err) } - testutil.WaitForResult( - func() (bool, error) { - return len(agent.UserEvents()) == 1, nil - }, - func(err error) { - t.Fatalf("bad len") - }) + if err := testutil.WaitForResult(func() (bool, error) { + return len(agent.UserEvents()) == 1, nil + }); err != nil { + t.Fatal(err) + } last := agent.LastUserEvent() if last.ID != p2.ID { diff --git a/command/exec_test.go b/command/exec_test.go index 4b3b155379..d21681d62b 100644 --- a/command/exec_test.go +++ b/command/exec_test.go @@ -86,12 +86,12 @@ func waitForLeader(t *testing.T, httpAddr string) { if err != nil { t.Fatalf("err: %v", err) } - testutil.WaitForResult(func() (bool, error) { + if err := testutil.WaitForResult(func() (bool, error) { _, qm, err := client.Catalog().Nodes(nil) return err == nil && qm.KnownLeader && qm.LastIndex > 0, err - }, func(err error) { - t.Fatalf("failed to find leader: %v", err) - }) + }); err != nil { + t.Fatal(err) + } } func httpClient(addr string) (*consulapi.Client, error) { @@ -194,15 +194,15 @@ func TestExecCommand_Sessions_Foreign(t *testing.T) { c.conf.localNode = "foo" var id string - testutil.WaitForResult(func() (bool, error) { + if err := testutil.WaitForResult(func() (bool, error) { id, err = c.createSession() if err != nil && strings.Contains(err.Error(), "Failed to find Consul server") { err = nil } return id != "", err - }, func(err error) { - t.Fatalf("err: %v", err) - }) + }); err != nil { + t.Fatal(err) + } se, _, err := client.Session().Info(id, nil) if err != nil { diff --git 
a/command/force_leave_test.go b/command/force_leave_test.go index 257ce1b2b3..f7530167eb 100644 --- a/command/force_leave_test.go +++ b/command/force_leave_test.go @@ -3,12 +3,13 @@ package command import ( "errors" "fmt" + "strings" + "testing" + "github.com/hashicorp/consul/command/base" "github.com/hashicorp/consul/testutil" "github.com/hashicorp/serf/serf" "github.com/mitchellh/cli" - "strings" - "testing" ) func testForceLeaveCommand(t *testing.T) (*cli.MockUi, *ForceLeaveCommand) { @@ -56,13 +57,13 @@ func TestForceLeaveCommandRun(t *testing.T) { t.Fatalf("should have 2 members: %#v", m) } - testutil.WaitForResult(func() (bool, error) { + if err := testutil.WaitForResult(func() (bool, error) { m = a1.agent.LANMembers() success := m[1].Status == serf.StatusLeft return success, errors.New(m[1].Status.String()) - }, func(err error) { + }); err != nil { t.Fatalf("member status is %v, should be left", err) - }) + } } func TestForceLeaveCommandRun_noAddrs(t *testing.T) { diff --git a/command/rtt_test.go b/command/rtt_test.go index 57495e1f5a..2329c3121e 100644 --- a/command/rtt_test.go +++ b/command/rtt_test.go @@ -108,7 +108,7 @@ func TestRTTCommand_Run_LAN(t *testing.T) { } // Wait for the updates to get flushed to the data store. - testutil.WaitForResult(func() (bool, error) { + if err := testutil.WaitForResult(func() (bool, error) { code := c.Run(args) if code != 0 { return false, fmt.Errorf("bad: %d: %#v", code, ui.ErrorWriter.String()) @@ -121,9 +121,9 @@ func TestRTTCommand_Run_LAN(t *testing.T) { } return true, nil - }, func(err error) { - t.Fatalf("failed to get proper RTT output: %v", err) - }) + }); err != nil { + t.Fatal(err) + } // Default to the agent's node. { diff --git a/consul/acl_replication_test.go b/consul/acl_replication_test.go index 1b419fea58..0f4e9b6ebc 100644 --- a/consul/acl_replication_test.go +++ b/consul/acl_replication_test.go @@ -395,9 +395,9 @@ func TestACLReplication(t *testing.T) { } // Wait for the replica to converge. - testutil.WaitForResult(checkSame, func(err error) { + if err := testutil.WaitForResult(checkSame); err != nil { t.Fatalf("ACLs didn't converge") - }) + } // Create more new tokens. for i := 0; i < 1000; i++ { @@ -418,9 +418,9 @@ func TestACLReplication(t *testing.T) { } // Wait for the replica to converge. - testutil.WaitForResult(checkSame, func(err error) { + if err := testutil.WaitForResult(checkSame); err != nil { t.Fatalf("ACLs didn't converge") - }) + } // Delete a token. arg := structs.ACLRequest{ @@ -437,7 +437,7 @@ func TestACLReplication(t *testing.T) { } // Wait for the replica to converge. 
- testutil.WaitForResult(checkSame, func(err error) { + if err := testutil.WaitForResult(checkSame); err != nil { t.Fatalf("ACLs didn't converge") - }) + } } diff --git a/consul/acl_test.go b/consul/acl_test.go index c0f0fd091e..2657f12e01 100644 --- a/consul/acl_test.go +++ b/consul/acl_test.go @@ -232,12 +232,12 @@ func TestACL_NonAuthority_NotFound(t *testing.T) { t.Fatalf("err: %v", err) } - testutil.WaitForResult(func() (bool, error) { + if err := testutil.WaitForResult(func() (bool, error) { p1, _ := s1.numPeers() return p1 == 2, errors.New(fmt.Sprintf("%d", p1)) - }, func(err error) { - t.Fatalf("should have 2 peers: %v", err) - }) + }); err != nil { + t.Fatal(err) + } client := rpcClient(t, s1) defer client.Close() @@ -284,12 +284,12 @@ func TestACL_NonAuthority_Found(t *testing.T) { t.Fatalf("err: %v", err) } - testutil.WaitForResult(func() (bool, error) { + if err := testutil.WaitForResult(func() (bool, error) { p1, _ := s1.numPeers() return p1 == 2, errors.New(fmt.Sprintf("%d", p1)) - }, func(err error) { - t.Fatalf("should have 2 peers: %v", err) - }) + }); err != nil { + t.Fatal(err) + } testutil.WaitForLeader(t, s1.RPC, "dc1") // Create a new token @@ -360,12 +360,12 @@ func TestACL_NonAuthority_Management(t *testing.T) { t.Fatalf("err: %v", err) } - testutil.WaitForResult(func() (bool, error) { + if err := testutil.WaitForResult(func() (bool, error) { p1, _ := s1.numPeers() return p1 == 2, errors.New(fmt.Sprintf("%d", p1)) - }, func(err error) { - t.Fatalf("should have 2 peers: %v", err) - }) + }); err != nil { + t.Fatal(err) + } testutil.WaitForLeader(t, s1.RPC, "dc1") // find the non-authoritative server @@ -417,12 +417,12 @@ func TestACL_DownPolicy_Deny(t *testing.T) { t.Fatalf("err: %v", err) } - testutil.WaitForResult(func() (bool, error) { + if err := testutil.WaitForResult(func() (bool, error) { p1, _ := s1.numPeers() return p1 == 2, errors.New(fmt.Sprintf("%d", p1)) - }, func(err error) { - t.Fatalf("should have 2 peers: %v", err) - }) + }); err != nil { + t.Fatal(err) + } testutil.WaitForLeader(t, s1.RPC, "dc1") // Create a new token @@ -491,12 +491,12 @@ func TestACL_DownPolicy_Allow(t *testing.T) { t.Fatalf("err: %v", err) } - testutil.WaitForResult(func() (bool, error) { + if err := testutil.WaitForResult(func() (bool, error) { p1, _ := s1.numPeers() return p1 == 2, errors.New(fmt.Sprintf("%d", p1)) - }, func(err error) { - t.Fatalf("should have 2 peers: %v", err) - }) + }); err != nil { + t.Fatal(err) + } testutil.WaitForLeader(t, s1.RPC, "dc1") // Create a new token @@ -567,12 +567,12 @@ func TestACL_DownPolicy_ExtendCache(t *testing.T) { t.Fatalf("err: %v", err) } - testutil.WaitForResult(func() (bool, error) { + if err := testutil.WaitForResult(func() (bool, error) { p1, _ := s1.numPeers() return p1 == 2, errors.New(fmt.Sprintf("%d", p1)) - }, func(err error) { - t.Fatalf("should have 2 peers: %v", err) - }) + }); err != nil { + t.Fatal(err) + } testutil.WaitForLeader(t, s1.RPC, "dc1") // Create a new token @@ -687,7 +687,7 @@ func TestACL_Replication(t *testing.T) { } // Wait for replication to occur. - testutil.WaitForResult(func() (bool, error) { + if err := testutil.WaitForResult(func() (bool, error) { _, acl, err := s2.fsm.State().ACLGet(nil, id) if err != nil { return false, err @@ -703,9 +703,9 @@ func TestACL_Replication(t *testing.T) { return false, nil } return true, nil - }, func(err error) { - t.Fatalf("ACLs didn't converge") - }) + }); err != nil { + t.Fatal(err) + } // Kill the ACL datacenter. 
s1.Shutdown() diff --git a/consul/autopilot_test.go b/consul/autopilot_test.go index 5c771b55ba..c171eabd13 100644 --- a/consul/autopilot_test.go +++ b/consul/autopilot_test.go @@ -49,12 +49,12 @@ func testCleanupDeadServer(t *testing.T, raftVersion int) { } for _, s := range servers { - testutil.WaitForResult(func() (bool, error) { + if err := testutil.WaitForResult(func() (bool, error) { peers, _ := s.numPeers() return peers == 3, nil - }, func(err error) { - t.Fatalf("should have 3 peers") - }) + }); err != nil { + t.Fatal(err) + } } // Bring up a new server @@ -65,7 +65,7 @@ func testCleanupDeadServer(t *testing.T, raftVersion int) { // Kill a non-leader server s3.Shutdown() - testutil.WaitForResult(func() (bool, error) { + if err := testutil.WaitForResult(func() (bool, error) { alive := 0 for _, m := range s1.LANMembers() { if m.Status == serf.StatusAlive { @@ -73,9 +73,9 @@ func testCleanupDeadServer(t *testing.T, raftVersion int) { } } return alive == 2, nil - }, func(err error) { - t.Fatalf("should have 2 alive members") - }) + }); err != nil { + t.Fatal(err) + } // Join the new server if _, err := s4.JoinLAN([]string{addr}); err != nil { @@ -85,12 +85,12 @@ func testCleanupDeadServer(t *testing.T, raftVersion int) { // Make sure the dead server is removed and we're back to 3 total peers for _, s := range servers { - testutil.WaitForResult(func() (bool, error) { + if err := testutil.WaitForResult(func() (bool, error) { peers, _ := s.numPeers() return peers == 3, nil - }, func(err error) { - t.Fatalf("should have 3 peers") - }) + }); err != nil { + t.Fatal(err) + } } } @@ -131,12 +131,12 @@ func TestAutopilot_CleanupDeadServerPeriodic(t *testing.T) { } for _, s := range servers { - testutil.WaitForResult(func() (bool, error) { + if err := testutil.WaitForResult(func() (bool, error) { peers, _ := s.numPeers() return peers == 4, nil - }, func(err error) { - t.Fatalf("should have 4 peers") - }) + }); err != nil { + t.Fatal(err) + } } // Kill a non-leader server @@ -144,12 +144,12 @@ func TestAutopilot_CleanupDeadServerPeriodic(t *testing.T) { // Should be removed from the peers automatically for _, s := range []*Server{s1, s2, s3} { - testutil.WaitForResult(func() (bool, error) { + if err := testutil.WaitForResult(func() (bool, error) { peers, _ := s.numPeers() return peers == 3, nil - }, func(err error) { - t.Fatalf("should have 3 peers") - }) + }); err != nil { + t.Fatal(err) + } } } @@ -185,7 +185,7 @@ func TestAutopilot_PromoteNonVoter(t *testing.T) { // Wait for the new server to be added as a non-voter, but make sure // it doesn't get promoted to a voter even after ServerStabilizationTime, // because that would result in an even-numbered quorum count. 
- testutil.WaitForResult(func() (bool, error) { + if err := testutil.WaitForResult(func() (bool, error) { future := s1.raft.GetConfiguration() if err := future.Error(); err != nil { return false, err @@ -211,9 +211,9 @@ func TestAutopilot_PromoteNonVoter(t *testing.T) { } return true, nil - }, func(err error) { + }); err != nil { t.Fatal(err) - }) + } // Now add another server and make sure they both get promoted to voters after stabilization dir3, s3 := testServerWithConfig(t, func(c *Config) { @@ -227,7 +227,7 @@ func TestAutopilot_PromoteNonVoter(t *testing.T) { t.Fatalf("err: %v", err) } - testutil.WaitForResult(func() (bool, error) { + if err := testutil.WaitForResult(func() (bool, error) { future := s1.raft.GetConfiguration() if err := future.Error(); err != nil { return false, err @@ -246,7 +246,7 @@ func TestAutopilot_PromoteNonVoter(t *testing.T) { } return true, nil - }, func(err error) { + }); err != nil { t.Fatal(err) - }) + } } diff --git a/consul/catalog_endpoint_test.go b/consul/catalog_endpoint_test.go index 1dd79eac4e..6a0d36654d 100644 --- a/consul/catalog_endpoint_test.go +++ b/consul/catalog_endpoint_test.go @@ -601,12 +601,12 @@ func TestCatalog_ListNodes(t *testing.T) { t.Fatalf("err: %v", err) } - testutil.WaitForResult(func() (bool, error) { + if err := testutil.WaitForResult(func() (bool, error) { msgpackrpc.CallWithCodec(codec, "Catalog.ListNodes", &args, &out) return len(out.Nodes) == 2, nil - }, func(err error) { - t.Fatalf("err: %v", err) - }) + }); err != nil { + t.Fatal(err) + } // Server node is auto added from Serf if out.Nodes[1].Node != s1.config.NodeName { @@ -644,12 +644,12 @@ func TestCatalog_ListNodes_NodeMetaFilter(t *testing.T) { } var out structs.IndexedNodes - testutil.WaitForResult(func() (bool, error) { + if err := testutil.WaitForResult(func() (bool, error) { msgpackrpc.CallWithCodec(codec, "Catalog.ListNodes", &args, &out) return len(out.Nodes) == 1, nil - }, func(err error) { - t.Fatalf("err: %v", err) - }) + }); err != nil { + t.Fatal(err) + } // Verify that only the correct node was returned if out.Nodes[0].Node != "foo" { @@ -676,12 +676,12 @@ func TestCatalog_ListNodes_NodeMetaFilter(t *testing.T) { } // Should get an empty list of nodes back - testutil.WaitForResult(func() (bool, error) { + if err := testutil.WaitForResult(func() (bool, error) { msgpackrpc.CallWithCodec(codec, "Catalog.ListNodes", &args, &out) return len(out.Nodes) == 0, nil - }, func(err error) { - t.Fatalf("err: %v", err) - }) + }); err != nil { + t.Fatal(err) + } } func TestCatalog_ListNodes_StaleRaad(t *testing.T) { @@ -887,12 +887,12 @@ func TestCatalog_ListNodes_DistanceSort(t *testing.T) { Datacenter: "dc1", } var out structs.IndexedNodes - testutil.WaitForResult(func() (bool, error) { + if err := testutil.WaitForResult(func() (bool, error) { msgpackrpc.CallWithCodec(codec, "Catalog.ListNodes", &args, &out) return len(out.Nodes) == 5, nil - }, func(err error) { - t.Fatalf("err: %v", err) - }) + }); err != nil { + t.Fatal(err) + } if out.Nodes[0].Node != "aaa" { t.Fatalf("bad: %v", out) } @@ -915,12 +915,12 @@ func TestCatalog_ListNodes_DistanceSort(t *testing.T) { Datacenter: "dc1", Source: structs.QuerySource{Datacenter: "dc1", Node: "foo"}, } - testutil.WaitForResult(func() (bool, error) { + if err := testutil.WaitForResult(func() (bool, error) { msgpackrpc.CallWithCodec(codec, "Catalog.ListNodes", &args, &out) return len(out.Nodes) == 5, nil - }, func(err error) { - t.Fatalf("err: %v", err) - }) + }); err != nil { + t.Fatal(err) + } if out.Nodes[0].Node != 
"foo" { t.Fatalf("bad: %v", out) } diff --git a/consul/client_test.go b/consul/client_test.go index 8ea419de5b..9a721ca16c 100644 --- a/consul/client_test.go +++ b/consul/client_test.go @@ -84,27 +84,27 @@ func TestClient_JoinLAN(t *testing.T) { if _, err := c1.JoinLAN([]string{addr}); err != nil { t.Fatalf("err: %v", err) } - testutil.WaitForResult(func() (bool, error) { + if err := testutil.WaitForResult(func() (bool, error) { return c1.servers.NumServers() == 1, nil - }, func(err error) { - t.Fatalf("expected consul server") - }) + }); err != nil { + t.Fatal("expected consul server") + } // Check the members - testutil.WaitForResult(func() (bool, error) { + if err := testutil.WaitForResult(func() (bool, error) { server_check := len(s1.LANMembers()) == 2 client_check := len(c1.LANMembers()) == 2 return server_check && client_check, nil - }, func(err error) { - t.Fatalf("bad len") - }) + }); err != nil { + t.Fatal("bad len") + } // Check we have a new consul - testutil.WaitForResult(func() (bool, error) { + if err := testutil.WaitForResult(func() (bool, error) { return c1.servers.NumServers() == 1, nil - }, func(err error) { - t.Fatalf("expected consul server") - }) + }); err != nil { + t.Fatal("expected consul server") + } } func TestClient_JoinLAN_Invalid(t *testing.T) { @@ -189,12 +189,12 @@ func TestClient_RPC(t *testing.T) { } // RPC should succeed - testutil.WaitForResult(func() (bool, error) { + if err := testutil.WaitForResult(func() (bool, error) { err := c1.RPC("Status.Ping", struct{}{}, &out) return err == nil, err - }, func(err error) { - t.Fatalf("err: %v", err) - }) + }); err != nil { + t.Fatal(err) + } } func TestClient_RPC_Pool(t *testing.T) { @@ -214,12 +214,12 @@ func TestClient_RPC_Pool(t *testing.T) { } // Wait for both agents to finish joining - testutil.WaitForResult(func() (bool, error) { + if err := testutil.WaitForResult(func() (bool, error) { return len(s1.LANMembers()) == 2 && len(c1.LANMembers()) == 2, nil - }, func(err error) { + }); err != nil { t.Fatalf("Server has %v of %v expected members; Client has %v of %v expected members.", len(s1.LANMembers()), 2, len(c1.LANMembers()), 2) - }) + } // Blast out a bunch of RPC requests at the same time to try to get // contention opening new connections. @@ -230,12 +230,12 @@ func TestClient_RPC_Pool(t *testing.T) { go func() { defer wg.Done() var out struct{} - testutil.WaitForResult(func() (bool, error) { + if err := testutil.WaitForResult(func() (bool, error) { err := c1.RPC("Status.Ping", struct{}{}, &out) return err == nil, err - }, func(err error) { - t.Fatalf("err: %v", err) - }) + }); err != nil { + t.Fatal(err) + } }() } @@ -345,7 +345,7 @@ func TestClient_RPC_TLS(t *testing.T) { } // Wait for joins to finish/RPC to succeed - testutil.WaitForResult(func() (bool, error) { + if err := testutil.WaitForResult(func() (bool, error) { if len(s1.LANMembers()) != 2 { return false, fmt.Errorf("bad len: %v", len(s1.LANMembers())) } @@ -356,9 +356,9 @@ func TestClient_RPC_TLS(t *testing.T) { err := c1.RPC("Status.Ping", struct{}{}, &out) return err == nil, err - }, func(err error) { - t.Fatalf("err: %v", err) - }) + }); err != nil { + t.Fatal(err) + } } func TestClient_SnapshotRPC(t *testing.T) { @@ -384,11 +384,11 @@ func TestClient_SnapshotRPC(t *testing.T) { } // Wait until we've got a healthy server. 
- testutil.WaitForResult(func() (bool, error) { + if err := testutil.WaitForResult(func() (bool, error) { return c1.servers.NumServers() == 1, nil - }, func(err error) { - t.Fatalf("expected consul server") - }) + }); err != nil { + t.Fatal("expected consul server") + } // Take a snapshot. var snap bytes.Buffer @@ -443,11 +443,11 @@ func TestClient_SnapshotRPC_TLS(t *testing.T) { } // Wait until we've got a healthy server. - testutil.WaitForResult(func() (bool, error) { + if err := testutil.WaitForResult(func() (bool, error) { return c1.servers.NumServers() == 1, nil - }, func(err error) { - t.Fatalf("expected consul server") - }) + }); err != nil { + t.Fatal("expected consul server") + } // Take a snapshot. var snap bytes.Buffer @@ -496,11 +496,11 @@ func TestClientServer_UserEvent(t *testing.T) { testutil.WaitForLeader(t, s1.RPC, "dc1") // Check the members - testutil.WaitForResult(func() (bool, error) { + if err := testutil.WaitForResult(func() (bool, error) { return len(c1.LANMembers()) == 2 && len(s1.LANMembers()) == 2, nil - }, func(err error) { - t.Fatalf("bad len") - }) + }); err != nil { + t.Fatal("bad len") + } // Fire the user event codec := rpcClient(t, s1) diff --git a/consul/coordinate_endpoint_test.go b/consul/coordinate_endpoint_test.go index 42e41c0dcb..e088926713 100644 --- a/consul/coordinate_endpoint_test.go +++ b/consul/coordinate_endpoint_test.go @@ -351,7 +351,7 @@ func TestCoordinate_ListNodes(t *testing.T) { } // Now query back for all the nodes. - testutil.WaitForResult(func() (bool, error) { + if err := testutil.WaitForResult(func() (bool, error) { arg := structs.DCSpecificRequest{ Datacenter: "dc1", } @@ -369,7 +369,9 @@ func TestCoordinate_ListNodes(t *testing.T) { verifyCoordinatesEqual(t, resp.Coordinates[1].Coord, arg3.Coord) // baz verifyCoordinatesEqual(t, resp.Coordinates[2].Coord, arg1.Coord) // foo return true, nil - }, func(err error) { t.Fatalf("err: %v", err) }) + }); err != nil { + t.Fatal(err) + } } func TestCoordinate_ListNodes_ACLFilter(t *testing.T) { @@ -444,7 +446,7 @@ func TestCoordinate_ListNodes_ACLFilter(t *testing.T) { // Wait for all the coordinate updates to apply. Since we aren't // enforcing version 8 ACLs, this should also allow us to read // everything back without a token. - testutil.WaitForResult(func() (bool, error) { + if err := testutil.WaitForResult(func() (bool, error) { arg := structs.DCSpecificRequest{ Datacenter: "dc1", } @@ -456,7 +458,9 @@ func TestCoordinate_ListNodes_ACLFilter(t *testing.T) { return true, nil } return false, fmt.Errorf("bad: %v", resp.Coordinates) - }, func(err error) { t.Fatalf("err: %v", err) }) + }); err != nil { + t.Fatal(err) + } // Now that we've waited for the batch processing to ingest the // coordinates we can do the rest of the requests without the loop. 
We diff --git a/consul/leader_test.go b/consul/leader_test.go index 7dc9e0a248..cda80d51c2 100644 --- a/consul/leader_test.go +++ b/consul/leader_test.go @@ -33,15 +33,15 @@ func TestLeader_RegisterMember(t *testing.T) { // Client should be registered state := s1.fsm.State() - testutil.WaitForResult(func() (bool, error) { + if err := testutil.WaitForResult(func() (bool, error) { _, node, err := state.GetNode(c1.config.NodeName) if err != nil { t.Fatalf("err: %v", err) } return node != nil, nil - }, func(err error) { - t.Fatalf("client not registered") - }) + }); err != nil { + t.Fatal("client not registered") + } // Should have a check _, checks, err := state.NodeChecks(nil, c1.config.NodeName) @@ -103,15 +103,15 @@ func TestLeader_FailedMember(t *testing.T) { // Should be registered state := s1.fsm.State() - testutil.WaitForResult(func() (bool, error) { + if err := testutil.WaitForResult(func() (bool, error) { _, node, err := state.GetNode(c1.config.NodeName) if err != nil { t.Fatalf("err: %v", err) } return node != nil, nil - }, func(err error) { - t.Fatalf("client not registered") - }) + }); err != nil { + t.Fatal("client not registered") + } // Should have a check _, checks, err := state.NodeChecks(nil, c1.config.NodeName) @@ -128,15 +128,15 @@ func TestLeader_FailedMember(t *testing.T) { t.Fatalf("bad check: %v", checks[0]) } - testutil.WaitForResult(func() (bool, error) { + if err := testutil.WaitForResult(func() (bool, error) { _, checks, err = state.NodeChecks(nil, c1.config.NodeName) if err != nil { t.Fatalf("err: %v", err) } return checks[0].Status == structs.HealthCritical, errors.New(checks[0].Status) - }, func(err error) { + }); err != nil { t.Fatalf("check status is %v, should be critical", err) - }) + } } func TestLeader_LeftMember(t *testing.T) { @@ -158,30 +158,30 @@ func TestLeader_LeftMember(t *testing.T) { state := s1.fsm.State() // Should be registered - testutil.WaitForResult(func() (bool, error) { + if err := testutil.WaitForResult(func() (bool, error) { _, node, err := state.GetNode(c1.config.NodeName) if err != nil { t.Fatalf("err: %v", err) } return node != nil, nil - }, func(err error) { - t.Fatalf("client should be registered") - }) + }); err != nil { + t.Fatal("client should be registered") + } // Node should leave c1.Leave() c1.Shutdown() // Should be deregistered - testutil.WaitForResult(func() (bool, error) { + if err := testutil.WaitForResult(func() (bool, error) { _, node, err := state.GetNode(c1.config.NodeName) if err != nil { t.Fatalf("err: %v", err) } return node == nil, nil - }, func(err error) { - t.Fatalf("client should not be registered") - }) + }); err != nil { + t.Fatal("client should not be registered") + } } func TestLeader_ReapMember(t *testing.T) { @@ -203,15 +203,15 @@ func TestLeader_ReapMember(t *testing.T) { state := s1.fsm.State() // Should be registered - testutil.WaitForResult(func() (bool, error) { + if err := testutil.WaitForResult(func() (bool, error) { _, node, err := state.GetNode(c1.config.NodeName) if err != nil { t.Fatalf("err: %v", err) } return node != nil, nil - }, func(err error) { - t.Fatalf("client should be registered") - }) + }); err != nil { + t.Fatal("client should be registered") + } // Simulate a node reaping mems := s1.LANMembers() @@ -310,15 +310,15 @@ func TestLeader_Reconcile(t *testing.T) { } // Should be registered - testutil.WaitForResult(func() (bool, error) { + if err := testutil.WaitForResult(func() (bool, error) { _, node, err = state.GetNode(c1.config.NodeName) if err != nil { t.Fatalf("err: %v", err) } 
 		return node != nil, nil
-	}, func(err error) {
-		t.Fatalf("client should be registered")
-	})
+	}); err != nil {
+		t.Fatal("client should be registered")
+	}
 }

 func TestLeader_LeftServer(t *testing.T) {
@@ -346,15 +346,15 @@ func TestLeader_LeftServer(t *testing.T) {
 	}

 	for _, s := range servers {
-		testutil.WaitForResult(func() (bool, error) {
+		if err := testutil.WaitForResult(func() (bool, error) {
 			peers, _ := s.numPeers()
 			return peers == 3, nil
-		}, func(err error) {
-			t.Fatalf("should have 3 peers")
-		})
+		}); err != nil {
+			t.Fatal("should have 3 peers")
+		}
 	}

-	testutil.WaitForResult(func() (bool, error) {
+	if err := testutil.WaitForResult(func() (bool, error) {
 		// Kill any server
 		servers[0].Shutdown()

@@ -369,9 +369,9 @@ func TestLeader_LeftServer(t *testing.T) {
 		}

 		return true, nil
-	}, func(err error) {
-		t.Fatalf("err: %s", err)
-	})
+	}); err != nil {
+		t.Fatal(err)
+	}
 }

 func TestLeader_LeftLeader(t *testing.T) {
@@ -399,12 +399,12 @@ func TestLeader_LeftLeader(t *testing.T) {
 	}

 	for _, s := range servers {
-		testutil.WaitForResult(func() (bool, error) {
+		if err := testutil.WaitForResult(func() (bool, error) {
 			peers, _ := s.numPeers()
 			return peers == 3, nil
-		}, func(err error) {
-			t.Fatalf("should have 3 peers")
-		})
+		}); err != nil {
+			t.Fatal("should have 3 peers")
+		}
 	}

 	// Kill the leader!
@@ -428,25 +428,25 @@ func TestLeader_LeftLeader(t *testing.T) {
 			continue
 		}
 		remain = s
-		testutil.WaitForResult(func() (bool, error) {
+		if err := testutil.WaitForResult(func() (bool, error) {
 			peers, _ := s.numPeers()
 			return peers == 2, errors.New(fmt.Sprintf("%d", peers))
-		}, func(err error) {
-			t.Fatalf("should have 2 peers: %v", err)
-		})
+		}); err != nil {
+			t.Fatal("should have 2 peers")
+		}
 	}

 	// Verify the old leader is deregistered
 	state := remain.fsm.State()
-	testutil.WaitForResult(func() (bool, error) {
+	if err := testutil.WaitForResult(func() (bool, error) {
 		_, node, err := state.GetNode(leader.config.NodeName)
 		if err != nil {
 			t.Fatalf("err: %v", err)
 		}
 		return node == nil, nil
-	}, func(err error) {
-		t.Fatalf("leader should be deregistered")
-	})
+	}); err != nil {
+		t.Fatal("leader should be deregistered")
+	}
 }

 func TestLeader_MultiBootstrap(t *testing.T) {
@@ -468,12 +468,12 @@ func TestLeader_MultiBootstrap(t *testing.T) {
 	}

 	for _, s := range servers {
-		testutil.WaitForResult(func() (bool, error) {
+		if err := testutil.WaitForResult(func() (bool, error) {
 			peers := s.serfLAN.Members()
 			return len(peers) == 2, nil
-		}, func(err error) {
-			t.Fatalf("should have 2 peers")
-		})
+		}); err != nil {
+			t.Fatal("should have 2 peers")
+		}
 	}

 	// Ensure we don't have multiple raft peers
@@ -510,12 +510,12 @@ func TestLeader_TombstoneGC_Reset(t *testing.T) {
 	}

 	for _, s := range servers {
-		testutil.WaitForResult(func() (bool, error) {
+		if err := testutil.WaitForResult(func() (bool, error) {
 			peers, _ := s.numPeers()
 			return peers == 3, nil
-		}, func(err error) {
-			t.Fatalf("should have 3 peers")
-		})
+		}); err != nil {
+			t.Fatal("should have 3 peers")
+		}
 	}

 	var leader *Server
@@ -540,7 +540,7 @@ func TestLeader_TombstoneGC_Reset(t *testing.T) {

 	// Wait for a new leader
 	leader = nil
-	testutil.WaitForResult(func() (bool, error) {
+	if err := testutil.WaitForResult(func() (bool, error) {
 		for _, s := range servers {
 			if s.IsLeader() {
 				leader = s
@@ -548,16 +548,16 @@ func TestLeader_TombstoneGC_Reset(t *testing.T) {
 			}
 		}
 		return false, nil
-	}, func(err error) {
-		t.Fatalf("should have leader")
-	})
+	}); err != nil {
+		t.Fatal("should have leader")
+	}

 	// Check that the new leader has a pending
GC expiration - testutil.WaitForResult(func() (bool, error) { + if err := testutil.WaitForResult(func() (bool, error) { return leader.tombstoneGC.PendingExpiration(), nil - }, func(err error) { - t.Fatalf("should have pending expiration") - }) + }); err != nil { + t.Fatal("should have pending expiration") + } } func TestLeader_ReapTombstones(t *testing.T) { @@ -610,7 +610,7 @@ func TestLeader_ReapTombstones(t *testing.T) { // Check that the new leader has a pending GC expiration by // watching for the tombstone to get removed. - testutil.WaitForResult(func() (bool, error) { + if err := testutil.WaitForResult(func() (bool, error) { snap := state.Snapshot() defer snap.Close() stones, err := snap.Tombstones() @@ -618,9 +618,9 @@ func TestLeader_ReapTombstones(t *testing.T) { return false, err } return stones.Next() == nil, nil - }, func(err error) { - t.Fatalf("err: %v", err) - }) + }); err != nil { + t.Fatal(err) + } } func TestLeader_RollRaftServer(t *testing.T) { @@ -656,24 +656,24 @@ func TestLeader_RollRaftServer(t *testing.T) { } for _, s := range servers { - testutil.WaitForResult(func() (bool, error) { + if err := testutil.WaitForResult(func() (bool, error) { peers, _ := s.numPeers() return peers == 3, nil - }, func(err error) { - t.Fatalf("should have 3 peers") - }) + }); err != nil { + t.Fatal("should have 3 peers") + } } // Kill the v1 server s2.Shutdown() for _, s := range []*Server{s1, s3} { - testutil.WaitForResult(func() (bool, error) { + if err := testutil.WaitForResult(func() (bool, error) { minVer, err := ServerMinRaftProtocol(s.LANMembers()) return minVer == 2, err - }, func(err error) { + }); err != nil { t.Fatalf("minimum protocol version among servers should be 2") - }) + } } // Replace the dead server with one running raft protocol v3 @@ -691,7 +691,7 @@ func TestLeader_RollRaftServer(t *testing.T) { // Make sure the dead server is removed and we're back to 3 total peers for _, s := range servers { - testutil.WaitForResult(func() (bool, error) { + if err := testutil.WaitForResult(func() (bool, error) { addrs := 0 ids := 0 future := s.raft.GetConfiguration() @@ -706,9 +706,9 @@ func TestLeader_RollRaftServer(t *testing.T) { } } return addrs == 2 && ids == 1, nil - }, func(err error) { + }); err != nil { t.Fatalf("should see 2 legacy IDs and 1 GUID") - }) + } } } @@ -744,18 +744,18 @@ func TestLeader_ChangeServerAddress(t *testing.T) { } for _, s := range servers { - testutil.WaitForResult(func() (bool, error) { + if err := testutil.WaitForResult(func() (bool, error) { peers, _ := s.numPeers() return peers == 3, nil - }, func(err error) { - t.Fatalf("should have 3 peers") - }) + }); err != nil { + t.Fatal("should have 3 peers") + } } // Shut down a server, freeing up its address/port s3.Shutdown() - testutil.WaitForResult(func() (bool, error) { + if err := testutil.WaitForResult(func() (bool, error) { alive := 0 for _, m := range s1.LANMembers() { if m.Status == serf.StatusAlive { @@ -763,9 +763,9 @@ func TestLeader_ChangeServerAddress(t *testing.T) { } } return alive == 2, nil - }, func(err error) { - t.Fatalf("should have 2 alive members") - }) + }); err != nil { + t.Fatal("should have 2 alive members") + } // Bring up a new server with s3's address that will get a different ID dir4, s4 := testServerWithConfig(t, func(c *Config) { @@ -784,12 +784,12 @@ func TestLeader_ChangeServerAddress(t *testing.T) { // Make sure the dead server is removed and we're back to 3 total peers for _, s := range servers { - testutil.WaitForResult(func() (bool, error) { + if err := 
testutil.WaitForResult(func() (bool, error) { peers, _ := s.numPeers() return peers == 3, nil - }, func(err error) { - t.Fatalf("should have 3 peers") - }) + }); err != nil { + t.Fatal("should have 3 members") + } } } @@ -825,18 +825,18 @@ func TestLeader_ChangeServerID(t *testing.T) { } for _, s := range servers { - testutil.WaitForResult(func() (bool, error) { + if err := testutil.WaitForResult(func() (bool, error) { peers, _ := s.numPeers() return peers == 3, nil - }, func(err error) { - t.Fatalf("should have 3 peers") - }) + }); err != nil { + t.Fatal("should have 3 peers") + } } // Shut down a server, freeing up its address/port s3.Shutdown() - testutil.WaitForResult(func() (bool, error) { + if err := testutil.WaitForResult(func() (bool, error) { alive := 0 for _, m := range s1.LANMembers() { if m.Status == serf.StatusAlive { @@ -844,9 +844,9 @@ func TestLeader_ChangeServerID(t *testing.T) { } } return alive == 2, nil - }, func(err error) { - t.Fatalf("should have 2 alive members") - }) + }); err != nil { + t.Fatal("should have 2 alive members") + } // Bring up a new server with s3's address that will get a different ID dir4, s4 := testServerWithConfig(t, func(c *Config) { @@ -867,11 +867,11 @@ func TestLeader_ChangeServerID(t *testing.T) { // Make sure the dead server is removed and we're back to 3 total peers for _, s := range servers { - testutil.WaitForResult(func() (bool, error) { + if err := testutil.WaitForResult(func() (bool, error) { peers, _ := s.numPeers() return peers == 3, nil - }, func(err error) { - t.Fatalf("should have 3 peers") - }) + }); err != nil { + t.Fatal("should have 3 peers") + } } } diff --git a/consul/operator_endpoint_test.go b/consul/operator_endpoint_test.go index bfe21d7dab..dcb761ff97 100644 --- a/consul/operator_endpoint_test.go +++ b/consul/operator_endpoint_test.go @@ -7,11 +7,12 @@ import ( "strings" "testing" + "time" + "github.com/hashicorp/consul/consul/structs" "github.com/hashicorp/consul/testutil" "github.com/hashicorp/net-rpc-msgpackrpc" "github.com/hashicorp/raft" - "time" ) func TestOperator_RaftGetConfiguration(t *testing.T) { @@ -461,7 +462,7 @@ func TestOperator_ServerHealth(t *testing.T) { testutil.WaitForLeader(t, s1.RPC, "dc1") - testutil.WaitForResult(func() (bool, error) { + if err := testutil.WaitForResult(func() (bool, error) { arg := structs.DCSpecificRequest{ Datacenter: "dc1", } @@ -490,9 +491,9 @@ func TestOperator_ServerHealth(t *testing.T) { } } return true, nil - }, func(err error) { + }); err != nil { t.Fatal(err) - }) + } } func TestOperator_ServerHealth_UnsupportedRaftVersion(t *testing.T) { diff --git a/consul/prepared_query_endpoint_test.go b/consul/prepared_query_endpoint_test.go index 8189e2293c..1aa1836200 100644 --- a/consul/prepared_query_endpoint_test.go +++ b/consul/prepared_query_endpoint_test.go @@ -1453,13 +1453,11 @@ func TestPreparedQuery_Execute(t *testing.T) { if _, err := s2.JoinWAN([]string{addr}); err != nil { t.Fatalf("err: %v", err) } - testutil.WaitForResult( - func() (bool, error) { - return len(s1.WANMembers()) > 1, nil - }, - func(err error) { - t.Fatalf("Failed waiting for WAN join: %v", err) - }) + if err := testutil.WaitForResult(func() (bool, error) { + return len(s1.WANMembers()) > 1, nil + }); err != nil { + t.Fatalf("Failed waiting for WAN join: %v", err) + } // Create an ACL with read permission to the service. 
var execToken string @@ -2704,13 +2702,11 @@ func TestPreparedQuery_Wrapper(t *testing.T) { if _, err := s2.JoinWAN([]string{addr}); err != nil { t.Fatalf("err: %v", err) } - testutil.WaitForResult( - func() (bool, error) { - return len(s1.WANMembers()) > 1, nil - }, - func(err error) { - t.Fatalf("Failed waiting for WAN join: %v", err) - }) + if err := testutil.WaitForResult(func() (bool, error) { + return len(s1.WANMembers()) > 1, nil + }); err != nil { + t.Fatalf("Failed waiting for WAN join: %v", err) + } // Try all the operations on a real server via the wrapper. wrapper := &queryServerWrapper{s1} diff --git a/consul/server_test.go b/consul/server_test.go index ae8c7e5fcf..1258c0dd15 100644 --- a/consul/server_test.go +++ b/consul/server_test.go @@ -177,17 +177,17 @@ func TestServer_JoinLAN(t *testing.T) { } // Check the members - testutil.WaitForResult(func() (bool, error) { + if err := testutil.WaitForResult(func() (bool, error) { return len(s1.LANMembers()) == 2, nil - }, func(err error) { - t.Fatalf("bad len") - }) + }); err != nil { + t.Fatal("bad len") + } - testutil.WaitForResult(func() (bool, error) { + if err := testutil.WaitForResult(func() (bool, error) { return len(s2.LANMembers()) == 2, nil - }, func(err error) { - t.Fatalf("bad len") - }) + }); err != nil { + t.Fatal("bad len") + } } func TestServer_JoinWAN(t *testing.T) { @@ -207,28 +207,28 @@ func TestServer_JoinWAN(t *testing.T) { } // Check the members - testutil.WaitForResult(func() (bool, error) { + if err := testutil.WaitForResult(func() (bool, error) { return len(s1.WANMembers()) == 2, nil - }, func(err error) { - t.Fatalf("bad len") - }) + }); err != nil { + t.Fatal("bad len") + } - testutil.WaitForResult(func() (bool, error) { + if err := testutil.WaitForResult(func() (bool, error) { return len(s2.WANMembers()) == 2, nil - }, func(err error) { - t.Fatalf("bad len") - }) + }); err != nil { + t.Fatal("bad len") + } // Check the router has both if len(s1.router.GetDatacenters()) != 2 { t.Fatalf("remote consul missing") } - testutil.WaitForResult(func() (bool, error) { + if err := testutil.WaitForResult(func() (bool, error) { return len(s2.router.GetDatacenters()) == 2, nil - }, func(err error) { - t.Fatalf("remote consul missing") - }) + }); err != nil { + t.Fatal("remote consul missing") + } } func TestServer_JoinWAN_Flood(t *testing.T) { @@ -248,11 +248,11 @@ func TestServer_JoinWAN_Flood(t *testing.T) { } for i, s := range []*Server{s1, s2} { - testutil.WaitForResult(func() (bool, error) { + if err := testutil.WaitForResult(func() (bool, error) { return len(s.WANMembers()) == 2, nil - }, func(err error) { + }); err != nil { t.Fatalf("bad len for server %d", i) - }) + } } dir3, s3 := testServer(t) @@ -268,11 +268,11 @@ func TestServer_JoinWAN_Flood(t *testing.T) { } for i, s := range []*Server{s1, s2, s3} { - testutil.WaitForResult(func() (bool, error) { + if err := testutil.WaitForResult(func() (bool, error) { return len(s.WANMembers()) == 3, nil - }, func(err error) { + }); err != nil { t.Fatalf("bad len for server %d", i) - }) + } } } @@ -312,32 +312,32 @@ func TestServer_JoinSeparateLanAndWanAddresses(t *testing.T) { } // Check the WAN members on s1 - testutil.WaitForResult(func() (bool, error) { + if err := testutil.WaitForResult(func() (bool, error) { return len(s1.WANMembers()) == 3, nil - }, func(err error) { - t.Fatalf("bad len") - }) + }); err != nil { + t.Fatal("bad len") + } // Check the WAN members on s2 - testutil.WaitForResult(func() (bool, error) { + if err := testutil.WaitForResult(func() (bool, 
error) { return len(s2.WANMembers()) == 3, nil - }, func(err error) { - t.Fatalf("bad len") - }) + }); err != nil { + t.Fatal("bad len") + } // Check the LAN members on s2 - testutil.WaitForResult(func() (bool, error) { + if err := testutil.WaitForResult(func() (bool, error) { return len(s2.LANMembers()) == 2, nil - }, func(err error) { - t.Fatalf("bad len") - }) + }); err != nil { + t.Fatal("bad len") + } // Check the LAN members on s3 - testutil.WaitForResult(func() (bool, error) { + if err := testutil.WaitForResult(func() (bool, error) { return len(s3.LANMembers()) == 2, nil - }, func(err error) { - t.Fatalf("bad len") - }) + }); err != nil { + t.Fatal("bad len") + } // Check the router has both if len(s1.router.GetDatacenters()) != 2 { @@ -395,19 +395,19 @@ func TestServer_LeaveLeader(t *testing.T) { var p1 int var p2 int - testutil.WaitForResult(func() (bool, error) { + if err := testutil.WaitForResult(func() (bool, error) { p1, _ = s1.numPeers() return p1 == 2, errors.New(fmt.Sprintf("%d", p1)) - }, func(err error) { - t.Fatalf("should have 2 peers: %v", err) - }) + }); err != nil { + t.Fatalf("should have 2 peers %s", err) + } - testutil.WaitForResult(func() (bool, error) { + if err := testutil.WaitForResult(func() (bool, error) { p2, _ = s2.numPeers() return p2 == 2, errors.New(fmt.Sprintf("%d", p1)) - }, func(err error) { - t.Fatalf("should have 2 peers: %v", err) - }) + }); err != nil { + t.Fatalf("should have 2 peers %s", err) + } // Issue a leave to the leader for _, s := range []*Server{s1, s2} { @@ -421,12 +421,12 @@ func TestServer_LeaveLeader(t *testing.T) { // Should lose a peer for _, s := range []*Server{s1, s2} { - testutil.WaitForResult(func() (bool, error) { + if err := testutil.WaitForResult(func() (bool, error) { p1, _ = s.numPeers() return p1 == 1, nil - }, func(err error) { - t.Fatalf("should have 1 peer: %v", p1) - }) + }); err != nil { + t.Fatalf("should have 1 peer %s", err) + } } } @@ -450,19 +450,19 @@ func TestServer_Leave(t *testing.T) { var p1 int var p2 int - testutil.WaitForResult(func() (bool, error) { + if err := testutil.WaitForResult(func() (bool, error) { p1, _ = s1.numPeers() return p1 == 2, errors.New(fmt.Sprintf("%d", p1)) - }, func(err error) { - t.Fatalf("should have 2 peers: %v", err) - }) + }); err != nil { + t.Fatalf("should have 2 peers %s", err) + } - testutil.WaitForResult(func() (bool, error) { + if err := testutil.WaitForResult(func() (bool, error) { p2, _ = s2.numPeers() return p2 == 2, errors.New(fmt.Sprintf("%d", p1)) - }, func(err error) { - t.Fatalf("should have 2 peers: %v", err) - }) + }); err != nil { + t.Fatalf("should have 2 peers %s", err) + } // Issue a leave to the non-leader for _, s := range []*Server{s1, s2} { @@ -476,12 +476,12 @@ func TestServer_Leave(t *testing.T) { // Should lose a peer for _, s := range []*Server{s1, s2} { - testutil.WaitForResult(func() (bool, error) { + if err := testutil.WaitForResult(func() (bool, error) { p1, _ = s.numPeers() return p1 == 1, nil - }, func(err error) { - t.Fatalf("should have 1 peer: %v", p1) - }) + }); err != nil { + t.Fatalf("should have 1 peer %s", err) + } } } @@ -528,32 +528,32 @@ func TestServer_JoinLAN_TLS(t *testing.T) { } // Check the members - testutil.WaitForResult(func() (bool, error) { + if err := testutil.WaitForResult(func() (bool, error) { return len(s1.LANMembers()) == 2, nil - }, func(err error) { - t.Fatalf("bad len") - }) + }); err != nil { + t.Fatal("bad len") + } - testutil.WaitForResult(func() (bool, error) { + if err := testutil.WaitForResult(func() 
(bool, error) { return len(s2.LANMembers()) == 2, nil - }, func(err error) { - t.Fatalf("bad len") - }) + }); err != nil { + t.Fatal("bad len") + } // Verify Raft has established a peer - testutil.WaitForResult(func() (bool, error) { + if err := testutil.WaitForResult(func() (bool, error) { peers, _ := s1.numPeers() return peers == 2, nil - }, func(err error) { - t.Fatalf("no peer established") - }) + }); err != nil { + t.Fatalf("no peers") + } - testutil.WaitForResult(func() (bool, error) { + if err := testutil.WaitForResult(func() (bool, error) { peers, _ := s2.numPeers() return peers == 2, nil - }, func(err error) { - t.Fatalf("no peer established") - }) + }); err != nil { + t.Fatalf("no peers") + } } func TestServer_Expect(t *testing.T) { @@ -587,19 +587,19 @@ func TestServer_Expect(t *testing.T) { var p2 int // Should have no peers yet since the bootstrap didn't occur. - testutil.WaitForResult(func() (bool, error) { + if err := testutil.WaitForResult(func() (bool, error) { p1, _ = s1.numPeers() return p1 == 0, errors.New(fmt.Sprintf("%d", p1)) - }, func(err error) { - t.Fatalf("should have 0 peers: %v", err) - }) + }); err != nil { + t.Fatalf("should have 0 peers %s", err) + } - testutil.WaitForResult(func() (bool, error) { + if err := testutil.WaitForResult(func() (bool, error) { p2, _ = s2.numPeers() return p2 == 0, errors.New(fmt.Sprintf("%d", p2)) - }, func(err error) { - t.Fatalf("should have 0 peers: %v", err) - }) + }); err != nil { + t.Fatalf("should have 0 peers %s", err) + } // Join the third node. if _, err := s3.JoinLAN([]string{addr}); err != nil { @@ -609,26 +609,26 @@ func TestServer_Expect(t *testing.T) { var p3 int // Now we have three servers so we should bootstrap. - testutil.WaitForResult(func() (bool, error) { + if err := testutil.WaitForResult(func() (bool, error) { p1, _ = s1.numPeers() return p1 == 3, errors.New(fmt.Sprintf("%d", p1)) - }, func(err error) { - t.Fatalf("should have 3 peers: %v", err) - }) + }); err != nil { + t.Fatalf("should have 3 peers %s", err) + } - testutil.WaitForResult(func() (bool, error) { + if err := testutil.WaitForResult(func() (bool, error) { p2, _ = s2.numPeers() return p2 == 3, errors.New(fmt.Sprintf("%d", p2)) - }, func(err error) { - t.Fatalf("should have 3 peers: %v", err) - }) + }); err != nil { + t.Fatalf("should have 3 peers %s", err) + } - testutil.WaitForResult(func() (bool, error) { + if err := testutil.WaitForResult(func() (bool, error) { p3, _ = s3.numPeers() return p3 == 3, errors.New(fmt.Sprintf("%d", p3)) - }, func(err error) { - t.Fatalf("should have 3 peers: %v", err) - }) + }); err != nil { + t.Fatalf("should have 3 peers %s", err) + } // Make sure a leader is elected, grab the current term and then add in // the fourth server. @@ -640,12 +640,12 @@ func TestServer_Expect(t *testing.T) { // Wait for the new server to see itself added to the cluster. var p4 int - testutil.WaitForResult(func() (bool, error) { + if err := testutil.WaitForResult(func() (bool, error) { p4, _ = s4.numPeers() return p4 == 4, errors.New(fmt.Sprintf("%d", p4)) - }, func(err error) { - t.Fatalf("should have 4 peers: %v", err) - }) + }); err != nil { + t.Fatalf("should have 4 peers %s", err) + } // Make sure there's still a leader and that the term didn't change, // so we know an election didn't occur. 
@@ -683,19 +683,19 @@ func TestServer_BadExpect(t *testing.T) { var p2 int // should have no peers yet - testutil.WaitForResult(func() (bool, error) { + if err := testutil.WaitForResult(func() (bool, error) { p1, _ = s1.numPeers() return p1 == 0, errors.New(fmt.Sprintf("%d", p1)) - }, func(err error) { - t.Fatalf("should have 0 peers: %v", err) - }) + }); err != nil { + t.Fatalf("should have 0 peers %s", err) + } - testutil.WaitForResult(func() (bool, error) { + if err := testutil.WaitForResult(func() (bool, error) { p2, _ = s2.numPeers() return p2 == 0, errors.New(fmt.Sprintf("%d", p2)) - }, func(err error) { - t.Fatalf("should have 0 peers: %v", err) - }) + }); err != nil { + t.Fatalf("should have 0 peers %s", err) + } // join the third node if _, err := s3.JoinLAN([]string{addr}); err != nil { @@ -705,26 +705,26 @@ func TestServer_BadExpect(t *testing.T) { var p3 int // should still have no peers (because s2 is in expect=2 mode) - testutil.WaitForResult(func() (bool, error) { + if err := testutil.WaitForResult(func() (bool, error) { p1, _ = s1.numPeers() return p1 == 0, errors.New(fmt.Sprintf("%d", p1)) - }, func(err error) { - t.Fatalf("should have 0 peers: %v", err) - }) + }); err != nil { + t.Fatalf("should have 0 peers %s", err) + } - testutil.WaitForResult(func() (bool, error) { + if err := testutil.WaitForResult(func() (bool, error) { p2, _ = s2.numPeers() return p2 == 0, errors.New(fmt.Sprintf("%d", p2)) - }, func(err error) { - t.Fatalf("should have 0 peers: %v", err) - }) + }); err != nil { + t.Fatalf("should have 0 peers %s", err) + } - testutil.WaitForResult(func() (bool, error) { + if err := testutil.WaitForResult(func() (bool, error) { p3, _ = s3.numPeers() return p3 == 0, errors.New(fmt.Sprintf("%d", p3)) - }, func(err error) { - t.Fatalf("should have 0 peers: %v", err) - }) + }); err != nil { + t.Fatalf("should have 0 peers %s", err) + } } type fakeGlobalResp struct{} @@ -742,11 +742,11 @@ func TestServer_globalRPCErrors(t *testing.T) { defer os.RemoveAll(dir1) defer s1.Shutdown() - testutil.WaitForResult(func() (bool, error) { + if err := testutil.WaitForResult(func() (bool, error) { return len(s1.router.GetDatacenters()) == 1, nil - }, func(err error) { - t.Fatalf("Server did not join WAN successfully") - }) + }); err != nil { + t.Fatalf("did not join WAN") + } // Check that an error from a remote DC is returned err := s1.globalRPC("Bad.Method", nil, &fakeGlobalResp{}) diff --git a/consul/session_ttl_test.go b/consul/session_ttl_test.go index 880211899c..020b7f021e 100644 --- a/consul/session_ttl_test.go +++ b/consul/session_ttl_test.go @@ -298,12 +298,12 @@ func TestServer_SessionTTL_Failover(t *testing.T) { t.Fatalf("err: %v", err) } - testutil.WaitForResult(func() (bool, error) { + if err := testutil.WaitForResult(func() (bool, error) { peers, _ := s1.numPeers() return peers == 3, nil - }, func(err error) { - t.Fatalf("should have 3 peers") - }) + }); err != nil { + t.Fatalf("should have 3 peers %s", err) + } // Find the leader var leader *Server @@ -363,7 +363,7 @@ func TestServer_SessionTTL_Failover(t *testing.T) { } // Find the new leader - testutil.WaitForResult(func() (bool, error) { + if err := testutil.WaitForResult(func() (bool, error) { leader = nil for _, s := range servers { if s.IsLeader() { @@ -380,7 +380,7 @@ func TestServer_SessionTTL_Failover(t *testing.T) { } return true, nil - }, func(err error) { - t.Fatalf("err: %s", err) - }) + }); err != nil { + t.Fatal(err) + } } diff --git a/consul/snapshot_endpoint_test.go b/consul/snapshot_endpoint_test.go 
index b72b715bd8..9fdacd993d 100644 --- a/consul/snapshot_endpoint_test.go +++ b/consul/snapshot_endpoint_test.go @@ -329,13 +329,11 @@ func TestSnapshot_Forward_Datacenter(t *testing.T) { if _, err := s2.JoinWAN([]string{addr}); err != nil { t.Fatalf("err: %v", err) } - testutil.WaitForResult( - func() (bool, error) { - return len(s1.WANMembers()) > 1, nil - }, - func(err error) { - t.Fatalf("Failed waiting for WAN join: %v", err) - }) + if err := testutil.WaitForResult(func() (bool, error) { + return len(s1.WANMembers()) > 1, nil + }); err != nil { + t.Fatalf("failed to join WAN: %s", err) + } // Run a snapshot from each server locally and remotely to ensure we // forward.
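Every hunk above applies the same mechanical transformation: the two-argument `testutil.WaitForResult(condFn, errFn)` becomes the error-returning `if err := testutil.WaitForResult(condFn); err != nil { ... }`, with the old failure callback's message moving into the caller's own `t.Fatal`/`t.Fatalf`. A minimal sketch of the new call-site shape in isolation (the package name, test name, and polled condition below are illustrative only, and the retry behavior is assumed to be unchanged from the callback version):

```go
package example

import (
	"fmt"
	"testing"

	"github.com/hashicorp/consul/testutil"
)

func TestExample_waitForResult(t *testing.T) {
	attempts := 0

	// The condition function is retried until it returns true; the error
	// it returns annotates the failure if the retries run out, which is
	// why call sites above pass values like errors.New(checks[0].Status).
	if err := testutil.WaitForResult(func() (bool, error) {
		attempts++
		return attempts >= 3, fmt.Errorf("only %d attempts so far", attempts)
	}); err != nil {
		// The test decides how to fail; WaitForResult no longer invokes
		// a failure callback on the test's behalf.
		t.Fatalf("condition never became true: %v", err)
	}
}
```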
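The `testutil/wait.go` change that makes this possible appears later in the patch and is not shown in this excerpt. For reviewers who want the shape in mind while reading the call sites, here is a hypothetical sketch consistent with how the tests above use the return value (the retry count, sleep interval, and error wrapping are assumptions, not the actual hunk):

```go
package testutil

import (
	"fmt"
	"time"
)

// WaitForResult retries f until it reports success or the attempts are
// exhausted. Instead of invoking a failure callback, it returns an error
// annotated with f's last error so the caller can fail the test itself.
// Hypothetical sketch only; see testutil/wait.go for the real version.
func WaitForResult(f func() (bool, error)) error {
	var lastErr error
	for i := 0; i < 200; i++ {
		ok, err := f()
		if ok {
			return nil
		}
		lastErr = err
		time.Sleep(25 * time.Millisecond)
	}
	return fmt.Errorf("timed out waiting for result: %v", lastErr)
}
```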