test: Refactor WaitForResult tests with retry

Refactor tests that use testutil.WaitForResult to use retry.

Since this requires refactoring the test functions in general, this patch
also shows the use of the github.com/pascaldekloe/goe/verify library,
which provides a good mechanism for comparing nested data structures.
Instead of just converting the tests from testutil.WaitForResult to
retry, the tests that perform a nested comparison of data structures
are converted to the verify library at the same time.
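
To illustrate the pattern, here is a minimal sketch of the before/after conversion, assuming the retry.Run and verify.Values signatures as they appear in this diff (retry.Run takes a description string, the *testing.T, and the retry func); fetchNodes is a hypothetical stand-in for any call that needs time to converge.

package api_test

import (
	"testing"

	"github.com/hashicorp/consul/testutil/retry"
	"github.com/pascaldekloe/goe/verify"
)

// fetchNodes is a hypothetical stand-in for a catalog call that may
// need a few attempts before the cluster has converged.
func fetchNodes() ([]string, error) { return []string{"node1"}, nil }

// Old style (for comparison): the condition func returns (bool, error)
// and every caller wraps it in WaitForResult plus a t.Fatal:
//
//	if err := testutil.WaitForResult(func() (bool, error) {
//		nodes, err := fetchNodes()
//		if err != nil {
//			return false, err
//		}
//		if len(nodes) == 0 {
//			return false, fmt.Errorf("no nodes")
//		}
//		return true, nil
//	}); err != nil {
//		t.Fatal(err)
//	}

// New style: the body reads like a normal test and fails through
// *retry.R; retry.Run re-runs it until it passes or the retryer
// gives up and reports the failure on t.
func TestNodesEventuallyRegistered(t *testing.T) {
	retry.Run("nodes registered", t, func(r *retry.R) {
		nodes, err := fetchNodes()
		if err != nil {
			r.Fatalf("fetchNodes: %v", err)
		}
		want := []string{"node1"}
		// verify.Values deep-compares got and want and reports any
		// differences through r; FailNow aborts this attempt so the
		// whole func can be retried.
		if !verify.Values(r, "nodes", nodes, want) {
			r.FailNow()
		}
	})
}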
Frank Schroeder 2017-04-29 09:34:02 -07:00
parent eb6465551b
commit 21a82a0a16
28 changed files with 978 additions and 1322 deletions

View File

@ -1,10 +1,11 @@
package api
import (
"fmt"
"testing"
"github.com/hashicorp/consul/testutil"
"github.com/hashicorp/consul/testutil/retry"
"github.com/pascaldekloe/goe/verify"
)
func TestCatalog_Datacenters(t *testing.T) {
@ -13,21 +14,15 @@ func TestCatalog_Datacenters(t *testing.T) {
defer s.Stop()
catalog := c.Catalog()
if err := testutil.WaitForResult(func() (bool, error) {
retry.Run("no datacenters", t, func(r *retry.R) {
datacenters, err := catalog.Datacenters()
if err != nil {
return false, err
r.Fatalf("catalog.Datacenters: ", err)
}
if len(datacenters) == 0 {
return false, fmt.Errorf("Bad: %v", datacenters)
if len(datacenters) < 1 {
r.Fatal("got 0 datacenters want at least one")
}
return true, nil
}); err != nil {
t.Fatal(err)
}
})
}
func TestCatalog_Nodes(t *testing.T) {
@ -35,33 +30,33 @@ func TestCatalog_Nodes(t *testing.T) {
defer s.Stop()
catalog := c.Catalog()
if err := testutil.WaitForResult(func() (bool, error) {
retry.RunWith(retry.ThreeTimes(), "no nodes", t, func(r *retry.R) {
nodes, meta, err := catalog.Nodes(nil)
if err != nil {
return false, err
r.Fatalf("catalog.Nodes: ", err)
}
if meta.LastIndex == 0 {
return false, fmt.Errorf("Bad: %v", meta)
r.Fatal("got last index 0 want > 0")
}
if len(nodes) == 0 {
return false, fmt.Errorf("Bad: %v", nodes)
want := []*Node{
{
ID: s.Config.NodeID,
Node: s.Config.NodeName,
Address: "127.0.0.1",
Datacenter: "dc1",
TaggedAddresses: map[string]string{
"lan": "127.0.0.1",
"wan": "127.0.0.1",
},
Meta: map[string]string{},
CreateIndex: meta.LastIndex - 1,
ModifyIndex: meta.LastIndex,
},
}
if _, ok := nodes[0].TaggedAddresses["wan"]; !ok {
return false, fmt.Errorf("Bad: %v", nodes[0])
if !verify.Values(r, "", nodes, want) {
r.FailNow()
}
if nodes[0].Datacenter != "dc1" {
return false, fmt.Errorf("Bad datacenter: %v", nodes[0])
}
return true, nil
}); err != nil {
t.Fatal(err)
}
})
}
func TestCatalog_Nodes_MetaFilter(t *testing.T) {
@ -72,58 +67,49 @@ func TestCatalog_Nodes_MetaFilter(t *testing.T) {
defer s.Stop()
catalog := c.Catalog()
// Make sure we get the node back when filtering by its metadata
if err := testutil.WaitForResult(func() (bool, error) {
retry.Run("", t, func(r *retry.R) {
nodes, meta, err := catalog.Nodes(&QueryOptions{NodeMeta: meta})
if err != nil {
return false, err
r.Fatal(err)
}
if meta.LastIndex == 0 {
return false, fmt.Errorf("Bad: %v", meta)
r.Fatalf("Bad: %v", meta)
}
if len(nodes) == 0 {
return false, fmt.Errorf("Bad: %v", nodes)
r.Fatalf("Bad: %v", nodes)
}
if _, ok := nodes[0].TaggedAddresses["wan"]; !ok {
return false, fmt.Errorf("Bad: %v", nodes[0])
r.Fatalf("Bad: %v", nodes[0])
}
if v, ok := nodes[0].Meta["somekey"]; !ok || v != "somevalue" {
return false, fmt.Errorf("Bad: %v", nodes[0].Meta)
r.Fatalf("Bad: %v", nodes[0].Meta)
}
if nodes[0].Datacenter != "dc1" {
return false, fmt.Errorf("Bad datacenter: %v", nodes[0])
r.Fatalf("Bad datacenter: %v", nodes[0])
}
})
return true, nil
}); err != nil {
t.Fatal(err)
}
// Get nothing back when we use an invalid filter
if err := testutil.WaitForResult(func() (bool, error) {
retry.Run("", t, func(r *retry.R) {
// Get nothing back when we use an invalid filter
nodes, meta, err := catalog.Nodes(&QueryOptions{NodeMeta: map[string]string{"nope": "nope"}})
if err != nil {
return false, err
r.Fatal(err)
}
if meta.LastIndex == 0 {
return false, fmt.Errorf("Bad: %v", meta)
r.Fatalf("Bad: %v", meta)
}
if len(nodes) != 0 {
return false, fmt.Errorf("Bad: %v", nodes)
r.Fatalf("Bad: %v", nodes)
}
return true, nil
}); err != nil {
t.Fatal(err)
}
})
}
func TestCatalog_Services(t *testing.T) {
@ -132,25 +118,20 @@ func TestCatalog_Services(t *testing.T) {
defer s.Stop()
catalog := c.Catalog()
if err := testutil.WaitForResult(func() (bool, error) {
retry.Run("", t, func(r *retry.R) {
services, meta, err := catalog.Services(nil)
if err != nil {
return false, err
r.Fatal(err)
}
if meta.LastIndex == 0 {
return false, fmt.Errorf("Bad: %v", meta)
r.Fatalf("Bad: %v", meta)
}
if len(services) == 0 {
return false, fmt.Errorf("Bad: %v", services)
r.Fatalf("Bad: %v", services)
}
return true, nil
}); err != nil {
t.Fatal(err)
}
})
}
func TestCatalog_Services_NodeMetaFilter(t *testing.T) {
@ -161,46 +142,37 @@ func TestCatalog_Services_NodeMetaFilter(t *testing.T) {
defer s.Stop()
catalog := c.Catalog()
// Make sure we get the service back when filtering by the node's metadata
if err := testutil.WaitForResult(func() (bool, error) {
retry.Run("", t, func(r *retry.R) {
services, meta, err := catalog.Services(&QueryOptions{NodeMeta: meta})
if err != nil {
return false, err
r.Fatal(err)
}
if meta.LastIndex == 0 {
return false, fmt.Errorf("Bad: %v", meta)
r.Fatalf("Bad: %v", meta)
}
if len(services) == 0 {
return false, fmt.Errorf("Bad: %v", services)
r.Fatalf("Bad: %v", services)
}
})
return true, nil
}); err != nil {
t.Fatal(err)
}
// Get nothing back when using an invalid filter
if err := testutil.WaitForResult(func() (bool, error) {
retry.Run("", t, func(r *retry.R) {
// Get nothing back when using an invalid filter
services, meta, err := catalog.Services(&QueryOptions{NodeMeta: map[string]string{"nope": "nope"}})
if err != nil {
return false, err
r.Fatal(err)
}
if meta.LastIndex == 0 {
return false, fmt.Errorf("Bad: %v", meta)
r.Fatalf("Bad: %v", meta)
}
if len(services) != 0 {
return false, fmt.Errorf("Bad: %v", services)
r.Fatalf("Bad: %v", services)
}
return true, nil
}); err != nil {
t.Fatal(err)
}
})
}
func TestCatalog_Service(t *testing.T) {
@ -209,29 +181,24 @@ func TestCatalog_Service(t *testing.T) {
defer s.Stop()
catalog := c.Catalog()
if err := testutil.WaitForResult(func() (bool, error) {
retry.Run("", t, func(r *retry.R) {
services, meta, err := catalog.Service("consul", "", nil)
if err != nil {
return false, err
r.Fatal(err)
}
if meta.LastIndex == 0 {
return false, fmt.Errorf("Bad: %v", meta)
r.Fatalf("Bad: %v", meta)
}
if len(services) == 0 {
return false, fmt.Errorf("Bad: %v", services)
r.Fatalf("Bad: %v", services)
}
if services[0].Datacenter != "dc1" {
return false, fmt.Errorf("Bad datacenter: %v", services[0])
r.Fatalf("Bad datacenter: %v", services[0])
}
return true, nil
}); err != nil {
t.Fatal(err)
}
})
}
func TestCatalog_Service_NodeMetaFilter(t *testing.T) {
@ -243,29 +210,24 @@ func TestCatalog_Service_NodeMetaFilter(t *testing.T) {
defer s.Stop()
catalog := c.Catalog()
if err := testutil.WaitForResult(func() (bool, error) {
retry.Run("", t, func(r *retry.R) {
services, meta, err := catalog.Service("consul", "", &QueryOptions{NodeMeta: meta})
if err != nil {
return false, err
r.Fatal(err)
}
if meta.LastIndex == 0 {
return false, fmt.Errorf("Bad: %v", meta)
r.Fatalf("Bad: %v", meta)
}
if len(services) == 0 {
return false, fmt.Errorf("Bad: %v", services)
r.Fatalf("Bad: %v", services)
}
if services[0].Datacenter != "dc1" {
return false, fmt.Errorf("Bad datacenter: %v", services[0])
r.Fatalf("Bad datacenter: %v", services[0])
}
return true, nil
}); err != nil {
t.Fatal(err)
}
})
}
func TestCatalog_Node(t *testing.T) {
@ -275,33 +237,28 @@ func TestCatalog_Node(t *testing.T) {
catalog := c.Catalog()
name, _ := c.Agent().NodeName()
if err := testutil.WaitForResult(func() (bool, error) {
retry.Run("", t, func(r *retry.R) {
info, meta, err := catalog.Node(name, nil)
if err != nil {
return false, err
r.Fatal(err)
}
if meta.LastIndex == 0 {
return false, fmt.Errorf("Bad: %v", meta)
r.Fatalf("Bad: %v", meta)
}
if len(info.Services) == 0 {
return false, fmt.Errorf("Bad: %v", info)
r.Fatalf("Bad: %v", info)
}
if _, ok := info.Node.TaggedAddresses["wan"]; !ok {
return false, fmt.Errorf("Bad: %v", info)
r.Fatalf("Bad: %v", info)
}
if info.Node.Datacenter != "dc1" {
return false, fmt.Errorf("Bad datacenter: %v", info)
r.Fatalf("Bad datacenter: %v", info)
}
return true, nil
}); err != nil {
t.Fatal(err)
}
})
}
func TestCatalog_Registration(t *testing.T) {
@ -335,38 +292,33 @@ func TestCatalog_Registration(t *testing.T) {
Service: service,
Check: check,
}
if err := testutil.WaitForResult(func() (bool, error) {
retry.Run("", t, func(r *retry.R) {
if _, err := catalog.Register(reg, nil); err != nil {
return false, err
r.Fatal(err)
}
node, _, err := catalog.Node("foobar", nil)
if err != nil {
return false, err
r.Fatal(err)
}
if _, ok := node.Services["redis1"]; !ok {
return false, fmt.Errorf("missing service: redis1")
r.Fatal("missing service: redis1")
}
health, _, err := c.Health().Node("foobar", nil)
if err != nil {
return false, err
r.Fatal(err)
}
if health[0].CheckID != "service:redis1" {
return false, fmt.Errorf("missing checkid service:redis1")
r.Fatal("missing checkid service:redis1")
}
if v, ok := node.Node.Meta["somekey"]; !ok || v != "somevalue" {
return false, fmt.Errorf("missing node meta pair somekey:somevalue")
r.Fatal("missing node meta pair somekey:somevalue")
}
return true, nil
}); err != nil {
t.Fatal(err)
}
})
// Test catalog deregistration of the previously registered service
dereg := &CatalogDeregistration{
@ -380,20 +332,16 @@ func TestCatalog_Registration(t *testing.T) {
t.Fatalf("err: %v", err)
}
if err := testutil.WaitForResult(func() (bool, error) {
retry.Run("", t, func(r *retry.R) {
node, _, err := catalog.Node("foobar", nil)
if err != nil {
return false, err
r.Fatal(err)
}
if _, ok := node.Services["redis1"]; ok {
return false, fmt.Errorf("ServiceID:redis1 is not deregistered")
r.Fatal("ServiceID:redis1 is not deregistered")
}
return true, nil
}); err != nil {
t.Fatal(err)
}
})
// Test deregistration of the previously registered check
dereg = &CatalogDeregistration{
@ -407,20 +355,16 @@ func TestCatalog_Registration(t *testing.T) {
t.Fatalf("err: %v", err)
}
if err := testutil.WaitForResult(func() (bool, error) {
retry.Run("", t, func(r *retry.R) {
health, _, err := c.Health().Node("foobar", nil)
if err != nil {
return false, err
r.Fatal(err)
}
if len(health) != 0 {
return false, fmt.Errorf("CheckID:service:redis1 is not deregistered")
r.Fatal("CheckID:service:redis1 is not deregistered")
}
return true, nil
}); err != nil {
t.Fatal(err)
}
})
// Test node deregistration of the previously registered node
dereg = &CatalogDeregistration{
@ -433,20 +377,16 @@ func TestCatalog_Registration(t *testing.T) {
t.Fatalf("err: %v", err)
}
if err := testutil.WaitForResult(func() (bool, error) {
retry.Run("", t, func(r *retry.R) {
node, _, err := catalog.Node("foobar", nil)
if err != nil {
return false, err
r.Fatal(err)
}
if node != nil {
return false, fmt.Errorf("node is not deregistered: %v", node)
r.Fatalf("node is not deregistered: %v", node)
}
return true, nil
}); err != nil {
t.Fatal(err)
}
})
}
func TestCatalog_EnableTagOverride(t *testing.T) {
@ -470,72 +410,65 @@ func TestCatalog_EnableTagOverride(t *testing.T) {
Service: service,
}
if err := testutil.WaitForResult(func() (bool, error) {
retry.Run("", t, func(r *retry.R) {
if _, err := catalog.Register(reg, nil); err != nil {
return false, err
r.Fatal(err)
}
node, _, err := catalog.Node("foobar", nil)
if err != nil {
return false, err
r.Fatal(err)
}
if _, ok := node.Services["redis1"]; !ok {
return false, fmt.Errorf("missing service: redis1")
r.Fatal("missing service: redis1")
}
if node.Services["redis1"].EnableTagOverride != false {
return false, fmt.Errorf("tag override set")
r.Fatal("tag override set")
}
services, _, err := catalog.Service("redis", "", nil)
if err != nil {
return false, err
r.Fatal(err)
}
if len(services) < 1 || services[0].ServiceName != "redis" {
return false, fmt.Errorf("missing service: redis")
r.Fatal("missing service: redis")
}
if services[0].ServiceEnableTagOverride != false {
return false, fmt.Errorf("tag override set")
r.Fatal("tag override set")
}
return true, nil
}); err != nil {
t.Fatal(err)
}
})
service.EnableTagOverride = true
if err := testutil.WaitForResult(func() (bool, error) {
retry.Run("", t, func(r *retry.R) {
if _, err := catalog.Register(reg, nil); err != nil {
return false, err
r.Fatal(err)
}
node, _, err := catalog.Node("foobar", nil)
if err != nil {
return false, err
r.Fatal(err)
}
if _, ok := node.Services["redis1"]; !ok {
return false, fmt.Errorf("missing service: redis1")
r.Fatal("missing service: redis1")
}
if node.Services["redis1"].EnableTagOverride != true {
return false, fmt.Errorf("tag override not set")
r.Fatal("tag override not set")
}
services, _, err := catalog.Service("redis", "", nil)
if err != nil {
return false, err
r.Fatal(err)
}
if len(services) < 1 || services[0].ServiceName != "redis" {
return false, fmt.Errorf("missing service: redis")
r.Fatal("missing service: redis")
}
if services[0].ServiceEnableTagOverride != true {
return false, fmt.Errorf("tag override not set")
r.Fatal("tag override not set")
}
return true, nil
}); err != nil {
t.Fatal(err)
}
})
}

View File

@ -1,10 +1,9 @@
package api
import (
"fmt"
"testing"
"github.com/hashicorp/consul/testutil"
"github.com/hashicorp/consul/testutil/retry"
)
func TestCoordinate_Datacenters(t *testing.T) {
@ -13,21 +12,16 @@ func TestCoordinate_Datacenters(t *testing.T) {
defer s.Stop()
coordinate := c.Coordinate()
if err := testutil.WaitForResult(func() (bool, error) {
retry.Run("", t, func(r *retry.R) {
datacenters, err := coordinate.Datacenters()
if err != nil {
return false, err
r.Fatal(err)
}
if len(datacenters) == 0 {
return false, fmt.Errorf("Bad: %v", datacenters)
r.Fatalf("Bad: %v", datacenters)
}
return true, nil
}); err != nil {
t.Fatal(err)
}
})
}
func TestCoordinate_Nodes(t *testing.T) {
@ -36,19 +30,15 @@ func TestCoordinate_Nodes(t *testing.T) {
defer s.Stop()
coordinate := c.Coordinate()
if err := testutil.WaitForResult(func() (bool, error) {
retry.Run("", t, func(r *retry.R) {
_, _, err := coordinate.Nodes(nil)
if err != nil {
return false, err
r.Fatal(err)
}
// There's not a good way to populate coordinates without
// waiting for them to calculate and update, so the best
// we can do is call the endpoint and make sure we don't
// get an error.
return true, nil
}); err != nil {
t.Fatal(err)
}
})
}

View File

@ -3,7 +3,7 @@ package api
import (
"testing"
"github.com/hashicorp/consul/testutil"
"github.com/hashicorp/consul/testutil/retry"
)
func TestEvent_FireList(t *testing.T) {
@ -29,15 +29,16 @@ func TestEvent_FireList(t *testing.T) {
var events []*UserEvent
var qm *QueryMeta
if err := testutil.WaitForResult(func() (bool, error) {
retry.Run("", t, func(r *retry.R) {
events, qm, err = event.List("", nil)
if err != nil {
t.Fatalf("err: %v", err)
r.Fatalf("err: %v", err)
}
return len(events) > 0, err
}); err != nil {
t.Fatal(err)
}
if len(events) <= 0 {
r.Fatal(err)
}
})
if events[len(events)-1].ID != id {
t.Fatalf("bad: %#v", events)

View File

@ -5,6 +5,7 @@ import (
"testing"
"github.com/hashicorp/consul/testutil"
"github.com/hashicorp/consul/testutil/retry"
"github.com/pascaldekloe/goe/verify"
)
@ -21,22 +22,18 @@ func TestHealth_Node(t *testing.T) {
t.Fatalf("err: %v", err)
}
name := info["Config"]["NodeName"].(string)
if err := testutil.WaitForResult(func() (bool, error) {
retry.Run("", t, func(r *retry.R) {
checks, meta, err := health.Node(name, nil)
if err != nil {
return false, err
r.Fatal(err)
}
if meta.LastIndex == 0 {
return false, fmt.Errorf("bad: %v", meta)
r.Fatalf("bad: %v", meta)
}
if len(checks) == 0 {
return false, fmt.Errorf("bad: %v", checks)
r.Fatalf("bad: %v", checks)
}
return true, nil
}); err != nil {
t.Fatal(err)
}
})
}
func TestHealthChecks_AggregatedStatus(t *testing.T) {
@ -195,7 +192,7 @@ func TestHealth_Checks(t *testing.T) {
}
defer agent.ServiceDeregister("foo")
if err := testutil.WaitForResult(func() (bool, error) {
retry.Run("", t, func(r *retry.R) {
checks := HealthChecks{
&HealthCheck{
Node: "node123",
@ -210,18 +207,15 @@ func TestHealth_Checks(t *testing.T) {
out, meta, err := health.Checks("foo", nil)
if err != nil {
return false, err
r.Fatal(err)
}
if meta.LastIndex == 0 {
return false, fmt.Errorf("bad: %v", meta)
r.Fatalf("bad: %v", meta)
}
if got, want := out, checks; !verify.Values(t, "checks", got, want) {
return false, fmt.Errorf("health.Checks failed")
r.Fatal("health.Checks failed")
}
return true, nil
}); err != nil {
t.Fatal(err)
}
})
}
func TestHealth_Checks_NodeMetaFilter(t *testing.T) {
@ -247,21 +241,18 @@ func TestHealth_Checks_NodeMetaFilter(t *testing.T) {
}
defer agent.ServiceDeregister("foo")
if err := testutil.WaitForResult(func() (bool, error) {
retry.Run("", t, func(r *retry.R) {
checks, meta, err := health.Checks("foo", &QueryOptions{NodeMeta: meta})
if err != nil {
return false, err
r.Fatal(err)
}
if meta.LastIndex == 0 {
return false, fmt.Errorf("bad: %v", meta)
r.Fatalf("bad: %v", meta)
}
if len(checks) == 0 {
return false, fmt.Errorf("Bad: %v", checks)
r.Fatalf("Bad: %v", checks)
}
return true, nil
}); err != nil {
t.Fatal(err)
}
})
}
func TestHealth_Service(t *testing.T) {
@ -269,29 +260,25 @@ func TestHealth_Service(t *testing.T) {
defer s.Stop()
health := c.Health()
if err := testutil.WaitForResult(func() (bool, error) {
retry.Run("", t, func(r *retry.R) {
// consul service should always exist...
checks, meta, err := health.Service("consul", "", true, nil)
if err != nil {
return false, err
r.Fatal(err)
}
if meta.LastIndex == 0 {
return false, fmt.Errorf("bad: %v", meta)
r.Fatalf("bad: %v", meta)
}
if len(checks) == 0 {
return false, fmt.Errorf("Bad: %v", checks)
r.Fatalf("Bad: %v", checks)
}
if _, ok := checks[0].Node.TaggedAddresses["wan"]; !ok {
return false, fmt.Errorf("Bad: %v", checks[0].Node)
r.Fatalf("Bad: %v", checks[0].Node)
}
if checks[0].Node.Datacenter != "dc1" {
return false, fmt.Errorf("Bad datacenter: %v", checks[0].Node)
r.Fatalf("Bad datacenter: %v", checks[0].Node)
}
return true, nil
}); err != nil {
t.Fatal(err)
}
})
}
func TestHealth_Service_NodeMetaFilter(t *testing.T) {
@ -302,29 +289,25 @@ func TestHealth_Service_NodeMetaFilter(t *testing.T) {
defer s.Stop()
health := c.Health()
if err := testutil.WaitForResult(func() (bool, error) {
retry.Run("", t, func(r *retry.R) {
// consul service should always exist...
checks, meta, err := health.Service("consul", "", true, &QueryOptions{NodeMeta: meta})
if err != nil {
return false, err
r.Fatal(err)
}
if meta.LastIndex == 0 {
return false, fmt.Errorf("bad: %v", meta)
r.Fatalf("bad: %v", meta)
}
if len(checks) == 0 {
return false, fmt.Errorf("Bad: %v", checks)
r.Fatalf("Bad: %v", checks)
}
if _, ok := checks[0].Node.TaggedAddresses["wan"]; !ok {
return false, fmt.Errorf("Bad: %v", checks[0].Node)
r.Fatalf("Bad: %v", checks[0].Node)
}
if checks[0].Node.Datacenter != "dc1" {
return false, fmt.Errorf("Bad datacenter: %v", checks[0].Node)
r.Fatalf("Bad datacenter: %v", checks[0].Node)
}
return true, nil
}); err != nil {
t.Fatal(err)
}
})
}
func TestHealth_State(t *testing.T) {
@ -333,22 +316,18 @@ func TestHealth_State(t *testing.T) {
defer s.Stop()
health := c.Health()
if err := testutil.WaitForResult(func() (bool, error) {
retry.Run("", t, func(r *retry.R) {
checks, meta, err := health.State("any", nil)
if err != nil {
return false, err
r.Fatal(err)
}
if meta.LastIndex == 0 {
return false, fmt.Errorf("bad: %v", meta)
r.Fatalf("bad: %v", meta)
}
if len(checks) == 0 {
return false, fmt.Errorf("Bad: %v", checks)
r.Fatalf("Bad: %v", checks)
}
return true, nil
}); err != nil {
t.Fatal(err)
}
})
}
func TestHealth_State_NodeMetaFilter(t *testing.T) {
@ -360,20 +339,16 @@ func TestHealth_State_NodeMetaFilter(t *testing.T) {
defer s.Stop()
health := c.Health()
if err := testutil.WaitForResult(func() (bool, error) {
retry.Run("", t, func(r *retry.R) {
checks, meta, err := health.State("any", &QueryOptions{NodeMeta: meta})
if err != nil {
return false, err
r.Fatal(err)
}
if meta.LastIndex == 0 {
return false, fmt.Errorf("bad: %v", meta)
r.Fatalf("bad: %v", meta)
}
if len(checks) == 0 {
return false, fmt.Errorf("Bad: %v", checks)
r.Fatalf("Bad: %v", checks)
}
return true, nil
}); err != nil {
t.Fatal(err)
}
})
}

View File

@ -1,10 +1,10 @@
package api
import (
"fmt"
"testing"
"github.com/hashicorp/consul/testutil"
"github.com/hashicorp/consul/testutil/retry"
)
func TestOperator_AutopilotGetSetConfiguration(t *testing.T) {
@ -89,19 +89,16 @@ func TestOperator_AutopilotServerHealth(t *testing.T) {
defer s.Stop()
operator := c.Operator()
if err := testutil.WaitForResult(func() (bool, error) {
retry.Run("", t, func(r *retry.R) {
out, err := operator.AutopilotServerHealth(nil)
if err != nil {
return false, fmt.Errorf("err: %v", err)
r.Fatalf("err: %v", err)
}
if len(out.Servers) != 1 ||
!out.Servers[0].Healthy ||
out.Servers[0].Name != s.Config.NodeName {
return false, fmt.Errorf("bad: %v", out)
r.Fatalf("bad: %v", out)
}
return true, nil
}); err != nil {
t.Fatal(err)
}
})
}

View File

@ -4,7 +4,7 @@ import (
"reflect"
"testing"
"github.com/hashicorp/consul/testutil"
"github.com/hashicorp/consul/testutil/retry"
)
func TestPreparedQuery(t *testing.T) {
@ -30,19 +30,14 @@ func TestPreparedQuery(t *testing.T) {
}
catalog := c.Catalog()
if err := testutil.WaitForResult(func() (bool, error) {
retry.Run("", t, func(r *retry.R) {
if _, err := catalog.Register(reg, nil); err != nil {
return false, err
r.Fatal(err)
}
if _, _, err := catalog.Node("foobar", nil); err != nil {
return false, err
r.Fatal(err)
}
return true, nil
}); err != nil {
t.Fatal(err)
}
})
// Create a simple prepared query.
def := &PreparedQueryDefinition{

View File

@ -3,7 +3,6 @@ package agent
import (
"bytes"
"encoding/json"
"errors"
"fmt"
"io"
"io/ioutil"
@ -19,7 +18,7 @@ import (
"github.com/hashicorp/consul/command/base"
"github.com/hashicorp/consul/consul/structs"
"github.com/hashicorp/consul/logger"
"github.com/hashicorp/consul/testutil"
"github.com/hashicorp/consul/testutil/retry"
"github.com/hashicorp/consul/types"
"github.com/hashicorp/serf/serf"
"github.com/mitchellh/cli"
@ -348,11 +347,11 @@ func TestAgent_Reload(t *testing.T) {
close(doneCh)
}()
if err := testutil.WaitForResult(func() (bool, error) {
return len(cmd.httpServers) == 1, nil
}); err != nil {
t.Fatalf("should have an http server")
}
retry.Run("", t, func(r *retry.R) {
if got, want := len(cmd.httpServers), 1; got != want {
r.Fatalf("got %d servers want %d", got, want)
}
})
if _, ok := cmd.agent.state.services["redis"]; !ok {
t.Fatalf("missing redis service")
@ -537,11 +536,11 @@ func TestAgent_Join(t *testing.T) {
t.Fatalf("should have 2 members")
}
if err := testutil.WaitForResult(func() (bool, error) {
return len(a2.LANMembers()) == 2, nil
}); err != nil {
t.Fatal("should have 2 members")
}
retry.Run("", t, func(r *retry.R) {
if got, want := len(a2.LANMembers()), 2; got != want {
r.Fatalf("got %d LAN members want %d", got, want)
}
})
}
func TestAgent_Join_WAN(t *testing.T) {
@ -572,11 +571,11 @@ func TestAgent_Join_WAN(t *testing.T) {
t.Fatalf("should have 2 members")
}
if err := testutil.WaitForResult(func() (bool, error) {
return len(a2.WANMembers()) == 2, nil
}); err != nil {
t.Fatal("should have 2 members")
}
retry.Run("", t, func(r *retry.R) {
if got, want := len(a2.WANMembers()), 2; got != want {
r.Fatalf("got %d WAN members want %d", got, want)
}
})
}
func TestAgent_Join_ACLDeny(t *testing.T) {
@ -664,14 +663,12 @@ func TestAgent_Leave(t *testing.T) {
if obj != nil {
t.Fatalf("Err: %v", obj)
}
if err := testutil.WaitForResult(func() (bool, error) {
retry.Run("", t, func(r *retry.R) {
m := srv.agent.LANMembers()
success := m[1].Status == serf.StatusLeft
return success, errors.New(m[1].Status.String())
}); err != nil {
t.Fatalf("member status is %v, should be left", err)
}
if got, want := m[1].Status, serf.StatusLeft; got != want {
r.Fatalf("got status %q want %q", got, want)
}
})
}
func TestAgent_Leave_ACLDeny(t *testing.T) {
@ -763,14 +760,13 @@ func TestAgent_ForceLeave(t *testing.T) {
if obj != nil {
t.Fatalf("Err: %v", obj)
}
if err := testutil.WaitForResult(func() (bool, error) {
retry.Run("", t, func(r *retry.R) {
m := srv.agent.LANMembers()
success := m[1].Status == serf.StatusLeft
return success, errors.New(m[1].Status.String())
}); err != nil {
t.Fatalf("member status is %v, should be left", err)
}
if got, want := m[1].Status, serf.StatusLeft; got != want {
r.Fatalf("got status %q want %q", got, want)
}
})
}
func TestAgent_ForceLeave_ACLDeny(t *testing.T) {
@ -1932,8 +1928,7 @@ func TestAgent_Monitor(t *testing.T) {
}
// Try to stream logs until we see the expected log line
expected := []byte("raft: Initial configuration (index=1)")
if err := testutil.WaitForResult(func() (bool, error) {
retry.Run("", t, func(r *retry.R) {
req, _ = http.NewRequest("GET", "/v1/agent/monitor?loglevel=debug", nil)
resp = newClosableRecorder()
done := make(chan struct{})
@ -1947,13 +1942,12 @@ func TestAgent_Monitor(t *testing.T) {
resp.Close()
<-done
if bytes.Contains(resp.Body.Bytes(), expected) {
return true, nil
got := resp.Body.Bytes()
want := []byte("raft: Initial configuration (index=1)")
if !bytes.Contains(got, want) {
r.Fatalf("got %q and did not find %q", got, want)
}
return false, fmt.Errorf("didn't see expected")
}); err != nil {
t.Fatalf("err: %v", err)
}
})
}
type closableRecorder struct {

View File

@ -10,6 +10,7 @@ import (
"github.com/hashicorp/consul/consul/structs"
"github.com/hashicorp/consul/testrpc"
"github.com/hashicorp/consul/testutil/retry"
"github.com/hashicorp/serf/coordinate"
)
@ -89,20 +90,17 @@ func TestCatalogDatacenters(t *testing.T) {
defer srv.Shutdown()
defer srv.agent.Shutdown()
if err := testrpc.WaitForResult(func() (bool, error) {
retry.Run("", t, func(r *retry.R) {
obj, err := srv.CatalogDatacenters(nil, nil)
if err != nil {
return false, err
r.Fatal(err)
}
dcs := obj.([]string)
if len(dcs) != 1 {
return false, fmt.Errorf("missing dc: %v", dcs)
if got, want := len(dcs), 1; got != want {
r.Fatalf("got %d data centers want %d", got, want)
}
return true, nil
}); err != nil {
t.Fatal(err)
}
})
}
func TestCatalogNodes(t *testing.T) {
@ -214,16 +212,15 @@ func TestCatalogNodes_WanTranslation(t *testing.T) {
testrpc.WaitForLeader(t, srv2.agent.RPC, "dc2")
// Wait for the WAN join.
addr := fmt.Sprintf("127.0.0.1:%d",
srv1.agent.config.Ports.SerfWan)
addr := fmt.Sprintf("127.0.0.1:%d", srv1.agent.config.Ports.SerfWan)
if _, err := srv2.agent.JoinWAN([]string{addr}); err != nil {
t.Fatalf("err: %v", err)
}
if err := testrpc.WaitForResult(func() (bool, error) {
return len(srv1.agent.WANMembers()) > 1, nil
}); err != nil {
t.Fatalf("Failed waiting for WAN join: %v", err)
}
retry.Run("", t, func(r *retry.R) {
if got, want := len(srv1.agent.WANMembers()), 2; got < want {
r.Fatalf("got %d WAN members want at least %d", got, want)
}
})
// Register a node with DC2.
{
@ -699,11 +696,11 @@ func TestCatalogServiceNodes_WanTranslation(t *testing.T) {
if _, err := srv2.agent.JoinWAN([]string{addr}); err != nil {
t.Fatalf("err: %v", err)
}
if err := testrpc.WaitForResult(func() (bool, error) {
return len(srv1.agent.WANMembers()) > 1, nil
}); err != nil {
t.Fatalf("Failed waiting for WAN join: %v", err)
}
retry.Run("", t, func(r *retry.R) {
if got, want := len(srv1.agent.WANMembers()), 2; got < want {
r.Fatalf("got %d WAN members want at least %d", got, want)
}
})
// Register a node with DC2.
{
@ -938,11 +935,11 @@ func TestCatalogNodeServices_WanTranslation(t *testing.T) {
if _, err := srv2.agent.JoinWAN([]string{addr}); err != nil {
t.Fatalf("err: %v", err)
}
if err := testrpc.WaitForResult(func() (bool, error) {
return len(srv1.agent.WANMembers()) > 1, nil
}); err != nil {
t.Fatalf("Failed waiting for WAN join: %v", err)
}
retry.Run("", t, func(r *retry.R) {
if got, want := len(srv1.agent.WANMembers()), 2; got < want {
r.Fatalf("got %d WAN members want at least %d", got, want)
}
})
// Register a node with DC2.
{

View File

@ -17,7 +17,7 @@ import (
docker "github.com/fsouza/go-dockerclient"
"github.com/hashicorp/consul/api"
"github.com/hashicorp/consul/testutil"
"github.com/hashicorp/consul/testutil/retry"
"github.com/hashicorp/consul/types"
)
@ -78,21 +78,14 @@ func expectStatus(t *testing.T, script, status string) {
}
check.Start()
defer check.Stop()
if err := testutil.WaitForResult(func() (bool, error) {
// Should have at least 2 updates
if mock.Updates("foo") < 2 {
return false, fmt.Errorf("should have 2 updates %v", mock.updates)
retry.Run("", t, func(r *retry.R) {
if got, want := mock.Updates("foo"), 2; got < want {
r.Fatalf("got %d updates want at least %d", got, want)
}
if mock.State("foo") != status {
return false, fmt.Errorf("should be %v %v", status, mock.state)
if got, want := mock.State("foo"), status; got != want {
r.Fatalf("got state %q want %q", got, want)
}
return true, nil
}); err != nil {
t.Fatal(err)
}
})
}
func TestCheckMonitor_Passing(t *testing.T) {
@ -281,25 +274,18 @@ func expectHTTPStatus(t *testing.T, url string, status string) {
}
check.Start()
defer check.Stop()
if err := testutil.WaitForResult(func() (bool, error) {
// Should have at least 2 updates
if mock.Updates("foo") < 2 {
return false, fmt.Errorf("should have 2 updates %v", mock.updates)
retry.Run("", t, func(r *retry.R) {
if got, want := mock.Updates("foo"), 2; got < want {
r.Fatalf("got %d updates want at least %d", got, want)
}
if mock.State("foo") != status {
return false, fmt.Errorf("should be %v %v", status, mock.state)
if got, want := mock.State("foo"), status; got != want {
r.Fatalf("got state %q want %q", got, want)
}
// Allow slightly more data than CheckBufSize, for the header
if n := len(mock.Output("foo")); n > (CheckBufSize + 256) {
return false, fmt.Errorf("output too long: %d (%d-byte limit)", n, CheckBufSize)
r.Fatalf("output too long: %d (%d-byte limit)", n, CheckBufSize)
}
return true, nil
}); err != nil {
t.Fatal(err)
}
})
}
func TestCheckHTTPCritical(t *testing.T) {
@ -387,20 +373,14 @@ func TestCheckHTTPTimeout(t *testing.T) {
check.Start()
defer check.Stop()
if err := testutil.WaitForResult(func() (bool, error) {
// Should have at least 2 updates
if mock.updates["bar"] < 2 {
return false, fmt.Errorf("should have at least 2 updates %v", mock.updates)
retry.Run("", t, func(r *retry.R) {
if got, want := mock.Updates("bar"), 2; got < want {
r.Fatalf("got %d updates want at least %d", got, want)
}
if mock.state["bar"] != api.HealthCritical {
return false, fmt.Errorf("should be critical %v", mock.state)
if got, want := mock.State("bar"), api.HealthCritical; got != want {
r.Fatalf("got state %q want %q", got, want)
}
return true, nil
}); err != nil {
t.Fatal(err)
}
})
}
func TestCheckHTTP_disablesKeepAlives(t *testing.T) {
@ -460,15 +440,11 @@ func TestCheckHTTP_TLSSkipVerify_true_pass(t *testing.T) {
if !check.httpClient.Transport.(*http.Transport).TLSClientConfig.InsecureSkipVerify {
t.Fatalf("should be true")
}
if err := testutil.WaitForResult(func() (bool, error) {
if mock.state["skipverify_true"] != api.HealthPassing {
return false, fmt.Errorf("should be passing %v", mock.state)
retry.Run("", t, func(r *retry.R) {
if got, want := mock.state["skipverify_true"], api.HealthPassing; got != want {
r.Fatalf("got state %q want %q", got, want)
}
return true, nil
}); err != nil {
t.Fatal(err)
}
})
}
func TestCheckHTTP_TLSSkipVerify_true_fail(t *testing.T) {
@ -495,15 +471,11 @@ func TestCheckHTTP_TLSSkipVerify_true_fail(t *testing.T) {
if !check.httpClient.Transport.(*http.Transport).TLSClientConfig.InsecureSkipVerify {
t.Fatalf("should be true")
}
if err := testutil.WaitForResult(func() (bool, error) {
if mock.state["skipverify_true"] != api.HealthCritical {
return false, fmt.Errorf("should be critical %v", mock.state)
retry.Run("", t, func(r *retry.R) {
if got, want := mock.state["skipverify_true"], api.HealthCritical; got != want {
r.Fatalf("got state %q want %q", got, want)
}
return true, nil
}); err != nil {
t.Fatal(err)
}
})
}
func TestCheckHTTP_TLSSkipVerify_false(t *testing.T) {
@ -531,20 +503,15 @@ func TestCheckHTTP_TLSSkipVerify_false(t *testing.T) {
if check.httpClient.Transport.(*http.Transport).TLSClientConfig.InsecureSkipVerify {
t.Fatalf("should be false")
}
if err := testutil.WaitForResult(func() (bool, error) {
retry.Run("", t, func(r *retry.R) {
// This should fail due to an invalid SSL cert
if mock.state["skipverify_false"] != api.HealthCritical {
return false, fmt.Errorf("should be critical %v", mock.state)
if got, want := mock.state["skipverify_false"], api.HealthCritical; got != want {
r.Fatalf("got state %q want %q", got, want)
}
if !strings.Contains(mock.output["skipverify_false"], "certificate signed by unknown authority") {
return false, fmt.Errorf("should fail with certificate error %v", mock.output)
r.Fatalf("should fail with certificate error %v", mock.output)
}
return true, nil
}); err != nil {
t.Fatal(err)
}
})
}
func mockTCPServer(network string) net.Listener {
@ -581,20 +548,14 @@ func expectTCPStatus(t *testing.T, tcp string, status string) {
}
check.Start()
defer check.Stop()
if err := testutil.WaitForResult(func() (bool, error) {
// Should have at least 2 updates
if mock.Updates("foo") < 2 {
return false, fmt.Errorf("should have 2 updates %v", mock.updates)
retry.Run("", t, func(r *retry.R) {
if got, want := mock.Updates("foo"), 2; got < want {
r.Fatalf("got %d updates want at least %d", got, want)
}
if mock.State("foo") != status {
return false, fmt.Errorf("should be %v %v", status, mock.state)
if got, want := mock.State("foo"), status; got != want {
r.Fatalf("got state %q want %q", got, want)
}
return true, nil
}); err != nil {
t.Fatal(err)
}
})
}
func TestCheckTCPCritical(t *testing.T) {

View File

@ -11,7 +11,7 @@ import (
"testing"
"github.com/hashicorp/consul/command/base"
"github.com/hashicorp/consul/testutil"
"github.com/hashicorp/consul/testutil/retry"
"github.com/hashicorp/consul/version"
"github.com/mitchellh/cli"
)
@ -103,20 +103,14 @@ func TestRetryJoin(t *testing.T) {
}
close(doneCh)
}()
if err := testutil.WaitForResult(func() (bool, error) {
mem := agent.LANMembers()
if len(mem) != 2 {
return false, fmt.Errorf("bad: %#v", mem)
retry.Run("", t, func(r *retry.R) {
if got, want := len(agent.LANMembers()), 2; got != want {
r.Fatalf("got %d LAN members want %d", got, want)
}
mem = agent.WANMembers()
if len(mem) != 2 {
return false, fmt.Errorf("bad (wan): %#v", mem)
if got, want := len(agent.WANMembers()), 2; got != want {
r.Fatalf("got %d WAN members want %d", got, want)
}
return true, nil
}); err != nil {
t.Fatal(err)
}
})
}
func TestReadCliConfig(t *testing.T) {

View File

@ -14,6 +14,7 @@ import (
"github.com/hashicorp/consul/consul/structs"
"github.com/hashicorp/consul/lib"
"github.com/hashicorp/consul/testrpc"
"github.com/hashicorp/consul/testutil/retry"
"github.com/miekg/dns"
)
@ -1299,12 +1300,11 @@ func TestDNS_ServiceLookup_WanAddress(t *testing.T) {
if _, err := srv2.agent.JoinWAN([]string{addr}); err != nil {
t.Fatalf("err: %v", err)
}
if err := testrpc.WaitForResult(func() (bool, error) {
return len(srv1.agent.WANMembers()) > 1, nil
}); err != nil {
t.Fatalf("Failed waiting for WAN join: %v", err)
}
retry.Run("", t, func(r *retry.R) {
if got, want := len(srv1.agent.WANMembers()), 2; got < want {
r.Fatalf("got %d WAN members want at least %d", got, want)
}
})
// Register a remote node with a service.
{
@ -3375,11 +3375,11 @@ func TestDNS_PreparedQuery_Failover(t *testing.T) {
if _, err := srv2.agent.JoinWAN([]string{addr}); err != nil {
t.Fatalf("err: %v", err)
}
if err := testrpc.WaitForResult(func() (bool, error) {
return len(srv1.agent.WANMembers()) > 1, nil
}); err != nil {
t.Fatalf("Failed waiting for WAN join: %v", err)
}
retry.Run("", t, func(r *retry.R) {
if got, want := len(srv1.agent.WANMembers()), 2; got < want {
r.Fatalf("got %d WAN members want at least %d", got, want)
}
})
// Register a remote node with a service.
{

View File

@ -11,7 +11,7 @@ import (
"time"
"github.com/hashicorp/consul/consul/structs"
"github.com/hashicorp/consul/testutil"
"github.com/hashicorp/consul/testutil/retry"
)
func TestEventFire(t *testing.T) {
@ -123,32 +123,29 @@ func TestEventList(t *testing.T) {
t.Fatalf("err: %v", err)
}
if err := testutil.WaitForResult(func() (bool, error) {
retry.Run("", t, func(r *retry.R) {
req, err := http.NewRequest("GET", "/v1/event/list", nil)
if err != nil {
return false, err
r.Fatal(err)
}
resp := httptest.NewRecorder()
obj, err := srv.EventList(resp, req)
if err != nil {
return false, err
r.Fatal(err)
}
list, ok := obj.([]*UserEvent)
if !ok {
return false, fmt.Errorf("bad: %#v", obj)
r.Fatalf("bad: %#v", obj)
}
if len(list) != 1 || list[0].Name != "test" {
return false, fmt.Errorf("bad: %#v", list)
r.Fatalf("bad: %#v", list)
}
header := resp.Header().Get("X-Consul-Index")
if header == "" || header == "0" {
return false, fmt.Errorf("bad: %#v", header)
r.Fatalf("bad: %#v", header)
}
return true, nil
}); err != nil {
t.Fatal(err)
}
})
})
}
@ -164,32 +161,29 @@ func TestEventList_Filter(t *testing.T) {
t.Fatalf("err: %v", err)
}
if err := testutil.WaitForResult(func() (bool, error) {
retry.Run("", t, func(r *retry.R) {
req, err := http.NewRequest("GET", "/v1/event/list?name=foo", nil)
if err != nil {
return false, err
r.Fatal(err)
}
resp := httptest.NewRecorder()
obj, err := srv.EventList(resp, req)
if err != nil {
return false, err
r.Fatal(err)
}
list, ok := obj.([]*UserEvent)
if !ok {
return false, fmt.Errorf("bad: %#v", obj)
r.Fatalf("bad: %#v", obj)
}
if len(list) != 1 || list[0].Name != "foo" {
return false, fmt.Errorf("bad: %#v", list)
r.Fatalf("bad: %#v", list)
}
header := resp.Header().Get("X-Consul-Index")
if header == "" || header == "0" {
return false, fmt.Errorf("bad: %#v", header)
r.Fatalf("bad: %#v", header)
}
return true, nil
}); err != nil {
t.Fatal(err)
}
})
})
}
@ -207,54 +201,48 @@ func TestEventList_ACLFilter(t *testing.T) {
// Try no token.
{
if err := testutil.WaitForResult(func() (bool, error) {
retry.Run("", t, func(r *retry.R) {
req, err := http.NewRequest("GET", "/v1/event/list", nil)
if err != nil {
return false, err
r.Fatal(err)
}
resp := httptest.NewRecorder()
obj, err := srv.EventList(resp, req)
if err != nil {
return false, err
r.Fatal(err)
}
list, ok := obj.([]*UserEvent)
if !ok {
return false, fmt.Errorf("bad: %#v", obj)
r.Fatalf("bad: %#v", obj)
}
if len(list) != 0 {
return false, fmt.Errorf("bad: %#v", list)
r.Fatalf("bad: %#v", list)
}
return true, nil
}); err != nil {
t.Fatal(err)
}
})
}
// Try the root token.
{
if err := testutil.WaitForResult(func() (bool, error) {
retry.Run("", t, func(r *retry.R) {
req, err := http.NewRequest("GET", "/v1/event/list?token=root", nil)
if err != nil {
return false, err
r.Fatal(err)
}
resp := httptest.NewRecorder()
obj, err := srv.EventList(resp, req)
if err != nil {
return false, err
r.Fatal(err)
}
list, ok := obj.([]*UserEvent)
if !ok {
return false, fmt.Errorf("bad: %#v", obj)
r.Fatalf("bad: %#v", obj)
}
if len(list) != 1 || list[0].Name != "foo" {
return false, fmt.Errorf("bad: %#v", list)
r.Fatalf("bad: %#v", list)
}
return true, nil
}); err != nil {
t.Fatal(err)
}
})
}
}
@ -266,25 +254,22 @@ func TestEventList_Blocking(t *testing.T) {
}
var index string
if err := testutil.WaitForResult(func() (bool, error) {
retry.Run("", t, func(r *retry.R) {
req, err := http.NewRequest("GET", "/v1/event/list", nil)
if err != nil {
return false, err
r.Fatal(err)
}
resp := httptest.NewRecorder()
_, err = srv.EventList(resp, req)
if err != nil {
return false, err
r.Fatal(err)
}
header := resp.Header().Get("X-Consul-Index")
if header == "" || header == "0" {
return false, fmt.Errorf("bad: %#v", header)
r.Fatalf("bad: %#v", header)
}
index = header
return true, nil
}); err != nil {
t.Fatal(err)
}
})
go func() {
time.Sleep(50 * time.Millisecond)
@ -294,29 +279,26 @@ func TestEventList_Blocking(t *testing.T) {
}
}()
if err := testutil.WaitForResult(func() (bool, error) {
retry.Run("", t, func(r *retry.R) {
url := "/v1/event/list?index=" + index
req, err := http.NewRequest("GET", url, nil)
if err != nil {
return false, err
r.Fatal(err)
}
resp := httptest.NewRecorder()
obj, err := srv.EventList(resp, req)
if err != nil {
return false, err
r.Fatal(err)
}
list, ok := obj.([]*UserEvent)
if !ok {
return false, fmt.Errorf("bad: %#v", obj)
r.Fatalf("bad: %#v", obj)
}
if len(list) != 2 || list[1].Name != "second" {
return false, fmt.Errorf("bad: %#v", list)
r.Fatalf("bad: %#v", list)
}
return true, nil
}); err != nil {
t.Fatal(err)
}
})
})
}
@ -336,31 +318,31 @@ func TestEventList_EventBufOrder(t *testing.T) {
t.Fatalf("err: %v", err)
}
}
retry.
// Test that the event order is preserved when name
// filtering on a list of > 1 matching event.
Run("", t, func(r *retry.R) {
url := "/v1/event/list?name=foo"
req, err := http.NewRequest("GET", url, nil)
if err != nil {
r.Fatal(err)
}
resp := httptest.NewRecorder()
obj, err := srv.EventList(resp, req)
if err != nil {
r.Fatal(err)
}
list, ok := obj.([]*UserEvent)
if !ok {
r.Fatalf("bad: %#v", obj)
}
if len(list) != 3 || list[2].ID != expected.ID {
r.Fatalf("bad: %#v", list)
}
})
// Test that the event order is preserved when name
// filtering on a list of > 1 matching event.
if err := testutil.WaitForResult(func() (bool, error) {
url := "/v1/event/list?name=foo"
req, err := http.NewRequest("GET", url, nil)
if err != nil {
return false, err
}
resp := httptest.NewRecorder()
obj, err := srv.EventList(resp, req)
if err != nil {
return false, err
}
list, ok := obj.([]*UserEvent)
if !ok {
return false, fmt.Errorf("bad: %#v", obj)
}
if len(list) != 3 || list[2].ID != expected.ID {
return false, fmt.Errorf("bad: %#v", list)
}
return true, nil
}); err != nil {
t.Fatal(err)
}
})
}

View File

@ -11,6 +11,7 @@ import (
"github.com/hashicorp/consul/api"
"github.com/hashicorp/consul/consul/structs"
"github.com/hashicorp/consul/testrpc"
"github.com/hashicorp/consul/testutil/retry"
"github.com/hashicorp/serf/coordinate"
)
@ -21,25 +22,22 @@ func TestHealthChecksInState(t *testing.T) {
t.Fatalf("err: %v", err)
}
if err := testrpc.WaitForResult(func() (bool, error) {
retry.Run("", t, func(r *retry.R) {
resp := httptest.NewRecorder()
obj, err := srv.HealthChecksInState(resp, req)
if err != nil {
return false, err
r.Fatal(err)
}
if err := checkIndex(resp); err != nil {
return false, err
r.Fatal(err)
}
// Should be a non-nil empty list
nodes := obj.(structs.HealthChecks)
if nodes == nil || len(nodes) != 0 {
return false, fmt.Errorf("bad: %v", obj)
r.Fatalf("bad: %v", obj)
}
return true, nil
}); err != nil {
t.Fatal(err)
}
})
})
httpTest(t, func(srv *HTTPServer) {
@ -48,25 +46,22 @@ func TestHealthChecksInState(t *testing.T) {
t.Fatalf("err: %v", err)
}
if err := testrpc.WaitForResult(func() (bool, error) {
retry.Run("", t, func(r *retry.R) {
resp := httptest.NewRecorder()
obj, err := srv.HealthChecksInState(resp, req)
if err != nil {
return false, err
r.Fatal(err)
}
if err := checkIndex(resp); err != nil {
return false, err
r.Fatal(err)
}
// Should be 1 health check for the server
nodes := obj.(structs.HealthChecks)
if len(nodes) != 1 {
return false, fmt.Errorf("bad: %v", obj)
r.Fatalf("bad: %v", obj)
}
return true, nil
}); err != nil {
t.Fatal(err)
}
})
})
}
@ -93,25 +88,22 @@ func TestHealthChecksInState_NodeMetaFilter(t *testing.T) {
t.Fatalf("err: %v", err)
}
if err := testrpc.WaitForResult(func() (bool, error) {
retry.Run("", t, func(r *retry.R) {
resp := httptest.NewRecorder()
obj, err := srv.HealthChecksInState(resp, req)
if err != nil {
return false, err
r.Fatal(err)
}
if err := checkIndex(resp); err != nil {
return false, err
r.Fatal(err)
}
// Should be 1 health check for the server
nodes := obj.(structs.HealthChecks)
if len(nodes) != 1 {
return false, fmt.Errorf("bad: %v", obj)
r.Fatalf("bad: %v", obj)
}
return true, nil
}); err != nil {
t.Fatal(err)
}
})
})
}
@ -175,29 +167,29 @@ func TestHealthChecksInState_DistanceSort(t *testing.T) {
if err := srv.agent.RPC("Coordinate.Update", &arg, &out); err != nil {
t.Fatalf("err: %v", err)
}
retry.
// Retry until foo moves to the front of the line.
Run("", t, func(r *retry.R) {
resp = httptest.NewRecorder()
obj, err = srv.HealthChecksInState(resp, req)
if err != nil {
r.Fatalf("err: %v", err)
}
assertIndex(t, resp)
nodes = obj.(structs.HealthChecks)
if len(nodes) != 2 {
r.Fatalf("bad: %v", nodes)
}
if nodes[0].Node != "foo" {
r.Fatalf("bad: %v", nodes)
}
if nodes[1].Node != "bar" {
r.Fatalf("bad: %v", nodes)
}
})
// Retry until foo moves to the front of the line.
if err := testrpc.WaitForResult(func() (bool, error) {
resp = httptest.NewRecorder()
obj, err = srv.HealthChecksInState(resp, req)
if err != nil {
return false, fmt.Errorf("err: %v", err)
}
assertIndex(t, resp)
nodes = obj.(structs.HealthChecks)
if len(nodes) != 2 {
return false, fmt.Errorf("bad: %v", nodes)
}
if nodes[0].Node != "foo" {
return false, fmt.Errorf("bad: %v", nodes)
}
if nodes[1].Node != "bar" {
return false, fmt.Errorf("bad: %v", nodes)
}
return true, nil
}); err != nil {
t.Fatal(err)
}
}
func TestHealthNodeChecks(t *testing.T) {
@ -436,29 +428,29 @@ func TestHealthServiceChecks_DistanceSort(t *testing.T) {
if err := srv.agent.RPC("Coordinate.Update", &arg, &out); err != nil {
t.Fatalf("err: %v", err)
}
retry.
// Retry until foo has moved to the front of the line.
Run("", t, func(r *retry.R) {
resp = httptest.NewRecorder()
obj, err = srv.HealthServiceChecks(resp, req)
if err != nil {
r.Fatalf("err: %v", err)
}
assertIndex(t, resp)
nodes = obj.(structs.HealthChecks)
if len(nodes) != 2 {
r.Fatalf("bad: %v", obj)
}
if nodes[0].Node != "foo" {
r.Fatalf("bad: %v", nodes)
}
if nodes[1].Node != "bar" {
r.Fatalf("bad: %v", nodes)
}
})
// Retry until foo has moved to the front of the line.
if err := testrpc.WaitForResult(func() (bool, error) {
resp = httptest.NewRecorder()
obj, err = srv.HealthServiceChecks(resp, req)
if err != nil {
return false, fmt.Errorf("err: %v", err)
}
assertIndex(t, resp)
nodes = obj.(structs.HealthChecks)
if len(nodes) != 2 {
return false, fmt.Errorf("bad: %v", obj)
}
if nodes[0].Node != "foo" {
return false, fmt.Errorf("bad: %v", nodes)
}
if nodes[1].Node != "bar" {
return false, fmt.Errorf("bad: %v", nodes)
}
return true, nil
}); err != nil {
t.Fatal(err)
}
}
func TestHealthServiceNodes(t *testing.T) {
@ -670,29 +662,29 @@ func TestHealthServiceNodes_DistanceSort(t *testing.T) {
if err := srv.agent.RPC("Coordinate.Update", &arg, &out); err != nil {
t.Fatalf("err: %v", err)
}
retry.
// Retry until foo has moved to the front of the line.
Run("", t, func(r *retry.R) {
resp = httptest.NewRecorder()
obj, err = srv.HealthServiceNodes(resp, req)
if err != nil {
r.Fatalf("err: %v", err)
}
assertIndex(t, resp)
nodes = obj.(structs.CheckServiceNodes)
if len(nodes) != 2 {
r.Fatalf("bad: %v", obj)
}
if nodes[0].Node.Node != "foo" {
r.Fatalf("bad: %v", nodes)
}
if nodes[1].Node.Node != "bar" {
r.Fatalf("bad: %v", nodes)
}
})
// Retry until foo has moved to the front of the line.
if err := testrpc.WaitForResult(func() (bool, error) {
resp = httptest.NewRecorder()
obj, err = srv.HealthServiceNodes(resp, req)
if err != nil {
return false, fmt.Errorf("err: %v", err)
}
assertIndex(t, resp)
nodes = obj.(structs.CheckServiceNodes)
if len(nodes) != 2 {
return false, fmt.Errorf("bad: %v", obj)
}
if nodes[0].Node.Node != "foo" {
return false, fmt.Errorf("bad: %v", nodes)
}
if nodes[1].Node.Node != "bar" {
return false, fmt.Errorf("bad: %v", nodes)
}
return true, nil
}); err != nil {
t.Fatal(err)
}
}
func TestHealthServiceNodes_PassingFilter(t *testing.T) {
@ -763,16 +755,15 @@ func TestHealthServiceNodes_WanTranslation(t *testing.T) {
testrpc.WaitForLeader(t, srv2.agent.RPC, "dc2")
// Wait for the WAN join.
addr := fmt.Sprintf("127.0.0.1:%d",
srv1.agent.config.Ports.SerfWan)
addr := fmt.Sprintf("127.0.0.1:%d", srv1.agent.config.Ports.SerfWan)
if _, err := srv2.agent.JoinWAN([]string{addr}); err != nil {
t.Fatalf("err: %v", err)
}
if err := testrpc.WaitForResult(func() (bool, error) {
return len(srv1.agent.WANMembers()) > 1, nil
}); err != nil {
t.Fatal(err)
}
retry.Run("", t, func(r *retry.R) {
if got, want := len(srv1.agent.WANMembers()), 2; got < want {
r.Fatalf("got %d WAN members want at least %d", got, want)
}
})
// Register a node with DC2.
{

View File

@ -1,7 +1,6 @@
package agent
import (
"fmt"
"os"
"reflect"
"testing"
@ -10,6 +9,7 @@ import (
"github.com/hashicorp/consul/api"
"github.com/hashicorp/consul/consul/structs"
"github.com/hashicorp/consul/testrpc"
"github.com/hashicorp/consul/testutil/retry"
"github.com/hashicorp/consul/types"
)
@ -117,9 +117,9 @@ func TestAgentAntiEntropy_Services(t *testing.T) {
Node: agent.config.NodeName,
}
verifyServices := func() (bool, error) {
retry.Run("", t, func(r *retry.R) {
if err := agent.RPC("Catalog.NodeServices", &req, &services); err != nil {
return false, fmt.Errorf("err: %v", err)
r.Fatalf("err: %v", err)
}
// Make sure we sent along our node info when we synced.
@ -129,12 +129,12 @@ func TestAgentAntiEntropy_Services(t *testing.T) {
if id != conf.NodeID ||
!reflect.DeepEqual(addrs, conf.TaggedAddresses) ||
!reflect.DeepEqual(meta, conf.Meta) {
return false, fmt.Errorf("bad: %v", services.NodeServices.Node)
r.Fatalf("bad: %v", services.NodeServices.Node)
}
// We should have 6 services (consul included)
if len(services.NodeServices.Services) != 6 {
return false, fmt.Errorf("bad: %v", services.NodeServices.Services)
r.Fatalf("bad: %v", services.NodeServices.Services)
}
// All the services should match
@ -143,50 +143,44 @@ func TestAgentAntiEntropy_Services(t *testing.T) {
switch id {
case "mysql":
if !reflect.DeepEqual(serv, srv1) {
return false, fmt.Errorf("bad: %v %v", serv, srv1)
r.Fatalf("bad: %v %v", serv, srv1)
}
case "redis":
if !reflect.DeepEqual(serv, srv2) {
return false, fmt.Errorf("bad: %#v %#v", serv, srv2)
r.Fatalf("bad: %#v %#v", serv, srv2)
}
case "web":
if !reflect.DeepEqual(serv, srv3) {
return false, fmt.Errorf("bad: %v %v", serv, srv3)
r.Fatalf("bad: %v %v", serv, srv3)
}
case "api":
if !reflect.DeepEqual(serv, srv5) {
return false, fmt.Errorf("bad: %v %v", serv, srv5)
r.Fatalf("bad: %v %v", serv, srv5)
}
case "cache":
if !reflect.DeepEqual(serv, srv6) {
return false, fmt.Errorf("bad: %v %v", serv, srv6)
r.Fatalf("bad: %v %v", serv, srv6)
}
case "consul":
// ignore
default:
return false, fmt.Errorf("unexpected service: %v", id)
r.Fatalf("unexpected service: %v", id)
}
}
// Check the local state
if len(agent.state.services) != 6 {
return false, fmt.Errorf("bad: %v", agent.state.services)
r.Fatalf("bad: %v", agent.state.services)
}
if len(agent.state.serviceStatus) != 6 {
return false, fmt.Errorf("bad: %v", agent.state.serviceStatus)
r.Fatalf("bad: %v", agent.state.serviceStatus)
}
for name, status := range agent.state.serviceStatus {
if !status.inSync {
return false, fmt.Errorf("should be in sync: %v %v", name, status)
r.Fatalf("should be in sync: %v %v", name, status)
}
}
return true, nil
}
if err := testrpc.WaitForResult(verifyServices); err != nil {
t.Fatal(err)
}
})
// Remove one of the services
agent.state.RemoveService("api")
@ -194,14 +188,14 @@ func TestAgentAntiEntropy_Services(t *testing.T) {
// Trigger anti-entropy run and wait
agent.StartSync()
verifyServicesAfterRemove := func() (bool, error) {
retry.Run("", t, func(r *retry.R) {
if err := agent.RPC("Catalog.NodeServices", &req, &services); err != nil {
return false, fmt.Errorf("err: %v", err)
r.Fatalf("err: %v", err)
}
// We should have 5 services (consul included)
if len(services.NodeServices.Services) != 5 {
return false, fmt.Errorf("bad: %v", services.NodeServices.Services)
r.Fatalf("bad: %v", services.NodeServices.Services)
}
// All the services should match
@ -210,46 +204,40 @@ func TestAgentAntiEntropy_Services(t *testing.T) {
switch id {
case "mysql":
if !reflect.DeepEqual(serv, srv1) {
return false, fmt.Errorf("bad: %v %v", serv, srv1)
r.Fatalf("bad: %v %v", serv, srv1)
}
case "redis":
if !reflect.DeepEqual(serv, srv2) {
return false, fmt.Errorf("bad: %#v %#v", serv, srv2)
r.Fatalf("bad: %#v %#v", serv, srv2)
}
case "web":
if !reflect.DeepEqual(serv, srv3) {
return false, fmt.Errorf("bad: %v %v", serv, srv3)
r.Fatalf("bad: %v %v", serv, srv3)
}
case "cache":
if !reflect.DeepEqual(serv, srv6) {
return false, fmt.Errorf("bad: %v %v", serv, srv6)
r.Fatalf("bad: %v %v", serv, srv6)
}
case "consul":
// ignore
default:
return false, fmt.Errorf("unexpected service: %v", id)
r.Fatalf("unexpected service: %v", id)
}
}
// Check the local state
if len(agent.state.services) != 5 {
return false, fmt.Errorf("bad: %v", agent.state.services)
r.Fatalf("bad: %v", agent.state.services)
}
if len(agent.state.serviceStatus) != 5 {
return false, fmt.Errorf("bad: %v", agent.state.serviceStatus)
r.Fatalf("bad: %v", agent.state.serviceStatus)
}
for name, status := range agent.state.serviceStatus {
if !status.inSync {
return false, fmt.Errorf("should be in sync: %v %v", name, status)
r.Fatalf("should be in sync: %v %v", name, status)
}
}
return true, nil
}
if err := testrpc.WaitForResult(verifyServicesAfterRemove); err != nil {
t.Fatal(err)
}
})
}
func TestAgentAntiEntropy_EnableTagOverride(t *testing.T) {
@ -312,9 +300,9 @@ func TestAgentAntiEntropy_EnableTagOverride(t *testing.T) {
}
var services structs.IndexedNodeServices
verifyServices := func() (bool, error) {
retry.Run("", t, func(r *retry.R) {
if err := agent.RPC("Catalog.NodeServices", &req, &services); err != nil {
return false, fmt.Errorf("err: %v", err)
r.Fatalf("err: %v", err)
}
// All the services should match
@ -326,34 +314,28 @@ func TestAgentAntiEntropy_EnableTagOverride(t *testing.T) {
serv.Service != "svc1" ||
serv.Port != 6100 ||
!reflect.DeepEqual(serv.Tags, []string{"tag1_mod"}) {
return false, fmt.Errorf("bad: %v %v", serv, srv1)
r.Fatalf("bad: %v %v", serv, srv1)
}
case "svc_id2":
if serv.ID != "svc_id2" ||
serv.Service != "svc2" ||
serv.Port != 6200 ||
!reflect.DeepEqual(serv.Tags, []string{"tag2"}) {
return false, fmt.Errorf("bad: %v %v", serv, srv2)
r.Fatalf("bad: %v %v", serv, srv2)
}
case "consul":
// ignore
default:
return false, fmt.Errorf("unexpected service: %v", id)
r.Fatalf("unexpected service: %v", id)
}
}
for name, status := range agent.state.serviceStatus {
if !status.inSync {
return false, fmt.Errorf("should be in sync: %v %v", name, status)
r.Fatalf("should be in sync: %v %v", name, status)
}
}
return true, nil
}
if err := testrpc.WaitForResult(verifyServices); err != nil {
t.Fatal(err)
}
})
}
func TestAgentAntiEntropy_Services_WithChecks(t *testing.T) {
@ -738,14 +720,14 @@ func TestAgentAntiEntropy_Checks(t *testing.T) {
var checks structs.IndexedHealthChecks
// Verify that we are in sync
if err := testrpc.WaitForResult(func() (bool, error) {
retry.Run("", t, func(r *retry.R) {
if err := agent.RPC("Health.NodeChecks", &req, &checks); err != nil {
return false, fmt.Errorf("err: %v", err)
r.Fatalf("err: %v", err)
}
// We should have 5 checks (serf included)
if len(checks.HealthChecks) != 5 {
return false, fmt.Errorf("bad: %v", checks)
r.Fatalf("bad: %v", checks)
}
// All the checks should match
@ -754,30 +736,27 @@ func TestAgentAntiEntropy_Checks(t *testing.T) {
switch chk.CheckID {
case "mysql":
if !reflect.DeepEqual(chk, chk1) {
return false, fmt.Errorf("bad: %v %v", chk, chk1)
r.Fatalf("bad: %v %v", chk, chk1)
}
case "redis":
if !reflect.DeepEqual(chk, chk2) {
return false, fmt.Errorf("bad: %v %v", chk, chk2)
r.Fatalf("bad: %v %v", chk, chk2)
}
case "web":
if !reflect.DeepEqual(chk, chk3) {
return false, fmt.Errorf("bad: %v %v", chk, chk3)
r.Fatalf("bad: %v %v", chk, chk3)
}
case "cache":
if !reflect.DeepEqual(chk, chk5) {
return false, fmt.Errorf("bad: %v %v", chk, chk5)
r.Fatalf("bad: %v %v", chk, chk5)
}
case "serfHealth":
// ignore
default:
return false, fmt.Errorf("unexpected check: %v", chk)
r.Fatalf("unexpected check: %v", chk)
}
}
return true, nil
}); err != nil {
t.Fatal(err)
}
})
// Check the local state
if len(agent.state.checks) != 4 {
@ -820,14 +799,14 @@ func TestAgentAntiEntropy_Checks(t *testing.T) {
agent.StartSync()
// Verify that we are in sync
if err := testrpc.WaitForResult(func() (bool, error) {
retry.Run("", t, func(r *retry.R) {
if err := agent.RPC("Health.NodeChecks", &req, &checks); err != nil {
return false, fmt.Errorf("err: %v", err)
r.Fatalf("err: %v", err)
}
// We should have 5 checks (serf included)
if len(checks.HealthChecks) != 4 {
return false, fmt.Errorf("bad: %v", checks)
r.Fatalf("bad: %v", checks)
}
// All the checks should match
@ -836,26 +815,23 @@ func TestAgentAntiEntropy_Checks(t *testing.T) {
switch chk.CheckID {
case "mysql":
if !reflect.DeepEqual(chk, chk1) {
return false, fmt.Errorf("bad: %v %v", chk, chk1)
r.Fatalf("bad: %v %v", chk, chk1)
}
case "web":
if !reflect.DeepEqual(chk, chk3) {
return false, fmt.Errorf("bad: %v %v", chk, chk3)
r.Fatalf("bad: %v %v", chk, chk3)
}
case "cache":
if !reflect.DeepEqual(chk, chk5) {
return false, fmt.Errorf("bad: %v %v", chk, chk5)
r.Fatalf("bad: %v %v", chk, chk5)
}
case "serfHealth":
// ignore
default:
return false, fmt.Errorf("unexpected check: %v", chk)
r.Fatalf("unexpected check: %v", chk)
}
}
return true, nil
}); err != nil {
t.Fatal(err)
}
})
// Check the local state
if len(agent.state.checks) != 3 {
@ -1002,7 +978,7 @@ func TestAgentAntiEntropy_Checks_ACLDeny(t *testing.T) {
time.Sleep(200 * time.Millisecond)
// Verify that we are in sync
if err := testrpc.WaitForResult(func() (bool, error) {
retry.Run("", t, func(r *retry.R) {
req := structs.NodeSpecificRequest{
Datacenter: "dc1",
Node: agent.config.NodeName,
@ -1012,12 +988,12 @@ func TestAgentAntiEntropy_Checks_ACLDeny(t *testing.T) {
}
var checks structs.IndexedHealthChecks
if err := agent.RPC("Health.NodeChecks", &req, &checks); err != nil {
return false, fmt.Errorf("err: %v", err)
r.Fatalf("err: %v", err)
}
// We should have 2 checks (serf included)
if len(checks.HealthChecks) != 2 {
return false, fmt.Errorf("bad: %v", checks)
r.Fatalf("bad: %v", checks)
}
// All the checks should match
@ -1028,18 +1004,15 @@ func TestAgentAntiEntropy_Checks_ACLDeny(t *testing.T) {
t.Fatalf("should not be permitted")
case "api-check":
if !reflect.DeepEqual(chk, chk2) {
return false, fmt.Errorf("bad: %v %v", chk, chk2)
r.Fatalf("bad: %v %v", chk, chk2)
}
case "serfHealth":
// ignore
default:
return false, fmt.Errorf("unexpected check: %v", chk)
r.Fatalf("unexpected check: %v", chk)
}
}
return true, nil
}); err != nil {
t.Fatal(err)
}
})
// Check the local state.
if len(agent.state.checks) != 2 {
@ -1058,9 +1031,8 @@ func TestAgentAntiEntropy_Checks_ACLDeny(t *testing.T) {
agent.state.RemoveCheck("api-check")
agent.StartSync()
time.Sleep(200 * time.Millisecond)
// Verify that we are in sync
if err := testrpc.WaitForResult(func() (bool, error) {
retry.Run("", t, func(r *retry.R) {
req := structs.NodeSpecificRequest{
Datacenter: "dc1",
Node: agent.config.NodeName,
@ -1070,12 +1042,12 @@ func TestAgentAntiEntropy_Checks_ACLDeny(t *testing.T) {
}
var checks structs.IndexedHealthChecks
if err := agent.RPC("Health.NodeChecks", &req, &checks); err != nil {
return false, fmt.Errorf("err: %v", err)
r.Fatalf("err: %v", err)
}
// We should have 1 check (just serf)
if len(checks.HealthChecks) != 1 {
return false, fmt.Errorf("bad: %v", checks)
r.Fatalf("bad: %v", checks)
}
// All the checks should match
@ -1083,19 +1055,16 @@ func TestAgentAntiEntropy_Checks_ACLDeny(t *testing.T) {
chk.CreateIndex, chk.ModifyIndex = 0, 0
switch chk.CheckID {
case "mysql-check":
t.Fatalf("should not be permitted")
r.Fatalf("should not be permitted")
case "api-check":
t.Fatalf("should be deleted")
r.Fatalf("should be deleted")
case "serfHealth":
// ignore
default:
return false, fmt.Errorf("unexpected check: %v", chk)
r.Fatalf("unexpected check: %v", chk)
}
}
return true, nil
}); err != nil {
t.Fatal(err)
}
})
// Check the local state.
if len(agent.state.checks) != 1 {
@ -1144,21 +1113,14 @@ func TestAgentAntiEntropy_Check_DeferSync(t *testing.T) {
Node: agent.config.NodeName,
}
var checks structs.IndexedHealthChecks
if err := testrpc.WaitForResult(func() (bool, error) {
retry.Run("", t, func(r *retry.R) {
if err := agent.RPC("Health.NodeChecks", &req, &checks); err != nil {
return false, fmt.Errorf("err: %v", err)
r.Fatalf("err: %v", err)
}
// Verify checks in place
if len(checks.HealthChecks) != 2 {
return false, fmt.Errorf("checks: %v", check)
if got, want := len(checks.HealthChecks), 2; got != want {
r.Fatalf("got %d health checks want %d", got, want)
}
return true, nil
}); err != nil {
t.Fatal(err)
}
})
// Update the check output! Should be deferred
agent.state.UpdateCheck("web", api.HealthPassing, "output")
@ -1178,11 +1140,10 @@ func TestAgentAntiEntropy_Check_DeferSync(t *testing.T) {
}
}
}
// Wait for a deferred update
if err := testrpc.WaitForResult(func() (bool, error) {
retry.Run("", t, func(r *retry.R) {
if err := agent.RPC("Health.NodeChecks", &req, &checks); err != nil {
return false, err
r.Fatal(err)
}
// Verify updated
@ -1190,15 +1151,11 @@ func TestAgentAntiEntropy_Check_DeferSync(t *testing.T) {
switch chk.CheckID {
case "web":
if chk.Output != "output" {
return false, fmt.Errorf("no update: %v", chk)
r.Fatalf("no update: %v", chk)
}
}
}
return true, nil
}); err != nil {
t.Fatal(err)
}
})
// Change the output in the catalog to force it out of sync.
eCopy := check.Clone()
@ -1284,11 +1241,10 @@ func TestAgentAntiEntropy_Check_DeferSync(t *testing.T) {
}
}
}
// Wait for the deferred update.
if err := testrpc.WaitForResult(func() (bool, error) {
retry.Run("", t, func(r *retry.R) {
if err := agent.RPC("Health.NodeChecks", &req, &checks); err != nil {
return false, err
r.Fatal(err)
}
// Verify updated
@ -1296,15 +1252,12 @@ func TestAgentAntiEntropy_Check_DeferSync(t *testing.T) {
switch chk.CheckID {
case "web":
if chk.Output != "deferred" {
return false, fmt.Errorf("no update: %v", chk)
r.Fatalf("no update: %v", chk)
}
}
}
})
return true, nil
}); err != nil {
t.Fatal(err)
}
}
func TestAgentAntiEntropy_NodeInfo(t *testing.T) {
@ -1336,27 +1289,26 @@ func TestAgentAntiEntropy_NodeInfo(t *testing.T) {
Node: agent.config.NodeName,
}
var services structs.IndexedNodeServices
// Wait for the sync
if err := testrpc.WaitForResult(func() (bool, error) {
if err := agent.RPC("Catalog.NodeServices", &req, &services); err != nil {
return false, fmt.Errorf("err: %v", err)
}
// Wait for the sync
retry.Run("", t, func(r *retry.R) {
// Make sure we synced our node info - this should have ridden on the
// "consul" service sync
id := services.NodeServices.Node.ID
addrs := services.NodeServices.Node.TaggedAddresses
meta := services.NodeServices.Node.Meta
if id != conf.NodeID ||
!reflect.DeepEqual(addrs, conf.TaggedAddresses) ||
!reflect.DeepEqual(meta, conf.Meta) {
return false, fmt.Errorf("bad: %v", services.NodeServices.Node)
}
return true, nil
}); err != nil {
t.Fatal(err)
}
if err := agent.RPC("Catalog.NodeServices", &req, &services); err != nil {
r.Fatalf("err: %v", err)
}
// Make sure we synced our node info - this should have ridden on the
// "consul" service sync
id := services.NodeServices.Node.ID
addrs := services.NodeServices.Node.TaggedAddresses
meta := services.NodeServices.Node.Meta
if id != conf.NodeID ||
!reflect.DeepEqual(addrs, conf.TaggedAddresses) ||
!reflect.DeepEqual(meta, conf.Meta) {
r.Fatalf("bad: %v", services.NodeServices.Node)
}
})
// Blow away the catalog version of the node info
if err := agent.RPC("Catalog.Register", args, &out); err != nil {
@ -1365,26 +1317,26 @@ func TestAgentAntiEntropy_NodeInfo(t *testing.T) {
// Trigger anti-entropy run and wait
agent.StartSync()
// Wait for the sync - this should have been a sync of just the
// node info
if err := testrpc.WaitForResult(func() (bool, error) {
if err := agent.RPC("Catalog.NodeServices", &req, &services); err != nil {
return false, fmt.Errorf("err: %v", err)
}
// Wait for the sync - this should have been a sync of just the
// node info
retry.Run("", t, func(r *retry.R) {
if err := agent.RPC("Catalog.NodeServices", &req, &services); err != nil {
r.Fatalf("err: %v", err)
}
id := services.NodeServices.Node.ID
addrs := services.NodeServices.Node.TaggedAddresses
meta := services.NodeServices.Node.Meta
if id != conf.NodeID ||
!reflect.DeepEqual(addrs, conf.TaggedAddresses) ||
!reflect.DeepEqual(meta, conf.Meta) {
r.Fatalf("bad: %v", services.NodeServices.Node)
}
})
id := services.NodeServices.Node.ID
addrs := services.NodeServices.Node.TaggedAddresses
meta := services.NodeServices.Node.Meta
if id != conf.NodeID ||
!reflect.DeepEqual(addrs, conf.TaggedAddresses) ||
!reflect.DeepEqual(meta, conf.Meta) {
return false, fmt.Errorf("bad: %v", services.NodeServices.Node)
}
return true, nil
}); err != nil {
t.Fatal(err)
}
}
func TestAgentAntiEntropy_deleteService_fails(t *testing.T) {
@ -1542,7 +1494,6 @@ func TestAgent_nestedPauseResume(t *testing.T) {
}
}()
l.Resume()
}
func TestAgent_sendCoordinate(t *testing.T) {
@ -1563,19 +1514,16 @@ func TestAgent_sendCoordinate(t *testing.T) {
Datacenter: agent.config.Datacenter,
}
var reply structs.IndexedCoordinates
if err := testrpc.WaitForResult(func() (bool, error) {
retry.Run("", t, func(r *retry.R) {
if err := agent.RPC("Coordinate.ListNodes", &req, &reply); err != nil {
return false, fmt.Errorf("err: %s", err)
r.Fatalf("err: %s", err)
}
if len(reply.Coordinates) != 1 {
return false, fmt.Errorf("expected a coordinate: %v", reply)
r.Fatalf("expected a coordinate: %v", reply)
}
coord := reply.Coordinates[0]
if coord.Node != agent.config.NodeName || coord.Coord == nil {
return false, fmt.Errorf("bad: %v", coord)
r.Fatalf("bad: %v", coord)
}
return true, nil
}); err != nil {
t.Fatal(err)
}
})
}

View File

@ -11,7 +11,7 @@ import (
"github.com/hashicorp/consul/api"
"github.com/hashicorp/consul/consul/structs"
"github.com/hashicorp/consul/testutil"
"github.com/hashicorp/consul/testutil/retry"
)
func TestOperator_RaftConfiguration(t *testing.T) {
@ -451,33 +451,27 @@ func TestOperator_ServerHealth(t *testing.T) {
if err != nil {
t.Fatalf("err: %v", err)
}
if err := testutil.WaitForResult(func() (bool, error) {
retry.Run("", t, func(r *retry.R) {
resp := httptest.NewRecorder()
obj, err := srv.OperatorServerHealth(resp, req)
if err != nil {
return false, fmt.Errorf("err: %v", err)
r.Fatalf("err: %v", err)
}
if resp.Code != 200 {
return false, fmt.Errorf("bad code: %d", resp.Code)
r.Fatalf("bad code: %d", resp.Code)
}
out, ok := obj.(*api.OperatorHealthReply)
if !ok {
return false, fmt.Errorf("unexpected: %T", obj)
r.Fatalf("unexpected: %T", obj)
}
if len(out.Servers) != 1 ||
!out.Servers[0].Healthy ||
out.Servers[0].Name != srv.agent.config.NodeName ||
out.Servers[0].SerfStatus != "alive" ||
out.FailureTolerance != 0 {
return false, fmt.Errorf("bad: %v", out)
r.Fatalf("bad: %v", out)
}
return true, nil
}); err != nil {
t.Fatal(err)
}
})
}, cb)
}
@ -493,30 +487,24 @@ func TestOperator_ServerHealth_Unhealthy(t *testing.T) {
if err != nil {
t.Fatalf("err: %v", err)
}
if err := testutil.WaitForResult(func() (bool, error) {
retry.Run("", t, func(r *retry.R) {
resp := httptest.NewRecorder()
obj, err := srv.OperatorServerHealth(resp, req)
if err != nil {
return false, fmt.Errorf("err: %v", err)
r.Fatalf("err: %v", err)
}
if resp.Code != 429 {
return false, fmt.Errorf("bad code: %d", resp.Code)
r.Fatalf("bad code: %d", resp.Code)
}
out, ok := obj.(*api.OperatorHealthReply)
if !ok {
return false, fmt.Errorf("unexpected: %T", obj)
r.Fatalf("unexpected: %T", obj)
}
if len(out.Servers) != 1 ||
out.Healthy ||
out.Servers[0].Name != srv.agent.config.NodeName {
return false, fmt.Errorf("bad: %v", out)
r.Fatalf("bad: %#v", out.Servers)
}
return true, nil
}); err != nil {
t.Fatal(err)
}
})
}, cb)
}

View File

@ -7,6 +7,7 @@ import (
"github.com/hashicorp/consul/consul/structs"
"github.com/hashicorp/consul/testrpc"
"github.com/hashicorp/consul/testutil/retry"
)
func TestValidateUserEventParams(t *testing.T) {
@ -174,12 +175,11 @@ func TestFireReceiveEvent(t *testing.T) {
if err != nil {
t.Fatalf("err: %v", err)
}
if err := testrpc.WaitForResult(func() (bool, error) {
return len(agent.UserEvents()) == 1, nil
}); err != nil {
t.Fatal(err)
}
retry.Run("", t, func(r *retry.R) {
if got, want := len(agent.UserEvents()), 1; got != want {
r.Fatalf("got %d events want %d", got, want)
}
})
last := agent.LastUserEvent()
if last.ID != p2.ID {

View File

@ -1,13 +1,12 @@
package command
import (
"errors"
"fmt"
"strings"
"testing"
"github.com/hashicorp/consul/command/base"
"github.com/hashicorp/consul/testutil"
"github.com/hashicorp/consul/testutil/retry"
"github.com/hashicorp/serf/serf"
"github.com/mitchellh/cli"
)
@ -56,14 +55,12 @@ func TestForceLeaveCommandRun(t *testing.T) {
if len(m) != 2 {
t.Fatalf("should have 2 members: %#v", m)
}
if err := testutil.WaitForResult(func() (bool, error) {
retry.Run("", t, func(r *retry.R) {
m = a1.agent.LANMembers()
success := m[1].Status == serf.StatusLeft
return success, errors.New(m[1].Status.String())
}); err != nil {
t.Fatalf("member status is %v, should be left", err)
}
if got, want := m[1].Status, serf.StatusLeft; got != want {
r.Fatalf("got status %q want %q", got, want)
}
})
}
func TestForceLeaveCommandRun_noAddrs(t *testing.T) {

View File

@ -9,7 +9,7 @@ import (
"github.com/hashicorp/consul/command/agent"
"github.com/hashicorp/consul/command/base"
"github.com/hashicorp/consul/consul/structs"
"github.com/hashicorp/consul/testutil"
"github.com/hashicorp/consul/testutil/retry"
"github.com/hashicorp/serf/coordinate"
"github.com/mitchellh/cli"
)
@ -106,24 +106,19 @@ func TestRTTCommand_Run_LAN(t *testing.T) {
a.config.NodeName,
"dogs",
}
// Wait for the updates to get flushed to the data store.
if err := testutil.WaitForResult(func() (bool, error) {
retry.Run("", t, func(r *retry.R) {
code := c.Run(args)
if code != 0 {
return false, fmt.Errorf("bad: %d: %#v", code, ui.ErrorWriter.String())
r.Fatalf("bad: %d: %#v", code, ui.ErrorWriter.String())
}
// Make sure the proper RTT was reported in the output.
expected := fmt.Sprintf("rtt: %s", dist_str)
if !strings.Contains(ui.OutputWriter.String(), expected) {
return false, fmt.Errorf("bad: %#v", ui.OutputWriter.String())
r.Fatalf("bad: %#v", ui.OutputWriter.String())
}
return true, nil
}); err != nil {
t.Fatal(err)
}
})
// Default to the agent's node.
{

View File

@ -12,6 +12,7 @@ import (
"github.com/hashicorp/consul/consul/structs"
"github.com/hashicorp/consul/testrpc"
"github.com/hashicorp/consul/testutil/retry"
)
func TestACLReplication_Sorter(t *testing.T) {
@ -363,21 +364,21 @@ func TestACLReplication(t *testing.T) {
}
}
checkSame := func() (bool, error) {
checkSame := func() error {
index, remote, err := s1.fsm.State().ACLList(nil)
if err != nil {
return false, err
return err
}
_, local, err := s2.fsm.State().ACLList(nil)
if err != nil {
return false, err
return err
}
if len(remote) != len(local) {
return false, nil
if got, want := len(remote), len(local); got != want {
return fmt.Errorf("got %d remote ACLs want %d", got, want)
}
for i, acl := range remote {
if !acl.IsSame(local[i]) {
return false, nil
return fmt.Errorf("ACLs differ")
}
}
@ -388,16 +389,17 @@ func TestACLReplication(t *testing.T) {
if !status.Enabled || !status.Running ||
status.ReplicatedIndex != index ||
status.SourceDatacenter != "dc1" {
return false, nil
return fmt.Errorf("ACL replication status differs")
}
return true, nil
return nil
}
// Wait for the replica to converge.
if err := testrpc.WaitForResult(checkSame); err != nil {
t.Fatalf("ACLs didn't converge")
}
retry.Run("", t, func(r *retry.R) {
if err := checkSame(); err != nil {
r.Fatal(err)
}
})
// Create more new tokens.
for i := 0; i < 1000; i++ {
@ -416,11 +418,12 @@ func TestACLReplication(t *testing.T) {
t.Fatalf("err: %v", err)
}
}
// Wait for the replica to converge.
if err := testrpc.WaitForResult(checkSame); err != nil {
t.Fatalf("ACLs didn't converge")
}
retry.Run("", t, func(r *retry.R) {
if err := checkSame(); err != nil {
r.Fatal(err)
}
})
// Delete a token.
arg := structs.ACLRequest{
@ -435,9 +438,10 @@ func TestACLReplication(t *testing.T) {
if err := s1.RPC("ACL.Apply", &arg, &dontCare); err != nil {
t.Fatalf("err: %v", err)
}
// Wait for the replica to converge.
if err := testrpc.WaitForResult(checkSame); err != nil {
t.Fatalf("ACLs didn't converge")
}
retry.Run("", t, func(r *retry.R) {
if err := checkSame(); err != nil {
r.Fatal(err)
}
})
}
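A minimal sketch of the shape TestACLReplication now uses: the convergence check is factored into an error-returning closure that each retry.Run block wraps. fetchRemote and fetchLocal are hypothetical stand-ins for the state-store lookups in the real test:

package sketch

import (
	"fmt"
	"testing"

	"github.com/hashicorp/consul/testutil/retry"
)

func fetchRemote() []string { return []string{"a", "b"} }
func fetchLocal() []string  { return []string{"a", "b"} }

func TestConvergence_sketch(t *testing.T) {
	// The same closure is reused at every wait point instead of
	// duplicating the comparison inside each retry callback.
	checkSame := func() error {
		remote, local := fetchRemote(), fetchLocal()
		if got, want := len(remote), len(local); got != want {
			return fmt.Errorf("got %d remote entries want %d", got, want)
		}
		return nil
	}
	retry.Run("", t, func(r *retry.R) {
		if err := checkSame(); err != nil {
			r.Fatal(err)
		}
	})
}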

View File

@ -10,6 +10,7 @@ import (
"github.com/hashicorp/consul/acl"
"github.com/hashicorp/consul/consul/structs"
"github.com/hashicorp/consul/testrpc"
"github.com/hashicorp/consul/testutil/retry"
)
var testACLPolicy = `
@ -225,18 +226,15 @@ func TestACL_NonAuthority_NotFound(t *testing.T) {
defer s2.Shutdown()
// Try to join
addr := fmt.Sprintf("127.0.0.1:%d",
s1.config.SerfLANConfig.MemberlistConfig.BindPort)
addr := fmt.Sprintf("127.0.0.1:%d", s1.config.SerfLANConfig.MemberlistConfig.BindPort)
if _, err := s2.JoinLAN([]string{addr}); err != nil {
t.Fatalf("err: %v", err)
}
if err := testrpc.WaitForResult(func() (bool, error) {
p1, _ := s1.numPeers()
return p1 == 2, fmt.Errorf("%d", p1)
}); err != nil {
t.Fatal(err)
}
retry.Run("", t, func(r *retry.R) {
if got, want := numPeers(s1), 2; got != want {
r.Fatalf("got %d peers want %d", got, want)
}
})
client := rpcClient(t, s1)
defer client.Close()
@ -282,13 +280,12 @@ func TestACL_NonAuthority_Found(t *testing.T) {
if _, err := s2.JoinLAN([]string{addr}); err != nil {
t.Fatalf("err: %v", err)
}
retry.Run("", t, func(r *retry.R) {
if got, want := numPeers(s1), 2; got != want {
r.Fatalf("got %d peers want %d", got, want)
}
})
if err := testrpc.WaitForResult(func() (bool, error) {
p1, _ := s1.numPeers()
return p1 == 2, fmt.Errorf("%d", p1)
}); err != nil {
t.Fatal(err)
}
testrpc.WaitForLeader(t, s1.RPC, "dc1")
// Create a new token
@ -358,13 +355,12 @@ func TestACL_NonAuthority_Management(t *testing.T) {
if _, err := s2.JoinLAN([]string{addr}); err != nil {
t.Fatalf("err: %v", err)
}
retry.Run("", t, func(r *retry.R) {
if got, want := numPeers(s1), 2; got != want {
r.Fatalf("got %d peers want %d", got, want)
}
})
if err := testrpc.WaitForResult(func() (bool, error) {
p1, _ := s1.numPeers()
return p1 == 2, fmt.Errorf("%d", p1)
}); err != nil {
t.Fatal(err)
}
testrpc.WaitForLeader(t, s1.RPC, "dc1")
// find the non-authoritative server
@ -415,13 +411,12 @@ func TestACL_DownPolicy_Deny(t *testing.T) {
if _, err := s2.JoinLAN([]string{addr}); err != nil {
t.Fatalf("err: %v", err)
}
retry.Run("", t, func(r *retry.R) {
if got, want := numPeers(s1), 2; got != want {
r.Fatalf("got %d peers want %d", got, want)
}
})
if err := testrpc.WaitForResult(func() (bool, error) {
p1, _ := s1.numPeers()
return p1 == 2, fmt.Errorf("%d", p1)
}); err != nil {
t.Fatal(err)
}
testrpc.WaitForLeader(t, s1.RPC, "dc1")
// Create a new token
@ -489,13 +484,12 @@ func TestACL_DownPolicy_Allow(t *testing.T) {
if _, err := s2.JoinLAN([]string{addr}); err != nil {
t.Fatalf("err: %v", err)
}
retry.Run("", t, func(r *retry.R) {
if got, want := numPeers(s1), 2; got != want {
r.Fatalf("got %d peers want %d", got, want)
}
})
if err := testrpc.WaitForResult(func() (bool, error) {
p1, _ := s1.numPeers()
return p1 == 2, fmt.Errorf("%d", p1)
}); err != nil {
t.Fatal(err)
}
testrpc.WaitForLeader(t, s1.RPC, "dc1")
// Create a new token
@ -565,13 +559,12 @@ func TestACL_DownPolicy_ExtendCache(t *testing.T) {
if _, err := s2.JoinLAN([]string{addr}); err != nil {
t.Fatalf("err: %v", err)
}
retry.Run("", t, func(r *retry.R) {
if got, want := numPeers(s1), 2; got != want {
r.Fatalf("got %d peers want %d", got, want)
}
})
if err := testrpc.WaitForResult(func() (bool, error) {
p1, _ := s1.numPeers()
return p1 == 2, fmt.Errorf("%d", p1)
}); err != nil {
t.Fatal(err)
}
testrpc.WaitForLeader(t, s1.RPC, "dc1")
// Create a new token
@ -684,27 +677,23 @@ func TestACL_Replication(t *testing.T) {
if err := s1.RPC("ACL.Apply", &arg, &id); err != nil {
t.Fatalf("err: %v", err)
}
// Wait for replication to occur.
if err := testrpc.WaitForResult(func() (bool, error) {
retry.Run("", t, func(r *retry.R) {
_, acl, err := s2.fsm.State().ACLGet(nil, id)
if err != nil {
return false, err
r.Fatal(err)
}
if acl == nil {
return false, nil
r.Fatal(nil)
}
_, acl, err = s3.fsm.State().ACLGet(nil, id)
if err != nil {
return false, err
r.Fatal(err)
}
if acl == nil {
return false, nil
r.Fatal(nil)
}
return true, nil
}); err != nil {
t.Fatal(err)
}
})
// Kill the ACL datacenter.
s1.Shutdown()
@ -1915,3 +1904,11 @@ service "service" {
t.Fatalf("err: %v", err)
}
}
func numPeers(s *Server) int {
n, err := s.numPeers()
if err != nil {
panic(err)
}
return n
}
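The numPeers helper panics instead of returning the error so the retry closures above stay one-liners; a panic inside a test still fails it with a stack trace. A usage sketch, assuming the same package and imports as the file above and a hypothetical *Server named srv:

func waitForThreePeers(t *testing.T, srv *Server) {
	retry.Run("", t, func(r *retry.R) {
		if got, want := numPeers(srv), 3; got != want {
			r.Fatalf("got %d peers want %d", got, want)
		}
	})
}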

View File

@ -7,6 +7,7 @@ import (
"time"
"github.com/hashicorp/consul/testrpc"
"github.com/hashicorp/consul/testutil/retry"
"github.com/hashicorp/raft"
"github.com/hashicorp/serf/serf"
)
@ -49,12 +50,11 @@ func testCleanupDeadServer(t *testing.T, raftVersion int) {
}
for _, s := range servers {
if err := testrpc.WaitForResult(func() (bool, error) {
peers, _ := s.numPeers()
return peers == 3, nil
}); err != nil {
t.Fatal(err)
}
retry.Run("", t, func(r *retry.R) {
if got, want := numPeers(s), 3; got != want {
r.Fatalf("got %d peers want %d", got, want)
}
})
}
// Bring up a new server
@ -64,18 +64,17 @@ func testCleanupDeadServer(t *testing.T, raftVersion int) {
// Kill a non-leader server
s3.Shutdown()
if err := testrpc.WaitForResult(func() (bool, error) {
retry.Run("", t, func(r *retry.R) {
alive := 0
for _, m := range s1.LANMembers() {
if m.Status == serf.StatusAlive {
alive++
}
}
return alive == 2, nil
}); err != nil {
t.Fatal(err)
}
if alive != 2 {
r.Fatal(nil)
}
})
// Join the new server
if _, err := s4.JoinLAN([]string{addr}); err != nil {
@ -85,12 +84,11 @@ func testCleanupDeadServer(t *testing.T, raftVersion int) {
// Make sure the dead server is removed and we're back to 3 total peers
for _, s := range servers {
if err := testrpc.WaitForResult(func() (bool, error) {
peers, _ := s.numPeers()
return peers == 3, nil
}); err != nil {
t.Fatal(err)
}
retry.Run("", t, func(r *retry.R) {
if got, want := numPeers(s), 3; got != want {
r.Fatalf("got %d peers want %d", got, want)
}
})
}
}
@ -121,8 +119,7 @@ func TestAutopilot_CleanupDeadServerPeriodic(t *testing.T) {
servers := []*Server{s1, s2, s3, s4}
// Join the servers to s1
addr := fmt.Sprintf("127.0.0.1:%d",
s1.config.SerfLANConfig.MemberlistConfig.BindPort)
addr := fmt.Sprintf("127.0.0.1:%d", s1.config.SerfLANConfig.MemberlistConfig.BindPort)
for _, s := range servers[1:] {
if _, err := s.JoinLAN([]string{addr}); err != nil {
@ -131,12 +128,11 @@ func TestAutopilot_CleanupDeadServerPeriodic(t *testing.T) {
}
for _, s := range servers {
if err := testrpc.WaitForResult(func() (bool, error) {
peers, _ := s.numPeers()
return peers == 4, nil
}); err != nil {
t.Fatal(err)
}
retry.Run("", t, func(r *retry.R) {
if got, want := numPeers(s), 4; got != want {
r.Fatalf("got %d peers want %d", got, want)
}
})
}
// Kill a non-leader server
@ -144,12 +140,11 @@ func TestAutopilot_CleanupDeadServerPeriodic(t *testing.T) {
// Should be removed from the peers automatically
for _, s := range []*Server{s1, s2, s3} {
if err := testrpc.WaitForResult(func() (bool, error) {
peers, _ := s.numPeers()
return peers == 3, nil
}); err != nil {
t.Fatal(err)
}
retry.Run("", t, func(r *retry.R) {
if got, want := numPeers(s), 3; got != want {
r.Fatalf("got %d peers want %d", got, want)
}
})
}
}
@ -183,12 +178,11 @@ func TestAutopilot_CleanupStaleRaftServer(t *testing.T) {
}
for _, s := range servers {
if err := testrpc.WaitForResult(func() (bool, error) {
peers, _ := s.numPeers()
return peers == 3, nil
}); err != nil {
t.Fatal(err)
}
retry.Run("", t, func(r *retry.R) {
if got, want := numPeers(s), 3; got != want {
r.Fatalf("got %d peers want %d", got, want)
}
})
}
testrpc.WaitForLeader(t, s1.RPC, "dc1")
@ -209,12 +203,11 @@ func TestAutopilot_CleanupStaleRaftServer(t *testing.T) {
// Wait for s4 to be removed
for _, s := range []*Server{s1, s2, s3} {
if err := testrpc.WaitForResult(func() (bool, error) {
peers, _ := s.numPeers()
return peers == 3, nil
}); err != nil {
t.Fatal(err)
}
retry.Run("", t, func(r *retry.R) {
if got, want := numPeers(s), 3; got != want {
r.Fatalf("got %d peers want %d", got, want)
}
})
}
}
@ -246,39 +239,37 @@ func TestAutopilot_PromoteNonVoter(t *testing.T) {
}
testrpc.WaitForLeader(t, s1.RPC, "dc1")
// Wait for the new server to be added as a non-voter, but make sure
// it doesn't get promoted to a voter even after ServerStabilizationTime,
// because that would result in an even-numbered quorum count.
if err := testrpc.WaitForResult(func() (bool, error) {
future := s1.raft.GetConfiguration()
if err := future.Error(); err != nil {
return false, err
}
// Wait for the new server to be added as a non-voter, but make sure
// it doesn't get promoted to a voter even after ServerStabilizationTime,
// because that would result in an even-numbered quorum count.
retry.Run("", t, func(r *retry.R) {
servers := future.Configuration().Servers
future := s1.raft.GetConfiguration()
if err := future.Error(); err != nil {
r.Fatal(err)
}
if len(servers) != 2 {
return false, fmt.Errorf("bad: %v", servers)
}
if servers[1].Suffrage != raft.Nonvoter {
return false, fmt.Errorf("bad: %v", servers)
}
health := s1.getServerHealth(string(servers[1].ID))
if health == nil {
return false, fmt.Errorf("nil health")
}
if !health.Healthy {
return false, fmt.Errorf("bad: %v", health)
}
if time.Now().Sub(health.StableSince) < s1.config.AutopilotConfig.ServerStabilizationTime {
return false, fmt.Errorf("stable period not elapsed")
}
servers := future.Configuration().Servers
return true, nil
}); err != nil {
t.Fatal(err)
}
if len(servers) != 2 {
r.Fatalf("bad: %v", servers)
}
if servers[1].Suffrage != raft.Nonvoter {
r.Fatalf("bad: %v", servers)
}
health := s1.getServerHealth(string(servers[1].ID))
if health == nil {
r.Fatal("nil health")
}
if !health.Healthy {
r.Fatalf("bad: %v", health)
}
if time.Now().Sub(health.StableSince) < s1.config.AutopilotConfig.ServerStabilizationTime {
r.Fatal("stable period not elapsed")
}
})
// Now add another server and make sure they both get promoted to voters after stabilization
dir3, s3 := testServerWithConfig(t, func(c *Config) {
@ -291,27 +282,21 @@ func TestAutopilot_PromoteNonVoter(t *testing.T) {
if _, err := s3.JoinLAN([]string{addr}); err != nil {
t.Fatalf("err: %v", err)
}
if err := testrpc.WaitForResult(func() (bool, error) {
retry.Run("", t, func(r *retry.R) {
future := s1.raft.GetConfiguration()
if err := future.Error(); err != nil {
return false, err
r.Fatal(err)
}
servers := future.Configuration().Servers
if len(servers) != 3 {
return false, fmt.Errorf("bad: %v", servers)
r.Fatalf("bad: %v", servers)
}
if servers[1].Suffrage != raft.Voter {
return false, fmt.Errorf("bad: %v", servers)
r.Fatalf("bad: %v", servers)
}
if servers[2].Suffrage != raft.Voter {
return false, fmt.Errorf("bad: %v", servers)
r.Fatalf("bad: %v", servers)
}
return true, nil
}); err != nil {
t.Fatal(err)
}
})
}

View File

@ -12,6 +12,7 @@ import (
"github.com/hashicorp/consul/consul/structs"
"github.com/hashicorp/consul/lib"
"github.com/hashicorp/consul/testrpc"
"github.com/hashicorp/consul/testutil/retry"
"github.com/hashicorp/consul/types"
"github.com/hashicorp/net-rpc-msgpackrpc"
)
@ -603,13 +604,12 @@ func TestCatalog_ListNodes(t *testing.T) {
if err := s1.fsm.State().EnsureNode(1, &structs.Node{Node: "foo", Address: "127.0.0.1"}); err != nil {
t.Fatalf("err: %v", err)
}
if err := testrpc.WaitForResult(func() (bool, error) {
retry.Run("", t, func(r *retry.R) {
msgpackrpc.CallWithCodec(codec, "Catalog.ListNodes", &args, &out)
return len(out.Nodes) == 2, nil
}); err != nil {
t.Fatal(err)
}
if got, want := len(out.Nodes), 2; got != want {
r.Fatalf("got %d nodes want %d", got, want)
}
})
// Server node is auto added from Serf
if out.Nodes[1].Node != s1.config.NodeName {
@ -646,13 +646,12 @@ func TestCatalog_ListNodes_NodeMetaFilter(t *testing.T) {
},
}
var out structs.IndexedNodes
if err := testrpc.WaitForResult(func() (bool, error) {
retry.Run("", t, func(r *retry.R) {
msgpackrpc.CallWithCodec(codec, "Catalog.ListNodes", &args, &out)
return len(out.Nodes) == 1, nil
}); err != nil {
t.Fatal(err)
}
if got, want := len(out.Nodes), 1; got != want {
r.Fatalf("got %d nodes want %d", got, want)
}
})
// Verify that only the correct node was returned
if out.Nodes[0].Node != "foo" {
@ -677,14 +676,17 @@ func TestCatalog_ListNodes_NodeMetaFilter(t *testing.T) {
if err != nil {
t.Fatalf("err: %v", err)
}
// Should get an empty list of nodes back
retry.Run("", t, func(r *retry.R) {
msgpackrpc.CallWithCodec(codec, "Catalog.ListNodes", &args, &out)
if len(out.Nodes) != 0 {
r.Fatal(nil)
}
})
// Should get an empty list of nodes back
if err := testrpc.WaitForResult(func() (bool, error) {
msgpackrpc.CallWithCodec(codec, "Catalog.ListNodes", &args, &out)
return len(out.Nodes) == 0, nil
}); err != nil {
t.Fatal(err)
}
}
func TestCatalog_ListNodes_StaleRead(t *testing.T) {
@ -890,12 +892,13 @@ func TestCatalog_ListNodes_DistanceSort(t *testing.T) {
Datacenter: "dc1",
}
var out structs.IndexedNodes
if err := testrpc.WaitForResult(func() (bool, error) {
retry.Run("", t, func(r *retry.R) {
msgpackrpc.CallWithCodec(codec, "Catalog.ListNodes", &args, &out)
return len(out.Nodes) == 5, nil
}); err != nil {
t.Fatal(err)
}
if got, want := len(out.Nodes), 5; got != want {
r.Fatalf("got %d nodes want %d", got, want)
}
})
if out.Nodes[0].Node != "aaa" {
t.Fatalf("bad: %v", out)
}
@ -918,12 +921,13 @@ func TestCatalog_ListNodes_DistanceSort(t *testing.T) {
Datacenter: "dc1",
Source: structs.QuerySource{Datacenter: "dc1", Node: "foo"},
}
if err := testrpc.WaitForResult(func() (bool, error) {
retry.Run("", t, func(r *retry.R) {
msgpackrpc.CallWithCodec(codec, "Catalog.ListNodes", &args, &out)
return len(out.Nodes) == 5, nil
}); err != nil {
t.Fatal(err)
}
if got, want := len(out.Nodes), 5; got != want {
r.Fatalf("got %d nodes want %d", got, want)
}
})
if out.Nodes[0].Node != "foo" {
t.Fatalf("bad: %v", out)
}

View File

@ -11,6 +11,7 @@ import (
"github.com/hashicorp/consul/consul/structs"
"github.com/hashicorp/consul/testrpc"
"github.com/hashicorp/consul/testutil/retry"
"github.com/hashicorp/net-rpc-msgpackrpc"
"github.com/hashicorp/serf/coordinate"
)
@ -349,29 +350,25 @@ func TestCoordinate_ListNodes(t *testing.T) {
if err := msgpackrpc.CallWithCodec(codec, "Coordinate.Update", &arg3, &out); err != nil {
t.Fatalf("err: %v", err)
}
// Now query back for all the nodes.
if err := testrpc.WaitForResult(func() (bool, error) {
retry.Run("", t, func(r *retry.R) {
arg := structs.DCSpecificRequest{
Datacenter: "dc1",
}
resp := structs.IndexedCoordinates{}
if err := msgpackrpc.CallWithCodec(codec, "Coordinate.ListNodes", &arg, &resp); err != nil {
t.Fatalf("err: %v", err)
r.Fatalf("err: %v", err)
}
if len(resp.Coordinates) != 3 ||
resp.Coordinates[0].Node != "bar" ||
resp.Coordinates[1].Node != "baz" ||
resp.Coordinates[2].Node != "foo" {
return false, fmt.Errorf("bad: %v", resp.Coordinates)
r.Fatalf("bad: %v", resp.Coordinates)
}
verifyCoordinatesEqual(t, resp.Coordinates[0].Coord, arg2.Coord) // bar
verifyCoordinatesEqual(t, resp.Coordinates[1].Coord, arg3.Coord) // baz
verifyCoordinatesEqual(t, resp.Coordinates[2].Coord, arg1.Coord) // foo
return true, nil
}); err != nil {
t.Fatal(err)
}
})
}
func TestCoordinate_ListNodes_ACLFilter(t *testing.T) {
@ -442,25 +439,21 @@ func TestCoordinate_ListNodes_ACLFilter(t *testing.T) {
if err := msgpackrpc.CallWithCodec(codec, "Coordinate.Update", &arg3, &out); err != nil {
t.Fatalf("err: %v", err)
}
// Wait for all the coordinate updates to apply. Since we aren't
// enforcing version 8 ACLs, this should also allow us to read
// everything back without a token.
if err := testrpc.WaitForResult(func() (bool, error) {
retry.Run("", t, func(r *retry.R) {
arg := structs.DCSpecificRequest{
Datacenter: "dc1",
}
resp := structs.IndexedCoordinates{}
if err := msgpackrpc.CallWithCodec(codec, "Coordinate.ListNodes", &arg, &resp); err != nil {
t.Fatalf("err: %v", err)
r.Fatalf("err: %v", err)
}
if len(resp.Coordinates) == 3 {
return true, nil
if got, want := len(resp.Coordinates), 3; got != want {
r.Fatalf("got %d coordinates want %d", got, want)
}
return false, fmt.Errorf("bad: %v", resp.Coordinates)
}); err != nil {
t.Fatal(err)
}
})
// Now that we've waited for the batch processing to ingest the
// coordinates we can do the rest of the requests without the loop. We

View File

@ -9,6 +9,7 @@ import (
"github.com/hashicorp/consul/consul/structs"
"github.com/hashicorp/consul/testrpc"
"github.com/hashicorp/consul/testutil/retry"
"github.com/hashicorp/net-rpc-msgpackrpc"
"github.com/hashicorp/raft"
)
@ -228,39 +229,35 @@ func TestOperator_ServerHealth(t *testing.T) {
}
testrpc.WaitForLeader(t, s1.RPC, "dc1")
if err := testrpc.WaitForResult(func() (bool, error) {
retry.Run("", t, func(r *retry.R) {
arg := structs.DCSpecificRequest{
Datacenter: "dc1",
}
var reply structs.OperatorHealthReply
err := msgpackrpc.CallWithCodec(codec, "Operator.ServerHealth", &arg, &reply)
if err != nil {
return false, fmt.Errorf("err: %v", err)
r.Fatalf("err: %v", err)
}
if !reply.Healthy {
return false, fmt.Errorf("bad: %v", reply)
r.Fatalf("bad: %v", reply)
}
if reply.FailureTolerance != 1 {
return false, fmt.Errorf("bad: %v", reply)
r.Fatalf("bad: %v", reply)
}
if len(reply.Servers) != 3 {
return false, fmt.Errorf("bad: %v", reply)
r.Fatalf("bad: %v", reply)
}
// Leader should have LastContact == 0, others should be positive
for _, s := range reply.Servers {
isLeader := s1.raft.Leader() == raft.ServerAddress(s.Address)
if isLeader && s.LastContact != 0 {
return false, fmt.Errorf("bad: %v", reply)
r.Fatalf("bad: %v", reply)
}
if !isLeader && s.LastContact <= 0 {
return false, fmt.Errorf("bad: %v", reply)
r.Fatalf("bad: %v", reply)
}
}
return true, nil
}); err != nil {
t.Fatal(err)
}
})
}
func TestOperator_ServerHealth_UnsupportedRaftVersion(t *testing.T) {

View File

@ -15,6 +15,7 @@ import (
"github.com/hashicorp/consul/api"
"github.com/hashicorp/consul/consul/structs"
"github.com/hashicorp/consul/testrpc"
"github.com/hashicorp/consul/testutil/retry"
"github.com/hashicorp/net-rpc-msgpackrpc"
"github.com/hashicorp/serf/coordinate"
)
@ -1454,11 +1455,11 @@ func TestPreparedQuery_Execute(t *testing.T) {
if _, err := s2.JoinWAN([]string{addr}); err != nil {
t.Fatalf("err: %v", err)
}
if err := testrpc.WaitForResult(func() (bool, error) {
return len(s1.WANMembers()) > 1, nil
}); err != nil {
t.Fatalf("Failed waiting for WAN join: %v", err)
}
retry.Run("", t, func(r *retry.R) {
if got, want := len(s1.WANMembers()), 2; got != want {
r.Fatalf("got %d WAN members want %d", got, want)
}
})
// Create an ACL with read permission to the service.
var execToken string
@ -2703,11 +2704,11 @@ func TestPreparedQuery_Wrapper(t *testing.T) {
if _, err := s2.JoinWAN([]string{addr}); err != nil {
t.Fatalf("err: %v", err)
}
if err := testrpc.WaitForResult(func() (bool, error) {
return len(s1.WANMembers()) > 1, nil
}); err != nil {
t.Fatalf("Failed waiting for WAN join: %v", err)
}
retry.Run("", t, func(r *retry.R) {
if got, want := len(s1.WANMembers()), 2; got != want {
r.Fatalf("got %d WAN members want %d", got, want)
}
})
// Try all the operations on a real server via the wrapper.
wrapper := &queryServerWrapper{s1}

View File

@ -11,6 +11,7 @@ import (
"time"
"github.com/hashicorp/consul/testrpc"
"github.com/hashicorp/consul/testutil/retry"
"github.com/hashicorp/consul/types"
"github.com/hashicorp/go-uuid"
)
@ -156,19 +157,14 @@ func TestServer_JoinLAN(t *testing.T) {
if _, err := s2.JoinLAN([]string{addr}); err != nil {
t.Fatalf("err: %v", err)
}
// Check the members
if err := testrpc.WaitForResult(func() (bool, error) {
return len(s1.LANMembers()) == 2, nil
}); err != nil {
t.Fatal("bad len")
}
if err := testrpc.WaitForResult(func() (bool, error) {
return len(s2.LANMembers()) == 2, nil
}); err != nil {
t.Fatal("bad len")
}
retry.Run("", t, func(r *retry.R) {
if got, want := len(s1.LANMembers()), 2; got != want {
r.Fatalf("got %d s1 LAN members want %d", got, want)
}
if got, want := len(s2.LANMembers()), 2; got != want {
r.Fatalf("got %d s2 LAN members want %d", got, want)
}
})
}
func TestServer_JoinWAN(t *testing.T) {
@ -186,30 +182,24 @@ func TestServer_JoinWAN(t *testing.T) {
if _, err := s2.JoinWAN([]string{addr}); err != nil {
t.Fatalf("err: %v", err)
}
// Check the members
if err := testrpc.WaitForResult(func() (bool, error) {
return len(s1.WANMembers()) == 2, nil
}); err != nil {
t.Fatal("bad len")
}
if err := testrpc.WaitForResult(func() (bool, error) {
return len(s2.WANMembers()) == 2, nil
}); err != nil {
t.Fatal("bad len")
}
retry.Run("", t, func(r *retry.R) {
if got, want := len(s1.WANMembers()), 2; got != want {
r.Fatalf("got %d s1 WAN members want %d", got, want)
}
if got, want := len(s2.WANMembers()), 2; got != want {
r.Fatalf("got %d s2 WAN members want %d", got, want)
}
})
// Check the router has both
if len(s1.router.GetDatacenters()) != 2 {
t.Fatalf("remote consul missing")
}
if err := testrpc.WaitForResult(func() (bool, error) {
return len(s2.router.GetDatacenters()) == 2, nil
}); err != nil {
t.Fatal("remote consul missing")
}
retry.Run("", t, func(r *retry.R) {
if got, want := len(s2.router.GetDatacenters()), 2; got != want {
r.Fatalf("got %d data centers want %d", got, want)
}
})
}
func TestServer_JoinWAN_Flood(t *testing.T) {
@ -228,12 +218,12 @@ func TestServer_JoinWAN_Flood(t *testing.T) {
t.Fatalf("err: %v", err)
}
for i, s := range []*Server{s1, s2} {
if err := testrpc.WaitForResult(func() (bool, error) {
return len(s.WANMembers()) == 2, nil
}); err != nil {
t.Fatalf("bad len for server %d", i)
}
for _, s := range []*Server{s1, s2} {
retry.Run("", t, func(r *retry.R) {
if got, want := len(s.WANMembers()), 2; got != want {
r.Fatalf("got %d WAN members want %d", got, want)
}
})
}
dir3, s3 := testServer(t)
@ -248,12 +238,12 @@ func TestServer_JoinWAN_Flood(t *testing.T) {
t.Fatalf("err: %v", err)
}
for i, s := range []*Server{s1, s2, s3} {
if err := testrpc.WaitForResult(func() (bool, error) {
return len(s.WANMembers()) == 3, nil
}); err != nil {
t.Fatalf("bad len for server %d", i)
}
for _, s := range []*Server{s1, s2, s3} {
retry.Run("", t, func(r *retry.R) {
if got, want := len(s.WANMembers()), 3; got != want {
r.Fatalf("got %d WAN members want %d", got, want)
}
})
}
}
@ -291,34 +281,20 @@ func TestServer_JoinSeparateLanAndWanAddresses(t *testing.T) {
if _, err := s3.JoinLAN([]string{addrs2}); err != nil {
t.Fatalf("err: %v", err)
}
// Check the WAN members on s1
if err := testrpc.WaitForResult(func() (bool, error) {
return len(s1.WANMembers()) == 3, nil
}); err != nil {
t.Fatal("bad len")
}
// Check the WAN members on s2
if err := testrpc.WaitForResult(func() (bool, error) {
return len(s2.WANMembers()) == 3, nil
}); err != nil {
t.Fatal("bad len")
}
// Check the LAN members on s2
if err := testrpc.WaitForResult(func() (bool, error) {
return len(s2.LANMembers()) == 2, nil
}); err != nil {
t.Fatal("bad len")
}
// Check the LAN members on s3
if err := testrpc.WaitForResult(func() (bool, error) {
return len(s3.LANMembers()) == 2, nil
}); err != nil {
t.Fatal("bad len")
}
retry.Run("", t, func(r *retry.R) {
if got, want := len(s1.WANMembers()), 2; got != want {
r.Fatalf("got %d s1 WAN members want %d", got, want)
}
if got, want := len(s2.WANMembers()), 2; got != want {
r.Fatalf("got %d s2 WAN members want %d", got, want)
}
if got, want := len(s2.LANMembers()), 2; got != want {
r.Fatalf("got %d s2 LAN members want %d", got, want)
}
if got, want := len(s3.LANMembers()), 2; got != want {
r.Fatalf("got %d s3 WAN members want %d", got, want)
}
})
// Check the router has both
if len(s1.router.GetDatacenters()) != 2 {
@ -373,22 +349,14 @@ func TestServer_LeaveLeader(t *testing.T) {
t.Fatalf("err: %v", err)
}
var p1 int
var p2 int
if err := testrpc.WaitForResult(func() (bool, error) {
p1, _ = s1.numPeers()
return p1 == 2, fmt.Errorf("%d", p1)
}); err != nil {
t.Fatalf("should have 2 peers %s", err)
}
if err := testrpc.WaitForResult(func() (bool, error) {
p2, _ = s2.numPeers()
return p2 == 2, fmt.Errorf("%d", p1)
}); err != nil {
t.Fatalf("should have 2 peers %s", err)
}
retry.Run("", t, func(r *retry.R) {
if got, want := numPeers(s1), 2; got != want {
r.Fatalf("got %d s1 peers want %d", got, want)
}
if got, want := numPeers(s2), 2; got != want {
r.Fatalf("got %d s2 peers want %d", got, want)
}
})
// Issue a leave to the leader
for _, s := range []*Server{s1, s2} {
@ -401,14 +369,14 @@ func TestServer_LeaveLeader(t *testing.T) {
}
// Should lose a peer
for _, s := range []*Server{s1, s2} {
if err := testrpc.WaitForResult(func() (bool, error) {
p1, _ = s.numPeers()
return p1 == 1, nil
}); err != nil {
t.Fatalf("should have 1 peer %s", err)
retry.Run("", t, func(r *retry.R) {
if got, want := numPeers(s1), 1; got != want {
r.Fatalf("got %d s1 peers want %d", got, want)
}
}
if got, want := numPeers(s2), 1; got != want {
r.Fatalf("got %d s2 peers want %d", got, want)
}
})
}
func TestServer_Leave(t *testing.T) {
@ -428,22 +396,14 @@ func TestServer_Leave(t *testing.T) {
t.Fatalf("err: %v", err)
}
var p1 int
var p2 int
if err := testrpc.WaitForResult(func() (bool, error) {
p1, _ = s1.numPeers()
return p1 == 2, fmt.Errorf("%d", p1)
}); err != nil {
t.Fatalf("should have 2 peers %s", err)
}
if err := testrpc.WaitForResult(func() (bool, error) {
p2, _ = s2.numPeers()
return p2 == 2, fmt.Errorf("%d", p1)
}); err != nil {
t.Fatalf("should have 2 peers %s", err)
}
retry.Run("", t, func(r *retry.R) {
if got, want := numPeers(s1), 2; got != want {
r.Fatalf("got %d s1 peers want %d", got, want)
}
if got, want := numPeers(s2), 2; got != want {
r.Fatalf("got %d s2 peers want %d", got, want)
}
})
// Issue a leave to the non-leader
for _, s := range []*Server{s1, s2} {
@ -456,14 +416,14 @@ func TestServer_Leave(t *testing.T) {
}
// Should lose a peer
for _, s := range []*Server{s1, s2} {
if err := testrpc.WaitForResult(func() (bool, error) {
p1, _ = s.numPeers()
return p1 == 1, nil
}); err != nil {
t.Fatalf("should have 1 peer %s", err)
retry.Run("", t, func(r *retry.R) {
if got, want := numPeers(s1), 1; got != want {
r.Fatalf("got %d s1 peers want %d", got, want)
}
}
if got, want := numPeers(s2), 1; got != want {
r.Fatalf("got %d s2 peers want %d", got, want)
}
})
}
func TestServer_RPC(t *testing.T) {
@ -507,34 +467,23 @@ func TestServer_JoinLAN_TLS(t *testing.T) {
if _, err := s2.JoinLAN([]string{addr}); err != nil {
t.Fatalf("err: %v", err)
}
// Check the members
if err := testrpc.WaitForResult(func() (bool, error) {
return len(s1.LANMembers()) == 2, nil
}); err != nil {
t.Fatal("bad len")
}
if err := testrpc.WaitForResult(func() (bool, error) {
return len(s2.LANMembers()) == 2, nil
}); err != nil {
t.Fatal("bad len")
}
retry.Run("", t, func(r *retry.R) {
if got, want := len(s1.LANMembers()), 2; got != want {
r.Fatalf("got %d s1 LAN members want %d", got, want)
}
if got, want := len(s2.LANMembers()), 2; got != want {
r.Fatalf("got %d s2 LAN members want %d", got, want)
}
})
// Verify Raft has established a peer
if err := testrpc.WaitForResult(func() (bool, error) {
peers, _ := s1.numPeers()
return peers == 2, nil
}); err != nil {
t.Fatalf("no peers")
}
if err := testrpc.WaitForResult(func() (bool, error) {
peers, _ := s2.numPeers()
return peers == 2, nil
}); err != nil {
t.Fatalf("no peers")
}
retry.Run("", t, func(r *retry.R) {
if got, want := numPeers(s1), 2; got != want {
r.Fatalf("got %d s1 peers want %d", got, want)
}
if got, want := numPeers(s2), 2; got != want {
r.Fatalf("got %d s2 peers want %d", got, want)
}
})
}
func TestServer_Expect(t *testing.T) {
@ -564,52 +513,33 @@ func TestServer_Expect(t *testing.T) {
t.Fatalf("err: %v", err)
}
var p1 int
var p2 int
// Should have no peers yet since the bootstrap didn't occur.
if err := testrpc.WaitForResult(func() (bool, error) {
p1, _ = s1.numPeers()
return p1 == 0, fmt.Errorf("%d", p1)
}); err != nil {
t.Fatalf("should have 0 peers %s", err)
}
if err := testrpc.WaitForResult(func() (bool, error) {
p2, _ = s2.numPeers()
return p2 == 0, fmt.Errorf("%d", p2)
}); err != nil {
t.Fatalf("should have 0 peers %s", err)
}
retry.Run("", t, func(r *retry.R) {
if got, want := numPeers(s1), 0; got != want {
r.Fatalf("got %d s1 peers want %d", got, want)
}
if got, want := numPeers(s2), 0; got != want {
r.Fatalf("got %d s2 peers want %d", got, want)
}
})
// Join the third node.
if _, err := s3.JoinLAN([]string{addr}); err != nil {
t.Fatalf("err: %v", err)
}
var p3 int
// Now we have three servers so we should bootstrap.
if err := testrpc.WaitForResult(func() (bool, error) {
p1, _ = s1.numPeers()
return p1 == 3, fmt.Errorf("%d", p1)
}); err != nil {
t.Fatalf("should have 3 peers %s", err)
}
if err := testrpc.WaitForResult(func() (bool, error) {
p2, _ = s2.numPeers()
return p2 == 3, fmt.Errorf("%d", p2)
}); err != nil {
t.Fatalf("should have 3 peers %s", err)
}
if err := testrpc.WaitForResult(func() (bool, error) {
p3, _ = s3.numPeers()
return p3 == 3, fmt.Errorf("%d", p3)
}); err != nil {
t.Fatalf("should have 3 peers %s", err)
}
retry.Run("", t, func(r *retry.R) {
if got, want := numPeers(s1), 3; got != want {
r.Fatalf("got %d s1 peers want %d", got, want)
}
if got, want := numPeers(s2), 3; got != want {
r.Fatalf("got %d s2 peers want %d", got, want)
}
if got, want := numPeers(s3), 3; got != want {
r.Fatalf("got %d s3 peers want %d", got, want)
}
})
// Make sure a leader is elected, grab the current term and then add in
// the fourth server.
@ -620,13 +550,20 @@ func TestServer_Expect(t *testing.T) {
}
// Wait for the new server to see itself added to the cluster.
var p4 int
if err := testrpc.WaitForResult(func() (bool, error) {
p4, _ = s4.numPeers()
return p4 == 4, fmt.Errorf("%d", p4)
}); err != nil {
t.Fatalf("should have 4 peers %s", err)
}
retry.Run("", t, func(r *retry.R) {
if got, want := numPeers(s1), 4; got != want {
r.Fatalf("got %d s1 peers want %d", got, want)
}
if got, want := numPeers(s2), 4; got != want {
r.Fatalf("got %d s2 peers want %d", got, want)
}
if got, want := numPeers(s3), 4; got != want {
r.Fatalf("got %d s3 peers want %d", got, want)
}
if got, want := numPeers(s4), 4; got != want {
r.Fatalf("got %d s4 peers want %d", got, want)
}
})
// Make sure there's still a leader and that the term didn't change,
// so we know an election didn't occur.
@ -662,21 +599,23 @@ func TestServer_BadExpect(t *testing.T) {
var p1 int
var p2 int
retry.
// should have no peers yet
if err := testrpc.WaitForResult(func() (bool, error) {
p1, _ = s1.numPeers()
return p1 == 0, fmt.Errorf("%d", p1)
}); err != nil {
t.Fatalf("should have 0 peers %s", err)
}
// should have no peers yet
Run("", t, func(r *retry.R) {
p1, _ = s1.numPeers()
if p1 != 0 {
r.Fatalf("%d", p1)
}
})
retry.Run("", t, func(r *retry.R) {
if err := testrpc.WaitForResult(func() (bool, error) {
p2, _ = s2.numPeers()
return p2 == 0, fmt.Errorf("%d", p2)
}); err != nil {
t.Fatalf("should have 0 peers %s", err)
}
if p2 != 0 {
r.Fatalf("%d", p2)
}
})
// join the third node
if _, err := s3.JoinLAN([]string{addr}); err != nil {
@ -684,28 +623,31 @@ func TestServer_BadExpect(t *testing.T) {
}
var p3 int
retry.
// should still have no peers (because s2 is in expect=2 mode)
if err := testrpc.WaitForResult(func() (bool, error) {
p1, _ = s1.numPeers()
return p1 == 0, fmt.Errorf("%d", p1)
}); err != nil {
t.Fatalf("should have 0 peers %s", err)
}
// should still have no peers (because s2 is in expect=2 mode)
Run("", t, func(r *retry.R) {
p1, _ = s1.numPeers()
if p1 != 0 {
r.Fatalf("%d", p1)
}
})
retry.Run("", t, func(r *retry.R) {
if err := testrpc.WaitForResult(func() (bool, error) {
p2, _ = s2.numPeers()
return p2 == 0, fmt.Errorf("%d", p2)
}); err != nil {
t.Fatalf("should have 0 peers %s", err)
}
if p2 != 0 {
r.Fatalf("%d", p2)
}
})
retry.Run("", t, func(r *retry.R) {
if err := testrpc.WaitForResult(func() (bool, error) {
p3, _ = s3.numPeers()
return p3 == 0, fmt.Errorf("%d", p3)
}); err != nil {
t.Fatalf("should have 0 peers %s", err)
}
if p3 != 0 {
r.Fatalf("%d", p3)
}
})
}
type fakeGlobalResp struct{}
@ -722,12 +664,11 @@ func TestServer_globalRPCErrors(t *testing.T) {
dir1, s1 := testServerDC(t, "dc1")
defer os.RemoveAll(dir1)
defer s1.Shutdown()
if err := testrpc.WaitForResult(func() (bool, error) {
return len(s1.router.GetDatacenters()) == 1, nil
}); err != nil {
t.Fatalf("did not join WAN")
}
retry.Run("", t, func(r *retry.R) {
if len(s1.router.GetDatacenters()) != 1 {
r.Fatal(nil)
}
})
// Check that an error from a remote DC is returned
err := s1.globalRPC("Bad.Method", nil, &fakeGlobalResp{})

View File

@ -9,6 +9,7 @@ import (
"github.com/hashicorp/consul/consul/structs"
"github.com/hashicorp/consul/testrpc"
"github.com/hashicorp/consul/testutil/retry"
"github.com/hashicorp/net-rpc-msgpackrpc"
)
@ -297,13 +298,11 @@ func TestServer_SessionTTL_Failover(t *testing.T) {
if _, err := s3.JoinLAN([]string{addr}); err != nil {
t.Fatalf("err: %v", err)
}
if err := testrpc.WaitForResult(func() (bool, error) {
peers, _ := s1.numPeers()
return peers == 3, nil
}); err != nil {
t.Fatalf("should have 3 peers %s", err)
}
retry.Run("", t, func(r *retry.R) {
if got, want := numPeers(s1), 3; got != want {
r.Fatalf("got %d s1 peers want %d", got, want)
}
})
// Find the leader
var leader *Server
@ -361,9 +360,9 @@ func TestServer_SessionTTL_Failover(t *testing.T) {
if len(leader.sessionTimers) != 0 {
t.Fatalf("session timers should be empty on the shutdown leader")
}
// Find the new leader
if err := testrpc.WaitForResult(func() (bool, error) {
retry.Run("", t, func(r *retry.R) {
leader = nil
for _, s := range servers {
if s.IsLeader() {
@ -371,16 +370,12 @@ func TestServer_SessionTTL_Failover(t *testing.T) {
}
}
if leader == nil {
return false, fmt.Errorf("Should have a new leader")
r.Fatal("Should have a new leader")
}
// Ensure session timer is restored
if _, ok := leader.sessionTimers[id1]; !ok {
return false, fmt.Errorf("missing session timer")
r.Fatal("missing session timer")
}
return true, nil
}); err != nil {
t.Fatal(err)
}
})
}

View File

@ -10,6 +10,7 @@ import (
"github.com/hashicorp/consul/api"
"github.com/hashicorp/consul/consul/structs"
"github.com/hashicorp/consul/testrpc"
"github.com/hashicorp/consul/testutil/retry"
"github.com/hashicorp/net-rpc-msgpackrpc"
)
@ -330,11 +331,11 @@ func TestSnapshot_Forward_Datacenter(t *testing.T) {
if _, err := s2.JoinWAN([]string{addr}); err != nil {
t.Fatalf("err: %v", err)
}
if err := testrpc.WaitForResult(func() (bool, error) {
return len(s1.WANMembers()) > 1, nil
}); err != nil {
t.Fatalf("failed to join WAN: %s", err)
}
retry.Run("", t, func(r *retry.R) {
if got, want := len(s1.WANMembers()), 2; got < want {
r.Fatalf("got %d WAN members want at least %d", got, want)
}
})
// Run a snapshot from each server locally and remotely to ensure we
// forward.