consul/consul/catalog_endpoint_test.go

730 lines
17 KiB
Go
Raw Normal View History

2013-12-11 22:57:40 +00:00
package consul
import (
2013-12-12 00:33:19 +00:00
"fmt"
2013-12-19 20:03:57 +00:00
"github.com/hashicorp/consul/consul/structs"
"net/rpc"
2013-12-11 22:57:40 +00:00
"os"
2013-12-12 18:35:50 +00:00
"sort"
"strings"
2013-12-11 22:57:40 +00:00
"testing"
"time"
)
func TestCatalogRegister(t *testing.T) {
dir1, s1 := testServer(t)
defer os.RemoveAll(dir1)
defer s1.Shutdown()
client := rpcClient(t, s1)
defer client.Close()
2013-12-19 20:03:57 +00:00
arg := structs.RegisterRequest{
Datacenter: "dc1",
Node: "foo",
Address: "127.0.0.1",
Service: &structs.NodeService{
Service: "db",
2014-04-03 19:03:10 +00:00
Tags: []string{"master"},
Port: 8000,
},
2013-12-11 22:57:40 +00:00
}
var out struct{}
err := client.Call("Catalog.Register", &arg, &out)
if err == nil || err.Error() != "No cluster leader" {
t.Fatalf("err: %v", err)
}
// Wait for leader
time.Sleep(100 * time.Millisecond)
if err := client.Call("Catalog.Register", &arg, &out); err != nil {
t.Fatalf("err: %v", err)
}
}
2013-12-11 23:34:10 +00:00
2013-12-12 00:42:19 +00:00
func TestCatalogRegister_ForwardLeader(t *testing.T) {
dir1, s1 := testServer(t)
defer os.RemoveAll(dir1)
defer s1.Shutdown()
client1 := rpcClient(t, s1)
defer client1.Close()
dir2, s2 := testServer(t)
defer os.RemoveAll(dir2)
defer s2.Shutdown()
client2 := rpcClient(t, s2)
defer client2.Close()
// Try to join
addr := fmt.Sprintf("127.0.0.1:%d",
s1.config.SerfLANConfig.MemberlistConfig.BindPort)
if _, err := s2.JoinLAN([]string{addr}); err != nil {
2013-12-12 00:42:19 +00:00
t.Fatalf("err: %v", err)
}
// Wait for a leader
time.Sleep(100 * time.Millisecond)
// Use the follower as the client
2013-12-19 20:03:57 +00:00
var client *rpc.Client
2013-12-12 00:42:19 +00:00
if !s1.IsLeader() {
client = client1
} else {
client = client2
}
2013-12-19 20:03:57 +00:00
arg := structs.RegisterRequest{
Datacenter: "dc1",
Node: "foo",
Address: "127.0.0.1",
Service: &structs.NodeService{
Service: "db",
2014-04-03 19:03:10 +00:00
Tags: []string{"master"},
Port: 8000,
},
2013-12-12 00:42:19 +00:00
}
var out struct{}
if err := client.Call("Catalog.Register", &arg, &out); err != nil {
t.Fatalf("err: %v", err)
}
}
2013-12-12 00:33:19 +00:00
func TestCatalogRegister_ForwardDC(t *testing.T) {
dir1, s1 := testServer(t)
defer os.RemoveAll(dir1)
defer s1.Shutdown()
client := rpcClient(t, s1)
defer client.Close()
dir2, s2 := testServerDC(t, "dc2")
defer os.RemoveAll(dir2)
defer s2.Shutdown()
// Try to join
addr := fmt.Sprintf("127.0.0.1:%d",
s1.config.SerfWANConfig.MemberlistConfig.BindPort)
if _, err := s2.JoinWAN([]string{addr}); err != nil {
2013-12-12 00:33:19 +00:00
t.Fatalf("err: %v", err)
}
// Wait for the leaders
time.Sleep(100 * time.Millisecond)
2013-12-19 20:03:57 +00:00
arg := structs.RegisterRequest{
Datacenter: "dc2", // SHould forward through s1
Node: "foo",
Address: "127.0.0.1",
Service: &structs.NodeService{
Service: "db",
2014-04-03 19:03:10 +00:00
Tags: []string{"master"},
Port: 8000,
},
2013-12-12 00:33:19 +00:00
}
var out struct{}
if err := client.Call("Catalog.Register", &arg, &out); err != nil {
t.Fatalf("err: %v", err)
}
}
2013-12-11 23:34:10 +00:00
func TestCatalogDeregister(t *testing.T) {
dir1, s1 := testServer(t)
defer os.RemoveAll(dir1)
defer s1.Shutdown()
client := rpcClient(t, s1)
defer client.Close()
2013-12-19 20:03:57 +00:00
arg := structs.DeregisterRequest{
2013-12-11 23:34:10 +00:00
Datacenter: "dc1",
Node: "foo",
}
var out struct{}
err := client.Call("Catalog.Deregister", &arg, &out)
if err == nil || err.Error() != "No cluster leader" {
t.Fatalf("err: %v", err)
}
// Wait for leader
time.Sleep(100 * time.Millisecond)
if err := client.Call("Catalog.Deregister", &arg, &out); err != nil {
t.Fatalf("err: %v", err)
}
}
2013-12-12 18:35:50 +00:00
func TestCatalogListDatacenters(t *testing.T) {
dir1, s1 := testServer(t)
defer os.RemoveAll(dir1)
defer s1.Shutdown()
client := rpcClient(t, s1)
defer client.Close()
dir2, s2 := testServerDC(t, "dc2")
defer os.RemoveAll(dir2)
defer s2.Shutdown()
// Try to join
addr := fmt.Sprintf("127.0.0.1:%d",
s1.config.SerfWANConfig.MemberlistConfig.BindPort)
if _, err := s2.JoinWAN([]string{addr}); err != nil {
2013-12-12 18:35:50 +00:00
t.Fatalf("err: %v", err)
}
time.Sleep(10 * time.Millisecond)
var out []string
if err := client.Call("Catalog.ListDatacenters", struct{}{}, &out); err != nil {
t.Fatalf("err: %v", err)
}
// Sort the dcs
sort.Strings(out)
if len(out) != 2 {
t.Fatalf("bad: %v", out)
}
if out[0] != "dc1" {
t.Fatalf("bad: %v", out)
}
if out[1] != "dc2" {
t.Fatalf("bad: %v", out)
}
}
2013-12-12 18:48:36 +00:00
func TestCatalogListNodes(t *testing.T) {
dir1, s1 := testServer(t)
defer os.RemoveAll(dir1)
defer s1.Shutdown()
client := rpcClient(t, s1)
defer client.Close()
args := structs.DCSpecificRequest{
Datacenter: "dc1",
}
var out structs.IndexedNodes
err := client.Call("Catalog.ListNodes", &args, &out)
2013-12-12 18:48:36 +00:00
if err == nil || err.Error() != "No cluster leader" {
t.Fatalf("err: %v", err)
}
// Wait for leader
time.Sleep(100 * time.Millisecond)
// Just add a node
s1.fsm.State().EnsureNode(1, structs.Node{"foo", "127.0.0.1"})
2013-12-12 18:48:36 +00:00
if err := client.Call("Catalog.ListNodes", &args, &out); err != nil {
2013-12-12 18:48:36 +00:00
t.Fatalf("err: %v", err)
}
if len(out.Nodes) != 2 {
2014-01-10 01:22:01 +00:00
t.Fatalf("bad: %v", out)
}
// Server node is auto added from Serf
if out.Nodes[0].Node != s1.config.NodeName {
2013-12-12 18:48:36 +00:00
t.Fatalf("bad: %v", out)
}
if out.Nodes[1].Node != "foo" {
2013-12-12 18:48:36 +00:00
t.Fatalf("bad: %v", out)
}
if out.Nodes[1].Address != "127.0.0.1" {
2013-12-12 18:48:36 +00:00
t.Fatalf("bad: %v", out)
}
}
2013-12-12 19:07:14 +00:00
2014-04-19 00:26:59 +00:00
func TestCatalogListNodes_StaleRaad(t *testing.T) {
dir1, s1 := testServer(t)
defer os.RemoveAll(dir1)
defer s1.Shutdown()
client1 := rpcClient(t, s1)
defer client1.Close()
2014-04-19 00:48:50 +00:00
dir2, s2 := testServerDCBootstrap(t, "dc1", false)
2014-04-19 00:26:59 +00:00
defer os.RemoveAll(dir2)
defer s2.Shutdown()
client2 := rpcClient(t, s2)
defer client2.Close()
// Try to join
addr := fmt.Sprintf("127.0.0.1:%d",
s1.config.SerfLANConfig.MemberlistConfig.BindPort)
if _, err := s2.JoinLAN([]string{addr}); err != nil {
t.Fatalf("err: %v", err)
}
// Wait for a leader
time.Sleep(100 * time.Millisecond)
// Use the follower as the client
var client *rpc.Client
if !s1.IsLeader() {
client = client1
// Inject fake data on the follower!
s1.fsm.State().EnsureNode(1, structs.Node{"foo", "127.0.0.1"})
} else {
client = client2
// Inject fake data on the follower!
s2.fsm.State().EnsureNode(1, structs.Node{"foo", "127.0.0.1"})
}
args := structs.DCSpecificRequest{
Datacenter: "dc1",
QueryOptions: structs.QueryOptions{AllowStale: true},
}
var out structs.IndexedNodes
if err := client.Call("Catalog.ListNodes", &args, &out); err != nil {
t.Fatalf("err: %v", err)
}
2014-04-19 00:48:50 +00:00
found := false
for _, n := range out.Nodes {
if n.Node == "foo" {
found = true
}
}
if !found {
t.Fatalf("failed to find foo")
}
if out.QueryMeta.LastContact == 0 {
t.Fatalf("should have a last contact time")
}
if !out.QueryMeta.KnownLeader {
t.Fatalf("should have known leader")
}
}
// TestCatalogListNodes_ConsistentRead_Fail verifies that a consistent
// read fails with a "leadership lost" error after the leader's peer is
// killed (quorum is gone, so the leader cannot verify leadership).
func TestCatalogListNodes_ConsistentRead_Fail(t *testing.T) {
	dir1, s1 := testServer(t)
	defer os.RemoveAll(dir1)
	defer s1.Shutdown()
	client1 := rpcClient(t, s1)
	defer client1.Close()

	dir2, s2 := testServerDCBootstrap(t, "dc1", false)
	defer os.RemoveAll(dir2)
	defer s2.Shutdown()
	client2 := rpcClient(t, s2)
	defer client2.Close()

	// Try to join
	addr := fmt.Sprintf("127.0.0.1:%d",
		s1.config.SerfLANConfig.MemberlistConfig.BindPort)
	if _, err := s2.JoinLAN([]string{addr}); err != nil {
		t.Fatalf("err: %v", err)
	}

	// Wait for a leader
	time.Sleep(100 * time.Millisecond)

	// Use the leader as the client, kill the follower
	var client *rpc.Client
	if s1.IsLeader() {
		client = client1
		s2.Shutdown()
	} else {
		client = client2
		s1.Shutdown()
	}

	args := structs.DCSpecificRequest{
		Datacenter:   "dc1",
		QueryOptions: structs.QueryOptions{RequireConsistent: true},
	}
	var out structs.IndexedNodes
	// BUGFIX: guard against a nil error before calling err.Error();
	// previously an unexpected success would panic with a nil-pointer
	// dereference instead of failing the test cleanly.
	err := client.Call("Catalog.ListNodes", &args, &out)
	if err == nil || !strings.HasPrefix(err.Error(), "leadership lost") {
		t.Fatalf("err: %v", err)
	}
	if out.QueryMeta.LastContact != 0 {
		t.Fatalf("should not have a last contact time")
	}
	if out.QueryMeta.KnownLeader {
		t.Fatalf("should have no known leader")
	}
}
func TestCatalogListNodes_ConsistentRead(t *testing.T) {
dir1, s1 := testServer(t)
defer os.RemoveAll(dir1)
defer s1.Shutdown()
client1 := rpcClient(t, s1)
defer client1.Close()
dir2, s2 := testServerDCBootstrap(t, "dc1", false)
defer os.RemoveAll(dir2)
defer s2.Shutdown()
client2 := rpcClient(t, s2)
defer client2.Close()
// Try to join
addr := fmt.Sprintf("127.0.0.1:%d",
s1.config.SerfLANConfig.MemberlistConfig.BindPort)
if _, err := s2.JoinLAN([]string{addr}); err != nil {
t.Fatalf("err: %v", err)
}
// Wait for a leader
time.Sleep(100 * time.Millisecond)
// Use the leader as the client, kill the follower
var client *rpc.Client
if s1.IsLeader() {
client = client1
} else {
client = client2
}
args := structs.DCSpecificRequest{
Datacenter: "dc1",
QueryOptions: structs.QueryOptions{RequireConsistent: true},
}
var out structs.IndexedNodes
if err := client.Call("Catalog.ListNodes", &args, &out); err != nil {
t.Fatalf("err: %v", err)
}
if out.QueryMeta.LastContact != 0 {
t.Fatalf("should not have a last contact time")
}
if !out.QueryMeta.KnownLeader {
t.Fatalf("should have known leader")
2014-04-19 00:26:59 +00:00
}
}
// BenchmarkCatalogListNodes measures the cost of a ListNodes RPC
// round-trip against a single-node server.
func BenchmarkCatalogListNodes(b *testing.B) {
	dir, srv := testServer(nil)
	defer os.RemoveAll(dir)
	defer srv.Shutdown()
	conn := rpcClient(nil, srv)
	defer conn.Close()

	// Allow leader election to complete.
	time.Sleep(100 * time.Millisecond)

	// Seed the state store with a single node.
	srv.fsm.State().EnsureNode(1, structs.Node{Node: "foo", Address: "127.0.0.1"})

	req := structs.DCSpecificRequest{
		Datacenter: "dc1",
	}
	for i := 0; i < b.N; i++ {
		var out structs.IndexedNodes
		if err := conn.Call("Catalog.ListNodes", &req, &out); err != nil {
			b.Fatalf("err: %v", err)
		}
	}
}
2013-12-12 19:07:14 +00:00
func TestCatalogListServices(t *testing.T) {
dir1, s1 := testServer(t)
defer os.RemoveAll(dir1)
defer s1.Shutdown()
client := rpcClient(t, s1)
defer client.Close()
args := structs.DCSpecificRequest{
Datacenter: "dc1",
}
var out structs.IndexedServices
err := client.Call("Catalog.ListServices", &args, &out)
2013-12-12 19:07:14 +00:00
if err == nil || err.Error() != "No cluster leader" {
t.Fatalf("err: %v", err)
}
// Wait for leader
time.Sleep(100 * time.Millisecond)
// Just add a node
s1.fsm.State().EnsureNode(1, structs.Node{"foo", "127.0.0.1"})
2014-04-03 19:03:10 +00:00
s1.fsm.State().EnsureService(2, "foo", &structs.NodeService{"db", "db", []string{"primary"}, 5000})
2013-12-12 19:07:14 +00:00
if err := client.Call("Catalog.ListServices", &args, &out); err != nil {
2013-12-12 19:07:14 +00:00
t.Fatalf("err: %v", err)
}
if len(out.Services) != 2 {
2014-01-10 01:57:13 +00:00
t.Fatalf("bad: %v", out)
}
// Consul service should auto-register
2014-04-03 19:03:10 +00:00
if _, ok := out.Services["consul"]; !ok {
t.Fatalf("bad: %v", out)
}
if len(out.Services["db"]) != 1 {
t.Fatalf("bad: %v", out)
}
if out.Services["db"][0] != "primary" {
t.Fatalf("bad: %v", out)
}
}
func TestCatalogListServices_Blocking(t *testing.T) {
dir1, s1 := testServer(t)
defer os.RemoveAll(dir1)
defer s1.Shutdown()
client := rpcClient(t, s1)
defer client.Close()
args := structs.DCSpecificRequest{
Datacenter: "dc1",
}
var out structs.IndexedServices
// Wait for leader
time.Sleep(100 * time.Millisecond)
// Run the query
if err := client.Call("Catalog.ListServices", &args, &out); err != nil {
t.Fatalf("err: %v", err)
}
// Setup a blocking query
args.MinQueryIndex = out.Index
args.MaxQueryTime = time.Second
// Async cause a change
start := time.Now()
go func() {
time.Sleep(100 * time.Millisecond)
s1.fsm.State().EnsureNode(1, structs.Node{"foo", "127.0.0.1"})
2014-04-03 19:03:10 +00:00
s1.fsm.State().EnsureService(2, "foo", &structs.NodeService{"db", "db", []string{"primary"}, 5000})
}()
// Re-run the query
out = structs.IndexedServices{}
if err := client.Call("Catalog.ListServices", &args, &out); err != nil {
t.Fatalf("err: %v", err)
}
// Should block at least 100ms
if time.Now().Sub(start) < 100*time.Millisecond {
t.Fatalf("too fast")
}
// Check the indexes
if out.Index != 2 {
2013-12-12 19:07:14 +00:00
t.Fatalf("bad: %v", out)
}
// Should find the service
if len(out.Services) != 2 {
2013-12-12 19:07:14 +00:00
t.Fatalf("bad: %v", out)
}
}
func TestCatalogListServices_Timeout(t *testing.T) {
dir1, s1 := testServer(t)
defer os.RemoveAll(dir1)
defer s1.Shutdown()
client := rpcClient(t, s1)
defer client.Close()
args := structs.DCSpecificRequest{
Datacenter: "dc1",
}
var out structs.IndexedServices
// Wait for leader
time.Sleep(100 * time.Millisecond)
// Run the query
if err := client.Call("Catalog.ListServices", &args, &out); err != nil {
t.Fatalf("err: %v", err)
}
// Setup a blocking query
args.MinQueryIndex = out.Index
args.MaxQueryTime = 100 * time.Millisecond
// Re-run the query
start := time.Now()
out = structs.IndexedServices{}
if err := client.Call("Catalog.ListServices", &args, &out); err != nil {
t.Fatalf("err: %v", err)
}
// Should block at least 100ms
if time.Now().Sub(start) < 100*time.Millisecond {
t.Fatalf("too fast")
}
// Check the indexes, should not change
if out.Index != args.MinQueryIndex {
2013-12-12 19:07:14 +00:00
t.Fatalf("bad: %v", out)
}
}
2013-12-12 19:37:19 +00:00
2014-04-21 18:57:39 +00:00
// TestCatalogListServices_Stale verifies an AllowStale query is served
// from local state even before any leader exists.
func TestCatalogListServices_Stale(t *testing.T) {
	dir, srv := testServer(t)
	defer os.RemoveAll(dir)
	defer srv.Shutdown()
	conn := rpcClient(t, srv)
	defer conn.Close()

	req := structs.DCSpecificRequest{
		Datacenter: "dc1",
	}
	req.AllowStale = true
	var out structs.IndexedServices

	// Seed local state directly; no leader election has happened.
	srv.fsm.State().EnsureNode(1, structs.Node{Node: "foo", Address: "127.0.0.1"})
	srv.fsm.State().EnsureService(2, "foo", &structs.NodeService{"db", "db", []string{"primary"}, 5000})

	// Issue the query immediately — stale reads need no leader.
	if err := conn.Call("Catalog.ListServices", &req, &out); err != nil {
		t.Fatalf("err: %v", err)
	}

	// The seeded service must be visible.
	if len(out.Services) != 1 {
		t.Fatalf("bad: %v", out)
	}
	// A stale read before election must report no known leader.
	if out.KnownLeader {
		t.Fatalf("bad: %v", out)
	}
}
2013-12-12 19:37:19 +00:00
func TestCatalogListServiceNodes(t *testing.T) {
dir1, s1 := testServer(t)
defer os.RemoveAll(dir1)
defer s1.Shutdown()
client := rpcClient(t, s1)
defer client.Close()
2014-01-08 21:52:09 +00:00
args := structs.ServiceSpecificRequest{
2013-12-12 19:37:19 +00:00
Datacenter: "dc1",
ServiceName: "db",
ServiceTag: "slave",
TagFilter: false,
}
var out structs.IndexedServiceNodes
2013-12-12 19:37:19 +00:00
err := client.Call("Catalog.ServiceNodes", &args, &out)
if err == nil || err.Error() != "No cluster leader" {
t.Fatalf("err: %v", err)
}
// Wait for leader
time.Sleep(100 * time.Millisecond)
// Just add a node
s1.fsm.State().EnsureNode(1, structs.Node{"foo", "127.0.0.1"})
2014-04-03 19:03:10 +00:00
s1.fsm.State().EnsureService(2, "foo", &structs.NodeService{"db", "db", []string{"primary"}, 5000})
2013-12-12 19:37:19 +00:00
if err := client.Call("Catalog.ServiceNodes", &args, &out); err != nil {
t.Fatalf("err: %v", err)
}
if len(out.ServiceNodes) != 1 {
2013-12-12 19:37:19 +00:00
t.Fatalf("bad: %v", out)
}
// Try with a filter
args.TagFilter = true
out = structs.IndexedServiceNodes{}
2013-12-12 19:37:19 +00:00
if err := client.Call("Catalog.ServiceNodes", &args, &out); err != nil {
t.Fatalf("err: %v", err)
}
if len(out.ServiceNodes) != 0 {
2013-12-12 19:37:19 +00:00
t.Fatalf("bad: %v", out)
}
}
2013-12-12 19:46:25 +00:00
func TestCatalogNodeServices(t *testing.T) {
dir1, s1 := testServer(t)
defer os.RemoveAll(dir1)
defer s1.Shutdown()
client := rpcClient(t, s1)
defer client.Close()
2014-01-08 21:52:09 +00:00
args := structs.NodeSpecificRequest{
2013-12-12 19:46:25 +00:00
Datacenter: "dc1",
Node: "foo",
}
var out structs.IndexedNodeServices
2013-12-12 19:46:25 +00:00
err := client.Call("Catalog.NodeServices", &args, &out)
if err == nil || err.Error() != "No cluster leader" {
t.Fatalf("err: %v", err)
}
// Wait for leader
time.Sleep(100 * time.Millisecond)
// Just add a node
s1.fsm.State().EnsureNode(1, structs.Node{"foo", "127.0.0.1"})
2014-04-03 19:03:10 +00:00
s1.fsm.State().EnsureService(2, "foo", &structs.NodeService{"db", "db", []string{"primary"}, 5000})
s1.fsm.State().EnsureService(3, "foo", &structs.NodeService{"web", "web", nil, 80})
2013-12-12 19:46:25 +00:00
if err := client.Call("Catalog.NodeServices", &args, &out); err != nil {
t.Fatalf("err: %v", err)
}
if out.NodeServices.Node.Address != "127.0.0.1" {
t.Fatalf("bad: %v", out)
}
if len(out.NodeServices.Services) != 2 {
2013-12-12 19:46:25 +00:00
t.Fatalf("bad: %v", out)
}
services := out.NodeServices.Services
2014-04-03 19:03:10 +00:00
if !strContains(services["db"].Tags, "primary") || services["db"].Port != 5000 {
2013-12-12 19:46:25 +00:00
t.Fatalf("bad: %v", out)
}
2014-04-03 19:03:10 +00:00
if services["web"].Tags != nil || services["web"].Port != 80 {
2013-12-12 19:46:25 +00:00
t.Fatalf("bad: %v", out)
}
}
2014-01-01 02:31:17 +00:00
// Used to check for a regression against a known bug
func TestCatalogRegister_FailedCase1(t *testing.T) {
dir1, s1 := testServer(t)
defer os.RemoveAll(dir1)
defer s1.Shutdown()
client := rpcClient(t, s1)
defer client.Close()
arg := structs.RegisterRequest{
Datacenter: "dc1",
Node: "bar",
Address: "127.0.0.2",
Service: &structs.NodeService{
Service: "web",
2014-04-03 19:03:10 +00:00
Tags: nil,
Port: 8000,
},
2014-01-01 02:31:17 +00:00
}
var out struct{}
err := client.Call("Catalog.Register", &arg, &out)
if err == nil || err.Error() != "No cluster leader" {
t.Fatalf("err: %v", err)
}
// Wait for leader
time.Sleep(100 * time.Millisecond)
if err := client.Call("Catalog.Register", &arg, &out); err != nil {
t.Fatalf("err: %v", err)
}
// Check we can get this back
2014-01-08 21:52:09 +00:00
query := &structs.ServiceSpecificRequest{
2014-01-01 02:31:17 +00:00
Datacenter: "dc1",
ServiceName: "web",
}
var out2 structs.IndexedServiceNodes
if err := client.Call("Catalog.ServiceNodes", query, &out2); err != nil {
2014-01-01 02:31:17 +00:00
t.Fatalf("err: %v", err)
}
// Check the output
if len(out2.ServiceNodes) != 1 {
t.Fatalf("Bad: %v", out2)
2014-01-01 02:31:17 +00:00
}
}