From 87e9d855fd072a7eab3e1f02b0efe7d0ae7047ab Mon Sep 17 00:00:00 2001 From: Atin Malaviya Date: Wed, 10 Dec 2014 16:43:15 -0500 Subject: [PATCH] Added more tests --- consul/session_endpoint_test.go | 41 ++++++-- consul/session_ttl_test.go | 171 ++++++++++++++++++++++++++++++++ 2 files changed, 204 insertions(+), 8 deletions(-) create mode 100644 consul/session_ttl_test.go diff --git a/consul/session_endpoint_test.go b/consul/session_endpoint_test.go index a745d16f10..b3c2a4fd18 100644 --- a/consul/session_endpoint_test.go +++ b/consul/session_endpoint_test.go @@ -232,6 +232,7 @@ func TestSessionEndpoint_Renew(t *testing.T) { defer client.Close() testutil.WaitForLeader(t, client.Call, "dc1") + TTL := "10s" s1.fsm.State().EnsureNode(1, structs.Node{"foo", "127.0.0.1"}) ids := []string{} @@ -241,7 +242,7 @@ func TestSessionEndpoint_Renew(t *testing.T) { Op: structs.SessionCreate, Session: structs.Session{ Node: "foo", - TTL: "10s", + TTL: TTL, }, } var out string @@ -274,9 +275,10 @@ func TestSessionEndpoint_Renew(t *testing.T) { if s.Node != "foo" { t.Fatalf("bad: %v", s) } - if s.TTL != "30s" { - t.Fatalf("bad: %v", s) + if s.TTL != TTL { + t.Fatalf("bad session TTL: %s %v", s.TTL, s) } + t.Logf("Created session '%s'", s.ID) } // now sleep for ttl - since internally we use ttl*2 to destroy, this is ok @@ -307,10 +309,12 @@ func TestSessionEndpoint_Renew(t *testing.T) { if s.Node != "foo" { t.Fatalf("bad: %v", s) } + + t.Logf("Renewed session '%s'", s.ID) } // now sleep for ttl*2 - 3 sessions should still be alive - time.Sleep(20 * time.Second) + time.Sleep(2 * 10 * time.Second) if err := client.Call("Session.List", &getR, &sessions); err != nil { t.Fatalf("err: %v", err) @@ -319,9 +323,9 @@ func TestSessionEndpoint_Renew(t *testing.T) { if sessions.Index == 0 { t.Fatalf("Bad: %v", sessions) } - if len(sessions.Sessions) != 3 { - t.Fatalf("Bad: %v", sessions.Sessions) - } + + t.Logf("Expect 2 sessions to be destroyed") + for i := 0; i < len(sessions.Sessions); i++ { 
s := sessions.Sessions[i] if !strContains(ids, s.ID) { @@ -330,9 +334,16 @@ func TestSessionEndpoint_Renew(t *testing.T) { if s.Node != "foo" { t.Fatalf("bad: %v", s) } - if s.TTL != "30s" { + if s.TTL != TTL { t.Fatalf("bad: %v", s) } + if i > 2 { + t.Errorf("session '%s' should be destroyed", s.ID) + } + } + + if len(sessions.Sessions) > 3 { + t.Fatalf("Bad: %v", sessions.Sessions) } // now sleep again for ttl*2 - no sessions should still be alive @@ -346,6 +357,20 @@ func TestSessionEndpoint_Renew(t *testing.T) { t.Fatalf("Bad: %v", sessions) } if len(sessions.Sessions) != 0 { + for i := 0; i < len(sessions.Sessions); i++ { + s := sessions.Sessions[i] + if !strContains(ids, s.ID) { + t.Fatalf("bad: %v", s) + } + if s.Node != "foo" { + t.Fatalf("bad: %v", s) + } + if s.TTL != TTL { + t.Fatalf("bad: %v", s) + } + t.Errorf("session '%s' should be destroyed", s.ID) + } + t.Fatalf("Bad: %v", sessions.Sessions) } } diff --git a/consul/session_ttl_test.go b/consul/session_ttl_test.go new file mode 100644 index 0000000000..c5ca43642a --- /dev/null +++ b/consul/session_ttl_test.go @@ -0,0 +1,171 @@ +package consul + +import ( + "errors" + "fmt" + "os" + "testing" + "time" + + "github.com/hashicorp/consul/consul/structs" + "github.com/hashicorp/consul/testutil" +) + +func TestServer_sessionTTL(t *testing.T) { + dir1, s1 := testServer(t) + defer os.RemoveAll(dir1) + defer s1.Shutdown() + + dir2, s2 := testServerDCBootstrap(t, "dc1", false) + defer os.RemoveAll(dir2) + defer s2.Shutdown() + + dir3, s3 := testServerDCBootstrap(t, "dc1", false) + defer os.RemoveAll(dir3) + defer s3.Shutdown() + servers := []*Server{s1, s2, s3} + + // Try to join + addr := fmt.Sprintf("127.0.0.1:%d", + s1.config.SerfLANConfig.MemberlistConfig.BindPort) + if _, err := s2.JoinLAN([]string{addr}); err != nil { + t.Fatalf("err: %v", err) + } + if _, err := s3.JoinLAN([]string{addr}); err != nil { + t.Fatalf("err: %v", err) + } + + for _, s := range servers { + testutil.WaitForResult(func() (bool, 
error) { + peers, _ := s.raftPeers.Peers() + return len(peers) == 3, nil + }, func(err error) { + t.Fatalf("should have 3 peers") + }) + } + + // Find the leader + var leader *Server + for _, s := range servers { + // check that s.sessionTimers is empty + if len(s.sessionTimers) != 0 { + t.Fatalf("should have no sessionTimers") + } + // find the leader too + if s.IsLeader() { + leader = s + } + } + + if leader == nil { + t.Fatalf("Should have a leader") + } + + client := rpcClient(t, leader) + defer client.Close() + + leader.fsm.State().EnsureNode(1, structs.Node{"foo", "127.0.0.1"}) + + // create a TTL session + arg := structs.SessionRequest{ + Datacenter: "dc1", + Op: structs.SessionCreate, + Session: structs.Session{ + Node: "foo", + TTL: "10s", + }, + } + var id1 string + if err := client.Call("Session.Apply", &arg, &id1); err != nil { + t.Fatalf("err: %v", err) + } + + // check that leader.sessionTimers has the session id in it + // means initializeSessionTimers was called and resetSessionTimer was called + if len(leader.sessionTimers) == 0 || leader.sessionTimers[id1] == nil { + t.Fatalf("sessionTimers not initialized and does not contain session timer for session") + } + + time.Sleep(100 * time.Millisecond) + leader.Leave() + leader.Shutdown() + + // leader.sessionTimers should be empty due to clearAllSessionTimers getting called + if len(leader.sessionTimers) != 0 { + t.Fatalf("session timers should be empty on the shutdown leader") + } + + time.Sleep(100 * time.Millisecond) + + var remain *Server + for _, s := range servers { + if s == leader { + continue + } + remain = s + testutil.WaitForResult(func() (bool, error) { + peers, _ := s.raftPeers.Peers() + return len(peers) == 2, errors.New(fmt.Sprintf("%v", peers)) + }, func(err error) { + t.Fatalf("should have 2 peers: %v", err) + }) + } + + // Verify the old leader is deregistered + state := remain.fsm.State() + testutil.WaitForResult(func() (bool, error) { + _, found, _ := 
state.GetNode(leader.config.NodeName) + return !found, nil + }, func(err error) { + t.Fatalf("leader should be deregistered") + }) + + // Find the new leader + for _, s := range servers { + // find the leader too + if s.IsLeader() { + leader = s + } + } + + if leader == nil { + t.Fatalf("Should have a new leader") + } + + // check that new leader.sessionTimers has the session id in it + if len(leader.sessionTimers) == 0 || leader.sessionTimers[id1] == nil { + t.Fatalf("sessionTimers not initialized and does not contain session timer for session") + } + + // create another TTL session with the same parameters + var id2 string + if err := client.Call("Session.Apply", &arg, &id2); err != nil { + t.Fatalf("err: %v", err) + } + + if len(leader.sessionTimers) != 2 { + t.Fatalf("sessionTimers length should be 2") + } + + // destroy the id1 session (test clearSessionTimer) + arg.Op = structs.SessionDestroy + arg.Session.ID = id1 + if err := client.Call("Session.Apply", &arg, &id1); err != nil { + t.Fatalf("err: %v", err) + } + + if len(leader.sessionTimers) != 1 { + t.Fatalf("sessionTimers length should be 1") + } + + // destroy the id2 session (test clearSessionTimer) + arg.Op = structs.SessionDestroy + arg.Session.ID = id2 + if err := client.Call("Session.Apply", &arg, &id2); err != nil { + t.Fatalf("err: %v", err) + } + + if len(leader.sessionTimers) != 0 { + t.Fatalf("sessionTimers length should be 0") + } +}