Use defers for WaitGroup and Ticker stop

Kyle Havlovitz 2017-03-10 11:41:17 -08:00
parent 9b4497de09
commit 7608a3c15f
4 changed files with 27 additions and 19 deletions
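Four files change here: the two hunks in package command only regroup imports, while the autopilot loop code and its periodic dead-server cleanup test carry the substantive change. The technique named in the commit title is the standard Go idiom of registering cleanup with defer at the top of a goroutine, so every return path stops the ticker and releases the WaitGroup. A minimal, self-contained sketch of that shape, using placeholder names (loop, shutdownCh, interval) rather than Consul's actual fields:

```go
package main

import (
	"fmt"
	"sync"
	"time"
)

// loop mirrors the shape of the autopilot loop after this commit: the
// WaitGroup release and the ticker cleanup are deferred up front, so the
// shutdown case only has to return.
func loop(wg *sync.WaitGroup, shutdownCh <-chan struct{}, interval time.Duration) {
	defer wg.Done()

	ticker := time.NewTicker(interval)
	defer ticker.Stop()

	for {
		select {
		case <-shutdownCh:
			return // defers run ticker.Stop() and wg.Done()
		case <-ticker.C:
			fmt.Println("tick: do periodic work")
		}
	}
}

func main() {
	shutdownCh := make(chan struct{})
	var wg sync.WaitGroup

	wg.Add(1)
	go loop(&wg, shutdownCh, 10*time.Millisecond)

	time.Sleep(35 * time.Millisecond)
	close(shutdownCh)
	wg.Wait() // returns once loop has exited and its deferred Done has run
}
```

The practical win is that any early return added to the loop later cannot leak the ticker or leave WaitGroup.Wait hanging.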

File 1 of 4: package command, OperatorAutopilotSetCommand imports

@@ -3,10 +3,11 @@ package command
 import (
 	"flag"
 	"fmt"
-	"github.com/hashicorp/consul/api"
-	"github.com/hashicorp/consul/command/base"
 	"strings"
 	"time"
+
+	"github.com/hashicorp/consul/api"
+	"github.com/hashicorp/consul/command/base"
 )
 
 type OperatorAutopilotSetCommand struct {

File 2 of 4: package command, TestOperator_Autopilot_Set_Implements imports

@@ -3,11 +3,11 @@ package command
 import (
 	"strings"
 	"testing"
+	"time"
 
 	"github.com/hashicorp/consul/command/base"
 	"github.com/hashicorp/consul/consul/structs"
 	"github.com/mitchellh/cli"
-	"time"
 )
 
 func TestOperator_Autopilot_Set_Implements(t *testing.T) {
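The two command-package hunks above are pure import hygiene: standard-library packages are grouped first, then a blank line, then third-party packages, which is the layout goimports produces. An illustrative file showing the same grouping (not taken from this commit):

```go
package example

import (
	// Standard library imports come first...
	"fmt"
	"time"

	// ...then third-party imports, separated by a blank line.
	"github.com/hashicorp/consul/api"
)

// newClient exists only so the imports above are used; it builds a Consul
// API client with the library's default configuration.
func newClient() (*api.Client, error) {
	fmt.Println("creating client at", time.Now().Format(time.RFC3339))
	return api.NewClient(api.DefaultConfig())
}
```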

File 3 of 4: autopilotLoop, pruneDeadServers, and serverHealthLoop

@@ -33,13 +33,15 @@ func (s *Server) stopAutopilot() {
 
 // autopilotLoop periodically looks for nonvoting servers to promote and dead servers to remove.
 func (s *Server) autopilotLoop() {
+	defer s.autopilotWaitGroup.Done()
+
 	// Monitor server health until shutdown
 	ticker := time.NewTicker(s.config.AutopilotInterval)
+	defer ticker.Stop()
+
 	for {
 		select {
 		case <-s.autopilotShutdownCh:
-			ticker.Stop()
-			s.autopilotWaitGroup.Done()
 			return
 		case <-ticker.C:
 			state := s.fsm.State()
@@ -93,7 +95,7 @@ func (s *Server) pruneDeadServers() error {
 	}
 
 	// Only do removals if a minority of servers will be affected
-	if len(failed) < peers/2 || (len(failed) == 1 && peers >= 3) {
+	if len(failed) < peers/2 {
 		for _, server := range failed {
 			s.logger.Printf("[INFO] consul: Attempting removal of failed server: %v", server)
 			go s.serfLAN.RemoveFailedNode(server)
@@ -188,10 +190,11 @@ func (b *BasicAutopilot) PromoteNonVoters(autopilotConf *structs.AutopilotConfig
 func (s *Server) serverHealthLoop() {
 	// Monitor server health until shutdown
 	ticker := time.NewTicker(s.config.ServerHealthInterval)
+	defer ticker.Stop()
+
 	for {
 		select {
 		case <-s.shutdownCh:
-			ticker.Stop()
 			return
 		case <-ticker.C:
 			serverHealths := make(map[string]*structs.ServerHealth)
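The second hunk above also drops the special case from the dead-server guard: removals now happen only when the failed servers are a strict minority of the peer set. Because peers/2 uses integer division, one failed server is pruned in a four-server cluster (1 < 4/2) but not in a three-server cluster (1 < 3/2 is 1 < 1), a case the old (len(failed) == 1 && peers >= 3) clause used to allow. A small standalone sketch of that check, with made-up cluster sizes:

```go
package main

import "fmt"

// canPrune mirrors the guard used after this change: failed servers are only
// auto-removed when they are a strict minority of the peer set. Go's integer
// division means peers/2 rounds down.
func canPrune(failed, peers int) bool {
	return failed < peers/2
}

func main() {
	// Hypothetical cluster sizes, not taken from the commit.
	fmt.Println(canPrune(1, 3)) // false: 1 < 3/2 => 1 < 1
	fmt.Println(canPrune(1, 4)) // true:  1 < 4/2 => 1 < 2
	fmt.Println(canPrune(2, 5)) // false: 2 < 5/2 => 2 < 2
}
```

That is also why the periodic-cleanup test below grows from three servers to four: with the special case gone, a three-node cluster would never auto-remove its single dead member.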

File 4 of 4: TestAutopilot_CleanupDeadServerPeriodic

@@ -102,37 +102,41 @@ func TestAutopilot_CleanupDeadServerPeriodic(t *testing.T) {
 	defer os.RemoveAll(dir3)
 	defer s3.Shutdown()
 
-	servers := []*Server{s1, s2, s3}
+	dir4, s4 := testServerWithConfig(t, conf)
+	defer os.RemoveAll(dir4)
+	defer s4.Shutdown()
+
+	servers := []*Server{s1, s2, s3, s4}
 
 	// Join the servers to s1
 	addr := fmt.Sprintf("127.0.0.1:%d",
 		s1.config.SerfLANConfig.MemberlistConfig.BindPort)
-	if _, err := s2.JoinLAN([]string{addr}); err != nil {
-		t.Fatalf("err: %v", err)
-	}
-	if _, err := s3.JoinLAN([]string{addr}); err != nil {
-		t.Fatalf("err: %v", err)
+	for _, s := range servers[1:] {
+		if _, err := s.JoinLAN([]string{addr}); err != nil {
+			t.Fatalf("err: %v", err)
+		}
 	}
 
 	for _, s := range servers {
 		testutil.WaitForResult(func() (bool, error) {
 			peers, _ := s.numPeers()
-			return peers == 3, nil
+			return peers == 4, nil
 		}, func(err error) {
-			t.Fatalf("should have 3 peers")
+			t.Fatalf("should have 4 peers")
 		})
 	}
 
 	// Kill a non-leader server
-	s3.Shutdown()
+	s4.Shutdown()
 
 	// Should be removed from the peers automatically
-	for _, s := range []*Server{s1, s2} {
+	for _, s := range []*Server{s1, s2, s3} {
 		testutil.WaitForResult(func() (bool, error) {
 			peers, _ := s.numPeers()
-			return peers == 2, nil
+			return peers == 3, nil
 		}, func(err error) {
-			t.Fatalf("should have 2 peers")
+			t.Fatalf("should have 3 peers")
 		})
 	}
 }
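Both assertions lean on testutil.WaitForResult, which retries a condition function until it reports success and calls the failure callback otherwise. A simplified, self-contained stand-in with the same shape (the retry count and delay here are assumptions, not the real helper's values):

```go
package main

import (
	"errors"
	"fmt"
	"time"
)

// waitForResult is a simplified stand-in for a polling test helper like
// Consul's testutil.WaitForResult: it retries the test function until it
// succeeds, and invokes the failure callback with the last error otherwise.
func waitForResult(test func() (bool, error), onFail func(error)) {
	var err error
	for i := 0; i < 50; i++ {
		var ok bool
		if ok, err = test(); ok {
			return
		}
		time.Sleep(25 * time.Millisecond)
	}
	if err == nil {
		err = errors.New("timed out waiting for result")
	}
	onFail(err)
}

func main() {
	start := time.Now()
	waitForResult(func() (bool, error) {
		// Stand-in condition: pretend the cluster has converged after 100ms.
		return time.Since(start) > 100*time.Millisecond, nil
	}, func(err error) {
		fmt.Println("never converged:", err)
	})
	fmt.Println("converged after", time.Since(start))
}
```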