Use defers for WaitGroup and Ticker stop
Commit b15d67bfac (parent 8130f9b1c1)
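The change in the server loops below is to register WaitGroup and Ticker cleanup with defer at the top of each goroutine, instead of calling ticker.Stop() and autopilotWaitGroup.Done() by hand in the shutdown branch of the select. A minimal, self-contained Go sketch of that pattern (not Consul's code; monitorLoop, shutdownCh, and the interval here are made up for illustration):

package main

import (
	"fmt"
	"sync"
	"time"
)

// monitorLoop stands in for a periodic loop such as autopilotLoop or
// serverHealthLoop: both cleanup steps are deferred up front, so every
// return path stops the ticker and releases the WaitGroup.
func monitorLoop(shutdownCh <-chan struct{}, wg *sync.WaitGroup) {
	defer wg.Done()

	ticker := time.NewTicker(50 * time.Millisecond)
	defer ticker.Stop()

	for {
		select {
		case <-shutdownCh:
			// No explicit ticker.Stop() or wg.Done() needed here.
			return
		case <-ticker.C:
			fmt.Println("tick: periodic work goes here")
		}
	}
}

func main() {
	shutdownCh := make(chan struct{})
	var wg sync.WaitGroup

	wg.Add(1)
	go monitorLoop(shutdownCh, &wg)

	time.Sleep(200 * time.Millisecond)
	close(shutdownCh)
	wg.Wait() // returns once the deferred wg.Done() has run
}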
@@ -3,10 +3,11 @@ package command
 import (
 	"flag"
 	"fmt"
-	"github.com/hashicorp/consul/api"
-	"github.com/hashicorp/consul/command/base"
 	"strings"
 	"time"
+
+	"github.com/hashicorp/consul/api"
+	"github.com/hashicorp/consul/command/base"
 )

 type OperatorAutopilotSetCommand struct {
@@ -3,11 +3,11 @@ package command
 import (
 	"strings"
 	"testing"
+	"time"

 	"github.com/hashicorp/consul/command/base"
 	"github.com/hashicorp/consul/consul/structs"
 	"github.com/mitchellh/cli"
-	"time"
 )

 func TestOperator_Autopilot_Set_Implements(t *testing.T) {
@@ -33,13 +33,15 @@ func (s *Server) stopAutopilot() {

 // autopilotLoop periodically looks for nonvoting servers to promote and dead servers to remove.
 func (s *Server) autopilotLoop() {
+	defer s.autopilotWaitGroup.Done()
+
 	// Monitor server health until shutdown
 	ticker := time.NewTicker(s.config.AutopilotInterval)
+	defer ticker.Stop()
+
 	for {
 		select {
 		case <-s.autopilotShutdownCh:
-			ticker.Stop()
-			s.autopilotWaitGroup.Done()
 			return
 		case <-ticker.C:
 			state := s.fsm.State()
@@ -93,7 +95,7 @@ func (s *Server) pruneDeadServers() error {
 	}

 	// Only do removals if a minority of servers will be affected
-	if len(failed) < peers/2 || (len(failed) == 1 && peers >= 3) {
+	if len(failed) < peers/2 {
 		for _, server := range failed {
 			s.logger.Printf("[INFO] consul: Attempting removal of failed server: %v", server)
 			go s.serfLAN.RemoveFailedNode(server)
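A note on the simplified guard above: peers/2 uses integer division, so removals proceed only while strictly fewer than half of the peers have failed. A quick illustrative check (the peer and failure counts are invented, not taken from the code or tests):

package main

import "fmt"

func main() {
	// How the minority guard behaves for a hypothetical 5-server cluster
	// as failures accumulate: peers/2 == 2, so only 0 or 1 failed servers
	// pass the check.
	peers := 5
	for failed := 0; failed <= 3; failed++ {
		fmt.Printf("failed=%d removalAllowed=%v\n", failed, failed < peers/2)
	}
}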
@@ -188,10 +190,11 @@ func (b *BasicAutopilot) PromoteNonVoters(autopilotConf *structs.AutopilotConfig
 func (s *Server) serverHealthLoop() {
 	// Monitor server health until shutdown
 	ticker := time.NewTicker(s.config.ServerHealthInterval)
+	defer ticker.Stop()
+
 	for {
 		select {
 		case <-s.shutdownCh:
-			ticker.Stop()
 			return
 		case <-ticker.C:
 			serverHealths := make(map[string]*structs.ServerHealth)
@@ -102,37 +102,41 @@ func TestAutopilot_CleanupDeadServerPeriodic(t *testing.T) {
 	defer os.RemoveAll(dir3)
 	defer s3.Shutdown()

-	servers := []*Server{s1, s2, s3}
+	dir4, s4 := testServerWithConfig(t, conf)
+	defer os.RemoveAll(dir4)
+	defer s4.Shutdown()
+
+	servers := []*Server{s1, s2, s3, s4}

 	// Join the servers to s1
 	addr := fmt.Sprintf("127.0.0.1:%d",
 		s1.config.SerfLANConfig.MemberlistConfig.BindPort)
-	if _, err := s2.JoinLAN([]string{addr}); err != nil {
-		t.Fatalf("err: %v", err)
-	}
-	if _, err := s3.JoinLAN([]string{addr}); err != nil {
-		t.Fatalf("err: %v", err)
-	}
+	for _, s := range servers[1:] {
+		if _, err := s.JoinLAN([]string{addr}); err != nil {
+			t.Fatalf("err: %v", err)
+		}
+	}

 	for _, s := range servers {
 		testutil.WaitForResult(func() (bool, error) {
 			peers, _ := s.numPeers()
-			return peers == 3, nil
+			return peers == 4, nil
 		}, func(err error) {
-			t.Fatalf("should have 3 peers")
+			t.Fatalf("should have 4 peers")
 		})
 	}

 	// Kill a non-leader server
-	s3.Shutdown()
+	s4.Shutdown()

 	// Should be removed from the peers automatically
-	for _, s := range []*Server{s1, s2} {
+	for _, s := range []*Server{s1, s2, s3} {
 		testutil.WaitForResult(func() (bool, error) {
 			peers, _ := s.numPeers()
-			return peers == 2, nil
+			return peers == 3, nil
 		}, func(err error) {
-			t.Fatalf("should have 2 peers")
+			t.Fatalf("should have 3 peers")
 		})
 	}
 }