open-consul/agent/consul/autopilot.go

package consul
import (
"context"
"fmt"
"strconv"
"sync"
"time"
"github.com/armon/go-metrics"
"github.com/hashicorp/consul/agent/consul/agent"
"github.com/hashicorp/consul/agent/consul/structs"
"github.com/hashicorp/go-version"
"github.com/hashicorp/raft"
"github.com/hashicorp/serf/serf"
)
// AutopilotPolicy is the interface for the Autopilot mechanism
type AutopilotPolicy interface {
// PromoteNonVoters defines the handling of non-voting servers
PromoteNonVoters(*structs.AutopilotConfig) error
}
func (s *Server) startAutopilot() {
s.autopilotShutdownCh = make(chan struct{})
s.autopilotWaitGroup = sync.WaitGroup{}
s.autopilotWaitGroup.Add(1)
go s.autopilotLoop()
}
func (s *Server) stopAutopilot() {
close(s.autopilotShutdownCh)
s.autopilotWaitGroup.Wait()
}
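// minAutopilotVersion is assumed here to be the minimum Consul server version
// required before Autopilot features are applied to the cluster.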
var minAutopilotVersion = version.Must(version.NewVersion("0.8.0"))
// autopilotLoop periodically looks for nonvoting servers to promote and dead servers to remove.
func (s *Server) autopilotLoop() {
defer s.autopilotWaitGroup.Done()
// Monitor server health until shutdown
ticker := time.NewTicker(s.config.AutopilotInterval)
defer ticker.Stop()
for {
select {
case <-s.autopilotShutdownCh:
return
case <-ticker.C:
autopilotConfig, ok := s.getOrCreateAutopilotConfig()
if !ok {
continue
}
if err := s.autopilotPolicy.PromoteNonVoters(autopilotConfig); err != nil {
s.logger.Printf("[ERR] autopilot: error checking for non-voters to promote: %s", err)
}
if err := s.pruneDeadServers(autopilotConfig); err != nil {
s.logger.Printf("[ERR] autopilot: error checking for dead servers to remove: %s", err)
}
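// autopilotRemoveDeadCh is signalled after a promotion so dead-server cleanup
// can run immediately instead of waiting for the next ticker interval.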
case <-s.autopilotRemoveDeadCh:
autopilotConfig, ok := s.getOrCreateAutopilotConfig()
if !ok {
continue
}
if err := s.pruneDeadServers(autopilotConfig); err != nil {
s.logger.Printf("[ERR] autopilot: error checking for dead servers to remove: %s", err)
}
}
}
}
// pruneDeadServers removes up to numPeers/2 failed servers
func (s *Server) pruneDeadServers(autopilotConfig *structs.AutopilotConfig) error {
// Find any failed servers
var failed []string
staleRaftServers := make(map[string]raft.Server)
if autopilotConfig.CleanupDeadServers {
future := s.raft.GetConfiguration()
if err := future.Error(); err != nil {
return err
}
for _, server := range future.Configuration().Servers {
staleRaftServers[string(server.Address)] = server
}
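// Walk the serf members: servers that still have a serf entry are dropped from
// the stale set, and members serf reports as failed are queued for removal.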
for _, member := range s.serfLAN.Members() {
valid, parts := agent.IsConsulServer(member)
if valid {
// Remove this server from the stale list; it has a serf entry
if _, ok := staleRaftServers[parts.Addr.String()]; ok {
delete(staleRaftServers, parts.Addr.String())
}
if member.Status == serf.StatusFailed {
failed = append(failed, member.Name)
}
}
}
}
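// Anything left in staleRaftServers is in the raft configuration but has no
// corresponding serf member.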
removalCount := len(failed) + len(staleRaftServers)
// Nothing to remove, return early
if removalCount == 0 {
return nil
}
peers, err := s.numPeers()
if err != nil {
return err
}
// Only do removals if a minority of servers will be affected
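// This guards against deregistering a large share of the cluster at once if
// serf wrongly reports many servers as failed (e.g. during a partition).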
if removalCount < peers/2 {
for _, server := range failed {
s.logger.Printf("[INFO] autopilot: Attempting removal of failed server: %v", server)
go s.serfLAN.RemoveFailedNode(server)
}
minRaftProtocol, err := ServerMinRaftProtocol(s.serfLAN.Members())
if err != nil {
return err
}
for _, raftServer := range staleRaftServers {
var future raft.Future
if minRaftProtocol >= 2 {
s.logger.Printf("[INFO] autopilot: Attempting removal of stale raft server : %v", raftServer.ID)
future = s.raft.RemoveServer(raftServer.ID, 0, 0)
} else {
s.logger.Printf("[INFO] autopilot: Attempting removal of stale raft server : %v", raftServer.ID)
future = s.raft.RemovePeer(raftServer.Address)
}
if err := future.Error(); err != nil {
return err
}
}
} else {
s.logger.Printf("[DEBUG] autopilot: Failed to remove dead servers: too many dead servers: %d/%d", removalCount, peers)
}
return nil
}
// BasicAutopilot defines a policy for promoting non-voting servers in a way
// that maintains an odd-numbered voter count.
type BasicAutopilot struct {
server *Server
}
// PromoteNonVoters promotes eligible non-voting servers to voters.
func (b *BasicAutopilot) PromoteNonVoters(autopilotConfig *structs.AutopilotConfig) error {
minRaftProtocol, err := ServerMinRaftProtocol(b.server.LANMembers())
if err != nil {
return fmt.Errorf("error getting server raft protocol versions: %s", err)
}
// If we don't meet the minimum version for non-voter features, bail early
if minRaftProtocol < 3 {
return nil
}
future := b.server.raft.GetConfiguration()
if err := future.Error(); err != nil {
return fmt.Errorf("failed to get raft configuration: %v", err)
}
// Find any non-voters eligible for promotion
var promotions []raft.Server
voterCount := 0
for _, server := range future.Configuration().Servers {
// If this server has been stable and passing for long enough, promote it to a voter
if !isVoter(server.Suffrage) {
health := b.server.getServerHealth(string(server.ID))
if health.IsStable(time.Now(), autopilotConfig) {
promotions = append(promotions, server)
}
} else {
voterCount++
}
}
if _, err := b.server.handlePromotions(voterCount, promotions); err != nil {
return err
}
return nil
}
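// handlePromotions adds the given non-voters as raft voters while keeping the
// voter count odd, and reports whether any promotion was made.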
func (s *Server) handlePromotions(voterCount int, promotions []raft.Server) (bool, error) {
if len(promotions) == 0 {
return false, nil
}
// If there's currently an even number of servers, we can promote the first server in the list
// to get to an odd-sized quorum
newServers := false
if voterCount%2 == 0 {
addFuture := s.raft.AddVoter(promotions[0].ID, promotions[0].Address, 0, 0)
if err := addFuture.Error(); err != nil {
return newServers, fmt.Errorf("failed to add raft peer: %v", err)
}
promotions = promotions[1:]
newServers = true
}
// Promote remaining servers in twos to maintain an odd quorum size
for i := 0; i < len(promotions)-1; i += 2 {
addFirst := s.raft.AddVoter(promotions[i].ID, promotions[i].Address, 0, 0)
if err := addFirst.Error(); err != nil {
return newServers, fmt.Errorf("failed to add raft peer: %v", err)
}
addSecond := s.raft.AddVoter(promotions[i+1].ID, promotions[i+1].Address, 0, 0)
if err := addSecond.Error(); err != nil {
return newServers, fmt.Errorf("failed to add raft peer: %v", err)
}
newServers = true
}
// If we added a new server, trigger a check to remove dead servers
if newServers {
select {
case s.autopilotRemoveDeadCh <- struct{}{}:
default:
}
}
return newServers, nil
}
// serverHealthLoop monitors the health of the servers in the cluster
func (s *Server) serverHealthLoop() {
// Monitor server health until shutdown
ticker := time.NewTicker(s.config.ServerHealthInterval)
defer ticker.Stop()
for {
select {
case <-s.shutdownCh:
return
case <-ticker.C:
if err := s.updateClusterHealth(); err != nil {
s.logger.Printf("[ERR] autopilot: error updating cluster health: %s", err)
}
}
}
}
// updateClusterHealth fetches the Raft stats of the other servers and updates
// s.clusterHealth based on the configured Autopilot thresholds
func (s *Server) updateClusterHealth() error {
// Don't do anything if the min Raft version is too low
minRaftProtocol, err := ServerMinRaftProtocol(s.LANMembers())
if err != nil {
return fmt.Errorf("error getting server raft protocol versions: %s", err)
}
if minRaftProtocol < 3 {
return nil
}
state := s.fsm.State()
_, autopilotConf, err := state.AutopilotConfig()
if err != nil {
return fmt.Errorf("error retrieving autopilot config: %s", err)
}
// Bail early if autopilot config hasn't been initialized yet
if autopilotConf == nil {
return nil
}
// Get the serf members which are Consul servers
serverMap := make(map[string]*agent.Server)
for _, member := range s.LANMembers() {
if member.Status == serf.StatusLeft {
continue
}
valid, parts := agent.IsConsulServer(member)
if valid {
serverMap[parts.ID] = parts
}
}
future := s.raft.GetConfiguration()
if err := future.Error(); err != nil {
return fmt.Errorf("error getting Raft configuration %s", err)
}
servers := future.Configuration().Servers
// Fetch the health for each of the servers in parallel so we get as
// consistent a sample as possible. We capture the leader's index
// here as well so it roughly lines up with the same point in time.
targetLastIndex := s.raft.LastIndex()
var fetchList []*agent.Server
for _, server := range servers {
if parts, ok := serverMap[string(server.ID)]; ok {
fetchList = append(fetchList, parts)
}
}
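// Give the parallel stats fetch at most half the health interval so it
// completes before the next update cycle.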
d := time.Now().Add(s.config.ServerHealthInterval / 2)
ctx, cancel := context.WithDeadline(context.Background(), d)
defer cancel()
fetchedStats := s.statsFetcher.Fetch(ctx, fetchList)
// Build a current list of server healths
leader := s.raft.Leader()
var clusterHealth structs.OperatorHealthReply
voterCount := 0
healthyCount := 0
healthyVoterCount := 0
for _, server := range servers {
health := structs.ServerHealth{
ID: string(server.ID),
Address: string(server.Address),
Leader: server.Address == leader,
LastContact: -1,
Voter: server.Suffrage == raft.Voter,
}
parts, ok := serverMap[string(server.ID)]
if ok {
health.Name = parts.Name
health.SerfStatus = parts.Status
health.Version = parts.Build.String()
if stats, ok := fetchedStats[string(server.ID)]; ok {
if err := s.updateServerHealth(&health, parts, stats, autopilotConf, targetLastIndex); err != nil {
s.logger.Printf("[WARN] autopilot: error updating server health: %s", err)
}
}
} else {
health.SerfStatus = serf.StatusNone
}
if health.Voter {
voterCount++
}
if health.Healthy {
healthyCount++
if health.Voter {
healthyVoterCount++
}
}
clusterHealth.Servers = append(clusterHealth.Servers, health)
}
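// The cluster as a whole is healthy only if every server in the raft configuration is healthy.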
clusterHealth.Healthy = healthyCount == len(servers)
// If we have extra healthy voters, update FailureTolerance
requiredQuorum := voterCount/2 + 1
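// A strict majority of voters is needed for quorum; any healthy voters beyond
// that count toward the failure tolerance.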
if healthyVoterCount > requiredQuorum {
clusterHealth.FailureTolerance = healthyVoterCount - requiredQuorum
}
// Heartbeat a metric for monitoring if we're the leader
if s.IsLeader() {
metrics.SetGauge([]string{"consul", "autopilot", "failure_tolerance"}, float32(clusterHealth.FailureTolerance))
if clusterHealth.Healthy {
metrics.SetGauge([]string{"consul", "autopilot", "healthy"}, 1)
} else {
metrics.SetGauge([]string{"consul", "autopilot", "healthy"}, 0)
}
}
s.clusterHealthLock.Lock()
s.clusterHealth = clusterHealth
s.clusterHealthLock.Unlock()
return nil
}
// updateServerHealth computes the resulting health of the server based on its
// fetched stats and the state of the leader.
func (s *Server) updateServerHealth(health *structs.ServerHealth,
server *agent.Server, stats *structs.ServerStats,
autopilotConf *structs.AutopilotConfig, targetLastIndex uint64) error {
health.LastTerm = stats.LastTerm
health.LastIndex = stats.LastIndex
if stats.LastContact != "never" {
var err error
health.LastContact, err = time.ParseDuration(stats.LastContact)
if err != nil {
return fmt.Errorf("error parsing last_contact duration: %s", err)
}
}
lastTerm, err := strconv.ParseUint(s.raft.Stats()["last_log_term"], 10, 64)
if err != nil {
return fmt.Errorf("error parsing last_log_term: %s", err)
}
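// Health is judged from the server's reported term and index relative to the
// local last log term, the captured target index, and the autopilot thresholds.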
health.Healthy = health.IsHealthy(lastTerm, targetLastIndex, autopilotConf)
// If this is a new server or the health changed, reset StableSince
lastHealth := s.getServerHealth(server.ID)
if lastHealth == nil || lastHealth.Healthy != health.Healthy {
health.StableSince = time.Now()
} else {
health.StableSince = lastHealth.StableSince
}
return nil
}
func (s *Server) getClusterHealth() structs.OperatorHealthReply {
s.clusterHealthLock.RLock()
defer s.clusterHealthLock.RUnlock()
return s.clusterHealth
}
func (s *Server) getServerHealth(id string) *structs.ServerHealth {
s.clusterHealthLock.RLock()
defer s.clusterHealthLock.RUnlock()
for _, health := range s.clusterHealth.Servers {
if health.ID == id {
return &health
}
}
return nil
}
func isVoter(suffrage raft.ServerSuffrage) bool {
switch suffrage {
case raft.Voter, raft.Staging:
return true
default:
return false
}
}