consul: Minor cleanups

This commit is contained in:
Armon Dadgar 2014-06-18 16:15:28 -07:00
parent 45337371dc
commit 406d19f483
2 changed files with 58 additions and 35 deletions

View File

@ -146,48 +146,71 @@ func (s *Server) nodeJoin(me serf.MemberEvent, wan bool) {
s.remoteLock.Unlock() s.remoteLock.Unlock()
// Add to the local list as well // Add to the local list as well
if !wan { if !wan && parts.Datacenter == s.config.Datacenter {
s.localLock.Lock() s.localLock.Lock()
s.localConsuls[parts.Addr.String()] = parts s.localConsuls[parts.Addr.String()] = parts
s.localLock.Unlock() s.localLock.Unlock()
} }
// If we're still expecting, and they are too, check servers. // If we're still expecting to bootstrap, we may need to handle this
if s.config.Expect != 0 && parts.Expect != 0 { if s.config.Expect != 0 {
s.maybeBootstrap()
}
}
}
// maybeBootstrap is used to handle bootstrapping when a new consul server joins
func (s *Server) maybeBootstrap() {
index, err := s.raftStore.LastIndex() index, err := s.raftStore.LastIndex()
if err == nil && index == 0 { if err != nil {
s.logger.Printf("[ERR] consul: failed to read last raft index: %v", err)
return
}
// Bootstrap can only be done if there are no committed logs,
// remove our expectations of bootstrapping
if index != 0 {
s.config.Expect = 0
return
}
// Scan for all the known servers
members := s.serfLAN.Members() members := s.serfLAN.Members()
addrs := make([]net.Addr, 0) addrs := make([]net.Addr, 0)
for _, member := range members { for _, member := range members {
valid, p := isConsulServer(member) valid, p := isConsulServer(member)
if valid && p.Datacenter == parts.Datacenter { if !valid {
if p.Expect != parts.Expect { continue
s.logger.Printf("[ERR] consul: '%v' and '%v' have different expect values. All expect nodes should have the same value, will never leave expect mode", m.Name, member.Name) }
if p.Datacenter != s.config.Datacenter {
s.logger.Printf("[ERR] consul: Member %v has a conflicting datacenter, ignoring", member)
continue
}
if p.Expect != 0 && p.Expect != s.config.Expect {
s.logger.Printf("[ERR] consul: Member %v has a conflicting expect value. All nodes should expect the same number.", member)
return return
} else { }
if p.Bootstrap {
s.logger.Printf("[ERR] consul: Member %v has bootstrap mode. Expect disabled.", member)
return
}
addrs = append(addrs, &net.TCPAddr{IP: member.Addr, Port: p.Port}) addrs = append(addrs, &net.TCPAddr{IP: member.Addr, Port: p.Port})
} }
}
// Skip if we haven't met the minimum expect count
if len(addrs) < s.config.Expect {
return
} }
if len(addrs) >= s.config.Expect { // Update the peer set
// we have enough nodes, set peers. s.logger.Printf("[INFO] consul: Attempting bootstrap with nodes: %v", addrs)
if err := s.raft.SetPeers(addrs).Error(); err != nil {
s.logger.Printf("[ERR] consul: failed to bootstrap peers: %v", err)
}
future := s.raft.SetPeers(addrs) // Bootstrapping complete, don't enter this again
if err := future.Error(); err != nil {
s.logger.Printf("[ERR] consul: failed to leave expect mode and set peers: %v", err)
} else {
// we've left expect mode, don't enter this again
s.config.Expect = 0 s.config.Expect = 0
} }
}
} else if err != nil {
s.logger.Printf("[ERR] consul: error retrieving index: %v", err)
}
}
}
}
// nodeFailed is used to handle fail events on both the serf clusters // nodeFailed is used to handle fail events on both the serf clusters
func (s *Server) nodeFailed(me serf.MemberEvent, wan bool) { func (s *Server) nodeFailed(me serf.MemberEvent, wan bool) {

View File

@ -329,7 +329,7 @@ func TestServer_Expect(t *testing.T) {
defer os.RemoveAll(dir2) defer os.RemoveAll(dir2)
defer s2.Shutdown() defer s2.Shutdown()
dir3, s3 := testServerDCExpect(t, "dc1", 3) dir3, s3 := testServerDCExpect(t, "dc1", 0)
defer os.RemoveAll(dir3) defer os.RemoveAll(dir3)
defer s3.Shutdown() defer s3.Shutdown()