2016-03-29 22:58:15 +00:00
|
|
|
package servers
|
2016-02-24 18:55:04 +00:00
|
|
|
|
|
|
|
import (
|
|
|
|
"bytes"
|
2016-02-25 16:05:15 +00:00
|
|
|
"fmt"
|
2016-02-24 18:55:04 +00:00
|
|
|
"log"
|
2016-03-29 09:37:35 +00:00
|
|
|
"math/rand"
|
2017-06-15 13:16:16 +00:00
|
|
|
"net"
|
2016-02-25 16:05:15 +00:00
|
|
|
"os"
|
2016-02-24 18:55:04 +00:00
|
|
|
"testing"
|
2016-02-25 16:05:15 +00:00
|
|
|
"time"
|
2016-02-24 18:55:04 +00:00
|
|
|
|
pkg refactor
command/agent/* -> agent/*
command/consul/* -> agent/consul/*
command/agent/command{,_test}.go -> command/agent{,_test}.go
command/base/command.go -> command/base.go
command/base/* -> command/*
commands.go -> command/commands.go
The script which did the refactor is:
(
cd $GOPATH/src/github.com/hashicorp/consul
git mv command/agent/command.go command/agent.go
git mv command/agent/command_test.go command/agent_test.go
git mv command/agent/flag_slice_value{,_test}.go command/
git mv command/agent .
git mv command/base/command.go command/base.go
git mv command/base/config_util{,_test}.go command/
git mv commands.go command/
git mv consul agent
rmdir command/base/
gsed -i -e 's|package agent|package command|' command/agent{,_test}.go
gsed -i -e 's|package agent|package command|' command/flag_slice_value{,_test}.go
gsed -i -e 's|package base|package command|' command/base.go command/config_util{,_test}.go
gsed -i -e 's|package main|package command|' command/commands.go
gsed -i -e 's|base.Command|BaseCommand|' command/commands.go
gsed -i -e 's|agent.Command|AgentCommand|' command/commands.go
gsed -i -e 's|\tCommand:|\tBaseCommand:|' command/commands.go
gsed -i -e 's|base\.||' command/commands.go
gsed -i -e 's|command\.||' command/commands.go
gsed -i -e 's|command|c|' main.go
gsed -i -e 's|range Commands|range command.Commands|' main.go
gsed -i -e 's|Commands: Commands|Commands: command.Commands|' main.go
gsed -i -e 's|base\.BoolValue|BoolValue|' command/operator_autopilot_set.go
gsed -i -e 's|base\.DurationValue|DurationValue|' command/operator_autopilot_set.go
gsed -i -e 's|base\.StringValue|StringValue|' command/operator_autopilot_set.go
gsed -i -e 's|base\.UintValue|UintValue|' command/operator_autopilot_set.go
gsed -i -e 's|\bCommand\b|BaseCommand|' command/base.go
gsed -i -e 's|BaseCommand Options|Command Options|' command/base.go
gsed -i -e 's|base.Command|BaseCommand|' command/*.go
gsed -i -e 's|c\.Command|c.BaseCommand|g' command/*.go
gsed -i -e 's|\tCommand:|\tBaseCommand:|' command/*_test.go
gsed -i -e 's|base\.||' command/*_test.go
gsed -i -e 's|\bCommand\b|AgentCommand|' command/agent{,_test}.go
gsed -i -e 's|cmd.AgentCommand|cmd.BaseCommand|' command/agent.go
gsed -i -e 's|cli.AgentCommand = new(Command)|cli.Command = new(AgentCommand)|' command/agent_test.go
gsed -i -e 's|exec.AgentCommand|exec.Command|' command/agent_test.go
gsed -i -e 's|exec.BaseCommand|exec.Command|' command/agent_test.go
gsed -i -e 's|NewTestAgent|agent.NewTestAgent|' command/agent_test.go
gsed -i -e 's|= TestConfig|= agent.TestConfig|' command/agent_test.go
gsed -i -e 's|: RetryJoin|: agent.RetryJoin|' command/agent_test.go
gsed -i -e 's|\.\./\.\./|../|' command/config_util_test.go
gsed -i -e 's|\bverifyUniqueListeners|VerifyUniqueListeners|' agent/config{,_test}.go command/agent.go
gsed -i -e 's|\bserfLANKeyring\b|SerfLANKeyring|g' agent/{agent,keyring,testagent}.go command/agent.go
gsed -i -e 's|\bserfWANKeyring\b|SerfWANKeyring|g' agent/{agent,keyring,testagent}.go command/agent.go
gsed -i -e 's|\bNewAgent\b|agent.New|g' command/agent{,_test}.go
gsed -i -e 's|\bNewAgent|New|' agent/{acl_test,agent,testagent}.go
gsed -i -e 's|\bAgent\b|agent.&|g' command/agent{,_test}.go
gsed -i -e 's|\bBool\b|agent.&|g' command/agent{,_test}.go
gsed -i -e 's|\bConfig\b|agent.&|g' command/agent{,_test}.go
gsed -i -e 's|\bDefaultConfig\b|agent.&|g' command/agent{,_test}.go
gsed -i -e 's|\bDevConfig\b|agent.&|g' command/agent{,_test}.go
gsed -i -e 's|\bMergeConfig\b|agent.&|g' command/agent{,_test}.go
gsed -i -e 's|\bReadConfigPaths\b|agent.&|g' command/agent{,_test}.go
gsed -i -e 's|\bParseMetaPair\b|agent.&|g' command/agent{,_test}.go
gsed -i -e 's|\bSerfLANKeyring\b|agent.&|g' command/agent{,_test}.go
gsed -i -e 's|\bSerfWANKeyring\b|agent.&|g' command/agent{,_test}.go
gsed -i -e 's|circonus\.agent|circonus|g' command/agent{,_test}.go
gsed -i -e 's|logger\.agent|logger|g' command/agent{,_test}.go
gsed -i -e 's|metrics\.agent|metrics|g' command/agent{,_test}.go
gsed -i -e 's|// agent.Agent|// agent|' command/agent{,_test}.go
gsed -i -e 's|a\.agent\.Config|a.Config|' command/agent{,_test}.go
gsed -i -e 's|agent\.AppendSliceValue|AppendSliceValue|' command/{configtest,validate}.go
gsed -i -e 's|consul/consul|agent/consul|' GNUmakefile
gsed -i -e 's|\.\./test|../../test|' agent/consul/server_test.go
# fix imports
f=$(grep -rl 'github.com/hashicorp/consul/command/agent' * | grep '\.go')
gsed -i -e 's|github.com/hashicorp/consul/command/agent|github.com/hashicorp/consul/agent|' $f
goimports -w $f
f=$(grep -rl 'github.com/hashicorp/consul/consul' * | grep '\.go')
gsed -i -e 's|github.com/hashicorp/consul/consul|github.com/hashicorp/consul/agent/consul|' $f
goimports -w $f
goimports -w command/*.go main.go
)
2017-06-09 22:28:28 +00:00
|
|
|
"github.com/hashicorp/consul/agent/consul/agent"
|
2016-02-24 18:55:04 +00:00
|
|
|
)
|
|
|
|
|
|
|
|
// Package-level logger plumbing shared by the tests in this file.
var (
	// localLogger writes into localLogBuffer; it is handed out by
	// GetBufferedLogger so test log noise is captured rather than printed.
	localLogger *log.Logger
	// localLogBuffer accumulates everything written through localLogger.
	localLogBuffer *bytes.Buffer
)
|
|
|
|
|
|
|
|
func init() {
|
|
|
|
localLogBuffer = new(bytes.Buffer)
|
|
|
|
localLogger = log.New(localLogBuffer, "", 0)
|
|
|
|
}
|
|
|
|
|
|
|
|
func GetBufferedLogger() *log.Logger {
|
|
|
|
return localLogger
|
|
|
|
}
|
|
|
|
|
2016-03-27 02:28:13 +00:00
|
|
|
// fauxConnPool is a test stand-in for the real connection pool whose
// Ping fails pseudo-randomly at the configured rate.
type fauxConnPool struct {
	// failPct between 0.0 and 1.0 == pct of time a Ping should fail
	failPct float64
}
|
|
|
|
|
2017-06-15 13:16:16 +00:00
|
|
|
func (cp *fauxConnPool) Ping(string, net.Addr, int, bool) (bool, error) {
|
2016-03-29 09:37:35 +00:00
|
|
|
var success bool
|
|
|
|
successProb := rand.Float64()
|
|
|
|
if successProb > cp.failPct {
|
|
|
|
success = true
|
|
|
|
}
|
|
|
|
return success, nil
|
2016-03-27 02:28:13 +00:00
|
|
|
}
|
|
|
|
|
2016-02-24 23:04:04 +00:00
|
|
|
// fauxSerf is a minimal stand-in for the Serf cluster-info provider;
// it reports a fixed node count.
type fauxSerf struct {
	// numNodes is returned verbatim by NumNodes.
	numNodes int
}
|
|
|
|
|
|
|
|
func (s *fauxSerf) NumNodes() int {
|
2016-02-25 16:05:15 +00:00
|
|
|
return s.numNodes
|
2016-02-24 23:04:04 +00:00
|
|
|
}
|
|
|
|
|
2016-03-29 22:58:15 +00:00
|
|
|
func testManager() (m *Manager) {
|
2016-02-24 18:55:04 +00:00
|
|
|
logger := GetBufferedLogger()
|
|
|
|
shutdownCh := make(chan struct{})
|
2016-03-29 22:58:15 +00:00
|
|
|
m = New(logger, shutdownCh, &fauxSerf{numNodes: 16384}, &fauxConnPool{})
|
|
|
|
return m
|
2016-02-24 18:55:04 +00:00
|
|
|
}
|
|
|
|
|
2016-03-29 22:58:15 +00:00
|
|
|
func testManagerFailProb(failPct float64) (m *Manager) {
|
2016-03-29 09:37:35 +00:00
|
|
|
logger := GetBufferedLogger()
|
|
|
|
logger = log.New(os.Stderr, "", log.LstdFlags)
|
|
|
|
shutdownCh := make(chan struct{})
|
2016-03-29 22:58:15 +00:00
|
|
|
m = New(logger, shutdownCh, &fauxSerf{}, &fauxConnPool{failPct: failPct})
|
|
|
|
return m
|
2016-03-29 09:37:35 +00:00
|
|
|
}
|
|
|
|
|
2016-03-30 00:39:19 +00:00
|
|
|
// func (l *serverList) cycleServer() (servers []*agent.Server) {
|
2016-03-29 22:58:15 +00:00
|
|
|
func TestManagerInternal_cycleServer(t *testing.T) {
|
|
|
|
m := testManager()
|
2016-03-29 23:17:16 +00:00
|
|
|
l := m.getServerList()
|
2016-02-24 18:55:04 +00:00
|
|
|
|
2016-03-30 00:39:19 +00:00
|
|
|
server0 := &agent.Server{Name: "server1"}
|
|
|
|
server1 := &agent.Server{Name: "server2"}
|
|
|
|
server2 := &agent.Server{Name: "server3"}
|
2016-03-29 23:17:16 +00:00
|
|
|
l.servers = append(l.servers, server0, server1, server2)
|
|
|
|
m.saveServerList(l)
|
2016-02-24 18:55:04 +00:00
|
|
|
|
2016-03-29 23:17:16 +00:00
|
|
|
l = m.getServerList()
|
|
|
|
if len(l.servers) != 3 {
|
|
|
|
t.Fatalf("server length incorrect: %d/3", len(l.servers))
|
2016-02-24 18:55:04 +00:00
|
|
|
}
|
2016-03-29 23:17:16 +00:00
|
|
|
if l.servers[0] != server0 &&
|
|
|
|
l.servers[1] != server1 &&
|
|
|
|
l.servers[2] != server2 {
|
2016-02-24 18:55:04 +00:00
|
|
|
t.Fatalf("initial server ordering not correct")
|
|
|
|
}
|
|
|
|
|
2016-03-29 23:17:16 +00:00
|
|
|
l.servers = l.cycleServer()
|
|
|
|
if len(l.servers) != 3 {
|
|
|
|
t.Fatalf("server length incorrect: %d/3", len(l.servers))
|
2016-02-24 18:55:04 +00:00
|
|
|
}
|
2016-03-29 23:17:16 +00:00
|
|
|
if l.servers[0] != server1 &&
|
|
|
|
l.servers[1] != server2 &&
|
|
|
|
l.servers[2] != server0 {
|
2016-02-24 18:55:04 +00:00
|
|
|
t.Fatalf("server ordering after one cycle not correct")
|
|
|
|
}
|
|
|
|
|
2016-03-29 23:17:16 +00:00
|
|
|
l.servers = l.cycleServer()
|
|
|
|
if len(l.servers) != 3 {
|
|
|
|
t.Fatalf("server length incorrect: %d/3", len(l.servers))
|
2016-02-24 18:55:04 +00:00
|
|
|
}
|
2016-03-29 23:17:16 +00:00
|
|
|
if l.servers[0] != server2 &&
|
|
|
|
l.servers[1] != server0 &&
|
|
|
|
l.servers[2] != server1 {
|
2016-02-24 18:55:04 +00:00
|
|
|
t.Fatalf("server ordering after two cycles not correct")
|
|
|
|
}
|
|
|
|
|
2016-03-29 23:17:16 +00:00
|
|
|
l.servers = l.cycleServer()
|
|
|
|
if len(l.servers) != 3 {
|
|
|
|
t.Fatalf("server length incorrect: %d/3", len(l.servers))
|
2016-02-24 18:55:04 +00:00
|
|
|
}
|
2016-03-29 23:17:16 +00:00
|
|
|
if l.servers[0] != server0 &&
|
|
|
|
l.servers[1] != server1 &&
|
|
|
|
l.servers[2] != server2 {
|
2016-02-24 18:55:04 +00:00
|
|
|
t.Fatalf("server ordering after three cycles not correct")
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2016-03-29 23:17:16 +00:00
|
|
|
// func (m *Manager) getServerList() serverList {
|
|
|
|
func TestManagerInternal_getServerList(t *testing.T) {
|
2016-03-29 22:58:15 +00:00
|
|
|
m := testManager()
|
2016-03-29 23:17:16 +00:00
|
|
|
l := m.getServerList()
|
|
|
|
if l.servers == nil {
|
|
|
|
t.Fatalf("serverList.servers nil")
|
2016-02-24 18:55:04 +00:00
|
|
|
}
|
|
|
|
|
2016-03-29 23:17:16 +00:00
|
|
|
if len(l.servers) != 0 {
|
|
|
|
t.Fatalf("serverList.servers length not zero")
|
2016-02-24 18:55:04 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2016-03-29 22:58:15 +00:00
|
|
|
// func New(logger *log.Logger, shutdownCh chan struct{}, clusterInfo ConsulClusterInfo) (m *Manager) {
|
|
|
|
func TestManagerInternal_New(t *testing.T) {
|
|
|
|
m := testManager()
|
|
|
|
if m == nil {
|
|
|
|
t.Fatalf("Manager nil")
|
2016-02-24 18:55:04 +00:00
|
|
|
}
|
|
|
|
|
2016-03-29 22:58:15 +00:00
|
|
|
if m.clusterInfo == nil {
|
|
|
|
t.Fatalf("Manager.clusterInfo nil")
|
2016-02-25 16:05:15 +00:00
|
|
|
}
|
|
|
|
|
2016-03-29 22:58:15 +00:00
|
|
|
if m.logger == nil {
|
|
|
|
t.Fatalf("Manager.logger nil")
|
2016-02-24 18:55:04 +00:00
|
|
|
}
|
|
|
|
|
2016-03-29 22:58:15 +00:00
|
|
|
if m.shutdownCh == nil {
|
|
|
|
t.Fatalf("Manager.shutdownCh nil")
|
2016-02-24 18:55:04 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2016-03-29 23:17:16 +00:00
|
|
|
// func (m *Manager) reconcileServerList(l *serverList) bool {
|
2016-03-29 22:58:15 +00:00
|
|
|
func TestManagerInternal_reconcileServerList(t *testing.T) {
|
2016-03-29 09:37:35 +00:00
|
|
|
tests := []int{0, 1, 2, 3, 4, 5, 10, 100}
|
|
|
|
for _, n := range tests {
|
|
|
|
ok, err := test_reconcileServerList(n)
|
|
|
|
if !ok {
|
|
|
|
t.Errorf("Expected %d to pass: %v", n, err)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
func test_reconcileServerList(maxServers int) (bool, error) {
|
|
|
|
// Build a server list, reconcile, verify the missing servers are
|
|
|
|
// missing, the added have been added, and the original server is
|
|
|
|
// present.
|
|
|
|
const failPct = 0.5
|
2016-03-29 22:58:15 +00:00
|
|
|
m := testManagerFailProb(failPct)
|
2016-03-29 09:37:35 +00:00
|
|
|
|
2016-03-30 00:39:19 +00:00
|
|
|
var failedServers, healthyServers []*agent.Server
|
2016-03-29 09:37:35 +00:00
|
|
|
for i := 0; i < maxServers; i++ {
|
|
|
|
nodeName := fmt.Sprintf("s%02d", i)
|
|
|
|
|
2016-03-30 00:39:19 +00:00
|
|
|
node := &agent.Server{Name: nodeName}
|
2016-03-29 22:58:15 +00:00
|
|
|
// Add 66% of servers to Manager
|
2016-03-29 09:37:35 +00:00
|
|
|
if rand.Float64() > 0.33 {
|
2016-03-29 22:58:15 +00:00
|
|
|
m.AddServer(node)
|
2016-03-29 09:37:35 +00:00
|
|
|
|
|
|
|
// Of healthy servers, (ab)use connPoolPinger to
|
|
|
|
// failPct of the servers for the reconcile. This
|
|
|
|
// allows for the selected server to no longer be
|
|
|
|
// healthy for the reconcile below.
|
2017-06-15 13:16:16 +00:00
|
|
|
if ok, _ := m.connPoolPinger.Ping(node.Datacenter, node.Addr, node.Version, node.UseTLS); ok {
|
2016-03-29 09:37:35 +00:00
|
|
|
// Will still be present
|
|
|
|
healthyServers = append(healthyServers, node)
|
|
|
|
} else {
|
|
|
|
// Will be missing
|
|
|
|
failedServers = append(failedServers, node)
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
// Will be added from the call to reconcile
|
|
|
|
healthyServers = append(healthyServers, node)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2016-03-29 22:58:15 +00:00
|
|
|
// Randomize Manager's server list
|
|
|
|
m.RebalanceServers()
|
|
|
|
selectedServer := m.FindServer()
|
2016-03-29 09:37:35 +00:00
|
|
|
|
|
|
|
var selectedServerFailed bool
|
|
|
|
for _, s := range failedServers {
|
|
|
|
if selectedServer.Key().Equal(s.Key()) {
|
|
|
|
selectedServerFailed = true
|
|
|
|
break
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2016-03-29 22:58:15 +00:00
|
|
|
// Update Manager's server list to be "healthy" based on Serf.
|
2016-03-29 09:37:35 +00:00
|
|
|
// Reconcile this with origServers, which is shuffled and has a live
|
|
|
|
// connection, but possibly out of date.
|
2016-03-29 23:17:16 +00:00
|
|
|
origServers := m.getServerList()
|
|
|
|
m.saveServerList(serverList{servers: healthyServers})
|
2016-03-29 09:37:35 +00:00
|
|
|
|
|
|
|
// This should always succeed with non-zero server lists
|
2016-03-29 22:58:15 +00:00
|
|
|
if !selectedServerFailed && !m.reconcileServerList(&origServers) &&
|
2016-03-29 23:17:16 +00:00
|
|
|
len(m.getServerList().servers) != 0 &&
|
2016-03-29 09:37:35 +00:00
|
|
|
len(origServers.servers) != 0 {
|
|
|
|
// If the random gods are unfavorable and we end up with zero
|
|
|
|
// length lists, expect things to fail and retry the test.
|
|
|
|
return false, fmt.Errorf("Expected reconcile to succeed: %v %d %d",
|
|
|
|
selectedServerFailed,
|
2016-03-29 23:17:16 +00:00
|
|
|
len(m.getServerList().servers),
|
2016-03-29 09:37:35 +00:00
|
|
|
len(origServers.servers))
|
|
|
|
}
|
|
|
|
|
|
|
|
// If we have zero-length server lists, test succeeded in degenerate
|
|
|
|
// case.
|
2016-03-29 23:17:16 +00:00
|
|
|
if len(m.getServerList().servers) == 0 &&
|
2016-03-29 09:37:35 +00:00
|
|
|
len(origServers.servers) == 0 {
|
|
|
|
// Failed as expected w/ zero length list
|
|
|
|
return true, nil
|
|
|
|
}
|
|
|
|
|
2016-03-30 00:39:19 +00:00
|
|
|
resultingServerMap := make(map[agent.Key]bool)
|
2016-03-29 23:17:16 +00:00
|
|
|
for _, s := range m.getServerList().servers {
|
2016-03-29 09:37:35 +00:00
|
|
|
resultingServerMap[*s.Key()] = true
|
|
|
|
}
|
|
|
|
|
2016-03-29 22:58:15 +00:00
|
|
|
// Test to make sure no failed servers are in the Manager's
|
2016-03-29 23:17:16 +00:00
|
|
|
// list. Error if there are any failedServers in l.servers
|
2016-03-29 09:37:35 +00:00
|
|
|
for _, s := range failedServers {
|
|
|
|
_, ok := resultingServerMap[*s.Key()]
|
|
|
|
if ok {
|
|
|
|
return false, fmt.Errorf("Found failed server %v in merged list %v", s, resultingServerMap)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// Test to make sure all healthy servers are in the healthy list.
|
2016-03-29 23:17:16 +00:00
|
|
|
if len(healthyServers) != len(m.getServerList().servers) {
|
2016-03-29 09:37:35 +00:00
|
|
|
return false, fmt.Errorf("Expected healthy map and servers to match: %d/%d", len(healthyServers), len(healthyServers))
|
|
|
|
}
|
|
|
|
|
|
|
|
// Test to make sure all healthy servers are in the resultingServerMap list.
|
|
|
|
for _, s := range healthyServers {
|
|
|
|
_, ok := resultingServerMap[*s.Key()]
|
|
|
|
if !ok {
|
|
|
|
return false, fmt.Errorf("Server %v missing from healthy map after merged lists", s)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return true, nil
|
|
|
|
}
|
|
|
|
|
2016-03-29 23:17:16 +00:00
|
|
|
// func (l *serverList) refreshServerRebalanceTimer() {
|
2016-03-29 22:58:15 +00:00
|
|
|
// TestManagerInternal_refreshServerRebalanceTimer checks that the
// rebalance interval computed for a cluster never falls below the
// minimum expected for its node/server ratio. Entries marked
// "partitioned" model clusters with too few servers for the node
// count, which should back off with longer intervals.
func TestManagerInternal_refreshServerRebalanceTimer(t *testing.T) {
	type clusterSizes struct {
		numNodes     int           // total Serf-visible nodes
		numServers   int           // servers registered with the Manager
		minRebalance time.Duration // floor for the computed rebalance interval
	}
	clusters := []clusterSizes{
		{0, 3, 2 * time.Minute},
		{1, 0, 2 * time.Minute}, // partitioned cluster
		{1, 3, 2 * time.Minute},
		{2, 3, 2 * time.Minute},
		{100, 0, 2 * time.Minute}, // partitioned
		{100, 1, 2 * time.Minute}, // partitioned
		{100, 3, 2 * time.Minute},
		{1024, 1, 2 * time.Minute}, // partitioned
		{1024, 3, 2 * time.Minute}, // partitioned
		{1024, 5, 2 * time.Minute},
		{16384, 1, 4 * time.Minute}, // partitioned
		{16384, 2, 2 * time.Minute}, // partitioned
		{16384, 3, 2 * time.Minute}, // partitioned
		{16384, 5, 2 * time.Minute},
		{65535, 0, 2 * time.Minute}, // partitioned
		{65535, 1, 8 * time.Minute}, // partitioned
		{65535, 2, 3 * time.Minute}, // partitioned
		{65535, 3, 5 * time.Minute}, // partitioned
		{65535, 5, 3 * time.Minute}, // partitioned
		{65535, 7, 2 * time.Minute},
		{1000000, 1, 4 * time.Hour},     // partitioned
		{1000000, 2, 2 * time.Hour},     // partitioned
		{1000000, 3, 80 * time.Minute},  // partitioned
		{1000000, 5, 50 * time.Minute},  // partitioned
		{1000000, 11, 20 * time.Minute}, // partitioned
		{1000000, 19, 10 * time.Minute},
	}

	logger := log.New(os.Stderr, "", log.LstdFlags)
	shutdownCh := make(chan struct{})

	for _, s := range clusters {
		// Fresh Manager per scenario with the scenario's node count.
		m := New(logger, shutdownCh, &fauxSerf{numNodes: s.numNodes}, &fauxConnPool{})
		for i := 0; i < s.numServers; i++ {
			nodeName := fmt.Sprintf("s%02d", i)
			m.AddServer(&agent.Server{Name: nodeName})
		}

		d := m.refreshServerRebalanceTimer()
		if d < s.minRebalance {
			t.Errorf("duration too short for cluster of size %d and %d servers (%s < %s)", s.numNodes, s.numServers, d, s.minRebalance)
		}
	}
}
|
2016-02-24 18:55:04 +00:00
|
|
|
|
2016-03-29 23:17:16 +00:00
|
|
|
// func (m *Manager) saveServerList(l serverList) {
|
|
|
|
func TestManagerInternal_saveServerList(t *testing.T) {
|
2016-03-29 22:58:15 +00:00
|
|
|
m := testManager()
|
2016-02-24 18:55:04 +00:00
|
|
|
|
|
|
|
// Initial condition
|
|
|
|
func() {
|
2016-03-29 23:17:16 +00:00
|
|
|
l := m.getServerList()
|
|
|
|
if len(l.servers) != 0 {
|
|
|
|
t.Fatalf("Manager.saveServerList failed to load init config")
|
2016-02-24 18:55:04 +00:00
|
|
|
}
|
|
|
|
|
2016-03-30 00:39:19 +00:00
|
|
|
newServer := new(agent.Server)
|
2016-03-29 23:17:16 +00:00
|
|
|
l.servers = append(l.servers, newServer)
|
|
|
|
m.saveServerList(l)
|
2016-02-24 18:55:04 +00:00
|
|
|
}()
|
|
|
|
|
|
|
|
// Test that save works
|
|
|
|
func() {
|
2016-03-29 23:17:16 +00:00
|
|
|
l1 := m.getServerList()
|
|
|
|
t1NumServers := len(l1.servers)
|
2016-02-24 18:55:04 +00:00
|
|
|
if t1NumServers != 1 {
|
2016-03-29 23:17:16 +00:00
|
|
|
t.Fatalf("Manager.saveServerList failed to save mutated config")
|
2016-02-24 18:55:04 +00:00
|
|
|
}
|
|
|
|
}()
|
|
|
|
|
|
|
|
// Verify mutation w/o a save doesn't alter the original
|
|
|
|
func() {
|
2016-03-30 00:39:19 +00:00
|
|
|
newServer := new(agent.Server)
|
2016-03-29 23:17:16 +00:00
|
|
|
l := m.getServerList()
|
|
|
|
l.servers = append(l.servers, newServer)
|
2016-02-24 18:55:04 +00:00
|
|
|
|
2016-03-29 23:17:16 +00:00
|
|
|
l_orig := m.getServerList()
|
|
|
|
origNumServers := len(l_orig.servers)
|
|
|
|
if origNumServers >= len(l.servers) {
|
|
|
|
t.Fatalf("Manager.saveServerList unsaved config overwrote original")
|
2016-02-24 18:55:04 +00:00
|
|
|
}
|
|
|
|
}()
|
|
|
|
}
|