package consul

import (
	"os"
	"testing"
	"time"

	"github.com/hashicorp/consul/agent/consul/structs"
	"github.com/hashicorp/consul/api"
	"github.com/hashicorp/consul/testrpc"
	"github.com/hashicorp/consul/testutil/retry"
	"github.com/hashicorp/net-rpc-msgpackrpc"
	"github.com/hashicorp/serf/serf"
)
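
// TestLeader_RegisterMember verifies that a client joining the LAN pool is
// registered in the catalog with a passing serfHealth check, and that the
// server registers itself and the "consul" service.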
func TestLeader_RegisterMember(t *testing.T) {
	t.Parallel()
	dir1, s1 := testServerWithConfig(t, func(c *Config) {
		c.ACLDatacenter = "dc1"
		c.ACLMasterToken = "root"
		c.ACLDefaultPolicy = "deny"
		c.ACLEnforceVersion8 = true
	})
	defer os.RemoveAll(dir1)
	defer s1.Shutdown()

	dir2, c1 := testClient(t)
	defer os.RemoveAll(dir2)
	defer c1.Shutdown()

	// Try to join
	joinLAN(t, c1, s1)

	testrpc.WaitForLeader(t, s1.RPC, "dc1")

	// Client should be registered
	state := s1.fsm.State()
	retry.Run(t, func(r *retry.R) {
		_, node, err := state.GetNode(c1.config.NodeName)
		if err != nil {
			r.Fatalf("err: %v", err)
		}
		if node == nil {
			r.Fatal("client not registered")
		}
	})

	// Should have a check
	_, checks, err := state.NodeChecks(nil, c1.config.NodeName)
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	if len(checks) != 1 {
		t.Fatalf("client missing check")
	}
	if checks[0].CheckID != SerfCheckID {
		t.Fatalf("bad check: %v", checks[0])
	}
	if checks[0].Name != SerfCheckName {
		t.Fatalf("bad check: %v", checks[0])
	}
	if checks[0].Status != api.HealthPassing {
		t.Fatalf("bad check: %v", checks[0])
	}

	// Server should be registered
	_, node, err := state.GetNode(s1.config.NodeName)
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	if node == nil {
		t.Fatalf("server not registered")
	}

	// Service should be registered
	_, services, err := state.NodeServices(nil, s1.config.NodeName)
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	if _, ok := services.Services["consul"]; !ok {
		t.Fatalf("consul service not registered: %v", services)
	}
}
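
// TestLeader_FailedMember verifies that a joined client which dies stays in
// the catalog and has its serfHealth check flipped to critical by the leader.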
func TestLeader_FailedMember(t *testing.T) {
	t.Parallel()
	dir1, s1 := testServerWithConfig(t, func(c *Config) {
		c.ACLDatacenter = "dc1"
		c.ACLMasterToken = "root"
		c.ACLDefaultPolicy = "deny"
		c.ACLEnforceVersion8 = true
	})
	defer os.RemoveAll(dir1)
	defer s1.Shutdown()

	dir2, c1 := testClient(t)
	defer os.RemoveAll(dir2)
	defer c1.Shutdown()

	testrpc.WaitForLeader(t, s1.RPC, "dc1")

	// Try to join
	joinLAN(t, c1, s1)

	// Fail the member
	c1.Shutdown()

	// Should be registered
	state := s1.fsm.State()
	retry.Run(t, func(r *retry.R) {
		_, node, err := state.GetNode(c1.config.NodeName)
		if err != nil {
			r.Fatalf("err: %v", err)
		}
		if node == nil {
			r.Fatal("client not registered")
		}
	})

	// Should have a check
	_, checks, err := state.NodeChecks(nil, c1.config.NodeName)
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	if len(checks) != 1 {
		t.Fatalf("client missing check")
	}
	if checks[0].CheckID != SerfCheckID {
		t.Fatalf("bad check: %v", checks[0])
	}
	if checks[0].Name != SerfCheckName {
		t.Fatalf("bad check: %v", checks[0])
	}

	retry.Run(t, func(r *retry.R) {
		_, checks, err = state.NodeChecks(nil, c1.config.NodeName)
		if err != nil {
			r.Fatalf("err: %v", err)
		}
		if got, want := checks[0].Status, api.HealthCritical; got != want {
			r.Fatalf("got status %q want %q", got, want)
		}
	})
}
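
// TestLeader_LeftMember verifies that a client which gracefully leaves is
// deregistered from the catalog.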
func TestLeader_LeftMember(t *testing.T) {
	t.Parallel()
	dir1, s1 := testServerWithConfig(t, func(c *Config) {
		c.ACLDatacenter = "dc1"
		c.ACLMasterToken = "root"
		c.ACLDefaultPolicy = "deny"
		c.ACLEnforceVersion8 = true
	})
	defer os.RemoveAll(dir1)
	defer s1.Shutdown()

	dir2, c1 := testClient(t)
	defer os.RemoveAll(dir2)
	defer c1.Shutdown()

	// Try to join
	joinLAN(t, c1, s1)

	state := s1.fsm.State()

	// Should be registered
	retry.Run(t, func(r *retry.R) {
		_, node, err := state.GetNode(c1.config.NodeName)
		if err != nil {
			r.Fatalf("err: %v", err)
		}
		if node == nil {
			r.Fatal("client not registered")
		}
	})

	// Node should leave
	c1.Leave()
	c1.Shutdown()

	// Should be deregistered
	retry.Run(t, func(r *retry.R) {
		_, node, err := state.GetNode(c1.config.NodeName)
		if err != nil {
			r.Fatalf("err: %v", err)
		}
		if node != nil {
			r.Fatal("client still registered")
		}
	})
}
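
// TestLeader_ReapMember verifies that a member reaped by Serf is
// deregistered from the catalog.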
func TestLeader_ReapMember(t *testing.T) {
	t.Parallel()
	dir1, s1 := testServerWithConfig(t, func(c *Config) {
		c.ACLDatacenter = "dc1"
		c.ACLMasterToken = "root"
		c.ACLDefaultPolicy = "deny"
		c.ACLEnforceVersion8 = true
	})
	defer os.RemoveAll(dir1)
	defer s1.Shutdown()

	dir2, c1 := testClient(t)
	defer os.RemoveAll(dir2)
	defer c1.Shutdown()

	// Try to join
	joinLAN(t, c1, s1)

	state := s1.fsm.State()

	// Should be registered
	retry.Run(t, func(r *retry.R) {
		_, node, err := state.GetNode(c1.config.NodeName)
		if err != nil {
			r.Fatalf("err: %v", err)
		}
		if node == nil {
			r.Fatal("client not registered")
		}
	})

	// Simulate a node reaping
	mems := s1.LANMembers()
	var c1mem serf.Member
	for _, m := range mems {
		if m.Name == c1.config.NodeName {
			c1mem = m
			c1mem.Status = StatusReap
			break
		}
	}
	s1.reconcileCh <- c1mem

	// Should be deregistered; we have to poll quickly here because
	// anti-entropy will put it back.
	reaped := false
	for start := time.Now(); time.Since(start) < 5*time.Second; {
		_, node, err := state.GetNode(c1.config.NodeName)
		if err != nil {
			t.Fatalf("err: %v", err)
		}
		if node == nil {
			reaped = true
			break
		}
	}
	if !reaped {
		t.Fatalf("client should not be registered")
	}
}
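
// TestLeader_Reconcile_ReapMember verifies that reconciliation removes
// catalog entries for nodes that are no longer Serf members.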
func TestLeader_Reconcile_ReapMember(t *testing.T) {
	t.Parallel()
	dir1, s1 := testServerWithConfig(t, func(c *Config) {
		c.ACLDatacenter = "dc1"
		c.ACLMasterToken = "root"
		c.ACLDefaultPolicy = "deny"
		c.ACLEnforceVersion8 = true
	})
	defer os.RemoveAll(dir1)
	defer s1.Shutdown()

	testrpc.WaitForLeader(t, s1.RPC, "dc1")

	// Register a non-existing member
	dead := structs.RegisterRequest{
		Datacenter: s1.config.Datacenter,
		Node:       "no-longer-around",
		Address:    "127.1.1.1",
		Check: &structs.HealthCheck{
			Node:    "no-longer-around",
			CheckID: SerfCheckID,
			Name:    SerfCheckName,
			Status:  api.HealthCritical,
		},
		WriteRequest: structs.WriteRequest{
			Token: "root",
		},
	}
	var out struct{}
	if err := s1.RPC("Catalog.Register", &dead, &out); err != nil {
		t.Fatalf("err: %v", err)
	}

	// Force a reconciliation
	if err := s1.reconcile(); err != nil {
		t.Fatalf("err: %v", err)
	}

	// Node should be gone
	state := s1.fsm.State()
	_, node, err := state.GetNode("no-longer-around")
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	if node != nil {
		t.Fatalf("client registered")
	}
}
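
// TestLeader_Reconcile verifies that a client which joins before a leader is
// elected is registered once reconciliation runs.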
func TestLeader_Reconcile(t *testing.T) {
	t.Parallel()
	dir1, s1 := testServerWithConfig(t, func(c *Config) {
		c.ACLDatacenter = "dc1"
		c.ACLMasterToken = "root"
		c.ACLDefaultPolicy = "deny"
		c.ACLEnforceVersion8 = true
	})
	defer os.RemoveAll(dir1)
	defer s1.Shutdown()

	dir2, c1 := testClient(t)
	defer os.RemoveAll(dir2)
	defer c1.Shutdown()

	// Join before we have a leader, this should cause a reconcile!
	joinLAN(t, c1, s1)

	// Should not be registered
	state := s1.fsm.State()
	_, node, err := state.GetNode(c1.config.NodeName)
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	if node != nil {
		t.Fatalf("client registered")
	}

	// Should be registered
	retry.Run(t, func(r *retry.R) {
		_, node, err := state.GetNode(c1.config.NodeName)
		if err != nil {
			r.Fatalf("err: %v", err)
		}
		if node == nil {
			r.Fatal("client not registered")
		}
	})
}
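
// TestLeader_Reconcile_Races verifies that reconciliation only updates the
// serfHealth check and does not clobber node metadata written via the catalog.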
func TestLeader_Reconcile_Races(t *testing.T) {
	t.Parallel()
	dir1, s1 := testServer(t)
	defer os.RemoveAll(dir1)
	defer s1.Shutdown()

	testrpc.WaitForLeader(t, s1.RPC, "dc1")

	dir2, c1 := testClient(t)
	defer os.RemoveAll(dir2)
	defer c1.Shutdown()

	joinLAN(t, c1, s1)

	// Wait for the server to reconcile the client and register it.
	state := s1.fsm.State()
	var nodeAddr string
	retry.Run(t, func(r *retry.R) {
		_, node, err := state.GetNode(c1.config.NodeName)
		if err != nil {
			r.Fatalf("err: %v", err)
		}
		if node == nil {
			r.Fatal("client not registered")
		}
		nodeAddr = node.Address
	})

	// Add in some metadata via the catalog (as if the agent synced it
	// there). We also set the serfHealth check to failing so the reconcile
	// will attempt to flip it back.
	req := structs.RegisterRequest{
		Datacenter: s1.config.Datacenter,
		Node:       c1.config.NodeName,
		ID:         c1.config.NodeID,
		Address:    nodeAddr,
		NodeMeta:   map[string]string{"hello": "world"},
		Check: &structs.HealthCheck{
			Node:    c1.config.NodeName,
			CheckID: SerfCheckID,
			Name:    SerfCheckName,
			Status:  api.HealthCritical,
			Output:  "",
		},
	}
	var out struct{}
	if err := s1.RPC("Catalog.Register", &req, &out); err != nil {
		t.Fatalf("err: %v", err)
	}

	// Force a reconcile and make sure the metadata stuck around.
	if err := s1.reconcile(); err != nil {
		t.Fatalf("err: %v", err)
	}
	_, node, err := state.GetNode(c1.config.NodeName)
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	if node == nil {
		t.Fatalf("bad")
	}
	if hello, ok := node.Meta["hello"]; !ok || hello != "world" {
		t.Fatalf("bad")
	}

	// Fail the member and wait for the health to go critical.
	c1.Shutdown()
	retry.Run(t, func(r *retry.R) {
		_, checks, err := state.NodeChecks(nil, c1.config.NodeName)
		if err != nil {
			r.Fatalf("err: %v", err)
		}
		if got, want := checks[0].Status, api.HealthCritical; got != want {
			r.Fatalf("got state %q want %q", got, want)
		}
	})

	// Make sure the metadata didn't get clobbered.
	_, node, err = state.GetNode(c1.config.NodeName)
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	if node == nil {
		t.Fatalf("bad")
	}
	if hello, ok := node.Meta["hello"]; !ok || hello != "world" {
		t.Fatalf("bad")
	}
}
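
// TestLeader_LeftServer verifies that force-removing a failed server moves it
// to the left state and shrinks the peer set on the remaining servers.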
func TestLeader_LeftServer(t *testing.T) {
	t.Parallel()
	dir1, s1 := testServer(t)
	defer os.RemoveAll(dir1)
	defer s1.Shutdown()

	dir2, s2 := testServerDCBootstrap(t, "dc1", false)
	defer os.RemoveAll(dir2)
	defer s2.Shutdown()

	dir3, s3 := testServerDCBootstrap(t, "dc1", false)
	defer os.RemoveAll(dir3)
	defer s3.Shutdown()

	// Put s1 last so we don't trigger a leader election.
	servers := []*Server{s2, s3, s1}

	// Try to join
	joinLAN(t, s2, s1)
	joinLAN(t, s3, s1)
	for _, s := range servers {
		retry.Run(t, func(r *retry.R) { r.Check(wantPeers(s, 3)) })
	}

	// Kill any server
	servers[0].Shutdown()

	// Force remove the non-leader (transition to left state)
	if err := servers[1].RemoveFailedNode(servers[0].config.NodeName); err != nil {
		t.Fatalf("err: %v", err)
	}

	// Wait until the remaining servers show only 2 peers.
	for _, s := range servers[1:] {
		retry.Run(t, func(r *retry.R) { r.Check(wantPeers(s, 2)) })
	}
	s1.Shutdown()
}
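
// TestLeader_LeftLeader verifies that when the leader gracefully leaves, the
// remaining servers drop it from the peer set and deregister it from the
// catalog.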
func TestLeader_LeftLeader(t *testing.T) {
	t.Parallel()
	dir1, s1 := testServer(t)
	defer os.RemoveAll(dir1)
	defer s1.Shutdown()

	dir2, s2 := testServerDCBootstrap(t, "dc1", false)
	defer os.RemoveAll(dir2)
	defer s2.Shutdown()

	dir3, s3 := testServerDCBootstrap(t, "dc1", false)
	defer os.RemoveAll(dir3)
	defer s3.Shutdown()
	servers := []*Server{s1, s2, s3}

	// Try to join
	joinLAN(t, s2, s1)
	joinLAN(t, s3, s1)

	for _, s := range servers {
		retry.Run(t, func(r *retry.R) { r.Check(wantPeers(s, 3)) })
	}

	// Kill the leader!
	var leader *Server
	for _, s := range servers {
		if s.IsLeader() {
			leader = s
			break
		}
	}
	if leader == nil {
		t.Fatalf("Should have a leader")
	}
	if !leader.isReadyForConsistentReads() {
		t.Fatalf("Expected leader to be ready for consistent reads ")
	}
	leader.Leave()
	if leader.isReadyForConsistentReads() {
		t.Fatalf("Expected consistent read state to be false ")
	}
	leader.Shutdown()
	time.Sleep(100 * time.Millisecond)

	var remain *Server
	for _, s := range servers {
		if s == leader {
			continue
		}
		remain = s
		retry.Run(t, func(r *retry.R) { r.Check(wantPeers(s, 2)) })
	}

	// Verify the old leader is deregistered
	state := remain.fsm.State()
	retry.Run(t, func(r *retry.R) {
		_, node, err := state.GetNode(leader.config.NodeName)
		if err != nil {
			r.Fatalf("err: %v", err)
		}
		if node != nil {
			r.Fatal("leader should be deregistered")
		}
	})
}
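
// TestLeader_MultiBootstrap verifies that two servers started in bootstrap
// mode see each other via Serf but do not form a single Raft cluster.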
func TestLeader_MultiBootstrap(t *testing.T) {
	t.Parallel()
	dir1, s1 := testServer(t)
	defer os.RemoveAll(dir1)
	defer s1.Shutdown()

	dir2, s2 := testServer(t)
	defer os.RemoveAll(dir2)
	defer s2.Shutdown()

	servers := []*Server{s1, s2}

	// Try to join
	joinLAN(t, s2, s1)

	for _, s := range servers {
		retry.Run(t, func(r *retry.R) {
			if got, want := len(s.serfLAN.Members()), 2; got != want {
				r.Fatalf("got %d peers want %d", got, want)
			}
		})
	}

	// Ensure we don't have multiple raft peers
	for _, s := range servers {
		peers, _ := s.numPeers()
		if peers != 1 {
			t.Fatalf("should only have 1 raft peer!")
		}
	}
}
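
// TestLeader_TombstoneGC_Reset verifies that a newly elected leader has a
// pending tombstone GC expiration after the old leader is killed.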
func TestLeader_TombstoneGC_Reset(t *testing.T) {
	t.Parallel()
	dir1, s1 := testServer(t)
	defer os.RemoveAll(dir1)
	defer s1.Shutdown()

	dir2, s2 := testServerDCBootstrap(t, "dc1", false)
	defer os.RemoveAll(dir2)
	defer s2.Shutdown()

	dir3, s3 := testServerDCBootstrap(t, "dc1", false)
	defer os.RemoveAll(dir3)
	defer s3.Shutdown()
	servers := []*Server{s1, s2, s3}

	// Try to join
	joinLAN(t, s2, s1)
	joinLAN(t, s3, s1)

	for _, s := range servers {
		retry.Run(t, func(r *retry.R) { r.Check(wantPeers(s, 3)) })
	}

	var leader *Server
	for _, s := range servers {
		if s.IsLeader() {
			leader = s
			break
		}
	}
	if leader == nil {
		t.Fatalf("Should have a leader")
	}

	// Check that the leader has a pending GC expiration
	if !leader.tombstoneGC.PendingExpiration() {
		t.Fatalf("should have pending expiration")
	}

	// Kill the leader
	leader.Shutdown()
	time.Sleep(100 * time.Millisecond)

	// Wait for a new leader
	leader = nil
	retry.Run(t, func(r *retry.R) {
		for _, s := range servers {
			if s.IsLeader() {
				leader = s
				return
			}
		}
		r.Fatal("no leader")
	})

	retry.Run(t, func(r *retry.R) {
		if !leader.tombstoneGC.PendingExpiration() {
			r.Fatal("leader has no pending GC expiration")
		}
	})
}
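
// TestLeader_ReapTombstones verifies that KV tombstones are reaped after the
// TombstoneTTL expires.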
func TestLeader_ReapTombstones(t *testing.T) {
	t.Parallel()
	dir1, s1 := testServerWithConfig(t, func(c *Config) {
		c.ACLDatacenter = "dc1"
		c.ACLMasterToken = "root"
		c.ACLDefaultPolicy = "deny"
		c.TombstoneTTL = 50 * time.Millisecond
		c.TombstoneTTLGranularity = 10 * time.Millisecond
	})
	defer os.RemoveAll(dir1)
	defer s1.Shutdown()
	codec := rpcClient(t, s1)

	testrpc.WaitForLeader(t, s1.RPC, "dc1")

	// Create a KV entry
	arg := structs.KVSRequest{
		Datacenter: "dc1",
		Op:         api.KVSet,
		DirEnt: structs.DirEntry{
			Key:   "test",
			Value: []byte("test"),
		},
		WriteRequest: structs.WriteRequest{
			Token: "root",
		},
	}
	var out bool
	if err := msgpackrpc.CallWithCodec(codec, "KVS.Apply", &arg, &out); err != nil {
		t.Fatalf("err: %v", err)
	}

	// Delete the KV entry (tombstoned).
	arg.Op = api.KVDelete
	if err := msgpackrpc.CallWithCodec(codec, "KVS.Apply", &arg, &out); err != nil {
		t.Fatalf("err: %v", err)
	}

	// Make sure there's a tombstone.
	state := s1.fsm.State()
	func() {
		snap := state.Snapshot()
		defer snap.Close()
		stones, err := snap.Tombstones()
		if err != nil {
			t.Fatalf("err: %s", err)
		}
		if stones.Next() == nil {
			t.Fatalf("missing tombstones")
		}
		if stones.Next() != nil {
			t.Fatalf("unexpected extra tombstones")
		}
	}()

	// Check that the new leader has a pending GC expiration by
	// watching for the tombstone to get removed.
	retry.Run(t, func(r *retry.R) {
		snap := state.Snapshot()
		defer snap.Close()
		stones, err := snap.Tombstones()
		if err != nil {
			r.Fatal(err)
		}
		if stones.Next() != nil {
			r.Fatal("should have no tombstones")
		}
	})
}
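
// TestLeader_RollRaftServer verifies that a dead Raft protocol v1 server can
// be replaced by a protocol v3 server and the cluster returns to three peers.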
func TestLeader_RollRaftServer(t *testing.T) {
	t.Parallel()
	dir1, s1 := testServerWithConfig(t, func(c *Config) {
		c.Bootstrap = true
		c.Datacenter = "dc1"
	})
	defer os.RemoveAll(dir1)
	defer s1.Shutdown()

	dir2, s2 := testServerWithConfig(t, func(c *Config) {
		c.Bootstrap = false
		c.Datacenter = "dc1"
		c.RaftConfig.ProtocolVersion = 1
	})
	defer os.RemoveAll(dir2)
	defer s2.Shutdown()

	dir3, s3 := testServerDCBootstrap(t, "dc1", false)
	defer os.RemoveAll(dir3)
	defer s3.Shutdown()

	servers := []*Server{s1, s2, s3}

	// Try to join
	joinLAN(t, s2, s1)
	joinLAN(t, s3, s1)

	for _, s := range servers {
		retry.Run(t, func(r *retry.R) { r.Check(wantPeers(s, 3)) })
	}

	// Kill the v1 server
	s2.Shutdown()

	for _, s := range []*Server{s1, s3} {
		retry.Run(t, func(r *retry.R) {
			minVer, err := ServerMinRaftProtocol(s.LANMembers())
			if err != nil {
				r.Fatal(err)
			}
			if got, want := minVer, 2; got != want {
				r.Fatalf("got min raft version %d want %d", got, want)
			}
		})
	}

	// Replace the dead server with one running raft protocol v3
	dir4, s4 := testServerWithConfig(t, func(c *Config) {
		c.Bootstrap = false
		c.Datacenter = "dc1"
		c.RaftConfig.ProtocolVersion = 3
	})
	defer os.RemoveAll(dir4)
	defer s4.Shutdown()
	joinLAN(t, s4, s1)
	servers[1] = s4

	// Make sure the dead server is removed and we're back to 3 total peers
	for _, s := range servers {
		retry.Run(t, func(r *retry.R) {
			addrs := 0
			ids := 0
			future := s.raft.GetConfiguration()
			if err := future.Error(); err != nil {
				r.Fatal(err)
			}
			for _, server := range future.Configuration().Servers {
				if string(server.ID) == string(server.Address) {
					addrs++
				} else {
					ids++
				}
			}
			if got, want := addrs, 2; got != want {
				r.Fatalf("got %d server addresses want %d", got, want)
			}
			if got, want := ids, 1; got != want {
				r.Fatalf("got %d server ids want %d", got, want)
			}
		})
	}
}
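
// TestLeader_ChangeServerID verifies that a new server reusing a dead
// server's address, but with a different ID, can join and restore the
// cluster to three peers.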
func TestLeader_ChangeServerID(t *testing.T) {
	t.Parallel()
	conf := func(c *Config) {
		c.Bootstrap = false
		c.BootstrapExpect = 3
		c.Datacenter = "dc1"
		c.RaftConfig.ProtocolVersion = 3
	}
	dir1, s1 := testServerWithConfig(t, conf)
	defer os.RemoveAll(dir1)
	defer s1.Shutdown()

	dir2, s2 := testServerWithConfig(t, conf)
	defer os.RemoveAll(dir2)
	defer s2.Shutdown()

	dir3, s3 := testServerWithConfig(t, conf)
	defer os.RemoveAll(dir3)
	defer s3.Shutdown()

	servers := []*Server{s1, s2, s3}

	// Try to join
	joinLAN(t, s2, s1)
	joinLAN(t, s3, s1)

	for _, s := range servers {
		retry.Run(t, func(r *retry.R) { r.Check(wantPeers(s, 3)) })
	}

	// Shut down a server, freeing up its address/port
	s3.Shutdown()

	retry.Run(t, func(r *retry.R) {
		alive := 0
		for _, m := range s1.LANMembers() {
			if m.Status == serf.StatusAlive {
				alive++
			}
		}
		if got, want := alive, 2; got != want {
			r.Fatalf("got %d alive members want %d", got, want)
		}
	})

	// Bring up a new server with s3's address that will get a different ID
	dir4, s4 := testServerWithConfig(t, func(c *Config) {
		c.Bootstrap = false
		c.BootstrapExpect = 3
		c.Datacenter = "dc1"
		c.RaftConfig.ProtocolVersion = 3
		c.SerfLANConfig.MemberlistConfig = s3.config.SerfLANConfig.MemberlistConfig
		c.RPCAddr = s3.config.RPCAddr
		c.RPCAdvertise = s3.config.RPCAdvertise
	})
	defer os.RemoveAll(dir4)
	defer s4.Shutdown()
	joinLAN(t, s4, s1)
	servers[2] = s4

	// Make sure the dead server is removed and we're back to 3 total peers
	for _, s := range servers {
		retry.Run(t, func(r *retry.R) { r.Check(wantPeers(s, 3)) })
	}
}