package nomad

import (
	"fmt"
	"io/ioutil"
	"log"
	"math/rand"
	"net"
	"os"
	"path"
	"strings"
	"sync/atomic"
	"testing"
	"time"

	"github.com/hashicorp/consul/lib/freeport"
	msgpackrpc "github.com/hashicorp/net-rpc-msgpackrpc"
	"github.com/hashicorp/nomad/command/agent/consul"
	"github.com/hashicorp/nomad/helper/uuid"
	"github.com/hashicorp/nomad/nomad/mock"
	"github.com/hashicorp/nomad/nomad/structs"
	"github.com/hashicorp/nomad/nomad/structs/config"
	"github.com/hashicorp/nomad/testutil"
	"github.com/stretchr/testify/assert"
)

var (
	nodeNumber uint32 = 0
)

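// testLogger returns a standard logger that writes to stderr, for tests that
// need a *log.Logger directly.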
func testLogger() *log.Logger {
	return log.New(os.Stderr, "", log.LstdFlags)
}

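// tmpDir creates a temporary directory for a test, failing the test if it
// cannot be created. Callers are responsible for removing it.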
func tmpDir(t *testing.T) string {
	dir, err := ioutil.TempDir("", "nomad")
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	return dir
}

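// testACLServer starts a test server with ACLs enabled and bootstraps a
// management token, returning both. Typical usage mirrors testServer:
//
//	s, token := testACLServer(t, nil)
//	defer s.Shutdown()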
func testACLServer(t *testing.T, cb func(*Config)) (*Server, *structs.ACLToken) {
	server := testServer(t, func(c *Config) {
		c.ACLEnabled = true
		if cb != nil {
			cb(c)
		}
	})
	token := mock.ACLManagementToken()
	err := server.State().BootstrapACLTokens(1, 0, token)
	if err != nil {
		t.Fatalf("failed to bootstrap ACL token: %v", err)
	}
	return server, token
}

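// testServer returns a running Server configured for fast test runs (tight
// Serf, Raft, and autopilot timing, Vault disabled). The optional callback can
// mutate the config before the server is created. Ports are picked at random,
// so server creation is retried a few times if it fails.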
func testServer(t *testing.T, cb func(*Config)) *Server {
	// Set up the default settings
	config := DefaultConfig()
	config.Build = "0.8.0+unittest"
	config.DevMode = true
	nodeNum := atomic.AddUint32(&nodeNumber, 1)
	config.NodeName = fmt.Sprintf("nomad-%03d", nodeNum)

	// Tighten the Serf timing
	config.SerfConfig.MemberlistConfig.BindAddr = "127.0.0.1"
	config.SerfConfig.MemberlistConfig.SuspicionMult = 2
	config.SerfConfig.MemberlistConfig.RetransmitMult = 2
	config.SerfConfig.MemberlistConfig.ProbeTimeout = 50 * time.Millisecond
	config.SerfConfig.MemberlistConfig.ProbeInterval = 100 * time.Millisecond
	config.SerfConfig.MemberlistConfig.GossipInterval = 100 * time.Millisecond

	// Tighten the Raft timing
	config.RaftConfig.LeaderLeaseTimeout = 50 * time.Millisecond
	config.RaftConfig.HeartbeatTimeout = 50 * time.Millisecond
	config.RaftConfig.ElectionTimeout = 50 * time.Millisecond
	config.RaftTimeout = 500 * time.Millisecond

	// Tighten the autopilot timing
	config.AutopilotConfig.ServerStabilizationTime = 100 * time.Millisecond
	config.ServerHealthInterval = 50 * time.Millisecond
	config.AutopilotInterval = 100 * time.Millisecond

	// Disable Vault
	f := false
	config.VaultConfig.Enabled = &f

	// Squelch output when -v isn't specified
	if !testing.Verbose() {
		config.LogOutput = ioutil.Discard
	}

	// Invoke the callback if any
	if cb != nil {
		cb(config)
	}

	// Enable raft as leader if we have bootstrap on
	config.RaftConfig.StartAsLeader = !config.DevDisableBootstrap

	logger := log.New(config.LogOutput, fmt.Sprintf("[%s] ", config.NodeName), log.LstdFlags)
	catalog := consul.NewMockCatalog(logger)

	for i := 10; i >= 0; i-- {
		// Get random ports
		ports := freeport.GetT(t, 2)
		config.RPCAddr = &net.TCPAddr{
			IP:   []byte{127, 0, 0, 1},
			Port: ports[0],
		}
		config.SerfConfig.MemberlistConfig.BindPort = ports[1]

		// Create server
		server, err := NewServer(config, catalog, logger)
		if err == nil {
			return server
		} else if i == 0 {
			t.Fatalf("err: %v", err)
		} else {
			if server != nil {
				server.Shutdown()
			}
			wait := time.Duration(rand.Int31n(2000)) * time.Millisecond
			time.Sleep(wait)
		}
	}

	return nil
}

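// testJoin joins every server in other to s1 over Serf and fails the test if
// any join does not report exactly one node joined.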
func testJoin(t *testing.T, s1 *Server, other ...*Server) {
	addr := fmt.Sprintf("127.0.0.1:%d",
		s1.config.SerfConfig.MemberlistConfig.BindPort)
	for _, s2 := range other {
		if num, err := s2.Join([]string{addr}); err != nil {
			t.Fatalf("err: %v", err)
		} else if num != 1 {
			t.Fatalf("bad: %d", num)
		}
	}
}

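// TestServer_RPC verifies that a freshly started server answers a basic
// Status.Ping over its internal RPC interface.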
func TestServer_RPC(t *testing.T) {
	t.Parallel()
	s1 := testServer(t, nil)
	defer s1.Shutdown()

	var out struct{}
	if err := s1.RPC("Status.Ping", struct{}{}, &out); err != nil {
		t.Fatalf("err: %v", err)
	}
}

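// TestServer_RPC_MixedTLS starts one TLS-enabled server alongside two
// plaintext servers and asserts that the mixed cluster never elects a leader:
// none of the servers should report a leader (or an RPC error) within the
// timeout.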
func TestServer_RPC_MixedTLS(t *testing.T) {
	t.Parallel()
	const (
		cafile  = "../helper/tlsutil/testdata/ca.pem"
		foocert = "../helper/tlsutil/testdata/nomad-foo.pem"
		fookey  = "../helper/tlsutil/testdata/nomad-foo-key.pem"
	)
	dir := tmpDir(t)
	defer os.RemoveAll(dir)
	s1 := testServer(t, func(c *Config) {
		c.BootstrapExpect = 3
		c.DevMode = false
		c.DevDisableBootstrap = true
		c.DataDir = path.Join(dir, "node1")
		c.TLSConfig = &config.TLSConfig{
			EnableHTTP:           true,
			EnableRPC:            true,
			VerifyServerHostname: true,
			CAFile:               cafile,
			CertFile:             foocert,
			KeyFile:              fookey,
		}
	})
	defer s1.Shutdown()

	s2 := testServer(t, func(c *Config) {
		c.BootstrapExpect = 3
		c.DevMode = false
		c.DevDisableBootstrap = true
		c.DataDir = path.Join(dir, "node2")
	})
	defer s2.Shutdown()

	s3 := testServer(t, func(c *Config) {
		c.BootstrapExpect = 3
		c.DevMode = false
		c.DevDisableBootstrap = true
		c.DataDir = path.Join(dir, "node3")
	})
	defer s3.Shutdown()

	testJoin(t, s1, s2, s3)

	l1, l2, l3, shutdown := make(chan error, 1), make(chan error, 1), make(chan error, 1), make(chan struct{}, 1)

	wait := func(done chan error, rpc func(string, interface{}, interface{}) error) {
		for {
			select {
			case <-shutdown:
				return
			default:
			}

			args := &structs.GenericRequest{}
			var leader string
			err := rpc("Status.Leader", args, &leader)
			if err != nil || leader != "" {
				done <- err
			}
		}
	}

	go wait(l1, s1.RPC)
	go wait(l2, s2.RPC)
	go wait(l3, s3.RPC)

	select {
	case <-time.After(5 * time.Second):
	case err := <-l1:
		t.Fatalf("Server 1 has leader or error: %v", err)
	case err := <-l2:
		t.Fatalf("Server 2 has leader or error: %v", err)
	case err := <-l3:
		t.Fatalf("Server 3 has leader or error: %v", err)
	}
}

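// TestServer_Regions verifies that two servers in different regions learn
// about each other's regions after joining over Serf.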
func TestServer_Regions(t *testing.T) {
	t.Parallel()
	// Make the servers
	s1 := testServer(t, func(c *Config) {
		c.Region = "region1"
	})
	defer s1.Shutdown()

	s2 := testServer(t, func(c *Config) {
		c.Region = "region2"
	})
	defer s2.Shutdown()

	// Join them together
	s2Addr := fmt.Sprintf("127.0.0.1:%d",
		s2.config.SerfConfig.MemberlistConfig.BindPort)
	if n, err := s1.Join([]string{s2Addr}); err != nil || n != 1 {
		t.Fatalf("Failed joining: %v (%d joined)", err, n)
	}

	// Try listing the regions
	testutil.WaitForResult(func() (bool, error) {
		out := s1.Regions()
		if len(out) != 2 || out[0] != "region1" || out[1] != "region2" {
			return false, fmt.Errorf("unexpected regions: %v", out)
		}
		return true, nil
	}, func(err error) {
		t.Fatalf("err: %v", err)
	})
}

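// TestServer_Reload_Vault verifies that reloading the server config with Vault
// newly enabled starts the Vault client.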
func TestServer_Reload_Vault(t *testing.T) {
	t.Parallel()
	s1 := testServer(t, func(c *Config) {
		c.Region = "region1"
	})
	defer s1.Shutdown()

	if s1.vault.Running() {
		t.Fatalf("Vault client should not be running")
	}

	tr := true
	config := s1.config
	config.VaultConfig.Enabled = &tr
	config.VaultConfig.Token = uuid.Generate()

	if err := s1.Reload(config); err != nil {
		t.Fatalf("Reload failed: %v", err)
	}

	if !s1.vault.Running() {
		t.Fatalf("Vault client should be running")
	}
}

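// connectionReset reports whether an error message looks like the remote end
// closed or reset the connection.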
func connectionReset(msg string) bool {
	return strings.Contains(msg, "EOF") || strings.Contains(msg, "connection reset by peer")
}

// Tests that the server will successfully reload its network connections,
// upgrading from plaintext to TLS if the server's TLS configuration changes.
func TestServer_Reload_TLSConnections_PlaintextToTLS(t *testing.T) {
	t.Parallel()
	assert := assert.New(t)

	const (
		cafile  = "../helper/tlsutil/testdata/ca.pem"
		foocert = "../helper/tlsutil/testdata/nomad-foo.pem"
		fookey  = "../helper/tlsutil/testdata/nomad-foo-key.pem"
	)
	dir := tmpDir(t)
	defer os.RemoveAll(dir)

	s1 := testServer(t, func(c *Config) {
		c.DataDir = path.Join(dir, "nodeA")
	})
	defer s1.Shutdown()

	// Assert that the server started in plaintext mode
	assert.Equal(s1.config.TLSConfig.CertFile, "")

	newTLSConfig := &config.TLSConfig{
		EnableHTTP:           true,
		EnableRPC:            true,
		VerifyServerHostname: true,
		CAFile:               cafile,
		CertFile:             foocert,
		KeyFile:              fookey,
	}

	err := s1.reloadTLSConnections(newTLSConfig)
	assert.Nil(err)
	assert.True(s1.config.TLSConfig.Equals(newTLSConfig))

	// A plaintext RPC client should now be rejected by the TLS-only server.
	codec := rpcClient(t, s1)

	node := mock.Node()
	req := &structs.NodeRegisterRequest{
		Node:         node,
		WriteRequest: structs.WriteRequest{Region: "global"},
	}

	var resp structs.GenericResponse
	err = msgpackrpc.CallWithCodec(codec, "Node.Register", req, &resp)
	assert.NotNil(err)
	assert.True(connectionReset(err.Error()))
}

// Tests that the server will successfully reload its network connections,
// downgrading from TLS to plaintext if the server's TLS configuration changes.
func TestServer_Reload_TLSConnections_TLSToPlaintext_RPC(t *testing.T) {
	t.Parallel()
	assert := assert.New(t)

	const (
		cafile  = "../helper/tlsutil/testdata/ca.pem"
		foocert = "../helper/tlsutil/testdata/nomad-foo.pem"
		fookey  = "../helper/tlsutil/testdata/nomad-foo-key.pem"
	)

	dir := tmpDir(t)
	defer os.RemoveAll(dir)

	s1 := testServer(t, func(c *Config) {
		c.DataDir = path.Join(dir, "nodeB")
		c.TLSConfig = &config.TLSConfig{
			EnableHTTP:           true,
			EnableRPC:            true,
			VerifyServerHostname: true,
			CAFile:               cafile,
			CertFile:             foocert,
			KeyFile:              fookey,
		}
	})
	defer s1.Shutdown()

	newTLSConfig := &config.TLSConfig{}

	err := s1.reloadTLSConnections(newTLSConfig)
	assert.Nil(err)
	assert.True(s1.config.TLSConfig.Equals(newTLSConfig))

	// A plaintext RPC client should succeed once TLS has been disabled.
	codec := rpcClient(t, s1)

	node := mock.Node()
	req := &structs.NodeRegisterRequest{
		Node:         node,
		WriteRequest: structs.WriteRequest{Region: "global"},
	}

	var resp structs.GenericResponse
	err = msgpackrpc.CallWithCodec(codec, "Node.Register", req, &resp)
	assert.Nil(err)
}

// Test that Raft connections are reloaded as expected when a Nomad server is
// upgraded from plaintext to TLS
func TestServer_Reload_TLSConnections_Raft(t *testing.T) {
	assert := assert.New(t)
	t.Parallel()
	const (
		cafile  = "../../helper/tlsutil/testdata/ca.pem"
		foocert = "../../helper/tlsutil/testdata/nomad-foo.pem"
		fookey  = "../../helper/tlsutil/testdata/nomad-foo-key.pem"
		barcert = "../dev/tls_cluster/certs/nomad.pem"
		barkey  = "../dev/tls_cluster/certs/nomad-key.pem"
	)
	dir := tmpDir(t)
	defer os.RemoveAll(dir)

	s1 := testServer(t, func(c *Config) {
		c.BootstrapExpect = 2
		c.DevMode = false
		c.DevDisableBootstrap = true
		c.DataDir = path.Join(dir, "node1")
		c.NodeName = "node1"
		c.Region = "regionFoo"
	})
	defer s1.Shutdown()

	s2 := testServer(t, func(c *Config) {
		c.BootstrapExpect = 2
		c.DevMode = false
		c.DevDisableBootstrap = true
		c.DataDir = path.Join(dir, "node2")
		c.NodeName = "node2"
		c.Region = "regionFoo"
	})
	defer s2.Shutdown()

	testJoin(t, s1, s2)
	servers := []*Server{s1, s2}

	testutil.WaitForLeader(t, s1.RPC)

	newTLSConfig := &config.TLSConfig{
		EnableHTTP:        true,
		VerifyHTTPSClient: true,
		CAFile:            cafile,
		CertFile:          foocert,
		KeyFile:           fookey,
	}

	err := s1.reloadTLSConnections(newTLSConfig)
	assert.Nil(err)

	{
		for _, serv := range servers {
			testutil.WaitForResult(func() (bool, error) {
				args := &structs.GenericRequest{}
				var leader string
				err := serv.RPC("Status.Leader", args, &leader)
				if leader != "" && err == nil {
					return false, fmt.Errorf("Should not have found leader but got %s", leader)
				}
				return true, nil
			}, func(err error) {
				t.Fatalf("err: %v", err)
			})
		}
	}

	secondNewTLSConfig := &config.TLSConfig{
		EnableHTTP:        true,
		VerifyHTTPSClient: true,
		CAFile:            cafile,
		CertFile:          barcert,
		KeyFile:           barkey,
	}

	// Now, transition the other server to TLS, which should restore their
	// ability to communicate.
	err = s2.reloadTLSConnections(secondNewTLSConfig)
	assert.Nil(err)

	testutil.WaitForLeader(t, s2.RPC)
}