open-nomad/nomad/server_test.go
Alex Dadgar 15ffdff497 Vault Client on Server handles SIGHUP
This PR allows the Vault client on the server to handle a SIGHUP. This
allows updating the Vault token and any other configuration without
downtime.
2017-02-01 14:24:10 -08:00

package nomad

import (
	"fmt"
	"io/ioutil"
	"log"
	"net"
	"sync/atomic"
	"testing"
	"time"

	"github.com/hashicorp/nomad/command/agent/consul"
	"github.com/hashicorp/nomad/nomad/structs"
	"github.com/hashicorp/nomad/testutil"
)

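// Shared counters for test servers; incremented atomically so concurrently
// created servers never collide on ports or node names.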
var (
	nextPort   uint32 = 15000
	nodeNumber uint32 = 0
)

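// getPort returns the next port from the shared counter for a test server to
// bind to.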
func getPort() int {
	return int(atomic.AddUint32(&nextPort, 1))
}

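// tmpDir creates a temporary directory for a test, failing the test on error.
// Callers are responsible for removing it when done.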
func tmpDir(t *testing.T) string {
	dir, err := ioutil.TempDir("", "nomad")
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	return dir
}

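// testServer constructs an in-process Server with tightened Serf and Raft
// timing so tests converge quickly. The optional callback may mutate the
// config (region, Vault settings, bootstrap behavior, etc.) before the
// server is created.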
func testServer(t *testing.T, cb func(*Config)) *Server {
	// Setup the default settings
	config := DefaultConfig()
	config.Build = "unittest"
	config.DevMode = true
	config.RPCAddr = &net.TCPAddr{
		IP:   []byte{127, 0, 0, 1},
		Port: getPort(),
	}
	nodeNum := atomic.AddUint32(&nodeNumber, 1)
	config.NodeName = fmt.Sprintf("nomad-%03d", nodeNum)

	// Tighten the Serf timing
	config.SerfConfig.MemberlistConfig.BindAddr = "127.0.0.1"
	config.SerfConfig.MemberlistConfig.BindPort = getPort()
	config.SerfConfig.MemberlistConfig.SuspicionMult = 2
	config.SerfConfig.MemberlistConfig.RetransmitMult = 2
	config.SerfConfig.MemberlistConfig.ProbeTimeout = 50 * time.Millisecond
	config.SerfConfig.MemberlistConfig.ProbeInterval = 100 * time.Millisecond
	config.SerfConfig.MemberlistConfig.GossipInterval = 100 * time.Millisecond

	// Tighten the Raft timing
	config.RaftConfig.LeaderLeaseTimeout = 50 * time.Millisecond
	config.RaftConfig.HeartbeatTimeout = 50 * time.Millisecond
	config.RaftConfig.ElectionTimeout = 50 * time.Millisecond
	config.RaftTimeout = 500 * time.Millisecond

	// Disable Vault
	f := false
	config.VaultConfig.Enabled = &f

	// Invoke the callback if any
	if cb != nil {
		cb(config)
	}

	// Enable raft as leader if we have bootstrap on
	config.RaftConfig.StartAsLeader = !config.DevDisableBootstrap

	shutdownCh := make(chan struct{})
	logger := log.New(config.LogOutput, "", log.LstdFlags)
	consulSyncer, err := consul.NewSyncer(config.ConsulConfig, shutdownCh, logger)
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	// Create server
	server, err := NewServer(config, consulSyncer, logger)
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	return server
}

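// testJoin joins each of the other servers to s1's Serf address and fails the
// test if any join does not report exactly one node joined.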
func testJoin(t *testing.T, s1 *Server, other ...*Server) {
	addr := fmt.Sprintf("127.0.0.1:%d",
		s1.config.SerfConfig.MemberlistConfig.BindPort)
	for _, s2 := range other {
		if num, err := s2.Join([]string{addr}); err != nil {
			t.Fatalf("err: %v", err)
		} else if num != 1 {
			t.Fatalf("bad: %d", num)
		}
	}
}

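// TestServer_RPC verifies that a freshly started server answers a basic
// Status.Ping RPC.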
func TestServer_RPC(t *testing.T) {
	s1 := testServer(t, nil)
	defer s1.Shutdown()

	var out struct{}
	if err := s1.RPC("Status.Ping", struct{}{}, &out); err != nil {
		t.Fatalf("err: %v", err)
	}
}

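// TestServer_Regions starts two servers in different regions, federates them
// via Serf, and waits until both regions show up in the region list.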
func TestServer_Regions(t *testing.T) {
	// Make the servers
	s1 := testServer(t, func(c *Config) {
		c.Region = "region1"
	})
	defer s1.Shutdown()

	s2 := testServer(t, func(c *Config) {
		c.Region = "region2"
	})
	defer s2.Shutdown()

	// Join them together
	s2Addr := fmt.Sprintf("127.0.0.1:%d",
		s2.config.SerfConfig.MemberlistConfig.BindPort)
	if n, err := s1.Join([]string{s2Addr}); err != nil || n != 1 {
		t.Fatalf("Failed joining: %v (%d joined)", err, n)
	}

	// Try listing the regions
	testutil.WaitForResult(func() (bool, error) {
		out := s1.Regions()
		if len(out) != 2 || out[0] != "region1" || out[1] != "region2" {
			return false, fmt.Errorf("unexpected regions: %v", out)
		}
		return true, nil
	}, func(err error) {
		t.Fatalf("err: %v", err)
	})
}

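// TestServer_Reload_Vault exercises the reload path added for SIGHUP handling:
// enabling Vault with a new token via Reload should start the Vault client
// without restarting the server.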
func TestServer_Reload_Vault(t *testing.T) {
	s1 := testServer(t, func(c *Config) {
		c.Region = "region1"
	})
	defer s1.Shutdown()

	if s1.vault.Running() {
		t.Fatalf("Vault client should not be running")
	}

	tr := true
	config := s1.config
	config.VaultConfig.Enabled = &tr
	config.VaultConfig.Token = structs.GenerateUUID()

	if err := s1.Reload(config); err != nil {
		t.Fatalf("Reload failed: %v", err)
	}

	if !s1.vault.Running() {
		t.Fatalf("Vault client should be running")
	}
}