open-nomad/nomad/server_test.go
Michael Schurter e204a287ed Refactor Consul Syncer into new ServiceClient
Fixes #2478 #2474 #1995 #2294

The new client only handles agent and task service advertisement. Server
discovery is mostly unchanged.

The Nomad client agent now handles all Consul operations instead of the
executor handling task related operations. When upgrading from an
earlier version of Nomad existing executors will be told to deregister
from Consul so that the Nomad agent can re-register the task's services
and checks.

Drivers - other than qemu - now support an Exec method for executing
arbitrary commands in a task's environment. This is used to implement
script checks.
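
A minimal sketch of the shape such an exec hook could take, assuming an
interface along these lines (the names and signature are illustrative, not
Nomad's actual driver API):

// Hypothetical sketch only: the capability a script check needs from a driver.
package sketch

import "context"

// ScriptExecutor runs a command inside a task's environment.
type ScriptExecutor interface {
	// Exec runs cmd with args and returns the combined output and exit code.
	Exec(ctx context.Context, cmd string, args []string) ([]byte, int, error)
}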

Interfaces are used extensively to avoid interacting with Consul in
tests that don't assert any Consul related behavior.
2017-04-19 12:42:47 -07:00
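
The test file below illustrates the interface-based approach: testServer wires
the server to consul.NewMockCatalog(logger), so no real Consul agent is needed.
A minimal sketch of that pattern, with illustrative names that are not Nomad's
actual types:

// Hypothetical sketch of mocking Consul behind a narrow interface.
package sketch

// Catalog is the slice of Consul catalog behavior the server depends on.
type Catalog interface {
	Datacenters() ([]string, error)
}

// MockCatalog satisfies Catalog without contacting Consul.
type MockCatalog struct{}

func (MockCatalog) Datacenters() ([]string, error) { return []string{"dc1"}, nil }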


package nomad

import (
	"fmt"
	"io/ioutil"
	"log"
	"net"
	"sync/atomic"
	"testing"
	"time"

	"github.com/hashicorp/nomad/command/agent/consul"
	"github.com/hashicorp/nomad/nomad/structs"
	"github.com/hashicorp/nomad/nomad/structs/config"
	"github.com/hashicorp/nomad/testutil"
)

var (
	nextPort   uint32 = 15000
	nodeNumber uint32 = 0
)
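
// getPort returns the next port from a package-level atomic counter so that
// servers started by different tests do not collide on bind addresses.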
func getPort() int {
	return int(atomic.AddUint32(&nextPort, 1))
}
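
// tmpDir creates a temporary directory for a test, failing the test on error.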
func tmpDir(t *testing.T) string {
	dir, err := ioutil.TempDir("", "nomad")
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	return dir
}
func testServer(t *testing.T, cb func(*Config)) *Server {
	// Setup the default settings
	config := DefaultConfig()
	config.Build = "unittest"
	config.DevMode = true
	config.RPCAddr = &net.TCPAddr{
		IP:   []byte{127, 0, 0, 1},
		Port: getPort(),
	}
	nodeNum := atomic.AddUint32(&nodeNumber, 1)
	config.NodeName = fmt.Sprintf("nomad-%03d", nodeNum)

	// Tighten the Serf timing
	config.SerfConfig.MemberlistConfig.BindAddr = "127.0.0.1"
	config.SerfConfig.MemberlistConfig.BindPort = getPort()
	config.SerfConfig.MemberlistConfig.SuspicionMult = 2
	config.SerfConfig.MemberlistConfig.RetransmitMult = 2
	config.SerfConfig.MemberlistConfig.ProbeTimeout = 50 * time.Millisecond
	config.SerfConfig.MemberlistConfig.ProbeInterval = 100 * time.Millisecond
	config.SerfConfig.MemberlistConfig.GossipInterval = 100 * time.Millisecond

	// Tighten the Raft timing
	config.RaftConfig.LeaderLeaseTimeout = 50 * time.Millisecond
	config.RaftConfig.HeartbeatTimeout = 50 * time.Millisecond
	config.RaftConfig.ElectionTimeout = 50 * time.Millisecond
	config.RaftTimeout = 500 * time.Millisecond

	// Disable Vault
	f := false
	config.VaultConfig.Enabled = &f

	// Squelch output when -v isn't specified
	if !testing.Verbose() {
		config.LogOutput = ioutil.Discard
	}

	// Invoke the callback if any
	if cb != nil {
		cb(config)
	}

	// Enable raft as leader if we have bootstrap on
	config.RaftConfig.StartAsLeader = !config.DevDisableBootstrap

	logger := log.New(config.LogOutput, fmt.Sprintf("[%s] ", config.NodeName), log.LstdFlags)
	catalog := consul.NewMockCatalog(logger)

	// Create server
	server, err := NewServer(config, catalog, logger)
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	return server
}
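
// testJoin joins each server in other to s1 over Serf and fails the test if a
// join does not report exactly one member.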
func testJoin(t *testing.T, s1 *Server, other ...*Server) {
	addr := fmt.Sprintf("127.0.0.1:%d",
		s1.config.SerfConfig.MemberlistConfig.BindPort)
	for _, s2 := range other {
		if num, err := s2.Join([]string{addr}); err != nil {
			t.Fatalf("err: %v", err)
		} else if num != 1 {
			t.Fatalf("bad: %d", num)
		}
	}
}

func TestServer_RPC(t *testing.T) {
	s1 := testServer(t, nil)
	defer s1.Shutdown()

	var out struct{}
	if err := s1.RPC("Status.Ping", struct{}{}, &out); err != nil {
		t.Fatalf("err: %v", err)
	}
}

func TestServer_RPC_MixedTLS(t *testing.T) {
	const (
		cafile  = "../helper/tlsutil/testdata/ca.pem"
		foocert = "../helper/tlsutil/testdata/nomad-foo.pem"
		fookey  = "../helper/tlsutil/testdata/nomad-foo-key.pem"
	)
	s1 := testServer(t, func(c *Config) {
		c.BootstrapExpect = 3
		c.DevDisableBootstrap = true
		c.TLSConfig = &config.TLSConfig{
			EnableHTTP:           true,
			EnableRPC:            true,
			VerifyServerHostname: true,
			CAFile:               cafile,
			CertFile:             foocert,
			KeyFile:              fookey,
		}
	})
	defer s1.Shutdown()

	cb := func(c *Config) {
		c.BootstrapExpect = 3
		c.DevDisableBootstrap = true
	}
	s2 := testServer(t, cb)
	defer s2.Shutdown()
	s3 := testServer(t, cb)
	defer s3.Shutdown()

	testJoin(t, s1, s2, s3)
	testutil.WaitForLeader(t, s2.RPC)
	testutil.WaitForLeader(t, s3.RPC)

	// s1 is TLS-only while s2 and s3 are not, so s1 should not be able to
	// take part in the cluster or find a leader.
	leader := ""
	if err := s1.RPC("Status.Leader", &structs.GenericRequest{}, &leader); err == nil {
		t.Errorf("expected a connection error from TLS server but received none; found leader: %q", leader)
	}
}

func TestServer_Regions(t *testing.T) {
	// Make the servers
	s1 := testServer(t, func(c *Config) {
		c.Region = "region1"
	})
	defer s1.Shutdown()

	s2 := testServer(t, func(c *Config) {
		c.Region = "region2"
	})
	defer s2.Shutdown()

	// Join them together
	s2Addr := fmt.Sprintf("127.0.0.1:%d",
		s2.config.SerfConfig.MemberlistConfig.BindPort)
	if n, err := s1.Join([]string{s2Addr}); err != nil || n != 1 {
		t.Fatalf("Failed joining: %v (%d joined)", err, n)
	}

	// Try listing the regions
	testutil.WaitForResult(func() (bool, error) {
		out := s1.Regions()
		if len(out) != 2 || out[0] != "region1" || out[1] != "region2" {
			return false, fmt.Errorf("unexpected regions: %v", out)
		}
		return true, nil
	}, func(err error) {
		t.Fatalf("err: %v", err)
	})
}

func TestServer_Reload_Vault(t *testing.T) {
	s1 := testServer(t, func(c *Config) {
		c.Region = "region1"
	})
	defer s1.Shutdown()

	if s1.vault.Running() {
		t.Fatalf("Vault client should not be running")
	}

	tr := true
	config := s1.config
	config.VaultConfig.Enabled = &tr
	config.VaultConfig.Token = structs.GenerateUUID()

	if err := s1.Reload(config); err != nil {
		t.Fatalf("Reload failed: %v", err)
	}

	if !s1.vault.Running() {
		t.Fatalf("Vault client should be running")
	}
}