2015-08-20 22:25:09 +00:00
|
|
|
package client
|
|
|
|
|
2015-08-20 23:12:28 +00:00
|
|
|
import (
|
2016-12-05 21:52:31 +00:00
|
|
|
"archive/tar"
|
|
|
|
"bytes"
|
2015-08-20 23:12:28 +00:00
|
|
|
"fmt"
|
2016-12-05 21:52:31 +00:00
|
|
|
"io"
|
2015-09-12 18:47:44 +00:00
|
|
|
"io/ioutil"
|
2015-09-26 01:12:11 +00:00
|
|
|
"log"
|
2015-08-20 23:12:28 +00:00
|
|
|
"net"
|
2015-09-12 18:47:44 +00:00
|
|
|
"os"
|
|
|
|
"path/filepath"
|
2016-07-10 21:53:13 +00:00
|
|
|
"runtime"
|
2015-08-20 23:12:28 +00:00
|
|
|
"sync/atomic"
|
|
|
|
"testing"
|
|
|
|
"time"
|
|
|
|
|
2017-02-08 05:22:48 +00:00
|
|
|
memdb "github.com/hashicorp/go-memdb"
|
2015-08-25 23:21:29 +00:00
|
|
|
"github.com/hashicorp/nomad/client/config"
|
2016-06-08 06:02:37 +00:00
|
|
|
"github.com/hashicorp/nomad/command/agent/consul"
|
2015-08-20 23:12:28 +00:00
|
|
|
"github.com/hashicorp/nomad/nomad"
|
2015-08-29 21:22:24 +00:00
|
|
|
"github.com/hashicorp/nomad/nomad/mock"
|
2015-08-21 00:49:04 +00:00
|
|
|
"github.com/hashicorp/nomad/nomad/structs"
|
2015-08-20 23:12:28 +00:00
|
|
|
"github.com/hashicorp/nomad/testutil"
|
2016-02-03 20:07:09 +00:00
|
|
|
"github.com/mitchellh/hashstructure"
|
2015-09-23 00:10:03 +00:00
|
|
|
|
2015-09-23 01:48:42 +00:00
|
|
|
ctestutil "github.com/hashicorp/nomad/client/testutil"
|
2015-08-20 23:12:28 +00:00
|
|
|
)
|
|
|
|
|
2016-07-10 21:53:13 +00:00
|
|
|
var (
	// nextPort is the next free port handed out by getPort. It is bumped
	// with atomic.AddUint32 so concurrently starting test servers never
	// collide on a port.
	nextPort uint32 = 16000

	// osExecDriverSupport maps GOOS values to whether the exec driver is
	// expected to be fingerprinted on that platform; only present keys
	// with a true value indicate support.
	osExecDriverSupport = map[string]bool{
		"linux": true,
	}
)
|
2015-08-20 23:12:28 +00:00
|
|
|
|
|
|
|
func getPort() int {
|
|
|
|
return int(atomic.AddUint32(&nextPort, 1))
|
|
|
|
}
|
|
|
|
|
|
|
|
// testServer starts an in-process Nomad server for testing with Vault
// disabled and Serf/Raft timing tightened so tests converge quickly. The
// optional cb is invoked to mutate the config before the server starts.
// It returns the server and its RPC address string.
func testServer(t *testing.T, cb func(*nomad.Config)) (*nomad.Server, string) {
	// VaultConfig.Enabled is a *bool; keep a false value to point at.
	f := false

	// Setup the default settings
	config := nomad.DefaultConfig()
	config.VaultConfig.Enabled = &f
	config.Build = "unittest"
	config.DevMode = true
	config.RPCAddr = &net.TCPAddr{
		IP:   []byte{127, 0, 0, 1},
		Port: getPort(),
	}
	// Derive a unique node name from the unique RPC port.
	config.NodeName = fmt.Sprintf("Node %d", config.RPCAddr.Port)

	// Tighten the Serf timing
	config.SerfConfig.MemberlistConfig.BindAddr = "127.0.0.1"
	config.SerfConfig.MemberlistConfig.BindPort = getPort()
	config.SerfConfig.MemberlistConfig.SuspicionMult = 2
	config.SerfConfig.MemberlistConfig.RetransmitMult = 2
	config.SerfConfig.MemberlistConfig.ProbeTimeout = 50 * time.Millisecond
	config.SerfConfig.MemberlistConfig.ProbeInterval = 100 * time.Millisecond
	config.SerfConfig.MemberlistConfig.GossipInterval = 100 * time.Millisecond

	// Tighten the Raft timing
	config.RaftConfig.LeaderLeaseTimeout = 20 * time.Millisecond
	config.RaftConfig.HeartbeatTimeout = 40 * time.Millisecond
	config.RaftConfig.ElectionTimeout = 40 * time.Millisecond
	// Single-node test cluster: become leader immediately.
	config.RaftConfig.StartAsLeader = true
	config.RaftTimeout = 500 * time.Millisecond

	// Invoke the callback if any
	if cb != nil {
		cb(config)
	}

	// A consul syncer is required by the server constructor.
	shutdownCh := make(chan struct{})
	logger := log.New(config.LogOutput, "", log.LstdFlags)
	consulSyncer, err := consul.NewSyncer(config.ConsulConfig, shutdownCh, logger)
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	// Create server
	server, err := nomad.NewServer(config, consulSyncer, logger)
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	return server, config.RPCAddr.String()
}
|
2015-08-20 22:25:09 +00:00
|
|
|
|
2015-08-25 23:21:29 +00:00
|
|
|
// testClient builds a Nomad client in dev mode with Vault disabled and a
// node that reserves no disk. The optional cb mutates the config before
// the client is created. Callers are responsible for calling Shutdown.
func testClient(t *testing.T, cb func(c *config.Config)) *Client {
	// VaultConfig.Enabled is a *bool; keep a false value to point at.
	f := false

	conf := config.DefaultConfig()
	conf.VaultConfig.Enabled = &f
	conf.DevMode = true
	// Reserve no disk so test scheduling is not constrained by the host.
	conf.Node = &structs.Node{
		Reserved: &structs.Resources{
			DiskMB: 0,
		},
	}
	if cb != nil {
		cb(conf)
	}

	// A consul syncer is required by the client constructor.
	shutdownCh := make(chan struct{})
	consulSyncer, err := consul.NewSyncer(conf.ConsulConfig, shutdownCh, log.New(os.Stderr, "", log.LstdFlags))
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	logger := log.New(conf.LogOutput, "", log.LstdFlags)
	client, err := NewClient(conf, consulSyncer, logger)
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	return client
}
|
|
|
|
|
|
|
|
func TestClient_StartStop(t *testing.T) {
|
|
|
|
client := testClient(t, nil)
|
|
|
|
if err := client.Shutdown(); err != nil {
|
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
|
|
|
}
|
2015-08-20 23:12:28 +00:00
|
|
|
|
|
|
|
func TestClient_RPC(t *testing.T) {
|
|
|
|
s1, addr := testServer(t, nil)
|
|
|
|
defer s1.Shutdown()
|
|
|
|
|
2015-08-25 23:21:29 +00:00
|
|
|
c1 := testClient(t, func(c *config.Config) {
|
2015-08-20 23:12:28 +00:00
|
|
|
c.Servers = []string{addr}
|
|
|
|
})
|
|
|
|
defer c1.Shutdown()
|
|
|
|
|
|
|
|
// RPC should succeed
|
|
|
|
testutil.WaitForResult(func() (bool, error) {
|
|
|
|
var out struct{}
|
|
|
|
err := c1.RPC("Status.Ping", struct{}{}, &out)
|
|
|
|
return err == nil, err
|
|
|
|
}, func(err error) {
|
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
})
|
|
|
|
}
|
2015-08-20 23:13:05 +00:00
|
|
|
|
|
|
|
func TestClient_RPC_Passthrough(t *testing.T) {
|
|
|
|
s1, _ := testServer(t, nil)
|
|
|
|
defer s1.Shutdown()
|
|
|
|
|
2015-08-25 23:21:29 +00:00
|
|
|
c1 := testClient(t, func(c *config.Config) {
|
2015-08-20 23:13:05 +00:00
|
|
|
c.RPCHandler = s1
|
|
|
|
})
|
|
|
|
defer c1.Shutdown()
|
|
|
|
|
|
|
|
// RPC should succeed
|
|
|
|
testutil.WaitForResult(func() (bool, error) {
|
|
|
|
var out struct{}
|
|
|
|
err := c1.RPC("Status.Ping", struct{}{}, &out)
|
|
|
|
return err == nil, err
|
|
|
|
}, func(err error) {
|
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
})
|
|
|
|
}
|
2015-08-20 23:41:29 +00:00
|
|
|
|
|
|
|
func TestClient_Fingerprint(t *testing.T) {
|
|
|
|
c := testClient(t, nil)
|
|
|
|
defer c.Shutdown()
|
|
|
|
|
2015-08-28 08:30:47 +00:00
|
|
|
// Ensure kernel and arch are always present
|
2015-08-20 23:41:29 +00:00
|
|
|
node := c.Node()
|
2015-08-28 08:30:47 +00:00
|
|
|
if node.Attributes["kernel.name"] == "" {
|
|
|
|
t.Fatalf("missing kernel.name")
|
2015-08-20 23:41:29 +00:00
|
|
|
}
|
2017-01-08 21:53:27 +00:00
|
|
|
if node.Attributes["cpu.arch"] == "" {
|
|
|
|
t.Fatalf("missing cpu arch")
|
2015-08-20 23:41:29 +00:00
|
|
|
}
|
|
|
|
}
|
2015-08-20 23:53:43 +00:00
|
|
|
|
2016-02-03 20:07:09 +00:00
|
|
|
// TestClient_HasNodeChanged verifies that hasNodeChanged detects changes
// to the node's attribute map and meta map through their hashes.
func TestClient_HasNodeChanged(t *testing.T) {
	c := testClient(t, nil)
	defer c.Shutdown()

	node := c.Node()
	// Calculate the node attributes hash
	attrHash, err := hashstructure.Hash(node.Attributes, nil)
	if err != nil {
		c.logger.Printf("[DEBUG] client: unable to calculate node attributes hash: %v", err)
	}
	// Calculate node meta map hash
	metaHash, err := hashstructure.Hash(node.Meta, nil)
	if err != nil {
		c.logger.Printf("[DEBUG] client: unable to calculate node meta hash: %v", err)
	}
	// With nothing mutated the node must be reported unchanged.
	if changed, _, _ := c.hasNodeChanged(attrHash, metaHash); changed {
		t.Fatalf("Unexpected hash change.")
	}

	// Change node attribute
	node.Attributes["arch"] = "xyz_86"
	if changed, newAttrHash, _ := c.hasNodeChanged(attrHash, metaHash); !changed {
		t.Fatalf("Expected hash change in attributes: %d vs %d", attrHash, newAttrHash)
	}

	// Change node meta map
	node.Meta["foo"] = "bar"
	if changed, _, newMetaHash := c.hasNodeChanged(attrHash, metaHash); !changed {
		t.Fatalf("Expected hash change in meta map: %d vs %d", metaHash, newMetaHash)
	}
}
|
|
|
|
|
2015-11-24 15:18:49 +00:00
|
|
|
// TestClient_Fingerprint_InWhitelist ensures a whitelisted fingerprint
// module (cpu) runs and populates its attributes. The list entries carry
// extra whitespace to exercise trimming.
func TestClient_Fingerprint_InWhitelist(t *testing.T) {
	c := testClient(t, func(c *config.Config) {
		if c.Options == nil {
			c.Options = make(map[string]string)
		}

		// Weird spacing to test trimming. The whitelist includes cpu.
		c.Options["fingerprint.whitelist"] = " arch, consul,cpu,env_aws,env_gce,host,memory,network,storage,foo,bar "
	})
	defer c.Shutdown()

	node := c.Node()
	if node.Attributes["cpu.frequency"] == "" {
		t.Fatalf("missing cpu fingerprint module")
	}
}
|
|
|
|
|
2016-11-08 17:29:44 +00:00
|
|
|
func TestClient_Fingerprint_InBlacklist(t *testing.T) {
|
|
|
|
c := testClient(t, func(c *config.Config) {
|
|
|
|
if c.Options == nil {
|
|
|
|
c.Options = make(map[string]string)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Weird spacing to test trimming. Blacklist cpu.
|
|
|
|
c.Options["fingerprint.blacklist"] = " cpu "
|
|
|
|
})
|
|
|
|
defer c.Shutdown()
|
|
|
|
|
|
|
|
node := c.Node()
|
|
|
|
if node.Attributes["cpu.frequency"] != "" {
|
|
|
|
t.Fatalf("cpu fingerprint module loaded despite blacklisting")
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2015-11-24 15:18:49 +00:00
|
|
|
func TestClient_Fingerprint_OutOfWhitelist(t *testing.T) {
|
|
|
|
c := testClient(t, func(c *config.Config) {
|
2016-01-20 20:00:20 +00:00
|
|
|
if c.Options == nil {
|
|
|
|
c.Options = make(map[string]string)
|
|
|
|
}
|
|
|
|
|
|
|
|
c.Options["fingerprint.whitelist"] = "arch,consul,env_aws,env_gce,host,memory,network,storage,foo,bar"
|
2015-11-24 15:18:49 +00:00
|
|
|
})
|
|
|
|
defer c.Shutdown()
|
|
|
|
|
|
|
|
node := c.Node()
|
|
|
|
if node.Attributes["cpu.frequency"] != "" {
|
|
|
|
t.Fatalf("found cpu fingerprint module")
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2016-11-08 17:29:44 +00:00
|
|
|
// TestClient_Fingerprint_WhitelistBlacklistCombination verifies that when
// both lists are set, only modules in the whitelist and not in the
// blacklist run (i.e. the set difference: arch and cpu here).
func TestClient_Fingerprint_WhitelistBlacklistCombination(t *testing.T) {
	c := testClient(t, func(c *config.Config) {
		if c.Options == nil {
			c.Options = make(map[string]string)
		}

		// With both white- and blacklist, should return the set difference of modules (arch, cpu)
		c.Options["fingerprint.whitelist"] = "arch,memory,cpu"
		c.Options["fingerprint.blacklist"] = "memory,nomad"
	})
	defer c.Shutdown()

	node := c.Node()
	// Check expected modules are present
	if node.Attributes["cpu.frequency"] == "" {
		t.Fatalf("missing cpu fingerprint module")
	}
	if node.Attributes["cpu.arch"] == "" {
		t.Fatalf("missing arch fingerprint module")
	}
	// Check remainder _not_ present
	if node.Attributes["memory.totalbytes"] != "" {
		t.Fatalf("found memory fingerprint module")
	}
	if node.Attributes["nomad.version"] != "" {
		t.Fatalf("found nomad fingerprint module")
	}
}
|
|
|
|
|
2015-08-20 23:53:43 +00:00
|
|
|
func TestClient_Drivers(t *testing.T) {
|
|
|
|
c := testClient(t, nil)
|
|
|
|
defer c.Shutdown()
|
|
|
|
|
|
|
|
node := c.Node()
|
|
|
|
if node.Attributes["driver.exec"] == "" {
|
2016-07-10 21:53:13 +00:00
|
|
|
if v, ok := osExecDriverSupport[runtime.GOOS]; v && ok {
|
|
|
|
t.Fatalf("missing exec driver")
|
|
|
|
} else {
|
|
|
|
t.Skipf("missing exec driver, no OS support")
|
|
|
|
}
|
2015-08-20 23:53:43 +00:00
|
|
|
}
|
|
|
|
}
|
2015-08-21 00:49:04 +00:00
|
|
|
|
2015-11-20 00:43:08 +00:00
|
|
|
// TestClient_Drivers_InWhitelist ensures a whitelisted driver (exec) is
// fingerprinted; list entries carry whitespace to exercise trimming.
func TestClient_Drivers_InWhitelist(t *testing.T) {
	c := testClient(t, func(c *config.Config) {
		if c.Options == nil {
			c.Options = make(map[string]string)
		}

		// Weird spacing to test trimming
		c.Options["driver.whitelist"] = " exec , foo "
	})
	defer c.Shutdown()

	node := c.Node()
	if node.Attributes["driver.exec"] == "" {
		// Only fail on platforms where the exec driver is supported.
		if v, ok := osExecDriverSupport[runtime.GOOS]; v && ok {
			t.Fatalf("missing exec driver")
		} else {
			t.Skipf("missing exec driver, no OS support")
		}
	}
}
|
|
|
|
|
2016-11-08 17:30:07 +00:00
|
|
|
func TestClient_Drivers_InBlacklist(t *testing.T) {
|
|
|
|
c := testClient(t, func(c *config.Config) {
|
|
|
|
if c.Options == nil {
|
|
|
|
c.Options = make(map[string]string)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Weird spacing to test trimming
|
|
|
|
c.Options["driver.blacklist"] = " exec , foo "
|
|
|
|
})
|
|
|
|
defer c.Shutdown()
|
|
|
|
|
|
|
|
node := c.Node()
|
|
|
|
if node.Attributes["driver.exec"] != "" {
|
|
|
|
if v, ok := osExecDriverSupport[runtime.GOOS]; !v && ok {
|
|
|
|
t.Fatalf("exec driver loaded despite blacklist")
|
|
|
|
} else {
|
|
|
|
t.Skipf("missing exec driver, no OS support")
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2015-11-20 00:43:08 +00:00
|
|
|
func TestClient_Drivers_OutOfWhitelist(t *testing.T) {
|
|
|
|
c := testClient(t, func(c *config.Config) {
|
2016-01-20 20:00:20 +00:00
|
|
|
if c.Options == nil {
|
|
|
|
c.Options = make(map[string]string)
|
|
|
|
}
|
|
|
|
|
2015-11-20 00:43:08 +00:00
|
|
|
c.Options["driver.whitelist"] = "foo,bar,baz"
|
|
|
|
})
|
|
|
|
defer c.Shutdown()
|
|
|
|
|
|
|
|
node := c.Node()
|
|
|
|
if node.Attributes["driver.exec"] != "" {
|
|
|
|
t.Fatalf("found exec driver")
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2016-11-08 17:30:07 +00:00
|
|
|
func TestClient_Drivers_WhitelistBlacklistCombination(t *testing.T) {
|
|
|
|
c := testClient(t, func(c *config.Config) {
|
|
|
|
if c.Options == nil {
|
|
|
|
c.Options = make(map[string]string)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Expected output is set difference (raw_exec)
|
|
|
|
c.Options["driver.whitelist"] = "raw_exec,exec"
|
|
|
|
c.Options["driver.blacklist"] = "exec"
|
|
|
|
})
|
|
|
|
defer c.Shutdown()
|
|
|
|
|
|
|
|
node := c.Node()
|
|
|
|
// Check expected present
|
|
|
|
if node.Attributes["driver.raw_exec"] == "" {
|
|
|
|
t.Fatalf("missing raw_exec driver")
|
|
|
|
}
|
|
|
|
// Check expected absent
|
|
|
|
if node.Attributes["driver.exec"] != "" {
|
|
|
|
t.Fatalf("exec driver loaded despite blacklist")
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2015-08-21 00:49:04 +00:00
|
|
|
// TestClient_Register verifies a client registers itself with the server
// and that the server can then fetch the node by ID.
func TestClient_Register(t *testing.T) {
	s1, _ := testServer(t, nil)
	defer s1.Shutdown()
	testutil.WaitForLeader(t, s1.RPC)

	// Wire the server in-process so RPCs bypass the network.
	c1 := testClient(t, func(c *config.Config) {
		c.RPCHandler = s1
	})
	defer c1.Shutdown()

	req := structs.NodeSpecificRequest{
		NodeID:       c1.Node().ID,
		QueryOptions: structs.QueryOptions{Region: "global"},
	}
	var out structs.SingleNodeResponse

	// Register should succeed
	testutil.WaitForResult(func() (bool, error) {
		err := s1.RPC("Node.GetNode", &req, &out)
		if err != nil {
			return false, err
		}
		if out.Node == nil {
			return false, fmt.Errorf("missing reg")
		}
		return out.Node.ID == req.NodeID, nil
	}, func(err error) {
		t.Fatalf("err: %v", err)
	})
}
|
2015-08-29 21:15:34 +00:00
|
|
|
|
|
|
|
// TestClient_Heartbeat verifies that heartbeating transitions the node to
// the ready status on the server. A short MinHeartbeatTTL keeps the test
// fast.
func TestClient_Heartbeat(t *testing.T) {
	s1, _ := testServer(t, func(c *nomad.Config) {
		c.MinHeartbeatTTL = 50 * time.Millisecond
	})
	defer s1.Shutdown()
	testutil.WaitForLeader(t, s1.RPC)

	c1 := testClient(t, func(c *config.Config) {
		c.RPCHandler = s1
	})
	defer c1.Shutdown()

	req := structs.NodeSpecificRequest{
		NodeID:       c1.Node().ID,
		QueryOptions: structs.QueryOptions{Region: "global"},
	}
	var out structs.SingleNodeResponse

	// Register should succeed
	testutil.WaitForResult(func() (bool, error) {
		err := s1.RPC("Node.GetNode", &req, &out)
		if err != nil {
			return false, err
		}
		if out.Node == nil {
			return false, fmt.Errorf("missing reg")
		}
		// Ready status implies heartbeats are being received.
		return out.Node.Status == structs.NodeStatusReady, nil
	}, func(err error) {
		t.Fatalf("err: %v", err)
	})
}
|
2015-08-29 21:22:24 +00:00
|
|
|
|
|
|
|
// TestClient_UpdateAllocStatus verifies that the client reports allocation
// client-status updates back to the server's state store.
func TestClient_UpdateAllocStatus(t *testing.T) {
	s1, _ := testServer(t, nil)
	defer s1.Shutdown()
	testutil.WaitForLeader(t, s1.RPC)

	c1 := testClient(t, func(c *config.Config) {
		c.RPCHandler = s1
	})
	defer c1.Shutdown()

	// Wait til the node is ready
	waitTilNodeReady(c1, t)

	job := mock.Job()
	alloc := mock.Alloc()
	alloc.NodeID = c1.Node().ID
	alloc.Job = job
	alloc.JobID = job.ID
	// Seed a bogus client status that the client should overwrite.
	originalStatus := "foo"
	alloc.ClientStatus = originalStatus

	// Insert at zero so they are pulled
	state := s1.State()
	if err := state.UpsertJob(0, job); err != nil {
		t.Fatal(err)
	}
	if err := state.UpsertJobSummary(100, mock.JobSummary(alloc.JobID)); err != nil {
		t.Fatal(err)
	}
	state.UpsertAllocs(101, []*structs.Allocation{alloc})

	// Wait for the client to replace the seeded status.
	testutil.WaitForResult(func() (bool, error) {
		ws := memdb.NewWatchSet()
		out, err := state.AllocByID(ws, alloc.ID)
		if err != nil {
			return false, err
		}
		if out == nil {
			return false, fmt.Errorf("no such alloc")
		}
		if out.ClientStatus == originalStatus {
			return false, fmt.Errorf("Alloc client status not updated; got %v", out.ClientStatus)
		}
		return true, nil
	}, func(err error) {
		t.Fatalf("err: %v", err)
	})
}
|
2015-08-29 21:33:30 +00:00
|
|
|
|
|
|
|
// TestClient_WatchAllocs verifies the client tracks server-side allocation
// changes: registration of new allocs, removal of deleted allocs, and
// updates to existing allocs.
func TestClient_WatchAllocs(t *testing.T) {
	ctestutil.ExecCompatible(t)
	s1, _ := testServer(t, nil)
	defer s1.Shutdown()
	testutil.WaitForLeader(t, s1.RPC)

	c1 := testClient(t, func(c *config.Config) {
		c.RPCHandler = s1
	})
	defer c1.Shutdown()

	// Wait til the node is ready
	waitTilNodeReady(c1, t)

	// Create mock allocations
	job := mock.Job()
	alloc1 := mock.Alloc()
	alloc1.JobID = job.ID
	alloc1.Job = job
	alloc1.NodeID = c1.Node().ID
	alloc2 := mock.Alloc()
	alloc2.NodeID = c1.Node().ID
	alloc2.JobID = job.ID
	alloc2.Job = job

	// Insert at zero so they are pulled
	state := s1.State()
	if err := state.UpsertJob(100, job); err != nil {
		t.Fatal(err)
	}
	if err := state.UpsertJobSummary(101, mock.JobSummary(alloc1.JobID)); err != nil {
		t.Fatal(err)
	}
	err := state.UpsertAllocs(102, []*structs.Allocation{alloc1, alloc2})
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	// Both allocations should get registered
	testutil.WaitForResult(func() (bool, error) {
		c1.allocLock.RLock()
		num := len(c1.allocs)
		c1.allocLock.RUnlock()
		return num == 2, nil
	}, func(err error) {
		t.Fatalf("err: %v", err)
	})

	// Delete one allocation
	err = state.DeleteEval(103, nil, []string{alloc1.ID})
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	// Update the other allocation. Have to make a copy because the allocs are
	// shared in memory in the test and the modify index would be updated in the
	// alloc runner.
	alloc2_2 := new(structs.Allocation)
	*alloc2_2 = *alloc2
	alloc2_2.DesiredStatus = structs.AllocDesiredStatusStop
	err = state.UpsertAllocs(104, []*structs.Allocation{alloc2_2})
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	// One allocations should get de-registered
	testutil.WaitForResult(func() (bool, error) {
		c1.allocLock.RLock()
		num := len(c1.allocs)
		c1.allocLock.RUnlock()
		return num == 1, nil
	}, func(err error) {
		t.Fatalf("err: %v", err)
	})

	// One allocations should get updated
	testutil.WaitForResult(func() (bool, error) {
		c1.allocLock.RLock()
		ar := c1.allocs[alloc2.ID]
		c1.allocLock.RUnlock()
		return ar.Alloc().DesiredStatus == structs.AllocDesiredStatusStop, nil
	}, func(err error) {
		t.Fatalf("err: %v", err)
	})
}
|
2015-08-31 00:19:20 +00:00
|
|
|
|
2016-08-16 06:11:57 +00:00
|
|
|
func waitTilNodeReady(client *Client, t *testing.T) {
|
|
|
|
testutil.WaitForResult(func() (bool, error) {
|
|
|
|
n := client.Node()
|
|
|
|
if n.Status != structs.NodeStatusReady {
|
|
|
|
return false, fmt.Errorf("node not registered")
|
|
|
|
}
|
|
|
|
return true, nil
|
|
|
|
}, func(err error) {
|
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
})
|
|
|
|
}
|
|
|
|
|
2015-08-31 00:19:20 +00:00
|
|
|
func TestClient_SaveRestoreState(t *testing.T) {
|
2015-09-23 01:48:42 +00:00
|
|
|
ctestutil.ExecCompatible(t)
|
2015-08-31 00:19:20 +00:00
|
|
|
s1, _ := testServer(t, nil)
|
|
|
|
defer s1.Shutdown()
|
|
|
|
testutil.WaitForLeader(t, s1.RPC)
|
|
|
|
|
|
|
|
c1 := testClient(t, func(c *config.Config) {
|
2015-11-11 00:03:18 +00:00
|
|
|
c.DevMode = false
|
2015-08-31 00:19:20 +00:00
|
|
|
c.RPCHandler = s1
|
|
|
|
})
|
|
|
|
defer c1.Shutdown()
|
|
|
|
|
2016-08-16 06:11:57 +00:00
|
|
|
// Wait til the node is ready
|
|
|
|
waitTilNodeReady(c1, t)
|
|
|
|
|
2015-08-31 00:19:20 +00:00
|
|
|
// Create mock allocations
|
2016-08-16 06:11:57 +00:00
|
|
|
job := mock.Job()
|
2015-08-31 00:19:20 +00:00
|
|
|
alloc1 := mock.Alloc()
|
|
|
|
alloc1.NodeID = c1.Node().ID
|
2016-08-16 06:11:57 +00:00
|
|
|
alloc1.Job = job
|
|
|
|
alloc1.JobID = job.ID
|
2016-09-05 02:09:08 +00:00
|
|
|
alloc1.Job.TaskGroups[0].Tasks[0].Driver = "mock_driver"
|
2015-08-31 00:19:20 +00:00
|
|
|
task := alloc1.Job.TaskGroups[0].Tasks[0]
|
2016-09-05 02:09:08 +00:00
|
|
|
task.Config["run_for"] = "10s"
|
2015-08-31 00:19:20 +00:00
|
|
|
|
|
|
|
state := s1.State()
|
2016-08-16 06:11:57 +00:00
|
|
|
if err := state.UpsertJob(100, job); err != nil {
|
2016-07-21 21:43:21 +00:00
|
|
|
t.Fatal(err)
|
|
|
|
}
|
2016-08-16 06:11:57 +00:00
|
|
|
if err := state.UpsertJobSummary(101, mock.JobSummary(alloc1.JobID)); err != nil {
|
|
|
|
t.Fatal(err)
|
|
|
|
}
|
|
|
|
if err := state.UpsertAllocs(102, []*structs.Allocation{alloc1}); err != nil {
|
2015-08-31 00:19:20 +00:00
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Allocations should get registered
|
|
|
|
testutil.WaitForResult(func() (bool, error) {
|
|
|
|
c1.allocLock.RLock()
|
2015-11-11 00:03:18 +00:00
|
|
|
ar := c1.allocs[alloc1.ID]
|
2015-08-31 00:19:20 +00:00
|
|
|
c1.allocLock.RUnlock()
|
2016-08-16 06:11:57 +00:00
|
|
|
if ar == nil {
|
|
|
|
return false, fmt.Errorf("nil alloc runner")
|
|
|
|
}
|
|
|
|
if ar.Alloc().ClientStatus != structs.AllocClientStatusRunning {
|
|
|
|
return false, fmt.Errorf("client status: got %v; want %v", ar.Alloc().ClientStatus, structs.AllocClientStatusRunning)
|
|
|
|
}
|
|
|
|
return true, nil
|
2015-08-31 00:19:20 +00:00
|
|
|
}, func(err error) {
|
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
})
|
|
|
|
|
|
|
|
// Shutdown the client, saves state
|
2016-07-21 21:43:21 +00:00
|
|
|
if err := c1.Shutdown(); err != nil {
|
2015-08-31 00:19:20 +00:00
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Create a new client
|
2016-06-02 16:15:30 +00:00
|
|
|
shutdownCh := make(chan struct{})
|
2016-06-17 13:44:10 +00:00
|
|
|
logger := log.New(c1.config.LogOutput, "", log.LstdFlags)
|
2016-06-17 06:29:23 +00:00
|
|
|
consulSyncer, err := consul.NewSyncer(c1.config.ConsulConfig, shutdownCh, logger)
|
2016-05-27 09:19:01 +00:00
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
|
|
|
|
2016-06-17 06:29:23 +00:00
|
|
|
c2, err := NewClient(c1.config, consulSyncer, logger)
|
2015-08-31 00:19:20 +00:00
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
|
|
|
defer c2.Shutdown()
|
|
|
|
|
|
|
|
// Ensure the allocation is running
|
2016-02-22 05:12:58 +00:00
|
|
|
testutil.WaitForResult(func() (bool, error) {
|
|
|
|
c2.allocLock.RLock()
|
|
|
|
ar := c2.allocs[alloc1.ID]
|
|
|
|
c2.allocLock.RUnlock()
|
|
|
|
status := ar.Alloc().ClientStatus
|
|
|
|
alive := status != structs.AllocClientStatusRunning ||
|
|
|
|
status != structs.AllocClientStatusPending
|
|
|
|
if !alive {
|
|
|
|
return false, fmt.Errorf("incorrect client status: %#v", ar.Alloc())
|
|
|
|
}
|
|
|
|
return true, nil
|
|
|
|
}, func(err error) {
|
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
})
|
2016-09-02 19:44:05 +00:00
|
|
|
|
|
|
|
// Destroy all the allocations
|
2017-01-05 21:19:01 +00:00
|
|
|
for _, ar := range c2.getAllocRunners() {
|
2016-09-02 19:44:05 +00:00
|
|
|
ar.Destroy()
|
|
|
|
}
|
2017-01-05 20:32:44 +00:00
|
|
|
|
2017-01-05 21:19:01 +00:00
|
|
|
for _, ar := range c2.getAllocRunners() {
|
2017-01-05 20:32:44 +00:00
|
|
|
<-ar.WaitCh()
|
|
|
|
}
|
2015-08-31 00:19:20 +00:00
|
|
|
}
|
2015-09-12 18:47:44 +00:00
|
|
|
|
|
|
|
func TestClient_Init(t *testing.T) {
|
|
|
|
dir, err := ioutil.TempDir("", "nomad")
|
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("err: %s", err)
|
|
|
|
}
|
|
|
|
defer os.RemoveAll(dir)
|
|
|
|
allocDir := filepath.Join(dir, "alloc")
|
|
|
|
|
|
|
|
client := &Client{
|
|
|
|
config: &config.Config{
|
|
|
|
AllocDir: allocDir,
|
|
|
|
},
|
2015-09-26 01:12:11 +00:00
|
|
|
logger: log.New(os.Stderr, "", log.LstdFlags),
|
2015-09-12 18:47:44 +00:00
|
|
|
}
|
|
|
|
if err := client.init(); err != nil {
|
|
|
|
t.Fatalf("err: %s", err)
|
|
|
|
}
|
|
|
|
|
|
|
|
if _, err := os.Stat(allocDir); err != nil {
|
|
|
|
t.Fatalf("err: %s", err)
|
|
|
|
}
|
|
|
|
}
|
2016-08-22 16:34:24 +00:00
|
|
|
|
|
|
|
// TestClient_BlockedAllocations verifies that an allocation chained to a
// still-running previous allocation is tracked as blocked, and that the
// block clears once the previous allocation is stopped.
func TestClient_BlockedAllocations(t *testing.T) {
	s1, _ := testServer(t, nil)
	defer s1.Shutdown()
	testutil.WaitForLeader(t, s1.RPC)

	c1 := testClient(t, func(c *config.Config) {
		c.RPCHandler = s1
	})
	defer c1.Shutdown()

	// Wait for the node to be ready
	state := s1.State()
	testutil.WaitForResult(func() (bool, error) {
		ws := memdb.NewWatchSet()
		out, err := state.NodeByID(ws, c1.Node().ID)
		if err != nil {
			return false, err
		}
		if out == nil || out.Status != structs.NodeStatusReady {
			return false, fmt.Errorf("bad node: %#v", out)
		}
		return true, nil
	}, func(err error) {
		t.Fatalf("err: %v", err)
	})

	// Add an allocation backed by the mock driver that runs long enough
	// to keep the chained alloc blocked.
	alloc := mock.Alloc()
	alloc.NodeID = c1.Node().ID
	alloc.Job.TaskGroups[0].Tasks[0].Driver = "mock_driver"
	alloc.Job.TaskGroups[0].Tasks[0].Config = map[string]interface{}{
		"kill_after":  "1s",
		"run_for":     "100s",
		"exit_code":   0,
		"exit_signal": 0,
		"exit_err":    "",
	}

	state.UpsertJobSummary(99, mock.JobSummary(alloc.JobID))
	state.UpsertAllocs(100, []*structs.Allocation{alloc})

	// Wait until the client downloads and starts the allocation
	testutil.WaitForResult(func() (bool, error) {
		ws := memdb.NewWatchSet()
		out, err := state.AllocByID(ws, alloc.ID)
		if err != nil {
			return false, err
		}
		if out == nil || out.ClientStatus != structs.AllocClientStatusRunning {
			return false, fmt.Errorf("bad alloc: %#v", out)
		}
		return true, nil
	}, func(err error) {
		t.Fatalf("err: %v", err)
	})

	// Add a new chained alloc
	alloc2 := alloc.Copy()
	alloc2.ID = structs.GenerateUUID()
	alloc2.Job = alloc.Job
	alloc2.JobID = alloc.JobID
	alloc2.PreviousAllocation = alloc.ID
	if err := state.UpsertAllocs(200, []*structs.Allocation{alloc2}); err != nil {
		t.Fatalf("err: %v", err)
	}

	// Ensure that the chained allocation is being tracked as blocked
	testutil.WaitForResult(func() (bool, error) {
		alloc, ok := c1.blockedAllocations[alloc2.PreviousAllocation]
		if ok && alloc.ID == alloc2.ID {
			return true, nil
		}
		return false, fmt.Errorf("no blocked allocations")
	}, func(err error) {
		t.Fatalf("err: %v", err)
	})

	// Change the desired state of the parent alloc to stop
	alloc1 := alloc.Copy()
	alloc1.DesiredStatus = structs.AllocDesiredStatusStop
	if err := state.UpsertAllocs(300, []*structs.Allocation{alloc1}); err != nil {
		t.Fatalf("err: %v", err)
	}

	// Ensure that there are no blocked allocations
	testutil.WaitForResult(func() (bool, error) {
		_, ok := c1.blockedAllocations[alloc2.PreviousAllocation]
		if ok {
			return false, fmt.Errorf("blocked evals present")
		}
		return true, nil
	}, func(err error) {
		t.Fatalf("err: %v", err)
	})

	// Destroy all the allocations
	for _, ar := range c1.getAllocRunners() {
		ar.Destroy()
	}

	// Wait for every runner to exit before the test returns.
	for _, ar := range c1.getAllocRunners() {
		<-ar.WaitCh()
	}
}
|
2016-12-05 21:52:31 +00:00
|
|
|
|
|
|
|
func TestClient_UnarchiveAllocDir(t *testing.T) {
|
|
|
|
dir, err := ioutil.TempDir("", "")
|
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
|
|
|
defer os.RemoveAll(dir)
|
|
|
|
|
|
|
|
if err := os.Mkdir(filepath.Join(dir, "foo"), 0777); err != nil {
|
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
|
|
|
dirInfo, err := os.Stat(filepath.Join(dir, "foo"))
|
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
|
|
|
f, err := os.Create(filepath.Join(dir, "foo", "bar"))
|
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
|
|
|
if _, err := f.WriteString("foo"); err != nil {
|
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
|
|
|
if err := f.Chmod(0644); err != nil {
|
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
|
|
|
fInfo, err := f.Stat()
|
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
|
|
|
f.Close()
|
|
|
|
|
|
|
|
buf := new(bytes.Buffer)
|
|
|
|
tw := tar.NewWriter(buf)
|
|
|
|
|
|
|
|
walkFn := func(path string, fileInfo os.FileInfo, err error) error {
|
|
|
|
// Ignore if the file is a symlink
|
|
|
|
if fileInfo.Mode() == os.ModeSymlink {
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// Include the path of the file name relative to the alloc dir
|
|
|
|
// so that we can put the files in the right directories
|
|
|
|
hdr, err := tar.FileInfoHeader(fileInfo, "")
|
|
|
|
if err != nil {
|
|
|
|
return fmt.Errorf("error creating file header: %v", err)
|
|
|
|
}
|
|
|
|
hdr.Name = fileInfo.Name()
|
|
|
|
tw.WriteHeader(hdr)
|
|
|
|
|
|
|
|
// If it's a directory we just write the header into the tar
|
|
|
|
if fileInfo.IsDir() {
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// Write the file into the archive
|
|
|
|
file, err := os.Open(path)
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
defer file.Close()
|
|
|
|
|
|
|
|
if _, err := io.Copy(tw, file); err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
|
|
|
if err := filepath.Walk(dir, walkFn); err != nil {
|
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
|
|
|
tw.Close()
|
|
|
|
|
|
|
|
dir1, err := ioutil.TempDir("", "")
|
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
|
|
|
defer os.RemoveAll(dir1)
|
|
|
|
|
|
|
|
c1 := testClient(t, func(c *config.Config) {
|
|
|
|
c.RPCHandler = nil
|
|
|
|
})
|
|
|
|
defer c1.Shutdown()
|
|
|
|
|
|
|
|
rc := ioutil.NopCloser(buf)
|
|
|
|
|
2017-02-17 02:42:16 +00:00
|
|
|
c1.migratingAllocs["123"] = newMigrateAllocCtrl(mock.Alloc())
|
2016-12-05 21:52:31 +00:00
|
|
|
if err := c1.unarchiveAllocDir(rc, "123", dir1); err != nil {
|
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Ensure foo is present
|
|
|
|
fi, err := os.Stat(filepath.Join(dir1, "foo"))
|
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
|
|
|
if fi.Mode() != dirInfo.Mode() {
|
|
|
|
t.Fatalf("mode: %v", fi.Mode())
|
|
|
|
}
|
|
|
|
|
|
|
|
fi1, err := os.Stat(filepath.Join(dir1, "bar"))
|
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
|
|
|
if fi1.Mode() != fInfo.Mode() {
|
|
|
|
t.Fatalf("mode: %v", fi1.Mode())
|
|
|
|
}
|
|
|
|
}
|