package client

import (
	"fmt"
	"io/ioutil"
	"log"
	"net/http"
	"net/http/httptest"
	"os"
	"path/filepath"
	"reflect"
	"syscall"
	"testing"
	"time"

	"github.com/boltdb/bolt"
	"github.com/golang/snappy"
	"github.com/hashicorp/nomad/client/allocdir"
	"github.com/hashicorp/nomad/client/config"
	cstructs "github.com/hashicorp/nomad/client/structs"
	"github.com/hashicorp/nomad/client/vaultclient"
	"github.com/hashicorp/nomad/nomad/mock"
	"github.com/hashicorp/nomad/nomad/structs"
	"github.com/hashicorp/nomad/testutil"

	ctestutil "github.com/hashicorp/nomad/client/testutil"
)
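
// testLogger returns a logger that writes to stderr when tests are run
// verbosely (-v) and discards all output otherwise.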
func testLogger() *log.Logger {
	return prefixedTestLogger("")
}

func prefixedTestLogger(prefix string) *log.Logger {
	if testing.Verbose() {
		return log.New(os.Stderr, prefix, log.LstdFlags|log.Lmicroseconds)
	}
	return log.New(ioutil.Discard, "", 0)
}
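
// MockTaskStateUpdater is a test stand-in for the task state updater: it
// records the most recent state, whether a task-failing event was seen, and
// every event emitted by the task runner.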
type MockTaskStateUpdater struct {
	state  string
	failed bool
	events []*structs.TaskEvent
}

func (m *MockTaskStateUpdater) Update(name, state string, event *structs.TaskEvent) {
	if state != "" {
		m.state = state
	}
	if event != nil {
		if event.FailsTask {
			m.failed = true
		}
		m.events = append(m.events, event)
	}
}
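
// taskRunnerTestCtx bundles everything a task runner test needs: the mock
// state updater, the TaskRunner under test, and the allocation directory
// backing it.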
type taskRunnerTestCtx struct {
	upd      *MockTaskStateUpdater
	tr       *TaskRunner
	allocDir *allocdir.AllocDir
}

// Cleanup calls Destroy on the task runner and alloc dir
func (ctx *taskRunnerTestCtx) Cleanup() {
	ctx.tr.Destroy(structs.NewTaskEvent(structs.TaskKilled))
	ctx.allocDir.Destroy()
}
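
// testTaskRunner creates a task runner (and test context) from a default
// mock allocation; see testTaskRunnerFromAlloc for the details.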
func testTaskRunner(t *testing.T, restarts bool) *taskRunnerTestCtx {
	return testTaskRunnerFromAlloc(t, restarts, mock.Alloc())
}

// Creates a mock task runner using the first task in the first task group of
// the passed allocation.
//
// Callers should defer Cleanup() to clean up after completion.
func testTaskRunnerFromAlloc(t *testing.T, restarts bool, alloc *structs.Allocation) *taskRunnerTestCtx {
	logger := testLogger()
	conf := config.DefaultConfig()
	conf.Node = mock.Node()
	conf.StateDir = os.TempDir()
	conf.AllocDir = os.TempDir()

	tmp, err := ioutil.TempFile("", "state-db")
	if err != nil {
		t.Fatalf("error creating state db file: %v", err)
	}
	db, err := bolt.Open(tmp.Name(), 0600, nil)
	if err != nil {
		t.Fatalf("error creating state db: %v", err)
	}

	upd := &MockTaskStateUpdater{}
	task := alloc.Job.TaskGroups[0].Tasks[0]
	// Initialize the port listing. This should be done by the offer process but
	// we have a mock so that doesn't happen.
	task.Resources.Networks[0].ReservedPorts = []structs.Port{{Label: "", Value: 80}}

	allocDir := allocdir.NewAllocDir(testLogger(), filepath.Join(conf.AllocDir, alloc.ID))
	if err := allocDir.Build(); err != nil {
		t.Fatalf("error building alloc dir: %v", err)
		return nil
	}

	// HACK to get FSIsolation and chroot without using AllocRunner,
	// TaskRunner, or Drivers
	fsi := cstructs.FSIsolationImage
	switch task.Driver {
	case "raw_exec":
		fsi = cstructs.FSIsolationNone
	case "exec", "java":
		fsi = cstructs.FSIsolationChroot
	}
	taskDir := allocDir.NewTaskDir(task.Name)
	if err := taskDir.Build(false, config.DefaultChrootEnv, fsi); err != nil {
		t.Fatalf("error building task dir %q: %v", task.Name, err)
		return nil
	}

	vclient := vaultclient.NewMockVaultClient()
	cclient := newMockConsulServiceClient()
	tr := NewTaskRunner(logger, conf, db, upd.Update, taskDir, alloc, task, vclient, cclient)
	if !restarts {
		tr.restartTracker = noRestartsTracker()
	}
	return &taskRunnerTestCtx{upd, tr, allocDir}
}

// testWaitForTaskToStart waits for the task to start or fails the test
func testWaitForTaskToStart(t *testing.T, ctx *taskRunnerTestCtx) {
	// Wait for the task to start
	testutil.WaitForResult(func() (bool, error) {
		l := len(ctx.upd.events)
		if l < 2 {
			return false, fmt.Errorf("Expect at least two events; got %v", l)
		}

		if ctx.upd.events[0].Type != structs.TaskReceived {
			return false, fmt.Errorf("First Event was %v; want %v", ctx.upd.events[0].Type, structs.TaskReceived)
		}

		if l >= 3 {
			if ctx.upd.events[1].Type != structs.TaskSetup {
				return false, fmt.Errorf("Second Event was %v; want %v", ctx.upd.events[1].Type, structs.TaskSetup)
			}
			if ctx.upd.events[2].Type != structs.TaskStarted {
				return false, fmt.Errorf("Third Event was %v; want %v", ctx.upd.events[2].Type, structs.TaskStarted)
			}
		} else {
			if ctx.upd.events[1].Type != structs.TaskStarted {
				return false, fmt.Errorf("Second Event was %v; want %v", ctx.upd.events[1].Type, structs.TaskStarted)
			}
		}

		return true, nil
	}, func(err error) {
		t.Fatalf("err: %v", err)
	})
}
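
// TestTaskRunner_SimpleRun runs a task to completion and asserts the
// Received -> Setup -> Started -> Terminated event sequence.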
func TestTaskRunner_SimpleRun(t *testing.T) {
	ctestutil.ExecCompatible(t)
	ctx := testTaskRunner(t, false)
	ctx.tr.MarkReceived()
	go ctx.tr.Run()
	defer ctx.Cleanup()

	select {
	case <-ctx.tr.WaitCh():
	case <-time.After(time.Duration(testutil.TestMultiplier()*15) * time.Second):
		t.Fatalf("timeout")
	}

	if len(ctx.upd.events) != 4 {
		t.Fatalf("should have 4 updates: %#v", ctx.upd.events)
	}

	if ctx.upd.state != structs.TaskStateDead {
		t.Fatalf("TaskState %v; want %v", ctx.upd.state, structs.TaskStateDead)
	}

	if ctx.upd.events[0].Type != structs.TaskReceived {
		t.Fatalf("First Event was %v; want %v", ctx.upd.events[0].Type, structs.TaskReceived)
	}

	if ctx.upd.events[1].Type != structs.TaskSetup {
		t.Fatalf("Second Event was %v; want %v", ctx.upd.events[1].Type, structs.TaskSetup)
	}

	if ctx.upd.events[2].Type != structs.TaskStarted {
		t.Fatalf("Third Event was %v; want %v", ctx.upd.events[2].Type, structs.TaskStarted)
	}

	if ctx.upd.events[3].Type != structs.TaskTerminated {
		t.Fatalf("Fourth Event was %v; want %v", ctx.upd.events[3].Type, structs.TaskTerminated)
	}
}

func TestTaskRunner_Run_RecoverableStartError(t *testing.T) {
	alloc := mock.Alloc()
	task := alloc.Job.TaskGroups[0].Tasks[0]
	task.Driver = "mock_driver"
	task.Config = map[string]interface{}{
		"exit_code":               0,
		"start_error":             "driver failure",
		"start_error_recoverable": true,
	}

	ctx := testTaskRunnerFromAlloc(t, true, alloc)
	ctx.tr.MarkReceived()
	go ctx.tr.Run()
	defer ctx.Cleanup()

	testutil.WaitForResult(func() (bool, error) {
		if l := len(ctx.upd.events); l < 4 {
			return false, fmt.Errorf("Expect at least four events; got %v", l)
		}

		if ctx.upd.events[0].Type != structs.TaskReceived {
			return false, fmt.Errorf("First Event was %v; want %v", ctx.upd.events[0].Type, structs.TaskReceived)
		}

		if ctx.upd.events[1].Type != structs.TaskSetup {
			return false, fmt.Errorf("Second Event was %v; want %v", ctx.upd.events[1].Type, structs.TaskSetup)
		}

		if ctx.upd.events[2].Type != structs.TaskDriverFailure {
			return false, fmt.Errorf("Third Event was %v; want %v", ctx.upd.events[2].Type, structs.TaskDriverFailure)
		}

		if ctx.upd.events[3].Type != structs.TaskRestarting {
			return false, fmt.Errorf("Fourth Event was %v; want %v", ctx.upd.events[3].Type, structs.TaskRestarting)
		}

		return true, nil
	}, func(err error) {
		t.Fatalf("err: %v", err)
	})
}

func TestTaskRunner_Destroy(t *testing.T) {
	ctestutil.ExecCompatible(t)
	ctx := testTaskRunner(t, true)
	ctx.tr.MarkReceived()
	// FIXME: This test did not previously send a kill status update.
	defer ctx.Cleanup()

	// Change command to ensure we run for a bit
	ctx.tr.task.Config["command"] = "/bin/sleep"
	ctx.tr.task.Config["args"] = []string{"1000"}
	go ctx.tr.Run()

	// Wait for the task to start
	testWaitForTaskToStart(t, ctx)

	// Make sure we are collecting a few stats
	time.Sleep(2 * time.Second)
	stats := ctx.tr.LatestResourceUsage()
	if len(stats.Pids) == 0 || stats.ResourceUsage == nil || stats.ResourceUsage.MemoryStats.RSS == 0 {
		t.Fatalf("expected task runner to have some stats")
	}

	// Begin the tear down
	ctx.tr.Destroy(structs.NewTaskEvent(structs.TaskKilled))

	select {
	case <-ctx.tr.WaitCh():
	case <-time.After(time.Duration(testutil.TestMultiplier()*15) * time.Second):
		t.Fatalf("timeout")
	}

	if len(ctx.upd.events) != 5 {
		t.Fatalf("should have 5 updates: %#v", ctx.upd.events)
	}

	if ctx.upd.state != structs.TaskStateDead {
		t.Fatalf("TaskState %v; want %v", ctx.upd.state, structs.TaskStateDead)
	}

	if ctx.upd.events[3].Type != structs.TaskKilling {
		t.Fatalf("Fourth Event was %v; want %v", ctx.upd.events[3].Type, structs.TaskKilling)
	}

	if ctx.upd.events[4].Type != structs.TaskKilled {
		t.Fatalf("Fifth Event was %v; want %v", ctx.upd.events[4].Type, structs.TaskKilled)
	}
}
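
// TestTaskRunner_Update sends an updated allocation to a running task runner
// and asserts the task, restart policy, driver handle, and interpolated
// Consul checks are all refreshed (see #2180).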
func TestTaskRunner_Update(t *testing.T) {
	ctestutil.ExecCompatible(t)
	ctx := testTaskRunner(t, false)

	// Change command to ensure we run for a bit
	ctx.tr.task.Config["command"] = "/bin/sleep"
	ctx.tr.task.Config["args"] = []string{"100"}
	ctx.tr.task.Services[0].Checks[0].Args[0] = "${NOMAD_META_foo}"
	go ctx.tr.Run()
	defer ctx.Cleanup()

	// Update the task definition
	updateAlloc := ctx.tr.alloc.Copy()

	// Update the restart policy
	newTG := updateAlloc.Job.TaskGroups[0]
	newMode := "foo"
	newTG.RestartPolicy.Mode = newMode

	newTask := newTG.Tasks[0]
	newTask.Driver = "mock_driver"

	// Update meta to make sure service checks are interpolated correctly
	// #2180
	newTask.Meta["foo"] = "UPDATE"

	// Update the kill timeout
	testutil.WaitForResult(func() (bool, error) {
		if ctx.tr.handle == nil {
			return false, fmt.Errorf("task not started")
		}
		return true, nil
	}, func(err error) {
		t.Fatalf("err: %v", err)
	})

	oldHandle := ctx.tr.handle.ID()
	newTask.KillTimeout = time.Hour

	ctx.tr.Update(updateAlloc)

	// Wait for the update to take place
	testutil.WaitForResult(func() (bool, error) {
		if ctx.tr.task == newTask {
			return false, fmt.Errorf("We copied the pointer! This would be very bad")
		}
		if ctx.tr.task.Driver != newTask.Driver {
			return false, fmt.Errorf("Task not copied")
		}
		if ctx.tr.restartTracker.policy.Mode != newMode {
			return false, fmt.Errorf("restart policy not updated")
		}
		if ctx.tr.handle.ID() == oldHandle {
			return false, fmt.Errorf("handle not updated")
		}
		// Make sure Consul services were interpolated correctly during
		// the update #2180
		consul := ctx.tr.consul.(*mockConsulServiceClient)
		consul.mu.Lock()
		defer consul.mu.Unlock()
		if len(consul.ops) < 2 {
			return false, fmt.Errorf("expected at least 2 consul ops but found: %d", len(consul.ops))
		}
		lastOp := consul.ops[len(consul.ops)-1]
		if lastOp.op != "update" {
			return false, fmt.Errorf("expected last consul op to be update not %q", lastOp.op)
		}
		if found := lastOp.task.Services[0].Checks[0].Args[0]; found != "UPDATE" {
			return false, fmt.Errorf("expected consul check to be UPDATE but found: %q", found)
		}
		return true, nil
	}, func(err error) {
		t.Fatalf("err: %v", err)
	})
}
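
// TestTaskRunner_SaveRestoreState snapshots a running task runner, restores
// the snapshot into a fresh runner, and verifies the Vault token survives
// the round trip.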
func TestTaskRunner_SaveRestoreState(t *testing.T) {
	alloc := mock.Alloc()
	task := alloc.Job.TaskGroups[0].Tasks[0]
	task.Driver = "mock_driver"
	task.Config = map[string]interface{}{
		"exit_code": "0",
		"run_for":   "5s",
	}

	// Give it a Vault token
	task.Vault = &structs.Vault{Policies: []string{"default"}}

	ctx := testTaskRunnerFromAlloc(t, false, alloc)
	ctx.tr.MarkReceived()
	go ctx.tr.Run()
	// FIXME: This test did not previously defer-destroy the allocdir.
	defer ctx.Cleanup()

	// Wait for the task to be running and then snapshot the state
	testWaitForTaskToStart(t, ctx)

	if err := ctx.tr.SaveState(); err != nil {
		t.Fatalf("err: %v", err)
	}

	// Read the token from the file system
	tokenPath := filepath.Join(ctx.tr.taskDir.SecretsDir, vaultTokenFile)
	data, err := ioutil.ReadFile(tokenPath)
	if err != nil {
		t.Fatalf("Failed to read file: %v", err)
	}
	token := string(data)
	if len(token) == 0 {
		t.Fatalf("Token not written to disk")
	}

	// Create a new task runner
	task2 := &structs.Task{Name: ctx.tr.task.Name, Driver: ctx.tr.task.Driver, Vault: ctx.tr.task.Vault}
	tr2 := NewTaskRunner(ctx.tr.logger, ctx.tr.config, ctx.tr.stateDB, ctx.upd.Update,
		ctx.tr.taskDir, ctx.tr.alloc, task2, ctx.tr.vaultClient, ctx.tr.consul)
	tr2.restartTracker = noRestartsTracker()
	if _, err := tr2.RestoreState(); err != nil {
		t.Fatalf("err: %v", err)
	}
	go tr2.Run()
	defer tr2.Destroy(structs.NewTaskEvent(structs.TaskKilled))

	// Destroy and wait
	select {
	case <-tr2.WaitCh():
	case <-time.After(time.Duration(testutil.TestMultiplier()*15) * time.Second):
		t.Fatalf("timeout")
	}

	// Check that we recovered the token
	if act := tr2.vaultFuture.Get(); act != token {
		t.Fatalf("Vault token not properly recovered")
	}
}

func TestTaskRunner_Download_List(t *testing.T) {
	ctestutil.ExecCompatible(t)

	ts := httptest.NewServer(http.FileServer(http.Dir(filepath.Dir("."))))
	defer ts.Close()

	// Create an allocation that has a task with a list of artifacts.
	alloc := mock.Alloc()
	task := alloc.Job.TaskGroups[0].Tasks[0]
	f1 := "task_runner_test.go"
	f2 := "task_runner.go"
	artifact1 := structs.TaskArtifact{
		GetterSource: fmt.Sprintf("%s/%s", ts.URL, f1),
	}
	artifact2 := structs.TaskArtifact{
		GetterSource: fmt.Sprintf("%s/%s", ts.URL, f2),
	}
	task.Artifacts = []*structs.TaskArtifact{&artifact1, &artifact2}

	ctx := testTaskRunnerFromAlloc(t, false, alloc)
	ctx.tr.MarkReceived()
	go ctx.tr.Run()
	defer ctx.Cleanup()

	select {
	case <-ctx.tr.WaitCh():
	case <-time.After(time.Duration(testutil.TestMultiplier()*15) * time.Second):
		t.Fatalf("timeout")
	}

	if len(ctx.upd.events) != 5 {
		t.Fatalf("should have 5 updates: %#v", ctx.upd.events)
	}

	if ctx.upd.state != structs.TaskStateDead {
		t.Fatalf("TaskState %v; want %v", ctx.upd.state, structs.TaskStateDead)
	}

	if ctx.upd.events[0].Type != structs.TaskReceived {
		t.Fatalf("First Event was %v; want %v", ctx.upd.events[0].Type, structs.TaskReceived)
	}

	if ctx.upd.events[1].Type != structs.TaskSetup {
		t.Fatalf("Second Event was %v; want %v", ctx.upd.events[1].Type, structs.TaskSetup)
	}

	if ctx.upd.events[2].Type != structs.TaskDownloadingArtifacts {
		t.Fatalf("Third Event was %v; want %v", ctx.upd.events[2].Type, structs.TaskDownloadingArtifacts)
	}

	if ctx.upd.events[3].Type != structs.TaskStarted {
		t.Fatalf("Fourth Event was %v; want %v", ctx.upd.events[3].Type, structs.TaskStarted)
	}

	if ctx.upd.events[4].Type != structs.TaskTerminated {
		t.Fatalf("Fifth Event was %v; want %v", ctx.upd.events[4].Type, structs.TaskTerminated)
	}

	// Check that both files exist.
	if _, err := os.Stat(filepath.Join(ctx.tr.taskDir.Dir, f1)); err != nil {
		t.Fatalf("%v not downloaded", f1)
	}
	if _, err := os.Stat(filepath.Join(ctx.tr.taskDir.Dir, f2)); err != nil {
		t.Fatalf("%v not downloaded", f2)
	}
}

func TestTaskRunner_Download_Retries(t *testing.T) {
	ctestutil.ExecCompatible(t)

	// Create an allocation that has a task with bad artifacts.
	alloc := mock.Alloc()
	task := alloc.Job.TaskGroups[0].Tasks[0]
	artifact := structs.TaskArtifact{
		GetterSource: "http://127.1.1.111:12315/foo/bar/baz",
	}
	task.Artifacts = []*structs.TaskArtifact{&artifact}

	// Make the restart policy retry once
	alloc.Job.TaskGroups[0].RestartPolicy = &structs.RestartPolicy{
		Attempts: 1,
		Interval: 10 * time.Minute,
		Delay:    1 * time.Second,
		Mode:     structs.RestartPolicyModeFail,
	}

	ctx := testTaskRunnerFromAlloc(t, true, alloc)
	ctx.tr.MarkReceived()
	go ctx.tr.Run()
	defer ctx.Cleanup()

	select {
	case <-ctx.tr.WaitCh():
	case <-time.After(time.Duration(testutil.TestMultiplier()*15) * time.Second):
		t.Fatalf("timeout")
	}

	if len(ctx.upd.events) != 8 {
		t.Fatalf("should have 8 updates: %#v", ctx.upd.events)
	}

	if ctx.upd.state != structs.TaskStateDead {
		t.Fatalf("TaskState %v; want %v", ctx.upd.state, structs.TaskStateDead)
	}

	if ctx.upd.events[0].Type != structs.TaskReceived {
		t.Fatalf("First Event was %v; want %v", ctx.upd.events[0].Type, structs.TaskReceived)
	}

	if ctx.upd.events[1].Type != structs.TaskSetup {
		t.Fatalf("Second Event was %v; want %v", ctx.upd.events[1].Type, structs.TaskSetup)
	}

	if ctx.upd.events[2].Type != structs.TaskDownloadingArtifacts {
		t.Fatalf("Third Event was %v; want %v", ctx.upd.events[2].Type, structs.TaskDownloadingArtifacts)
	}

	if ctx.upd.events[3].Type != structs.TaskArtifactDownloadFailed {
		t.Fatalf("Fourth Event was %v; want %v", ctx.upd.events[3].Type, structs.TaskArtifactDownloadFailed)
	}

	if ctx.upd.events[4].Type != structs.TaskRestarting {
		t.Fatalf("Fifth Event was %v; want %v", ctx.upd.events[4].Type, structs.TaskRestarting)
	}

	if ctx.upd.events[5].Type != structs.TaskDownloadingArtifacts {
		t.Fatalf("Sixth Event was %v; want %v", ctx.upd.events[5].Type, structs.TaskDownloadingArtifacts)
	}

	if ctx.upd.events[6].Type != structs.TaskArtifactDownloadFailed {
		t.Fatalf("Seventh Event was %v; want %v", ctx.upd.events[6].Type, structs.TaskArtifactDownloadFailed)
	}

	if ctx.upd.events[7].Type != structs.TaskNotRestarting {
		t.Fatalf("Eighth Event was %v; want %v", ctx.upd.events[7].Type, structs.TaskNotRestarting)
	}
}

// TestTaskRunner_UnregisterConsul_Retries asserts a task is unregistered from
// Consul when waiting to be retried.
func TestTaskRunner_UnregisterConsul_Retries(t *testing.T) {
	ctestutil.ExecCompatible(t)

	// Create an allocation whose task fails immediately so it is restarted.
	alloc := mock.Alloc()

	// Make the restart policy retry once
	alloc.Job.TaskGroups[0].RestartPolicy = &structs.RestartPolicy{
		Attempts: 1,
		Interval: 10 * time.Minute,
		Delay:    time.Nanosecond,
		Mode:     structs.RestartPolicyModeFail,
	}

	task := alloc.Job.TaskGroups[0].Tasks[0]
	task.Driver = "mock_driver"
	task.Config = map[string]interface{}{
		"exit_code": "1",
		"run_for":   "1ns",
	}

	ctx := testTaskRunnerFromAlloc(t, true, alloc)
	ctx.tr.MarkReceived()
	ctx.tr.Run()
	defer ctx.Cleanup()

	// Assert it is properly registered and unregistered
	consul := ctx.tr.consul.(*mockConsulServiceClient)
	if expected := 4; len(consul.ops) != expected {
		t.Errorf("expected %d consul ops but found: %d", expected, len(consul.ops))
	}
	if consul.ops[0].op != "add" {
		t.Errorf("expected first op to be add but found: %q", consul.ops[0].op)
	}
	if consul.ops[1].op != "remove" {
		t.Errorf("expected second op to be remove but found: %q", consul.ops[1].op)
	}
	if consul.ops[2].op != "add" {
		t.Errorf("expected third op to be add but found: %q", consul.ops[2].op)
	}
	if consul.ops[3].op != "remove" {
		t.Errorf("expected fourth/final op to be remove but found: %q", consul.ops[3].op)
	}
}

func TestTaskRunner_Validate_UserEnforcement(t *testing.T) {
	ctestutil.ExecCompatible(t)
	ctx := testTaskRunner(t, false)
	defer ctx.Cleanup()

	// Try to run as root with exec.
	ctx.tr.task.Driver = "exec"
	ctx.tr.task.User = "root"
	if err := ctx.tr.validateTask(); err == nil {
		t.Fatalf("expected error running as root with exec")
	}

	// Try to run a non-blacklisted user with exec.
	ctx.tr.task.Driver = "exec"
	ctx.tr.task.User = "foobar"
	if err := ctx.tr.validateTask(); err != nil {
		t.Fatalf("unexpected error: %v", err)
	}

	// Try to run as root with docker.
	ctx.tr.task.Driver = "docker"
	ctx.tr.task.User = "root"
	if err := ctx.tr.validateTask(); err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
}

func TestTaskRunner_RestartTask(t *testing.T) {
	alloc := mock.Alloc()
	task := alloc.Job.TaskGroups[0].Tasks[0]
	task.Driver = "mock_driver"
	task.Config = map[string]interface{}{
		"exit_code": "0",
		"run_for":   "100s",
	}

	ctx := testTaskRunnerFromAlloc(t, true, alloc)
	ctx.tr.MarkReceived()
	go ctx.tr.Run()
	defer ctx.Cleanup()

	// Wait for it to start
	go func() {
		testWaitForTaskToStart(t, ctx)
		ctx.tr.Restart("test", "restart")

		// Wait for it to restart then kill
		go func() {
			// Wait for the task to start again
			testutil.WaitForResult(func() (bool, error) {
				if len(ctx.upd.events) != 8 {
					t.Fatalf("task %q in alloc %q should have 8 updates: %#v", task.Name, alloc.ID, ctx.upd.events)
				}

				return true, nil
			}, func(err error) {
				t.Fatalf("err: %v", err)
			})
			ctx.tr.Kill("test", "restart", false)
		}()
	}()

	select {
	case <-ctx.tr.WaitCh():
	case <-time.After(time.Duration(testutil.TestMultiplier()*15) * time.Second):
		t.Fatalf("timeout")
	}

	if len(ctx.upd.events) != 10 {
		t.Fatalf("should have 10 updates: %#v", ctx.upd.events)
	}

	if ctx.upd.state != structs.TaskStateDead {
		t.Fatalf("TaskState %v; want %v", ctx.upd.state, structs.TaskStateDead)
	}

	if ctx.upd.events[0].Type != structs.TaskReceived {
		t.Fatalf("First Event was %v; want %v", ctx.upd.events[0].Type, structs.TaskReceived)
	}

	if ctx.upd.events[1].Type != structs.TaskSetup {
		t.Fatalf("Second Event was %v; want %v", ctx.upd.events[1].Type, structs.TaskSetup)
	}

	if ctx.upd.events[2].Type != structs.TaskStarted {
		t.Fatalf("Third Event was %v; want %v", ctx.upd.events[2].Type, structs.TaskStarted)
	}

	if ctx.upd.events[3].Type != structs.TaskRestartSignal {
		t.Fatalf("Fourth Event was %v; want %v", ctx.upd.events[3].Type, structs.TaskRestartSignal)
	}

	if ctx.upd.events[4].Type != structs.TaskKilling {
		t.Fatalf("Fifth Event was %v; want %v", ctx.upd.events[4].Type, structs.TaskKilling)
	}

	if ctx.upd.events[5].Type != structs.TaskKilled {
		t.Fatalf("Sixth Event was %v; want %v", ctx.upd.events[5].Type, structs.TaskKilled)
	}

	if ctx.upd.events[6].Type != structs.TaskRestarting {
		t.Fatalf("Seventh Event was %v; want %v", ctx.upd.events[6].Type, structs.TaskRestarting)
	}

	if ctx.upd.events[7].Type != structs.TaskStarted {
		t.Fatalf("Eighth Event was %v; want %v", ctx.upd.events[7].Type, structs.TaskStarted)
	}
	if ctx.upd.events[8].Type != structs.TaskKilling {
		t.Fatalf("Ninth Event was %v; want %v", ctx.upd.events[8].Type, structs.TaskKilling)
	}

	if ctx.upd.events[9].Type != structs.TaskKilled {
		t.Fatalf("Tenth Event was %v; want %v", ctx.upd.events[9].Type, structs.TaskKilled)
	}
}

func TestTaskRunner_KillTask(t *testing.T) {
	alloc := mock.Alloc()
	task := alloc.Job.TaskGroups[0].Tasks[0]
	task.Driver = "mock_driver"
	task.Config = map[string]interface{}{
		"exit_code": "0",
		"run_for":   "10s",
	}

	ctx := testTaskRunnerFromAlloc(t, false, alloc)
	ctx.tr.MarkReceived()
	go ctx.tr.Run()
	defer ctx.Cleanup()

	go func() {
		testWaitForTaskToStart(t, ctx)
		ctx.tr.Kill("test", "kill", true)
	}()

	select {
	case <-ctx.tr.WaitCh():
	case <-time.After(time.Duration(testutil.TestMultiplier()*15) * time.Second):
		t.Fatalf("timeout")
	}

	if len(ctx.upd.events) != 5 {
		t.Fatalf("should have 5 updates: %#v", ctx.upd.events)
	}

	if ctx.upd.state != structs.TaskStateDead {
		t.Fatalf("TaskState %v; want %v", ctx.upd.state, structs.TaskStateDead)
	}

	if !ctx.upd.failed {
		t.Fatalf("TaskState should be failed: %+v", ctx.upd)
	}

	if ctx.upd.events[0].Type != structs.TaskReceived {
		t.Fatalf("First Event was %v; want %v", ctx.upd.events[0].Type, structs.TaskReceived)
	}

	if ctx.upd.events[1].Type != structs.TaskSetup {
		t.Fatalf("Second Event was %v; want %v", ctx.upd.events[1].Type, structs.TaskSetup)
	}

	if ctx.upd.events[2].Type != structs.TaskStarted {
		t.Fatalf("Third Event was %v; want %v", ctx.upd.events[2].Type, structs.TaskStarted)
	}

	if ctx.upd.events[3].Type != structs.TaskKilling {
		t.Fatalf("Fourth Event was %v; want %v", ctx.upd.events[3].Type, structs.TaskKilling)
	}

	if ctx.upd.events[4].Type != structs.TaskKilled {
		t.Fatalf("Fifth Event was %v; want %v", ctx.upd.events[4].Type, structs.TaskKilled)
	}
}

func TestTaskRunner_SignalFailure(t *testing.T) {
	alloc := mock.Alloc()
	task := alloc.Job.TaskGroups[0].Tasks[0]
	task.Driver = "mock_driver"
	task.Config = map[string]interface{}{
		"exit_code":    "0",
		"run_for":      "10s",
		"signal_error": "test forcing failure",
	}

	ctx := testTaskRunnerFromAlloc(t, false, alloc)
	ctx.tr.MarkReceived()
	go ctx.tr.Run()
	defer ctx.Cleanup()

	// Wait for the task to start
	testWaitForTaskToStart(t, ctx)

	if err := ctx.tr.Signal("test", "test", syscall.SIGINT); err == nil {
		t.Fatalf("Didn't receive error")
	}
}

func TestTaskRunner_BlockForVault(t *testing.T) {
	alloc := mock.Alloc()
	task := alloc.Job.TaskGroups[0].Tasks[0]
	task.Driver = "mock_driver"
	task.Config = map[string]interface{}{
		"exit_code": "0",
		"run_for":   "1s",
	}
	task.Vault = &structs.Vault{Policies: []string{"default"}}

	ctx := testTaskRunnerFromAlloc(t, false, alloc)
	ctx.tr.MarkReceived()
	defer ctx.Cleanup()

	// Control when we get a Vault token
	token := "1234"
	waitCh := make(chan struct{})
	handler := func(*structs.Allocation, []string) (map[string]string, error) {
		<-waitCh
		return map[string]string{task.Name: token}, nil
	}
	ctx.tr.vaultClient.(*vaultclient.MockVaultClient).DeriveTokenFn = handler

	go ctx.tr.Run()

	select {
	case <-ctx.tr.WaitCh():
		t.Fatalf("premature exit")
	case <-time.After(1 * time.Second):
	}

	if len(ctx.upd.events) != 2 {
		t.Fatalf("should have 2 updates: %#v", ctx.upd.events)
	}

	if ctx.upd.state != structs.TaskStatePending {
		t.Fatalf("TaskState %v; want %v", ctx.upd.state, structs.TaskStatePending)
	}

	if ctx.upd.events[0].Type != structs.TaskReceived {
		t.Fatalf("First Event was %v; want %v", ctx.upd.events[0].Type, structs.TaskReceived)
	}

	if ctx.upd.events[1].Type != structs.TaskSetup {
		t.Fatalf("Second Event was %v; want %v", ctx.upd.events[1].Type, structs.TaskSetup)
	}

	// Unblock
	close(waitCh)

	select {
	case <-ctx.tr.WaitCh():
	case <-time.After(time.Duration(testutil.TestMultiplier()*15) * time.Second):
		t.Fatalf("timeout")
	}

	if len(ctx.upd.events) != 4 {
		t.Fatalf("should have 4 updates: %#v", ctx.upd.events)
	}

	if ctx.upd.state != structs.TaskStateDead {
		t.Fatalf("TaskState %v; want %v", ctx.upd.state, structs.TaskStateDead)
	}

	if ctx.upd.events[0].Type != structs.TaskReceived {
		t.Fatalf("First Event was %v; want %v", ctx.upd.events[0].Type, structs.TaskReceived)
	}

	if ctx.upd.events[1].Type != structs.TaskSetup {
		t.Fatalf("Second Event was %v; want %v", ctx.upd.events[1].Type, structs.TaskSetup)
	}

	if ctx.upd.events[2].Type != structs.TaskStarted {
		t.Fatalf("Third Event was %v; want %v", ctx.upd.events[2].Type, structs.TaskStarted)
	}

	if ctx.upd.events[3].Type != structs.TaskTerminated {
		t.Fatalf("Fourth Event was %v; want %v", ctx.upd.events[3].Type, structs.TaskTerminated)
	}

	// Check that the token is on disk
	tokenPath := filepath.Join(ctx.tr.taskDir.SecretsDir, vaultTokenFile)
	data, err := ioutil.ReadFile(tokenPath)
	if err != nil {
		t.Fatalf("Failed to read file: %v", err)
	}

	if act := string(data); act != token {
		t.Fatalf("Token didn't get written to disk properly, got %q; want %q", act, token)
	}

	// Check the token was revoked
	m := ctx.tr.vaultClient.(*vaultclient.MockVaultClient)
	testutil.WaitForResult(func() (bool, error) {
		if len(m.StoppedTokens) != 1 {
			return false, fmt.Errorf("Expected a stopped token: %v", m.StoppedTokens)
		}

		if a := m.StoppedTokens[0]; a != token {
			return false, fmt.Errorf("got stopped token %q; want %q", a, token)
		}
		return true, nil
	}, func(err error) {
		t.Fatalf("err: %v", err)
	})
}

func TestTaskRunner_DeriveToken_Retry(t *testing.T) {
	alloc := mock.Alloc()
	task := alloc.Job.TaskGroups[0].Tasks[0]
	task.Driver = "mock_driver"
	task.Config = map[string]interface{}{
		"exit_code": "0",
		"run_for":   "1s",
	}
	task.Vault = &structs.Vault{Policies: []string{"default"}}

	ctx := testTaskRunnerFromAlloc(t, false, alloc)
	ctx.tr.MarkReceived()
	defer ctx.Cleanup()

	// Control when we get a Vault token
	token := "1234"
	count := 0
	handler := func(*structs.Allocation, []string) (map[string]string, error) {
		if count > 0 {
			return map[string]string{task.Name: token}, nil
		}

		count++
		return nil, structs.NewRecoverableError(fmt.Errorf("Want a retry"), true)
	}
	ctx.tr.vaultClient.(*vaultclient.MockVaultClient).DeriveTokenFn = handler
	go ctx.tr.Run()

	select {
	case <-ctx.tr.WaitCh():
	case <-time.After(time.Duration(testutil.TestMultiplier()*15) * time.Second):
		t.Fatalf("timeout")
	}

	if len(ctx.upd.events) != 4 {
		t.Fatalf("should have 4 updates: %#v", ctx.upd.events)
	}

	if ctx.upd.state != structs.TaskStateDead {
		t.Fatalf("TaskState %v; want %v", ctx.upd.state, structs.TaskStateDead)
	}

	if ctx.upd.events[0].Type != structs.TaskReceived {
		t.Fatalf("First Event was %v; want %v", ctx.upd.events[0].Type, structs.TaskReceived)
	}

	if ctx.upd.events[1].Type != structs.TaskSetup {
		t.Fatalf("Second Event was %v; want %v", ctx.upd.events[1].Type, structs.TaskSetup)
	}

	if ctx.upd.events[2].Type != structs.TaskStarted {
		t.Fatalf("Third Event was %v; want %v", ctx.upd.events[2].Type, structs.TaskStarted)
	}

	if ctx.upd.events[3].Type != structs.TaskTerminated {
		t.Fatalf("Fourth Event was %v; want %v", ctx.upd.events[3].Type, structs.TaskTerminated)
	}

	// Check that the token is on disk
	tokenPath := filepath.Join(ctx.tr.taskDir.SecretsDir, vaultTokenFile)
	data, err := ioutil.ReadFile(tokenPath)
	if err != nil {
		t.Fatalf("Failed to read file: %v", err)
	}

	if act := string(data); act != token {
		t.Fatalf("Token didn't get written to disk properly, got %q; want %q", act, token)
	}

	// Check the token was revoked
	m := ctx.tr.vaultClient.(*vaultclient.MockVaultClient)
	testutil.WaitForResult(func() (bool, error) {
		if len(m.StoppedTokens) != 1 {
			return false, fmt.Errorf("Expected a stopped token: %v", m.StoppedTokens)
		}

		if a := m.StoppedTokens[0]; a != token {
			return false, fmt.Errorf("got stopped token %q; want %q", a, token)
		}
		return true, nil
	}, func(err error) {
		t.Fatalf("err: %v", err)
	})
}

func TestTaskRunner_DeriveToken_Unrecoverable(t *testing.T) {
	alloc := mock.Alloc()
	task := alloc.Job.TaskGroups[0].Tasks[0]
	task.Driver = "mock_driver"
	task.Config = map[string]interface{}{
		"exit_code": "0",
		"run_for":   "10s",
	}
	task.Vault = &structs.Vault{
		Policies:   []string{"default"},
		ChangeMode: structs.VaultChangeModeRestart,
	}

	ctx := testTaskRunnerFromAlloc(t, false, alloc)
	ctx.tr.MarkReceived()
	defer ctx.Cleanup()

	// Error the token derivation
	vc := ctx.tr.vaultClient.(*vaultclient.MockVaultClient)
	vc.SetDeriveTokenError(alloc.ID, []string{task.Name}, fmt.Errorf("Non recoverable"))
	go ctx.tr.Run()

	// Wait for the task to be killed because of the unrecoverable error
	testutil.WaitForResult(func() (bool, error) {
		if l := len(ctx.upd.events); l != 3 {
			return false, fmt.Errorf("Expect 3 events; got %v", l)
		}

		if ctx.upd.events[0].Type != structs.TaskReceived {
			return false, fmt.Errorf("First Event was %v; want %v", ctx.upd.events[0].Type, structs.TaskReceived)
		}

		if ctx.upd.events[1].Type != structs.TaskSetup {
			return false, fmt.Errorf("Second Event was %v; want %v", ctx.upd.events[1].Type, structs.TaskSetup)
		}

		if ctx.upd.events[2].Type != structs.TaskKilling {
			return false, fmt.Errorf("Third Event was %v; want %v", ctx.upd.events[2].Type, structs.TaskKilling)
		}

		return true, nil
	}, func(err error) {
		t.Fatalf("err: %v", err)
	})
}
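
// TestTaskRunner_Template_Block asserts that a task with an unrenderable
// template stays pending, then starts and finishes once the runner is
// unblocked.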
func TestTaskRunner_Template_Block(t *testing.T) {
	testRetryRate = 2 * time.Second
	defer func() {
		testRetryRate = 0
	}()
	alloc := mock.Alloc()
	task := alloc.Job.TaskGroups[0].Tasks[0]
	task.Driver = "mock_driver"
	task.Config = map[string]interface{}{
		"exit_code": "0",
		"run_for":   "1s",
	}
	task.Templates = []*structs.Template{
		{
			EmbeddedTmpl: "{{key \"foo\"}}",
			DestPath:     "local/test",
			ChangeMode:   structs.TemplateChangeModeNoop,
		},
	}

	ctx := testTaskRunnerFromAlloc(t, false, alloc)
	ctx.tr.MarkReceived()
	go ctx.tr.Run()
	defer ctx.Cleanup()

	select {
	case <-ctx.tr.WaitCh():
		t.Fatalf("premature exit")
	case <-time.After(1 * time.Second):
	}

	if len(ctx.upd.events) != 2 {
		t.Fatalf("should have 2 updates: %#v", ctx.upd.events)
	}

	if ctx.upd.state != structs.TaskStatePending {
		t.Fatalf("TaskState %v; want %v", ctx.upd.state, structs.TaskStatePending)
	}

	if ctx.upd.events[0].Type != structs.TaskReceived {
		t.Fatalf("First Event was %v; want %v", ctx.upd.events[0].Type, structs.TaskReceived)
	}

	if ctx.upd.events[1].Type != structs.TaskSetup {
		t.Fatalf("Second Event was %v; want %v", ctx.upd.events[1].Type, structs.TaskSetup)
	}

	// Unblock
	ctx.tr.UnblockStart("test")

	select {
	case <-ctx.tr.WaitCh():
	case <-time.After(time.Duration(testutil.TestMultiplier()*15) * time.Second):
		t.Fatalf("timeout")
	}

	if len(ctx.upd.events) != 4 {
		t.Fatalf("should have 4 updates: %#v", ctx.upd.events)
	}

	if ctx.upd.state != structs.TaskStateDead {
		t.Fatalf("TaskState %v; want %v", ctx.upd.state, structs.TaskStateDead)
	}

	if ctx.upd.events[0].Type != structs.TaskReceived {
		t.Fatalf("First Event was %v; want %v", ctx.upd.events[0].Type, structs.TaskReceived)
	}

	if ctx.upd.events[1].Type != structs.TaskSetup {
		t.Fatalf("Second Event was %v; want %v", ctx.upd.events[1].Type, structs.TaskSetup)
	}

	if ctx.upd.events[2].Type != structs.TaskStarted {
		t.Fatalf("Third Event was %v; want %v", ctx.upd.events[2].Type, structs.TaskStarted)
	}

	if ctx.upd.events[3].Type != structs.TaskTerminated {
		t.Fatalf("Fourth Event was %v; want %v", ctx.upd.events[3].Type, structs.TaskTerminated)
	}
}

func TestTaskRunner_Template_Artifact(t *testing.T) {
	dir, err := os.Getwd()
	if err != nil {
		t.Fatalf("bad: %v", err)
	}

	ts := httptest.NewServer(http.FileServer(http.Dir(filepath.Join(dir, ".."))))
	defer ts.Close()

	alloc := mock.Alloc()
	task := alloc.Job.TaskGroups[0].Tasks[0]
	task.Driver = "mock_driver"
	task.Config = map[string]interface{}{
		"exit_code": "0",
		"run_for":   "1s",
	}
	// Create an allocation that has a task that renders a template from an
	// artifact
	f1 := "CHANGELOG.md"
	artifact := structs.TaskArtifact{
		GetterSource: fmt.Sprintf("%s/%s", ts.URL, f1),
	}
	task.Artifacts = []*structs.TaskArtifact{&artifact}
	task.Templates = []*structs.Template{
		{
			SourcePath: "CHANGELOG.md",
			DestPath:   "local/test",
			ChangeMode: structs.TemplateChangeModeNoop,
		},
	}

	ctx := testTaskRunnerFromAlloc(t, false, alloc)
	ctx.tr.MarkReceived()
	defer ctx.Cleanup()
	go ctx.tr.Run()

	select {
	case <-ctx.tr.WaitCh():
	case <-time.After(time.Duration(testutil.TestMultiplier()*15) * time.Second):
		t.Fatalf("timeout")
	}

	if len(ctx.upd.events) != 5 {
		t.Fatalf("should have 5 updates: %#v", ctx.upd.events)
	}

	if ctx.upd.state != structs.TaskStateDead {
		t.Fatalf("TaskState %v; want %v", ctx.upd.state, structs.TaskStateDead)
	}

	if ctx.upd.events[0].Type != structs.TaskReceived {
		t.Fatalf("First Event was %v; want %v", ctx.upd.events[0].Type, structs.TaskReceived)
	}

	if ctx.upd.events[1].Type != structs.TaskSetup {
		t.Fatalf("Second Event was %v; want %v", ctx.upd.events[1].Type, structs.TaskSetup)
	}

	if ctx.upd.events[2].Type != structs.TaskDownloadingArtifacts {
		t.Fatalf("Third Event was %v; want %v", ctx.upd.events[2].Type, structs.TaskDownloadingArtifacts)
	}

	if ctx.upd.events[3].Type != structs.TaskStarted {
		t.Fatalf("Fourth Event was %v; want %v", ctx.upd.events[3].Type, structs.TaskStarted)
	}

	if ctx.upd.events[4].Type != structs.TaskTerminated {
		t.Fatalf("Fifth Event was %v; want %v", ctx.upd.events[4].Type, structs.TaskTerminated)
	}

	// Check that both files exist.
	if _, err := os.Stat(filepath.Join(ctx.tr.taskDir.Dir, f1)); err != nil {
		t.Fatalf("%v not downloaded", f1)
	}
	if _, err := os.Stat(filepath.Join(ctx.tr.taskDir.LocalDir, "test")); err != nil {
		t.Fatalf("template not rendered")
	}
}
func TestTaskRunner_Template_NewVaultToken(t *testing.T) {
	alloc := mock.Alloc()
	task := alloc.Job.TaskGroups[0].Tasks[0]
	task.Driver = "mock_driver"
	task.Config = map[string]interface{}{
		"exit_code": "0",
		"run_for":   "1s",
	}
	task.Templates = []*structs.Template{
		{
			EmbeddedTmpl: "{{key \"foo\"}}",
			DestPath:     "local/test",
			ChangeMode:   structs.TemplateChangeModeNoop,
		},
	}
	task.Vault = &structs.Vault{Policies: []string{"default"}}

	ctx := testTaskRunnerFromAlloc(t, false, alloc)
	ctx.tr.MarkReceived()
	defer ctx.Cleanup()
	go ctx.tr.Run()

	// Wait for a Vault token
	var token string
	testutil.WaitForResult(func() (bool, error) {
		if token = ctx.tr.vaultFuture.Get(); token == "" {
			return false, fmt.Errorf("No Vault token")
		}

		return true, nil
	}, func(err error) {
		t.Fatalf("err: %v", err)
	})

	// Error the token renewal
	vc := ctx.tr.vaultClient.(*vaultclient.MockVaultClient)
	renewalCh, ok := vc.RenewTokens[token]
	if !ok {
		t.Fatalf("no renewal channel")
	}

	originalManager := ctx.tr.templateManager

	renewalCh <- fmt.Errorf("Test killing")
	close(renewalCh)

	// Wait for a new Vault token
	var token2 string
	testutil.WaitForResult(func() (bool, error) {
		if token2 = ctx.tr.vaultFuture.Get(); token2 == "" || token2 == token {
			return false, fmt.Errorf("No new Vault token")
		}

		if originalManager == ctx.tr.templateManager {
			return false, fmt.Errorf("Template manager not updated")
		}

		return true, nil
	}, func(err error) {
		t.Fatalf("err: %v", err)
	})

	// Check the token was revoked
	m := ctx.tr.vaultClient.(*vaultclient.MockVaultClient)
	testutil.WaitForResult(func() (bool, error) {
		if len(m.StoppedTokens) != 1 {
			return false, fmt.Errorf("Expected a stopped token: %v", m.StoppedTokens)
		}

		if a := m.StoppedTokens[0]; a != token {
			return false, fmt.Errorf("got stopped token %q; want %q", a, token)
		}
		return true, nil
	}, func(err error) {
		t.Fatalf("err: %v", err)
	})
}
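
// TestTaskRunner_VaultManager_Restart verifies that a failed Vault token
// renewal restarts the task when the Vault ChangeMode is "restart".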
func TestTaskRunner_VaultManager_Restart(t *testing.T) {
	alloc := mock.Alloc()
	task := alloc.Job.TaskGroups[0].Tasks[0]
	task.Driver = "mock_driver"
	task.Config = map[string]interface{}{
		"exit_code": "0",
		"run_for":   "10s",
	}
	task.Vault = &structs.Vault{
		Policies:   []string{"default"},
		ChangeMode: structs.VaultChangeModeRestart,
	}

	ctx := testTaskRunnerFromAlloc(t, false, alloc)
	ctx.tr.MarkReceived()
	defer ctx.Cleanup()
	go ctx.tr.Run()

	// Wait for the task to start
	testWaitForTaskToStart(t, ctx)

	// Error the token renewal
	vc := ctx.tr.vaultClient.(*vaultclient.MockVaultClient)
	renewalCh, ok := vc.RenewTokens[ctx.tr.vaultFuture.Get()]
	if !ok {
		t.Fatalf("no renewal channel")
	}

	renewalCh <- fmt.Errorf("Test killing")
	close(renewalCh)

	// Ensure a restart
	testutil.WaitForResult(func() (bool, error) {
		if l := len(ctx.upd.events); l != 8 {
			return false, fmt.Errorf("Expect eight events; got %#v", ctx.upd.events)
		}

		if ctx.upd.events[0].Type != structs.TaskReceived {
			return false, fmt.Errorf("First Event was %v; want %v", ctx.upd.events[0].Type, structs.TaskReceived)
		}

		if ctx.upd.events[1].Type != structs.TaskSetup {
			return false, fmt.Errorf("Second Event was %v; want %v", ctx.upd.events[1].Type, structs.TaskSetup)
		}

		if ctx.upd.events[2].Type != structs.TaskStarted {
			return false, fmt.Errorf("Third Event was %v; want %v", ctx.upd.events[2].Type, structs.TaskStarted)
		}

		if ctx.upd.events[3].Type != structs.TaskRestartSignal {
			return false, fmt.Errorf("Fourth Event was %v; want %v", ctx.upd.events[3].Type, structs.TaskRestartSignal)
		}

		if ctx.upd.events[4].Type != structs.TaskKilling {
			return false, fmt.Errorf("Fifth Event was %v; want %v", ctx.upd.events[4].Type, structs.TaskKilling)
		}

		if ctx.upd.events[5].Type != structs.TaskKilled {
			return false, fmt.Errorf("Sixth Event was %v; want %v", ctx.upd.events[5].Type, structs.TaskKilled)
		}

		if ctx.upd.events[6].Type != structs.TaskRestarting {
			return false, fmt.Errorf("Seventh Event was %v; want %v", ctx.upd.events[6].Type, structs.TaskRestarting)
		}

		if ctx.upd.events[7].Type != structs.TaskStarted {
			return false, fmt.Errorf("Eighth Event was %v; want %v", ctx.upd.events[7].Type, structs.TaskStarted)
		}

		return true, nil
	}, func(err error) {
		t.Fatalf("err: %v", err)
	})
}
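
// TestTaskRunner_VaultManager_Signal verifies that a failed Vault token
// renewal signals the task when the Vault ChangeMode is "signal".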
func TestTaskRunner_VaultManager_Signal(t *testing.T) {
	alloc := mock.Alloc()
	task := alloc.Job.TaskGroups[0].Tasks[0]
	task.Driver = "mock_driver"
	task.Config = map[string]interface{}{
		"exit_code": "0",
		"run_for":   "10s",
	}
	task.Vault = &structs.Vault{
		Policies:     []string{"default"},
		ChangeMode:   structs.VaultChangeModeSignal,
		ChangeSignal: "SIGUSR1",
	}

	ctx := testTaskRunnerFromAlloc(t, false, alloc)
	ctx.tr.MarkReceived()
	go ctx.tr.Run()
	defer ctx.Cleanup()

	// Wait for the task to start
	testWaitForTaskToStart(t, ctx)

	// Error the token renewal
	vc := ctx.tr.vaultClient.(*vaultclient.MockVaultClient)
	renewalCh, ok := vc.RenewTokens[ctx.tr.vaultFuture.Get()]
	if !ok {
		t.Fatalf("no renewal channel")
	}

	renewalCh <- fmt.Errorf("Test killing")
	close(renewalCh)

	// Ensure the task was signaled
	testutil.WaitForResult(func() (bool, error) {
		if l := len(ctx.upd.events); l != 4 {
			return false, fmt.Errorf("Expect four events; got %#v", ctx.upd.events)
		}

		if ctx.upd.events[0].Type != structs.TaskReceived {
			return false, fmt.Errorf("First Event was %v; want %v", ctx.upd.events[0].Type, structs.TaskReceived)
		}

		if ctx.upd.events[1].Type != structs.TaskSetup {
			return false, fmt.Errorf("Second Event was %v; want %v", ctx.upd.events[1].Type, structs.TaskSetup)
		}

		if ctx.upd.events[2].Type != structs.TaskStarted {
			return false, fmt.Errorf("Third Event was %v; want %v", ctx.upd.events[2].Type, structs.TaskStarted)
		}

		if ctx.upd.events[3].Type != structs.TaskSignaling {
			return false, fmt.Errorf("Fourth Event was %v; want %v", ctx.upd.events[3].Type, structs.TaskSignaling)
		}

		return true, nil
	}, func(err error) {
		t.Fatalf("err: %v", err)
	})
}
// Test that the payload is written to disk
func TestTaskRunner_SimpleRun_Dispatch(t *testing.T) {
	alloc := mock.Alloc()
	task := alloc.Job.TaskGroups[0].Tasks[0]
	task.Driver = "mock_driver"
	task.Config = map[string]interface{}{
		"exit_code": "0",
		"run_for":   "1s",
	}
	fileName := "test"
	task.DispatchPayload = &structs.DispatchPayloadConfig{
		File: fileName,
	}
	alloc.Job.ParameterizedJob = &structs.ParameterizedJobConfig{}

	// Add a compressed payload
	expected := []byte("hello world")
	compressed := snappy.Encode(nil, expected)
	alloc.Job.Payload = compressed

	ctx := testTaskRunnerFromAlloc(t, false, alloc)
	ctx.tr.MarkReceived()
	defer ctx.tr.Destroy(structs.NewTaskEvent(structs.TaskKilled))
	defer ctx.allocDir.Destroy()
	go ctx.tr.Run()

	select {
	case <-ctx.tr.WaitCh():
	case <-time.After(time.Duration(testutil.TestMultiplier()*15) * time.Second):
		t.Fatalf("timeout")
	}

	if len(ctx.upd.events) != 4 {
		t.Fatalf("should have 4 updates: %#v", ctx.upd.events)
	}

	if ctx.upd.state != structs.TaskStateDead {
		t.Fatalf("TaskState %v; want %v", ctx.upd.state, structs.TaskStateDead)
	}

	if ctx.upd.events[0].Type != structs.TaskReceived {
		t.Fatalf("First Event was %v; want %v", ctx.upd.events[0].Type, structs.TaskReceived)
	}

	if ctx.upd.events[1].Type != structs.TaskSetup {
		t.Fatalf("Second Event was %v; want %v", ctx.upd.events[1].Type, structs.TaskSetup)
	}

	if ctx.upd.events[2].Type != structs.TaskStarted {
		t.Fatalf("Third Event was %v; want %v", ctx.upd.events[2].Type, structs.TaskStarted)
	}

	if ctx.upd.events[3].Type != structs.TaskTerminated {
		t.Fatalf("Fourth Event was %v; want %v", ctx.upd.events[3].Type, structs.TaskTerminated)
	}

	// Check that the file was written to disk properly
	payloadPath := filepath.Join(ctx.tr.taskDir.LocalDir, fileName)
	data, err := ioutil.ReadFile(payloadPath)
	if err != nil {
		t.Fatalf("Failed to read file: %v", err)
	}
	if !reflect.DeepEqual(data, expected) {
		t.Fatalf("Bad; got %v; want %v", string(data), string(expected))
	}
}
// TestTaskRunner_CleanupEmpty ensures TaskRunner works when createdResources
// is empty.
func TestTaskRunner_CleanupEmpty(t *testing.T) {
	alloc := mock.Alloc()
	task := alloc.Job.TaskGroups[0].Tasks[0]
	task.Driver = "mock_driver"

	ctx := testTaskRunnerFromAlloc(t, false, alloc)
	ctx.tr.MarkReceived()

	defer ctx.Cleanup()
	ctx.tr.Run()

	// Nothing was created, so createdResources should still be empty
	if len(ctx.tr.createdResources.Resources) != 0 {
		t.Fatalf("createdResources should still be empty: %v", ctx.tr.createdResources)
	}
}
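
// TestTaskRunner_CleanupOK ensures created resources are removed when cleanup
// eventually succeeds after a single failure.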
func TestTaskRunner_CleanupOK(t *testing.T) {
	alloc := mock.Alloc()
	task := alloc.Job.TaskGroups[0].Tasks[0]
	task.Driver = "mock_driver"
	key := "ERR"

	ctx := testTaskRunnerFromAlloc(t, false, alloc)
	ctx.tr.config.Options = map[string]string{
		"cleanup_fail_on":  key,
		"cleanup_fail_num": "1",
	}
	ctx.tr.MarkReceived()

	ctx.tr.createdResources.Resources[key] = []string{"x", "y"}
	ctx.tr.createdResources.Resources["foo"] = []string{"z"}

	defer ctx.Cleanup()
	ctx.tr.Run()

	// Since we only failed once, createdResources should be empty
	if len(ctx.tr.createdResources.Resources) > 0 {
		t.Fatalf("expected all created resources to be removed: %#v", ctx.tr.createdResources.Resources)
	}
}
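
// TestTaskRunner_CleanupFail ensures resources that repeatedly fail to clean
// up remain in createdResources once the cleanup retries are exhausted.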
func TestTaskRunner_CleanupFail(t *testing.T) {
	alloc := mock.Alloc()
	task := alloc.Job.TaskGroups[0].Tasks[0]
	task.Driver = "mock_driver"
	key := "ERR"
	ctx := testTaskRunnerFromAlloc(t, false, alloc)
	ctx.tr.config.Options = map[string]string{
		"cleanup_fail_on":  key,
		"cleanup_fail_num": "5",
	}
	ctx.tr.MarkReceived()

	ctx.tr.createdResources.Resources[key] = []string{"x"}
	ctx.tr.createdResources.Resources["foo"] = []string{"y", "z"}

	defer ctx.Cleanup()
	ctx.tr.Run()

	// Since we failed > 3 times, the failed key should remain
	expected := map[string][]string{key: {"x"}}
	if !reflect.DeepEqual(expected, ctx.tr.createdResources.Resources) {
		t.Fatalf("expected %#v but found: %#v", expected, ctx.tr.createdResources.Resources)
	}
}
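
// TestTaskRunner_Pre06ScriptCheck exercises pre06ScriptCheck across Nomad
// versions, drivers, and check types.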
func TestTaskRunner_Pre06ScriptCheck(t *testing.T) {
	run := func(ver, driver, checkType string, exp bool) (string, func(t *testing.T)) {
		name := fmt.Sprintf("%s %s %s returns %t", ver, driver, checkType, exp)
		return name, func(t *testing.T) {
			services := []*structs.Service{
				{
					Checks: []*structs.ServiceCheck{
						{
							Type: checkType,
						},
					},
				},
			}
			if act := pre06ScriptCheck(ver, driver, services); act != exp {
				t.Errorf("expected %t received %t", exp, act)
			}
		}
	}
	t.Run(run("0.5.6", "exec", "script", true))
	t.Run(run("0.5.6", "java", "script", true))
	t.Run(run("0.5.6", "mock_driver", "script", true))
	t.Run(run("0.5.9", "exec", "script", true))
	t.Run(run("0.5.9", "java", "script", true))
	t.Run(run("0.5.9", "mock_driver", "script", true))

	t.Run(run("0.6.0dev", "exec", "script", false))
	t.Run(run("0.6.0dev", "java", "script", false))
	t.Run(run("0.6.0dev", "mock_driver", "script", false))
	t.Run(run("0.6.0", "exec", "script", false))
	t.Run(run("0.6.0", "java", "script", false))
	t.Run(run("0.6.0", "mock_driver", "script", false))
	t.Run(run("1.0.0", "exec", "script", false))
	t.Run(run("1.0.0", "java", "script", false))
	t.Run(run("1.0.0", "mock_driver", "script", false))

	t.Run(run("0.5.6", "rkt", "script", false))
	t.Run(run("0.5.6", "docker", "script", false))
	t.Run(run("0.5.6", "qemu", "script", false))
	t.Run(run("0.5.6", "raw_exec", "script", false))
	t.Run(run("0.5.6", "invalid", "script", false))

	t.Run(run("0.5.6", "exec", "tcp", false))
	t.Run(run("0.5.6", "java", "tcp", false))
	t.Run(run("0.5.6", "mock_driver", "tcp", false))
}