package client

import (
	"bufio"
	"fmt"
	"os"
	"testing"
	"time"

	"github.com/hashicorp/nomad/nomad/mock"
	"github.com/hashicorp/nomad/nomad/structs"
	"github.com/hashicorp/nomad/testutil"

	"github.com/hashicorp/nomad/client/config"
	ctestutil "github.com/hashicorp/nomad/client/testutil"
)
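
// MockAllocStateUpdater records every allocation update published by an
// AllocRunner so tests can assert on the most recently reported state.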
type MockAllocStateUpdater struct {
	Count  int
	Allocs []*structs.Allocation
}

func (m *MockAllocStateUpdater) Update(alloc *structs.Allocation) {
	m.Count++
	m.Allocs = append(m.Allocs, alloc)
}
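
// testAllocRunner builds an AllocRunner around a mock allocation and a
// MockAllocStateUpdater. When restarts is false, the task group's restart
// policy is zeroed and the job is marked as batch so tasks run exactly once.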
func testAllocRunner(restarts bool) (*MockAllocStateUpdater, *AllocRunner) {
	logger := testLogger()
	conf := config.DefaultConfig()
	conf.StateDir = os.TempDir()
	conf.AllocDir = os.TempDir()
	upd := &MockAllocStateUpdater{}
	alloc := mock.Alloc()
	if !restarts {
		*alloc.Job.LookupTaskGroup(alloc.TaskGroup).RestartPolicy = structs.RestartPolicy{Attempts: 0}
		alloc.Job.Type = structs.JobTypeBatch
	}

	ar := NewAllocRunner(logger, conf, upd.Update, alloc)
	return upd, ar
}
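
// TestAllocRunner_SimpleRun runs a batch allocation to completion and waits
// for the final client status to be reported as complete.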
func TestAllocRunner_SimpleRun(t *testing.T) {
	ctestutil.ExecCompatible(t)
	upd, ar := testAllocRunner(false)
	go ar.Run()
	defer ar.Destroy()

	testutil.WaitForResult(func() (bool, error) {
		if upd.Count == 0 {
			return false, fmt.Errorf("No updates")
		}
		last := upd.Allocs[upd.Count-1]
		if last.ClientStatus != structs.AllocClientStatusComplete {
			return false, fmt.Errorf("got status %v; want %v", last.ClientStatus, structs.AllocClientStatusComplete)
		}
		return true, nil
	}, func(err error) {
		t.Fatalf("err: %v", err)
	})
}
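
// TestAllocRunner_TerminalUpdate_Destroy updates a running allocation to a
// terminal desired status, verifies the tasks stop while the state file and
// alloc directory are kept, and then ensures Destroy removes both.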
func TestAllocRunner_TerminalUpdate_Destroy(t *testing.T) {
	ctestutil.ExecCompatible(t)
	upd, ar := testAllocRunner(false)

	// Ensure task takes some time
	task := ar.alloc.Job.TaskGroups[0].Tasks[0]
	task.Config["command"] = "/bin/sleep"
	task.Config["args"] = []string{"10"}
	go ar.Run()

	testutil.WaitForResult(func() (bool, error) {
		if upd.Count == 0 {
			return false, fmt.Errorf("No updates")
		}
		last := upd.Allocs[upd.Count-1]
		if last.ClientStatus != structs.AllocClientStatusRunning {
			return false, fmt.Errorf("got status %v; want %v", last.ClientStatus, structs.AllocClientStatusRunning)
		}
		return true, nil
	}, func(err error) {
		t.Fatalf("err: %v", err)
	})

	// Update the alloc to be terminal which should cause the alloc runner to
	// stop the tasks and wait for a destroy.
	update := ar.alloc.Copy()
	update.DesiredStatus = structs.AllocDesiredStatusStop
	ar.Update(update)

	testutil.WaitForResult(func() (bool, error) {
		if upd.Count == 0 {
			return false, nil
		}

		// Check the status has changed.
		last := upd.Allocs[upd.Count-1]
		if last.ClientStatus != structs.AllocClientStatusComplete {
			return false, fmt.Errorf("got client status %v; want %v", last.ClientStatus, structs.AllocClientStatusComplete)
		}

		// Check the state still exists
		if _, err := os.Stat(ar.stateFilePath()); err != nil {
			return false, fmt.Errorf("state file destroyed: %v", err)
		}

		// Check the alloc directory still exists
		if _, err := os.Stat(ar.ctx.AllocDir.AllocDir); err != nil {
			return false, fmt.Errorf("alloc dir destroyed: %v", ar.ctx.AllocDir.AllocDir)
		}

		return true, nil
	}, func(err error) {
		t.Fatalf("err: %v", err)
	})

	// Send the destroy signal and ensure the AllocRunner cleans up.
	ar.Destroy()

	testutil.WaitForResult(func() (bool, error) {
		if upd.Count == 0 {
			return false, nil
		}

		// Check the status has changed.
		last := upd.Allocs[upd.Count-1]
		if last.ClientStatus != structs.AllocClientStatusComplete {
			return false, fmt.Errorf("got client status %v; want %v", last.ClientStatus, structs.AllocClientStatusComplete)
		}

		// Check the state was cleaned
		if _, err := os.Stat(ar.stateFilePath()); err == nil {
			return false, fmt.Errorf("state file still exists: %v", ar.stateFilePath())
		} else if !os.IsNotExist(err) {
			return false, fmt.Errorf("stat err: %v", err)
		}

		// Check the alloc directory was cleaned
		if _, err := os.Stat(ar.ctx.AllocDir.AllocDir); err == nil {
			return false, fmt.Errorf("alloc dir still exists: %v", ar.ctx.AllocDir.AllocDir)
		} else if !os.IsNotExist(err) {
			return false, fmt.Errorf("stat err: %v", err)
		}

		return true, nil
	}, func(err error) {
		t.Fatalf("err: %v", err)
	})
}
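
// TestAllocRunner_DiskExceeded_Destroy writes a 20MB file into the shared
// alloc directory, presumably exceeding the mock allocation's shared disk
// allowance, and verifies the allocation fails and is then cleaned up on
// destroy.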
func TestAllocRunner_DiskExceeded_Destroy(t *testing.T) {
	ctestutil.ExecCompatible(t)
	upd, ar := testAllocRunner(false)

	// Ensure task takes some time
	task := ar.alloc.Job.TaskGroups[0].Tasks[0]
	task.Config["command"] = "/bin/sleep"
	task.Config["args"] = []string{"60"}
	go ar.Run()

	testutil.WaitForResult(func() (bool, error) {
		if upd.Count == 0 {
			return false, fmt.Errorf("No updates")
		}
		last := upd.Allocs[upd.Count-1]
		if last.ClientStatus != structs.AllocClientStatusRunning {
			return false, fmt.Errorf("got status %v; want %v", last.ClientStatus, structs.AllocClientStatusRunning)
		}
		return true, nil
	}, func(err error) {
		t.Fatalf("err: %v", err)
	})

	// Create a 20mb file in the shared alloc directory, which should cause the
	// allocation to terminate in a failed state.
	name := ar.ctx.AllocDir.SharedDir + "/20mb.bin"
	f, err := os.Create(name)
	if err != nil {
		t.Fatalf("unable to create file: %v", err)
	}

	defer func() {
		if err := f.Close(); err != nil {
			t.Fatalf("unable to close file: %v", err)
		}
		os.Remove(name)
	}()

	// Write 20 megabytes (1280 * 16384 bytes) of zeros to the file.
	w := bufio.NewWriter(f)
	buf := make([]byte, 16384)
	for i := 0; i < 1280; i++ {
		if _, err := w.Write(buf); err != nil {
			t.Fatalf("unable to write to file: %v", err)
		}
	}
	// Flush so any buffered bytes actually reach disk before waiting on the
	// allocation to fail.
	if err := w.Flush(); err != nil {
		t.Fatalf("unable to flush file: %v", err)
	}

	testutil.WaitForResult(func() (bool, error) {
		if upd.Count == 0 {
			return false, nil
		}

		// Check the status has changed.
		last := upd.Allocs[upd.Count-1]
		if last.ClientStatus != structs.AllocClientStatusFailed {
			return false, fmt.Errorf("got client status %v; want %v", last.ClientStatus, structs.AllocClientStatusFailed)
		}

		// Check the state still exists
		if _, err := os.Stat(ar.stateFilePath()); err != nil {
			return false, fmt.Errorf("state file destroyed: %v", err)
		}

		// Check the alloc directory still exists
		if _, err := os.Stat(ar.ctx.AllocDir.AllocDir); err != nil {
			return false, fmt.Errorf("alloc dir destroyed: %v", ar.ctx.AllocDir.AllocDir)
		}

		return true, nil
	}, func(err error) {
		t.Fatalf("err: %v", err)
	})

	// Send the destroy signal and ensure the AllocRunner cleans up.
	ar.Destroy()

	testutil.WaitForResult(func() (bool, error) {
		if upd.Count == 0 {
			return false, nil
		}

		// Check the status has changed.
		last := upd.Allocs[upd.Count-1]
		if last.ClientStatus != structs.AllocClientStatusFailed {
			return false, fmt.Errorf("got client status %v; want %v", last.ClientStatus, structs.AllocClientStatusFailed)
		}

		// Check the state was cleaned
		if _, err := os.Stat(ar.stateFilePath()); err == nil {
			return false, fmt.Errorf("state file still exists: %v", ar.stateFilePath())
		} else if !os.IsNotExist(err) {
			return false, fmt.Errorf("stat err: %v", err)
		}

		// Check the alloc directory was cleaned
		if _, err := os.Stat(ar.ctx.AllocDir.AllocDir); err == nil {
			return false, fmt.Errorf("alloc dir still exists: %v", ar.ctx.AllocDir.AllocDir)
		} else if !os.IsNotExist(err) {
			return false, fmt.Errorf("stat err: %v", err)
		}

		return true, nil
	}, func(err error) {
		t.Fatalf("err: %v", err)
	})
}
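
// TestAllocRunner_Destroy starts a long running task, destroys the alloc
// runner shortly afterwards, and verifies the task is killed and all state is
// cleaned up within a reasonable time.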
func TestAllocRunner_Destroy(t *testing.T) {
	ctestutil.ExecCompatible(t)
	upd, ar := testAllocRunner(false)

	// Ensure task takes some time
	task := ar.alloc.Job.TaskGroups[0].Tasks[0]
	task.Config["command"] = "/bin/sleep"
	task.Config["args"] = []string{"10"}
	go ar.Run()
	start := time.Now()

	// Begin the tear down
	go func() {
		time.Sleep(100 * time.Millisecond)
		ar.Destroy()
	}()

	testutil.WaitForResult(func() (bool, error) {
		if upd.Count == 0 {
			return false, nil
		}

		// Check the status has changed.
		last := upd.Allocs[upd.Count-1]
		if last.ClientStatus != structs.AllocClientStatusComplete {
			return false, fmt.Errorf("got client status %v; want %v", last.ClientStatus, structs.AllocClientStatusComplete)
		}

		// Check the state was cleaned
		if _, err := os.Stat(ar.stateFilePath()); err == nil {
			return false, fmt.Errorf("state file still exists: %v", ar.stateFilePath())
		} else if !os.IsNotExist(err) {
			return false, fmt.Errorf("stat err: %v", err)
		}

		// Check the alloc directory was cleaned
		if _, err := os.Stat(ar.ctx.AllocDir.AllocDir); err == nil {
			return false, fmt.Errorf("alloc dir still exists: %v", ar.ctx.AllocDir.AllocDir)
		} else if !os.IsNotExist(err) {
			return false, fmt.Errorf("stat err: %v", err)
		}

		return true, nil
	}, func(err error) {
		t.Fatalf("err: %v", err)
	})

	if time.Since(start) > 15*time.Second {
		t.Fatalf("took too long to terminate")
	}
}
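
// TestAllocRunner_Update pushes a modified allocation into a running alloc
// runner and checks that the runner picks up the new definition.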
func TestAllocRunner_Update(t *testing.T) {
	ctestutil.ExecCompatible(t)
	_, ar := testAllocRunner(false)

	// Ensure task takes some time
	task := ar.alloc.Job.TaskGroups[0].Tasks[0]
	task.Config["command"] = "/bin/sleep"
	task.Config["args"] = []string{"10"}
	go ar.Run()
	defer ar.Destroy()

	// Update the alloc definition
	newAlloc := new(structs.Allocation)
	*newAlloc = *ar.alloc
	newAlloc.Name = "FOO"
	newAlloc.AllocModifyIndex++
	ar.Update(newAlloc)

	// Check the alloc runner stores the updated allocation.
	testutil.WaitForResult(func() (bool, error) {
		return ar.Alloc().Name == "FOO", nil
	}, func(err error) {
		t.Fatalf("err: %v %#v", err, ar.Alloc())
	})
}
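
// TestAllocRunner_SaveRestoreState snapshots a running alloc runner, restores
// the state into a fresh runner, and verifies the restored runner can be
// destroyed and reaches a non-pending client status.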
func TestAllocRunner_SaveRestoreState(t *testing.T) {
	ctestutil.ExecCompatible(t)
	upd, ar := testAllocRunner(false)

	// Ensure task takes some time
	task := ar.alloc.Job.TaskGroups[0].Tasks[0]
	task.Config["command"] = "/bin/sleep"
	task.Config["args"] = []string{"10"}
	go ar.Run()

	// Snapshot state
	testutil.WaitForResult(func() (bool, error) {
		return len(ar.tasks) == 1, nil
	}, func(err error) {
		t.Fatalf("task never started: %v", err)
	})

	err := ar.SaveState()
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	// Create a new alloc runner
	ar2 := NewAllocRunner(ar.logger, ar.config, upd.Update,
		&structs.Allocation{ID: ar.alloc.ID})
	err = ar2.RestoreState()
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	go ar2.Run()

	// Destroy and wait
	ar2.Destroy()
	start := time.Now()

	testutil.WaitForResult(func() (bool, error) {
		if upd.Count == 0 {
			return false, nil
		}
		last := upd.Allocs[upd.Count-1]
		return last.ClientStatus != structs.AllocClientStatusPending, nil
	}, func(err error) {
		t.Fatalf("err: %v %#v %#v", err, upd.Allocs[0], ar.alloc.TaskStates)
	})

	if time.Since(start) > time.Duration(testutil.TestMultiplier()*15)*time.Second {
		t.Fatalf("took too long to terminate")
	}
}
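
// TestAllocRunner_SaveRestoreState_TerminalAlloc saves state after the
// allocation has been updated to a terminal desired status, restores it into
// a second runner, and verifies the restored runner still cleans up on
// destroy.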
func TestAllocRunner_SaveRestoreState_TerminalAlloc(t *testing.T) {
	ctestutil.ExecCompatible(t)
	upd, ar := testAllocRunner(false)
	ar.logger = prefixedTestLogger("ar1: ")

	// Ensure task takes some time
	task := ar.alloc.Job.TaskGroups[0].Tasks[0]
	task.Config["command"] = "/bin/sleep"
	task.Config["args"] = []string{"1000"}
	go ar.Run()

	testutil.WaitForResult(func() (bool, error) {
		if upd.Count == 0 {
			return false, fmt.Errorf("No updates")
		}
		last := upd.Allocs[upd.Count-1]
		if last.ClientStatus != structs.AllocClientStatusRunning {
			return false, fmt.Errorf("got status %v; want %v", last.ClientStatus, structs.AllocClientStatusRunning)
		}
		return true, nil
	}, func(err error) {
		t.Fatalf("err: %v", err)
	})

	// Update the alloc to be terminal which should cause the alloc runner to
	// stop the tasks and wait for a destroy.
	update := ar.alloc.Copy()
	update.DesiredStatus = structs.AllocDesiredStatusStop
	ar.Update(update)

	testutil.WaitForResult(func() (bool, error) {
		return ar.alloc.DesiredStatus == structs.AllocDesiredStatusStop, nil
	}, func(err error) {
		t.Fatalf("err: %v", err)
	})

	err := ar.SaveState()
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	// Ensure both alloc runners don't destroy
	ar.destroy = true

	// Create a new alloc runner
	ar2 := NewAllocRunner(ar.logger, ar.config, upd.Update,
		&structs.Allocation{ID: ar.alloc.ID})
	ar2.logger = prefixedTestLogger("ar2: ")
	err = ar2.RestoreState()
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	go ar2.Run()
	ar2.logger.Println("[TESTING] starting second alloc runner")

	testutil.WaitForResult(func() (bool, error) {
		// Check the state still exists
		if _, err := os.Stat(ar.stateFilePath()); err != nil {
			return false, fmt.Errorf("state file destroyed: %v", err)
		}

		// Check the alloc directory still exists
		if _, err := os.Stat(ar.ctx.AllocDir.AllocDir); err != nil {
			return false, fmt.Errorf("alloc dir destroyed: %v", ar.ctx.AllocDir.AllocDir)
		}

		return true, nil
	}, func(err error) {
		t.Fatalf("err: %v %#v %#v", err, upd.Allocs[0], ar.alloc.TaskStates)
	})

	// Send the destroy signal and ensure the AllocRunner cleans up.
	ar2.logger.Println("[TESTING] destroying second alloc runner")
	ar2.Destroy()

	testutil.WaitForResult(func() (bool, error) {
		if upd.Count == 0 {
			return false, nil
		}

		// Check the status has changed.
		last := upd.Allocs[upd.Count-1]
		if last.ClientStatus != structs.AllocClientStatusComplete {
			return false, fmt.Errorf("got client status %v; want %v", last.ClientStatus, structs.AllocClientStatusComplete)
		}

		// Check the state was cleaned
		if _, err := os.Stat(ar.stateFilePath()); err == nil {
			return false, fmt.Errorf("state file still exists: %v", ar.stateFilePath())
		} else if !os.IsNotExist(err) {
			return false, fmt.Errorf("stat err: %v", err)
		}

		// Check the alloc directory was cleaned
		if _, err := os.Stat(ar.ctx.AllocDir.AllocDir); err == nil {
			return false, fmt.Errorf("alloc dir still exists: %v", ar.ctx.AllocDir.AllocDir)
		} else if !os.IsNotExist(err) {
			return false, fmt.Errorf("stat err: %v", err)
		}

		return true, nil
	}, func(err error) {
		t.Fatalf("err: %v", err)
	})
}
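
// TestAllocRunner_TaskFailed_KillTG runs a task group with one long running
// task and one task that fails immediately, and verifies the failure kills
// the rest of the group and marks the allocation as failed.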
func TestAllocRunner_TaskFailed_KillTG(t *testing.T) {
	ctestutil.ExecCompatible(t)
	upd, ar := testAllocRunner(false)

	// Create two tasks in the task group
	task := ar.alloc.Job.TaskGroups[0].Tasks[0]
	task.Config["command"] = "/bin/sleep"
	task.Config["args"] = []string{"1000"}

	task2 := ar.alloc.Job.TaskGroups[0].Tasks[0].Copy()
	task2.Name = "task 2"
	task2.Config = map[string]interface{}{"command": "invalidBinaryToFail"}
	ar.alloc.Job.TaskGroups[0].Tasks = append(ar.alloc.Job.TaskGroups[0].Tasks, task2)
	ar.alloc.TaskResources[task2.Name] = task2.Resources
	go ar.Run()

	testutil.WaitForResult(func() (bool, error) {
		if upd.Count == 0 {
			return false, fmt.Errorf("No updates")
		}
		last := upd.Allocs[upd.Count-1]
		if last.ClientStatus != structs.AllocClientStatusFailed {
			return false, fmt.Errorf("got status %v; want %v", last.ClientStatus, structs.AllocClientStatusFailed)
		}

		// Task One should be killed
		state1 := last.TaskStates[task.Name]
		if state1.State != structs.TaskStateDead {
			return false, fmt.Errorf("got state %v; want %v", state1.State, structs.TaskStateDead)
		}
		if lastE := state1.Events[len(state1.Events)-1]; lastE.Type != structs.TaskKilled {
			return false, fmt.Errorf("got last event %v; want %v", lastE.Type, structs.TaskKilled)
		}

		// Task Two should be failed
		state2 := last.TaskStates[task2.Name]
		if state2.State != structs.TaskStateDead {
			return false, fmt.Errorf("got state %v; want %v", state2.State, structs.TaskStateDead)
		}
		if !state2.Failed() {
			return false, fmt.Errorf("task2 should have failed")
		}

		return true, nil
	}, func(err error) {
		t.Fatalf("err: %v", err)
	})
}