//go:build !windows
// +build !windows

package allocrunner

import (
	"encoding/json"
	"fmt"
	"os"
	"syscall"
	"testing"
	"time"

	"github.com/hashicorp/nomad/client/consul"
	"github.com/hashicorp/nomad/client/state"
	"github.com/hashicorp/nomad/nomad/mock"
	"github.com/hashicorp/nomad/nomad/structs"
	"github.com/hashicorp/nomad/testutil"
	"github.com/stretchr/testify/require"
)

// TestAllocRunner_Restore_RunningTerminal asserts that restoring a terminal
// alloc with a running task properly kills the running task. This is meant
// to simulate a Nomad agent crash after receiving an updated alloc with
// DesiredStatus=Stop, persisting the update, but crashing before terminating
// the task.
func TestAllocRunner_Restore_RunningTerminal(t *testing.T) {
	t.Parallel()

	// 1. Run task
	// 2. Shutdown alloc runner
	// 3. Set alloc.desiredstatus=stop
	// 4. Start new alloc runner
	// 5. Assert task and logmon are cleaned up

	alloc := mock.Alloc()
	alloc.Job.TaskGroups[0].Services = []*structs.Service{
		{
			Name:      "foo",
			PortLabel: "8888",
		},
	}
	task := alloc.Job.TaskGroups[0].Tasks[0]
	task.Driver = "mock_driver"
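	// run_for keeps the mock task alive long enough to still be running when
	// the agent crash is simulated below.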
	task.Config = map[string]interface{}{
		"run_for": "1h",
	}

	conf, cleanup := testAllocRunnerConfig(t, alloc.Copy())
	defer cleanup()

	// Maintain state for subsequent run
	conf.StateDB = state.NewMemDB(conf.Logger)

	// Start and wait for task to be running
	ar, err := NewAllocRunner(conf)
	require.NoError(t, err)
	go ar.Run()
	defer destroy(ar)

	testutil.WaitForResult(func() (bool, error) {
		s := ar.AllocState()
		return s.ClientStatus == structs.AllocClientStatusRunning, fmt.Errorf("expected running, got %s", s.ClientStatus)
	}, func(err error) {
		require.NoError(t, err)
	})

	// Shutdown the AR and manually change the state to mimic a crash where
	// a stopped alloc update is received, but Nomad crashes before
	// stopping the alloc.
	ar.Shutdown()
	select {
	case <-ar.ShutdownCh():
	case <-time.After(30 * time.Second):
		require.Fail(t, "AR took too long to exit")
	}

	// Assert logmon is still running. This is a super ugly hack that pulls
	// logmon's PID out of its reattach config, but it does properly ensure
	// logmon gets cleaned up.
	ls, _, err := conf.StateDB.GetTaskRunnerState(alloc.ID, task.Name)
	require.NoError(t, err)
	require.NotNil(t, ls)

	logmonReattach := struct {
		Pid int
	}{}
	err = json.Unmarshal([]byte(ls.Hooks["logmon"].Data["reattach_config"]), &logmonReattach)
	require.NoError(t, err)
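
	// Signal 0 delivers no signal but still fails if the process no longer
	// exists, so it doubles as a liveness check for the reattached logmon PID.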
	logmonProc, _ := os.FindProcess(logmonReattach.Pid)
	require.NoError(t, logmonProc.Signal(syscall.Signal(0)))

	// Fake alloc terminal during Restore()
	alloc.DesiredStatus = structs.AllocDesiredStatusStop
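	// Bump the indexes to mimic the server-side update that set DesiredStatus.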
	alloc.ModifyIndex++
	alloc.AllocModifyIndex++

	// Start a new alloc runner and assert it gets stopped
	conf2, cleanup2 := testAllocRunnerConfig(t, alloc)
	defer cleanup2()

	// Use original statedb to maintain hook state
	conf2.StateDB = conf.StateDB

	// Restore, start, and wait for task to be killed
	ar2, err := NewAllocRunner(conf2)
	require.NoError(t, err)
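
	// Restore loads the persisted alloc and task runner state from the shared
	// StateDB so the new runner can take over the still-running task and its
	// logmon.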
	require.NoError(t, ar2.Restore())

	go ar2.Run()
	defer destroy(ar2)

	select {
	case <-ar2.WaitCh():
	case <-time.After(30 * time.Second):
		require.Fail(t, "AR took too long to exit")
	}

	// Assert logmon was cleaned up
	require.Error(t, logmonProc.Signal(syscall.Signal(0)))

	// Assert consul was cleaned up:
	// 1 removal during prekill
	// - removal during exited is de-duped due to prekill
	// - removal during stop is de-duped due to prekill
	// 1 removal group during stop
	consulOps := conf2.Consul.(*consul.MockConsulServiceClient).GetOps()
	require.Len(t, consulOps, 2)
	for _, op := range consulOps {
		require.Equal(t, "remove", op.Op)
	}

	// Assert terminated task event was emitted
	events := ar2.AllocState().TaskStates[task.Name].Events
	require.Len(t, events, 4)
	require.Equal(t, events[0].Type, structs.TaskReceived)
	require.Equal(t, events[1].Type, structs.TaskSetup)
	require.Equal(t, events[2].Type, structs.TaskStarted)
	require.Equal(t, events[3].Type, structs.TaskTerminated)
}

// TestAllocRunner_Restore_CompletedBatch asserts that restoring a completed
// batch alloc doesn't run it again
func TestAllocRunner_Restore_CompletedBatch(t *testing.T) {
	t.Parallel()

	// 1. Run task and wait for it to complete
	// 2. Start new alloc runner
	// 3. Assert task didn't run again

	alloc := mock.Alloc()
	alloc.Job.Type = structs.JobTypeBatch
	task := alloc.Job.TaskGroups[0].Tasks[0]
	task.Driver = "mock_driver"
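	// A short run_for lets the batch task finish almost immediately.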
	task.Config = map[string]interface{}{
		"run_for": "2ms",
	}

	conf, cleanup := testAllocRunnerConfig(t, alloc.Copy())
	defer cleanup()

	// Maintain state for subsequent run
	conf.StateDB = state.NewMemDB(conf.Logger)

	// Start and wait for task to be running
	ar, err := NewAllocRunner(conf)
	require.NoError(t, err)
	go ar.Run()
	defer destroy(ar)

	testutil.WaitForResult(func() (bool, error) {
		s := ar.AllocState()
		if s.ClientStatus != structs.AllocClientStatusComplete {
			return false, fmt.Errorf("expected complete, got %s", s.ClientStatus)
		}
		return true, nil
	}, func(err error) {
		require.NoError(t, err)
	})

	// once job finishes, it shouldn't run again
	require.False(t, ar.shouldRun())
	initialRunEvents := ar.AllocState().TaskStates[task.Name].Events
	require.Len(t, initialRunEvents, 4)
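
	// The persisted task runner state should also record the task as dead.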
	ls, ts, err := conf.StateDB.GetTaskRunnerState(alloc.ID, task.Name)
	require.NoError(t, err)
	require.NotNil(t, ls)
	require.Equal(t, structs.TaskStateDead, ts.State)

	// Start a new alloc runner and assert the task doesn't run again
	conf2, cleanup2 := testAllocRunnerConfig(t, alloc)
	defer cleanup2()

	// Use original statedb to maintain hook state
	conf2.StateDB = conf.StateDB

	// Restore, start, and wait for the alloc runner to finish
	ar2, err := NewAllocRunner(conf2)
	require.NoError(t, err)

	require.NoError(t, ar2.Restore())

	go ar2.Run()
	defer destroy(ar2)

	// AR waitCh must be closed even when task doesn't run again
	select {
	case <-ar2.WaitCh():
	case <-time.After(10 * time.Second):
		require.Fail(t, "alloc.waitCh wasn't closed")
	}

	// TR waitCh must be closed too!
	select {
	case <-ar2.tasks[task.Name].WaitCh():
	case <-time.After(10 * time.Second):
		require.Fail(t, "tr.waitCh wasn't closed")
	}

	// Assert that events are unmodified, which they would not be if the
	// task had re-run
	events := ar2.AllocState().TaskStates[task.Name].Events
	require.Equal(t, initialRunEvents, events)
}

// TestAllocRunner_PreStartFailuresLeadToFailed asserts that if an alloc's
// prestart hooks fail, then the alloc and its tasks transition to a
// failed state
func TestAllocRunner_PreStartFailuresLeadToFailed(t *testing.T) {
	t.Parallel()

	alloc := mock.Alloc()
	alloc.Job.Type = structs.JobTypeBatch
	task := alloc.Job.TaskGroups[0].Tasks[0]
	task.Driver = "mock_driver"
	task.Config = map[string]interface{}{
		"run_for": "2ms",
	}
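
	// Disable restarts so the prestart failure is not retried and the alloc
	// fails right away.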
	rp := &structs.RestartPolicy{Attempts: 0}
	alloc.Job.TaskGroups[0].RestartPolicy = rp
	task.RestartPolicy = rp

	conf, cleanup := testAllocRunnerConfig(t, alloc.Copy())
	defer cleanup()

	// Maintain state for subsequent run
	conf.StateDB = state.NewMemDB(conf.Logger)

	// Create the alloc runner
	ar, err := NewAllocRunner(conf)
	require.NoError(t, err)
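
	// Inject an alloc runner hook whose Prerun always fails (defined at the
	// bottom of this file) so prestart never succeeds.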
	ar.runnerHooks = append(ar.runnerHooks, &allocFailingPrestartHook{})

	go ar.Run()
	defer destroy(ar)

	select {
	case <-ar.WaitCh():
	case <-time.After(10 * time.Second):
		require.Fail(t, "alloc.waitCh wasn't closed")
	}

	testutil.WaitForResult(func() (bool, error) {
		s := ar.AllocState()
		if s.ClientStatus != structs.AllocClientStatusFailed {
			return false, fmt.Errorf("expected failed, got %s", s.ClientStatus)
		}
		return true, nil
	}, func(err error) {
		require.NoError(t, err)
	})

	// once the alloc fails, it shouldn't run again
	require.False(t, ar.shouldRun())
	initialRunEvents := ar.AllocState().TaskStates[task.Name].Events
	require.Len(t, initialRunEvents, 2)

	ls, ts, err := conf.StateDB.GetTaskRunnerState(alloc.ID, task.Name)
	require.NoError(t, err)
	require.NotNil(t, ls)
	require.NotNil(t, ts)
	require.Equal(t, structs.TaskStateDead, ts.State)
	require.True(t, ts.Failed)

	// TR waitCh must be closed too!
	select {
	case <-ar.tasks[task.Name].WaitCh():
	case <-time.After(10 * time.Second):
		require.Fail(t, "tr.waitCh wasn't closed")
	}
}
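
// allocFailingPrestartHook is an alloc runner hook whose Prerun always
// returns an error, forcing prestart to fail.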
type allocFailingPrestartHook struct{}

func (*allocFailingPrestartHook) Name() string { return "failing_prestart" }

func (*allocFailingPrestartHook) Prerun() error {
	return fmt.Errorf("failing prestart hooks")
}