package allocrunner

import (
	"context"
	"fmt"
	"path/filepath"
	"sync"
	"time"

	log "github.com/hashicorp/go-hclog"
	multierror "github.com/hashicorp/go-multierror"
	"github.com/hashicorp/nomad/client/allocdir"
	"github.com/hashicorp/nomad/client/allocrunner/interfaces"
	"github.com/hashicorp/nomad/client/allocrunner/state"
	"github.com/hashicorp/nomad/client/allocrunner/taskrunner"
	"github.com/hashicorp/nomad/client/allocwatcher"
	"github.com/hashicorp/nomad/client/config"
	"github.com/hashicorp/nomad/client/consul"
	"github.com/hashicorp/nomad/client/devicemanager"
	"github.com/hashicorp/nomad/client/dynamicplugins"
	cinterfaces "github.com/hashicorp/nomad/client/interfaces"
	"github.com/hashicorp/nomad/client/pluginmanager/csimanager"
	"github.com/hashicorp/nomad/client/pluginmanager/drivermanager"
	cstate "github.com/hashicorp/nomad/client/state"
	cstructs "github.com/hashicorp/nomad/client/structs"
	"github.com/hashicorp/nomad/client/vaultclient"
	agentconsul "github.com/hashicorp/nomad/command/agent/consul"
	"github.com/hashicorp/nomad/helper"
	"github.com/hashicorp/nomad/nomad/structs"
	"github.com/hashicorp/nomad/plugins/device"
	"github.com/hashicorp/nomad/plugins/drivers"
)

// allocRunner is used to run all the tasks in a given allocation
type allocRunner struct {
	// id is the ID of the allocation. Can be accessed without a lock
	id string

	// Logger is the logger for the alloc runner.
	logger log.Logger

	clientConfig *config.Config

	// stateUpdater is used to emit updated alloc state
	stateUpdater cinterfaces.AllocStateHandler

	// taskStateUpdatedCh is ticked whenever task state has changed. Must
	// have len==1 to allow nonblocking notification of state updates while
	// the goroutine is already processing a previous update.
	taskStateUpdatedCh chan struct{}

	// taskStateUpdateHandlerCh is closed when the task state handling
	// goroutine exits. It is unsafe to destroy the local allocation state
	// before this goroutine exits.
	taskStateUpdateHandlerCh chan struct{}

	// allocUpdatedCh is a channel that is used to stream allocation updates into
	// the allocUpdate handler. Must have len==1 to allow nonblocking notification
	// of new allocation updates while the goroutine is processing a previous
	// update.
	allocUpdatedCh chan *structs.Allocation

	// consulClient is the client used by the consul service hook for
	// registering services and checks
	consulClient consul.ConsulServiceAPI

	// sidsClient is the client used by the service identity hook for
	// managing SI tokens
	sidsClient consul.ServiceIdentityAPI

	// vaultClient is used to manage Vault tokens
	vaultClient vaultclient.VaultClient

	// waitCh is closed when the Run loop has exited
	waitCh chan struct{}

	// destroyed is true when the Run loop has exited, postrun hooks have
	// run, and alloc runner has been destroyed. Must acquire destroyedLock
	// to access.
	destroyed bool

	// destroyCh is closed when the Run loop has exited, postrun hooks have
	// run, and alloc runner has been destroyed.
	destroyCh chan struct{}

	// shutdown is true when the Run loop has exited, and shutdown hooks have
	// run. Must acquire destroyedLock to access.
	shutdown bool

	// shutdownCh is closed when the Run loop has exited, and shutdown hooks
	// have run.
	shutdownCh chan struct{}

	// destroyLaunched is true if Destroy has been called. Must acquire
	// destroyedLock to access.
	destroyLaunched bool

	// shutdownLaunched is true if Shutdown has been called. Must acquire
	// destroyedLock to access.
	shutdownLaunched bool

	// destroyedLock guards destroyed, destroyLaunched, shutdownLaunched,
	// and serializes Shutdown/Destroy calls.
	destroyedLock sync.Mutex

	// Alloc captures the allocation being run.
	alloc     *structs.Allocation
	allocLock sync.RWMutex

	// state is the alloc runner's state
	state     *state.State
	stateLock sync.RWMutex

	stateDB cstate.StateDB

	// allocDir is used to build the allocations directory structure.
	allocDir *allocdir.AllocDir

	// runnerHooks are alloc runner lifecycle hooks that should be run on state
	// transitions.
	runnerHooks []interfaces.RunnerHook

	// hookState is the output of allocrunner hooks
	hookState   *cstructs.AllocHookResources
	hookStateMu sync.RWMutex

	// tasks are the set of task runners
	tasks map[string]*taskrunner.TaskRunner

	// deviceStatsReporter is used to look up resource usage for alloc devices
	deviceStatsReporter cinterfaces.DeviceStatsReporter

	// allocBroadcaster sends client allocation updates to all listeners
	allocBroadcaster *cstructs.AllocBroadcaster

	// prevAllocWatcher allows waiting for any previous or preempted allocations
	// to exit
	prevAllocWatcher allocwatcher.PrevAllocWatcher

	// prevAllocMigrator allows the migration of a previous allocation's alloc dir.
	prevAllocMigrator allocwatcher.PrevAllocMigrator

	// dynamicRegistry contains all locally registered dynamic plugins (e.g.
	// CSI plugins).
	dynamicRegistry dynamicplugins.Registry

	// csiManager is used to wait for CSI Volumes to be attached, and by the task
	// runner to manage their mounting
	csiManager csimanager.Manager

	// devicemanager is used to mount devices as well as look up device
	// statistics
	devicemanager devicemanager.Manager

	// driverManager is responsible for dispensing driver plugins and registering
	// event handlers
	driverManager drivermanager.Manager

	// serversContactedCh is passed to TaskRunners so they can detect when
	// servers have been contacted for the first time in case of a failed
	// restore.
	serversContactedCh chan struct{}

	taskHookCoordinator *taskHookCoordinator

	// rpcClient is the RPC client that should be used by the allocrunner and its
	// hooks to communicate with Nomad servers.
	rpcClient RPCer
}

// RPCer is the interface needed by hooks to make RPC calls.
type RPCer interface {
	RPC(method string, args interface{}, reply interface{}) error
}
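
// As an illustrative sketch only (the endpoint, request, and response types
// below are placeholders, not real Nomad RPCs), a hook holding an RPCer could
// call the servers like this:
//
//	var req ExampleRequest
//	var resp ExampleResponse
//	if err := rpcClient.RPC("Example.Method", &req, &resp); err != nil {
//		// handle the RPC failure
//	}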

// NewAllocRunner returns a new allocation runner.
func NewAllocRunner(config *Config) (*allocRunner, error) {
	alloc := config.Alloc
	tg := alloc.Job.LookupTaskGroup(alloc.TaskGroup)
	if tg == nil {
		return nil, fmt.Errorf("failed to lookup task group %q", alloc.TaskGroup)
	}

	ar := &allocRunner{
		id:                       alloc.ID,
		alloc:                    alloc,
		clientConfig:             config.ClientConfig,
		consulClient:             config.Consul,
		sidsClient:               config.ConsulSI,
		vaultClient:              config.Vault,
		tasks:                    make(map[string]*taskrunner.TaskRunner, len(tg.Tasks)),
		waitCh:                   make(chan struct{}),
		destroyCh:                make(chan struct{}),
		shutdownCh:               make(chan struct{}),
		state:                    &state.State{},
		stateDB:                  config.StateDB,
		stateUpdater:             config.StateUpdater,
		taskStateUpdatedCh:       make(chan struct{}, 1),
		taskStateUpdateHandlerCh: make(chan struct{}),
		allocUpdatedCh:           make(chan *structs.Allocation, 1),
		deviceStatsReporter:      config.DeviceStatsReporter,
		prevAllocWatcher:         config.PrevAllocWatcher,
		prevAllocMigrator:        config.PrevAllocMigrator,
		dynamicRegistry:          config.DynamicRegistry,
		csiManager:               config.CSIManager,
		devicemanager:            config.DeviceManager,
		driverManager:            config.DriverManager,
		serversContactedCh:       config.ServersContactedCh,
		rpcClient:                config.RPCClient,
	}

	// Create the logger based on the allocation ID
	ar.logger = config.Logger.Named("alloc_runner").With("alloc_id", alloc.ID)

	// Create alloc broadcaster
	ar.allocBroadcaster = cstructs.NewAllocBroadcaster(ar.logger)

	// Create alloc dir
	ar.allocDir = allocdir.NewAllocDir(ar.logger, filepath.Join(config.ClientConfig.AllocDir, alloc.ID))

	ar.taskHookCoordinator = newTaskHookCoordinator(ar.logger, tg.Tasks)

	// Initialize the runner's hooks.
	if err := ar.initRunnerHooks(config.ClientConfig); err != nil {
		return nil, err
	}

	// Create the TaskRunners
	if err := ar.initTaskRunners(tg.Tasks); err != nil {
		return nil, err
	}

	return ar, nil
}

// initTaskRunners creates task runners but does *not* run them.
func (ar *allocRunner) initTaskRunners(tasks []*structs.Task) error {
	for _, task := range tasks {
		config := &taskrunner.Config{
			Alloc:                ar.alloc,
			ClientConfig:         ar.clientConfig,
			Task:                 task,
			TaskDir:              ar.allocDir.NewTaskDir(task.Name),
			Logger:               ar.logger,
			StateDB:              ar.stateDB,
			StateUpdater:         ar,
			DynamicRegistry:      ar.dynamicRegistry,
			Consul:               ar.consulClient,
			ConsulSI:             ar.sidsClient,
			Vault:                ar.vaultClient,
			DeviceStatsReporter:  ar.deviceStatsReporter,
			CSIManager:           ar.csiManager,
			DeviceManager:        ar.devicemanager,
			DriverManager:        ar.driverManager,
			ServersContactedCh:   ar.serversContactedCh,
			StartConditionMetCtx: ar.taskHookCoordinator.startConditionForTask(task),
		}

		// Create, but do not Run, the task runner
		tr, err := taskrunner.NewTaskRunner(config)
		if err != nil {
			return fmt.Errorf("failed creating runner for task %q: %v", task.Name, err)
		}

		ar.tasks[task.Name] = tr
	}
	return nil
}
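
// WaitCh returns a channel that is closed when the Run loop has exited. As an
// illustrative sketch (not code taken from this package), a caller typically
// starts the runner in a goroutine and blocks on this channel:
//
//	go ar.Run()
//	<-ar.WaitCh()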
func (ar *allocRunner) WaitCh() <-chan struct{} {
	return ar.waitCh
}

// Run the AllocRunner. Starts tasks if the alloc is non-terminal and closes
// WaitCh when it exits. Should be started in a goroutine.
func (ar *allocRunner) Run() {
	// Close the wait channel on return
	defer close(ar.waitCh)

	// Start the task state update handler
	go ar.handleTaskStateUpdates()

	// Start the alloc update handler
	go ar.handleAllocUpdates()

	// If the task update chan has been closed, that means we've been shut down.
	select {
	case <-ar.taskStateUpdateHandlerCh:
		return
	default:
	}

	// When handling a (potentially restored) terminal alloc, still run the tasks
	// and the post-run hooks to perform any cleanup that may not have been done
	// before the earlier termination.

	// Run the prestart hooks if non-terminal
	if ar.shouldRun() {
		if err := ar.prerun(); err != nil {
			ar.logger.Error("prerun failed", "error", err)

			for _, tr := range ar.tasks {
				tr.MarkFailedDead(fmt.Sprintf("failed to setup alloc: %v", err))
			}

			goto POST
		}
	}

	// Run the runners (blocks until they exit)
	ar.runTasks()

POST:
	if ar.isShuttingDown() {
		return
	}

	// Run the postrun hooks
	if err := ar.postrun(); err != nil {
		ar.logger.Error("postrun failed", "error", err)
	}
}

// shouldRun returns true if the alloc is in a state that the alloc runner
// should run it.
func (ar *allocRunner) shouldRun() bool {
	// Do not run allocs that are terminal
	if ar.Alloc().TerminalStatus() {
		ar.logger.Trace("alloc terminal; not running",
			"desired_status", ar.Alloc().DesiredStatus,
			"client_status", ar.Alloc().ClientStatus,
		)
		return false
	}

	// It's possible that the alloc local state was marked terminal before
	// the server copy of the alloc (checked above) was marked as terminal,
	// so check the local state as well.
	switch clientStatus := ar.AllocState().ClientStatus; clientStatus {
	case structs.AllocClientStatusComplete, structs.AllocClientStatusFailed, structs.AllocClientStatusLost:
		ar.logger.Trace("alloc terminal; updating server and not running", "status", clientStatus)
		return false
	}

	return true
}

// runTasks is used to run the task runners and block until they exit.
func (ar *allocRunner) runTasks() {
	for _, task := range ar.tasks {
		go task.Run()
	}

	for _, task := range ar.tasks {
		<-task.WaitCh()
	}
}

// Alloc returns the current allocation being run by this runner as sent by the
// server. This view of the allocation does not have updated task states.
func (ar *allocRunner) Alloc() *structs.Allocation {
	ar.allocLock.RLock()
	defer ar.allocLock.RUnlock()
	return ar.alloc
}
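
// setAlloc stores an updated allocation, replacing the runner's current
// server view of the alloc.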
func (ar *allocRunner) setAlloc(updated *structs.Allocation) {
	ar.allocLock.Lock()
	ar.alloc = updated
	ar.allocLock.Unlock()
}

// GetAllocDir returns the alloc dir which is safe for concurrent use.
func (ar *allocRunner) GetAllocDir() *allocdir.AllocDir {
	return ar.allocDir
}

// Restore state from database. Must be called after NewAllocRunner but before
// Run.
func (ar *allocRunner) Restore() error {
	// Retrieve deployment status to avoid resetting it across agent
	// restarts. Once a deployment status is set Nomad no longer monitors
	// alloc health, so we must persist deployment state across restarts.
	ds, err := ar.stateDB.GetDeploymentStatus(ar.id)
	if err != nil {
		return err
	}

	ar.stateLock.Lock()
	ar.state.DeploymentStatus = ds
	ar.stateLock.Unlock()

	states := make(map[string]*structs.TaskState)

	// Restore task runners
	for _, tr := range ar.tasks {
		if err := tr.Restore(); err != nil {
			return err
		}
		states[tr.Task().Name] = tr.TaskState()
	}

	ar.taskHookCoordinator.taskStateUpdated(states)

	return nil
}

// persistDeploymentStatus stores AllocDeploymentStatus.
func (ar *allocRunner) persistDeploymentStatus(ds *structs.AllocDeploymentStatus) {
	if err := ar.stateDB.PutDeploymentStatus(ar.id, ds); err != nil {
		// While any persistence errors are very bad, the worst case
		// scenario for failing to persist deployment status is that if
		// the agent is restarted it will monitor the deployment status
		// again. This could cause a deployment's status to change when
		// that shouldn't happen. However, allowing that seems better
		// than failing the entire allocation.
		ar.logger.Error("error storing deployment status", "error", err)
	}
}

// TaskStateUpdated is called by TaskRunner when a task's state has been
// updated. It does not process the update synchronously but instead notifies a
// goroutine that the state has changed. Since processing the state change may
// cause the task to be killed (thus changing its state again) it cannot be done
// synchronously as it would cause a deadlock due to reentrancy.
//
// The goroutine is used to compute changes to the alloc's ClientStatus and to
// update the server with the new state.
func (ar *allocRunner) TaskStateUpdated() {
	select {
	case ar.taskStateUpdatedCh <- struct{}{}:
	default:
		// already pending updates
	}
}

// handleTaskStateUpdates must be run in a goroutine as it monitors
// taskStateUpdatedCh for task state update notifications and processes task
// states.
//
// Processing task state updates must be done in a goroutine as it may have to
// kill tasks which causes further task state updates.
func (ar *allocRunner) handleTaskStateUpdates() {
	defer close(ar.taskStateUpdateHandlerCh)

	hasSidecars := hasSidecarTasks(ar.tasks)

	for done := false; !done; {
		select {
		case <-ar.taskStateUpdatedCh:
		case <-ar.waitCh:
			// Run has exited, sync once more to ensure final
			// states are collected.
			done = true
		}

		ar.logger.Trace("handling task state update", "done", done)

		// Set with the appropriate event if task runners should be
		// killed.
		var killEvent *structs.TaskEvent

		// If task runners should be killed, this is set to the task
		// name whose fault it is.
		killTask := ""

		// Task state has been updated; gather the state of the other tasks
		trNum := len(ar.tasks)
		liveRunners := make([]*taskrunner.TaskRunner, 0, trNum)
		states := make(map[string]*structs.TaskState, trNum)

		for name, tr := range ar.tasks {
			state := tr.TaskState()
			states[name] = state

			// Capture live task runners in case we need to kill them
			if state.State != structs.TaskStateDead {
				liveRunners = append(liveRunners, tr)
				continue
			}

			// Task is dead, determine if other tasks should be killed
			if state.Failed {
				// Only set failed event if no event has been
				// set yet to give dead leaders priority.
				if killEvent == nil {
					killTask = name
					killEvent = structs.NewTaskEvent(structs.TaskSiblingFailed).
						SetFailedSibling(name)
				}
			} else if tr.IsLeader() {
				killEvent = structs.NewTaskEvent(structs.TaskLeaderDead)
			}
		}

		// If all remaining live runners are sidecars, kill the alloc
		if killEvent == nil && hasSidecars && !hasNonSidecarTasks(liveRunners) {
			killEvent = structs.NewTaskEvent(structs.TaskMainDead)
		}

		// If there's a kill event set and live runners, kill them
		if killEvent != nil && len(liveRunners) > 0 {

			// Log kill reason
			switch killEvent.Type {
			case structs.TaskLeaderDead:
				ar.logger.Debug("leader task dead, destroying all tasks", "leader_task", killTask)
			case structs.TaskMainDead:
				ar.logger.Debug("main tasks dead, destroying all sidecar tasks")
			default:
				ar.logger.Debug("task failure, destroying all tasks", "failed_task", killTask)
			}

			// Emit kill event for live runners
			for _, tr := range liveRunners {
				tr.EmitEvent(killEvent)
			}

			// Kill 'em all
			states = ar.killTasks()

			// Wait for TaskRunners to exit before continuing to
			// prevent looping before TaskRunners have transitioned
			// to Dead.
			for _, tr := range liveRunners {
				select {
				case <-tr.WaitCh():
				case <-ar.waitCh:
				}
			}
		}

		ar.taskHookCoordinator.taskStateUpdated(states)

		// Get the client allocation
		calloc := ar.clientAlloc(states)

		// Update the server
		ar.stateUpdater.AllocStateUpdated(calloc)

		// Broadcast client alloc to listeners
		ar.allocBroadcaster.Send(calloc)
	}
}

// killTasks kills all task runners, leader (if there is one) first. Errors are
// logged except taskrunner.ErrTaskNotRunning which is ignored. Task states
// after Kill has been called are returned.
func (ar *allocRunner) killTasks() map[string]*structs.TaskState {
	var mu sync.Mutex
	states := make(map[string]*structs.TaskState, len(ar.tasks))

	// Run alloc prekill hooks
	ar.preKillHooks()

	// Kill leader first, synchronously
	for name, tr := range ar.tasks {
		if !tr.IsLeader() {
			continue
		}

		taskEvent := structs.NewTaskEvent(structs.TaskKilling)
		taskEvent.SetKillTimeout(tr.Task().KillTimeout)
		err := tr.Kill(context.TODO(), taskEvent)
		if err != nil && err != taskrunner.ErrTaskNotRunning {
			ar.logger.Warn("error stopping leader task", "error", err, "task_name", name)
		}

		state := tr.TaskState()
		states[name] = state
		break
	}

	// Kill the rest concurrently
	wg := sync.WaitGroup{}
	for name, tr := range ar.tasks {
		if tr.IsLeader() {
			continue
		}

		wg.Add(1)
		go func(name string, tr *taskrunner.TaskRunner) {
			defer wg.Done()
			taskEvent := structs.NewTaskEvent(structs.TaskKilling)
			taskEvent.SetKillTimeout(tr.Task().KillTimeout)
			err := tr.Kill(context.TODO(), taskEvent)
			if err != nil && err != taskrunner.ErrTaskNotRunning {
				ar.logger.Warn("error stopping task", "error", err, "task_name", name)
			}

			state := tr.TaskState()
			mu.Lock()
			states[name] = state
			mu.Unlock()
		}(name, tr)
	}
	wg.Wait()

	return states
}

// clientAlloc takes in the task states and returns an Allocation populated
// with Client specific fields
func (ar *allocRunner) clientAlloc(taskStates map[string]*structs.TaskState) *structs.Allocation {
	ar.stateLock.Lock()
	defer ar.stateLock.Unlock()

	// store task states for AllocState to expose
	ar.state.TaskStates = taskStates

	a := &structs.Allocation{
		ID:         ar.id,
		TaskStates: taskStates,
	}

	if d := ar.state.DeploymentStatus; d != nil {
		a.DeploymentStatus = d.Copy()
	}

	// Compute the ClientStatus
	if ar.state.ClientStatus != "" {
		// The client status is being forced
		a.ClientStatus, a.ClientDescription = ar.state.ClientStatus, ar.state.ClientDescription
	} else {
		a.ClientStatus, a.ClientDescription = getClientStatus(taskStates)
	}

	// If the allocation is terminal, make sure all required fields are properly
	// set.
	if a.ClientTerminalStatus() {
		alloc := ar.Alloc()

		// If we are part of a deployment and the alloc has failed, mark the
		// alloc as unhealthy. This guards against the watcher not being started.
		// If the health status is already set then terminal allocations should
		// not overwrite it.
		if a.ClientStatus == structs.AllocClientStatusFailed &&
			alloc.DeploymentID != "" && !a.DeploymentStatus.HasHealth() {
			a.DeploymentStatus = &structs.AllocDeploymentStatus{
				Healthy: helper.BoolToPtr(false),
			}
		}

		// Make sure we have marked FinishedAt for every task. This is used
		// to calculate the reschedule time for failed allocations.
		now := time.Now()
		for taskName := range ar.tasks {
			ts, ok := a.TaskStates[taskName]
			if !ok {
				ts = &structs.TaskState{}
				a.TaskStates[taskName] = ts
			}
			if ts.FinishedAt.IsZero() {
				ts.FinishedAt = now
			}
		}
	}

	return a
}

// getClientStatus takes in the task states for a given allocation and computes
// the client status and description
func getClientStatus(taskStates map[string]*structs.TaskState) (status, description string) {
	var pending, running, dead, failed bool
	for _, state := range taskStates {
		switch state.State {
		case structs.TaskStateRunning:
			running = true
		case structs.TaskStatePending:
			pending = true
		case structs.TaskStateDead:
			if state.Failed {
				failed = true
			} else {
				dead = true
			}
		}
	}

	// Determine the alloc status
	if failed {
		return structs.AllocClientStatusFailed, "Failed tasks"
	} else if running {
		return structs.AllocClientStatusRunning, "Tasks are running"
	} else if pending {
		return structs.AllocClientStatusPending, "No tasks have started"
	} else if dead {
		return structs.AllocClientStatusComplete, "All tasks have completed"
	}

	return "", ""
}

// SetClientStatus is a helper for forcing a specific client
// status on the alloc runner. This is used during restore errors
// when the task state can't be restored.
func (ar *allocRunner) SetClientStatus(clientStatus string) {
	ar.stateLock.Lock()
	defer ar.stateLock.Unlock()
	ar.state.ClientStatus = clientStatus
}

// AllocState returns a copy of allocation state including a snapshot of task
// states.
func (ar *allocRunner) AllocState() *state.State {
	ar.stateLock.RLock()
	state := ar.state.Copy()
	ar.stateLock.RUnlock()

	// If TaskStateUpdated has not been called yet, ar.state.TaskStates
	// won't be set as it is not the canonical source of TaskStates.
	// Fill in the snapshot copy from the task runners instead.
	if len(state.TaskStates) == 0 {
		state.TaskStates = make(map[string]*structs.TaskState, len(ar.tasks))
		for k, tr := range ar.tasks {
			state.TaskStates[k] = tr.TaskState()
		}
	}

	// Generate alloc to get other state fields
	alloc := ar.clientAlloc(state.TaskStates)
	state.ClientStatus = alloc.ClientStatus
	state.ClientDescription = alloc.ClientDescription
	state.DeploymentStatus = alloc.DeploymentStatus

	return state
}

// Update asynchronously updates the running allocation with a new version
// received from the server.
// When processing a new update, we will first attempt to drain stale updates
// from the queue, before appending the new one.
func (ar *allocRunner) Update(update *structs.Allocation) {
	select {
	// Drain the queued update from the channel if possible, and check the
	// modify index
	case oldUpdate := <-ar.allocUpdatedCh:
		// If the old update is newer than the replacement, then skip the new one
		// and return. This case shouldn't happen, but may in the case of a bug
		// elsewhere inside the system.
		if oldUpdate.AllocModifyIndex > update.AllocModifyIndex {
			ar.logger.Debug("Discarding allocation update due to newer alloc revision in queue",
				"old_modify_index", oldUpdate.AllocModifyIndex,
				"new_modify_index", update.AllocModifyIndex)
			ar.allocUpdatedCh <- oldUpdate
			return
		} else {
			ar.logger.Debug("Discarding allocation update",
				"skipped_modify_index", oldUpdate.AllocModifyIndex,
				"new_modify_index", update.AllocModifyIndex)
		}
	case <-ar.waitCh:
		ar.logger.Trace("AllocRunner has terminated, skipping alloc update",
			"modify_index", update.AllocModifyIndex)
		return
	default:
	}

	// Queue the new update
	ar.allocUpdatedCh <- update
}
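
// handleAllocUpdates processes allocation updates received on allocUpdatedCh
// until the Run loop exits.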
func (ar *allocRunner) handleAllocUpdates() {
	for {
		select {
		case update := <-ar.allocUpdatedCh:
			ar.handleAllocUpdate(update)
		case <-ar.waitCh:
			return
		}
	}
}

// handleAllocUpdate processes a single updated allocation. Updates are handled
// serially; if there is already a pending update it will be discarded and
// replaced by the latest update.
func (ar *allocRunner) handleAllocUpdate(update *structs.Allocation) {
	// Detect Stop updates
	stopping := !ar.Alloc().TerminalStatus() && update.TerminalStatus()

	// Update ar.alloc
	ar.setAlloc(update)

	// Run update hooks if not stopping or dead
	if !update.TerminalStatus() {
		if err := ar.update(update); err != nil {
			ar.logger.Error("error running update hooks", "error", err)
		}
	}

	// Update task runners
	for _, tr := range ar.tasks {
		tr.Update(update)
	}

	// If the alloc is being terminated, kill all tasks, leader first
	if stopping {
		ar.killTasks()
	}
}
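
// Listener returns a listener for client allocation updates broadcast by this
// alloc runner.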
func (ar *allocRunner) Listener() *cstructs.AllocListener {
	return ar.allocBroadcaster.Listen()
}
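
// destroyImpl stops any running tasks, waits for the Run loop and the task
// state update handler to exit, runs destroy hooks, deletes local state, and
// marks the alloc runner as destroyed.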
func (ar *allocRunner) destroyImpl() {
	// Stop any running tasks and persist states in case the client is
	// shut down before Destroy finishes.
	states := ar.killTasks()
	calloc := ar.clientAlloc(states)
	ar.stateUpdater.AllocStateUpdated(calloc)

	// Wait for tasks to exit and postrun hooks to finish
	<-ar.waitCh

	// Run destroy hooks
	if err := ar.destroy(); err != nil {
		ar.logger.Warn("error running destroy hooks", "error", err)
	}

	// Wait for task state update handler to exit before removing local
	// state if Run() ran at all.
	<-ar.taskStateUpdateHandlerCh

	// Mark alloc as destroyed
	ar.destroyedLock.Lock()

	// Clean up the state db while holding the lock to avoid a race with the
	// periodic PersistState call, which may otherwise resurrect the alloc.
	if err := ar.stateDB.DeleteAllocationBucket(ar.id); err != nil {
		ar.logger.Warn("failed to delete allocation state", "error", err)
	}

	if !ar.shutdown {
		ar.shutdown = true
		close(ar.shutdownCh)
	}

	ar.destroyed = true
	close(ar.destroyCh)

	ar.destroyedLock.Unlock()
}
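
// PersistState writes the runner's current allocation to the local state
// store. If the runner has already been destroyed, it instead removes any
// remaining local state.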
func (ar *allocRunner) PersistState() error {
	ar.destroyedLock.Lock()
	defer ar.destroyedLock.Unlock()

	if ar.destroyed {
		err := ar.stateDB.DeleteAllocationBucket(ar.id)
		if err != nil {
			ar.logger.Warn("failed to delete allocation bucket", "error", err)
		}
		return nil
	}

	// TODO: consider persisting deployment state along with task status.
	// While we study why only the alloc is persisted, I opted to maintain current
	// behavior and not risk adding yet more IO calls unnecessarily.
	return ar.stateDB.PutAllocation(ar.Alloc())
}

// Destroy the alloc runner by stopping it if it is still running and cleaning
// up all of its resources.
//
// This method is safe for calling concurrently with Run() and will cause it to
// exit (thus closing WaitCh).
// When the destroy action is completed, it will close DestroyCh().
func (ar *allocRunner) Destroy() {
	ar.destroyedLock.Lock()
	defer ar.destroyedLock.Unlock()

	if ar.destroyed {
		// Only destroy once
		return
	}

	if ar.destroyLaunched {
		// Only dispatch a destroy once
		return
	}

	ar.destroyLaunched = true

	// Synchronize calls to shutdown/destroy
	if ar.shutdownLaunched {
		go func() {
			ar.logger.Debug("Waiting for shutdown before destroying runner")
			<-ar.shutdownCh
			ar.destroyImpl()
		}()

		return
	}

	go ar.destroyImpl()
}

// IsDestroyed returns true if the alloc runner has been destroyed (stopped and
// garbage collected).
//
// This method is safe for calling concurrently with Run(). Callers must
// receive on WaitCh() to block until alloc runner has stopped and been
// destroyed.
func (ar *allocRunner) IsDestroyed() bool {
	ar.destroyedLock.Lock()
	defer ar.destroyedLock.Unlock()
	return ar.destroyed
}

// IsWaiting returns true if the alloc runner is waiting for its previous
// allocation to terminate.
//
// This method is safe for calling concurrently with Run().
func (ar *allocRunner) IsWaiting() bool {
	return ar.prevAllocWatcher.IsWaiting()
}

// isShuttingDown returns true if the alloc runner is in a shutdown state
// due to a call to Shutdown() or Destroy().
func (ar *allocRunner) isShuttingDown() bool {
	ar.destroyedLock.Lock()
	defer ar.destroyedLock.Unlock()
	return ar.shutdownLaunched
}

// DestroyCh is a channel that is closed when an allocrunner is closed due to
// an explicit call to Destroy().
func (ar *allocRunner) DestroyCh() <-chan struct{} {
	return ar.destroyCh
}

// ShutdownCh is a channel that is closed when an allocrunner is closed due to
// either an explicit call to Shutdown(), or Destroy().
func (ar *allocRunner) ShutdownCh() <-chan struct{} {
	return ar.shutdownCh
}

// Shutdown AllocRunner gracefully. Asynchronously shuts down all TaskRunners.
// Tasks are unaffected and may be restored.
// When the shutdown is completed, it will close ShutdownCh().
func (ar *allocRunner) Shutdown() {
	ar.destroyedLock.Lock()
	defer ar.destroyedLock.Unlock()

	// Destroy is a superset of Shutdown so there's nothing to do if this
	// has already been destroyed.
	if ar.destroyed {
		return
	}

	// Destroy is a superset of Shutdown so if it's been marked for destruction,
	// don't try to shut down in parallel. If shutdown has been launched, don't
	// try again.
	if ar.destroyLaunched || ar.shutdownLaunched {
		return
	}

	ar.shutdownLaunched = true

	go func() {
		ar.logger.Trace("shutting down")

		// Shutdown tasks gracefully if they were run
		wg := sync.WaitGroup{}
		for _, tr := range ar.tasks {
			wg.Add(1)
			go func(tr *taskrunner.TaskRunner) {
				tr.Shutdown()
				wg.Done()
			}(tr)
		}
		wg.Wait()

		// Wait for Run to exit
		<-ar.waitCh

		// Run shutdown hooks
		ar.shutdownHooks()

		// Wait for updater to finish its final run
		<-ar.taskStateUpdateHandlerCh

		ar.destroyedLock.Lock()
		ar.shutdown = true
		close(ar.shutdownCh)
		ar.destroyedLock.Unlock()
	}()
}

// IsMigrating returns true if the alloc runner is migrating data from its
// previous allocation.
//
// This method is safe for calling concurrently with Run().
func (ar *allocRunner) IsMigrating() bool {
	return ar.prevAllocMigrator.IsMigrating()
}
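
// StatsReporter returns the alloc runner as an interfaces.AllocStatsReporter.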
func (ar *allocRunner) StatsReporter() interfaces.AllocStatsReporter {
	return ar
}

// LatestAllocStats returns the latest stats for an allocation. If taskFilter
// is set, only stats for that task -- if it exists -- are returned.
func (ar *allocRunner) LatestAllocStats(taskFilter string) (*cstructs.AllocResourceUsage, error) {
	astat := &cstructs.AllocResourceUsage{
		Tasks: make(map[string]*cstructs.TaskResourceUsage, len(ar.tasks)),
		ResourceUsage: &cstructs.ResourceUsage{
			MemoryStats: &cstructs.MemoryStats{},
			CpuStats:    &cstructs.CpuStats{},
			DeviceStats: []*device.DeviceGroupStats{},
		},
	}

	for name, tr := range ar.tasks {
		if taskFilter != "" && taskFilter != name {
			// Getting stats for a particular task and it's not this one!
			continue
		}

		if usage := tr.LatestResourceUsage(); usage != nil {
			astat.Tasks[name] = usage
			astat.ResourceUsage.Add(usage.ResourceUsage)
			if usage.Timestamp > astat.Timestamp {
				astat.Timestamp = usage.Timestamp
			}
		}
	}

	return astat, nil
}
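
// GetTaskEventHandler returns an event handler that forwards driver events for
// the named task as task events, or nil if the task does not exist.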
func (ar *allocRunner) GetTaskEventHandler(taskName string) drivermanager.EventHandler {
	if tr, ok := ar.tasks[taskName]; ok {
		return func(ev *drivers.TaskEvent) {
			tr.EmitEvent(&structs.TaskEvent{
				Type:          structs.TaskDriverMessage,
				Time:          ev.Timestamp.UnixNano(),
				Details:       ev.Annotations,
				DriverMessage: ev.Message,
			})
		}
	}
	return nil
}

// RestartTask signals the task runner for the provided task to restart.
func (ar *allocRunner) RestartTask(taskName string, taskEvent *structs.TaskEvent) error {
	tr, ok := ar.tasks[taskName]
	if !ok {
		return fmt.Errorf("Could not find task runner for task: %s", taskName)
	}

	return tr.Restart(context.TODO(), taskEvent, false)
}

// Restart satisfies the WorkloadRestarter interface by restarting all task
// runners concurrently.
func (ar *allocRunner) Restart(ctx context.Context, event *structs.TaskEvent, failure bool) error {
	waitCh := make(chan struct{})
	var err *multierror.Error
	var errMutex sync.Mutex

	go func() {
		var wg sync.WaitGroup
		defer close(waitCh)
		for tn, tr := range ar.tasks {
			wg.Add(1)
			go func(taskName string, r agentconsul.WorkloadRestarter) {
				defer wg.Done()
				e := r.Restart(ctx, event, failure)
				if e != nil {
					errMutex.Lock()
					defer errMutex.Unlock()
					err = multierror.Append(err, fmt.Errorf("failed to restart task %s: %v", taskName, e))
				}
			}(tn, tr)
		}
		wg.Wait()
	}()

	select {
	case <-waitCh:
	case <-ctx.Done():
	}

	return err.ErrorOrNil()
}

// RestartAll signals all task runners in the allocation to restart and passes
// a copy of the task event to each restart event.
// Returns any errors in a concatenated form.
func (ar *allocRunner) RestartAll(taskEvent *structs.TaskEvent) error {
	var err *multierror.Error

	for tn := range ar.tasks {
		rerr := ar.RestartTask(tn, taskEvent.Copy())
		if rerr != nil {
			err = multierror.Append(err, rerr)
		}
	}

	return err.ErrorOrNil()
}

// Signal sends a signal request to task runners inside an allocation. If the
// taskName is empty, then it is sent to all tasks.
func (ar *allocRunner) Signal(taskName, signal string) error {
	event := structs.NewTaskEvent(structs.TaskSignaling).SetSignalText(signal)

	if taskName != "" {
		tr, ok := ar.tasks[taskName]
		if !ok {
			return fmt.Errorf("Task not found")
		}

		return tr.Signal(event, signal)
	}

	var err *multierror.Error

	for tn, tr := range ar.tasks {
		rerr := tr.Signal(event.Copy(), signal)
		if rerr != nil {
			err = multierror.Append(err, fmt.Errorf("Failed to signal task: %s, err: %v", tn, rerr))
		}
	}

	return err.ErrorOrNil()
}
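
// GetTaskExecHandler returns the exec handler for the named task, or nil if
// the task does not exist.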
func (ar *allocRunner) GetTaskExecHandler(taskName string) drivermanager.TaskExecHandler {
	tr, ok := ar.tasks[taskName]
	if !ok {
		return nil
	}

	return tr.TaskExecHandler()
}
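
// GetTaskDriverCapabilities returns the capabilities of the driver used by the
// named task, or an error if the task does not exist.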
func (ar *allocRunner) GetTaskDriverCapabilities(taskName string) (*drivers.Capabilities, error) {
	tr, ok := ar.tasks[taskName]
	if !ok {
		return nil, fmt.Errorf("task not found")
	}

	return tr.DriverCapabilities()
}