2018-10-04 23:22:01 +00:00
|
|
|
package allocrunner
|
2018-06-22 00:35:07 +00:00
|
|
|
|
|
|
|
import (
|
2018-07-17 20:57:57 +00:00
|
|
|
"context"
|
2018-06-22 00:35:07 +00:00
|
|
|
"fmt"
|
|
|
|
"sync"
|
2018-07-19 00:06:44 +00:00
|
|
|
"time"
|
2018-06-22 00:35:07 +00:00
|
|
|
|
|
|
|
log "github.com/hashicorp/go-hclog"
|
2019-04-01 12:56:02 +00:00
|
|
|
multierror "github.com/hashicorp/go-multierror"
|
2018-06-22 00:35:07 +00:00
|
|
|
"github.com/hashicorp/nomad/client/allocdir"
|
2018-10-04 23:22:01 +00:00
|
|
|
"github.com/hashicorp/nomad/client/allocrunner/interfaces"
|
|
|
|
"github.com/hashicorp/nomad/client/allocrunner/state"
|
2022-08-22 22:38:49 +00:00
|
|
|
"github.com/hashicorp/nomad/client/allocrunner/tasklifecycle"
|
2018-10-04 23:22:01 +00:00
|
|
|
"github.com/hashicorp/nomad/client/allocrunner/taskrunner"
|
2018-08-23 19:03:17 +00:00
|
|
|
"github.com/hashicorp/nomad/client/allocwatcher"
|
2018-07-13 16:45:29 +00:00
|
|
|
"github.com/hashicorp/nomad/client/config"
|
2018-07-20 00:40:25 +00:00
|
|
|
"github.com/hashicorp/nomad/client/consul"
|
2018-11-16 23:29:59 +00:00
|
|
|
"github.com/hashicorp/nomad/client/devicemanager"
|
2019-10-22 13:20:26 +00:00
|
|
|
"github.com/hashicorp/nomad/client/dynamicplugins"
|
2018-07-19 00:06:44 +00:00
|
|
|
cinterfaces "github.com/hashicorp/nomad/client/interfaces"
|
2022-03-21 09:29:57 +00:00
|
|
|
"github.com/hashicorp/nomad/client/lib/cgutil"
|
2020-01-08 12:47:07 +00:00
|
|
|
"github.com/hashicorp/nomad/client/pluginmanager/csimanager"
|
2018-11-28 03:42:22 +00:00
|
|
|
"github.com/hashicorp/nomad/client/pluginmanager/drivermanager"
|
2022-03-15 08:38:30 +00:00
|
|
|
"github.com/hashicorp/nomad/client/serviceregistration"
|
2022-06-07 14:18:19 +00:00
|
|
|
"github.com/hashicorp/nomad/client/serviceregistration/checks/checkstore"
|
2022-03-21 09:29:57 +00:00
|
|
|
"github.com/hashicorp/nomad/client/serviceregistration/wrapper"
|
2018-08-08 00:46:37 +00:00
|
|
|
cstate "github.com/hashicorp/nomad/client/state"
|
2018-06-29 00:01:05 +00:00
|
|
|
cstructs "github.com/hashicorp/nomad/client/structs"
|
2018-07-13 16:45:29 +00:00
|
|
|
"github.com/hashicorp/nomad/client/vaultclient"
|
2022-08-17 16:26:34 +00:00
|
|
|
"github.com/hashicorp/nomad/helper/pointer"
|
2018-06-22 00:35:07 +00:00
|
|
|
"github.com/hashicorp/nomad/nomad/structs"
|
2018-11-15 15:13:14 +00:00
|
|
|
"github.com/hashicorp/nomad/plugins/device"
|
2018-12-18 03:36:06 +00:00
|
|
|
"github.com/hashicorp/nomad/plugins/drivers"
|
2018-06-22 00:35:07 +00:00
|
|
|
)
|
|
|
|
|
|
|
|
// allocRunner is used to run all the tasks in a given allocation
type allocRunner struct {
	// id is the ID of the allocation. Can be accessed without a lock
	id string

	// logger is the logger for the alloc runner.
	logger log.Logger

	// clientConfig is the client configuration block.
	clientConfig *config.Config

	// stateUpdater is used to emit updated alloc state
	stateUpdater cinterfaces.AllocStateHandler

	// taskStateUpdatedCh is ticked whenever task state has changed. Must
	// have len==1 to allow nonblocking notification of state updates while
	// the goroutine is already processing a previous update.
	taskStateUpdatedCh chan struct{}

	// taskStateUpdateHandlerCh is closed when the task state handling
	// goroutine exits. It is unsafe to destroy the local allocation state
	// before this goroutine exits.
	taskStateUpdateHandlerCh chan struct{}

	// allocUpdatedCh is a channel that is used to stream allocation updates into
	// the allocUpdate handler. Must have len==1 to allow nonblocking notification
	// of new allocation updates while the goroutine is processing a previous
	// update.
	allocUpdatedCh chan *structs.Allocation

	// consulClient is the client used by the consul service hook for
	// registering services and checks
	consulClient serviceregistration.Handler

	// consulProxiesClient is the client used by the envoy version hook for
	// looking up supported envoy versions of the consul agent.
	consulProxiesClient consul.SupportedProxiesAPI

	// sidsClient is the client used by the service identity hook for
	// managing SI tokens
	sidsClient consul.ServiceIdentityAPI

	// vaultClient is used to manage Vault tokens
	vaultClient vaultclient.VaultClient

	// waitCh is closed when the Run loop has exited
	waitCh chan struct{}

	// destroyed is true when the Run loop has exited, postrun hooks have
	// run, and alloc runner has been destroyed. Must acquire destroyedLock
	// to access.
	destroyed bool

	// destroyCh is closed when the Run loop has exited, postrun hooks have
	// run, and alloc runner has been destroyed.
	destroyCh chan struct{}

	// shutdown is true when the Run loop has exited, and shutdown hooks have
	// run. Must acquire destroyedLock to access.
	shutdown bool

	// shutdownCh is closed when the Run loop has exited, and shutdown hooks
	// have run.
	shutdownCh chan struct{}

	// destroyLaunched is true if Destroy has been called. Must acquire
	// destroyedLock to access.
	destroyLaunched bool

	// shutdownLaunched is true if Shutdown has been called. Must acquire
	// destroyedLock to access.
	shutdownLaunched bool

	// destroyedLock guards destroyed, destroyLaunched, shutdownLaunched,
	// and serializes Shutdown/Destroy calls.
	destroyedLock sync.Mutex

	// alloc captures the allocation being run. Guarded by allocLock.
	alloc     *structs.Allocation
	allocLock sync.RWMutex

	// state is the alloc runner's state. Guarded by stateLock.
	state     *state.State
	stateLock sync.RWMutex

	// stateDB is the database used to persist alloc runner state (e.g.
	// deployment and network status) across agent restarts.
	stateDB cstate.StateDB

	// allocDir is used to build the allocations directory structure.
	allocDir *allocdir.AllocDir

	// runnerHooks are alloc runner lifecycle hooks that should be run on state
	// transitions.
	runnerHooks []interfaces.RunnerHook

	// hookState is the output of allocrunner hooks. Guarded by hookStateMu.
	hookState   *cstructs.AllocHookResources
	hookStateMu sync.RWMutex

	// tasks are the set of task runners
	tasks map[string]*taskrunner.TaskRunner

	// deviceStatsReporter is used to lookup resource usage for alloc devices
	deviceStatsReporter cinterfaces.DeviceStatsReporter

	// allocBroadcaster sends client allocation updates to all listeners
	allocBroadcaster *cstructs.AllocBroadcaster

	// prevAllocWatcher allows waiting for any previous or preempted allocations
	// to exit
	prevAllocWatcher allocwatcher.PrevAllocWatcher

	// prevAllocMigrator allows the migration of a previous allocations alloc dir.
	prevAllocMigrator allocwatcher.PrevAllocMigrator

	// dynamicRegistry contains all locally registered dynamic plugins (e.g csi
	// plugins).
	dynamicRegistry dynamicplugins.Registry

	// csiManager is used to wait for CSI Volumes to be attached, and by the task
	// runner to manage their mounting
	csiManager csimanager.Manager

	// cpusetManager is responsible for configuring task cgroups if supported by the platform
	cpusetManager cgutil.CpusetManager

	// devicemanager is used to mount devices as well as lookup device
	// statistics
	devicemanager devicemanager.Manager

	// driverManager is responsible for dispensing driver plugins and registering
	// event handlers
	driverManager drivermanager.Manager

	// serversContactedCh is passed to TaskRunners so they can detect when
	// servers have been contacted for the first time in case of a failed
	// restore.
	serversContactedCh chan struct{}

	// taskCoordinator is used to control when tasks are allowed to run
	// depending on their lifecycle configuration.
	taskCoordinator *tasklifecycle.Coordinator

	// shutdownDelayCtx is created in NewAllocRunner and handed to each task
	// runner; canceling it via shutdownDelayCancelFn cuts any configured
	// shutdown delay short.
	shutdownDelayCtx      context.Context
	shutdownDelayCancelFn context.CancelFunc

	// rpcClient is the RPC Client that should be used by the allocrunner and its
	// hooks to communicate with Nomad Servers.
	rpcClient RPCer

	// serviceRegWrapper is the handler wrapper that is used by service hooks
	// to perform service and check registration and deregistration.
	serviceRegWrapper *wrapper.HandlerWrapper

	// checkStore contains check status information
	checkStore checkstore.Shim

	// getter is an interface for retrieving artifacts.
	getter cinterfaces.ArtifactGetter
}
|
|
|
|
|
|
|
|
// RPCer is the interface needed by hooks to make RPC calls.
type RPCer interface {
	// RPC invokes the named server RPC method with args, writing the
	// response into reply.
	RPC(method string, args interface{}, reply interface{}) error
}
|
|
|
|
|
|
|
|
// NewAllocRunner returns a new allocation runner.
|
2018-07-13 00:56:52 +00:00
|
|
|
func NewAllocRunner(config *Config) (*allocRunner, error) {
|
|
|
|
alloc := config.Alloc
|
|
|
|
tg := alloc.Job.LookupTaskGroup(alloc.TaskGroup)
|
|
|
|
if tg == nil {
|
|
|
|
return nil, fmt.Errorf("failed to lookup task group %q", alloc.TaskGroup)
|
|
|
|
}
|
|
|
|
|
2018-06-22 00:35:07 +00:00
|
|
|
ar := &allocRunner{
|
2018-10-12 01:03:48 +00:00
|
|
|
id: alloc.ID,
|
|
|
|
alloc: alloc,
|
|
|
|
clientConfig: config.ClientConfig,
|
|
|
|
consulClient: config.Consul,
|
2020-09-04 17:50:11 +00:00
|
|
|
consulProxiesClient: config.ConsulProxies,
|
2019-11-27 21:41:45 +00:00
|
|
|
sidsClient: config.ConsulSI,
|
2018-10-12 01:03:48 +00:00
|
|
|
vaultClient: config.Vault,
|
|
|
|
tasks: make(map[string]*taskrunner.TaskRunner, len(tg.Tasks)),
|
|
|
|
waitCh: make(chan struct{}),
|
2018-12-14 15:02:47 +00:00
|
|
|
destroyCh: make(chan struct{}),
|
|
|
|
shutdownCh: make(chan struct{}),
|
2018-10-12 01:03:48 +00:00
|
|
|
state: &state.State{},
|
|
|
|
stateDB: config.StateDB,
|
|
|
|
stateUpdater: config.StateUpdater,
|
|
|
|
taskStateUpdatedCh: make(chan struct{}, 1),
|
|
|
|
taskStateUpdateHandlerCh: make(chan struct{}),
|
2018-12-17 12:27:54 +00:00
|
|
|
allocUpdatedCh: make(chan *structs.Allocation, 1),
|
2018-11-15 15:13:14 +00:00
|
|
|
deviceStatsReporter: config.DeviceStatsReporter,
|
2018-10-12 01:03:48 +00:00
|
|
|
prevAllocWatcher: config.PrevAllocWatcher,
|
2018-12-06 11:15:59 +00:00
|
|
|
prevAllocMigrator: config.PrevAllocMigrator,
|
2019-10-22 13:20:26 +00:00
|
|
|
dynamicRegistry: config.DynamicRegistry,
|
2020-01-08 12:47:07 +00:00
|
|
|
csiManager: config.CSIManager,
|
2021-04-08 05:04:47 +00:00
|
|
|
cpusetManager: config.CpusetManager,
|
2018-11-16 23:29:59 +00:00
|
|
|
devicemanager: config.DeviceManager,
|
2018-11-28 03:42:22 +00:00
|
|
|
driverManager: config.DriverManager,
|
2019-05-10 15:51:06 +00:00
|
|
|
serversContactedCh: config.ServersContactedCh,
|
2020-02-11 13:30:34 +00:00
|
|
|
rpcClient: config.RPCClient,
|
2022-03-21 09:29:57 +00:00
|
|
|
serviceRegWrapper: config.ServiceRegWrapper,
|
2022-06-07 14:18:19 +00:00
|
|
|
checkStore: config.CheckStore,
|
2022-05-03 22:38:32 +00:00
|
|
|
getter: config.Getter,
|
2018-06-22 00:35:07 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
// Create the logger based on the allocation ID
|
2018-08-30 21:33:50 +00:00
|
|
|
ar.logger = config.Logger.Named("alloc_runner").With("alloc_id", alloc.ID)
|
2018-06-22 00:35:07 +00:00
|
|
|
|
2018-11-17 01:29:25 +00:00
|
|
|
// Create alloc broadcaster
|
|
|
|
ar.allocBroadcaster = cstructs.NewAllocBroadcaster(ar.logger)
|
|
|
|
|
2018-08-29 22:05:03 +00:00
|
|
|
// Create alloc dir
|
2021-10-15 23:56:14 +00:00
|
|
|
ar.allocDir = allocdir.NewAllocDir(ar.logger, config.ClientConfig.AllocDir, alloc.ID)
|
2018-08-29 22:05:03 +00:00
|
|
|
|
2022-08-22 22:38:49 +00:00
|
|
|
ar.taskCoordinator = tasklifecycle.NewCoordinator(ar.logger, tg.Tasks, ar.waitCh)
|
2019-12-04 20:44:21 +00:00
|
|
|
|
2021-12-13 19:54:53 +00:00
|
|
|
shutdownDelayCtx, shutdownDelayCancel := context.WithCancel(context.Background())
|
|
|
|
ar.shutdownDelayCtx = shutdownDelayCtx
|
|
|
|
ar.shutdownDelayCancelFn = shutdownDelayCancel
|
|
|
|
|
2018-06-22 00:35:07 +00:00
|
|
|
// Initialize the runners hooks.
|
2019-06-14 03:05:57 +00:00
|
|
|
if err := ar.initRunnerHooks(config.ClientConfig); err != nil {
|
2019-05-08 17:45:20 +00:00
|
|
|
return nil, err
|
|
|
|
}
|
2018-06-22 00:35:07 +00:00
|
|
|
|
2018-07-13 00:56:52 +00:00
|
|
|
// Create the TaskRunners
|
|
|
|
if err := ar.initTaskRunners(tg.Tasks); err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
|
|
|
|
return ar, nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// initTaskRunners creates task runners but does *not* run them.
|
|
|
|
func (ar *allocRunner) initTaskRunners(tasks []*structs.Task) error {
|
|
|
|
for _, task := range tasks {
|
2020-09-04 17:50:11 +00:00
|
|
|
trConfig := &taskrunner.Config{
|
2022-08-22 22:38:49 +00:00
|
|
|
Alloc: ar.alloc,
|
|
|
|
ClientConfig: ar.clientConfig,
|
|
|
|
Task: task,
|
|
|
|
TaskDir: ar.allocDir.NewTaskDir(task.Name),
|
|
|
|
Logger: ar.logger,
|
|
|
|
StateDB: ar.stateDB,
|
|
|
|
StateUpdater: ar,
|
|
|
|
DynamicRegistry: ar.dynamicRegistry,
|
|
|
|
Consul: ar.consulClient,
|
|
|
|
ConsulProxies: ar.consulProxiesClient,
|
|
|
|
ConsulSI: ar.sidsClient,
|
|
|
|
Vault: ar.vaultClient,
|
|
|
|
DeviceStatsReporter: ar.deviceStatsReporter,
|
|
|
|
CSIManager: ar.csiManager,
|
|
|
|
DeviceManager: ar.devicemanager,
|
|
|
|
DriverManager: ar.driverManager,
|
|
|
|
ServersContactedCh: ar.serversContactedCh,
|
|
|
|
StartConditionMetCh: ar.taskCoordinator.StartConditionForTask(task),
|
|
|
|
ShutdownDelayCtx: ar.shutdownDelayCtx,
|
|
|
|
ServiceRegWrapper: ar.serviceRegWrapper,
|
|
|
|
Getter: ar.getter,
|
2021-04-14 14:17:28 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
if ar.cpusetManager != nil {
|
|
|
|
trConfig.CpusetCgroupPathGetter = ar.cpusetManager.CgroupPathFor(ar.id, task.Name)
|
2018-07-13 00:56:52 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
// Create, but do not Run, the task runner
|
2020-09-04 17:50:11 +00:00
|
|
|
tr, err := taskrunner.NewTaskRunner(trConfig)
|
2018-07-13 00:56:52 +00:00
|
|
|
if err != nil {
|
|
|
|
return fmt.Errorf("failed creating runner for task %q: %v", task.Name, err)
|
|
|
|
}
|
|
|
|
|
|
|
|
ar.tasks[task.Name] = tr
|
|
|
|
}
|
|
|
|
return nil
|
2018-06-22 00:35:07 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
// WaitCh returns a channel that is closed when the Run loop has exited.
func (ar *allocRunner) WaitCh() <-chan struct{} {
	return ar.waitCh
}
|
|
|
|
|
2018-11-26 20:50:35 +00:00
|
|
|
// Run the AllocRunner. Starts tasks if the alloc is non-terminal and closes
// WaitCh when it exits. Should be started in a goroutine.
func (ar *allocRunner) Run() {
	// Close the wait channel on return
	defer close(ar.waitCh)

	// Start the task state update handler
	go ar.handleTaskStateUpdates()

	// Start the alloc update handler
	go ar.handleAllocUpdates()

	// If task update chan has been closed, that means we've been shutdown.
	select {
	case <-ar.taskStateUpdateHandlerCh:
		return
	default:
	}

	// When handling (potentially restored) terminal alloc, ensure tasks and post-run hooks are run
	// to perform any cleanup that's necessary, potentially not done prior to earlier termination

	// Run the prestart hooks if non-terminal
	if ar.shouldRun() {
		if err := ar.prerun(); err != nil {
			ar.logger.Error("prerun failed", "error", err)

			// Mark every task failed/dead so the failure is surfaced in
			// task state, then skip straight to the postrun hooks.
			for _, tr := range ar.tasks {
				tr.MarkFailedDead(fmt.Sprintf("failed to setup alloc: %v", err))
			}

			goto POST
		}
	}

	// Run the runners (blocks until they exit)
	ar.runTasks()

POST:
	// NOTE(review): postrun hooks are skipped when the client is shutting
	// down, presumably so the alloc can be restored and cleaned up on the
	// next agent start — confirm against Shutdown/Restore behavior.
	if ar.isShuttingDown() {
		return
	}

	// Run the postrun hooks
	if err := ar.postrun(); err != nil {
		ar.logger.Error("postrun failed", "error", err)
	}
}
|
|
|
|
|
|
|
|
// shouldRun returns true if the alloc is in a state that the alloc runner
|
|
|
|
// should run it.
|
|
|
|
func (ar *allocRunner) shouldRun() bool {
|
|
|
|
// Do not run allocs that are terminal
|
|
|
|
if ar.Alloc().TerminalStatus() {
|
|
|
|
ar.logger.Trace("alloc terminal; not running",
|
|
|
|
"desired_status", ar.Alloc().DesiredStatus,
|
|
|
|
"client_status", ar.Alloc().ClientStatus,
|
|
|
|
)
|
|
|
|
return false
|
|
|
|
}
|
|
|
|
|
2018-11-01 00:41:37 +00:00
|
|
|
// It's possible that the alloc local state was marked terminal before
|
|
|
|
// the server copy of the alloc (checked above) was marked as terminal,
|
|
|
|
// so check the local state as well.
|
|
|
|
switch clientStatus := ar.AllocState().ClientStatus; clientStatus {
|
|
|
|
case structs.AllocClientStatusComplete, structs.AllocClientStatusFailed, structs.AllocClientStatusLost:
|
|
|
|
ar.logger.Trace("alloc terminal; updating server and not running", "status", clientStatus)
|
2018-11-05 20:20:45 +00:00
|
|
|
return false
|
2018-11-01 00:41:37 +00:00
|
|
|
}
|
|
|
|
|
2018-11-05 20:20:45 +00:00
|
|
|
return true
|
2018-10-16 22:17:36 +00:00
|
|
|
}
|
|
|
|
|
2018-11-26 20:51:18 +00:00
|
|
|
// runTasks is used to run the task runners and block until they exit.
|
|
|
|
func (ar *allocRunner) runTasks() {
|
2022-08-22 22:38:49 +00:00
|
|
|
// Start and wait for all tasks.
|
2018-07-13 00:56:52 +00:00
|
|
|
for _, task := range ar.tasks {
|
|
|
|
go task.Run()
|
2018-06-22 00:35:07 +00:00
|
|
|
}
|
2018-11-26 20:51:18 +00:00
|
|
|
for _, task := range ar.tasks {
|
2022-08-22 22:38:49 +00:00
|
|
|
<-task.WaitCh()
|
2018-11-26 20:51:18 +00:00
|
|
|
}
|
2018-06-22 00:35:07 +00:00
|
|
|
}
|
2018-06-28 00:27:03 +00:00
|
|
|
|
2018-09-20 00:32:50 +00:00
|
|
|
// Alloc returns the current allocation being run by this runner as sent by the
|
|
|
|
// server. This view of the allocation does not have updated task states.
|
2018-06-29 00:01:05 +00:00
|
|
|
func (ar *allocRunner) Alloc() *structs.Allocation {
|
|
|
|
ar.allocLock.RLock()
|
|
|
|
defer ar.allocLock.RUnlock()
|
|
|
|
return ar.alloc
|
|
|
|
}
|
|
|
|
|
2018-08-01 18:03:52 +00:00
|
|
|
func (ar *allocRunner) setAlloc(updated *structs.Allocation) {
|
|
|
|
ar.allocLock.Lock()
|
|
|
|
ar.alloc = updated
|
|
|
|
ar.allocLock.Unlock()
|
|
|
|
}
|
|
|
|
|
2018-08-23 19:03:17 +00:00
|
|
|
// GetAllocDir returns the alloc dir which is safe for concurrent use.
func (ar *allocRunner) GetAllocDir() *allocdir.AllocDir {
	// NOTE(review): no lock is taken here; concurrent safety presumably
	// relies on allocDir being assigned once in NewAllocRunner and never
	// reassigned (no other assignment is visible in this file) — confirm.
	return ar.allocDir
}
|
|
|
|
|
2018-07-13 00:56:52 +00:00
|
|
|
// Restore state from database. Must be called after NewAllocRunner but before
|
|
|
|
// Run.
|
|
|
|
func (ar *allocRunner) Restore() error {
|
2018-12-07 01:24:43 +00:00
|
|
|
// Retrieve deployment status to avoid reseting it across agent
|
|
|
|
// restarts. Once a deployment status is set Nomad no longer monitors
|
|
|
|
// alloc health, so we must persist deployment state across restarts.
|
|
|
|
ds, err := ar.stateDB.GetDeploymentStatus(ar.id)
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
2020-10-12 17:43:04 +00:00
|
|
|
ns, err := ar.stateDB.GetNetworkStatus(ar.id)
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
2018-12-07 01:24:43 +00:00
|
|
|
ar.stateLock.Lock()
|
|
|
|
ar.state.DeploymentStatus = ds
|
2020-10-12 17:43:04 +00:00
|
|
|
ar.state.NetworkStatus = ns
|
2018-12-07 01:24:43 +00:00
|
|
|
ar.stateLock.Unlock()
|
|
|
|
|
2020-01-15 21:28:58 +00:00
|
|
|
states := make(map[string]*structs.TaskState)
|
|
|
|
|
2018-08-08 00:46:37 +00:00
|
|
|
// Restore task runners
|
|
|
|
for _, tr := range ar.tasks {
|
|
|
|
if err := tr.Restore(); err != nil {
|
|
|
|
return err
|
2018-07-13 00:56:52 +00:00
|
|
|
}
|
2020-01-15 21:28:58 +00:00
|
|
|
states[tr.Task().Name] = tr.TaskState()
|
2018-08-08 00:46:37 +00:00
|
|
|
}
|
|
|
|
|
2022-08-22 22:38:49 +00:00
|
|
|
ar.taskCoordinator.Restore(states)
|
2020-01-15 21:28:58 +00:00
|
|
|
|
2018-08-08 00:46:37 +00:00
|
|
|
return nil
|
2018-06-29 00:01:05 +00:00
|
|
|
}
|
|
|
|
|
2018-12-07 01:24:43 +00:00
|
|
|
// persistDeploymentStatus stores AllocDeploymentStatus.
|
|
|
|
func (ar *allocRunner) persistDeploymentStatus(ds *structs.AllocDeploymentStatus) {
|
|
|
|
if err := ar.stateDB.PutDeploymentStatus(ar.id, ds); err != nil {
|
|
|
|
// While any persistence errors are very bad, the worst case
|
|
|
|
// scenario for failing to persist deployment status is that if
|
|
|
|
// the agent is restarted it will monitor the deployment status
|
|
|
|
// again. This could cause a deployment's status to change when
|
|
|
|
// that shouldn't happen. However, allowing that seems better
|
|
|
|
// than failing the entire allocation.
|
|
|
|
ar.logger.Error("error storing deployment status", "error", err)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2018-07-19 17:48:01 +00:00
|
|
|
// TaskStateUpdated is called by TaskRunner when a task's state has been
// updated. It does not process the update synchronously but instead notifies a
// goroutine the state has changed. Since processing the state change may cause
// the task to be killed (thus change its state again) it cannot be done
// synchronously as it would cause a deadlock due to reentrancy.
//
// The goroutine is used to compute changes to the alloc's ClientStatus and to
// update the server with the new state.
func (ar *allocRunner) TaskStateUpdated() {
	// Non-blocking send: taskStateUpdatedCh has capacity 1, so if a
	// notification is already pending the handler will pick up this change
	// too and the send can be safely dropped.
	select {
	case ar.taskStateUpdatedCh <- struct{}{}:
	default:
		// already pending updates
	}
}
|
|
|
|
|
|
|
|
// handleTaskStateUpdates must be run in goroutine as it monitors
|
2018-11-14 18:29:07 +00:00
|
|
|
// taskStateUpdatedCh for task state update notifications and processes task
|
2018-10-12 01:03:48 +00:00
|
|
|
// states.
|
|
|
|
//
|
|
|
|
// Processing task state updates must be done in a goroutine as it may have to
|
|
|
|
// kill tasks which causes further task state updates.
|
|
|
|
func (ar *allocRunner) handleTaskStateUpdates() {
|
|
|
|
defer close(ar.taskStateUpdateHandlerCh)
|
|
|
|
|
2020-06-29 19:07:48 +00:00
|
|
|
hasSidecars := hasSidecarTasks(ar.tasks)
|
|
|
|
|
2018-10-12 01:03:48 +00:00
|
|
|
for done := false; !done; {
|
|
|
|
select {
|
|
|
|
case <-ar.taskStateUpdatedCh:
|
|
|
|
case <-ar.waitCh:
|
2018-11-14 18:29:07 +00:00
|
|
|
// Run has exited, sync once more to ensure final
|
2018-10-12 01:03:48 +00:00
|
|
|
// states are collected.
|
|
|
|
done = true
|
2018-07-19 00:06:44 +00:00
|
|
|
}
|
|
|
|
|
2018-11-14 18:29:07 +00:00
|
|
|
ar.logger.Trace("handling task state update", "done", done)
|
|
|
|
|
2018-10-12 01:03:48 +00:00
|
|
|
// Set with the appropriate event if task runners should be
|
|
|
|
// killed.
|
|
|
|
var killEvent *structs.TaskEvent
|
|
|
|
|
|
|
|
// If task runners should be killed, this is set to the task
|
|
|
|
// name whose fault it is.
|
|
|
|
killTask := ""
|
|
|
|
|
|
|
|
// Task state has been updated; gather the state of the other tasks
|
|
|
|
trNum := len(ar.tasks)
|
|
|
|
liveRunners := make([]*taskrunner.TaskRunner, 0, trNum)
|
|
|
|
states := make(map[string]*structs.TaskState, trNum)
|
|
|
|
|
|
|
|
for name, tr := range ar.tasks {
|
2022-06-07 15:35:19 +00:00
|
|
|
taskState := tr.TaskState()
|
|
|
|
states[name] = taskState
|
2018-10-12 01:03:48 +00:00
|
|
|
|
2020-11-12 16:01:42 +00:00
|
|
|
if tr.IsPoststopTask() {
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
|
2018-10-12 01:03:48 +00:00
|
|
|
// Capture live task runners in case we need to kill them
|
2022-06-07 15:35:19 +00:00
|
|
|
if taskState.State != structs.TaskStateDead {
|
2018-10-12 01:03:48 +00:00
|
|
|
liveRunners = append(liveRunners, tr)
|
|
|
|
continue
|
2018-07-19 17:49:46 +00:00
|
|
|
}
|
2018-10-12 01:03:48 +00:00
|
|
|
|
|
|
|
// Task is dead, determine if other tasks should be killed
|
2022-06-07 15:35:19 +00:00
|
|
|
if taskState.Failed {
|
2018-10-12 01:03:48 +00:00
|
|
|
// Only set failed event if no event has been
|
|
|
|
// set yet to give dead leaders priority.
|
|
|
|
if killEvent == nil {
|
|
|
|
killTask = name
|
|
|
|
killEvent = structs.NewTaskEvent(structs.TaskSiblingFailed).
|
|
|
|
SetFailedSibling(name)
|
|
|
|
}
|
|
|
|
} else if tr.IsLeader() {
|
|
|
|
killEvent = structs.NewTaskEvent(structs.TaskLeaderDead)
|
2018-07-19 00:06:44 +00:00
|
|
|
}
|
2018-10-12 01:03:48 +00:00
|
|
|
}
|
|
|
|
|
2023-01-27 15:59:31 +00:00
|
|
|
// kill remaining live tasks
|
Task lifecycle restart (#14127)
* allocrunner: handle lifecycle when all tasks die
When all tasks die the Coordinator must transition to its terminal
state, coordinatorStatePoststop, to unblock poststop tasks. Since this
could happen at any time (for example, a prestart task dies), all states
must be able to transition to this terminal state.
* allocrunner: implement different alloc restarts
Add a new alloc restart mode where all tasks are restarted, even if they
have already exited. Also unifies the alloc restart logic to use the
implementation that restarts tasks concurrently and ignores
ErrTaskNotRunning errors since those are expected when restarting the
allocation.
* allocrunner: allow tasks to run again
Prevent the task runner Run() method from exiting to allow a dead task
to run again. When the task runner is signaled to restart, the function
will jump back to the MAIN loop and run it again.
The task runner determines if a task needs to run again based on two new
task events that were added to differentiate between a request to
restart a specific task, the tasks that are currently running, or all
tasks that have already run.
* api/cli: add support for all tasks alloc restart
Implement the new -all-tasks alloc restart CLI flag and its API
counterpar, AllTasks. The client endpoint calls the appropriate restart
method from the allocrunner depending on the restart parameters used.
* test: fix tasklifecycle Coordinator test
* allocrunner: kill taskrunners if all tasks are dead
When all non-poststop tasks are dead we need to kill the taskrunners so
we don't leak their goroutines, which are blocked in the alloc restart
loop. This also ensures the allocrunner exits on its own.
* taskrunner: fix tests that waited on WaitCh
Now that "dead" tasks may run again, the taskrunner Run() method will
not return when the task finishes running, so tests must wait for the
task state to be "dead" instead of using the WaitCh, since it won't be
closed until the taskrunner is killed.
* tests: add tests for all tasks alloc restart
* changelog: add entry for #14127
* taskrunner: fix restore logic.
The first implementation of the task runner restore process relied on
server data (`tr.Alloc().TerminalStatus()`) which may not be available
to the client at the time of restore.
It also had the incorrect code path. When restoring a dead task the
driver handle always needs to be clear cleanly using `clearDriverHandle`
otherwise, after exiting the MAIN loop, the task may be killed by
`tr.handleKill`.
The fix is to store the state of the Run() loop in the task runner local
client state: if the task runner ever exits this loop cleanly (not with
a shutdown) it will never be able to run again. So if the Run() loops
starts with this local state flag set, it must exit early.
This local state flag is also being checked on task restart requests. If
the task is "dead" and its Run() loop is not active it will never be
able to run again.
* address code review requests
* apply more code review changes
* taskrunner: add different Restart modes
Using the task event to differentiate between the allocrunner restart
methods proved to be confusing for developers to understand how it all
worked.
So instead of relying on the event type, this commit separated the logic
of restarting an taskRunner into two methods:
- `Restart` will retain the current behaviour and only will only restart
the task if it's currently running.
- `ForceRestart` is the new method where a `dead` task is allowed to
restart if its `Run()` method is still active. Callers will need to
restart the allocRunner taskCoordinator to make sure it will allow the
task to run again.
* minor fixes
2022-08-24 21:43:07 +00:00
|
|
|
if len(liveRunners) > 0 {
|
2023-01-27 15:59:31 +00:00
|
|
|
|
Task lifecycle restart (#14127)
* allocrunner: handle lifecycle when all tasks die
When all tasks die the Coordinator must transition to its terminal
state, coordinatorStatePoststop, to unblock poststop tasks. Since this
could happen at any time (for example, a prestart task dies), all states
must be able to transition to this terminal state.
* allocrunner: implement different alloc restarts
Add a new alloc restart mode where all tasks are restarted, even if they
have already exited. Also unifies the alloc restart logic to use the
implementation that restarts tasks concurrently and ignores
ErrTaskNotRunning errors since those are expected when restarting the
allocation.
* allocrunner: allow tasks to run again
Prevent the task runner Run() method from exiting to allow a dead task
to run again. When the task runner is signaled to restart, the function
will jump back to the MAIN loop and run it again.
The task runner determines if a task needs to run again based on two new
task events that were added to differentiate between a request to
restart a specific task, the tasks that are currently running, or all
tasks that have already run.
* api/cli: add support for all tasks alloc restart
Implement the new -all-tasks alloc restart CLI flag and its API
counterpar, AllTasks. The client endpoint calls the appropriate restart
method from the allocrunner depending on the restart parameters used.
* test: fix tasklifecycle Coordinator test
* allocrunner: kill taskrunners if all tasks are dead
When all non-poststop tasks are dead we need to kill the taskrunners so
we don't leak their goroutines, which are blocked in the alloc restart
loop. This also ensures the allocrunner exits on its own.
* taskrunner: fix tests that waited on WaitCh
Now that "dead" tasks may run again, the taskrunner Run() method will
not return when the task finishes running, so tests must wait for the
task state to be "dead" instead of using the WaitCh, since it won't be
closed until the taskrunner is killed.
* tests: add tests for all tasks alloc restart
* changelog: add entry for #14127
* taskrunner: fix restore logic.
The first implementation of the task runner restore process relied on
server data (`tr.Alloc().TerminalStatus()`) which may not be available
to the client at the time of restore.
It also had the incorrect code path. When restoring a dead task the
driver handle always needs to be clear cleanly using `clearDriverHandle`
otherwise, after exiting the MAIN loop, the task may be killed by
`tr.handleKill`.
The fix is to store the state of the Run() loop in the task runner local
client state: if the task runner ever exits this loop cleanly (not with
a shutdown) it will never be able to run again. So if the Run() loops
starts with this local state flag set, it must exit early.
This local state flag is also being checked on task restart requests. If
the task is "dead" and its Run() loop is not active it will never be
able to run again.
* address code review requests
* apply more code review changes
* taskrunner: add different Restart modes
Using the task event to differentiate between the allocrunner restart
methods proved to be confusing for developers to understand how it all
worked.
So instead of relying on the event type, this commit separated the logic
of restarting an taskRunner into two methods:
- `Restart` will retain the current behaviour and only will only restart
the task if it's currently running.
- `ForceRestart` is the new method where a `dead` task is allowed to
restart if its `Run()` method is still active. Callers will need to
restart the allocRunner taskCoordinator to make sure it will allow the
task to run again.
* minor fixes
2022-08-24 21:43:07 +00:00
|
|
|
// if all live runners are sidecars - kill alloc
|
|
|
|
onlySidecarsRemaining := hasSidecars && !hasNonSidecarTasks(liveRunners)
|
|
|
|
if killEvent == nil && onlySidecarsRemaining {
|
|
|
|
killEvent = structs.NewTaskEvent(structs.TaskMainDead)
|
2018-07-19 00:06:44 +00:00
|
|
|
}
|
2018-10-12 01:03:48 +00:00
|
|
|
|
Task lifecycle restart (#14127)
* allocrunner: handle lifecycle when all tasks die
When all tasks die the Coordinator must transition to its terminal
state, coordinatorStatePoststop, to unblock poststop tasks. Since this
could happen at any time (for example, a prestart task dies), all states
must be able to transition to this terminal state.
* allocrunner: implement different alloc restarts
Add a new alloc restart mode where all tasks are restarted, even if they
have already exited. Also unifies the alloc restart logic to use the
implementation that restarts tasks concurrently and ignores
ErrTaskNotRunning errors since those are expected when restarting the
allocation.
* allocrunner: allow tasks to run again
Prevent the task runner Run() method from exiting to allow a dead task
to run again. When the task runner is signaled to restart, the function
will jump back to the MAIN loop and run it again.
The task runner determines if a task needs to run again based on two new
task events that were added to differentiate between a request to
restart a specific task, the tasks that are currently running, or all
tasks that have already run.
* api/cli: add support for all tasks alloc restart
Implement the new -all-tasks alloc restart CLI flag and its API
counterpart, AllTasks. The client endpoint calls the appropriate restart
method from the allocrunner depending on the restart parameters used.
* test: fix tasklifecycle Coordinator test
* allocrunner: kill taskrunners if all tasks are dead
When all non-poststop tasks are dead we need to kill the taskrunners so
we don't leak their goroutines, which are blocked in the alloc restart
loop. This also ensures the allocrunner exits on its own.
* taskrunner: fix tests that waited on WaitCh
Now that "dead" tasks may run again, the taskrunner Run() method will
not return when the task finishes running, so tests must wait for the
task state to be "dead" instead of using the WaitCh, since it won't be
closed until the taskrunner is killed.
* tests: add tests for all tasks alloc restart
* changelog: add entry for #14127
* taskrunner: fix restore logic.
The first implementation of the task runner restore process relied on
server data (`tr.Alloc().TerminalStatus()`) which may not be available
to the client at the time of restore.
It also had the incorrect code path. When restoring a dead task the
driver handle always needs to be clear cleanly using `clearDriverHandle`
otherwise, after exiting the MAIN loop, the task may be killed by
`tr.handleKill`.
The fix is to store the state of the Run() loop in the task runner local
client state: if the task runner ever exits this loop cleanly (not with
a shutdown) it will never be able to run again. So if the Run() loops
starts with this local state flag set, it must exit early.
This local state flag is also being checked on task restart requests. If
the task is "dead" and its Run() loop is not active it will never be
able to run again.
* address code review requests
* apply more code review changes
* taskrunner: add different Restart modes
Using the task event to differentiate between the allocrunner restart
methods proved to be confusing for developers to understand how it all
worked.
So instead of relying on the event type, this commit separated the logic
of restarting an taskRunner into two methods:
- `Restart` will retain the current behaviour and only will only restart
the task if it's currently running.
- `ForceRestart` is the new method where a `dead` task is allowed to
restart if its `Run()` method is still active. Callers will need to
restart the allocRunner taskCoordinator to make sure it will allow the
task to run again.
* minor fixes
2022-08-24 21:43:07 +00:00
|
|
|
// If there's a kill event set and live runners, kill them
|
|
|
|
if killEvent != nil {
|
|
|
|
|
|
|
|
// Log kill reason
|
|
|
|
switch killEvent.Type {
|
|
|
|
case structs.TaskLeaderDead:
|
|
|
|
ar.logger.Debug("leader task dead, destroying all tasks", "leader_task", killTask)
|
|
|
|
case structs.TaskMainDead:
|
|
|
|
ar.logger.Debug("main tasks dead, destroying all sidecar tasks")
|
|
|
|
default:
|
|
|
|
ar.logger.Debug("task failure, destroying all tasks", "failed_task", killTask)
|
|
|
|
}
|
2019-01-04 23:19:57 +00:00
|
|
|
|
Task lifecycle restart (#14127)
* allocrunner: handle lifecycle when all tasks die
When all tasks die the Coordinator must transition to its terminal
state, coordinatorStatePoststop, to unblock poststop tasks. Since this
could happen at any time (for example, a prestart task dies), all states
must be able to transition to this terminal state.
* allocrunner: implement different alloc restarts
Add a new alloc restart mode where all tasks are restarted, even if they
have already exited. Also unifies the alloc restart logic to use the
implementation that restarts tasks concurrently and ignores
ErrTaskNotRunning errors since those are expected when restarting the
allocation.
* allocrunner: allow tasks to run again
Prevent the task runner Run() method from exiting to allow a dead task
to run again. When the task runner is signaled to restart, the function
will jump back to the MAIN loop and run it again.
The task runner determines if a task needs to run again based on two new
task events that were added to differentiate between a request to
restart a specific task, the tasks that are currently running, or all
tasks that have already run.
* api/cli: add support for all tasks alloc restart
Implement the new -all-tasks alloc restart CLI flag and its API
counterpar, AllTasks. The client endpoint calls the appropriate restart
method from the allocrunner depending on the restart parameters used.
* test: fix tasklifecycle Coordinator test
* allocrunner: kill taskrunners if all tasks are dead
When all non-poststop tasks are dead we need to kill the taskrunners so
we don't leak their goroutines, which are blocked in the alloc restart
loop. This also ensures the allocrunner exits on its own.
* taskrunner: fix tests that waited on WaitCh
Now that "dead" tasks may run again, the taskrunner Run() method will
not return when the task finishes running, so tests must wait for the
task state to be "dead" instead of using the WaitCh, since it won't be
closed until the taskrunner is killed.
* tests: add tests for all tasks alloc restart
* changelog: add entry for #14127
* taskrunner: fix restore logic.
The first implementation of the task runner restore process relied on
server data (`tr.Alloc().TerminalStatus()`) which may not be available
to the client at the time of restore.
It also had the incorrect code path. When restoring a dead task the
driver handle always needs to be clear cleanly using `clearDriverHandle`
otherwise, after exiting the MAIN loop, the task may be killed by
`tr.handleKill`.
The fix is to store the state of the Run() loop in the task runner local
client state: if the task runner ever exits this loop cleanly (not with
a shutdown) it will never be able to run again. So if the Run() loops
starts with this local state flag set, it must exit early.
This local state flag is also being checked on task restart requests. If
the task is "dead" and its Run() loop is not active it will never be
able to run again.
* address code review requests
* apply more code review changes
* taskrunner: add different Restart modes
Using the task event to differentiate between the allocrunner restart
methods proved to be confusing for developers to understand how it all
worked.
So instead of relying on the event type, this commit separated the logic
of restarting an taskRunner into two methods:
- `Restart` will retain the current behaviour and only will only restart
the task if it's currently running.
- `ForceRestart` is the new method where a `dead` task is allowed to
restart if its `Run()` method is still active. Callers will need to
restart the allocRunner taskCoordinator to make sure it will allow the
task to run again.
* minor fixes
2022-08-24 21:43:07 +00:00
|
|
|
// Emit kill event for live runners
|
|
|
|
for _, tr := range liveRunners {
|
|
|
|
tr.EmitEvent(killEvent)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Kill 'em all
|
|
|
|
states = ar.killTasks()
|
|
|
|
|
|
|
|
// Wait for TaskRunners to exit before continuing. This will
|
|
|
|
// prevent looping before TaskRunners have transitioned to
|
|
|
|
// Dead.
|
|
|
|
for _, tr := range liveRunners {
|
|
|
|
ar.logger.Info("waiting for task to exit", "task", tr.Task().Name)
|
|
|
|
select {
|
|
|
|
case <-tr.WaitCh():
|
|
|
|
case <-ar.waitCh:
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
} else {
|
2023-01-27 15:59:31 +00:00
|
|
|
// there are no live runners left
|
|
|
|
|
|
|
|
// run AR pre-kill hooks if this alloc is done, but not if it's because
|
|
|
|
// the agent is shutting down.
|
|
|
|
if !ar.isShuttingDown() && done {
|
|
|
|
ar.preKillHooks()
|
|
|
|
}
|
|
|
|
|
Task lifecycle restart (#14127)
* allocrunner: handle lifecycle when all tasks die
When all tasks die the Coordinator must transition to its terminal
state, coordinatorStatePoststop, to unblock poststop tasks. Since this
could happen at any time (for example, a prestart task dies), all states
must be able to transition to this terminal state.
* allocrunner: implement different alloc restarts
Add a new alloc restart mode where all tasks are restarted, even if they
have already exited. Also unifies the alloc restart logic to use the
implementation that restarts tasks concurrently and ignores
ErrTaskNotRunning errors since those are expected when restarting the
allocation.
* allocrunner: allow tasks to run again
Prevent the task runner Run() method from exiting to allow a dead task
to run again. When the task runner is signaled to restart, the function
will jump back to the MAIN loop and run it again.
The task runner determines if a task needs to run again based on two new
task events that were added to differentiate between a request to
restart a specific task, the tasks that are currently running, or all
tasks that have already run.
* api/cli: add support for all tasks alloc restart
Implement the new -all-tasks alloc restart CLI flag and its API
counterpar, AllTasks. The client endpoint calls the appropriate restart
method from the allocrunner depending on the restart parameters used.
* test: fix tasklifecycle Coordinator test
* allocrunner: kill taskrunners if all tasks are dead
When all non-poststop tasks are dead we need to kill the taskrunners so
we don't leak their goroutines, which are blocked in the alloc restart
loop. This also ensures the allocrunner exits on its own.
* taskrunner: fix tests that waited on WaitCh
Now that "dead" tasks may run again, the taskrunner Run() method will
not return when the task finishes running, so tests must wait for the
task state to be "dead" instead of using the WaitCh, since it won't be
closed until the taskrunner is killed.
* tests: add tests for all tasks alloc restart
* changelog: add entry for #14127
* taskrunner: fix restore logic.
The first implementation of the task runner restore process relied on
server data (`tr.Alloc().TerminalStatus()`) which may not be available
to the client at the time of restore.
It also had the incorrect code path. When restoring a dead task the
driver handle always needs to be clear cleanly using `clearDriverHandle`
otherwise, after exiting the MAIN loop, the task may be killed by
`tr.handleKill`.
The fix is to store the state of the Run() loop in the task runner local
client state: if the task runner ever exits this loop cleanly (not with
a shutdown) it will never be able to run again. So if the Run() loops
starts with this local state flag set, it must exit early.
This local state flag is also being checked on task restart requests. If
the task is "dead" and its Run() loop is not active it will never be
able to run again.
* address code review requests
* apply more code review changes
* taskrunner: add different Restart modes
Using the task event to differentiate between the allocrunner restart
methods proved to be confusing for developers to understand how it all
worked.
So instead of relying on the event type, this commit separated the logic
of restarting an taskRunner into two methods:
- `Restart` will retain the current behaviour and only will only restart
the task if it's currently running.
- `ForceRestart` is the new method where a `dead` task is allowed to
restart if its `Run()` method is still active. Callers will need to
restart the allocRunner taskCoordinator to make sure it will allow the
task to run again.
* minor fixes
2022-08-24 21:43:07 +00:00
|
|
|
// If there are no live runners left kill all non-poststop task
|
|
|
|
// runners to unblock them from the alloc restart loop.
|
|
|
|
for _, tr := range ar.tasks {
|
|
|
|
if tr.IsPoststopTask() {
|
|
|
|
continue
|
|
|
|
}
|
2019-01-04 23:19:57 +00:00
|
|
|
|
|
|
|
select {
|
|
|
|
case <-tr.WaitCh():
|
|
|
|
case <-ar.waitCh:
|
Task lifecycle restart (#14127)
* allocrunner: handle lifecycle when all tasks die
When all tasks die the Coordinator must transition to its terminal
state, coordinatorStatePoststop, to unblock poststop tasks. Since this
could happen at any time (for example, a prestart task dies), all states
must be able to transition to this terminal state.
* allocrunner: implement different alloc restarts
Add a new alloc restart mode where all tasks are restarted, even if they
have already exited. Also unifies the alloc restart logic to use the
implementation that restarts tasks concurrently and ignores
ErrTaskNotRunning errors since those are expected when restarting the
allocation.
* allocrunner: allow tasks to run again
Prevent the task runner Run() method from exiting to allow a dead task
to run again. When the task runner is signaled to restart, the function
will jump back to the MAIN loop and run it again.
The task runner determines if a task needs to run again based on two new
task events that were added to differentiate between a request to
restart a specific task, the tasks that are currently running, or all
tasks that have already run.
* api/cli: add support for all tasks alloc restart
Implement the new -all-tasks alloc restart CLI flag and its API
counterpar, AllTasks. The client endpoint calls the appropriate restart
method from the allocrunner depending on the restart parameters used.
* test: fix tasklifecycle Coordinator test
* allocrunner: kill taskrunners if all tasks are dead
When all non-poststop tasks are dead we need to kill the taskrunners so
we don't leak their goroutines, which are blocked in the alloc restart
loop. This also ensures the allocrunner exits on its own.
* taskrunner: fix tests that waited on WaitCh
Now that "dead" tasks may run again, the taskrunner Run() method will
not return when the task finishes running, so tests must wait for the
task state to be "dead" instead of using the WaitCh, since it won't be
closed until the taskrunner is killed.
* tests: add tests for all tasks alloc restart
* changelog: add entry for #14127
* taskrunner: fix restore logic.
The first implementation of the task runner restore process relied on
server data (`tr.Alloc().TerminalStatus()`) which may not be available
to the client at the time of restore.
It also had the incorrect code path. When restoring a dead task the
driver handle always needs to be clear cleanly using `clearDriverHandle`
otherwise, after exiting the MAIN loop, the task may be killed by
`tr.handleKill`.
The fix is to store the state of the Run() loop in the task runner local
client state: if the task runner ever exits this loop cleanly (not with
a shutdown) it will never be able to run again. So if the Run() loops
starts with this local state flag set, it must exit early.
This local state flag is also being checked on task restart requests. If
the task is "dead" and its Run() loop is not active it will never be
able to run again.
* address code review requests
* apply more code review changes
* taskrunner: add different Restart modes
Using the task event to differentiate between the allocrunner restart
methods proved to be confusing for developers to understand how it all
worked.
So instead of relying on the event type, this commit separated the logic
of restarting an taskRunner into two methods:
- `Restart` will retain the current behaviour and only will only restart
the task if it's currently running.
- `ForceRestart` is the new method where a `dead` task is allowed to
restart if its `Run()` method is still active. Callers will need to
restart the allocRunner taskCoordinator to make sure it will allow the
task to run again.
* minor fixes
2022-08-24 21:43:07 +00:00
|
|
|
default:
|
|
|
|
// Kill task runner without setting an event because the
|
|
|
|
// task is already dead, it's just waiting in the alloc
|
|
|
|
// restart loop.
|
|
|
|
err := tr.Kill(context.TODO(), nil)
|
|
|
|
if err != nil {
|
|
|
|
ar.logger.Warn("failed to kill task", "task", tr.Task().Name, "error", err)
|
|
|
|
}
|
2019-01-04 23:19:57 +00:00
|
|
|
}
|
|
|
|
}
|
2018-07-19 00:06:44 +00:00
|
|
|
}
|
|
|
|
|
2022-08-22 22:38:49 +00:00
|
|
|
ar.taskCoordinator.TaskStateUpdated(states)
|
2019-12-04 20:44:21 +00:00
|
|
|
|
2018-10-12 01:03:48 +00:00
|
|
|
// Get the client allocation
|
|
|
|
calloc := ar.clientAlloc(states)
|
2018-07-19 00:06:44 +00:00
|
|
|
|
2018-10-12 01:03:48 +00:00
|
|
|
// Update the server
|
|
|
|
ar.stateUpdater.AllocStateUpdated(calloc)
|
2018-08-23 19:03:17 +00:00
|
|
|
|
2018-10-12 01:03:48 +00:00
|
|
|
// Broadcast client alloc to listeners
|
|
|
|
ar.allocBroadcaster.Send(calloc)
|
|
|
|
}
|
2018-07-18 20:45:55 +00:00
|
|
|
}
|
|
|
|
|
2022-08-22 22:38:49 +00:00
|
|
|
// hasNonSidecarTasks returns false if all the passed tasks are sidecar tasks
|
|
|
|
func hasNonSidecarTasks(tasks []*taskrunner.TaskRunner) bool {
|
|
|
|
for _, tr := range tasks {
|
|
|
|
if !tr.IsSidecarTask() {
|
|
|
|
return true
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return false
|
|
|
|
}
|
|
|
|
|
|
|
|
// hasSidecarTasks returns true if any of the passed tasks are sidecar tasks
|
|
|
|
func hasSidecarTasks(tasks map[string]*taskrunner.TaskRunner) bool {
|
|
|
|
for _, tr := range tasks {
|
|
|
|
if tr.IsSidecarTask() {
|
|
|
|
return true
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return false
|
|
|
|
}
|
|
|
|
|
2018-10-16 03:38:12 +00:00
|
|
|
// killTasks kills all task runners, leader (if there is one) first. Errors are
|
2018-11-05 23:11:10 +00:00
|
|
|
// logged except taskrunner.ErrTaskNotRunning which is ignored. Task states
|
|
|
|
// after Kill has been called are returned.
|
|
|
|
func (ar *allocRunner) killTasks() map[string]*structs.TaskState {
|
|
|
|
var mu sync.Mutex
|
|
|
|
states := make(map[string]*structs.TaskState, len(ar.tasks))
|
|
|
|
|
2019-11-18 16:16:25 +00:00
|
|
|
// run alloc prekill hooks
|
|
|
|
ar.preKillHooks()
|
|
|
|
|
2018-10-16 03:38:12 +00:00
|
|
|
// Kill leader first, synchronously
|
|
|
|
for name, tr := range ar.tasks {
|
|
|
|
if !tr.IsLeader() {
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
|
2019-07-09 14:37:09 +00:00
|
|
|
taskEvent := structs.NewTaskEvent(structs.TaskKilling)
|
2022-07-06 19:44:58 +00:00
|
|
|
taskEvent.SetKillTimeout(tr.Task().KillTimeout, ar.clientConfig.MaxKillTimeout)
|
2019-07-09 14:37:09 +00:00
|
|
|
err := tr.Kill(context.TODO(), taskEvent)
|
2018-10-16 22:17:36 +00:00
|
|
|
if err != nil && err != taskrunner.ErrTaskNotRunning {
|
2018-10-16 03:38:12 +00:00
|
|
|
ar.logger.Warn("error stopping leader task", "error", err, "task_name", name)
|
|
|
|
}
|
2018-11-05 23:11:10 +00:00
|
|
|
|
2022-06-07 15:35:19 +00:00
|
|
|
taskState := tr.TaskState()
|
|
|
|
states[name] = taskState
|
2018-10-16 03:38:12 +00:00
|
|
|
break
|
|
|
|
}
|
|
|
|
|
Task lifecycle restart (#14127)
* allocrunner: handle lifecycle when all tasks die
When all tasks die the Coordinator must transition to its terminal
state, coordinatorStatePoststop, to unblock poststop tasks. Since this
could happen at any time (for example, a prestart task dies), all states
must be able to transition to this terminal state.
* allocrunner: implement different alloc restarts
Add a new alloc restart mode where all tasks are restarted, even if they
have already exited. Also unifies the alloc restart logic to use the
implementation that restarts tasks concurrently and ignores
ErrTaskNotRunning errors since those are expected when restarting the
allocation.
* allocrunner: allow tasks to run again
Prevent the task runner Run() method from exiting to allow a dead task
to run again. When the task runner is signaled to restart, the function
will jump back to the MAIN loop and run it again.
The task runner determines if a task needs to run again based on two new
task events that were added to differentiate between a request to
restart a specific task, the tasks that are currently running, or all
tasks that have already run.
* api/cli: add support for all tasks alloc restart
Implement the new -all-tasks alloc restart CLI flag and its API
counterpar, AllTasks. The client endpoint calls the appropriate restart
method from the allocrunner depending on the restart parameters used.
* test: fix tasklifecycle Coordinator test
* allocrunner: kill taskrunners if all tasks are dead
When all non-poststop tasks are dead we need to kill the taskrunners so
we don't leak their goroutines, which are blocked in the alloc restart
loop. This also ensures the allocrunner exits on its own.
* taskrunner: fix tests that waited on WaitCh
Now that "dead" tasks may run again, the taskrunner Run() method will
not return when the task finishes running, so tests must wait for the
task state to be "dead" instead of using the WaitCh, since it won't be
closed until the taskrunner is killed.
* tests: add tests for all tasks alloc restart
* changelog: add entry for #14127
* taskrunner: fix restore logic.
The first implementation of the task runner restore process relied on
server data (`tr.Alloc().TerminalStatus()`) which may not be available
to the client at the time of restore.
It also had the incorrect code path. When restoring a dead task the
driver handle always needs to be clear cleanly using `clearDriverHandle`
otherwise, after exiting the MAIN loop, the task may be killed by
`tr.handleKill`.
The fix is to store the state of the Run() loop in the task runner local
client state: if the task runner ever exits this loop cleanly (not with
a shutdown) it will never be able to run again. So if the Run() loops
starts with this local state flag set, it must exit early.
This local state flag is also being checked on task restart requests. If
the task is "dead" and its Run() loop is not active it will never be
able to run again.
* address code review requests
* apply more code review changes
* taskrunner: add different Restart modes
Using the task event to differentiate between the allocrunner restart
methods proved to be confusing for developers to understand how it all
worked.
So instead of relying on the event type, this commit separated the logic
of restarting an taskRunner into two methods:
- `Restart` will retain the current behaviour and only will only restart
the task if it's currently running.
- `ForceRestart` is the new method where a `dead` task is allowed to
restart if its `Run()` method is still active. Callers will need to
restart the allocRunner taskCoordinator to make sure it will allow the
task to run again.
* minor fixes
2022-08-24 21:43:07 +00:00
|
|
|
// Kill the rest non-sidecar and non-poststop tasks concurrently
|
2018-10-16 03:38:12 +00:00
|
|
|
wg := sync.WaitGroup{}
|
|
|
|
for name, tr := range ar.tasks {
|
2022-06-07 15:35:19 +00:00
|
|
|
// Filter out poststop and sidecar tasks so that they stop after all the other tasks are killed
|
|
|
|
if tr.IsLeader() || tr.IsPoststopTask() || tr.IsSidecarTask() {
|
2018-10-16 03:38:12 +00:00
|
|
|
continue
|
|
|
|
}
|
|
|
|
|
|
|
|
wg.Add(1)
|
|
|
|
go func(name string, tr *taskrunner.TaskRunner) {
|
|
|
|
defer wg.Done()
|
2019-07-09 14:37:09 +00:00
|
|
|
taskEvent := structs.NewTaskEvent(structs.TaskKilling)
|
2022-07-06 19:44:58 +00:00
|
|
|
taskEvent.SetKillTimeout(tr.Task().KillTimeout, ar.clientConfig.MaxKillTimeout)
|
2019-07-09 14:37:09 +00:00
|
|
|
err := tr.Kill(context.TODO(), taskEvent)
|
2018-10-16 03:38:12 +00:00
|
|
|
if err != nil && err != taskrunner.ErrTaskNotRunning {
|
|
|
|
ar.logger.Warn("error stopping task", "error", err, "task_name", name)
|
|
|
|
}
|
2018-11-05 23:11:10 +00:00
|
|
|
|
2022-06-07 15:35:19 +00:00
|
|
|
taskState := tr.TaskState()
|
2018-11-05 23:11:10 +00:00
|
|
|
mu.Lock()
|
2022-06-07 15:35:19 +00:00
|
|
|
states[name] = taskState
|
|
|
|
mu.Unlock()
|
|
|
|
}(name, tr)
|
|
|
|
}
|
|
|
|
wg.Wait()
|
|
|
|
|
|
|
|
// Kill the sidecar tasks last.
|
|
|
|
for name, tr := range ar.tasks {
|
|
|
|
if !tr.IsSidecarTask() || tr.IsLeader() || tr.IsPoststopTask() {
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
|
|
|
|
wg.Add(1)
|
|
|
|
go func(name string, tr *taskrunner.TaskRunner) {
|
|
|
|
defer wg.Done()
|
|
|
|
taskEvent := structs.NewTaskEvent(structs.TaskKilling)
|
2022-07-06 19:44:58 +00:00
|
|
|
taskEvent.SetKillTimeout(tr.Task().KillTimeout, ar.clientConfig.MaxKillTimeout)
|
2022-06-07 15:35:19 +00:00
|
|
|
err := tr.Kill(context.TODO(), taskEvent)
|
|
|
|
if err != nil && err != taskrunner.ErrTaskNotRunning {
|
|
|
|
ar.logger.Warn("error stopping sidecar task", "error", err, "task_name", name)
|
|
|
|
}
|
|
|
|
|
|
|
|
taskState := tr.TaskState()
|
|
|
|
mu.Lock()
|
|
|
|
states[name] = taskState
|
2018-11-05 23:11:10 +00:00
|
|
|
mu.Unlock()
|
2018-10-16 03:38:12 +00:00
|
|
|
}(name, tr)
|
|
|
|
}
|
|
|
|
wg.Wait()
|
2018-11-05 23:11:10 +00:00
|
|
|
|
|
|
|
return states
|
2018-10-16 03:38:12 +00:00
|
|
|
}
|
|
|
|
|
2018-07-19 00:06:44 +00:00
|
|
|
// clientAlloc takes in the task states and returns an Allocation populated
|
|
|
|
// with Client specific fields
|
|
|
|
func (ar *allocRunner) clientAlloc(taskStates map[string]*structs.TaskState) *structs.Allocation {
|
2018-10-12 01:03:48 +00:00
|
|
|
ar.stateLock.Lock()
|
|
|
|
defer ar.stateLock.Unlock()
|
2018-07-19 00:06:44 +00:00
|
|
|
|
2018-09-27 00:08:43 +00:00
|
|
|
// store task states for AllocState to expose
|
|
|
|
ar.state.TaskStates = taskStates
|
|
|
|
|
2018-07-19 00:06:44 +00:00
|
|
|
a := &structs.Allocation{
|
|
|
|
ID: ar.id,
|
|
|
|
TaskStates: taskStates,
|
|
|
|
}
|
|
|
|
|
2018-09-27 00:08:43 +00:00
|
|
|
if d := ar.state.DeploymentStatus; d != nil {
|
2018-07-19 00:06:44 +00:00
|
|
|
a.DeploymentStatus = d.Copy()
|
|
|
|
}
|
|
|
|
|
|
|
|
// Compute the ClientStatus
|
2018-09-27 00:08:43 +00:00
|
|
|
if ar.state.ClientStatus != "" {
|
2018-07-19 00:06:44 +00:00
|
|
|
// The client status is being forced
|
2018-09-27 00:08:43 +00:00
|
|
|
a.ClientStatus, a.ClientDescription = ar.state.ClientStatus, ar.state.ClientDescription
|
2018-07-19 00:06:44 +00:00
|
|
|
} else {
|
|
|
|
a.ClientStatus, a.ClientDescription = getClientStatus(taskStates)
|
|
|
|
}
|
|
|
|
|
|
|
|
// If the allocation is terminal, make sure all required fields are properly
|
|
|
|
// set.
|
|
|
|
if a.ClientTerminalStatus() {
|
|
|
|
alloc := ar.Alloc()
|
|
|
|
|
2019-05-03 15:01:30 +00:00
|
|
|
// If we are part of a deployment and the alloc has failed, mark the
|
2018-07-19 00:06:44 +00:00
|
|
|
// alloc as unhealthy. This guards against the watcher not be started.
|
2019-05-03 03:59:56 +00:00
|
|
|
// If the health status is already set then terminal allocations should not
|
2018-07-19 00:06:44 +00:00
|
|
|
if a.ClientStatus == structs.AllocClientStatusFailed &&
|
2019-05-03 15:00:17 +00:00
|
|
|
alloc.DeploymentID != "" && !a.DeploymentStatus.HasHealth() {
|
2018-07-19 00:06:44 +00:00
|
|
|
a.DeploymentStatus = &structs.AllocDeploymentStatus{
|
2022-08-17 16:26:34 +00:00
|
|
|
Healthy: pointer.Of(false),
|
2018-07-19 00:06:44 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// Make sure we have marked the finished at for every task. This is used
|
|
|
|
// to calculate the reschedule time for failed allocations.
|
|
|
|
now := time.Now()
|
2019-06-10 21:24:41 +00:00
|
|
|
for taskName := range ar.tasks {
|
|
|
|
ts, ok := a.TaskStates[taskName]
|
2018-07-19 00:06:44 +00:00
|
|
|
if !ok {
|
|
|
|
ts = &structs.TaskState{}
|
2019-06-10 21:24:41 +00:00
|
|
|
a.TaskStates[taskName] = ts
|
2018-07-19 00:06:44 +00:00
|
|
|
}
|
|
|
|
if ts.FinishedAt.IsZero() {
|
|
|
|
ts.FinishedAt = now
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2020-10-12 17:43:04 +00:00
|
|
|
// Set the NetworkStatus and default DNSConfig if one is not returned from the client
|
|
|
|
netStatus := ar.state.NetworkStatus
|
|
|
|
if netStatus != nil {
|
|
|
|
a.NetworkStatus = netStatus
|
|
|
|
} else {
|
|
|
|
a.NetworkStatus = new(structs.AllocNetworkStatus)
|
|
|
|
}
|
|
|
|
|
|
|
|
if a.NetworkStatus.DNS == nil {
|
|
|
|
alloc := ar.Alloc()
|
|
|
|
nws := alloc.Job.LookupTaskGroup(alloc.TaskGroup).Networks
|
|
|
|
if len(nws) > 0 {
|
|
|
|
a.NetworkStatus.DNS = nws[0].DNS.Copy()
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2018-07-19 00:06:44 +00:00
|
|
|
return a
|
|
|
|
}
|
|
|
|
|
|
|
|
// getClientStatus takes in the task states for a given allocation and computes
|
|
|
|
// the client status and description
|
|
|
|
func getClientStatus(taskStates map[string]*structs.TaskState) (status, description string) {
|
|
|
|
var pending, running, dead, failed bool
|
|
|
|
for _, state := range taskStates {
|
|
|
|
switch state.State {
|
|
|
|
case structs.TaskStateRunning:
|
|
|
|
running = true
|
|
|
|
case structs.TaskStatePending:
|
|
|
|
pending = true
|
|
|
|
case structs.TaskStateDead:
|
|
|
|
if state.Failed {
|
|
|
|
failed = true
|
|
|
|
} else {
|
|
|
|
dead = true
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// Determine the alloc status
|
|
|
|
if failed {
|
|
|
|
return structs.AllocClientStatusFailed, "Failed tasks"
|
|
|
|
} else if running {
|
|
|
|
return structs.AllocClientStatusRunning, "Tasks are running"
|
|
|
|
} else if pending {
|
|
|
|
return structs.AllocClientStatusPending, "No tasks have started"
|
|
|
|
} else if dead {
|
|
|
|
return structs.AllocClientStatusComplete, "All tasks have completed"
|
|
|
|
}
|
|
|
|
|
|
|
|
return "", ""
|
|
|
|
}
|
|
|
|
|
2019-01-09 16:16:33 +00:00
|
|
|
// SetClientStatus is a helper for forcing a specific client
|
|
|
|
// status on the alloc runner. This is used during restore errors
|
|
|
|
// when the task state can't be restored.
|
|
|
|
func (ar *allocRunner) SetClientStatus(clientStatus string) {
|
|
|
|
ar.stateLock.Lock()
|
|
|
|
defer ar.stateLock.Unlock()
|
|
|
|
ar.state.ClientStatus = clientStatus
|
|
|
|
}
|
|
|
|
|
2020-10-12 17:43:04 +00:00
|
|
|
func (ar *allocRunner) SetNetworkStatus(s *structs.AllocNetworkStatus) {
|
|
|
|
ar.stateLock.Lock()
|
|
|
|
defer ar.stateLock.Unlock()
|
|
|
|
ar.state.NetworkStatus = s.Copy()
|
|
|
|
}
|
|
|
|
|
2020-10-15 19:32:21 +00:00
|
|
|
func (ar *allocRunner) NetworkStatus() *structs.AllocNetworkStatus {
|
|
|
|
ar.stateLock.Lock()
|
|
|
|
defer ar.stateLock.Unlock()
|
|
|
|
return ar.state.NetworkStatus.Copy()
|
|
|
|
}
|
|
|
|
|
2022-04-06 13:33:32 +00:00
|
|
|
// setIndexes is a helper for forcing alloc state on the alloc runner. This is
|
|
|
|
// used during reconnect when the task has been marked unknown by the server.
|
2022-03-02 10:47:26 +00:00
|
|
|
func (ar *allocRunner) setIndexes(update *structs.Allocation) {
|
|
|
|
ar.allocLock.Lock()
|
|
|
|
defer ar.allocLock.Unlock()
|
|
|
|
ar.alloc.AllocModifyIndex = update.AllocModifyIndex
|
|
|
|
ar.alloc.ModifyIndex = update.ModifyIndex
|
|
|
|
ar.alloc.ModifyTime = update.ModifyTime
|
|
|
|
}
|
|
|
|
|
2018-09-27 00:08:43 +00:00
|
|
|
// AllocState returns a copy of allocation state including a snapshot of task
|
|
|
|
// states.
|
|
|
|
func (ar *allocRunner) AllocState() *state.State {
|
2018-09-28 00:30:10 +00:00
|
|
|
ar.stateLock.RLock()
|
|
|
|
state := ar.state.Copy()
|
|
|
|
ar.stateLock.RUnlock()
|
2018-09-27 00:08:43 +00:00
|
|
|
|
|
|
|
// If TaskStateUpdated has not been called yet, ar.state.TaskStates
|
|
|
|
// won't be set as it is not the canonical source of TaskStates.
|
2018-09-28 00:30:10 +00:00
|
|
|
if len(state.TaskStates) == 0 {
|
2018-09-27 00:08:43 +00:00
|
|
|
ar.state.TaskStates = make(map[string]*structs.TaskState, len(ar.tasks))
|
|
|
|
for k, tr := range ar.tasks {
|
2018-09-28 00:30:10 +00:00
|
|
|
state.TaskStates[k] = tr.TaskState()
|
2018-09-27 00:08:43 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2018-11-01 00:41:37 +00:00
|
|
|
// Generate alloc to get other state fields
|
|
|
|
alloc := ar.clientAlloc(state.TaskStates)
|
|
|
|
state.ClientStatus = alloc.ClientStatus
|
|
|
|
state.ClientDescription = alloc.ClientDescription
|
|
|
|
state.DeploymentStatus = alloc.DeploymentStatus
|
|
|
|
|
2018-09-28 00:30:10 +00:00
|
|
|
return state
|
2018-09-27 00:08:43 +00:00
|
|
|
}
|
|
|
|
|
2018-12-17 12:27:54 +00:00
|
|
|
// Update asynchronously updates the running allocation with a new version
// received from the server.
// When processing a new update, we will first attempt to drain stale updates
// from the queue, before appending the new one.
func (ar *allocRunner) Update(update *structs.Allocation) {
	select {
	// Drain queued update from the channel if possible, and check the modify
	// index
	case oldUpdate := <-ar.allocUpdatedCh:
		// If the old update is newer than the replacement, then skip the new one
		// and return. This case shouldn't happen, but may in the case of a bug
		// elsewhere inside the system.
		if oldUpdate.AllocModifyIndex > update.AllocModifyIndex {
			ar.logger.Debug("Discarding allocation update due to newer alloc revision in queue",
				"old_modify_index", oldUpdate.AllocModifyIndex,
				"new_modify_index", update.AllocModifyIndex)
			// Put the newer queued update back so it is still processed.
			ar.allocUpdatedCh <- oldUpdate
			return
		} else {
			// The drained update is stale; log and drop it in favor of the
			// newer one queued below.
			ar.logger.Debug("Discarding allocation update",
				"skipped_modify_index", oldUpdate.AllocModifyIndex,
				"new_modify_index", update.AllocModifyIndex)
		}
	case <-ar.waitCh:
		// Runner already terminated; there is no consumer for the update.
		ar.logger.Trace("AllocRunner has terminated, skipping alloc update",
			"modify_index", update.AllocModifyIndex)
		return
	default:
	}

	// Cancel any pending shutdown delay if the server asked us to skip it.
	if update.DesiredTransition.ShouldIgnoreShutdownDelay() {
		ar.shutdownDelayCancelFn()
	}

	// Queue the new update
	ar.allocUpdatedCh <- update
}
|
|
|
|
|
|
|
|
func (ar *allocRunner) handleAllocUpdates() {
|
|
|
|
for {
|
|
|
|
select {
|
|
|
|
case update := <-ar.allocUpdatedCh:
|
|
|
|
ar.handleAllocUpdate(update)
|
|
|
|
case <-ar.waitCh:
|
2018-12-19 17:32:51 +00:00
|
|
|
return
|
2018-12-17 12:27:54 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2018-08-01 18:03:52 +00:00
|
|
|
// This method sends the updated alloc to Run for serially processing updates.
// If there is already a pending update it will be discarded and replaced by
// the latest update.
func (ar *allocRunner) handleAllocUpdate(update *structs.Allocation) {
	// Detect Stop updates: the alloc transitions from non-terminal to
	// terminal with this update. Must be computed before setAlloc below
	// overwrites the old alloc.
	stopping := !ar.Alloc().TerminalStatus() && update.TerminalStatus()

	// Update ar.alloc
	ar.setAlloc(update)

	// Run update hooks if not stopping or dead
	if !update.TerminalStatus() {
		if err := ar.update(update); err != nil {
			ar.logger.Error("error running update hooks", "error", err)
		}

	}

	// Update task runners
	for _, tr := range ar.tasks {
		tr.Update(update)
	}

	// If alloc is being terminated, kill all tasks, leader first
	if stopping {
		ar.killTasks()
	}

}
|
2018-06-29 00:01:05 +00:00
|
|
|
|
2018-08-23 19:03:17 +00:00
|
|
|
func (ar *allocRunner) Listener() *cstructs.AllocListener {
|
|
|
|
return ar.allocBroadcaster.Listen()
|
|
|
|
}
|
|
|
|
|
2018-12-14 15:02:47 +00:00
|
|
|
// destroyImpl stops any running tasks, persists their final state, waits for
// the runner to exit, runs destroy hooks, and removes the allocation's local
// state. Destroy() guarantees this is dispatched at most once.
func (ar *allocRunner) destroyImpl() {
	// Stop any running tasks and persist states in case the client is
	// shutdown before Destroy finishes.
	states := ar.killTasks()
	calloc := ar.clientAlloc(states)
	ar.stateUpdater.AllocStateUpdated(calloc)

	// Wait for tasks to exit and postrun hooks to finish
	<-ar.waitCh

	// Run destroy hooks
	if err := ar.destroy(); err != nil {
		ar.logger.Warn("error running destroy hooks", "error", err)
	}

	// Wait for task state update handler to exit before removing local
	// state if Run() ran at all.
	<-ar.taskStateUpdateHandlerCh

	// Mark alloc as destroyed
	ar.destroyedLock.Lock()

	// Cleanup state db; while holding the lock to avoid
	// a race periodic PersistState that may resurrect the alloc
	if err := ar.stateDB.DeleteAllocationBucket(ar.id); err != nil {
		ar.logger.Warn("failed to delete allocation state", "error", err)
	}

	// Destroy implies shutdown: close shutdownCh here too unless Shutdown()
	// already closed it.
	if !ar.shutdown {
		ar.shutdown = true
		close(ar.shutdownCh)
	}

	ar.destroyed = true
	close(ar.destroyCh)

	ar.destroyedLock.Unlock()
}
|
|
|
|
|
2019-08-25 15:03:49 +00:00
|
|
|
// PersistState persists the alloc runner's current state to the local state
// database. If the runner has already been destroyed, any lingering local
// state is deleted instead (best-effort; errors are only logged).
func (ar *allocRunner) PersistState() error {
	ar.destroyedLock.Lock()
	defer ar.destroyedLock.Unlock()

	if ar.destroyed {
		err := ar.stateDB.DeleteAllocationBucket(ar.id, cstate.WithBatchMode())
		if err != nil {
			ar.logger.Warn("failed to delete allocation bucket", "error", err)
		}
		return nil
	}

	// persist network status, wrapping in a func to release state lock as early as possible
	err := func() error {
		ar.stateLock.Lock()
		defer ar.stateLock.Unlock()
		if ar.state.NetworkStatus != nil {
			err := ar.stateDB.PutNetworkStatus(ar.id, ar.state.NetworkStatus, cstate.WithBatchMode())
			if err != nil {
				return err
			}
		}
		return nil
	}()
	if err != nil {
		return err
	}

	// TODO: consider persisting deployment state along with task status.
	// While we study why only the alloc is persisted, I opted to maintain current
	// behavior and not risk adding yet more IO calls unnecessarily.
	return ar.stateDB.PutAllocation(ar.Alloc(), cstate.WithBatchMode())
}
|
|
|
|
|
2018-12-14 15:02:47 +00:00
|
|
|
// Destroy the alloc runner by stopping it if it is still running and cleaning
// up all of its resources.
//
// This method is safe for calling concurrently with Run() and will cause it to
// exit (thus closing WaitCh).
// When the destroy action is completed, it will close DestroyCh().
func (ar *allocRunner) Destroy() {
	ar.destroyedLock.Lock()
	defer ar.destroyedLock.Unlock()

	if ar.destroyed {
		// Only destroy once
		return
	}

	if ar.destroyLaunched {
		// Only dispatch a destroy once
		return
	}

	ar.destroyLaunched = true

	// Synchronize calls to shutdown/destroy
	if ar.shutdownLaunched {
		// A graceful shutdown is already in flight; wait for it to finish
		// before destroying so the two paths don't race.
		go func() {
			ar.logger.Debug("Waiting for shutdown before destroying runner")
			<-ar.shutdownCh
			ar.destroyImpl()
		}()

		return
	}

	go ar.destroyImpl()
}
|
|
|
|
|
|
|
|
// IsDestroyed returns true if the alloc runner has been destroyed (stopped and
|
|
|
|
// garbage collected).
|
|
|
|
//
|
|
|
|
// This method is safe for calling concurrently with Run(). Callers must
|
|
|
|
// receive on WaitCh() to block until alloc runner has stopped and been
|
|
|
|
// destroyed.
|
|
|
|
func (ar *allocRunner) IsDestroyed() bool {
|
2018-08-23 19:03:17 +00:00
|
|
|
ar.destroyedLock.Lock()
|
|
|
|
defer ar.destroyedLock.Unlock()
|
|
|
|
return ar.destroyed
|
2018-06-29 00:01:05 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
// IsWaiting returns true if the alloc runner is waiting for its previous
|
|
|
|
// allocation to terminate.
|
|
|
|
//
|
|
|
|
// This method is safe for calling concurrently with Run().
|
|
|
|
func (ar *allocRunner) IsWaiting() bool {
|
2018-12-06 11:15:59 +00:00
|
|
|
return ar.prevAllocWatcher.IsWaiting()
|
2018-06-29 00:01:05 +00:00
|
|
|
}
|
|
|
|
|
2019-09-19 15:42:17 +00:00
|
|
|
// isShuttingDown returns true if the alloc runner is in a shutdown state
|
|
|
|
// due to a call to Shutdown() or Destroy()
|
|
|
|
func (ar *allocRunner) isShuttingDown() bool {
|
|
|
|
ar.destroyedLock.Lock()
|
|
|
|
defer ar.destroyedLock.Unlock()
|
|
|
|
return ar.shutdownLaunched
|
|
|
|
}
|
|
|
|
|
2018-12-18 22:28:48 +00:00
|
|
|
// DestroyCh is a channel that is closed when an allocrunner is closed due to
|
|
|
|
// an explicit call to Destroy().
|
2018-12-14 15:02:47 +00:00
|
|
|
func (ar *allocRunner) DestroyCh() <-chan struct{} {
|
|
|
|
return ar.destroyCh
|
|
|
|
}
|
|
|
|
|
2018-12-18 22:28:48 +00:00
|
|
|
// ShutdownCh is a channel that is closed when an allocrunner is closed due to
|
|
|
|
// either an explicit call to Shutdown(), or Destroy().
|
2018-12-14 15:02:47 +00:00
|
|
|
func (ar *allocRunner) ShutdownCh() <-chan struct{} {
|
|
|
|
return ar.shutdownCh
|
|
|
|
}
|
|
|
|
|
2018-12-18 22:28:48 +00:00
|
|
|
// Shutdown AllocRunner gracefully. Asynchronously shuts down all TaskRunners.
// Tasks are unaffected and may be restored.
// When the destroy action is completed, it will close ShutdownCh().
func (ar *allocRunner) Shutdown() {
	ar.destroyedLock.Lock()
	defer ar.destroyedLock.Unlock()

	// Destroy is a superset of Shutdown so there's nothing to do if this
	// has already been destroyed.
	if ar.destroyed {
		return
	}

	// Destroy is a superset of Shutdown so if it's been marked for destruction,
	// don't try and shutdown in parallel. If shutdown has been launched, don't
	// try again.
	if ar.destroyLaunched || ar.shutdownLaunched {
		return
	}

	ar.shutdownLaunched = true

	go func() {
		ar.logger.Trace("shutting down")

		// Shutdown tasks gracefully if they were run; fan out one goroutine
		// per task runner and wait for all of them.
		wg := sync.WaitGroup{}
		for _, tr := range ar.tasks {
			wg.Add(1)
			go func(tr *taskrunner.TaskRunner) {
				tr.Shutdown()
				wg.Done()
			}(tr)
		}
		wg.Wait()

		// Wait for Run to exit
		<-ar.waitCh

		// Run shutdown hooks
		ar.shutdownHooks()

		// Wait for updater to finish its final run
		<-ar.taskStateUpdateHandlerCh

		// Mark shutdown complete and signal waiters.
		ar.destroyedLock.Lock()
		ar.shutdown = true
		close(ar.shutdownCh)
		ar.destroyedLock.Unlock()
	}()
}
|
|
|
|
|
2018-06-29 00:01:05 +00:00
|
|
|
// IsMigrating returns true if the alloc runner is migrating data from its
|
|
|
|
// previous allocation.
|
|
|
|
//
|
|
|
|
// This method is safe for calling concurrently with Run().
|
|
|
|
func (ar *allocRunner) IsMigrating() bool {
|
2018-12-06 11:15:59 +00:00
|
|
|
return ar.prevAllocMigrator.IsMigrating()
|
2018-06-29 00:01:05 +00:00
|
|
|
}
|
|
|
|
|
2018-10-04 22:45:46 +00:00
|
|
|
func (ar *allocRunner) StatsReporter() interfaces.AllocStatsReporter {
|
2018-09-15 00:08:26 +00:00
|
|
|
return ar
|
2018-06-29 00:01:05 +00:00
|
|
|
}
|
|
|
|
|
2018-09-15 00:08:26 +00:00
|
|
|
// LatestAllocStats returns the latest stats for an allocation. If taskFilter
|
|
|
|
// is set, only stats for that task -- if it exists -- are returned.
|
|
|
|
func (ar *allocRunner) LatestAllocStats(taskFilter string) (*cstructs.AllocResourceUsage, error) {
|
|
|
|
astat := &cstructs.AllocResourceUsage{
|
|
|
|
Tasks: make(map[string]*cstructs.TaskResourceUsage, len(ar.tasks)),
|
|
|
|
ResourceUsage: &cstructs.ResourceUsage{
|
|
|
|
MemoryStats: &cstructs.MemoryStats{},
|
|
|
|
CpuStats: &cstructs.CpuStats{},
|
2018-11-15 15:13:14 +00:00
|
|
|
DeviceStats: []*device.DeviceGroupStats{},
|
2018-09-15 00:08:26 +00:00
|
|
|
},
|
|
|
|
}
|
|
|
|
|
|
|
|
for name, tr := range ar.tasks {
|
|
|
|
if taskFilter != "" && taskFilter != name {
|
|
|
|
// Getting stats for a particular task and its not this one!
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
|
|
|
|
if usage := tr.LatestResourceUsage(); usage != nil {
|
|
|
|
astat.Tasks[name] = usage
|
|
|
|
astat.ResourceUsage.Add(usage.ResourceUsage)
|
|
|
|
if usage.Timestamp > astat.Timestamp {
|
|
|
|
astat.Timestamp = usage.Timestamp
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
2018-06-29 00:01:05 +00:00
|
|
|
|
2018-09-15 00:08:26 +00:00
|
|
|
return astat, nil
|
2018-06-29 00:01:05 +00:00
|
|
|
}
|
2018-12-18 03:36:06 +00:00
|
|
|
|
|
|
|
func (ar *allocRunner) GetTaskEventHandler(taskName string) drivermanager.EventHandler {
|
|
|
|
if tr, ok := ar.tasks[taskName]; ok {
|
|
|
|
return func(ev *drivers.TaskEvent) {
|
|
|
|
tr.EmitEvent(&structs.TaskEvent{
|
|
|
|
Type: structs.TaskDriverMessage,
|
|
|
|
Time: ev.Timestamp.UnixNano(),
|
|
|
|
Details: ev.Annotations,
|
|
|
|
DriverMessage: ev.Message,
|
|
|
|
})
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return nil
|
|
|
|
}
|
2019-04-01 12:56:02 +00:00
|
|
|
|
Task lifecycle restart (#14127)
* allocrunner: handle lifecycle when all tasks die
When all tasks die the Coordinator must transition to its terminal
state, coordinatorStatePoststop, to unblock poststop tasks. Since this
could happen at any time (for example, a prestart task dies), all states
must be able to transition to this terminal state.
* allocrunner: implement different alloc restarts
Add a new alloc restart mode where all tasks are restarted, even if they
have already exited. Also unifies the alloc restart logic to use the
implementation that restarts tasks concurrently and ignores
ErrTaskNotRunning errors since those are expected when restarting the
allocation.
* allocrunner: allow tasks to run again
Prevent the task runner Run() method from exiting to allow a dead task
to run again. When the task runner is signaled to restart, the function
will jump back to the MAIN loop and run it again.
The task runner determines if a task needs to run again based on two new
task events that were added to differentiate between a request to
restart a specific task, the tasks that are currently running, or all
tasks that have already run.
* api/cli: add support for all tasks alloc restart
Implement the new -all-tasks alloc restart CLI flag and its API
counterpar, AllTasks. The client endpoint calls the appropriate restart
method from the allocrunner depending on the restart parameters used.
* test: fix tasklifecycle Coordinator test
* allocrunner: kill taskrunners if all tasks are dead
When all non-poststop tasks are dead we need to kill the taskrunners so
we don't leak their goroutines, which are blocked in the alloc restart
loop. This also ensures the allocrunner exits on its own.
* taskrunner: fix tests that waited on WaitCh
Now that "dead" tasks may run again, the taskrunner Run() method will
not return when the task finishes running, so tests must wait for the
task state to be "dead" instead of using the WaitCh, since it won't be
closed until the taskrunner is killed.
* tests: add tests for all tasks alloc restart
* changelog: add entry for #14127
* taskrunner: fix restore logic.
The first implementation of the task runner restore process relied on
server data (`tr.Alloc().TerminalStatus()`) which may not be available
to the client at the time of restore.
It also had the incorrect code path. When restoring a dead task the
driver handle always needs to be clear cleanly using `clearDriverHandle`
otherwise, after exiting the MAIN loop, the task may be killed by
`tr.handleKill`.
The fix is to store the state of the Run() loop in the task runner local
client state: if the task runner ever exits this loop cleanly (not with
a shutdown) it will never be able to run again. So if the Run() loops
starts with this local state flag set, it must exit early.
This local state flag is also being checked on task restart requests. If
the task is "dead" and its Run() loop is not active it will never be
able to run again.
* address code review requests
* apply more code review changes
* taskrunner: add different Restart modes
Using the task event to differentiate between the allocrunner restart
methods proved to be confusing for developers to understand how it all
worked.
So instead of relying on the event type, this commit separated the logic
of restarting an taskRunner into two methods:
- `Restart` will retain the current behaviour and only will only restart
the task if it's currently running.
- `ForceRestart` is the new method where a `dead` task is allowed to
restart if its `Run()` method is still active. Callers will need to
restart the allocRunner taskCoordinator to make sure it will allow the
task to run again.
* minor fixes
2022-08-24 21:43:07 +00:00
|
|
|
// Restart satisfies the WorkloadRestarter interface and restarts all tasks
|
|
|
|
// that are currently running.
|
|
|
|
func (ar *allocRunner) Restart(ctx context.Context, event *structs.TaskEvent, failure bool) error {
|
|
|
|
return ar.restartTasks(ctx, event, failure, false)
|
|
|
|
}
|
|
|
|
|
|
|
|
// RestartTask restarts the provided task.
|
|
|
|
func (ar *allocRunner) RestartTask(taskName string, event *structs.TaskEvent) error {
|
2019-04-01 12:56:02 +00:00
|
|
|
tr, ok := ar.tasks[taskName]
|
|
|
|
if !ok {
|
|
|
|
return fmt.Errorf("Could not find task runner for task: %s", taskName)
|
|
|
|
}
|
|
|
|
|
Task lifecycle restart (#14127)
* allocrunner: handle lifecycle when all tasks die
When all tasks die the Coordinator must transition to its terminal
state, coordinatorStatePoststop, to unblock poststop tasks. Since this
could happen at any time (for example, a prestart task dies), all states
must be able to transition to this terminal state.
* allocrunner: implement different alloc restarts
Add a new alloc restart mode where all tasks are restarted, even if they
have already exited. Also unifies the alloc restart logic to use the
implementation that restarts tasks concurrently and ignores
ErrTaskNotRunning errors since those are expected when restarting the
allocation.
* allocrunner: allow tasks to run again
Prevent the task runner Run() method from exiting to allow a dead task
to run again. When the task runner is signaled to restart, the function
will jump back to the MAIN loop and run it again.
The task runner determines if a task needs to run again based on two new
task events that were added to differentiate between a request to
restart a specific task, the tasks that are currently running, or all
tasks that have already run.
* api/cli: add support for all tasks alloc restart
Implement the new -all-tasks alloc restart CLI flag and its API
counterpar, AllTasks. The client endpoint calls the appropriate restart
method from the allocrunner depending on the restart parameters used.
* test: fix tasklifecycle Coordinator test
* allocrunner: kill taskrunners if all tasks are dead
When all non-poststop tasks are dead we need to kill the taskrunners so
we don't leak their goroutines, which are blocked in the alloc restart
loop. This also ensures the allocrunner exits on its own.
* taskrunner: fix tests that waited on WaitCh
Now that "dead" tasks may run again, the taskrunner Run() method will
not return when the task finishes running, so tests must wait for the
task state to be "dead" instead of using the WaitCh, since it won't be
closed until the taskrunner is killed.
* tests: add tests for all tasks alloc restart
* changelog: add entry for #14127
* taskrunner: fix restore logic.
The first implementation of the task runner restore process relied on
server data (`tr.Alloc().TerminalStatus()`) which may not be available
to the client at the time of restore.
It also had the incorrect code path. When restoring a dead task the
driver handle always needs to be clear cleanly using `clearDriverHandle`
otherwise, after exiting the MAIN loop, the task may be killed by
`tr.handleKill`.
The fix is to store the state of the Run() loop in the task runner local
client state: if the task runner ever exits this loop cleanly (not with
a shutdown) it will never be able to run again. So if the Run() loops
starts with this local state flag set, it must exit early.
This local state flag is also being checked on task restart requests. If
the task is "dead" and its Run() loop is not active it will never be
able to run again.
* address code review requests
* apply more code review changes
* taskrunner: add different Restart modes
Using the task event to differentiate between the allocrunner restart
methods proved to be confusing for developers to understand how it all
worked.
So instead of relying on the event type, this commit separated the logic
of restarting an taskRunner into two methods:
- `Restart` will retain the current behaviour and only will only restart
the task if it's currently running.
- `ForceRestart` is the new method where a `dead` task is allowed to
restart if its `Run()` method is still active. Callers will need to
restart the allocRunner taskCoordinator to make sure it will allow the
task to run again.
* minor fixes
2022-08-24 21:43:07 +00:00
|
|
|
return tr.Restart(context.TODO(), event, false)
|
2019-04-01 12:56:02 +00:00
|
|
|
}
|
|
|
|
|
Task lifecycle restart (#14127)
* allocrunner: handle lifecycle when all tasks die
When all tasks die the Coordinator must transition to its terminal
state, coordinatorStatePoststop, to unblock poststop tasks. Since this
could happen at any time (for example, a prestart task dies), all states
must be able to transition to this terminal state.
* allocrunner: implement different alloc restarts
Add a new alloc restart mode where all tasks are restarted, even if they
have already exited. Also unifies the alloc restart logic to use the
implementation that restarts tasks concurrently and ignores
ErrTaskNotRunning errors since those are expected when restarting the
allocation.
* allocrunner: allow tasks to run again
Prevent the task runner Run() method from exiting to allow a dead task
to run again. When the task runner is signaled to restart, the function
will jump back to the MAIN loop and run it again.
The task runner determines if a task needs to run again based on two new
task events that were added to differentiate between a request to
restart a specific task, the tasks that are currently running, or all
tasks that have already run.
* api/cli: add support for all tasks alloc restart
Implement the new -all-tasks alloc restart CLI flag and its API
counterpar, AllTasks. The client endpoint calls the appropriate restart
method from the allocrunner depending on the restart parameters used.
* test: fix tasklifecycle Coordinator test
* allocrunner: kill taskrunners if all tasks are dead
When all non-poststop tasks are dead we need to kill the taskrunners so
we don't leak their goroutines, which are blocked in the alloc restart
loop. This also ensures the allocrunner exits on its own.
* taskrunner: fix tests that waited on WaitCh
Now that "dead" tasks may run again, the taskrunner Run() method will
not return when the task finishes running, so tests must wait for the
task state to be "dead" instead of using the WaitCh, since it won't be
closed until the taskrunner is killed.
* tests: add tests for all tasks alloc restart
* changelog: add entry for #14127
* taskrunner: fix restore logic.
The first implementation of the task runner restore process relied on
server data (`tr.Alloc().TerminalStatus()`) which may not be available
to the client at the time of restore.
It also had the incorrect code path. When restoring a dead task the
driver handle always needs to be clear cleanly using `clearDriverHandle`
otherwise, after exiting the MAIN loop, the task may be killed by
`tr.handleKill`.
The fix is to store the state of the Run() loop in the task runner local
client state: if the task runner ever exits this loop cleanly (not with
a shutdown) it will never be able to run again. So if the Run() loops
starts with this local state flag set, it must exit early.
This local state flag is also being checked on task restart requests. If
the task is "dead" and its Run() loop is not active it will never be
able to run again.
* address code review requests
* apply more code review changes
* taskrunner: add different Restart modes
Using the task event to differentiate between the allocrunner restart
methods proved to be confusing for developers to understand how it all
worked.
So instead of relying on the event type, this commit separated the logic
of restarting an taskRunner into two methods:
- `Restart` will retain the current behaviour and only will only restart
the task if it's currently running.
- `ForceRestart` is the new method where a `dead` task is allowed to
restart if its `Run()` method is still active. Callers will need to
restart the allocRunner taskCoordinator to make sure it will allow the
task to run again.
* minor fixes
2022-08-24 21:43:07 +00:00
|
|
|
// RestartRunning restarts all tasks that are currently running.
|
|
|
|
func (ar *allocRunner) RestartRunning(event *structs.TaskEvent) error {
|
|
|
|
return ar.restartTasks(context.TODO(), event, false, false)
|
|
|
|
}
|
|
|
|
|
|
|
|
// RestartAll restarts all tasks in the allocation, including dead ones. They
|
|
|
|
// will restart following their lifecycle order.
|
|
|
|
func (ar *allocRunner) RestartAll(event *structs.TaskEvent) error {
|
|
|
|
// Restart the taskCoordinator to allow dead tasks to run again.
|
|
|
|
ar.taskCoordinator.Restart()
|
|
|
|
return ar.restartTasks(context.TODO(), event, false, true)
|
|
|
|
}
|
|
|
|
|
|
|
|
// restartTasks restarts all task runners concurrently.
|
|
|
|
func (ar *allocRunner) restartTasks(ctx context.Context, event *structs.TaskEvent, failure bool, force bool) error {
|
2019-11-18 18:04:01 +00:00
|
|
|
waitCh := make(chan struct{})
|
|
|
|
var err *multierror.Error
|
|
|
|
var errMutex sync.Mutex
|
|
|
|
|
2021-01-21 16:36:00 +00:00
|
|
|
// run alloc task restart hooks
|
|
|
|
ar.taskRestartHooks()
|
|
|
|
|
2019-11-18 18:04:01 +00:00
|
|
|
go func() {
|
|
|
|
var wg sync.WaitGroup
|
|
|
|
defer close(waitCh)
|
|
|
|
for tn, tr := range ar.tasks {
|
|
|
|
wg.Add(1)
|
Task lifecycle restart (#14127)
* allocrunner: handle lifecycle when all tasks die
When all tasks die the Coordinator must transition to its terminal
state, coordinatorStatePoststop, to unblock poststop tasks. Since this
could happen at any time (for example, a prestart task dies), all states
must be able to transition to this terminal state.
* allocrunner: implement different alloc restarts
Add a new alloc restart mode where all tasks are restarted, even if they
have already exited. Also unifies the alloc restart logic to use the
implementation that restarts tasks concurrently and ignores
ErrTaskNotRunning errors since those are expected when restarting the
allocation.
* allocrunner: allow tasks to run again
Prevent the task runner Run() method from exiting to allow a dead task
to run again. When the task runner is signaled to restart, the function
will jump back to the MAIN loop and run it again.
The task runner determines if a task needs to run again based on two new
task events that were added to differentiate between a request to
restart a specific task, the tasks that are currently running, or all
tasks that have already run.
* api/cli: add support for all tasks alloc restart
Implement the new -all-tasks alloc restart CLI flag and its API
counterpar, AllTasks. The client endpoint calls the appropriate restart
method from the allocrunner depending on the restart parameters used.
* test: fix tasklifecycle Coordinator test
* allocrunner: kill taskrunners if all tasks are dead
When all non-poststop tasks are dead we need to kill the taskrunners so
we don't leak their goroutines, which are blocked in the alloc restart
loop. This also ensures the allocrunner exits on its own.
* taskrunner: fix tests that waited on WaitCh
Now that "dead" tasks may run again, the taskrunner Run() method will
not return when the task finishes running, so tests must wait for the
task state to be "dead" instead of using the WaitCh, since it won't be
closed until the taskrunner is killed.
* tests: add tests for all tasks alloc restart
* changelog: add entry for #14127
* taskrunner: fix restore logic.
The first implementation of the task runner restore process relied on
server data (`tr.Alloc().TerminalStatus()`) which may not be available
to the client at the time of restore.
It also had the incorrect code path. When restoring a dead task the
driver handle always needs to be clear cleanly using `clearDriverHandle`
otherwise, after exiting the MAIN loop, the task may be killed by
`tr.handleKill`.
The fix is to store the state of the Run() loop in the task runner local
client state: if the task runner ever exits this loop cleanly (not with
a shutdown) it will never be able to run again. So if the Run() loops
starts with this local state flag set, it must exit early.
This local state flag is also being checked on task restart requests. If
the task is "dead" and its Run() loop is not active it will never be
able to run again.
* address code review requests
* apply more code review changes
* taskrunner: add different Restart modes
Using the task event to differentiate between the allocrunner restart
methods proved to be confusing for developers to understand how it all
worked.
So instead of relying on the event type, this commit separated the logic
of restarting an taskRunner into two methods:
- `Restart` will retain the current behaviour and only will only restart
the task if it's currently running.
- `ForceRestart` is the new method where a `dead` task is allowed to
restart if its `Run()` method is still active. Callers will need to
restart the allocRunner taskCoordinator to make sure it will allow the
task to run again.
* minor fixes
2022-08-24 21:43:07 +00:00
|
|
|
go func(taskName string, taskRunner *taskrunner.TaskRunner) {
|
2019-11-18 18:04:01 +00:00
|
|
|
defer wg.Done()
|
Task lifecycle restart (#14127)
* allocrunner: handle lifecycle when all tasks die
When all tasks die the Coordinator must transition to its terminal
state, coordinatorStatePoststop, to unblock poststop tasks. Since this
could happen at any time (for example, a prestart task dies), all states
must be able to transition to this terminal state.
* allocrunner: implement different alloc restarts
Add a new alloc restart mode where all tasks are restarted, even if they
have already exited. Also unifies the alloc restart logic to use the
implementation that restarts tasks concurrently and ignores
ErrTaskNotRunning errors since those are expected when restarting the
allocation.
* allocrunner: allow tasks to run again
Prevent the task runner Run() method from exiting to allow a dead task
to run again. When the task runner is signaled to restart, the function
will jump back to the MAIN loop and run it again.
The task runner determines if a task needs to run again based on two new
task events that were added to differentiate between a request to
restart a specific task, the tasks that are currently running, or all
tasks that have already run.
* api/cli: add support for all tasks alloc restart
Implement the new -all-tasks alloc restart CLI flag and its API
counterpar, AllTasks. The client endpoint calls the appropriate restart
method from the allocrunner depending on the restart parameters used.
* test: fix tasklifecycle Coordinator test
* allocrunner: kill taskrunners if all tasks are dead
When all non-poststop tasks are dead we need to kill the taskrunners so
we don't leak their goroutines, which are blocked in the alloc restart
loop. This also ensures the allocrunner exits on its own.
* taskrunner: fix tests that waited on WaitCh
Now that "dead" tasks may run again, the taskrunner Run() method will
not return when the task finishes running, so tests must wait for the
task state to be "dead" instead of using the WaitCh, since it won't be
closed until the taskrunner is killed.
* tests: add tests for all tasks alloc restart
* changelog: add entry for #14127
* taskrunner: fix restore logic.
The first implementation of the task runner restore process relied on
server data (`tr.Alloc().TerminalStatus()`) which may not be available
to the client at the time of restore.
It also had the incorrect code path. When restoring a dead task the
driver handle always needs to be clear cleanly using `clearDriverHandle`
otherwise, after exiting the MAIN loop, the task may be killed by
`tr.handleKill`.
The fix is to store the state of the Run() loop in the task runner local
client state: if the task runner ever exits this loop cleanly (not with
a shutdown) it will never be able to run again. So if the Run() loops
starts with this local state flag set, it must exit early.
This local state flag is also being checked on task restart requests. If
the task is "dead" and its Run() loop is not active it will never be
able to run again.
* address code review requests
* apply more code review changes
* taskrunner: add different Restart modes
Using the task event to differentiate between the allocrunner restart
methods proved to be confusing for developers to understand how it all
worked.
So instead of relying on the event type, this commit separated the logic
of restarting an taskRunner into two methods:
- `Restart` will retain the current behaviour and only will only restart
the task if it's currently running.
- `ForceRestart` is the new method where a `dead` task is allowed to
restart if its `Run()` method is still active. Callers will need to
restart the allocRunner taskCoordinator to make sure it will allow the
task to run again.
* minor fixes
2022-08-24 21:43:07 +00:00
|
|
|
|
|
|
|
var e error
|
|
|
|
if force {
|
|
|
|
e = taskRunner.ForceRestart(ctx, event.Copy(), failure)
|
|
|
|
} else {
|
|
|
|
e = taskRunner.Restart(ctx, event.Copy(), failure)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Ignore ErrTaskNotRunning errors since tasks that are not
|
|
|
|
// running are expected to not be restarted.
|
|
|
|
if e != nil && e != taskrunner.ErrTaskNotRunning {
|
2019-11-18 18:04:01 +00:00
|
|
|
errMutex.Lock()
|
|
|
|
defer errMutex.Unlock()
|
|
|
|
err = multierror.Append(err, fmt.Errorf("failed to restart task %s: %v", taskName, e))
|
|
|
|
}
|
|
|
|
}(tn, tr)
|
|
|
|
}
|
|
|
|
wg.Wait()
|
|
|
|
}()
|
|
|
|
|
|
|
|
select {
|
|
|
|
case <-waitCh:
|
|
|
|
case <-ctx.Done():
|
|
|
|
}
|
|
|
|
|
|
|
|
return err.ErrorOrNil()
|
|
|
|
}
|
|
|
|
|
2019-04-03 10:46:15 +00:00
|
|
|
// Signal sends a signal request to task runners inside an allocation. If the
|
|
|
|
// taskName is empty, then it is sent to all tasks.
|
|
|
|
func (ar *allocRunner) Signal(taskName, signal string) error {
|
|
|
|
event := structs.NewTaskEvent(structs.TaskSignaling).SetSignalText(signal)
|
|
|
|
|
|
|
|
if taskName != "" {
|
|
|
|
tr, ok := ar.tasks[taskName]
|
|
|
|
if !ok {
|
|
|
|
return fmt.Errorf("Task not found")
|
|
|
|
}
|
|
|
|
|
|
|
|
return tr.Signal(event, signal)
|
|
|
|
}
|
|
|
|
|
|
|
|
var err *multierror.Error
|
|
|
|
|
|
|
|
for tn, tr := range ar.tasks {
|
|
|
|
rerr := tr.Signal(event.Copy(), signal)
|
|
|
|
if rerr != nil {
|
|
|
|
err = multierror.Append(err, fmt.Errorf("Failed to signal task: %s, err: %v", tn, rerr))
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return err.ErrorOrNil()
|
|
|
|
}
|
2019-04-28 21:22:53 +00:00
|
|
|
|
2022-03-02 10:47:26 +00:00
|
|
|
// Reconnect logs a reconnect event for each task in the allocation and syncs the current alloc state with the server.
|
|
|
|
func (ar *allocRunner) Reconnect(update *structs.Allocation) (err error) {
|
|
|
|
event := structs.NewTaskEvent(structs.TaskClientReconnected)
|
2022-03-31 15:32:18 +00:00
|
|
|
event.Time = time.Now().UnixNano()
|
2022-03-02 10:47:26 +00:00
|
|
|
for _, tr := range ar.tasks {
|
|
|
|
tr.AppendEvent(event)
|
|
|
|
}
|
|
|
|
|
2022-04-06 13:33:32 +00:00
|
|
|
// Update the client alloc with the server side indexes.
|
2022-03-02 10:47:26 +00:00
|
|
|
ar.setIndexes(update)
|
|
|
|
|
|
|
|
// Calculate alloc state to get the final state with the new events.
|
|
|
|
// Cannot rely on AllocStates as it won't recompute TaskStates once they are set.
|
|
|
|
states := make(map[string]*structs.TaskState, len(ar.tasks))
|
|
|
|
for name, tr := range ar.tasks {
|
|
|
|
states[name] = tr.TaskState()
|
|
|
|
}
|
|
|
|
|
|
|
|
// Build the client allocation
|
|
|
|
alloc := ar.clientAlloc(states)
|
|
|
|
|
|
|
|
// Update the client state store.
|
|
|
|
err = ar.stateUpdater.PutAllocation(alloc)
|
|
|
|
if err != nil {
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
|
|
|
// Update the server.
|
|
|
|
ar.stateUpdater.AllocStateUpdated(alloc)
|
|
|
|
|
|
|
|
// Broadcast client alloc to listeners.
|
|
|
|
err = ar.allocBroadcaster.Send(alloc)
|
|
|
|
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
2019-04-28 21:22:53 +00:00
|
|
|
func (ar *allocRunner) GetTaskExecHandler(taskName string) drivermanager.TaskExecHandler {
|
|
|
|
tr, ok := ar.tasks[taskName]
|
|
|
|
if !ok {
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
|
|
|
return tr.TaskExecHandler()
|
|
|
|
}
|
|
|
|
|
|
|
|
func (ar *allocRunner) GetTaskDriverCapabilities(taskName string) (*drivers.Capabilities, error) {
|
|
|
|
tr, ok := ar.tasks[taskName]
|
|
|
|
if !ok {
|
|
|
|
return nil, fmt.Errorf("task not found")
|
|
|
|
}
|
|
|
|
|
|
|
|
return tr.DriverCapabilities()
|
|
|
|
}
|