package taskrunner

import (
	"context"
	"fmt"
	"path/filepath"
	"sync"
	"time"

	"github.com/LK4D4/joincontext"
	multierror "github.com/hashicorp/go-multierror"

	"github.com/hashicorp/nomad/client/allocrunner/interfaces"
	"github.com/hashicorp/nomad/client/allocrunner/taskrunner/state"
	"github.com/hashicorp/nomad/nomad/structs"
	"github.com/hashicorp/nomad/plugins/drivers"
)

// hookResources captures the resources for the task provided by hooks.
type hookResources struct {
	Devices []*drivers.DeviceConfig
	Mounts  []*drivers.MountConfig
	sync.RWMutex
}

func (h *hookResources) setDevices(d []*drivers.DeviceConfig) {
	h.Lock()
	h.Devices = d
	h.Unlock()
}

func (h *hookResources) getDevices() []*drivers.DeviceConfig {
	h.RLock()
	defer h.RUnlock()
	return h.Devices
}

func (h *hookResources) setMounts(m []*drivers.MountConfig) {
	h.Lock()
	h.Mounts = m
	h.Unlock()
}

func (h *hookResources) getMounts() []*drivers.MountConfig {
	h.RLock()
	defer h.RUnlock()
	return h.Mounts
}
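
// Hooks hand these resources back through their prestart response (see
// prestart below) and the runner stores them here; the embedded RWMutex
// guards readers that may race with a hook still writing. A rough sketch of
// the flow, with hypothetical values:
//
//	// Inside a hook's Prestart: request a mount for the task.
//	resp.Mounts = []*drivers.MountConfig{{
//		TaskPath: "/alloc/data",               // illustrative paths only
//		HostPath: "/var/nomad/mounts/example",
//	}}
//
//	// Later, when the runner builds the driver's task config, it reads:
//	mounts := tr.hookResources.getMounts()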

// initHooks initializes the task's hooks.
func (tr *TaskRunner) initHooks() {
	hookLogger := tr.logger.Named("task_hook")
	task := tr.Task()

	tr.logmonHookConfig = newLogMonHookConfig(task.Name, tr.taskDir.LogDir)

	// Add the hook resources
	tr.hookResources = &hookResources{}

	// Create the task directory hook. This is run first to ensure the
	// directory path exists for other hooks.
	alloc := tr.Alloc()
	tr.runnerHooks = []interfaces.TaskHook{
		newValidateHook(tr.clientConfig, hookLogger),
		newTaskDirHook(tr, hookLogger),
		newLogMonHook(tr, hookLogger),
		newDispatchHook(alloc, hookLogger),
		newVolumeHook(tr, hookLogger),
		newArtifactHook(tr, hookLogger),
		newStatsHook(tr, tr.clientConfig.StatsCollectionInterval, hookLogger),
		newDeviceHook(tr.devicemanager, hookLogger),
	}

	// If the task has a CSI stanza, add the hook.
	if task.CSIPluginConfig != nil {
		tr.runnerHooks = append(tr.runnerHooks, newCSIPluginSupervisorHook(filepath.Join(tr.clientConfig.StateDir, "csi"), tr, tr, hookLogger))
	}

	// If Vault is enabled, add the hook
	if task.Vault != nil {
		tr.runnerHooks = append(tr.runnerHooks, newVaultHook(&vaultHookConfig{
			vaultStanza: task.Vault,
			client:      tr.vaultClient,
			events:      tr,
			lifecycle:   tr,
			updater:     tr,
			logger:      hookLogger,
			alloc:       tr.Alloc(),
			task:        tr.taskName,
		}))
	}

	// Get the consul namespace for the TG of the allocation
	consulNamespace := tr.alloc.ConsulNamespace()

	// If the task has templates, add the template hook
	if len(task.Templates) != 0 {
		tr.runnerHooks = append(tr.runnerHooks, newTemplateHook(&templateHookConfig{
			logger:          hookLogger,
			lifecycle:       tr,
			events:          tr,
			templates:       task.Templates,
			clientConfig:    tr.clientConfig,
			envBuilder:      tr.envBuilder,
			consulNamespace: consulNamespace,
		}))
	}

	// Always add the service hook. A task with no services on initial
	// registration may be updated to include services, which must be handled
	// with this hook.
	tr.runnerHooks = append(tr.runnerHooks, newServiceHook(serviceHookConfig{
		alloc:           tr.Alloc(),
		task:            tr.Task(),
		consulServices:  tr.consulServiceClient,
		consulNamespace: consulNamespace,
		restarter:       tr,
		logger:          hookLogger,
	}))

	// If this is a Connect sidecar proxy (or a Connect Native) service,
	// add the sidsHook for requesting a Service Identity token (if ACLs).
	if task.UsesConnect() {
		// Enable the Service Identity hook only if the Nomad client is
		// configured with a consul token, indicating that Consul ACLs are
		// enabled.
		if tr.clientConfig.ConsulConfig.Token != "" {
			tr.runnerHooks = append(tr.runnerHooks, newSIDSHook(sidsHookConfig{
				alloc:      tr.Alloc(),
				task:       tr.Task(),
				sidsClient: tr.siClient,
				lifecycle:  tr,
				logger:     hookLogger,
			}))
		}

		if task.UsesConnectSidecar() {
			tr.runnerHooks = append(tr.runnerHooks,
				newEnvoyVersionHook(newEnvoyVersionHookConfig(alloc, tr.consulProxiesClient, hookLogger)),
				newEnvoyBootstrapHook(newEnvoyBootstrapHookConfig(alloc, tr.clientConfig.ConsulConfig, consulNamespace, hookLogger)),
			)
		} else if task.Kind.IsConnectNative() {
			tr.runnerHooks = append(tr.runnerHooks, newConnectNativeHook(
				newConnectNativeHookConfig(alloc, tr.clientConfig.ConsulConfig, hookLogger),
			))
		}
	}

	// Always add the script checks hook. A task with no script checks on
	// initial registration may be updated to include them, which must be
	// handled with this hook.
	tr.runnerHooks = append(tr.runnerHooks, newScriptCheckHook(scriptCheckHookConfig{
		alloc:  tr.Alloc(),
		task:   tr.Task(),
		consul: tr.consulServiceClient,
		logger: hookLogger,
	}))

	// If this task driver has remote capabilities, add the remote task hook.
	if tr.driverCapabilities.RemoteTasks {
		tr.runnerHooks = append(tr.runnerHooks, newRemoteTaskHook(tr, hookLogger))
	}
}
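
// A hook only implements the lifecycle interfaces it needs; each phase below
// type-asserts for its own interface and skips the rest. A minimal sketch of
// a prestart-only hook (hypothetical; not registered above):
//
//	type exampleHook struct{}
//
//	func (*exampleHook) Name() string { return "example" }
//
//	func (*exampleHook) Prestart(ctx context.Context, req *interfaces.TaskPrestartRequest, resp *interfaces.TaskPrestartResponse) error {
//		// Mark Done so the hook is skipped when the client restarts and
//		// restores this task (see the PrestartDone handling in prestart).
//		resp.Done = true
//		return nil
//	}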

// emitHookError emits a task event for a hook failure, preferring the event
// embedded in a *hookError and synthesizing a generic one otherwise.
func (tr *TaskRunner) emitHookError(err error, hookName string) {
	var taskEvent *structs.TaskEvent
	if herr, ok := err.(*hookError); ok {
		taskEvent = herr.taskEvent
	} else {
		message := fmt.Sprintf("%s: %v", hookName, err)
		taskEvent = structs.NewTaskEvent(structs.TaskHookFailed).SetMessage(message)
	}

	tr.EmitEvent(taskEvent)
}
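
// A hook that wants control over the emitted event can therefore return a
// *hookError wrapping a custom TaskEvent instead of a plain error. A sketch,
// assuming the package exposes a NewHookError constructor for *hookError:
//
//	if err := doWork(); err != nil {
//		ev := structs.NewTaskEvent(structs.TaskHookFailed).
//			SetDisplayMessage("example hook could not do its work")
//		return NewHookError(err, ev)
//	}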

// prestart is used to run the runner's prestart hooks.
func (tr *TaskRunner) prestart() error {
	// Determine if the allocation is terminal and we should avoid running
	// prestart hooks.
	if tr.shouldShutdown() {
		tr.logger.Trace("skipping prestart hooks since allocation is terminal")
		return nil
	}

	if tr.logger.IsTrace() {
		start := time.Now()
		tr.logger.Trace("running prestart hooks", "start", start)
		defer func() {
			end := time.Now()
			tr.logger.Trace("finished prestart hooks", "end", end, "duration", end.Sub(start))
		}()
	}

	// Use a join context to allow any blocking pre-start hooks
	// to be canceled by either killCtx or shutdownCtx.
	joinedCtx, joinedCancel := joincontext.Join(tr.killCtx, tr.shutdownCtx)
	defer joinedCancel()

	for _, hook := range tr.runnerHooks {
		pre, ok := hook.(interfaces.TaskPrestartHook)
		if !ok {
			continue
		}

		name := pre.Name()

		// Build the request
		req := interfaces.TaskPrestartRequest{
			Task:          tr.Task(),
			TaskDir:       tr.taskDir,
			TaskEnv:       tr.envBuilder.Build(),
			TaskResources: tr.taskResources,
		}

		origHookState := tr.hookState(name)
		if origHookState != nil {
			if origHookState.PrestartDone {
				tr.logger.Trace("skipping done prestart hook", "name", pre.Name())

				// Always set env vars from hooks
				if name == HookNameDevices {
					tr.envBuilder.SetDeviceHookEnv(name, origHookState.Env)
				} else {
					tr.envBuilder.SetHookEnv(name, origHookState.Env)
				}

				continue
			}

			// Give the hook its old data
			req.PreviousState = origHookState.Data
		}

		req.VaultToken = tr.getVaultToken()

		// Time the prestart hook
		var start time.Time
		if tr.logger.IsTrace() {
			start = time.Now()
			tr.logger.Trace("running prestart hook", "name", name, "start", start)
		}

		// Run the prestart hook
		var resp interfaces.TaskPrestartResponse
		if err := pre.Prestart(joinedCtx, &req, &resp); err != nil {
			tr.emitHookError(err, name)
			return structs.WrapRecoverable(fmt.Sprintf("prestart hook %q failed: %v", name, err), err)
		}

		// Store the hook state
		{
			hookState := &state.HookState{
				Data:         resp.State,
				PrestartDone: resp.Done,
				Env:          resp.Env,
			}

			// Store and persist local state if the hook state has changed
			if !hookState.Equal(origHookState) {
				tr.stateLock.Lock()
				tr.localState.Hooks[name] = hookState
				tr.stateLock.Unlock()

				if err := tr.persistLocalState(); err != nil {
					return err
				}
			}
		}

		// Store the environment variables returned by the hook
		if name == HookNameDevices {
			tr.envBuilder.SetDeviceHookEnv(name, resp.Env)
		} else {
			tr.envBuilder.SetHookEnv(name, resp.Env)
		}

		// Store the resources
		if len(resp.Devices) != 0 {
			tr.hookResources.setDevices(resp.Devices)
		}
		if len(resp.Mounts) != 0 {
			tr.hookResources.setMounts(resp.Mounts)
		}

		if tr.logger.IsTrace() {
			end := time.Now()
			tr.logger.Trace("finished prestart hook", "name", name, "end", end, "duration", end.Sub(start))
		}
	}

	return nil
}
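
// The response fields drive the bookkeeping above: resp.State is persisted
// across client restarts and handed back as req.PreviousState (and to stop
// hooks as req.ExistingState), while resp.Env is re-applied even when a Done
// hook is skipped. A sketch of a hook using both (hypothetical; state is
// assumed to be a plain string map as in state.HookState):
//
//	func (h *exampleHook) Prestart(ctx context.Context, req *interfaces.TaskPrestartRequest, resp *interfaces.TaskPrestartResponse) error {
//		if req.PreviousState["token"] != "" {
//			// Restored after a client restart; reuse the old value.
//			resp.State = req.PreviousState
//		} else {
//			resp.State = map[string]string{"token": "generated-once"}
//		}
//		resp.Env = map[string]string{"EXAMPLE_TOKEN": resp.State["token"]}
//		resp.Done = true
//		return nil
//	}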

// poststart is used to run the runner's poststart hooks.
func (tr *TaskRunner) poststart() error {
	if tr.logger.IsTrace() {
		start := time.Now()
		tr.logger.Trace("running poststart hooks", "start", start)
		defer func() {
			end := time.Now()
			tr.logger.Trace("finished poststart hooks", "end", end, "duration", end.Sub(start))
		}()
	}

	handle := tr.getDriverHandle()
	net := handle.Network()

	// Pass the lazy handle to the hooks so even if the driver exits and we
	// launch a new one (external plugin), the handle will refresh.
	lazyHandle := NewLazyHandle(tr.shutdownCtx, tr.getDriverHandle, tr.logger)

	var merr multierror.Error
	for _, hook := range tr.runnerHooks {
		post, ok := hook.(interfaces.TaskPoststartHook)
		if !ok {
			continue
		}

		name := post.Name()
		var start time.Time
		if tr.logger.IsTrace() {
			start = time.Now()
			tr.logger.Trace("running poststart hook", "name", name, "start", start)
		}

		req := interfaces.TaskPoststartRequest{
			DriverExec:    lazyHandle,
			DriverNetwork: net,
			DriverStats:   lazyHandle,
			TaskEnv:       tr.envBuilder.Build(),
		}
		var resp interfaces.TaskPoststartResponse
		if err := post.Poststart(tr.killCtx, &req, &resp); err != nil {
			tr.emitHookError(err, name)
			merr.Errors = append(merr.Errors, fmt.Errorf("poststart hook %q failed: %v", name, err))
		}

		// No need to persist as PoststartResponse is currently empty

		if tr.logger.IsTrace() {
			end := time.Now()
			tr.logger.Trace("finished poststart hook", "name", name, "end", end, "duration", end.Sub(start))
		}
	}

	return merr.ErrorOrNil()
}
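
// Because the request carries the lazy handle, a poststart hook keeps a
// usable exec/stats interface even if the driver plugin is relaunched. A
// sketch (hypothetical; assumes the hook holds an hclog.Logger):
//
//	func (h *exampleHook) Poststart(ctx context.Context, req *interfaces.TaskPoststartRequest, resp *interfaces.TaskPoststartResponse) error {
//		// DriverNetwork may be nil for drivers that don't report one.
//		if req.DriverNetwork != nil {
//			h.logger.Debug("task network ready", "ip", req.DriverNetwork.IP)
//		}
//		return nil
//	}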

// exited is used to run the exited hooks before a task is stopped.
func (tr *TaskRunner) exited() error {
	if tr.logger.IsTrace() {
		start := time.Now()
		tr.logger.Trace("running exited hooks", "start", start)
		defer func() {
			end := time.Now()
			tr.logger.Trace("finished exited hooks", "end", end, "duration", end.Sub(start))
		}()
	}

	var merr multierror.Error
	for _, hook := range tr.runnerHooks {
		post, ok := hook.(interfaces.TaskExitedHook)
		if !ok {
			continue
		}

		name := post.Name()
		var start time.Time
		if tr.logger.IsTrace() {
			start = time.Now()
			tr.logger.Trace("running exited hook", "name", name, "start", start)
		}

		req := interfaces.TaskExitedRequest{}
		var resp interfaces.TaskExitedResponse
		if err := post.Exited(tr.killCtx, &req, &resp); err != nil {
			tr.emitHookError(err, name)
			merr.Errors = append(merr.Errors, fmt.Errorf("exited hook %q failed: %v", name, err))
		}

		// No need to persist as TaskExitedResponse is currently empty

		if tr.logger.IsTrace() {
			end := time.Now()
			tr.logger.Trace("finished exited hook", "name", name, "end", end, "duration", end.Sub(start))
		}
	}

	return merr.ErrorOrNil()
}

// stop is used to run the stop hooks.
func (tr *TaskRunner) stop() error {
	if tr.logger.IsTrace() {
		start := time.Now()
		tr.logger.Trace("running stop hooks", "start", start)
		defer func() {
			end := time.Now()
			tr.logger.Trace("finished stop hooks", "end", end, "duration", end.Sub(start))
		}()
	}

	var merr multierror.Error
	for _, hook := range tr.runnerHooks {
		post, ok := hook.(interfaces.TaskStopHook)
		if !ok {
			continue
		}

		name := post.Name()
		var start time.Time
		if tr.logger.IsTrace() {
			start = time.Now()
			tr.logger.Trace("running stop hook", "name", name, "start", start)
		}

		req := interfaces.TaskStopRequest{}

		origHookState := tr.hookState(name)
		if origHookState != nil {
			// Give the hook data provided by prestart
			req.ExistingState = origHookState.Data
		}

		var resp interfaces.TaskStopResponse
		if err := post.Stop(tr.killCtx, &req, &resp); err != nil {
			tr.emitHookError(err, name)
			merr.Errors = append(merr.Errors, fmt.Errorf("stop hook %q failed: %v", name, err))
		}

		// Stop hooks cannot alter state and must be idempotent, so
		// unlike prestart there's no state to persist here.

		if tr.logger.IsTrace() {
			end := time.Now()
			tr.logger.Trace("finished stop hook", "name", name, "end", end, "duration", end.Sub(start))
		}
	}

	return merr.ErrorOrNil()
}
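
// Since stop hooks must be idempotent and cannot persist changes, they
// should treat req.ExistingState as read-only and tolerate being run more
// than once. A sketch (hypothetical helper):
//
//	func (h *exampleHook) Stop(ctx context.Context, req *interfaces.TaskStopRequest, resp *interfaces.TaskStopResponse) error {
//		// Deregistering an already-removed ID must be a no-op.
//		return h.deregister(req.ExistingState["service_id"])
//	}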

// updateHooks is used to run the runner's update hooks. Should only be
// called from Run(). To trigger an update, update state on the TaskRunner
// and call triggerUpdateHooks. Update hook failures are logged but do not
// fail the task.
func (tr *TaskRunner) updateHooks() {
	if tr.logger.IsTrace() {
		start := time.Now()
		tr.logger.Trace("running update hooks", "start", start)
		defer func() {
			end := time.Now()
			tr.logger.Trace("finished update hooks", "end", end, "duration", end.Sub(start))
		}()
	}

	// Prepare state needed by Update hooks
	alloc := tr.Alloc()

	// Execute Update hooks
	for _, hook := range tr.runnerHooks {
		upd, ok := hook.(interfaces.TaskUpdateHook)
		if !ok {
			continue
		}

		name := upd.Name()

		// Build the request
		req := interfaces.TaskUpdateRequest{
			VaultToken: tr.getVaultToken(),
			Alloc:      alloc,
			TaskEnv:    tr.envBuilder.Build(),
		}

		// Time the update hook
		var start time.Time
		if tr.logger.IsTrace() {
			start = time.Now()
			tr.logger.Trace("running update hook", "name", name, "start", start)
		}

		// Run the update hook
		var resp interfaces.TaskUpdateResponse
		if err := upd.Update(tr.killCtx, &req, &resp); err != nil {
			tr.emitHookError(err, name)
			tr.logger.Error("update hook failed", "name", name, "error", err)
		}

		// No need to persist as TaskUpdateResponse is currently empty

		if tr.logger.IsTrace() {
			end := time.Now()
			tr.logger.Trace("finished update hook", "name", name, "end", end, "duration", end.Sub(start))
		}
	}
}

// preKill is used to run the runner's preKill hooks. preKill hooks contain
// logic that must be executed before a task is killed or restarted.
func (tr *TaskRunner) preKill() {
	if tr.logger.IsTrace() {
		start := time.Now()
		tr.logger.Trace("running pre kill hooks", "start", start)
		defer func() {
			end := time.Now()
			tr.logger.Trace("finished pre kill hooks", "end", end, "duration", end.Sub(start))
		}()
	}

	for _, hook := range tr.runnerHooks {
		killHook, ok := hook.(interfaces.TaskPreKillHook)
		if !ok {
			continue
		}

		name := killHook.Name()

		// Time the pre kill hook
		var start time.Time
		if tr.logger.IsTrace() {
			start = time.Now()
			tr.logger.Trace("running prekill hook", "name", name, "start", start)
		}

		// Run the pre kill hook with a background context: the task is
		// already being killed at this point, so killCtx cannot be used to
		// bound the hook.
		req := interfaces.TaskPreKillRequest{}
		var resp interfaces.TaskPreKillResponse
		if err := killHook.PreKilling(context.Background(), &req, &resp); err != nil {
			tr.emitHookError(err, name)
			tr.logger.Error("prekill hook failed", "name", name, "error", err)
		}

		// No need to persist as TaskPreKillResponse is currently empty

		if tr.logger.IsTrace() {
			end := time.Now()
			tr.logger.Trace("finished prekill hook", "name", name, "end", end, "duration", end.Sub(start))
		}
	}
}

// shutdownHooks is called when the TaskRunner is gracefully shutdown but the
// task is not being stopped or garbage collected.
func (tr *TaskRunner) shutdownHooks() {
	for _, hook := range tr.runnerHooks {
		sh, ok := hook.(interfaces.ShutdownHook)
		if !ok {
			continue
		}

		name := sh.Name()

		// Time the shutdown hook
		var start time.Time
		if tr.logger.IsTrace() {
			start = time.Now()
			tr.logger.Trace("running shutdown hook", "name", name, "start", start)
		}

		sh.Shutdown()

		if tr.logger.IsTrace() {
			end := time.Now()
			tr.logger.Trace("finished shutdown hook", "name", name, "end", end, "duration", end.Sub(start))
		}
	}
}
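
// ShutdownHook is the narrowest of the hook interfaces used above: just Name
// and Shutdown, with no request, response, or error. A sketch (hypothetical):
//
//	func (h *exampleHook) Shutdown() {
//		// Best-effort cleanup: there is no error to return and no context
//		// to honor, so keep this fast and non-blocking where possible.
//		h.closeClient()
//	}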