2023-04-10 15:36:59 +00:00
|
|
|
// Copyright (c) HashiCorp, Inc.
|
|
|
|
// SPDX-License-Identifier: MPL-2.0
|
|
|
|
|
2022-08-22 22:38:49 +00:00
|
|
|
package tasklifecycle
|
|
|
|
|
|
|
|
import (
|
|
|
|
"fmt"
|
|
|
|
"sync"
|
|
|
|
|
|
|
|
"github.com/hashicorp/go-hclog"
|
|
|
|
"github.com/hashicorp/nomad/nomad/structs"
|
|
|
|
)
|
|
|
|
|
|
|
|
// coordinatorState represents a state of the task lifecycle Coordinator FSM.
type coordinatorState uint8

const (
	// coordinatorStateInit is the initial FSM state. All lifecycle stages are
	// blocked until the tasks are ready to start.
	coordinatorStateInit coordinatorState = iota

	// coordinatorStatePrestart unblocks the prestart stages (ephemeral and
	// sidecar) while everything else remains blocked.
	coordinatorStatePrestart

	// coordinatorStateMain unblocks main tasks (prestart sidecars stay
	// allowed) once the prestart stage is done.
	coordinatorStateMain

	// coordinatorStatePoststart unblocks poststart tasks (ephemeral and
	// sidecar) once the main tasks have started.
	coordinatorStatePoststart

	// coordinatorStateWaitAlloc waits for the allocation's tasks to finish
	// before moving to the terminal state.
	coordinatorStateWaitAlloc

	// coordinatorStatePoststop is the terminal state of the FSM; it is used
	// to unblock poststop tasks and can be reached from any other state.
	coordinatorStatePoststop
)
|
|
|
|
|
|
|
|
func (s coordinatorState) String() string {
|
|
|
|
switch s {
|
|
|
|
case coordinatorStateInit:
|
|
|
|
return "init"
|
|
|
|
case coordinatorStatePrestart:
|
|
|
|
return "prestart"
|
|
|
|
case coordinatorStateMain:
|
|
|
|
return "main"
|
|
|
|
case coordinatorStatePoststart:
|
|
|
|
return "poststart"
|
Task lifecycle restart (#14127)
* allocrunner: handle lifecycle when all tasks die
When all tasks die the Coordinator must transition to its terminal
state, coordinatorStatePoststop, to unblock poststop tasks. Since this
could happen at any time (for example, a prestart task dies), all states
must be able to transition to this terminal state.
* allocrunner: implement different alloc restarts
Add a new alloc restart mode where all tasks are restarted, even if they
have already exited. Also unifies the alloc restart logic to use the
implementation that restarts tasks concurrently and ignores
ErrTaskNotRunning errors since those are expected when restarting the
allocation.
* allocrunner: allow tasks to run again
Prevent the task runner Run() method from exiting to allow a dead task
to run again. When the task runner is signaled to restart, the function
will jump back to the MAIN loop and run it again.
The task runner determines if a task needs to run again based on two new
task events that were added to differentiate between a request to
restart a specific task, the tasks that are currently running, or all
tasks that have already run.
* api/cli: add support for all tasks alloc restart
Implement the new -all-tasks alloc restart CLI flag and its API
counterpar, AllTasks. The client endpoint calls the appropriate restart
method from the allocrunner depending on the restart parameters used.
* test: fix tasklifecycle Coordinator test
* allocrunner: kill taskrunners if all tasks are dead
When all non-poststop tasks are dead we need to kill the taskrunners so
we don't leak their goroutines, which are blocked in the alloc restart
loop. This also ensures the allocrunner exits on its own.
* taskrunner: fix tests that waited on WaitCh
Now that "dead" tasks may run again, the taskrunner Run() method will
not return when the task finishes running, so tests must wait for the
task state to be "dead" instead of using the WaitCh, since it won't be
closed until the taskrunner is killed.
* tests: add tests for all tasks alloc restart
* changelog: add entry for #14127
* taskrunner: fix restore logic.
The first implementation of the task runner restore process relied on
server data (`tr.Alloc().TerminalStatus()`) which may not be available
to the client at the time of restore.
It also had the incorrect code path. When restoring a dead task the
driver handle always needs to be clear cleanly using `clearDriverHandle`
otherwise, after exiting the MAIN loop, the task may be killed by
`tr.handleKill`.
The fix is to store the state of the Run() loop in the task runner local
client state: if the task runner ever exits this loop cleanly (not with
a shutdown) it will never be able to run again. So if the Run() loops
starts with this local state flag set, it must exit early.
This local state flag is also being checked on task restart requests. If
the task is "dead" and its Run() loop is not active it will never be
able to run again.
* address code review requests
* apply more code review changes
* taskrunner: add different Restart modes
Using the task event to differentiate between the allocrunner restart
methods proved to be confusing for developers to understand how it all
worked.
So instead of relying on the event type, this commit separated the logic
of restarting an taskRunner into two methods:
- `Restart` will retain the current behaviour and only will only restart
the task if it's currently running.
- `ForceRestart` is the new method where a `dead` task is allowed to
restart if its `Run()` method is still active. Callers will need to
restart the allocRunner taskCoordinator to make sure it will allow the
task to run again.
* minor fixes
2022-08-24 21:43:07 +00:00
|
|
|
case coordinatorStateWaitAlloc:
|
|
|
|
return "wait_alloc"
|
2022-08-22 22:38:49 +00:00
|
|
|
case coordinatorStatePoststop:
|
|
|
|
return "poststart"
|
|
|
|
}
|
|
|
|
panic(fmt.Sprintf("Unexpected task coordinator state %d", s))
|
|
|
|
}
|
|
|
|
|
|
|
|
// lifecycleStage represents a lifecycle configuration used for task
// coordination.
//
// Not all possible combinations of hook X sidecar are defined, only the ones
// that are relevant for coordinating task initialization order. For example, a
// main task with sidecar set to `true` starts at the same time as a
// non-sidecar main task, so there is no need to treat them differently.
type lifecycleStage uint8

const (
	// lifecycleStagePrestartEphemeral are tasks with the "prestart" hook and
	// sidecar set to "false".
	lifecycleStagePrestartEphemeral lifecycleStage = iota

	// lifecycleStagePrestartSidecar are tasks with the "prestart" hook and
	// sidecar set to "true".
	lifecycleStagePrestartSidecar

	// lifecycleStageMain are tasks without a lifecycle or a lifecycle with an
	// empty hook value.
	lifecycleStageMain

	// lifecycleStagePoststartEphemeral are tasks with the "poststart" hook and
	// sidecar set to "false".
	lifecycleStagePoststartEphemeral

	// lifecycleStagePoststartSidecar are tasks with the "poststart" hook and
	// sidecar set to "true".
	lifecycleStagePoststartSidecar

	// lifecycleStagePoststop are tasks with the "poststop" hook.
	lifecycleStagePoststop
)
|
|
|
|
|
|
|
|
// Coordinator controls when tasks with a given lifecycle configuration are
// allowed to start and run.
//
// It behaves like a finite state machine where each state transition blocks or
// allows some task lifecycle types to run.
type Coordinator struct {
	logger hclog.Logger

	// tasksByLifecycle is an index used to group and quickly access tasks by
	// their lifecycle stage. Populated once at construction and read-only
	// afterwards.
	tasksByLifecycle map[lifecycleStage][]string

	// currentState is the current state of the FSM. It must only be accessed
	// while holding the lock.
	currentState     coordinatorState
	currentStateLock sync.RWMutex

	// gates store the gates that control each task lifecycle stage. The map
	// itself is populated once at construction; individual gates are
	// opened/closed as the FSM transitions.
	gates map[lifecycleStage]*Gate
}
|
|
|
|
|
|
|
|
// NewCoordinator returns a new Coordinator with all tasks initially blocked.
|
|
|
|
func NewCoordinator(logger hclog.Logger, tasks []*structs.Task, shutdownCh <-chan struct{}) *Coordinator {
|
|
|
|
c := &Coordinator{
|
|
|
|
logger: logger.Named("task_coordinator"),
|
|
|
|
tasksByLifecycle: indexTasksByLifecycle(tasks),
|
|
|
|
gates: make(map[lifecycleStage]*Gate),
|
|
|
|
}
|
|
|
|
|
|
|
|
for lifecycle := range c.tasksByLifecycle {
|
|
|
|
c.gates[lifecycle] = NewGate(shutdownCh)
|
|
|
|
}
|
|
|
|
|
|
|
|
c.enterStateLocked(coordinatorStateInit)
|
|
|
|
return c
|
|
|
|
}
|
|
|
|
|
Task lifecycle restart (#14127)
* allocrunner: handle lifecycle when all tasks die
When all tasks die the Coordinator must transition to its terminal
state, coordinatorStatePoststop, to unblock poststop tasks. Since this
could happen at any time (for example, a prestart task dies), all states
must be able to transition to this terminal state.
* allocrunner: implement different alloc restarts
Add a new alloc restart mode where all tasks are restarted, even if they
have already exited. Also unifies the alloc restart logic to use the
implementation that restarts tasks concurrently and ignores
ErrTaskNotRunning errors since those are expected when restarting the
allocation.
* allocrunner: allow tasks to run again
Prevent the task runner Run() method from exiting to allow a dead task
to run again. When the task runner is signaled to restart, the function
will jump back to the MAIN loop and run it again.
The task runner determines if a task needs to run again based on two new
task events that were added to differentiate between a request to
restart a specific task, the tasks that are currently running, or all
tasks that have already run.
* api/cli: add support for all tasks alloc restart
Implement the new -all-tasks alloc restart CLI flag and its API
counterpar, AllTasks. The client endpoint calls the appropriate restart
method from the allocrunner depending on the restart parameters used.
* test: fix tasklifecycle Coordinator test
* allocrunner: kill taskrunners if all tasks are dead
When all non-poststop tasks are dead we need to kill the taskrunners so
we don't leak their goroutines, which are blocked in the alloc restart
loop. This also ensures the allocrunner exits on its own.
* taskrunner: fix tests that waited on WaitCh
Now that "dead" tasks may run again, the taskrunner Run() method will
not return when the task finishes running, so tests must wait for the
task state to be "dead" instead of using the WaitCh, since it won't be
closed until the taskrunner is killed.
* tests: add tests for all tasks alloc restart
* changelog: add entry for #14127
* taskrunner: fix restore logic.
The first implementation of the task runner restore process relied on
server data (`tr.Alloc().TerminalStatus()`) which may not be available
to the client at the time of restore.
It also had the incorrect code path. When restoring a dead task the
driver handle always needs to be clear cleanly using `clearDriverHandle`
otherwise, after exiting the MAIN loop, the task may be killed by
`tr.handleKill`.
The fix is to store the state of the Run() loop in the task runner local
client state: if the task runner ever exits this loop cleanly (not with
a shutdown) it will never be able to run again. So if the Run() loops
starts with this local state flag set, it must exit early.
This local state flag is also being checked on task restart requests. If
the task is "dead" and its Run() loop is not active it will never be
able to run again.
* address code review requests
* apply more code review changes
* taskrunner: add different Restart modes
Using the task event to differentiate between the allocrunner restart
methods proved to be confusing for developers to understand how it all
worked.
So instead of relying on the event type, this commit separated the logic
of restarting an taskRunner into two methods:
- `Restart` will retain the current behaviour and only will only restart
the task if it's currently running.
- `ForceRestart` is the new method where a `dead` task is allowed to
restart if its `Run()` method is still active. Callers will need to
restart the allocRunner taskCoordinator to make sure it will allow the
task to run again.
* minor fixes
2022-08-24 21:43:07 +00:00
|
|
|
// Restart sets the Coordinator state back to "init" and is used to coordinate
|
|
|
|
// a full alloc restart. Since all tasks will run again they need to be pending
|
|
|
|
// before they are allowed to proceed.
|
|
|
|
func (c *Coordinator) Restart() {
|
|
|
|
c.currentStateLock.Lock()
|
|
|
|
defer c.currentStateLock.Unlock()
|
|
|
|
c.enterStateLocked(coordinatorStateInit)
|
|
|
|
}
|
|
|
|
|
2022-08-22 22:38:49 +00:00
|
|
|
// Restore is used to set the Coordinator FSM to the correct state when an
|
|
|
|
// alloc is restored. Must be called before the allocrunner is running.
|
|
|
|
func (c *Coordinator) Restore(states map[string]*structs.TaskState) {
|
|
|
|
// Skip the "init" state when restoring since the tasks were likely already
|
|
|
|
// running, causing the Coordinator to be stuck waiting for them to be
|
|
|
|
// "pending".
|
|
|
|
c.enterStateLocked(coordinatorStatePrestart)
|
|
|
|
c.TaskStateUpdated(states)
|
|
|
|
}
|
|
|
|
|
|
|
|
// StartConditionForTask returns a channel that is unblocked when the task is
|
|
|
|
// allowed to run.
|
|
|
|
func (c *Coordinator) StartConditionForTask(task *structs.Task) <-chan struct{} {
|
|
|
|
lifecycle := taskLifecycleStage(task)
|
|
|
|
return c.gates[lifecycle].WaitCh()
|
|
|
|
}
|
|
|
|
|
|
|
|
// TaskStateUpdated notifies that a task state has changed. This may cause the
|
|
|
|
// Coordinator to transition to another state.
|
|
|
|
func (c *Coordinator) TaskStateUpdated(states map[string]*structs.TaskState) {
|
|
|
|
c.currentStateLock.Lock()
|
|
|
|
defer c.currentStateLock.Unlock()
|
|
|
|
|
|
|
|
// We may be able to move directly through some states (for example, when
|
|
|
|
// an alloc doesn't have any prestart task we can skip the prestart state),
|
|
|
|
// so loop until we stabilize.
|
|
|
|
// This is also important when restoring an alloc since we need to find the
|
|
|
|
// state where FSM was last positioned.
|
|
|
|
for {
|
|
|
|
nextState := c.nextStateLocked(states)
|
|
|
|
if nextState == c.currentState {
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
|
|
|
c.enterStateLocked(nextState)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// nextStateLocked returns the state the FSM should transition to given its
|
|
|
|
// current internal state and the received states of the tasks.
|
|
|
|
// The currentStateLock must be held before calling this method.
|
|
|
|
func (c *Coordinator) nextStateLocked(states map[string]*structs.TaskState) coordinatorState {
|
Task lifecycle restart (#14127)
* allocrunner: handle lifecycle when all tasks die
When all tasks die the Coordinator must transition to its terminal
state, coordinatorStatePoststop, to unblock poststop tasks. Since this
could happen at any time (for example, a prestart task dies), all states
must be able to transition to this terminal state.
* allocrunner: implement different alloc restarts
Add a new alloc restart mode where all tasks are restarted, even if they
have already exited. Also unifies the alloc restart logic to use the
implementation that restarts tasks concurrently and ignores
ErrTaskNotRunning errors since those are expected when restarting the
allocation.
* allocrunner: allow tasks to run again
Prevent the task runner Run() method from exiting to allow a dead task
to run again. When the task runner is signaled to restart, the function
will jump back to the MAIN loop and run it again.
The task runner determines if a task needs to run again based on two new
task events that were added to differentiate between a request to
restart a specific task, the tasks that are currently running, or all
tasks that have already run.
* api/cli: add support for all tasks alloc restart
Implement the new -all-tasks alloc restart CLI flag and its API
counterpar, AllTasks. The client endpoint calls the appropriate restart
method from the allocrunner depending on the restart parameters used.
* test: fix tasklifecycle Coordinator test
* allocrunner: kill taskrunners if all tasks are dead
When all non-poststop tasks are dead we need to kill the taskrunners so
we don't leak their goroutines, which are blocked in the alloc restart
loop. This also ensures the allocrunner exits on its own.
* taskrunner: fix tests that waited on WaitCh
Now that "dead" tasks may run again, the taskrunner Run() method will
not return when the task finishes running, so tests must wait for the
task state to be "dead" instead of using the WaitCh, since it won't be
closed until the taskrunner is killed.
* tests: add tests for all tasks alloc restart
* changelog: add entry for #14127
* taskrunner: fix restore logic.
The first implementation of the task runner restore process relied on
server data (`tr.Alloc().TerminalStatus()`) which may not be available
to the client at the time of restore.
It also had the incorrect code path. When restoring a dead task the
driver handle always needs to be clear cleanly using `clearDriverHandle`
otherwise, after exiting the MAIN loop, the task may be killed by
`tr.handleKill`.
The fix is to store the state of the Run() loop in the task runner local
client state: if the task runner ever exits this loop cleanly (not with
a shutdown) it will never be able to run again. So if the Run() loops
starts with this local state flag set, it must exit early.
This local state flag is also being checked on task restart requests. If
the task is "dead" and its Run() loop is not active it will never be
able to run again.
* address code review requests
* apply more code review changes
* taskrunner: add different Restart modes
Using the task event to differentiate between the allocrunner restart
methods proved to be confusing for developers to understand how it all
worked.
So instead of relying on the event type, this commit separated the logic
of restarting an taskRunner into two methods:
- `Restart` will retain the current behaviour and only will only restart
the task if it's currently running.
- `ForceRestart` is the new method where a `dead` task is allowed to
restart if its `Run()` method is still active. Callers will need to
restart the allocRunner taskCoordinator to make sure it will allow the
task to run again.
* minor fixes
2022-08-24 21:43:07 +00:00
|
|
|
|
|
|
|
// coordinatorStatePoststop is the terminal state of the FSM, and can be
|
|
|
|
// reached at any time.
|
|
|
|
if c.isAllocDone(states) {
|
|
|
|
return coordinatorStatePoststop
|
|
|
|
}
|
|
|
|
|
2022-08-22 22:38:49 +00:00
|
|
|
switch c.currentState {
|
|
|
|
case coordinatorStateInit:
|
|
|
|
if !c.isInitDone(states) {
|
|
|
|
return coordinatorStateInit
|
|
|
|
}
|
|
|
|
return coordinatorStatePrestart
|
|
|
|
|
|
|
|
case coordinatorStatePrestart:
|
|
|
|
if !c.isPrestartDone(states) {
|
|
|
|
return coordinatorStatePrestart
|
|
|
|
}
|
|
|
|
return coordinatorStateMain
|
|
|
|
|
|
|
|
case coordinatorStateMain:
|
|
|
|
if !c.isMainDone(states) {
|
|
|
|
return coordinatorStateMain
|
|
|
|
}
|
|
|
|
return coordinatorStatePoststart
|
|
|
|
|
|
|
|
case coordinatorStatePoststart:
|
|
|
|
if !c.isPoststartDone(states) {
|
|
|
|
return coordinatorStatePoststart
|
|
|
|
}
|
Task lifecycle restart (#14127)
* allocrunner: handle lifecycle when all tasks die
When all tasks die the Coordinator must transition to its terminal
state, coordinatorStatePoststop, to unblock poststop tasks. Since this
could happen at any time (for example, a prestart task dies), all states
must be able to transition to this terminal state.
* allocrunner: implement different alloc restarts
Add a new alloc restart mode where all tasks are restarted, even if they
have already exited. Also unifies the alloc restart logic to use the
implementation that restarts tasks concurrently and ignores
ErrTaskNotRunning errors since those are expected when restarting the
allocation.
* allocrunner: allow tasks to run again
Prevent the task runner Run() method from exiting to allow a dead task
to run again. When the task runner is signaled to restart, the function
will jump back to the MAIN loop and run it again.
The task runner determines if a task needs to run again based on two new
task events that were added to differentiate between a request to
restart a specific task, the tasks that are currently running, or all
tasks that have already run.
* api/cli: add support for all tasks alloc restart
Implement the new -all-tasks alloc restart CLI flag and its API
counterpar, AllTasks. The client endpoint calls the appropriate restart
method from the allocrunner depending on the restart parameters used.
* test: fix tasklifecycle Coordinator test
* allocrunner: kill taskrunners if all tasks are dead
When all non-poststop tasks are dead we need to kill the taskrunners so
we don't leak their goroutines, which are blocked in the alloc restart
loop. This also ensures the allocrunner exits on its own.
* taskrunner: fix tests that waited on WaitCh
Now that "dead" tasks may run again, the taskrunner Run() method will
not return when the task finishes running, so tests must wait for the
task state to be "dead" instead of using the WaitCh, since it won't be
closed until the taskrunner is killed.
* tests: add tests for all tasks alloc restart
* changelog: add entry for #14127
* taskrunner: fix restore logic.
The first implementation of the task runner restore process relied on
server data (`tr.Alloc().TerminalStatus()`) which may not be available
to the client at the time of restore.
It also had the incorrect code path. When restoring a dead task the
driver handle always needs to be clear cleanly using `clearDriverHandle`
otherwise, after exiting the MAIN loop, the task may be killed by
`tr.handleKill`.
The fix is to store the state of the Run() loop in the task runner local
client state: if the task runner ever exits this loop cleanly (not with
a shutdown) it will never be able to run again. So if the Run() loops
starts with this local state flag set, it must exit early.
This local state flag is also being checked on task restart requests. If
the task is "dead" and its Run() loop is not active it will never be
able to run again.
* address code review requests
* apply more code review changes
* taskrunner: add different Restart modes
Using the task event to differentiate between the allocrunner restart
methods proved to be confusing for developers to understand how it all
worked.
So instead of relying on the event type, this commit separated the logic
of restarting an taskRunner into two methods:
- `Restart` will retain the current behaviour and only will only restart
the task if it's currently running.
- `ForceRestart` is the new method where a `dead` task is allowed to
restart if its `Run()` method is still active. Callers will need to
restart the allocRunner taskCoordinator to make sure it will allow the
task to run again.
* minor fixes
2022-08-24 21:43:07 +00:00
|
|
|
return coordinatorStateWaitAlloc
|
2022-08-22 22:38:49 +00:00
|
|
|
|
Task lifecycle restart (#14127)
* allocrunner: handle lifecycle when all tasks die
When all tasks die the Coordinator must transition to its terminal
state, coordinatorStatePoststop, to unblock poststop tasks. Since this
could happen at any time (for example, a prestart task dies), all states
must be able to transition to this terminal state.
* allocrunner: implement different alloc restarts
Add a new alloc restart mode where all tasks are restarted, even if they
have already exited. Also unifies the alloc restart logic to use the
implementation that restarts tasks concurrently and ignores
ErrTaskNotRunning errors since those are expected when restarting the
allocation.
* allocrunner: allow tasks to run again
Prevent the task runner Run() method from exiting to allow a dead task
to run again. When the task runner is signaled to restart, the function
will jump back to the MAIN loop and run it again.
The task runner determines if a task needs to run again based on two new
task events that were added to differentiate between a request to
restart a specific task, the tasks that are currently running, or all
tasks that have already run.
* api/cli: add support for all tasks alloc restart
Implement the new -all-tasks alloc restart CLI flag and its API
counterpar, AllTasks. The client endpoint calls the appropriate restart
method from the allocrunner depending on the restart parameters used.
* test: fix tasklifecycle Coordinator test
* allocrunner: kill taskrunners if all tasks are dead
When all non-poststop tasks are dead we need to kill the taskrunners so
we don't leak their goroutines, which are blocked in the alloc restart
loop. This also ensures the allocrunner exits on its own.
* taskrunner: fix tests that waited on WaitCh
Now that "dead" tasks may run again, the taskrunner Run() method will
not return when the task finishes running, so tests must wait for the
task state to be "dead" instead of using the WaitCh, since it won't be
closed until the taskrunner is killed.
* tests: add tests for all tasks alloc restart
* changelog: add entry for #14127
* taskrunner: fix restore logic.
The first implementation of the task runner restore process relied on
server data (`tr.Alloc().TerminalStatus()`) which may not be available
to the client at the time of restore.
It also had the incorrect code path. When restoring a dead task the
driver handle always needs to be clear cleanly using `clearDriverHandle`
otherwise, after exiting the MAIN loop, the task may be killed by
`tr.handleKill`.
The fix is to store the state of the Run() loop in the task runner local
client state: if the task runner ever exits this loop cleanly (not with
a shutdown) it will never be able to run again. So if the Run() loops
starts with this local state flag set, it must exit early.
This local state flag is also being checked on task restart requests. If
the task is "dead" and its Run() loop is not active it will never be
able to run again.
* address code review requests
* apply more code review changes
* taskrunner: add different Restart modes
Using the task event to differentiate between the allocrunner restart
methods proved to be confusing for developers to understand how it all
worked.
So instead of relying on the event type, this commit separated the logic
of restarting an taskRunner into two methods:
- `Restart` will retain the current behaviour and only will only restart
the task if it's currently running.
- `ForceRestart` is the new method where a `dead` task is allowed to
restart if its `Run()` method is still active. Callers will need to
restart the allocRunner taskCoordinator to make sure it will allow the
task to run again.
* minor fixes
2022-08-24 21:43:07 +00:00
|
|
|
case coordinatorStateWaitAlloc:
|
|
|
|
if !c.isAllocDone(states) {
|
|
|
|
return coordinatorStateWaitAlloc
|
2022-08-22 22:38:49 +00:00
|
|
|
}
|
|
|
|
return coordinatorStatePoststop
|
|
|
|
|
|
|
|
case coordinatorStatePoststop:
|
|
|
|
return coordinatorStatePoststop
|
|
|
|
}
|
|
|
|
|
|
|
|
// If the code reaches here it's a programming error, since the switch
|
|
|
|
// statement should cover all possible states and return the next state.
|
|
|
|
panic(fmt.Sprintf("unexpected state %s", c.currentState))
|
|
|
|
}
|
|
|
|
|
|
|
|
// enterStateLocked updates the current state of the Coordinator FSM and
|
|
|
|
// executes any action necessary for the state transition.
|
|
|
|
// The currentStateLock must be held before calling this method.
|
|
|
|
func (c *Coordinator) enterStateLocked(state coordinatorState) {
|
|
|
|
c.logger.Trace("state transition", "from", c.currentState, "to", state)
|
|
|
|
|
|
|
|
switch state {
|
|
|
|
case coordinatorStateInit:
|
|
|
|
c.block(lifecycleStagePrestartEphemeral)
|
|
|
|
c.block(lifecycleStagePrestartSidecar)
|
|
|
|
c.block(lifecycleStageMain)
|
|
|
|
c.block(lifecycleStagePoststartEphemeral)
|
|
|
|
c.block(lifecycleStagePoststartSidecar)
|
|
|
|
c.block(lifecycleStagePoststop)
|
|
|
|
|
|
|
|
case coordinatorStatePrestart:
|
|
|
|
c.block(lifecycleStageMain)
|
|
|
|
c.block(lifecycleStagePoststartEphemeral)
|
|
|
|
c.block(lifecycleStagePoststartSidecar)
|
|
|
|
c.block(lifecycleStagePoststop)
|
|
|
|
|
|
|
|
c.allow(lifecycleStagePrestartEphemeral)
|
|
|
|
c.allow(lifecycleStagePrestartSidecar)
|
|
|
|
|
|
|
|
case coordinatorStateMain:
|
|
|
|
c.block(lifecycleStagePrestartEphemeral)
|
|
|
|
c.block(lifecycleStagePoststartEphemeral)
|
|
|
|
c.block(lifecycleStagePoststartSidecar)
|
|
|
|
c.block(lifecycleStagePoststop)
|
|
|
|
|
|
|
|
c.allow(lifecycleStagePrestartSidecar)
|
|
|
|
c.allow(lifecycleStageMain)
|
|
|
|
|
|
|
|
case coordinatorStatePoststart:
|
|
|
|
c.block(lifecycleStagePrestartEphemeral)
|
|
|
|
c.block(lifecycleStagePoststop)
|
|
|
|
|
|
|
|
c.allow(lifecycleStagePrestartSidecar)
|
|
|
|
c.allow(lifecycleStageMain)
|
|
|
|
c.allow(lifecycleStagePoststartEphemeral)
|
|
|
|
c.allow(lifecycleStagePoststartSidecar)
|
|
|
|
|
Task lifecycle restart (#14127)
* allocrunner: handle lifecycle when all tasks die
When all tasks die the Coordinator must transition to its terminal
state, coordinatorStatePoststop, to unblock poststop tasks. Since this
could happen at any time (for example, a prestart task dies), all states
must be able to transition to this terminal state.
* allocrunner: implement different alloc restarts
Add a new alloc restart mode where all tasks are restarted, even if they
have already exited. Also unifies the alloc restart logic to use the
implementation that restarts tasks concurrently and ignores
ErrTaskNotRunning errors since those are expected when restarting the
allocation.
* allocrunner: allow tasks to run again
Prevent the task runner Run() method from exiting to allow a dead task
to run again. When the task runner is signaled to restart, the function
will jump back to the MAIN loop and run it again.
The task runner determines if a task needs to run again based on two new
task events that were added to differentiate between a request to
restart a specific task, the tasks that are currently running, or all
tasks that have already run.
* api/cli: add support for all tasks alloc restart
Implement the new -all-tasks alloc restart CLI flag and its API
counterpar, AllTasks. The client endpoint calls the appropriate restart
method from the allocrunner depending on the restart parameters used.
* test: fix tasklifecycle Coordinator test
* allocrunner: kill taskrunners if all tasks are dead
When all non-poststop tasks are dead we need to kill the taskrunners so
we don't leak their goroutines, which are blocked in the alloc restart
loop. This also ensures the allocrunner exits on its own.
* taskrunner: fix tests that waited on WaitCh
Now that "dead" tasks may run again, the taskrunner Run() method will
not return when the task finishes running, so tests must wait for the
task state to be "dead" instead of using the WaitCh, since it won't be
closed until the taskrunner is killed.
* tests: add tests for all tasks alloc restart
* changelog: add entry for #14127
* taskrunner: fix restore logic.
The first implementation of the task runner restore process relied on
server data (`tr.Alloc().TerminalStatus()`) which may not be available
to the client at the time of restore.
It also had the incorrect code path. When restoring a dead task the
driver handle always needs to be clear cleanly using `clearDriverHandle`
otherwise, after exiting the MAIN loop, the task may be killed by
`tr.handleKill`.
The fix is to store the state of the Run() loop in the task runner local
client state: if the task runner ever exits this loop cleanly (not with
a shutdown) it will never be able to run again. So if the Run() loops
starts with this local state flag set, it must exit early.
This local state flag is also being checked on task restart requests. If
the task is "dead" and its Run() loop is not active it will never be
able to run again.
* address code review requests
* apply more code review changes
* taskrunner: add different Restart modes
Using the task event to differentiate between the allocrunner restart
methods proved to be confusing for developers to understand how it all
worked.
So instead of relying on the event type, this commit separated the logic
of restarting an taskRunner into two methods:
- `Restart` will retain the current behaviour and only will only restart
the task if it's currently running.
- `ForceRestart` is the new method where a `dead` task is allowed to
restart if its `Run()` method is still active. Callers will need to
restart the allocRunner taskCoordinator to make sure it will allow the
task to run again.
* minor fixes
2022-08-24 21:43:07 +00:00
|
|
|
case coordinatorStateWaitAlloc:
|
2022-08-22 22:38:49 +00:00
|
|
|
c.block(lifecycleStagePrestartEphemeral)
|
|
|
|
c.block(lifecycleStagePoststartEphemeral)
|
|
|
|
c.block(lifecycleStagePoststop)
|
|
|
|
|
|
|
|
c.allow(lifecycleStagePrestartSidecar)
|
|
|
|
c.allow(lifecycleStageMain)
|
|
|
|
c.allow(lifecycleStagePoststartSidecar)
|
|
|
|
|
|
|
|
case coordinatorStatePoststop:
|
|
|
|
c.block(lifecycleStagePrestartEphemeral)
|
|
|
|
c.block(lifecycleStagePrestartSidecar)
|
|
|
|
c.block(lifecycleStageMain)
|
|
|
|
c.block(lifecycleStagePoststartEphemeral)
|
|
|
|
c.block(lifecycleStagePoststartSidecar)
|
|
|
|
|
|
|
|
c.allow(lifecycleStagePoststop)
|
|
|
|
}
|
|
|
|
|
|
|
|
c.currentState = state
|
|
|
|
}
|
|
|
|
|
|
|
|
// isInitDone returns true when the following conditions are met:
|
|
|
|
// - all tasks are in the "pending" state.
|
|
|
|
func (c *Coordinator) isInitDone(states map[string]*structs.TaskState) bool {
|
|
|
|
for _, task := range states {
|
|
|
|
if task.State != structs.TaskStatePending {
|
|
|
|
return false
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return true
|
|
|
|
}
|
|
|
|
|
|
|
|
// isPrestartDone returns true when the following conditions are met:
|
|
|
|
// - there is at least one prestart task
|
Task lifecycle restart (#14127)
* allocrunner: handle lifecycle when all tasks die
When all tasks die the Coordinator must transition to its terminal
state, coordinatorStatePoststop, to unblock poststop tasks. Since this
could happen at any time (for example, a prestart task dies), all states
must be able to transition to this terminal state.
* allocrunner: implement different alloc restarts
Add a new alloc restart mode where all tasks are restarted, even if they
have already exited. Also unifies the alloc restart logic to use the
implementation that restarts tasks concurrently and ignores
ErrTaskNotRunning errors since those are expected when restarting the
allocation.
* allocrunner: allow tasks to run again
Prevent the task runner Run() method from exiting to allow a dead task
to run again. When the task runner is signaled to restart, the function
will jump back to the MAIN loop and run it again.
The task runner determines if a task needs to run again based on two new
task events that were added to differentiate between a request to
restart a specific task, the tasks that are currently running, or all
tasks that have already run.
* api/cli: add support for all tasks alloc restart
Implement the new -all-tasks alloc restart CLI flag and its API
counterpar, AllTasks. The client endpoint calls the appropriate restart
method from the allocrunner depending on the restart parameters used.
* test: fix tasklifecycle Coordinator test
* allocrunner: kill taskrunners if all tasks are dead
When all non-poststop tasks are dead we need to kill the taskrunners so
we don't leak their goroutines, which are blocked in the alloc restart
loop. This also ensures the allocrunner exits on its own.
* taskrunner: fix tests that waited on WaitCh
Now that "dead" tasks may run again, the taskrunner Run() method will
not return when the task finishes running, so tests must wait for the
task state to be "dead" instead of using the WaitCh, since it won't be
closed until the taskrunner is killed.
* tests: add tests for all tasks alloc restart
* changelog: add entry for #14127
* taskrunner: fix restore logic.
The first implementation of the task runner restore process relied on
server data (`tr.Alloc().TerminalStatus()`) which may not be available
to the client at the time of restore.
It also had the incorrect code path. When restoring a dead task the
driver handle always needs to be clear cleanly using `clearDriverHandle`
otherwise, after exiting the MAIN loop, the task may be killed by
`tr.handleKill`.
The fix is to store the state of the Run() loop in the task runner local
client state: if the task runner ever exits this loop cleanly (not with
a shutdown) it will never be able to run again. So if the Run() loops
starts with this local state flag set, it must exit early.
This local state flag is also being checked on task restart requests. If
the task is "dead" and its Run() loop is not active it will never be
able to run again.
* address code review requests
* apply more code review changes
* taskrunner: add different Restart modes
Using the task event to differentiate between the allocrunner restart
methods proved to be confusing for developers to understand how it all
worked.
So instead of relying on the event type, this commit separated the logic
of restarting an taskRunner into two methods:
- `Restart` will retain the current behaviour and only will only restart
the task if it's currently running.
- `ForceRestart` is the new method where a `dead` task is allowed to
restart if its `Run()` method is still active. Callers will need to
restart the allocRunner taskCoordinator to make sure it will allow the
task to run again.
* minor fixes
2022-08-24 21:43:07 +00:00
|
|
|
// - all ephemeral prestart tasks are successful.
|
2022-08-22 22:38:49 +00:00
|
|
|
// - no ephemeral prestart task has failed.
|
|
|
|
// - all prestart sidecar tasks are running.
|
|
|
|
func (c *Coordinator) isPrestartDone(states map[string]*structs.TaskState) bool {
|
|
|
|
if !c.hasPrestart() {
|
|
|
|
return true
|
|
|
|
}
|
|
|
|
|
|
|
|
for _, task := range c.tasksByLifecycle[lifecycleStagePrestartEphemeral] {
|
Task lifecycle restart (#14127)
* allocrunner: handle lifecycle when all tasks die
When all tasks die the Coordinator must transition to its terminal
state, coordinatorStatePoststop, to unblock poststop tasks. Since this
could happen at any time (for example, a prestart task dies), all states
must be able to transition to this terminal state.
* allocrunner: implement different alloc restarts
Add a new alloc restart mode where all tasks are restarted, even if they
have already exited. Also unifies the alloc restart logic to use the
implementation that restarts tasks concurrently and ignores
ErrTaskNotRunning errors since those are expected when restarting the
allocation.
* allocrunner: allow tasks to run again
Prevent the task runner Run() method from exiting to allow a dead task
to run again. When the task runner is signaled to restart, the function
will jump back to the MAIN loop and run it again.
The task runner determines if a task needs to run again based on two new
task events that were added to differentiate between a request to
restart a specific task, the tasks that are currently running, or all
tasks that have already run.
* api/cli: add support for all tasks alloc restart
Implement the new -all-tasks alloc restart CLI flag and its API
counterpar, AllTasks. The client endpoint calls the appropriate restart
method from the allocrunner depending on the restart parameters used.
* test: fix tasklifecycle Coordinator test
* allocrunner: kill taskrunners if all tasks are dead
When all non-poststop tasks are dead we need to kill the taskrunners so
we don't leak their goroutines, which are blocked in the alloc restart
loop. This also ensures the allocrunner exits on its own.
* taskrunner: fix tests that waited on WaitCh
Now that "dead" tasks may run again, the taskrunner Run() method will
not return when the task finishes running, so tests must wait for the
task state to be "dead" instead of using the WaitCh, since it won't be
closed until the taskrunner is killed.
* tests: add tests for all tasks alloc restart
* changelog: add entry for #14127
* taskrunner: fix restore logic.
The first implementation of the task runner restore process relied on
server data (`tr.Alloc().TerminalStatus()`) which may not be available
to the client at the time of restore.
It also had the incorrect code path. When restoring a dead task the
driver handle always needs to be clear cleanly using `clearDriverHandle`
otherwise, after exiting the MAIN loop, the task may be killed by
`tr.handleKill`.
The fix is to store the state of the Run() loop in the task runner local
client state: if the task runner ever exits this loop cleanly (not with
a shutdown) it will never be able to run again. So if the Run() loops
starts with this local state flag set, it must exit early.
This local state flag is also being checked on task restart requests. If
the task is "dead" and its Run() loop is not active it will never be
able to run again.
* address code review requests
* apply more code review changes
* taskrunner: add different Restart modes
Using the task event to differentiate between the allocrunner restart
methods proved to be confusing for developers to understand how it all
worked.
So instead of relying on the event type, this commit separated the logic
of restarting an taskRunner into two methods:
- `Restart` will retain the current behaviour and only will only restart
the task if it's currently running.
- `ForceRestart` is the new method where a `dead` task is allowed to
restart if its `Run()` method is still active. Callers will need to
restart the allocRunner taskCoordinator to make sure it will allow the
task to run again.
* minor fixes
2022-08-24 21:43:07 +00:00
|
|
|
if !states[task].Successful() {
|
2022-08-22 22:38:49 +00:00
|
|
|
return false
|
|
|
|
}
|
|
|
|
}
|
|
|
|
for _, task := range c.tasksByLifecycle[lifecycleStagePrestartSidecar] {
|
|
|
|
if states[task].State != structs.TaskStateRunning {
|
|
|
|
return false
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return true
|
|
|
|
}
|
|
|
|
|
|
|
|
// isMainDone returns true when the following conditions are met:
|
|
|
|
// - there is at least one main task.
|
|
|
|
// - all main tasks are no longer "pending".
|
|
|
|
func (c *Coordinator) isMainDone(states map[string]*structs.TaskState) bool {
|
|
|
|
if !c.hasMain() {
|
|
|
|
return true
|
|
|
|
}
|
|
|
|
|
|
|
|
for _, task := range c.tasksByLifecycle[lifecycleStageMain] {
|
|
|
|
if states[task].State == structs.TaskStatePending {
|
|
|
|
return false
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return true
|
|
|
|
}
|
|
|
|
|
|
|
|
// isPoststartDone returns true when the following conditions are met:
|
|
|
|
// - there is at least one poststart task.
|
|
|
|
// - all ephemeral poststart tasks are in the "dead" state.
|
|
|
|
func (c *Coordinator) isPoststartDone(states map[string]*structs.TaskState) bool {
|
|
|
|
if !c.hasPoststart() {
|
|
|
|
return true
|
|
|
|
}
|
|
|
|
|
|
|
|
for _, task := range c.tasksByLifecycle[lifecycleStagePoststartEphemeral] {
|
|
|
|
if states[task].State != structs.TaskStateDead {
|
|
|
|
return false
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return true
|
|
|
|
}
|
|
|
|
|
Task lifecycle restart (#14127)
* allocrunner: handle lifecycle when all tasks die
When all tasks die the Coordinator must transition to its terminal
state, coordinatorStatePoststop, to unblock poststop tasks. Since this
could happen at any time (for example, a prestart task dies), all states
must be able to transition to this terminal state.
* allocrunner: implement different alloc restarts
Add a new alloc restart mode where all tasks are restarted, even if they
have already exited. Also unifies the alloc restart logic to use the
implementation that restarts tasks concurrently and ignores
ErrTaskNotRunning errors since those are expected when restarting the
allocation.
* allocrunner: allow tasks to run again
Prevent the task runner Run() method from exiting to allow a dead task
to run again. When the task runner is signaled to restart, the function
will jump back to the MAIN loop and run it again.
The task runner determines if a task needs to run again based on two new
task events that were added to differentiate between a request to
restart a specific task, the tasks that are currently running, or all
tasks that have already run.
* api/cli: add support for all tasks alloc restart
Implement the new -all-tasks alloc restart CLI flag and its API
counterpart, AllTasks. The client endpoint calls the appropriate restart
method from the allocrunner depending on the restart parameters used.
* test: fix tasklifecycle Coordinator test
* allocrunner: kill taskrunners if all tasks are dead
When all non-poststop tasks are dead we need to kill the taskrunners so
we don't leak their goroutines, which are blocked in the alloc restart
loop. This also ensures the allocrunner exits on its own.
* taskrunner: fix tests that waited on WaitCh
Now that "dead" tasks may run again, the taskrunner Run() method will
not return when the task finishes running, so tests must wait for the
task state to be "dead" instead of using the WaitCh, since it won't be
closed until the taskrunner is killed.
* tests: add tests for all tasks alloc restart
* changelog: add entry for #14127
* taskrunner: fix restore logic.
The first implementation of the task runner restore process relied on
server data (`tr.Alloc().TerminalStatus()`) which may not be available
to the client at the time of restore.
It also had the incorrect code path. When restoring a dead task the
driver handle always needs to be cleared cleanly using `clearDriverHandle`
otherwise, after exiting the MAIN loop, the task may be killed by
`tr.handleKill`.
The fix is to store the state of the Run() loop in the task runner local
client state: if the task runner ever exits this loop cleanly (not with
a shutdown) it will never be able to run again. So if the Run() loops
starts with this local state flag set, it must exit early.
This local state flag is also being checked on task restart requests. If
the task is "dead" and its Run() loop is not active it will never be
able to run again.
* address code review requests
* apply more code review changes
* taskrunner: add different Restart modes
Using the task event to differentiate between the allocrunner restart
methods proved to be confusing for developers to understand how it all
worked.
So instead of relying on the event type, this commit separated the logic
of restarting an taskRunner into two methods:
- `Restart` will retain the current behaviour and only will only restart
the task if it's currently running.
- `ForceRestart` is the new method where a `dead` task is allowed to
restart if its `Run()` method is still active. Callers will need to
restart the allocRunner taskCoordinator to make sure it will allow the
task to run again.
* minor fixes
2022-08-24 21:43:07 +00:00
|
|
|
// isAllocDone returns true when the following conditions are met:
|
|
|
|
// - all non-poststop tasks are in the "dead" state.
|
|
|
|
func (c *Coordinator) isAllocDone(states map[string]*structs.TaskState) bool {
|
2022-08-22 22:38:49 +00:00
|
|
|
for lifecycle, tasks := range c.tasksByLifecycle {
|
|
|
|
if lifecycle == lifecycleStagePoststop {
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
|
|
|
|
for _, task := range tasks {
|
|
|
|
if states[task].State != structs.TaskStateDead {
|
|
|
|
return false
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return true
|
|
|
|
}
|
|
|
|
|
|
|
|
func (c *Coordinator) hasPrestart() bool {
|
|
|
|
return len(c.tasksByLifecycle[lifecycleStagePrestartEphemeral])+
|
|
|
|
len(c.tasksByLifecycle[lifecycleStagePrestartSidecar]) > 0
|
|
|
|
}
|
|
|
|
|
|
|
|
func (c *Coordinator) hasMain() bool {
|
|
|
|
return len(c.tasksByLifecycle[lifecycleStageMain]) > 0
|
|
|
|
}
|
|
|
|
|
|
|
|
func (c *Coordinator) hasPoststart() bool {
|
|
|
|
return len(c.tasksByLifecycle[lifecycleStagePoststartEphemeral])+
|
|
|
|
len(c.tasksByLifecycle[lifecycleStagePoststartSidecar]) > 0
|
|
|
|
}
|
|
|
|
|
|
|
|
func (c *Coordinator) hasPoststop() bool {
|
|
|
|
return len(c.tasksByLifecycle[lifecycleStagePoststop]) > 0
|
|
|
|
}
|
|
|
|
|
|
|
|
// block is used to block the execution of tasks in the given lifecycle stage.
|
|
|
|
func (c *Coordinator) block(lifecycle lifecycleStage) {
|
|
|
|
gate := c.gates[lifecycle]
|
|
|
|
if gate != nil {
|
|
|
|
gate.Close()
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// allows is used to allow the execution of tasks in the given lifecycle stage.
|
|
|
|
func (c *Coordinator) allow(lifecycle lifecycleStage) {
|
|
|
|
gate := c.gates[lifecycle]
|
|
|
|
if gate != nil {
|
|
|
|
gate.Open()
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// indexTasksByLifecycle generates a map that groups tasks by their lifecycle
|
|
|
|
// configuration. This makes it easier to retrieve tasks by these groups or to
|
|
|
|
// determine if a task has a certain lifecycle configuration.
|
|
|
|
func indexTasksByLifecycle(tasks []*structs.Task) map[lifecycleStage][]string {
|
|
|
|
index := make(map[lifecycleStage][]string)
|
|
|
|
|
|
|
|
for _, task := range tasks {
|
|
|
|
lifecycle := taskLifecycleStage(task)
|
|
|
|
|
|
|
|
if _, ok := index[lifecycle]; !ok {
|
|
|
|
index[lifecycle] = []string{}
|
|
|
|
}
|
|
|
|
index[lifecycle] = append(index[lifecycle], task.Name)
|
|
|
|
}
|
|
|
|
|
|
|
|
return index
|
|
|
|
}
|
|
|
|
|
|
|
|
// taskLifecycleStage returns the relevant lifecycle stage for a given task.
|
|
|
|
func taskLifecycleStage(task *structs.Task) lifecycleStage {
|
|
|
|
if task.IsPrestart() {
|
|
|
|
if task.Lifecycle.Sidecar {
|
|
|
|
return lifecycleStagePrestartSidecar
|
|
|
|
}
|
|
|
|
return lifecycleStagePrestartEphemeral
|
|
|
|
} else if task.IsPoststart() {
|
|
|
|
if task.Lifecycle.Sidecar {
|
|
|
|
return lifecycleStagePoststartSidecar
|
|
|
|
}
|
|
|
|
return lifecycleStagePoststartEphemeral
|
|
|
|
} else if task.IsPoststop() {
|
|
|
|
return lifecycleStagePoststop
|
|
|
|
}
|
|
|
|
|
|
|
|
// Assume task is "main" by default.
|
|
|
|
return lifecycleStageMain
|
|
|
|
}
|