// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: MPL-2.0

package api

import (
	"context"
	"errors"
	"io"
	"sort"
	"strings"
	"time"
)

var (
	// NodeDownErr marks an operation as not able to complete since the node is
	// down.
	NodeDownErr = errors.New("node down")
)

const (
	AllocDesiredStatusRun   = "run"   // Allocation should run
	AllocDesiredStatusStop  = "stop"  // Allocation should stop
	AllocDesiredStatusEvict = "evict" // Allocation should stop, and was evicted
)

const (
	AllocClientStatusPending  = "pending"
	AllocClientStatusRunning  = "running"
	AllocClientStatusComplete = "complete"
	AllocClientStatusFailed   = "failed"
	AllocClientStatusLost     = "lost"
)

const (
	AllocRestartReasonWithinPolicy = "Restart within policy"
)

// Allocations is used to query the alloc-related endpoints.
type Allocations struct {
	client *Client
}

// Allocations returns a handle on the allocs endpoints.
func (c *Client) Allocations() *Allocations {
	return &Allocations{client: c}
}

// List returns a list of all of the allocations.
func (a *Allocations) List(q *QueryOptions) ([]*AllocationListStub, *QueryMeta, error) {
	var resp []*AllocationListStub
	qm, err := a.client.query("/v1/allocations", &resp, q)
	if err != nil {
		return nil, nil, err
	}
	sort.Sort(AllocIndexSort(resp))
	return resp, qm, nil
}
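
// A minimal sketch of listing allocations and filtering by client status,
// assuming "client" is an initialized *Client (the namespace and names used
// here are illustrative):
//
//	stubs, _, err := client.Allocations().List(&QueryOptions{Namespace: "default"})
//	if err != nil {
//		return err
//	}
//	for _, stub := range stubs {
//		if stub.ClientStatus == AllocClientStatusRunning {
//			fmt.Println(stub.ID, stub.Name)
//		}
//	}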

// PrefixList returns allocations whose IDs match the given prefix.
func (a *Allocations) PrefixList(prefix string) ([]*AllocationListStub, *QueryMeta, error) {
	return a.List(&QueryOptions{Prefix: prefix})
}

// Info is used to retrieve a single allocation.
func (a *Allocations) Info(allocID string, q *QueryOptions) (*Allocation, *QueryMeta, error) {
	var resp Allocation
	qm, err := a.client.query("/v1/allocation/"+allocID, &resp, q)
	if err != nil {
		return nil, nil, err
	}
	return &resp, qm, nil
}
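
// Stubs returned by List carry only a subset of an allocation's fields, so
// callers that need the full record (for example the Job) typically follow up
// with Info. A minimal sketch, assuming "client" is an initialized *Client and
// "stub" came from a previous List call:
//
//	alloc, _, err := client.Allocations().Info(stub.ID, &QueryOptions{Namespace: stub.Namespace})
//	if err != nil {
//		return err
//	}
//	fmt.Println(alloc.ClientStatus, len(alloc.TaskStates))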

// Exec is used to execute a command inside a running task. The command is to run inside
// the task environment.
//
// The parameters are:
// - ctx: context to set deadlines or timeout
// - allocation: the allocation to execute command inside
// - task: the task's name to execute command in
// - tty: indicates whether to start a pseudo-tty for the command
// - stdin, stdout, stderr: the std io to pass to command.
//      If tty is true, then streams need to point to a tty that's alive for the whole process
// - terminalSizeCh: A channel to send new tty terminal sizes
//
// The call blocks until command terminates (or an error occurs), and returns the exit code.
//
// Note: for cluster topologies where API consumers don't have network access to
// Nomad clients, set api.ClientConnTimeout to a small value (ex 1ms) to avoid
// long pauses on this API call.
func (a *Allocations) Exec(ctx context.Context,
	alloc *Allocation, task string, tty bool, command []string,
	stdin io.Reader, stdout, stderr io.Writer,
	terminalSizeCh <-chan TerminalSize, q *QueryOptions) (exitCode int, err error) {

	s := &execSession{
		client:  a.client,
		alloc:   alloc,
		task:    task,
		tty:     tty,
		command: command,

		stdin:  stdin,
		stdout: stdout,
		stderr: stderr,

		terminalSizeCh: terminalSizeCh,
		q:              q,
	}

	return s.run(ctx)
}
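
// A minimal sketch of running a one-off command in a task without a TTY,
// assuming "client" is an initialized *Client, "alloc" was fetched via Info,
// and the task and command names are illustrative (with tty set to false the
// terminal-size channel can simply stay silent):
//
//	sizeCh := make(chan TerminalSize)
//	exitCode, err := client.Allocations().Exec(context.TODO(), alloc, "redis",
//		false, []string{"redis-cli", "ping"},
//		strings.NewReader(""), os.Stdout, os.Stderr, sizeCh, nil)
//	if err != nil {
//		return err
//	}
//	fmt.Println("command exited with code", exitCode)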

// Stats gets resource usage statistics for an allocation.
//
// Note: for cluster topologies where API consumers don't have network access to
// Nomad clients, set api.ClientConnTimeout to a small value (ex 1ms) to avoid
// long pauses on this API call.
func (a *Allocations) Stats(alloc *Allocation, q *QueryOptions) (*AllocResourceUsage, error) {
	var resp AllocResourceUsage
	_, err := a.client.query("/v1/client/allocation/"+alloc.ID+"/stats", &resp, q)
	return &resp, err
}
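
// Stats returns a point-in-time snapshot for the whole allocation; a minimal
// sketch, assuming "client" and "alloc" as in the examples above (which
// AllocResourceUsage fields get consulted is up to the caller):
//
//	usage, err := client.Allocations().Stats(alloc, nil)
//	if err != nil {
//		return err
//	}
//	_ = usage // inspect allocation- and task-level usage as needed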

// Checks gets status information for nomad service checks that exist in the allocation.
//
// Note: for cluster topologies where API consumers don't have network access to
// Nomad clients, set api.ClientConnTimeout to a small value (ex 1ms) to avoid
// long pauses on this API call.
func (a *Allocations) Checks(allocID string, q *QueryOptions) (AllocCheckStatuses, error) {
	var resp AllocCheckStatuses
	_, err := a.client.query("/v1/client/allocation/"+allocID+"/checks", &resp, q)
	return resp, err
}

// GC forces a garbage collection of client state for an allocation.
//
// Note: for cluster topologies where API consumers don't have network access to
// Nomad clients, set api.ClientConnTimeout to a small value (ex 1ms) to avoid
// long pauses on this API call.
func (a *Allocations) GC(alloc *Allocation, q *QueryOptions) error {
	var resp struct{}
	_, err := a.client.query("/v1/client/allocation/"+alloc.ID+"/gc", &resp, nil)
	return err
}

// Restart restarts the tasks that are currently running or a specific task if
// taskName is provided. An error is returned if the task to be restarted is
// not running.
//
// Note: for cluster topologies where API consumers don't have network access to
// Nomad clients, set api.ClientConnTimeout to a small value (ex 1ms) to avoid
// long pauses on this API call.
func (a *Allocations) Restart(alloc *Allocation, taskName string, q *QueryOptions) error {
	req := AllocationRestartRequest{
		TaskName: taskName,
	}

	var resp struct{}
	_, err := a.client.putQuery("/v1/client/allocation/"+alloc.ID+"/restart", &req, &resp, q)
	return err
}
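
// Restart only acts on tasks that are currently running. A minimal sketch of
// restarting a single task by name, assuming "client" and "alloc" as in the
// examples above and a task that is actually named "web":
//
//	if err := client.Allocations().Restart(alloc, "web", nil); err != nil {
//		return err
//	}
//
// An empty taskName applies the restart to the allocation's running tasks.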

// RestartAllTasks restarts all tasks in the allocation, regardless of
// lifecycle type or state. Tasks will restart following their lifecycle order.
//
// Note: for cluster topologies where API consumers don't have network access to
// Nomad clients, set api.ClientConnTimeout to a small value (ex 1ms) to avoid
// long pauses on this API call.
//
// DEPRECATED: This method will be removed in 1.6.0
func (a *Allocations) RestartAllTasks(alloc *Allocation, q *QueryOptions) error {
	req := AllocationRestartRequest{
		AllTasks: true,
	}

	var resp struct{}
	_, err := a.client.putQuery("/v1/client/allocation/"+alloc.ID+"/restart", &req, &resp, q)
	return err
}

// Stop stops an allocation.
//
// Note: for cluster topologies where API consumers don't have network access to
// Nomad clients, set api.ClientConnTimeout to a small value (ex 1ms) to avoid
// long pauses on this API call.
//
// BREAKING: This method will have the following signature in 1.6.0
// func (a *Allocations) Stop(allocID string, w *WriteOptions) (*AllocStopResponse, error) {
func (a *Allocations) Stop(alloc *Allocation, q *QueryOptions) (*AllocStopResponse, error) {
	// COMPAT: Remove in 1.6.0
	var w *WriteOptions
	if q != nil {
		w = &WriteOptions{
			Region:    q.Region,
			Namespace: q.Namespace,
			AuthToken: q.AuthToken,
			Headers:   q.Headers,
			ctx:       q.ctx,
		}
	}

	var resp AllocStopResponse
	wm, err := a.client.put("/v1/allocation/"+alloc.ID+"/stop", nil, &resp, w)
	if wm != nil {
		resp.LastIndex = wm.LastIndex
		resp.RequestTime = wm.RequestTime
	}

	return &resp, err
}

// AllocStopResponse is the response to an `AllocStopRequest`
type AllocStopResponse struct {
	// EvalID is the ID of the follow-up evaluation for the rescheduled alloc.
	EvalID string

	WriteMeta
}
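
// Stopping an allocation returns the follow-up evaluation that reschedules it,
// which callers often use to track placement of the replacement allocation. A
// minimal sketch, assuming "client" and "alloc" as in the examples above:
//
//	stopResp, err := client.Allocations().Stop(alloc, nil)
//	if err != nil {
//		return err
//	}
//	fmt.Println("follow-up eval:", stopResp.EvalID)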

// Signal sends a signal to the allocation.
//
// Note: for cluster topologies where API consumers don't have network access to
// Nomad clients, set api.ClientConnTimeout to a small value (ex 1ms) to avoid
// long pauses on this API call.
func (a *Allocations) Signal(alloc *Allocation, q *QueryOptions, task, signal string) error {
	req := AllocSignalRequest{
		Signal: signal,
		Task:   task,
	}

	var resp GenericResponse
	_, err := a.client.putQuery("/v1/client/allocation/"+alloc.ID+"/signal", &req, &resp, q)
	return err
}
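
// A minimal sketch of delivering an OS signal to one task in the allocation,
// assuming "client" and "alloc" as in the examples above (the task and signal
// names are illustrative):
//
//	if err := client.Allocations().Signal(alloc, nil, "web", "SIGHUP"); err != nil {
//		return err
//	}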

// Services is used to return a list of service registrations associated with the
// specified allocID.
func (a *Allocations) Services(allocID string, q *QueryOptions) ([]*ServiceRegistration, *QueryMeta, error) {
	var resp []*ServiceRegistration
	qm, err := a.client.query("/v1/allocation/"+allocID+"/services", &resp, q)
	return resp, qm, err
}

// Allocation is used for serialization of allocations.
type Allocation struct {
	ID                    string
	Namespace             string
	EvalID                string
	Name                  string
	NodeID                string
	NodeName              string
	JobID                 string
	Job                   *Job
	TaskGroup             string
	Resources             *Resources
	TaskResources         map[string]*Resources
	AllocatedResources    *AllocatedResources
	Services              map[string]string
	Metrics               *AllocationMetric
	DesiredStatus         string
	DesiredDescription    string
	DesiredTransition     DesiredTransition
	ClientStatus          string
	ClientDescription     string
	TaskStates            map[string]*TaskState
	DeploymentID          string
	DeploymentStatus      *AllocDeploymentStatus
	FollowupEvalID        string
	PreviousAllocation    string
	NextAllocation        string
	RescheduleTracker     *RescheduleTracker
	PreemptedAllocations  []string
	PreemptedByAllocation string
	CreateIndex           uint64
	ModifyIndex           uint64
	AllocModifyIndex      uint64
	CreateTime            int64
	ModifyTime            int64
}

// AllocationMetric is used to deserialize allocation metrics.
type AllocationMetric struct {
	NodesEvaluated     int
	NodesFiltered      int
	NodesInPool        int
	NodesAvailable     map[string]int
	ClassFiltered      map[string]int
	ConstraintFiltered map[string]int
	NodesExhausted     int
	ClassExhausted     map[string]int
	DimensionExhausted map[string]int
	QuotaExhausted     []string
	ResourcesExhausted map[string]*Resources
	// Deprecated, replaced with ScoreMetaData
	Scores            map[string]float64
	AllocationTime    time.Duration
	CoalescedFailures int
	ScoreMetaData     []*NodeScoreMeta
}

// NodeScoreMeta is used to serialize node scoring metadata
// displayed in the CLI during verbose mode
type NodeScoreMeta struct {
	NodeID    string
	Scores    map[string]float64
	NormScore float64
}

// Stub returns a list stub for the allocation
func (a *Allocation) Stub() *AllocationListStub {
	return &AllocationListStub{
		ID:                    a.ID,
		EvalID:                a.EvalID,
		Name:                  a.Name,
		Namespace:             a.Namespace,
		NodeID:                a.NodeID,
		NodeName:              a.NodeName,
		JobID:                 a.JobID,
		JobType:               *a.Job.Type,
		JobVersion:            *a.Job.Version,
		TaskGroup:             a.TaskGroup,
		DesiredStatus:         a.DesiredStatus,
		DesiredDescription:    a.DesiredDescription,
		ClientStatus:          a.ClientStatus,
		ClientDescription:     a.ClientDescription,
		TaskStates:            a.TaskStates,
		DeploymentStatus:      a.DeploymentStatus,
		FollowupEvalID:        a.FollowupEvalID,
		NextAllocation:        a.NextAllocation,
		RescheduleTracker:     a.RescheduleTracker,
		PreemptedAllocations:  a.PreemptedAllocations,
		PreemptedByAllocation: a.PreemptedByAllocation,
		CreateIndex:           a.CreateIndex,
		ModifyIndex:           a.ModifyIndex,
		CreateTime:            a.CreateTime,
		ModifyTime:            a.ModifyTime,
	}
}

// ServerTerminalStatus returns true if the desired state of the allocation is
// terminal.
func (a *Allocation) ServerTerminalStatus() bool {
	switch a.DesiredStatus {
	case AllocDesiredStatusStop, AllocDesiredStatusEvict:
		return true
	default:
		return false
	}
}

// ClientTerminalStatus returns true if the client status is terminal and will
// therefore no longer transition.
func (a *Allocation) ClientTerminalStatus() bool {
	switch a.ClientStatus {
	case AllocClientStatusComplete, AllocClientStatusFailed, AllocClientStatusLost:
		return true
	default:
		return false
	}
}

// AllocationListStub is used to return a subset of an allocation
// during list operations.
type AllocationListStub struct {
	ID                    string
	EvalID                string
	Name                  string
	Namespace             string
	NodeID                string
	NodeName              string
	JobID                 string
	JobType               string
	JobVersion            uint64
	TaskGroup             string
	AllocatedResources    *AllocatedResources `json:",omitempty"`
	DesiredStatus         string
	DesiredDescription    string
	ClientStatus          string
	ClientDescription     string
	TaskStates            map[string]*TaskState
	DeploymentStatus      *AllocDeploymentStatus
	FollowupEvalID        string
	NextAllocation        string
	RescheduleTracker     *RescheduleTracker
	PreemptedAllocations  []string
	PreemptedByAllocation string
	CreateIndex           uint64
	ModifyIndex           uint64
	CreateTime            int64
	ModifyTime            int64
}

// AllocDeploymentStatus captures the status of the allocation as part of the
// deployment. This can include things such as whether the allocation has been
// marked as healthy.
type AllocDeploymentStatus struct {
	Healthy     *bool
	Timestamp   time.Time
	Canary      bool
	ModifyIndex uint64
}

type AllocatedResources struct {
	Tasks  map[string]*AllocatedTaskResources
	Shared AllocatedSharedResources
}

type AllocatedTaskResources struct {
	Cpu      AllocatedCpuResources
	Memory   AllocatedMemoryResources
	Networks []*NetworkResource
	Devices  []*AllocatedDeviceResource
}

type AllocatedSharedResources struct {
	DiskMB   int64
	Networks []*NetworkResource
	Ports    []PortMapping
}

type PortMapping struct {
	Label  string
	Value  int
	To     int
	HostIP string
}

type AllocatedCpuResources struct {
	CpuShares int64
}

type AllocatedMemoryResources struct {
	MemoryMB    int64
	MemoryMaxMB int64
}

type AllocatedDeviceResource struct {
	Vendor    string
	Type      string
	Name      string
	DeviceIDs []string
}

// AllocIndexSort reverse sorts allocs by CreateIndex.
type AllocIndexSort []*AllocationListStub

func (a AllocIndexSort) Len() int {
	return len(a)
}

func (a AllocIndexSort) Less(i, j int) bool {
	return a[i].CreateIndex > a[j].CreateIndex
}

func (a AllocIndexSort) Swap(i, j int) {
	a[i], a[j] = a[j], a[i]
}

// GetTaskGroup returns the task group of this allocation as defined in its
// job, or nil if no matching task group is found.
func (a Allocation) GetTaskGroup() *TaskGroup {
	for _, tg := range a.Job.TaskGroups {
		if *tg.Name == a.TaskGroup {
			return tg
		}
	}
	return nil
}

// RescheduleInfo is used to calculate remaining reschedule attempts
// according to the given time and the task group's reschedule policy
func (a Allocation) RescheduleInfo(t time.Time) (int, int) {
	tg := a.GetTaskGroup()
	if tg == nil || tg.ReschedulePolicy == nil {
		return 0, 0
	}
	reschedulePolicy := tg.ReschedulePolicy
	availableAttempts := *reschedulePolicy.Attempts
	interval := *reschedulePolicy.Interval
	attempted := 0

	// Loop over the reschedule tracker to find attempts within the reschedule
	// policy's interval
	if a.RescheduleTracker != nil && availableAttempts > 0 && interval > 0 {
		for j := len(a.RescheduleTracker.Events) - 1; j >= 0; j-- {
			lastAttempt := a.RescheduleTracker.Events[j].RescheduleTime
			timeDiff := t.UTC().UnixNano() - lastAttempt
			if timeDiff < interval.Nanoseconds() {
				attempted += 1
			}
		}
	}
	return attempted, availableAttempts
}
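
// RescheduleInfo is a pure calculation over the reschedule tracker: the first
// return value counts reschedule events that fall inside the policy interval
// ending at t, and the second is the policy's configured attempt limit. A
// hypothetical sketch of reporting attempts for an alloc fetched via Info:
//
//	attempted, limit := alloc.RescheduleInfo(time.Now())
//	fmt.Printf("reschedule attempts used: %d/%d\n", attempted, limit)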

type AllocationRestartRequest struct {
	TaskName string
	AllTasks bool
}

type AllocSignalRequest struct {
	Task   string
	Signal string
}

// GenericResponse is used to respond to a request where no
// specific response information is needed.
type GenericResponse struct {
	WriteMeta
}

// RescheduleTracker encapsulates previous reschedule events
type RescheduleTracker struct {
	Events []*RescheduleEvent
}

// RescheduleEvent is used to keep track of previous attempts at rescheduling an allocation
type RescheduleEvent struct {
	// RescheduleTime is the timestamp of a reschedule attempt
	RescheduleTime int64

	// PrevAllocID is the ID of the previous allocation being restarted
	PrevAllocID string

	// PrevNodeID is the node ID of the previous allocation
	PrevNodeID string
}

// DesiredTransition is used to mark an allocation as having a desired state
// transition. This information can be used by the scheduler to make the
// correct decision.
type DesiredTransition struct {
	// Migrate is used to indicate that this allocation should be stopped and
	// migrated to another node.
	Migrate *bool

	// Reschedule is used to indicate that this allocation is eligible to be
	// rescheduled.
	Reschedule *bool
}

// ShouldMigrate returns whether the transition object dictates a migration.
func (d DesiredTransition) ShouldMigrate() bool {
	return d.Migrate != nil && *d.Migrate
}

// ExecStreamingIOOperation represents a stream write operation: either
// appending data or closing the stream (exclusively).
type ExecStreamingIOOperation struct {
	Data  []byte `json:"data,omitempty"`
	Close bool   `json:"close,omitempty"`
}

// TerminalSize represents the size of the terminal
type TerminalSize struct {
	Height int `json:"height,omitempty"`
	Width  int `json:"width,omitempty"`
}

var execStreamingInputHeartbeat = ExecStreamingInput{}

// ExecStreamingInput represents user input to be sent to nomad exec handler.
//
// At most one field should be set.
type ExecStreamingInput struct {
	Stdin   *ExecStreamingIOOperation `json:"stdin,omitempty"`
	TTYSize *TerminalSize             `json:"tty_size,omitempty"`
}

// ExecStreamingExitResult captures the exit code of a just-completed nomad exec command
type ExecStreamingExitResult struct {
	ExitCode int `json:"exit_code"`
}

// ExecStreamingOutput represents an output streaming entity, e.g. stdout/stderr update or termination
//
// At most one of these fields should be set: `Stdout`, `Stderr`, or `Result`.
// If `Exited` is true, then `Result` is non-nil, and other fields are nil.
type ExecStreamingOutput struct {
	Stdout *ExecStreamingIOOperation `json:"stdout,omitempty"`
	Stderr *ExecStreamingIOOperation `json:"stderr,omitempty"`

	Exited bool                     `json:"exited,omitempty"`
	Result *ExecStreamingExitResult `json:"result,omitempty"`
}

// AllocSuffix returns the alloc index suffix (including the brackets) from an
// allocation name such as "example.cache[3]", or an empty string if the name
// contains no index suffix.
func AllocSuffix(name string) string {
	idx := strings.LastIndex(name, "[")
	if idx == -1 {
		return ""
	}
	suffix := name[idx:]
	return suffix
}