open-nomad/client/alloc_endpoint.go

// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: MPL-2.0

package client

import (
	"bytes"
	"context"
	"errors"
	"fmt"
	"io"
	"time"

	"github.com/armon/go-metrics"
	"github.com/hashicorp/go-msgpack/codec"

	"github.com/hashicorp/nomad/acl"
	cstructs "github.com/hashicorp/nomad/client/structs"
	"github.com/hashicorp/nomad/helper/pointer"
	"github.com/hashicorp/nomad/helper/uuid"
	nstructs "github.com/hashicorp/nomad/nomad/structs"
	"github.com/hashicorp/nomad/plugins/drivers"
)

// Allocations endpoint is used for interacting with client allocations
type Allocations struct {
	c *Client
}

func NewAllocationsEndpoint(c *Client) *Allocations {
	a := &Allocations{c: c}

	// Exec is a streaming endpoint, so it is registered with the client's
	// streaming RPC handlers rather than served as a regular RPC method.
	a.c.streamingRpcs.Register("Allocations.Exec", a.exec)

	return a
}
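
// Each endpoint below resolves the request's auth token before acting. A nil
// ACL object from ResolveToken means ACLs are disabled, so checks of the form
// `aclObj != nil && !aclObj.Allow...` let those requests through.
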
// GarbageCollectAll is used to garbage collect all allocations on a client.
func (a *Allocations) GarbageCollectAll(args *nstructs.NodeSpecificRequest, reply *nstructs.GenericResponse) error {
	defer metrics.MeasureSince([]string{"client", "allocations", "garbage_collect_all"}, time.Now())

	// Check node write permissions
	if aclObj, err := a.c.ResolveToken(args.AuthToken); err != nil {
		return err
	} else if aclObj != nil && !aclObj.AllowNodeWrite() {
		return nstructs.ErrPermissionDenied
	}

	a.c.CollectAllAllocs()
	return nil
}

// GarbageCollect is used to garbage collect an allocation on a client.
func (a *Allocations) GarbageCollect(args *nstructs.AllocSpecificRequest, reply *nstructs.GenericResponse) error {
	defer metrics.MeasureSince([]string{"client", "allocations", "garbage_collect"}, time.Now())

	alloc, err := a.c.GetAlloc(args.AllocID)
	if err != nil {
		return err
	}

	// Check namespace submit job permission.
	if aclObj, err := a.c.ResolveToken(args.AuthToken); err != nil {
		return err
	} else if aclObj != nil && !aclObj.AllowNsOp(alloc.Namespace, acl.NamespaceCapabilitySubmitJob) {
		return nstructs.ErrPermissionDenied
	}

	if !a.c.CollectAllocation(args.AllocID) {
		return fmt.Errorf("No such allocation on client, or allocation not eligible for GC")
	}

	return nil
}

// Signal is used to send a signal to an allocation's tasks on a client.
func (a *Allocations) Signal(args *nstructs.AllocSignalRequest, reply *nstructs.GenericResponse) error {
	defer metrics.MeasureSince([]string{"client", "allocations", "signal"}, time.Now())

	alloc, err := a.c.GetAlloc(args.AllocID)
	if err != nil {
		return err
	}

	// Check namespace alloc-lifecycle permission.
	if aclObj, err := a.c.ResolveToken(args.AuthToken); err != nil {
		return err
	} else if aclObj != nil && !aclObj.AllowNsOp(alloc.Namespace, acl.NamespaceCapabilityAllocLifecycle) {
		return nstructs.ErrPermissionDenied
	}

	return a.c.SignalAllocation(args.AllocID, args.Task, args.Signal)
}

// Restart is used to trigger a restart of an allocation or a subtask on a client.
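// When args.AllTasks is set, every task in the allocation is restarted, even
// tasks that have already exited (see the task lifecycle restart work in #14127).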
func (a *Allocations) Restart(args *nstructs.AllocRestartRequest, reply *nstructs.GenericResponse) error {
	defer metrics.MeasureSince([]string{"client", "allocations", "restart"}, time.Now())

	alloc, err := a.c.GetAlloc(args.AllocID)
	if err != nil {
		return err
	}

	// Check namespace alloc-lifecycle permission.
	if aclObj, err := a.c.ResolveToken(args.AuthToken); err != nil {
		return err
	} else if aclObj != nil && !aclObj.AllowNsOp(alloc.Namespace, acl.NamespaceCapabilityAllocLifecycle) {
		return nstructs.ErrPermissionDenied
	}

	return a.c.RestartAllocation(args.AllocID, args.TaskName, args.AllTasks)
}

// Stats is used to collect allocation statistics
func (a *Allocations) Stats(args *cstructs.AllocStatsRequest, reply *cstructs.AllocStatsResponse) error {
	defer metrics.MeasureSince([]string{"client", "allocations", "stats"}, time.Now())

	alloc, err := a.c.GetAlloc(args.AllocID)
	if err != nil {
		return err
	}

	// Check read-job permission.
	if aclObj, err := a.c.ResolveToken(args.AuthToken); err != nil {
		return err
	} else if aclObj != nil && !aclObj.AllowNsOp(alloc.Namespace, acl.NamespaceCapabilityReadJob) {
		return nstructs.ErrPermissionDenied
	}

	clientStats := a.c.StatsReporter()
	aStats, err := clientStats.GetAllocStats(args.AllocID)
	if err != nil {
		return err
	}

	stats, err := aStats.LatestAllocStats(args.Task)
	if err != nil {
		return err
	}

	reply.Stats = stats
	return nil
}

// Checks is used to retrieve nomad service discovery check status information.
func (a *Allocations) Checks(args *cstructs.AllocChecksRequest, reply *cstructs.AllocChecksResponse) error {
	defer metrics.MeasureSince([]string{"client", "allocations", "checks"}, time.Now())

	// Get the allocation
	alloc, err := a.c.GetAlloc(args.AllocID)
	if err != nil {
		return err
	}

	// Check read-job permission
	if aclObj, aclErr := a.c.ResolveToken(args.AuthToken); aclErr != nil {
		return aclErr
	} else if aclObj != nil && !aclObj.AllowNsOp(alloc.Namespace, acl.NamespaceCapabilityReadJob) {
		return nstructs.ErrPermissionDenied
	}

	// Get the status information for the allocation
	reply.Results = a.c.checkStore.List(alloc.ID)
	return nil
}

// exec is used to execute a command in a running task. It is the handler
// registered for the Allocations.Exec streaming RPC.
func (a *Allocations) exec(conn io.ReadWriteCloser) {
	defer metrics.MeasureSince([]string{"client", "allocations", "exec"}, time.Now())
	defer conn.Close()

	// execID identifies this exec session in log entries.
	execID := uuid.Generate()

	decoder := codec.NewDecoder(conn, nstructs.MsgpackHandle)
	encoder := codec.NewEncoder(conn, nstructs.MsgpackHandle)

	code, err := a.execImpl(encoder, decoder, execID)
	if err != nil {
		a.c.logger.Info("task exec session ended with an error", "error", err, "code", code)
		handleStreamResultError(err, code, encoder)
		return
	}

	a.c.logger.Info("task exec session ended", "exec_id", execID)
}
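
// execImpl carries out an exec session. On failure it returns an error along
// with an optional HTTP-style status code (400, 404, or 500) that exec passes
// to handleStreamResultError.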
func (a *Allocations) execImpl(encoder *codec.Encoder, decoder *codec.Decoder, execID string) (code *int64, err error) {
	// Decode the arguments
	var req cstructs.AllocExecRequest
	if err := decoder.Decode(&req); err != nil {
		return pointer.Of(int64(500)), err
	}

	if a.c.GetConfig().DisableRemoteExec {
		return nil, nstructs.ErrPermissionDenied
	}

	if req.AllocID == "" {
		return pointer.Of(int64(400)), allocIDNotPresentErr
	}

	ar, err := a.c.getAllocRunner(req.AllocID)
	if err != nil {
		code := pointer.Of(int64(500))
		if nstructs.IsErrUnknownAllocation(err) {
			code = pointer.Of(int64(404))
		}
		return code, err
	}
	alloc := ar.Alloc()

	aclObj, ident, err := a.c.resolveTokenAndACL(req.QueryOptions.AuthToken)
	{
		// log access
		logArgs := []any{
			"exec_id", execID,
			"alloc_id", req.AllocID,
			"task", req.Task,
			"command", req.Cmd,
			"tty", req.Tty,
		}
		if ident != nil {
			if ident.ACLToken != nil {
				logArgs = append(logArgs,
					"access_token_name", ident.ACLToken.Name,
					"access_token_id", ident.ACLToken.AccessorID,
				)
			} else if ident.Claims != nil {
				logArgs = append(logArgs,
					"ns", ident.Claims.Namespace,
					"job", ident.Claims.JobID,
					"alloc", ident.Claims.AllocationID,
					"task", ident.Claims.TaskName,
				)
			}
		}
		a.c.logger.Info("task exec session starting", logArgs...)
	}

	// Check alloc-exec permission.
	if err != nil {
		return nil, err
	} else if aclObj != nil && !aclObj.AllowNsOp(alloc.Namespace, acl.NamespaceCapabilityAllocExec) {
		return nil, nstructs.ErrPermissionDenied
	}

	// Validate the arguments
	if req.Task == "" {
		return pointer.Of(int64(400)), taskNotPresentErr
	}
	if len(req.Cmd) == 0 {
		return pointer.Of(int64(400)), errors.New("command is not present")
	}

	capabilities, err := ar.GetTaskDriverCapabilities(req.Task)
	if err != nil {
		code := pointer.Of(int64(500))
		if nstructs.IsErrUnknownAllocation(err) {
			code = pointer.Of(int64(404))
		}
		return code, err
	}

	// Check node access. A task driver without filesystem isolation effectively
	// exposes the node itself, so exec on such a task also requires the
	// alloc-node-exec capability.
	if aclObj != nil && capabilities.FSIsolation == drivers.FSIsolationNone {
		exec := aclObj.AllowNsOp(alloc.Namespace, acl.NamespaceCapabilityAllocNodeExec)
		if !exec {
			return nil, nstructs.ErrPermissionDenied
		}
	}

	allocState, err := a.c.GetAllocState(req.AllocID)
	if err != nil {
		code := pointer.Of(int64(500))
		if nstructs.IsErrUnknownAllocation(err) {
			code = pointer.Of(int64(404))
		}
		return code, err
	}

	// Check that the task is there
	taskState := allocState.TaskStates[req.Task]
	if taskState == nil {
		return pointer.Of(int64(400)), fmt.Errorf("unknown task name %q", req.Task)
	}

	if taskState.StartedAt.IsZero() {
		return pointer.Of(int64(404)), fmt.Errorf("task %q not started yet.", req.Task)
	}

	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	h := ar.GetTaskExecHandler(req.Task)
	if h == nil {
		return pointer.Of(int64(404)), fmt.Errorf("task %q is not running.", req.Task)
	}

	err = h(ctx, req.Cmd, req.Tty, newExecStream(decoder, encoder))
	if err != nil {
		code := pointer.Of(int64(500))
		return code, err
	}

	return nil, nil
}

// newExecStream returns a new exec stream as expected by drivers that
// interoperate with the RPC streaming format.
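//
// Outgoing frames are JSON-encoded into an internal buffer and then wrapped in
// a cstructs.StreamErrWrapper that is msgpack-encoded onto the connection;
// incoming requests are msgpack-decoded directly.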
func newExecStream(decoder *codec.Decoder, encoder *codec.Encoder) drivers.ExecTaskStream {
	buf := new(bytes.Buffer)
	return &execStream{
		decoder:    decoder,
		buf:        buf,
		encoder:    encoder,
		frameCodec: codec.NewEncoder(buf, nstructs.JsonHandle),
	}
}
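
// execStream adapts the connection's msgpack encoder and decoder to the
// drivers.ExecTaskStream interface expected by task driver exec handlers.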
type execStream struct {
	decoder    *codec.Decoder
	encoder    *codec.Encoder
	buf        *bytes.Buffer
	frameCodec *codec.Encoder
}

// Send sends a driver output response across the RPC mechanism using cstructs.StreamErrWrapper
func (s *execStream) Send(m *drivers.ExecTaskStreamingResponseMsg) error {
	s.buf.Reset()
	s.frameCodec.Reset(s.buf)

	s.frameCodec.MustEncode(m)
	return s.encoder.Encode(cstructs.StreamErrWrapper{
		Payload: s.buf.Bytes(),
	})
}

// Recv returns the next exec user input from the RPC to be passed to the driver exec handler
func (s *execStream) Recv() (*drivers.ExecTaskStreamingRequestMsg, error) {
	req := drivers.ExecTaskStreamingRequestMsg{}
	err := s.decoder.Decode(&req)
	return &req, err
}