open-nomad/command/alloc_logs.go

// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: MPL-2.0

package command

import (
	"errors"
	"fmt"
	"io"
	"os"
	"os/signal"
	"strings"
	"syscall"
	"time"

	"github.com/hashicorp/nomad/api"
	"github.com/hashicorp/nomad/api/contexts"
	"github.com/hashicorp/nomad/command/ui"
	"github.com/posener/complete"
)

type AllocLogsCommand struct {
	Meta

	// The fields below represent the command's flags.
	verbose, job, tail, stderr, stdout, follow bool
	numLines                                   int64
	numBytes                                   int64
	task                                       string
}

func (l *AllocLogsCommand) Help() string {
	helpText := `
Usage: nomad alloc logs [options] <allocation> <task>
Alias: nomad logs

  Streams the stdout/stderr of the given allocation and task.

  When ACLs are enabled, this command requires a token with the 'read-logs',
  'read-job', and 'list-jobs' capabilities for the allocation's namespace.

General Options:

  ` + generalOptionsUsage(usageOptsDefault) + `

Logs Specific Options:

  -stdout
    Display stdout logs. This is the default unless the "-f" flag is used, in
    which case both stdout and stderr are followed.

  -stderr
    Display stderr logs.

  -verbose
    Show full information.

  -task <task-name>
    Sets the task to view the logs for. If the task name is given both as an
    argument and with the '-task' option, preference is given to the '-task'
    option.

  -job <job-id>
    Use a random allocation from the specified job ID or prefix.

  -f
    Causes the output to not stop when the end of the logs is reached, but
    rather to wait for additional output. When supplied with no other flags
    except optionally "-job" and "-task", both stdout and stderr logs will be
    followed.

  -tail
    Show the logs contents with offsets relative to the end of the logs. If no
    offset is given, -n is defaulted to 10.

  -n
    Sets the tail location as a best-effort number of lines relative to the
    end of the logs.

  -c
    Sets the tail location in number of bytes relative to the end of the logs.

  Note that the -no-color option applies to Nomad's own output. If the task's
  logs include terminal escape sequences for color codes, Nomad will not
  remove them.
`
	return strings.TrimSpace(helpText)
}
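
// Typical invocations (illustrative only; the allocation ID prefix and task
// name below are hypothetical):
//
//	nomad alloc logs -tail -n 50 04d9627d redis
//	nomad alloc logs -f -job example
//	nomad alloc logs -stderr 04d9627d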

func (l *AllocLogsCommand) Synopsis() string {
	return "Streams the logs of a task."
}

func (l *AllocLogsCommand) AutocompleteFlags() complete.Flags {
	return mergeAutocompleteFlags(l.Meta.AutocompleteFlags(FlagSetClient),
		complete.Flags{
			"-stderr":  complete.PredictNothing,
			"-stdout":  complete.PredictNothing,
			"-verbose": complete.PredictNothing,
			"-task":    complete.PredictAnything,
			"-job":     complete.PredictAnything,
			"-f":       complete.PredictNothing,
			"-tail":    complete.PredictAnything,
			"-n":       complete.PredictAnything,
			"-c":       complete.PredictAnything,
		})
}

func (l *AllocLogsCommand) AutocompleteArgs() complete.Predictor {
	return complete.PredictFunc(func(a complete.Args) []string {
		client, err := l.Meta.Client()
		if err != nil {
			return nil
		}

		resp, _, err := client.Search().PrefixSearch(a.Last, contexts.Allocs, nil)
		if err != nil {
			return []string{}
		}
		return resp.Matches[contexts.Allocs]
	})
}

func (l *AllocLogsCommand) Name() string { return "alloc logs" }

func (l *AllocLogsCommand) Run(args []string) int {
	flags := l.Meta.FlagSet(l.Name(), FlagSetClient)
	flags.Usage = func() { l.Ui.Output(l.Help()) }
	flags.BoolVar(&l.verbose, "verbose", false, "")
	flags.BoolVar(&l.job, "job", false, "")
	flags.BoolVar(&l.tail, "tail", false, "")
	flags.BoolVar(&l.follow, "f", false, "")
	flags.BoolVar(&l.stderr, "stderr", false, "")
	flags.BoolVar(&l.stdout, "stdout", false, "")
	flags.Int64Var(&l.numLines, "n", -1, "")
	flags.Int64Var(&l.numBytes, "c", -1, "")
	flags.StringVar(&l.task, "task", "", "")

	if err := flags.Parse(args); err != nil {
		return 1
	}
	args = flags.Args()

	if numArgs := len(args); numArgs < 1 {
		if l.job {
			l.Ui.Error("A job ID is required")
		} else {
			l.Ui.Error("An allocation ID is required")
		}
		l.Ui.Error(commandErrorText(l))
		return 1
	} else if numArgs > 2 {
		l.Ui.Error("This command takes one or two arguments")
		l.Ui.Error(commandErrorText(l))
		return 1
	}

	client, err := l.Meta.Client()
	if err != nil {
		l.Ui.Error(fmt.Sprintf("Error initializing client: %v", err))
		return 1
	}

	// If -job is specified, use a random allocation from that job, otherwise
	// use the provided allocation ID.
	allocID := args[0]
	if l.job {
		jobID, ns, err := l.JobIDByPrefix(client, args[0], nil)
		if err != nil {
			l.Ui.Error(err.Error())
			return 1
		}

		allocID, err = getRandomJobAllocID(client, jobID, ns)
		if err != nil {
			l.Ui.Error(fmt.Sprintf("Error fetching allocations: %v", err))
			return 1
		}
	}

	// Truncate the id unless full length is requested
	length := shortId
	if l.verbose {
		length = fullId
	}

	// Query the allocation info
	if len(allocID) == 1 {
		l.Ui.Error("Alloc ID must contain at least two characters.")
		return 1
	}

	allocID = sanitizeUUIDPrefix(allocID)
	allocs, _, err := client.Allocations().PrefixList(allocID)
	if err != nil {
		l.Ui.Error(fmt.Sprintf("Error querying allocation: %v", err))
		return 1
	}
	if len(allocs) == 0 {
		l.Ui.Error(fmt.Sprintf("No allocation(s) with prefix or id %q found", allocID))
		return 1
	}
	if len(allocs) > 1 {
		// Format the allocs
		out := formatAllocListStubs(allocs, l.verbose, length)
		l.Ui.Error(fmt.Sprintf("Prefix matched multiple allocations\n\n%s", out))
		return 1
	}

	// Prefix lookup matched a single allocation
	q := &api.QueryOptions{Namespace: allocs[0].Namespace}
	alloc, _, err := client.Allocations().Info(allocs[0].ID, q)
	if err != nil {
		l.Ui.Error(fmt.Sprintf("Error querying allocation: %s", err))
		return 1
	}

	// If -task isn't provided, fall back to reading the task name from the
	// second positional argument.
	if l.task != "" {
		err = validateTaskExistsInAllocation(l.task, alloc)
	} else {
		if len(args) >= 2 {
			l.task = args[1]
			if l.task == "" {
				l.Ui.Error("Task name required")
				return 1
			}
		} else {
			l.task, err = lookupAllocTask(alloc)
		}
	}
	if err != nil {
		l.Ui.Error(fmt.Sprintf("Failed to validate task: %s", err))
		return 1
	}

	// In order to run the mixed log output, we can only follow the files from
	// their current positions. There is no way to interleave previous log
	// lines, as there are no timestamp references.
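	// This branch is taken for a bare "-f" (optionally with "-job"/"-task"):
	// no explicit stream selection and no tail offsets were requested.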
	if l.follow && !(l.stderr || l.stdout || l.tail || l.numLines > 0 || l.numBytes > 0) {
		if err := l.tailMultipleFiles(client, alloc); err != nil {
			l.Ui.Error(fmt.Sprintf("Failed to tail stdout and stderr files: %v", err))
			return 1
		}
	} else {
		// If we are not strictly following the two files, we cannot support
		// specifying both as targets.
		if l.stderr && l.stdout {
			l.Ui.Error("Unable to support both stdout and stderr")
			return 1
		}

		logType := api.FSLogNameStdout
		if l.stderr {
			logType = api.FSLogNameStderr
		}
		if err := l.handleSingleFile(client, alloc, logType); err != nil {
			l.Ui.Error(fmt.Sprintf("Failed to read %s file: %v", logType, err))
			return 1
		}
	}

	return 0
}

func (l *AllocLogsCommand) handleSingleFile(client *api.Client, alloc *api.Allocation, logType string) error {
	// We have a file, output it.
	var r io.ReadCloser
	var readErr error
	if !l.tail {
		r, readErr = l.followFile(client, alloc, logType, api.OriginStart, 0)
		if readErr != nil {
			return fmt.Errorf("error reading file: %v", readErr)
		}
	} else {
		// Parse the offset
		var offset = defaultTailLines * bytesToLines
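		// defaultTailLines and bytesToLines are package-level constants; their
		// product is a best-effort byte offset intended to cover the default
		// number of tail lines.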
		if nLines, nBytes := l.numLines != -1, l.numBytes != -1; nLines && nBytes {
			return errors.New("both -n and -c set")
		} else if nLines {
			offset = l.numLines * bytesToLines
		} else if nBytes {
			offset = l.numBytes
		} else {
			l.numLines = defaultTailLines
		}

		r, readErr = l.followFile(client, alloc, logType, api.OriginEnd, offset)
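
		// Because the byte offset above is only an estimate, the
		// LineLimitReader below trims the stream to at most numLines lines.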
		// If numLines is set, wrap the reader
		if l.numLines != -1 {
			r = NewLineLimitReader(r, int(l.numLines), int(l.numLines*bytesToLines), 1*time.Second)
		}

		if readErr != nil {
			return fmt.Errorf("error tailing file: %v", readErr)
		}
	}
	defer r.Close()

	if _, err := io.Copy(os.Stdout, r); err != nil {
		return fmt.Errorf("error following logs: %s", err)
	}

	return nil
}

// followFile returns a reader for the given log type, reading from the
// supplied origin and offset and following new output when the follow flag is
// set.
func (l *AllocLogsCommand) followFile(client *api.Client, alloc *api.Allocation,
	logType, origin string, offset int64) (io.ReadCloser, error) {

	cancel := make(chan struct{})
	frames, errCh := client.AllocFS().Logs(alloc, l.follow, l.task, logType, origin, offset, cancel, nil)

	// Setting up the logs stream can fail, therefore we need to check the
	// error channel before continuing further.
	select {
	case err := <-errCh:
		return nil, err
	default:
	}

	// Create a reader but don't initially cast it to an io.ReadCloser so that
	// we can set the unblock time.
	var r io.ReadCloser
	frameReader := api.NewFrameReader(frames, errCh, cancel)
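	// The unblock time bounds how long a single Read can block waiting for a
	// new log frame; once it elapses the reader returns without data instead
	// of blocking indefinitely, which lets wrappers such as LineLimitReader
	// make progress.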
	frameReader.SetUnblockTime(500 * time.Millisecond)
	r = frameReader

	signalCh := make(chan os.Signal, 1)
	signal.Notify(signalCh, os.Interrupt, syscall.SIGTERM)

	// This goroutine blocks until the command receives an interrupt or
	// terminate signal, at which point we close the ReadCloser.
	go func() {
		<-signalCh
		_ = r.Close()
	}()

	return r, nil
}

// tailMultipleFiles will follow both stdout and stderr log files of the passed
// allocation. Each stream is written to the user's console via stdout and
// stderr respectively, until the user cancels the command.
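// Both streams are opened at the end of the logs (OriginEnd, zero offset), so
// only output produced after the command starts is shown.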
func (l *AllocLogsCommand) tailMultipleFiles(client *api.Client, alloc *api.Allocation) error {

	// Use a single cancel channel for both log streams, so we only have to
	// close one.
	cancel := make(chan struct{})

	// Ensure the channel is closed in order to notify listeners whenever we
	// exit.
	defer close(cancel)

	stdoutFrames, stdoutErrCh := client.AllocFS().Logs(
		alloc, true, l.task, api.FSLogNameStdout, api.OriginEnd, 0, cancel, nil)

	// Setting up the logs stream can fail, therefore we need to check the
	// error channel before continuing further.
	select {
	case err := <-stdoutErrCh:
		return fmt.Errorf("failed to setup stdout log tailing: %v", err)
	default:
	}

	stderrFrames, stderrErrCh := client.AllocFS().Logs(
		alloc, true, l.task, api.FSLogNameStderr, api.OriginEnd, 0, cancel, nil)

	// Setting up the logs stream can fail, therefore we need to check the
	// error channel before continuing further.
	select {
	case err := <-stderrErrCh:
		return fmt.Errorf("failed to setup stderr log tailing: %v", err)
	default:
	}

	// Trap user signals, so we know when to exit and cancel the log streams
	// running in the background.
	signalCh := make(chan os.Signal, 1)
	signal.Notify(signalCh, os.Interrupt, syscall.SIGTERM)

	// Generate our logging UI that doesn't add any additional formatting to
	// output strings.
	logUI, err := ui.NewLogUI(l.Ui)
	if err != nil {
		return err
	}

	// Enter the main loop where we listen for log frames, errors, and a cancel
	// signal. Any error at this point will result in the stream being ended,
	// therefore should result in this command exiting. Otherwise, we would
	// just be printing a single stream, which might be hard to notice for the
	// user.
	for {
		select {
		case <-signalCh:
			return nil
		case stdoutErr := <-stdoutErrCh:
			return fmt.Errorf("received an error from stdout log stream: %v", stdoutErr)
		case stdoutFrame := <-stdoutFrames:
			if stdoutFrame != nil {
				logUI.Output(string(stdoutFrame.Data))
			}
		case stderrErr := <-stderrErrCh:
			return fmt.Errorf("received an error from stderr log stream: %v", stderrErr)
		case stderrFrame := <-stderrFrames:
			if stderrFrame != nil {
				logUI.Warn(string(stderrFrame.Data))
			}
		}
	}
}
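
// lookupAllocTask resolves the task name when the user did not supply one: if
// the allocation's task group runs exactly one task, that task is returned,
// otherwise an error listing the available tasks is returned.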
func lookupAllocTask(alloc *api.Allocation) (string, error) {
	tg := alloc.Job.LookupTaskGroup(alloc.TaskGroup)
	if tg == nil {
		return "", fmt.Errorf("Could not find allocation task group: %s", alloc.TaskGroup)
	}

	if len(tg.Tasks) == 1 {
		return tg.Tasks[0].Name, nil
	}

	var errStr strings.Builder
	fmt.Fprintf(&errStr, "Allocation %q is running the following tasks:\n", limit(alloc.ID, shortId))

	for _, t := range tg.Tasks {
		fmt.Fprintf(&errStr, " * %s\n", t.Name)
	}

	fmt.Fprintf(&errStr, "\nPlease specify the task.")

	return "", errors.New(errStr.String())
}