// open-nomad/command/alloc_status.go

package command

import (
	"fmt"
	"math"
	"sort"
	"strconv"
	"strings"
	"time"

	"github.com/dustin/go-humanize"
	"github.com/posener/complete"

	"github.com/hashicorp/nomad/api"
	"github.com/hashicorp/nomad/api/contexts"
	"github.com/hashicorp/nomad/helper"
)
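
// AllocStatusCommand implements the "nomad alloc status" CLI command.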
type AllocStatusCommand struct {
	Meta
}

func (c *AllocStatusCommand) Help() string {
	helpText := `
Usage: nomad alloc status [options] <allocation>

  Display information about an existing allocation and its tasks. This command
  can be used to inspect the current status of an allocation, including its
  running status, metadata, and verbose failure messages reported by internal
  subsystems.

  When ACLs are enabled, this command requires a token with the 'read-job' and
  'list-jobs' capabilities for the allocation's namespace.

General Options:

  ` + generalOptionsUsage(usageOptsDefault) + `

Alloc Status Options:

  -short
    Display short output. Shows only the most recent task event.

  -stats
    Display detailed resource usage statistics.

  -verbose
    Show full information.

  -json
    Output the allocation in its JSON format.

  -t
    Format and display allocation using a Go template.
`
	return strings.TrimSpace(helpText)
}

func (c *AllocStatusCommand) Synopsis() string {
	return "Display allocation status information and metadata"
}

func (c *AllocStatusCommand) AutocompleteFlags() complete.Flags {
	return mergeAutocompleteFlags(c.Meta.AutocompleteFlags(FlagSetClient),
		complete.Flags{
			"-short":   complete.PredictNothing,
			"-verbose": complete.PredictNothing,
			"-json":    complete.PredictNothing,
			"-t":       complete.PredictAnything,
		})
}

func (c *AllocStatusCommand) AutocompleteArgs() complete.Predictor {
	return complete.PredictFunc(func(a complete.Args) []string {
		client, err := c.Meta.Client()
		if err != nil {
			return nil
		}

		resp, _, err := client.Search().PrefixSearch(a.Last, contexts.Allocs, nil)
		if err != nil {
			return []string{}
		}
		return resp.Matches[contexts.Allocs]
	})
}

func (c *AllocStatusCommand) Name() string { return "alloc status" }
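
// Run parses the command-line flags and arguments and renders the status of
// the matching allocation, or a formatted allocation list when no argument is
// given but an output format is specified.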
func (c *AllocStatusCommand) Run(args []string) int {
	var short, displayStats, verbose, json bool
	var tmpl string

	flags := c.Meta.FlagSet(c.Name(), FlagSetClient)
	flags.Usage = func() { c.Ui.Output(c.Help()) }

	flags.BoolVar(&short, "short", false, "")
	flags.BoolVar(&verbose, "verbose", false, "")
	flags.BoolVar(&displayStats, "stats", false, "")
	flags.BoolVar(&json, "json", false, "")
	flags.StringVar(&tmpl, "t", "", "")

	if err := flags.Parse(args); err != nil {
		return 1
	}

	// Check that we got exactly one allocation ID
	args = flags.Args()

	// Get the HTTP client
	client, err := c.Meta.Client()
	if err != nil {
		c.Ui.Error(fmt.Sprintf("Error initializing client: %s", err))
		return 1
	}

	// If no args are given but an output format is specified, format and
	// output the full allocation list
	if len(args) == 0 && (json || len(tmpl) > 0) {
		allocs, _, err := client.Allocations().List(nil)
		if err != nil {
			c.Ui.Error(fmt.Sprintf("Error querying allocations: %v", err))
			return 1
		}

		out, err := Format(json, tmpl, allocs)
		if err != nil {
			c.Ui.Error(err.Error())
			return 1
		}

		c.Ui.Output(out)
		return 0
	}

	if len(args) != 1 {
		c.Ui.Error("This command takes one of the following argument conditions:")
		c.Ui.Error(" * A single <allocation>")
		c.Ui.Error(" * No arguments, with output format specified")
		c.Ui.Error(commandErrorText(c))
		return 1
	}

	allocID := args[0]

	// Truncate the id unless full length is requested
	length := shortId
	if verbose {
		length = fullId
	}

	// Query the allocation info
	if len(allocID) == 1 {
		c.Ui.Error("Identifier must contain at least two characters.")
		return 1
	}

	allocID = sanitizeUUIDPrefix(allocID)
	allocs, _, err := client.Allocations().PrefixList(allocID)
	if err != nil {
		c.Ui.Error(fmt.Sprintf("Error querying allocation: %v", err))
		return 1
	}
	if len(allocs) == 0 {
		c.Ui.Error(fmt.Sprintf("No allocation(s) with prefix or id %q found", allocID))
		return 1
	}
	if len(allocs) > 1 {
		out := formatAllocListStubs(allocs, verbose, length)
		c.Ui.Output(fmt.Sprintf("Prefix matched multiple allocations\n\n%s", out))
		return 0
	}

	// Prefix lookup matched a single allocation
	q := &api.QueryOptions{Namespace: allocs[0].Namespace}
	alloc, _, err := client.Allocations().Info(allocs[0].ID, q)
	if err != nil {
		c.Ui.Error(fmt.Sprintf("Error querying allocation: %s", err))
		return 1
	}

	// If output format is specified, format and output the data
	if json || len(tmpl) > 0 {
		out, err := Format(json, tmpl, alloc)
		if err != nil {
			c.Ui.Error(err.Error())
			return 1
		}
		c.Ui.Output(out)
		return 0
	}

	// Format the allocation data
	if short {
		c.Ui.Output(formatAllocShortInfo(alloc, client))
	} else {
		output, err := formatAllocBasicInfo(alloc, client, length, verbose)
		if err != nil {
			c.Ui.Error(err.Error())
			return 1
		}
		c.Ui.Output(output)

		// Add allocation network addresses
		if alloc.AllocatedResources != nil && len(alloc.AllocatedResources.Shared.Networks) > 0 && alloc.AllocatedResources.Shared.Networks[0].HasPorts() {
			c.Ui.Output("")
			c.Ui.Output(formatAllocNetworkInfo(alloc))
		}

		// Add allocation Nomad service discovery checks
		if checkOutput := formatAllocNomadServiceChecks(alloc.ID, client); checkOutput != "" {
			c.Ui.Output("")
			c.Ui.Output(checkOutput)
		}
	}

	if short {
		c.shortTaskStatus(alloc)
	} else {
		var statsErr error
		var stats *api.AllocResourceUsage
		stats, statsErr = client.Allocations().Stats(alloc, nil)
		if statsErr != nil {
			c.Ui.Output("")
			if statsErr != api.NodeDownErr {
				c.Ui.Error(fmt.Sprintf("Couldn't retrieve stats: %v", statsErr))
			} else {
				c.Ui.Output("Omitting resource statistics since the node is down.")
			}
		}
		c.outputTaskDetails(alloc, stats, displayStats, verbose)
	}

	// Format the detailed status
	if verbose {
		c.Ui.Output(c.Colorize().Color("\n[bold]Placement Metrics[reset]"))
		c.Ui.Output(formatAllocMetrics(alloc.Metrics, true, " "))
	}

	return 0
}
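
// formatAllocShortInfo returns the key/value summary (ID, name, create and
// modify times) used for the -short view of an allocation.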
func formatAllocShortInfo(alloc *api.Allocation, client *api.Client) string {
	formattedCreateTime := prettyTimeDiff(time.Unix(0, alloc.CreateTime), time.Now())
	formattedModifyTime := prettyTimeDiff(time.Unix(0, alloc.ModifyTime), time.Now())

	basic := []string{
		fmt.Sprintf("ID|%s", alloc.ID),
		fmt.Sprintf("Name|%s", alloc.Name),
		fmt.Sprintf("Created|%s", formattedCreateTime),
		fmt.Sprintf("Modified|%s", formattedModifyTime),
	}

	return formatKV(basic)
}
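
// formatAllocBasicInfo returns the expanded key/value description of an
// allocation, including deployment, reschedule, and placement details.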
func formatAllocBasicInfo(alloc *api.Allocation, client *api.Client, uuidLength int, verbose bool) (string, error) {
	var formattedCreateTime, formattedModifyTime string

	if verbose {
		formattedCreateTime = formatUnixNanoTime(alloc.CreateTime)
		formattedModifyTime = formatUnixNanoTime(alloc.ModifyTime)
	} else {
		formattedCreateTime = prettyTimeDiff(time.Unix(0, alloc.CreateTime), time.Now())
		formattedModifyTime = prettyTimeDiff(time.Unix(0, alloc.ModifyTime), time.Now())
	}

	basic := []string{
		fmt.Sprintf("ID|%s", alloc.ID),
		fmt.Sprintf("Eval ID|%s", limit(alloc.EvalID, uuidLength)),
		fmt.Sprintf("Name|%s", alloc.Name),
		fmt.Sprintf("Node ID|%s", limit(alloc.NodeID, uuidLength)),
		fmt.Sprintf("Node Name|%s", alloc.NodeName),
		fmt.Sprintf("Job ID|%s", alloc.JobID),
		fmt.Sprintf("Job Version|%d", *alloc.Job.Version),
		fmt.Sprintf("Client Status|%s", alloc.ClientStatus),
		fmt.Sprintf("Client Description|%s", alloc.ClientDescription),
		fmt.Sprintf("Desired Status|%s", alloc.DesiredStatus),
		fmt.Sprintf("Desired Description|%s", alloc.DesiredDescription),
		fmt.Sprintf("Created|%s", formattedCreateTime),
		fmt.Sprintf("Modified|%s", formattedModifyTime),
	}

	if alloc.DeploymentID != "" {
		health := "unset"
		canary := false
		if alloc.DeploymentStatus != nil {
			if alloc.DeploymentStatus.Healthy != nil {
				if *alloc.DeploymentStatus.Healthy {
					health = "healthy"
				} else {
					health = "unhealthy"
				}
			}

			canary = alloc.DeploymentStatus.Canary
		}

		basic = append(basic,
			fmt.Sprintf("Deployment ID|%s", limit(alloc.DeploymentID, uuidLength)),
			fmt.Sprintf("Deployment Health|%s", health))
		if canary {
			basic = append(basic, fmt.Sprintf("Canary|%v", true))
		}
	}

	if alloc.RescheduleTracker != nil && len(alloc.RescheduleTracker.Events) > 0 {
		attempts, total := alloc.RescheduleInfo(time.Unix(0, alloc.ModifyTime))
		// Show this section only if the reschedule policy limits the number of attempts
		if total > 0 {
			reschedInfo := fmt.Sprintf("Reschedule Attempts|%d/%d", attempts, total)
			basic = append(basic, reschedInfo)
		}
	}
	if alloc.NextAllocation != "" {
		basic = append(basic,
			fmt.Sprintf("Replacement Alloc ID|%s", limit(alloc.NextAllocation, uuidLength)))
	}
	if alloc.FollowupEvalID != "" {
		nextEvalTime := futureEvalTimePretty(alloc.FollowupEvalID, client)
		if nextEvalTime != "" {
			basic = append(basic,
				fmt.Sprintf("Reschedule Eligibility|%s", nextEvalTime))
		}
	}

	if verbose {
		basic = append(basic,
			fmt.Sprintf("Evaluated Nodes|%d", alloc.Metrics.NodesEvaluated),
			fmt.Sprintf("Filtered Nodes|%d", alloc.Metrics.NodesFiltered),
			fmt.Sprintf("Exhausted Nodes|%d", alloc.Metrics.NodesExhausted),
			fmt.Sprintf("Allocation Time|%s", alloc.Metrics.AllocationTime),
			fmt.Sprintf("Failures|%d", alloc.Metrics.CoalescedFailures))
	}

	return formatKV(basic), nil
}
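
// formatAllocNetworkInfo renders the table of host addresses and port
// mappings assigned to the allocation.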
func formatAllocNetworkInfo(alloc *api.Allocation) string {
	nw := alloc.AllocatedResources.Shared.Networks[0]
	addrs := []string{"Label|Dynamic|Address"}
	portFmt := func(label string, value, to int, hostIP, dyn string) string {
		s := fmt.Sprintf("%s|%s|%s:%d", label, dyn, hostIP, value)
		if to > 0 {
			s += fmt.Sprintf(" -> %d", to)
		}
		return s
	}
	if len(alloc.AllocatedResources.Shared.Ports) > 0 {
		for _, port := range alloc.AllocatedResources.Shared.Ports {
			addrs = append(addrs, portFmt("*"+port.Label, port.Value, port.To, port.HostIP, "yes"))
		}
	} else {
		for _, port := range nw.DynamicPorts {
			addrs = append(addrs, portFmt(port.Label, port.Value, port.To, nw.IP, "yes"))
		}
		for _, port := range nw.ReservedPorts {
			addrs = append(addrs, portFmt(port.Label, port.Value, port.To, nw.IP, "yes"))
		}
	}

	var mode string
	if nw.Mode != "" {
		mode = fmt.Sprintf(" (mode = %q)", nw.Mode)
	}

	return fmt.Sprintf("Allocation Addresses%s:\n%s", mode, formatList(addrs))
}
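
// formatAllocNomadServiceChecks returns a table of Nomad service check
// statuses for the allocation, or the empty string if there are none.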
func formatAllocNomadServiceChecks(allocID string, client *api.Client) string {
	statuses, err := client.Allocations().Checks(allocID, nil)
	if err != nil {
		return ""
	} else if len(statuses) == 0 {
		return ""
	}
	results := []string{"Service|Task|Name|Mode|Status"}
	for _, status := range statuses {
		task := "(group)"
		if status.Task != "" {
			task = status.Task
		}
		// service | task | check | mode | status
		s := fmt.Sprintf("%s|%s|%s|%s|%s", status.Service, task, status.Check, status.Mode, status.Status)
		results = append(results, s)
	}
	sort.Strings(results[1:])
	return fmt.Sprintf("Nomad Service Checks:\n%s", formatList(results))
}

// futureEvalTimePretty returns when the eval is eligible to reschedule,
// relative to the current time, based on the WaitUntil field.
func futureEvalTimePretty(evalID string, client *api.Client) string {
	evaluation, _, err := client.Evaluations().Info(evalID, nil)
	// Eval time is not a critical output; don't return it on errors, if it's
	// not set, or if it's already in the past.
	if err != nil || evaluation.WaitUntil.IsZero() || time.Now().After(evaluation.WaitUntil) {
		return ""
	}
	return prettyTimeDiff(evaluation.WaitUntil, time.Now())
}

// outputTaskDetails prints task details for each task in the allocation,
// optionally printing verbose statistics if displayStats is set
func (c *AllocStatusCommand) outputTaskDetails(alloc *api.Allocation, stats *api.AllocResourceUsage, displayStats bool, verbose bool) {
	taskLifecycles := map[string]*api.TaskLifecycle{}
	for _, t := range alloc.Job.LookupTaskGroup(alloc.TaskGroup).Tasks {
		taskLifecycles[t.Name] = t.Lifecycle
	}

	for _, task := range c.sortedTaskStateIterator(alloc.TaskStates, taskLifecycles) {
		state := alloc.TaskStates[task]

		lcIndicator := ""
		if lc := taskLifecycles[task]; !lc.Empty() {
			lcIndicator = " (" + lifecycleDisplayName(lc) + ")"
		}

		c.Ui.Output(c.Colorize().Color(fmt.Sprintf("\n[bold]Task %q%v is %q[reset]", task, lcIndicator, state.State)))
		c.outputTaskResources(alloc, task, stats, displayStats)
		c.Ui.Output("")
		c.outputTaskVolumes(alloc, task, verbose)
		c.outputTaskStatus(state)
	}
}
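
// formatTaskTimes formats a task event timestamp, returning "N/A" for the
// zero time.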
func formatTaskTimes(t time.Time) string {
	if t.IsZero() {
		return "N/A"
	}

	return formatTime(t)
}

// outputTaskStatus prints out a list of the most recent events for the given
// task state.
func (c *AllocStatusCommand) outputTaskStatus(state *api.TaskState) {
	basic := []string{
		fmt.Sprintf("Started At|%s", formatTaskTimes(state.StartedAt)),
		fmt.Sprintf("Finished At|%s", formatTaskTimes(state.FinishedAt)),
		fmt.Sprintf("Total Restarts|%d", state.Restarts),
		fmt.Sprintf("Last Restart|%s", formatTaskTimes(state.LastRestart))}

	c.Ui.Output("Task Events:")
	c.Ui.Output(formatKV(basic))
	c.Ui.Output("")

	c.Ui.Output("Recent Events:")
	events := make([]string, len(state.Events)+1)
	events[0] = "Time|Type|Description"

	size := len(state.Events)
	for i, event := range state.Events {
		msg := event.DisplayMessage
		if msg == "" {
			msg = buildDisplayMessage(event)
		}
		formattedTime := formatUnixNanoTime(event.Time)
		// Fill the slice in reverse order so events are sorted by time
		events[size-i] = fmt.Sprintf("%s|%s|%s", formattedTime, event.Type, msg)
	}

	c.Ui.Output(formatList(events))
}
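
// buildDisplayMessage returns a human-friendly description for a task event
// based on its type, falling back to the raw event message.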
func buildDisplayMessage(event *api.TaskEvent) string {
	// Build up the description based on the event type.
	var desc string
	switch event.Type {
	case api.TaskSetup:
		desc = event.Message
	case api.TaskStarted:
		desc = "Task started by client"
	case api.TaskReceived:
		desc = "Task received by client"
	case api.TaskFailedValidation:
		if event.ValidationError != "" {
			desc = event.ValidationError
		} else {
			desc = "Validation of task failed"
		}
	case api.TaskSetupFailure:
		if event.SetupError != "" {
			desc = event.SetupError
		} else {
			desc = "Task setup failed"
		}
	case api.TaskDriverFailure:
		if event.DriverError != "" {
			desc = event.DriverError
		} else {
			desc = "Failed to start task"
		}
	case api.TaskDownloadingArtifacts:
		desc = "Client is downloading artifacts"
	case api.TaskArtifactDownloadFailed:
		if event.DownloadError != "" {
			desc = event.DownloadError
		} else {
			desc = "Failed to download artifacts"
		}
	case api.TaskKilling:
		if event.KillReason != "" {
			desc = fmt.Sprintf("Killing task: %v", event.KillReason)
		} else if event.KillTimeout != 0 {
			desc = fmt.Sprintf("Sent interrupt. Waiting %v before force killing", event.KillTimeout)
		} else {
			desc = "Sent interrupt"
		}
	case api.TaskKilled:
		if event.KillError != "" {
			desc = event.KillError
		} else {
			desc = "Task successfully killed"
		}
	case api.TaskTerminated:
		var parts []string
		parts = append(parts, fmt.Sprintf("Exit Code: %d", event.ExitCode))

		if event.Signal != 0 {
			parts = append(parts, fmt.Sprintf("Signal: %d", event.Signal))
		}

		if event.Message != "" {
			parts = append(parts, fmt.Sprintf("Exit Message: %q", event.Message))
		}
		desc = strings.Join(parts, ", ")
	case api.TaskRestarting:
		in := fmt.Sprintf("Task restarting in %v", time.Duration(event.StartDelay))
		if event.RestartReason != "" && event.RestartReason != api.AllocRestartReasonWithinPolicy {
			desc = fmt.Sprintf("%s - %s", event.RestartReason, in)
		} else {
			desc = in
		}
	case api.TaskNotRestarting:
		if event.RestartReason != "" {
			desc = event.RestartReason
		} else {
			desc = "Task exceeded restart policy"
		}
	case api.TaskSiblingFailed:
		if event.FailedSibling != "" {
			desc = fmt.Sprintf("Task's sibling %q failed", event.FailedSibling)
		} else {
			desc = "Task's sibling failed"
		}
	case api.TaskSignaling:
		sig := event.TaskSignal
		reason := event.TaskSignalReason

		if sig == "" && reason == "" {
			desc = "Task being sent a signal"
		} else if sig == "" {
			desc = reason
		} else if reason == "" {
			desc = fmt.Sprintf("Task being sent signal %v", sig)
		} else {
			desc = fmt.Sprintf("Task being sent signal %v: %v", sig, reason)
		}
	case api.TaskRestartSignal:
		if event.RestartReason != "" {
			desc = event.RestartReason
		} else {
			desc = "Task signaled to restart"
		}
	case api.TaskDriverMessage:
		desc = event.DriverMessage
	case api.TaskLeaderDead:
		desc = "Leader Task in Group dead"
	case api.TaskClientReconnected:
		desc = "Client reconnected"
	default:
		desc = event.Message
	}

	return desc
}

// outputTaskResources prints the task resources for the passed task and, if
// displayStats is set, verbose resource usage statistics
func (c *AllocStatusCommand) outputTaskResources(alloc *api.Allocation, task string, stats *api.AllocResourceUsage, displayStats bool) {
	resource, ok := alloc.TaskResources[task]
	if !ok {
		return
	}

	c.Ui.Output("Task Resources:")
	var addr []string
	for _, nw := range resource.Networks {
		ports := append(nw.DynamicPorts, nw.ReservedPorts...) //nolint:gocritic
		for _, port := range ports {
			addr = append(addr, fmt.Sprintf("%v: %v:%v\n", port.Label, nw.IP, port.Value))
		}
	}

	var resourcesOutput []string
	cpuHeader := "CPU"
	if resource.Cores != nil && *resource.Cores > 0 {
		cpuHeader = fmt.Sprintf("CPU (%v cores)", *resource.Cores)
	}
	resourcesOutput = append(resourcesOutput, fmt.Sprintf("%s|Memory|Disk|Addresses", cpuHeader))
	firstAddr := ""
	secondAddr := ""
	if len(addr) > 0 {
		firstAddr = addr[0]
	}
	if len(addr) > 1 {
		secondAddr = addr[1]
	}

	// Display the rolled-up stats. If possible, prefer the live statistics.
	cpuUsage := strconv.Itoa(*resource.CPU)
	memUsage := humanize.IBytes(uint64(*resource.MemoryMB * bytesPerMegabyte))
	memMax := ""
	if max := resource.MemoryMaxMB; max != nil && *max != 0 && *max != *resource.MemoryMB {
		memMax = "Max: " + humanize.IBytes(uint64(*resource.MemoryMaxMB*bytesPerMegabyte))
	}
	var deviceStats []*api.DeviceGroupStats

	if stats != nil {
		if ru, ok := stats.Tasks[task]; ok && ru != nil && ru.ResourceUsage != nil {
			if cs := ru.ResourceUsage.CpuStats; cs != nil {
				cpuUsage = fmt.Sprintf("%v/%v", math.Floor(cs.TotalTicks), cpuUsage)
			}
			if ms := ru.ResourceUsage.MemoryStats; ms != nil {
				// Nomad uses RSS as the top-level metric to report, for
				// historical reasons, but it's not always measured (e.g. with
				// cgroup-v2)
				usage := ms.RSS
				if usage == 0 && !helper.SliceStringContains(ms.Measured, "RSS") {
					usage = ms.Usage
				}
				memUsage = fmt.Sprintf("%v/%v", humanize.IBytes(usage), memUsage)
			}
			deviceStats = ru.ResourceUsage.DeviceStats
		}
	}
	resourcesOutput = append(resourcesOutput, fmt.Sprintf("%v MHz|%v|%v|%v",
		cpuUsage,
		memUsage,
		humanize.IBytes(uint64(*alloc.Resources.DiskMB*bytesPerMegabyte)),
		firstAddr))
	if memMax != "" || secondAddr != "" {
		resourcesOutput = append(resourcesOutput, fmt.Sprintf("|%v||%v", memMax, secondAddr))
	}
	for i := 2; i < len(addr); i++ {
		resourcesOutput = append(resourcesOutput, fmt.Sprintf("|||%v", addr[i]))
	}
	c.Ui.Output(formatListWithSpaces(resourcesOutput))

	if len(deviceStats) > 0 {
		c.Ui.Output("")
		c.Ui.Output("Device Stats")
		c.Ui.Output(formatList(getDeviceResources(deviceStats)))
	}

	if stats != nil {
		if ru, ok := stats.Tasks[task]; ok && ru != nil && displayStats && ru.ResourceUsage != nil {
			c.Ui.Output("")
			c.outputVerboseResourceUsage(task, ru.ResourceUsage)
		}
	}
}

// outputVerboseResourceUsage outputs the verbose resource usage for the passed
// task
func (c *AllocStatusCommand) outputVerboseResourceUsage(task string, resourceUsage *api.ResourceUsage) {
	memoryStats := resourceUsage.MemoryStats
	cpuStats := resourceUsage.CpuStats
	deviceStats := resourceUsage.DeviceStats

	if memoryStats != nil && len(memoryStats.Measured) > 0 {
		c.Ui.Output("Memory Stats")

		// Sort the measured stats
		sort.Strings(memoryStats.Measured)

		var measuredStats []string
		for _, measured := range memoryStats.Measured {
			switch measured {
			case "RSS":
				measuredStats = append(measuredStats, humanize.IBytes(memoryStats.RSS))
			case "Cache":
				measuredStats = append(measuredStats, humanize.IBytes(memoryStats.Cache))
			case "Swap":
				measuredStats = append(measuredStats, humanize.IBytes(memoryStats.Swap))
			case "Usage":
				measuredStats = append(measuredStats, humanize.IBytes(memoryStats.Usage))
			case "Max Usage":
				measuredStats = append(measuredStats, humanize.IBytes(memoryStats.MaxUsage))
			case "Kernel Usage":
				measuredStats = append(measuredStats, humanize.IBytes(memoryStats.KernelUsage))
			case "Kernel Max Usage":
				measuredStats = append(measuredStats, humanize.IBytes(memoryStats.KernelMaxUsage))
			}
		}

		out := make([]string, 2)
		out[0] = strings.Join(memoryStats.Measured, "|")
		out[1] = strings.Join(measuredStats, "|")
		c.Ui.Output(formatList(out))
		c.Ui.Output("")
	}

	if cpuStats != nil && len(cpuStats.Measured) > 0 {
		c.Ui.Output("CPU Stats")

		// Sort the measured stats
		sort.Strings(cpuStats.Measured)

		var measuredStats []string
		for _, measured := range cpuStats.Measured {
			switch measured {
			case "Percent":
				percent := strconv.FormatFloat(cpuStats.Percent, 'f', 2, 64)
				measuredStats = append(measuredStats, fmt.Sprintf("%v%%", percent))
			case "Throttled Periods":
				measuredStats = append(measuredStats, fmt.Sprintf("%v", cpuStats.ThrottledPeriods))
			case "Throttled Time":
				measuredStats = append(measuredStats, fmt.Sprintf("%v", cpuStats.ThrottledTime))
			case "User Mode":
				percent := strconv.FormatFloat(cpuStats.UserMode, 'f', 2, 64)
				measuredStats = append(measuredStats, fmt.Sprintf("%v%%", percent))
			case "System Mode":
				percent := strconv.FormatFloat(cpuStats.SystemMode, 'f', 2, 64)
				measuredStats = append(measuredStats, fmt.Sprintf("%v%%", percent))
			}
		}

		out := make([]string, 2)
		out[0] = strings.Join(cpuStats.Measured, "|")
		out[1] = strings.Join(measuredStats, "|")
		c.Ui.Output(formatList(out))
	}

	if len(deviceStats) > 0 {
		c.Ui.Output("")
		c.Ui.Output("Device Stats")
		printDeviceStats(c.Ui, deviceStats)
	}
}

// shortTaskStatus prints out the current state of each task.
func (c *AllocStatusCommand) shortTaskStatus(alloc *api.Allocation) {
	tasks := make([]string, 0, len(alloc.TaskStates)+1)
	tasks = append(tasks, "Name|State|Last Event|Time|Lifecycle")

	taskLifecycles := map[string]*api.TaskLifecycle{}
	for _, t := range alloc.Job.LookupTaskGroup(alloc.TaskGroup).Tasks {
		taskLifecycles[t.Name] = t.Lifecycle
	}

	for _, task := range c.sortedTaskStateIterator(alloc.TaskStates, taskLifecycles) {
		state := alloc.TaskStates[task]
		lastState := state.State
		var lastEvent, lastTime string

		l := len(state.Events)
		if l != 0 {
			last := state.Events[l-1]
			lastEvent = last.Type
			lastTime = formatUnixNanoTime(last.Time)
		}

		tasks = append(tasks, fmt.Sprintf("%s|%s|%s|%s|%s",
			task, lastState, lastEvent, lastTime, lifecycleDisplayName(taskLifecycles[task])))
	}

	c.Ui.Output(c.Colorize().Color("\n[bold]Tasks[reset]"))
	c.Ui.Output(formatList(tasks))
}

// sortedTaskStateIterator is a helper that takes the task state map and
// returns its keys in sorted order, with lifecycle (prestart) tasks ordered
// before main tasks.
func (c *AllocStatusCommand) sortedTaskStateIterator(m map[string]*api.TaskState, lifecycles map[string]*api.TaskLifecycle) []string {
	keys := make([]string, len(m))
	i := 0
	for k := range m {
		keys[i] = k
		i++
	}
	sort.Strings(keys)

	// Display prestart, then prestart sidecar, then main
	sort.SliceStable(keys, func(i, j int) bool {
		lci := lifecycles[keys[i]]
		lcj := lifecycles[keys[j]]

		switch {
		case lci == nil:
			return false
		case lcj == nil:
			return true
		case !lci.Sidecar && lcj.Sidecar:
			return true
		default:
			return false
		}
	})

	return keys
}
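
// lifecycleDisplayName returns the human-readable name of a task's lifecycle
// hook, or "main" when no lifecycle is configured.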
func lifecycleDisplayName(l *api.TaskLifecycle) string {
	if l.Empty() {
		return "main"
	}

	sidecar := ""
	if l.Sidecar {
		sidecar = " sidecar"
	}
	return l.Hook + sidecar
}
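
// outputTaskVolumes prints the host and CSI volumes mounted by the given
// task, with extra per-volume detail in verbose mode.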
func (c *AllocStatusCommand) outputTaskVolumes(alloc *api.Allocation, taskName string, verbose bool) {
	var task *api.Task
	var tg *api.TaskGroup
FOUND:
	for _, tg = range alloc.Job.TaskGroups {
		for _, task = range tg.Tasks {
			if task.Name == taskName {
				break FOUND
			}
		}
	}
	if task == nil || tg == nil {
		c.Ui.Error(fmt.Sprintf("Could not find task data for %q", taskName))
		return
	}
	if len(task.VolumeMounts) == 0 {
		return
	}
	client, err := c.Meta.Client()
	if err != nil {
		c.Ui.Error(fmt.Sprintf("Error initializing client: %s", err))
		return
	}

	var hostVolumesOutput []string
	var csiVolumesOutput []string
	hostVolumesOutput = append(hostVolumesOutput, "ID|Read Only")
	if verbose {
		csiVolumesOutput = append(csiVolumesOutput,
			"Name|ID|Plugin|Provider|Schedulable|Read Only|Mount Options")
	} else {
		csiVolumesOutput = append(csiVolumesOutput, "ID|Read Only")
	}

	for _, volMount := range task.VolumeMounts {
		volReq := tg.Volumes[*volMount.Volume]
		switch volReq.Type {
		case api.CSIVolumeTypeHost:
			hostVolumesOutput = append(hostVolumesOutput,
				fmt.Sprintf("%s|%v", volReq.Name, *volMount.ReadOnly))
		case api.CSIVolumeTypeCSI:
			if verbose {
				source := volReq.Source
				if volReq.PerAlloc {
					source = source + api.AllocSuffix(alloc.Name)
				}

				// Fetching volume info costs an extra API call per volume,
				// so it's only done when the -verbose flag is set
				vol, _, err := client.CSIVolumes().Info(source, nil)
				if err != nil {
					c.Ui.Error(fmt.Sprintf("Error retrieving volume info for %q: %s",
						volReq.Name, err))
					continue
				}
				csiVolumesOutput = append(csiVolumesOutput,
					fmt.Sprintf("%s|%s|%s|%s|%v|%v|%s",
						volReq.Name,
						vol.ID,
						vol.PluginID,
						vol.Provider,
						vol.Schedulable,
						volReq.ReadOnly,
						csiVolMountOption(vol.MountOptions, volReq.MountOptions),
					))
			} else {
				csiVolumesOutput = append(csiVolumesOutput,
					fmt.Sprintf("%s|%v", volReq.Name, volReq.ReadOnly))
			}
		}
	}
	if len(hostVolumesOutput) > 1 {
		c.Ui.Output("Host Volumes:")
		c.Ui.Output(formatList(hostVolumesOutput))
		c.Ui.Output("") // line padding to next stanza
	}
	if len(csiVolumesOutput) > 1 {
		c.Ui.Output("CSI Volumes:")
		c.Ui.Output(formatList(csiVolumesOutput))
		c.Ui.Output("") // line padding to next stanza
	}
}