2015-09-27 19:02:14 +00:00
|
|
|
package command
|
|
|
|
|
|
|
|
import (
|
|
|
|
"fmt"
|
2016-06-06 22:10:22 +00:00
|
|
|
"math"
|
2015-11-17 02:09:00 +00:00
|
|
|
"sort"
|
2016-04-29 20:03:02 +00:00
|
|
|
"strconv"
|
2015-09-27 19:02:14 +00:00
|
|
|
"strings"
|
2015-11-17 02:09:00 +00:00
|
|
|
"time"
|
|
|
|
|
2016-06-06 22:10:22 +00:00
|
|
|
"github.com/dustin/go-humanize"
|
|
|
|
"github.com/mitchellh/colorstring"
|
|
|
|
|
2015-11-17 02:09:00 +00:00
|
|
|
"github.com/hashicorp/nomad/api"
|
2016-03-24 22:43:55 +00:00
|
|
|
"github.com/hashicorp/nomad/client"
|
2015-09-27 19:02:14 +00:00
|
|
|
)
|
|
|
|
|
|
|
|
// AllocStatusCommand implements the "nomad alloc-status" CLI command, which
// displays status information and metadata for a single allocation.
type AllocStatusCommand struct {
	Meta

	// color supports colorized terminal output for the detailed views.
	color *colorstring.Colorize
}
|
|
|
|
|
|
|
|
// Help returns the long-form usage text for the alloc-status command.
func (c *AllocStatusCommand) Help() string {
	helpText := `
Usage: nomad alloc-status [options] <allocation>

  Display information about existing allocations and its tasks. This command can
  be used to inspect the current status of all allocation, including its running
  status, metadata, and verbose failure messages reported by internal
  subsystems.

General Options:

  ` + generalOptionsUsage() + `

Alloc Status Options:

  -short
    Display short output. Shows only the most recent task event.

  -stats
    Display detailed resource usage statistics.

  -verbose
    Show full information.

  -json
    Output the allocation in its JSON format.

  -t
    Format and display allocation using a Go template.
`

	return strings.TrimSpace(helpText)
}
|
|
|
|
|
|
|
|
// Synopsis returns the one-line description shown in the nomad command list.
func (c *AllocStatusCommand) Synopsis() string {
	return "Display allocation status information and metadata"
}
|
|
|
|
|
|
|
|
func (c *AllocStatusCommand) Run(args []string) int {
|
2016-08-04 10:19:31 +00:00
|
|
|
var short, displayStats, verbose, json bool
|
|
|
|
var tmpl string
|
2015-11-17 02:09:00 +00:00
|
|
|
|
2015-09-27 19:02:14 +00:00
|
|
|
flags := c.Meta.FlagSet("alloc-status", FlagSetClient)
|
|
|
|
flags.Usage = func() { c.Ui.Output(c.Help()) }
|
2015-11-17 02:09:00 +00:00
|
|
|
flags.BoolVar(&short, "short", false, "")
|
2016-01-15 22:32:38 +00:00
|
|
|
flags.BoolVar(&verbose, "verbose", false, "")
|
2016-06-06 22:10:22 +00:00
|
|
|
flags.BoolVar(&displayStats, "stats", false, "")
|
2016-08-04 10:19:31 +00:00
|
|
|
flags.BoolVar(&json, "json", false, "")
|
2016-07-30 10:20:43 +00:00
|
|
|
flags.StringVar(&tmpl, "t", "", "")
|
2015-11-17 02:09:00 +00:00
|
|
|
|
2015-09-27 19:02:14 +00:00
|
|
|
if err := flags.Parse(args); err != nil {
|
|
|
|
return 1
|
|
|
|
}
|
|
|
|
|
|
|
|
// Check that we got exactly one allocation ID
|
|
|
|
args = flags.Args()
|
|
|
|
|
|
|
|
// Get the HTTP client
|
|
|
|
client, err := c.Meta.Client()
|
|
|
|
if err != nil {
|
|
|
|
c.Ui.Error(fmt.Sprintf("Error initializing client: %s", err))
|
|
|
|
return 1
|
|
|
|
}
|
|
|
|
|
2016-08-06 11:30:12 +00:00
|
|
|
// If args not specified but output format is specified, format and output the allocations data list
|
|
|
|
if len(args) == 0 {
|
|
|
|
var format string
|
2016-08-06 12:38:41 +00:00
|
|
|
if json && len(tmpl) > 0 {
|
|
|
|
c.Ui.Error("Both -json and -t are not allowed")
|
|
|
|
return 1
|
|
|
|
} else if json {
|
2016-08-06 11:30:12 +00:00
|
|
|
format = "json"
|
|
|
|
} else if len(tmpl) > 0 {
|
|
|
|
format = "template"
|
|
|
|
}
|
|
|
|
if len(format) > 0 {
|
|
|
|
allocs, _, err := client.Allocations().List(nil)
|
|
|
|
if err != nil {
|
|
|
|
c.Ui.Error(fmt.Sprintf("Error querying allocations: %v", err))
|
|
|
|
return 1
|
|
|
|
}
|
|
|
|
// Return nothing if no allocations found
|
|
|
|
if len(allocs) == 0 {
|
|
|
|
return 0
|
|
|
|
}
|
|
|
|
|
|
|
|
f, err := DataFormat(format, tmpl)
|
|
|
|
if err != nil {
|
|
|
|
c.Ui.Error(fmt.Sprintf("Error getting formatter: %s", err))
|
|
|
|
return 1
|
|
|
|
}
|
|
|
|
|
|
|
|
out, err := f.TransformData(allocs)
|
|
|
|
if err != nil {
|
|
|
|
c.Ui.Error(fmt.Sprintf("Error formatting the data: %s", err))
|
|
|
|
return 1
|
|
|
|
}
|
|
|
|
c.Ui.Output(out)
|
|
|
|
return 0
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
if len(args) != 1 {
|
|
|
|
c.Ui.Error(c.Help())
|
|
|
|
return 1
|
|
|
|
}
|
|
|
|
allocID := args[0]
|
|
|
|
|
2016-01-14 20:57:43 +00:00
|
|
|
// Truncate the id unless full length is requested
|
2016-01-15 22:32:38 +00:00
|
|
|
length := shortId
|
|
|
|
if verbose {
|
|
|
|
length = fullId
|
2016-01-14 20:57:43 +00:00
|
|
|
}
|
|
|
|
|
2015-09-27 19:02:14 +00:00
|
|
|
// Query the allocation info
|
2016-03-17 23:48:45 +00:00
|
|
|
if len(allocID) == 1 {
|
|
|
|
c.Ui.Error(fmt.Sprintf("Identifier must contain at least two characters."))
|
|
|
|
return 1
|
|
|
|
}
|
|
|
|
if len(allocID)%2 == 1 {
|
|
|
|
// Identifiers must be of even length, so we strip off the last byte
|
|
|
|
// to provide a consistent user experience.
|
|
|
|
allocID = allocID[:len(allocID)-1]
|
|
|
|
}
|
2016-01-21 21:21:35 +00:00
|
|
|
|
2016-03-17 23:48:45 +00:00
|
|
|
allocs, _, err := client.Allocations().PrefixList(allocID)
|
|
|
|
if err != nil {
|
|
|
|
c.Ui.Error(fmt.Sprintf("Error querying allocation: %v", err))
|
|
|
|
return 1
|
|
|
|
}
|
|
|
|
if len(allocs) == 0 {
|
|
|
|
c.Ui.Error(fmt.Sprintf("No allocation(s) with prefix or id %q found", allocID))
|
|
|
|
return 1
|
|
|
|
}
|
|
|
|
if len(allocs) > 1 {
|
|
|
|
// Format the allocs
|
|
|
|
out := make([]string, len(allocs)+1)
|
|
|
|
out[0] = "ID|Eval ID|Job ID|Task Group|Desired Status|Client Status"
|
|
|
|
for i, alloc := range allocs {
|
|
|
|
out[i+1] = fmt.Sprintf("%s|%s|%s|%s|%s|%s",
|
|
|
|
limit(alloc.ID, length),
|
|
|
|
limit(alloc.EvalID, length),
|
|
|
|
alloc.JobID,
|
|
|
|
alloc.TaskGroup,
|
|
|
|
alloc.DesiredStatus,
|
|
|
|
alloc.ClientStatus,
|
|
|
|
)
|
2015-12-24 10:46:59 +00:00
|
|
|
}
|
2016-03-17 23:48:45 +00:00
|
|
|
c.Ui.Output(fmt.Sprintf("Prefix matched multiple allocations\n\n%s", formatList(out)))
|
|
|
|
return 0
|
|
|
|
}
|
|
|
|
// Prefix lookup matched a single allocation
|
|
|
|
alloc, _, err := client.Allocations().Info(allocs[0].ID, nil)
|
|
|
|
if err != nil {
|
|
|
|
c.Ui.Error(fmt.Sprintf("Error querying allocation: %s", err))
|
|
|
|
return 1
|
2015-09-27 19:02:14 +00:00
|
|
|
}
|
|
|
|
|
2016-07-30 10:20:43 +00:00
|
|
|
// If output format is specified, format and output the data
|
2016-08-04 10:19:31 +00:00
|
|
|
var format string
|
2016-08-06 12:38:41 +00:00
|
|
|
if json && len(tmpl) > 0 {
|
|
|
|
c.Ui.Error("Both -json and -t are not allowed")
|
|
|
|
return 1
|
|
|
|
} else if json {
|
2016-08-04 10:19:31 +00:00
|
|
|
format = "json"
|
|
|
|
} else if len(tmpl) > 0 {
|
|
|
|
format = "template"
|
|
|
|
}
|
|
|
|
if len(format) > 0 {
|
2016-07-30 10:20:43 +00:00
|
|
|
f, err := DataFormat(format, tmpl)
|
|
|
|
if err != nil {
|
|
|
|
c.Ui.Error(fmt.Sprintf("Error getting formatter: %s", err))
|
|
|
|
return 1
|
|
|
|
}
|
|
|
|
|
|
|
|
out, err := f.TransformData(alloc)
|
|
|
|
if err != nil {
|
2016-08-06 09:54:30 +00:00
|
|
|
c.Ui.Error(fmt.Sprintf("Error formatting the data: %s", err))
|
2016-07-30 10:20:43 +00:00
|
|
|
return 1
|
|
|
|
}
|
|
|
|
c.Ui.Output(out)
|
|
|
|
return 0
|
|
|
|
}
|
|
|
|
|
2015-09-27 19:02:14 +00:00
|
|
|
// Format the allocation data
|
|
|
|
basic := []string{
|
2016-01-27 18:42:10 +00:00
|
|
|
fmt.Sprintf("ID|%s", limit(alloc.ID, length)),
|
|
|
|
fmt.Sprintf("Eval ID|%s", limit(alloc.EvalID, length)),
|
2015-09-27 19:02:14 +00:00
|
|
|
fmt.Sprintf("Name|%s", alloc.Name),
|
2016-01-27 18:42:10 +00:00
|
|
|
fmt.Sprintf("Node ID|%s", limit(alloc.NodeID, length)),
|
2016-01-21 20:35:56 +00:00
|
|
|
fmt.Sprintf("Job ID|%s", alloc.JobID),
|
|
|
|
fmt.Sprintf("Client Status|%s", alloc.ClientStatus),
|
2016-09-14 20:30:01 +00:00
|
|
|
fmt.Sprintf("Client Description|%s", alloc.ClientDescription),
|
2016-08-19 02:25:32 +00:00
|
|
|
fmt.Sprintf("Created At|%s", formatUnixNanoTime(alloc.CreateTime)),
|
2016-03-20 23:52:24 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
if verbose {
|
|
|
|
basic = append(basic,
|
|
|
|
fmt.Sprintf("Evaluated Nodes|%d", alloc.Metrics.NodesEvaluated),
|
|
|
|
fmt.Sprintf("Filtered Nodes|%d", alloc.Metrics.NodesFiltered),
|
|
|
|
fmt.Sprintf("Exhausted Nodes|%d", alloc.Metrics.NodesExhausted),
|
|
|
|
fmt.Sprintf("Allocation Time|%s", alloc.Metrics.AllocationTime),
|
|
|
|
fmt.Sprintf("Failures|%d", alloc.Metrics.CoalescedFailures))
|
2015-09-27 19:02:14 +00:00
|
|
|
}
|
|
|
|
c.Ui.Output(formatKV(basic))
|
|
|
|
|
2015-11-17 02:09:00 +00:00
|
|
|
if short {
|
|
|
|
c.shortTaskStatus(alloc)
|
|
|
|
} else {
|
2016-08-29 15:43:09 +00:00
|
|
|
var statsErr error
|
|
|
|
var stats *api.AllocResourceUsage
|
|
|
|
stats, statsErr = client.Allocations().Stats(alloc, nil)
|
|
|
|
if statsErr != nil {
|
|
|
|
c.Ui.Output("")
|
2016-10-21 01:05:58 +00:00
|
|
|
if statsErr != api.NodeDownErr {
|
|
|
|
c.Ui.Error(fmt.Sprintf("couldn't retrieve stats (HINT: ensure Client.Advertise.HTTP is set): %v", statsErr))
|
2016-10-25 18:31:09 +00:00
|
|
|
} else {
|
|
|
|
c.Ui.Output("Omitting resource statistics since the node is down.")
|
2016-10-21 01:05:58 +00:00
|
|
|
}
|
2016-08-29 15:43:09 +00:00
|
|
|
}
|
2016-06-12 21:08:47 +00:00
|
|
|
c.outputTaskDetails(alloc, stats, displayStats)
|
2015-11-17 02:09:00 +00:00
|
|
|
}
|
|
|
|
|
2015-09-27 20:59:27 +00:00
|
|
|
// Format the detailed status
|
2016-06-12 21:08:47 +00:00
|
|
|
if verbose {
|
|
|
|
c.Ui.Output(c.Colorize().Color("\n[bold]Placement Metrics[reset]"))
|
|
|
|
c.Ui.Output(formatAllocMetrics(alloc.Metrics, true, " "))
|
Print resource usage w/ alloc-status + node-status
When alloc-status is called, in it's long form only, print the resource
utilization for that single allocation.
When node-status is called, in it's long form only, print the TOTAL
resource utilization that is occurring on that single node.
Nomad Alloc Status:
```
% nomad alloc-status 195d3bf2
ID = 195d3bf2
Eval ID = c917e3ee
Name = example.cache[1]
Node ID = 1b2520a7
Job ID = example
Client Status = running
Evaluated Nodes = 1
Filtered Nodes = 0
Exhausted Nodes = 0
Allocation Time = 17.73µs
Failures = 0
==> Task "redis" is "running"
Recent Events:
Time Type Description
04/03/16 21:20:45 EST Started Task started by client
04/03/16 21:20:42 EST Received Task received by client
==> Status
Allocation "195d3bf2" status "running" (0/1 nodes filtered)
* Score "1b2520a7-6714-e78d-a8f7-68467dda6db7.binpack" = 1.209464
* Score "1b2520a7-6714-e78d-a8f7-68467dda6db7.job-anti-affinity" = -10.000000
==> Resources
CPU MemoryMB DiskMB IOPS
500 256 300 0
```
Nomad Node Status:
```
% nomad node-status 57b3a55a
ID = 57b3a55a
Name = biscuits
Class = <none>
DC = dc1
Drain = false
Status = ready
Attributes = arch:amd64, cpu.frequency:3753.458875, cpu.modelname:Intel(R) Core(TM) i7-4770K CPU @ 3.50GHz, cpu.numcores:8, cpu.totalcompute:30027.671000, driver.docker:1, driver.docker.version:1.10.2, driver.exec:1, driver.raw_exec:1, hostname:biscuits, kernel.name:linux, kernel.version:4.4.0-9-generic, memory.totalbytes:25208934400, os.name:ubuntu, os.version:16.04, unique.cgroup.mountpoint:/sys/fs/cgroup, unique.network.ip-address:127.0.0.1, unique.storage.bytesfree:219781419008, unique.storage.bytestotal:246059892736, unique.storage.volume:/dev/sdb3
==> Allocations
ID Eval ID Job ID Task Group Desired Status Client Status
2c236883 aa11aca8 example cache run running
32f6e3d6 aa11aca8 example cache run running
==> Resource Utilization
CPU MemoryMB DiskMB IOPS
1000 512 600 0
```
2016-03-05 02:29:39 +00:00
|
|
|
}
|
|
|
|
|
2015-09-27 19:02:14 +00:00
|
|
|
return 0
|
|
|
|
}
|
2015-11-17 02:09:00 +00:00
|
|
|
|
2016-06-12 21:08:47 +00:00
|
|
|
// outputTaskDetails prints task details for each task in the allocation,
// optionally printing verbose statistics if displayStats is set.
func (c *AllocStatusCommand) outputTaskDetails(alloc *api.Allocation, stats *api.AllocResourceUsage, displayStats bool) {
	// Iterate task names in sorted order for deterministic output.
	for task := range c.sortedTaskStateIterator(alloc.TaskStates) {
		state := alloc.TaskStates[task]
		c.Ui.Output(c.Colorize().Color(fmt.Sprintf("\n[bold]Task %q is %q[reset]", task, state.State)))
		c.outputTaskResources(alloc, task, stats, displayStats)
		c.Ui.Output("")
		c.outputTaskStatus(state)
	}
}
|
|
|
|
|
2016-06-12 21:08:47 +00:00
|
|
|
// outputTaskStatus prints out a list of the most recent events for the given
|
|
|
|
// task state.
|
|
|
|
func (c *AllocStatusCommand) outputTaskStatus(state *api.TaskState) {
|
|
|
|
c.Ui.Output("Recent Events:")
|
|
|
|
events := make([]string, len(state.Events)+1)
|
|
|
|
events[0] = "Time|Type|Description"
|
|
|
|
|
|
|
|
size := len(state.Events)
|
|
|
|
for i, event := range state.Events {
|
2016-08-09 02:24:38 +00:00
|
|
|
formatedTime := formatUnixNanoTime(event.Time)
|
2016-06-12 21:08:47 +00:00
|
|
|
|
|
|
|
// Build up the description based on the event type.
|
|
|
|
var desc string
|
|
|
|
switch event.Type {
|
|
|
|
case api.TaskStarted:
|
|
|
|
desc = "Task started by client"
|
|
|
|
case api.TaskReceived:
|
|
|
|
desc = "Task received by client"
|
|
|
|
case api.TaskFailedValidation:
|
|
|
|
if event.ValidationError != "" {
|
|
|
|
desc = event.ValidationError
|
|
|
|
} else {
|
|
|
|
desc = "Validation of task failed"
|
2015-11-17 02:09:00 +00:00
|
|
|
}
|
2016-10-10 21:49:37 +00:00
|
|
|
case api.TaskSetupFailure:
|
|
|
|
if event.SetupError != "" {
|
|
|
|
desc = event.SetupError
|
|
|
|
} else {
|
|
|
|
desc = "Task setup failed"
|
|
|
|
}
|
2016-06-12 21:08:47 +00:00
|
|
|
case api.TaskDriverFailure:
|
|
|
|
if event.DriverError != "" {
|
|
|
|
desc = event.DriverError
|
|
|
|
} else {
|
|
|
|
desc = "Failed to start task"
|
|
|
|
}
|
|
|
|
case api.TaskDownloadingArtifacts:
|
|
|
|
desc = "Client is downloading artifacts"
|
|
|
|
case api.TaskArtifactDownloadFailed:
|
|
|
|
if event.DownloadError != "" {
|
|
|
|
desc = event.DownloadError
|
|
|
|
} else {
|
|
|
|
desc = "Failed to download artifacts"
|
|
|
|
}
|
2016-07-21 22:49:54 +00:00
|
|
|
case api.TaskKilling:
|
2016-10-10 21:49:37 +00:00
|
|
|
if event.KillReason != "" {
|
|
|
|
desc = fmt.Sprintf("Killing task: %v", event.KillReason)
|
|
|
|
} else if event.KillTimeout != 0 {
|
2016-09-13 15:47:11 +00:00
|
|
|
desc = fmt.Sprintf("Sent interrupt. Waiting %v before force killing", event.KillTimeout)
|
2016-07-21 22:49:54 +00:00
|
|
|
} else {
|
2016-09-13 15:47:11 +00:00
|
|
|
desc = "Sent interrupt"
|
2016-07-21 22:49:54 +00:00
|
|
|
}
|
2016-06-12 21:08:47 +00:00
|
|
|
case api.TaskKilled:
|
|
|
|
if event.KillError != "" {
|
|
|
|
desc = event.KillError
|
|
|
|
} else {
|
|
|
|
desc = "Task successfully killed"
|
|
|
|
}
|
|
|
|
case api.TaskTerminated:
|
|
|
|
var parts []string
|
|
|
|
parts = append(parts, fmt.Sprintf("Exit Code: %d", event.ExitCode))
|
2015-11-17 02:09:00 +00:00
|
|
|
|
2016-06-12 21:08:47 +00:00
|
|
|
if event.Signal != 0 {
|
|
|
|
parts = append(parts, fmt.Sprintf("Signal: %d", event.Signal))
|
|
|
|
}
|
2015-11-17 02:09:00 +00:00
|
|
|
|
2016-06-12 21:08:47 +00:00
|
|
|
if event.Message != "" {
|
|
|
|
parts = append(parts, fmt.Sprintf("Exit Message: %q", event.Message))
|
|
|
|
}
|
|
|
|
desc = strings.Join(parts, ", ")
|
|
|
|
case api.TaskRestarting:
|
|
|
|
in := fmt.Sprintf("Task restarting in %v", time.Duration(event.StartDelay))
|
|
|
|
if event.RestartReason != "" && event.RestartReason != client.ReasonWithinPolicy {
|
2016-10-05 22:11:09 +00:00
|
|
|
desc = fmt.Sprintf("%s - %s", event.RestartReason, in)
|
2016-06-12 21:08:47 +00:00
|
|
|
} else {
|
|
|
|
desc = in
|
|
|
|
}
|
|
|
|
case api.TaskNotRestarting:
|
|
|
|
if event.RestartReason != "" {
|
|
|
|
desc = event.RestartReason
|
|
|
|
} else {
|
|
|
|
desc = "Task exceeded restart policy"
|
|
|
|
}
|
2016-09-15 01:27:13 +00:00
|
|
|
case api.TaskVaultRenewalFailed:
|
|
|
|
if event.VaultError != "" {
|
|
|
|
desc = event.VaultError
|
|
|
|
} else {
|
|
|
|
desc = "Task's Vault token failed to be renewed"
|
|
|
|
}
|
|
|
|
case api.TaskSiblingFailed:
|
|
|
|
if event.FailedSibling != "" {
|
|
|
|
desc = fmt.Sprintf("Task's sibling %q failed", event.FailedSibling)
|
|
|
|
} else {
|
|
|
|
desc = "Task's sibling failed"
|
|
|
|
}
|
2016-10-05 20:41:29 +00:00
|
|
|
case api.TaskSignaling:
|
|
|
|
sig := event.TaskSignal
|
|
|
|
reason := event.TaskSignalReason
|
|
|
|
|
|
|
|
if sig == "" && reason == "" {
|
|
|
|
desc = "Task being sent a signal"
|
|
|
|
} else if sig == "" {
|
|
|
|
desc = reason
|
|
|
|
} else if reason == "" {
|
|
|
|
desc = fmt.Sprintf("Task being sent signal %v", sig)
|
|
|
|
} else {
|
|
|
|
desc = fmt.Sprintf("Task being sent signal %v: %v", sig, reason)
|
|
|
|
}
|
2016-10-05 22:11:09 +00:00
|
|
|
case api.TaskRestartSignal:
|
|
|
|
if event.RestartReason != "" {
|
|
|
|
desc = event.RestartReason
|
|
|
|
} else {
|
|
|
|
desc = "Task signaled to restart"
|
|
|
|
}
|
2016-06-12 21:08:47 +00:00
|
|
|
}
|
2015-11-17 02:09:00 +00:00
|
|
|
|
2016-06-12 21:08:47 +00:00
|
|
|
// Reverse order so we are sorted by time
|
|
|
|
events[size-i] = fmt.Sprintf("%s|%s|%s", formatedTime, event.Type, desc)
|
2015-11-17 02:09:00 +00:00
|
|
|
}
|
2016-06-12 21:08:47 +00:00
|
|
|
c.Ui.Output(formatList(events))
|
2015-11-17 02:09:00 +00:00
|
|
|
}
|
Print resource usage w/ alloc-status + node-status
When alloc-status is called, in it's long form only, print the resource
utilization for that single allocation.
When node-status is called, in it's long form only, print the TOTAL
resource utilization that is occurring on that single node.
Nomad Alloc Status:
```
% nomad alloc-status 195d3bf2
ID = 195d3bf2
Eval ID = c917e3ee
Name = example.cache[1]
Node ID = 1b2520a7
Job ID = example
Client Status = running
Evaluated Nodes = 1
Filtered Nodes = 0
Exhausted Nodes = 0
Allocation Time = 17.73µs
Failures = 0
==> Task "redis" is "running"
Recent Events:
Time Type Description
04/03/16 21:20:45 EST Started Task started by client
04/03/16 21:20:42 EST Received Task received by client
==> Status
Allocation "195d3bf2" status "running" (0/1 nodes filtered)
* Score "1b2520a7-6714-e78d-a8f7-68467dda6db7.binpack" = 1.209464
* Score "1b2520a7-6714-e78d-a8f7-68467dda6db7.job-anti-affinity" = -10.000000
==> Resources
CPU MemoryMB DiskMB IOPS
500 256 300 0
```
Nomad Node Status:
```
% nomad node-status 57b3a55a
ID = 57b3a55a
Name = biscuits
Class = <none>
DC = dc1
Drain = false
Status = ready
Attributes = arch:amd64, cpu.frequency:3753.458875, cpu.modelname:Intel(R) Core(TM) i7-4770K CPU @ 3.50GHz, cpu.numcores:8, cpu.totalcompute:30027.671000, driver.docker:1, driver.docker.version:1.10.2, driver.exec:1, driver.raw_exec:1, hostname:biscuits, kernel.name:linux, kernel.version:4.4.0-9-generic, memory.totalbytes:25208934400, os.name:ubuntu, os.version:16.04, unique.cgroup.mountpoint:/sys/fs/cgroup, unique.network.ip-address:127.0.0.1, unique.storage.bytesfree:219781419008, unique.storage.bytestotal:246059892736, unique.storage.volume:/dev/sdb3
==> Allocations
ID Eval ID Job ID Task Group Desired Status Client Status
2c236883 aa11aca8 example cache run running
32f6e3d6 aa11aca8 example cache run running
==> Resource Utilization
CPU MemoryMB DiskMB IOPS
1000 512 600 0
```
2016-03-05 02:29:39 +00:00
|
|
|
|
2016-06-12 21:08:47 +00:00
|
|
|
// outputTaskResources prints the task resources for the passed task and if
// displayStats is set, verbose resource usage statistics.
func (c *AllocStatusCommand) outputTaskResources(alloc *api.Allocation, task string, stats *api.AllocResourceUsage, displayStats bool) {
	// Nothing to print for tasks without a resource entry.
	resource, ok := alloc.TaskResources[task]
	if !ok {
		return
	}

	c.Ui.Output("Task Resources")
	// Collect "label: ip:port" entries for every network port (dynamic
	// ports first, then reserved ports).
	var addr []string
	for _, nw := range resource.Networks {
		ports := append(nw.DynamicPorts, nw.ReservedPorts...)
		for _, port := range ports {
			// NOTE(review): the trailing "\n" embeds a newline in each
			// address cell — presumably intentional for the column layout;
			// confirm against formatListWithSpaces.
			addr = append(addr, fmt.Sprintf("%v: %v:%v\n", port.Label, nw.IP, port.Value))
		}
	}
	var resourcesOutput []string
	resourcesOutput = append(resourcesOutput, "CPU|Memory|Disk|IOPS|Addresses")
	firstAddr := ""
	if len(addr) > 0 {
		firstAddr = addr[0]
	}

	// Display the rolled up stats. If possible prefer the live statistics:
	// when stats are available the cells become "used/allocated".
	cpuUsage := strconv.Itoa(resource.CPU)
	memUsage := humanize.IBytes(uint64(resource.MemoryMB * bytesPerMegabyte))
	if stats != nil {
		if ru, ok := stats.Tasks[task]; ok && ru != nil && ru.ResourceUsage != nil {
			if cs := ru.ResourceUsage.CpuStats; cs != nil {
				cpuUsage = fmt.Sprintf("%v/%v", math.Floor(cs.TotalTicks), resource.CPU)
			}
			if ms := ru.ResourceUsage.MemoryStats; ms != nil {
				memUsage = fmt.Sprintf("%v/%v", humanize.IBytes(ms.RSS), memUsage)
			}
		}
	}
	resourcesOutput = append(resourcesOutput, fmt.Sprintf("%v MHz|%v|%v|%v|%v",
		cpuUsage,
		memUsage,
		humanize.IBytes(uint64(resource.DiskMB*bytesPerMegabyte)),
		resource.IOPS,
		firstAddr))
	// Remaining addresses get their own rows under the Addresses column.
	for i := 1; i < len(addr); i++ {
		resourcesOutput = append(resourcesOutput, fmt.Sprintf("||||%v", addr[i]))
	}
	c.Ui.Output(formatListWithSpaces(resourcesOutput))

	// Optionally print the full per-task usage statistics below the summary.
	if stats != nil {
		if ru, ok := stats.Tasks[task]; ok && ru != nil && displayStats && ru.ResourceUsage != nil {
			c.Ui.Output("")
			c.outputVerboseResourceUsage(task, ru.ResourceUsage)
		}
	}
}
|
2016-06-06 22:10:22 +00:00
|
|
|
|
2016-06-12 21:08:47 +00:00
|
|
|
// outputVerboseResourceUsage outputs the verbose resource usage for the passed
|
|
|
|
// task
|
|
|
|
func (c *AllocStatusCommand) outputVerboseResourceUsage(task string, resourceUsage *api.ResourceUsage) {
|
2016-06-06 22:10:22 +00:00
|
|
|
memoryStats := resourceUsage.MemoryStats
|
|
|
|
cpuStats := resourceUsage.CpuStats
|
2016-06-10 17:38:29 +00:00
|
|
|
if memoryStats != nil && len(memoryStats.Measured) > 0 {
|
|
|
|
c.Ui.Output("Memory Stats")
|
|
|
|
|
|
|
|
// Sort the measured stats
|
|
|
|
sort.Strings(memoryStats.Measured)
|
|
|
|
|
|
|
|
var measuredStats []string
|
|
|
|
for _, measured := range memoryStats.Measured {
|
|
|
|
switch measured {
|
|
|
|
case "RSS":
|
2016-06-12 21:20:39 +00:00
|
|
|
measuredStats = append(measuredStats, humanize.IBytes(memoryStats.RSS))
|
2016-06-10 17:38:29 +00:00
|
|
|
case "Cache":
|
2016-06-12 21:20:39 +00:00
|
|
|
measuredStats = append(measuredStats, humanize.IBytes(memoryStats.Cache))
|
2016-06-10 17:38:29 +00:00
|
|
|
case "Swap":
|
2016-06-12 21:20:39 +00:00
|
|
|
measuredStats = append(measuredStats, humanize.IBytes(memoryStats.Swap))
|
2016-06-10 17:38:29 +00:00
|
|
|
case "Max Usage":
|
2016-06-12 21:20:39 +00:00
|
|
|
measuredStats = append(measuredStats, humanize.IBytes(memoryStats.MaxUsage))
|
2016-06-10 17:38:29 +00:00
|
|
|
case "Kernel Usage":
|
2016-06-12 21:20:39 +00:00
|
|
|
measuredStats = append(measuredStats, humanize.IBytes(memoryStats.KernelUsage))
|
2016-06-10 17:38:29 +00:00
|
|
|
case "Kernel Max Usage":
|
2016-06-12 21:20:39 +00:00
|
|
|
measuredStats = append(measuredStats, humanize.IBytes(memoryStats.KernelMaxUsage))
|
2016-06-10 17:38:29 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
out := make([]string, 2)
|
|
|
|
out[0] = strings.Join(memoryStats.Measured, "|")
|
|
|
|
out[1] = strings.Join(measuredStats, "|")
|
|
|
|
c.Ui.Output(formatList(out))
|
|
|
|
c.Ui.Output("")
|
|
|
|
}
|
|
|
|
|
|
|
|
if cpuStats != nil && len(cpuStats.Measured) > 0 {
|
|
|
|
c.Ui.Output("CPU Stats")
|
|
|
|
|
|
|
|
// Sort the measured stats
|
|
|
|
sort.Strings(cpuStats.Measured)
|
|
|
|
|
|
|
|
var measuredStats []string
|
|
|
|
for _, measured := range cpuStats.Measured {
|
|
|
|
switch measured {
|
|
|
|
case "Percent":
|
|
|
|
percent := strconv.FormatFloat(cpuStats.Percent, 'f', 2, 64)
|
|
|
|
measuredStats = append(measuredStats, fmt.Sprintf("%v%%", percent))
|
|
|
|
case "Throttled Periods":
|
|
|
|
measuredStats = append(measuredStats, fmt.Sprintf("%v", cpuStats.ThrottledPeriods))
|
|
|
|
case "Throttled Time":
|
|
|
|
measuredStats = append(measuredStats, fmt.Sprintf("%v", cpuStats.ThrottledTime))
|
|
|
|
case "User Mode":
|
|
|
|
percent := strconv.FormatFloat(cpuStats.UserMode, 'f', 2, 64)
|
|
|
|
measuredStats = append(measuredStats, fmt.Sprintf("%v%%", percent))
|
|
|
|
case "System Mode":
|
|
|
|
percent := strconv.FormatFloat(cpuStats.SystemMode, 'f', 2, 64)
|
|
|
|
measuredStats = append(measuredStats, fmt.Sprintf("%v%%", percent))
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
out := make([]string, 2)
|
|
|
|
out[0] = strings.Join(cpuStats.Measured, "|")
|
|
|
|
out[1] = strings.Join(measuredStats, "|")
|
|
|
|
c.Ui.Output(formatList(out))
|
|
|
|
}
|
2016-06-06 22:10:22 +00:00
|
|
|
}
|
2016-06-12 21:08:47 +00:00
|
|
|
|
|
|
|
// shortTaskStatus prints out the current state of each task.
|
|
|
|
func (c *AllocStatusCommand) shortTaskStatus(alloc *api.Allocation) {
|
|
|
|
tasks := make([]string, 0, len(alloc.TaskStates)+1)
|
|
|
|
tasks = append(tasks, "Name|State|Last Event|Time")
|
|
|
|
for task := range c.sortedTaskStateIterator(alloc.TaskStates) {
|
|
|
|
state := alloc.TaskStates[task]
|
|
|
|
lastState := state.State
|
|
|
|
var lastEvent, lastTime string
|
|
|
|
|
|
|
|
l := len(state.Events)
|
|
|
|
if l != 0 {
|
|
|
|
last := state.Events[l-1]
|
|
|
|
lastEvent = last.Type
|
2016-08-09 02:24:38 +00:00
|
|
|
lastTime = formatUnixNanoTime(last.Time)
|
2016-06-12 21:08:47 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
tasks = append(tasks, fmt.Sprintf("%s|%s|%s|%s",
|
|
|
|
task, lastState, lastEvent, lastTime))
|
|
|
|
}
|
|
|
|
|
|
|
|
c.Ui.Output(c.Colorize().Color("\n[bold]Tasks[reset]"))
|
|
|
|
c.Ui.Output(formatList(tasks))
|
|
|
|
}
|
|
|
|
|
|
|
|
// sortedTaskStateIterator is a helper that takes the task state map and returns a
|
|
|
|
// channel that returns the keys in a sorted order.
|
|
|
|
func (c *AllocStatusCommand) sortedTaskStateIterator(m map[string]*api.TaskState) <-chan string {
|
|
|
|
output := make(chan string, len(m))
|
|
|
|
keys := make([]string, len(m))
|
|
|
|
i := 0
|
|
|
|
for k := range m {
|
|
|
|
keys[i] = k
|
|
|
|
i++
|
|
|
|
}
|
|
|
|
sort.Strings(keys)
|
|
|
|
|
|
|
|
for _, key := range keys {
|
|
|
|
output <- key
|
|
|
|
}
|
|
|
|
|
|
|
|
close(output)
|
|
|
|
return output
|
|
|
|
}
|