// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: MPL-2.0

package command

import (
	"fmt"
	"math"
	"sort"
	"strconv"
	"strings"
	"time"

	"github.com/dustin/go-humanize"
	"github.com/posener/complete"
	"golang.org/x/exp/slices"

	"github.com/hashicorp/nomad/api"
	"github.com/hashicorp/nomad/api/contexts"
)
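
// AllocStatusCommand implements the "alloc status" subcommand, which displays
// status information and metadata for a single allocation.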
type AllocStatusCommand struct {
	Meta
}

func (c *AllocStatusCommand) Help() string {
	helpText := `
Usage: nomad alloc status [options] <allocation>

  Display information about an existing allocation and its tasks. This command
  can be used to inspect the current status of an allocation, including its
  running status, metadata, and verbose failure messages reported by internal
  subsystems.

  When ACLs are enabled, this command requires a token with the 'read-job' and
  'list-jobs' capabilities for the allocation's namespace.

General Options:

  ` + generalOptionsUsage(usageOptsDefault) + `

Alloc Status Options:

  -short
    Display short output. Shows only the most recent task event.

  -stats
    Display detailed resource usage statistics.

  -verbose
    Show full information.

  -json
    Output the allocation in its JSON format.

  -t
    Format and display the allocation using a Go template.
`

	return strings.TrimSpace(helpText)
}

func (c *AllocStatusCommand) Synopsis() string {
	return "Display allocation status information and metadata"
}

func (c *AllocStatusCommand) AutocompleteFlags() complete.Flags {
	return mergeAutocompleteFlags(c.Meta.AutocompleteFlags(FlagSetClient),
		complete.Flags{
			"-short":   complete.PredictNothing,
			"-verbose": complete.PredictNothing,
			"-json":    complete.PredictNothing,
			"-t":       complete.PredictAnything,
		})
}

func (c *AllocStatusCommand) AutocompleteArgs() complete.Predictor {
	return complete.PredictFunc(func(a complete.Args) []string {
		client, err := c.Meta.Client()
		if err != nil {
			return nil
		}

		resp, _, err := client.Search().PrefixSearch(a.Last, contexts.Allocs, nil)
		if err != nil {
			return []string{}
		}
		return resp.Matches[contexts.Allocs]
	})
}

func (c *AllocStatusCommand) Name() string { return "alloc status" }
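
// Run parses the command line flags, resolves the target allocation by ID
// prefix (or lists all allocations when only an output format is given), and
// prints its status.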
func (c *AllocStatusCommand) Run(args []string) int {
	var short, displayStats, verbose, json bool
	var tmpl string

	flags := c.Meta.FlagSet(c.Name(), FlagSetClient)
	flags.Usage = func() { c.Ui.Output(c.Help()) }
	flags.BoolVar(&short, "short", false, "")
	flags.BoolVar(&verbose, "verbose", false, "")
	flags.BoolVar(&displayStats, "stats", false, "")
	flags.BoolVar(&json, "json", false, "")
	flags.StringVar(&tmpl, "t", "", "")

	if err := flags.Parse(args); err != nil {
		return 1
	}

	// Check that we got exactly one allocation ID
	args = flags.Args()

	// Get the HTTP client
	client, err := c.Meta.Client()
	if err != nil {
		c.Ui.Error(fmt.Sprintf("Error initializing client: %s", err))
		return 1
	}

	// If no allocation ID is given but an output format is specified, format
	// and output the full allocation list
	if len(args) == 0 && (json || len(tmpl) > 0) {
		allocs, _, err := client.Allocations().List(nil)
		if err != nil {
			c.Ui.Error(fmt.Sprintf("Error querying allocations: %v", err))
			return 1
		}

		out, err := Format(json, tmpl, allocs)
		if err != nil {
			c.Ui.Error(err.Error())
			return 1
		}

		c.Ui.Output(out)
		return 0
	}

	if len(args) != 1 {
		c.Ui.Error("This command takes one of the following argument conditions:")
		c.Ui.Error(" * A single <allocation>")
		c.Ui.Error(" * No arguments, with output format specified")
		c.Ui.Error(commandErrorText(c))
		return 1
	}
	allocID := args[0]

	// Truncate the id unless full length is requested
	length := shortId
	if verbose {
		length = fullId
	}

	// Query the allocation info
	if len(allocID) == 1 {
		c.Ui.Error("Identifier must contain at least two characters.")
		return 1
	}

	allocID = sanitizeUUIDPrefix(allocID)
	allocs, _, err := client.Allocations().PrefixList(allocID)
	if err != nil {
		c.Ui.Error(fmt.Sprintf("Error querying allocation: %v", err))
		return 1
	}
	if len(allocs) == 0 {
		c.Ui.Error(fmt.Sprintf("No allocation(s) with prefix or id %q found", allocID))
		return 1
	}
	if len(allocs) > 1 {
		out := formatAllocListStubs(allocs, verbose, length)
		c.Ui.Output(fmt.Sprintf("Prefix matched multiple allocations\n\n%s", out))
		return 0
	}
	// Prefix lookup matched a single allocation
	q := &api.QueryOptions{Namespace: allocs[0].Namespace}
	alloc, _, err := client.Allocations().Info(allocs[0].ID, q)
	if err != nil {
		c.Ui.Error(fmt.Sprintf("Error querying allocation: %s", err))
		return 1
	}

	// If output format is specified, format and output the data
	if json || len(tmpl) > 0 {
		out, err := Format(json, tmpl, alloc)
		if err != nil {
			c.Ui.Error(err.Error())
			return 1
		}

		c.Ui.Output(out)
		return 0
	}

	// Format the allocation data
	if short {
		c.Ui.Output(formatAllocShortInfo(alloc, client))
	} else {
		output, err := formatAllocBasicInfo(alloc, client, length, verbose)
		if err != nil {
			c.Ui.Error(err.Error())
			return 1
		}
		c.Ui.Output(output)

		// add allocation network addresses
		if alloc.AllocatedResources != nil && len(alloc.AllocatedResources.Shared.Networks) > 0 && alloc.AllocatedResources.Shared.Networks[0].HasPorts() {
			c.Ui.Output("")
			c.Ui.Output(formatAllocNetworkInfo(alloc))
		}

		// add allocation nomad service discovery checks
		if checkOutput := formatAllocNomadServiceChecks(alloc.ID, client); checkOutput != "" {
			c.Ui.Output("")
			c.Ui.Output(checkOutput)
		}
	}

	if short {
		c.shortTaskStatus(alloc)
	} else {
		var statsErr error
		var stats *api.AllocResourceUsage
		stats, statsErr = client.Allocations().Stats(alloc, nil)
		if statsErr != nil {
			c.Ui.Output("")
			if statsErr != api.NodeDownErr {
				c.Ui.Error(fmt.Sprintf("Couldn't retrieve stats: %v", statsErr))
			} else {
				c.Ui.Output("Omitting resource statistics since the node is down.")
			}
		}
		c.outputTaskDetails(alloc, stats, displayStats, verbose)
	}

	// Format the detailed status
	if verbose {
		c.Ui.Output(c.Colorize().Color("\n[bold]Placement Metrics[reset]"))
		c.Ui.Output(formatAllocMetrics(alloc.Metrics, true, " "))
	}

	return 0
}
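
// formatAllocShortInfo returns the abbreviated ID/Name/Created/Modified
// summary used for -short output.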
func formatAllocShortInfo(alloc *api.Allocation, client *api.Client) string {
	formattedCreateTime := prettyTimeDiff(time.Unix(0, alloc.CreateTime), time.Now())
	formattedModifyTime := prettyTimeDiff(time.Unix(0, alloc.ModifyTime), time.Now())

	basic := []string{
		fmt.Sprintf("ID|%s", alloc.ID),
		fmt.Sprintf("Name|%s", alloc.Name),
		fmt.Sprintf("Created|%s", formattedCreateTime),
		fmt.Sprintf("Modified|%s", formattedModifyTime),
	}

	return formatKV(basic)
}
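
// formatAllocBasicInfo builds the key/value header of the status output,
// including deployment, reschedule, and (in verbose mode) placement metric
// details when available.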
func formatAllocBasicInfo(alloc *api.Allocation, client *api.Client, uuidLength int, verbose bool) (string, error) {
	var formattedCreateTime, formattedModifyTime string

	if verbose {
		formattedCreateTime = formatUnixNanoTime(alloc.CreateTime)
		formattedModifyTime = formatUnixNanoTime(alloc.ModifyTime)
	} else {
		formattedCreateTime = prettyTimeDiff(time.Unix(0, alloc.CreateTime), time.Now())
		formattedModifyTime = prettyTimeDiff(time.Unix(0, alloc.ModifyTime), time.Now())
	}

	basic := []string{
		fmt.Sprintf("ID|%s", alloc.ID),
		fmt.Sprintf("Eval ID|%s", limit(alloc.EvalID, uuidLength)),
		fmt.Sprintf("Name|%s", alloc.Name),
		fmt.Sprintf("Node ID|%s", limit(alloc.NodeID, uuidLength)),
		fmt.Sprintf("Node Name|%s", alloc.NodeName),
		fmt.Sprintf("Job ID|%s", alloc.JobID),
		fmt.Sprintf("Job Version|%d", *alloc.Job.Version),
		fmt.Sprintf("Client Status|%s", alloc.ClientStatus),
		fmt.Sprintf("Client Description|%s", alloc.ClientDescription),
		fmt.Sprintf("Desired Status|%s", alloc.DesiredStatus),
		fmt.Sprintf("Desired Description|%s", alloc.DesiredDescription),
		fmt.Sprintf("Created|%s", formattedCreateTime),
		fmt.Sprintf("Modified|%s", formattedModifyTime),
	}

	if alloc.DeploymentID != "" {
		health := "unset"
		canary := false
		if alloc.DeploymentStatus != nil {
			if alloc.DeploymentStatus.Healthy != nil {
				if *alloc.DeploymentStatus.Healthy {
					health = "healthy"
				} else {
					health = "unhealthy"
				}
			}

			canary = alloc.DeploymentStatus.Canary
		}

		basic = append(basic,
			fmt.Sprintf("Deployment ID|%s", limit(alloc.DeploymentID, uuidLength)),
			fmt.Sprintf("Deployment Health|%s", health))
		if canary {
			basic = append(basic, fmt.Sprintf("Canary|%v", true))
		}
	}

	if alloc.RescheduleTracker != nil && len(alloc.RescheduleTracker.Events) > 0 {
		attempts, total := alloc.RescheduleInfo(time.Unix(0, alloc.ModifyTime))
		// Show this section only if the reschedule policy limits the number of attempts
		if total > 0 {
			reschedInfo := fmt.Sprintf("Reschedule Attempts|%d/%d", attempts, total)
			basic = append(basic, reschedInfo)
		}
	}
	if alloc.NextAllocation != "" {
		basic = append(basic,
			fmt.Sprintf("Replacement Alloc ID|%s", limit(alloc.NextAllocation, uuidLength)))
	}
	if alloc.FollowupEvalID != "" {
		nextEvalTime := futureEvalTimePretty(alloc.FollowupEvalID, client)
		if nextEvalTime != "" {
			basic = append(basic,
				fmt.Sprintf("Reschedule Eligibility|%s", nextEvalTime))
		}
	}

	if verbose {
		basic = append(basic,
			fmt.Sprintf("Evaluated Nodes|%d", alloc.Metrics.NodesEvaluated),
			fmt.Sprintf("Filtered Nodes|%d", alloc.Metrics.NodesFiltered),
			fmt.Sprintf("Exhausted Nodes|%d", alloc.Metrics.NodesExhausted),
			fmt.Sprintf("Allocation Time|%s", alloc.Metrics.AllocationTime),
			fmt.Sprintf("Failures|%d", alloc.Metrics.CoalescedFailures))
	}

	return formatKV(basic), nil
}
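
// formatAllocNetworkInfo renders the "Allocation Addresses" table of port
// labels and host addresses for the allocation's first shared network.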
func formatAllocNetworkInfo(alloc *api.Allocation) string {
	nw := alloc.AllocatedResources.Shared.Networks[0]
	addrs := []string{"Label|Dynamic|Address"}
	portFmt := func(label string, value, to int, hostIP, dyn string) string {
		s := fmt.Sprintf("%s|%s|%s:%d", label, dyn, hostIP, value)
		if to > 0 {
			s += fmt.Sprintf(" -> %d", to)
		}
		return s
	}
	if len(alloc.AllocatedResources.Shared.Ports) > 0 {
		for _, port := range alloc.AllocatedResources.Shared.Ports {
			addrs = append(addrs, portFmt("*"+port.Label, port.Value, port.To, port.HostIP, "yes"))
		}
	} else {
		for _, port := range nw.DynamicPorts {
			addrs = append(addrs, portFmt(port.Label, port.Value, port.To, nw.IP, "yes"))
		}

		for _, port := range nw.ReservedPorts {
			addrs = append(addrs, portFmt(port.Label, port.Value, port.To, nw.IP, "no"))
		}
	}

	var mode string
	if nw.Mode != "" {
		mode = fmt.Sprintf(" (mode = %q)", nw.Mode)
	}

	return fmt.Sprintf("Allocation Addresses%s:\n%s", mode, formatList(addrs))
}
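
// formatAllocNomadServiceChecks renders the table of Nomad native service
// check statuses for the allocation, or an empty string when there are no
// checks or the query fails.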
func formatAllocNomadServiceChecks(allocID string, client *api.Client) string {
	statuses, err := client.Allocations().Checks(allocID, nil)
	if err != nil {
		return ""
	} else if len(statuses) == 0 {
		return ""
	}
	results := []string{"Service|Task|Name|Mode|Status"}
	for _, status := range statuses {
		task := "(group)"
		if status.Task != "" {
			task = status.Task
		}
		// service | task | check name | mode | status
		s := fmt.Sprintf("%s|%s|%s|%s|%s", status.Service, task, status.Check, status.Mode, status.Status)
		results = append(results, s)
	}
	sort.Strings(results[1:])
	return fmt.Sprintf("Nomad Service Checks:\n%s", formatList(results))
}

// futureEvalTimePretty returns when the eval is eligible to reschedule
// relative to current time, based on the WaitUntil field
func futureEvalTimePretty(evalID string, client *api.Client) string {
	evaluation, _, err := client.Evaluations().Info(evalID, nil)
	// Eval time is not a critical output; don't return it on errors, or if
	// it's not set or already in the past.
	if err != nil || evaluation.WaitUntil.IsZero() || time.Now().After(evaluation.WaitUntil) {
		return ""
	}
	return prettyTimeDiff(evaluation.WaitUntil, time.Now())
}

// outputTaskDetails prints task details for each task in the allocation,
// optionally printing verbose statistics if displayStats is set
func (c *AllocStatusCommand) outputTaskDetails(alloc *api.Allocation, stats *api.AllocResourceUsage, displayStats bool, verbose bool) {
	taskLifecycles := map[string]*api.TaskLifecycle{}
	for _, t := range alloc.Job.LookupTaskGroup(alloc.TaskGroup).Tasks {
		taskLifecycles[t.Name] = t.Lifecycle
	}

	for _, task := range c.sortedTaskStateIterator(alloc.TaskStates, taskLifecycles) {
		state := alloc.TaskStates[task]

		lcIndicator := ""
		if lc := taskLifecycles[task]; !lc.Empty() {
			lcIndicator = " (" + lifecycleDisplayName(lc) + ")"
		}

		c.Ui.Output(c.Colorize().Color(fmt.Sprintf("\n[bold]Task %q%v is %q[reset]", task, lcIndicator, state.State)))
		c.outputTaskResources(alloc, task, stats, displayStats)
		c.Ui.Output("")
		c.outputTaskVolumes(alloc, task, verbose)
		c.outputTaskStatus(state)
	}
}
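
// formatTaskTimes renders a task timestamp, returning "N/A" for the zero value.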
func formatTaskTimes(t time.Time) string {
	if t.IsZero() {
		return "N/A"
	}

	return formatTime(t)
}

// outputTaskStatus prints out a list of the most recent events for the given
// task state.
func (c *AllocStatusCommand) outputTaskStatus(state *api.TaskState) {
	basic := []string{
		fmt.Sprintf("Started At|%s", formatTaskTimes(state.StartedAt)),
		fmt.Sprintf("Finished At|%s", formatTaskTimes(state.FinishedAt)),
		fmt.Sprintf("Total Restarts|%d", state.Restarts),
		fmt.Sprintf("Last Restart|%s", formatTaskTimes(state.LastRestart))}

	c.Ui.Output("Task Events:")
	c.Ui.Output(formatKV(basic))
	c.Ui.Output("")

	c.Ui.Output("Recent Events:")
	events := make([]string, len(state.Events)+1)
	events[0] = "Time|Type|Description"

	size := len(state.Events)
	for i, event := range state.Events {
		msg := event.DisplayMessage
		if msg == "" {
			msg = buildDisplayMessage(event)
		}
		formattedTime := formatUnixNanoTime(event.Time)
		// Reverse order so we are sorted by time
		events[size-i] = fmt.Sprintf("%s|%s|%s", formattedTime, event.Type, msg)
	}
	c.Ui.Output(formatList(events))
}
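
// buildDisplayMessage derives a human-readable description for a task event
// from its type and fields, used when the event carries no DisplayMessage.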
func buildDisplayMessage(event *api.TaskEvent) string {
	// Build up the description based on the event type.
	var desc string
	switch event.Type {
	case api.TaskSetup:
		desc = event.Message
	case api.TaskStarted:
		desc = "Task started by client"
	case api.TaskReceived:
		desc = "Task received by client"
	case api.TaskFailedValidation:
		if event.ValidationError != "" {
			desc = event.ValidationError
		} else {
			desc = "Validation of task failed"
		}
	case api.TaskSetupFailure:
		if event.SetupError != "" {
			desc = event.SetupError
		} else {
			desc = "Task setup failed"
		}
	case api.TaskDriverFailure:
		if event.DriverError != "" {
			desc = event.DriverError
		} else {
			desc = "Failed to start task"
		}
	case api.TaskDownloadingArtifacts:
		desc = "Client is downloading artifacts"
	case api.TaskArtifactDownloadFailed:
		if event.DownloadError != "" {
			desc = event.DownloadError
		} else {
			desc = "Failed to download artifacts"
		}
	case api.TaskKilling:
		if event.KillReason != "" {
			desc = fmt.Sprintf("Killing task: %v", event.KillReason)
		} else if event.KillTimeout != 0 {
			desc = fmt.Sprintf("Sent interrupt. Waiting %v before force killing", event.KillTimeout)
		} else {
			desc = "Sent interrupt"
		}
	case api.TaskKilled:
		if event.KillError != "" {
			desc = event.KillError
		} else {
			desc = "Task successfully killed"
		}
	case api.TaskTerminated:
		var parts []string
		parts = append(parts, fmt.Sprintf("Exit Code: %d", event.ExitCode))

		if event.Signal != 0 {
			parts = append(parts, fmt.Sprintf("Signal: %d", event.Signal))
		}

		if event.Message != "" {
			parts = append(parts, fmt.Sprintf("Exit Message: %q", event.Message))
		}
		desc = strings.Join(parts, ", ")
	case api.TaskRestarting:
		in := fmt.Sprintf("Task restarting in %v", time.Duration(event.StartDelay))
		if event.RestartReason != "" && event.RestartReason != api.AllocRestartReasonWithinPolicy {
			desc = fmt.Sprintf("%s - %s", event.RestartReason, in)
		} else {
			desc = in
		}
	case api.TaskNotRestarting:
		if event.RestartReason != "" {
			desc = event.RestartReason
		} else {
			desc = "Task exceeded restart policy"
		}
	case api.TaskSiblingFailed:
		if event.FailedSibling != "" {
			desc = fmt.Sprintf("Task's sibling %q failed", event.FailedSibling)
		} else {
			desc = "Task's sibling failed"
		}
	case api.TaskSignaling:
		sig := event.TaskSignal
		reason := event.TaskSignalReason

		if sig == "" && reason == "" {
			desc = "Task being sent a signal"
		} else if sig == "" {
			desc = reason
		} else if reason == "" {
			desc = fmt.Sprintf("Task being sent signal %v", sig)
		} else {
			desc = fmt.Sprintf("Task being sent signal %v: %v", sig, reason)
		}
	case api.TaskRestartSignal:
		if event.RestartReason != "" {
			desc = event.RestartReason
		} else {
			desc = "Task signaled to restart"
		}
	case api.TaskDriverMessage:
		desc = event.DriverMessage
	case api.TaskLeaderDead:
		desc = "Leader Task in Group dead"
	case api.TaskClientReconnected:
		desc = "Client reconnected"
	default:
		desc = event.Message
	}

	return desc
}

// outputTaskResources prints the task resources for the passed task and if
// displayStats is set, verbose resource usage statistics
func (c *AllocStatusCommand) outputTaskResources(alloc *api.Allocation, task string, stats *api.AllocResourceUsage, displayStats bool) {
	resource, ok := alloc.TaskResources[task]
	if !ok {
		return
	}

	c.Ui.Output("Task Resources:")
	var addr []string
	for _, nw := range resource.Networks {
		ports := append(nw.DynamicPorts, nw.ReservedPorts...) //nolint:gocritic
		for _, port := range ports {
			addr = append(addr, fmt.Sprintf("%v: %v:%v\n", port.Label, nw.IP, port.Value))
		}
	}

	var resourcesOutput []string
	cpuHeader := "CPU"
	if resource.Cores != nil && *resource.Cores > 0 {
		cpuHeader = fmt.Sprintf("CPU (%v cores)", *resource.Cores)
	}
	resourcesOutput = append(resourcesOutput, fmt.Sprintf("%s|Memory|Disk|Addresses", cpuHeader))
	firstAddr := ""
	secondAddr := ""
	if len(addr) > 0 {
		firstAddr = addr[0]
	}
	if len(addr) > 1 {
		secondAddr = addr[1]
	}

	// Display the rolled up stats. If possible prefer the live statistics
	cpuUsage := strconv.Itoa(*resource.CPU)
	memUsage := humanize.IBytes(uint64(*resource.MemoryMB * bytesPerMegabyte))
	memMax := ""
	if max := resource.MemoryMaxMB; max != nil && *max != 0 && *max != *resource.MemoryMB {
		memMax = "Max: " + humanize.IBytes(uint64(*resource.MemoryMaxMB*bytesPerMegabyte))
	}
	var deviceStats []*api.DeviceGroupStats

	if stats != nil {
		if ru, ok := stats.Tasks[task]; ok && ru != nil && ru.ResourceUsage != nil {
			if cs := ru.ResourceUsage.CpuStats; cs != nil {
				cpuUsage = fmt.Sprintf("%v/%v", math.Floor(cs.TotalTicks), cpuUsage)
			}
			if ms := ru.ResourceUsage.MemoryStats; ms != nil {
				// Nomad uses RSS as the top-level metric to report, for historical reasons,
				// but it's not always measured (e.g. with cgroup-v2)
				usage := ms.RSS
				if usage == 0 && !slices.Contains(ms.Measured, "RSS") {
					usage = ms.Usage
				}
				memUsage = fmt.Sprintf("%v/%v", humanize.IBytes(usage), memUsage)
			}
			deviceStats = ru.ResourceUsage.DeviceStats
		}
	}
	resourcesOutput = append(resourcesOutput, fmt.Sprintf("%v MHz|%v|%v|%v",
		cpuUsage,
		memUsage,
		humanize.IBytes(uint64(*alloc.Resources.DiskMB*bytesPerMegabyte)),
		firstAddr))
	if memMax != "" || secondAddr != "" {
		resourcesOutput = append(resourcesOutput, fmt.Sprintf("|%v||%v", memMax, secondAddr))
	}
	for i := 2; i < len(addr); i++ {
		resourcesOutput = append(resourcesOutput, fmt.Sprintf("|||%v", addr[i]))
	}
	c.Ui.Output(formatListWithSpaces(resourcesOutput))

	if len(deviceStats) > 0 {
		c.Ui.Output("")
		c.Ui.Output("Device Stats")
		c.Ui.Output(formatList(getDeviceResources(deviceStats)))
	}

	if stats != nil {
		if ru, ok := stats.Tasks[task]; ok && ru != nil && displayStats && ru.ResourceUsage != nil {
			c.Ui.Output("")
			c.outputVerboseResourceUsage(task, ru.ResourceUsage)
		}
	}
}

// outputVerboseResourceUsage outputs the verbose resource usage for the passed
// task
func (c *AllocStatusCommand) outputVerboseResourceUsage(task string, resourceUsage *api.ResourceUsage) {
	memoryStats := resourceUsage.MemoryStats
	cpuStats := resourceUsage.CpuStats
	deviceStats := resourceUsage.DeviceStats

	if memoryStats != nil && len(memoryStats.Measured) > 0 {
		c.Ui.Output("Memory Stats")

		// Sort the measured stats
		sort.Strings(memoryStats.Measured)

		var measuredStats []string
		for _, measured := range memoryStats.Measured {
			switch measured {
			case "RSS":
				measuredStats = append(measuredStats, humanize.IBytes(memoryStats.RSS))
			case "Cache":
				measuredStats = append(measuredStats, humanize.IBytes(memoryStats.Cache))
			case "Swap":
				measuredStats = append(measuredStats, humanize.IBytes(memoryStats.Swap))
			case "Usage":
				measuredStats = append(measuredStats, humanize.IBytes(memoryStats.Usage))
			case "Max Usage":
				measuredStats = append(measuredStats, humanize.IBytes(memoryStats.MaxUsage))
			case "Kernel Usage":
				measuredStats = append(measuredStats, humanize.IBytes(memoryStats.KernelUsage))
			case "Kernel Max Usage":
				measuredStats = append(measuredStats, humanize.IBytes(memoryStats.KernelMaxUsage))
			}
		}

		out := make([]string, 2)
		out[0] = strings.Join(memoryStats.Measured, "|")
		out[1] = strings.Join(measuredStats, "|")
		c.Ui.Output(formatList(out))
		c.Ui.Output("")
	}

	if cpuStats != nil && len(cpuStats.Measured) > 0 {
		c.Ui.Output("CPU Stats")

		// Sort the measured stats
		sort.Strings(cpuStats.Measured)

		var measuredStats []string
		for _, measured := range cpuStats.Measured {
			switch measured {
			case "Percent":
				percent := strconv.FormatFloat(cpuStats.Percent, 'f', 2, 64)
				measuredStats = append(measuredStats, fmt.Sprintf("%v%%", percent))
			case "Throttled Periods":
				measuredStats = append(measuredStats, fmt.Sprintf("%v", cpuStats.ThrottledPeriods))
			case "Throttled Time":
				measuredStats = append(measuredStats, fmt.Sprintf("%v", cpuStats.ThrottledTime))
			case "User Mode":
				percent := strconv.FormatFloat(cpuStats.UserMode, 'f', 2, 64)
				measuredStats = append(measuredStats, fmt.Sprintf("%v%%", percent))
			case "System Mode":
				percent := strconv.FormatFloat(cpuStats.SystemMode, 'f', 2, 64)
				measuredStats = append(measuredStats, fmt.Sprintf("%v%%", percent))
			}
		}

		out := make([]string, 2)
		out[0] = strings.Join(cpuStats.Measured, "|")
		out[1] = strings.Join(measuredStats, "|")
		c.Ui.Output(formatList(out))
	}

	if len(deviceStats) > 0 {
		c.Ui.Output("")
		c.Ui.Output("Device Stats")

		printDeviceStats(c.Ui, deviceStats)
	}
}

// shortTaskStatus prints out the current state of each task.
func (c *AllocStatusCommand) shortTaskStatus(alloc *api.Allocation) {
	tasks := make([]string, 0, len(alloc.TaskStates)+1)
	tasks = append(tasks, "Name|State|Last Event|Time|Lifecycle")

	taskLifecycles := map[string]*api.TaskLifecycle{}
	for _, t := range alloc.Job.LookupTaskGroup(alloc.TaskGroup).Tasks {
		taskLifecycles[t.Name] = t.Lifecycle
	}

	for _, task := range c.sortedTaskStateIterator(alloc.TaskStates, taskLifecycles) {
		state := alloc.TaskStates[task]
		lastState := state.State
		var lastEvent, lastTime string

		l := len(state.Events)
		if l != 0 {
			last := state.Events[l-1]
			lastEvent = last.Type
			lastTime = formatUnixNanoTime(last.Time)
		}

		tasks = append(tasks, fmt.Sprintf("%s|%s|%s|%s|%s",
			task, lastState, lastEvent, lastTime, lifecycleDisplayName(taskLifecycles[task])))
	}

	c.Ui.Output(c.Colorize().Color("\n[bold]Tasks[reset]"))
	c.Ui.Output(formatList(tasks))
}

// sortedTaskStateIterator is a helper that takes the task state map and
// returns its keys in sorted order, listing prestart tasks before main tasks.
func (c *AllocStatusCommand) sortedTaskStateIterator(m map[string]*api.TaskState, lifecycles map[string]*api.TaskLifecycle) []string {
	keys := make([]string, len(m))
	i := 0
	for k := range m {
		keys[i] = k
		i++
	}
	sort.Strings(keys)

	// display prestart then prestart sidecar then main
	sort.SliceStable(keys, func(i, j int) bool {
		lci := lifecycles[keys[i]]
		lcj := lifecycles[keys[j]]

		switch {
		case lci == nil:
			return false
		case lcj == nil:
			return true
		case !lci.Sidecar && lcj.Sidecar:
			return true
		default:
			return false
		}
	})

	return keys
}
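
// lifecycleDisplayName returns a short label for a task's lifecycle, such as
// "main", "prestart", or "prestart sidecar".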
func lifecycleDisplayName(l *api.TaskLifecycle) string {
	if l.Empty() {
		return "main"
	}

	sidecar := ""
	if l.Sidecar {
		sidecar = " sidecar"
	}
	return l.Hook + sidecar
}
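
// outputTaskVolumes prints the host and CSI volumes mounted by the given task,
// including per-volume plugin details when verbose is set.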
func (c *AllocStatusCommand) outputTaskVolumes(alloc *api.Allocation, taskName string, verbose bool) {
	var task *api.Task
	var tg *api.TaskGroup
FOUND:
	for _, tg = range alloc.Job.TaskGroups {
		for _, task = range tg.Tasks {
			if task.Name == taskName {
				break FOUND
			}
		}
	}
	if task == nil || tg == nil {
		c.Ui.Error(fmt.Sprintf("Could not find task data for %q", taskName))
		return
	}
	if len(task.VolumeMounts) == 0 {
		return
	}
	client, err := c.Meta.Client()
	if err != nil {
		c.Ui.Error(fmt.Sprintf("Error initializing client: %s", err))
		return
	}

	var hostVolumesOutput []string
	var csiVolumesOutput []string
	hostVolumesOutput = append(hostVolumesOutput, "ID|Read Only")
	if verbose {
		csiVolumesOutput = append(csiVolumesOutput,
			"Name|ID|Plugin|Provider|Schedulable|Read Only|Mount Options")
	} else {
		csiVolumesOutput = append(csiVolumesOutput, "ID|Read Only")
	}

	for _, volMount := range task.VolumeMounts {
		volReq := tg.Volumes[*volMount.Volume]
		switch volReq.Type {
		case api.CSIVolumeTypeHost:
			hostVolumesOutput = append(hostVolumesOutput,
				fmt.Sprintf("%s|%v", volReq.Name, *volMount.ReadOnly))
		case api.CSIVolumeTypeCSI:
			if verbose {
				source := volReq.Source
				if volReq.PerAlloc {
					source = source + api.AllocSuffix(alloc.Name)
				}

				// there's an extra API call per volume here, so we only make
				// it when the -verbose flag is set
				vol, _, err := client.CSIVolumes().Info(source, nil)
				if err != nil {
					c.Ui.Error(fmt.Sprintf("Error retrieving volume info for %q: %s",
						volReq.Name, err))
					continue
				}
				csiVolumesOutput = append(csiVolumesOutput,
					fmt.Sprintf("%s|%s|%s|%s|%v|%v|%s",
						volReq.Name,
						vol.ID,
						vol.PluginID,
						vol.Provider,
						vol.Schedulable,
						volReq.ReadOnly,
						csiVolMountOption(vol.MountOptions, volReq.MountOptions),
					))
			} else {
				csiVolumesOutput = append(csiVolumesOutput,
					fmt.Sprintf("%s|%v", volReq.Name, volReq.ReadOnly))
			}
		}
	}
	if len(hostVolumesOutput) > 1 {
		c.Ui.Output("Host Volumes:")
		c.Ui.Output(formatList(hostVolumesOutput))
		c.Ui.Output("") // line padding to next block
	}
	if len(csiVolumesOutput) > 1 {
		c.Ui.Output("CSI Volumes:")
		c.Ui.Output(formatList(csiVolumesOutput))
		c.Ui.Output("") // line padding to next block
	}
}