2015-09-12 20:55:51 +00:00
|
|
|
package command
|
|
|
|
|
|
|
|
import (
|
|
|
|
"fmt"
|
2016-06-10 21:14:33 +00:00
|
|
|
"math"
|
2015-10-21 01:00:00 +00:00
|
|
|
"sort"
|
2018-05-02 23:19:15 +00:00
|
|
|
"strconv"
|
2015-09-12 20:55:51 +00:00
|
|
|
"strings"
|
2016-05-22 09:36:12 +00:00
|
|
|
"time"
|
2016-03-21 00:30:33 +00:00
|
|
|
|
2017-08-15 18:30:23 +00:00
|
|
|
humanize "github.com/dustin/go-humanize"
|
2016-03-21 00:30:33 +00:00
|
|
|
"github.com/hashicorp/nomad/api"
|
2017-08-14 14:24:35 +00:00
|
|
|
"github.com/hashicorp/nomad/api/contexts"
|
2017-02-06 19:48:28 +00:00
|
|
|
"github.com/hashicorp/nomad/helper"
|
2018-12-06 23:09:26 +00:00
|
|
|
"github.com/posener/complete"
|
2015-09-12 20:55:51 +00:00
|
|
|
)
|
|
|
|
|
2016-06-11 21:40:51 +00:00
|
|
|
const (
	// floatFormat is a format string for formatting floats.
	// Passed to humanize.FormatFloat by the stats printers below.
	floatFormat = "#,###.##"

	// bytesPerMegabyte is the number of bytes per MB
	bytesPerMegabyte = 1024 * 1024
)
|
|
|
|
|
2015-09-12 20:55:51 +00:00
|
|
|
// NodeStatusCommand implements the "nomad node status" CLI command. With no
// argument it lists all client nodes; with a node ID (or -self) it shows
// detailed status for that single node.
type NodeStatusCommand struct {
	Meta
	length      int    // ID display length, set to shortId or fullId based on -verbose
	short       bool   // -short: drop allocation details when showing a single node
	verbose     bool   // -verbose: full-length IDs and extra detail sections
	list_allocs bool   // -allocs: include a running-allocation count per node in list mode
	self        bool   // -self: query the local node instead of taking an ID argument
	stats       bool   // -stats: include detailed resource usage statistics
	json        bool   // -json: emit the raw JSON representation
	tmpl        string // -t: Go template used to format the output
}
|
|
|
|
|
|
|
|
// Help returns the long-form usage text for "nomad node status".
func (c *NodeStatusCommand) Help() string {
	helpText := `
Usage: nomad node status [options] <node>

  Display status information about a given node. The list of nodes
  returned includes only nodes which jobs may be scheduled to, and
  includes status and other high-level information.

  If a node ID is passed, information for that specific node will be displayed,
  including resource usage statistics. If no node ID's are passed, then a
  short-hand list of all nodes will be displayed. The -self flag is useful to
  quickly access the status of the local node.

General Options:

  ` + generalOptionsUsage() + `

Node Status Options:

  -self
    Query the status of the local node.

  -stats
    Display detailed resource usage statistics.

  -allocs
    Display a count of running allocations for each node.

  -short
    Display short output. Used only when a single node is being
    queried, and drops verbose output about node allocations.

  -verbose
    Display full information.

  -json
    Output the node in its JSON format.

  -t
    Format and display node using a Go template.
`
	return strings.TrimSpace(helpText)
}
|
|
|
|
|
|
|
|
func (c *NodeStatusCommand) Synopsis() string {
|
2015-09-13 18:39:49 +00:00
|
|
|
return "Display status information about nodes"
|
2015-09-12 20:55:51 +00:00
|
|
|
}
|
|
|
|
|
2017-08-22 20:41:42 +00:00
|
|
|
func (c *NodeStatusCommand) AutocompleteFlags() complete.Flags {
|
2017-08-23 21:56:21 +00:00
|
|
|
return mergeAutocompleteFlags(c.Meta.AutocompleteFlags(FlagSetClient),
|
|
|
|
complete.Flags{
|
|
|
|
"-allocs": complete.PredictNothing,
|
|
|
|
"-json": complete.PredictNothing,
|
|
|
|
"-self": complete.PredictNothing,
|
|
|
|
"-short": complete.PredictNothing,
|
|
|
|
"-stats": complete.PredictNothing,
|
|
|
|
"-t": complete.PredictAnything,
|
|
|
|
"-verbose": complete.PredictNothing,
|
|
|
|
})
|
2017-08-22 20:41:42 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
func (c *NodeStatusCommand) AutocompleteArgs() complete.Predictor {
|
|
|
|
return complete.PredictFunc(func(a complete.Args) []string {
|
2017-08-29 21:29:32 +00:00
|
|
|
client, err := c.Meta.Client()
|
|
|
|
if err != nil {
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
2017-08-28 05:17:51 +00:00
|
|
|
resp, _, err := client.Search().PrefixSearch(a.Last, contexts.Nodes, nil)
|
2017-08-22 20:41:42 +00:00
|
|
|
if err != nil {
|
|
|
|
return []string{}
|
|
|
|
}
|
|
|
|
return resp.Matches[contexts.Nodes]
|
|
|
|
})
|
|
|
|
}
|
|
|
|
|
2018-04-18 16:02:11 +00:00
|
|
|
func (c *NodeStatusCommand) Name() string { return "node-status" }
|
|
|
|
|
2015-09-12 20:55:51 +00:00
|
|
|
func (c *NodeStatusCommand) Run(args []string) int {
|
2015-09-15 18:20:08 +00:00
|
|
|
|
2018-04-18 16:02:11 +00:00
|
|
|
flags := c.Meta.FlagSet(c.Name(), FlagSetClient)
|
2015-09-12 20:55:51 +00:00
|
|
|
flags.Usage = func() { c.Ui.Output(c.Help()) }
|
2016-06-11 21:40:51 +00:00
|
|
|
flags.BoolVar(&c.short, "short", false, "")
|
|
|
|
flags.BoolVar(&c.verbose, "verbose", false, "")
|
|
|
|
flags.BoolVar(&c.list_allocs, "allocs", false, "")
|
|
|
|
flags.BoolVar(&c.self, "self", false, "")
|
|
|
|
flags.BoolVar(&c.stats, "stats", false, "")
|
2016-08-04 10:19:31 +00:00
|
|
|
flags.BoolVar(&c.json, "json", false, "")
|
2016-07-30 10:20:43 +00:00
|
|
|
flags.StringVar(&c.tmpl, "t", "", "")
|
2015-09-15 18:20:08 +00:00
|
|
|
|
2015-09-12 20:55:51 +00:00
|
|
|
if err := flags.Parse(args); err != nil {
|
|
|
|
return 1
|
|
|
|
}
|
|
|
|
|
|
|
|
// Check that we got either a single node or none
|
2015-09-14 20:13:52 +00:00
|
|
|
args = flags.Args()
|
|
|
|
if len(args) > 1 {
|
2018-04-18 16:02:11 +00:00
|
|
|
c.Ui.Error("This command takes either one or no arguments")
|
|
|
|
c.Ui.Error(commandErrorText(c))
|
2015-09-12 20:55:51 +00:00
|
|
|
return 1
|
|
|
|
}
|
|
|
|
|
2016-01-14 20:57:43 +00:00
|
|
|
// Truncate the id unless full length is requested
|
2016-06-11 21:40:51 +00:00
|
|
|
c.length = shortId
|
|
|
|
if c.verbose {
|
|
|
|
c.length = fullId
|
2016-01-14 20:57:43 +00:00
|
|
|
}
|
|
|
|
|
2015-09-12 20:55:51 +00:00
|
|
|
// Get the HTTP client
|
2015-09-14 20:13:52 +00:00
|
|
|
client, err := c.Meta.Client()
|
2015-09-12 20:55:51 +00:00
|
|
|
if err != nil {
|
2015-09-14 20:13:52 +00:00
|
|
|
c.Ui.Error(fmt.Sprintf("Error initializing client: %s", err))
|
2015-09-12 20:55:51 +00:00
|
|
|
return 1
|
|
|
|
}
|
|
|
|
|
|
|
|
// Use list mode if no node name was provided
|
2016-06-11 21:40:51 +00:00
|
|
|
if len(args) == 0 && !c.self {
|
2016-08-06 12:38:41 +00:00
|
|
|
|
2015-09-12 20:55:51 +00:00
|
|
|
// Query the node info
|
|
|
|
nodes, _, err := client.Nodes().List(nil)
|
|
|
|
if err != nil {
|
2015-09-14 20:13:52 +00:00
|
|
|
c.Ui.Error(fmt.Sprintf("Error querying node status: %s", err))
|
2015-09-12 20:55:51 +00:00
|
|
|
return 1
|
|
|
|
}
|
|
|
|
|
2017-07-01 01:10:19 +00:00
|
|
|
// If output format is specified, format and output the node data list
|
|
|
|
if c.json || len(c.tmpl) > 0 {
|
|
|
|
out, err := Format(c.json, c.tmpl, nodes)
|
2016-08-06 11:30:12 +00:00
|
|
|
if err != nil {
|
2017-07-01 01:10:19 +00:00
|
|
|
c.Ui.Error(err.Error())
|
2016-08-06 11:30:12 +00:00
|
|
|
return 1
|
|
|
|
}
|
|
|
|
|
|
|
|
c.Ui.Output(out)
|
|
|
|
return 0
|
|
|
|
}
|
|
|
|
|
2017-07-01 01:10:19 +00:00
|
|
|
// Return nothing if no nodes found
|
|
|
|
if len(nodes) == 0 {
|
|
|
|
return 0
|
|
|
|
}
|
|
|
|
|
2015-09-12 20:55:51 +00:00
|
|
|
// Format the nodes list
|
|
|
|
out := make([]string, len(nodes)+1)
|
2017-08-16 20:33:11 +00:00
|
|
|
|
|
|
|
out[0] = "ID|DC|Name|Class|"
|
|
|
|
|
|
|
|
if c.verbose {
|
2017-12-21 08:58:35 +00:00
|
|
|
out[0] += "Address|Version|"
|
2017-08-16 20:33:11 +00:00
|
|
|
}
|
|
|
|
|
2018-02-27 22:00:55 +00:00
|
|
|
out[0] += "Drain|Eligibility|Status"
|
2017-08-16 20:33:11 +00:00
|
|
|
|
2016-06-11 21:40:51 +00:00
|
|
|
if c.list_allocs {
|
2017-08-16 20:33:11 +00:00
|
|
|
out[0] += "|Running Allocs"
|
2016-03-03 19:09:07 +00:00
|
|
|
}
|
2016-08-06 11:30:12 +00:00
|
|
|
|
2015-09-12 20:55:51 +00:00
|
|
|
for i, node := range nodes {
|
2017-08-16 20:33:11 +00:00
|
|
|
out[i+1] = fmt.Sprintf("%s|%s|%s|%s",
|
|
|
|
limit(node.ID, c.length),
|
|
|
|
node.Datacenter,
|
|
|
|
node.Name,
|
|
|
|
node.NodeClass)
|
|
|
|
if c.verbose {
|
2017-12-21 08:58:35 +00:00
|
|
|
out[i+1] += fmt.Sprintf("|%s|%s",
|
|
|
|
node.Address, node.Version)
|
2017-08-16 20:33:11 +00:00
|
|
|
}
|
2018-02-27 22:00:55 +00:00
|
|
|
out[i+1] += fmt.Sprintf("|%v|%s|%s",
|
2017-08-16 20:33:11 +00:00
|
|
|
node.Drain,
|
2018-02-27 22:00:55 +00:00
|
|
|
node.SchedulingEligibility,
|
2017-08-16 20:33:11 +00:00
|
|
|
node.Status)
|
2018-02-27 22:00:55 +00:00
|
|
|
|
2016-06-11 21:40:51 +00:00
|
|
|
if c.list_allocs {
|
2016-03-05 03:14:57 +00:00
|
|
|
numAllocs, err := getRunningAllocs(client, node.ID)
|
2016-03-03 19:09:07 +00:00
|
|
|
if err != nil {
|
|
|
|
c.Ui.Error(fmt.Sprintf("Error querying node allocations: %s", err))
|
|
|
|
return 1
|
|
|
|
}
|
2017-08-16 20:33:11 +00:00
|
|
|
out[i+1] += fmt.Sprintf("|%v",
|
2016-03-05 03:14:57 +00:00
|
|
|
len(numAllocs))
|
2016-03-03 17:19:56 +00:00
|
|
|
}
|
2015-09-12 20:55:51 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
// Dump the output
|
2015-09-15 23:44:38 +00:00
|
|
|
c.Ui.Output(formatList(out))
|
2015-09-12 20:55:51 +00:00
|
|
|
return 0
|
|
|
|
}
|
|
|
|
|
|
|
|
// Query the specific node
|
2017-09-26 22:26:33 +00:00
|
|
|
var nodeID string
|
2016-06-11 21:40:51 +00:00
|
|
|
if !c.self {
|
2016-03-29 19:36:24 +00:00
|
|
|
nodeID = args[0]
|
2016-04-11 22:20:49 +00:00
|
|
|
} else {
|
|
|
|
var err error
|
|
|
|
if nodeID, err = getLocalNodeID(client); err != nil {
|
|
|
|
c.Ui.Error(err.Error())
|
|
|
|
return 1
|
|
|
|
}
|
2016-03-29 19:36:24 +00:00
|
|
|
}
|
2016-03-17 23:48:45 +00:00
|
|
|
if len(nodeID) == 1 {
|
|
|
|
c.Ui.Error(fmt.Sprintf("Identifier must contain at least two characters."))
|
|
|
|
return 1
|
|
|
|
}
|
2016-01-21 19:53:05 +00:00
|
|
|
|
2018-03-11 18:52:59 +00:00
|
|
|
nodeID = sanitizeUUIDPrefix(nodeID)
|
2016-03-17 23:48:45 +00:00
|
|
|
nodes, _, err := client.Nodes().PrefixList(nodeID)
|
|
|
|
if err != nil {
|
|
|
|
c.Ui.Error(fmt.Sprintf("Error querying node info: %s", err))
|
|
|
|
return 1
|
|
|
|
}
|
|
|
|
// Return error if no nodes are found
|
|
|
|
if len(nodes) == 0 {
|
|
|
|
c.Ui.Error(fmt.Sprintf("No node(s) with prefix %q found", nodeID))
|
|
|
|
return 1
|
|
|
|
}
|
|
|
|
if len(nodes) > 1 {
|
|
|
|
// Dump the output
|
2018-02-27 22:43:35 +00:00
|
|
|
c.Ui.Error(fmt.Sprintf("Prefix matched multiple nodes\n\n%s",
|
|
|
|
formatNodeStubList(nodes, c.verbose)))
|
2017-07-21 00:31:07 +00:00
|
|
|
return 1
|
2016-03-17 23:48:45 +00:00
|
|
|
}
|
2018-02-27 22:43:35 +00:00
|
|
|
|
2016-03-17 23:48:45 +00:00
|
|
|
// Prefix lookup matched a single node
|
|
|
|
node, _, err := client.Nodes().Info(nodes[0].ID, nil)
|
|
|
|
if err != nil {
|
|
|
|
c.Ui.Error(fmt.Sprintf("Error querying node info: %s", err))
|
|
|
|
return 1
|
2015-09-12 20:55:51 +00:00
|
|
|
}
|
|
|
|
|
2016-07-30 10:20:43 +00:00
|
|
|
// If output format is specified, format and output the data
|
2017-07-01 01:10:19 +00:00
|
|
|
if c.json || len(c.tmpl) > 0 {
|
|
|
|
out, err := Format(c.json, c.tmpl, node)
|
2016-07-30 10:20:43 +00:00
|
|
|
if err != nil {
|
2017-07-01 01:10:19 +00:00
|
|
|
c.Ui.Error(err.Error())
|
2016-07-30 10:20:43 +00:00
|
|
|
return 1
|
|
|
|
}
|
|
|
|
|
|
|
|
c.Ui.Output(out)
|
|
|
|
return 0
|
|
|
|
}
|
|
|
|
|
2016-06-11 21:40:51 +00:00
|
|
|
return c.formatNode(client, node)
|
|
|
|
}
|
|
|
|
|
2017-02-23 02:54:28 +00:00
|
|
|
func nodeDrivers(n *api.Node) []string {
|
|
|
|
var drivers []string
|
|
|
|
for k, v := range n.Attributes {
|
|
|
|
// driver.docker = 1
|
|
|
|
parts := strings.Split(k, ".")
|
|
|
|
if len(parts) != 2 {
|
|
|
|
continue
|
|
|
|
} else if parts[0] != "driver" {
|
|
|
|
continue
|
|
|
|
} else if v != "1" {
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
|
|
|
|
drivers = append(drivers, parts[1])
|
|
|
|
}
|
|
|
|
|
|
|
|
sort.Strings(drivers)
|
|
|
|
return drivers
|
|
|
|
}
|
|
|
|
|
2019-12-20 11:48:34 +00:00
|
|
|
func nodeCSIControllerNames(n *api.Node) []string {
|
|
|
|
var names []string
|
|
|
|
for name := range n.CSIControllerPlugins {
|
|
|
|
names = append(names, name)
|
|
|
|
}
|
|
|
|
sort.Strings(names)
|
|
|
|
return names
|
|
|
|
}
|
|
|
|
|
|
|
|
func nodeCSINodeNames(n *api.Node) []string {
|
|
|
|
var names []string
|
|
|
|
for name := range n.CSINodePlugins {
|
|
|
|
names = append(names, name)
|
|
|
|
}
|
|
|
|
sort.Strings(names)
|
|
|
|
return names
|
|
|
|
}
|
|
|
|
|
2020-03-11 16:47:14 +00:00
|
|
|
func nodeCSIVolumeNames(n *api.Node, allocs []*api.Allocation) []string {
|
|
|
|
var names []string
|
|
|
|
for _, alloc := range allocs {
|
|
|
|
tg := alloc.GetTaskGroup()
|
|
|
|
if tg == nil || len(tg.Volumes) == 0 {
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
|
|
|
|
for _, v := range tg.Volumes {
|
|
|
|
names = append(names, v.Name)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
sort.Strings(names)
|
|
|
|
return names
|
|
|
|
}
|
|
|
|
|
2019-07-25 13:44:19 +00:00
|
|
|
func nodeVolumeNames(n *api.Node) []string {
|
|
|
|
var volumes []string
|
2019-08-09 10:47:09 +00:00
|
|
|
for name := range n.HostVolumes {
|
2019-07-25 13:44:19 +00:00
|
|
|
volumes = append(volumes, name)
|
|
|
|
}
|
|
|
|
|
|
|
|
sort.Strings(volumes)
|
|
|
|
return volumes
|
|
|
|
}
|
|
|
|
|
2018-05-02 23:19:15 +00:00
|
|
|
func formatDrain(n *api.Node) string {
|
|
|
|
if n.DrainStrategy != nil {
|
|
|
|
b := new(strings.Builder)
|
|
|
|
b.WriteString("true")
|
2018-06-06 20:05:39 +00:00
|
|
|
if n.DrainStrategy.DrainSpec.Deadline.Nanoseconds() < 0 {
|
|
|
|
b.WriteString("; force drain")
|
|
|
|
} else if n.DrainStrategy.ForceDeadline.IsZero() {
|
2018-05-02 23:19:15 +00:00
|
|
|
b.WriteString("; no deadline")
|
|
|
|
} else {
|
|
|
|
fmt.Fprintf(b, "; %s deadline", formatTime(n.DrainStrategy.ForceDeadline))
|
|
|
|
}
|
|
|
|
|
|
|
|
if n.DrainStrategy.IgnoreSystemJobs {
|
|
|
|
b.WriteString("; ignoring system jobs")
|
|
|
|
}
|
|
|
|
return b.String()
|
|
|
|
}
|
|
|
|
|
|
|
|
return strconv.FormatBool(n.Drain)
|
|
|
|
}
|
|
|
|
|
2016-06-11 21:40:51 +00:00
|
|
|
// formatNode renders the detailed single-node view: a key/value header,
// optional volume/driver/event sections, resource utilization tables, and the
// node's allocations. Returns the process exit code (0 success, 1 error).
func (c *NodeStatusCommand) formatNode(client *api.Client, node *api.Node) int {
	// Make one API call for allocations
	nodeAllocs, _, err := client.Nodes().Allocations(node.ID, nil)
	if err != nil {
		c.Ui.Error(fmt.Sprintf("Error querying node allocations: %s", err))
		return 1
	}

	// Filter down to the allocations currently running on the node; several
	// sections below only consider these.
	var runningAllocs []*api.Allocation
	for _, alloc := range nodeAllocs {
		if alloc.ClientStatus == "running" {
			runningAllocs = append(runningAllocs, alloc)
		}
	}

	// Format the header output
	basic := []string{
		fmt.Sprintf("ID|%s", node.ID),
		fmt.Sprintf("Name|%s", node.Name),
		fmt.Sprintf("Class|%s", node.NodeClass),
		fmt.Sprintf("DC|%s", node.Datacenter),
		fmt.Sprintf("Drain|%v", formatDrain(node)),
		fmt.Sprintf("Eligibility|%s", node.SchedulingEligibility),
		fmt.Sprintf("Status|%s", node.Status),
		fmt.Sprintf("CSI Controllers|%s", strings.Join(nodeCSIControllerNames(node), ",")),
		fmt.Sprintf("CSI Drivers|%s", strings.Join(nodeCSINodeNames(node), ",")),
	}

	// Short mode: print the condensed header plus allocations and return
	// early, skipping stats and the verbose sections entirely.
	if c.short {
		basic = append(basic, fmt.Sprintf("Host Volumes|%s", strings.Join(nodeVolumeNames(node), ",")))
		basic = append(basic, fmt.Sprintf("CSI Volumes|%s", strings.Join(nodeCSIVolumeNames(node, runningAllocs), ",")))
		basic = append(basic, fmt.Sprintf("Drivers|%s", strings.Join(nodeDrivers(node), ",")))
		c.Ui.Output(c.Colorize().Color(formatKV(basic)))

		// Output alloc info
		if err := c.outputAllocInfo(node, nodeAllocs); err != nil {
			c.Ui.Error(fmt.Sprintf("%s", err))
			return 1
		}

		return 0
	}

	// Get the host stats. A stats failure is reported but not fatal: the rest
	// of the node view is still printed.
	hostStats, nodeStatsErr := client.Nodes().Stats(node.ID, nil)
	if nodeStatsErr != nil {
		c.Ui.Output("")
		c.Ui.Error(fmt.Sprintf("error fetching node stats: %v", nodeStatsErr))
	}
	if hostStats != nil {
		uptime := time.Duration(hostStats.Uptime * uint64(time.Second))
		basic = append(basic, fmt.Sprintf("Uptime|%s", uptime.String()))
	}

	// When we're not running in verbose mode, then also include host volumes and
	// driver info in the basic output
	if !c.verbose {
		basic = append(basic, fmt.Sprintf("Host Volumes|%s", strings.Join(nodeVolumeNames(node), ",")))
		basic = append(basic, fmt.Sprintf("CSI Volumes|%s", strings.Join(nodeCSIVolumeNames(node, runningAllocs), ",")))
		driverStatus := fmt.Sprintf("Driver Status| %s", c.outputTruncatedNodeDriverInfo(node))
		basic = append(basic, driverStatus)
	}

	// Output the basic info
	c.Ui.Output(c.Colorize().Color(formatKV(basic)))

	// If we're running in verbose mode, include full host volume and driver info
	if c.verbose {
		c.outputNodeVolumeInfo(node)
		c.outputNodeCSIVolumeInfo(client, node, runningAllocs)
		c.outputNodeDriverInfo(node)
	}

	// Emit node events
	c.outputNodeStatusEvents(node)

	// Get list of running allocations on the node
	allocatedResources := getAllocatedResources(client, runningAllocs, node)
	c.Ui.Output(c.Colorize().Color("\n[bold]Allocated Resources[reset]"))
	c.Ui.Output(formatList(allocatedResources))

	actualResources, err := getActualResources(client, runningAllocs, node)
	if err == nil {
		c.Ui.Output(c.Colorize().Color("\n[bold]Allocation Resource Utilization[reset]"))
		c.Ui.Output(formatList(actualResources))
	}

	// NOTE(review): hostStats may be nil here if the stats call failed above;
	// presumably getHostResources returns an error in that case so the
	// hostStats.DeviceStats dereference below is guarded — confirm.
	hostResources, err := getHostResources(hostStats, node)
	if err != nil {
		c.Ui.Output("")
		c.Ui.Error(fmt.Sprintf("error fetching node stats: %v", err))
	}
	if err == nil {
		c.Ui.Output(c.Colorize().Color("\n[bold]Host Resource Utilization[reset]"))
		c.Ui.Output(formatList(hostResources))
	}

	if err == nil && node.NodeResources != nil && len(node.NodeResources.Devices) > 0 {
		c.Ui.Output(c.Colorize().Color("\n[bold]Device Resource Utilization[reset]"))
		c.Ui.Output(formatList(getDeviceResourcesForNode(hostStats.DeviceStats, node)))
	}

	// Detailed per-subsystem stats are only printed with -stats and when the
	// stats call succeeded.
	if hostStats != nil && c.stats {
		c.Ui.Output(c.Colorize().Color("\n[bold]CPU Stats[reset]"))
		c.printCpuStats(hostStats)
		c.Ui.Output(c.Colorize().Color("\n[bold]Memory Stats[reset]"))
		c.printMemoryStats(hostStats)
		c.Ui.Output(c.Colorize().Color("\n[bold]Disk Stats[reset]"))
		c.printDiskStats(hostStats)
		if len(hostStats.DeviceStats) > 0 {
			c.Ui.Output(c.Colorize().Color("\n[bold]Device Stats[reset]"))
			printDeviceStats(c.Ui, hostStats.DeviceStats)
		}
	}

	if err := c.outputAllocInfo(node, nodeAllocs); err != nil {
		c.Ui.Error(fmt.Sprintf("%s", err))
		return 1
	}

	return 0
}
|
|
|
|
|
2020-03-11 16:47:14 +00:00
|
|
|
func (c *NodeStatusCommand) outputAllocInfo(node *api.Node, nodeAllocs []*api.Allocation) error {
|
2017-07-07 04:51:13 +00:00
|
|
|
c.Ui.Output(c.Colorize().Color("\n[bold]Allocations[reset]"))
|
|
|
|
c.Ui.Output(formatAllocList(nodeAllocs, c.verbose, c.length))
|
2016-03-21 00:30:33 +00:00
|
|
|
|
2016-06-11 21:40:51 +00:00
|
|
|
if c.verbose {
|
|
|
|
c.formatAttributes(node)
|
2018-12-10 16:51:43 +00:00
|
|
|
c.formatDeviceAttributes(node)
|
2016-10-21 00:36:34 +00:00
|
|
|
c.formatMeta(node)
|
2016-06-11 21:40:51 +00:00
|
|
|
}
|
|
|
|
|
2019-08-09 10:47:09 +00:00
|
|
|
return nil
|
2016-06-11 21:40:51 +00:00
|
|
|
}
|
|
|
|
|
2018-04-03 16:28:29 +00:00
|
|
|
func (c *NodeStatusCommand) outputTruncatedNodeDriverInfo(node *api.Node) string {
|
2018-04-09 14:51:08 +00:00
|
|
|
drivers := make([]string, 0, len(node.Drivers))
|
2018-04-03 16:28:29 +00:00
|
|
|
|
|
|
|
for driverName, driverInfo := range node.Drivers {
|
2018-04-12 18:26:00 +00:00
|
|
|
if !driverInfo.Detected {
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
|
2018-04-03 16:28:29 +00:00
|
|
|
if !driverInfo.Healthy {
|
|
|
|
drivers = append(drivers, fmt.Sprintf("%s (unhealthy)", driverName))
|
|
|
|
} else {
|
|
|
|
drivers = append(drivers, driverName)
|
|
|
|
}
|
|
|
|
}
|
2018-04-18 21:30:44 +00:00
|
|
|
sort.Strings(drivers)
|
2018-04-12 18:26:00 +00:00
|
|
|
return strings.Trim(strings.Join(drivers, ","), ", ")
|
2018-04-03 16:28:29 +00:00
|
|
|
}
|
|
|
|
|
2019-07-25 13:44:19 +00:00
|
|
|
func (c *NodeStatusCommand) outputNodeVolumeInfo(node *api.Node) {
|
|
|
|
|
|
|
|
names := make([]string, 0, len(node.HostVolumes))
|
|
|
|
for name := range node.HostVolumes {
|
|
|
|
names = append(names, name)
|
|
|
|
}
|
|
|
|
sort.Strings(names)
|
|
|
|
|
|
|
|
output := make([]string, 0, len(names)+1)
|
2019-08-21 14:18:23 +00:00
|
|
|
output = append(output, "Name|ReadOnly|Source")
|
2019-07-25 13:44:19 +00:00
|
|
|
|
2020-07-24 12:17:27 +00:00
|
|
|
if len(names) > 0 {
|
|
|
|
c.Ui.Output(c.Colorize().Color("\n[bold]Host Volumes"))
|
|
|
|
for _, volName := range names {
|
|
|
|
info := node.HostVolumes[volName]
|
|
|
|
output = append(output, fmt.Sprintf("%s|%v|%s", volName, info.ReadOnly, info.Path))
|
|
|
|
}
|
|
|
|
c.Ui.Output(formatList(output))
|
2019-07-25 13:44:19 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2020-03-11 16:47:14 +00:00
|
|
|
// outputNodeCSIVolumeInfo prints a table of the CSI volumes requested by the
// node's running allocations, joined with each volume's current status from
// the server. Nothing is printed when no running allocation requests a volume.
func (c *NodeStatusCommand) outputNodeCSIVolumeInfo(client *api.Client, node *api.Node, runningAllocs []*api.Allocation) {

	// Duplicate nodeCSIVolumeNames to sort by name but also index volume names to ids
	var names []string
	requests := map[string]*api.VolumeRequest{}
	for _, alloc := range runningAllocs {
		tg := alloc.GetTaskGroup()
		if tg == nil || len(tg.Volumes) == 0 {
			continue
		}
		for _, v := range tg.Volumes {
			names = append(names, v.Name)
			// Keyed by Source so the CSIVolumes response below (keyed by
			// volume ID) can be mapped back to the request.
			requests[v.Source] = v
		}
	}
	if len(names) == 0 {
		return
	}
	sort.Strings(names)

	// Fetch the volume objects with current status
	// Ignore an error, all we're going to do is omit the volumes
	volumes := map[string]*api.CSIVolumeListStub{}
	vs, _ := client.Nodes().CSIVolumes(node.ID, nil)
	for _, v := range vs {
		n, ok := requests[v.ID]
		if ok {
			volumes[n.Name] = v
		}
	}

	if len(names) > 0 {
		c.Ui.Output(c.Colorize().Color("\n[bold]CSI Volumes"))

		// Output the volumes in name order
		output := make([]string, 0, len(names)+1)
		output = append(output, "ID|Name|Plugin ID|Schedulable|Provider|Access Mode")
		for _, name := range names {
			// NOTE(review): if a requested name was not matched above (e.g.
			// the CSIVolumes call failed), volumes[name] is nil and the field
			// accesses below would panic — confirm this cannot happen.
			v := volumes[name]
			output = append(output, fmt.Sprintf(
				"%s|%s|%s|%t|%s|%s",
				v.ID,
				name,
				v.PluginID,
				v.Schedulable,
				v.Provider,
				v.AccessMode,
			))
		}

		c.Ui.Output(formatList(output))
	}
}
|
|
|
|
|
2018-03-23 00:18:32 +00:00
|
|
|
func (c *NodeStatusCommand) outputNodeDriverInfo(node *api.Node) {
|
|
|
|
c.Ui.Output(c.Colorize().Color("\n[bold]Drivers"))
|
|
|
|
|
|
|
|
size := len(node.Drivers)
|
|
|
|
nodeDrivers := make([]string, 0, size+1)
|
|
|
|
|
2018-04-03 16:28:29 +00:00
|
|
|
nodeDrivers = append(nodeDrivers, "Driver|Detected|Healthy|Message|Time")
|
2018-03-23 00:18:32 +00:00
|
|
|
|
2018-03-23 17:59:18 +00:00
|
|
|
drivers := make([]string, 0, len(node.Drivers))
|
2018-04-03 16:28:29 +00:00
|
|
|
for driver := range node.Drivers {
|
|
|
|
drivers = append(drivers, driver)
|
2018-03-23 17:59:18 +00:00
|
|
|
}
|
|
|
|
sort.Strings(drivers)
|
|
|
|
|
2018-04-03 16:28:29 +00:00
|
|
|
for _, driver := range drivers {
|
|
|
|
info := node.Drivers[driver]
|
|
|
|
timestamp := formatTime(info.UpdateTime)
|
|
|
|
nodeDrivers = append(nodeDrivers, fmt.Sprintf("%s|%v|%v|%s|%s", driver, info.Detected, info.Healthy, info.HealthDescription, timestamp))
|
2018-03-23 00:18:32 +00:00
|
|
|
}
|
2018-04-03 16:28:29 +00:00
|
|
|
c.Ui.Output(formatList(nodeDrivers))
|
2018-03-23 00:18:32 +00:00
|
|
|
}
|
|
|
|
|
2018-03-08 14:34:08 +00:00
|
|
|
func (c *NodeStatusCommand) outputNodeStatusEvents(node *api.Node) {
|
2018-03-23 00:18:32 +00:00
|
|
|
c.Ui.Output(c.Colorize().Color("\n[bold]Node Events"))
|
2018-03-14 00:59:37 +00:00
|
|
|
c.outputNodeEvent(node.Events)
|
2018-03-08 14:34:08 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
// outputNodeEvent prints the node events as a table. Rows are written at
// index size-i, i.e. in reverse of the input slice's order, so the last event
// in the slice appears first in the output. Verbose mode adds a Details
// column.
func (c *NodeStatusCommand) outputNodeEvent(events []*api.NodeEvent) {
	size := len(events)
	nodeEvents := make([]string, size+1)
	if c.verbose {
		nodeEvents[0] = "Time|Subsystem|Message|Details"
	} else {
		nodeEvents[0] = "Time|Subsystem|Message"
	}

	for i, event := range events {
		timestamp := formatTime(event.Timestamp)
		subsystem := formatEventSubsystem(event.Subsystem, event.Details["driver"])
		msg := event.Message
		if c.verbose {
			details := formatEventDetails(event.Details)
			// size-i maps event 0 to the last row, reversing display order.
			nodeEvents[size-i] = fmt.Sprintf("%s|%s|%s|%s", timestamp, subsystem, msg, details)
		} else {
			nodeEvents[size-i] = fmt.Sprintf("%s|%s|%s", timestamp, subsystem, msg)
		}
	}
	c.Ui.Output(formatList(nodeEvents))
}
|
|
|
|
|
2018-03-29 17:30:34 +00:00
|
|
|
// formatEventSubsystem renders the subsystem column for a node event. Events
// attributed to a task driver are labeled with the driver's name so the
// message is clearer; all other events show the raw subsystem.
func formatEventSubsystem(subsystem, driverName string) string {
	if driverName != "" {
		// If this event is for a driver, append the driver name to make the
		// message clearer
		return fmt.Sprintf("Driver: %s", driverName)
	}
	return subsystem
}
|
|
|
|
|
2018-03-08 14:34:08 +00:00
|
|
|
// formatEventDetails flattens a node event's details map into a single
// "key: value, key: value" string. Keys are emitted in sorted order so the
// output is deterministic; Go map iteration order is randomized, and the
// previous implementation produced a different ordering on every run.
func formatEventDetails(details map[string]string) string {
	keys := make([]string, 0, len(details))
	for k := range details {
		keys = append(keys, k)
	}
	sort.Strings(keys)

	output := make([]string, 0, len(details))
	for _, k := range keys {
		output = append(output, fmt.Sprintf("%s: %s", k, details[k]))
	}
	return strings.Join(output, ", ")
}
|
|
|
|
|
2016-06-11 21:40:51 +00:00
|
|
|
func (c *NodeStatusCommand) formatAttributes(node *api.Node) {
|
|
|
|
// Print the attributes
|
|
|
|
keys := make([]string, len(node.Attributes))
|
|
|
|
for k := range node.Attributes {
|
|
|
|
keys = append(keys, k)
|
|
|
|
}
|
|
|
|
sort.Strings(keys)
|
|
|
|
|
|
|
|
var attributes []string
|
|
|
|
for _, k := range keys {
|
|
|
|
if k != "" {
|
|
|
|
attributes = append(attributes, fmt.Sprintf("%s|%s", k, node.Attributes[k]))
|
|
|
|
}
|
|
|
|
}
|
|
|
|
c.Ui.Output(c.Colorize().Color("\n[bold]Attributes[reset]"))
|
|
|
|
c.Ui.Output(formatKV(attributes))
|
2015-09-12 20:55:51 +00:00
|
|
|
}
|
2016-03-03 19:09:07 +00:00
|
|
|
|
2018-12-10 16:51:43 +00:00
|
|
|
// formatDeviceAttributes prints one key/value section per device group that
// carries attributes, under a single "Device Group Attributes" heading.
// Prints nothing when the node has no device resources.
func (c *NodeStatusCommand) formatDeviceAttributes(node *api.Node) {
	if node.NodeResources == nil {
		return
	}
	devices := node.NodeResources.Devices
	if len(devices) == 0 {
		return
	}

	// Sort device groups by ID for deterministic output. Note this sorts the
	// node's slice in place.
	sort.Slice(devices, func(i, j int) bool {
		return devices[i].ID() < devices[j].ID()
	})

	// Emit the heading once, before the first group that actually has
	// attributes; subsequent groups are separated by a blank line.
	first := true
	for _, d := range devices {
		if len(d.Attributes) == 0 {
			continue
		}

		if first {
			c.Ui.Output("\nDevice Group Attributes")
			first = false
		} else {
			c.Ui.Output("")
		}
		c.Ui.Output(formatKV(getDeviceAttributes(d)))
	}
}
|
|
|
|
|
2016-10-21 00:36:34 +00:00
|
|
|
func (c *NodeStatusCommand) formatMeta(node *api.Node) {
|
|
|
|
// Print the meta
|
|
|
|
keys := make([]string, 0, len(node.Meta))
|
|
|
|
for k := range node.Meta {
|
|
|
|
keys = append(keys, k)
|
|
|
|
}
|
|
|
|
sort.Strings(keys)
|
|
|
|
|
|
|
|
var meta []string
|
|
|
|
for _, k := range keys {
|
|
|
|
if k != "" {
|
|
|
|
meta = append(meta, fmt.Sprintf("%s|%s", k, node.Meta[k]))
|
|
|
|
}
|
|
|
|
}
|
|
|
|
c.Ui.Output(c.Colorize().Color("\n[bold]Meta[reset]"))
|
|
|
|
c.Ui.Output(formatKV(meta))
|
|
|
|
}
|
|
|
|
|
2016-05-22 09:04:27 +00:00
|
|
|
func (c *NodeStatusCommand) printCpuStats(hostStats *api.HostStats) {
|
2016-06-11 21:40:51 +00:00
|
|
|
l := len(hostStats.CPU)
|
|
|
|
for i, cpuStat := range hostStats.CPU {
|
2016-05-22 09:04:27 +00:00
|
|
|
cpuStatsAttr := make([]string, 4)
|
|
|
|
cpuStatsAttr[0] = fmt.Sprintf("CPU|%v", cpuStat.CPU)
|
2016-06-11 21:40:51 +00:00
|
|
|
cpuStatsAttr[1] = fmt.Sprintf("User|%v%%", humanize.FormatFloat(floatFormat, cpuStat.User))
|
|
|
|
cpuStatsAttr[2] = fmt.Sprintf("System|%v%%", humanize.FormatFloat(floatFormat, cpuStat.System))
|
|
|
|
cpuStatsAttr[3] = fmt.Sprintf("Idle|%v%%", humanize.FormatFloat(floatFormat, cpuStat.Idle))
|
2016-05-22 09:04:27 +00:00
|
|
|
c.Ui.Output(formatKV(cpuStatsAttr))
|
2016-06-11 21:40:51 +00:00
|
|
|
if i+1 < l {
|
|
|
|
c.Ui.Output("")
|
|
|
|
}
|
2016-05-22 09:04:27 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
func (c *NodeStatusCommand) printMemoryStats(hostStats *api.HostStats) {
|
|
|
|
memoryStat := hostStats.Memory
|
|
|
|
memStatsAttr := make([]string, 4)
|
2016-06-12 21:20:39 +00:00
|
|
|
memStatsAttr[0] = fmt.Sprintf("Total|%v", humanize.IBytes(memoryStat.Total))
|
|
|
|
memStatsAttr[1] = fmt.Sprintf("Available|%v", humanize.IBytes(memoryStat.Available))
|
|
|
|
memStatsAttr[2] = fmt.Sprintf("Used|%v", humanize.IBytes(memoryStat.Used))
|
|
|
|
memStatsAttr[3] = fmt.Sprintf("Free|%v", humanize.IBytes(memoryStat.Free))
|
2016-05-22 09:04:27 +00:00
|
|
|
c.Ui.Output(formatKV(memStatsAttr))
|
|
|
|
}
|
|
|
|
|
2016-05-22 10:46:49 +00:00
|
|
|
func (c *NodeStatusCommand) printDiskStats(hostStats *api.HostStats) {
|
2016-06-11 21:40:51 +00:00
|
|
|
l := len(hostStats.DiskStats)
|
|
|
|
for i, diskStat := range hostStats.DiskStats {
|
2016-06-06 22:31:50 +00:00
|
|
|
diskStatsAttr := make([]string, 7)
|
2016-05-22 10:46:49 +00:00
|
|
|
diskStatsAttr[0] = fmt.Sprintf("Device|%s", diskStat.Device)
|
|
|
|
diskStatsAttr[1] = fmt.Sprintf("MountPoint|%s", diskStat.Mountpoint)
|
2016-06-12 21:20:39 +00:00
|
|
|
diskStatsAttr[2] = fmt.Sprintf("Size|%s", humanize.IBytes(diskStat.Size))
|
|
|
|
diskStatsAttr[3] = fmt.Sprintf("Used|%s", humanize.IBytes(diskStat.Used))
|
|
|
|
diskStatsAttr[4] = fmt.Sprintf("Available|%s", humanize.IBytes(diskStat.Available))
|
2016-06-11 21:40:51 +00:00
|
|
|
diskStatsAttr[5] = fmt.Sprintf("Used Percent|%v%%", humanize.FormatFloat(floatFormat, diskStat.UsedPercent))
|
|
|
|
diskStatsAttr[6] = fmt.Sprintf("Inodes Percent|%v%%", humanize.FormatFloat(floatFormat, diskStat.InodesUsedPercent))
|
2016-05-22 10:46:49 +00:00
|
|
|
c.Ui.Output(formatKV(diskStatsAttr))
|
2016-06-11 21:40:51 +00:00
|
|
|
if i+1 < l {
|
|
|
|
c.Ui.Output("")
|
|
|
|
}
|
2016-05-22 10:46:49 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
Print resource usage w/ alloc-status + node-status
When alloc-status is called, in it's long form only, print the resource
utilization for that single allocation.
When node-status is called, in it's long form only, print the TOTAL
resource utilization that is occurring on that single node.
Nomad Alloc Status:
```
% nomad alloc-status 195d3bf2
ID = 195d3bf2
Eval ID = c917e3ee
Name = example.cache[1]
Node ID = 1b2520a7
Job ID = example
Client Status = running
Evaluated Nodes = 1
Filtered Nodes = 0
Exhausted Nodes = 0
Allocation Time = 17.73µs
Failures = 0
==> Task "redis" is "running"
Recent Events:
Time Type Description
04/03/16 21:20:45 EST Started Task started by client
04/03/16 21:20:42 EST Received Task received by client
==> Status
Allocation "195d3bf2" status "running" (0/1 nodes filtered)
* Score "1b2520a7-6714-e78d-a8f7-68467dda6db7.binpack" = 1.209464
* Score "1b2520a7-6714-e78d-a8f7-68467dda6db7.job-anti-affinity" = -10.000000
==> Resources
CPU MemoryMB DiskMB IOPS
500 256 300 0
```
Nomad Node Status:
```
% nomad node-status 57b3a55a
ID = 57b3a55a
Name = biscuits
Class = <none>
DC = dc1
Drain = false
Status = ready
Attributes = arch:amd64, cpu.frequency:3753.458875, cpu.modelname:Intel(R) Core(TM) i7-4770K CPU @ 3.50GHz, cpu.numcores:8, cpu.totalcompute:30027.671000, driver.docker:1, driver.docker.version:1.10.2, driver.exec:1, driver.raw_exec:1, hostname:biscuits, kernel.name:linux, kernel.version:4.4.0-9-generic, memory.totalbytes:25208934400, os.name:ubuntu, os.version:16.04, unique.cgroup.mountpoint:/sys/fs/cgroup, unique.network.ip-address:127.0.0.1, unique.storage.bytesfree:219781419008, unique.storage.bytestotal:246059892736, unique.storage.volume:/dev/sdb3
==> Allocations
ID Eval ID Job ID Task Group Desired Status Client Status
2c236883 aa11aca8 example cache run running
32f6e3d6 aa11aca8 example cache run running
==> Resource Utilization
CPU MemoryMB DiskMB IOPS
1000 512 600 0
```
2016-03-05 02:29:39 +00:00
|
|
|
// getRunningAllocs returns a slice of allocation id's running on the node
|
2016-03-05 03:14:57 +00:00
|
|
|
func getRunningAllocs(client *api.Client, nodeID string) ([]*api.Allocation, error) {
|
Print resource usage w/ alloc-status + node-status
When alloc-status is called, in it's long form only, print the resource
utilization for that single allocation.
When node-status is called, in it's long form only, print the TOTAL
resource utilization that is occurring on that single node.
Nomad Alloc Status:
```
% nomad alloc-status 195d3bf2
ID = 195d3bf2
Eval ID = c917e3ee
Name = example.cache[1]
Node ID = 1b2520a7
Job ID = example
Client Status = running
Evaluated Nodes = 1
Filtered Nodes = 0
Exhausted Nodes = 0
Allocation Time = 17.73µs
Failures = 0
==> Task "redis" is "running"
Recent Events:
Time Type Description
04/03/16 21:20:45 EST Started Task started by client
04/03/16 21:20:42 EST Received Task received by client
==> Status
Allocation "195d3bf2" status "running" (0/1 nodes filtered)
* Score "1b2520a7-6714-e78d-a8f7-68467dda6db7.binpack" = 1.209464
* Score "1b2520a7-6714-e78d-a8f7-68467dda6db7.job-anti-affinity" = -10.000000
==> Resources
CPU MemoryMB DiskMB IOPS
500 256 300 0
```
Nomad Node Status:
```
% nomad node-status 57b3a55a
ID = 57b3a55a
Name = biscuits
Class = <none>
DC = dc1
Drain = false
Status = ready
Attributes = arch:amd64, cpu.frequency:3753.458875, cpu.modelname:Intel(R) Core(TM) i7-4770K CPU @ 3.50GHz, cpu.numcores:8, cpu.totalcompute:30027.671000, driver.docker:1, driver.docker.version:1.10.2, driver.exec:1, driver.raw_exec:1, hostname:biscuits, kernel.name:linux, kernel.version:4.4.0-9-generic, memory.totalbytes:25208934400, os.name:ubuntu, os.version:16.04, unique.cgroup.mountpoint:/sys/fs/cgroup, unique.network.ip-address:127.0.0.1, unique.storage.bytesfree:219781419008, unique.storage.bytestotal:246059892736, unique.storage.volume:/dev/sdb3
==> Allocations
ID Eval ID Job ID Task Group Desired Status Client Status
2c236883 aa11aca8 example cache run running
32f6e3d6 aa11aca8 example cache run running
==> Resource Utilization
CPU MemoryMB DiskMB IOPS
1000 512 600 0
```
2016-03-05 02:29:39 +00:00
|
|
|
var allocs []*api.Allocation
|
|
|
|
|
|
|
|
// Query the node allocations
|
2016-03-05 03:14:57 +00:00
|
|
|
nodeAllocs, _, err := client.Nodes().Allocations(nodeID, nil)
|
Print resource usage w/ alloc-status + node-status
When alloc-status is called, in it's long form only, print the resource
utilization for that single allocation.
When node-status is called, in it's long form only, print the TOTAL
resource utilization that is occurring on that single node.
Nomad Alloc Status:
```
% nomad alloc-status 195d3bf2
ID = 195d3bf2
Eval ID = c917e3ee
Name = example.cache[1]
Node ID = 1b2520a7
Job ID = example
Client Status = running
Evaluated Nodes = 1
Filtered Nodes = 0
Exhausted Nodes = 0
Allocation Time = 17.73µs
Failures = 0
==> Task "redis" is "running"
Recent Events:
Time Type Description
04/03/16 21:20:45 EST Started Task started by client
04/03/16 21:20:42 EST Received Task received by client
==> Status
Allocation "195d3bf2" status "running" (0/1 nodes filtered)
* Score "1b2520a7-6714-e78d-a8f7-68467dda6db7.binpack" = 1.209464
* Score "1b2520a7-6714-e78d-a8f7-68467dda6db7.job-anti-affinity" = -10.000000
==> Resources
CPU MemoryMB DiskMB IOPS
500 256 300 0
```
Nomad Node Status:
```
% nomad node-status 57b3a55a
ID = 57b3a55a
Name = biscuits
Class = <none>
DC = dc1
Drain = false
Status = ready
Attributes = arch:amd64, cpu.frequency:3753.458875, cpu.modelname:Intel(R) Core(TM) i7-4770K CPU @ 3.50GHz, cpu.numcores:8, cpu.totalcompute:30027.671000, driver.docker:1, driver.docker.version:1.10.2, driver.exec:1, driver.raw_exec:1, hostname:biscuits, kernel.name:linux, kernel.version:4.4.0-9-generic, memory.totalbytes:25208934400, os.name:ubuntu, os.version:16.04, unique.cgroup.mountpoint:/sys/fs/cgroup, unique.network.ip-address:127.0.0.1, unique.storage.bytesfree:219781419008, unique.storage.bytestotal:246059892736, unique.storage.volume:/dev/sdb3
==> Allocations
ID Eval ID Job ID Task Group Desired Status Client Status
2c236883 aa11aca8 example cache run running
32f6e3d6 aa11aca8 example cache run running
==> Resource Utilization
CPU MemoryMB DiskMB IOPS
1000 512 600 0
```
2016-03-05 02:29:39 +00:00
|
|
|
// Filter list to only running allocations
|
|
|
|
for _, alloc := range nodeAllocs {
|
|
|
|
if alloc.ClientStatus == "running" {
|
|
|
|
allocs = append(allocs, alloc)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return allocs, err
|
|
|
|
}
|
|
|
|
|
2016-06-06 21:10:43 +00:00
|
|
|
// getAllocatedResources returns the resource usage of the node.
|
2016-06-12 03:55:22 +00:00
|
|
|
func getAllocatedResources(client *api.Client, runningAllocs []*api.Allocation, node *api.Node) []string {
|
2016-03-21 00:30:33 +00:00
|
|
|
// Compute the total
|
2016-06-12 03:55:22 +00:00
|
|
|
total := computeNodeTotalResources(node)
|
Print resource usage w/ alloc-status + node-status
When alloc-status is called, in it's long form only, print the resource
utilization for that single allocation.
When node-status is called, in it's long form only, print the TOTAL
resource utilization that is occurring on that single node.
Nomad Alloc Status:
```
% nomad alloc-status 195d3bf2
ID = 195d3bf2
Eval ID = c917e3ee
Name = example.cache[1]
Node ID = 1b2520a7
Job ID = example
Client Status = running
Evaluated Nodes = 1
Filtered Nodes = 0
Exhausted Nodes = 0
Allocation Time = 17.73µs
Failures = 0
==> Task "redis" is "running"
Recent Events:
Time Type Description
04/03/16 21:20:45 EST Started Task started by client
04/03/16 21:20:42 EST Received Task received by client
==> Status
Allocation "195d3bf2" status "running" (0/1 nodes filtered)
* Score "1b2520a7-6714-e78d-a8f7-68467dda6db7.binpack" = 1.209464
* Score "1b2520a7-6714-e78d-a8f7-68467dda6db7.job-anti-affinity" = -10.000000
==> Resources
CPU MemoryMB DiskMB IOPS
500 256 300 0
```
Nomad Node Status:
```
% nomad node-status 57b3a55a
ID = 57b3a55a
Name = biscuits
Class = <none>
DC = dc1
Drain = false
Status = ready
Attributes = arch:amd64, cpu.frequency:3753.458875, cpu.modelname:Intel(R) Core(TM) i7-4770K CPU @ 3.50GHz, cpu.numcores:8, cpu.totalcompute:30027.671000, driver.docker:1, driver.docker.version:1.10.2, driver.exec:1, driver.raw_exec:1, hostname:biscuits, kernel.name:linux, kernel.version:4.4.0-9-generic, memory.totalbytes:25208934400, os.name:ubuntu, os.version:16.04, unique.cgroup.mountpoint:/sys/fs/cgroup, unique.network.ip-address:127.0.0.1, unique.storage.bytesfree:219781419008, unique.storage.bytestotal:246059892736, unique.storage.volume:/dev/sdb3
==> Allocations
ID Eval ID Job ID Task Group Desired Status Client Status
2c236883 aa11aca8 example cache run running
32f6e3d6 aa11aca8 example cache run running
==> Resource Utilization
CPU MemoryMB DiskMB IOPS
1000 512 600 0
```
2016-03-05 02:29:39 +00:00
|
|
|
|
|
|
|
// Get Resources
|
2018-12-06 23:09:26 +00:00
|
|
|
var cpu, mem, disk int
|
Print resource usage w/ alloc-status + node-status
When alloc-status is called, in it's long form only, print the resource
utilization for that single allocation.
When node-status is called, in it's long form only, print the TOTAL
resource utilization that is occurring on that single node.
Nomad Alloc Status:
```
% nomad alloc-status 195d3bf2
ID = 195d3bf2
Eval ID = c917e3ee
Name = example.cache[1]
Node ID = 1b2520a7
Job ID = example
Client Status = running
Evaluated Nodes = 1
Filtered Nodes = 0
Exhausted Nodes = 0
Allocation Time = 17.73µs
Failures = 0
==> Task "redis" is "running"
Recent Events:
Time Type Description
04/03/16 21:20:45 EST Started Task started by client
04/03/16 21:20:42 EST Received Task received by client
==> Status
Allocation "195d3bf2" status "running" (0/1 nodes filtered)
* Score "1b2520a7-6714-e78d-a8f7-68467dda6db7.binpack" = 1.209464
* Score "1b2520a7-6714-e78d-a8f7-68467dda6db7.job-anti-affinity" = -10.000000
==> Resources
CPU MemoryMB DiskMB IOPS
500 256 300 0
```
Nomad Node Status:
```
% nomad node-status 57b3a55a
ID = 57b3a55a
Name = biscuits
Class = <none>
DC = dc1
Drain = false
Status = ready
Attributes = arch:amd64, cpu.frequency:3753.458875, cpu.modelname:Intel(R) Core(TM) i7-4770K CPU @ 3.50GHz, cpu.numcores:8, cpu.totalcompute:30027.671000, driver.docker:1, driver.docker.version:1.10.2, driver.exec:1, driver.raw_exec:1, hostname:biscuits, kernel.name:linux, kernel.version:4.4.0-9-generic, memory.totalbytes:25208934400, os.name:ubuntu, os.version:16.04, unique.cgroup.mountpoint:/sys/fs/cgroup, unique.network.ip-address:127.0.0.1, unique.storage.bytesfree:219781419008, unique.storage.bytestotal:246059892736, unique.storage.volume:/dev/sdb3
==> Allocations
ID Eval ID Job ID Task Group Desired Status Client Status
2c236883 aa11aca8 example cache run running
32f6e3d6 aa11aca8 example cache run running
==> Resource Utilization
CPU MemoryMB DiskMB IOPS
1000 512 600 0
```
2016-03-05 02:29:39 +00:00
|
|
|
for _, alloc := range runningAllocs {
|
2017-02-06 19:48:28 +00:00
|
|
|
cpu += *alloc.Resources.CPU
|
|
|
|
mem += *alloc.Resources.MemoryMB
|
|
|
|
disk += *alloc.Resources.DiskMB
|
Print resource usage w/ alloc-status + node-status
When alloc-status is called, in it's long form only, print the resource
utilization for that single allocation.
When node-status is called, in it's long form only, print the TOTAL
resource utilization that is occurring on that single node.
Nomad Alloc Status:
```
% nomad alloc-status 195d3bf2
ID = 195d3bf2
Eval ID = c917e3ee
Name = example.cache[1]
Node ID = 1b2520a7
Job ID = example
Client Status = running
Evaluated Nodes = 1
Filtered Nodes = 0
Exhausted Nodes = 0
Allocation Time = 17.73µs
Failures = 0
==> Task "redis" is "running"
Recent Events:
Time Type Description
04/03/16 21:20:45 EST Started Task started by client
04/03/16 21:20:42 EST Received Task received by client
==> Status
Allocation "195d3bf2" status "running" (0/1 nodes filtered)
* Score "1b2520a7-6714-e78d-a8f7-68467dda6db7.binpack" = 1.209464
* Score "1b2520a7-6714-e78d-a8f7-68467dda6db7.job-anti-affinity" = -10.000000
==> Resources
CPU MemoryMB DiskMB IOPS
500 256 300 0
```
Nomad Node Status:
```
% nomad node-status 57b3a55a
ID = 57b3a55a
Name = biscuits
Class = <none>
DC = dc1
Drain = false
Status = ready
Attributes = arch:amd64, cpu.frequency:3753.458875, cpu.modelname:Intel(R) Core(TM) i7-4770K CPU @ 3.50GHz, cpu.numcores:8, cpu.totalcompute:30027.671000, driver.docker:1, driver.docker.version:1.10.2, driver.exec:1, driver.raw_exec:1, hostname:biscuits, kernel.name:linux, kernel.version:4.4.0-9-generic, memory.totalbytes:25208934400, os.name:ubuntu, os.version:16.04, unique.cgroup.mountpoint:/sys/fs/cgroup, unique.network.ip-address:127.0.0.1, unique.storage.bytesfree:219781419008, unique.storage.bytestotal:246059892736, unique.storage.volume:/dev/sdb3
==> Allocations
ID Eval ID Job ID Task Group Desired Status Client Status
2c236883 aa11aca8 example cache run running
32f6e3d6 aa11aca8 example cache run running
==> Resource Utilization
CPU MemoryMB DiskMB IOPS
1000 512 600 0
```
2016-03-05 02:29:39 +00:00
|
|
|
}
|
|
|
|
|
2016-06-12 03:55:22 +00:00
|
|
|
resources := make([]string, 2)
|
2018-12-06 23:09:26 +00:00
|
|
|
resources[0] = "CPU|Memory|Disk"
|
|
|
|
resources[1] = fmt.Sprintf("%d/%d MHz|%s/%s|%s/%s",
|
Print resource usage w/ alloc-status + node-status
When alloc-status is called, in it's long form only, print the resource
utilization for that single allocation.
When node-status is called, in it's long form only, print the TOTAL
resource utilization that is occurring on that single node.
Nomad Alloc Status:
```
% nomad alloc-status 195d3bf2
ID = 195d3bf2
Eval ID = c917e3ee
Name = example.cache[1]
Node ID = 1b2520a7
Job ID = example
Client Status = running
Evaluated Nodes = 1
Filtered Nodes = 0
Exhausted Nodes = 0
Allocation Time = 17.73µs
Failures = 0
==> Task "redis" is "running"
Recent Events:
Time Type Description
04/03/16 21:20:45 EST Started Task started by client
04/03/16 21:20:42 EST Received Task received by client
==> Status
Allocation "195d3bf2" status "running" (0/1 nodes filtered)
* Score "1b2520a7-6714-e78d-a8f7-68467dda6db7.binpack" = 1.209464
* Score "1b2520a7-6714-e78d-a8f7-68467dda6db7.job-anti-affinity" = -10.000000
==> Resources
CPU MemoryMB DiskMB IOPS
500 256 300 0
```
Nomad Node Status:
```
% nomad node-status 57b3a55a
ID = 57b3a55a
Name = biscuits
Class = <none>
DC = dc1
Drain = false
Status = ready
Attributes = arch:amd64, cpu.frequency:3753.458875, cpu.modelname:Intel(R) Core(TM) i7-4770K CPU @ 3.50GHz, cpu.numcores:8, cpu.totalcompute:30027.671000, driver.docker:1, driver.docker.version:1.10.2, driver.exec:1, driver.raw_exec:1, hostname:biscuits, kernel.name:linux, kernel.version:4.4.0-9-generic, memory.totalbytes:25208934400, os.name:ubuntu, os.version:16.04, unique.cgroup.mountpoint:/sys/fs/cgroup, unique.network.ip-address:127.0.0.1, unique.storage.bytesfree:219781419008, unique.storage.bytestotal:246059892736, unique.storage.volume:/dev/sdb3
==> Allocations
ID Eval ID Job ID Task Group Desired Status Client Status
2c236883 aa11aca8 example cache run running
32f6e3d6 aa11aca8 example cache run running
==> Resource Utilization
CPU MemoryMB DiskMB IOPS
1000 512 600 0
```
2016-03-05 02:29:39 +00:00
|
|
|
cpu,
|
2017-02-28 00:00:19 +00:00
|
|
|
*total.CPU,
|
2016-06-12 21:20:39 +00:00
|
|
|
humanize.IBytes(uint64(mem*bytesPerMegabyte)),
|
2017-02-06 19:48:28 +00:00
|
|
|
humanize.IBytes(uint64(*total.MemoryMB*bytesPerMegabyte)),
|
2016-06-12 21:20:39 +00:00
|
|
|
humanize.IBytes(uint64(disk*bytesPerMegabyte)),
|
2018-12-06 23:09:26 +00:00
|
|
|
humanize.IBytes(uint64(*total.DiskMB*bytesPerMegabyte)))
|
2016-06-12 03:55:22 +00:00
|
|
|
|
|
|
|
return resources
|
|
|
|
}
|
|
|
|
|
|
|
|
// computeNodeTotalResources returns the total allocatable resources (resources
|
|
|
|
// minus reserved)
|
|
|
|
func computeNodeTotalResources(node *api.Node) api.Resources {
|
|
|
|
total := api.Resources{}
|
|
|
|
|
|
|
|
r := node.Resources
|
|
|
|
res := node.Reserved
|
|
|
|
if res == nil {
|
|
|
|
res = &api.Resources{}
|
|
|
|
}
|
2017-02-06 19:48:28 +00:00
|
|
|
total.CPU = helper.IntToPtr(*r.CPU - *res.CPU)
|
|
|
|
total.MemoryMB = helper.IntToPtr(*r.MemoryMB - *res.MemoryMB)
|
|
|
|
total.DiskMB = helper.IntToPtr(*r.DiskMB - *res.DiskMB)
|
2016-06-12 03:55:22 +00:00
|
|
|
return total
|
|
|
|
}
|
|
|
|
|
|
|
|
// getActualResources returns the actual resource usage of the allocations.
|
|
|
|
func getActualResources(client *api.Client, runningAllocs []*api.Allocation, node *api.Node) ([]string, error) {
|
|
|
|
// Compute the total
|
|
|
|
total := computeNodeTotalResources(node)
|
Print resource usage w/ alloc-status + node-status
When alloc-status is called, in it's long form only, print the resource
utilization for that single allocation.
When node-status is called, in it's long form only, print the TOTAL
resource utilization that is occurring on that single node.
Nomad Alloc Status:
```
% nomad alloc-status 195d3bf2
ID = 195d3bf2
Eval ID = c917e3ee
Name = example.cache[1]
Node ID = 1b2520a7
Job ID = example
Client Status = running
Evaluated Nodes = 1
Filtered Nodes = 0
Exhausted Nodes = 0
Allocation Time = 17.73µs
Failures = 0
==> Task "redis" is "running"
Recent Events:
Time Type Description
04/03/16 21:20:45 EST Started Task started by client
04/03/16 21:20:42 EST Received Task received by client
==> Status
Allocation "195d3bf2" status "running" (0/1 nodes filtered)
* Score "1b2520a7-6714-e78d-a8f7-68467dda6db7.binpack" = 1.209464
* Score "1b2520a7-6714-e78d-a8f7-68467dda6db7.job-anti-affinity" = -10.000000
==> Resources
CPU MemoryMB DiskMB IOPS
500 256 300 0
```
Nomad Node Status:
```
% nomad node-status 57b3a55a
ID = 57b3a55a
Name = biscuits
Class = <none>
DC = dc1
Drain = false
Status = ready
Attributes = arch:amd64, cpu.frequency:3753.458875, cpu.modelname:Intel(R) Core(TM) i7-4770K CPU @ 3.50GHz, cpu.numcores:8, cpu.totalcompute:30027.671000, driver.docker:1, driver.docker.version:1.10.2, driver.exec:1, driver.raw_exec:1, hostname:biscuits, kernel.name:linux, kernel.version:4.4.0-9-generic, memory.totalbytes:25208934400, os.name:ubuntu, os.version:16.04, unique.cgroup.mountpoint:/sys/fs/cgroup, unique.network.ip-address:127.0.0.1, unique.storage.bytesfree:219781419008, unique.storage.bytestotal:246059892736, unique.storage.volume:/dev/sdb3
==> Allocations
ID Eval ID Job ID Task Group Desired Status Client Status
2c236883 aa11aca8 example cache run running
32f6e3d6 aa11aca8 example cache run running
==> Resource Utilization
CPU MemoryMB DiskMB IOPS
1000 512 600 0
```
2016-03-05 02:29:39 +00:00
|
|
|
|
2016-06-12 03:55:22 +00:00
|
|
|
// Get Resources
|
|
|
|
var cpu float64
|
|
|
|
var mem uint64
|
|
|
|
for _, alloc := range runningAllocs {
|
|
|
|
// Make the call to the client to get the actual usage.
|
|
|
|
stats, err := client.Allocations().Stats(alloc, nil)
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
|
|
|
|
cpu += stats.ResourceUsage.CpuStats.TotalTicks
|
|
|
|
mem += stats.ResourceUsage.MemoryStats.RSS
|
|
|
|
}
|
|
|
|
|
|
|
|
resources := make([]string, 2)
|
|
|
|
resources[0] = "CPU|Memory"
|
2017-02-28 00:00:19 +00:00
|
|
|
resources[1] = fmt.Sprintf("%v/%d MHz|%v/%v",
|
2016-06-12 03:55:22 +00:00
|
|
|
math.Floor(cpu),
|
2017-02-28 00:00:19 +00:00
|
|
|
*total.CPU,
|
2016-06-12 21:20:39 +00:00
|
|
|
humanize.IBytes(mem),
|
2017-02-06 19:48:28 +00:00
|
|
|
humanize.IBytes(uint64(*total.MemoryMB*bytesPerMegabyte)))
|
2016-06-12 03:55:22 +00:00
|
|
|
|
|
|
|
return resources, nil
|
Print resource usage w/ alloc-status + node-status
When alloc-status is called, in it's long form only, print the resource
utilization for that single allocation.
When node-status is called, in it's long form only, print the TOTAL
resource utilization that is occurring on that single node.
Nomad Alloc Status:
```
% nomad alloc-status 195d3bf2
ID = 195d3bf2
Eval ID = c917e3ee
Name = example.cache[1]
Node ID = 1b2520a7
Job ID = example
Client Status = running
Evaluated Nodes = 1
Filtered Nodes = 0
Exhausted Nodes = 0
Allocation Time = 17.73µs
Failures = 0
==> Task "redis" is "running"
Recent Events:
Time Type Description
04/03/16 21:20:45 EST Started Task started by client
04/03/16 21:20:42 EST Received Task received by client
==> Status
Allocation "195d3bf2" status "running" (0/1 nodes filtered)
* Score "1b2520a7-6714-e78d-a8f7-68467dda6db7.binpack" = 1.209464
* Score "1b2520a7-6714-e78d-a8f7-68467dda6db7.job-anti-affinity" = -10.000000
==> Resources
CPU MemoryMB DiskMB IOPS
500 256 300 0
```
Nomad Node Status:
```
% nomad node-status 57b3a55a
ID = 57b3a55a
Name = biscuits
Class = <none>
DC = dc1
Drain = false
Status = ready
Attributes = arch:amd64, cpu.frequency:3753.458875, cpu.modelname:Intel(R) Core(TM) i7-4770K CPU @ 3.50GHz, cpu.numcores:8, cpu.totalcompute:30027.671000, driver.docker:1, driver.docker.version:1.10.2, driver.exec:1, driver.raw_exec:1, hostname:biscuits, kernel.name:linux, kernel.version:4.4.0-9-generic, memory.totalbytes:25208934400, os.name:ubuntu, os.version:16.04, unique.cgroup.mountpoint:/sys/fs/cgroup, unique.network.ip-address:127.0.0.1, unique.storage.bytesfree:219781419008, unique.storage.bytestotal:246059892736, unique.storage.volume:/dev/sdb3
==> Allocations
ID Eval ID Job ID Task Group Desired Status Client Status
2c236883 aa11aca8 example cache run running
32f6e3d6 aa11aca8 example cache run running
==> Resource Utilization
CPU MemoryMB DiskMB IOPS
1000 512 600 0
```
2016-03-05 02:29:39 +00:00
|
|
|
}
|
2016-05-22 09:04:27 +00:00
|
|
|
|
2016-06-12 03:55:22 +00:00
|
|
|
// getHostResources returns the actual resource usage of the node.
|
|
|
|
func getHostResources(hostStats *api.HostStats, node *api.Node) ([]string, error) {
|
2016-06-06 21:10:43 +00:00
|
|
|
if hostStats == nil {
|
|
|
|
return nil, fmt.Errorf("actual resource usage not present")
|
|
|
|
}
|
|
|
|
var resources []string
|
|
|
|
|
2016-06-06 22:23:39 +00:00
|
|
|
// calculate disk usage
|
2016-06-06 21:10:43 +00:00
|
|
|
storageDevice := node.Attributes["unique.storage.volume"]
|
|
|
|
var diskUsed, diskSize uint64
|
2016-08-07 12:34:02 +00:00
|
|
|
var physical bool
|
2016-06-06 21:10:43 +00:00
|
|
|
for _, disk := range hostStats.DiskStats {
|
|
|
|
if disk.Device == storageDevice {
|
|
|
|
diskUsed = disk.Used
|
|
|
|
diskSize = disk.Size
|
2016-08-07 12:34:02 +00:00
|
|
|
physical = true
|
2016-06-06 21:10:43 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
resources = make([]string, 2)
|
2016-06-06 23:09:46 +00:00
|
|
|
resources[0] = "CPU|Memory|Disk"
|
2016-08-07 12:34:02 +00:00
|
|
|
if physical {
|
2017-02-28 00:00:19 +00:00
|
|
|
resources[1] = fmt.Sprintf("%v/%d MHz|%s/%s|%s/%s",
|
2016-08-07 12:34:02 +00:00
|
|
|
math.Floor(hostStats.CPUTicksConsumed),
|
2017-02-28 00:00:19 +00:00
|
|
|
*node.Resources.CPU,
|
2016-08-07 12:34:02 +00:00
|
|
|
humanize.IBytes(hostStats.Memory.Used),
|
|
|
|
humanize.IBytes(hostStats.Memory.Total),
|
|
|
|
humanize.IBytes(diskUsed),
|
|
|
|
humanize.IBytes(diskSize),
|
|
|
|
)
|
|
|
|
} else {
|
|
|
|
// If non-physical device are used, output device name only,
|
|
|
|
// since nomad doesn't collect the stats data.
|
2017-02-28 00:00:19 +00:00
|
|
|
resources[1] = fmt.Sprintf("%v/%d MHz|%s/%s|(%s)",
|
2016-08-07 12:34:02 +00:00
|
|
|
math.Floor(hostStats.CPUTicksConsumed),
|
2017-02-28 00:00:19 +00:00
|
|
|
*node.Resources.CPU,
|
2016-08-07 12:34:02 +00:00
|
|
|
humanize.IBytes(hostStats.Memory.Used),
|
|
|
|
humanize.IBytes(hostStats.Memory.Total),
|
|
|
|
storageDevice,
|
|
|
|
)
|
|
|
|
}
|
2016-06-06 21:10:43 +00:00
|
|
|
return resources, nil
|
|
|
|
}
|
2018-02-27 22:43:35 +00:00
|
|
|
|
|
|
|
// formatNodeStubList is used to return a table format of a list of node stubs.
|
|
|
|
func formatNodeStubList(nodes []*api.NodeListStub, verbose bool) string {
|
|
|
|
// Return error if no nodes are found
|
|
|
|
if len(nodes) == 0 {
|
|
|
|
return ""
|
|
|
|
}
|
|
|
|
// Truncate the id unless full length is requested
|
|
|
|
length := shortId
|
|
|
|
if verbose {
|
|
|
|
length = fullId
|
|
|
|
}
|
|
|
|
|
|
|
|
// Format the nodes list that matches the prefix so that the user
|
|
|
|
// can create a more specific request
|
|
|
|
out := make([]string, len(nodes)+1)
|
|
|
|
out[0] = "ID|DC|Name|Class|Drain|Eligibility|Status"
|
|
|
|
for i, node := range nodes {
|
|
|
|
out[i+1] = fmt.Sprintf("%s|%s|%s|%s|%v|%s|%s",
|
|
|
|
limit(node.ID, length),
|
|
|
|
node.Datacenter,
|
|
|
|
node.Name,
|
|
|
|
node.NodeClass,
|
|
|
|
node.Drain,
|
|
|
|
node.SchedulingEligibility,
|
|
|
|
node.Status)
|
|
|
|
}
|
|
|
|
|
|
|
|
return formatList(out)
|
|
|
|
}
|