open-nomad/command/node_status.go

package command
import (
"fmt"
"math"
"sort"
"strconv"
"strings"
"time"
humanize "github.com/dustin/go-humanize"
"github.com/hashicorp/nomad/api"
"github.com/hashicorp/nomad/api/contexts"
"github.com/hashicorp/nomad/helper"
"github.com/posener/complete"
)
const (
// floatFormat is a format string for formatting floats.
floatFormat = "#,###.##"

// bytesPerMegabyte is the number of bytes per MB
bytesPerMegabyte = 1024 * 1024
)
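
// NodeStatusCommand implements the node status command, which displays
// status information about one or all client nodes.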
type NodeStatusCommand struct {
Meta
length int
short bool
verbose bool
list_allocs bool
self bool
stats bool
json bool
tmpl string
}
func (c *NodeStatusCommand) Help() string {
helpText := `
Usage: nomad node status [options] <node>

Display status information about a given node. The list of nodes
returned includes only nodes to which jobs may be scheduled, and
includes status and other high-level information.

If a node ID is passed, information for that specific node will be displayed,
including resource usage statistics. If no node IDs are passed, then a
short-hand list of all nodes will be displayed. The -self flag is useful to
quickly access the status of the local node.

General Options:
` + generalOptionsUsage() + `
Node Status Options:
-self
Query the status of the local node.
-stats
Display detailed resource usage statistics.
-allocs
Display a count of running allocations for each node.
-short
Display short output. Used only when a single node is being
queried, and drops verbose output about node allocations.
-verbose
Display full information.
-json
Output the node in its JSON format.
-t
Format and display node using a Go template.
`
return strings.TrimSpace(helpText)
}
func (c *NodeStatusCommand) Synopsis() string {
return "Display status information about nodes"
}
func (c *NodeStatusCommand) AutocompleteFlags() complete.Flags {
return mergeAutocompleteFlags(c.Meta.AutocompleteFlags(FlagSetClient),
complete.Flags{
"-allocs": complete.PredictNothing,
"-json": complete.PredictNothing,
"-self": complete.PredictNothing,
"-short": complete.PredictNothing,
"-stats": complete.PredictNothing,
"-t": complete.PredictAnything,
"-verbose": complete.PredictNothing,
})
}
func (c *NodeStatusCommand) AutocompleteArgs() complete.Predictor {
return complete.PredictFunc(func(a complete.Args) []string {
client, err := c.Meta.Client()
if err != nil {
return nil
}
resp, _, err := client.Search().PrefixSearch(a.Last, contexts.Nodes, nil)
if err != nil {
return []string{}
}
return resp.Matches[contexts.Nodes]
})
}
func (c *NodeStatusCommand) Name() string { return "node-status" }
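
// Run queries one node or lists all nodes and renders their status according
// to the parsed flags and arguments.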
func (c *NodeStatusCommand) Run(args []string) int {
flags := c.Meta.FlagSet(c.Name(), FlagSetClient)
flags.Usage = func() { c.Ui.Output(c.Help()) }
flags.BoolVar(&c.short, "short", false, "")
flags.BoolVar(&c.verbose, "verbose", false, "")
flags.BoolVar(&c.list_allocs, "allocs", false, "")
flags.BoolVar(&c.self, "self", false, "")
flags.BoolVar(&c.stats, "stats", false, "")
flags.BoolVar(&c.json, "json", false, "")
flags.StringVar(&c.tmpl, "t", "", "")
if err := flags.Parse(args); err != nil {
return 1
}
// Check that we got either a single node or none
args = flags.Args()
if len(args) > 1 {
c.Ui.Error("This command takes either one or no arguments")
c.Ui.Error(commandErrorText(c))
return 1
}
// Truncate the id unless full length is requested
c.length = shortId
if c.verbose {
c.length = fullId
}
// Get the HTTP client
client, err := c.Meta.Client()
if err != nil {
c.Ui.Error(fmt.Sprintf("Error initializing client: %s", err))
return 1
}
// Use list mode if no node name was provided
if len(args) == 0 && !c.self {
// Query the node info
nodes, _, err := client.Nodes().List(nil)
if err != nil {
c.Ui.Error(fmt.Sprintf("Error querying node status: %s", err))
return 1
}
// If output format is specified, format and output the node data list
if c.json || len(c.tmpl) > 0 {
out, err := Format(c.json, c.tmpl, nodes)
if err != nil {
c.Ui.Error(err.Error())
return 1
}
c.Ui.Output(out)
return 0
}
// Return nothing if no nodes found
if len(nodes) == 0 {
return 0
}
// Format the nodes list
out := make([]string, len(nodes)+1)
out[0] = "ID|DC|Name|Class|"
if c.verbose {
out[0] += "Address|Version|"
}
out[0] += "Drain|Eligibility|Status"
if c.list_allocs {
out[0] += "|Running Allocs"
}
for i, node := range nodes {
out[i+1] = fmt.Sprintf("%s|%s|%s|%s",
limit(node.ID, c.length),
node.Datacenter,
node.Name,
node.NodeClass)
if c.verbose {
out[i+1] += fmt.Sprintf("|%s|%s",
node.Address, node.Version)
}
out[i+1] += fmt.Sprintf("|%v|%s|%s",
node.Drain,
node.SchedulingEligibility,
node.Status)
if c.list_allocs {
numAllocs, err := getRunningAllocs(client, node.ID)
if err != nil {
c.Ui.Error(fmt.Sprintf("Error querying node allocations: %s", err))
return 1
}
out[i+1] += fmt.Sprintf("|%v",
len(numAllocs))
}
}
// Dump the output
c.Ui.Output(formatList(out))
return 0
}
// Query the specific node
var nodeID string
if !c.self {
nodeID = args[0]
} else {
var err error
if nodeID, err = getLocalNodeID(client); err != nil {
c.Ui.Error(err.Error())
return 1
}
}
if len(nodeID) == 1 {
c.Ui.Error("Identifier must contain at least two characters.")
return 1
}
nodeID = sanitizeUUIDPrefix(nodeID)
nodes, _, err := client.Nodes().PrefixList(nodeID)
if err != nil {
c.Ui.Error(fmt.Sprintf("Error querying node info: %s", err))
return 1
}
// Return error if no nodes are found
if len(nodes) == 0 {
c.Ui.Error(fmt.Sprintf("No node(s) with prefix %q found", nodeID))
return 1
}
if len(nodes) > 1 {
// Dump the output
c.Ui.Error(fmt.Sprintf("Prefix matched multiple nodes\n\n%s",
formatNodeStubList(nodes, c.verbose)))
return 1
}
// Prefix lookup matched a single node
node, _, err := client.Nodes().Info(nodes[0].ID, nil)
if err != nil {
c.Ui.Error(fmt.Sprintf("Error querying node info: %s", err))
return 1
}
// If output format is specified, format and output the data
if c.json || len(c.tmpl) > 0 {
out, err := Format(c.json, c.tmpl, node)
if err != nil {
c.Ui.Error(err.Error())
return 1
}
c.Ui.Output(out)
return 0
}
return c.formatNode(client, node)
}
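
// nodeDrivers returns the sorted names of the drivers detected on the node,
// derived from its "driver.<name>" attributes.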
func nodeDrivers(n *api.Node) []string {
var drivers []string
for k, v := range n.Attributes {
// driver.docker = 1
parts := strings.Split(k, ".")
if len(parts) != 2 {
continue
} else if parts[0] != "driver" {
continue
} else if v != "1" {
continue
}
drivers = append(drivers, parts[1])
}
sort.Strings(drivers)
return drivers
}
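
// nodeCSIControllerNames returns the sorted names of the CSI controller
// plugins registered on the node.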
func nodeCSIControllerNames(n *api.Node) []string {
var names []string
for name := range n.CSIControllerPlugins {
names = append(names, name)
}
sort.Strings(names)
return names
}
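
// nodeCSINodeNames returns the sorted names of the CSI node plugins
// registered on the node.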
func nodeCSINodeNames(n *api.Node) []string {
var names []string
for name := range n.CSINodePlugins {
names = append(names, name)
}
sort.Strings(names)
return names
}
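
// nodeCSIVolumeNames returns the sorted names of the volumes requested by
// the task groups of the given allocations.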
func nodeCSIVolumeNames(n *api.Node, allocs []*api.Allocation) []string {
var names []string
for _, alloc := range allocs {
tg := alloc.GetTaskGroup()
if tg == nil || len(tg.Volumes) == 0 {
continue
}
for _, v := range tg.Volumes {
names = append(names, v.Name)
}
}
sort.Strings(names)
return names
}
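
// nodeVolumeNames returns the sorted names of the host volumes configured
// on the node.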
func nodeVolumeNames(n *api.Node) []string {
var volumes []string
for name := range n.HostVolumes {
volumes = append(volumes, name)
}
sort.Strings(volumes)
return volumes
}
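
// formatDrain renders the node's drain status, including the drain deadline
// and whether system jobs are ignored when a drain strategy is set.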
func formatDrain(n *api.Node) string {
if n.DrainStrategy != nil {
b := new(strings.Builder)
b.WriteString("true")
if n.DrainStrategy.DrainSpec.Deadline.Nanoseconds() < 0 {
b.WriteString("; force drain")
} else if n.DrainStrategy.ForceDeadline.IsZero() {
b.WriteString("; no deadline")
} else {
fmt.Fprintf(b, "; %s deadline", formatTime(n.DrainStrategy.ForceDeadline))
}
if n.DrainStrategy.IgnoreSystemJobs {
b.WriteString("; ignoring system jobs")
}
return b.String()
}
return strconv.FormatBool(n.Drain)
}
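
// formatNode prints detailed status for a single node: drain and scheduling
// state, volumes, drivers, resource utilization, events, and allocations.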
func (c *NodeStatusCommand) formatNode(client *api.Client, node *api.Node) int {
// Make one API call for allocations
nodeAllocs, _, err := client.Nodes().Allocations(node.ID, nil)
if err != nil {
c.Ui.Error(fmt.Sprintf("Error querying node allocations: %s", err))
return 1
}
var runningAllocs []*api.Allocation
for _, alloc := range nodeAllocs {
if alloc.ClientStatus == "running" {
runningAllocs = append(runningAllocs, alloc)
}
}
// Format the header output
basic := []string{
fmt.Sprintf("ID|%s", node.ID),
fmt.Sprintf("Name|%s", node.Name),
fmt.Sprintf("Class|%s", node.NodeClass),
fmt.Sprintf("DC|%s", node.Datacenter),
fmt.Sprintf("Drain|%v", formatDrain(node)),
fmt.Sprintf("Eligibility|%s", node.SchedulingEligibility),
fmt.Sprintf("Status|%s", node.Status),
fmt.Sprintf("CSI Controllers|%s", strings.Join(nodeCSIControllerNames(node), ",")),
fmt.Sprintf("CSI Drivers|%s", strings.Join(nodeCSINodeNames(node), ",")),
}
if c.short {
basic = append(basic, fmt.Sprintf("Host Volumes|%s", strings.Join(nodeVolumeNames(node), ",")))
basic = append(basic, fmt.Sprintf("CSI Volumes|%s", strings.Join(nodeCSIVolumeNames(node, runningAllocs), ",")))
basic = append(basic, fmt.Sprintf("Drivers|%s", strings.Join(nodeDrivers(node), ",")))
c.Ui.Output(c.Colorize().Color(formatKV(basic)))
// Output alloc info
if err := c.outputAllocInfo(node, nodeAllocs); err != nil {
c.Ui.Error(fmt.Sprintf("%s", err))
return 1
}
return 0
}
// Get the host stats
hostStats, nodeStatsErr := client.Nodes().Stats(node.ID, nil)
if nodeStatsErr != nil {
c.Ui.Output("")
c.Ui.Error(fmt.Sprintf("error fetching node stats: %v", nodeStatsErr))
}
if hostStats != nil {
uptime := time.Duration(hostStats.Uptime * uint64(time.Second))
basic = append(basic, fmt.Sprintf("Uptime|%s", uptime.String()))
}
// When not running in verbose mode, also include host volumes and
// driver info in the basic output
if !c.verbose {
basic = append(basic, fmt.Sprintf("Host Volumes|%s", strings.Join(nodeVolumeNames(node), ",")))
basic = append(basic, fmt.Sprintf("CSI Volumes|%s", strings.Join(nodeCSIVolumeNames(node, runningAllocs), ",")))
driverStatus := fmt.Sprintf("Driver Status| %s", c.outputTruncatedNodeDriverInfo(node))
basic = append(basic, driverStatus)
}
// Output the basic info
c.Ui.Output(c.Colorize().Color(formatKV(basic)))
// If we're running in verbose mode, include full host volume and driver info
if c.verbose {
c.outputNodeVolumeInfo(node)
c.outputNodeCSIVolumeInfo(client, node, runningAllocs)
c.outputNodeDriverInfo(node)
}
// Emit node events
c.outputNodeStatusEvents(node)
// Get list of running allocations on the node
allocatedResources := getAllocatedResources(client, runningAllocs, node)
c.Ui.Output(c.Colorize().Color("\n[bold]Allocated Resources[reset]"))
c.Ui.Output(formatList(allocatedResources))
actualResources, err := getActualResources(client, runningAllocs, node)
if err == nil {
c.Ui.Output(c.Colorize().Color("\n[bold]Allocation Resource Utilization[reset]"))
c.Ui.Output(formatList(actualResources))
}
hostResources, err := getHostResources(hostStats, node)
if err != nil {
c.Ui.Output("")
c.Ui.Error(fmt.Sprintf("error fetching node stats: %v", err))
}
if err == nil {
c.Ui.Output(c.Colorize().Color("\n[bold]Host Resource Utilization[reset]"))
c.Ui.Output(formatList(hostResources))
}
if err == nil && node.NodeResources != nil && len(node.NodeResources.Devices) > 0 {
c.Ui.Output(c.Colorize().Color("\n[bold]Device Resource Utilization[reset]"))
c.Ui.Output(formatList(getDeviceResourcesForNode(hostStats.DeviceStats, node)))
}
if hostStats != nil && c.stats {
c.Ui.Output(c.Colorize().Color("\n[bold]CPU Stats[reset]"))
c.printCpuStats(hostStats)
c.Ui.Output(c.Colorize().Color("\n[bold]Memory Stats[reset]"))
c.printMemoryStats(hostStats)
c.Ui.Output(c.Colorize().Color("\n[bold]Disk Stats[reset]"))
c.printDiskStats(hostStats)
if len(hostStats.DeviceStats) > 0 {
c.Ui.Output(c.Colorize().Color("\n[bold]Device Stats[reset]"))
printDeviceStats(c.Ui, hostStats.DeviceStats)
}
}
if err := c.outputAllocInfo(node, nodeAllocs); err != nil {
c.Ui.Error(fmt.Sprintf("%s", err))
return 1
}
return 0
}
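
// outputAllocInfo prints the node's allocations and, in verbose mode, its
// attributes, device attributes, and metadata.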
func (c *NodeStatusCommand) outputAllocInfo(node *api.Node, nodeAllocs []*api.Allocation) error {
c.Ui.Output(c.Colorize().Color("\n[bold]Allocations[reset]"))
c.Ui.Output(formatAllocList(nodeAllocs, c.verbose, c.length))
if c.verbose {
c.formatAttributes(node)
c.formatDeviceAttributes(node)
c.formatMeta(node)
}
return nil
}
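
// outputTruncatedNodeDriverInfo returns a one-line, comma-separated summary
// of the detected drivers, marking any that are unhealthy.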
func (c *NodeStatusCommand) outputTruncatedNodeDriverInfo(node *api.Node) string {
drivers := make([]string, 0, len(node.Drivers))
for driverName, driverInfo := range node.Drivers {
if !driverInfo.Detected {
continue
}
if !driverInfo.Healthy {
drivers = append(drivers, fmt.Sprintf("%s (unhealthy)", driverName))
} else {
drivers = append(drivers, driverName)
}
}
sort.Strings(drivers)
return strings.Trim(strings.Join(drivers, ","), ", ")
}
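
// outputNodeVolumeInfo prints a table of the host volumes configured on the
// node.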
func (c *NodeStatusCommand) outputNodeVolumeInfo(node *api.Node) {
c.Ui.Output(c.Colorize().Color("\n[bold]Host Volumes"))
names := make([]string, 0, len(node.HostVolumes))
for name := range node.HostVolumes {
names = append(names, name)
}
sort.Strings(names)
output := make([]string, 0, len(names)+1)
output = append(output, "Name|ReadOnly|Source")
for _, volName := range names {
info := node.HostVolumes[volName]
output = append(output, fmt.Sprintf("%s|%v|%s", volName, info.ReadOnly, info.Path))
}
c.Ui.Output(formatList(output))
}
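
// outputNodeCSIVolumeInfo prints a table of the CSI volumes in use by the
// given running allocations, including schedulability and mount options.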
func (c *NodeStatusCommand) outputNodeCSIVolumeInfo(client *api.Client, node *api.Node, runningAllocs []*api.Allocation) {
c.Ui.Output(c.Colorize().Color("\n[bold]CSI Volumes"))
// Collect the volume names as in nodeCSIVolumeNames, and also index the
// volume requests by source so volume IDs can be mapped back to names
var names []string
requests := map[string]*api.VolumeRequest{}
for _, alloc := range runningAllocs {
tg := alloc.GetTaskGroup()
if tg == nil || len(tg.Volumes) == 0 {
continue
}
for _, v := range tg.Volumes {
names = append(names, v.Name)
requests[v.Source] = v
}
}
if len(names) == 0 {
return
}
sort.Strings(names)
// Fetch the volume objects with current status
// Ignore an error, all we're going to do is omit the volumes
volumes := map[string]*api.CSIVolumeListStub{}
vs, _ := client.Nodes().CSIVolumes(node.ID, nil)
for _, v := range vs {
n := requests[v.ID].Name
volumes[n] = v
}
// Output the volumes in name order
output := make([]string, 0, len(names)+1)
output = append(output, "ID|Name|Plugin ID|Schedulable|Provider|Access Mode|Mount Options")
for _, name := range names {
v := volumes[name]
r := requests[v.ID]
output = append(output, fmt.Sprintf(
"%s|%s|%s|%t|%s|%s|%s",
v.ID,
name,
v.PluginID,
v.Schedulable,
v.Provider,
v.AccessMode,
csiVolMountOption(v.MountOptions, r.MountOptions),
))
}
c.Ui.Output(formatList(output))
}
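
// outputNodeDriverInfo prints a table of the node's drivers with their
// detection and health status.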
func (c *NodeStatusCommand) outputNodeDriverInfo(node *api.Node) {
c.Ui.Output(c.Colorize().Color("\n[bold]Drivers"))
size := len(node.Drivers)
nodeDrivers := make([]string, 0, size+1)
nodeDrivers = append(nodeDrivers, "Driver|Detected|Healthy|Message|Time")
drivers := make([]string, 0, len(node.Drivers))
for driver := range node.Drivers {
drivers = append(drivers, driver)
}
sort.Strings(drivers)
for _, driver := range drivers {
info := node.Drivers[driver]
timestamp := formatTime(info.UpdateTime)
nodeDrivers = append(nodeDrivers, fmt.Sprintf("%s|%v|%v|%s|%s", driver, info.Detected, info.Healthy, info.HealthDescription, timestamp))
}
c.Ui.Output(formatList(nodeDrivers))
}
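
// outputNodeStatusEvents prints the node's event history.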
func (c *NodeStatusCommand) outputNodeStatusEvents(node *api.Node) {
c.Ui.Output(c.Colorize().Color("\n[bold]Node Events"))
c.outputNodeEvent(node.Events)
}
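
// outputNodeEvent renders node events in reverse order; in verbose mode the
// event details are included.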
func (c *NodeStatusCommand) outputNodeEvent(events []*api.NodeEvent) {
size := len(events)
nodeEvents := make([]string, size+1)
if c.verbose {
nodeEvents[0] = "Time|Subsystem|Message|Details"
} else {
nodeEvents[0] = "Time|Subsystem|Message"
}
for i, event := range events {
timestamp := formatTime(event.Timestamp)
subsystem := formatEventSubsystem(event.Subsystem, event.Details["driver"])
msg := event.Message
if c.verbose {
details := formatEventDetails(event.Details)
nodeEvents[size-i] = fmt.Sprintf("%s|%s|%s|%s", timestamp, subsystem, msg, details)
} else {
nodeEvents[size-i] = fmt.Sprintf("%s|%s|%s", timestamp, subsystem, msg)
}
}
c.Ui.Output(formatList(nodeEvents))
}
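
// formatEventSubsystem returns the event's subsystem name, substituting a
// driver-prefixed name when the event was emitted by a driver.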
func formatEventSubsystem(subsystem, driverName string) string {
if driverName == "" {
return subsystem
}
// If this event is for a driver, show the driver name to make the message
// clearer
return fmt.Sprintf("Driver: %s", driverName)
}
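
// formatEventDetails flattens an event's details map into a comma-separated
// list of key: value pairs.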
func formatEventDetails(details map[string]string) string {
output := make([]string, 0, len(details))
for k, v := range details {
output = append(output, fmt.Sprintf("%s: %s", k, v))
}
return strings.Join(output, ", ")
}
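
// formatAttributes prints the node's attributes sorted by key.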
func (c *NodeStatusCommand) formatAttributes(node *api.Node) {
// Print the attributes
keys := make([]string, 0, len(node.Attributes))
for k := range node.Attributes {
keys = append(keys, k)
}
sort.Strings(keys)
var attributes []string
for _, k := range keys {
if k != "" {
attributes = append(attributes, fmt.Sprintf("%s|%s", k, node.Attributes[k]))
}
}
c.Ui.Output(c.Colorize().Color("\n[bold]Attributes[reset]"))
c.Ui.Output(formatKV(attributes))
}
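
// formatDeviceAttributes prints the attributes of each device group reported
// by the node.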
func (c *NodeStatusCommand) formatDeviceAttributes(node *api.Node) {
if node.NodeResources == nil {
return
}
devices := node.NodeResources.Devices
if len(devices) == 0 {
return
}
sort.Slice(devices, func(i, j int) bool {
return devices[i].ID() < devices[j].ID()
})
first := true
for _, d := range devices {
if len(d.Attributes) == 0 {
continue
}
if first {
c.Ui.Output("\nDevice Group Attributes")
first = false
} else {
c.Ui.Output("")
}
c.Ui.Output(formatKV(getDeviceAttributes(d)))
}
}
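
// formatMeta prints the node's metadata sorted by key.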
func (c *NodeStatusCommand) formatMeta(node *api.Node) {
// Print the meta
keys := make([]string, 0, len(node.Meta))
for k := range node.Meta {
keys = append(keys, k)
}
sort.Strings(keys)
var meta []string
for _, k := range keys {
if k != "" {
meta = append(meta, fmt.Sprintf("%s|%s", k, node.Meta[k]))
}
}
c.Ui.Output(c.Colorize().Color("\n[bold]Meta[reset]"))
c.Ui.Output(formatKV(meta))
}
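
// printCpuStats prints per-CPU utilization from the host stats.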
func (c *NodeStatusCommand) printCpuStats(hostStats *api.HostStats) {
l := len(hostStats.CPU)
for i, cpuStat := range hostStats.CPU {
cpuStatsAttr := make([]string, 4)
cpuStatsAttr[0] = fmt.Sprintf("CPU|%v", cpuStat.CPU)
cpuStatsAttr[1] = fmt.Sprintf("User|%v%%", humanize.FormatFloat(floatFormat, cpuStat.User))
cpuStatsAttr[2] = fmt.Sprintf("System|%v%%", humanize.FormatFloat(floatFormat, cpuStat.System))
cpuStatsAttr[3] = fmt.Sprintf("Idle|%v%%", humanize.FormatFloat(floatFormat, cpuStat.Idle))
c.Ui.Output(formatKV(cpuStatsAttr))
if i+1 < l {
c.Ui.Output("")
}
}
}
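
// printMemoryStats prints host memory usage from the host stats.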
func (c *NodeStatusCommand) printMemoryStats(hostStats *api.HostStats) {
memoryStat := hostStats.Memory
memStatsAttr := make([]string, 4)
memStatsAttr[0] = fmt.Sprintf("Total|%v", humanize.IBytes(memoryStat.Total))
memStatsAttr[1] = fmt.Sprintf("Available|%v", humanize.IBytes(memoryStat.Available))
memStatsAttr[2] = fmt.Sprintf("Used|%v", humanize.IBytes(memoryStat.Used))
memStatsAttr[3] = fmt.Sprintf("Free|%v", humanize.IBytes(memoryStat.Free))
c.Ui.Output(formatKV(memStatsAttr))
}
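
// printDiskStats prints per-disk usage from the host stats.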
func (c *NodeStatusCommand) printDiskStats(hostStats *api.HostStats) {
l := len(hostStats.DiskStats)
for i, diskStat := range hostStats.DiskStats {
diskStatsAttr := make([]string, 7)
diskStatsAttr[0] = fmt.Sprintf("Device|%s", diskStat.Device)
diskStatsAttr[1] = fmt.Sprintf("MountPoint|%s", diskStat.Mountpoint)
diskStatsAttr[2] = fmt.Sprintf("Size|%s", humanize.IBytes(diskStat.Size))
diskStatsAttr[3] = fmt.Sprintf("Used|%s", humanize.IBytes(diskStat.Used))
diskStatsAttr[4] = fmt.Sprintf("Available|%s", humanize.IBytes(diskStat.Available))
diskStatsAttr[5] = fmt.Sprintf("Used Percent|%v%%", humanize.FormatFloat(floatFormat, diskStat.UsedPercent))
diskStatsAttr[6] = fmt.Sprintf("Inodes Percent|%v%%", humanize.FormatFloat(floatFormat, diskStat.InodesUsedPercent))
c.Ui.Output(formatKV(diskStatsAttr))
if i+1 < l {
c.Ui.Output("")
}
}
}
// getRunningAllocs returns the allocations running on the given node
func getRunningAllocs(client *api.Client, nodeID string) ([]*api.Allocation, error) {
var allocs []*api.Allocation
// Query the node allocations
nodeAllocs, _, err := client.Nodes().Allocations(nodeID, nil)
// Filter list to only running allocations
for _, alloc := range nodeAllocs {
if alloc.ClientStatus == "running" {
allocs = append(allocs, alloc)
}
}
return allocs, err
}
// getAllocatedResources returns the resources allocated to running
// allocations compared to the node's total allocatable resources.
func getAllocatedResources(client *api.Client, runningAllocs []*api.Allocation, node *api.Node) []string {
// Compute the total
total := computeNodeTotalResources(node)
// Get Resources
var cpu, mem, disk int
for _, alloc := range runningAllocs {
cpu += *alloc.Resources.CPU
mem += *alloc.Resources.MemoryMB
disk += *alloc.Resources.DiskMB
}
resources := make([]string, 2)
resources[0] = "CPU|Memory|Disk"
resources[1] = fmt.Sprintf("%d/%d MHz|%s/%s|%s/%s",
cpu,
*total.CPU,
humanize.IBytes(uint64(mem*bytesPerMegabyte)),
humanize.IBytes(uint64(*total.MemoryMB*bytesPerMegabyte)),
humanize.IBytes(uint64(disk*bytesPerMegabyte)),
humanize.IBytes(uint64(*total.DiskMB*bytesPerMegabyte)))
return resources
}
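
// A minimal sketch of the rows built above, assuming (hypothetically) that the
// allocations on the node sum to 500 MHz of CPU, 256 MB of memory, and 300 MB
// of disk against allocatable totals of 3000 MHz, 2048 MB, and 4096 MB:
//
//	resources[0] // "CPU|Memory|Disk"
//	resources[1] // "500/3000 MHz|256 MiB/2.0 GiB|300 MiB/4.0 GiB"
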
// computeNodeTotalResources returns the total allocatable resources (resources
// minus reserved)
func computeNodeTotalResources(node *api.Node) api.Resources {
total := api.Resources{}
r := node.Resources
res := node.Reserved
if res == nil {
res = &api.Resources{}
}
total.CPU = helper.IntToPtr(*r.CPU - *res.CPU)
total.MemoryMB = helper.IntToPtr(*r.MemoryMB - *res.MemoryMB)
total.DiskMB = helper.IntToPtr(*r.DiskMB - *res.DiskMB)
return total
}
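
// A minimal usage sketch with hypothetical numbers: for a node advertising
// 4000 MHz CPU, 8192 MB memory, and 100000 MB disk, with 500 MHz, 512 MB, and
// 1000 MB reserved, the allocatable totals work out to:
//
//	total := computeNodeTotalResources(node)
//	// *total.CPU == 3500, *total.MemoryMB == 7680, *total.DiskMB == 99000
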
// getActualResources returns the actual resource usage of the allocations.
func getActualResources(client *api.Client, runningAllocs []*api.Allocation, node *api.Node) ([]string, error) {
// Compute the node's total allocatable resources
total := computeNodeTotalResources(node)
// Sum the actual usage reported for each running allocation
var cpu float64
var mem uint64
for _, alloc := range runningAllocs {
// Make the call to the client to get the actual usage.
stats, err := client.Allocations().Stats(alloc, nil)
if err != nil {
return nil, err
}
cpu += stats.ResourceUsage.CpuStats.TotalTicks
mem += stats.ResourceUsage.MemoryStats.RSS
}
resources := make([]string, 2)
resources[0] = "CPU|Memory"
resources[1] = fmt.Sprintf("%v/%d MHz|%v/%v",
math.Floor(cpu),
*total.CPU,
humanize.IBytes(mem),
humanize.IBytes(uint64(*total.MemoryMB*bytesPerMegabyte)))
return resources, nil
}
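
// A usage sketch (illustrative only, not taken from the command flow above):
// the returned rows are meant to be fed to formatList, e.g.
//
//	if rows, err := getActualResources(client, runningAllocs, node); err == nil {
//		c.Ui.Output(formatList(rows))
//	}
//
// With a hypothetical 812 CPU ticks and 1.2 GiB of RSS in use against 3500 MHz
// and 7680 MB allocatable, rows[1] would read roughly
// "812/3500 MHz|1.2 GiB/7.5 GiB".
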
// getHostResources returns the actual resource usage of the node.
func getHostResources(hostStats *api.HostStats, node *api.Node) ([]string, error) {
if hostStats == nil {
return nil, fmt.Errorf("actual resource usage not present")
}
var resources []string
// Calculate disk usage for the node's storage volume
storageDevice := node.Attributes["unique.storage.volume"]
var diskUsed, diskSize uint64
var physical bool
for _, disk := range hostStats.DiskStats {
if disk.Device == storageDevice {
diskUsed = disk.Used
diskSize = disk.Size
physical = true
}
}
resources = make([]string, 2)
resources[0] = "CPU|Memory|Disk"
if physical {
resources[1] = fmt.Sprintf("%v/%d MHz|%s/%s|%s/%s",
math.Floor(hostStats.CPUTicksConsumed),
*node.Resources.CPU,
humanize.IBytes(hostStats.Memory.Used),
humanize.IBytes(hostStats.Memory.Total),
humanize.IBytes(diskUsed),
humanize.IBytes(diskSize),
)
} else {
// If a non-physical device is used, output only the device name,
// since Nomad doesn't collect stats for it.
resources[1] = fmt.Sprintf("%v/%d MHz|%s/%s|(%s)",
math.Floor(hostStats.CPUTicksConsumed),
*node.Resources.CPU,
humanize.IBytes(hostStats.Memory.Used),
humanize.IBytes(hostStats.Memory.Total),
storageDevice,
)
}
return resources, nil
}
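
// Illustrative output rows (hypothetical values): when the configured storage
// volume matches a physical device reported by the host stats, the disk column
// shows used/size, e.g.
//
//	"1234/30027 MHz|3.2 GiB/23 GiB|24 GiB/229 GiB"
//
// otherwise the device name is echoed in parentheses instead, e.g.
//
//	"1234/30027 MHz|3.2 GiB/23 GiB|(/dev/mapper/root)"
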
// formatNodeStubList returns a table-formatted view of a list of node stubs.
func formatNodeStubList(nodes []*api.NodeListStub, verbose bool) string {
// Return early if no nodes are found
if len(nodes) == 0 {
return ""
}
// Truncate the id unless full length is requested
length := shortId
if verbose {
length = fullId
}
// Format the list of nodes that match the prefix so the user
// can make a more specific request
out := make([]string, len(nodes)+1)
out[0] = "ID|DC|Name|Class|Drain|Eligibility|Status"
for i, node := range nodes {
out[i+1] = fmt.Sprintf("%s|%s|%s|%s|%v|%s|%s",
limit(node.ID, length),
node.Datacenter,
node.Name,
node.NodeClass,
node.Drain,
node.SchedulingEligibility,
node.Status)
}
return formatList(out)
}
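
// An illustrative call (IDs, names, and datacenters below are hypothetical):
//
//	out := formatNodeStubList(nodes, false)
//	// ID        DC   Name      Class    Drain  Eligibility  Status
//	// f7476465  dc1  worker-1  compute  false  eligible     ready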