storage: add volumes to 'nomad alloc status' CLI (#7256)

Adds a stanza for both Host Volumes and CSI Volumes to the CLI
output for `nomad alloc status`. This mostly relies on information already
in the API structs, but when there are CSI Volumes we need to make
extra API calls to get the volume status. To reduce overhead, these
extra calls are hidden behind the `-verbose` flag.
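
For illustration (the volume and plugin names below are made up), the new stanzas render as aligned tables in the `-verbose` output; the Provider and Mount Options columns show "n/a" until the TODOs linked in the code are resolved:

    Host Volumes:
    ID    Read Only
    vol0  true

    CSI Volumes:
    ID    Plugin  Provider  Schedulable  Read Only  Mount Options
    vol1  minnie  n/a       true         false      n/a

Without `-verbose`, the CSI Volumes table shows only the ID and Read Only columns, so no per-volume lookups are made.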
Tim Gross 2020-03-06 09:44:43 -05:00 committed by Tim Gross
parent b3bf64485e
commit 016281135c
2 changed files with 226 additions and 2 deletions

command/alloc_status.go

@@ -12,6 +12,7 @@ import (
"github.com/hashicorp/nomad/api"
"github.com/hashicorp/nomad/api/contexts"
"github.com/hashicorp/nomad/client/allocrunner/taskrunner/restarts"
"github.com/hashicorp/nomad/nomad/structs"
"github.com/posener/complete"
)
@@ -214,7 +215,7 @@ func (c *AllocStatusCommand) Run(args []string) int {
c.Ui.Output("Omitting resource statistics since the node is down.")
}
}
c.outputTaskDetails(alloc, stats, displayStats)
c.outputTaskDetails(alloc, stats, displayStats, verbose)
}
// Format the detailed status
@@ -362,12 +363,13 @@ func futureEvalTimePretty(evalID string, client *api.Client) string {
// outputTaskDetails prints task details for each task in the allocation,
// optionally printing verbose statistics if displayStats is set
func (c *AllocStatusCommand) outputTaskDetails(alloc *api.Allocation, stats *api.AllocResourceUsage, displayStats bool) {
func (c *AllocStatusCommand) outputTaskDetails(alloc *api.Allocation, stats *api.AllocResourceUsage, displayStats bool, verbose bool) {
for task := range c.sortedTaskStateIterator(alloc.TaskStates) {
state := alloc.TaskStates[task]
c.Ui.Output(c.Colorize().Color(fmt.Sprintf("\n[bold]Task %q is %q[reset]", task, state.State)))
c.outputTaskResources(alloc, task, stats, displayStats)
c.Ui.Output("")
c.outputTaskVolumes(alloc, task, verbose)
c.outputTaskStatus(state)
}
}
@@ -721,3 +723,79 @@ func (c *AllocStatusCommand) sortedTaskStateIterator(m map[string]*api.TaskState
close(output)
return output
}
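// outputTaskVolumes prints Host Volumes and CSI Volumes stanzas for the given
// task if it mounts any volumes. Detailed CSI volume info requires an extra
// API call per volume, so it is only looked up when verbose is set.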
func (c *AllocStatusCommand) outputTaskVolumes(alloc *api.Allocation, taskName string, verbose bool) {
var task *api.Task
var tg *api.TaskGroup
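// walk the job to find this task and its task group so we can look up the
// volume requests behind its mounts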
FOUND:
for _, tg = range alloc.Job.TaskGroups {
for _, task = range tg.Tasks {
if task.Name == taskName {
break FOUND
}
}
}
if task == nil || task.Name != taskName {
c.Ui.Error(fmt.Sprintf("Could not find task data for %q", taskName))
return
}
if len(task.VolumeMounts) == 0 {
return
}
client, err := c.Meta.Client()
if err != nil {
c.Ui.Error(fmt.Sprintf("Error initializing client: %s", err))
return
}
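// buffer the rows for each stanza; a stanza is only printed if it ends up
// with at least one row beyond its header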
var hostVolumesOutput []string
var csiVolumesOutput []string
hostVolumesOutput = append(hostVolumesOutput, "ID|Read Only")
if verbose {
csiVolumesOutput = append(csiVolumesOutput,
"ID|Plugin|Provider|Schedulable|Read Only|Mount Options")
} else {
csiVolumesOutput = append(csiVolumesOutput, "ID|Read Only")
}
for _, volMount := range task.VolumeMounts {
volReq := tg.Volumes[*volMount.Volume]
switch volReq.Type {
case structs.VolumeTypeHost:
hostVolumesOutput = append(hostVolumesOutput,
fmt.Sprintf("%s|%v", volReq.Name, *volMount.ReadOnly))
case structs.VolumeTypeCSI:
if verbose {
// fetching CSI volume details costs an extra API call per volume,
// so we only make these calls when the -verbose flag is set
vol, _, err := client.CSIVolumes().Info(volReq.Name, nil)
if err != nil {
c.Ui.Error(fmt.Sprintf("Error retrieving volume info for %q: %s",
volReq.Name, err))
continue
}
csiVolumesOutput = append(csiVolumesOutput,
fmt.Sprintf("%s|%s|%s|%v|%v|%s",
volReq.Name, vol.PluginID,
"n/a", // TODO(tgross): https://github.com/hashicorp/nomad/issues/7248
vol.Schedulable,
volReq.ReadOnly,
"n/a", // TODO(tgross): https://github.com/hashicorp/nomad/issues/7007
))
} else {
csiVolumesOutput = append(csiVolumesOutput,
fmt.Sprintf("%s|%v", volReq.Name, volReq.ReadOnly))
}
}
}
if len(hostVolumesOutput) > 1 {
c.Ui.Output("Host Volumes:")
c.Ui.Output(formatList(hostVolumesOutput))
c.Ui.Output("") // line padding to next stanza
}
if len(csiVolumesOutput) > 1 {
c.Ui.Output("CSI Volumes:")
c.Ui.Output(formatList(csiVolumesOutput))
c.Ui.Output("") // line padding to next stanza
}
}

command/alloc_status_test.go

@@ -2,11 +2,14 @@ package command
import (
"fmt"
"io/ioutil"
"os"
"regexp"
"strings"
"testing"
"time"
"github.com/hashicorp/nomad/command/agent"
"github.com/hashicorp/nomad/helper/uuid"
"github.com/hashicorp/nomad/nomad/mock"
"github.com/hashicorp/nomad/nomad/structs"
@@ -315,3 +318,146 @@ func TestAllocStatusCommand_AutocompleteArgs(t *testing.T) {
assert.Equal(1, len(res))
assert.Equal(a.ID, res[0])
}
func TestAllocStatusCommand_HostVolumes(t *testing.T) {
t.Parallel()
// We have to create a tempdir for the host volume even though we're
// not going to use it b/c the server validates the config on startup
tmpDir, err := ioutil.TempDir("", "vol0")
if err != nil {
t.Fatalf("unable to create tempdir for test: %v", err)
}
defer os.RemoveAll(tmpDir)
vol0 := uuid.Generate()
srv, _, url := testServer(t, true, func(c *agent.Config) {
c.Client.HostVolumes = []*structs.ClientHostVolumeConfig{
{
Name: vol0,
Path: tmpDir,
ReadOnly: false,
},
}
})
defer srv.Shutdown()
state := srv.Agent.Server().State()
// Upsert the job and alloc
node := mock.Node()
alloc := mock.Alloc()
alloc.Metrics = &structs.AllocMetric{}
alloc.NodeID = node.ID
job := alloc.Job
job.TaskGroups[0].Volumes = map[string]*structs.VolumeRequest{
vol0: {
Name: vol0,
Type: structs.VolumeTypeHost,
Source: tmpDir,
},
}
job.TaskGroups[0].Tasks[0].VolumeMounts = []*structs.VolumeMount{
{
Volume: vol0,
Destination: "/var/www",
ReadOnly: true,
PropagationMode: "private",
},
}
// fakes the placement enough so that we have something to iterate
// on in 'nomad alloc status'
alloc.TaskStates = map[string]*structs.TaskState{
"web": &structs.TaskState{
Events: []*structs.TaskEvent{
structs.NewTaskEvent("test event").SetMessage("test msg"),
},
},
}
summary := mock.JobSummary(alloc.JobID)
require.NoError(t, state.UpsertJobSummary(1004, summary))
require.NoError(t, state.UpsertAllocs(1005, []*structs.Allocation{alloc}))
ui := new(cli.MockUi)
cmd := &AllocStatusCommand{Meta: Meta{Ui: ui}}
if code := cmd.Run([]string{"-address=" + url, "-verbose", alloc.ID}); code != 0 {
t.Fatalf("expected exit 0, got: %d", code)
}
out := ui.OutputWriter.String()
require.Contains(t, out, "Host Volumes")
require.Contains(t, out, fmt.Sprintf("%s true", vol0))
require.NotContains(t, out, "CSI Volumes")
}
func TestAllocStatusCommand_CSIVolumes(t *testing.T) {
t.Parallel()
srv, _, url := testServer(t, true, nil)
defer srv.Shutdown()
state := srv.Agent.Server().State()
// Upsert the node, plugin, and volume
vol0 := uuid.Generate()
node := mock.Node()
node.CSINodePlugins = map[string]*structs.CSIInfo{
"minnie": {
PluginID: "minnie",
Healthy: true,
NodeInfo: &structs.CSINodeInfo{},
},
}
err := state.UpsertNode(1001, node)
require.NoError(t, err)
vols := []*structs.CSIVolume{{
ID: vol0,
Namespace: "notTheNamespace",
PluginID: "minnie",
AccessMode: structs.CSIVolumeAccessModeMultiNodeSingleWriter,
AttachmentMode: structs.CSIVolumeAttachmentModeFilesystem,
Topologies: []*structs.CSITopology{{
Segments: map[string]string{"foo": "bar"},
}},
}}
err = state.CSIVolumeRegister(1002, vols)
require.NoError(t, err)
// Upsert the job and alloc
alloc := mock.Alloc()
alloc.Metrics = &structs.AllocMetric{}
alloc.NodeID = node.ID
job := alloc.Job
job.TaskGroups[0].Volumes = map[string]*structs.VolumeRequest{
vol0: {
Name: vol0,
Type: structs.VolumeTypeCSI,
Source: "/tmp/vol0",
},
}
job.TaskGroups[0].Tasks[0].VolumeMounts = []*structs.VolumeMount{
{
Volume: vol0,
Destination: "/var/www",
ReadOnly: true,
PropagationMode: "private",
},
}
// if we don't set a task state, there's nothing for 'nomad alloc status' to iterate over
alloc.TaskStates = map[string]*structs.TaskState{
"web": &structs.TaskState{
Events: []*structs.TaskEvent{
structs.NewTaskEvent("test event").SetMessage("test msg"),
},
},
}
summary := mock.JobSummary(alloc.JobID)
require.NoError(t, state.UpsertJobSummary(1004, summary))
require.NoError(t, state.UpsertAllocs(1005, []*structs.Allocation{alloc}))
ui := new(cli.MockUi)
cmd := &AllocStatusCommand{Meta: Meta{Ui: ui}}
if code := cmd.Run([]string{"-address=" + url, "-verbose", alloc.ID}); code != 0 {
t.Fatalf("expected exit 0, got: %d", code)
}
out := ui.OutputWriter.String()
require.Contains(t, out, "CSI Volumes")
require.Contains(t, out, fmt.Sprintf("%s minnie", vol0))
require.NotContains(t, out, "Host Volumes")
}