Merge pull request #675 from nautsio/f-cli-short-ids

Shorten CLI identifiers
This commit is contained in:
Alex Dadgar 2016-01-19 15:11:41 -08:00
commit 248b20ae24
33 changed files with 281 additions and 148 deletions

View File

@ -87,7 +87,7 @@ func TestNodes_Info(t *testing.T) {
nodes := c.Nodes()
// Retrieving a non-existent node returns error
_, _, err := nodes.Info("nope", nil)
_, _, err := nodes.Info("12345678-abcd-efab-cdef-123456789abc", nil)
if err == nil || !strings.Contains(err.Error(), "not found") {
t.Fatalf("expected not found error, got: %#v", err)
}
@ -215,7 +215,7 @@ func TestNodes_ForceEvaluate(t *testing.T) {
nodes := c.Nodes()
// Force-eval on a non-existent node fails
_, _, err := nodes.ForceEvaluate("nope", nil)
_, _, err := nodes.ForceEvaluate("12345678-abcd-efab-cdef-123456789abc", nil)
if err == nil || !strings.Contains(err.Error(), "not found") {
t.Fatalf("expected not found error, got: %#v", err)
}

View File

@ -450,6 +450,12 @@ func (c *Client) setupNode() error {
node = &structs.Node{}
c.config.Node = node
}
// Generate an ID for the node
var err error
node.ID, err = c.nodeID()
if err != nil {
return fmt.Errorf("node ID setup failed: %v", err)
}
if node.Attributes == nil {
node.Attributes = make(map[string]string)
}
@ -462,13 +468,6 @@ func (c *Client) setupNode() error {
if node.Resources == nil {
node.Resources = &structs.Resources{}
}
if node.ID == "" {
id, err := c.nodeID()
if err != nil {
return fmt.Errorf("node ID setup failed: %v", err)
}
node.ID = id
}
if node.Datacenter == "" {
node.Datacenter = "dc1"
}

View File

@ -220,7 +220,6 @@ func (a *Agent) setupClient() error {
conf.Node = new(structs.Node)
conf.Node.Datacenter = a.config.Datacenter
conf.Node.Name = a.config.NodeName
conf.Node.ID = a.config.Client.NodeID
conf.Node.Meta = a.config.Client.Meta
conf.Node.NodeClass = a.config.Client.NodeClass

View File

@ -82,7 +82,6 @@ func (c *Command) readConfig() *Config {
// Client-only options
flags.StringVar(&cmdConfig.Client.StateDir, "state-dir", "", "")
flags.StringVar(&cmdConfig.Client.AllocDir, "alloc-dir", "", "")
flags.StringVar(&cmdConfig.Client.NodeID, "node-id", "", "")
flags.StringVar(&cmdConfig.Client.NodeClass, "node-class", "", "")
flags.StringVar(&servers, "servers", "", "")
flags.Var((*sliceflag.StringFlag)(&meta), "meta", "")

View File

@ -135,10 +135,6 @@ type ClientConfig struct {
// Servers is a list of known server addresses. These are as "host:port"
Servers []string `hcl:"servers"`
// NodeID is the unique node identifier to use. A UUID is used
// if not provided, and stored in the data directory
NodeID string `hcl:"node_id"`
// NodeClass is used to group the node by class
NodeClass string `hcl:"node_class"`
@ -492,9 +488,6 @@ func (a *ClientConfig) Merge(b *ClientConfig) *ClientConfig {
if b.AllocDir != "" {
result.AllocDir = b.AllocDir
}
if b.NodeID != "" {
result.NodeID = b.NodeID
}
if b.NodeClass != "" {
result.NodeClass = b.NodeClass
}

View File

@ -35,7 +35,6 @@ func TestConfig_Merge(t *testing.T) {
Enabled: false,
StateDir: "/tmp/state1",
AllocDir: "/tmp/alloc1",
NodeID: "node1",
NodeClass: "class1",
Options: map[string]string{
"foo": "bar",
@ -96,7 +95,6 @@ func TestConfig_Merge(t *testing.T) {
Enabled: true,
StateDir: "/tmp/state2",
AllocDir: "/tmp/alloc2",
NodeID: "node2",
NodeClass: "class2",
Servers: []string{"server2"},
Meta: map[string]string{
@ -413,7 +411,6 @@ func TestConfig_LoadConfigString(t *testing.T) {
StateDir: "/tmp/client-state",
AllocDir: "/tmp/alloc",
Servers: []string{"a.b.c:80", "127.0.0.1:1234"},
NodeID: "xyz123",
NodeClass: "linux-medium-64bit",
Meta: map[string]string{
"foo": "bar",

View File

@ -58,7 +58,14 @@ func TestHTTP_NodesList(t *testing.T) {
func TestHTTP_NodesPrefixList(t *testing.T) {
httpTest(t, nil, func(s *TestServer) {
ids := []string{"aaaaa", "aaaab", "aaabb", "aabbb", "abbbb", "bbbbb"}
ids := []string{
"12345678-abcd-efab-cdef-123456789abc",
"12345678-aaaa-efab-cdef-123456789abc",
"1234aaaa-abcd-efab-cdef-123456789abc",
"1234bbbb-abcd-efab-cdef-123456789abc",
"1234cccc-abcd-efab-cdef-123456789abc",
"1234dddd-abcd-efab-cdef-123456789abc",
}
for i := 0; i < 5; i++ {
// Create the node
node := mock.Node()
@ -74,7 +81,7 @@ func TestHTTP_NodesPrefixList(t *testing.T) {
}
// Make the HTTP request
req, err := http.NewRequest("GET", "/v1/nodes?prefix=aaa", nil)
req, err := http.NewRequest("GET", "/v1/nodes?prefix=12345678", nil)
if err != nil {
t.Fatalf("err: %v", err)
}
@ -99,7 +106,7 @@ func TestHTTP_NodesPrefixList(t *testing.T) {
// Check the nodes
n := obj.([]*structs.NodeListStub)
if len(n) != 3 {
if len(n) != 2 {
t.Fatalf("bad: %#v", n)
}
})

View File

@ -26,10 +26,12 @@ General Options:
` + generalOptionsUsage() + `
Alloc Status Options:
-short
Display short output. Shows only the most recent task event.
-verbose
Show full information.
`
return strings.TrimSpace(helpText)
@ -40,11 +42,12 @@ func (c *AllocStatusCommand) Synopsis() string {
}
func (c *AllocStatusCommand) Run(args []string) int {
var short bool
var short, verbose bool
flags := c.Meta.FlagSet("alloc-status", FlagSetClient)
flags.Usage = func() { c.Ui.Output(c.Help()) }
flags.BoolVar(&short, "short", false, "")
flags.BoolVar(&verbose, "verbose", false, "")
if err := flags.Parse(args); err != nil {
return 1
@ -65,6 +68,12 @@ func (c *AllocStatusCommand) Run(args []string) int {
return 1
}
// Truncate the id unless full length is requested
length := shortId
if verbose {
length = fullId
}
// Query the allocation info
alloc, _, err := client.Allocations().Info(allocID, nil)
if err != nil {
@ -83,12 +92,13 @@ func (c *AllocStatusCommand) Run(args []string) int {
out[0] = "ID|EvalID|JobID|TaskGroup|DesiredStatus|ClientStatus"
for i, alloc := range allocs {
out[i+1] = fmt.Sprintf("%s|%s|%s|%s|%s|%s",
alloc.ID,
alloc.EvalID,
alloc.ID[:length],
alloc.EvalID[:length],
alloc.JobID,
alloc.TaskGroup,
alloc.DesiredStatus,
alloc.ClientStatus)
alloc.ClientStatus,
)
}
c.Ui.Output(fmt.Sprintf("Prefix matched multiple allocations\n\n%s", formatList(out)))
return 0
@ -103,10 +113,10 @@ func (c *AllocStatusCommand) Run(args []string) int {
// Format the allocation data
basic := []string{
fmt.Sprintf("ID|%s", alloc.ID),
fmt.Sprintf("EvalID|%s", alloc.EvalID),
fmt.Sprintf("ID|%s", alloc.ID[:length]),
fmt.Sprintf("EvalID|%s", alloc.EvalID[:length]),
fmt.Sprintf("Name|%s", alloc.Name),
fmt.Sprintf("NodeID|%s", alloc.NodeID),
fmt.Sprintf("NodeID|%s", alloc.NodeID[:length]),
fmt.Sprintf("JobID|%s", alloc.JobID),
fmt.Sprintf("ClientStatus|%s", alloc.ClientStatus),
fmt.Sprintf("NodesEvaluated|%d", alloc.Metrics.NodesEvaluated),
@ -126,7 +136,7 @@ func (c *AllocStatusCommand) Run(args []string) int {
// Format the detailed status
c.Ui.Output("\n==> Status")
dumpAllocStatus(c.Ui, alloc)
dumpAllocStatus(c.Ui, alloc, length)
return 0
}

View File

@ -29,7 +29,13 @@ Usage: nomad eval-monitor [options] <evaluation>
General Options:
` + generalOptionsUsage()
` + generalOptionsUsage() + `
Eval Monitor Options:
-verbose
Show full information.
`
return strings.TrimSpace(helpText)
}
@ -38,12 +44,22 @@ func (c *EvalMonitorCommand) Synopsis() string {
}
func (c *EvalMonitorCommand) Run(args []string) int {
var verbose bool
flags := c.Meta.FlagSet("eval-monitor", FlagSetClient)
flags.Usage = func() { c.Ui.Output(c.Help()) }
flags.BoolVar(&verbose, "verbose", false, "")
if err := flags.Parse(args); err != nil {
return 1
}
// Truncate the id unless full length is requested
length := shortId
if verbose {
length = fullId
}
// Check that we got exactly one eval ID
args = flags.Args()
if len(args) != 1 {
@ -60,6 +76,6 @@ func (c *EvalMonitorCommand) Run(args []string) int {
}
// Start monitoring
mon := newMonitor(c.Ui, client)
mon := newMonitor(c.Ui, client, length)
return mon.monitor(evalID, true)
}

View File

@ -37,7 +37,7 @@ func TestEvalMonitorCommand_Fails(t *testing.T) {
ui.ErrorWriter.Reset()
// Fails on connection failure
if code := cmd.Run([]string{"-address=nope", "nope"}); code != 1 {
if code := cmd.Run([]string{"-address=nope", "12345678-abcd-efab-cdef-123456789abc"}); code != 1 {
t.Fatalf("expected exit code 1, got: %d", code)
}
if out := ui.ErrorWriter.String(); !strings.Contains(out, "Error reading evaluation") {

View File

@ -15,6 +15,10 @@ const (
// Names of environment variables used to supply various
// config options to the Nomad CLI.
EnvNomadAddress = "NOMAD_ADDR"
// Constants for CLI identifier length
shortId = 8
fullId = 36
)
// FlagSetFlags is an enum to define what flags are present in the

View File

@ -61,12 +61,16 @@ type monitor struct {
client *api.Client
state *evalState
// length determines the number of characters for identifiers in the ui.
length int
sync.Mutex
}
// newMonitor returns a new monitor. The returned monitor will
// write output information to the provided ui.
func newMonitor(ui cli.Ui, client *api.Client) *monitor {
// write output information to the provided ui. The length parameter determines
// the number of characters for identifiers in the ui.
func newMonitor(ui cli.Ui, client *api.Client, length int) *monitor {
mon := &monitor{
ui: &cli.PrefixedUi{
InfoPrefix: "==> ",
@ -76,6 +80,7 @@ func newMonitor(ui cli.Ui, client *api.Client) *monitor {
},
client: client,
state: newEvalState(),
length: length,
}
return mon
}
@ -97,7 +102,7 @@ func (m *monitor) update(update *evalState) {
// Check if the evaluation was triggered by a node
if existing.node == "" && update.node != "" {
m.ui.Output(fmt.Sprintf("Evaluation triggered by node %q",
update.node))
update.node[:m.length]))
}
// Check if the evaluation was triggered by a job
@ -123,7 +128,7 @@ func (m *monitor) update(update *evalState) {
// Generate a more descriptive error for why the allocation
// failed and dump it to the screen
if alloc.full != nil {
dumpAllocStatus(m.ui, alloc.full)
dumpAllocStatus(m.ui, alloc.full, m.length)
}
case alloc.index < update.index:
@ -131,13 +136,13 @@ func (m *monitor) update(update *evalState) {
// create index indicates modification
m.ui.Output(fmt.Sprintf(
"Allocation %q modified: node %q, group %q",
alloc.id, alloc.node, alloc.group))
alloc.id[:m.length], alloc.node[:m.length], alloc.group))
case alloc.desired == structs.AllocDesiredStatusRun:
// New allocation with desired status running
m.ui.Output(fmt.Sprintf(
"Allocation %q created: node %q, group %q",
alloc.id, alloc.node, alloc.group))
alloc.id[:m.length], alloc.node[:m.length], alloc.group))
}
} else {
switch {
@ -145,7 +150,7 @@ func (m *monitor) update(update *evalState) {
// Allocation status has changed
m.ui.Output(fmt.Sprintf(
"Allocation %q status changed: %q -> %q (%s)",
alloc.id, existing.client, alloc.client, alloc.clientDesc))
alloc.id[:m.length], existing.client, alloc.client, alloc.clientDesc))
}
}
}
@ -175,10 +180,14 @@ func (m *monitor) monitor(evalID string, allowPrefix bool) int {
// carry that status into the return code.
var schedFailure bool
// The user may have specified a prefix as the eval ID. We need to look up
// the full ID from the database first. Since we do this in a loop we need a
// variable to keep track of whether we've already written the header message.
var headerWritten bool
// Add the initial pending state
m.update(newEvalState())
m.ui.Info(fmt.Sprintf("Monitoring evaluation %q", evalID))
for {
// Query the evaluation
eval, _, err := m.client.Evaluations().Info(evalID, nil)
@ -203,7 +212,7 @@ func (m *monitor) monitor(evalID string, allowPrefix bool) int {
out[0] = "ID|Priority|Type|TriggeredBy|Status"
for i, eval := range evals {
out[i+1] = fmt.Sprintf("%s|%d|%s|%s|%s",
eval.ID,
eval.ID[:m.length],
eval.Priority,
eval.Type,
eval.TriggeredBy,
@ -219,6 +228,11 @@ func (m *monitor) monitor(evalID string, allowPrefix bool) int {
}
}
if !headerWritten {
m.ui.Info(fmt.Sprintf("Monitoring evaluation %q", eval.ID[:m.length]))
headerWritten = true
}
// Create the new eval state.
state := newEvalState()
state.status = eval.Status
@ -267,7 +281,7 @@ func (m *monitor) monitor(evalID string, allowPrefix bool) int {
switch eval.Status {
case structs.EvalStatusComplete, structs.EvalStatusFailed:
m.ui.Info(fmt.Sprintf("Evaluation %q finished with status %q",
eval.ID, eval.Status))
eval.ID[:m.length], eval.Status))
default:
// Wait for the next update
time.Sleep(updateWait)
@ -302,10 +316,10 @@ func (m *monitor) monitor(evalID string, allowPrefix bool) int {
// dumpAllocStatus is a helper to generate a more user-friendly error message
// for scheduling failures, displaying a high level status of why the job
// could not be scheduled out.
func dumpAllocStatus(ui cli.Ui, alloc *api.Allocation) {
func dumpAllocStatus(ui cli.Ui, alloc *api.Allocation, length int) {
// Print filter stats
ui.Output(fmt.Sprintf("Allocation %q status %q (%d/%d nodes filtered)",
alloc.ID, alloc.ClientStatus,
alloc.ID[:length], alloc.ClientStatus,
alloc.Metrics.NodesFiltered, alloc.Metrics.NodesEvaluated))
// Print a helpful message if we have an eligibility problem

View File

@ -12,7 +12,7 @@ import (
func TestMonitor_Update_Eval(t *testing.T) {
ui := new(cli.MockUi)
mon := newMonitor(ui, nil)
mon := newMonitor(ui, nil, fullId)
// Evals triggered by jobs log
state := &evalState{
@ -30,12 +30,12 @@ func TestMonitor_Update_Eval(t *testing.T) {
// Evals triggered by nodes log
state = &evalState{
status: structs.EvalStatusPending,
node: "node1",
node: "12345678-abcd-efab-cdef-123456789abc",
}
mon.update(state)
out = ui.OutputWriter.String()
if !strings.Contains(out, "node1") {
if !strings.Contains(out, "12345678-abcd-efab-cdef-123456789abc") {
t.Fatalf("missing node\n\n%s", out)
}
@ -54,7 +54,7 @@ func TestMonitor_Update_Eval(t *testing.T) {
// Status change sends more logs
state = &evalState{
status: structs.EvalStatusComplete,
node: "node1",
node: "12345678-abcd-efab-cdef-123456789abc",
}
mon.update(state)
out = ui.OutputWriter.String()
@ -65,15 +65,15 @@ func TestMonitor_Update_Eval(t *testing.T) {
func TestMonitor_Update_Allocs(t *testing.T) {
ui := new(cli.MockUi)
mon := newMonitor(ui, nil)
mon := newMonitor(ui, nil, fullId)
// New allocations write new logs
state := &evalState{
allocs: map[string]*allocState{
"alloc1": &allocState{
id: "alloc1",
id: "87654321-abcd-efab-cdef-123456789abc",
group: "group1",
node: "node1",
node: "12345678-abcd-efab-cdef-123456789abc",
desired: structs.AllocDesiredStatusRun,
client: structs.AllocClientStatusPending,
index: 1,
@ -84,13 +84,13 @@ func TestMonitor_Update_Allocs(t *testing.T) {
// Logs were output
out := ui.OutputWriter.String()
if !strings.Contains(out, "alloc1") {
if !strings.Contains(out, "87654321-abcd-efab-cdef-123456789abc") {
t.Fatalf("missing alloc\n\n%s", out)
}
if !strings.Contains(out, "group1") {
t.Fatalf("missing group\n\n%s", out)
}
if !strings.Contains(out, "node1") {
if !strings.Contains(out, "12345678-abcd-efab-cdef-123456789abc") {
t.Fatalf("missing node\n\n%s", out)
}
if !strings.Contains(out, "created") {
@ -109,9 +109,9 @@ func TestMonitor_Update_Allocs(t *testing.T) {
state = &evalState{
allocs: map[string]*allocState{
"alloc1": &allocState{
id: "alloc1",
id: "87654321-abcd-efab-cdef-123456789abc",
group: "group1",
node: "node1",
node: "12345678-abcd-efab-cdef-123456789abc",
desired: structs.AllocDesiredStatusRun,
client: structs.AllocClientStatusRunning,
index: 2,
@ -122,7 +122,7 @@ func TestMonitor_Update_Allocs(t *testing.T) {
// Updates were logged
out = ui.OutputWriter.String()
if !strings.Contains(out, "alloc1") {
if !strings.Contains(out, "87654321-abcd-efab-cdef-123456789abc") {
t.Fatalf("missing alloc\n\n%s", out)
}
if !strings.Contains(out, "pending") {
@ -135,13 +135,13 @@ func TestMonitor_Update_Allocs(t *testing.T) {
func TestMonitor_Update_SchedulingFailure(t *testing.T) {
ui := new(cli.MockUi)
mon := newMonitor(ui, nil)
mon := newMonitor(ui, nil, shortId)
// New allocs with desired status failed warns
state := &evalState{
allocs: map[string]*allocState{
"alloc2": &allocState{
id: "alloc2",
id: "87654321-dcba-efab-cdef-123456789abc",
group: "group2",
desired: structs.AllocDesiredStatusFailed,
desiredDesc: "something failed",
@ -151,7 +151,7 @@ func TestMonitor_Update_SchedulingFailure(t *testing.T) {
// Attach the full failed allocation
full: &api.Allocation{
ID: "alloc2",
ID: "87654321-dcba-efab-cdef-123456789abc",
TaskGroup: "group2",
ClientStatus: structs.AllocClientStatusFailed,
DesiredStatus: structs.AllocDesiredStatusFailed,
@ -197,7 +197,7 @@ func TestMonitor_Update_SchedulingFailure(t *testing.T) {
func TestMonitor_Update_AllocModification(t *testing.T) {
ui := new(cli.MockUi)
mon := newMonitor(ui, nil)
mon := newMonitor(ui, nil, fullId)
// New allocs with a create index lower than the
// eval create index are logged as modifications
@ -205,8 +205,8 @@ func TestMonitor_Update_AllocModification(t *testing.T) {
index: 2,
allocs: map[string]*allocState{
"alloc3": &allocState{
id: "alloc3",
node: "node1",
id: "87654321-abcd-bafe-cdef-123456789abc",
node: "12345678-abcd-efab-cdef-123456789abc",
group: "group2",
index: 1,
},
@ -216,13 +216,13 @@ func TestMonitor_Update_AllocModification(t *testing.T) {
// Modification was logged
out := ui.OutputWriter.String()
if !strings.Contains(out, "alloc3") {
if !strings.Contains(out, "87654321-abcd-bafe-cdef-123456789abc") {
t.Fatalf("missing alloc\n\n%s", out)
}
if !strings.Contains(out, "group2") {
t.Fatalf("missing group\n\n%s", out)
}
if !strings.Contains(out, "node1") {
if !strings.Contains(out, "12345678-abcd-efab-cdef-123456789abc") {
t.Fatalf("missing node\n\n%s", out)
}
if !strings.Contains(out, "modified") {
@ -236,7 +236,7 @@ func TestMonitor_Monitor(t *testing.T) {
// Create the monitor
ui := new(cli.MockUi)
mon := newMonitor(ui, client)
mon := newMonitor(ui, client, fullId)
// Submit a job - this creates a new evaluation we can monitor
job := testJob("job1")
@ -282,7 +282,7 @@ func TestMonitor_MonitorWithPrefix(t *testing.T) {
// Create the monitor
ui := new(cli.MockUi)
mon := newMonitor(ui, client)
mon := newMonitor(ui, client, shortId)
// Submit a job - this creates a new evaluation we can monitor
job := testJob("job1")
@ -296,7 +296,7 @@ func TestMonitor_MonitorWithPrefix(t *testing.T) {
doneCh := make(chan struct{})
go func() {
defer close(doneCh)
code = mon.monitor(evalID[:4], true)
code = mon.monitor(evalID[:8], true)
}()
// Wait for completion
@ -314,9 +314,12 @@ func TestMonitor_MonitorWithPrefix(t *testing.T) {
// Check the output
out := ui.OutputWriter.String()
if !strings.Contains(out, evalID) {
if !strings.Contains(out, evalID[:8]) {
t.Fatalf("missing eval\n\n%s", out)
}
if strings.Contains(out, evalID) {
t.Fatalf("expected truncated eval id, got: %s", out)
}
if !strings.Contains(out, "finished with status") {
t.Fatalf("missing final status\n\n%s", out)
}
@ -327,7 +330,7 @@ func TestMonitor_DumpAllocStatus(t *testing.T) {
// Create an allocation and dump its status to the UI
alloc := &api.Allocation{
ID: "alloc1",
ID: "87654321-abcd-efab-cdef-123456789abc",
TaskGroup: "group1",
ClientStatus: structs.AllocClientStatusRunning,
Metrics: &api.AllocationMetric{
@ -345,11 +348,11 @@ func TestMonitor_DumpAllocStatus(t *testing.T) {
},
},
}
dumpAllocStatus(ui, alloc)
dumpAllocStatus(ui, alloc, fullId)
// Check the output
out := ui.OutputWriter.String()
if !strings.Contains(out, "alloc1") {
if !strings.Contains(out, "87654321-abcd-efab-cdef-123456789abc") {
t.Fatalf("missing alloc\n\n%s", out)
}
if !strings.Contains(out, structs.AllocClientStatusRunning) {
@ -375,11 +378,17 @@ func TestMonitor_DumpAllocStatus(t *testing.T) {
// Dumping alloc status with no eligible nodes adds a warning
alloc.Metrics.NodesEvaluated = 0
dumpAllocStatus(ui, alloc)
dumpAllocStatus(ui, alloc, shortId)
// Check the output
out = ui.OutputWriter.String()
if !strings.Contains(out, "No nodes were eligible") {
t.Fatalf("missing eligibility warning\n\n%s", out)
}
if strings.Contains(out, "87654321-abcd-efab-cdef-123456789abc") {
t.Fatalf("expected truncated id, got %s", out)
}
if !strings.Contains(out, "87654321") {
t.Fatalf("expected alloc id, got %s", out)
}
}

View File

@ -28,7 +28,7 @@ func TestNodeDrainCommand_Fails(t *testing.T) {
ui.ErrorWriter.Reset()
// Fails on connection failure
if code := cmd.Run([]string{"-address=nope", "-enable", "nope"}); code != 1 {
if code := cmd.Run([]string{"-address=nope", "-enable", "12345678-abcd-efab-cdef-123456789abc"}); code != 1 {
t.Fatalf("expected exit code 1, got: %d", code)
}
if out := ui.ErrorWriter.String(); !strings.Contains(out, "Error toggling") {
@ -37,7 +37,7 @@ func TestNodeDrainCommand_Fails(t *testing.T) {
ui.ErrorWriter.Reset()
// Fails on non-existent node
if code := cmd.Run([]string{"-address=" + url, "-enable", "nope"}); code != 1 {
if code := cmd.Run([]string{"-address=" + url, "-enable", "12345678-abcd-efab-cdef-123456789abc"}); code != 1 {
t.Fatalf("expected exit 1, got: %d", code)
}
if out := ui.ErrorWriter.String(); !strings.Contains(out, "No node(s) with prefix or id") {
@ -46,7 +46,7 @@ func TestNodeDrainCommand_Fails(t *testing.T) {
ui.ErrorWriter.Reset()
// Fails if both enable and disable specified
if code := cmd.Run([]string{"-enable", "-disable", "nope"}); code != 1 {
if code := cmd.Run([]string{"-enable", "-disable", "12345678-abcd-efab-cdef-123456789abc"}); code != 1 {
t.Fatalf("expected exit 1, got: %d", code)
}
if out := ui.ErrorWriter.String(); !strings.Contains(out, cmd.Help()) {
@ -55,7 +55,7 @@ func TestNodeDrainCommand_Fails(t *testing.T) {
ui.ErrorWriter.Reset()
// Fails if neither enable or disable specified
if code := cmd.Run([]string{"nope"}); code != 1 {
if code := cmd.Run([]string{"12345678-abcd-efab-cdef-123456789abc"}); code != 1 {
t.Fatalf("expected exit 1, got: %d", code)
}
if out := ui.ErrorWriter.String(); !strings.Contains(out, cmd.Help()) {

View File

@ -31,6 +31,9 @@ Node Status Options:
-short
Display short output. Used only when a single node is being
queried, and drops verbose output about node allocations.
-verbose
Display full information.
`
return strings.TrimSpace(helpText)
}
@ -40,11 +43,12 @@ func (c *NodeStatusCommand) Synopsis() string {
}
func (c *NodeStatusCommand) Run(args []string) int {
var short bool
var short, verbose bool
flags := c.Meta.FlagSet("node-status", FlagSetClient)
flags.Usage = func() { c.Ui.Output(c.Help()) }
flags.BoolVar(&short, "short", false, "")
flags.BoolVar(&verbose, "verbose", false, "")
if err := flags.Parse(args); err != nil {
return 1
@ -57,6 +61,12 @@ func (c *NodeStatusCommand) Run(args []string) int {
return 1
}
// Truncate the id unless full length is requested
length := shortId
if verbose {
length = fullId
}
// Get the HTTP client
client, err := c.Meta.Client()
if err != nil {
@ -83,7 +93,7 @@ func (c *NodeStatusCommand) Run(args []string) int {
out[0] = "ID|DC|Name|Class|Drain|Status"
for i, node := range nodes {
out[i+1] = fmt.Sprintf("%s|%s|%s|%s|%v|%s",
node.ID,
node.ID[:length],
node.Datacenter,
node.Name,
node.NodeClass,
@ -118,7 +128,7 @@ func (c *NodeStatusCommand) Run(args []string) int {
out[0] = "ID|DC|Name|Class|Drain|Status"
for i, node := range nodes {
out[i+1] = fmt.Sprintf("%s|%s|%s|%s|%v|%s",
node.ID,
node.ID[:length],
node.Datacenter,
node.Name,
node.NodeClass,
@ -153,7 +163,7 @@ func (c *NodeStatusCommand) Run(args []string) int {
// Format the output
basic := []string{
fmt.Sprintf("ID|%s", node.ID),
fmt.Sprintf("ID|%s", node.ID[:length]),
fmt.Sprintf("Name|%s", node.Name),
fmt.Sprintf("Class|%s", node.NodeClass),
fmt.Sprintf("Datacenter|%s", node.Datacenter),
@ -176,8 +186,8 @@ func (c *NodeStatusCommand) Run(args []string) int {
allocs[0] = "ID|EvalID|JobID|TaskGroup|DesiredStatus|ClientStatus"
for i, alloc := range nodeAllocs {
allocs[i+1] = fmt.Sprintf("%s|%s|%s|%s|%s|%s",
alloc.ID,
alloc.EvalID,
alloc.ID[:length],
alloc.EvalID[:length],
alloc.JobID,
alloc.TaskGroup,
alloc.DesiredStatus,

View File

@ -86,7 +86,24 @@ func TestNodeStatusCommand_Run(t *testing.T) {
if !strings.Contains(out, "Allocations") {
t.Fatalf("expected allocations, got: %s", out)
}
if strings.Contains(out, nodeID) {
t.Fatalf("expected truncated node id, got: %s", out)
}
if !strings.Contains(out, nodeID[:8]) {
t.Fatalf("expected node id %q, got: %s", nodeID[:8], out)
}
ui.OutputWriter.Reset()
// Request full id output
if code := cmd.Run([]string{"-address=" + url, "-verbose", nodeID[:4]}); code != 0 {
t.Fatalf("expected exit 0, got: %d", code)
}
out = ui.OutputWriter.String()
if !strings.Contains(out, nodeID) {
t.Fatalf("expected full node id %q, got: %s", nodeID, out)
}
ui.OutputWriter.Reset()
}
func TestNodeStatusCommand_Fails(t *testing.T) {
@ -115,7 +132,7 @@ func TestNodeStatusCommand_Fails(t *testing.T) {
ui.ErrorWriter.Reset()
// Fails on non-existent node
if code := cmd.Run([]string{"-address=" + url, "nope"}); code != 1 {
if code := cmd.Run([]string{"-address=" + url, "12345678-abcd-efab-cdef-123456789abc"}); code != 1 {
t.Fatalf("expected exit 1, got: %d", code)
}
if out := ui.ErrorWriter.String(); !strings.Contains(out, "No node(s) with prefix") {

View File

@ -47,6 +47,9 @@ Run Options:
submission, the evaluation ID will be printed to the screen.
You can use this ID to start a monitor using the eval-monitor
command later if needed.
-verbose
Display full information.
`
return strings.TrimSpace(helpText)
}
@ -56,16 +59,23 @@ func (c *RunCommand) Synopsis() string {
}
func (c *RunCommand) Run(args []string) int {
var detach bool
var detach, verbose bool
flags := c.Meta.FlagSet("run", FlagSetClient)
flags.Usage = func() { c.Ui.Output(c.Help()) }
flags.BoolVar(&detach, "detach", false, "")
flags.BoolVar(&verbose, "verbose", false, "")
if err := flags.Parse(args); err != nil {
return 1
}
// Truncate the id unless full length is requested
length := shortId
if verbose {
length = fullId
}
// Check that we got exactly one node
args = flags.Args()
if len(args) != 1 {
@ -127,7 +137,7 @@ func (c *RunCommand) Run(args []string) int {
}
// Detach was not specified, so start monitoring
mon := newMonitor(c.Ui, client)
mon := newMonitor(c.Ui, client, length)
return mon.monitor(evalID, false)
}

View File

@ -13,6 +13,7 @@ import (
type StatusCommand struct {
Meta
length int
}
func (c *StatusCommand) Help() string {
@ -32,6 +33,9 @@ Status Options:
Display short output. Used only when a single job is being
queried, and drops verbose information about allocations
and evaluations.
-verbose
Display full information.
`
return strings.TrimSpace(helpText)
}
@ -41,11 +45,12 @@ func (c *StatusCommand) Synopsis() string {
}
func (c *StatusCommand) Run(args []string) int {
var short bool
var short, verbose bool
flags := c.Meta.FlagSet("status", FlagSetClient)
flags.Usage = func() { c.Ui.Output(c.Help()) }
flags.BoolVar(&short, "short", false, "")
flags.BoolVar(&verbose, "verbose", false, "")
if err := flags.Parse(args); err != nil {
return 1
@ -58,6 +63,12 @@ func (c *StatusCommand) Run(args []string) int {
return 1
}
// Truncate the id unless full length is requested
c.length = shortId
if verbose {
c.length = fullId
}
// Get the HTTP client
client, err := c.Meta.Client()
if err != nil {
@ -230,7 +241,7 @@ func (c *StatusCommand) outputJobInfo(client *api.Client, job *api.Job) error {
evals[0] = "ID|Priority|TriggeredBy|Status"
for i, eval := range jobEvals {
evals[i+1] = fmt.Sprintf("%s|%d|%s|%s",
eval.ID,
eval.ID[:c.length],
eval.Priority,
eval.TriggeredBy,
eval.Status)
@ -241,9 +252,9 @@ func (c *StatusCommand) outputJobInfo(client *api.Client, job *api.Job) error {
allocs[0] = "ID|EvalID|NodeID|TaskGroup|Desired|Status"
for i, alloc := range jobAllocs {
allocs[i+1] = fmt.Sprintf("%s|%s|%s|%s|%s|%s",
alloc.ID,
alloc.EvalID,
alloc.NodeID,
alloc.ID[:c.length],
alloc.EvalID[:c.length],
alloc.NodeID[:c.length],
alloc.TaskGroup,
alloc.DesiredStatus,
alloc.ClientStatus)

View File

@ -33,7 +33,8 @@ func TestStatusCommand_Run(t *testing.T) {
// Register two jobs
job1 := testJob("job1_sfx")
if _, _, err := client.Jobs().Register(job1, nil); err != nil {
evalId, _, err := client.Jobs().Register(job1, nil)
if err != nil {
t.Fatalf("err: %s", err)
}
job2 := testJob("job2_sfx")
@ -101,6 +102,19 @@ func TestStatusCommand_Run(t *testing.T) {
if strings.Contains(out, "Allocations") {
t.Fatalf("should not dump allocations")
}
if strings.Contains(out, evalId) {
t.Fatalf("should not contain full identifiers, got %s", out)
}
ui.OutputWriter.Reset()
// Request full identifiers
if code := cmd.Run([]string{"-address=" + url, "-verbose", "job1"}); code != 0 {
t.Fatalf("expected exit 0, got: %d", code)
}
out = ui.OutputWriter.String()
if !strings.Contains(out, evalId) {
t.Fatalf("should contain full identifiers, got %s", out)
}
}
func TestStatusCommand_Fails(t *testing.T) {

View File

@ -30,6 +30,9 @@ Stop Options:
deregister command is submitted, a new evaluation ID is printed
to the screen, which can be used to call up a monitor later if
needed using the eval-monitor command.
-verbose
Display full information.
`
return strings.TrimSpace(helpText)
}
@ -39,16 +42,23 @@ func (c *StopCommand) Synopsis() string {
}
func (c *StopCommand) Run(args []string) int {
var detach bool
var detach, verbose bool
flags := c.Meta.FlagSet("stop", FlagSetClient)
flags.Usage = func() { c.Ui.Output(c.Help()) }
flags.BoolVar(&detach, "detach", false, "")
flags.BoolVar(&verbose, "verbose", false, "")
if err := flags.Parse(args); err != nil {
return 1
}
// Truncate the id unless full length is requested
length := shortId
if verbose {
length = fullId
}
// Check that we got exactly one job
args = flags.Args()
if len(args) != 1 {
@ -115,6 +125,6 @@ func (c *StopCommand) Run(args []string) int {
}
// Start monitoring the stop eval
mon := newMonitor(c.Ui, client)
mon := newMonitor(c.Ui, client, length)
return mon.monitor(evalID, false)
}

View File

@ -217,7 +217,7 @@ func Alloc() *structs.Allocation {
alloc := &structs.Allocation{
ID: structs.GenerateUUID(),
EvalID: structs.GenerateUUID(),
NodeID: "foo",
NodeID: "12345678-abcd-efab-cdef-123456789abc",
TaskGroup: "web",
Resources: &structs.Resources{
CPU: 500,

View File

@ -359,7 +359,7 @@ func TestClientEndpoint_GetNode(t *testing.T) {
}
// Lookup non-existing node
get.NodeID = "foobarbaz"
get.NodeID = "12345678-abcd-efab-cdef-123456789abc"
if err := msgpackrpc.CallWithCodec(codec, "Node.GetNode", get, &resp2); err != nil {
t.Fatalf("err: %v", err)
}

View File

@ -322,7 +322,7 @@ func TestPlanApply_EvalNodePlan_NodeNotExist(t *testing.T) {
state := testStateStore(t)
snap, _ := state.Snapshot()
nodeID := "foo"
nodeID := "12345678-abcd-efab-cdef-123456789abc"
alloc := mock.Alloc()
plan := &structs.Plan{
NodeAllocation: map[string][]*structs.Allocation{

View File

@ -66,9 +66,8 @@ func nodeTableSchema() *memdb.TableSchema {
Name: "id",
AllowMissing: false,
Unique: true,
Indexer: &memdb.StringFieldIndex{
Field: "ID",
Lowercase: true,
Indexer: &memdb.UUIDFieldIndex{
Field: "ID",
},
},
},

View File

@ -277,7 +277,7 @@ func TestStateStore_NodesByIDPrefix(t *testing.T) {
t.Fatalf("err: %v", err)
}
iter, err = state.NodesByIDPrefix("111")
iter, err = state.NodesByIDPrefix("1111")
if err != nil {
t.Fatalf("err: %v", err)
}

View File

@ -464,7 +464,7 @@ func TestServiceSched_JobDeregister(t *testing.T) {
plan := h.Plans[0]
// Ensure the plan evicted all nodes
if len(plan.NodeUpdate["foo"]) != len(allocs) {
if len(plan.NodeUpdate["12345678-abcd-efab-cdef-123456789abc"]) != len(allocs) {
t.Fatalf("bad: %#v", plan)
}

View File

@ -275,7 +275,7 @@ func TestTaintedNodes(t *testing.T) {
&structs.Allocation{NodeID: node2.ID},
&structs.Allocation{NodeID: node3.ID},
&structs.Allocation{NodeID: node4.ID},
&structs.Allocation{NodeID: "blah"},
&structs.Allocation{NodeID: "12345678-abcd-efab-cdef-123456789abc"},
}
tainted, err := taintedNodes(state, allocs)
if err != nil {
@ -288,7 +288,7 @@ func TestTaintedNodes(t *testing.T) {
if tainted[node1.ID] || tainted[node2.ID] {
t.Fatalf("Bad: %v", tainted)
}
if !tainted[node3.ID] || !tainted[node4.ID] || !tainted["blah"] {
if !tainted[node3.ID] || !tainted[node4.ID] || !tainted["12345678-abcd-efab-cdef-123456789abc"] {
t.Fatalf("Bad: %v", tainted)
}
}

View File

@ -26,21 +26,22 @@ allocations and information will be displayed.
## General Options
<%= general_options_usage %>
#
## Status Options
* `-short`: Display short output. Shows only the most recent task event.
* `-verbose`: Show full information.
## Examples
Short status of an alloc:
```
$ nomad alloc-status --short a7365fe4-8b9f-4284-612d-a101fb41e773
ID = a7365fe4-8b9f-4284-612d-a101fb41e773
EvalID = 44c2d9ed-6377-ca3d-14a8-b2e6327230ce
$ nomad alloc-status --short a7365fe4
ID = a7365fe4
EvalID = 44c2d9ed
Name = example.cache[0]
NodeID = e55859b1-4330-f00b-da49-8a292432ead3
NodeID = e55859b1
JobID = example
ClientStatus = running
NodesEvaluated = 1
@ -57,18 +58,18 @@ redis running Started 02:29:40 11/17/15
web running Started 02:30:41 11/17/15
==> Status
Allocation "a7365fe4-8b9f-4284-612d-a101fb41e773" status "running" (0/1 nodes filtered)
* Score "e55859b1-4330-f00b-da49-8a292432ead3.binpack" = 10.334026
Allocation "a7365fe4" status "running" (0/1 nodes filtered)
* Score "e55859b1.binpack" = 10.334026
```
Full status of an alloc, which shows one of the tasks dying and then being restarted:
```
$ nomad alloc-status a7365fe4-8b9f-4284-612d-a101fb41e773
ID = a7365fe4-8b9f-4284-612d-a101fb41e773
EvalID = 44c2d9ed-6377-ca3d-14a8-b2e6327230ce
$ nomad alloc-status a7365fe4
ID = a7365fe4
EvalID = 44c2d9ed
Name = example.cache[0]
NodeID = e55859b1-4330-f00b-da49-8a292432ead3
NodeID = e55859b1
JobID = example
ClientStatus = running
NodesEvaluated = 1
@ -90,7 +91,7 @@ Time Type Description
02:29:40 11/17/15 Started <none>
==> Status
Allocation "a7365fe4-8b9f-4284-612d-a101fb41e773" status "running" (0/1 nodes filtered)
* Score "e55859b1-4330-f00b-da49-8a292432ead3.binpack" = 10.334026
Allocation "a7365fe4" status "running" (0/1 nodes filtered)
* Score "e55859b1.binpack" = 10.334026
```

View File

@ -39,14 +39,18 @@ indicated by exit code 1.
<%= general_options_usage %>
## Status Options
* `-verbose`: Show full information.
## Examples
Monitor an existing evaluation
```
$ nomad eval-monitor 8262bc83-3be0-2894-237c-c06ab5e14785
==> Monitoring evaluation "8262bc83-3be0-2894-237c-c06ab5e14785"
Allocation "bd6bd0de-1c97-1e6c-ab8b-106618a0393c" created: node "6f299da5-8e4e-0e48-93f4-f544f4b948a8", group "group1"
$ nomad eval-monitor 8262bc83
==> Monitoring evaluation "8262bc83"
Allocation "bd6bd0de" created: node "6f299da5", group "group1"
Evaluation status changed: "pending" -> "complete"
==> Evaluation "8262bc83-3be0-2894-237c-c06ab5e14785" finished with status "complete"
==> Evaluation "8262bc83" finished with status "complete"
```

View File

@ -34,6 +34,7 @@ Otherwise, a list of matching nodes and information will be displayed.
* `-short`: Display short output. Used only when querying a single node. Drops
verbose information about node allocations.
* `-verbose`: Show full information.
## Examples
@ -41,16 +42,16 @@ List view:
```
$ nomad node-status
ID DC Name Drain Status
a72dfba2-c01f-49de-5ac6-e3391de2c50c dc1 node1 false ready
1f3f03ea-a420-b64b-c73b-51290ed7f481 dc1 node2 false ready
ID DC Name Drain Status
a72dfba2 dc1 node1 false ready
1f3f03ea dc1 node2 false ready
```
Single-node view in short mode:
```
$ nomad node-status -short 1f3f03ea-a420-b64b-c73b-51290ed7f481
ID = 1f3f03ea-a420-b64b-c73b-51290ed7f481
$ nomad node-status -short 1f3f03ea
ID = 1f3f03ea
Name = node2
Class =
Datacenter = dc1
@ -61,8 +62,8 @@ Status = ready
Full output for a single node:
```
$ nomad node-status 1f3f03ea-a420-b64b-c73b-51290ed7f481
ID = 1f3f03ea-a420-b64b-c73b-51290ed7f481
$ nomad node-status 1f3f03ea
ID = 1f3f03ea
Name = node2
Class =
Datacenter = dc1
@ -70,6 +71,6 @@ Drain = false
Status = ready
### Allocations
ID EvalID JobID TaskGroup DesiredStatus ClientStatus
678c51dc-6c55-0ac8-d92d-675a1e8ea6b0 193229c4-aa02-bbe6-f996-fd7d6974a309 job8 grp8 failed failed
ID EvalID JobID TaskGroup DesiredStatus ClientStatus
678c51dc 193229c4 job8 grp8 failed failed
```

View File

@ -42,24 +42,28 @@ client connection issues or internal errors, are indicated by exit code 1.
will be output, which can be used to call the monitor later using the
[eval-monitor](/docs/commands/eval-monitor.html) command.
## Status Options
* `-verbose`: Show full information.
## Examples
Schedule the job contained in the file `job1.nomad`, monitoring placement:
```
$ nomad run job1.nomad
==> Monitoring evaluation "52dee78a-a1f0-95c9-81a5-95b4cbe7f6f8"
Allocation "5e0b39f0-5e69-6fec-d17b-4b8b7d922cc8" created: node "3e84d3d2-8500-42a3-9d1b-7ad88913352b", group "group1"
Allocation "5e0b39f0-5e69-6fec-d17b-4b8b7d922cc8" status changed: "pending" -> "running"
==> Monitoring evaluation "52dee78a"
Allocation "5e0b39f0" created: node "3e84d3d2", group "group1"
Allocation "5e0b39f0" status changed: "pending" -> "running"
Evaluation status changed: "pending" -> "complete"
==> Evaluation "52dee78a-a1f0-95c9-81a5-95b4cbe7f6f8" finished with status "complete"
==> Evaluation "52dee78a" finished with status "complete"
```
Schedule the job contained in `job1.nomad` and return immediately:
```
$ nomad run -detach job1.nomad
4947e728-fb4e-90c6-895a-42479940e0bc
4947e728
```
Schedule a job which cannot get placement. This results in a scheduling failure
@ -67,10 +71,10 @@ and the specifics of the placement are printed:
```
$ nomad run failing.nomad
==> Monitoring evaluation "0d7447d9-43fd-4994-6812-500c93c08fce"
==> Monitoring evaluation "0d7447d9"
Scheduling error for group "group1" (failed to find a node for placement)
Allocation "a739288e-547a-cd26-1355-e92ca10f165c" status "failed" (1/1 nodes filtered)
Allocation "a739288e" status "failed" (1/1 nodes filtered)
* Constraint "$attr.kernel.name = linux" filtered 1 nodes
Evaluation status changed: "pending" -> "complete"
==> Evaluation "0d7447d9-43fd-4994-6812-500c93c08fce" finished with status "complete"
==> Evaluation "0d7447d9" finished with status "complete"
```

View File

@ -32,6 +32,7 @@ the most useful status fields for each.
* `-short`: Display short output. Used only when a single node is being queried.
Drops verbose node allocation data from the output.
* `-verbose`: Show full information.
## Examples
@ -70,10 +71,10 @@ Datacenters = dc1,dc2,dc3
Status = pending
### Evaluations
ID Priority Type TriggeredBy NodeID Status
193229c4-aa02-bbe6-f996-fd7d6974a309 3 service job-register node2 complete
ID Priority Type TriggeredBy NodeID Status
193229c4 3 service job-register node2 complete
### Allocations
ID EvalID NodeID TaskGroup DesiredStatus ClientStatus
678c51dc-6c55-0ac8-d92d-675a1e8ea6b0 193229c4-aa02-bbe6-f996-fd7d6974a309 node2 grp8 failed failed
ID EvalID NodeID TaskGroup DesiredStatus ClientStatus
678c51dc 193229c4 node2 grp8 failed failed
```

View File

@ -37,20 +37,24 @@ reached a terminal state. It is safe to exit the monitor early using ctrl+c.
will be output, which can be used to call the monitor later using the
[eval-monitor](/docs/commands/eval-monitor.html) command.
## Status Options
* `-verbose`: Show full information.
## Examples
Stop the job with ID "job1":
```
$ nomad stop job1
==> Monitoring evaluation "43bfe672-e9a2-1cb8-457a-4c2dca5ce1c7"
==> Monitoring evaluation "43bfe672"
Evaluation status changed: "pending" -> "complete"
==> Evaluation "43bfe672-e9a2-1cb8-457a-4c2dca5ce1c7" finished with status "complete"
==> Evaluation "43bfe672" finished with status "complete"
```
Stop the job with ID "job1" and return immediately:
```
$ nomad stop -detach job1
507d26cb-1d67-672c-da9d-f6053837bb70
507d26cb
```