Add NodeName to the alloc/job status outputs.
Currently, when operators need to log onto a machine where an alloc is running, they have to perform an alloc/job status call and then a second call to discover the node name from the node list. This change updates both the job status and alloc status output to include the node name, making that workflow a single step. Closes #2359 Closes #1180
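For illustration, the basic alloc status block now includes a Node Name row alongside Node ID. The values below are hypothetical, and the Key = Value rendering assumes the CLI's usual formatKV-style output:

ID            = 5f3b6a2c
Eval ID       = 9d2c8f41
Name          = example.cache[0]
Node ID       = f2a3bdab
Node Name     = worker-1
Job ID        = example
Job Version   = 0
Client Status = running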
This commit is contained in:
parent e52b6be10f
commit 9470507cf4
@@ -86,6 +86,7 @@ type Allocation struct {
 	EvalID    string
 	Name      string
 	NodeID    string
+	NodeName  string
 	JobID     string
 	Job       *Job
 	TaskGroup string
@@ -149,6 +150,7 @@ type AllocationListStub struct {
 	Name       string
 	Namespace  string
 	NodeID     string
+	NodeName   string
 	JobID      string
 	JobType    string
 	JobVersion uint64
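Because the new field lands on the api package structs above, Go API consumers get it from a plain list call with no extra node lookup. A minimal sketch, assuming a reachable Nomad agent at the default address; the setup and printed values are illustrative, not part of this commit:

package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/nomad/api"
)

func main() {
	// Connect using the default config (NOMAD_ADDR and friends).
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}

	stubs, _, err := client.Allocations().List(nil)
	if err != nil {
		log.Fatal(err)
	}

	for _, stub := range stubs {
		// NodeName is now populated alongside NodeID, so no second
		// call against the node list is needed to find the machine.
		fmt.Printf("%s -> %s (%s)\n", stub.ID, stub.NodeName, stub.NodeID)
	}
}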
@@ -233,6 +233,7 @@ func formatAllocBasicInfo(alloc *api.Allocation, client *api.Client, uuidLength
 		fmt.Sprintf("Eval ID|%s", limit(alloc.EvalID, uuidLength)),
 		fmt.Sprintf("Name|%s", alloc.Name),
 		fmt.Sprintf("Node ID|%s", limit(alloc.NodeID, uuidLength)),
+		fmt.Sprintf("Node Name|%s", alloc.NodeName),
 		fmt.Sprintf("Job ID|%s", alloc.JobID),
 		fmt.Sprintf("Job Version|%d", getVersion(alloc.Job)),
 		fmt.Sprintf("Client Status|%s", alloc.ClientStatus),
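Each entry above is a pipe-delimited Key|Value pair that the CLI later aligns into columns, so the new Node Name row only has to follow the existing convention. A runnable stand-in for that alignment step, assuming a simple pad-and-join; the real CLI helper delegates to a columnize library instead:

package main

import (
	"fmt"
	"strings"
)

// formatKV is a stand-in: it pads each key to the widest key and joins
// the pair with " = ", which is roughly what the CLI output looks like.
func formatKV(rows []string) string {
	width := 0
	for _, r := range rows {
		if i := strings.IndexByte(r, '|'); i > width {
			width = i
		}
	}
	out := make([]string, len(rows))
	for i, r := range rows {
		k, v, _ := strings.Cut(r, "|")
		out[i] = fmt.Sprintf("%-*s = %s", width, k, v)
	}
	return strings.Join(out, "\n")
}

func main() {
	rows := []string{
		fmt.Sprintf("Node ID|%s", "f2a3bdab"),
		fmt.Sprintf("Node Name|%s", "worker-1"), // the row this commit adds
	}
	fmt.Println(formatKV(rows))
}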
@@ -413,12 +413,13 @@ func formatAllocListStubs(stubs []*api.AllocationListStub, verbose bool, uuidLen
 	allocs := make([]string, len(stubs)+1)
 	if verbose {
-		allocs[0] = "ID|Eval ID|Node ID|Task Group|Version|Desired|Status|Created|Modified"
+		allocs[0] = "ID|Eval ID|Node ID|Node Name|Task Group|Version|Desired|Status|Created|Modified"
 		for i, alloc := range stubs {
-			allocs[i+1] = fmt.Sprintf("%s|%s|%s|%s|%d|%s|%s|%s|%s",
+			allocs[i+1] = fmt.Sprintf("%s|%s|%s|%s|%s|%d|%s|%s|%s|%s",
 				limit(alloc.ID, uuidLength),
 				limit(alloc.EvalID, uuidLength),
 				limit(alloc.NodeID, uuidLength),
+				alloc.NodeName,
 				alloc.TaskGroup,
 				alloc.JobVersion,
 				alloc.DesiredStatus,
 				alloc.ClientStatus,
@@ -427,14 +428,15 @@ func formatAllocListStubs(stubs []*api.AllocationListStub, verbose bool, uuidLen
 				formatUnixNanoTime(alloc.ModifyTime))
 		}
 	} else {
-		allocs[0] = "ID|Node ID|Task Group|Version|Desired|Status|Created|Modified"
+		allocs[0] = "ID|Node ID|Node Name|Task Group|Version|Desired|Status|Created|Modified"
 		for i, alloc := range stubs {
 			now := time.Now()
 			createTimePretty := prettyTimeDiff(time.Unix(0, alloc.CreateTime), now)
 			modTimePretty := prettyTimeDiff(time.Unix(0, alloc.ModifyTime), now)
-			allocs[i+1] = fmt.Sprintf("%s|%s|%s|%d|%s|%s|%s|%s",
+			allocs[i+1] = fmt.Sprintf("%s|%s|%s|%s|%d|%s|%s|%s|%s",
 				limit(alloc.ID, uuidLength),
 				limit(alloc.NodeID, uuidLength),
+				alloc.NodeName,
 				alloc.TaskGroup,
 				alloc.JobVersion,
 				alloc.DesiredStatus,
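The invariant these two hunks maintain: the header row and the Sprintf verb list must grow together, one %s per new column (9 to 10 verbs in the verbose path, 8 to 9 otherwise), or every row drifts out of alignment. A small sketch of that header/row contract, using text/tabwriter as a stand-in for the CLI's real columnizer and hypothetical values:

package main

import (
	"fmt"
	"os"
	"strings"
	"text/tabwriter"
)

func main() {
	// Header and row share the same pipe-delimited shape: one field per
	// column, so the format string needs exactly one verb per header cell.
	rows := []string{
		"ID|Node ID|Node Name|Task Group|Version|Desired|Status|Created|Modified",
		fmt.Sprintf("%s|%s|%s|%s|%d|%s|%s|%s|%s",
			"5f3b6a2c", "f2a3bdab", "worker-1", "cache", 0,
			"run", "running", "5m ago", "2m ago"),
	}

	w := tabwriter.NewWriter(os.Stdout, 2, 0, 2, ' ', 0)
	for _, r := range rows {
		fmt.Fprintln(w, strings.ReplaceAll(r, "|", "\t"))
	}
	w.Flush()
}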
@@ -7154,6 +7154,9 @@ type Allocation struct {
 	// NodeID is the node this is being placed on
 	NodeID string
 
+	// NodeName is the name of the node this is being placed on.
+	NodeName string
+
 	// Job is the parent job of the task group being allocated.
 	// This is copied at allocation time to avoid issues if the job
 	// definition is updated.
@@ -7615,6 +7618,7 @@ func (a *Allocation) Stub() *AllocListStub {
 		Name:       a.Name,
 		Namespace:  a.Namespace,
 		NodeID:     a.NodeID,
+		NodeName:   a.NodeName,
 		JobID:      a.JobID,
 		JobType:    a.Job.Type,
 		JobVersion: a.Job.Version,
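Stub() projects a scalar subset of the full Allocation into the list representation so list endpoints stay cheap, which means any field that should show up in list output, like NodeName here, has to be threaded through this copy by hand. A distilled sketch of the pattern, with the types trimmed to the fields relevant to this commit:

package main

import "fmt"

type Allocation struct {
	ID       string
	NodeID   string
	NodeName string
	// ... many more fields, including the full Job definition
}

type AllocListStub struct {
	ID       string
	NodeID   string
	NodeName string
}

// Stub returns the lightweight list form; forgetting to copy a new field
// here would leave it zero-valued in every list response.
func (a *Allocation) Stub() *AllocListStub {
	return &AllocListStub{
		ID:       a.ID,
		NodeID:   a.NodeID,
		NodeName: a.NodeName,
	}
}

func main() {
	a := &Allocation{ID: "5f3b6a2c", NodeID: "f2a3bdab", NodeName: "worker-1"}
	fmt.Printf("%+v\n", a.Stub())
}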
@@ -7642,6 +7646,7 @@ type AllocListStub struct {
 	Name       string
 	Namespace  string
 	NodeID     string
+	NodeName   string
 	JobID      string
 	JobType    string
 	JobVersion uint64
@@ -495,12 +495,13 @@ func (s *GenericScheduler) computePlacements(destructive, place []placementResul
 				TaskGroup:          tg.Name,
 				Metrics:            s.ctx.Metrics(),
 				NodeID:             option.Node.ID,
+				NodeName:           option.Node.Name,
 				DeploymentID:       deploymentID,
 				TaskResources:      resources.OldTaskResources(),
 				AllocatedResources: resources,
 				DesiredStatus:      structs.AllocDesiredStatusRun,
 				ClientStatus:       structs.AllocClientStatusPending,
 
 				SharedResources: &structs.Resources{
 					DiskMB: tg.EphemeralDisk.SizeMB,
 				},
@@ -331,11 +331,12 @@ func (s *SystemScheduler) computePlacements(place []allocTuple) error {
 			TaskGroup:          missing.TaskGroup.Name,
 			Metrics:            s.ctx.Metrics(),
 			NodeID:             option.Node.ID,
+			NodeName:           option.Node.Name,
 			TaskResources:      resources.OldTaskResources(),
 			AllocatedResources: resources,
 			DesiredStatus:      structs.AllocDesiredStatusRun,
 			ClientStatus:       structs.AllocClientStatusPending,
 
 			SharedResources: &structs.Resources{
 				DiskMB: missing.TaskGroup.EphemeralDisk.SizeMB,
 			},
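Both schedulers stamp the chosen node's name onto the allocation at placement time, denormalizing it so status reads never need a second lookup against the node list; the trade-off is that renaming a node later will not be reflected on allocations already placed. A distilled sketch of the placement-time copy, with trimmed hypothetical types:

package main

import "fmt"

type Node struct {
	ID   string
	Name string
}

type Alloc struct {
	NodeID   string
	NodeName string
}

// place records the chosen node on the alloc. Name is copied eagerly, so
// the alloc stays self-describing even if the node is later renamed.
func place(option *Node) *Alloc {
	return &Alloc{
		NodeID:   option.ID,
		NodeName: option.Name,
	}
}

func main() {
	fmt.Printf("%+v\n", place(&Node{ID: "f2a3bdab", Name: "worker-1"}))
}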