open-nomad/command/monitor.go


package command
import (
"fmt"
"sort"
"strings"
"sync"
"time"
"github.com/hashicorp/nomad/api"
"github.com/hashicorp/nomad/nomad/structs"
"github.com/mitchellh/cli"
)
const (
// updateWait is the amount of time to wait between status
// updates. Because the monitor is poll-based, we use this
// delay to avoid overwhelming the API server.
updateWait = time.Second
)
// evalState is used to store the current "state of the world"
// in the context of monitoring an evaluation.
type evalState struct {
status string
desc string
node string
deployment string
job string
allocs map[string]*allocState
wait time.Duration
index uint64
}
// newEvalState creates and initializes a new evalState
func newEvalState() *evalState {
return &evalState{
status: structs.EvalStatusPending,
allocs: make(map[string]*allocState),
}
}
// allocState is used to track the state of an allocation
type allocState struct {
id string
group string
node string
desired string
desiredDesc string
client string
clientDesc string
index uint64
}
// monitor wraps an evaluation monitor and holds metadata and
// state information.
type monitor struct {
ui cli.Ui
client *api.Client
state *evalState
// length determines the number of characters for identifiers in the ui.
length int
sync.Mutex
}
// newMonitor returns a new monitor. The returned monitor will
// write output information to the provided ui. The length parameter determines
// the number of characters for identifiers in the ui.
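//
// A typical call site (illustrative sketch, variable names hypothetical)
// might look like:
//
//	mon := newMonitor(ui, client, length)
//	exitCode := mon.monitor(evalID)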
func newMonitor(ui cli.Ui, client *api.Client, length int) *monitor {
if colorUi, ok := ui.(*cli.ColoredUi); ok {
// Disable Info color for monitored output
ui = &cli.ColoredUi{
ErrorColor: colorUi.ErrorColor,
WarnColor: colorUi.WarnColor,
InfoColor: cli.UiColorNone,
Ui: colorUi.Ui,
}
}
mon := &monitor{
ui: &cli.PrefixedUi{
InfoPrefix: "==> ",
OutputPrefix: "    ",
ErrorPrefix: "==> ",
Ui: ui,
},
client: client,
state: newEvalState(),
length: length,
}
return mon
}
// update is used to update our monitor with new state. It can be
// called whether the passed information is new or not, and will
// only dump update messages when state changes.
func (m *monitor) update(update *evalState) {
m.Lock()
defer m.Unlock()
existing := m.state
// Swap in the new state at the end
defer func() {
m.state = update
}()
// Check if the evaluation was triggered by a node
if existing.node == "" && update.node != "" {
m.ui.Output(fmt.Sprintf("%s: Evaluation triggered by node %q",
formatTime(time.Now()), limit(update.node, m.length)))
}
// Check if the evaluation was triggered by a job
if existing.job == "" && update.job != "" {
m.ui.Output(fmt.Sprintf("%s: Evaluation triggered by job %q",
formatTime(time.Now()), update.job))
}
// Check if the evaluation was triggered by a deployment
if existing.deployment == "" && update.deployment != "" {
m.ui.Output(fmt.Sprintf("%s: Evaluation within deployment: %q",
formatTime(time.Now()), limit(update.deployment, m.length)))
}
// Check the allocations
for allocID, alloc := range update.allocs {
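// Note: "existing" is shadowed below by the previously observed state
// for this allocation, if any.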
if existing, ok := existing.allocs[allocID]; !ok {
switch {
case alloc.index < update.index:
// New alloc with create index lower than the eval
// create index indicates modification
m.ui.Output(fmt.Sprintf(
"%s: Allocation %q modified: node %q, group %q",
formatTime(time.Now()), limit(alloc.id, m.length),
limit(alloc.node, m.length), alloc.group))
case alloc.desired == structs.AllocDesiredStatusRun:
// New allocation with desired status running
m.ui.Output(fmt.Sprintf(
"%s: Allocation %q created: node %q, group %q",
formatTime(time.Now()), limit(alloc.id, m.length),
limit(alloc.node, m.length), alloc.group))
}
} else {
switch {
case existing.client != alloc.client:
description := ""
if alloc.clientDesc != "" {
description = fmt.Sprintf(" (%s)", alloc.clientDesc)
}
// Allocation status has changed
m.ui.Output(fmt.Sprintf(
"%s: Allocation %q status changed: %q -> %q%s",
formatTime(time.Now()), limit(alloc.id, m.length),
existing.client, alloc.client, description))
}
}
}
// Check if the status changed. We skip any transitions to pending status.
if existing.status != "" &&
update.status != structs.EvalStatusPending &&
existing.status != update.status {
m.ui.Output(fmt.Sprintf("%s: Evaluation status changed: %q -> %q",
formatTime(time.Now()), existing.status, update.status))
}
}
// monitor is used to start monitoring the given evaluation ID. It
// writes output directly to the monitor's ui, and returns the
// exit code for the command.
//
// The return code will be 0 on successful evaluation. If there are
// problems scheduling the job (impossible constraints, resources
// exhausted, etc), then the return code will be 2. For any other
// failures (API connectivity, internal errors, etc), the return code
// will be 1.
func (m *monitor) monitor(evalID string) int {
// Track if we encounter a scheduling failure. This can only be
// detected while querying allocations, so we use this bool to
// carry that status into the return code.
var schedFailure bool
// Add the initial pending state
m.update(newEvalState())
for {
// Query the evaluation
eval, _, err := m.client.Evaluations().Info(evalID, nil)
if err != nil {
m.ui.Error(fmt.Sprintf("No evaluation with id %q found", evalID))
return 1
}
m.ui.Info(fmt.Sprintf("%s: Monitoring evaluation %q",
formatTime(time.Now()), limit(eval.ID, m.length)))
// Create the new eval state.
state := newEvalState()
state.status = eval.Status
state.desc = eval.StatusDescription
state.node = eval.NodeID
state.job = eval.JobID
state.deployment = eval.DeploymentID
state.wait = eval.Wait
state.index = eval.CreateIndex
// Query the allocations associated with the evaluation
allocs, _, err := m.client.Evaluations().Allocations(eval.ID, nil)
if err != nil {
m.ui.Error(fmt.Sprintf("%s: Error reading allocations: %s", formatTime(time.Now()), err))
return 1
}
// Add the allocs to the state
for _, alloc := range allocs {
state.allocs[alloc.ID] = &allocState{
id: alloc.ID,
group: alloc.TaskGroup,
node: alloc.NodeID,
desired: alloc.DesiredStatus,
desiredDesc: alloc.DesiredDescription,
client: alloc.ClientStatus,
clientDesc: alloc.ClientDescription,
index: alloc.CreateIndex,
}
}
// Update the state
m.update(state)
switch eval.Status {
case structs.EvalStatusComplete, structs.EvalStatusFailed, structs.EvalStatusCancelled:
if len(eval.FailedTGAllocs) == 0 {
m.ui.Info(fmt.Sprintf("%s: Evaluation %q finished with status %q",
formatTime(time.Now()), limit(eval.ID, m.length), eval.Status))
} else {
// There were failures making the allocations
schedFailure = true
m.ui.Info(fmt.Sprintf("%s: Evaluation %q finished with status %q but failed to place all allocations:",
formatTime(time.Now()), limit(eval.ID, m.length), eval.Status))
// Print the failures per task group
for tg, metrics := range eval.FailedTGAllocs {
noun := "allocation"
if metrics.CoalescedFailures > 0 {
noun += "s"
}
m.ui.Output(fmt.Sprintf("%s: Task Group %q (failed to place %d %s):",
formatTime(time.Now()), tg, metrics.CoalescedFailures+1, noun))
metrics := formatAllocMetrics(metrics, false, "    ")
for _, line := range strings.Split(metrics, "\n") {
m.ui.Output(line)
}
}
if eval.BlockedEval != "" {
m.ui.Output(fmt.Sprintf("%s: Evaluation %q waiting for additional capacity to place remainder",
formatTime(time.Now()), limit(eval.BlockedEval, m.length)))
}
}
default:
// Wait for the next update
time.Sleep(updateWait)
continue
}
// Monitor the next eval in the chain, if present
if eval.NextEval != "" {
if eval.Wait.Nanoseconds() != 0 {
m.ui.Info(fmt.Sprintf(
"%s: Monitoring next evaluation %q in %s",
formatTime(time.Now()), limit(eval.NextEval, m.length), eval.Wait))
// Skip some unnecessary polling
time.Sleep(eval.Wait)
}
// Reset the state and monitor the new eval
m.state = newEvalState()
return m.monitor(eval.NextEval)
}
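// The evaluation reached a terminal status; stop polling.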
break
}
// Monitor the deployment if it exists
dID := m.state.deployment
if dID != "" {
m.ui.Info(fmt.Sprintf("%s: Monitoring deployment %q", formatTime(time.Now()), limit(dID, m.length)))
verbose := m.length == fullId
cmd := &DeploymentStatusCommand{Meta: Meta{Ui: m.ui}}
cmd.monitor(m.client, dID, 0, verbose)
}
// Treat scheduling failures specially using a dedicated exit code.
// This makes it easier to detect failures from the CLI.
if schedFailure {
return 2
}
return 0
}
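// formatAllocMetrics renders an AllocationMetric as human-readable lines
// covering node eligibility, class/constraint filtering, resource and quota
// exhaustion, and (when scores is true) per-node scoring details. Each line
// is prepended with the given prefix.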
func formatAllocMetrics(metrics *api.AllocationMetric, scores bool, prefix string) string {
// Print a helpful message if we have an eligibility problem
var out string
if metrics.NodesEvaluated == 0 {
out += fmt.Sprintf("%s* No nodes were eligible for evaluation\n", prefix)
}
// Print a helpful message if the user has asked for a DC that has no
// available nodes.
for dc, available := range metrics.NodesAvailable {
if available == 0 {
out += fmt.Sprintf("%s* No nodes are available in datacenter %q\n", prefix, dc)
}
}
// Print filter info
for class, num := range metrics.ClassFiltered {
out += fmt.Sprintf("%s* Class %q: %d nodes excluded by filter\n", prefix, class, num)
}
for cs, num := range metrics.ConstraintFiltered {
out += fmt.Sprintf("%s* Constraint %q: %d nodes excluded by filter\n", prefix, cs, num)
}
// Print exhaustion info
if ne := metrics.NodesExhausted; ne > 0 {
out += fmt.Sprintf("%s* Resources exhausted on %d nodes\n", prefix, ne)
}
for class, num := range metrics.ClassExhausted {
out += fmt.Sprintf("%s* Class %q exhausted on %d nodes\n", prefix, class, num)
}
for dim, num := range metrics.DimensionExhausted {
out += fmt.Sprintf("%s* Dimension %q exhausted on %d nodes\n", prefix, dim, num)
}
// Print quota info
for _, dim := range metrics.QuotaExhausted {
out += fmt.Sprintf("%s* Quota limit hit %q\n", prefix, dim)
}
// Print scores
if scores {
if len(metrics.ScoreMetaData) > 0 {
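// Build a table with a header row plus one row per candidate node.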
scoreOutput := make([]string, len(metrics.ScoreMetaData)+1)
var scorerNames []string
for i, scoreMeta := range metrics.ScoreMetaData {
// Add header as first row
if i == 0 {
scoreOutput[0] = "Node|"
// sort scores alphabetically
scores := make([]string, 0, len(scoreMeta.Scores))
for score := range scoreMeta.Scores {
scores = append(scores, score)
}
sort.Strings(scores)
// build score header output
for _, scorerName := range scores {
scoreOutput[0] += fmt.Sprintf("%v|", scorerName)
scorerNames = append(scorerNames, scorerName)
}
scoreOutput[0] += "final score"
}
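// Row for this node: node ID, each scorer's value, then the normalized final score.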
scoreOutput[i+1] = fmt.Sprintf("%v|", scoreMeta.NodeID)
for _, scorerName := range scorerNames {
scoreVal := scoreMeta.Scores[scorerName]
scoreOutput[i+1] += fmt.Sprintf("%.3g|", scoreVal)
}
scoreOutput[i+1] += fmt.Sprintf("%.3g", scoreMeta.NormScore)
}
out += formatList(scoreOutput)
} else {
// Backwards compatibility for old allocs
for name, score := range metrics.Scores {
out += fmt.Sprintf("%s* Score %q = %f\n", prefix, name, score)
}
}
}
out = strings.TrimSuffix(out, "\n")
return out
}