open-nomad/nomad/drainer/watch_nodes.go

// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: MPL-2.0

package drainer

import (
	"context"

	log "github.com/hashicorp/go-hclog"
	memdb "github.com/hashicorp/go-memdb"

	"github.com/hashicorp/nomad/helper"
	"github.com/hashicorp/nomad/nomad/state"
	"github.com/hashicorp/nomad/nomad/structs"
	"golang.org/x/time/rate"
)

// DrainingNodeWatcher is the interface for watching for draining nodes.
type DrainingNodeWatcher interface{}

// TrackedNodes returns the set of tracked nodes
func (n *NodeDrainer) TrackedNodes() map[string]*structs.Node {
	n.l.RLock()
	defer n.l.RUnlock()

	t := make(map[string]*structs.Node, len(n.nodes))
	for n, d := range n.nodes {
		t[n] = d.GetNode()
	}

	return t
}

// Remove removes the given node from being tracked
func (n *NodeDrainer) Remove(nodeID string) {
	n.l.Lock()
	defer n.l.Unlock()

	// Remove it from being tracked and remove it from the deadline notifier
	delete(n.nodes, nodeID)
	n.deadlineNotifier.Remove(nodeID)
}

// Update updates the node, either updating the tracked version or starting to
// track the node.
func (n *NodeDrainer) Update(node *structs.Node) {
	n.l.Lock()
	defer n.l.Unlock()

	if node == nil {
		return
	}

	draining, ok := n.nodes[node.ID]
	if !ok {
		draining = NewDrainingNode(node, n.state)
		n.nodes[node.ID] = draining
	} else {
		// Update it
		draining.Update(node)
	}

	if inf, deadline := node.DrainStrategy.DeadlineTime(); !inf {
		n.deadlineNotifier.Watch(node.ID, deadline)
	} else {
		// There is an infinite deadline so it shouldn't be tracked for
		// deadlining
		n.deadlineNotifier.Remove(node.ID)
	}

	// Register interest in the draining jobs.
	jobs, err := draining.DrainingJobs()
	if err != nil {
		n.logger.Error("error retrieving draining jobs on node", "node_id", node.ID, "error", err)
		return
	}
	n.logger.Trace("node has draining jobs on it", "node_id", node.ID, "num_jobs", len(jobs))
	n.jobWatcher.RegisterJobs(jobs)

	// Check if the node is done such that if an operator drains a node with
	// nothing on it we unset drain
	done, err := draining.IsDone()
	if err != nil {
		n.logger.Error("failed to check if node is done draining", "node_id", node.ID, "error", err)
		return
	}

	if done {
		// Node is done draining. Stop remaining system allocs before marking
		// node as complete.
		remaining, err := draining.RemainingAllocs()
		if err != nil {
			n.logger.Error("error getting remaining allocs on drained node", "node_id", node.ID, "error", err)
		} else if len(remaining) > 0 {
			future := structs.NewBatchFuture()
			n.drainAllocs(future, remaining)
			if err := future.Wait(); err != nil {
				n.logger.Error("failed to drain remaining allocs from done node", "num_allocs", len(remaining), "node_id", node.ID, "error", err)
			}
		}

		// Create the node event
		event := structs.NewNodeEvent().
			SetSubsystem(structs.NodeEventSubsystemDrain).
			SetMessage(NodeDrainEventComplete)

		index, err := n.raft.NodesDrainComplete([]string{node.ID}, event)
		if err != nil {
			n.logger.Error("failed to unset drain for node", "node_id", node.ID, "error", err)
		} else {
			n.logger.Info("node completed draining at index", "node_id", node.ID, "index", index)
		}
	}
}

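// An illustrative sketch of handing a draining node to the tracker via Update,
// as the node watcher below does. The node ID, the drainer variable, and the
// DrainStrategy field values are assumptions for the example; it presumes
// DrainStrategy embeds a DrainSpec whose Deadline is a time.Duration.
//
//	var drainer *NodeDrainer // assumed to be initialized elsewhere
//	node := &structs.Node{
//		ID: "example-node-id",
//		DrainStrategy: &structs.DrainStrategy{
//			DrainSpec: structs.DrainSpec{Deadline: 30 * time.Minute},
//		},
//	}
//	// Tracks the node, watches its 30 minute deadline, and registers its
//	// draining jobs with the job watcher.
//	drainer.Update(node)
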
// nodeDrainWatcher is used to watch nodes that are entering, leaving or
// changing their drain strategy.
type nodeDrainWatcher struct {
	ctx    context.Context
	logger log.Logger

	// state is the state that is watched for state changes.
	state *state.StateStore

	// limiter is used to limit the rate of blocking queries
	limiter *rate.Limiter

	// tracker is the object that is tracking the nodes and provides us with the
	// needed callbacks
	tracker NodeTracker
}

// NewNodeDrainWatcher returns a new node drain watcher.
func NewNodeDrainWatcher(ctx context.Context, limiter *rate.Limiter, state *state.StateStore, logger log.Logger, tracker NodeTracker) *nodeDrainWatcher {
	w := &nodeDrainWatcher{
		ctx:     ctx,
		limiter: limiter,
		logger:  logger.Named("node_watcher"),
		tracker: tracker,
		state:   state,
	}

	go w.watch()
	return w
}

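// A minimal sketch of wiring up the watcher, assuming the caller already holds
// a state store, an hclog logger, and a NodeTracker such as the NodeDrainer.
// The stateStore, logger, and tracker variables and the limiter values are
// illustrative assumptions, not Nomad's actual defaults.
//
//	ctx, cancel := context.WithCancel(context.Background())
//	defer cancel()
//
//	// Cap blocking queries at roughly ten per second with small bursts.
//	limiter := rate.NewLimiter(rate.Limit(10), 10)
//	watcher := NewNodeDrainWatcher(ctx, limiter, stateStore, logger, tracker)
//	_ = watcher // the watch loop is already running in its own goroutine
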
// watch is the long lived watching routine that detects node changes.
func (w *nodeDrainWatcher) watch() {
	timer, stop := helper.NewSafeTimer(stateReadErrorDelay)
	defer stop()

	nindex := uint64(1)

	for {
		timer.Reset(stateReadErrorDelay)

		nodes, index, err := w.getNodes(nindex)
		if err != nil {
			if err == context.Canceled {
				return
			}

			w.logger.Error("error watching node updates at index", "index", nindex, "error", err)
			select {
			case <-w.ctx.Done():
				return
			case <-timer.C:
				continue
			}
		}

		// update index for next run
		nindex = index

		tracked := w.tracker.TrackedNodes()
		for nodeID, node := range nodes {
			newDraining := node.DrainStrategy != nil
			currentNode, tracked := tracked[nodeID]

			switch {
			case tracked && !newDraining:
				// If the node is tracked but not draining, untrack
				w.tracker.Remove(nodeID)

			case !tracked && newDraining:
				// If the node is not being tracked but is draining, track
				w.tracker.Update(node)

			case tracked && newDraining && !currentNode.DrainStrategy.Equal(node.DrainStrategy):
				// If the node is being tracked but has changed, update
				w.tracker.Update(node)

			default:
				// note that down/disconnected nodes are handled the same as any
				// other node here, because we don't want to stop draining a
				// node that might heartbeat again. The job watcher will let us
				// know if we can stop watching the node when all the allocs are
				// evicted
			}
		}

		for nodeID := range tracked {
			if _, ok := nodes[nodeID]; !ok {
				w.tracker.Remove(nodeID)
			}
		}
	}
}

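// A minimal sketch of a NodeTracker test double, illustrating the three
// callbacks the watch loop above drives. It assumes NodeTracker declares
// exactly these methods; the map bookkeeping is for illustration only and is
// not safe for concurrent use.
//
//	type fakeTracker struct {
//		nodes map[string]*structs.Node
//	}
//
//	func (f *fakeTracker) TrackedNodes() map[string]*structs.Node { return f.nodes }
//	func (f *fakeTracker) Remove(nodeID string)                   { delete(f.nodes, nodeID) }
//	func (f *fakeTracker) Update(node *structs.Node)              { f.nodes[node.ID] = node }
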
// getNodes returns all nodes, blocking until the node table index is greater
// than the given index.
func (w *nodeDrainWatcher) getNodes(minIndex uint64) (map[string]*structs.Node, uint64, error) {
	if err := w.limiter.Wait(w.ctx); err != nil {
		return nil, 0, err
	}

	resp, index, err := w.state.BlockingQuery(w.getNodesImpl, minIndex, w.ctx)
	if err != nil {
		return nil, 0, err
	}

	return resp.(map[string]*structs.Node), index, nil
}

// getNodesImpl is used to get nodes from the state store, returning the set of
// nodes and the current node table index.
func (w *nodeDrainWatcher) getNodesImpl(ws memdb.WatchSet, state *state.StateStore) (interface{}, uint64, error) {
	iter, err := state.Nodes(ws)
	if err != nil {
		return nil, 0, err
	}

	index, err := state.Index("nodes")
	if err != nil {
		return nil, 0, err
	}

	resp := make(map[string]*structs.Node, 64)
	for {
		raw := iter.Next()
		if raw == nil {
			break
		}

		node := raw.(*structs.Node)
		resp[node.ID] = node
	}

	return resp, index, nil
}