package drainer

import (
	"context"
	"time"

	log "github.com/hashicorp/go-hclog"
	memdb "github.com/hashicorp/go-memdb"

	"github.com/hashicorp/nomad/nomad/state"
	"github.com/hashicorp/nomad/nomad/structs"
	"golang.org/x/time/rate"
)

// DrainingNodeWatcher is the interface for watching for draining nodes.
type DrainingNodeWatcher interface{}

// TrackedNodes returns the set of tracked nodes
func (n *NodeDrainer) TrackedNodes() map[string]*structs.Node {
	n.l.RLock()
	defer n.l.RUnlock()

	t := make(map[string]*structs.Node, len(n.nodes))
	for n, d := range n.nodes {
		t[n] = d.GetNode()
	}

	return t
}

// Remove removes the given node from being tracked
func (n *NodeDrainer) Remove(nodeID string) {
	n.l.Lock()
	defer n.l.Unlock()

	// TODO test the notifier is updated
	// Remove it from being tracked and remove it from the deadline notifier
	delete(n.nodes, nodeID)
	n.deadlineNotifier.Remove(nodeID)
}

// Update updates the node, either updating the tracked version or starting to
// track the node.
func (n *NodeDrainer) Update(node *structs.Node) {
	n.l.Lock()
	defer n.l.Unlock()

	if node == nil {
		return
	}

	draining, ok := n.nodes[node.ID]
	if !ok {
		draining = NewDrainingNode(node, n.state)
		n.nodes[node.ID] = draining
	} else {
		// Update it
		draining.Update(node)
	}
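
	// Note: the node watcher only calls Update for nodes whose DrainStrategy
	// is non-nil, so the deadline handling below assumes node.DrainStrategy
	// is set.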
	// TODO test the notifier is updated
	if inf, deadline := node.DrainStrategy.DeadlineTime(); !inf {
		n.deadlineNotifier.Watch(node.ID, deadline)
	} else {
		// There is an infinite deadline so it shouldn't be tracked for
		// deadlining
		n.deadlineNotifier.Remove(node.ID)
	}

	// TODO Test this
	// Register interest in the draining jobs.
	jobs, err := draining.DrainingJobs()
	if err != nil {
		n.logger.Error("error retrieving draining jobs on node", "node_id", node.ID, "error", err)
		return
	}
	n.logger.Trace("node has draining jobs on it", "node_id", node.ID, "num_jobs", len(jobs))
	n.jobWatcher.RegisterJobs(jobs)

	// TODO Test at this layer as well that a node drain on a node without
	// allocs immediately gets unmarked as draining
	// Check whether the node is already done so that a drain on a node with
	// nothing running on it is unset immediately.
	done, err := draining.IsDone()
	if err != nil {
		n.logger.Error("failed to check if node is done draining", "node_id", node.ID, "error", err)
		return
	}

	if done {
		// Node is done draining. Stop remaining system allocs before
		// marking the node as complete.
		remaining, err := draining.RemainingAllocs()
		if err != nil {
			n.logger.Error("error getting remaining allocs on drained node", "node_id", node.ID, "error", err)
		} else if len(remaining) > 0 {
			future := structs.NewBatchFuture()
			n.drainAllocs(future, remaining)
			if err := future.Wait(); err != nil {
				n.logger.Error("failed to drain remaining allocs from done node", "num_allocs", len(remaining), "node_id", node.ID, "error", err)
			}
		}

		// Create the node event
		event := structs.NewNodeEvent().
			SetSubsystem(structs.NodeEventSubsystemDrain).
			SetMessage(NodeDrainEventComplete)
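
		// Commit the drain-complete transition and the node event through
		// Raft so the node's drain flag is unset cluster-wide.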
		index, err := n.raft.NodesDrainComplete([]string{node.ID}, event)
		if err != nil {
			n.logger.Error("failed to unset drain for node", "node_id", node.ID, "error", err)
		} else {
			n.logger.Info("node completed draining at index", "node_id", node.ID, "index", index)
		}
	}
}

// nodeDrainWatcher is used to watch nodes that are entering, leaving or
// changing their drain strategy.
type nodeDrainWatcher struct {
	ctx    context.Context
	logger log.Logger

	// state is the state that is watched for state changes.
	state *state.StateStore

	// limiter is used to limit the rate of blocking queries
	limiter *rate.Limiter

	// tracker is the object that is tracking the nodes and provides us with the
	// needed callbacks
	tracker NodeTracker
}

// NewNodeDrainWatcher returns a new node drain watcher.
func NewNodeDrainWatcher(ctx context.Context, limiter *rate.Limiter, state *state.StateStore, logger log.Logger, tracker NodeTracker) *nodeDrainWatcher {
	w := &nodeDrainWatcher{
		ctx:     ctx,
		limiter: limiter,
		logger:  logger.Named("node_watcher"),
		tracker: tracker,
		state:   state,
	}

	go w.watch()
	return w
}
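
// A minimal usage sketch, assuming the caller already has a context, a
// *state.StateStore, an hclog.Logger, and a NodeTracker implementation such
// as the NodeDrainer (the limiter values are illustrative only):
//
//	limiter := rate.NewLimiter(rate.Limit(10), 10)
//	watcher := NewNodeDrainWatcher(ctx, limiter, stateStore, logger, nodeDrainer)
//	_ = watcher // the watch loop is already running in its own goroutine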

// watch is the long-lived watching routine that detects node changes.
func (w *nodeDrainWatcher) watch() {
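	// Start at index 1 so the first blocking query typically returns the
	// current set of nodes right away, seeding the tracker before waiting
	// for further changes.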
	nindex := uint64(1)
	for {
		w.logger.Trace("getting nodes at index", "index", nindex)
		nodes, index, err := w.getNodes(nindex)
		w.logger.Trace("got nodes at index", "num_nodes", len(nodes), "index", nindex, "error", err)
		if err != nil {
			if err == context.Canceled {
				w.logger.Trace("shutting down")
				return
			}

			w.logger.Error("error watching node updates at index", "index", nindex, "error", err)
			select {
			case <-w.ctx.Done():
				w.logger.Trace("shutting down")
				return
			case <-time.After(stateReadErrorDelay):
				continue
			}
		}

		// update index for next run
		nindex = index
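
		// Snapshot the tracked set so node additions, removals, and drain
		// strategy changes can be detected against the latest node list.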
		tracked := w.tracker.TrackedNodes()
		for nodeID, node := range nodes {
			newDraining := node.DrainStrategy != nil
			currentNode, tracked := tracked[nodeID]

			switch {
			// If the node is tracked but not draining, untrack it.
			case tracked && !newDraining:
				w.logger.Trace("tracked node is no longer draining", "node_id", nodeID)
				w.tracker.Remove(nodeID)

			// If the node is not being tracked but is draining, track it.
			case !tracked && newDraining:
				w.logger.Trace("untracked node is draining", "node_id", nodeID)
				w.tracker.Update(node)

			// If the node is being tracked but its drain strategy has changed, update it.
			case tracked && newDraining && !currentNode.DrainStrategy.Equal(node.DrainStrategy):
				w.logger.Trace("tracked node has updated drain", "node_id", nodeID)
				w.tracker.Update(node)

			default:
				w.logger.Trace("no changes for node", "node_id", nodeID, "node_modify_index", node.ModifyIndex, "tracked", tracked, "newly_draining", newDraining)
			}

			// TODO(schmichael) handle the case of a lost node
		}

		for nodeID := range tracked {
			if _, ok := nodes[nodeID]; !ok {
				w.logger.Trace("tracked node no longer exists", "node_id", nodeID)
				w.tracker.Remove(nodeID)
			}
		}
	}
}

// getNodes returns all nodes, blocking until the state is at or after the
// given index.
func (w *nodeDrainWatcher) getNodes(minIndex uint64) (map[string]*structs.Node, uint64, error) {
	if err := w.limiter.Wait(w.ctx); err != nil {
		return nil, 0, err
	}

	resp, index, err := w.state.BlockingQuery(w.getNodesImpl, minIndex, w.ctx)
	if err != nil {
		return nil, 0, err
	}

	return resp.(map[string]*structs.Node), index, nil
}

// getNodesImpl is used to get nodes from the state store, returning the set of
// nodes and the index they were retrieved at.
func (w *nodeDrainWatcher) getNodesImpl(ws memdb.WatchSet, state *state.StateStore) (interface{}, uint64, error) {
	iter, err := state.Nodes(ws)
	if err != nil {
		return nil, 0, err
	}

	index, err := state.Index("nodes")
	if err != nil {
		return nil, 0, err
	}

	var maxIndex uint64 = 0
	resp := make(map[string]*structs.Node, 64)
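	// Accumulate all nodes, tracking the highest ModifyIndex seen.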
	for {
		raw := iter.Next()
		if raw == nil {
			break
		}

		node := raw.(*structs.Node)
		resp[node.ID] = node
		if maxIndex < node.ModifyIndex {
			maxIndex = node.ModifyIndex
		}
	}

	// Prefer using the actual max index of affected nodes since it means less
	// unblocking
	if maxIndex != 0 {
		index = maxIndex
	}

	return resp, index, nil
}