open-nomad/nomad/node_endpoint.go

package nomad
import (
"context"
"fmt"
"strings"
"sync"
"time"
"golang.org/x/sync/errgroup"
metrics "github.com/armon/go-metrics"
log "github.com/hashicorp/go-hclog"
memdb "github.com/hashicorp/go-memdb"
multierror "github.com/hashicorp/go-multierror"
vapi "github.com/hashicorp/vault/api"
"github.com/hashicorp/nomad/acl"
"github.com/hashicorp/nomad/helper/uuid"
"github.com/hashicorp/nomad/nomad/state"
"github.com/hashicorp/nomad/nomad/structs"
"github.com/hashicorp/raft"
)
const (
// batchUpdateInterval is how long we wait to batch updates
batchUpdateInterval = 50 * time.Millisecond
// maxParallelRequestsPerDerive is the maximum number of parallel Vault
// create token requests that may be outstanding per derive request
maxParallelRequestsPerDerive = 16
// NodeDrainEvents are the various drain messages
NodeDrainEventDrainSet = "Node drain strategy set"
NodeDrainEventDrainDisabled = "Node drain disabled"
NodeDrainEventDrainUpdated = "Node drain strategy updated"
// NodeEligibilityEventEligible is used when the node's eligibility is marked
// eligible
NodeEligibilityEventEligible = "Node marked as eligible for scheduling"
// NodeEligibilityEventIneligible is used when the node's eligibility is marked
// ineligible
NodeEligibilityEventIneligible = "Node marked as ineligible for scheduling"
// NodeHeartbeatEventReregistered is the message used when the node becomes
// reregistered by the heartbeat.
NodeHeartbeatEventReregistered = "Node reregistered by heartbeat"
)
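// Illustrative sketch only (not part of the original file): the drain and
// eligibility messages above are attached to node events elsewhere in this
// file, roughly like:
//
//	e := structs.NewNodeEvent().
//		SetSubsystem(structs.NodeEventSubsystemDrain).
//		SetMessage(NodeDrainEventDrainSet)
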
// Node endpoint is used for client interactions
type Node struct {
srv *Server
logger log.Logger
// ctx provides context regarding the underlying connection
ctx *RPCContext
// updates holds pending client status updates for allocations
updates []*structs.Allocation
// evals holds pending rescheduling eval updates triggered by failed allocations
evals []*structs.Evaluation
// updateFuture is used to wait for the pending batch update
// to complete. This may be nil if no batch is pending.
updateFuture *structs.BatchFuture
// updateTimer is the timer that will trigger the next batch
// update, and may be nil if there is no batch pending.
updateTimer *time.Timer
// updatesLock synchronizes access to the updates list,
// the future and the timer.
updatesLock sync.Mutex
}
// Register is used to upsert a client that is available for scheduling
func (n *Node) Register(args *structs.NodeRegisterRequest, reply *structs.NodeUpdateResponse) error {
if done, err := n.srv.forward("Node.Register", args, args, reply); done {
// We have a valid node connection since there is no error from the
// forwarded server, so add the mapping to cache the
// connection and allow the server to send RPCs to the client.
if err == nil && n.ctx != nil && n.ctx.NodeID == "" {
n.ctx.NodeID = args.Node.ID
n.srv.addNodeConn(n.ctx)
}
return err
}
defer metrics.MeasureSince([]string{"nomad", "client", "register"}, time.Now())
// Validate the arguments
if args.Node == nil {
return fmt.Errorf("missing node for client registration")
}
if args.Node.ID == "" {
return fmt.Errorf("missing node ID for client registration")
}
if args.Node.Datacenter == "" {
return fmt.Errorf("missing datacenter for client registration")
}
if args.Node.Name == "" {
return fmt.Errorf("missing node name for client registration")
}
if len(args.Node.Attributes) == 0 {
return fmt.Errorf("missing attributes for client registration")
}
if args.Node.SecretID == "" {
return fmt.Errorf("missing node secret ID for client registration")
}
// Default the status if none is given
if args.Node.Status == "" {
args.Node.Status = structs.NodeStatusInit
}
if !structs.ValidNodeStatus(args.Node.Status) {
return fmt.Errorf("invalid status for node")
}
// Default to eligible for scheduling if unset
if args.Node.SchedulingEligibility == "" {
args.Node.SchedulingEligibility = structs.NodeSchedulingEligible
}
// Set the timestamp when the node is registered
args.Node.StatusUpdatedAt = time.Now().Unix()
// Compute the node class
if err := args.Node.ComputeClass(); err != nil {
return fmt.Errorf("failed to computed node class: %v", err)
}
// Look for the node so we can detect a state transition
snap, err := n.srv.fsm.State().Snapshot()
if err != nil {
return err
}
ws := memdb.NewWatchSet()
originalNode, err := snap.NodeByID(ws, args.Node.ID)
if err != nil {
return err
}
// Check if the SecretID has been tampered with
if originalNode != nil {
if args.Node.SecretID != originalNode.SecretID && originalNode.SecretID != "" {
return fmt.Errorf("node secret ID does not match. Not registering node.")
}
}
// We have a valid node connection, so add the mapping to cache the
// connection and allow the server to send RPCs to the client. We only cache
// the connection if it is not being forwarded from another server.
if n.ctx != nil && n.ctx.NodeID == "" && !args.IsForwarded() {
n.ctx.NodeID = args.Node.ID
n.srv.addNodeConn(n.ctx)
}
// Commit this update via Raft
_, index, err := n.srv.raftApply(structs.NodeRegisterRequestType, args)
if err != nil {
n.logger.Error("register failed", "error", err)
return err
}
reply.NodeModifyIndex = index
// Check if we should trigger evaluations
originalStatus := structs.NodeStatusInit
if originalNode != nil {
originalStatus = originalNode.Status
}
transitionToReady := transitionedToReady(args.Node.Status, originalStatus)
if structs.ShouldDrainNode(args.Node.Status) || transitionToReady {
evalIDs, evalIndex, err := n.createNodeEvals(args.Node.ID, index)
if err != nil {
n.logger.Error("eval creation failed", "error", err)
return err
}
reply.EvalIDs = evalIDs
reply.EvalCreateIndex = evalIndex
}
// Check if we need to setup a heartbeat
if !args.Node.TerminalStatus() {
ttl, err := n.srv.resetHeartbeatTimer(args.Node.ID)
if err != nil {
n.logger.Error("heartbeat reset failed", "error", err)
return err
}
reply.HeartbeatTTL = ttl
}
// Set the reply index
reply.Index = index
snap, err = n.srv.fsm.State().Snapshot()
if err != nil {
return err
}
n.srv.peerLock.RLock()
defer n.srv.peerLock.RUnlock()
if err := n.constructNodeServerInfoResponse(snap, reply); err != nil {
n.logger.Error("failed to populate NodeUpdateResponse", "error", err)
return err
}
return nil
}
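// Illustrative sketch only: a client registers itself with an RPC shaped
// roughly like the following (the rpcClient helper is assumed here, not
// defined in this file):
//
//	args := &structs.NodeRegisterRequest{
//		Node:         node,
//		WriteRequest: structs.WriteRequest{Region: region},
//	}
//	var reply structs.NodeUpdateResponse
//	err := rpcClient.Call("Node.Register", args, &reply)
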
// constructNodeServerInfoResponse assumes the n.srv.peerLock is held for reading.
func (n *Node) constructNodeServerInfoResponse(snap *state.StateSnapshot, reply *structs.NodeUpdateResponse) error {
reply.LeaderRPCAddr = string(n.srv.raft.Leader())
// Reply with config information required for future RPC requests
reply.Servers = make([]*structs.NodeServerInfo, 0, len(n.srv.localPeers))
for _, v := range n.srv.localPeers {
reply.Servers = append(reply.Servers,
&structs.NodeServerInfo{
RPCAdvertiseAddr: v.RPCAddr.String(),
RPCMajorVersion: int32(v.MajorVersion),
RPCMinorVersion: int32(v.MinorVersion),
Datacenter: v.Datacenter,
})
}
// TODO(sean@): Use an indexed node count instead
//
// Snapshot is used only to iterate over all nodes to create a node
// count to send back to Nomad Clients in their heartbeat so Clients
// can estimate the size of the cluster.
ws := memdb.NewWatchSet()
iter, err := snap.Nodes(ws)
if err == nil {
for {
raw := iter.Next()
if raw == nil {
break
}
reply.NumNodes++
}
}
return nil
}
// Deregister is used to remove a client from the cluster. If a client should
// just be made unavailable for scheduling, a status update is preferred.
func (n *Node) Deregister(args *structs.NodeDeregisterRequest, reply *structs.NodeUpdateResponse) error {
if done, err := n.srv.forward("Node.Deregister", args, args, reply); done {
return err
}
defer metrics.MeasureSince([]string{"nomad", "client", "deregister"}, time.Now())
// Check node permissions
if aclObj, err := n.srv.ResolveToken(args.AuthToken); err != nil {
return err
} else if aclObj != nil && !aclObj.AllowNodeWrite() {
return structs.ErrPermissionDenied
}
// Verify the arguments
var nodeIDs []string
if len(args.NodeIDs) == 0 {
if args.NodeID == "" {
return fmt.Errorf("missing node IDs for client deregistration")
}
nodeIDs = append(nodeIDs, args.NodeID)
} else if args.NodeID != "" {
return fmt.Errorf("use only NodeIDs, the NodeID field is deprecated")
} else {
nodeIDs = args.NodeIDs
}
// Open state handles
snap, err := n.srv.fsm.State().Snapshot()
if err != nil {
return err
}
ws := memdb.NewWatchSet()
// Assert that the state contains the nodes
for _, nodeID := range nodeIDs {
node, err := snap.NodeByID(ws, nodeID)
if err != nil {
return fmt.Errorf("node lookup failed: %s: %v", nodeID, err)
}
if node == nil {
return fmt.Errorf("node not found: %s", nodeID)
}
}
// Commit this update to Raft before we clear the heartbeat timer so that a
// failure leaves the node running
_, index, err := n.srv.raftApply(structs.NodeDeregisterRequestType, args)
if err != nil {
n.logger.Error("deregister failed", "error", err)
return err
}
for _, nodeID := range nodeIDs {
n.srv.clearHeartbeatTimer(nodeID)
// If there are any Vault accessors on the node, revoke them
accessors, err := snap.VaultAccessorsByNode(ws, nodeID)
if err != nil {
n.logger.Error("looking up accessors for node failed", "node_id", nodeID, "error", err)
return err
}
if l := len(accessors); l != 0 {
n.logger.Debug("revoking accessors on node due to deregister", "num_accessors", l, "node_id", nodeID)
if err := n.srv.vault.RevokeTokens(context.Background(), accessors, true); err != nil {
n.logger.Error("revoking accessors for node failed", "node_id", nodeID, "error", err)
return err
}
}
// Create the evaluations for these nodes
evalIDs, evalIndex, err := n.createNodeEvals(nodeID, index)
if err != nil {
n.logger.Error("eval creation failed", "error", err)
return err
}
reply.EvalIDs = append(reply.EvalIDs, evalIDs...)
// Set the reply evalIndex only the first time
if reply.EvalCreateIndex == 0 {
reply.EvalCreateIndex = evalIndex
}
}
// Setup the reply
reply.NodeModifyIndex = index
reply.Index = index
return nil
}
// UpdateStatus is used to update the status of a client node
func (n *Node) UpdateStatus(args *structs.NodeUpdateStatusRequest, reply *structs.NodeUpdateResponse) error {
if done, err := n.srv.forward("Node.UpdateStatus", args, args, reply); done {
// We have a valid node connection since there is no error from the
// forwarded server, so add the mapping to cache the
// connection and allow the server to send RPCs to the client.
if err == nil && n.ctx != nil && n.ctx.NodeID == "" {
n.ctx.NodeID = args.NodeID
n.srv.addNodeConn(n.ctx)
}
return err
}
defer metrics.MeasureSince([]string{"nomad", "client", "update_status"}, time.Now())
// Verify the arguments
if args.NodeID == "" {
return fmt.Errorf("missing node ID for client status update")
}
if !structs.ValidNodeStatus(args.Status) {
return fmt.Errorf("invalid status for node")
}
// Look for the node
snap, err := n.srv.fsm.State().Snapshot()
if err != nil {
return err
}
ws := memdb.NewWatchSet()
node, err := snap.NodeByID(ws, args.NodeID)
if err != nil {
return err
}
if node == nil {
return fmt.Errorf("node not found")
}
// We have a valid node connection, so add the mapping to cache the
// connection and allow the server to send RPCs to the client. We only cache
// the connection if it is not being forwarded from another server.
if n.ctx != nil && n.ctx.NodeID == "" && !args.IsForwarded() {
n.ctx.NodeID = args.NodeID
n.srv.addNodeConn(n.ctx)
}
// XXX: Could use the SecretID here but have to update the heartbeat system
// to track SecretIDs.
// Update the timestamp of when the node status was updated
args.UpdatedAt = time.Now().Unix()
// Commit this update via Raft
var index uint64
if node.Status != args.Status {
// Attach an event if we are updating the node status to ready when it
// is down via a heartbeat
if node.Status == structs.NodeStatusDown && args.NodeEvent == nil {
args.NodeEvent = structs.NewNodeEvent().
SetSubsystem(structs.NodeEventSubsystemCluster).
SetMessage(NodeHeartbeatEventReregistered)
}
_, index, err = n.srv.raftApply(structs.NodeUpdateStatusRequestType, args)
if err != nil {
n.logger.Error("status update failed", "error", err)
return err
}
reply.NodeModifyIndex = index
}
// Check if we should trigger evaluations
transitionToReady := transitionedToReady(args.Status, node.Status)
if structs.ShouldDrainNode(args.Status) || transitionToReady {
evalIDs, evalIndex, err := n.createNodeEvals(args.NodeID, index)
if err != nil {
n.logger.Error("eval creation failed", "error", err)
return err
}
reply.EvalIDs = evalIDs
reply.EvalCreateIndex = evalIndex
}
// Check if we need to setup a heartbeat
switch args.Status {
case structs.NodeStatusDown:
// Determine if there are any Vault accessors on the node
accessors, err := n.srv.State().VaultAccessorsByNode(ws, args.NodeID)
if err != nil {
n.logger.Error("looking up accessors for node failed", "node_id", args.NodeID, "error", err)
return err
}
if l := len(accessors); l != 0 {
n.logger.Debug("revoking accessors on node due to down state", "num_accessors", l, "node_id", args.NodeID)
if err := n.srv.vault.RevokeTokens(context.Background(), accessors, true); err != nil {
n.logger.Error("revoking accessors for node failed", "node_id", args.NodeID, "error", err)
return err
}
}
default:
ttl, err := n.srv.resetHeartbeatTimer(args.NodeID)
if err != nil {
n.logger.Error("heartbeat reset failed", "error", err)
return err
}
reply.HeartbeatTTL = ttl
}
// Set the reply index and leader
reply.Index = index
n.srv.peerLock.RLock()
defer n.srv.peerLock.RUnlock()
if err := n.constructNodeServerInfoResponse(snap, reply); err != nil {
n.logger.Error("failed to populate NodeUpdateResponse", "error", err)
return err
}
return nil
}
// transitionedToReady is a helper that takes a node's new and old status and
// returns whether it has transitioned to ready.
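// For example, moving from init or down to ready reports a transition, while a
// ready-to-ready heartbeat does not:
//
//	transitionedToReady(structs.NodeStatusReady, structs.NodeStatusInit)  // true
//	transitionedToReady(structs.NodeStatusReady, structs.NodeStatusDown)  // true
//	transitionedToReady(structs.NodeStatusReady, structs.NodeStatusReady) // false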
func transitionedToReady(newStatus, oldStatus string) bool {
initToReady := oldStatus == structs.NodeStatusInit && newStatus == structs.NodeStatusReady
terminalToReady := oldStatus == structs.NodeStatusDown && newStatus == structs.NodeStatusReady
return initToReady || terminalToReady
}
// UpdateDrain is used to update the drain mode of a client node
func (n *Node) UpdateDrain(args *structs.NodeUpdateDrainRequest,
reply *structs.NodeDrainUpdateResponse) error {
if done, err := n.srv.forward("Node.UpdateDrain", args, args, reply); done {
return err
}
defer metrics.MeasureSince([]string{"nomad", "client", "update_drain"}, time.Now())
// Check node write permissions
if aclObj, err := n.srv.ResolveToken(args.AuthToken); err != nil {
return err
} else if aclObj != nil && !aclObj.AllowNodeWrite() {
return structs.ErrPermissionDenied
}
2015-09-07 03:00:12 +00:00
// Verify the arguments
if args.NodeID == "" {
return fmt.Errorf("missing node ID for drain update")
}
if args.NodeEvent != nil {
return fmt.Errorf("node event must not be set")
}
// Look for the node
snap, err := n.srv.fsm.State().Snapshot()
if err != nil {
return err
}
node, err := snap.NodeByID(nil, args.NodeID)
if err != nil {
return err
}
if node == nil {
return fmt.Errorf("node not found")
}
// Update the timestamp of when the node status was updated
args.UpdatedAt = time.Now().Unix()
// COMPAT: Remove in 0.9. Attempt to upgrade the request if it is of the old
// format.
if args.Drain && args.DrainStrategy == nil {
args.DrainStrategy = &structs.DrainStrategy{
DrainSpec: structs.DrainSpec{
Deadline: -1 * time.Second, // Force drain
},
}
}
// Mark the deadline time
if args.DrainStrategy != nil && args.DrainStrategy.Deadline.Nanoseconds() > 0 {
args.DrainStrategy.ForceDeadline = time.Now().Add(args.DrainStrategy.Deadline)
}
// Construct the node event
args.NodeEvent = structs.NewNodeEvent().SetSubsystem(structs.NodeEventSubsystemDrain)
if node.DrainStrategy == nil && args.DrainStrategy != nil {
args.NodeEvent.SetMessage(NodeDrainEventDrainSet)
} else if node.DrainStrategy != nil && args.DrainStrategy != nil {
args.NodeEvent.SetMessage(NodeDrainEventDrainUpdated)
} else if node.DrainStrategy != nil && args.DrainStrategy == nil {
args.NodeEvent.SetMessage(NodeDrainEventDrainDisabled)
} else {
args.NodeEvent = nil
}
// Commit this update via Raft
_, index, err := n.srv.raftApply(structs.NodeUpdateDrainRequestType, args)
if err != nil {
n.logger.Error("drain update failed", "error", err)
return err
}
reply.NodeModifyIndex = index
// If the node is transitioning to be eligible, create Node evaluations
// because there may be a System job registered that should be evaluated.
if node.SchedulingEligibility == structs.NodeSchedulingIneligible && args.MarkEligible && args.DrainStrategy == nil {
evalIDs, evalIndex, err := n.createNodeEvals(args.NodeID, index)
if err != nil {
n.logger.Error("eval creation failed", "error", err)
return err
}
reply.EvalIDs = evalIDs
reply.EvalCreateIndex = evalIndex
}
// Set the reply index
reply.Index = index
return nil
}
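// Illustrative sketch only: a drain request with a ten minute deadline carried
// by Node.UpdateDrain would look roughly like
//
//	args := &structs.NodeUpdateDrainRequest{
//		NodeID: nodeID,
//		DrainStrategy: &structs.DrainStrategy{
//			DrainSpec: structs.DrainSpec{Deadline: 10 * time.Minute},
//		},
//	}
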
// UpdateEligibility is used to update the scheduling eligibility of a node
func (n *Node) UpdateEligibility(args *structs.NodeUpdateEligibilityRequest,
reply *structs.NodeEligibilityUpdateResponse) error {
if done, err := n.srv.forward("Node.UpdateEligibility", args, args, reply); done {
return err
}
defer metrics.MeasureSince([]string{"nomad", "client", "update_eligibility"}, time.Now())
// Check node write permissions
if aclObj, err := n.srv.ResolveToken(args.AuthToken); err != nil {
return err
} else if aclObj != nil && !aclObj.AllowNodeWrite() {
return structs.ErrPermissionDenied
}
// Verify the arguments
if args.NodeID == "" {
return fmt.Errorf("missing node ID for setting scheduling eligibility")
}
if args.NodeEvent != nil {
return fmt.Errorf("node event must not be set")
}
// Check that only allowed types are set
switch args.Eligibility {
case structs.NodeSchedulingEligible, structs.NodeSchedulingIneligible:
default:
return fmt.Errorf("invalid scheduling eligibility %q", args.Eligibility)
}
// Look for the node
snap, err := n.srv.fsm.State().Snapshot()
if err != nil {
return err
}
node, err := snap.NodeByID(nil, args.NodeID)
if err != nil {
return err
}
if node == nil {
return fmt.Errorf("node not found")
}
if node.DrainStrategy != nil && args.Eligibility == structs.NodeSchedulingEligible {
return fmt.Errorf("can not set node's scheduling eligibility to eligible while it is draining")
}
switch args.Eligibility {
case structs.NodeSchedulingEligible, structs.NodeSchedulingIneligible:
default:
return fmt.Errorf("invalid scheduling eligibility %q", args.Eligibility)
}
// Update the timestamp of when the node status was updated
args.UpdatedAt = time.Now().Unix()
// Construct the node event
args.NodeEvent = structs.NewNodeEvent().SetSubsystem(structs.NodeEventSubsystemCluster)
if node.SchedulingEligibility == args.Eligibility {
return nil // Nothing to do
} else if args.Eligibility == structs.NodeSchedulingEligible {
args.NodeEvent.SetMessage(NodeEligibilityEventEligible)
} else {
args.NodeEvent.SetMessage(NodeEligibilityEventIneligible)
}
// Commit this update via Raft
outErr, index, err := n.srv.raftApply(structs.NodeUpdateEligibilityRequestType, args)
if err != nil {
n.logger.Error("eligibility update failed", "error", err)
return err
}
if outErr != nil {
if err, ok := outErr.(error); ok && err != nil {
n.logger.Error("eligibility update failed", "error", err)
return err
}
}
// If the node is transitioning to be eligible, create Node evaluations
// because there may be a System job registered that should be evaluated.
if node.SchedulingEligibility == structs.NodeSchedulingIneligible && args.Eligibility == structs.NodeSchedulingEligible {
evalIDs, evalIndex, err := n.createNodeEvals(args.NodeID, index)
if err != nil {
n.logger.Error("eval creation failed", "error", err)
return err
}
reply.EvalIDs = evalIDs
reply.EvalCreateIndex = evalIndex
}
// Set the reply index
reply.Index = index
return nil
}
// Evaluate is used to force a re-evaluation of the node
func (n *Node) Evaluate(args *structs.NodeEvaluateRequest, reply *structs.NodeUpdateResponse) error {
if done, err := n.srv.forward("Node.Evaluate", args, args, reply); done {
return err
}
defer metrics.MeasureSince([]string{"nomad", "client", "evaluate"}, time.Now())
// Check node write permissions
if aclObj, err := n.srv.ResolveToken(args.AuthToken); err != nil {
return err
} else if aclObj != nil && !aclObj.AllowNodeWrite() {
return structs.ErrPermissionDenied
}
// Verify the arguments
if args.NodeID == "" {
return fmt.Errorf("missing node ID for evaluation")
}
// Look for the node
snap, err := n.srv.fsm.State().Snapshot()
if err != nil {
return err
}
ws := memdb.NewWatchSet()
node, err := snap.NodeByID(ws, args.NodeID)
if err != nil {
return err
}
if node == nil {
return fmt.Errorf("node not found")
}
// Create the evaluation
evalIDs, evalIndex, err := n.createNodeEvals(args.NodeID, node.ModifyIndex)
if err != nil {
n.logger.Error("eval creation failed", "error", err)
return err
}
reply.EvalIDs = evalIDs
reply.EvalCreateIndex = evalIndex
// Set the reply index
reply.Index = evalIndex
n.srv.peerLock.RLock()
defer n.srv.peerLock.RUnlock()
if err := n.constructNodeServerInfoResponse(snap, reply); err != nil {
n.logger.Error("failed to populate NodeUpdateResponse", "error", err)
return err
}
return nil
}
// GetNode is used to request information about a specific node
func (n *Node) GetNode(args *structs.NodeSpecificRequest,
reply *structs.SingleNodeResponse) error {
if done, err := n.srv.forward("Node.GetNode", args, args, reply); done {
return err
}
defer metrics.MeasureSince([]string{"nomad", "client", "get_node"}, time.Now())
// Check node read permissions
if aclObj, err := n.srv.ResolveToken(args.AuthToken); err != nil {
// If ResolveToken had an unexpected error return that
if err != structs.ErrTokenNotFound {
return err
}
// Attempt to lookup AuthToken as a Node.SecretID since nodes
// call this endpoint and don't have an ACL token.
node, stateErr := n.srv.fsm.State().NodeBySecretID(nil, args.AuthToken)
if stateErr != nil {
// Return the original ResolveToken error with this err
var merr multierror.Error
merr.Errors = append(merr.Errors, err, stateErr)
return merr.ErrorOrNil()
}
// Not a node or a valid ACL token
if node == nil {
return structs.ErrTokenNotFound
}
} else if aclObj != nil && !aclObj.AllowNodeRead() {
return structs.ErrPermissionDenied
}
// Setup the blocking query
opts := blockingOptions{
queryOpts: &args.QueryOptions,
queryMeta: &reply.QueryMeta,
run: func(ws memdb.WatchSet, state *state.StateStore) error {
// Verify the arguments
if args.NodeID == "" {
return fmt.Errorf("missing node ID")
}
// Look for the node
out, err := state.NodeByID(ws, args.NodeID)
if err != nil {
return err
}
// Setup the output
if out != nil {
// Clear the secret ID
reply.Node = out.Copy()
reply.Node.SecretID = ""
reply.Index = out.ModifyIndex
} else {
// Use the last index that affected the nodes table
index, err := state.Index("nodes")
if err != nil {
return err
}
reply.Node = nil
reply.Index = index
}
// Set the query response
n.srv.setQueryMeta(&reply.QueryMeta)
return nil
}}
return n.srv.blockingRPC(&opts)
}
// GetAllocs is used to request allocations for a specific node
func (n *Node) GetAllocs(args *structs.NodeSpecificRequest,
reply *structs.NodeAllocsResponse) error {
if done, err := n.srv.forward("Node.GetAllocs", args, args, reply); done {
return err
}
defer metrics.MeasureSince([]string{"nomad", "client", "get_allocs"}, time.Now())
// Check node read and namespace job read permissions
aclObj, err := n.srv.ResolveToken(args.AuthToken)
if err != nil {
return err
}
if aclObj != nil && !aclObj.AllowNodeRead() {
return structs.ErrPermissionDenied
}
// cache namespace perms
readableNamespaces := map[string]bool{}
// readNS is a caching namespace read-job helper
readNS := func(ns string) bool {
if aclObj == nil {
// ACLs are disabled; everything is readable
return true
}
if readable, ok := readableNamespaces[ns]; ok {
// cache hit
return readable
}
// cache miss
readable := aclObj.AllowNsOp(ns, acl.NamespaceCapabilityReadJob)
readableNamespaces[ns] = readable
return readable
}
// Verify the arguments
if args.NodeID == "" {
return fmt.Errorf("missing node ID")
}
// Setup the blocking query
opts := blockingOptions{
queryOpts: &args.QueryOptions,
queryMeta: &reply.QueryMeta,
run: func(ws memdb.WatchSet, state *state.StateStore) error {
// Look for the node
allocs, err := state.AllocsByNode(ws, args.NodeID)
if err != nil {
return err
}
// Setup the output
if n := len(allocs); n != 0 {
reply.Allocs = make([]*structs.Allocation, 0, n)
for _, alloc := range allocs {
if readNS(alloc.Namespace) {
reply.Allocs = append(reply.Allocs, alloc)
}
// Get the max of all allocs since
// subsequent requests need to start
// from the latest index
reply.Index = maxUint64(reply.Index, alloc.ModifyIndex)
}
} else {
reply.Allocs = nil
// Use the last index that affected the nodes table
index, err := state.Index("allocs")
if err != nil {
return err
}
// Must provide non-zero index to prevent blocking
// Index 1 is impossible anyway (due to Raft internals)
if index == 0 {
reply.Index = 1
} else {
reply.Index = index
}
}
return nil
}}
return n.srv.blockingRPC(&opts)
}
// GetClientAllocs is used to request a lightweight list of alloc modify indexes
// per allocation.
func (n *Node) GetClientAllocs(args *structs.NodeSpecificRequest,
reply *structs.NodeClientAllocsResponse) error {
if done, err := n.srv.forward("Node.GetClientAllocs", args, args, reply); done {
// We have a valid node connection since there is no error from the
// forwarded server, so add the mapping to cache the
// connection and allow the server to send RPCs to the client.
if err == nil && n.ctx != nil && n.ctx.NodeID == "" {
n.ctx.NodeID = args.NodeID
n.srv.addNodeConn(n.ctx)
}
return err
}
defer metrics.MeasureSince([]string{"nomad", "client", "get_client_allocs"}, time.Now())
// Verify the arguments
if args.NodeID == "" {
return fmt.Errorf("missing node ID")
}
// numOldAllocs is used to detect if there is a garbage collection event
// that affects the node. When an allocation is garbage collected, the modify
// index does not change and thus the query won't unblock, even though the
// set of allocations on the node has changed.
var numOldAllocs int
// Setup the blocking query
opts := blockingOptions{
queryOpts: &args.QueryOptions,
queryMeta: &reply.QueryMeta,
run: func(ws memdb.WatchSet, state *state.StateStore) error {
// Look for the node
node, err := state.NodeByID(ws, args.NodeID)
if err != nil {
return err
}
var allocs []*structs.Allocation
if node != nil {
if args.SecretID == "" {
return fmt.Errorf("missing node secret ID for client status update")
} else if args.SecretID != node.SecretID {
return fmt.Errorf("node secret ID does not match")
}
// We have a valid node connection, so add the mapping to cache the
// connection and allow the server to send RPCs to the client. We only cache
// the connection if it is not being forwarded from another server.
if n.ctx != nil && n.ctx.NodeID == "" && !args.IsForwarded() {
n.ctx.NodeID = args.NodeID
n.srv.addNodeConn(n.ctx)
}
var err error
allocs, err = state.AllocsByNode(ws, args.NodeID)
if err != nil {
return err
}
}
reply.Allocs = make(map[string]uint64)
reply.MigrateTokens = make(map[string]string)
// preferTableIndex is used to determine whether we should build the
// response index based on the full table indexes versus the modify
// indexes of the allocations on the specific node. This is
// preferred in the case that the node doesn't yet have allocations
// or when we detect a GC that affects the node.
preferTableIndex := true
// Setup the output
if numAllocs := len(allocs); numAllocs != 0 {
preferTableIndex = false
for _, alloc := range allocs {
reply.Allocs[alloc.ID] = alloc.AllocModifyIndex
// If the allocation is going to do a migration, create a
// migration token so that the client can authenticate with
// the node hosting the previous allocation.
if alloc.ShouldMigrate() {
prevAllocation, err := state.AllocByID(ws, alloc.PreviousAllocation)
if err != nil {
return err
}
if prevAllocation != nil && prevAllocation.NodeID != alloc.NodeID {
allocNode, err := state.NodeByID(ws, prevAllocation.NodeID)
if err != nil {
return err
}
if allocNode == nil {
// Node must have been GC'd so skip the token
continue
}
token, err := structs.GenerateMigrateToken(prevAllocation.ID, allocNode.SecretID)
if err != nil {
return err
}
reply.MigrateTokens[alloc.ID] = token
}
}
reply.Index = maxUint64(reply.Index, alloc.ModifyIndex)
}
// Determine if we have fewer allocations than before. This
// indicates there was a garbage collection.
if numAllocs < numOldAllocs {
preferTableIndex = true
}
// Store the new number of allocations
numOldAllocs = numAllocs
}
if preferTableIndex {
// Use the last index that affected the nodes table
index, err := state.Index("allocs")
if err != nil {
return err
}
// Must provide non-zero index to prevent blocking
// Index 1 is impossible anyway (due to Raft internals)
if index == 0 {
reply.Index = 1
} else {
reply.Index = index
}
}
return nil
}}
return n.srv.blockingRPC(&opts)
}
// UpdateAlloc is used to update the client status of an allocation
func (n *Node) UpdateAlloc(args *structs.AllocUpdateRequest, reply *structs.GenericResponse) error {
if done, err := n.srv.forward("Node.UpdateAlloc", args, args, reply); done {
return err
}
defer metrics.MeasureSince([]string{"nomad", "client", "update_alloc"}, time.Now())
// Ensure at least a single alloc
if len(args.Alloc) == 0 {
return fmt.Errorf("must update at least one allocation")
}
// Ensure that evals aren't set from client RPCs
// We create them here before the raft update
if len(args.Evals) != 0 {
return fmt.Errorf("evals field must not be set")
}
// Update modified timestamp for client initiated allocation updates
now := time.Now()
var evals []*structs.Evaluation
for _, alloc := range args.Alloc {
alloc.ModifyTime = now.UTC().UnixNano()
// Add an evaluation if this is a failed alloc that is eligible for rescheduling
if alloc.ClientStatus == structs.AllocClientStatusFailed {
// Only create evaluations if this is an existing alloc,
// and eligible as per its task group's ReschedulePolicy
if existingAlloc, _ := n.srv.State().AllocByID(nil, alloc.ID); existingAlloc != nil {
job, err := n.srv.State().JobByID(nil, existingAlloc.Namespace, existingAlloc.JobID)
if err != nil {
n.logger.Error("UpdateAlloc unable to find job", "job", existingAlloc.JobID, "error", err)
continue
}
if job == nil {
n.logger.Debug("UpdateAlloc unable to find job", "job", existingAlloc.JobID)
continue
}
taskGroup := job.LookupTaskGroup(existingAlloc.TaskGroup)
if taskGroup != nil && existingAlloc.FollowupEvalID == "" && existingAlloc.RescheduleEligible(taskGroup.ReschedulePolicy, now) {
eval := &structs.Evaluation{
ID: uuid.Generate(),
Namespace: existingAlloc.Namespace,
TriggeredBy: structs.EvalTriggerRetryFailedAlloc,
JobID: existingAlloc.JobID,
Type: job.Type,
Priority: job.Priority,
Status: structs.EvalStatusPending,
}
evals = append(evals, eval)
}
}
}
}
// Add this to the batch
n.updatesLock.Lock()
n.updates = append(n.updates, args.Alloc...)
n.evals = append(n.evals, evals...)
// Start a new batch if none
future := n.updateFuture
if future == nil {
future = structs.NewBatchFuture()
n.updateFuture = future
n.updateTimer = time.AfterFunc(batchUpdateInterval, func() {
// Get the pending updates
n.updatesLock.Lock()
updates := n.updates
evals := n.evals
future := n.updateFuture
n.updates = nil
n.evals = nil
n.updateFuture = nil
n.updateTimer = nil
n.updatesLock.Unlock()
// Perform the batch update
n.batchUpdate(future, updates, evals)
})
}
n.updatesLock.Unlock()
// Wait for the future
if err := future.Wait(); err != nil {
return err
}
// Setup the response
reply.Index = future.Index()
return nil
}
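// Note that updates submitted through UpdateAlloc are not applied one at a
// time: they accumulate in n.updates/n.evals and are flushed by batchUpdate
// below as a single Raft apply once batchUpdateInterval elapses, with every
// caller in the batch sharing the resulting index via the BatchFuture.
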
// batchUpdate is used to update all the allocations
func (n *Node) batchUpdate(future *structs.BatchFuture, updates []*structs.Allocation, evals []*structs.Evaluation) {
// Group pending evals by jobID to prevent creating unnecessary evals
evalsByJobId := make(map[structs.NamespacedID]struct{})
var trimmedEvals []*structs.Evaluation
for _, eval := range evals {
namespacedID := structs.NamespacedID{
ID: eval.JobID,
Namespace: eval.Namespace,
}
_, exists := evalsByJobId[namespacedID]
if !exists {
trimmedEvals = append(trimmedEvals, eval)
evalsByJobId[namespacedID] = struct{}{}
}
}
if len(trimmedEvals) > 0 {
n.logger.Debug("adding evaluations for rescheduling failed allocations", "num_evals", len(trimmedEvals))
}
// Prepare the batch update
batch := &structs.AllocUpdateRequest{
Alloc: updates,
Evals: trimmedEvals,
WriteRequest: structs.WriteRequest{Region: n.srv.config.Region},
}
// Commit this update via Raft
var mErr multierror.Error
_, index, err := n.srv.raftApply(structs.AllocClientUpdateRequestType, batch)
if err != nil {
n.logger.Error("alloc update failed", "error", err)
mErr.Errors = append(mErr.Errors, err)
}
// For each allocation we are updating check if we should revoke any
// Vault Accessors
var revoke []*structs.VaultAccessor
for _, alloc := range updates {
// Skip any allocation that isn't dead on the client
if !alloc.Terminated() {
continue
}
// Determine if there are any Vault accessors for the allocation
ws := memdb.NewWatchSet()
accessors, err := n.srv.State().VaultAccessorsByAlloc(ws, alloc.ID)
if err != nil {
n.logger.Error("looking up Vault accessors for alloc failed", "alloc_id", alloc.ID, "error", err)
mErr.Errors = append(mErr.Errors, err)
}
revoke = append(revoke, accessors...)
}
if l := len(revoke); l != 0 {
n.logger.Debug("revoking accessors due to terminal allocations", "num_accessors", l)
if err := n.srv.vault.RevokeTokens(context.Background(), revoke, true); err != nil {
n.logger.Error("batched Vault accessor revocation failed", "error", err)
mErr.Errors = append(mErr.Errors, err)
}
}
// Respond to the future
future.Respond(index, mErr.ErrorOrNil())
}
// List is used to list the available nodes
func (n *Node) List(args *structs.NodeListRequest,
reply *structs.NodeListResponse) error {
if done, err := n.srv.forward("Node.List", args, args, reply); done {
return err
}
defer metrics.MeasureSince([]string{"nomad", "client", "list"}, time.Now())
// Check node read permissions
if aclObj, err := n.srv.ResolveToken(args.AuthToken); err != nil {
return err
} else if aclObj != nil && !aclObj.AllowNodeRead() {
return structs.ErrPermissionDenied
}
// Setup the blocking query
opts := blockingOptions{
queryOpts: &args.QueryOptions,
queryMeta: &reply.QueryMeta,
run: func(ws memdb.WatchSet, state *state.StateStore) error {
// Capture all the nodes
var err error
var iter memdb.ResultIterator
if prefix := args.QueryOptions.Prefix; prefix != "" {
iter, err = state.NodesByIDPrefix(ws, prefix)
} else {
iter, err = state.Nodes(ws)
}
if err != nil {
return err
}
var nodes []*structs.NodeListStub
for {
raw := iter.Next()
if raw == nil {
break
}
node := raw.(*structs.Node)
nodes = append(nodes, node.Stub())
}
reply.Nodes = nodes
// Use the last index that affected the jobs table
index, err := state.Index("nodes")
if err != nil {
return err
}
reply.Index = index
// Set the query response
n.srv.setQueryMeta(&reply.QueryMeta)
return nil
}}
return n.srv.blockingRPC(&opts)
}
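// Illustrative sketch only: Node.List callers can narrow the listing to node
// IDs sharing a prefix, e.g.
//
//	args := &structs.NodeListRequest{
//		QueryOptions: structs.QueryOptions{Region: region, Prefix: "f7b3"},
//	}
//
// which routes the lookup through NodesByIDPrefix above.
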
// createNodeEvals is used to create evaluations for each alloc on a node.
// Each Eval is scoped to a job, so we need to potentially trigger many evals.
func (n *Node) createNodeEvals(nodeID string, nodeIndex uint64) ([]string, uint64, error) {
// Snapshot the state
snap, err := n.srv.fsm.State().Snapshot()
if err != nil {
return nil, 0, fmt.Errorf("failed to snapshot state: %v", err)
}
// Find all the allocations for this node
ws := memdb.NewWatchSet()
allocs, err := snap.AllocsByNode(ws, nodeID)
if err != nil {
return nil, 0, fmt.Errorf("failed to find allocs for '%s': %v", nodeID, err)
}
sysJobsIter, err := snap.JobsByScheduler(ws, "system")
if err != nil {
return nil, 0, fmt.Errorf("failed to find system jobs for '%s': %v", nodeID, err)
}
var sysJobs []*structs.Job
for job := sysJobsIter.Next(); job != nil; job = sysJobsIter.Next() {
sysJobs = append(sysJobs, job.(*structs.Job))
}
// Fast-path if nothing to do
if len(allocs) == 0 && len(sysJobs) == 0 {
return nil, 0, nil
}
// Create an eval for each JobID affected
var evals []*structs.Evaluation
var evalIDs []string
jobIDs := make(map[string]struct{})
for _, alloc := range allocs {
// Deduplicate on JobID
if _, ok := jobIDs[alloc.JobID]; ok {
continue
}
jobIDs[alloc.JobID] = struct{}{}
// Create a new eval
eval := &structs.Evaluation{
ID: uuid.Generate(),
Namespace: alloc.Namespace,
Priority: alloc.Job.Priority,
Type: alloc.Job.Type,
TriggeredBy: structs.EvalTriggerNodeUpdate,
JobID: alloc.JobID,
NodeID: nodeID,
NodeModifyIndex: nodeIndex,
Status: structs.EvalStatusPending,
}
evals = append(evals, eval)
evalIDs = append(evalIDs, eval.ID)
}
// Create an evaluation for each system job.
for _, job := range sysJobs {
// Still dedup on JobID as the node may already have the system job.
if _, ok := jobIDs[job.ID]; ok {
continue
}
jobIDs[job.ID] = struct{}{}
// Create a new eval
eval := &structs.Evaluation{
ID: uuid.Generate(),
Namespace: job.Namespace,
Priority: job.Priority,
Type: job.Type,
TriggeredBy: structs.EvalTriggerNodeUpdate,
JobID: job.ID,
NodeID: nodeID,
NodeModifyIndex: nodeIndex,
Status: structs.EvalStatusPending,
}
evals = append(evals, eval)
evalIDs = append(evalIDs, eval.ID)
}
// Create the Raft transaction
update := &structs.EvalUpdateRequest{
Evals: evals,
WriteRequest: structs.WriteRequest{Region: n.srv.config.Region},
}
// Commit this evaluation via Raft
// XXX: There is a risk of partial failure where the node update succeeds
// but that the EvalUpdate does not.
_, evalIndex, err := n.srv.raftApply(structs.EvalUpdateRequestType, update)
if err != nil {
return nil, 0, err
}
return evalIDs, evalIndex, nil
}
// DeriveVaultToken is used by the clients to request wrapped Vault tokens for
// tasks
func (n *Node) DeriveVaultToken(args *structs.DeriveVaultTokenRequest,
reply *structs.DeriveVaultTokenResponse) error {
// setErr is a helper for setting the recoverable error on the reply and
// logging it
setErr := func(e error, recoverable bool) {
if e == nil {
return
}
re, ok := e.(*structs.RecoverableError)
if ok {
// No need to wrap if error is already a RecoverableError
reply.Error = re
} else {
reply.Error = structs.NewRecoverableError(e, recoverable).(*structs.RecoverableError)
}
n.logger.Error("DeriveVaultToken failed", "recoverable", recoverable, "error", e)
}
if done, err := n.srv.forward("Node.DeriveVaultToken", args, args, reply); done {
setErr(err, structs.IsRecoverable(err) || err == structs.ErrNoLeader)
return nil
}
defer metrics.MeasureSince([]string{"nomad", "client", "derive_vault_token"}, time.Now())
// Verify the arguments
if args.NodeID == "" {
setErr(fmt.Errorf("missing node ID"), false)
return nil
}
if args.SecretID == "" {
setErr(fmt.Errorf("missing node SecretID"), false)
return nil
}
if args.AllocID == "" {
setErr(fmt.Errorf("missing allocation ID"), false)
return nil
}
if len(args.Tasks) == 0 {
setErr(fmt.Errorf("no tasks specified"), false)
return nil
}
// Verify the following:
// * The Node exists and has the correct SecretID
// * The Allocation exists on the specified node
// * The allocation contains the given tasks and they each require Vault
// tokens
snap, err := n.srv.fsm.State().Snapshot()
if err != nil {
setErr(err, false)
return nil
}
ws := memdb.NewWatchSet()
node, err := snap.NodeByID(ws, args.NodeID)
if err != nil {
setErr(err, false)
return nil
}
if node == nil {
setErr(fmt.Errorf("Node %q does not exist", args.NodeID), false)
return nil
}
if node.SecretID != args.SecretID {
setErr(fmt.Errorf("SecretID mismatch"), false)
return nil
}
2016-08-18 17:50:47 +00:00
alloc, err := snap.AllocByID(ws, args.AllocID)
if err != nil {
setErr(err, false)
return nil
}
if alloc == nil {
setErr(fmt.Errorf("Allocation %q does not exist", args.AllocID), false)
return nil
}
if alloc.NodeID != args.NodeID {
setErr(fmt.Errorf("Allocation %q not running on Node %q", args.AllocID, args.NodeID), false)
return nil
}
if alloc.TerminalStatus() {
setErr(fmt.Errorf("Can't request Vault token for terminal allocation"), false)
return nil
}
// Check the policies
policies := alloc.Job.VaultPolicies()
if policies == nil {
setErr(fmt.Errorf("Job doesn't require Vault policies"), false)
return nil
}
tg, ok := policies[alloc.TaskGroup]
if !ok {
setErr(fmt.Errorf("Task group does not require Vault policies"), false)
return nil
}
var unneeded []string
for _, task := range args.Tasks {
taskVault := tg[task]
if taskVault == nil || len(taskVault.Policies) == 0 {
unneeded = append(unneeded, task)
}
}
if len(unneeded) != 0 {
e := fmt.Errorf("Requested Vault tokens for tasks without defined Vault policies: %s",
strings.Join(unneeded, ", "))
setErr(e, false)
return nil
}
// At this point the request is valid and we should contact Vault for
// tokens.
// Create an error group where we will spin up a fixed set of goroutines to
// handle deriving tokens, but where the whole group is canceled if any
// single request fails.
g, ctx := errgroup.WithContext(context.Background())
// Cap the handlers
handlers := len(args.Tasks)
if handlers > maxParallelRequestsPerDerive {
handlers = maxParallelRequestsPerDerive
}
// Create the Vault Tokens
input := make(chan string, handlers)
results := make(map[string]*vapi.Secret, len(args.Tasks))
for i := 0; i < handlers; i++ {
g.Go(func() error {
for {
select {
case task, ok := <-input:
if !ok {
return nil
}
secret, err := n.srv.vault.CreateToken(ctx, alloc, task)
if err != nil {
return err
2016-08-20 02:55:06 +00:00
}
results[task] = secret
case <-ctx.Done():
return nil
}
}
})
}
// Send the input
go func() {
defer close(input)
for _, task := range args.Tasks {
select {
case <-ctx.Done():
return
case input <- task:
}
}
}()
// Wait for everything to complete or for an error
createErr := g.Wait()
// Retrieve the results
accessors := make([]*structs.VaultAccessor, 0, len(results))
tokens := make(map[string]string, len(results))
for task, secret := range results {
w := secret.WrapInfo
tokens[task] = w.Token
accessor := &structs.VaultAccessor{
Accessor: w.WrappedAccessor,
Task: task,
NodeID: alloc.NodeID,
AllocID: alloc.ID,
CreationTTL: w.TTL,
}
accessors = append(accessors, accessor)
}
// If there was an error revoke the created tokens
if createErr != nil {
n.logger.Error("Vault token creation for alloc failed", "alloc_id", alloc.ID, "error", createErr)
if revokeErr := n.srv.vault.RevokeTokens(context.Background(), accessors, false); revokeErr != nil {
n.logger.Error("Vault token revocation for alloc failed", "alloc_id", alloc.ID, "error", revokeErr)
}
if rerr, ok := createErr.(*structs.RecoverableError); ok {
reply.Error = rerr
} else {
reply.Error = structs.NewRecoverableError(createErr, false).(*structs.RecoverableError)
}
return nil
}
// Commit to Raft before returning any of the tokens
req := structs.VaultAccessorsRequest{Accessors: accessors}
_, index, err := n.srv.raftApply(structs.VaultAccessorRegisterRequestType, &req)
if err != nil {
n.logger.Error("registering Vault accessors for alloc failed", "alloc_id", alloc.ID, "error", err)
// Determine if we can recover from the error
retry := false
switch err {
case raft.ErrNotLeader, raft.ErrLeadershipLost, raft.ErrRaftShutdown, raft.ErrEnqueueTimeout:
retry = true
}
setErr(err, retry)
return nil
}
reply.Index = index
reply.Tasks = tokens
n.srv.setQueryMeta(&reply.QueryMeta)
return nil
}
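// EmitEvents is used by client nodes to submit batches of node events to the
// servers so they can be persisted via Raft and attached to the corresponding
// nodes.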
func (n *Node) EmitEvents(args *structs.EmitNodeEventsRequest, reply *structs.EmitNodeEventsResponse) error {
if done, err := n.srv.forward("Node.EmitEvents", args, args, reply); done {
return err
}
defer metrics.MeasureSince([]string{"nomad", "client", "emit_events"}, time.Now())
if len(args.NodeEvents) == 0 {
return fmt.Errorf("no node events given")
}
for nodeID, events := range args.NodeEvents {
if len(events) == 0 {
return fmt.Errorf("no node events given for node %q", nodeID)
}
}
_, index, err := n.srv.raftApply(structs.UpsertNodeEventsType, args)
if err != nil {
n.logger.Error("upserting node events failed", "error", err)
return err
}
reply.Index = index
return nil
}