2015-06-07 19:14:41 +00:00
|
|
|
package nomad
|
|
|
|
|
|
|
|
import (
|
2016-08-18 21:31:44 +00:00
|
|
|
"context"
|
2015-06-07 19:14:41 +00:00
|
|
|
"fmt"
|
2016-08-16 06:11:57 +00:00
|
|
|
"strings"
|
2016-02-22 02:51:34 +00:00
|
|
|
"sync"
|
2015-06-07 19:14:41 +00:00
|
|
|
"time"
|
|
|
|
|
2016-08-18 21:31:44 +00:00
|
|
|
"golang.org/x/sync/errgroup"
|
|
|
|
|
2015-06-07 19:14:41 +00:00
|
|
|
"github.com/armon/go-metrics"
|
2015-12-22 22:44:33 +00:00
|
|
|
"github.com/hashicorp/go-memdb"
|
2016-08-22 20:57:27 +00:00
|
|
|
"github.com/hashicorp/go-multierror"
|
2017-09-15 04:42:19 +00:00
|
|
|
"github.com/hashicorp/nomad/acl"
|
2017-09-29 16:58:48 +00:00
|
|
|
"github.com/hashicorp/nomad/helper/uuid"
|
2016-06-01 10:47:19 +00:00
|
|
|
"github.com/hashicorp/nomad/nomad/state"
|
2015-06-07 19:14:41 +00:00
|
|
|
"github.com/hashicorp/nomad/nomad/structs"
|
2016-10-23 01:08:30 +00:00
|
|
|
"github.com/hashicorp/raft"
|
2016-08-18 21:31:44 +00:00
|
|
|
vapi "github.com/hashicorp/vault/api"
|
2015-06-07 19:14:41 +00:00
|
|
|
)
|
|
|
|
|
2016-02-22 02:51:34 +00:00
|
|
|
const (
	// batchUpdateInterval is how long we wait to batch allocation
	// client-status updates before applying them in a single Raft
	// transaction.
	batchUpdateInterval = 50 * time.Millisecond

	// maxParallelRequestsPerDerive is the maximum number of parallel Vault
	// create token requests that may be outstanding per derive request.
	maxParallelRequestsPerDerive = 16
)
|
|
|
|
|
2015-09-07 03:31:32 +00:00
|
|
|
// Node endpoint is used for client interactions.
type Node struct {
	// srv is the parent server this endpoint is registered on.
	srv *Server

	// updates holds pending client status updates for allocations.
	updates []*structs.Allocation

	// updateFuture is used to wait for the pending batch update
	// to complete. This may be nil if no batch is pending.
	updateFuture *batchFuture

	// updateTimer is the timer that will trigger the next batch
	// update, and may be nil if there is no batch pending.
	updateTimer *time.Timer

	// updatesLock synchronizes access to the updates list,
	// the future and the timer.
	updatesLock sync.Mutex
}
|
|
|
|
|
|
|
|
// Register is used to upsert a client that is available for scheduling
|
2015-09-07 03:31:32 +00:00
|
|
|
func (n *Node) Register(args *structs.NodeRegisterRequest, reply *structs.NodeUpdateResponse) error {
|
|
|
|
if done, err := n.srv.forward("Node.Register", args, args, reply); done {
|
2015-06-07 19:14:41 +00:00
|
|
|
return err
|
|
|
|
}
|
|
|
|
defer metrics.MeasureSince([]string{"nomad", "client", "register"}, time.Now())
|
|
|
|
|
|
|
|
// Validate the arguments
|
2015-07-04 00:47:55 +00:00
|
|
|
if args.Node == nil {
|
|
|
|
return fmt.Errorf("missing node for client registration")
|
|
|
|
}
|
|
|
|
if args.Node.ID == "" {
|
|
|
|
return fmt.Errorf("missing node ID for client registration")
|
|
|
|
}
|
|
|
|
if args.Node.Datacenter == "" {
|
2015-06-07 19:14:41 +00:00
|
|
|
return fmt.Errorf("missing datacenter for client registration")
|
|
|
|
}
|
2015-07-04 00:47:55 +00:00
|
|
|
if args.Node.Name == "" {
|
2015-06-07 19:14:41 +00:00
|
|
|
return fmt.Errorf("missing node name for client registration")
|
|
|
|
}
|
2016-08-16 06:11:57 +00:00
|
|
|
if len(args.Node.Attributes) == 0 {
|
|
|
|
return fmt.Errorf("missing attributes for client registration")
|
|
|
|
}
|
2016-08-19 17:50:49 +00:00
|
|
|
|
|
|
|
// COMPAT: Remove after 0.6
|
|
|
|
// Need to check if this node is <0.4.x since SecretID is new in 0.5
|
|
|
|
pre, err := nodePreSecretID(args.Node)
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
if args.Node.SecretID == "" && !pre {
|
|
|
|
return fmt.Errorf("missing node secret ID for client registration")
|
2016-08-16 06:11:57 +00:00
|
|
|
}
|
2015-06-07 19:14:41 +00:00
|
|
|
|
|
|
|
// Default the status if none is given
|
2015-07-04 00:47:55 +00:00
|
|
|
if args.Node.Status == "" {
|
|
|
|
args.Node.Status = structs.NodeStatusInit
|
2015-06-07 19:14:41 +00:00
|
|
|
}
|
2015-08-06 23:39:20 +00:00
|
|
|
if !structs.ValidNodeStatus(args.Node.Status) {
|
|
|
|
return fmt.Errorf("invalid status for node")
|
|
|
|
}
|
2015-06-07 19:14:41 +00:00
|
|
|
|
2016-07-12 17:29:23 +00:00
|
|
|
// Set the timestamp when the node is registered
|
|
|
|
args.Node.StatusUpdatedAt = time.Now().Unix()
|
|
|
|
|
2016-01-21 01:30:02 +00:00
|
|
|
// Compute the node class
|
|
|
|
if err := args.Node.ComputeClass(); err != nil {
|
|
|
|
return fmt.Errorf("failed to computed node class: %v", err)
|
|
|
|
}
|
|
|
|
|
2017-08-07 21:13:05 +00:00
|
|
|
// Look for the node so we can detect a state transition
|
2016-07-21 22:22:02 +00:00
|
|
|
snap, err := n.srv.fsm.State().Snapshot()
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
2017-02-08 04:31:23 +00:00
|
|
|
|
|
|
|
ws := memdb.NewWatchSet()
|
|
|
|
originalNode, err := snap.NodeByID(ws, args.Node.ID)
|
2016-07-21 22:22:02 +00:00
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
2016-08-19 17:50:49 +00:00
|
|
|
// Check if the SecretID has been tampered with
|
|
|
|
if !pre && originalNode != nil {
|
2016-10-27 05:05:44 +00:00
|
|
|
if args.Node.SecretID != originalNode.SecretID && originalNode.SecretID != "" {
|
2016-08-19 17:50:49 +00:00
|
|
|
return fmt.Errorf("node secret ID does not match. Not registering node.")
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2015-06-07 19:14:41 +00:00
|
|
|
// Commit this update via Raft
|
2015-09-07 03:31:32 +00:00
|
|
|
_, index, err := n.srv.raftApply(structs.NodeRegisterRequestType, args)
|
2015-06-07 19:14:41 +00:00
|
|
|
if err != nil {
|
2015-09-07 03:31:32 +00:00
|
|
|
n.srv.logger.Printf("[ERR] nomad.client: Register failed: %v", err)
|
2015-06-07 19:14:41 +00:00
|
|
|
return err
|
|
|
|
}
|
2015-08-06 23:39:20 +00:00
|
|
|
reply.NodeModifyIndex = index
|
|
|
|
|
|
|
|
// Check if we should trigger evaluations
|
2016-07-21 22:22:02 +00:00
|
|
|
originalStatus := structs.NodeStatusInit
|
|
|
|
if originalNode != nil {
|
|
|
|
originalStatus = originalNode.Status
|
|
|
|
}
|
|
|
|
transitionToReady := transitionedToReady(args.Node.Status, originalStatus)
|
|
|
|
if structs.ShouldDrainNode(args.Node.Status) || transitionToReady {
|
2015-09-07 03:31:32 +00:00
|
|
|
evalIDs, evalIndex, err := n.createNodeEvals(args.Node.ID, index)
|
2015-08-06 23:39:20 +00:00
|
|
|
if err != nil {
|
2015-09-07 03:31:32 +00:00
|
|
|
n.srv.logger.Printf("[ERR] nomad.client: eval creation failed: %v", err)
|
2015-08-06 23:39:20 +00:00
|
|
|
return err
|
|
|
|
}
|
|
|
|
reply.EvalIDs = evalIDs
|
|
|
|
reply.EvalCreateIndex = evalIndex
|
|
|
|
}
|
2015-07-06 20:34:32 +00:00
|
|
|
|
2015-08-23 00:37:50 +00:00
|
|
|
// Check if we need to setup a heartbeat
|
|
|
|
if !args.Node.TerminalStatus() {
|
2015-09-07 03:31:32 +00:00
|
|
|
ttl, err := n.srv.resetHeartbeatTimer(args.Node.ID)
|
2015-08-23 00:37:50 +00:00
|
|
|
if err != nil {
|
2015-09-07 03:31:32 +00:00
|
|
|
n.srv.logger.Printf("[ERR] nomad.client: heartbeat reset failed: %v", err)
|
2015-08-23 00:37:50 +00:00
|
|
|
return err
|
|
|
|
}
|
|
|
|
reply.HeartbeatTTL = ttl
|
|
|
|
}
|
|
|
|
|
2015-07-06 20:34:32 +00:00
|
|
|
// Set the reply index
|
|
|
|
reply.Index = index
|
2016-07-21 22:22:02 +00:00
|
|
|
snap, err = n.srv.fsm.State().Snapshot()
|
2016-06-10 05:16:02 +00:00
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
2016-06-01 10:47:19 +00:00
|
|
|
|
|
|
|
n.srv.peerLock.RLock()
|
|
|
|
defer n.srv.peerLock.RUnlock()
|
2016-06-10 05:16:02 +00:00
|
|
|
if err := n.constructNodeServerInfoResponse(snap, reply); err != nil {
|
2016-06-01 10:47:19 +00:00
|
|
|
n.srv.logger.Printf("[ERR] nomad.client: failed to populate NodeUpdateResponse: %v", err)
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
2016-08-16 06:11:57 +00:00
|
|
|
// nodePreSecretID is a helper that returns whether the node is on a version
|
|
|
|
// that is before SecretIDs were introduced
|
|
|
|
func nodePreSecretID(node *structs.Node) (bool, error) {
|
|
|
|
a := node.Attributes
|
|
|
|
if a == nil {
|
|
|
|
return false, fmt.Errorf("node doesn't have attributes set")
|
|
|
|
}
|
|
|
|
|
|
|
|
v, ok := a["nomad.version"]
|
|
|
|
if !ok {
|
|
|
|
return false, fmt.Errorf("missing Nomad version in attributes")
|
|
|
|
}
|
|
|
|
|
|
|
|
return !strings.HasPrefix(v, "0.5"), nil
|
|
|
|
}
|
|
|
|
|
2016-06-01 10:47:19 +00:00
|
|
|
// updateNodeUpdateResponse assumes the n.srv.peerLock is held for reading.
|
2016-06-10 05:07:21 +00:00
|
|
|
func (n *Node) constructNodeServerInfoResponse(snap *state.StateSnapshot, reply *structs.NodeUpdateResponse) error {
|
2017-02-03 00:07:15 +00:00
|
|
|
reply.LeaderRPCAddr = string(n.srv.raft.Leader())
|
2016-06-01 10:47:19 +00:00
|
|
|
|
|
|
|
// Reply with config information required for future RPC requests
|
|
|
|
reply.Servers = make([]*structs.NodeServerInfo, 0, len(n.srv.localPeers))
|
|
|
|
for k, v := range n.srv.localPeers {
|
|
|
|
reply.Servers = append(reply.Servers,
|
|
|
|
&structs.NodeServerInfo{
|
2017-02-03 00:07:15 +00:00
|
|
|
RPCAdvertiseAddr: string(k),
|
2016-06-11 03:26:15 +00:00
|
|
|
RPCMajorVersion: int32(v.MajorVersion),
|
|
|
|
RPCMinorVersion: int32(v.MinorVersion),
|
2016-06-01 10:47:19 +00:00
|
|
|
Datacenter: v.Datacenter,
|
|
|
|
})
|
|
|
|
}
|
|
|
|
|
2016-06-10 05:16:02 +00:00
|
|
|
// TODO(sean@): Use an indexed node count instead
|
|
|
|
//
|
|
|
|
// Snapshot is used only to iterate over all nodes to create a node
|
|
|
|
// count to send back to Nomad Clients in their heartbeat so Clients
|
|
|
|
// can estimate the size of the cluster.
|
2017-02-08 04:31:23 +00:00
|
|
|
ws := memdb.NewWatchSet()
|
|
|
|
iter, err := snap.Nodes(ws)
|
2016-06-01 10:47:19 +00:00
|
|
|
if err == nil {
|
|
|
|
for {
|
|
|
|
raw := iter.Next()
|
|
|
|
if raw == nil {
|
|
|
|
break
|
|
|
|
}
|
|
|
|
reply.NumNodes++
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2015-07-06 20:34:32 +00:00
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
2016-08-22 20:57:27 +00:00
|
|
|
// Deregister is used to remove a client from the cluster. If a client should
// just be made unavailable for scheduling, a status update is preferred.
func (n *Node) Deregister(args *structs.NodeDeregisterRequest, reply *structs.NodeUpdateResponse) error {
	if done, err := n.srv.forward("Node.Deregister", args, args, reply); done {
		return err
	}
	defer metrics.MeasureSince([]string{"nomad", "client", "deregister"}, time.Now())

	// Verify the arguments
	if args.NodeID == "" {
		return fmt.Errorf("missing node ID for client deregistration")
	}

	// Commit this update via Raft
	_, index, err := n.srv.raftApply(structs.NodeDeregisterRequestType, args)
	if err != nil {
		n.srv.logger.Printf("[ERR] nomad.client: Deregister failed: %v", err)
		return err
	}

	// Clear the heartbeat timer if any, so the removed node is not later
	// marked as failed by the heartbeat system.
	n.srv.clearHeartbeatTimer(args.NodeID)

	// Create the evaluations for this node so its allocations get
	// rescheduled elsewhere.
	evalIDs, evalIndex, err := n.createNodeEvals(args.NodeID, index)
	if err != nil {
		n.srv.logger.Printf("[ERR] nomad.client: eval creation failed: %v", err)
		return err
	}

	// Determine if there are any Vault accessors on the node
	ws := memdb.NewWatchSet()
	accessors, err := n.srv.State().VaultAccessorsByNode(ws, args.NodeID)
	if err != nil {
		n.srv.logger.Printf("[ERR] nomad.client: looking up accessors for node %q failed: %v", args.NodeID, err)
		return err
	}

	// Revoke any outstanding Vault tokens tied to the node's allocations.
	if l := len(accessors); l != 0 {
		n.srv.logger.Printf("[DEBUG] nomad.client: revoking %d accessors on node %q due to deregister", l, args.NodeID)
		if err := n.srv.vault.RevokeTokens(context.Background(), accessors, true); err != nil {
			n.srv.logger.Printf("[ERR] nomad.client: revoking accessors for node %q failed: %v", args.NodeID, err)
			return err
		}
	}

	// Setup the reply
	reply.EvalIDs = evalIDs
	reply.EvalCreateIndex = evalIndex
	reply.NodeModifyIndex = index
	reply.Index = index
	return nil
}
|
2015-07-06 20:50:40 +00:00
|
|
|
|
|
|
|
// UpdateStatus is used to update the status of a client node. Status changes
// may create node evaluations, revoke Vault tokens (on down), and re-arm the
// heartbeat timer (on any other status).
func (n *Node) UpdateStatus(args *structs.NodeUpdateStatusRequest, reply *structs.NodeUpdateResponse) error {
	if done, err := n.srv.forward("Node.UpdateStatus", args, args, reply); done {
		return err
	}
	defer metrics.MeasureSince([]string{"nomad", "client", "update_status"}, time.Now())

	// Verify the arguments
	if args.NodeID == "" {
		return fmt.Errorf("missing node ID for client status update")
	}
	if !structs.ValidNodeStatus(args.Status) {
		return fmt.Errorf("invalid status for node")
	}

	// Look for the node
	snap, err := n.srv.fsm.State().Snapshot()
	if err != nil {
		return err
	}

	ws := memdb.NewWatchSet()
	node, err := snap.NodeByID(ws, args.NodeID)
	if err != nil {
		return err
	}
	if node == nil {
		return fmt.Errorf("node not found")
	}

	// XXX: Could use the SecretID here but have to update the heartbeat system
	// to track SecretIDs.

	// Update the timestamp of when the node status was updated
	node.StatusUpdatedAt = time.Now().Unix()

	// Commit this update via Raft, but only when the status actually
	// changes; a no-op update avoids a Raft round-trip.
	var index uint64
	if node.Status != args.Status {
		_, index, err = n.srv.raftApply(structs.NodeUpdateStatusRequestType, args)
		if err != nil {
			n.srv.logger.Printf("[ERR] nomad.client: status update failed: %v", err)
			return err
		}
		reply.NodeModifyIndex = index
	}

	// Check if we should trigger evaluations
	transitionToReady := transitionedToReady(args.Status, node.Status)
	if structs.ShouldDrainNode(args.Status) || transitionToReady {
		evalIDs, evalIndex, err := n.createNodeEvals(args.NodeID, index)
		if err != nil {
			n.srv.logger.Printf("[ERR] nomad.client: eval creation failed: %v", err)
			return err
		}
		reply.EvalIDs = evalIDs
		reply.EvalCreateIndex = evalIndex
	}

	// Check if we need to setup a heartbeat
	switch args.Status {
	case structs.NodeStatusDown:
		// Determine if there are any Vault accessors on the node and
		// revoke them, since a down node's allocations no longer need
		// their tokens.
		accessors, err := n.srv.State().VaultAccessorsByNode(ws, args.NodeID)
		if err != nil {
			n.srv.logger.Printf("[ERR] nomad.client: looking up accessors for node %q failed: %v", args.NodeID, err)
			return err
		}

		if l := len(accessors); l != 0 {
			n.srv.logger.Printf("[DEBUG] nomad.client: revoking %d accessors on node %q due to down state", l, args.NodeID)
			if err := n.srv.vault.RevokeTokens(context.Background(), accessors, true); err != nil {
				n.srv.logger.Printf("[ERR] nomad.client: revoking accessors for node %q failed: %v", args.NodeID, err)
				return err
			}
		}
	default:
		// Any non-down status keeps the node alive: re-arm the timer.
		ttl, err := n.srv.resetHeartbeatTimer(args.NodeID)
		if err != nil {
			n.srv.logger.Printf("[ERR] nomad.client: heartbeat reset failed: %v", err)
			return err
		}
		reply.HeartbeatTTL = ttl
	}

	// Set the reply index and leader
	reply.Index = index
	n.srv.peerLock.RLock()
	defer n.srv.peerLock.RUnlock()
	if err := n.constructNodeServerInfoResponse(snap, reply); err != nil {
		n.srv.logger.Printf("[ERR] nomad.client: failed to populate NodeUpdateResponse: %v", err)
		return err
	}

	return nil
}
|
2015-07-06 21:23:15 +00:00
|
|
|
|
2016-07-21 22:22:02 +00:00
|
|
|
// transitionedToReady is a helper that takes a nodes new and old status and
|
|
|
|
// returns whether it has transistioned to ready.
|
|
|
|
func transitionedToReady(newStatus, oldStatus string) bool {
|
|
|
|
initToReady := oldStatus == structs.NodeStatusInit && newStatus == structs.NodeStatusReady
|
|
|
|
terminalToReady := oldStatus == structs.NodeStatusDown && newStatus == structs.NodeStatusReady
|
|
|
|
return initToReady || terminalToReady
|
|
|
|
}
|
|
|
|
|
2015-09-07 03:00:12 +00:00
|
|
|
// UpdateDrain is used to update the drain mode of a client node. Requires
// node write permission when ACLs are enabled.
func (n *Node) UpdateDrain(args *structs.NodeUpdateDrainRequest,
	reply *structs.NodeDrainUpdateResponse) error {
	if done, err := n.srv.forward("Node.UpdateDrain", args, args, reply); done {
		return err
	}
	defer metrics.MeasureSince([]string{"nomad", "client", "update_drain"}, time.Now())

	// Check node write permissions
	if aclObj, err := n.srv.ResolveToken(args.SecretID); err != nil {
		return err
	} else if aclObj != nil && !aclObj.AllowNodeWrite() {
		return structs.ErrPermissionDenied
	}

	// Verify the arguments
	if args.NodeID == "" {
		return fmt.Errorf("missing node ID for drain update")
	}

	// Look for the node
	snap, err := n.srv.fsm.State().Snapshot()
	if err != nil {
		return err
	}
	ws := memdb.NewWatchSet()
	node, err := snap.NodeByID(ws, args.NodeID)
	if err != nil {
		return err
	}
	if node == nil {
		return fmt.Errorf("node not found")
	}

	// Update the timestamp of when the node status was updated
	node.StatusUpdatedAt = time.Now().Unix()

	// Commit this update via Raft, only when the drain flag actually flips.
	var index uint64
	if node.Drain != args.Drain {
		_, index, err = n.srv.raftApply(structs.NodeUpdateDrainRequestType, args)
		if err != nil {
			n.srv.logger.Printf("[ERR] nomad.client: drain update failed: %v", err)
			return err
		}
		reply.NodeModifyIndex = index
	}

	// Always attempt to create Node evaluations because there may be a System
	// job registered that should be evaluated.
	evalIDs, evalIndex, err := n.createNodeEvals(args.NodeID, index)
	if err != nil {
		n.srv.logger.Printf("[ERR] nomad.client: eval creation failed: %v", err)
		return err
	}
	reply.EvalIDs = evalIDs
	reply.EvalCreateIndex = evalIndex

	// Set the reply index
	reply.Index = index
	return nil
}
|
|
|
|
|
2015-08-16 01:20:35 +00:00
|
|
|
// Evaluate is used to force a re-evaluation of the node. Requires node write
// permission when ACLs are enabled.
func (n *Node) Evaluate(args *structs.NodeEvaluateRequest, reply *structs.NodeUpdateResponse) error {
	if done, err := n.srv.forward("Node.Evaluate", args, args, reply); done {
		return err
	}
	defer metrics.MeasureSince([]string{"nomad", "client", "evaluate"}, time.Now())

	// Check node write permissions
	if aclObj, err := n.srv.ResolveToken(args.SecretID); err != nil {
		return err
	} else if aclObj != nil && !aclObj.AllowNodeWrite() {
		return structs.ErrPermissionDenied
	}

	// Verify the arguments
	if args.NodeID == "" {
		return fmt.Errorf("missing node ID for evaluation")
	}

	// Look for the node
	snap, err := n.srv.fsm.State().Snapshot()
	if err != nil {
		return err
	}
	ws := memdb.NewWatchSet()
	node, err := snap.NodeByID(ws, args.NodeID)
	if err != nil {
		return err
	}
	if node == nil {
		return fmt.Errorf("node not found")
	}

	// Create the evaluation, pinned at the node's current modify index.
	evalIDs, evalIndex, err := n.createNodeEvals(args.NodeID, node.ModifyIndex)
	if err != nil {
		n.srv.logger.Printf("[ERR] nomad.client: eval creation failed: %v", err)
		return err
	}
	reply.EvalIDs = evalIDs
	reply.EvalCreateIndex = evalIndex

	// Set the reply index
	reply.Index = evalIndex

	n.srv.peerLock.RLock()
	defer n.srv.peerLock.RUnlock()
	if err := n.constructNodeServerInfoResponse(snap, reply); err != nil {
		n.srv.logger.Printf("[ERR] nomad.client: failed to populate NodeUpdateResponse: %v", err)
		return err
	}
	return nil
}
|
|
|
|
|
2015-10-20 17:57:53 +00:00
|
|
|
// GetNode is used to request information about a specific node. It is a
// blocking query; the node's SecretID is stripped from the response.
func (n *Node) GetNode(args *structs.NodeSpecificRequest,
	reply *structs.SingleNodeResponse) error {
	if done, err := n.srv.forward("Node.GetNode", args, args, reply); done {
		return err
	}
	defer metrics.MeasureSince([]string{"nomad", "client", "get_node"}, time.Now())

	// Check node read permissions
	if aclObj, err := n.srv.ResolveToken(args.SecretID); err != nil {
		return err
	} else if aclObj != nil && !aclObj.AllowNodeRead() {
		return structs.ErrPermissionDenied
	}

	// Setup the blocking query
	opts := blockingOptions{
		queryOpts: &args.QueryOptions,
		queryMeta: &reply.QueryMeta,
		run: func(ws memdb.WatchSet, state *state.StateStore) error {
			// Verify the arguments
			if args.NodeID == "" {
				return fmt.Errorf("missing node ID")
			}

			// Look for the node
			out, err := state.NodeByID(ws, args.NodeID)
			if err != nil {
				return err
			}

			// Setup the output
			if out != nil {
				// Clear the secret ID on a copy so the state store's
				// object is not mutated.
				reply.Node = out.Copy()
				reply.Node.SecretID = ""
				reply.Index = out.ModifyIndex
			} else {
				// Use the last index that affected the nodes table
				index, err := state.Index("nodes")
				if err != nil {
					return err
				}
				reply.Node = nil
				reply.Index = index
			}

			// Set the query response
			n.srv.setQueryMeta(&reply.QueryMeta)
			return nil
		}}
	return n.srv.blockingRPC(&opts)
}
|
2015-08-06 23:39:20 +00:00
|
|
|
|
2015-10-20 17:57:53 +00:00
|
|
|
// GetAllocs is used to request allocations for a specific node
|
2015-09-07 03:31:32 +00:00
|
|
|
func (n *Node) GetAllocs(args *structs.NodeSpecificRequest,
|
2015-08-23 02:17:49 +00:00
|
|
|
reply *structs.NodeAllocsResponse) error {
|
2015-09-07 03:31:32 +00:00
|
|
|
if done, err := n.srv.forward("Node.GetAllocs", args, args, reply); done {
|
2015-08-23 02:17:49 +00:00
|
|
|
return err
|
|
|
|
}
|
|
|
|
defer metrics.MeasureSince([]string{"nomad", "client", "get_allocs"}, time.Now())
|
|
|
|
|
2017-09-15 04:42:19 +00:00
|
|
|
// Check node read and namespace job read permissions
|
2017-10-09 22:49:04 +00:00
|
|
|
aclObj, err := n.srv.ResolveToken(args.SecretID)
|
2017-09-15 21:27:11 +00:00
|
|
|
if err != nil {
|
2017-09-15 04:42:19 +00:00
|
|
|
return err
|
2017-09-15 21:27:11 +00:00
|
|
|
}
|
|
|
|
if aclObj != nil && !aclObj.AllowNodeRead() {
|
|
|
|
return structs.ErrPermissionDenied
|
|
|
|
}
|
|
|
|
|
|
|
|
// cache namespace perms
|
|
|
|
readableNamespaces := map[string]bool{}
|
|
|
|
|
|
|
|
// readNS is a caching namespace read-job helper
|
|
|
|
readNS := func(ns string) bool {
|
|
|
|
if aclObj == nil {
|
|
|
|
// ACLs are disabled; everything is readable
|
|
|
|
return true
|
2017-09-15 04:42:19 +00:00
|
|
|
}
|
2017-09-15 21:27:11 +00:00
|
|
|
|
|
|
|
if readable, ok := readableNamespaces[ns]; ok {
|
|
|
|
// cache hit
|
|
|
|
return readable
|
2017-09-15 04:42:19 +00:00
|
|
|
}
|
2017-09-15 21:27:11 +00:00
|
|
|
|
|
|
|
// cache miss
|
|
|
|
readable := aclObj.AllowNsOp(ns, acl.NamespaceCapabilityReadJob)
|
|
|
|
readableNamespaces[ns] = readable
|
|
|
|
return readable
|
2017-09-15 04:42:19 +00:00
|
|
|
}
|
|
|
|
|
2015-08-23 02:17:49 +00:00
|
|
|
// Verify the arguments
|
|
|
|
if args.NodeID == "" {
|
|
|
|
return fmt.Errorf("missing node ID")
|
|
|
|
}
|
|
|
|
|
|
|
|
// Setup the blocking query
|
|
|
|
opts := blockingOptions{
|
2015-10-29 21:47:39 +00:00
|
|
|
queryOpts: &args.QueryOptions,
|
|
|
|
queryMeta: &reply.QueryMeta,
|
2017-02-08 04:31:23 +00:00
|
|
|
run: func(ws memdb.WatchSet, state *state.StateStore) error {
|
2015-08-23 02:17:49 +00:00
|
|
|
// Look for the node
|
2017-02-08 04:31:23 +00:00
|
|
|
allocs, err := state.AllocsByNode(ws, args.NodeID)
|
2015-08-23 02:17:49 +00:00
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
|
|
|
// Setup the output
|
2017-09-15 21:27:11 +00:00
|
|
|
if n := len(allocs); n != 0 {
|
|
|
|
reply.Allocs = make([]*structs.Allocation, 0, n)
|
2015-08-23 02:17:49 +00:00
|
|
|
for _, alloc := range allocs {
|
2017-09-15 21:27:11 +00:00
|
|
|
if readNS(alloc.Namespace) {
|
|
|
|
reply.Allocs = append(reply.Allocs, alloc)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Get the max of all allocs since
|
|
|
|
// subsequent requests need to start
|
|
|
|
// from the latest index
|
2015-08-23 02:17:49 +00:00
|
|
|
reply.Index = maxUint64(reply.Index, alloc.ModifyIndex)
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
reply.Allocs = nil
|
|
|
|
|
|
|
|
// Use the last index that affected the nodes table
|
2017-02-08 04:31:23 +00:00
|
|
|
index, err := state.Index("allocs")
|
2015-08-23 02:17:49 +00:00
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
|
|
|
// Must provide non-zero index to prevent blocking
|
2016-01-29 14:29:52 +00:00
|
|
|
// Index 1 is impossible anyways (due to Raft internals)
|
|
|
|
if index == 0 {
|
|
|
|
reply.Index = 1
|
|
|
|
} else {
|
|
|
|
reply.Index = index
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return nil
|
|
|
|
}}
|
|
|
|
return n.srv.blockingRPC(&opts)
|
|
|
|
}
|
|
|
|
|
2016-02-01 21:57:35 +00:00
|
|
|
// GetClientAllocs is used to request a lightweight list of alloc modify indexes
// per allocation. It is a blocking query authenticated by the node's SecretID
// rather than an ACL token.
func (n *Node) GetClientAllocs(args *structs.NodeSpecificRequest,
	reply *structs.NodeClientAllocsResponse) error {
	if done, err := n.srv.forward("Node.GetClientAllocs", args, args, reply); done {
		return err
	}
	defer metrics.MeasureSince([]string{"nomad", "client", "get_client_allocs"}, time.Now())

	// Verify the arguments
	if args.NodeID == "" {
		return fmt.Errorf("missing node ID")
	}

	// Setup the blocking query
	opts := blockingOptions{
		queryOpts: &args.QueryOptions,
		queryMeta: &reply.QueryMeta,
		run: func(ws memdb.WatchSet, state *state.StateStore) error {
			// Look for the node
			node, err := state.NodeByID(ws, args.NodeID)
			if err != nil {
				return err
			}

			var allocs []*structs.Allocation
			if node != nil {
				// COMPAT: Remove in 0.6
				// Check if the node should have a SecretID set; only
				// pre-SecretID nodes may omit it.
				if args.SecretID == "" {
					if pre, err := nodePreSecretID(node); err != nil {
						return err
					} else if !pre {
						return fmt.Errorf("missing node secret ID for client status update")
					}
				} else if args.SecretID != node.SecretID {
					return fmt.Errorf("node secret ID does not match")
				}

				var err error
				allocs, err = state.AllocsByNode(ws, args.NodeID)
				if err != nil {
					return err
				}
			}

			reply.Allocs = make(map[string]uint64)
			// Setup the output: map alloc ID -> AllocModifyIndex so the
			// client can cheaply detect which allocations changed.
			if len(allocs) != 0 {
				for _, alloc := range allocs {
					reply.Allocs[alloc.ID] = alloc.AllocModifyIndex
					reply.Index = maxUint64(reply.Index, alloc.ModifyIndex)
				}
			} else {
				// Use the last index that affected the nodes table
				index, err := state.Index("allocs")
				if err != nil {
					return err
				}

				// Must provide non-zero index to prevent blocking
				// Index 1 is impossible anyways (due to Raft internals)
				if index == 0 {
					reply.Index = 1
				} else {
					reply.Index = index
				}
			}
			return nil
		}}
	return n.srv.blockingRPC(&opts)
}
|
|
|
|
|
2015-08-26 01:12:51 +00:00
|
|
|
// UpdateAlloc is used to update the client status of an allocation
|
2015-09-07 03:31:32 +00:00
|
|
|
func (n *Node) UpdateAlloc(args *structs.AllocUpdateRequest, reply *structs.GenericResponse) error {
|
|
|
|
if done, err := n.srv.forward("Node.UpdateAlloc", args, args, reply); done {
|
2015-08-26 01:12:51 +00:00
|
|
|
return err
|
|
|
|
}
|
|
|
|
defer metrics.MeasureSince([]string{"nomad", "client", "update_alloc"}, time.Now())
|
|
|
|
|
2016-02-22 05:03:24 +00:00
|
|
|
// Ensure at least a single alloc
|
2016-02-22 02:00:46 +00:00
|
|
|
if len(args.Alloc) == 0 {
|
|
|
|
return fmt.Errorf("must update at least one allocation")
|
2015-08-26 01:12:51 +00:00
|
|
|
}
|
|
|
|
|
2016-02-22 02:51:34 +00:00
|
|
|
// Add this to the batch
|
|
|
|
n.updatesLock.Lock()
|
|
|
|
n.updates = append(n.updates, args.Alloc...)
|
|
|
|
|
|
|
|
// Start a new batch if none
|
|
|
|
future := n.updateFuture
|
|
|
|
if future == nil {
|
|
|
|
future = NewBatchFuture()
|
|
|
|
n.updateFuture = future
|
|
|
|
n.updateTimer = time.AfterFunc(batchUpdateInterval, func() {
|
|
|
|
// Get the pending updates
|
|
|
|
n.updatesLock.Lock()
|
|
|
|
updates := n.updates
|
|
|
|
future := n.updateFuture
|
|
|
|
n.updates = nil
|
|
|
|
n.updateFuture = nil
|
|
|
|
n.updateTimer = nil
|
|
|
|
n.updatesLock.Unlock()
|
|
|
|
|
|
|
|
// Perform the batch update
|
|
|
|
n.batchUpdate(future, updates)
|
|
|
|
})
|
|
|
|
}
|
|
|
|
n.updatesLock.Unlock()
|
|
|
|
|
|
|
|
// Wait for the future
|
|
|
|
if err := future.Wait(); err != nil {
|
2015-08-26 01:12:51 +00:00
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
|
|
|
// Setup the response
|
2016-02-22 02:51:34 +00:00
|
|
|
reply.Index = future.Index()
|
2015-08-26 01:12:51 +00:00
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
2016-02-22 02:51:34 +00:00
|
|
|
// batchUpdate is used to update all the allocations
|
|
|
|
func (n *Node) batchUpdate(future *batchFuture, updates []*structs.Allocation) {
|
|
|
|
// Prepare the batch update
|
|
|
|
batch := &structs.AllocUpdateRequest{
|
|
|
|
Alloc: updates,
|
|
|
|
WriteRequest: structs.WriteRequest{Region: n.srv.config.Region},
|
|
|
|
}
|
|
|
|
|
|
|
|
// Commit this update via Raft
|
2016-08-22 20:57:27 +00:00
|
|
|
var mErr multierror.Error
|
2016-02-22 02:51:34 +00:00
|
|
|
_, index, err := n.srv.raftApply(structs.AllocClientUpdateRequestType, batch)
|
|
|
|
if err != nil {
|
|
|
|
n.srv.logger.Printf("[ERR] nomad.client: alloc update failed: %v", err)
|
2016-08-22 20:57:27 +00:00
|
|
|
mErr.Errors = append(mErr.Errors, err)
|
|
|
|
}
|
|
|
|
|
2016-08-31 21:10:33 +00:00
|
|
|
// For each allocation we are updating check if we should revoke any
|
|
|
|
// Vault Accessors
|
2016-08-22 20:57:27 +00:00
|
|
|
var revoke []*structs.VaultAccessor
|
|
|
|
for _, alloc := range updates {
|
|
|
|
// Skip any allocation that isn't dead on the client
|
|
|
|
if !alloc.Terminated() {
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
|
|
|
|
// Determine if there are any Vault accessors for the allocation
|
2017-02-08 04:31:23 +00:00
|
|
|
ws := memdb.NewWatchSet()
|
|
|
|
accessors, err := n.srv.State().VaultAccessorsByAlloc(ws, alloc.ID)
|
2016-08-22 20:57:27 +00:00
|
|
|
if err != nil {
|
|
|
|
n.srv.logger.Printf("[ERR] nomad.client: looking up accessors for alloc %q failed: %v", alloc.ID, err)
|
|
|
|
mErr.Errors = append(mErr.Errors, err)
|
|
|
|
}
|
|
|
|
|
|
|
|
revoke = append(revoke, accessors...)
|
|
|
|
}
|
|
|
|
|
2017-03-06 18:25:26 +00:00
|
|
|
if l := len(revoke); l != 0 {
|
|
|
|
n.srv.logger.Printf("[DEBUG] nomad.client: revoking %d accessors due to terminal allocations", l)
|
2016-08-22 20:57:27 +00:00
|
|
|
if err := n.srv.vault.RevokeTokens(context.Background(), revoke, true); err != nil {
|
|
|
|
n.srv.logger.Printf("[ERR] nomad.client: batched accessor revocation failed: %v", err)
|
|
|
|
mErr.Errors = append(mErr.Errors, err)
|
|
|
|
}
|
2016-02-22 02:51:34 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
// Respond to the future
|
2016-08-22 20:57:27 +00:00
|
|
|
future.Respond(index, mErr.ErrorOrNil())
|
2016-02-22 02:51:34 +00:00
|
|
|
}
|
|
|
|
|
2015-09-06 21:28:29 +00:00
|
|
|
// List is used to list the available nodes
|
2015-09-07 03:31:32 +00:00
|
|
|
func (n *Node) List(args *structs.NodeListRequest,
|
2015-09-06 21:28:29 +00:00
|
|
|
reply *structs.NodeListResponse) error {
|
2015-09-07 03:31:32 +00:00
|
|
|
if done, err := n.srv.forward("Node.List", args, args, reply); done {
|
2015-09-06 21:28:29 +00:00
|
|
|
return err
|
|
|
|
}
|
|
|
|
defer metrics.MeasureSince([]string{"nomad", "client", "list"}, time.Now())
|
|
|
|
|
2017-09-15 05:01:18 +00:00
|
|
|
// Check node read permissions
|
2017-10-09 22:49:04 +00:00
|
|
|
if aclObj, err := n.srv.ResolveToken(args.SecretID); err != nil {
|
2017-09-15 05:01:18 +00:00
|
|
|
return err
|
|
|
|
} else if aclObj != nil && !aclObj.AllowNodeRead() {
|
|
|
|
return structs.ErrPermissionDenied
|
|
|
|
}
|
|
|
|
|
2015-10-28 18:21:39 +00:00
|
|
|
// Setup the blocking query
|
|
|
|
opts := blockingOptions{
|
2015-10-29 21:47:39 +00:00
|
|
|
queryOpts: &args.QueryOptions,
|
|
|
|
queryMeta: &reply.QueryMeta,
|
2017-02-08 04:31:23 +00:00
|
|
|
run: func(ws memdb.WatchSet, state *state.StateStore) error {
|
2015-10-28 18:21:39 +00:00
|
|
|
// Capture all the nodes
|
2017-02-08 04:31:23 +00:00
|
|
|
var err error
|
2015-12-22 22:44:33 +00:00
|
|
|
var iter memdb.ResultIterator
|
|
|
|
if prefix := args.QueryOptions.Prefix; prefix != "" {
|
2017-02-08 04:31:23 +00:00
|
|
|
iter, err = state.NodesByIDPrefix(ws, prefix)
|
2015-12-22 22:44:33 +00:00
|
|
|
} else {
|
2017-02-08 04:31:23 +00:00
|
|
|
iter, err = state.Nodes(ws)
|
2015-12-22 22:44:33 +00:00
|
|
|
}
|
2015-10-28 18:21:39 +00:00
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
2015-09-06 21:28:29 +00:00
|
|
|
|
2015-10-28 19:29:06 +00:00
|
|
|
var nodes []*structs.NodeListStub
|
2015-10-28 18:21:39 +00:00
|
|
|
for {
|
|
|
|
raw := iter.Next()
|
|
|
|
if raw == nil {
|
|
|
|
break
|
|
|
|
}
|
|
|
|
node := raw.(*structs.Node)
|
2015-10-28 19:29:06 +00:00
|
|
|
nodes = append(nodes, node.Stub())
|
2015-10-28 18:21:39 +00:00
|
|
|
}
|
2015-10-28 19:29:06 +00:00
|
|
|
reply.Nodes = nodes
|
2015-09-06 21:28:29 +00:00
|
|
|
|
2015-10-28 18:21:39 +00:00
|
|
|
// Use the last index that affected the jobs table
|
2017-02-08 04:31:23 +00:00
|
|
|
index, err := state.Index("nodes")
|
2015-10-28 18:21:39 +00:00
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
reply.Index = index
|
|
|
|
|
|
|
|
// Set the query response
|
|
|
|
n.srv.setQueryMeta(&reply.QueryMeta)
|
|
|
|
return nil
|
|
|
|
}}
|
|
|
|
return n.srv.blockingRPC(&opts)
|
2015-09-06 21:28:29 +00:00
|
|
|
}
|
|
|
|
|
2015-08-06 23:39:20 +00:00
|
|
|
// createNodeEvals is used to create evaluations for each alloc on a node.
|
|
|
|
// Each Eval is scoped to a job, so we need to potentially trigger many evals.
|
2015-09-07 03:31:32 +00:00
|
|
|
func (n *Node) createNodeEvals(nodeID string, nodeIndex uint64) ([]string, uint64, error) {
|
2015-08-06 23:39:20 +00:00
|
|
|
// Snapshot the state
|
2015-09-07 03:31:32 +00:00
|
|
|
snap, err := n.srv.fsm.State().Snapshot()
|
2015-08-06 23:39:20 +00:00
|
|
|
if err != nil {
|
|
|
|
return nil, 0, fmt.Errorf("failed to snapshot state: %v", err)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Find all the allocations for this node
|
2017-02-08 04:31:23 +00:00
|
|
|
ws := memdb.NewWatchSet()
|
|
|
|
allocs, err := snap.AllocsByNode(ws, nodeID)
|
2015-08-06 23:39:20 +00:00
|
|
|
if err != nil {
|
|
|
|
return nil, 0, fmt.Errorf("failed to find allocs for '%s': %v", nodeID, err)
|
|
|
|
}
|
|
|
|
|
2017-02-08 04:31:23 +00:00
|
|
|
sysJobsIter, err := snap.JobsByScheduler(ws, "system")
|
2015-10-20 17:57:53 +00:00
|
|
|
if err != nil {
|
|
|
|
return nil, 0, fmt.Errorf("failed to find system jobs for '%s': %v", nodeID, err)
|
|
|
|
}
|
2015-10-21 00:11:57 +00:00
|
|
|
|
|
|
|
var sysJobs []*structs.Job
|
|
|
|
for job := sysJobsIter.Next(); job != nil; job = sysJobsIter.Next() {
|
|
|
|
sysJobs = append(sysJobs, job.(*structs.Job))
|
|
|
|
}
|
2015-10-20 17:57:53 +00:00
|
|
|
|
2015-08-06 23:39:20 +00:00
|
|
|
// Fast-path if nothing to do
|
2015-10-21 00:11:57 +00:00
|
|
|
if len(allocs) == 0 && len(sysJobs) == 0 {
|
2015-08-06 23:39:20 +00:00
|
|
|
return nil, 0, nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// Create an eval for each JobID affected
|
|
|
|
var evals []*structs.Evaluation
|
|
|
|
var evalIDs []string
|
|
|
|
jobIDs := make(map[string]struct{})
|
|
|
|
|
|
|
|
for _, alloc := range allocs {
|
|
|
|
// Deduplicate on JobID
|
|
|
|
if _, ok := jobIDs[alloc.JobID]; ok {
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
jobIDs[alloc.JobID] = struct{}{}
|
|
|
|
|
|
|
|
// Create a new eval
|
|
|
|
eval := &structs.Evaluation{
|
2017-09-29 16:58:48 +00:00
|
|
|
ID: uuid.Generate(),
|
2017-09-07 23:56:15 +00:00
|
|
|
Namespace: alloc.Namespace,
|
2015-08-06 23:39:20 +00:00
|
|
|
Priority: alloc.Job.Priority,
|
|
|
|
Type: alloc.Job.Type,
|
|
|
|
TriggeredBy: structs.EvalTriggerNodeUpdate,
|
|
|
|
JobID: alloc.JobID,
|
|
|
|
NodeID: nodeID,
|
|
|
|
NodeModifyIndex: nodeIndex,
|
|
|
|
Status: structs.EvalStatusPending,
|
|
|
|
}
|
|
|
|
evals = append(evals, eval)
|
|
|
|
evalIDs = append(evalIDs, eval.ID)
|
|
|
|
}
|
|
|
|
|
2015-10-20 17:57:53 +00:00
|
|
|
// Create an evaluation for each system job.
|
2015-10-20 20:02:55 +00:00
|
|
|
for _, job := range sysJobs {
|
2015-10-20 17:57:53 +00:00
|
|
|
// Still dedup on JobID as the node may already have the system job.
|
2015-10-20 20:02:55 +00:00
|
|
|
if _, ok := jobIDs[job.ID]; ok {
|
2015-10-20 17:57:53 +00:00
|
|
|
continue
|
|
|
|
}
|
2015-10-20 20:02:55 +00:00
|
|
|
jobIDs[job.ID] = struct{}{}
|
2015-10-20 17:57:53 +00:00
|
|
|
|
|
|
|
// Create a new eval
|
|
|
|
eval := &structs.Evaluation{
|
2017-09-29 16:58:48 +00:00
|
|
|
ID: uuid.Generate(),
|
2017-09-07 23:56:15 +00:00
|
|
|
Namespace: job.Namespace,
|
2015-10-20 20:02:55 +00:00
|
|
|
Priority: job.Priority,
|
|
|
|
Type: job.Type,
|
2015-10-20 17:57:53 +00:00
|
|
|
TriggeredBy: structs.EvalTriggerNodeUpdate,
|
2015-10-20 20:02:55 +00:00
|
|
|
JobID: job.ID,
|
2015-10-20 17:57:53 +00:00
|
|
|
NodeID: nodeID,
|
|
|
|
NodeModifyIndex: nodeIndex,
|
|
|
|
Status: structs.EvalStatusPending,
|
|
|
|
}
|
|
|
|
evals = append(evals, eval)
|
|
|
|
evalIDs = append(evalIDs, eval.ID)
|
|
|
|
}
|
|
|
|
|
2015-08-06 23:39:20 +00:00
|
|
|
// Create the Raft transaction
|
|
|
|
update := &structs.EvalUpdateRequest{
|
|
|
|
Evals: evals,
|
2015-09-07 03:31:32 +00:00
|
|
|
WriteRequest: structs.WriteRequest{Region: n.srv.config.Region},
|
2015-08-06 23:39:20 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
// Commit this evaluation via Raft
|
2015-08-16 01:03:05 +00:00
|
|
|
// XXX: There is a risk of partial failure where the node update succeeds
|
|
|
|
// but that the EvalUpdate does not.
|
2015-09-07 03:31:32 +00:00
|
|
|
_, evalIndex, err := n.srv.raftApply(structs.EvalUpdateRequestType, update)
|
2015-08-06 23:39:20 +00:00
|
|
|
if err != nil {
|
|
|
|
return nil, 0, err
|
|
|
|
}
|
|
|
|
return evalIDs, evalIndex, nil
|
|
|
|
}
|
2016-02-22 02:51:34 +00:00
|
|
|
|
|
|
|
// batchFuture is used to wait on a batch update to complete
|
|
|
|
type batchFuture struct {
|
|
|
|
doneCh chan struct{}
|
|
|
|
err error
|
|
|
|
index uint64
|
|
|
|
}
|
|
|
|
|
|
|
|
// NewBatchFuture creates a new batch future
|
|
|
|
func NewBatchFuture() *batchFuture {
|
|
|
|
return &batchFuture{
|
|
|
|
doneCh: make(chan struct{}),
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// Wait is used to block for the future to complete and returns the error
|
|
|
|
func (b *batchFuture) Wait() error {
|
|
|
|
<-b.doneCh
|
|
|
|
return b.err
|
|
|
|
}
|
|
|
|
|
|
|
|
// Index is used to return the index of the batch, only after Wait()
|
|
|
|
func (b *batchFuture) Index() uint64 {
|
|
|
|
return b.index
|
|
|
|
}
|
|
|
|
|
|
|
|
// Respond is used to unblock the future
|
|
|
|
func (b *batchFuture) Respond(index uint64, err error) {
|
|
|
|
b.index = index
|
|
|
|
b.err = err
|
|
|
|
close(b.doneCh)
|
|
|
|
}
|
2016-08-18 17:50:47 +00:00
|
|
|
|
|
|
|
// DeriveVaultToken is used by the clients to request wrapped Vault tokens for
|
|
|
|
// tasks
|
|
|
|
func (n *Node) DeriveVaultToken(args *structs.DeriveVaultTokenRequest,
|
|
|
|
reply *structs.DeriveVaultTokenResponse) error {
|
2016-10-28 22:50:35 +00:00
|
|
|
|
|
|
|
// setErr is a helper for setting the recoverable error on the reply and
|
|
|
|
// logging it
|
|
|
|
setErr := func(e error, recoverable bool) {
|
2017-02-01 21:37:19 +00:00
|
|
|
if e == nil {
|
|
|
|
return
|
|
|
|
}
|
2017-02-01 21:18:12 +00:00
|
|
|
reply.Error = structs.NewRecoverableError(e, recoverable).(*structs.RecoverableError)
|
2016-10-28 22:50:35 +00:00
|
|
|
n.srv.logger.Printf("[ERR] nomad.client: DeriveVaultToken failed (recoverable %v): %v", recoverable, e)
|
|
|
|
}
|
|
|
|
|
2016-08-18 17:50:47 +00:00
|
|
|
if done, err := n.srv.forward("Node.DeriveVaultToken", args, args, reply); done {
|
2017-02-05 21:14:24 +00:00
|
|
|
setErr(err, structs.IsRecoverable(err) || err == structs.ErrNoLeader)
|
2016-10-23 01:08:30 +00:00
|
|
|
return nil
|
2016-08-18 17:50:47 +00:00
|
|
|
}
|
|
|
|
defer metrics.MeasureSince([]string{"nomad", "client", "derive_vault_token"}, time.Now())
|
|
|
|
|
|
|
|
// Verify the arguments
|
|
|
|
if args.NodeID == "" {
|
2016-10-28 22:50:35 +00:00
|
|
|
setErr(fmt.Errorf("missing node ID"), false)
|
|
|
|
return nil
|
2016-08-18 17:50:47 +00:00
|
|
|
}
|
|
|
|
if args.SecretID == "" {
|
2016-10-28 22:50:35 +00:00
|
|
|
setErr(fmt.Errorf("missing node SecretID"), false)
|
2016-10-23 01:08:30 +00:00
|
|
|
return nil
|
2016-08-18 17:50:47 +00:00
|
|
|
}
|
|
|
|
if args.AllocID == "" {
|
2016-10-28 22:50:35 +00:00
|
|
|
setErr(fmt.Errorf("missing allocation ID"), false)
|
2016-10-23 01:08:30 +00:00
|
|
|
return nil
|
2016-08-18 17:50:47 +00:00
|
|
|
}
|
|
|
|
if len(args.Tasks) == 0 {
|
2016-10-28 22:50:35 +00:00
|
|
|
setErr(fmt.Errorf("no tasks specified"), false)
|
2016-10-23 01:08:30 +00:00
|
|
|
return nil
|
2016-08-18 17:50:47 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
// Verify the following:
|
|
|
|
// * The Node exists and has the correct SecretID
|
|
|
|
// * The Allocation exists on the specified node
|
|
|
|
// * The allocation contains the given tasks and they each require Vault
|
|
|
|
// tokens
|
|
|
|
snap, err := n.srv.fsm.State().Snapshot()
|
|
|
|
if err != nil {
|
2016-10-28 22:50:35 +00:00
|
|
|
setErr(err, false)
|
2016-10-23 01:08:30 +00:00
|
|
|
return nil
|
2016-08-18 17:50:47 +00:00
|
|
|
}
|
2017-02-08 04:31:23 +00:00
|
|
|
ws := memdb.NewWatchSet()
|
|
|
|
node, err := snap.NodeByID(ws, args.NodeID)
|
2016-08-18 17:50:47 +00:00
|
|
|
if err != nil {
|
2016-10-28 22:50:35 +00:00
|
|
|
setErr(err, false)
|
2016-10-23 01:08:30 +00:00
|
|
|
return nil
|
2016-08-18 17:50:47 +00:00
|
|
|
}
|
|
|
|
if node == nil {
|
2016-10-28 22:50:35 +00:00
|
|
|
setErr(fmt.Errorf("Node %q does not exist", args.NodeID), false)
|
2016-10-23 01:08:30 +00:00
|
|
|
return nil
|
2016-08-18 17:50:47 +00:00
|
|
|
}
|
2016-08-19 01:57:33 +00:00
|
|
|
if node.SecretID != args.SecretID {
|
2016-10-28 22:50:35 +00:00
|
|
|
setErr(fmt.Errorf("SecretID mismatch"), false)
|
2016-10-23 01:08:30 +00:00
|
|
|
return nil
|
2016-08-19 01:57:33 +00:00
|
|
|
}
|
2016-08-18 17:50:47 +00:00
|
|
|
|
2017-02-08 04:31:23 +00:00
|
|
|
alloc, err := snap.AllocByID(ws, args.AllocID)
|
2016-08-18 17:50:47 +00:00
|
|
|
if err != nil {
|
2016-10-28 22:50:35 +00:00
|
|
|
setErr(err, false)
|
2016-10-23 01:08:30 +00:00
|
|
|
return nil
|
2016-08-18 17:50:47 +00:00
|
|
|
}
|
|
|
|
if alloc == nil {
|
2016-10-28 22:50:35 +00:00
|
|
|
setErr(fmt.Errorf("Allocation %q does not exist", args.AllocID), false)
|
2016-10-23 01:08:30 +00:00
|
|
|
return nil
|
2016-08-18 17:50:47 +00:00
|
|
|
}
|
|
|
|
if alloc.NodeID != args.NodeID {
|
2016-10-28 22:50:35 +00:00
|
|
|
setErr(fmt.Errorf("Allocation %q not running on Node %q", args.AllocID, args.NodeID), false)
|
2016-10-23 01:08:30 +00:00
|
|
|
return nil
|
2016-08-18 17:50:47 +00:00
|
|
|
}
|
2016-08-19 20:13:51 +00:00
|
|
|
if alloc.TerminalStatus() {
|
2016-10-28 22:50:35 +00:00
|
|
|
setErr(fmt.Errorf("Can't request Vault token for terminal allocation"), false)
|
2016-10-23 01:08:30 +00:00
|
|
|
return nil
|
2016-08-19 20:13:51 +00:00
|
|
|
}
|
2016-08-18 17:50:47 +00:00
|
|
|
|
|
|
|
// Check the policies
|
|
|
|
policies := alloc.Job.VaultPolicies()
|
|
|
|
if policies == nil {
|
2016-10-28 22:50:35 +00:00
|
|
|
setErr(fmt.Errorf("Job doesn't require Vault policies"), false)
|
2016-10-23 01:08:30 +00:00
|
|
|
return nil
|
2016-08-18 17:50:47 +00:00
|
|
|
}
|
|
|
|
tg, ok := policies[alloc.TaskGroup]
|
|
|
|
if !ok {
|
2016-10-28 22:50:35 +00:00
|
|
|
setErr(fmt.Errorf("Task group does not require Vault policies"), false)
|
2016-10-23 01:08:30 +00:00
|
|
|
return nil
|
2016-08-18 17:50:47 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
var unneeded []string
|
|
|
|
for _, task := range args.Tasks {
|
|
|
|
taskVault := tg[task]
|
2016-08-19 20:13:51 +00:00
|
|
|
if taskVault == nil || len(taskVault.Policies) == 0 {
|
2016-08-18 17:50:47 +00:00
|
|
|
unneeded = append(unneeded, task)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
if len(unneeded) != 0 {
|
2016-10-23 01:08:30 +00:00
|
|
|
e := fmt.Errorf("Requested Vault tokens for tasks without defined Vault policies: %s",
|
2016-08-18 17:50:47 +00:00
|
|
|
strings.Join(unneeded, ", "))
|
2016-10-28 22:50:35 +00:00
|
|
|
setErr(e, false)
|
2016-10-23 01:08:30 +00:00
|
|
|
return nil
|
2016-08-18 17:50:47 +00:00
|
|
|
}
|
|
|
|
|
2016-08-18 21:31:44 +00:00
|
|
|
// At this point the request is valid and we should contact Vault for
|
|
|
|
// tokens.
|
|
|
|
|
|
|
|
// Create an error group where we will spin up a fixed set of goroutines to
|
|
|
|
// handle deriving tokens but where if any fails the whole group is
|
|
|
|
// canceled.
|
|
|
|
g, ctx := errgroup.WithContext(context.Background())
|
|
|
|
|
|
|
|
// Cap the handlers
|
|
|
|
handlers := len(args.Tasks)
|
|
|
|
if handlers > maxParallelRequestsPerDerive {
|
|
|
|
handlers = maxParallelRequestsPerDerive
|
|
|
|
}
|
|
|
|
|
|
|
|
// Create the Vault Tokens
|
|
|
|
input := make(chan string, handlers)
|
|
|
|
results := make(map[string]*vapi.Secret, len(args.Tasks))
|
|
|
|
for i := 0; i < handlers; i++ {
|
|
|
|
g.Go(func() error {
|
2016-08-20 02:55:06 +00:00
|
|
|
for {
|
|
|
|
select {
|
|
|
|
case task, ok := <-input:
|
|
|
|
if !ok {
|
|
|
|
return nil
|
|
|
|
}
|
2016-08-18 21:31:44 +00:00
|
|
|
|
2016-08-20 02:55:06 +00:00
|
|
|
secret, err := n.srv.vault.CreateToken(ctx, alloc, task)
|
|
|
|
if err != nil {
|
2017-03-27 22:37:15 +00:00
|
|
|
wrapped := fmt.Sprintf("failed to create token for task %q on alloc %q: %v", task, alloc.ID, err)
|
|
|
|
return structs.WrapRecoverable(wrapped, err)
|
2016-08-20 02:55:06 +00:00
|
|
|
}
|
2016-08-18 21:31:44 +00:00
|
|
|
|
2016-08-20 02:55:06 +00:00
|
|
|
results[task] = secret
|
|
|
|
case <-ctx.Done():
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
}
|
2016-08-18 21:31:44 +00:00
|
|
|
})
|
|
|
|
}
|
|
|
|
|
|
|
|
// Send the input
|
|
|
|
go func() {
|
2016-08-20 02:55:06 +00:00
|
|
|
defer close(input)
|
2016-08-18 21:31:44 +00:00
|
|
|
for _, task := range args.Tasks {
|
|
|
|
select {
|
|
|
|
case <-ctx.Done():
|
|
|
|
return
|
|
|
|
case input <- task:
|
|
|
|
}
|
|
|
|
}
|
2016-08-20 02:55:06 +00:00
|
|
|
|
2016-08-18 21:31:44 +00:00
|
|
|
}()
|
|
|
|
|
|
|
|
// Wait for everything to complete or for an error
|
2016-10-23 01:08:30 +00:00
|
|
|
createErr := g.Wait()
|
2016-08-19 01:57:33 +00:00
|
|
|
|
2016-10-23 01:08:30 +00:00
|
|
|
// Retrieve the results
|
2016-08-19 01:57:33 +00:00
|
|
|
accessors := make([]*structs.VaultAccessor, 0, len(results))
|
|
|
|
tokens := make(map[string]string, len(results))
|
|
|
|
for task, secret := range results {
|
|
|
|
w := secret.WrapInfo
|
|
|
|
if w == nil {
|
|
|
|
return fmt.Errorf("Vault returned Secret without WrapInfo")
|
|
|
|
}
|
|
|
|
|
|
|
|
tokens[task] = w.Token
|
|
|
|
accessor := &structs.VaultAccessor{
|
|
|
|
Accessor: w.WrappedAccessor,
|
|
|
|
Task: task,
|
|
|
|
NodeID: alloc.NodeID,
|
|
|
|
AllocID: alloc.ID,
|
|
|
|
CreationTTL: w.TTL,
|
|
|
|
}
|
2016-08-18 17:50:47 +00:00
|
|
|
|
2016-08-19 01:57:33 +00:00
|
|
|
accessors = append(accessors, accessor)
|
|
|
|
}
|
|
|
|
|
2016-08-22 20:57:27 +00:00
|
|
|
// If there was an error revoke the created tokens
|
2016-10-23 01:08:30 +00:00
|
|
|
if createErr != nil {
|
2017-02-15 00:26:49 +00:00
|
|
|
n.srv.logger.Printf("[ERR] nomad.node: Vault token creation for alloc %q failed: %v", alloc.ID, createErr)
|
2016-10-28 22:50:35 +00:00
|
|
|
|
2016-10-23 01:08:30 +00:00
|
|
|
if revokeErr := n.srv.vault.RevokeTokens(context.Background(), accessors, false); revokeErr != nil {
|
2017-02-15 00:26:49 +00:00
|
|
|
n.srv.logger.Printf("[ERR] nomad.node: Vault token revocation for alloc %q failed: %v", alloc.ID, revokeErr)
|
2016-10-23 01:08:30 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
if rerr, ok := createErr.(*structs.RecoverableError); ok {
|
|
|
|
reply.Error = rerr
|
2017-03-29 20:59:43 +00:00
|
|
|
} else {
|
2017-02-01 21:18:12 +00:00
|
|
|
reply.Error = structs.NewRecoverableError(createErr, false).(*structs.RecoverableError)
|
2016-08-22 20:57:27 +00:00
|
|
|
}
|
2016-10-23 01:08:30 +00:00
|
|
|
|
|
|
|
return nil
|
2016-08-22 20:57:27 +00:00
|
|
|
}
|
|
|
|
|
2016-10-23 01:08:30 +00:00
|
|
|
// Commit to Raft before returning any of the tokens
|
2016-08-22 20:57:27 +00:00
|
|
|
req := structs.VaultAccessorsRequest{Accessors: accessors}
|
2016-08-19 01:57:33 +00:00
|
|
|
_, index, err := n.srv.raftApply(structs.VaultAccessorRegisterRequestType, &req)
|
|
|
|
if err != nil {
|
2017-02-15 00:26:49 +00:00
|
|
|
n.srv.logger.Printf("[ERR] nomad.client: Register Vault accessors for alloc %q failed: %v", alloc.ID, err)
|
2016-10-23 01:08:30 +00:00
|
|
|
|
|
|
|
// Determine if we can recover from the error
|
|
|
|
retry := false
|
|
|
|
switch err {
|
|
|
|
case raft.ErrNotLeader, raft.ErrLeadershipLost, raft.ErrRaftShutdown, raft.ErrEnqueueTimeout:
|
|
|
|
retry = true
|
|
|
|
}
|
|
|
|
|
2016-10-28 22:50:35 +00:00
|
|
|
setErr(err, retry)
|
2016-10-23 01:08:30 +00:00
|
|
|
return nil
|
2016-08-19 01:57:33 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
reply.Index = index
|
|
|
|
reply.Tasks = tokens
|
|
|
|
n.srv.setQueryMeta(&reply.QueryMeta)
|
2016-08-18 17:50:47 +00:00
|
|
|
return nil
|
|
|
|
}
|