package nomad

import (
    "context"
    "fmt"
    "strings"
    "sync"
    "time"

    "golang.org/x/sync/errgroup"

    metrics "github.com/armon/go-metrics"
    log "github.com/hashicorp/go-hclog"
    memdb "github.com/hashicorp/go-memdb"
    multierror "github.com/hashicorp/go-multierror"
    vapi "github.com/hashicorp/vault/api"

    "github.com/hashicorp/nomad/acl"
    "github.com/hashicorp/nomad/helper/uuid"
    "github.com/hashicorp/nomad/nomad/state"
    "github.com/hashicorp/nomad/nomad/structs"
    "github.com/hashicorp/raft"
    "github.com/pkg/errors"
)

const (
    // batchUpdateInterval is how long we wait to batch updates
    batchUpdateInterval = 50 * time.Millisecond

    // maxParallelRequestsPerDerive is the maximum number of parallel Vault
    // create token requests that may be outstanding per derive request
    maxParallelRequestsPerDerive = 16

    // NodeDrainEvents are the various drain messages
    NodeDrainEventDrainSet      = "Node drain strategy set"
    NodeDrainEventDrainDisabled = "Node drain disabled"
    NodeDrainEventDrainUpdated  = "Node drain strategy updated"

    // NodeEligibilityEventEligible is used when the node's eligibility is marked
    // eligible
    NodeEligibilityEventEligible = "Node marked as eligible for scheduling"

    // NodeEligibilityEventIneligible is used when the node's eligibility is marked
    // ineligible
    NodeEligibilityEventIneligible = "Node marked as ineligible for scheduling"

    // NodeHeartbeatEventReregistered is the message used when the node becomes
    // reregistered by the heartbeat.
    NodeHeartbeatEventReregistered = "Node reregistered by heartbeat"
)
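
// Note: client allocation updates that arrive within one batchUpdateInterval
// window are coalesced into a single Raft apply; see Node.UpdateAlloc and
// Node.batchUpdate below for the batching flow.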

// Node endpoint is used for client interactions
type Node struct {
    srv    *Server
    logger log.Logger

    // ctx provides context regarding the underlying connection
    ctx *RPCContext

    // updates holds pending client status updates for allocations
    updates []*structs.Allocation

    // evals holds pending rescheduling eval updates triggered by failed allocations
    evals []*structs.Evaluation

    // updateFuture is used to wait for the pending batch update
    // to complete. This may be nil if no batch is pending.
    updateFuture *structs.BatchFuture

    // updateTimer is the timer that will trigger the next batch
    // update, and may be nil if there is no batch pending.
    updateTimer *time.Timer

    // updatesLock synchronizes access to the updates list,
    // the future and the timer.
    updatesLock sync.Mutex
}

// Register is used to upsert a client that is available for scheduling
func (n *Node) Register(args *structs.NodeRegisterRequest, reply *structs.NodeUpdateResponse) error {
    if done, err := n.srv.forward("Node.Register", args, args, reply); done {
        // We have a valid node connection since there is no error from the
        // forwarded server, so add the mapping to cache the
        // connection and allow the server to send RPCs to the client.
        if err == nil && n.ctx != nil && n.ctx.NodeID == "" && !args.IsForwarded() {
            n.ctx.NodeID = args.Node.ID
            n.srv.addNodeConn(n.ctx)
        }

        return err
    }
    defer metrics.MeasureSince([]string{"nomad", "client", "register"}, time.Now())

    // Validate the arguments
    if args.Node == nil {
        return fmt.Errorf("missing node for client registration")
    }
    if args.Node.ID == "" {
        return fmt.Errorf("missing node ID for client registration")
    }
    if args.Node.Datacenter == "" {
        return fmt.Errorf("missing datacenter for client registration")
    }
    if args.Node.Name == "" {
        return fmt.Errorf("missing node name for client registration")
    }
    if len(args.Node.Attributes) == 0 {
        return fmt.Errorf("missing attributes for client registration")
    }
    if args.Node.SecretID == "" {
        return fmt.Errorf("missing node secret ID for client registration")
    }

    // Default the status if none is given
    if args.Node.Status == "" {
        args.Node.Status = structs.NodeStatusInit
    }
    if !structs.ValidNodeStatus(args.Node.Status) {
        return fmt.Errorf("invalid status for node")
    }

    // Default to eligible for scheduling if unset
    if args.Node.SchedulingEligibility == "" {
        args.Node.SchedulingEligibility = structs.NodeSchedulingEligible
    }

    // Set the timestamp when the node is registered
    args.Node.StatusUpdatedAt = time.Now().Unix()

    // Compute the node class
    if err := args.Node.ComputeClass(); err != nil {
        return fmt.Errorf("failed to compute node class: %v", err)
    }

    // Look for the node so we can detect a state transition
    snap, err := n.srv.fsm.State().Snapshot()
    if err != nil {
        return err
    }

    ws := memdb.NewWatchSet()
    originalNode, err := snap.NodeByID(ws, args.Node.ID)
    if err != nil {
        return err
    }

    // Check if the SecretID has been tampered with
    if originalNode != nil {
        if args.Node.SecretID != originalNode.SecretID && originalNode.SecretID != "" {
            return fmt.Errorf("node secret ID does not match. Not registering node.")
        }
    }

    // We have a valid node connection, so add the mapping to cache the
    // connection and allow the server to send RPCs to the client. We only cache
    // the connection if it is not being forwarded from another server.
    if n.ctx != nil && n.ctx.NodeID == "" && !args.IsForwarded() {
        n.ctx.NodeID = args.Node.ID
        n.srv.addNodeConn(n.ctx)
    }

    // Commit this update via Raft
    _, index, err := n.srv.raftApply(structs.NodeRegisterRequestType, args)
    if err != nil {
        n.logger.Error("register failed", "error", err)
        return err
    }
    reply.NodeModifyIndex = index

    // Check if we should trigger evaluations
    originalStatus := structs.NodeStatusInit
    if originalNode != nil {
        originalStatus = originalNode.Status
    }
    transitionToReady := transitionedToReady(args.Node.Status, originalStatus)
    if structs.ShouldDrainNode(args.Node.Status) || transitionToReady {
        evalIDs, evalIndex, err := n.createNodeEvals(args.Node.ID, index)
        if err != nil {
            n.logger.Error("eval creation failed", "error", err)
            return err
        }
        reply.EvalIDs = evalIDs
        reply.EvalCreateIndex = evalIndex
    }

    // Check if we need to setup a heartbeat
    if !args.Node.TerminalStatus() {
        ttl, err := n.srv.resetHeartbeatTimer(args.Node.ID)
        if err != nil {
            n.logger.Error("heartbeat reset failed", "error", err)
            return err
        }
        reply.HeartbeatTTL = ttl
    }

    // Set the reply index
    reply.Index = index
    snap, err = n.srv.fsm.State().Snapshot()
    if err != nil {
        return err
    }

    n.srv.peerLock.RLock()
    defer n.srv.peerLock.RUnlock()
    if err := n.constructNodeServerInfoResponse(snap, reply); err != nil {
        n.logger.Error("failed to populate NodeUpdateResponse", "error", err)
        return err
    }

    return nil
}

// constructNodeServerInfoResponse assumes the n.srv.peerLock is held for reading.
func (n *Node) constructNodeServerInfoResponse(snap *state.StateSnapshot, reply *structs.NodeUpdateResponse) error {
    reply.LeaderRPCAddr = string(n.srv.raft.Leader())

    // Reply with config information required for future RPC requests
    reply.Servers = make([]*structs.NodeServerInfo, 0, len(n.srv.localPeers))
    for _, v := range n.srv.localPeers {
        reply.Servers = append(reply.Servers,
            &structs.NodeServerInfo{
                RPCAdvertiseAddr: v.RPCAddr.String(),
                RPCMajorVersion:  int32(v.MajorVersion),
                RPCMinorVersion:  int32(v.MinorVersion),
                Datacenter:       v.Datacenter,
            })
    }

    // TODO(sean@): Use an indexed node count instead
    //
    // Snapshot is used only to iterate over all nodes to create a node
    // count to send back to Nomad Clients in their heartbeat so Clients
    // can estimate the size of the cluster.
    ws := memdb.NewWatchSet()
    iter, err := snap.Nodes(ws)
    if err == nil {
        for {
            raw := iter.Next()
            if raw == nil {
                break
            }
            reply.NumNodes++
        }
    }

    return nil
}

// Deregister is used to remove a client from the cluster. If a client should
// just be made unavailable for scheduling, a status update is preferred.
func (n *Node) Deregister(args *structs.NodeDeregisterRequest, reply *structs.NodeUpdateResponse) error {
    if done, err := n.srv.forward("Node.Deregister", args, args, reply); done {
        return err
    }
    defer metrics.MeasureSince([]string{"nomad", "client", "deregister"}, time.Now())

    if args.NodeID == "" {
        return fmt.Errorf("missing node ID for client deregistration")
    }

    // deregister takes a batch
    repack := &structs.NodeBatchDeregisterRequest{
        NodeIDs:      []string{args.NodeID},
        WriteRequest: args.WriteRequest,
    }

    return n.deregister(repack, reply, func() (interface{}, uint64, error) {
        return n.srv.raftApply(structs.NodeDeregisterRequestType, args)
    })
}

// BatchDeregister is used to remove client nodes from the cluster.
func (n *Node) BatchDeregister(args *structs.NodeBatchDeregisterRequest, reply *structs.NodeUpdateResponse) error {
    if done, err := n.srv.forward("Node.BatchDeregister", args, args, reply); done {
        return err
    }
    defer metrics.MeasureSince([]string{"nomad", "client", "batch_deregister"}, time.Now())

    if len(args.NodeIDs) == 0 {
        return fmt.Errorf("missing node IDs for client deregistration")
    }

    return n.deregister(args, reply, func() (interface{}, uint64, error) {
        return n.srv.raftApply(structs.NodeBatchDeregisterRequestType, args)
    })
}
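
// Both RPCs above funnel into the shared deregister helper below. Passing the
// Raft apply as a closure lets each endpoint submit its own original request
// type and payload, while the validation, heartbeat cleanup, token revocation,
// and eval creation logic stays in one place.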

// deregister takes a raftMessage closure, to support both Deregister and BatchDeregister
func (n *Node) deregister(args *structs.NodeBatchDeregisterRequest,
    reply *structs.NodeUpdateResponse,
    raftApplyFn func() (interface{}, uint64, error),
) error {
    // Check request permissions
    if aclObj, err := n.srv.ResolveToken(args.AuthToken); err != nil {
        return err
    } else if aclObj != nil && !aclObj.AllowNodeWrite() {
        return structs.ErrPermissionDenied
    }

    // Look for the node
    snap, err := n.srv.fsm.State().Snapshot()
    if err != nil {
        return err
    }

    ws := memdb.NewWatchSet()
    for _, nodeID := range args.NodeIDs {
        node, err := snap.NodeByID(ws, nodeID)
        if err != nil {
            return err
        }
        if node == nil {
            return fmt.Errorf("node not found")
        }
    }

    // Commit this update via Raft
    _, index, err := raftApplyFn()
    if err != nil {
        n.logger.Error("raft message failed", "error", err)
        return err
    }

    for _, nodeID := range args.NodeIDs {
        // Clear the heartbeat timer if any
        n.srv.clearHeartbeatTimer(nodeID)

        // Create the evaluations for this node
        evalIDs, evalIndex, err := n.createNodeEvals(nodeID, index)
        if err != nil {
            n.logger.Error("eval creation failed", "error", err)
            return err
        }

        // Determine if there are any Vault accessors on the node
        if accessors, err := snap.VaultAccessorsByNode(ws, nodeID); err != nil {
            n.logger.Error("looking up vault accessors for node failed", "node_id", nodeID, "error", err)
            return err
        } else if l := len(accessors); l > 0 {
            n.logger.Debug("revoking vault accessors on node due to deregister", "num_accessors", l, "node_id", nodeID)
            if err := n.srv.vault.RevokeTokens(context.Background(), accessors, true); err != nil {
                n.logger.Error("revoking vault accessors for node failed", "node_id", nodeID, "error", err)
                return err
            }
        }

        // Determine if there are any SI token accessors on the node
        if accessors, err := snap.SITokenAccessorsByNode(ws, nodeID); err != nil {
            n.logger.Error("looking up si accessors for node failed", "node_id", nodeID, "error", err)
            return err
        } else if l := len(accessors); l > 0 {
            n.logger.Debug("revoking si accessors on node due to deregister", "num_accessors", l, "node_id", nodeID)
            // Unlike with the Vault integration, there's no error returned here, since
            // bootstrapping the Consul client is elsewhere. Errors in revocation trigger
            // background retry attempts rather than inline error handling.
            _ = n.srv.consulACLs.RevokeTokens(context.Background(), accessors, true)
        }

        reply.EvalIDs = append(reply.EvalIDs, evalIDs...)
        // Set the reply eval create index just the first time
        if reply.EvalCreateIndex == 0 {
            reply.EvalCreateIndex = evalIndex
        }
    }

    reply.NodeModifyIndex = index
    reply.Index = index
    return nil
}

// UpdateStatus is used to update the status of a client node
func (n *Node) UpdateStatus(args *structs.NodeUpdateStatusRequest, reply *structs.NodeUpdateResponse) error {
    if done, err := n.srv.forward("Node.UpdateStatus", args, args, reply); done {
        // We have a valid node connection since there is no error from the
        // forwarded server, so add the mapping to cache the
        // connection and allow the server to send RPCs to the client.
        if err == nil && n.ctx != nil && n.ctx.NodeID == "" && !args.IsForwarded() {
            n.ctx.NodeID = args.NodeID
            n.srv.addNodeConn(n.ctx)
        }

        return err
    }
    defer metrics.MeasureSince([]string{"nomad", "client", "update_status"}, time.Now())

    // Verify the arguments
    if args.NodeID == "" {
        return fmt.Errorf("missing node ID for client status update")
    }
    if !structs.ValidNodeStatus(args.Status) {
        return fmt.Errorf("invalid status for node")
    }

    // Look for the node
    snap, err := n.srv.fsm.State().Snapshot()
    if err != nil {
        return err
    }

    ws := memdb.NewWatchSet()
    node, err := snap.NodeByID(ws, args.NodeID)
    if err != nil {
        return err
    }
    if node == nil {
        return fmt.Errorf("node not found")
    }

    // We have a valid node connection, so add the mapping to cache the
    // connection and allow the server to send RPCs to the client. We only cache
    // the connection if it is not being forwarded from another server.
    if n.ctx != nil && n.ctx.NodeID == "" && !args.IsForwarded() {
        n.ctx.NodeID = args.NodeID
        n.srv.addNodeConn(n.ctx)
    }

    // XXX: Could use the SecretID here but have to update the heartbeat system
    // to track SecretIDs.

    // Update the timestamp of when the node status was updated
    args.UpdatedAt = time.Now().Unix()

    // Commit this update via Raft
    var index uint64
    if node.Status != args.Status {
        // Attach an event if we are updating the node status to ready when it
        // is down via a heartbeat
        if node.Status == structs.NodeStatusDown && args.NodeEvent == nil {
            args.NodeEvent = structs.NewNodeEvent().
                SetSubsystem(structs.NodeEventSubsystemCluster).
                SetMessage(NodeHeartbeatEventReregistered)
        }

        _, index, err = n.srv.raftApply(structs.NodeUpdateStatusRequestType, args)
        if err != nil {
            n.logger.Error("status update failed", "error", err)
            return err
        }
        reply.NodeModifyIndex = index
    }

    // Check if we should trigger evaluations
    transitionToReady := transitionedToReady(args.Status, node.Status)
    if structs.ShouldDrainNode(args.Status) || transitionToReady {
        evalIDs, evalIndex, err := n.createNodeEvals(args.NodeID, index)
        if err != nil {
            n.logger.Error("eval creation failed", "error", err)
            return err
        }
        reply.EvalIDs = evalIDs
        reply.EvalCreateIndex = evalIndex
    }

    // Check if we need to setup a heartbeat
    switch args.Status {
    case structs.NodeStatusDown:
        // Determine if there are any Vault accessors on the node to cleanup
        if accessors, err := n.srv.State().VaultAccessorsByNode(ws, args.NodeID); err != nil {
            n.logger.Error("looking up vault accessors for node failed", "node_id", args.NodeID, "error", err)
            return err
        } else if l := len(accessors); l > 0 {
            n.logger.Debug("revoking vault accessors on node due to down state", "num_accessors", l, "node_id", args.NodeID)
            if err := n.srv.vault.RevokeTokens(context.Background(), accessors, true); err != nil {
                n.logger.Error("revoking vault accessors for node failed", "node_id", args.NodeID, "error", err)
                return err
            }
        }

        // Determine if there are any SI token accessors on the node to cleanup
        if accessors, err := n.srv.State().SITokenAccessorsByNode(ws, args.NodeID); err != nil {
            n.logger.Error("looking up SI accessors for node failed", "node_id", args.NodeID, "error", err)
            return err
        } else if l := len(accessors); l > 0 {
            n.logger.Debug("revoking SI accessors on node due to down state", "num_accessors", l, "node_id", args.NodeID)
            _ = n.srv.consulACLs.RevokeTokens(context.Background(), accessors, true)
        }
    default:
        ttl, err := n.srv.resetHeartbeatTimer(args.NodeID)
        if err != nil {
            n.logger.Error("heartbeat reset failed", "error", err)
            return err
        }
        reply.HeartbeatTTL = ttl
    }

    // Set the reply index and leader
    reply.Index = index
    n.srv.peerLock.RLock()
    defer n.srv.peerLock.RUnlock()
    if err := n.constructNodeServerInfoResponse(snap, reply); err != nil {
        n.logger.Error("failed to populate NodeUpdateResponse", "error", err)
        return err
    }

    return nil
}

// transitionedToReady is a helper that takes a node's new and old status and
// returns whether it has transitioned to ready.
func transitionedToReady(newStatus, oldStatus string) bool {
    initToReady := oldStatus == structs.NodeStatusInit && newStatus == structs.NodeStatusReady
    terminalToReady := oldStatus == structs.NodeStatusDown && newStatus == structs.NodeStatusReady
    return initToReady || terminalToReady
}

// UpdateDrain is used to update the drain mode of a client node
func (n *Node) UpdateDrain(args *structs.NodeUpdateDrainRequest,
    reply *structs.NodeDrainUpdateResponse) error {
    if done, err := n.srv.forward("Node.UpdateDrain", args, args, reply); done {
        return err
    }
    defer metrics.MeasureSince([]string{"nomad", "client", "update_drain"}, time.Now())

    // Check node write permissions
    if aclObj, err := n.srv.ResolveToken(args.AuthToken); err != nil {
        return err
    } else if aclObj != nil && !aclObj.AllowNodeWrite() {
        return structs.ErrPermissionDenied
    }

    // Verify the arguments
    if args.NodeID == "" {
        return fmt.Errorf("missing node ID for drain update")
    }
    if args.NodeEvent != nil {
        return fmt.Errorf("node event must not be set")
    }

    // Look for the node
    snap, err := n.srv.fsm.State().Snapshot()
    if err != nil {
        return err
    }
    node, err := snap.NodeByID(nil, args.NodeID)
    if err != nil {
        return err
    }
    if node == nil {
        return fmt.Errorf("node not found")
    }

    now := time.Now().UTC()

    // Update the timestamp of when the node status was updated
    args.UpdatedAt = now.Unix()

    // COMPAT: Remove in 0.9. Attempt to upgrade the request if it is of the old
    // format.
    if args.Drain && args.DrainStrategy == nil {
        args.DrainStrategy = &structs.DrainStrategy{
            DrainSpec: structs.DrainSpec{
                Deadline: -1 * time.Second, // Force drain
            },
        }
    }

    // Setup drain strategy
    if args.DrainStrategy != nil {
        // Mark start time for the drain
        if node.DrainStrategy == nil {
            args.DrainStrategy.StartedAt = now
        } else {
            args.DrainStrategy.StartedAt = node.DrainStrategy.StartedAt
        }

        // Mark the deadline time
        if args.DrainStrategy.Deadline.Nanoseconds() > 0 {
            args.DrainStrategy.ForceDeadline = now.Add(args.DrainStrategy.Deadline)
        }
    }

    // Construct the node event
    args.NodeEvent = structs.NewNodeEvent().SetSubsystem(structs.NodeEventSubsystemDrain)
    if node.DrainStrategy == nil && args.DrainStrategy != nil {
        args.NodeEvent.SetMessage(NodeDrainEventDrainSet)
    } else if node.DrainStrategy != nil && args.DrainStrategy != nil {
        args.NodeEvent.SetMessage(NodeDrainEventDrainUpdated)
    } else if node.DrainStrategy != nil && args.DrainStrategy == nil {
        args.NodeEvent.SetMessage(NodeDrainEventDrainDisabled)
    } else {
        args.NodeEvent = nil
    }

    // Commit this update via Raft
    _, index, err := n.srv.raftApply(structs.NodeUpdateDrainRequestType, args)
    if err != nil {
        n.logger.Error("drain update failed", "error", err)
        return err
    }
    reply.NodeModifyIndex = index

    // If the node is transitioning to be eligible, create Node evaluations
    // because there may be a System job registered that should be evaluated.
    if node.SchedulingEligibility == structs.NodeSchedulingIneligible && args.MarkEligible && args.DrainStrategy == nil {
        evalIDs, evalIndex, err := n.createNodeEvals(args.NodeID, index)
        if err != nil {
            n.logger.Error("eval creation failed", "error", err)
            return err
        }
        reply.EvalIDs = evalIDs
        reply.EvalCreateIndex = evalIndex
    }

    // Set the reply index
    reply.Index = index
    return nil
}

// UpdateEligibility is used to update the scheduling eligibility of a node
func (n *Node) UpdateEligibility(args *structs.NodeUpdateEligibilityRequest,
    reply *structs.NodeEligibilityUpdateResponse) error {
    if done, err := n.srv.forward("Node.UpdateEligibility", args, args, reply); done {
        return err
    }
    defer metrics.MeasureSince([]string{"nomad", "client", "update_eligibility"}, time.Now())

    // Check node write permissions
    if aclObj, err := n.srv.ResolveToken(args.AuthToken); err != nil {
        return err
    } else if aclObj != nil && !aclObj.AllowNodeWrite() {
        return structs.ErrPermissionDenied
    }

    // Verify the arguments
    if args.NodeID == "" {
        return fmt.Errorf("missing node ID for setting scheduling eligibility")
    }
    if args.NodeEvent != nil {
        return fmt.Errorf("node event must not be set")
    }

    // Check that only allowed types are set
    switch args.Eligibility {
    case structs.NodeSchedulingEligible, structs.NodeSchedulingIneligible:
    default:
        return fmt.Errorf("invalid scheduling eligibility %q", args.Eligibility)
    }

    // Look for the node
    snap, err := n.srv.fsm.State().Snapshot()
    if err != nil {
        return err
    }
    node, err := snap.NodeByID(nil, args.NodeID)
    if err != nil {
        return err
    }
    if node == nil {
        return fmt.Errorf("node not found")
    }

    if node.DrainStrategy != nil && args.Eligibility == structs.NodeSchedulingEligible {
        return fmt.Errorf("can not set node's scheduling eligibility to eligible while it is draining")
    }

    switch args.Eligibility {
    case structs.NodeSchedulingEligible, structs.NodeSchedulingIneligible:
    default:
        return fmt.Errorf("invalid scheduling eligibility %q", args.Eligibility)
    }

    // Update the timestamp of when the node status was updated
    args.UpdatedAt = time.Now().Unix()

    // Construct the node event
    args.NodeEvent = structs.NewNodeEvent().SetSubsystem(structs.NodeEventSubsystemCluster)
    if node.SchedulingEligibility == args.Eligibility {
        return nil // Nothing to do
    } else if args.Eligibility == structs.NodeSchedulingEligible {
        args.NodeEvent.SetMessage(NodeEligibilityEventEligible)
    } else {
        args.NodeEvent.SetMessage(NodeEligibilityEventIneligible)
    }

    // Commit this update via Raft
    outErr, index, err := n.srv.raftApply(structs.NodeUpdateEligibilityRequestType, args)
    if err != nil {
        n.logger.Error("eligibility update failed", "error", err)
        return err
    }
    if outErr != nil {
        if err, ok := outErr.(error); ok && err != nil {
            n.logger.Error("eligibility update failed", "error", err)
            return err
        }
    }

    // If the node is transitioning to be eligible, create Node evaluations
    // because there may be a System job registered that should be evaluated.
    if node.SchedulingEligibility == structs.NodeSchedulingIneligible && args.Eligibility == structs.NodeSchedulingEligible {
        evalIDs, evalIndex, err := n.createNodeEvals(args.NodeID, index)
        if err != nil {
            n.logger.Error("eval creation failed", "error", err)
            return err
        }
        reply.EvalIDs = evalIDs
        reply.EvalCreateIndex = evalIndex
    }

    // Set the reply index
    reply.Index = index
    return nil
}

// Evaluate is used to force a re-evaluation of the node
func (n *Node) Evaluate(args *structs.NodeEvaluateRequest, reply *structs.NodeUpdateResponse) error {
    if done, err := n.srv.forward("Node.Evaluate", args, args, reply); done {
        return err
    }
    defer metrics.MeasureSince([]string{"nomad", "client", "evaluate"}, time.Now())

    // Check node write permissions
    if aclObj, err := n.srv.ResolveToken(args.AuthToken); err != nil {
        return err
    } else if aclObj != nil && !aclObj.AllowNodeWrite() {
        return structs.ErrPermissionDenied
    }

    // Verify the arguments
    if args.NodeID == "" {
        return fmt.Errorf("missing node ID for evaluation")
    }

    // Look for the node
    snap, err := n.srv.fsm.State().Snapshot()
    if err != nil {
        return err
    }
    ws := memdb.NewWatchSet()
    node, err := snap.NodeByID(ws, args.NodeID)
    if err != nil {
        return err
    }
    if node == nil {
        return fmt.Errorf("node not found")
    }

    // Create the evaluation
    evalIDs, evalIndex, err := n.createNodeEvals(args.NodeID, node.ModifyIndex)
    if err != nil {
        n.logger.Error("eval creation failed", "error", err)
        return err
    }
    reply.EvalIDs = evalIDs
    reply.EvalCreateIndex = evalIndex

    // Set the reply index
    reply.Index = evalIndex

    n.srv.peerLock.RLock()
    defer n.srv.peerLock.RUnlock()
    if err := n.constructNodeServerInfoResponse(snap, reply); err != nil {
        n.logger.Error("failed to populate NodeUpdateResponse", "error", err)
        return err
    }
    return nil
}

// GetNode is used to request information about a specific node
func (n *Node) GetNode(args *structs.NodeSpecificRequest,
    reply *structs.SingleNodeResponse) error {
    if done, err := n.srv.forward("Node.GetNode", args, args, reply); done {
        return err
    }
    defer metrics.MeasureSince([]string{"nomad", "client", "get_node"}, time.Now())

    // Check node read permissions
    if aclObj, err := n.srv.ResolveToken(args.AuthToken); err != nil {
        // If ResolveToken had an unexpected error return that
        if err != structs.ErrTokenNotFound {
            return err
        }

        // Attempt to lookup AuthToken as a Node.SecretID since nodes
        // call this endpoint and don't have an ACL token.
        node, stateErr := n.srv.fsm.State().NodeBySecretID(nil, args.AuthToken)
        if stateErr != nil {
            // Return the original ResolveToken error with this err
            var merr multierror.Error
            merr.Errors = append(merr.Errors, err, stateErr)
            return merr.ErrorOrNil()
        }

        // Not a node or a valid ACL token
        if node == nil {
            return structs.ErrTokenNotFound
        }
    } else if aclObj != nil && !aclObj.AllowNodeRead() {
        return structs.ErrPermissionDenied
    }
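
    // blockingOptions feeds the server's blocking-query helper
    // (n.srv.blockingRPC): the run function below is evaluated against a state
    // snapshot and, when the request carries a minimum query index, re-run as
    // the memdb watch set (ws) fires or the query times out. GetAllocs and
    // GetClientAllocs below use the same pattern.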

    // Setup the blocking query
    opts := blockingOptions{
        queryOpts: &args.QueryOptions,
        queryMeta: &reply.QueryMeta,
        run: func(ws memdb.WatchSet, state *state.StateStore) error {
            // Verify the arguments
            if args.NodeID == "" {
                return fmt.Errorf("missing node ID")
            }

            // Look for the node
            out, err := state.NodeByID(ws, args.NodeID)
            if err != nil {
                return err
            }

            // Setup the output
            if out != nil {
                // Clear the secret ID
                reply.Node = out.Copy()
                reply.Node.SecretID = ""
                reply.Index = out.ModifyIndex
            } else {
                // Use the last index that affected the nodes table
                index, err := state.Index("nodes")
                if err != nil {
                    return err
                }
                reply.Node = nil
                reply.Index = index
            }

            // Set the query response
            n.srv.setQueryMeta(&reply.QueryMeta)
            return nil
        }}
    return n.srv.blockingRPC(&opts)
}

// GetAllocs is used to request allocations for a specific node
func (n *Node) GetAllocs(args *structs.NodeSpecificRequest,
    reply *structs.NodeAllocsResponse) error {
    if done, err := n.srv.forward("Node.GetAllocs", args, args, reply); done {
        return err
    }
    defer metrics.MeasureSince([]string{"nomad", "client", "get_allocs"}, time.Now())

    // Check node read and namespace job read permissions
    aclObj, err := n.srv.ResolveToken(args.AuthToken)
    if err != nil {
        return err
    }
    if aclObj != nil && !aclObj.AllowNodeRead() {
        return structs.ErrPermissionDenied
    }

    // cache namespace perms
    readableNamespaces := map[string]bool{}

    // readNS is a caching namespace read-job helper
    readNS := func(ns string) bool {
        if aclObj == nil {
            // ACLs are disabled; everything is readable
            return true
        }

        if readable, ok := readableNamespaces[ns]; ok {
            // cache hit
            return readable
        }

        // cache miss
        readable := aclObj.AllowNsOp(ns, acl.NamespaceCapabilityReadJob)
        readableNamespaces[ns] = readable
        return readable
    }

    // Verify the arguments
    if args.NodeID == "" {
        return fmt.Errorf("missing node ID")
    }

    // Setup the blocking query
    opts := blockingOptions{
        queryOpts: &args.QueryOptions,
        queryMeta: &reply.QueryMeta,
        run: func(ws memdb.WatchSet, state *state.StateStore) error {
            // Look for the node
            allocs, err := state.AllocsByNode(ws, args.NodeID)
            if err != nil {
                return err
            }

            // Setup the output
            if n := len(allocs); n != 0 {
                reply.Allocs = make([]*structs.Allocation, 0, n)
                for _, alloc := range allocs {
                    if readNS(alloc.Namespace) {
                        reply.Allocs = append(reply.Allocs, alloc)
                    }

                    // Get the max of all allocs since
                    // subsequent requests need to start
                    // from the latest index
                    reply.Index = maxUint64(reply.Index, alloc.ModifyIndex)
                }
            } else {
                reply.Allocs = nil

                // Use the last index that affected the nodes table
                index, err := state.Index("allocs")
                if err != nil {
                    return err
                }

                // Must provide non-zero index to prevent blocking
                // Index 1 is impossible anyways (due to Raft internals)
                if index == 0 {
                    reply.Index = 1
                } else {
                    reply.Index = index
                }
            }
            return nil
        }}
    return n.srv.blockingRPC(&opts)
}

// GetClientAllocs is used to request a lightweight list of alloc modify indexes
// per allocation.
func (n *Node) GetClientAllocs(args *structs.NodeSpecificRequest,
    reply *structs.NodeClientAllocsResponse) error {
    if done, err := n.srv.forward("Node.GetClientAllocs", args, args, reply); done {
        // We have a valid node connection since there is no error from the
        // forwarded server, so add the mapping to cache the
        // connection and allow the server to send RPCs to the client.
        if err == nil && n.ctx != nil && n.ctx.NodeID == "" && !args.IsForwarded() {
            n.ctx.NodeID = args.NodeID
            n.srv.addNodeConn(n.ctx)
        }

        return err
    }
    defer metrics.MeasureSince([]string{"nomad", "client", "get_client_allocs"}, time.Now())

    // Verify the arguments
    if args.NodeID == "" {
        return fmt.Errorf("missing node ID")
    }

    // numOldAllocs is used to detect if there is a garbage collection event
    // that affects the node. When an allocation is garbage collected, the
    // modify index does not change and thus the query won't unblock,
    // even though the set of allocations on the node has changed.
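    // Because numOldAllocs is captured by the run closure below, it survives
    // across successive executions of the blocking query, which is what lets a
    // drop in the allocation count (a GC) be detected between wake-ups.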
    var numOldAllocs int

    // Setup the blocking query
    opts := blockingOptions{
        queryOpts: &args.QueryOptions,
        queryMeta: &reply.QueryMeta,
        run: func(ws memdb.WatchSet, state *state.StateStore) error {
            // Look for the node
            node, err := state.NodeByID(ws, args.NodeID)
            if err != nil {
                return err
            }

            var allocs []*structs.Allocation
            if node != nil {
                if args.SecretID == "" {
                    return fmt.Errorf("missing node secret ID for client status update")
                } else if args.SecretID != node.SecretID {
                    return fmt.Errorf("node secret ID does not match")
                }

                // We have a valid node connection, so add the mapping to cache the
                // connection and allow the server to send RPCs to the client. We only cache
                // the connection if it is not being forwarded from another server.
                if n.ctx != nil && n.ctx.NodeID == "" && !args.IsForwarded() {
                    n.ctx.NodeID = args.NodeID
                    n.srv.addNodeConn(n.ctx)
                }

                var err error
                allocs, err = state.AllocsByNode(ws, args.NodeID)
                if err != nil {
                    return err
                }
            }

            reply.Allocs = make(map[string]uint64)
            reply.MigrateTokens = make(map[string]string)

            // preferTableIndex is used to determine whether we should build the
            // response index based on the full table indexes versus the modify
            // indexes of the allocations on the specific node. This is
            // preferred in the case that the node doesn't yet have allocations
            // or when we detect a GC that affects the node.
            preferTableIndex := true

            // Setup the output
            if numAllocs := len(allocs); numAllocs != 0 {
                preferTableIndex = false

                for _, alloc := range allocs {
                    reply.Allocs[alloc.ID] = alloc.AllocModifyIndex

                    // If the allocation is going to do a migration, create a
                    // migration token so that the client can authenticate with
                    // the node hosting the previous allocation.
                    if alloc.ShouldMigrate() {
                        prevAllocation, err := state.AllocByID(ws, alloc.PreviousAllocation)
                        if err != nil {
                            return err
                        }

                        if prevAllocation != nil && prevAllocation.NodeID != alloc.NodeID {
                            allocNode, err := state.NodeByID(ws, prevAllocation.NodeID)
                            if err != nil {
                                return err
                            }
                            if allocNode == nil {
                                // Node must have been GC'd so skip the token
                                continue
                            }

                            token, err := structs.GenerateMigrateToken(prevAllocation.ID, allocNode.SecretID)
                            if err != nil {
                                return err
                            }
                            reply.MigrateTokens[alloc.ID] = token
                        }
                    }

                    reply.Index = maxUint64(reply.Index, alloc.ModifyIndex)
                }

                // Determine if we have fewer allocations than before. This
                // indicates there was a garbage collection
                if numAllocs < numOldAllocs {
                    preferTableIndex = true
                }

                // Store the new number of allocations
                numOldAllocs = numAllocs
            }

            if preferTableIndex {
                // Use the last index that affected the nodes table
                index, err := state.Index("allocs")
                if err != nil {
                    return err
                }

                // Must provide non-zero index to prevent blocking
                // Index 1 is impossible anyways (due to Raft internals)
                if index == 0 {
                    reply.Index = 1
                } else {
                    reply.Index = index
                }
            }
            return nil
        }}
    return n.srv.blockingRPC(&opts)
}

// UpdateAlloc is used to update the client status of an allocation
func (n *Node) UpdateAlloc(args *structs.AllocUpdateRequest, reply *structs.GenericResponse) error {
    if done, err := n.srv.forward("Node.UpdateAlloc", args, args, reply); done {
        return err
    }
    defer metrics.MeasureSince([]string{"nomad", "client", "update_alloc"}, time.Now())

    // Ensure at least a single alloc
    if len(args.Alloc) == 0 {
        return fmt.Errorf("must update at least one allocation")
    }

    // Ensure that evals aren't set from client RPCs
    // We create them here before the raft update
    if len(args.Evals) != 0 {
        return fmt.Errorf("evals field must not be set")
    }

    // Update modified timestamp for client initiated allocation updates
    now := time.Now()
    var evals []*structs.Evaluation

    for _, alloc := range args.Alloc {
        alloc.ModifyTime = now.UTC().UnixNano()

        // Add an evaluation if this is a failed alloc that is eligible for rescheduling
        if alloc.ClientStatus == structs.AllocClientStatusFailed {
            // Only create evaluations if this is an existing alloc,
            // and eligible as per its task group's ReschedulePolicy
            if existingAlloc, _ := n.srv.State().AllocByID(nil, alloc.ID); existingAlloc != nil {
                job, err := n.srv.State().JobByID(nil, existingAlloc.Namespace, existingAlloc.JobID)
                if err != nil {
                    n.logger.Error("UpdateAlloc unable to find job", "job", existingAlloc.JobID, "error", err)
                    continue
                }
                if job == nil {
                    n.logger.Debug("UpdateAlloc unable to find job", "job", existingAlloc.JobID)
                    continue
                }
                taskGroup := job.LookupTaskGroup(existingAlloc.TaskGroup)
                if taskGroup != nil && existingAlloc.FollowupEvalID == "" && existingAlloc.RescheduleEligible(taskGroup.ReschedulePolicy, now) {
                    eval := &structs.Evaluation{
                        ID:          uuid.Generate(),
                        Namespace:   existingAlloc.Namespace,
                        TriggeredBy: structs.EvalTriggerRetryFailedAlloc,
                        JobID:       existingAlloc.JobID,
                        Type:        job.Type,
                        Priority:    job.Priority,
                        Status:      structs.EvalStatusPending,
                        CreateTime:  now.UTC().UnixNano(),
                        ModifyTime:  now.UTC().UnixNano(),
                    }
                    evals = append(evals, eval)
                }
            }
        }
    }
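
    // From here on the updates are coalesced: every RPC that arrives within the
    // same batchUpdateInterval window appends to n.updates/n.evals and shares a
    // single BatchFuture, so each caller returns only once the one Raft apply
    // performed by n.batchUpdate has committed.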

    // Add this to the batch
    n.updatesLock.Lock()
    n.updates = append(n.updates, args.Alloc...)
    n.evals = append(n.evals, evals...)

    // Start a new batch if none
    future := n.updateFuture
    if future == nil {
        future = structs.NewBatchFuture()
        n.updateFuture = future
        n.updateTimer = time.AfterFunc(batchUpdateInterval, func() {
            // Get the pending updates
            n.updatesLock.Lock()
            updates := n.updates
            evals := n.evals
            future := n.updateFuture
            n.updates = nil
            n.evals = nil
            n.updateFuture = nil
            n.updateTimer = nil
            n.updatesLock.Unlock()

            // Perform the batch update
            n.batchUpdate(future, updates, evals)
        })
    }
    n.updatesLock.Unlock()

    // Wait for the future
    if err := future.Wait(); err != nil {
        return err
    }

    // Setup the response
    reply.Index = future.Index()
    return nil
}

// batchUpdate is used to update all the allocations
func (n *Node) batchUpdate(future *structs.BatchFuture, updates []*structs.Allocation, evals []*structs.Evaluation) {
    // Group pending evals by jobID to prevent creating unnecessary evals
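    // (a single evaluation per namespace/job pair is enough for the scheduler
    // to reconsider all of that job's failed allocations in this batch, so
    // duplicates for the same pair are dropped).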
|
2018-04-10 19:00:07 +00:00
|
|
|
evalsByJobId := make(map[structs.NamespacedID]struct{})
|
2018-04-09 19:05:31 +00:00
|
|
|
var trimmedEvals []*structs.Evaluation
|
|
|
|
for _, eval := range evals {
|
|
|
|
namespacedID := structs.NamespacedID{
|
|
|
|
ID: eval.JobID,
|
|
|
|
Namespace: eval.Namespace,
|
|
|
|
}
|
|
|
|
_, exists := evalsByJobId[namespacedID]
|
|
|
|
if !exists {
|
2019-08-07 16:50:35 +00:00
|
|
|
now := time.Now().UTC().UnixNano()
|
|
|
|
eval.CreateTime = now
|
|
|
|
eval.ModifyTime = now
|
2018-04-09 19:05:31 +00:00
|
|
|
trimmedEvals = append(trimmedEvals, eval)
|
2018-04-10 19:00:07 +00:00
|
|
|
evalsByJobId[namespacedID] = struct{}{}
|
2018-04-09 19:05:31 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2018-04-10 19:00:07 +00:00
|
|
|
if len(trimmedEvals) > 0 {
|
2018-09-15 23:23:13 +00:00
|
|
|
n.logger.Debug("adding evaluations for rescheduling failed allocations", "num_evals", len(trimmedEvals))
|
2018-04-10 19:00:07 +00:00
|
|
|
}
|
2016-02-22 02:51:34 +00:00
|
|
|
// Prepare the batch update
|
|
|
|
batch := &structs.AllocUpdateRequest{
|
|
|
|
Alloc: updates,
|
2018-04-09 19:05:31 +00:00
|
|
|
Evals: trimmedEvals,
|
2016-02-22 02:51:34 +00:00
|
|
|
WriteRequest: structs.WriteRequest{Region: n.srv.config.Region},
|
|
|
|
}
|
|
|
|
|
|
|
|
// Commit this update via Raft
|
2016-08-22 20:57:27 +00:00
|
|
|
var mErr multierror.Error
|
2016-02-22 02:51:34 +00:00
|
|
|
_, index, err := n.srv.raftApply(structs.AllocClientUpdateRequestType, batch)
|
|
|
|
if err != nil {
|
2018-09-15 23:23:13 +00:00
|
|
|
n.logger.Error("alloc update failed", "error", err)
|
2016-08-22 20:57:27 +00:00
|
|
|
mErr.Errors = append(mErr.Errors, err)
|
|
|
|
}
|
|
|
|
|
2019-12-06 20:46:46 +00:00
|
|
|
// For each allocation we are updating, check if we should revoke any
|
|
|
|
// - Vault token accessors
|
|
|
|
// - Service Identity token accessors
|
|
|
|
var (
|
|
|
|
revokeVault []*structs.VaultAccessor
|
|
|
|
revokeSI []*structs.SITokenAccessor
|
|
|
|
)
|
|
|
|
|
2016-08-22 20:57:27 +00:00
|
|
|
for _, alloc := range updates {
|
|
|
|
// Skip any allocation that isn't dead on the client
|
|
|
|
if !alloc.Terminated() {
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
|
2017-02-08 04:31:23 +00:00
|
|
|
ws := memdb.NewWatchSet()
|
2019-12-06 20:46:46 +00:00
|
|
|
|
|
|
|
// Determine if there are any orphaned Vault accessors for the allocation
|
|
|
|
if accessors, err := n.srv.State().VaultAccessorsByAlloc(ws, alloc.ID); err != nil {
|
|
|
|
n.logger.Error("looking up vault accessors for alloc failed", "alloc_id", alloc.ID, "error", err)
|
2016-08-22 20:57:27 +00:00
|
|
|
mErr.Errors = append(mErr.Errors, err)
|
2019-12-06 20:46:46 +00:00
|
|
|
} else {
|
|
|
|
revokeVault = append(revokeVault, accessors...)
|
2016-08-22 20:57:27 +00:00
|
|
|
}
|
|
|
|
|
2019-12-06 20:46:46 +00:00
|
|
|
// Determine if there are any orphaned SI accessors for the allocation
|
|
|
|
if accessors, err := n.srv.State().SITokenAccessorsByAlloc(ws, alloc.ID); err != nil {
|
|
|
|
n.logger.Error("looking up si accessors for alloc failed", "alloc_id", alloc.ID, "error", err)
|
|
|
|
mErr.Errors = append(mErr.Errors, err)
|
|
|
|
} else {
|
|
|
|
revokeSI = append(revokeSI, accessors...)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2020-01-02 15:03:05 +00:00
|
|
|
// Revoke any orphaned Vault token accessors
|
2019-12-06 20:46:46 +00:00
|
|
|
if l := len(revokeVault); l > 0 {
|
|
|
|
n.logger.Debug("revoking vault accessors due to terminal allocations", "num_accessors", l)
|
|
|
|
if err := n.srv.vault.RevokeTokens(context.Background(), revokeVault, true); err != nil {
|
|
|
|
n.logger.Error("batched vault accessor revocation failed", "error", err)
|
|
|
|
mErr.Errors = append(mErr.Errors, err)
|
|
|
|
}
|
2016-08-22 20:57:27 +00:00
|
|
|
}
|
|
|
|
|
2020-01-02 15:03:05 +00:00
|
|
|
// Revoke any orphaned SI token accessors
|
2019-12-06 20:46:46 +00:00
|
|
|
if l := len(revokeSI); l > 0 {
|
|
|
|
n.logger.Debug("revoking si accessors due to terminal allocations", "num_accessors", l)
|
2020-01-02 15:03:05 +00:00
|
|
|
_ = n.srv.consulACLs.RevokeTokens(context.Background(), revokeSI, true)
|
2016-02-22 02:51:34 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
// Respond to the future
|
2016-08-22 20:57:27 +00:00
|
|
|
future.Respond(index, mErr.ErrorOrNil())
|
2016-02-22 02:51:34 +00:00
|
|
|
}
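// batchUpdate keeps at most one rescheduling eval per (namespace, job) per
// batch; keying on the bare JobID would incorrectly merge identically named
// jobs from different namespaces. A standalone sketch of that trimming step
// (the helper name is illustrative only):
func dedupeEvalsByJob(evals []*structs.Evaluation) []*structs.Evaluation {
	seen := make(map[structs.NamespacedID]struct{}, len(evals))
	var trimmed []*structs.Evaluation
	for _, eval := range evals {
		key := structs.NamespacedID{ID: eval.JobID, Namespace: eval.Namespace}
		if _, ok := seen[key]; ok {
			continue
		}
		seen[key] = struct{}{}
		trimmed = append(trimmed, eval)
	}
	return trimmed
}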
|
|
|
|
|
2015-09-06 21:28:29 +00:00
|
|
|
// List is used to list the available nodes
|
2015-09-07 03:31:32 +00:00
|
|
|
func (n *Node) List(args *structs.NodeListRequest,
|
2015-09-06 21:28:29 +00:00
|
|
|
reply *structs.NodeListResponse) error {
|
2015-09-07 03:31:32 +00:00
|
|
|
if done, err := n.srv.forward("Node.List", args, args, reply); done {
|
2015-09-06 21:28:29 +00:00
|
|
|
return err
|
|
|
|
}
|
|
|
|
defer metrics.MeasureSince([]string{"nomad", "client", "list"}, time.Now())
|
|
|
|
|
2017-09-15 05:01:18 +00:00
|
|
|
// Check node read permissions
|
2017-10-12 22:16:33 +00:00
|
|
|
if aclObj, err := n.srv.ResolveToken(args.AuthToken); err != nil {
|
2017-09-15 05:01:18 +00:00
|
|
|
return err
|
|
|
|
} else if aclObj != nil && !aclObj.AllowNodeRead() {
|
|
|
|
return structs.ErrPermissionDenied
|
|
|
|
}
|
|
|
|
|
2015-10-28 18:21:39 +00:00
|
|
|
// Setup the blocking query
|
|
|
|
opts := blockingOptions{
|
2015-10-29 21:47:39 +00:00
|
|
|
queryOpts: &args.QueryOptions,
|
|
|
|
queryMeta: &reply.QueryMeta,
|
2017-02-08 04:31:23 +00:00
|
|
|
run: func(ws memdb.WatchSet, state *state.StateStore) error {
|
2015-10-28 18:21:39 +00:00
|
|
|
// Capture all the nodes
|
2017-02-08 04:31:23 +00:00
|
|
|
var err error
|
2015-12-22 22:44:33 +00:00
|
|
|
var iter memdb.ResultIterator
|
|
|
|
if prefix := args.QueryOptions.Prefix; prefix != "" {
|
2017-02-08 04:31:23 +00:00
|
|
|
iter, err = state.NodesByIDPrefix(ws, prefix)
|
2015-12-22 22:44:33 +00:00
|
|
|
} else {
|
2017-02-08 04:31:23 +00:00
|
|
|
iter, err = state.Nodes(ws)
|
2015-12-22 22:44:33 +00:00
|
|
|
}
|
2015-10-28 18:21:39 +00:00
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
2015-09-06 21:28:29 +00:00
|
|
|
|
2015-10-28 19:29:06 +00:00
|
|
|
var nodes []*structs.NodeListStub
|
2015-10-28 18:21:39 +00:00
|
|
|
for {
|
|
|
|
raw := iter.Next()
|
|
|
|
if raw == nil {
|
|
|
|
break
|
|
|
|
}
|
|
|
|
node := raw.(*structs.Node)
|
2015-10-28 19:29:06 +00:00
|
|
|
nodes = append(nodes, node.Stub())
|
2015-10-28 18:21:39 +00:00
|
|
|
}
|
2015-10-28 19:29:06 +00:00
|
|
|
reply.Nodes = nodes
|
2015-09-06 21:28:29 +00:00
|
|
|
|
2015-10-28 18:21:39 +00:00
|
|
|
// Use the last index that affected the nodes table
|
2017-02-08 04:31:23 +00:00
|
|
|
index, err := state.Index("nodes")
|
2015-10-28 18:21:39 +00:00
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
reply.Index = index
|
|
|
|
|
|
|
|
// Set the query response
|
|
|
|
n.srv.setQueryMeta(&reply.QueryMeta)
|
|
|
|
return nil
|
|
|
|
}}
|
|
|
|
return n.srv.blockingRPC(&opts)
|
2015-09-06 21:28:29 +00:00
|
|
|
}
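// Callers of Node.List drive the behavior entirely through QueryOptions: an
// optional ID prefix narrows the listing, and MinQueryIndex/MaxQueryTime turn
// it into a blocking query against the nodes table. A hedged usage sketch;
// the rpcFn transport callback and the concrete option values are assumptions
// for illustration, not part of this endpoint.
func exampleListNodes(rpcFn func(method string, args, reply interface{}) error) ([]*structs.NodeListStub, error) {
	args := &structs.NodeListRequest{
		QueryOptions: structs.QueryOptions{
			Region:        "global",
			Prefix:        "f7b1",          // optional: only nodes whose ID starts with this prefix
			MinQueryIndex: 1000,            // block until the nodes table passes this index
			MaxQueryTime:  5 * time.Minute, // upper bound on how long the server may hold the query
			AuthToken:     "example-acl-token",
		},
	}
	var reply structs.NodeListResponse
	if err := rpcFn("Node.List", args, &reply); err != nil {
		return nil, err
	}
	return reply.Nodes, nil
}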
|
|
|
|
|
2015-08-06 23:39:20 +00:00
|
|
|
// createNodeEvals is used to create evaluations for each alloc on a node.
|
|
|
|
// Each Eval is scoped to a job, so we need to potentially trigger many evals.
|
2015-09-07 03:31:32 +00:00
|
|
|
func (n *Node) createNodeEvals(nodeID string, nodeIndex uint64) ([]string, uint64, error) {
|
2015-08-06 23:39:20 +00:00
|
|
|
// Snapshot the state
|
2015-09-07 03:31:32 +00:00
|
|
|
snap, err := n.srv.fsm.State().Snapshot()
|
2015-08-06 23:39:20 +00:00
|
|
|
if err != nil {
|
|
|
|
return nil, 0, fmt.Errorf("failed to snapshot state: %v", err)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Find all the allocations for this node
|
2017-02-08 04:31:23 +00:00
|
|
|
ws := memdb.NewWatchSet()
|
|
|
|
allocs, err := snap.AllocsByNode(ws, nodeID)
|
2015-08-06 23:39:20 +00:00
|
|
|
if err != nil {
|
|
|
|
return nil, 0, fmt.Errorf("failed to find allocs for '%s': %v", nodeID, err)
|
|
|
|
}
|
|
|
|
|
2017-02-08 04:31:23 +00:00
|
|
|
sysJobsIter, err := snap.JobsByScheduler(ws, "system")
|
2015-10-20 17:57:53 +00:00
|
|
|
if err != nil {
|
|
|
|
return nil, 0, fmt.Errorf("failed to find system jobs for '%s': %v", nodeID, err)
|
|
|
|
}
|
2015-10-21 00:11:57 +00:00
|
|
|
|
|
|
|
var sysJobs []*structs.Job
|
|
|
|
for job := sysJobsIter.Next(); job != nil; job = sysJobsIter.Next() {
|
|
|
|
sysJobs = append(sysJobs, job.(*structs.Job))
|
|
|
|
}
|
2015-10-20 17:57:53 +00:00
|
|
|
|
2015-08-06 23:39:20 +00:00
|
|
|
// Fast-path if nothing to do
|
2015-10-21 00:11:57 +00:00
|
|
|
if len(allocs) == 0 && len(sysJobs) == 0 {
|
2015-08-06 23:39:20 +00:00
|
|
|
return nil, 0, nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// Create an eval for each JobID affected
|
|
|
|
var evals []*structs.Evaluation
|
|
|
|
var evalIDs []string
|
|
|
|
jobIDs := make(map[string]struct{})
|
2019-08-07 16:50:35 +00:00
|
|
|
now := time.Now().UTC().UnixNano()
|
2015-08-06 23:39:20 +00:00
|
|
|
|
|
|
|
for _, alloc := range allocs {
|
|
|
|
// Deduplicate on JobID
|
|
|
|
if _, ok := jobIDs[alloc.JobID]; ok {
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
jobIDs[alloc.JobID] = struct{}{}
|
|
|
|
|
|
|
|
// Create a new eval
|
|
|
|
eval := &structs.Evaluation{
|
2017-09-29 16:58:48 +00:00
|
|
|
ID: uuid.Generate(),
|
2017-09-07 23:56:15 +00:00
|
|
|
Namespace: alloc.Namespace,
|
2015-08-06 23:39:20 +00:00
|
|
|
Priority: alloc.Job.Priority,
|
|
|
|
Type: alloc.Job.Type,
|
|
|
|
TriggeredBy: structs.EvalTriggerNodeUpdate,
|
|
|
|
JobID: alloc.JobID,
|
|
|
|
NodeID: nodeID,
|
|
|
|
NodeModifyIndex: nodeIndex,
|
|
|
|
Status: structs.EvalStatusPending,
|
2019-08-07 16:50:35 +00:00
|
|
|
CreateTime: now,
|
|
|
|
ModifyTime: now,
|
2015-08-06 23:39:20 +00:00
|
|
|
}
|
|
|
|
evals = append(evals, eval)
|
|
|
|
evalIDs = append(evalIDs, eval.ID)
|
|
|
|
}
|
|
|
|
|
2015-10-20 17:57:53 +00:00
|
|
|
// Create an evaluation for each system job.
|
2015-10-20 20:02:55 +00:00
|
|
|
for _, job := range sysJobs {
|
2015-10-20 17:57:53 +00:00
|
|
|
// Still dedup on JobID as the node may already have the system job.
|
2015-10-20 20:02:55 +00:00
|
|
|
if _, ok := jobIDs[job.ID]; ok {
|
2015-10-20 17:57:53 +00:00
|
|
|
continue
|
|
|
|
}
|
2015-10-20 20:02:55 +00:00
|
|
|
jobIDs[job.ID] = struct{}{}
|
2015-10-20 17:57:53 +00:00
|
|
|
|
|
|
|
// Create a new eval
|
|
|
|
eval := &structs.Evaluation{
|
2017-09-29 16:58:48 +00:00
|
|
|
ID: uuid.Generate(),
|
2017-09-07 23:56:15 +00:00
|
|
|
Namespace: job.Namespace,
|
2015-10-20 20:02:55 +00:00
|
|
|
Priority: job.Priority,
|
|
|
|
Type: job.Type,
|
2015-10-20 17:57:53 +00:00
|
|
|
TriggeredBy: structs.EvalTriggerNodeUpdate,
|
2015-10-20 20:02:55 +00:00
|
|
|
JobID: job.ID,
|
2015-10-20 17:57:53 +00:00
|
|
|
NodeID: nodeID,
|
|
|
|
NodeModifyIndex: nodeIndex,
|
|
|
|
Status: structs.EvalStatusPending,
|
2019-08-07 16:50:35 +00:00
|
|
|
CreateTime: now,
|
|
|
|
ModifyTime: now,
|
2015-10-20 17:57:53 +00:00
|
|
|
}
|
|
|
|
evals = append(evals, eval)
|
|
|
|
evalIDs = append(evalIDs, eval.ID)
|
|
|
|
}
|
|
|
|
|
2015-08-06 23:39:20 +00:00
|
|
|
// Create the Raft transaction
|
|
|
|
update := &structs.EvalUpdateRequest{
|
|
|
|
Evals: evals,
|
2015-09-07 03:31:32 +00:00
|
|
|
WriteRequest: structs.WriteRequest{Region: n.srv.config.Region},
|
2015-08-06 23:39:20 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
// Commit this evaluation via Raft
|
2015-08-16 01:03:05 +00:00
|
|
|
// XXX: There is a risk of partial failure where the node update succeeds
|
|
|
|
// but the EvalUpdate does not.
|
2015-09-07 03:31:32 +00:00
|
|
|
_, evalIndex, err := n.srv.raftApply(structs.EvalUpdateRequestType, update)
|
2015-08-06 23:39:20 +00:00
|
|
|
if err != nil {
|
|
|
|
return nil, 0, err
|
|
|
|
}
|
|
|
|
return evalIDs, evalIndex, nil
|
|
|
|
}
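// Both loops above build near-identical node-update evaluations, one per
// affected job. A sketch of that construction factored into a helper; the
// helper itself is illustrative and not part of the endpoint, but the fields
// mirror exactly what createNodeEvals sets.
func newNodeUpdateEval(namespace, jobID, jobType string, priority int, nodeID string, nodeIndex uint64, now int64) *structs.Evaluation {
	return &structs.Evaluation{
		ID:              uuid.Generate(),
		Namespace:       namespace,
		Priority:        priority,
		Type:            jobType,
		TriggeredBy:     structs.EvalTriggerNodeUpdate,
		JobID:           jobID,
		NodeID:          nodeID,
		NodeModifyIndex: nodeIndex,
		Status:          structs.EvalStatusPending,
		CreateTime:      now,
		ModifyTime:      now,
	}
}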
|
2016-02-22 02:51:34 +00:00
|
|
|
|
2016-08-18 17:50:47 +00:00
|
|
|
// DeriveVaultToken is used by the clients to request wrapped Vault tokens for
|
|
|
|
// tasks
|
2019-12-06 20:46:46 +00:00
|
|
|
func (n *Node) DeriveVaultToken(args *structs.DeriveVaultTokenRequest, reply *structs.DeriveVaultTokenResponse) error {
|
|
|
|
setError := func(e error, recoverable bool) {
|
|
|
|
if e != nil {
|
|
|
|
if re, ok := e.(*structs.RecoverableError); ok {
|
|
|
|
reply.Error = re // No need to wrap if error is already a RecoverableError
|
|
|
|
} else {
|
|
|
|
reply.Error = structs.NewRecoverableError(e, recoverable).(*structs.RecoverableError)
|
|
|
|
}
|
|
|
|
n.logger.Error("DeriveVaultToken failed", "recoverable", recoverable, "error", e)
|
2018-03-13 22:09:03 +00:00
|
|
|
}
|
2016-10-28 22:50:35 +00:00
|
|
|
}
|
|
|
|
|
2016-08-18 17:50:47 +00:00
|
|
|
if done, err := n.srv.forward("Node.DeriveVaultToken", args, args, reply); done {
|
2019-12-06 20:46:46 +00:00
|
|
|
setError(err, structs.IsRecoverable(err) || err == structs.ErrNoLeader)
|
2016-10-23 01:08:30 +00:00
|
|
|
return nil
|
2016-08-18 17:50:47 +00:00
|
|
|
}
|
|
|
|
defer metrics.MeasureSince([]string{"nomad", "client", "derive_vault_token"}, time.Now())
|
|
|
|
|
|
|
|
// Verify the arguments
|
|
|
|
if args.NodeID == "" {
|
2019-12-06 20:46:46 +00:00
|
|
|
setError(fmt.Errorf("missing node ID"), false)
|
2016-10-28 22:50:35 +00:00
|
|
|
return nil
|
2016-08-18 17:50:47 +00:00
|
|
|
}
|
|
|
|
if args.SecretID == "" {
|
2019-12-06 20:46:46 +00:00
|
|
|
setError(fmt.Errorf("missing node SecretID"), false)
|
2016-10-23 01:08:30 +00:00
|
|
|
return nil
|
2016-08-18 17:50:47 +00:00
|
|
|
}
|
|
|
|
if args.AllocID == "" {
|
2019-12-06 20:46:46 +00:00
|
|
|
setError(fmt.Errorf("missing allocation ID"), false)
|
2016-10-23 01:08:30 +00:00
|
|
|
return nil
|
2016-08-18 17:50:47 +00:00
|
|
|
}
|
|
|
|
if len(args.Tasks) == 0 {
|
2019-12-06 20:46:46 +00:00
|
|
|
setError(fmt.Errorf("no tasks specified"), false)
|
2016-10-23 01:08:30 +00:00
|
|
|
return nil
|
2016-08-18 17:50:47 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
// Verify the following:
|
|
|
|
// * The Node exists and has the correct SecretID
|
2019-12-06 20:46:46 +00:00
|
|
|
// * The Allocation exists on the specified Node
|
|
|
|
// * The Allocation contains the given tasks and they each require Vault
|
2016-08-18 17:50:47 +00:00
|
|
|
// tokens
|
|
|
|
snap, err := n.srv.fsm.State().Snapshot()
|
|
|
|
if err != nil {
|
2019-12-06 20:46:46 +00:00
|
|
|
setError(err, false)
|
2016-10-23 01:08:30 +00:00
|
|
|
return nil
|
2016-08-18 17:50:47 +00:00
|
|
|
}
|
2017-02-08 04:31:23 +00:00
|
|
|
ws := memdb.NewWatchSet()
|
|
|
|
node, err := snap.NodeByID(ws, args.NodeID)
|
2016-08-18 17:50:47 +00:00
|
|
|
if err != nil {
|
2019-12-06 20:46:46 +00:00
|
|
|
setError(err, false)
|
2016-10-23 01:08:30 +00:00
|
|
|
return nil
|
2016-08-18 17:50:47 +00:00
|
|
|
}
|
|
|
|
if node == nil {
|
2019-12-06 20:46:46 +00:00
|
|
|
setError(fmt.Errorf("Node %q does not exist", args.NodeID), false)
|
2016-10-23 01:08:30 +00:00
|
|
|
return nil
|
2016-08-18 17:50:47 +00:00
|
|
|
}
|
2016-08-19 01:57:33 +00:00
|
|
|
if node.SecretID != args.SecretID {
|
2019-12-06 20:46:46 +00:00
|
|
|
setError(fmt.Errorf("SecretID mismatch"), false)
|
2016-10-23 01:08:30 +00:00
|
|
|
return nil
|
2016-08-19 01:57:33 +00:00
|
|
|
}
|
2016-08-18 17:50:47 +00:00
|
|
|
|
2017-02-08 04:31:23 +00:00
|
|
|
alloc, err := snap.AllocByID(ws, args.AllocID)
|
2016-08-18 17:50:47 +00:00
|
|
|
if err != nil {
|
2019-12-06 20:46:46 +00:00
|
|
|
setError(err, false)
|
2016-10-23 01:08:30 +00:00
|
|
|
return nil
|
2016-08-18 17:50:47 +00:00
|
|
|
}
|
|
|
|
if alloc == nil {
|
2019-12-06 20:46:46 +00:00
|
|
|
setError(fmt.Errorf("Allocation %q does not exist", args.AllocID), false)
|
2016-10-23 01:08:30 +00:00
|
|
|
return nil
|
2016-08-18 17:50:47 +00:00
|
|
|
}
|
|
|
|
if alloc.NodeID != args.NodeID {
|
2019-12-06 20:46:46 +00:00
|
|
|
setError(fmt.Errorf("Allocation %q not running on Node %q", args.AllocID, args.NodeID), false)
|
2016-10-23 01:08:30 +00:00
|
|
|
return nil
|
2016-08-18 17:50:47 +00:00
|
|
|
}
|
2016-08-19 20:13:51 +00:00
|
|
|
if alloc.TerminalStatus() {
|
2019-12-06 20:46:46 +00:00
|
|
|
setError(fmt.Errorf("Can't request Vault token for terminal allocation"), false)
|
2016-10-23 01:08:30 +00:00
|
|
|
return nil
|
2016-08-19 20:13:51 +00:00
|
|
|
}
|
2016-08-18 17:50:47 +00:00
|
|
|
|
|
|
|
// Check the policies
|
|
|
|
policies := alloc.Job.VaultPolicies()
|
|
|
|
if policies == nil {
|
2019-12-06 20:46:46 +00:00
|
|
|
setError(fmt.Errorf("Job doesn't require Vault policies"), false)
|
2016-10-23 01:08:30 +00:00
|
|
|
return nil
|
2016-08-18 17:50:47 +00:00
|
|
|
}
|
|
|
|
tg, ok := policies[alloc.TaskGroup]
|
|
|
|
if !ok {
|
2019-12-06 20:46:46 +00:00
|
|
|
setError(fmt.Errorf("Task group does not require Vault policies"), false)
|
2016-10-23 01:08:30 +00:00
|
|
|
return nil
|
2016-08-18 17:50:47 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
var unneeded []string
|
|
|
|
for _, task := range args.Tasks {
|
|
|
|
taskVault := tg[task]
|
2016-08-19 20:13:51 +00:00
|
|
|
if taskVault == nil || len(taskVault.Policies) == 0 {
|
2016-08-18 17:50:47 +00:00
|
|
|
unneeded = append(unneeded, task)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
if len(unneeded) != 0 {
|
2016-10-23 01:08:30 +00:00
|
|
|
e := fmt.Errorf("Requested Vault tokens for tasks without defined Vault policies: %s",
|
2016-08-18 17:50:47 +00:00
|
|
|
strings.Join(unneeded, ", "))
|
2019-12-06 20:46:46 +00:00
|
|
|
setError(e, false)
|
2016-10-23 01:08:30 +00:00
|
|
|
return nil
|
2016-08-18 17:50:47 +00:00
|
|
|
}
|
|
|
|
|
2016-08-18 21:31:44 +00:00
|
|
|
// At this point the request is valid and we should contact Vault for
|
|
|
|
// tokens.
|
|
|
|
|
|
|
|
// Create an error group where we will spin up a fixed set of goroutines to
|
|
|
|
// handle deriving tokens but where if any fails the whole group is
|
|
|
|
// canceled.
|
|
|
|
g, ctx := errgroup.WithContext(context.Background())
|
|
|
|
|
|
|
|
// Cap the handlers
|
|
|
|
handlers := len(args.Tasks)
|
|
|
|
if handlers > maxParallelRequestsPerDerive {
|
|
|
|
handlers = maxParallelRequestsPerDerive
|
|
|
|
}
|
|
|
|
|
|
|
|
// Create the Vault Tokens
|
|
|
|
input := make(chan string, handlers)
|
|
|
|
results := make(map[string]*vapi.Secret, len(args.Tasks))
|
|
|
|
for i := 0; i < handlers; i++ {
|
|
|
|
g.Go(func() error {
|
2016-08-20 02:55:06 +00:00
|
|
|
for {
|
|
|
|
select {
|
|
|
|
case task, ok := <-input:
|
|
|
|
if !ok {
|
|
|
|
return nil
|
|
|
|
}
|
2016-08-18 21:31:44 +00:00
|
|
|
|
2016-08-20 02:55:06 +00:00
|
|
|
secret, err := n.srv.vault.CreateToken(ctx, alloc, task)
|
|
|
|
if err != nil {
|
2018-03-13 22:09:03 +00:00
|
|
|
return err
|
2016-08-20 02:55:06 +00:00
|
|
|
}
|
2016-08-18 21:31:44 +00:00
|
|
|
|
2016-08-20 02:55:06 +00:00
|
|
|
results[task] = secret
|
|
|
|
case <-ctx.Done():
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
}
|
2016-08-18 21:31:44 +00:00
|
|
|
})
|
|
|
|
}
|
|
|
|
|
|
|
|
// Send the input
|
|
|
|
go func() {
|
2016-08-20 02:55:06 +00:00
|
|
|
defer close(input)
|
2016-08-18 21:31:44 +00:00
|
|
|
for _, task := range args.Tasks {
|
|
|
|
select {
|
|
|
|
case <-ctx.Done():
|
|
|
|
return
|
|
|
|
case input <- task:
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}()
|
|
|
|
|
|
|
|
// Wait for everything to complete or for an error
|
2016-10-23 01:08:30 +00:00
|
|
|
createErr := g.Wait()
|
2016-08-19 01:57:33 +00:00
|
|
|
|
2016-10-23 01:08:30 +00:00
|
|
|
// Retrieve the results
|
2016-08-19 01:57:33 +00:00
|
|
|
accessors := make([]*structs.VaultAccessor, 0, len(results))
|
|
|
|
tokens := make(map[string]string, len(results))
|
|
|
|
for task, secret := range results {
|
|
|
|
w := secret.WrapInfo
|
|
|
|
tokens[task] = w.Token
|
|
|
|
accessor := &structs.VaultAccessor{
|
|
|
|
Accessor: w.WrappedAccessor,
|
|
|
|
Task: task,
|
|
|
|
NodeID: alloc.NodeID,
|
|
|
|
AllocID: alloc.ID,
|
|
|
|
CreationTTL: w.TTL,
|
|
|
|
}
|
2016-08-18 17:50:47 +00:00
|
|
|
|
2016-08-19 01:57:33 +00:00
|
|
|
accessors = append(accessors, accessor)
|
|
|
|
}
|
|
|
|
|
2016-08-22 20:57:27 +00:00
|
|
|
// If there was an error, revoke the created tokens
|
2016-10-23 01:08:30 +00:00
|
|
|
if createErr != nil {
|
2018-09-15 23:23:13 +00:00
|
|
|
n.logger.Error("Vault token creation for alloc failed", "alloc_id", alloc.ID, "error", createErr)
|
2016-10-28 22:50:35 +00:00
|
|
|
|
2016-10-23 01:08:30 +00:00
|
|
|
if revokeErr := n.srv.vault.RevokeTokens(context.Background(), accessors, false); revokeErr != nil {
|
2018-09-15 23:23:13 +00:00
|
|
|
n.logger.Error("Vault token revocation for alloc failed", "alloc_id", alloc.ID, "error", revokeErr)
|
2016-10-23 01:08:30 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
if rerr, ok := createErr.(*structs.RecoverableError); ok {
|
|
|
|
reply.Error = rerr
|
2017-03-29 20:59:43 +00:00
|
|
|
} else {
|
2017-02-01 21:18:12 +00:00
|
|
|
reply.Error = structs.NewRecoverableError(createErr, false).(*structs.RecoverableError)
|
2016-08-22 20:57:27 +00:00
|
|
|
}
|
2016-10-23 01:08:30 +00:00
|
|
|
|
|
|
|
return nil
|
2016-08-22 20:57:27 +00:00
|
|
|
}
|
|
|
|
|
2016-10-23 01:08:30 +00:00
|
|
|
// Commit to Raft before returning any of the tokens
|
2016-08-22 20:57:27 +00:00
|
|
|
req := structs.VaultAccessorsRequest{Accessors: accessors}
|
2016-08-19 01:57:33 +00:00
|
|
|
_, index, err := n.srv.raftApply(structs.VaultAccessorRegisterRequestType, &req)
|
|
|
|
if err != nil {
|
2018-09-15 23:23:13 +00:00
|
|
|
n.logger.Error("registering Vault accessors for alloc failed", "alloc_id", alloc.ID, "error", err)
|
2016-10-23 01:08:30 +00:00
|
|
|
|
|
|
|
// Determine if we can recover from the error
|
|
|
|
retry := false
|
|
|
|
switch err {
|
|
|
|
case raft.ErrNotLeader, raft.ErrLeadershipLost, raft.ErrRaftShutdown, raft.ErrEnqueueTimeout:
|
|
|
|
retry = true
|
|
|
|
}
|
|
|
|
|
2019-12-06 20:46:46 +00:00
|
|
|
setError(err, retry)
|
2016-10-23 01:08:30 +00:00
|
|
|
return nil
|
2016-08-19 01:57:33 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
reply.Index = index
|
|
|
|
reply.Tasks = tokens
|
|
|
|
n.srv.setQueryMeta(&reply.QueryMeta)
|
2016-08-18 17:50:47 +00:00
|
|
|
return nil
|
|
|
|
}
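// DeriveVaultToken fans the per-task token creation out to a bounded set of
// errgroup workers fed by a channel, cancelling the whole group on the first
// failure. A generic, self-contained sketch of that fan-out shape; the
// function name and the work callback are illustrative assumptions, not part
// of this endpoint.
func exampleBoundedFanOut(tasks []string, work func(context.Context, string) error) error {
	g, ctx := errgroup.WithContext(context.Background())

	// Cap the number of workers at the same limit used for token derivation.
	workers := len(tasks)
	if workers > maxParallelRequestsPerDerive {
		workers = maxParallelRequestsPerDerive
	}

	input := make(chan string, workers)
	for i := 0; i < workers; i++ {
		g.Go(func() error {
			for {
				select {
				case task, ok := <-input:
					if !ok {
						return nil
					}
					if err := work(ctx, task); err != nil {
						return err // cancels ctx so the other workers stop
					}
				case <-ctx.Done():
					return nil
				}
			}
		})
	}

	// Feed the workers, stopping early if the group was cancelled.
	go func() {
		defer close(input)
		for _, task := range tasks {
			select {
			case <-ctx.Done():
				return
			case input <- task:
			}
		}
	}()

	return g.Wait()
}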
|
2018-03-14 00:52:12 +00:00
|
|
|
|
2019-12-06 20:46:46 +00:00
|
|
|
func (n *Node) DeriveSIToken(args *structs.DeriveSITokenRequest, reply *structs.DeriveSITokenResponse) error {
|
|
|
|
setError := func(e error, recoverable bool) {
|
|
|
|
if e != nil {
|
|
|
|
if re, ok := e.(*structs.RecoverableError); ok {
|
|
|
|
reply.Error = re // No need to wrap if error is already a RecoverableError
|
|
|
|
} else {
|
|
|
|
reply.Error = structs.NewRecoverableError(e, recoverable).(*structs.RecoverableError)
|
|
|
|
}
|
|
|
|
n.logger.Error("DeriveSIToken failed", "recoverable", recoverable, "error", e)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
if done, err := n.srv.forward("Node.DeriveSIToken", args, args, reply); done {
|
|
|
|
setError(err, structs.IsRecoverable(err) || err == structs.ErrNoLeader)
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
defer metrics.MeasureSince([]string{"nomad", "client", "derive_si_token"}, time.Now())
|
|
|
|
|
|
|
|
// Verify the arguments
|
|
|
|
if err := args.Validate(); err != nil {
|
|
|
|
setError(err, false)
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// Get the ClusterID
|
|
|
|
clusterID, err := n.srv.ClusterID()
|
|
|
|
if err != nil {
|
|
|
|
setError(err, false)
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// Verify the following:
|
|
|
|
// * The Node exists and has the correct SecretID.
|
|
|
|
// * The Allocation exists on the specified Node.
|
|
|
|
// * The Allocation contains the given tasks, and each task requires a
|
|
|
|
// SI token.
|
|
|
|
|
|
|
|
snap, err := n.srv.fsm.State().Snapshot()
|
|
|
|
if err != nil {
|
|
|
|
setError(err, false)
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
node, err := snap.NodeByID(nil, args.NodeID)
|
|
|
|
if err != nil {
|
|
|
|
setError(err, false)
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
if node == nil {
|
|
|
|
setError(errors.Errorf("Node %q does not exist", args.NodeID), false)
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
if node.SecretID != args.SecretID {
|
|
|
|
setError(errors.Errorf("SecretID mismatch"), false)
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
|
|
|
alloc, err := snap.AllocByID(nil, args.AllocID)
|
|
|
|
if err != nil {
|
|
|
|
setError(err, false)
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
if alloc == nil {
|
|
|
|
setError(errors.Errorf("Allocation %q does not exist", args.AllocID), false)
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
if alloc.NodeID != args.NodeID {
|
|
|
|
setError(errors.Errorf("Allocation %q not running on node %q", args.AllocID, args.NodeID), false)
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
if alloc.TerminalStatus() {
|
|
|
|
setError(errors.Errorf("Cannot request SI token for terminal allocation"), false)
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// make sure the task group contains at least one Connect-enabled service
|
|
|
|
tg := alloc.Job.LookupTaskGroup(alloc.TaskGroup)
|
|
|
|
if tg == nil {
|
|
|
|
setError(errors.Errorf("Allocation %q does not contain TaskGroup %q", args.AllocID, alloc.TaskGroup), false)
|
|
|
|
return nil
|
|
|
|
}
|
2020-01-30 16:49:07 +00:00
|
|
|
if !tg.UsesConnect() {
|
2019-12-06 20:46:46 +00:00
|
|
|
setError(errors.Errorf("TaskGroup %q does not use Connect", tg.Name), false)
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// make sure each task in args.Tasks is a connect-enabled task
|
2019-12-19 23:40:30 +00:00
|
|
|
// note: the tasks at this point should be the "connect-sidecar-<id>" name
|
|
|
|
//
|
2019-12-06 20:46:46 +00:00
|
|
|
unneeded := tasksNotUsingConnect(tg, args.Tasks)
|
|
|
|
if len(unneeded) > 0 {
|
|
|
|
setError(fmt.Errorf(
|
|
|
|
"Requested Consul Service Identity tokens for tasks that are not Connect enabled: %v",
|
|
|
|
strings.Join(unneeded, ", "),
|
|
|
|
), false)
return nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// At this point the request is valid and we should contact Consul for tokens.
|
|
|
|
|
|
|
|
// A lot of the following is copied from DeriveVaultToken which has been
|
|
|
|
// working fine for years.
|
|
|
|
|
|
|
|
// Create an error group where we will spin up a fixed set of goroutines to
|
|
|
|
// handle deriving tokens but where if any fails the whole group is
|
|
|
|
// canceled.
|
|
|
|
g, ctx := errgroup.WithContext(context.Background())
|
|
|
|
|
|
|
|
// Cap the worker threads
|
|
|
|
numWorkers := len(args.Tasks)
|
|
|
|
if numWorkers > maxParallelRequestsPerDerive {
|
|
|
|
numWorkers = maxParallelRequestsPerDerive
|
|
|
|
}
|
|
|
|
|
|
|
|
// would like to pull some of this out...
|
|
|
|
|
|
|
|
// Create the SI tokens
|
|
|
|
input := make(chan string, numWorkers)
|
|
|
|
results := make(map[string]*structs.SIToken, numWorkers)
|
|
|
|
for i := 0; i < numWorkers; i++ {
|
|
|
|
g.Go(func() error {
|
|
|
|
for {
|
|
|
|
select {
|
|
|
|
case task, ok := <-input:
|
|
|
|
if !ok {
|
|
|
|
return nil
|
|
|
|
}
|
2019-12-19 23:40:30 +00:00
|
|
|
|
2019-12-06 20:46:46 +00:00
|
|
|
sii := ServiceIdentityIndex{
|
|
|
|
ClusterID: clusterID,
|
|
|
|
AllocID: alloc.ID,
|
|
|
|
TaskName: task,
|
|
|
|
}
|
|
|
|
secret, err := n.srv.consulACLs.CreateToken(ctx, sii)
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
results[task] = secret
|
|
|
|
case <-ctx.Done():
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
}
|
|
|
|
})
|
|
|
|
}
|
|
|
|
|
|
|
|
// Send the input
|
|
|
|
go func() {
|
|
|
|
defer close(input)
|
|
|
|
for _, task := range args.Tasks {
|
|
|
|
select {
|
|
|
|
case <-ctx.Done():
|
|
|
|
return
|
|
|
|
case input <- task:
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}()
|
|
|
|
|
|
|
|
// Wait for everything to complete or for an error
|
|
|
|
createErr := g.Wait()
|
|
|
|
|
|
|
|
accessors := make([]*structs.SITokenAccessor, 0, len(results))
|
|
|
|
tokens := make(map[string]string, len(results))
|
|
|
|
for task, secret := range results {
|
|
|
|
tokens[task] = secret.SecretID
|
|
|
|
accessor := &structs.SITokenAccessor{
|
|
|
|
NodeID: alloc.NodeID,
|
|
|
|
AllocID: alloc.ID,
|
|
|
|
TaskName: task,
|
|
|
|
AccessorID: secret.AccessorID,
|
|
|
|
}
|
|
|
|
accessors = append(accessors, accessor)
|
|
|
|
}
|
|
|
|
|
2020-01-02 15:03:05 +00:00
|
|
|
// If there was an error, revoke all created tokens. These tokens have not
|
|
|
|
// yet been committed to the persistent store.
|
2019-12-06 20:46:46 +00:00
|
|
|
if createErr != nil {
|
|
|
|
n.logger.Error("Consul Service Identity token creation for alloc failed", "alloc_id", alloc.ID, "error", createErr)
|
2020-01-02 15:03:05 +00:00
|
|
|
_ = n.srv.consulACLs.RevokeTokens(context.Background(), accessors, false)
|
2019-12-06 20:46:46 +00:00
|
|
|
|
|
|
|
if recoverable, ok := createErr.(*structs.RecoverableError); ok {
|
|
|
|
reply.Error = recoverable
|
|
|
|
} else {
|
|
|
|
reply.Error = structs.NewRecoverableError(createErr, false).(*structs.RecoverableError)
|
|
|
|
}
|
|
|
|
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// Commit the derived tokens to raft before returning them
|
|
|
|
requested := structs.SITokenAccessorsRequest{Accessors: accessors}
|
|
|
|
_, index, err := n.srv.raftApply(structs.ServiceIdentityAccessorRegisterRequestType, &requested)
|
|
|
|
if err != nil {
|
|
|
|
n.logger.Error("registering Service Identity token accessors for alloc failed", "alloc_id", alloc.ID, "error", err)
|
|
|
|
|
|
|
|
// Determine if we can recover from the error
|
|
|
|
retry := false
|
|
|
|
switch err {
|
|
|
|
case raft.ErrNotLeader, raft.ErrLeadershipLost, raft.ErrRaftShutdown, raft.ErrEnqueueTimeout:
|
|
|
|
retry = true
|
|
|
|
}
|
|
|
|
setError(err, retry)
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// We made it! Now we can set the reply.
|
|
|
|
reply.Index = index
|
|
|
|
reply.Tokens = tokens
|
|
|
|
n.srv.setQueryMeta(&reply.QueryMeta)
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
|
|
|
func tasksNotUsingConnect(tg *structs.TaskGroup, tasks []string) []string {
|
|
|
|
var unneeded []string
|
|
|
|
for _, task := range tasks {
|
|
|
|
tgTask := tg.LookupTask(task)
|
|
|
|
if !taskUsesConnect(tgTask) {
|
|
|
|
unneeded = append(unneeded, task)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return unneeded
|
|
|
|
}
|
|
|
|
|
|
|
|
func taskUsesConnect(task *structs.Task) bool {
|
|
|
|
if task == nil {
|
|
|
|
// not even in the task group
|
|
|
|
return false
|
|
|
|
}
|
|
|
|
// todo(shoenig): TBD what Kind does a native task have?
|
|
|
|
return task.Kind.IsConnectProxy()
|
|
|
|
}
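// tasksNotUsingConnect and taskUsesConnect are pure helpers over the task
// group definition, so they are easy to exercise in isolation. A small
// illustrative sketch; the task names and the Kind literal format are
// assumptions made for the example, not taken from this file.
func exampleConnectFilter() {
	tg := &structs.TaskGroup{
		Name: "web",
		Tasks: []*structs.Task{
			{Name: "connect-proxy-web", Kind: structs.TaskKind("connect-proxy:web")},
			{Name: "app"}, // no Kind, so not a Connect proxy task
		},
	}

	unneeded := tasksNotUsingConnect(tg, []string{"connect-proxy-web", "app"})
	fmt.Println(unneeded) // reports only "app" as not Connect enabled
}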
|
|
|
|
|
2018-03-14 00:52:12 +00:00
|
|
|
func (n *Node) EmitEvents(args *structs.EmitNodeEventsRequest, reply *structs.EmitNodeEventsResponse) error {
|
|
|
|
if done, err := n.srv.forward("Node.EmitEvents", args, args, reply); done {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
defer metrics.MeasureSince([]string{"nomad", "client", "emit_events"}, time.Now())
|
|
|
|
|
|
|
|
if len(args.NodeEvents) == 0 {
|
|
|
|
return fmt.Errorf("no node events given")
|
|
|
|
}
|
|
|
|
for nodeID, events := range args.NodeEvents {
|
|
|
|
if len(events) == 0 {
|
|
|
|
return fmt.Errorf("no node events given for node %q", nodeID)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
_, index, err := n.srv.raftApply(structs.UpsertNodeEventsType, args)
|
|
|
|
if err != nil {
|
2018-09-15 23:23:13 +00:00
|
|
|
n.logger.Error("upserting node events failed", "error", err)
|
2018-03-14 00:52:12 +00:00
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
|
|
|
reply.Index = index
|
|
|
|
return nil
|
|
|
|
}
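// EmitEvents expects a map of node ID to the events that should be appended
// for that node, and rejects empty maps or empty event slices before the Raft
// apply. A minimal request-construction sketch; the subsystem constant and
// the NodeEvent field set are assumptions about the structs package made for
// illustration.
func exampleEmitEventsRequest(nodeID string) *structs.EmitNodeEventsRequest {
	return &structs.EmitNodeEventsRequest{
		NodeEvents: map[string][]*structs.NodeEvent{
			nodeID: {
				{
					Message:   NodeDrainEventDrainDisabled, // reuses a message constant defined above
					Subsystem: structs.NodeEventSubsystemDrain,
					Timestamp: time.Now(),
				},
			},
		},
		WriteRequest: structs.WriteRequest{Region: "global"},
	}
}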
|