package nomad

import (
	"fmt"
	"time"

	"github.com/hashicorp/nomad/nomad/state"
	"github.com/hashicorp/nomad/nomad/structs"
	"github.com/hashicorp/nomad/scheduler"
)

// CoreScheduler is a special "scheduler" that is registered
// as "_core". It is used to run various administrative work
// across the cluster.
type CoreScheduler struct {
	srv  *Server
	snap *state.StateSnapshot
}
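
// The state snapshot gives the core scheduler a consistent, point-in-time
// view of cluster state: all GC eligibility decisions below are read from
// the snapshot, while the actual deletions are issued to the leader via RPC
// so they flow through the normal replicated write path.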

// NewCoreScheduler is used to return a new core scheduler instance
func NewCoreScheduler(srv *Server, snap *state.StateSnapshot) scheduler.Scheduler {
	s := &CoreScheduler{
		srv:  srv,
		snap: snap,
	}
	return s
}

// Process is used to implement the scheduler.Scheduler interface
func (s *CoreScheduler) Process(eval *structs.Evaluation) error {
	switch eval.JobID {
	case structs.CoreJobEvalGC:
		return s.evalGC(eval)
	case structs.CoreJobNodeGC:
		return s.nodeGC(eval)
	case structs.CoreJobJobGC:
		return s.jobGC(eval)
	default:
		return fmt.Errorf("core scheduler cannot handle job '%s'", eval.JobID)
	}
}
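
// A core job evaluation is normally created by the leader's periodic
// dispatcher rather than submitted by users. A minimal sketch of such an
// eval (field values are illustrative of the usual construction, not
// necessarily the leader's exact code):
//
//	eval := &structs.Evaluation{
//		ID:          generateUUID(),
//		Priority:    structs.CoreJobPriority,
//		Type:        structs.JobTypeCore,
//		TriggeredBy: structs.EvalTriggerScheduled,
//		JobID:       structs.CoreJobEvalGC,
//		Status:      structs.EvalStatusPending,
//	}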

// jobGC is used to garbage collect eligible jobs.
func (c *CoreScheduler) jobGC(eval *structs.Evaluation) error {
	// Get all the jobs eligible for garbage collection.
	iter, err := c.snap.JobsByGC(true)
	if err != nil {
		return err
	}

	// Get the time table to calculate GC cutoffs.
	tt := c.srv.fsm.TimeTable()
	cutoff := time.Now().UTC().Add(-1 * c.srv.config.JobGCThreshold)
	oldThreshold := tt.NearestIndex(cutoff)
	c.srv.logger.Printf("[DEBUG] sched.core: job GC: scanning before index %d (%v)",
		oldThreshold, c.srv.config.JobGCThreshold)

	// Collect the allocations, evaluations and jobs to GC
	var gcAlloc, gcEval, gcJob []string

OUTER:
	for raw := iter.Next(); raw != nil; raw = iter.Next() {
		job := raw.(*structs.Job)

		// Ignore new jobs.
		if job.CreateIndex > oldThreshold {
			continue
		}

		evals, err := c.snap.EvalsByJob(job.ID)
		if err != nil {
			c.srv.logger.Printf("[ERR] sched.core: failed to get evals for job %s: %v", job.ID, err)
			continue
		}

		// Collect this job's evals and allocs locally first so that a
		// disqualifying eval does not leave partial entries in the GC
		// lists for a job that is ultimately skipped.
		var jobEval, jobAlloc []string
		for _, eval := range evals {
			gc, allocs, err := c.gcEval(eval, oldThreshold)
			if err != nil || !gc {
				continue OUTER
			}

			jobEval = append(jobEval, eval.ID)
			jobAlloc = append(jobAlloc, allocs...)
		}

		// Job and all its evaluations are eligible for garbage collection
		gcEval = append(gcEval, jobEval...)
		gcAlloc = append(gcAlloc, jobAlloc...)
		gcJob = append(gcJob, job.ID)
	}

	// Fast-path the nothing case
	if len(gcEval) == 0 && len(gcAlloc) == 0 && len(gcJob) == 0 {
		return nil
	}
	c.srv.logger.Printf("[DEBUG] sched.core: job GC: %d jobs, %d evaluations, %d allocs eligible",
		len(gcJob), len(gcEval), len(gcAlloc))

	// Reap the evals and allocs
	if err := c.evalReap(gcEval, gcAlloc); err != nil {
		return err
	}

	// Call to the leader to deregister the jobs.
	for _, job := range gcJob {
		req := structs.JobDeregisterRequest{
			JobID: job,
			WriteRequest: structs.WriteRequest{
				Region: c.srv.config.Region,
			},
		}
		var resp structs.JobDeregisterResponse
		if err := c.srv.RPC("Job.Deregister", &req, &resp); err != nil {
			c.srv.logger.Printf("[ERR] sched.core: job deregister failed: %v", err)
			return err
		}
	}

	return nil
}
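
// Note the asymmetry above: evals and allocs are reaped in one batched
// request via evalReap, while jobs are deregistered with one Job.Deregister
// RPC each; no batched job-reap endpoint is used here.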

// evalGC is used to garbage collect old evaluations
func (c *CoreScheduler) evalGC(eval *structs.Evaluation) error {
	// Iterate over the evaluations
	iter, err := c.snap.Evals()
	if err != nil {
		return err
	}

	// Compute the old threshold limit for GC using the FSM
	// time table. This is a rough mapping of a time to the
	// Raft index it belongs to.
	tt := c.srv.fsm.TimeTable()
	cutoff := time.Now().UTC().Add(-1 * c.srv.config.EvalGCThreshold)
	oldThreshold := tt.NearestIndex(cutoff)
	c.srv.logger.Printf("[DEBUG] sched.core: eval GC: scanning before index %d (%v)",
		oldThreshold, c.srv.config.EvalGCThreshold)
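
	// For example, with an EvalGCThreshold of one hour the cutoff is one
	// hour in the past; if the time table recorded Raft index 5000 around
	// that time, only evals whose ModifyIndex is at or below 5000 are
	// candidates. (The index value is illustrative.)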

	// Collect the allocations and evaluations to GC
	var gcAlloc, gcEval []string
	for raw := iter.Next(); raw != nil; raw = iter.Next() {
		eval := raw.(*structs.Evaluation)
		gc, allocs, err := c.gcEval(eval, oldThreshold)
		if err != nil {
			return err
		}

		if gc {
			gcEval = append(gcEval, eval.ID)
			gcAlloc = append(gcAlloc, allocs...)
		}
	}

	// Fast-path the nothing case
	if len(gcEval) == 0 && len(gcAlloc) == 0 {
		return nil
	}
	c.srv.logger.Printf("[DEBUG] sched.core: eval GC: %d evaluations, %d allocs eligible",
		len(gcEval), len(gcAlloc))

	return c.evalReap(gcEval, gcAlloc)
}

// gcEval returns whether the eval should be garbage collected given a Raft
// threshold index. The eval is disqualified from garbage collection if it or
// any of its allocs is non-terminal or newer than the threshold. If the eval
// should be garbage collected, the IDs of the allocs that should be removed
// with it are returned as well.
func (c *CoreScheduler) gcEval(eval *structs.Evaluation, thresholdIndex uint64) (
	bool, []string, error) {
	// Ignore non-terminal and new evaluations
	if !eval.TerminalStatus() || eval.ModifyIndex > thresholdIndex {
		return false, nil, nil
	}

	// Get the allocations by eval
	allocs, err := c.snap.AllocsByEval(eval.ID)
	if err != nil {
		c.srv.logger.Printf("[ERR] sched.core: failed to get allocs for eval %s: %v",
			eval.ID, err)
		return false, nil, err
	}

	// Scan the allocations to ensure they are terminal and old
	for _, alloc := range allocs {
		if !alloc.TerminalStatus() || alloc.ModifyIndex > thresholdIndex {
			return false, nil, nil
		}
	}

	allocIds := make([]string, len(allocs))
	for i, alloc := range allocs {
		allocIds[i] = alloc.ID
	}

	// Evaluation is eligible for garbage collection
	return true, allocIds, nil
}
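
// For instance, a terminal eval whose allocs are all terminal and older than
// the threshold yields (true, allocIds, nil), while the same eval with a
// single running alloc yields (false, nil, nil).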

// evalReap contacts the leader and issues a reap on the passed evals and
// allocs.
func (c *CoreScheduler) evalReap(evals, allocs []string) error {
	// Call to the leader to issue the reap
	req := structs.EvalDeleteRequest{
		Evals:  evals,
		Allocs: allocs,
		WriteRequest: structs.WriteRequest{
			Region: c.srv.config.Region,
		},
	}
	var resp structs.GenericResponse
	if err := c.srv.RPC("Eval.Reap", &req, &resp); err != nil {
		c.srv.logger.Printf("[ERR] sched.core: eval reap failed: %v", err)
		return err
	}

	return nil
}
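
// Passing all collected IDs in a single EvalDeleteRequest keeps the reap to
// one RPC round trip regardless of how many evals and allocs were gathered;
// the leader then presumably applies the delete through Raft as one batch.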

// nodeGC is used to garbage collect old nodes
func (c *CoreScheduler) nodeGC(eval *structs.Evaluation) error {
	// Iterate over the nodes
	iter, err := c.snap.Nodes()
	if err != nil {
		return err
	}

	// Compute the old threshold limit for GC using the FSM
	// time table. This is a rough mapping of a time to the
	// Raft index it belongs to.
	tt := c.srv.fsm.TimeTable()
	cutoff := time.Now().UTC().Add(-1 * c.srv.config.NodeGCThreshold)
	oldThreshold := tt.NearestIndex(cutoff)
	c.srv.logger.Printf("[DEBUG] sched.core: node GC: scanning before index %d (%v)",
		oldThreshold, c.srv.config.NodeGCThreshold)

	// Collect the nodes to GC
	var gcNode []string
	for raw := iter.Next(); raw != nil; raw = iter.Next() {
		node := raw.(*structs.Node)

		// Ignore non-terminal and new nodes
		if !node.TerminalStatus() || node.ModifyIndex > oldThreshold {
			continue
		}

		// Get the allocations by node
		allocs, err := c.snap.AllocsByNode(node.ID)
		if err != nil {
			c.srv.logger.Printf("[ERR] sched.core: failed to get allocs for node %s: %v",
				node.ID, err)
			continue
		}

		// If there are any allocations, skip the node
		if len(allocs) > 0 {
			continue
		}

		// Node is eligible for garbage collection
		gcNode = append(gcNode, node.ID)
	}

	// Fast-path the nothing case
	if len(gcNode) == 0 {
		return nil
	}
	c.srv.logger.Printf("[DEBUG] sched.core: node GC: %d nodes eligible", len(gcNode))

	// Call to the leader to issue the reap
	for _, nodeID := range gcNode {
		req := structs.NodeDeregisterRequest{
			NodeID: nodeID,
			WriteRequest: structs.WriteRequest{
				Region: c.srv.config.Region,
			},
		}
		var resp structs.NodeUpdateResponse
		if err := c.srv.RPC("Node.Deregister", &req, &resp); err != nil {
			c.srv.logger.Printf("[ERR] sched.core: node '%s' reap failed: %v", nodeID, err)
			return err
		}
	}
	return nil
}