open-nomad/nomad/core_sched.go

package nomad
import (
"fmt"
"math"
"time"
"github.com/hashicorp/nomad/nomad/state"
"github.com/hashicorp/nomad/nomad/structs"
"github.com/hashicorp/nomad/scheduler"
)
var (
// maxIdsPerReap is the maximum number of evals and allocations to reap in a
// single Raft transaction. This is to ensure that the Raft message does not
// become too large.
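// Each ID is assumed to be a 36-character UUID string, which is where the
// divisor of 36 below comes from.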
maxIdsPerReap = (1024 * 256) / 36 // 0.25 MB of ids.
)
// CoreScheduler is a special "scheduler" that is registered
// as "_core". It is used to run various administrative work
// across the cluster.
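// The state snapshot provides a consistent point-in-time view for deciding
// what is eligible; the actual deletions are applied through RPCs to the leader.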
type CoreScheduler struct {
srv *Server
snap *state.StateSnapshot
}
// NewCoreScheduler is used to return a new core scheduler instance
func NewCoreScheduler(srv *Server, snap *state.StateSnapshot) scheduler.Scheduler {
s := &CoreScheduler{
srv: srv,
snap: snap,
}
return s
}
// Process is used to implement the scheduler.Scheduler interface
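// Core evaluations carry the name of an internal GC job in their JobID rather
// than a user job ID; they are typically enqueued by the leader on a periodic
// timer, or on demand when a GC is forced.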
func (c *CoreScheduler) Process(eval *structs.Evaluation) error {
switch eval.JobID {
case structs.CoreJobEvalGC:
return c.evalGC(eval)
case structs.CoreJobNodeGC:
return c.nodeGC(eval)
case structs.CoreJobJobGC:
return c.jobGC(eval)
case structs.CoreJobForceGC:
return c.forceGC(eval)
default:
return fmt.Errorf("core scheduler cannot handle job '%s'", eval.JobID)
}
}
// forceGC is used to garbage collect all eligible objects.
func (c *CoreScheduler) forceGC(eval *structs.Evaluation) error {
if err := c.jobGC(eval); err != nil {
return err
}
if err := c.evalGC(eval); err != nil {
return err
}
// Node GC must occur after the others to ensure the allocations are
// cleared.
return c.nodeGC(eval)
}
// jobGC is used to garbage collect eligible jobs.
func (c *CoreScheduler) jobGC(eval *structs.Evaluation) error {
// Get all the jobs eligible for garbage collection.
iter, err := c.snap.JobsByGC(true)
if err != nil {
return err
}
var oldThreshold uint64
if eval.JobID == structs.CoreJobForceGC {
// The GC was forced, so set the threshold to its maximum, making
// everything eligible for collection.
oldThreshold = math.MaxUint64
c.srv.logger.Println("[DEBUG] sched.core: forced job GC")
} else {
// Get the time table to calculate GC cutoffs.
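// The time table provides a rough mapping of a time to the Raft index it
// belongs to.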
tt := c.srv.fsm.TimeTable()
cutoff := time.Now().UTC().Add(-1 * c.srv.config.JobGCThreshold)
oldThreshold = tt.NearestIndex(cutoff)
c.srv.logger.Printf("[DEBUG] sched.core: job GC: scanning before index %d (%v)",
oldThreshold, c.srv.config.JobGCThreshold)
}
// Collect the allocations, evaluations and jobs to GC
var gcAlloc, gcEval, gcJob []string
OUTER:
for i := iter.Next(); i != nil; i = iter.Next() {
job := i.(*structs.Job)
// Ignore new jobs.
if job.CreateIndex > oldThreshold {
continue
}
evals, err := c.snap.EvalsByJob(job.ID)
if err != nil {
c.srv.logger.Printf("[ERR] sched.core: failed to get evals for job %s: %v", job.ID, err)
continue
}
for _, eval := range evals {
gc, allocs, err := c.gcEval(eval, oldThreshold)
if err != nil || !gc {
// Skip the job: one of its evaluations or allocations is either
// non-terminal or too new to collect (or the lookup failed).
continue OUTER
}
gcEval = append(gcEval, eval.ID)
gcAlloc = append(gcAlloc, allocs...)
}
// Job is eligible for garbage collection
gcJob = append(gcJob, job.ID)
}
// Fast-path the nothing case
if len(gcEval) == 0 && len(gcAlloc) == 0 && len(gcJob) == 0 {
return nil
}
c.srv.logger.Printf("[DEBUG] sched.core: job GC: %d jobs, %d evaluations, %d allocs eligible",
len(gcJob), len(gcEval), len(gcAlloc))
// Reap the evals and allocs
if err := c.evalReap(gcEval, gcAlloc); err != nil {
return err
}
// Call to the leader to deregister the jobs.
for _, job := range gcJob {
req := structs.JobDeregisterRequest{
JobID: job,
WriteRequest: structs.WriteRequest{
Region: c.srv.config.Region,
},
}
var resp structs.JobDeregisterResponse
if err := c.srv.RPC("Job.Deregister", &req, &resp); err != nil {
c.srv.logger.Printf("[ERR] sched.core: job deregister failed: %v", err)
return err
}
}
return nil
}
// evalGC is used to garbage collect old evaluations
func (c *CoreScheduler) evalGC(eval *structs.Evaluation) error {
// Iterate over the evaluations
iter, err := c.snap.Evals()
if err != nil {
return err
}
var oldThreshold uint64
if eval.JobID == structs.CoreJobForceGC {
// The GC was forced, so set the threshold to its maximum, making
// everything eligible for collection.
oldThreshold = math.MaxUint64
c.srv.logger.Println("[DEBUG] sched.core: forced eval GC")
} else {
// Compute the old threshold limit for GC using the FSM
// time table. This is a rough mapping of a time to the
// Raft index it belongs to.
tt := c.srv.fsm.TimeTable()
cutoff := time.Now().UTC().Add(-1 * c.srv.config.EvalGCThreshold)
oldThreshold = tt.NearestIndex(cutoff)
c.srv.logger.Printf("[DEBUG] sched.core: eval GC: scanning before index %d (%v)",
oldThreshold, c.srv.config.EvalGCThreshold)
}
// Collect the allocations and evaluations to GC
var gcAlloc, gcEval []string
for raw := iter.Next(); raw != nil; raw = iter.Next() {
eval := raw.(*structs.Evaluation)
gc, allocs, err := c.gcEval(eval, oldThreshold)
if err != nil {
return err
}
// If the eval is from a running "batch" job we don't want to garbage
// collect its allocations. If a long-running batch job's terminal
// allocations were GC'd, the scheduler would re-run them.
if eval.Type == structs.JobTypeBatch {
// Check if the job is running
job, err := c.snap.JobByID(eval.JobID)
if err != nil {
return err
}
// Only collect the allocations and evaluations if the job has been
// deregistered; if it is still registered and the eval has allocations,
// skip it.
if job != nil && len(allocs) != 0 {
continue
}
}
if gc {
gcEval = append(gcEval, eval.ID)
gcAlloc = append(gcAlloc, allocs...)
}
}
// Fast-path the nothing case
if len(gcEval) == 0 && len(gcAlloc) == 0 {
return nil
}
c.srv.logger.Printf("[DEBUG] sched.core: eval GC: %d evaluations, %d allocs eligible",
len(gcEval), len(gcAlloc))
return c.evalReap(gcEval, gcAlloc)
}
// gcEval returns whether the eval should be garbage collected given a raft
// threshold index. The eval is ineligible for garbage collection if it or any
// of its allocs are newer than the threshold. If the eval should be garbage
// collected, the IDs of the allocs that should be removed with it are also
// returned.
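// Note that an error here is handled differently by callers: jobGC skips the
// affected job, while evalGC aborts the whole pass.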
func (c *CoreScheduler) gcEval(eval *structs.Evaluation, thresholdIndex uint64) (
bool, []string, error) {
// Ignore non-terminal and new evaluations
if !eval.TerminalStatus() || eval.ModifyIndex > thresholdIndex {
return false, nil, nil
}
// Get the allocations by eval
allocs, err := c.snap.AllocsByEval(eval.ID)
if err != nil {
c.srv.logger.Printf("[ERR] sched.core: failed to get allocs for eval %s: %v",
eval.ID, err)
return false, nil, err
}
// Scan the allocations to ensure they are terminal and old
for _, alloc := range allocs {
if !alloc.TerminalStatus() || alloc.ModifyIndex > thresholdIndex {
return false, nil, nil
}
}
allocIds := make([]string, len(allocs))
for i, alloc := range allocs {
allocIds[i] = alloc.ID
}
// Evaluation is eligible for garbage collection
return true, allocIds, nil
}
// evalReap contacts the leader and issues a reap on the passed evals and
// allocs.
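// The IDs are partitioned via partitionReap so that no single request exceeds
// maxIdsPerReap IDs.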
func (c *CoreScheduler) evalReap(evals, allocs []string) error {
// Call to the leader to issue the reap
for _, req := range c.partitionReap(evals, allocs) {
var resp structs.GenericResponse
if err := c.srv.RPC("Eval.Reap", req, &resp); err != nil {
c.srv.logger.Printf("[ERR] sched.core: eval reap failed: %v", err)
return err
}
}
return nil
}
// partitionReap returns a list of EvalDeleteRequest to make, ensuring a single
// request does not contain too many allocations and evaluations. This is
// necessary to ensure that the Raft transaction does not become too large.
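// For example, hypothetically taking maxIdsPerReap to be 3, partitioning
// allocs [a1 a2 a3 a4] and evals [e1 e2] yields two requests:
// {Allocs: [a1 a2 a3]} and {Allocs: [a4], Evals: [e1 e2]}.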
func (c *CoreScheduler) partitionReap(evals, allocs []string) []*structs.EvalDeleteRequest {
var requests []*structs.EvalDeleteRequest
submittedEvals, submittedAllocs := 0, 0
for submittedEvals != len(evals) || submittedAllocs != len(allocs) {
req := &structs.EvalDeleteRequest{
WriteRequest: structs.WriteRequest{
Region: c.srv.config.Region,
},
}
requests = append(requests, req)
available := maxIdsPerReap
// Add the allocs first
if remaining := len(allocs) - submittedAllocs; remaining > 0 {
if remaining <= available {
req.Allocs = allocs[submittedAllocs:]
available -= remaining
submittedAllocs += remaining
} else {
req.Allocs = allocs[submittedAllocs : submittedAllocs+available]
submittedAllocs += available
// Exhausted space so skip adding evals
continue
}
}
// Add the evals
if remaining := len(evals) - submittedEvals; remaining > 0 {
if remaining <= available {
req.Evals = evals[submittedEvals:]
submittedEvals += remaining
} else {
req.Evals = evals[submittedEvals : submittedEvals+available]
submittedEvals += available
}
}
}
return requests
}
// nodeGC is used to garbage collect old nodes
func (c *CoreScheduler) nodeGC(eval *structs.Evaluation) error {
// Iterate over the nodes
iter, err := c.snap.Nodes()
if err != nil {
return err
}
var oldThreshold uint64
if eval.JobID == structs.CoreJobForceGC {
// The GC was forced, so set the threshold to its maximum, making
// everything eligible for collection.
oldThreshold = math.MaxUint64
c.srv.logger.Println("[DEBUG] sched.core: forced node GC")
} else {
// Compute the old threshold limit for GC using the FSM
// time table. This is a rough mapping of a time to the
// Raft index it belongs to.
tt := c.srv.fsm.TimeTable()
cutoff := time.Now().UTC().Add(-1 * c.srv.config.NodeGCThreshold)
oldThreshold = tt.NearestIndex(cutoff)
c.srv.logger.Printf("[DEBUG] sched.core: node GC: scanning before index %d (%v)",
oldThreshold, c.srv.config.NodeGCThreshold)
}
// Collect the nodes to GC
var gcNode []string
for {
raw := iter.Next()
if raw == nil {
break
}
node := raw.(*structs.Node)
// Ignore non-terminal and new nodes
if !node.TerminalStatus() || node.ModifyIndex > oldThreshold {
continue
}
// Get the allocations by node
allocs, err := c.snap.AllocsByNode(node.ID)
if err != nil {
c.srv.logger.Printf("[ERR] sched.core: failed to get allocs for node %s: %v",
node.ID, err)
continue
}
// If there are any allocations, skip the node
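// as they still reference it. This is why forceGC runs node GC last, after
// allocations have been reaped.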
if len(allocs) > 0 {
continue
}
// Node is eligible for garbage collection
gcNode = append(gcNode, node.ID)
}
// Fast-path the nothing case
if len(gcNode) == 0 {
return nil
}
c.srv.logger.Printf("[DEBUG] sched.core: node GC: %d nodes eligible", len(gcNode))
// Call to the leader to issue the reap
for _, nodeID := range gcNode {
req := structs.NodeDeregisterRequest{
NodeID: nodeID,
WriteRequest: structs.WriteRequest{
Region: c.srv.config.Region,
},
}
var resp structs.NodeUpdateResponse
if err := c.srv.RPC("Node.Deregister", &req, &resp); err != nil {
c.srv.logger.Printf("[ERR] sched.core: node '%s' reap failed: %v", nodeID, err)
return err
}
}
return nil
}