// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: MPL-2.0

package nomad

import (
	"context"
	"encoding/json"
	"fmt"
	"math"
	"strings"
	"time"

	log "github.com/hashicorp/go-hclog"
	memdb "github.com/hashicorp/go-memdb"
	version "github.com/hashicorp/go-version"
	"github.com/hashicorp/nomad/helper/uuid"
	"github.com/hashicorp/nomad/nomad/state"
	"github.com/hashicorp/nomad/nomad/structs"
	"github.com/hashicorp/nomad/scheduler"
	"golang.org/x/time/rate"
)

// CoreScheduler is a special "scheduler" that is registered
// as "_core". It is used to run various administrative work
// across the cluster.
type CoreScheduler struct {
	srv    *Server
	snap   *state.StateSnapshot
	logger log.Logger
}

// NewCoreScheduler is used to return a new system scheduler instance
func NewCoreScheduler(srv *Server, snap *state.StateSnapshot) scheduler.Scheduler {
	s := &CoreScheduler{
		srv:    srv,
		snap:   snap,
		logger: srv.logger.ResetNamed("core.sched"),
	}
	return s
}

// Process is used to implement the scheduler.Scheduler interface
func (c *CoreScheduler) Process(eval *structs.Evaluation) error {
	job := strings.Split(eval.JobID, ":") // extra data can be smuggled in w/ JobID
	switch job[0] {
	case structs.CoreJobEvalGC:
		return c.evalGC(eval)
	case structs.CoreJobNodeGC:
		return c.nodeGC(eval)
	case structs.CoreJobJobGC:
		return c.jobGC(eval)
	case structs.CoreJobDeploymentGC:
		return c.deploymentGC(eval)
	case structs.CoreJobCSIVolumeClaimGC:
		return c.csiVolumeClaimGC(eval)
	case structs.CoreJobCSIPluginGC:
		return c.csiPluginGC(eval)
	case structs.CoreJobOneTimeTokenGC:
		return c.expiredOneTimeTokenGC(eval)
	case structs.CoreJobLocalTokenExpiredGC:
		return c.expiredACLTokenGC(eval, false)
	case structs.CoreJobGlobalTokenExpiredGC:
		return c.expiredACLTokenGC(eval, true)
	case structs.CoreJobRootKeyRotateOrGC:
		return c.rootKeyRotateOrGC(eval)
	case structs.CoreJobVariablesRekey:
		return c.variablesRekey(eval)
	case structs.CoreJobForceGC:
		return c.forceGC(eval)
	default:
		return fmt.Errorf("core scheduler cannot handle job '%s'", eval.JobID)
	}
}

// forceGC is used to garbage collect all eligible objects.
func (c *CoreScheduler) forceGC(eval *structs.Evaluation) error {
	if err := c.jobGC(eval); err != nil {
		return err
	}
	if err := c.evalGC(eval); err != nil {
		return err
	}
	if err := c.deploymentGC(eval); err != nil {
		return err
	}
	if err := c.csiPluginGC(eval); err != nil {
		return err
	}
	if err := c.csiVolumeClaimGC(eval); err != nil {
		return err
	}
	if err := c.expiredOneTimeTokenGC(eval); err != nil {
		return err
	}
	if err := c.expiredACLTokenGC(eval, false); err != nil {
		return err
	}
	if err := c.expiredACLTokenGC(eval, true); err != nil {
		return err
	}
	if err := c.rootKeyGC(eval); err != nil {
		return err
	}
	// Node GC must occur after the others to ensure the allocations are
	// cleared.
	return c.nodeGC(eval)
}

// jobGC is used to garbage collect eligible jobs.
func (c *CoreScheduler) jobGC(eval *structs.Evaluation) error {
	// Get all the jobs eligible for garbage collection.
	ws := memdb.NewWatchSet()
	iter, err := c.snap.JobsByGC(ws, true)
	if err != nil {
		return err
	}

	oldThreshold := c.getThreshold(eval, "job",
		"job_gc_threshold", c.srv.config.JobGCThreshold)

	// Collect the allocations, evaluations and jobs to GC
	var gcAlloc, gcEval []string
	var gcJob []*structs.Job

OUTER:
	for i := iter.Next(); i != nil; i = iter.Next() {
		job := i.(*structs.Job)

		// Ignore new jobs.
		if job.CreateIndex > oldThreshold {
			continue
		}

		ws := memdb.NewWatchSet()
		evals, err := c.snap.EvalsByJob(ws, job.Namespace, job.ID)
		if err != nil {
			c.logger.Error("job GC failed to get evals for job", "job", job.ID, "error", err)
			continue
		}

		allEvalsGC := true
		var jobAlloc, jobEval []string
		for _, eval := range evals {
			gc, allocs, err := c.gcEval(eval, oldThreshold, true)
			if err != nil {
				continue OUTER
			} else if gc {
				jobEval = append(jobEval, eval.ID)
				jobAlloc = append(jobAlloc, allocs...)
			} else {
				allEvalsGC = false
				break
			}
		}

		// Job is eligible for garbage collection
		if allEvalsGC {
			gcJob = append(gcJob, job)
			gcAlloc = append(gcAlloc, jobAlloc...)
			gcEval = append(gcEval, jobEval...)
		}

	}

	// Fast-path the nothing case
	if len(gcEval) == 0 && len(gcAlloc) == 0 && len(gcJob) == 0 {
		return nil
	}

	c.logger.Debug("job GC found eligible objects",
		"jobs", len(gcJob), "evals", len(gcEval), "allocs", len(gcAlloc))

	// Reap the evals and allocs
	if err := c.evalReap(gcEval, gcAlloc); err != nil {
		return err
	}

	// Reap the jobs
	return c.jobReap(gcJob, eval.LeaderACL)
}

// jobReap contacts the leader and issues a reap on the passed jobs
func (c *CoreScheduler) jobReap(jobs []*structs.Job, leaderACL string) error {
	// Call to the leader to issue the reap
	for _, req := range c.partitionJobReap(jobs, leaderACL, structs.MaxUUIDsPerWriteRequest) {
		var resp structs.JobBatchDeregisterResponse
		if err := c.srv.RPC("Job.BatchDeregister", req, &resp); err != nil {
			c.logger.Error("batch job reap failed", "error", err)
			return err
		}
	}

	return nil
}

// partitionJobReap returns a list of JobBatchDeregisterRequests to make,
// ensuring a single request does not contain too many jobs. This is necessary
// to ensure that the Raft transaction does not become too large.
func (c *CoreScheduler) partitionJobReap(jobs []*structs.Job, leaderACL string, batchSize int) []*structs.JobBatchDeregisterRequest {
	option := &structs.JobDeregisterOptions{Purge: true}
	var requests []*structs.JobBatchDeregisterRequest
	submittedJobs := 0
	for submittedJobs != len(jobs) {
		req := &structs.JobBatchDeregisterRequest{
			Jobs: make(map[structs.NamespacedID]*structs.JobDeregisterOptions),
			WriteRequest: structs.WriteRequest{
				Region:    c.srv.config.Region,
				AuthToken: leaderACL,
			},
		}
		requests = append(requests, req)
		available := batchSize

		if remaining := len(jobs) - submittedJobs; remaining > 0 {
			if remaining <= available {
				for _, job := range jobs[submittedJobs:] {
					jns := structs.NamespacedID{ID: job.ID, Namespace: job.Namespace}
					req.Jobs[jns] = option
				}
				submittedJobs += remaining
			} else {
				for _, job := range jobs[submittedJobs : submittedJobs+available] {
					jns := structs.NamespacedID{ID: job.ID, Namespace: job.Namespace}
					req.Jobs[jns] = option
				}
				submittedJobs += available
			}
		}
	}

	return requests
}

// evalGC is used to garbage collect old evaluations
func (c *CoreScheduler) evalGC(eval *structs.Evaluation) error {
	// Iterate over the evaluations
	ws := memdb.NewWatchSet()
	iter, err := c.snap.Evals(ws, false)
	if err != nil {
		return err
	}

	oldThreshold := c.getThreshold(eval, "eval",
		"eval_gc_threshold", c.srv.config.EvalGCThreshold)
	batchOldThreshold := c.getThreshold(eval, "eval",
		"batch_eval_gc_threshold", c.srv.config.BatchEvalGCThreshold)

	// Collect the allocations and evaluations to GC
	var gcAlloc, gcEval []string
	for raw := iter.Next(); raw != nil; raw = iter.Next() {
		eval := raw.(*structs.Evaluation)

		gcThreshold := oldThreshold
		if eval.Type == structs.JobTypeBatch {
			gcThreshold = batchOldThreshold
		}

		gc, allocs, err := c.gcEval(eval, gcThreshold, false)
		if err != nil {
			return err
		}

		if gc {
			gcEval = append(gcEval, eval.ID)
		}
		gcAlloc = append(gcAlloc, allocs...)
	}

	// Fast-path the nothing case
	if len(gcEval) == 0 && len(gcAlloc) == 0 {
		return nil
	}
	c.logger.Debug("eval GC found eligible objects",
		"evals", len(gcEval), "allocs", len(gcAlloc))

	return c.evalReap(gcEval, gcAlloc)
}

// gcEval returns whether the eval should be garbage collected given a raft
// threshold index. The eval is disqualified from garbage collection if it or
// its allocs are not older than the threshold. If the eval should be garbage
// collected, the associated alloc IDs that should also be removed are
// returned.
func (c *CoreScheduler) gcEval(eval *structs.Evaluation, thresholdIndex uint64, allowBatch bool) (
	bool, []string, error) {
	// Ignore non-terminal and new evaluations
	if !eval.TerminalStatus() || eval.ModifyIndex > thresholdIndex {
		return false, nil, nil
	}

	// Create a watchset
	ws := memdb.NewWatchSet()

	// Look up the job
	job, err := c.snap.JobByID(ws, eval.Namespace, eval.JobID)
	if err != nil {
		return false, nil, err
	}

	// Get the allocations by eval
	allocs, err := c.snap.AllocsByEval(ws, eval.ID)
	if err != nil {
		c.logger.Error("failed to get allocs for eval",
			"eval_id", eval.ID, "error", err)
		return false, nil, err
	}

	// If the eval is from a running "batch" job we don't want to garbage
	// collect its most current allocations. If a long-running batch job's
	// terminal allocations were GC'd, the scheduler would re-run them.
	// However, we do want to GC old evals and allocs if newer ones exist due
	// to an update.
	//
	// The evaluation must also be older than the configured threshold before
	// it is GC'd, so that old evaluations and their referenced allocations
	// remain available for debugging.
	if eval.Type == structs.JobTypeBatch {
		// Check if the job is running

		// Can collect if either holds:
		// - Job doesn't exist
		// - Job is Stopped and dead
		// - allowBatch and the job is dead
		//
		// If we cannot collect outright, check if a partial GC may occur
		collect := job == nil || job.Status == structs.JobStatusDead && (job.Stop || allowBatch)
		if !collect {
			oldAllocs := olderVersionTerminalAllocs(allocs, job, thresholdIndex)
			gcEval := (len(oldAllocs) == len(allocs))
			return gcEval, oldAllocs, nil
		}
	}

	// Scan the allocations to ensure they are terminal and old
	gcEval := true
	var gcAllocIDs []string
	for _, alloc := range allocs {
		if !allocGCEligible(alloc, job, time.Now(), thresholdIndex) {
			// Can't GC the evaluation since not all of the allocations are
			// terminal
			gcEval = false
		} else {
			// The allocation is eligible to be GC'd
			gcAllocIDs = append(gcAllocIDs, alloc.ID)
		}
	}

	return gcEval, gcAllocIDs, nil
}

// olderVersionTerminalAllocs returns a list of terminal allocations that
// belong to the evaluation and may be GCed.
func olderVersionTerminalAllocs(allocs []*structs.Allocation, job *structs.Job, thresholdIndex uint64) []string {
	var ret []string
	for _, alloc := range allocs {
		if alloc.CreateIndex < job.JobModifyIndex && alloc.ModifyIndex < thresholdIndex && alloc.TerminalStatus() {
			ret = append(ret, alloc.ID)
		}
	}
	return ret
}

// evalReap contacts the leader and issues a reap on the passed evals and
// allocs.
func (c *CoreScheduler) evalReap(evals, allocs []string) error {
	// Call to the leader to issue the reap
	for _, req := range c.partitionEvalReap(evals, allocs, structs.MaxUUIDsPerWriteRequest) {
		var resp structs.GenericResponse
		if err := c.srv.RPC("Eval.Reap", req, &resp); err != nil {
			c.logger.Error("eval reap failed", "error", err)
			return err
		}
	}

	return nil
}

// partitionEvalReap returns a list of EvalReapRequest to make, ensuring a single
// request does not contain too many allocations and evaluations. This is
// necessary to ensure that the Raft transaction does not become too large.
func (c *CoreScheduler) partitionEvalReap(evals, allocs []string, batchSize int) []*structs.EvalReapRequest {
	var requests []*structs.EvalReapRequest
	submittedEvals, submittedAllocs := 0, 0
	for submittedEvals != len(evals) || submittedAllocs != len(allocs) {
		req := &structs.EvalReapRequest{
			WriteRequest: structs.WriteRequest{
				Region: c.srv.config.Region,
			},
		}
		requests = append(requests, req)
		available := batchSize

		// Add the allocs first
		if remaining := len(allocs) - submittedAllocs; remaining > 0 {
			if remaining <= available {
				req.Allocs = allocs[submittedAllocs:]
				available -= remaining
				submittedAllocs += remaining
			} else {
				req.Allocs = allocs[submittedAllocs : submittedAllocs+available]
				submittedAllocs += available

				// Exhausted space so skip adding evals
				continue
			}
		}

		// Add the evals
		if remaining := len(evals) - submittedEvals; remaining > 0 {
			if remaining <= available {
				req.Evals = evals[submittedEvals:]
				submittedEvals += remaining
			} else {
				req.Evals = evals[submittedEvals : submittedEvals+available]
				submittedEvals += available
			}
		}
	}

	return requests
}

// nodeGC is used to garbage collect old nodes
func (c *CoreScheduler) nodeGC(eval *structs.Evaluation) error {
	// Iterate over the nodes
	ws := memdb.NewWatchSet()
	iter, err := c.snap.Nodes(ws)
	if err != nil {
		return err
	}

	oldThreshold := c.getThreshold(eval, "node",
		"node_gc_threshold", c.srv.config.NodeGCThreshold)

	// Collect the nodes to GC
	var gcNode []string
OUTER:
	for {
		raw := iter.Next()
		if raw == nil {
			break
		}
		node := raw.(*structs.Node)

		// Ignore non-terminal and new nodes
		if !node.TerminalStatus() || node.ModifyIndex > oldThreshold {
			continue
		}

		// Get the allocations by node
		ws := memdb.NewWatchSet()
		allocs, err := c.snap.AllocsByNode(ws, node.ID)
		if err != nil {
			c.logger.Error("failed to get allocs for node",
				"node_id", node.ID, "error", err)
			continue
		}

		// If there are any non-terminal allocations, skip the node. If the node
		// is terminal and the allocations are not, the scheduler may not have
		// run yet to transition the allocs on the node to terminal. We delay
		// GC'ing until this happens.
		for _, alloc := range allocs {
			if !alloc.TerminalStatus() {
				continue OUTER
			}
		}

		// Node is eligible for garbage collection
		gcNode = append(gcNode, node.ID)
	}

	// Fast-path the nothing case
	if len(gcNode) == 0 {
		return nil
	}
	c.logger.Debug("node GC found eligible nodes", "nodes", len(gcNode))
	return c.nodeReap(eval, gcNode)
}

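// nodeReap contacts the leader to deregister the given nodes, batching the
// requests when all servers support batch deregistration.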
func (c *CoreScheduler) nodeReap(eval *structs.Evaluation, nodeIDs []string) error {
	// For old clusters, send single deregistration messages COMPAT(0.11)
	minVersionBatchNodeDeregister := version.Must(version.NewVersion("0.9.4"))
	if !ServersMeetMinimumVersion(c.srv.Members(), c.srv.Region(), minVersionBatchNodeDeregister, true) {
		for _, id := range nodeIDs {
			req := structs.NodeDeregisterRequest{
				NodeID: id,
				WriteRequest: structs.WriteRequest{
					Region:    c.srv.config.Region,
					AuthToken: eval.LeaderACL,
				},
			}
			var resp structs.NodeUpdateResponse
			if err := c.srv.RPC("Node.Deregister", &req, &resp); err != nil {
				c.logger.Error("node reap failed", "node_id", id, "error", err)
				return err
			}
		}
		return nil
	}

	// Call to the leader to issue the reap
	for _, ids := range partitionAll(structs.MaxUUIDsPerWriteRequest, nodeIDs) {
		req := structs.NodeBatchDeregisterRequest{
			NodeIDs: ids,
			WriteRequest: structs.WriteRequest{
				Region:    c.srv.config.Region,
				AuthToken: eval.LeaderACL,
			},
		}
		var resp structs.NodeUpdateResponse
		if err := c.srv.RPC("Node.BatchDeregister", &req, &resp); err != nil {
			c.logger.Error("node reap failed", "node_ids", ids, "error", err)
			return err
		}
	}
	return nil
}

// deploymentGC is used to garbage collect old deployments
func (c *CoreScheduler) deploymentGC(eval *structs.Evaluation) error {
	// Iterate over the deployments
	ws := memdb.NewWatchSet()
	iter, err := c.snap.Deployments(ws, state.SortDefault)
	if err != nil {
		return err
	}

	oldThreshold := c.getThreshold(eval, "deployment",
		"deployment_gc_threshold", c.srv.config.DeploymentGCThreshold)

	// Collect the deployments to GC
	var gcDeployment []string

OUTER:
	for {
		raw := iter.Next()
		if raw == nil {
			break
		}
		deploy := raw.(*structs.Deployment)

		// Ignore non-terminal and new deployments
		if deploy.Active() || deploy.ModifyIndex > oldThreshold {
			continue
		}

		// Ensure there are no allocs referencing this deployment.
		allocs, err := c.snap.AllocsByDeployment(ws, deploy.ID)
		if err != nil {
			c.logger.Error("failed to get allocs for deployment",
				"deployment_id", deploy.ID, "error", err)
			continue
		}

		// Ensure there is no allocation referencing the deployment.
		for _, alloc := range allocs {
			if !alloc.TerminalStatus() {
				continue OUTER
			}
		}

		// Deployment is eligible for garbage collection
		gcDeployment = append(gcDeployment, deploy.ID)
	}

	// Fast-path the nothing case
	if len(gcDeployment) == 0 {
		return nil
	}
	c.logger.Debug("deployment GC found eligible deployments", "deployments", len(gcDeployment))
	return c.deploymentReap(gcDeployment)
}

// deploymentReap contacts the leader and issues a reap on the passed
// deployments.
func (c *CoreScheduler) deploymentReap(deployments []string) error {
	// Call to the leader to issue the reap
	for _, req := range c.partitionDeploymentReap(deployments, structs.MaxUUIDsPerWriteRequest) {
		var resp structs.GenericResponse
		if err := c.srv.RPC("Deployment.Reap", req, &resp); err != nil {
			c.logger.Error("deployment reap failed", "error", err)
			return err
		}
	}

	return nil
}

// partitionDeploymentReap returns a list of DeploymentDeleteRequest to make,
// ensuring a single request does not contain too many deployments. This is
// necessary to ensure that the Raft transaction does not become too large.
func (c *CoreScheduler) partitionDeploymentReap(deployments []string, batchSize int) []*structs.DeploymentDeleteRequest {
	var requests []*structs.DeploymentDeleteRequest
	submittedDeployments := 0
	for submittedDeployments != len(deployments) {
		req := &structs.DeploymentDeleteRequest{
			WriteRequest: structs.WriteRequest{
				Region: c.srv.config.Region,
			},
		}
		requests = append(requests, req)
		available := batchSize

		if remaining := len(deployments) - submittedDeployments; remaining > 0 {
			if remaining <= available {
				req.Deployments = deployments[submittedDeployments:]
				submittedDeployments += remaining
			} else {
				req.Deployments = deployments[submittedDeployments : submittedDeployments+available]
				submittedDeployments += available
			}
		}
	}

	return requests
}

// allocGCEligible returns whether the allocation is eligible to be garbage
// collected according to its terminal status and its reschedule trackers
func allocGCEligible(a *structs.Allocation, job *structs.Job, gcTime time.Time, thresholdIndex uint64) bool {
	// Not in a terminal status and old enough
	if !a.TerminalStatus() || a.ModifyIndex > thresholdIndex {
		return false
	}

	// If the allocation is still running on the client we can not garbage
	// collect it.
	if a.ClientStatus == structs.AllocClientStatusRunning {
		return false
	}

	// If the job is deleted, stopped or dead all allocs can be removed
	if job == nil || job.Stop || job.Status == structs.JobStatusDead {
		return true
	}

	// If the allocation's desired state is Stop, it can be GCed even if it
	// has failed and hasn't been rescheduled. This can happen during job updates
	if a.DesiredStatus == structs.AllocDesiredStatusStop {
		return true
	}

	// If the alloc hasn't failed then we don't need to consider it for rescheduling
	// Rescheduling needs to copy over information from the previous alloc so that it
	// can enforce the reschedule policy
	if a.ClientStatus != structs.AllocClientStatusFailed {
		return true
	}

	var reschedulePolicy *structs.ReschedulePolicy
	tg := job.LookupTaskGroup(a.TaskGroup)

	if tg != nil {
		reschedulePolicy = tg.ReschedulePolicy
	}
	// No reschedule policy or rescheduling is disabled
	if reschedulePolicy == nil || (!reschedulePolicy.Unlimited && reschedulePolicy.Attempts == 0) {
		return true
	}
	// Restart tracking information has been carried forward
	if a.NextAllocation != "" {
		return true
	}

	// This task has unlimited rescheduling and the alloc has not been replaced, so we can't GC it yet
	if reschedulePolicy.Unlimited {
		return false
	}

	// No restarts have been attempted yet
	if a.RescheduleTracker == nil || len(a.RescheduleTracker.Events) == 0 {
		return false
	}

	// Don't GC if most recent reschedule attempt is within time interval
	interval := reschedulePolicy.Interval
	lastIndex := len(a.RescheduleTracker.Events)
	lastRescheduleEvent := a.RescheduleTracker.Events[lastIndex-1]
	timeDiff := gcTime.UTC().UnixNano() - lastRescheduleEvent.RescheduleTime

	return timeDiff > interval.Nanoseconds()
}

// csiVolumeClaimGC is used to garbage collect CSI volume claims
func (c *CoreScheduler) csiVolumeClaimGC(eval *structs.Evaluation) error {

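	// gcClaims sends a CSIVolume.Claim RPC with the GC claim mode and the
	// unpublishing state to release the volume's stale claims.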
	gcClaims := func(ns, volID string) error {
		req := &structs.CSIVolumeClaimRequest{
			VolumeID: volID,
			Claim:    structs.CSIVolumeClaimGC,
			State:    structs.CSIVolumeClaimStateUnpublishing,
			WriteRequest: structs.WriteRequest{
				Namespace: ns,
				Region:    c.srv.Region(),
				AuthToken: eval.LeaderACL,
			},
		}
		err := c.srv.RPC("CSIVolume.Claim", req, &structs.CSIVolumeClaimResponse{})
		return err
	}

	c.logger.Trace("garbage collecting unclaimed CSI volume claims", "eval.JobID", eval.JobID)

	// Volume ID smuggled in with the eval's own JobID
	evalVolID := strings.Split(eval.JobID, ":")

	// COMPAT(1.0): 0.11.0 shipped with 3 fields. tighten this check to len == 2
	if len(evalVolID) > 1 {
		volID := evalVolID[1]
		return gcClaims(eval.Namespace, volID)
	}

	ws := memdb.NewWatchSet()

	iter, err := c.snap.CSIVolumes(ws)
	if err != nil {
		return err
	}

	oldThreshold := c.getThreshold(eval, "CSI volume claim",
		"csi_volume_claim_gc_threshold", c.srv.config.CSIVolumeClaimGCThreshold)

	for i := iter.Next(); i != nil; i = iter.Next() {
		vol := i.(*structs.CSIVolume)

		// Ignore new volumes
		if vol.CreateIndex > oldThreshold {
			continue
		}

		// we only call the claim release RPC if the volume has claims
		// that no longer have valid allocations. otherwise we'd send
		// out a lot of do-nothing RPCs.
		vol, err := c.snap.CSIVolumeDenormalize(ws, vol.Copy())
		if err != nil {
			return err
		}
		if len(vol.PastClaims) > 0 {
			err = gcClaims(vol.Namespace, vol.ID)
			if err != nil {
				return err
			}
		}

	}
	return nil

}

// csiPluginGC is used to garbage collect unused plugins
func (c *CoreScheduler) csiPluginGC(eval *structs.Evaluation) error {

	ws := memdb.NewWatchSet()

	iter, err := c.snap.CSIPlugins(ws)
	if err != nil {
		return err
	}

	oldThreshold := c.getThreshold(eval, "CSI plugin",
		"csi_plugin_gc_threshold", c.srv.config.CSIPluginGCThreshold)

	for i := iter.Next(); i != nil; i = iter.Next() {
		plugin := i.(*structs.CSIPlugin)

		// Ignore new plugins
		if plugin.CreateIndex > oldThreshold {
			continue
		}

		req := &structs.CSIPluginDeleteRequest{ID: plugin.ID,
			QueryOptions: structs.QueryOptions{
				Region:    c.srv.Region(),
				AuthToken: eval.LeaderACL,
			}}
		err := c.srv.RPC("CSIPlugin.Delete", req, &structs.CSIPluginDeleteResponse{})
		if err != nil {
			if strings.Contains(err.Error(), "plugin in use") {
				continue
			}
			c.logger.Error("failed to GC plugin", "plugin_id", plugin.ID, "error", err)
			return err
		}
	}
	return nil
}

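// expiredOneTimeTokenGC asks the leader to remove any one-time ACL tokens that
// have passed their expiration time.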
func (c *CoreScheduler) expiredOneTimeTokenGC(eval *structs.Evaluation) error {
	req := &structs.OneTimeTokenExpireRequest{
		WriteRequest: structs.WriteRequest{
			Region:    c.srv.Region(),
			AuthToken: eval.LeaderACL,
		},
	}
	return c.srv.RPC("ACL.ExpireOneTimeTokens", req, &structs.GenericResponse{})
}

// expiredACLTokenGC handles running the garbage collector for expired ACL
// tokens. It can be used for both local and global tokens and includes
// behaviour to account for periodic and user-actioned garbage collection
// invocations.
func (c *CoreScheduler) expiredACLTokenGC(eval *structs.Evaluation, global bool) error {

	// If ACLs are not enabled, we do not need to continue and should exit
	// early. This is not an error condition as callers can blindly call this
	// function without checking the configuration. If the caller wants this to
	// be an error, they should check this config value themselves.
	if !c.srv.config.ACLEnabled {
		return nil
	}

	// If the function has been triggered for global tokens, but we are not the
	// authoritative region, we should exit. This is not an error condition as
	// callers can blindly call this function without checking the
	// configuration. If the caller wants this to be an error, they should
	// check this config value themselves.
	if global && c.srv.config.AuthoritativeRegion != c.srv.Region() {
		return nil
	}

	// The object name is logged within the getThreshold function, therefore we
	// want to be clear what token type this trigger is for.
	tokenScope := "local"
	if global {
		tokenScope = "global"
	}

	expiryThresholdIdx := c.getThreshold(eval, tokenScope+" expired ACL tokens",
		"acl_token_expiration_gc_threshold", c.srv.config.ACLTokenExpirationGCThreshold)

	expiredIter, err := c.snap.ACLTokensByExpired(global)
	if err != nil {
		return err
	}

	var (
		expiredAccessorIDs []string
		num                int
	)

	// The memdb iterator contains all tokens which include an expiration time,
	// however, as the caller, we do not know at which point in the array the
	// tokens are no longer expired. This time therefore forms the basis at
	// which we draw the line in the iteration loop and find the final expired
	// token that is eligible for deletion.
	now := time.Now().UTC()

	for raw := expiredIter.Next(); raw != nil; raw = expiredIter.Next() {
		token := raw.(*structs.ACLToken)

		// The iteration order of the indexes means if we come across an
		// unexpired token, we can exit as we have found all currently expired
		// tokens.
		if !token.IsExpired(now) {
			break
		}

		// Check if the token is recent enough to skip, otherwise we'll delete
		// it.
		if token.CreateIndex > expiryThresholdIdx {
			continue
		}

		// Add the token accessor ID to the tracking array, thus marking it
		// ready for deletion.
		expiredAccessorIDs = append(expiredAccessorIDs, token.AccessorID)

		// Increment the counter. If this is at or above our limit, we return
		// what we have so far.
		if num++; num >= structs.ACLMaxExpiredBatchSize {
			break
		}
	}

	// There is no need to call the RPC endpoint if we do not have any tokens
	// to delete.
	if len(expiredAccessorIDs) < 1 {
		return nil
	}

	// Log a nice, friendly debug message which could be useful when debugging
	// garbage collection in environments with a high rate of token creation
	// and expiration.
	c.logger.Debug("expired ACL token GC found eligible tokens",
		"num", len(expiredAccessorIDs), "global", global)

	// Set up and make the RPC request which will return any error performing
	// the deletion.
	req := structs.ACLTokenDeleteRequest{
		AccessorIDs: expiredAccessorIDs,
		WriteRequest: structs.WriteRequest{
			Region:    c.srv.Region(),
			AuthToken: eval.LeaderACL,
		},
	}
	return c.srv.RPC(structs.ACLDeleteTokensRPCMethod, req, &structs.GenericResponse{})
}

// rootKeyRotateOrGC is used to rotate or garbage collect root keys
func (c *CoreScheduler) rootKeyRotateOrGC(eval *structs.Evaluation) error {

	// If a rotation is needed it will be sent to the leader, so our view of
	// state is no longer valid. We ack this core job and will pick up the GC
	// work on the next interval.
	wasRotated, err := c.rootKeyRotate(eval)
	if err != nil {
		return err
	}
	if wasRotated {
		return nil
	}
	return c.rootKeyGC(eval)
}

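// rootKeyGC garbage collects root keys that are neither active nor being
// rekeyed, are older than the configured threshold, and are no longer in use.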
func (c *CoreScheduler) rootKeyGC(eval *structs.Evaluation) error {

	oldThreshold := c.getThreshold(eval, "root key",
		"root_key_gc_threshold", c.srv.config.RootKeyGCThreshold)

	ws := memdb.NewWatchSet()
	iter, err := c.snap.RootKeyMetas(ws)
	if err != nil {
		return err
	}

	for {
		raw := iter.Next()
		if raw == nil {
			break
		}
		keyMeta := raw.(*structs.RootKeyMeta)
		if keyMeta.Active() || keyMeta.Rekeying() {
			continue // never GC the active key or one we're rekeying
		}
		if keyMeta.CreateIndex > oldThreshold {
			continue // don't GC recent keys
		}

		inUse, err := c.snap.IsRootKeyMetaInUse(keyMeta.KeyID)
		if err != nil {
			return err
		}
		if inUse {
			continue
		}

		req := &structs.KeyringDeleteRootKeyRequest{
			KeyID: keyMeta.KeyID,
			WriteRequest: structs.WriteRequest{
				Region:    c.srv.config.Region,
				AuthToken: eval.LeaderACL,
			},
		}
		if err := c.srv.RPC("Keyring.Delete",
			req, &structs.KeyringDeleteRootKeyResponse{}); err != nil {
			c.logger.Error("root key delete failed", "error", err)
			return err
		}
	}

	return nil
}

// rootKeyRotate checks if the active key is old enough that we need
// to kick off a rotation.
func (c *CoreScheduler) rootKeyRotate(eval *structs.Evaluation) (bool, error) {

	rotationThreshold := c.getThreshold(eval, "root key",
		"root_key_rotation_threshold", c.srv.config.RootKeyRotationThreshold)

	ws := memdb.NewWatchSet()
	activeKey, err := c.snap.GetActiveRootKeyMeta(ws)
	if err != nil {
		return false, err
	}
	if activeKey == nil {
		return false, nil // no active key
	}
	if activeKey.CreateIndex >= rotationThreshold {
		return false, nil // key is too new
	}

	req := &structs.KeyringRotateRootKeyRequest{
		WriteRequest: structs.WriteRequest{
			Region:    c.srv.config.Region,
			AuthToken: eval.LeaderACL,
		},
	}
	if err := c.srv.RPC("Keyring.Rotate",
		req, &structs.KeyringRotateRootKeyResponse{}); err != nil {
		c.logger.Error("root key rotation failed", "error", err)
		return false, err
	}

	return true, nil
}

// variablesRekey is optionally run after rotating the active
// root key. It iterates over all the variables for the keys in the
// re-keying state, decrypts them, and re-encrypts them in batches
// with the currently active key. This job does not GC the keys, which
// is handled in the normal periodic GC job.
func (c *CoreScheduler) variablesRekey(eval *structs.Evaluation) error {

	ws := memdb.NewWatchSet()
	iter, err := c.snap.RootKeyMetas(ws)
	if err != nil {
		return err
	}

	for {
		raw := iter.Next()
		if raw == nil {
			break
		}
		keyMeta := raw.(*structs.RootKeyMeta)
		if !keyMeta.Rekeying() {
			continue
		}
		varIter, err := c.snap.GetVariablesByKeyID(ws, keyMeta.KeyID)
		if err != nil {
			return err
		}
		err = c.rotateVariables(varIter, eval)
		if err != nil {
			return err
		}

	}

	return nil
}

// rotateVariables runs over an iterator of variables, decrypts them, and then
// sends them back to be re-encrypted with the currently active key, checking
// for conflicts.
func (c *CoreScheduler) rotateVariables(iter memdb.ResultIterator, eval *structs.Evaluation) error {

	args := &structs.VariablesApplyRequest{
		Op: structs.VarOpCAS,
		WriteRequest: structs.WriteRequest{
			Region:    c.srv.config.Region,
			AuthToken: eval.LeaderACL,
		},
	}

	// We may have to work on a very large number of variables. There's no
	// BatchApply RPC because it makes for an awkward API around conflict
	// detection, and even if we did, we'd be blocking this scheduler goroutine
	// for a very long time using the same snapshot. This would increase the
	// risk that any given batch hits a conflict because of a concurrent change
	// and make it more likely that we fail the eval. For large sets, this would
	// likely mean the eval would run out of retries.
	//
	// Instead, we'll rate limit RPC requests and have a timeout. If we still
	// haven't finished the set by the timeout, emit a new eval.
	ctx, cancel := context.WithTimeout(context.Background(), c.srv.GetConfig().EvalNackTimeout/2)
	defer cancel()
	limiter := rate.NewLimiter(rate.Limit(100), 100)

	for {
		raw := iter.Next()
		if raw == nil {
			break
		}

		select {
		case <-ctx.Done():
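			// Timed out before finishing the set: emit a follow-up core eval
			// so the remaining variables are re-keyed on a later pass.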
			newEval := &structs.Evaluation{
				ID:          uuid.Generate(),
				Namespace:   "-",
				Priority:    structs.CoreJobPriority,
				Type:        structs.JobTypeCore,
				TriggeredBy: structs.EvalTriggerScheduled,
				JobID:       eval.JobID,
				Status:      structs.EvalStatusPending,
				LeaderACL:   eval.LeaderACL,
			}
			return c.srv.RPC("Eval.Create", &structs.EvalUpdateRequest{
				Evals:     []*structs.Evaluation{newEval},
				EvalToken: uuid.Generate(),
				WriteRequest: structs.WriteRequest{
					Region:    c.srv.config.Region,
					AuthToken: eval.LeaderACL,
				},
			}, &structs.GenericResponse{})

		default:
		}

		ev := raw.(*structs.VariableEncrypted)
		cleartext, err := c.srv.encrypter.Decrypt(ev.Data, ev.KeyID)
		if err != nil {
			return err
		}
		dv := &structs.VariableDecrypted{
			VariableMetadata: ev.VariableMetadata,
		}
		dv.Items = make(map[string]string)
		err = json.Unmarshal(cleartext, &dv.Items)
		if err != nil {
			return err
		}
		args.Var = dv
		reply := &structs.VariablesApplyResponse{}

		if err := limiter.Wait(ctx); err != nil {
			return err
		}

		err = c.srv.RPC("Variables.Apply", args, reply)
		if err != nil {
			return err
		}
		if reply.IsConflict() {
			// we've already rotated the key by the time we took this
			// evaluation's snapshot, so any conflict is going to be on a write
			// made with the new key, so there's nothing for us to do here
			continue
		}
	}

	return nil
}

// getThreshold returns the index threshold for determining whether an
// object is old enough to GC
func (c *CoreScheduler) getThreshold(eval *structs.Evaluation, objectName, configName string, configThreshold time.Duration) uint64 {
	var oldThreshold uint64
	if eval.JobID == structs.CoreJobForceGC {
		// The GC was forced, so set the threshold to its maximum so
		// everything will GC.
		oldThreshold = math.MaxUint64
		c.logger.Debug(fmt.Sprintf("forced %s GC", objectName))
	} else {
		// Compute the old threshold limit for GC using the FSM
		// time table. This is a rough mapping of a time to the
		// Raft index it belongs to.
		tt := c.srv.fsm.TimeTable()
		cutoff := time.Now().UTC().Add(-1 * configThreshold)
		oldThreshold = tt.NearestIndex(cutoff)
		c.logger.Debug(
			fmt.Sprintf("%s GC scanning before cutoff index", objectName),
			"index", oldThreshold,
			configName, configThreshold)
	}
	return oldThreshold
}