package scheduler

import (
	"math"
	"time"

	"github.com/hashicorp/nomad/nomad/structs"
)

const (
	// skipScoreThreshold is a threshold used in the limit iterator to skip nodes
	// that have a score lower than this. -1 is the lowest possible score for a
	// node with penalties (based on job anti affinity and node rescheduling penalties).
	skipScoreThreshold = 0.0

	// maxSkip limits the number of nodes that can be skipped in the limit iterator
	maxSkip = 3
)

// Stack is a chained collection of iterators. The stack is used to
// make placement decisions. Different schedulers may customize the
// stack they use to vary the way placements are made.
type Stack interface {
	// SetNodes is used to set the base set of potential nodes
	SetNodes([]*structs.Node)

	// SetJob is used to set the job for selection
	SetJob(job *structs.Job)

	// Select is used to select a node for the task group
	Select(tg *structs.TaskGroup, options *SelectOptions) *RankedNode
}
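
// In practice a Stack is driven in three steps: SetNodes with the candidate
// nodes, SetJob with the job being scheduled, and then one Select call per
// task group that needs a placement. (Summary of the interface above; the
// concrete schedulers wire these calls up elsewhere.)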

// SelectOptions is used to configure the behavior of a single Select call.
type SelectOptions struct {
	// PenaltyNodeIDs is a set of node IDs that should be penalized during
	// scoring (for example, nodes an allocation was recently rescheduled from).
	PenaltyNodeIDs map[string]struct{}

	// PreferredNodes are tried first; if none yields a placement, Select
	// falls back to the full set of nodes.
	PreferredNodes []*structs.Node

	// Preempt indicates whether the placement may preempt (evict) other
	// allocations during bin packing.
	Preempt bool
}
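
// A caller might, for example, penalize the node an allocation was just
// rescheduled from (a minimal sketch; stack, tg, and previousNodeID are
// placeholders, not values defined in this package):
//
//	opts := &SelectOptions{
//		PenaltyNodeIDs: map[string]struct{}{previousNodeID: {}},
//	}
//	option := stack.Select(tg, opts)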

// GenericStack is the Stack used for the Generic scheduler. It is
// designed to make better placement decisions at the cost of performance.
type GenericStack struct {
	batch  bool
	ctx    Context
	source *StaticIterator

	wrappedChecks        *FeasibilityWrapper
	quota                FeasibleIterator
	jobConstraint        *ConstraintChecker
	taskGroupDrivers     *DriverChecker
	taskGroupConstraint  *ConstraintChecker
	taskGroupDevices     *DeviceChecker
	taskGroupHostVolumes *HostVolumeChecker
	taskGroupCSIVolumes  *CSIVolumeChecker

	distinctHostsConstraint    *DistinctHostsIterator
	distinctPropertyConstraint *DistinctPropertyIterator
	binPack                    *BinPackIterator
	jobAntiAff                 *JobAntiAffinityIterator
	nodeReschedulingPenalty    *NodeReschedulingPenaltyIterator
	limit                      *LimitIterator
	maxScore                   *MaxScoreIterator
	nodeAffinity               *NodeAffinityIterator
	spread                     *SpreadIterator
	scoreNorm                  *ScoreNormalizationIterator
}

// SetNodes is used to set the base set of potential nodes.
func (s *GenericStack) SetNodes(baseNodes []*structs.Node) {
	// Shuffle base nodes
	shuffleNodes(baseNodes)

	// Update the set of base nodes
	s.source.SetNodes(baseNodes)

	// Apply a limit function. This is to avoid scanning *every* possible node.
	// For batch jobs we only need to evaluate 2 options and depend on the
	// power of two choices. For service jobs we need to visit "enough".
	// Using a log of the total number of nodes is a good restriction, with
	// at least 2 as the floor.
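	// For example, with 100 candidate nodes, ceil(log2(100)) = 7, so at most
	// 7 options are examined for a non-batch job (illustrative only).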
	limit := 2
	if n := len(baseNodes); !s.batch && n > 0 {
		logLimit := int(math.Ceil(math.Log2(float64(n))))
		if logLimit > limit {
			limit = logLimit
		}
	}
	s.limit.SetLimit(limit)
}

// SetJob is used to set the job for selection.
func (s *GenericStack) SetJob(job *structs.Job) {
	s.jobConstraint.SetConstraints(job.Constraints)
	s.distinctHostsConstraint.SetJob(job)
	s.distinctPropertyConstraint.SetJob(job)
	s.binPack.SetJob(job)
	s.jobAntiAff.SetJob(job)
	s.nodeAffinity.SetJob(job)
	s.spread.SetJob(job)
	s.ctx.Eligibility().SetJob(job)
	s.taskGroupCSIVolumes.SetNamespace(job.Namespace)
	s.taskGroupCSIVolumes.SetJobID(job.ID)

	if contextual, ok := s.quota.(ContextualIterator); ok {
		contextual.SetJob(job)
	}
}

// Select is used to select a node for the given task group.
func (s *GenericStack) Select(tg *structs.TaskGroup, options *SelectOptions) *RankedNode {

	// This block handles trying to select from preferred nodes if options
	// specify them. It also restores the original set of nodes afterwards.
	if options != nil && len(options.PreferredNodes) > 0 {
		originalNodes := s.source.nodes
		s.source.SetNodes(options.PreferredNodes)
		optionsNew := *options
		optionsNew.PreferredNodes = nil
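		// Clearing PreferredNodes means the recursive Select calls below take
		// the normal path rather than re-entering this branch.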
		if option := s.Select(tg, &optionsNew); option != nil {
			s.source.SetNodes(originalNodes)
			return option
		}
		s.source.SetNodes(originalNodes)
		return s.Select(tg, &optionsNew)
	}

	// Reset the max selector and context
	s.maxScore.Reset()
	s.ctx.Reset()
	start := time.Now()

	// Get the task group's constraints.
	tgConstr := taskGroupConstraints(tg)

	// Update the parameters of iterators
	s.taskGroupDrivers.SetDrivers(tgConstr.drivers)
	s.taskGroupConstraint.SetConstraints(tgConstr.constraints)
	s.taskGroupDevices.SetTaskGroup(tg)
	s.taskGroupHostVolumes.SetVolumes(tg.Volumes)
	s.taskGroupCSIVolumes.SetVolumes(tg.Volumes)
	s.distinctHostsConstraint.SetTaskGroup(tg)
	s.distinctPropertyConstraint.SetTaskGroup(tg)
	s.wrappedChecks.SetTaskGroup(tg.Name)
	s.binPack.SetTaskGroup(tg)
	if options != nil {
		s.binPack.evict = options.Preempt
	}
	s.jobAntiAff.SetTaskGroup(tg)
	if options != nil {
		s.nodeReschedulingPenalty.SetPenaltyNodes(options.PenaltyNodeIDs)
	}
	s.nodeAffinity.SetTaskGroup(tg)
	s.spread.SetTaskGroup(tg)

	if s.nodeAffinity.hasAffinities() || s.spread.hasSpreads() {
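		// Lifting the limit lets affinity and spread scoring consider every
		// feasible node instead of stopping at the cutoff set in SetNodes.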
		s.limit.SetLimit(math.MaxInt32)
	}

	if contextual, ok := s.quota.(ContextualIterator); ok {
		contextual.SetTaskGroup(tg)
	}

	// Find the node with the max score
	option := s.maxScore.Next()

	// Store the compute time
	s.ctx.Metrics().AllocationTime = time.Since(start)
	return option
}

// SystemStack is the Stack used for the System scheduler. It is designed to
// attempt to make placements on all nodes.
type SystemStack struct {
	ctx    Context
	source *StaticIterator

	wrappedChecks        *FeasibilityWrapper
	quota                FeasibleIterator
	jobConstraint        *ConstraintChecker
	taskGroupDrivers     *DriverChecker
	taskGroupConstraint  *ConstraintChecker
	taskGroupDevices     *DeviceChecker
	taskGroupHostVolumes *HostVolumeChecker
	taskGroupCSIVolumes  *CSIVolumeChecker

	distinctPropertyConstraint *DistinctPropertyIterator
	binPack                    *BinPackIterator
	scoreNorm                  *ScoreNormalizationIterator
}

// NewSystemStack constructs a stack used for selecting system job placements.
func NewSystemStack(ctx Context) *SystemStack {
	// Create a new stack
	s := &SystemStack{ctx: ctx}

	// Create the source iterator. We visit nodes in a linear order because we
	// have to evaluate on all nodes.
	s.source = NewStaticIterator(ctx, nil)

	// Create the quota iterator to determine if placements would cause the
	// quota attached to the job's namespace to be exceeded.
	s.quota = NewQuotaIterator(ctx, s.source)

	// Attach the job constraints. The job is filled in later.
	s.jobConstraint = NewConstraintChecker(ctx, nil)

	// Filter on task group drivers first as they are faster
	s.taskGroupDrivers = NewDriverChecker(ctx, nil)

	// Filter on task group constraints second
	s.taskGroupConstraint = NewConstraintChecker(ctx, nil)

	// Filter on task group host volumes
	s.taskGroupHostVolumes = NewHostVolumeChecker(ctx)

	// Filter on available, healthy CSI plugins
	s.taskGroupCSIVolumes = NewCSIVolumeChecker(ctx)

	// Filter on task group devices
	s.taskGroupDevices = NewDeviceChecker(ctx)

	// Create the feasibility wrapper, which wraps all feasibility checks for
	// which checking can be skipped if the computed node class has previously
	// been marked as eligible or ineligible. Generally these are checks that
	// only need to examine a single node to determine feasibility.
	jobs := []FeasibilityChecker{s.jobConstraint}
	tgs := []FeasibilityChecker{s.taskGroupDrivers, s.taskGroupConstraint,
		s.taskGroupHostVolumes,
		s.taskGroupDevices}
	avail := []FeasibilityChecker{s.taskGroupCSIVolumes}
	s.wrappedChecks = NewFeasibilityWrapper(ctx, s.quota, jobs, tgs, avail)
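	// As wired above, job-level checks (jobs), per-task-group checks (tgs),
	// and the CSI availability check (avail) are handed to the wrapper as
	// separate groups so each can be evaluated at the appropriate level.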

	// Filter on distinct property constraints.
	s.distinctPropertyConstraint = NewDistinctPropertyIterator(ctx, s.wrappedChecks)

	// Upgrade from feasible to rank iterator
	rankSource := NewFeasibleRankIterator(ctx, s.distinctPropertyConstraint)

	// Apply the bin packing; this depends on the resources needed by a
	// particular task group. Enable eviction by default as system jobs are
	// high priority, unless preemption is disabled in the scheduler config.
	_, schedConfig, _ := s.ctx.State().SchedulerConfig()
	enablePreemption := true
	if schedConfig != nil {
		enablePreemption = schedConfig.PreemptionConfig.SystemSchedulerEnabled
	}
	s.binPack = NewBinPackIterator(ctx, rankSource, enablePreemption, 0)

	// Apply score normalization
	s.scoreNorm = NewScoreNormalizationIterator(ctx, s.binPack)
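
	// The resulting chain, from candidate nodes to final score, is:
	// source -> quota -> wrappedChecks -> distinctPropertyConstraint ->
	// rankSource -> binPack -> scoreNorm.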
	return s
}

// SetNodes is used to set the base set of potential nodes.
func (s *SystemStack) SetNodes(baseNodes []*structs.Node) {
	// Update the set of base nodes
	s.source.SetNodes(baseNodes)
}

// SetJob is used to set the job for selection.
func (s *SystemStack) SetJob(job *structs.Job) {
	s.jobConstraint.SetConstraints(job.Constraints)
	s.distinctPropertyConstraint.SetJob(job)
	s.binPack.SetJob(job)
	s.ctx.Eligibility().SetJob(job)

	if contextual, ok := s.quota.(ContextualIterator); ok {
		contextual.SetJob(job)
	}
}

// Select is used to select a node for the given task group.
func (s *SystemStack) Select(tg *structs.TaskGroup, options *SelectOptions) *RankedNode {
	// Reset the binpack selector and context
	s.scoreNorm.Reset()
	s.ctx.Reset()
	start := time.Now()

	// Get the task group's constraints.
	tgConstr := taskGroupConstraints(tg)

	// Update the parameters of iterators
	s.taskGroupDrivers.SetDrivers(tgConstr.drivers)
	s.taskGroupConstraint.SetConstraints(tgConstr.constraints)
	s.taskGroupDevices.SetTaskGroup(tg)
	s.taskGroupHostVolumes.SetVolumes(tg.Volumes)
	s.taskGroupCSIVolumes.SetVolumes(tg.Volumes)
	s.wrappedChecks.SetTaskGroup(tg.Name)
	s.distinctPropertyConstraint.SetTaskGroup(tg)
	s.binPack.SetTaskGroup(tg)

	if contextual, ok := s.quota.(ContextualIterator); ok {
		contextual.SetTaskGroup(tg)
	}

	// Get the next option that satisfies the constraints.
	option := s.scoreNorm.Next()

	// Store the compute time
	s.ctx.Metrics().AllocationTime = time.Since(start)
	return option
}