2015-08-14 00:48:26 +00:00
|
|
|
package scheduler
|
|
|
|
|
|
|
|
import (
|
|
|
|
"math"
|
2015-08-14 04:46:33 +00:00
|
|
|
"time"
|
2015-08-14 00:48:26 +00:00
|
|
|
|
|
|
|
"github.com/hashicorp/nomad/nomad/structs"
|
|
|
|
)
|
|
|
|
|
2015-08-16 17:37:11 +00:00
|
|
|
const (
	// skipScoreThreshold is a threshold used in the limit iterator to skip nodes
	// that have a score lower than this. -1 is the lowest possible score for a
	// node with penalties (based on job anti affinity and node rescheduling
	// penalties).
	skipScoreThreshold = 0.0

	// maxSkip limits the number of nodes that can be skipped in the limit iterator
	maxSkip = 3
)
|
|
|
|
|
2015-08-14 01:44:27 +00:00
|
|
|
// Stack is a chained collection of iterators. The stack is used to
// make placement decisions. Different schedulers may customize the
// stack they use to vary the way placements are made.
type Stack interface {
	// SetNodes is used to set the base set of potential nodes
	SetNodes([]*structs.Node)

	// SetJob is used to set the job being placed; it configures the
	// job-level constraints and scoring for subsequent Select calls
	SetJob(job *structs.Job)

	// Select is used to select a node for the task group
	Select(tg *structs.TaskGroup, options *SelectOptions) *RankedNode
}
|
|
|
|
|
|
|
|
// SelectOptions carries per-Select tuning parameters for a Stack.
type SelectOptions struct {
	// PenaltyNodeIDs is the set of node IDs to penalize during scoring
	// (e.g. nodes where a previous allocation failed — see the
	// nodeReschedulingPenalty iterator).
	PenaltyNodeIDs map[string]struct{}

	// PreferredNodes, when non-empty, is tried as the candidate set first
	// before falling back to the full node set.
	PreferredNodes []*structs.Node

	// Preempt indicates whether the bin-packing iterator may evict
	// (preempt) existing allocations to make room.
	Preempt bool

	// AllocName is the name of the allocation being placed; passed to the
	// host/CSI volume checkers.
	AllocName string
}
|
|
|
|
|
2015-08-14 05:35:48 +00:00
|
|
|
// GenericStack is the Stack used for the Generic scheduler. It is
// designed to make better placement decisions at the cost of performance.
type GenericStack struct {
	// batch toggles batch-scheduler behavior (e.g. a smaller scan limit in
	// SetNodes).
	batch  bool
	ctx    Context
	source *StaticIterator

	// Feasibility checkers: filter out nodes that cannot run the job/group.
	wrappedChecks *FeasibilityWrapper
	quota         FeasibleIterator
	// jobVersion memoizes the last job version passed to SetJob so repeated
	// calls with the same version can be skipped.
	jobVersion           *uint64
	jobConstraint        *ConstraintChecker
	taskGroupDrivers     *DriverChecker
	taskGroupConstraint  *ConstraintChecker
	taskGroupDevices     *DeviceChecker
	taskGroupHostVolumes *HostVolumeChecker
	taskGroupCSIVolumes  *CSIVolumeChecker
	taskGroupNetwork     *NetworkChecker

	// Ranking iterators: score the feasible nodes and pick the best.
	distinctHostsConstraint    *DistinctHostsIterator
	distinctPropertyConstraint *DistinctPropertyIterator
	binPack                    *BinPackIterator
	jobAntiAff                 *JobAntiAffinityIterator
	nodeReschedulingPenalty    *NodeReschedulingPenaltyIterator
	limit                      *LimitIterator
	maxScore                   *MaxScoreIterator
	nodeAffinity               *NodeAffinityIterator
	spread                     *SpreadIterator
	scoreNorm                  *ScoreNormalizationIterator
}
|
|
|
|
|
2015-09-07 18:30:13 +00:00
|
|
|
func (s *GenericStack) SetNodes(baseNodes []*structs.Node) {
|
|
|
|
// Shuffle base nodes
|
2022-02-08 17:16:33 +00:00
|
|
|
idx, _ := s.ctx.State().LatestIndex()
|
|
|
|
shuffleNodes(s.ctx.Plan(), idx, baseNodes)
|
2015-09-07 18:30:13 +00:00
|
|
|
|
|
|
|
// Update the set of base nodes
|
|
|
|
s.source.SetNodes(baseNodes)
|
2015-09-11 19:03:41 +00:00
|
|
|
|
|
|
|
// Apply a limit function. This is to avoid scanning *every* possible node.
|
|
|
|
// For batch jobs we only need to evaluate 2 options and depend on the
|
2015-10-14 23:43:06 +00:00
|
|
|
// power of two choices. For services jobs we need to visit "enough".
|
2015-09-11 19:03:41 +00:00
|
|
|
// Using a log of the total number of nodes is a good restriction, with
|
|
|
|
// at least 2 as the floor
|
|
|
|
limit := 2
|
|
|
|
if n := len(baseNodes); !s.batch && n > 0 {
|
|
|
|
logLimit := int(math.Ceil(math.Log2(float64(n))))
|
|
|
|
if logLimit > limit {
|
|
|
|
limit = logLimit
|
|
|
|
}
|
|
|
|
}
|
|
|
|
s.limit.SetLimit(limit)
|
2015-09-07 18:30:13 +00:00
|
|
|
}
|
|
|
|
|
2015-08-14 05:35:48 +00:00
|
|
|
func (s *GenericStack) SetJob(job *structs.Job) {
|
2020-08-13 13:35:09 +00:00
|
|
|
if s.jobVersion != nil && *s.jobVersion == job.Version {
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
|
|
|
jobVer := job.Version
|
|
|
|
s.jobVersion = &jobVer
|
|
|
|
|
2015-08-14 01:44:27 +00:00
|
|
|
s.jobConstraint.SetConstraints(job.Constraints)
|
2017-03-09 03:00:10 +00:00
|
|
|
s.distinctHostsConstraint.SetJob(job)
|
|
|
|
s.distinctPropertyConstraint.SetJob(job)
|
2019-04-11 01:20:22 +00:00
|
|
|
s.binPack.SetJob(job)
|
2018-07-16 13:47:18 +00:00
|
|
|
s.jobAntiAff.SetJob(job)
|
|
|
|
s.nodeAffinity.SetJob(job)
|
2018-07-17 22:25:38 +00:00
|
|
|
s.spread.SetJob(job)
|
2016-01-26 18:07:33 +00:00
|
|
|
s.ctx.Eligibility().SetJob(job)
|
2020-03-17 15:35:34 +00:00
|
|
|
s.taskGroupCSIVolumes.SetNamespace(job.Namespace)
|
2020-03-24 01:21:04 +00:00
|
|
|
s.taskGroupCSIVolumes.SetJobID(job.ID)
|
2017-10-13 21:36:02 +00:00
|
|
|
|
|
|
|
if contextual, ok := s.quota.(ContextualIterator); ok {
|
|
|
|
contextual.SetJob(job)
|
|
|
|
}
|
2015-08-14 00:48:26 +00:00
|
|
|
}
|
|
|
|
|
2018-10-02 20:36:04 +00:00
|
|
|
func (s *GenericStack) Select(tg *structs.TaskGroup, options *SelectOptions) *RankedNode {
|
2018-01-14 22:47:21 +00:00
|
|
|
|
|
|
|
// This block handles trying to select from preferred nodes if options specify them
|
|
|
|
// It also sets back the set of nodes to the original nodes
|
|
|
|
if options != nil && len(options.PreferredNodes) > 0 {
|
|
|
|
originalNodes := s.source.nodes
|
|
|
|
s.source.SetNodes(options.PreferredNodes)
|
2018-01-19 16:09:30 +00:00
|
|
|
optionsNew := *options
|
|
|
|
optionsNew.PreferredNodes = nil
|
2018-10-02 20:36:04 +00:00
|
|
|
if option := s.Select(tg, &optionsNew); option != nil {
|
2018-01-14 22:47:21 +00:00
|
|
|
s.source.SetNodes(originalNodes)
|
2018-10-02 20:36:04 +00:00
|
|
|
return option
|
2018-01-14 22:47:21 +00:00
|
|
|
}
|
|
|
|
s.source.SetNodes(originalNodes)
|
2018-01-19 16:09:30 +00:00
|
|
|
return s.Select(tg, &optionsNew)
|
2018-01-14 22:47:21 +00:00
|
|
|
}
|
|
|
|
|
2015-08-14 01:44:27 +00:00
|
|
|
// Reset the max selector and context
|
|
|
|
s.maxScore.Reset()
|
|
|
|
s.ctx.Reset()
|
2015-08-14 04:46:33 +00:00
|
|
|
start := time.Now()
|
2015-08-14 01:44:27 +00:00
|
|
|
|
2015-10-16 21:00:51 +00:00
|
|
|
// Get the task groups constraints.
|
|
|
|
tgConstr := taskGroupConstraints(tg)
|
2015-08-14 00:48:26 +00:00
|
|
|
|
|
|
|
// Update the parameters of iterators
|
2015-10-16 21:00:51 +00:00
|
|
|
s.taskGroupDrivers.SetDrivers(tgConstr.drivers)
|
|
|
|
s.taskGroupConstraint.SetConstraints(tgConstr.constraints)
|
2018-10-10 18:32:56 +00:00
|
|
|
s.taskGroupDevices.SetTaskGroup(tg)
|
2023-01-26 14:14:47 +00:00
|
|
|
s.taskGroupHostVolumes.SetVolumes(options.AllocName, tg.Volumes)
|
2021-03-18 19:35:11 +00:00
|
|
|
s.taskGroupCSIVolumes.SetVolumes(options.AllocName, tg.Volumes)
|
2020-05-15 15:09:01 +00:00
|
|
|
if len(tg.Networks) > 0 {
|
2020-06-16 15:53:10 +00:00
|
|
|
s.taskGroupNetwork.SetNetwork(tg.Networks[0])
|
2020-05-15 15:09:01 +00:00
|
|
|
}
|
2017-03-09 03:00:10 +00:00
|
|
|
s.distinctHostsConstraint.SetTaskGroup(tg)
|
|
|
|
s.distinctPropertyConstraint.SetTaskGroup(tg)
|
2016-02-04 05:22:18 +00:00
|
|
|
s.wrappedChecks.SetTaskGroup(tg.Name)
|
2016-08-25 17:27:19 +00:00
|
|
|
s.binPack.SetTaskGroup(tg)
|
2019-04-11 01:20:22 +00:00
|
|
|
if options != nil {
|
|
|
|
s.binPack.evict = options.Preempt
|
|
|
|
}
|
2018-07-16 13:47:18 +00:00
|
|
|
s.jobAntiAff.SetTaskGroup(tg)
|
2018-01-14 22:47:21 +00:00
|
|
|
if options != nil {
|
2018-07-16 13:47:18 +00:00
|
|
|
s.nodeReschedulingPenalty.SetPenaltyNodes(options.PenaltyNodeIDs)
|
|
|
|
}
|
|
|
|
s.nodeAffinity.SetTaskGroup(tg)
|
2018-07-17 22:25:38 +00:00
|
|
|
s.spread.SetTaskGroup(tg)
|
|
|
|
|
|
|
|
if s.nodeAffinity.hasAffinities() || s.spread.hasSpreads() {
|
2021-12-21 15:10:01 +00:00
|
|
|
// scoring spread across all nodes has quadratic behavior, so
|
|
|
|
// we need to consider a subset of nodes to keep evaluaton times
|
|
|
|
// reasonable but enough to ensure spread is correct. this
|
|
|
|
// value was empirically determined.
|
|
|
|
s.limit.SetLimit(tg.Count)
|
|
|
|
if tg.Count < 100 {
|
|
|
|
s.limit.SetLimit(100)
|
|
|
|
}
|
2018-01-14 22:47:21 +00:00
|
|
|
}
|
2015-08-14 01:36:13 +00:00
|
|
|
|
2017-10-13 21:36:02 +00:00
|
|
|
if contextual, ok := s.quota.(ContextualIterator); ok {
|
|
|
|
contextual.SetTaskGroup(tg)
|
|
|
|
}
|
|
|
|
|
2015-08-14 04:46:33 +00:00
|
|
|
// Find the node with the max score
|
|
|
|
option := s.maxScore.Next()
|
|
|
|
|
|
|
|
// Store the compute time
|
|
|
|
s.ctx.Metrics().AllocationTime = time.Since(start)
|
2018-10-02 20:36:04 +00:00
|
|
|
return option
|
2015-08-14 00:48:26 +00:00
|
|
|
}
|
2015-10-14 23:43:06 +00:00
|
|
|
|
|
|
|
// SystemStack is the Stack used for the System scheduler. It is designed to
// attempt to make placements on all nodes.
type SystemStack struct {
	ctx    Context
	source *StaticIterator

	// Feasibility checkers: filter out nodes that cannot run the job/group.
	wrappedChecks        *FeasibilityWrapper
	quota                FeasibleIterator
	jobConstraint        *ConstraintChecker
	taskGroupDrivers     *DriverChecker
	taskGroupConstraint  *ConstraintChecker
	taskGroupDevices     *DeviceChecker
	taskGroupHostVolumes *HostVolumeChecker
	taskGroupCSIVolumes  *CSIVolumeChecker
	taskGroupNetwork     *NetworkChecker

	// Ranking iterators: score the feasible nodes.
	distinctPropertyConstraint *DistinctPropertyIterator
	binPack                    *BinPackIterator
	scoreNorm                  *ScoreNormalizationIterator
}
|
|
|
|
|
2020-10-09 21:31:38 +00:00
|
|
|
// NewSystemStack constructs a stack used for selecting system and sysbatch
// job placements.
//
// sysbatch is used to determine which scheduler config option is used to
// control the use of preemption.
func NewSystemStack(sysbatch bool, ctx Context) *SystemStack {
	// Create a new stack
	s := &SystemStack{ctx: ctx}

	// Create the source iterator. We visit nodes in a linear order because we
	// have to evaluate on all nodes.
	s.source = NewStaticIterator(ctx, nil)

	// Attach the job constraints. The job is filled in later.
	s.jobConstraint = NewConstraintChecker(ctx, nil)

	// Filter on task group drivers first as they are faster
	s.taskGroupDrivers = NewDriverChecker(ctx, nil)

	// Filter on task group constraints second
	s.taskGroupConstraint = NewConstraintChecker(ctx, nil)

	// Filter on task group host volumes
	s.taskGroupHostVolumes = NewHostVolumeChecker(ctx)

	// Filter on available, healthy CSI plugins
	s.taskGroupCSIVolumes = NewCSIVolumeChecker(ctx)

	// Filter on task group devices
	s.taskGroupDevices = NewDeviceChecker(ctx)

	// Filter on available client networks
	s.taskGroupNetwork = NewNetworkChecker(ctx)

	// Create the feasibility wrapper which wraps all feasibility checks in
	// which feasibility checking can be skipped if the computed node class has
	// previously been marked as eligible or ineligible. Generally this will be
	// checks that only needs to examine the single node to determine feasibility.
	jobs := []FeasibilityChecker{s.jobConstraint}
	tgs := []FeasibilityChecker{
		s.taskGroupDrivers,
		s.taskGroupConstraint,
		s.taskGroupHostVolumes,
		s.taskGroupDevices,
		s.taskGroupNetwork,
	}
	avail := []FeasibilityChecker{s.taskGroupCSIVolumes}
	s.wrappedChecks = NewFeasibilityWrapper(ctx, s.source, jobs, tgs, avail)

	// Filter on distinct property constraints.
	s.distinctPropertyConstraint = NewDistinctPropertyIterator(ctx, s.wrappedChecks)

	// Create the quota iterator to determine if placements would result in
	// the quota attached to the namespace of the job to go over.
	// Note: the quota iterator must be the last feasibility iterator before
	// we upgrade to ranking, or our quota usage will include ineligible
	// nodes!
	s.quota = NewQuotaIterator(ctx, s.distinctPropertyConstraint)

	// Upgrade from feasible to rank iterator
	rankSource := NewFeasibleRankIterator(ctx, s.quota)

	// Apply the bin packing, this depends on the resources needed
	// by a particular task group. Enable eviction as system jobs are high
	// priority. Preemption defaults to enabled when no scheduler config is
	// available; otherwise the sysbatch/system-specific setting wins.
	_, schedConfig, _ := s.ctx.State().SchedulerConfig()
	enablePreemption := true
	if schedConfig != nil {
		if sysbatch {
			enablePreemption = schedConfig.PreemptionConfig.SysBatchSchedulerEnabled
		} else {
			enablePreemption = schedConfig.PreemptionConfig.SystemSchedulerEnabled
		}
	}

	// Create binpack iterator
	s.binPack = NewBinPackIterator(ctx, rankSource, enablePreemption, 0, schedConfig)

	// Apply score normalization
	s.scoreNorm = NewScoreNormalizationIterator(ctx, s.binPack)
	return s
}
|
|
|
|
|
|
|
|
func (s *SystemStack) SetNodes(baseNodes []*structs.Node) {
|
|
|
|
// Update the set of base nodes
|
|
|
|
s.source.SetNodes(baseNodes)
|
|
|
|
}
|
|
|
|
|
|
|
|
// SetJob configures every job-scoped iterator in the stack for the given
// job. Unlike GenericStack.SetJob, this is not memoized on the job version.
func (s *SystemStack) SetJob(job *structs.Job) {
	// Push job-level data into each iterator that needs it.
	s.jobConstraint.SetConstraints(job.Constraints)
	s.distinctPropertyConstraint.SetJob(job)
	s.binPack.SetJob(job)
	s.ctx.Eligibility().SetJob(job)
	s.taskGroupCSIVolumes.SetNamespace(job.Namespace)
	s.taskGroupCSIVolumes.SetJobID(job.ID)

	// The quota iterator is optional; only configure it when it supports
	// job context.
	if contextual, ok := s.quota.(ContextualIterator); ok {
		contextual.SetJob(job)
	}
}
|
|
|
|
|
2018-10-02 20:36:04 +00:00
|
|
|
func (s *SystemStack) Select(tg *structs.TaskGroup, options *SelectOptions) *RankedNode {
|
2015-10-14 23:43:06 +00:00
|
|
|
// Reset the binpack selector and context
|
2018-07-16 13:47:18 +00:00
|
|
|
s.scoreNorm.Reset()
|
2015-10-14 23:43:06 +00:00
|
|
|
s.ctx.Reset()
|
|
|
|
start := time.Now()
|
|
|
|
|
2015-10-16 21:00:51 +00:00
|
|
|
// Get the task groups constraints.
|
|
|
|
tgConstr := taskGroupConstraints(tg)
|
2015-10-14 23:43:06 +00:00
|
|
|
|
|
|
|
// Update the parameters of iterators
|
2015-10-16 21:00:51 +00:00
|
|
|
s.taskGroupDrivers.SetDrivers(tgConstr.drivers)
|
|
|
|
s.taskGroupConstraint.SetConstraints(tgConstr.constraints)
|
2018-10-10 18:32:56 +00:00
|
|
|
s.taskGroupDevices.SetTaskGroup(tg)
|
2023-01-26 14:14:47 +00:00
|
|
|
s.taskGroupHostVolumes.SetVolumes(options.AllocName, tg.Volumes)
|
2021-03-18 19:35:11 +00:00
|
|
|
s.taskGroupCSIVolumes.SetVolumes(options.AllocName, tg.Volumes)
|
2020-06-24 20:01:00 +00:00
|
|
|
if len(tg.Networks) > 0 {
|
|
|
|
s.taskGroupNetwork.SetNetwork(tg.Networks[0])
|
|
|
|
}
|
2016-02-04 05:22:18 +00:00
|
|
|
s.wrappedChecks.SetTaskGroup(tg.Name)
|
2017-03-09 03:00:10 +00:00
|
|
|
s.distinctPropertyConstraint.SetTaskGroup(tg)
|
|
|
|
s.binPack.SetTaskGroup(tg)
|
2015-10-14 23:43:06 +00:00
|
|
|
|
2017-10-13 21:36:02 +00:00
|
|
|
if contextual, ok := s.quota.(ContextualIterator); ok {
|
|
|
|
contextual.SetTaskGroup(tg)
|
|
|
|
}
|
|
|
|
|
2015-10-14 23:43:06 +00:00
|
|
|
// Get the next option that satisfies the constraints.
|
2018-07-16 13:47:18 +00:00
|
|
|
option := s.scoreNorm.Next()
|
2015-10-14 23:43:06 +00:00
|
|
|
|
|
|
|
// Store the compute time
|
|
|
|
s.ctx.Metrics().AllocationTime = time.Since(start)
|
2018-10-02 20:36:04 +00:00
|
|
|
return option
|
2015-10-14 23:43:06 +00:00
|
|
|
}
|
2020-05-27 19:02:01 +00:00
|
|
|
|
|
|
|
// NewGenericStack constructs a stack used for selecting service placements.
// batch selects batch-scheduler behavior (smaller scan limit in SetNodes).
// The iterators are chained in order: feasibility checks first, then
// ranking/scoring, then limiting and max-score selection.
func NewGenericStack(batch bool, ctx Context) *GenericStack {
	// Create a new stack
	s := &GenericStack{
		batch: batch,
		ctx:   ctx,
	}

	// Create the source iterator. We randomize the order we visit nodes
	// to reduce collisions between schedulers and to do a basic load
	// balancing across eligible nodes.
	s.source = NewRandomIterator(ctx, nil)

	// Attach the job constraints. The job is filled in later.
	s.jobConstraint = NewConstraintChecker(ctx, nil)

	// Filter on task group drivers first as they are faster
	s.taskGroupDrivers = NewDriverChecker(ctx, nil)

	// Filter on task group constraints second
	s.taskGroupConstraint = NewConstraintChecker(ctx, nil)

	// Filter on task group devices
	s.taskGroupDevices = NewDeviceChecker(ctx)

	// Filter on task group host volumes
	s.taskGroupHostVolumes = NewHostVolumeChecker(ctx)

	// Filter on available, healthy CSI plugins
	s.taskGroupCSIVolumes = NewCSIVolumeChecker(ctx)

	// Filter on available client networks
	s.taskGroupNetwork = NewNetworkChecker(ctx)

	// Create the feasibility wrapper which wraps all feasibility checks in
	// which feasibility checking can be skipped if the computed node class has
	// previously been marked as eligible or ineligible. Generally this will be
	// checks that only needs to examine the single node to determine feasibility.
	jobs := []FeasibilityChecker{s.jobConstraint}
	tgs := []FeasibilityChecker{
		s.taskGroupDrivers,
		s.taskGroupConstraint,
		s.taskGroupHostVolumes,
		s.taskGroupDevices,
		s.taskGroupNetwork,
	}
	avail := []FeasibilityChecker{s.taskGroupCSIVolumes}
	s.wrappedChecks = NewFeasibilityWrapper(ctx, s.source, jobs, tgs, avail)

	// Filter on distinct host constraints.
	s.distinctHostsConstraint = NewDistinctHostsIterator(ctx, s.wrappedChecks)

	// Filter on distinct property constraints.
	s.distinctPropertyConstraint = NewDistinctPropertyIterator(ctx, s.distinctHostsConstraint)

	// Create the quota iterator to determine if placements would result in
	// the quota attached to the namespace of the job to go over.
	// Note: the quota iterator must be the last feasibility iterator before
	// we upgrade to ranking, or our quota usage will include ineligible
	// nodes!
	s.quota = NewQuotaIterator(ctx, s.distinctPropertyConstraint)

	// Upgrade from feasible to rank iterator
	rankSource := NewFeasibleRankIterator(ctx, s.quota)

	// Apply the bin packing, this depends on the resources needed
	// by a particular task group. Preemption is disabled here; the generic
	// scheduler only evicts when Select is called with options.Preempt set.
	_, schedConfig, _ := ctx.State().SchedulerConfig()
	s.binPack = NewBinPackIterator(ctx, rankSource, false, 0, schedConfig)

	// Apply the job anti-affinity iterator. This is to avoid placing
	// multiple allocations on the same node for this job.
	s.jobAntiAff = NewJobAntiAffinityIterator(ctx, s.binPack, "")

	// Apply node rescheduling penalty. This tries to avoid placing on a
	// node where the allocation failed previously
	s.nodeReschedulingPenalty = NewNodeReschedulingPenaltyIterator(ctx, s.jobAntiAff)

	// Apply scores based on affinity block
	s.nodeAffinity = NewNodeAffinityIterator(ctx, s.nodeReschedulingPenalty)

	// Apply scores based on spread block
	s.spread = NewSpreadIterator(ctx, s.nodeAffinity)

	// Add the preemption options scoring iterator
	preemptionScorer := NewPreemptionScoringIterator(ctx, s.spread)

	// Normalizes scores by averaging them across various scorers
	s.scoreNorm = NewScoreNormalizationIterator(ctx, preemptionScorer)

	// Apply a limit function. This is to avoid scanning *every* possible node.
	s.limit = NewLimitIterator(ctx, s.scoreNorm, 2, skipScoreThreshold, maxSkip)

	// Select the node with the maximum score for placement
	s.maxScore = NewMaxScoreIterator(ctx, s.limit)
	return s
}
|
2023-02-03 17:29:39 +00:00
|
|
|
|
|
|
|
// taskGroupConstraints collects the constraints, drivers and resources required by each
|
|
|
|
// sub-task to aggregate the TaskGroup totals
|
|
|
|
func taskGroupConstraints(tg *structs.TaskGroup) tgConstrainTuple {
|
|
|
|
c := tgConstrainTuple{
|
|
|
|
constraints: make([]*structs.Constraint, 0, len(tg.Constraints)),
|
|
|
|
drivers: make(map[string]struct{}),
|
|
|
|
}
|
|
|
|
|
|
|
|
c.constraints = append(c.constraints, tg.Constraints...)
|
|
|
|
for _, task := range tg.Tasks {
|
|
|
|
c.drivers[task.Driver] = struct{}{}
|
|
|
|
c.constraints = append(c.constraints, task.Constraints...)
|
|
|
|
}
|
|
|
|
|
|
|
|
return c
|
|
|
|
}
|
|
|
|
|
|
|
|
// tgConstrainTuple is used to store the total constraints of a task group.
type tgConstrainTuple struct {
	// Holds the combined constraints of the task group and all its sub-tasks.
	constraints []*structs.Constraint

	// The set of required drivers within the task group.
	drivers map[string]struct{}
}
|