open-nomad/scheduler/feasible.go


package scheduler
import (
"fmt"
"reflect"
"regexp"
"strconv"
"strings"
version "github.com/hashicorp/go-version"
"github.com/hashicorp/nomad/helper/constraints/semver"
"github.com/hashicorp/nomad/nomad/structs"
psstructs "github.com/hashicorp/nomad/plugins/shared/structs"
)
// FeasibleIterator is used to iteratively yield nodes that
// match feasibility constraints. The iterators may manage
// some state for performance optimizations.
type FeasibleIterator interface {
// Next yields a feasible node or nil if exhausted
Next() *structs.Node
// Reset is invoked when an allocation has been placed
// to reset any stale state.
Reset()
}
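// Example (illustrative sketch; ctx and nodes are assumed to be provided by
// the caller): a feasibility stack is drained by calling Next until it
// returns nil, and Reset is invoked once an allocation has been placed.
//
//	var iter FeasibleIterator = NewStaticIterator(ctx, nodes)
//	for option := iter.Next(); option != nil; option = iter.Next() {
//		// evaluate option for placement
//	}
//	iter.Reset()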
// ContextualIterator is an iterator that can have the job and task group set
// on it.
type ContextualIterator interface {
SetJob(*structs.Job)
SetTaskGroup(*structs.TaskGroup)
}
// FeasibilityChecker is used to check if a single node meets feasibility
// constraints.
type FeasibilityChecker interface {
Feasible(*structs.Node) bool
}
// StaticIterator is a FeasibleIterator which returns nodes
// in a static order. This is used at the base of the iterator
// chain only for testing due to deterministic behavior.
type StaticIterator struct {
ctx Context
nodes []*structs.Node
offset int
seen int
}
// NewStaticIterator constructs a static iterator from a list of nodes
func NewStaticIterator(ctx Context, nodes []*structs.Node) *StaticIterator {
iter := &StaticIterator{
ctx: ctx,
nodes: nodes,
}
return iter
}
func (iter *StaticIterator) Next() *structs.Node {
// Check if exhausted
n := len(iter.nodes)
if iter.offset == n || iter.seen == n {
if iter.seen != n {
iter.offset = 0
} else {
return nil
}
}
// Return the next offset
offset := iter.offset
iter.offset += 1
iter.seen += 1
iter.ctx.Metrics().EvaluateNode()
return iter.nodes[offset]
}
func (iter *StaticIterator) Reset() {
iter.seen = 0
}
func (iter *StaticIterator) SetNodes(nodes []*structs.Node) {
iter.nodes = nodes
iter.offset = 0
iter.seen = 0
}
// NewRandomIterator constructs a StaticIterator over a list of nodes after
// shuffling them with the Fisher-Yates algorithm. The shuffle is applied
// in-place.
func NewRandomIterator(ctx Context, nodes []*structs.Node) *StaticIterator {
// shuffle with the Fisher-Yates algorithm
shuffleNodes(nodes)
// Create a static iterator
return NewStaticIterator(ctx, nodes)
}
// HostVolumeChecker is a FeasibilityChecker which returns whether a node has
// the host volumes necessary to schedule a task group.
type HostVolumeChecker struct {
ctx Context
// volumes is a map[HostVolumeName][]RequestedVolume. The requested volumes are
// a slice because a single task group may request the same volume multiple times.
volumes map[string][]*structs.VolumeRequest
}
// NewHostVolumeChecker creates a HostVolumeChecker from a set of volumes
func NewHostVolumeChecker(ctx Context) *HostVolumeChecker {
return &HostVolumeChecker{
ctx: ctx,
}
}
// SetVolumes takes the volumes required by a task group and updates the checker.
func (h *HostVolumeChecker) SetVolumes(volumes map[string]*structs.VolumeRequest) {
lookupMap := make(map[string][]*structs.VolumeRequest)
// Convert the map from map[DesiredName]Request to map[Source][]Request to improve
// lookup performance. Also filter non-host volumes.
for _, req := range volumes {
if req.Type != structs.VolumeTypeHost {
continue
}
lookupMap[req.Source] = append(lookupMap[req.Source], req)
}
h.volumes = lookupMap
}
func (h *HostVolumeChecker) Feasible(candidate *structs.Node) bool {
if h.hasVolumes(candidate) {
return true
}
h.ctx.Metrics().FilterNode(candidate, "missing compatible host volumes")
return false
}
func (h *HostVolumeChecker) hasVolumes(n *structs.Node) bool {
rLen := len(h.volumes)
hLen := len(n.HostVolumes)
// Fast path: Requested no volumes. No need to check further.
if rLen == 0 {
return true
}
// Fast path: Requesting more volumes than the node has, can't meet the criteria.
if rLen > hLen {
return false
}
for source, requests := range h.volumes {
nodeVolume, ok := n.HostVolumes[source]
if !ok {
return false
}
// If the volume supports being mounted as ReadWrite, we do not need to
// do further validation for readonly placement.
if !nodeVolume.ReadOnly {
continue
}
// The Volume can only be mounted ReadOnly, validate that no requests for
// it are ReadWrite.
for _, req := range requests {
if !req.ReadOnly {
return false
}
}
}
return true
}
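// Example (illustrative sketch; the volume names are hypothetical): a task
// group that requests the host volume "shared-data" read/write is only
// feasible on nodes whose "shared-data" host volume is not read-only.
//
//	checker := NewHostVolumeChecker(ctx)
//	checker.SetVolumes(map[string]*structs.VolumeRequest{
//		"data": {
//			Type:     structs.VolumeTypeHost,
//			Source:   "shared-data",
//			ReadOnly: false,
//		},
//	})
//	feasible := checker.Feasible(node)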
// DriverChecker is a FeasibilityChecker which returns whether a node has the
// drivers necessary to schedule a task group.
type DriverChecker struct {
ctx Context
drivers map[string]struct{}
}
// NewDriverChecker creates a DriverChecker from a set of drivers
func NewDriverChecker(ctx Context, drivers map[string]struct{}) *DriverChecker {
return &DriverChecker{
ctx: ctx,
drivers: drivers,
}
}
func (c *DriverChecker) SetDrivers(d map[string]struct{}) {
c.drivers = d
}
func (c *DriverChecker) Feasible(option *structs.Node) bool {
// Use this node if possible
if c.hasDrivers(option) {
return true
}
c.ctx.Metrics().FilterNode(option, "missing drivers")
return false
}
// hasDrivers is used to check if the node has all the appropriate
// drivers for this task group. Drivers are registered as node attributes
// like "driver.docker=1" with their corresponding version.
func (c *DriverChecker) hasDrivers(option *structs.Node) bool {
for driver := range c.drivers {
driverStr := fmt.Sprintf("driver.%s", driver)
// COMPAT: Remove in 0.10: As of Nomad 0.8, nodes have a DriverInfo that
// corresponds with every driver. As a Nomad server might be on a later
// version than a Nomad client, we need to check for compatibility here
// to verify the client supports this.
if driverInfo, ok := option.Drivers[driver]; ok {
if driverInfo == nil {
c.ctx.Logger().Named("driver_checker").Warn("node has no driver info set", "node_id", option.ID, "driver", driver)
return false
}
if driverInfo.Detected && driverInfo.Healthy {
continue
} else {
return false
}
}
value, ok := option.Attributes[driverStr]
if !ok {
return false
}
enabled, err := strconv.ParseBool(value)
if err != nil {
c.ctx.Logger().Named("driver_checker").Warn("node has invalid driver setting", "node_id", option.ID, "driver", driver, "val", value)
return false
}
if !enabled {
return false
}
}
return true
}
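// Example (illustrative sketch): a task group whose tasks use the docker and
// exec drivers is checked against either the node's DriverInfo (Nomad 0.8+)
// or the legacy attributes "driver.docker" / "driver.exec".
//
//	checker := NewDriverChecker(ctx, map[string]struct{}{
//		"docker": {},
//		"exec":   {},
//	})
//	feasible := checker.Feasible(node)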
// DistinctHostsIterator is a FeasibleIterator which returns nodes that pass the
// distinct_hosts constraint. The constraint ensures that multiple allocations
// do not exist on the same node.
type DistinctHostsIterator struct {
ctx Context
source FeasibleIterator
tg *structs.TaskGroup
job *structs.Job
// Store whether the Job or TaskGroup has a distinct_hosts constraints so
// they don't have to be calculated every time Next() is called.
tgDistinctHosts bool
jobDistinctHosts bool
}
// NewDistinctHostsIterator creates a DistinctHostsIterator from a source.
func NewDistinctHostsIterator(ctx Context, source FeasibleIterator) *DistinctHostsIterator {
return &DistinctHostsIterator{
ctx: ctx,
source: source,
}
}
func (iter *DistinctHostsIterator) SetTaskGroup(tg *structs.TaskGroup) {
iter.tg = tg
iter.tgDistinctHosts = iter.hasDistinctHostsConstraint(tg.Constraints)
}
func (iter *DistinctHostsIterator) SetJob(job *structs.Job) {
iter.job = job
iter.jobDistinctHosts = iter.hasDistinctHostsConstraint(job.Constraints)
}
func (iter *DistinctHostsIterator) hasDistinctHostsConstraint(constraints []*structs.Constraint) bool {
for _, con := range constraints {
if con.Operand == structs.ConstraintDistinctHosts {
return true
}
}
return false
}
func (iter *DistinctHostsIterator) Next() *structs.Node {
for {
// Get the next option from the source
option := iter.source.Next()
// Hot-path if the option is nil or there are no distinct_hosts or
// distinct_property constraints.
hosts := iter.jobDistinctHosts || iter.tgDistinctHosts
if option == nil || !hosts {
return option
}
// Check if the host constraints are satisfied
if !iter.satisfiesDistinctHosts(option) {
iter.ctx.Metrics().FilterNode(option, structs.ConstraintDistinctHosts)
continue
}
return option
}
}
// satisfiesDistinctHosts checks if the node satisfies a distinct_hosts
// constraint either specified at the job level or the TaskGroup level.
func (iter *DistinctHostsIterator) satisfiesDistinctHosts(option *structs.Node) bool {
// Check if there is no constraint set.
if !(iter.jobDistinctHosts || iter.tgDistinctHosts) {
return true
}
// Get the proposed allocations
proposed, err := iter.ctx.ProposedAllocs(option.ID)
if err != nil {
iter.ctx.Logger().Named("distinct_hosts").Error("failed to get proposed allocations", "error", err)
return false
}
// Skip the node if the task group has already been allocated on it.
for _, alloc := range proposed {
// If the job has a distinct_hosts constraint we only need an alloc
// collision on the JobID but if the constraint is on the TaskGroup then
// we need both a job and TaskGroup collision.
jobCollision := alloc.JobID == iter.job.ID
taskCollision := alloc.TaskGroup == iter.tg.Name
if iter.jobDistinctHosts && jobCollision || jobCollision && taskCollision {
return false
}
}
return true
}
func (iter *DistinctHostsIterator) Reset() {
iter.source.Reset()
}
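// Example (illustrative sketch; job, tg, and source are assumed): the
// iterator is layered over a source iterator and fed the job and task group
// before Next is called, so nodes that already hold a colliding allocation
// are filtered out.
//
//	distinct := NewDistinctHostsIterator(ctx, source)
//	distinct.SetJob(job)
//	distinct.SetTaskGroup(tg)
//	option := distinct.Next() // nil or a node without a colliding alloc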
// DistinctPropertyIterator is a FeasibleIterator which returns nodes that pass the
// distinct_property constraint. The constraint ensures that multiple allocations
// do not use the same value of the given property.
type DistinctPropertyIterator struct {
ctx Context
source FeasibleIterator
tg *structs.TaskGroup
job *structs.Job
hasDistinctPropertyConstraints bool
jobPropertySets []*propertySet
groupPropertySets map[string][]*propertySet
}
// NewDistinctPropertyIterator creates a DistinctPropertyIterator from a source.
func NewDistinctPropertyIterator(ctx Context, source FeasibleIterator) *DistinctPropertyIterator {
return &DistinctPropertyIterator{
ctx: ctx,
source: source,
groupPropertySets: make(map[string][]*propertySet),
}
}
func (iter *DistinctPropertyIterator) SetTaskGroup(tg *structs.TaskGroup) {
iter.tg = tg
// Build the property set at the taskgroup level
if _, ok := iter.groupPropertySets[tg.Name]; !ok {
for _, c := range tg.Constraints {
if c.Operand != structs.ConstraintDistinctProperty {
continue
}
pset := NewPropertySet(iter.ctx, iter.job)
pset.SetTGConstraint(c, tg.Name)
iter.groupPropertySets[tg.Name] = append(iter.groupPropertySets[tg.Name], pset)
}
}
// Check if there is a distinct property
iter.hasDistinctPropertyConstraints = len(iter.jobPropertySets) != 0 || len(iter.groupPropertySets[tg.Name]) != 0
}
func (iter *DistinctPropertyIterator) SetJob(job *structs.Job) {
iter.job = job
// Build the property set at the job level
for _, c := range job.Constraints {
if c.Operand != structs.ConstraintDistinctProperty {
continue
}
pset := NewPropertySet(iter.ctx, job)
pset.SetJobConstraint(c)
iter.jobPropertySets = append(iter.jobPropertySets, pset)
}
}
func (iter *DistinctPropertyIterator) Next() *structs.Node {
for {
// Get the next option from the source
option := iter.source.Next()
// Hot path if there is nothing to check
if option == nil || !iter.hasDistinctPropertyConstraints {
return option
}
// Check if the constraints are met
if !iter.satisfiesProperties(option, iter.jobPropertySets) ||
!iter.satisfiesProperties(option, iter.groupPropertySets[iter.tg.Name]) {
continue
}
return option
}
}
// satisfiesProperties returns whether the option satisfies the set of
// properties. If not it will be filtered.
func (iter *DistinctPropertyIterator) satisfiesProperties(option *structs.Node, set []*propertySet) bool {
for _, ps := range set {
if satisfies, reason := ps.SatisfiesDistinctProperties(option, iter.tg.Name); !satisfies {
iter.ctx.Metrics().FilterNode(option, reason)
return false
}
}
return true
}
func (iter *DistinctPropertyIterator) Reset() {
iter.source.Reset()
for _, ps := range iter.jobPropertySets {
ps.PopulateProposed()
}
for _, sets := range iter.groupPropertySets {
for _, ps := range sets {
ps.PopulateProposed()
}
}
}
// ConstraintChecker is a FeasibilityChecker which returns nodes that match a
// given set of constraints. This is used to filter on job, task group, and task
// constraints.
type ConstraintChecker struct {
ctx Context
constraints []*structs.Constraint
}
// NewConstraintChecker creates a ConstraintChecker for a set of constraints
func NewConstraintChecker(ctx Context, constraints []*structs.Constraint) *ConstraintChecker {
return &ConstraintChecker{
ctx: ctx,
constraints: constraints,
}
}
func (c *ConstraintChecker) SetConstraints(constraints []*structs.Constraint) {
c.constraints = constraints
}
func (c *ConstraintChecker) Feasible(option *structs.Node) bool {
// Use this node if possible
for _, constraint := range c.constraints {
if !c.meetsConstraint(constraint, option) {
c.ctx.Metrics().FilterNode(option, constraint.String())
return false
}
}
return true
}
func (c *ConstraintChecker) meetsConstraint(constraint *structs.Constraint, option *structs.Node) bool {
// Resolve the targets. Targets that are not present are treated as `nil`.
// This is to allow for matching constraints where a target is not present.
lVal, lOk := resolveTarget(constraint.LTarget, option)
rVal, rOk := resolveTarget(constraint.RTarget, option)
// Check if satisfied
return checkConstraint(c.ctx, constraint.Operand, lVal, rVal, lOk, rOk)
}
// resolveTarget is used to resolve the LTarget and RTarget of a Constraint.
func resolveTarget(target string, node *structs.Node) (interface{}, bool) {
// If no prefix, this must be a literal value
if !strings.HasPrefix(target, "${") {
return target, true
}
// Handle the interpolations
switch {
case "${node.unique.id}" == target:
return node.ID, true
case "${node.datacenter}" == target:
return node.Datacenter, true
case "${node.unique.name}" == target:
return node.Name, true
case "${node.class}" == target:
return node.NodeClass, true
case strings.HasPrefix(target, "${attr."):
attr := strings.TrimSuffix(strings.TrimPrefix(target, "${attr."), "}")
val, ok := node.Attributes[attr]
return val, ok
case strings.HasPrefix(target, "${meta."):
meta := strings.TrimSuffix(strings.TrimPrefix(target, "${meta."), "}")
val, ok := node.Meta[meta]
return val, ok
default:
return nil, false
}
}
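// Example (illustrative values): resolveTarget maps interpolation targets to
// node fields, so "${node.datacenter}" yields node.Datacenter and
// "${attr.kernel.name}" yields node.Attributes["kernel.name"]; a bare string
// such as "linux" is returned unchanged as a literal.
//
//	val, ok := resolveTarget("${attr.kernel.name}", node) // e.g. "linux", true
//	lit, _ := resolveTarget("linux", node)                // "linux", true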
// checkConstraint checks if a constraint is satisfied. The lVal and rVal
// interfaces may be nil.
func checkConstraint(ctx Context, operand string, lVal, rVal interface{}, lFound, rFound bool) bool {
// Check for constraints not handled by this checker.
switch operand {
case structs.ConstraintDistinctHosts, structs.ConstraintDistinctProperty:
return true
default:
break
}
switch operand {
case "=", "==", "is":
return lFound && rFound && reflect.DeepEqual(lVal, rVal)
case "!=", "not":
return !reflect.DeepEqual(lVal, rVal)
case "<", "<=", ">", ">=":
return lFound && rFound && checkLexicalOrder(operand, lVal, rVal)
case structs.ConstraintAttributeIsSet:
return lFound
case structs.ConstraintAttributeIsNotSet:
return !lFound
case structs.ConstraintVersion:
parser := newVersionConstraintParser(ctx)
return lFound && rFound && checkVersionMatch(ctx, parser, lVal, rVal)
case structs.ConstraintSemver:
parser := newSemverConstraintParser(ctx)
return lFound && rFound && checkVersionMatch(ctx, parser, lVal, rVal)
case structs.ConstraintRegex:
return lFound && rFound && checkRegexpMatch(ctx, lVal, rVal)
case structs.ConstraintSetContains, structs.ConstraintSetContainsAll:
return lFound && rFound && checkSetContainsAll(ctx, lVal, rVal)
case structs.ConstraintSetContainsAny:
return lFound && rFound && checkSetContainsAny(lVal, rVal)
default:
return false
}
}
// checkAffinity checks if a specific affinity is satisfied
func checkAffinity(ctx Context, operand string, lVal, rVal interface{}, lFound, rFound bool) bool {
return checkConstraint(ctx, operand, lVal, rVal, lFound, rFound)
}
// checkAttributeAffinity checks if an affinity is satisfied
func checkAttributeAffinity(ctx Context, operand string, lVal, rVal *psstructs.Attribute, lFound, rFound bool) bool {
return checkAttributeConstraint(ctx, operand, lVal, rVal, lFound, rFound)
}
// checkLexicalOrder is used to check for lexical ordering
func checkLexicalOrder(op string, lVal, rVal interface{}) bool {
// Ensure the values are strings
lStr, ok := lVal.(string)
if !ok {
return false
}
rStr, ok := rVal.(string)
if !ok {
return false
}
switch op {
case "<":
return lStr < rStr
case "<=":
return lStr <= rStr
case ">":
return lStr > rStr
case ">=":
return lStr >= rStr
default:
return false
}
}
// checkVersionMatch is used to compare a version on the
// left hand side with a set of constraints on the right hand side
func checkVersionMatch(ctx Context, parse verConstraintParser, lVal, rVal interface{}) bool {
// Parse the version
var versionStr string
switch v := lVal.(type) {
case string:
versionStr = v
case int:
versionStr = fmt.Sprintf("%d", v)
default:
return false
}
// Parse the version
vers, err := version.NewVersion(versionStr)
if err != nil {
return false
}
// Constraint must be a string
constraintStr, ok := rVal.(string)
if !ok {
return false
}
// Parse the constraints
constraints := parse(constraintStr)
if constraints == nil {
return false
}
// Check the constraints against the version
return constraints.Check(vers)
}
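// Example (illustrative values): with the go-version parser, a node attribute
// of "1.4.2" satisfies the constraint ">= 1.4, < 2.0".
//
//	parser := newVersionConstraintParser(ctx)
//	ok := checkVersionMatch(ctx, parser, "1.4.2", ">= 1.4, < 2.0") // true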
// checkAttributeVersionMatch is used to compare a version on the
// left hand side with a set of constraints on the right hand side
func checkAttributeVersionMatch(ctx Context, parse verConstraintParser, lVal, rVal *psstructs.Attribute) bool {
// Parse the version
var versionStr string
if s, ok := lVal.GetString(); ok {
versionStr = s
} else if i, ok := lVal.GetInt(); ok {
versionStr = fmt.Sprintf("%d", i)
} else {
return false
}
// Parse the version
vers, err := version.NewVersion(versionStr)
if err != nil {
return false
}
// Constraint must be a string
constraintStr, ok := rVal.GetString()
if !ok {
return false
}
// Parse the constraints
constraints := parse(constraintStr)
if constraints == nil {
return false
}
// Check the constraints against the version
return constraints.Check(vers)
}
// checkRegexpMatch is used to compare a value on the
// left hand side with a regexp on the right hand side
func checkRegexpMatch(ctx Context, lVal, rVal interface{}) bool {
// Ensure left-hand is string
lStr, ok := lVal.(string)
if !ok {
return false
}
// Regexp must be a string
regexpStr, ok := rVal.(string)
if !ok {
return false
}
// Check the cache
cache := ctx.RegexpCache()
re := cache[regexpStr]
// Parse the regexp
if re == nil {
var err error
re, err = regexp.Compile(regexpStr)
if err != nil {
return false
}
cache[regexpStr] = re
}
// Look for a match
return re.MatchString(lStr)
}
// checkSetContainsAll is used to see if the left hand side contains all of
// the comma-separated values on the right hand side
func checkSetContainsAll(ctx Context, lVal, rVal interface{}) bool {
// Ensure left-hand is string
lStr, ok := lVal.(string)
if !ok {
return false
}
// RHS must be a string
rStr, ok := rVal.(string)
if !ok {
return false
}
input := strings.Split(lStr, ",")
lookup := make(map[string]struct{}, len(input))
for _, in := range input {
cleaned := strings.TrimSpace(in)
lookup[cleaned] = struct{}{}
}
for _, r := range strings.Split(rStr, ",") {
cleaned := strings.TrimSpace(r)
if _, ok := lookup[cleaned]; !ok {
return false
}
}
return true
}
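// Example (illustrative values): both sides are treated as comma-separated
// sets, so a node attribute of "foo,bar,baz" contains all of "foo,baz" but
// not all of "foo,qux".
//
//	checkSetContainsAll(ctx, "foo,bar,baz", "foo,baz") // true
//	checkSetContainsAll(ctx, "foo,bar,baz", "foo,qux") // false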
// checkSetContainsAny is used to see if the left hand side contains any
// values on the right hand side
func checkSetContainsAny(lVal, rVal interface{}) bool {
// Ensure left-hand is string
lStr, ok := lVal.(string)
if !ok {
return false
}
// RHS must be a string
rStr, ok := rVal.(string)
if !ok {
return false
}
input := strings.Split(lStr, ",")
lookup := make(map[string]struct{}, len(input))
for _, in := range input {
cleaned := strings.TrimSpace(in)
lookup[cleaned] = struct{}{}
}
for _, r := range strings.Split(rStr, ",") {
cleaned := strings.TrimSpace(r)
if _, ok := lookup[cleaned]; ok {
return true
}
}
return false
}
// FeasibilityWrapper is a FeasibleIterator which wraps both job and task group
// FeasibilityCheckers, allowing feasibility checking to be skipped if the
// computed node class has previously been marked as eligible or ineligible.
type FeasibilityWrapper struct {
ctx Context
source FeasibleIterator
jobCheckers []FeasibilityChecker
tgCheckers []FeasibilityChecker
tg string
}
// NewFeasibilityWrapper returns a FeasibleIterator based on the passed source
// and FeasibilityCheckers.
func NewFeasibilityWrapper(ctx Context, source FeasibleIterator,
jobCheckers, tgCheckers []FeasibilityChecker) *FeasibilityWrapper {
return &FeasibilityWrapper{
ctx: ctx,
source: source,
jobCheckers: jobCheckers,
tgCheckers: tgCheckers,
}
}
func (w *FeasibilityWrapper) SetTaskGroup(tg string) {
w.tg = tg
}
func (w *FeasibilityWrapper) Reset() {
w.source.Reset()
}
// Next returns an eligible node, only running the FeasibilityCheckers as needed
// based on the source's computed node class.
func (w *FeasibilityWrapper) Next() *structs.Node {
evalElig := w.ctx.Eligibility()
metrics := w.ctx.Metrics()
OUTER:
for {
// Get the next option from the source
option := w.source.Next()
if option == nil {
return nil
}
// Check if the job has been marked as eligible or ineligible.
jobEscaped, jobUnknown := false, false
switch evalElig.JobStatus(option.ComputedClass) {
case EvalComputedClassIneligible:
// Fast path the ineligible case
metrics.FilterNode(option, "computed class ineligible")
continue
case EvalComputedClassEscaped:
jobEscaped = true
case EvalComputedClassUnknown:
jobUnknown = true
}
// Run the job feasibility checks.
for _, check := range w.jobCheckers {
feasible := check.Feasible(option)
if !feasible {
// If the job hasn't escaped, set it to be ineligible since it
// failed a job check.
if !jobEscaped {
evalElig.SetJobEligibility(false, option.ComputedClass)
}
continue OUTER
}
}
// Set the job eligibility if the constraints weren't escaped and it
// hasn't been set before.
if !jobEscaped && jobUnknown {
evalElig.SetJobEligibility(true, option.ComputedClass)
}
// Check if the task group has been marked as eligible or ineligible.
tgEscaped, tgUnknown := false, false
switch evalElig.TaskGroupStatus(w.tg, option.ComputedClass) {
case EvalComputedClassIneligible:
// Fast path the ineligible case
metrics.FilterNode(option, "computed class ineligible")
continue
case EvalComputedClassEligible:
// Fast path the eligible case
return option
case EvalComputedClassEscaped:
tgEscaped = true
case EvalComputedClassUnknown:
tgUnknown = true
}
// Run the task group feasibility checks.
for _, check := range w.tgCheckers {
feasible := check.Feasible(option)
if !feasible {
// If the task group hasn't escaped, set it to be ineligible
// since it failed a check.
if !tgEscaped {
evalElig.SetTaskGroupEligibility(false, w.tg, option.ComputedClass)
}
continue OUTER
}
}
// Set the task group eligibility if the constraints weren't escaped and
// it hasn't been set before.
if !tgEscaped && tgUnknown {
evalElig.SetTaskGroupEligibility(true, w.tg, option.ComputedClass)
}
return option
}
}
// DeviceChecker is a FeasibilityChecker which returns whether a node has the
// devices necessary to schedule a task group.
type DeviceChecker struct {
ctx Context
// required is the set of requested devices that must exist on the node
required []*structs.RequestedDevice
// requiresDevices indicates if the task group requires devices
requiresDevices bool
}
// NewDeviceChecker creates a DeviceChecker
func NewDeviceChecker(ctx Context) *DeviceChecker {
return &DeviceChecker{
ctx: ctx,
}
}
func (c *DeviceChecker) SetTaskGroup(tg *structs.TaskGroup) {
c.required = nil
for _, task := range tg.Tasks {
c.required = append(c.required, task.Resources.Devices...)
}
c.requiresDevices = len(c.required) != 0
}
func (c *DeviceChecker) Feasible(option *structs.Node) bool {
if c.hasDevices(option) {
return true
}
c.ctx.Metrics().FilterNode(option, "missing devices")
return false
}
func (c *DeviceChecker) hasDevices(option *structs.Node) bool {
if !c.requiresDevices {
return true
}
// COMPAT(0.11): Remove in 0.11
// The node does not have the new resources object so it can not have any
// devices
if option.NodeResources == nil {
return false
}
// Check if the node has any devices
nodeDevs := option.NodeResources.Devices
if len(nodeDevs) == 0 {
return false
}
// Create a mapping of node devices to the remaining count
available := make(map[*structs.NodeDeviceResource]uint64, len(nodeDevs))
for _, d := range nodeDevs {
var healthy uint64 = 0
for _, instance := range d.Instances {
if instance.Healthy {
healthy++
}
}
if healthy != 0 {
available[d] = healthy
}
}
// Go through the required devices trying to find matches
OUTER:
for _, req := range c.required {
// Determine how many there are to place
desiredCount := req.Count
// Go through the device resources and see if we have a match
for d, unused := range available {
if unused == 0 {
// Depleted
continue
}
// First check we have enough instances of the device since this is
// cheaper than checking the constraints
if unused < desiredCount {
continue
}
// Check the constraints
if nodeDeviceMatches(c.ctx, d, req) {
// Consume the instances
available[d] -= desiredCount
// Move on to the next request
continue OUTER
}
}
// We couldn't match the request for the device
return false
}
// Only satisfied if there are no more devices to place
return true
}
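// Example (illustrative sketch; the device name and count are hypothetical):
// a request for two "nvidia/gpu" devices is only satisfied by a node that
// reports at least two healthy instances matching the request's ID and
// constraints.
//
//	checker := NewDeviceChecker(ctx)
//	checker.SetTaskGroup(tg) // tg's tasks request e.g. "nvidia/gpu" with Count: 2
//	feasible := checker.Feasible(node)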
// nodeDeviceMatches checks if the device matches the request and its
// constraints. It doesn't check the count.
func nodeDeviceMatches(ctx Context, d *structs.NodeDeviceResource, req *structs.RequestedDevice) bool {
if !d.ID().Matches(req.ID()) {
return false
}
// There are no constraints to consider
if len(req.Constraints) == 0 {
return true
}
for _, c := range req.Constraints {
// Resolve the targets
lVal, lOk := resolveDeviceTarget(c.LTarget, d)
rVal, rOk := resolveDeviceTarget(c.RTarget, d)
// Check if satisfied
if !checkAttributeConstraint(ctx, c.Operand, lVal, rVal, lOk, rOk) {
return false
}
}
return true
}
// resolveDeviceTarget is used to resolve the LTarget and RTarget of a Constraint
// when used on a device
func resolveDeviceTarget(target string, d *structs.NodeDeviceResource) (*psstructs.Attribute, bool) {
// If no prefix, this must be a literal value
if !strings.HasPrefix(target, "${") {
return psstructs.ParseAttribute(target), true
}
// Handle the interpolations
switch {
case "${device.model}" == target:
return psstructs.NewStringAttribute(d.Name), true
case "${device.vendor}" == target:
return psstructs.NewStringAttribute(d.Vendor), true
case "${device.type}" == target:
return psstructs.NewStringAttribute(d.Type), true
case strings.HasPrefix(target, "${device.attr."):
attr := strings.TrimPrefix(target, "${device.attr.")
attr = strings.TrimSuffix(attr, "}")
val, ok := d.Attributes[attr]
return val, ok
default:
return nil, false
}
}
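// Example (illustrative values): "${device.vendor}" resolves to the device's
// Vendor field and "${device.attr.memory}" resolves to the "memory" entry of
// the device's Attributes map; anything without the "${" prefix is parsed as
// a literal attribute such as "2 GiB".
//
//	val, ok := resolveDeviceTarget("${device.attr.memory}", dev)
//	lit, _ := resolveDeviceTarget("2 GiB", dev) // literal attribute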
// checkAttributeConstraint checks if a constraint is satisfied. nil equality
// comparisons are considered to be false.
func checkAttributeConstraint(ctx Context, operand string, lVal, rVal *psstructs.Attribute, lFound, rFound bool) bool {
// Check for constraints not handled by this checker.
switch operand {
case structs.ConstraintDistinctHosts, structs.ConstraintDistinctProperty:
return true
default:
break
}
switch operand {
case "!=", "not":
// Neither value was provided, nil != nil == false
if !(lFound || rFound) {
return false
}
// Only 1 value was provided, therefore nil != some == true
if lFound != rFound {
return true
}
// Both values were provided, so actually compare them
v, ok := lVal.Compare(rVal)
if !ok {
return false
}
return v != 0
case "<", "<=", ">", ">=", "=", "==", "is":
if !(lFound && rFound) {
return false
}
v, ok := lVal.Compare(rVal)
if !ok {
return false
}
switch operand {
case "is", "==", "=":
return v == 0
case "<":
return v == -1
case "<=":
return v != 1
case ">":
return v == 1
case ">=":
return v != -1
default:
return false
}
case structs.ConstraintVersion:
if !(lFound && rFound) {
return false
}
parser := newVersionConstraintParser(ctx)
return checkAttributeVersionMatch(ctx, parser, lVal, rVal)
case structs.ConstraintSemver:
if !(lFound && rFound) {
return false
}
parser := newSemverConstraintParser(ctx)
return checkAttributeVersionMatch(ctx, parser, lVal, rVal)
case structs.ConstraintRegex:
if !(lFound && rFound) {
return false
}
ls, ok := lVal.GetString()
rs, ok2 := rVal.GetString()
if !ok || !ok2 {
return false
}
return checkRegexpMatch(ctx, ls, rs)
case structs.ConstraintSetContains, structs.ConstraintSetContainsAll:
if !(lFound && rFound) {
return false
}
ls, ok := lVal.GetString()
rs, ok2 := rVal.GetString()
if !ok || !ok2 {
return false
}
return checkSetContainsAll(ctx, ls, rs)
case structs.ConstraintSetContainsAny:
if !(lFound && rFound) {
return false
}
ls, ok := lVal.GetString()
rs, ok2 := rVal.GetString()
if !ok || !ok2 {
return false
}
return checkSetContainsAny(ls, rs)
case structs.ConstraintAttributeIsSet:
return lFound
case structs.ConstraintAttributeIsNotSet:
return !lFound
default:
return false
}
}
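// Example (illustrative values; someAttr stands in for any non-nil
// *psstructs.Attribute): unlike checkConstraint, nil comparisons are never
// treated as equal here, so "!=" with both attributes missing returns false,
// while "!=" with exactly one side present returns true.
//
//	checkAttributeConstraint(ctx, "!=", nil, nil, false, false)     // false
//	checkAttributeConstraint(ctx, "!=", someAttr, nil, true, false) // true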
// VerConstraints is the interface implemented by both go-version constraints
// and semver constraints.
type VerConstraints interface {
Check(v *version.Version) bool
String() string
}
// verConstraintParser returns a version constraints implementation (go-version
// or semver).
type verConstraintParser func(verConstraint string) VerConstraints
func newVersionConstraintParser(ctx Context) verConstraintParser {
cache := ctx.VersionConstraintCache()
return func(cstr string) VerConstraints {
if c := cache[cstr]; c != nil {
return c
}
constraints, err := version.NewConstraint(cstr)
if err != nil {
return nil
}
cache[cstr] = constraints
return constraints
}
}
func newSemverConstraintParser(ctx Context) verConstraintParser {
cache := ctx.SemverConstraintCache()
return func(cstr string) VerConstraints {
if c := cache[cstr]; c != nil {
return c
}
constraints, err := semver.NewConstraint(cstr)
if err != nil {
return nil
}
cache[cstr] = constraints
return constraints
}
}
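// Example (illustrative sketch; the constraint string is an assumed input):
// both parsers memoize parsed constraints in the scheduler Context, so
// repeated evaluations of the same constraint string across many nodes reuse
// a single parsed object.
//
//	parse := newSemverConstraintParser(ctx)
//	c := parse(">= 1.2.3, < 2.0.0") // parsed once, then served from the cache
//	if c != nil {
//		_ = c.Check(someVersion) // someVersion is a *version.Version
//	}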