package scheduler

import (
	"fmt"
	"reflect"
	"regexp"
	"strconv"
	"strings"

	memdb "github.com/hashicorp/go-memdb"
	version "github.com/hashicorp/go-version"

	"github.com/hashicorp/nomad/helper/constraints/semver"
	"github.com/hashicorp/nomad/nomad/structs"
	psstructs "github.com/hashicorp/nomad/plugins/shared/structs"
)

const (
	FilterConstraintHostVolumes                    = "missing compatible host volumes"
	FilterConstraintCSIPluginTemplate              = "CSI plugin %s is missing from client %s"
	FilterConstraintCSIPluginUnhealthyTemplate     = "CSI plugin %s is unhealthy on client %s"
	FilterConstraintCSIPluginMaxVolumesTemplate    = "CSI plugin %s has the maximum number of volumes on client %s"
	FilterConstraintCSIVolumesLookupFailed         = "CSI volume lookup failed"
	FilterConstraintCSIVolumeNotFoundTemplate      = "missing CSI Volume %s"
	FilterConstraintCSIVolumeNoReadTemplate        = "CSI volume %s is unschedulable or has exhausted its available reader claims"
	FilterConstraintCSIVolumeNoWriteTemplate       = "CSI volume %s is unschedulable or is read-only"
	FilterConstraintCSIVolumeInUseTemplate         = "CSI volume %s has exhausted its available writer claims"
	FilterConstraintCSIVolumeGCdAllocationTemplate = "CSI volume %s has exhausted its available writer claims and is claimed by a garbage collected allocation %s; waiting for claim to be released"
	FilterConstraintDrivers                        = "missing drivers"
	FilterConstraintDevices                        = "missing devices"
	FilterConstraintsCSIPluginTopology             = "did not meet topology requirement"
)

var (
	// predatesBridgeFingerprint is a constraint matching versions of Nomad
	// that predate the addition of the bridge network fingerprinter, which
	// was added in Nomad v0.12.
	predatesBridgeFingerprint = mustBridgeConstraint()
)

func mustBridgeConstraint() version.Constraints {
	versionC, err := version.NewConstraint("< 0.12")
	if err != nil {
		panic(err)
	}
	return versionC
}

// FeasibleIterator is used to iteratively yield nodes that
// match feasibility constraints. The iterators may manage
// some state for performance optimizations.
type FeasibleIterator interface {
	// Next yields a feasible node or nil if exhausted
	Next() *structs.Node

	// Reset is invoked when an allocation has been placed
	// to reset any stale state.
	Reset()
}

// ContextualIterator is an iterator that can have the job and task group set
// on it.
type ContextualIterator interface {
	SetJob(*structs.Job)
	SetTaskGroup(*structs.TaskGroup)
}

// FeasibilityChecker is used to check if a single node meets feasibility
// constraints.
type FeasibilityChecker interface {
	Feasible(*structs.Node) bool
}

// StaticIterator is a FeasibleIterator which returns nodes
// in a static order. This is used at the base of the iterator
// chain only for testing due to deterministic behavior.
type StaticIterator struct {
	ctx    Context
	nodes  []*structs.Node
	offset int
	seen   int
}

// NewStaticIterator constructs a static iterator from a list of nodes
func NewStaticIterator(ctx Context, nodes []*structs.Node) *StaticIterator {
	iter := &StaticIterator{
		ctx:   ctx,
		nodes: nodes,
	}
	return iter
}

func (iter *StaticIterator) Next() *structs.Node {
	// Check if exhausted
	n := len(iter.nodes)
	if iter.offset == n || iter.seen == n {
		if iter.seen != n { // seen has been Reset() to 0
			iter.offset = 0
		} else {
			return nil
		}
	}

	// Return the next offset, use this one
	offset := iter.offset
	iter.offset += 1
	iter.seen += 1
	iter.ctx.Metrics().EvaluateNode()
	return iter.nodes[offset]
}

func (iter *StaticIterator) Reset() {
	iter.seen = 0
}

func (iter *StaticIterator) SetNodes(nodes []*structs.Node) {
	iter.nodes = nodes
	iter.offset = 0
	iter.seen = 0
}

// NewRandomIterator constructs a static iterator from a list of nodes
// after applying the Fisher-Yates algorithm for a random shuffle. This
// is applied in-place
func NewRandomIterator(ctx Context, nodes []*structs.Node) *StaticIterator {
	// shuffle with the Fisher-Yates algorithm
	idx, _ := ctx.State().LatestIndex()
	shuffleNodes(ctx.Plan(), idx, nodes)

	// Create a static iterator
	return NewStaticIterator(ctx, nodes)
}
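
// A minimal usage sketch (hypothetical wiring; Context, nodes, and drivers
// are normally supplied by the scheduler stack): iterators compose into a
// chain whose base yields nodes and whose wrappers filter them.
//
//	source := NewRandomIterator(ctx, nodes)
//	driver := NewDriverChecker(ctx, drivers)
//	feasible := NewFeasibilityWrapper(ctx, source,
//		nil, []FeasibilityChecker{driver}, nil)
//	for option := feasible.Next(); option != nil; option = feasible.Next() {
//		// consider option for placement; call feasible.Reset() after placing
//	}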

// HostVolumeChecker is a FeasibilityChecker which returns whether a node has
// the host volumes necessary to schedule a task group.
type HostVolumeChecker struct {
	ctx Context

	// volumes is a map[HostVolumeName][]RequestedVolume. The requested volumes are
	// a slice because a single task group may request the same volume multiple times.
	volumes map[string][]*structs.VolumeRequest
}

// NewHostVolumeChecker creates a HostVolumeChecker from a set of volumes
func NewHostVolumeChecker(ctx Context) *HostVolumeChecker {
	return &HostVolumeChecker{
		ctx: ctx,
	}
}

// SetVolumes takes the volumes required by a task group and updates the checker.
func (h *HostVolumeChecker) SetVolumes(volumes map[string]*structs.VolumeRequest) {
	lookupMap := make(map[string][]*structs.VolumeRequest)

	// Convert the map from map[DesiredName]Request to map[Source][]Request to improve
	// lookup performance. Also filter non-host volumes.
	for _, req := range volumes {
		if req.Type != structs.VolumeTypeHost {
			continue
		}
		lookupMap[req.Source] = append(lookupMap[req.Source], req)
	}

	h.volumes = lookupMap
}

func (h *HostVolumeChecker) Feasible(candidate *structs.Node) bool {
	if h.hasVolumes(candidate) {
		return true
	}

	h.ctx.Metrics().FilterNode(candidate, FilterConstraintHostVolumes)
	return false
}

func (h *HostVolumeChecker) hasVolumes(n *structs.Node) bool {
	rLen := len(h.volumes)
	hLen := len(n.HostVolumes)

	// Fast path: Requested no volumes. No need to check further.
	if rLen == 0 {
		return true
	}

	// Fast path: Requesting more volumes than the node has, can't meet the criteria.
	if rLen > hLen {
		return false
	}

	for source, requests := range h.volumes {
		nodeVolume, ok := n.HostVolumes[source]
		if !ok {
			return false
		}

		// If the volume supports being mounted as ReadWrite, we do not need to
		// do further validation for readonly placement.
		if !nodeVolume.ReadOnly {
			continue
		}

		// The Volume can only be mounted ReadOnly, validate that no requests for
		// it are ReadWrite.
		for _, req := range requests {
			if !req.ReadOnly {
				return false
			}
		}
	}

	return true
}
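
// A sketch of the read-only semantics above (hypothetical values): a host
// volume registered read-only satisfies only read-only requests.
//
//	n.HostVolumes["certs"] = &structs.ClientHostVolumeConfig{ReadOnly: true}
//	h.volumes["certs"] = []*structs.VolumeRequest{{Source: "certs", ReadOnly: false}}
//	// hasVolumes(n) == false: a read-write request cannot land on a
//	// read-only host volume.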

type CSIVolumeChecker struct {
	ctx       Context
	namespace string
	jobID     string
	volumes   map[string]*structs.VolumeRequest
}

func NewCSIVolumeChecker(ctx Context) *CSIVolumeChecker {
	return &CSIVolumeChecker{
		ctx: ctx,
	}
}

func (c *CSIVolumeChecker) SetJobID(jobID string) {
	c.jobID = jobID
}

func (c *CSIVolumeChecker) SetNamespace(namespace string) {
	c.namespace = namespace
}

func (c *CSIVolumeChecker) SetVolumes(allocName string, volumes map[string]*structs.VolumeRequest) {
	xs := make(map[string]*structs.VolumeRequest)

	// Filter to only CSI Volumes
	for alias, req := range volumes {
		if req.Type != structs.VolumeTypeCSI {
			continue
		}

		if req.PerAlloc {
			// provide a unique volume source per allocation
			copied := req.Copy()
			copied.Source = copied.Source + structs.AllocSuffix(allocName)
			xs[alias] = copied
		} else {
			xs[alias] = req
		}
	}
	c.volumes = xs
}
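
// For example (hypothetical request): a per-alloc CSI request is rewritten
// so each allocation index claims its own volume.
//
//	req := &structs.VolumeRequest{Type: structs.VolumeTypeCSI, Source: "data", PerAlloc: true}
//	// for alloc name "web.cache[1]", the checker will look up volume "data[1]"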

func (c *CSIVolumeChecker) Feasible(n *structs.Node) bool {
	ok, failReason := c.isFeasible(n)
	if ok {
		return true
	}

	c.ctx.Metrics().FilterNode(n, failReason)
	return false
}

func (c *CSIVolumeChecker) isFeasible(n *structs.Node) (bool, string) {
	// We can mount the volume if
	// - if required, a healthy controller plugin is running the driver
	// - the volume has free claims, or this job owns the claims
	// - this node is running the node plugin, implies matching topology

	// Fast path: Requested no volumes. No need to check further.
	if len(c.volumes) == 0 {
		return true, ""
	}

	ws := memdb.NewWatchSet()

	// Find the count per plugin for this node, so that we can enforce MaxVolumes
	pluginCount := map[string]int64{}
	iter, err := c.ctx.State().CSIVolumesByNodeID(ws, "", n.ID)
	if err != nil {
		return false, FilterConstraintCSIVolumesLookupFailed
	}
	for {
		raw := iter.Next()
		if raw == nil {
			break
		}
		vol, ok := raw.(*structs.CSIVolume)
		if !ok {
			continue
		}
		pluginCount[vol.PluginID] += 1
	}

	// For volume requests, find volumes and determine feasibility
	for _, req := range c.volumes {
		vol, err := c.ctx.State().CSIVolumeByID(ws, c.namespace, req.Source)
		if err != nil {
			return false, FilterConstraintCSIVolumesLookupFailed
		}
		if vol == nil {
			return false, fmt.Sprintf(FilterConstraintCSIVolumeNotFoundTemplate, req.Source)
		}

		// Check that this node has a healthy running plugin with the right PluginID
		plugin, ok := n.CSINodePlugins[vol.PluginID]
		if !ok {
			return false, fmt.Sprintf(FilterConstraintCSIPluginTemplate, vol.PluginID, n.ID)
		}
		if !plugin.Healthy {
			return false, fmt.Sprintf(FilterConstraintCSIPluginUnhealthyTemplate, vol.PluginID, n.ID)
		}
		if pluginCount[vol.PluginID] >= plugin.NodeInfo.MaxVolumes {
			return false, fmt.Sprintf(FilterConstraintCSIPluginMaxVolumesTemplate, vol.PluginID, n.ID)
		}

		// CSI spec: "If requisite is specified, the provisioned
		// volume MUST be accessible from at least one of the
		// requisite topologies."
		if len(vol.Topologies) > 0 {
			if !plugin.NodeInfo.AccessibleTopology.MatchFound(vol.Topologies) {
				return false, FilterConstraintsCSIPluginTopology
			}
		}

		if req.ReadOnly {
			if !vol.ReadSchedulable() {
				return false, fmt.Sprintf(FilterConstraintCSIVolumeNoReadTemplate, vol.ID)
			}
		} else {
			if !vol.WriteSchedulable() {
				return false, fmt.Sprintf(FilterConstraintCSIVolumeNoWriteTemplate, vol.ID)
			}
			if !vol.HasFreeWriteClaims() {
				for id := range vol.WriteAllocs {
					a, err := c.ctx.State().AllocByID(ws, id)
					// the alloc for this blocking claim has been
					// garbage collected but the volumewatcher hasn't
					// finished releasing the claim (and possibly
					// detaching the volume), so we need to block
					// until it can be scheduled
					if err != nil || a == nil {
						return false, fmt.Sprintf(
							FilterConstraintCSIVolumeGCdAllocationTemplate, vol.ID, id)
					} else if a.Namespace != c.namespace || a.JobID != c.jobID {
						// the blocking claim is for another live job
						// so it's legitimately blocking more write
						// claims
						return false, fmt.Sprintf(
							FilterConstraintCSIVolumeInUseTemplate, vol.ID)
					}
				}
			}
		}
	}

	return true, ""
}
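
// In short (sketch of the claim logic above): a write request passes only
// when the volume is write-schedulable and either has free write claims or
// every blocking claim belongs to a live allocation of this same job and
// namespace; a claim held by a garbage-collected allocation blocks placement
// until the volumewatcher releases it.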

// NetworkChecker is a FeasibilityChecker which returns whether a node has the
// network resources necessary to schedule the task group
type NetworkChecker struct {
	ctx         Context
	networkMode string
	ports       []structs.Port
}

func NewNetworkChecker(ctx Context) *NetworkChecker {
	return &NetworkChecker{ctx: ctx, networkMode: "host"}
}

func (c *NetworkChecker) SetNetwork(network *structs.NetworkResource) {
	c.networkMode = network.Mode
	if c.networkMode == "" {
		c.networkMode = "host"
	}

	// Allocate with zero length (and full capacity) so the appends below do
	// not leave zero-valued ports at the front of the slice.
	c.ports = make([]structs.Port, 0, len(network.DynamicPorts)+len(network.ReservedPorts))
	c.ports = append(c.ports, network.DynamicPorts...)
	c.ports = append(c.ports, network.ReservedPorts...)
}

func (c *NetworkChecker) Feasible(option *structs.Node) bool {
	if !c.hasNetwork(option) {

		// special case - if the client is running a version older than 0.12 but
		// the server is 0.12 or newer, we need to maintain an upgrade path for
		// jobs looking for a bridge network that will not have been fingerprinted
		// on the client (which was added in 0.12)
		if c.networkMode == "bridge" {
			sv, err := version.NewSemver(option.Attributes["nomad.version"])
			if err == nil && predatesBridgeFingerprint.Check(sv) {
				return true
			}
		}

		c.ctx.Metrics().FilterNode(option, "missing network")
		return false
	}

	if c.ports != nil {
		if !c.hasHostNetworks(option) {
			return false
		}
	}

	return true
}
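
// For illustration (hypothetical attribute value): a client reporting
// Attributes["nomad.version"] = "0.11.3" has no bridge fingerprint, but the
// "< 0.12" constraint above matches, so the node is still admitted and the
// upgrade path for bridge-mode jobs stays intact.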

func (c *NetworkChecker) hasHostNetworks(option *structs.Node) bool {
	for _, port := range c.ports {
		if port.HostNetwork != "" {
			hostNetworkValue, hostNetworkOk := resolveTarget(port.HostNetwork, option)
			if !hostNetworkOk {
				c.ctx.Metrics().FilterNode(option, fmt.Sprintf("invalid host network %q template for port %q", port.HostNetwork, port.Label))
				return false
			}
			found := false
			for _, net := range option.NodeResources.NodeNetworks {
				if net.HasAlias(hostNetworkValue.(string)) {
					found = true
					break
				}
			}
			if !found {
				c.ctx.Metrics().FilterNode(option, fmt.Sprintf("missing host network %q for port %q", hostNetworkValue.(string), port.Label))
				return false
			}
		}
	}
	return true
}

func (c *NetworkChecker) hasNetwork(option *structs.Node) bool {
	if option.NodeResources == nil {
		return false
	}

	for _, nw := range option.NodeResources.Networks {
		mode := nw.Mode
		if mode == "" {
			mode = "host"
		}
		if mode == c.networkMode {
			return true
		}
	}

	return false
}

// DriverChecker is a FeasibilityChecker which returns whether a node has the
// drivers necessary to schedule a task group.
type DriverChecker struct {
	ctx     Context
	drivers map[string]struct{}
}

// NewDriverChecker creates a DriverChecker from a set of drivers
func NewDriverChecker(ctx Context, drivers map[string]struct{}) *DriverChecker {
	return &DriverChecker{
		ctx:     ctx,
		drivers: drivers,
	}
}

func (c *DriverChecker) SetDrivers(d map[string]struct{}) {
	c.drivers = d
}

func (c *DriverChecker) Feasible(option *structs.Node) bool {
	// Use this node if possible
	if c.hasDrivers(option) {
		return true
	}
	c.ctx.Metrics().FilterNode(option, FilterConstraintDrivers)
	return false
}

// hasDrivers is used to check if the node has all the appropriate
// drivers for this task group. Drivers are registered as node attribute
// like "driver.docker=1" with their corresponding version.
func (c *DriverChecker) hasDrivers(option *structs.Node) bool {
	for driver := range c.drivers {
		driverStr := fmt.Sprintf("driver.%s", driver)

		// COMPAT: Remove in 0.10: As of Nomad 0.8, nodes have a DriverInfo that
		// corresponds with every driver. As a Nomad server might be on a later
		// version than a Nomad client, we need to check for compatibility here
		// to verify the client supports this.
		if driverInfo, ok := option.Drivers[driver]; ok {
			if driverInfo == nil {
				c.ctx.Logger().Named("driver_checker").Warn("node has no driver info set", "node_id", option.ID, "driver", driver)
				return false
			}

			if driverInfo.Detected && driverInfo.Healthy {
				continue
			} else {
				return false
			}
		}

		value, ok := option.Attributes[driverStr]
		if !ok {
			return false
		}

		enabled, err := strconv.ParseBool(value)
		if err != nil {
			c.ctx.Logger().Named("driver_checker").Warn("node has invalid driver setting", "node_id", option.ID, "driver", driver, "val", value)
			return false
		}

		if !enabled {
			return false
		}
	}

	return true
}
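
// Legacy example (hypothetical attributes): on a pre-0.8 client the docker
// driver surfaces only as Attributes["driver.docker"] = "1", which
// strconv.ParseBool accepts as true; a value of "0" or a missing key filters
// the node.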

// DistinctHostsIterator is a FeasibleIterator which returns nodes that pass the
// distinct_hosts constraint. The constraint ensures that multiple allocations
// do not exist on the same node.
type DistinctHostsIterator struct {
	ctx    Context
	source FeasibleIterator
	tg     *structs.TaskGroup
	job    *structs.Job

	// Store whether the Job or TaskGroup has a distinct_hosts constraint so
	// they don't have to be calculated every time Next() is called.
	tgDistinctHosts  bool
	jobDistinctHosts bool
}

// NewDistinctHostsIterator creates a DistinctHostsIterator from a source.
func NewDistinctHostsIterator(ctx Context, source FeasibleIterator) *DistinctHostsIterator {
	return &DistinctHostsIterator{
		ctx:    ctx,
		source: source,
	}
}

func (iter *DistinctHostsIterator) SetTaskGroup(tg *structs.TaskGroup) {
	iter.tg = tg
	iter.tgDistinctHosts = iter.hasDistinctHostsConstraint(tg.Constraints)
}

func (iter *DistinctHostsIterator) SetJob(job *structs.Job) {
	iter.job = job
	iter.jobDistinctHosts = iter.hasDistinctHostsConstraint(job.Constraints)
}

func (iter *DistinctHostsIterator) hasDistinctHostsConstraint(constraints []*structs.Constraint) bool {
	for _, con := range constraints {
		if con.Operand == structs.ConstraintDistinctHosts {
			return true
		}
	}

	return false
}

func (iter *DistinctHostsIterator) Next() *structs.Node {
	for {
		// Get the next option from the source
		option := iter.source.Next()

		// Hot-path if the option is nil or there are no distinct_hosts or
		// distinct_property constraints.
		hosts := iter.jobDistinctHosts || iter.tgDistinctHosts
		if option == nil || !hosts {
			return option
		}

		// Check if the host constraints are satisfied
		if !iter.satisfiesDistinctHosts(option) {
			iter.ctx.Metrics().FilterNode(option, structs.ConstraintDistinctHosts)
			continue
		}

		return option
	}
}

// satisfiesDistinctHosts checks if the node satisfies a distinct_hosts
// constraint either specified at the job level or the TaskGroup level.
func (iter *DistinctHostsIterator) satisfiesDistinctHosts(option *structs.Node) bool {
	// Check if there is no constraint set.
	if !(iter.jobDistinctHosts || iter.tgDistinctHosts) {
		return true
	}

	// Get the proposed allocations
	proposed, err := iter.ctx.ProposedAllocs(option.ID)
	if err != nil {
		iter.ctx.Logger().Named("distinct_hosts").Error("failed to get proposed allocations", "error", err)
		return false
	}

	// Skip the node if the task group has already been allocated on it.
	for _, alloc := range proposed {
		// If the job has a distinct_hosts constraint we only need an alloc
		// collision on the JobID but if the constraint is on the TaskGroup then
		// we need both a job and TaskGroup collision.
		jobCollision := alloc.JobID == iter.job.ID
		taskCollision := alloc.TaskGroup == iter.tg.Name
		if iter.jobDistinctHosts && jobCollision || jobCollision && taskCollision {
			return false
		}
	}

	return true
}
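
// Sketch of the collision rule above: with distinct_hosts at the job level,
// any proposed alloc of the same job disqualifies the node; at the task
// group level, only an alloc of the same job and the same group does.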

func (iter *DistinctHostsIterator) Reset() {
	iter.source.Reset()
}

// DistinctPropertyIterator is a FeasibleIterator which returns nodes that pass the
// distinct_property constraint. The constraint ensures that multiple allocations
// do not use the same value of the given property.
type DistinctPropertyIterator struct {
	ctx    Context
	source FeasibleIterator
	tg     *structs.TaskGroup
	job    *structs.Job

	hasDistinctPropertyConstraints bool
	jobPropertySets                []*propertySet
	groupPropertySets              map[string][]*propertySet
}

// NewDistinctPropertyIterator creates a DistinctPropertyIterator from a source.
func NewDistinctPropertyIterator(ctx Context, source FeasibleIterator) *DistinctPropertyIterator {
	return &DistinctPropertyIterator{
		ctx:               ctx,
		source:            source,
		groupPropertySets: make(map[string][]*propertySet),
	}
}

func (iter *DistinctPropertyIterator) SetTaskGroup(tg *structs.TaskGroup) {
	iter.tg = tg

	// Build the property set at the taskgroup level
	if _, ok := iter.groupPropertySets[tg.Name]; !ok {
		for _, c := range tg.Constraints {
			if c.Operand != structs.ConstraintDistinctProperty {
				continue
			}

			pset := NewPropertySet(iter.ctx, iter.job)
			pset.SetTGConstraint(c, tg.Name)
			iter.groupPropertySets[tg.Name] = append(iter.groupPropertySets[tg.Name], pset)
		}
	}

	// Check if there is a distinct property
	iter.hasDistinctPropertyConstraints = len(iter.jobPropertySets) != 0 || len(iter.groupPropertySets[tg.Name]) != 0
}

func (iter *DistinctPropertyIterator) SetJob(job *structs.Job) {
	iter.job = job

	// Build the property set at the job level
	for _, c := range job.Constraints {
		if c.Operand != structs.ConstraintDistinctProperty {
			continue
		}

		pset := NewPropertySet(iter.ctx, job)
		pset.SetJobConstraint(c)
		iter.jobPropertySets = append(iter.jobPropertySets, pset)
	}
}

func (iter *DistinctPropertyIterator) Next() *structs.Node {
	for {
		// Get the next option from the source
		option := iter.source.Next()

		// Hot path if there is nothing to check
		if option == nil || !iter.hasDistinctPropertyConstraints {
			return option
		}

		// Check if the constraints are met
		if !iter.satisfiesProperties(option, iter.jobPropertySets) ||
			!iter.satisfiesProperties(option, iter.groupPropertySets[iter.tg.Name]) {
			continue
		}

		return option
	}
}

// satisfiesProperties returns whether the option satisfies the set of
// properties. If not it will be filtered.
func (iter *DistinctPropertyIterator) satisfiesProperties(option *structs.Node, set []*propertySet) bool {
	for _, ps := range set {
		if satisfies, reason := ps.SatisfiesDistinctProperties(option, iter.tg.Name); !satisfies {
			iter.ctx.Metrics().FilterNode(option, reason)
			return false
		}
	}

	return true
}

func (iter *DistinctPropertyIterator) Reset() {
	iter.source.Reset()

	for _, ps := range iter.jobPropertySets {
		ps.PopulateProposed()
	}

	for _, sets := range iter.groupPropertySets {
		for _, ps := range sets {
			ps.PopulateProposed()
		}
	}
}

// ConstraintChecker is a FeasibilityChecker which returns nodes that match a
// given set of constraints. This is used to filter on job, task group, and task
// constraints.
type ConstraintChecker struct {
	ctx         Context
	constraints []*structs.Constraint
}

// NewConstraintChecker creates a ConstraintChecker for a set of constraints
func NewConstraintChecker(ctx Context, constraints []*structs.Constraint) *ConstraintChecker {
	return &ConstraintChecker{
		ctx:         ctx,
		constraints: constraints,
	}
}

func (c *ConstraintChecker) SetConstraints(constraints []*structs.Constraint) {
	c.constraints = constraints
}

func (c *ConstraintChecker) Feasible(option *structs.Node) bool {
	// Use this node if possible
	for _, constraint := range c.constraints {
		if !c.meetsConstraint(constraint, option) {
			c.ctx.Metrics().FilterNode(option, constraint.String())
			return false
		}
	}

	return true
}

func (c *ConstraintChecker) meetsConstraint(constraint *structs.Constraint, option *structs.Node) bool {
	// Resolve the targets. Targets that are not present are treated as `nil`.
	// This is to allow for matching constraints where a target is not present.
	lVal, lOk := resolveTarget(constraint.LTarget, option)
	rVal, rOk := resolveTarget(constraint.RTarget, option)

	// Check if satisfied
	return checkConstraint(c.ctx, constraint.Operand, lVal, rVal, lOk, rOk)
}

// resolveTarget is used to resolve the LTarget and RTarget of a Constraint.
func resolveTarget(target string, node *structs.Node) (interface{}, bool) {
	// If no prefix, this must be a literal value
	if !strings.HasPrefix(target, "${") {
		return target, true
	}

	// Handle the interpolations
	switch {
	case "${node.unique.id}" == target:
		return node.ID, true
	case "${node.datacenter}" == target:
		return node.Datacenter, true
	case "${node.unique.name}" == target:
		return node.Name, true
	case "${node.class}" == target:
		return node.NodeClass, true
	case strings.HasPrefix(target, "${attr."):
		attr := strings.TrimSuffix(strings.TrimPrefix(target, "${attr."), "}")
		val, ok := node.Attributes[attr]
		return val, ok
	case strings.HasPrefix(target, "${meta."):
		meta := strings.TrimSuffix(strings.TrimPrefix(target, "${meta."), "}")
		val, ok := node.Meta[meta]
		return val, ok
	default:
		return nil, false
	}
}
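
// For example (hypothetical node attributes):
//
//	resolveTarget("${attr.kernel.name}", node) // -> ("linux", true) when set
//	resolveTarget("${node.bogus}", node)       // -> (nil, false)
//	resolveTarget("linux", node)               // -> ("linux", true), a literal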

// checkConstraint checks if a constraint is satisfied. The lVal and rVal
// interfaces may be nil.
func checkConstraint(ctx Context, operand string, lVal, rVal interface{}, lFound, rFound bool) bool {
	// Check for constraints not handled by this checker.
	switch operand {
	case structs.ConstraintDistinctHosts, structs.ConstraintDistinctProperty:
		return true
	default:
		break
	}

	switch operand {
	case "=", "==", "is":
		return lFound && rFound && reflect.DeepEqual(lVal, rVal)
	case "!=", "not":
		return !reflect.DeepEqual(lVal, rVal)
	case "<", "<=", ">", ">=":
		return lFound && rFound && checkLexicalOrder(operand, lVal, rVal)
	case structs.ConstraintAttributeIsSet:
		return lFound
	case structs.ConstraintAttributeIsNotSet:
		return !lFound
	case structs.ConstraintVersion:
		parser := newVersionConstraintParser(ctx)
		return lFound && rFound && checkVersionMatch(ctx, parser, lVal, rVal)
	case structs.ConstraintSemver:
		parser := newSemverConstraintParser(ctx)
		return lFound && rFound && checkVersionMatch(ctx, parser, lVal, rVal)
	case structs.ConstraintRegex:
		return lFound && rFound && checkRegexpMatch(ctx, lVal, rVal)
	case structs.ConstraintSetContains, structs.ConstraintSetContainsAll:
		return lFound && rFound && checkSetContainsAll(ctx, lVal, rVal)
	case structs.ConstraintSetContainsAny:
		return lFound && rFound && checkSetContainsAny(lVal, rVal)
	default:
		return false
	}
}

// checkAffinity checks if a specific affinity is satisfied
func checkAffinity(ctx Context, operand string, lVal, rVal interface{}, lFound, rFound bool) bool {
	return checkConstraint(ctx, operand, lVal, rVal, lFound, rFound)
}

// checkAttributeAffinity checks if an affinity is satisfied
func checkAttributeAffinity(ctx Context, operand string, lVal, rVal *psstructs.Attribute, lFound, rFound bool) bool {
	return checkAttributeConstraint(ctx, operand, lVal, rVal, lFound, rFound)
}

// checkLexicalOrder is used to check for lexical ordering
func checkLexicalOrder(op string, lVal, rVal interface{}) bool {
	// Ensure the values are strings
	lStr, ok := lVal.(string)
	if !ok {
		return false
	}
	rStr, ok := rVal.(string)
	if !ok {
		return false
	}

	switch op {
	case "<":
		return lStr < rStr
	case "<=":
		return lStr <= rStr
	case ">":
		return lStr > rStr
	case ">=":
		return lStr >= rStr
	default:
		return false
	}
}

// checkVersionMatch is used to compare a version on the
// left hand side with a set of constraints on the right hand side
func checkVersionMatch(ctx Context, parse verConstraintParser, lVal, rVal interface{}) bool {
	// Convert the left-hand value to a version string
	var versionStr string
	switch v := lVal.(type) {
	case string:
		versionStr = v
	case int:
		versionStr = fmt.Sprintf("%d", v)
	default:
		return false
	}

	// Parse the version
	vers, err := version.NewVersion(versionStr)
	if err != nil {
		return false
	}

	// Constraint must be a string
	constraintStr, ok := rVal.(string)
	if !ok {
		return false
	}

	// Parse the constraints
	constraints := parse(constraintStr)
	if constraints == nil {
		return false
	}

	// Check the constraints against the version
	return constraints.Check(vers)
}
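
// For example (hypothetical values): lVal "1.13.4" against rVal
// ">= 1.12, < 2" parses cleanly and returns true, while a left-hand value
// that is neither a string nor an int fails fast before any parsing.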

// checkAttributeVersionMatch is used to compare a version on the
// left hand side with a set of constraints on the right hand side
func checkAttributeVersionMatch(ctx Context, parse verConstraintParser, lVal, rVal *psstructs.Attribute) bool {
	// Convert the left-hand value to a version string
	var versionStr string
	if s, ok := lVal.GetString(); ok {
		versionStr = s
	} else if i, ok := lVal.GetInt(); ok {
		versionStr = fmt.Sprintf("%d", i)
	} else {
		return false
	}

	// Parse the version
	vers, err := version.NewVersion(versionStr)
	if err != nil {
		return false
	}

	// Constraint must be a string
	constraintStr, ok := rVal.GetString()
	if !ok {
		return false
	}

	// Parse the constraints
	constraints := parse(constraintStr)
	if constraints == nil {
		return false
	}

	// Check the constraints against the version
	return constraints.Check(vers)
}

// checkRegexpMatch is used to compare a value on the
// left hand side with a regexp on the right hand side
func checkRegexpMatch(ctx Context, lVal, rVal interface{}) bool {
	// Ensure left-hand is string
	lStr, ok := lVal.(string)
	if !ok {
		return false
	}

	// Regexp must be a string
	regexpStr, ok := rVal.(string)
	if !ok {
		return false
	}

	// Check the cache
	cache := ctx.RegexpCache()
	re := cache[regexpStr]

	// Parse the regexp
	if re == nil {
		var err error
		re, err = regexp.Compile(regexpStr)
		if err != nil {
			return false
		}
		cache[regexpStr] = re
	}

	// Look for a match
	return re.MatchString(lStr)
}

// checkSetContainsAll is used to see if the left hand side contains all the
// values on the right hand side
func checkSetContainsAll(ctx Context, lVal, rVal interface{}) bool {
	// Ensure left-hand is string
	lStr, ok := lVal.(string)
	if !ok {
		return false
	}

	// RHS must be a string
	rStr, ok := rVal.(string)
	if !ok {
		return false
	}

	input := strings.Split(lStr, ",")
	lookup := make(map[string]struct{}, len(input))
	for _, in := range input {
		cleaned := strings.TrimSpace(in)
		lookup[cleaned] = struct{}{}
	}

	for _, r := range strings.Split(rStr, ",") {
		cleaned := strings.TrimSpace(r)
		if _, ok := lookup[cleaned]; !ok {
			return false
		}
	}

	return true
}

// checkSetContainsAny is used to see if the left hand side contains any
// values on the right hand side
func checkSetContainsAny(lVal, rVal interface{}) bool {
	// Ensure left-hand is string
	lStr, ok := lVal.(string)
	if !ok {
		return false
	}

	// RHS must be a string
	rStr, ok := rVal.(string)
	if !ok {
		return false
	}

	input := strings.Split(lStr, ",")
	lookup := make(map[string]struct{}, len(input))
	for _, in := range input {
		cleaned := strings.TrimSpace(in)
		lookup[cleaned] = struct{}{}
	}

	for _, r := range strings.Split(rStr, ",") {
		cleaned := strings.TrimSpace(r)
		if _, ok := lookup[cleaned]; ok {
			return true
		}
	}

	return false
}
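
// For example (hypothetical values):
//
//	checkSetContainsAll(ctx, "foo, bar, baz", "foo,baz") // true
//	checkSetContainsAll(ctx, "foo, bar", "foo,qux")      // false
//	checkSetContainsAny("foo, bar", "qux,bar")           // true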

// FeasibilityWrapper is a FeasibleIterator which wraps both job and task group
// FeasibilityCheckers in which feasibility checking can be skipped if the
// computed node class has previously been marked as eligible or ineligible.
type FeasibilityWrapper struct {
	ctx         Context
	source      FeasibleIterator
	jobCheckers []FeasibilityChecker
	tgCheckers  []FeasibilityChecker
	tgAvailable []FeasibilityChecker
	tg          string
}

// NewFeasibilityWrapper returns a FeasibleIterator based on the passed source
// and FeasibilityCheckers.
func NewFeasibilityWrapper(ctx Context, source FeasibleIterator,
	jobCheckers, tgCheckers, tgAvailable []FeasibilityChecker) *FeasibilityWrapper {
	return &FeasibilityWrapper{
		ctx:         ctx,
		source:      source,
		jobCheckers: jobCheckers,
		tgCheckers:  tgCheckers,
		tgAvailable: tgAvailable,
	}
}

func (w *FeasibilityWrapper) SetTaskGroup(tg string) {
	w.tg = tg
}

func (w *FeasibilityWrapper) Reset() {
	w.source.Reset()
}

// Next returns an eligible node, only running the FeasibilityCheckers as needed
// based on the source's computed node class.
func (w *FeasibilityWrapper) Next() *structs.Node {
	evalElig := w.ctx.Eligibility()
	metrics := w.ctx.Metrics()

OUTER:
	for {
		// Get the next option from the source
		option := w.source.Next()
		if option == nil {
			return nil
		}

		// Check if the job has been marked as eligible or ineligible.
		jobEscaped, jobUnknown := false, false
		switch evalElig.JobStatus(option.ComputedClass) {
		case EvalComputedClassIneligible:
			// Fast path the ineligible case
			metrics.FilterNode(option, "computed class ineligible")
			continue
		case EvalComputedClassEscaped:
			jobEscaped = true
		case EvalComputedClassUnknown:
			jobUnknown = true
		}

		// Run the job feasibility checks.
		for _, check := range w.jobCheckers {
			feasible := check.Feasible(option)
			if !feasible {
				// If the job hasn't escaped, set it to be ineligible since it
				// failed a job check.
				if !jobEscaped {
					evalElig.SetJobEligibility(false, option.ComputedClass)
				}
				continue OUTER
			}
		}

		// Set the job eligibility if the constraints weren't escaped and it
		// hasn't been set before.
		if !jobEscaped && jobUnknown {
			evalElig.SetJobEligibility(true, option.ComputedClass)
		}

		// Check if the task group has been marked as eligible or ineligible.
		tgEscaped, tgUnknown := false, false
		switch evalElig.TaskGroupStatus(w.tg, option.ComputedClass) {
		case EvalComputedClassIneligible:
			// Fast path the ineligible case
			metrics.FilterNode(option, "computed class ineligible")
			continue
		case EvalComputedClassEligible:
			// Fast path the eligible case
			if w.available(option) {
				return option
			}
			// We match the class but are temporarily unavailable, the eval
			// should be blocked
			return nil
		case EvalComputedClassEscaped:
			tgEscaped = true
		case EvalComputedClassUnknown:
			tgUnknown = true
		}

		// Run the task group feasibility checks.
		for _, check := range w.tgCheckers {
			feasible := check.Feasible(option)
			if !feasible {
				// If the task group hasn't escaped, set it to be ineligible
				// since it failed a check.
				if !tgEscaped {
					evalElig.SetTaskGroupEligibility(false, w.tg, option.ComputedClass)
				}
				continue OUTER
			}
		}

		// Set the task group eligibility if the constraints weren't escaped and
		// it hasn't been set before.
		if !tgEscaped && tgUnknown {
			evalElig.SetTaskGroupEligibility(true, w.tg, option.ComputedClass)
		}

		// tgAvailable handlers are available transiently, so we test them without
		// affecting the computed class
		if !w.available(option) {
			continue OUTER
		}

		return option
	}
}
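
// Sketch of the class-caching behavior above: once a node's computed class
// fails a job- or group-level check, later nodes of the same class take the
// "computed class ineligible" fast path without re-running the checkers,
// unless the constraints escaped class computation.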

// available checks transient feasibility checkers which depend on changing conditions,
// e.g. the health status of a plugin or driver
func (w *FeasibilityWrapper) available(option *structs.Node) bool {
	// If we don't have any availability checks, we're available
	if len(w.tgAvailable) == 0 {
		return true
	}

	for _, check := range w.tgAvailable {
		if !check.Feasible(option) {
			return false
		}
	}
	return true
}

// DeviceChecker is a FeasibilityChecker which returns whether a node has the
// devices necessary to schedule a task group.
type DeviceChecker struct {
	ctx Context

	// required is the set of requested devices that must exist on the node
	required []*structs.RequestedDevice

	// requiresDevices indicates if the task group requires devices
	requiresDevices bool
}

// NewDeviceChecker creates a DeviceChecker
func NewDeviceChecker(ctx Context) *DeviceChecker {
	return &DeviceChecker{
		ctx: ctx,
	}
}

func (c *DeviceChecker) SetTaskGroup(tg *structs.TaskGroup) {
	c.required = nil
	for _, task := range tg.Tasks {
		c.required = append(c.required, task.Resources.Devices...)
	}
	c.requiresDevices = len(c.required) != 0
}

func (c *DeviceChecker) Feasible(option *structs.Node) bool {
	if c.hasDevices(option) {
		return true
	}

	c.ctx.Metrics().FilterNode(option, FilterConstraintDevices)
	return false
}

func (c *DeviceChecker) hasDevices(option *structs.Node) bool {
	if !c.requiresDevices {
		return true
	}

	// COMPAT(0.11): Remove in 0.11
	// The node does not have the new resources object so it can not have any
	// devices
	if option.NodeResources == nil {
		return false
	}

	// Check if the node has any devices
	nodeDevs := option.NodeResources.Devices
	if len(nodeDevs) == 0 {
		return false
	}

	// Create a mapping of node devices to the remaining count
	available := make(map[*structs.NodeDeviceResource]uint64, len(nodeDevs))
	for _, d := range nodeDevs {
		var healthy uint64 = 0
		for _, instance := range d.Instances {
			if instance.Healthy {
				healthy++
			}
		}
		if healthy != 0 {
			available[d] = healthy
		}
	}

	// Go through the required devices trying to find matches
OUTER:
	for _, req := range c.required {
		// Determine how many there are to place
		desiredCount := req.Count

		// Go through the device resources and see if we have a match
		for d, unused := range available {
			if unused == 0 {
				// Depleted
				continue
			}

			// First check we have enough instances of the device since this is
			// cheaper than checking the constraints
			if unused < desiredCount {
				continue
			}

			// Check the constraints
			if nodeDeviceMatches(c.ctx, d, req) {
				// Consume the instances
				available[d] -= desiredCount

				// Move on to the next request
				continue OUTER
			}
		}

		// We couldn't match the request for the device
		return false
	}

	// Only satisfied if there are no more devices to place
	return true
}
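
// For example (hypothetical request): a group asking for two instances of a
// matching device fits a node advertising three healthy instances of it, and
// the bookkeeping above leaves one instance available for any further
// requests in the same group.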

// nodeDeviceMatches checks if the device matches the request and its
// constraints. It doesn't check the count.
func nodeDeviceMatches(ctx Context, d *structs.NodeDeviceResource, req *structs.RequestedDevice) bool {
	if !d.ID().Matches(req.ID()) {
		return false
	}

	// There are no constraints to consider
	if len(req.Constraints) == 0 {
		return true
	}

	for _, c := range req.Constraints {
		// Resolve the targets
		lVal, lOk := resolveDeviceTarget(c.LTarget, d)
		rVal, rOk := resolveDeviceTarget(c.RTarget, d)

		// Check if satisfied
		if !checkAttributeConstraint(ctx, c.Operand, lVal, rVal, lOk, rOk) {
			return false
		}
	}

	return true
}

// resolveDeviceTarget is used to resolve the LTarget and RTarget of a Constraint
// when used on a device
func resolveDeviceTarget(target string, d *structs.NodeDeviceResource) (*psstructs.Attribute, bool) {
	// If no prefix, this must be a literal value
	if !strings.HasPrefix(target, "${") {
		return psstructs.ParseAttribute(target), true
	}

	// Handle the interpolations
	switch {
	case "${device.model}" == target:
		return psstructs.NewStringAttribute(d.Name), true
	case "${device.vendor}" == target:
		return psstructs.NewStringAttribute(d.Vendor), true
	case "${device.type}" == target:
		return psstructs.NewStringAttribute(d.Type), true
	case strings.HasPrefix(target, "${device.attr."):
		attr := strings.TrimPrefix(target, "${device.attr.")
		attr = strings.TrimSuffix(attr, "}")
		val, ok := d.Attributes[attr]
		return val, ok
	default:
		return nil, false
	}
}

// checkAttributeConstraint checks if a constraint is satisfied. nil equality
// comparisons are considered to be false.
func checkAttributeConstraint(ctx Context, operand string, lVal, rVal *psstructs.Attribute, lFound, rFound bool) bool {
	// Check for constraints not handled by this checker.
	switch operand {
	case structs.ConstraintDistinctHosts, structs.ConstraintDistinctProperty:
		return true
	default:
		break
	}

	switch operand {
	case "!=", "not":
		// Neither value was provided, nil != nil == false
		if !(lFound || rFound) {
			return false
		}

		// Only 1 value was provided, therefore nil != some == true
		if lFound != rFound {
			return true
		}

		// Both values were provided, so actually compare them
		v, ok := lVal.Compare(rVal)
		if !ok {
			return false
		}

		return v != 0

	case "<", "<=", ">", ">=", "=", "==", "is":
		if !(lFound && rFound) {
			return false
		}

		v, ok := lVal.Compare(rVal)
		if !ok {
			return false
		}

		switch operand {
		case "is", "==", "=":
			return v == 0
		case "<":
			return v == -1
		case "<=":
			return v != 1
		case ">":
			return v == 1
		case ">=":
			return v != -1
		default:
			return false
		}

	case structs.ConstraintVersion:
		if !(lFound && rFound) {
			return false
		}

		parser := newVersionConstraintParser(ctx)
		return checkAttributeVersionMatch(ctx, parser, lVal, rVal)

	case structs.ConstraintSemver:
		if !(lFound && rFound) {
			return false
		}

		parser := newSemverConstraintParser(ctx)
		return checkAttributeVersionMatch(ctx, parser, lVal, rVal)

	case structs.ConstraintRegex:
		if !(lFound && rFound) {
			return false
		}

		ls, ok := lVal.GetString()
		rs, ok2 := rVal.GetString()
		if !ok || !ok2 {
			return false
		}
		return checkRegexpMatch(ctx, ls, rs)

	case structs.ConstraintSetContains, structs.ConstraintSetContainsAll:
		if !(lFound && rFound) {
			return false
		}

		ls, ok := lVal.GetString()
		rs, ok2 := rVal.GetString()
		if !ok || !ok2 {
			return false
		}
		return checkSetContainsAll(ctx, ls, rs)

	case structs.ConstraintSetContainsAny:
		if !(lFound && rFound) {
			return false
		}

		ls, ok := lVal.GetString()
		rs, ok2 := rVal.GetString()
		if !ok || !ok2 {
			return false
		}
		return checkSetContainsAny(ls, rs)

	case structs.ConstraintAttributeIsSet:
		return lFound

	case structs.ConstraintAttributeIsNotSet:
		return !lFound

	default:
		return false
	}
}
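
// For example (hypothetical attributes): comparing "512MB" against "1GB"
// with operand "<" relies on Attribute.Compare returning -1 for the smaller
// unit-aware value, which is why attribute constraints can succeed where
// plain lexical ordering on the raw strings would not.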

// VerConstraints is the interface implemented by both go-version constraints
// and semver constraints.
type VerConstraints interface {
	Check(v *version.Version) bool
	String() string
}

// verConstraintParser returns a version constraints implementation (go-version
// or semver).
type verConstraintParser func(verConstraint string) VerConstraints

func newVersionConstraintParser(ctx Context) verConstraintParser {
	cache := ctx.VersionConstraintCache()

	return func(cstr string) VerConstraints {
		if c := cache[cstr]; c != nil {
			return c
		}

		constraints, err := version.NewConstraint(cstr)
		if err != nil {
			return nil
		}
		cache[cstr] = constraints

		return constraints
	}
}

func newSemverConstraintParser(ctx Context) verConstraintParser {
	cache := ctx.SemverConstraintCache()

	return func(cstr string) VerConstraints {
		if c := cache[cstr]; c != nil {
			return c
		}

		constraints, err := semver.NewConstraint(cstr)
		if err != nil {
			return nil
		}
		cache[cstr] = constraints

		return constraints
	}
}
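
// For example (behavioral sketch, hypothetical constraint): against the
// version "1.3.0-beta1", the go-version parser can satisfy ">= 1.2", while
// the stricter semver parser will not match prerelease versions unless the
// constraint itself includes a prerelease, which is why both parsers are
// kept distinct behind verConstraintParser.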