open-nomad/scheduler/feasible.go


// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: MPL-2.0
package scheduler
import (
"fmt"
"reflect"
"regexp"
"strconv"
"strings"
"github.com/hashicorp/go-memdb"
"github.com/hashicorp/go-version"
"github.com/hashicorp/nomad/helper/constraints/semver"
"github.com/hashicorp/nomad/nomad/structs"
psstructs "github.com/hashicorp/nomad/plugins/shared/structs"
"golang.org/x/exp/constraints"
)
const (
FilterConstraintHostVolumes = "missing compatible host volumes"
FilterConstraintCSIPluginTemplate = "CSI plugin %s is missing from client %s"
FilterConstraintCSIPluginUnhealthyTemplate = "CSI plugin %s is unhealthy on client %s"
FilterConstraintCSIPluginMaxVolumesTemplate = "CSI plugin %s has the maximum number of volumes on client %s"
FilterConstraintCSIVolumesLookupFailed = "CSI volume lookup failed"
FilterConstraintCSIVolumeNotFoundTemplate = "missing CSI Volume %s"
FilterConstraintCSIVolumeNoReadTemplate = "CSI volume %s is unschedulable or has exhausted its available reader claims"
FilterConstraintCSIVolumeNoWriteTemplate = "CSI volume %s is unschedulable or is read-only"
FilterConstraintCSIVolumeInUseTemplate = "CSI volume %s has exhausted its available writer claims"
FilterConstraintCSIVolumeGCdAllocationTemplate = "CSI volume %s has exhausted its available writer claims and is claimed by a garbage collected allocation %s; waiting for claim to be released"
FilterConstraintDrivers = "missing drivers"
FilterConstraintDevices = "missing devices"
FilterConstraintsCSIPluginTopology = "did not meet topology requirement"
)
var (
// predatesBridgeFingerprint is a version constraint that matches versions
// of nomad which predate the addition of the bridge network fingerprinter,
// which was added in Nomad v0.12
predatesBridgeFingerprint = mustBridgeConstraint()
)
func mustBridgeConstraint() version.Constraints {
versionC, err := version.NewConstraint("< 0.12")
if err != nil {
panic(err)
}
return versionC
}
// FeasibleIterator is used to iteratively yield nodes that
// match feasibility constraints. The iterators may manage
// some state for performance optimizations.
type FeasibleIterator interface {
// Next yields a feasible node or nil if exhausted
Next() *structs.Node
// Reset is invoked when an allocation has been placed
// to reset any stale state.
Reset()
}
// ContextualIterator is an iterator that can have the job and task group set
// on it.
type ContextualIterator interface {
SetJob(*structs.Job)
SetTaskGroup(*structs.TaskGroup)
}
// FeasibilityChecker is used to check if a single node meets feasibility
// constraints.
type FeasibilityChecker interface {
Feasible(*structs.Node) bool
}
// StaticIterator is a FeasibleIterator which returns nodes
// in a static order. This is used at the base of the iterator
// chain only for testing due to deterministic behavior.
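// Reset allows the iterator to be reused between placements; after a Reset,
// Next wraps back to the start of the node list until every node has been
// seen again.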
type StaticIterator struct {
ctx Context
nodes []*structs.Node
offset int
seen int
}
// NewStaticIterator constructs a static iterator from a list of nodes
func NewStaticIterator(ctx Context, nodes []*structs.Node) *StaticIterator {
iter := &StaticIterator{
ctx: ctx,
nodes: nodes,
}
return iter
}
func (iter *StaticIterator) Next() *structs.Node {
// Check if exhausted
n := len(iter.nodes)
if iter.offset == n || iter.seen == n {
if iter.seen != n { // seen has been Reset() to 0
iter.offset = 0
} else {
return nil
}
}
// Return the next offset, use this one
offset := iter.offset
iter.offset += 1
iter.seen += 1
iter.ctx.Metrics().EvaluateNode()
return iter.nodes[offset]
}
func (iter *StaticIterator) Reset() {
iter.seen = 0
}
func (iter *StaticIterator) SetNodes(nodes []*structs.Node) {
iter.nodes = nodes
iter.offset = 0
iter.seen = 0
}
// NewRandomIterator constructs a static iterator from a list of nodes
// after applying the Fisher-Yates algorithm for a random shuffle. This
// is applied in-place
func NewRandomIterator(ctx Context, nodes []*structs.Node) *StaticIterator {
// shuffle with the Fisher-Yates algorithm
idx, _ := ctx.State().LatestIndex()
shuffleNodes(ctx.Plan(), idx, nodes)
// Create a static iterator
return NewStaticIterator(ctx, nodes)
}
// HostVolumeChecker is a FeasibilityChecker which returns whether a node has
// the host volumes necessary to schedule a task group.
type HostVolumeChecker struct {
ctx Context
// volumes is a map[HostVolumeName][]RequestedVolume. The requested volumes are
// a slice because a single task group may request the same volume multiple times.
volumes map[string][]*structs.VolumeRequest
}
// NewHostVolumeChecker creates a HostVolumeChecker from a set of volumes
func NewHostVolumeChecker(ctx Context) *HostVolumeChecker {
return &HostVolumeChecker{
ctx: ctx,
}
}
// SetVolumes takes the volumes required by a task group and updates the checker.
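// Volume requests marked per_alloc are copied with the allocation's index
// suffix appended to their source, so (for example) a request for source
// "data" by the allocation with index 1 is looked up as "data[1]".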
func (h *HostVolumeChecker) SetVolumes(allocName string, volumes map[string]*structs.VolumeRequest) {
lookupMap := make(map[string][]*structs.VolumeRequest)
// Convert the map from map[DesiredName]Request to map[Source][]Request to improve
// lookup performance. Also filter non-host volumes.
for _, req := range volumes {
if req.Type != structs.VolumeTypeHost {
continue
}
if req.PerAlloc {
// provide a unique volume source per allocation
copied := req.Copy()
copied.Source = copied.Source + structs.AllocSuffix(allocName)
lookupMap[copied.Source] = append(lookupMap[copied.Source], copied)
} else {
lookupMap[req.Source] = append(lookupMap[req.Source], req)
}
}
h.volumes = lookupMap
}
func (h *HostVolumeChecker) Feasible(candidate *structs.Node) bool {
if h.hasVolumes(candidate) {
return true
}
h.ctx.Metrics().FilterNode(candidate, FilterConstraintHostVolumes)
return false
}
func (h *HostVolumeChecker) hasVolumes(n *structs.Node) bool {
rLen := len(h.volumes)
hLen := len(n.HostVolumes)
// Fast path: Requested no volumes. No need to check further.
if rLen == 0 {
return true
}
// Fast path: Requesting more volumes than the node has, can't meet the criteria.
if rLen > hLen {
return false
}
for source, requests := range h.volumes {
nodeVolume, ok := n.HostVolumes[source]
if !ok {
return false
}
// If the volume supports being mounted as ReadWrite, we do not need to
// do further validation for readonly placement.
if !nodeVolume.ReadOnly {
continue
}
// The Volume can only be mounted ReadOnly, validate that no requests for
// it are ReadWrite.
for _, req := range requests {
if !req.ReadOnly {
return false
}
}
}
return true
}
type CSIVolumeChecker struct {
ctx Context
namespace string
jobID string
volumes map[string]*structs.VolumeRequest
}
func NewCSIVolumeChecker(ctx Context) *CSIVolumeChecker {
return &CSIVolumeChecker{
ctx: ctx,
}
}
func (c *CSIVolumeChecker) SetJobID(jobID string) {
c.jobID = jobID
}
func (c *CSIVolumeChecker) SetNamespace(namespace string) {
c.namespace = namespace
}
func (c *CSIVolumeChecker) SetVolumes(allocName string, volumes map[string]*structs.VolumeRequest) {
xs := make(map[string]*structs.VolumeRequest)
// Filter to only CSI Volumes
for alias, req := range volumes {
if req.Type != structs.VolumeTypeCSI {
continue
}
if req.PerAlloc {
// provide a unique volume source per allocation
copied := req.Copy()
copied.Source = copied.Source + structs.AllocSuffix(allocName)
xs[alias] = copied
} else {
xs[alias] = req
}
}
c.volumes = xs
}
func (c *CSIVolumeChecker) Feasible(n *structs.Node) bool {
ok, failReason := c.isFeasible(n)
if ok {
return true
}
c.ctx.Metrics().FilterNode(n, failReason)
return false
}
func (c *CSIVolumeChecker) isFeasible(n *structs.Node) (bool, string) {
// We can mount the volume if
// - if required, a healthy controller plugin is running the driver
// - the volume has free claims, or this job owns the claims
// - this node is running the node plugin, implies matching topology
// Fast path: Requested no volumes. No need to check further.
if len(c.volumes) == 0 {
return true, ""
}
ws := memdb.NewWatchSet()
// Find the count per plugin for this node, so that we can enforce MaxVolumes
pluginCount := map[string]int64{}
iter, err := c.ctx.State().CSIVolumesByNodeID(ws, "", n.ID)
if err != nil {
return false, FilterConstraintCSIVolumesLookupFailed
}
for {
raw := iter.Next()
if raw == nil {
break
}
vol, ok := raw.(*structs.CSIVolume)
if !ok {
continue
}
pluginCount[vol.PluginID] += 1
}
// For volume requests, find volumes and determine feasibility
for _, req := range c.volumes {
vol, err := c.ctx.State().CSIVolumeByID(ws, c.namespace, req.Source)
if err != nil {
return false, FilterConstraintCSIVolumesLookupFailed
}
if vol == nil {
return false, fmt.Sprintf(FilterConstraintCSIVolumeNotFoundTemplate, req.Source)
}
// Check that this node has a healthy running plugin with the right PluginID
plugin, ok := n.CSINodePlugins[vol.PluginID]
if !ok {
return false, fmt.Sprintf(FilterConstraintCSIPluginTemplate, vol.PluginID, n.ID)
}
if !plugin.Healthy {
return false, fmt.Sprintf(FilterConstraintCSIPluginUnhealthyTemplate, vol.PluginID, n.ID)
}
if pluginCount[vol.PluginID] >= plugin.NodeInfo.MaxVolumes {
return false, fmt.Sprintf(FilterConstraintCSIPluginMaxVolumesTemplate, vol.PluginID, n.ID)
}
// CSI spec: "If requisite is specified, the provisioned
// volume MUST be accessible from at least one of the
// requisite topologies."
if len(vol.Topologies) > 0 {
if !plugin.NodeInfo.AccessibleTopology.MatchFound(vol.Topologies) {
return false, FilterConstraintsCSIPluginTopology
}
}
if req.ReadOnly {
if !vol.ReadSchedulable() {
return false, fmt.Sprintf(FilterConstraintCSIVolumeNoReadTemplate, vol.ID)
}
} else {
if !vol.WriteSchedulable() {
return false, fmt.Sprintf(FilterConstraintCSIVolumeNoWriteTemplate, vol.ID)
}
if !vol.HasFreeWriteClaims() {
for id := range vol.WriteAllocs {
a, err := c.ctx.State().AllocByID(ws, id)
// the alloc for this blocking claim has been
// garbage collected but the volumewatcher hasn't
// finished releasing the claim (and possibly
// detaching the volume), so we need to block
// until it can be scheduled
if err != nil || a == nil {
return false, fmt.Sprintf(
FilterConstraintCSIVolumeGCdAllocationTemplate, vol.ID, id)
} else if a.Namespace != c.namespace || a.JobID != c.jobID {
// the blocking claim is for another live job
// so it's legitimately blocking more write
// claims
return false, fmt.Sprintf(
FilterConstraintCSIVolumeInUseTemplate, vol.ID)
}
}
}
}
}
return true, ""
}
// NetworkChecker is a FeasibilityChecker which returns whether a node has the
// network resources necessary to schedule the task group
type NetworkChecker struct {
ctx Context
networkMode string
ports []structs.Port
}
func NewNetworkChecker(ctx Context) *NetworkChecker {
return &NetworkChecker{ctx: ctx, networkMode: "host"}
}
func (c *NetworkChecker) SetNetwork(network *structs.NetworkResource) {
c.networkMode = network.Mode
if c.networkMode == "" {
c.networkMode = "host"
}
c.ports = make([]structs.Port, 0, len(network.DynamicPorts)+len(network.ReservedPorts))
c.ports = append(c.ports, network.DynamicPorts...)
c.ports = append(c.ports, network.ReservedPorts...)
}
func (c *NetworkChecker) Feasible(option *structs.Node) bool {
// Allow jobs not requiring any network resources
if c.networkMode == "none" {
return true
}
if !c.hasNetwork(option) {
// special case - if the client is running a version older than 0.12 but
// the server is 0.12 or newer, we need to maintain an upgrade path for
// jobs looking for a bridge network that will not have been fingerprinted
// on the client (which was added in 0.12)
if c.networkMode == "bridge" {
sv, err := version.NewSemver(option.Attributes["nomad.version"])
if err == nil && predatesBridgeFingerprint.Check(sv) {
return true
}
}
c.ctx.Metrics().FilterNode(option, "missing network")
return false
}
if c.ports != nil {
if !c.hasHostNetworks(option) {
return false
}
}
return true
}
func (c *NetworkChecker) hasHostNetworks(option *structs.Node) bool {
for _, port := range c.ports {
if port.HostNetwork != "" {
hostNetworkValue, hostNetworkOk := resolveTarget(port.HostNetwork, option)
if !hostNetworkOk {
c.ctx.Metrics().FilterNode(option, fmt.Sprintf("invalid host network %q template for port %q", port.HostNetwork, port.Label))
return false
}
found := false
for _, net := range option.NodeResources.NodeNetworks {
if net.HasAlias(hostNetworkValue) {
found = true
break
}
}
if !found {
c.ctx.Metrics().FilterNode(option, fmt.Sprintf("missing host network %q for port %q", hostNetworkValue, port.Label))
return false
}
}
}
return true
}
func (c *NetworkChecker) hasNetwork(option *structs.Node) bool {
if option.NodeResources == nil {
return false
}
for _, nw := range option.NodeResources.Networks {
mode := nw.Mode
if mode == "" {
mode = "host"
}
if mode == c.networkMode {
return true
}
}
return false
}
// DriverChecker is a FeasibilityChecker which returns whether a node has the
// drivers necessary to schedule a task group.
type DriverChecker struct {
ctx Context
drivers map[string]struct{}
}
// NewDriverChecker creates a DriverChecker from a set of drivers
func NewDriverChecker(ctx Context, drivers map[string]struct{}) *DriverChecker {
return &DriverChecker{
ctx: ctx,
drivers: drivers,
}
}
func (c *DriverChecker) SetDrivers(d map[string]struct{}) {
c.drivers = d
}
func (c *DriverChecker) Feasible(option *structs.Node) bool {
// Use this node if possible
if c.hasDrivers(option) {
return true
}
c.ctx.Metrics().FilterNode(option, FilterConstraintDrivers)
return false
}
// hasDrivers is used to check if the node has all the appropriate
// drivers for this task group. Drivers are registered as node attributes
// like "driver.docker=1" with their corresponding version.
func (c *DriverChecker) hasDrivers(option *structs.Node) bool {
for driver := range c.drivers {
driverStr := fmt.Sprintf("driver.%s", driver)
// COMPAT: Remove in 0.10: As of Nomad 0.8, nodes have a DriverInfo that
// corresponds with every driver. As a Nomad server might be on a later
// version than a Nomad client, we need to check for compatibility here
// to verify the client supports this.
if driverInfo, ok := option.Drivers[driver]; ok {
if driverInfo == nil {
c.ctx.Logger().Named("driver_checker").Warn("node has no driver info set", "node_id", option.ID, "driver", driver)
return false
}
if driverInfo.Detected && driverInfo.Healthy {
continue
} else {
return false
}
}
value, ok := option.Attributes[driverStr]
if !ok {
return false
}
enabled, err := strconv.ParseBool(value)
if err != nil {
c.ctx.Logger().Named("driver_checker").Warn("node has invalid driver setting", "node_id", option.ID, "driver", driver, "val", value)
return false
}
if !enabled {
return false
}
}
return true
}
// DistinctHostsIterator is a FeasibleIterator which returns nodes that pass the
// distinct_hosts constraint. The constraint ensures that multiple allocations
// do not exist on the same node.
type DistinctHostsIterator struct {
ctx Context
source FeasibleIterator
tg *structs.TaskGroup
job *structs.Job
// Store whether the Job or TaskGroup has a distinct_hosts constraints so
// they don't have to be calculated every time Next() is called.
tgDistinctHosts bool
jobDistinctHosts bool
}
// NewDistinctHostsIterator creates a DistinctHostsIterator from a source.
func NewDistinctHostsIterator(ctx Context, source FeasibleIterator) *DistinctHostsIterator {
return &DistinctHostsIterator{
ctx: ctx,
source: source,
}
}
func (iter *DistinctHostsIterator) SetTaskGroup(tg *structs.TaskGroup) {
iter.tg = tg
iter.tgDistinctHosts = iter.hasDistinctHostsConstraint(tg.Constraints)
}
func (iter *DistinctHostsIterator) SetJob(job *structs.Job) {
iter.job = job
iter.jobDistinctHosts = iter.hasDistinctHostsConstraint(job.Constraints)
}
func (iter *DistinctHostsIterator) hasDistinctHostsConstraint(constraints []*structs.Constraint) bool {
for _, con := range constraints {
if con.Operand == structs.ConstraintDistinctHosts {
// distinct_hosts defaults to true
if con.RTarget == "" {
return true
}
enabled, err := strconv.ParseBool(con.RTarget)
// If the value is unparsable as a boolean, fall back to the old behavior
// of enforcing the constraint when it appears.
return err != nil || enabled
}
}
return false
}
func (iter *DistinctHostsIterator) Next() *structs.Node {
for {
// Get the next option from the source
option := iter.source.Next()
// Hot-path if the option is nil or there are no distinct_hosts or
// distinct_property constraints.
hosts := iter.jobDistinctHosts || iter.tgDistinctHosts
if option == nil || !hosts {
return option
}
// Check if the host constraints are satisfied
if !iter.satisfiesDistinctHosts(option) {
iter.ctx.Metrics().FilterNode(option, structs.ConstraintDistinctHosts)
continue
}
return option
}
}
// satisfiesDistinctHosts checks if the node satisfies a distinct_hosts
// constraint either specified at the job level or the TaskGroup level.
func (iter *DistinctHostsIterator) satisfiesDistinctHosts(option *structs.Node) bool {
// Check if there is no constraint set.
if !(iter.jobDistinctHosts || iter.tgDistinctHosts) {
return true
}
// Get the proposed allocations
proposed, err := iter.ctx.ProposedAllocs(option.ID)
if err != nil {
iter.ctx.Logger().Named("distinct_hosts").Error("failed to get proposed allocations", "error", err)
return false
}
// Skip the node if the task group has already been allocated on it.
for _, alloc := range proposed {
// If the job has a distinct_hosts constraint we need an alloc collision
// on the Namespace,JobID but if the constraint is on the TaskGroup then
// we need both a job and TaskGroup collision.
jobCollision := alloc.JobID == iter.job.ID && alloc.Namespace == iter.job.Namespace
taskCollision := alloc.TaskGroup == iter.tg.Name
if iter.jobDistinctHosts && jobCollision || jobCollision && taskCollision {
return false
}
}
return true
}
func (iter *DistinctHostsIterator) Reset() {
iter.source.Reset()
}
// DistinctPropertyIterator is a FeasibleIterator which returns nodes that pass the
// distinct_property constraint. The constraint ensures that multiple allocations
// do not use the same value of the given property.
type DistinctPropertyIterator struct {
ctx Context
source FeasibleIterator
tg *structs.TaskGroup
job *structs.Job
hasDistinctPropertyConstraints bool
jobPropertySets []*propertySet
groupPropertySets map[string][]*propertySet
}
// NewDistinctPropertyIterator creates a DistinctPropertyIterator from a source.
func NewDistinctPropertyIterator(ctx Context, source FeasibleIterator) *DistinctPropertyIterator {
return &DistinctPropertyIterator{
ctx: ctx,
source: source,
groupPropertySets: make(map[string][]*propertySet),
}
}
func (iter *DistinctPropertyIterator) SetTaskGroup(tg *structs.TaskGroup) {
iter.tg = tg
// Build the property set at the taskgroup level
if _, ok := iter.groupPropertySets[tg.Name]; !ok {
for _, c := range tg.Constraints {
if c.Operand != structs.ConstraintDistinctProperty {
continue
}
pset := NewPropertySet(iter.ctx, iter.job)
pset.SetTGConstraint(c, tg.Name)
iter.groupPropertySets[tg.Name] = append(iter.groupPropertySets[tg.Name], pset)
}
}
// Check if there is a distinct property
iter.hasDistinctPropertyConstraints = len(iter.jobPropertySets) != 0 || len(iter.groupPropertySets[tg.Name]) != 0
}
func (iter *DistinctPropertyIterator) SetJob(job *structs.Job) {
iter.job = job
// Build the property set at the job level
for _, c := range job.Constraints {
if c.Operand != structs.ConstraintDistinctProperty {
continue
}
pset := NewPropertySet(iter.ctx, job)
pset.SetJobConstraint(c)
iter.jobPropertySets = append(iter.jobPropertySets, pset)
}
}
func (iter *DistinctPropertyIterator) Next() *structs.Node {
for {
// Get the next option from the source
option := iter.source.Next()
// Hot path if there is nothing to check
if option == nil || !iter.hasDistinctPropertyConstraints {
return option
}
// Check if the constraints are met
if !iter.satisfiesProperties(option, iter.jobPropertySets) ||
!iter.satisfiesProperties(option, iter.groupPropertySets[iter.tg.Name]) {
continue
}
return option
}
}
// satisfiesProperties returns whether the option satisfies the set of
// properties. If not it will be filtered.
func (iter *DistinctPropertyIterator) satisfiesProperties(option *structs.Node, set []*propertySet) bool {
for _, ps := range set {
if satisfies, reason := ps.SatisfiesDistinctProperties(option, iter.tg.Name); !satisfies {
iter.ctx.Metrics().FilterNode(option, reason)
return false
}
}
return true
}
func (iter *DistinctPropertyIterator) Reset() {
iter.source.Reset()
for _, ps := range iter.jobPropertySets {
ps.PopulateProposed()
}
for _, sets := range iter.groupPropertySets {
for _, ps := range sets {
ps.PopulateProposed()
}
}
}
// ConstraintChecker is a FeasibilityChecker which returns nodes that match a
// given set of constraints. This is used to filter on job, task group, and task
// constraints.
type ConstraintChecker struct {
ctx Context
constraints []*structs.Constraint
}
// NewConstraintChecker creates a ConstraintChecker for a set of constraints
func NewConstraintChecker(ctx Context, constraints []*structs.Constraint) *ConstraintChecker {
return &ConstraintChecker{
ctx: ctx,
constraints: constraints,
}
}
func (c *ConstraintChecker) SetConstraints(constraints []*structs.Constraint) {
c.constraints = constraints
}
func (c *ConstraintChecker) Feasible(option *structs.Node) bool {
// Use this node if possible
for _, constraint := range c.constraints {
if !c.meetsConstraint(constraint, option) {
c.ctx.Metrics().FilterNode(option, constraint.String())
return false
}
}
return true
}
func (c *ConstraintChecker) meetsConstraint(constraint *structs.Constraint, option *structs.Node) bool {
// Resolve the targets. Targets that are not present are treated as `nil`.
// This is to allow for matching constraints where a target is not present.
lVal, lOk := resolveTarget(constraint.LTarget, option)
rVal, rOk := resolveTarget(constraint.RTarget, option)
// Check if satisfied
return checkConstraint(c.ctx, constraint.Operand, lVal, rVal, lOk, rOk)
}
// resolveTarget is used to resolve the LTarget and RTarget of a Constraint.
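// For example, "${node.datacenter}" resolves to the node's Datacenter field and
// "${attr.kernel.name}" is looked up in node.Attributes["kernel.name"]; a target
// without the "${" prefix, such as "linux", is returned as a literal value.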
func resolveTarget(target string, node *structs.Node) (string, bool) {
// If no prefix, this must be a literal value
if !strings.HasPrefix(target, "${") {
return target, true
}
// Handle the interpolations
switch {
case "${node.unique.id}" == target:
return node.ID, true
case "${node.datacenter}" == target:
return node.Datacenter, true
case "${node.unique.name}" == target:
return node.Name, true
case "${node.class}" == target:
return node.NodeClass, true
case "${node.pool}" == target:
return node.NodePool, true
case strings.HasPrefix(target, "${attr."):
attr := strings.TrimSuffix(strings.TrimPrefix(target, "${attr."), "}")
val, ok := node.Attributes[attr]
return val, ok
case strings.HasPrefix(target, "${meta."):
meta := strings.TrimSuffix(strings.TrimPrefix(target, "${meta."), "}")
val, ok := node.Meta[meta]
return val, ok
default:
return "", false
}
}
// checkConstraint checks if a constraint is satisfied. The lVal and rVal
// interfaces may be nil.
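// For example, checkConstraint(ctx, "==", "linux", "linux", true, true) is true,
// while structs.ConstraintAttributeIsSet only requires that the left-hand target
// resolved (lFound) and ignores the right-hand side entirely.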
func checkConstraint(ctx Context, operand string, lVal, rVal interface{}, lFound, rFound bool) bool {
// Check for constraints not handled by this checker.
switch operand {
case structs.ConstraintDistinctHosts, structs.ConstraintDistinctProperty:
return true
default:
break
}
switch operand {
case "=", "==", "is":
return lFound && rFound && reflect.DeepEqual(lVal, rVal)
case "!=", "not":
return !reflect.DeepEqual(lVal, rVal)
case "<", "<=", ">", ">=":
return lFound && rFound && checkOrder(operand, lVal, rVal)
case structs.ConstraintAttributeIsSet:
return lFound
case structs.ConstraintAttributeIsNotSet:
return !lFound
case structs.ConstraintVersion:
parser := newVersionConstraintParser(ctx)
return lFound && rFound && checkVersionMatch(ctx, parser, lVal, rVal)
case structs.ConstraintSemver:
parser := newSemverConstraintParser(ctx)
return lFound && rFound && checkVersionMatch(ctx, parser, lVal, rVal)
case structs.ConstraintRegex:
return lFound && rFound && checkRegexpMatch(ctx, lVal, rVal)
case structs.ConstraintSetContains, structs.ConstraintSetContainsAll:
return lFound && rFound && checkSetContainsAll(ctx, lVal, rVal)
case structs.ConstraintSetContainsAny:
return lFound && rFound && checkSetContainsAny(lVal, rVal)
default:
return false
}
}
// checkAffinity checks if a specific affinity is satisfied
func checkAffinity(ctx Context, operand string, lVal, rVal interface{}, lFound, rFound bool) bool {
return checkConstraint(ctx, operand, lVal, rVal, lFound, rFound)
}
// checkAttributeAffinity checks if an affinity is satisfied
func checkAttributeAffinity(ctx Context, operand string, lVal, rVal *psstructs.Attribute, lFound, rFound bool) bool {
return checkAttributeConstraint(ctx, operand, lVal, rVal, lFound, rFound)
}
// checkOrder returns the result of (lVal operand rVal). The comparison is
// done as integers if possible, or floats if possible, and lexically otherwise.
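// For example, with operand "<" the values "2" and "10" compare numerically
// ("2" < "10" is true), whereas a purely lexical comparison of the same
// strings would be false because '2' sorts after '1'.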
func checkOrder(operand string, lVal, rVal any) bool {
left, leftOK := lVal.(string)
right, rightOK := rVal.(string)
if !leftOK || !rightOK {
return false
}
if result, ok := checkIntegralOrder(operand, left, right); ok {
return result
}
if result, ok := checkFloatOrder(operand, left, right); ok {
return result
}
return checkLexicalOrder(operand, left, right)
}
// checkIntegralOrder compares lVal and rVal as integers, returning whether
// both values could be parsed as integers.
func checkIntegralOrder(op, lVal, rVal string) (bool, bool) {
left, lErr := strconv.ParseInt(lVal, 10, 64)
if lErr != nil {
return false, false
}
right, rErr := strconv.ParseInt(rVal, 10, 64)
if rErr != nil {
return false, false
}
return compareOrder(op, left, right), true
}
// checkFloatOrder compares lVal and rVal as floats, returning whether both
// values could be parsed as floats.
func checkFloatOrder(op, lVal, rVal string) (bool, bool) {
left, lErr := strconv.ParseFloat(lVal, 64)
if lErr != nil {
return false, false
}
right, rErr := strconv.ParseFloat(rVal, 64)
if rErr != nil {
return false, false
}
return compareOrder(op, left, right), true
}
// checkLexicalOrder compares lVal and rVal lexically.
func checkLexicalOrder(op string, lVal, rVal string) bool {
return compareOrder[string](op, lVal, rVal)
}
// compareOrder returns the result of the expression (left op right)
func compareOrder[T constraints.Ordered](op string, left, right T) bool {
switch op {
case "<":
return left < right
case "<=":
return left <= right
case ">":
return left > right
case ">=":
return left >= right
default:
return false
}
}
// checkVersionMatch is used to compare a version on the
// left hand side with a set of constraints on the right hand side
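// For example, lVal "1.2.3" satisfies the constraint string ">= 1.2, < 2.0".
// An integer lVal is first formatted as its decimal string, and an unparsable
// version or constraint simply fails the check.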
func checkVersionMatch(_ Context, parse verConstraintParser, lVal, rVal interface{}) bool {
// Parse the version
var versionStr string
switch v := lVal.(type) {
case string:
versionStr = v
case int:
versionStr = fmt.Sprintf("%d", v)
default:
return false
}
// Parse the version
vers, err := version.NewVersion(versionStr)
if err != nil {
return false
}
// Constraint must be a string
constraintStr, ok := rVal.(string)
if !ok {
return false
}
// Parse the constraints
c := parse(constraintStr)
if c == nil {
return false
}
// Check the constraints against the version
return c.Check(vers)
}
// checkAttributeVersionMatch is used to compare a version on the
// left hand side with a set of constraints on the right hand side
func checkAttributeVersionMatch(_ Context, parse verConstraintParser, lVal, rVal *psstructs.Attribute) bool {
// Parse the version
var versionStr string
if s, ok := lVal.GetString(); ok {
versionStr = s
} else if i, ok := lVal.GetInt(); ok {
versionStr = fmt.Sprintf("%d", i)
} else {
return false
}
// Parse the version
vers, err := version.NewVersion(versionStr)
if err != nil {
return false
}
// Constraint must be a string
constraintStr, ok := rVal.GetString()
if !ok {
return false
}
// Parse the constraints
c := parse(constraintStr)
if c == nil {
return false
}
// Check the constraints against the version
return c.Check(vers)
}
// checkRegexpMatch is used to compare a value on the
// left hand side with a regexp on the right hand side
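// For example, lVal "ubuntu-18.04" matches rVal "^ubuntu". Compiled patterns
// are cached in the scheduling context so the same expression is not
// recompiled for every candidate node.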
func checkRegexpMatch(ctx Context, lVal, rVal interface{}) bool {
// Ensure left-hand is string
lStr, ok := lVal.(string)
if !ok {
return false
}
// Regexp must be a string
regexpStr, ok := rVal.(string)
if !ok {
return false
}
// Check the cache
cache := ctx.RegexpCache()
re := cache[regexpStr]
// Parse the regexp
if re == nil {
var err error
re, err = regexp.Compile(regexpStr)
if err != nil {
return false
}
cache[regexpStr] = re
}
// Look for a match
return re.MatchString(lStr)
}
// checkSetContainsAll is used to see if the left hand side contains all of
// the values on the right hand side
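// For example, lVal "a,b,c" contains all of rVal "a, c" (values are split on
// commas and whitespace-trimmed), but does not contain all of rVal "a,d".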
func checkSetContainsAll(_ Context, lVal, rVal interface{}) bool {
// Ensure left-hand is string
lStr, ok := lVal.(string)
if !ok {
return false
}
// RHS must be a string
rStr, ok := rVal.(string)
if !ok {
return false
}
input := strings.Split(lStr, ",")
lookup := make(map[string]struct{}, len(input))
for _, in := range input {
cleaned := strings.TrimSpace(in)
lookup[cleaned] = struct{}{}
}
for _, r := range strings.Split(rStr, ",") {
cleaned := strings.TrimSpace(r)
if _, ok := lookup[cleaned]; !ok {
return false
}
}
return true
}
// checkSetContainsAny is used to see if the left hand side contains any
// values on the right hand side
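// For example, lVal "a,b" and rVal "c,b" is true because "b" appears on both
// sides, while rVal "c,d" would be false.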
func checkSetContainsAny(lVal, rVal interface{}) bool {
// Ensure left-hand is string
lStr, ok := lVal.(string)
if !ok {
return false
}
// RHS must be a string
rStr, ok := rVal.(string)
if !ok {
return false
}
input := strings.Split(lStr, ",")
lookup := make(map[string]struct{}, len(input))
for _, in := range input {
cleaned := strings.TrimSpace(in)
lookup[cleaned] = struct{}{}
}
for _, r := range strings.Split(rStr, ",") {
cleaned := strings.TrimSpace(r)
if _, ok := lookup[cleaned]; ok {
return true
}
}
return false
}
// FeasibilityWrapper is a FeasibleIterator which wraps both job and task group
// FeasibilityCheckers, and allows feasibility checking to be skipped if the
// computed node class has previously been marked as eligible or ineligible.
type FeasibilityWrapper struct {
ctx Context
source FeasibleIterator
jobCheckers []FeasibilityChecker
tgCheckers []FeasibilityChecker
tgAvailable []FeasibilityChecker
tg string
}
// NewFeasibilityWrapper returns a FeasibleIterator based on the passed source
// and FeasibilityCheckers.
func NewFeasibilityWrapper(ctx Context, source FeasibleIterator,
jobCheckers, tgCheckers, tgAvailable []FeasibilityChecker) *FeasibilityWrapper {
return &FeasibilityWrapper{
ctx: ctx,
source: source,
jobCheckers: jobCheckers,
tgCheckers: tgCheckers,
tgAvailable: tgAvailable,
}
}
func (w *FeasibilityWrapper) SetTaskGroup(tg string) {
w.tg = tg
}
func (w *FeasibilityWrapper) Reset() {
w.source.Reset()
}
// Next returns an eligible node, only running the FeasibilityCheckers as needed
// based on the source's computed node class.
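// Nodes that share a computed class reuse the job and task group eligibility
// verdicts recorded here, so a class already marked ineligible is filtered
// without re-running its checkers.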
func (w *FeasibilityWrapper) Next() *structs.Node {
evalElig := w.ctx.Eligibility()
metrics := w.ctx.Metrics()
OUTER:
for {
// Get the next option from the source
option := w.source.Next()
if option == nil {
return nil
}
// Check if the job has been marked as eligible or ineligible.
jobEscaped, jobUnknown := false, false
switch evalElig.JobStatus(option.ComputedClass) {
case EvalComputedClassIneligible:
// Fast path the ineligible case
metrics.FilterNode(option, "computed class ineligible")
continue
case EvalComputedClassEscaped:
jobEscaped = true
case EvalComputedClassUnknown:
jobUnknown = true
}
// Run the job feasibility checks.
for _, check := range w.jobCheckers {
feasible := check.Feasible(option)
if !feasible {
// If the job hasn't escaped, set it to be ineligible since it
// failed a job check.
if !jobEscaped {
evalElig.SetJobEligibility(false, option.ComputedClass)
}
continue OUTER
}
}
// Set the job eligibility if the constraints weren't escaped and it
// hasn't been set before.
if !jobEscaped && jobUnknown {
evalElig.SetJobEligibility(true, option.ComputedClass)
}
// Check if the task group has been marked as eligible or ineligible.
tgEscaped, tgUnknown := false, false
switch evalElig.TaskGroupStatus(w.tg, option.ComputedClass) {
case EvalComputedClassIneligible:
// Fast path the ineligible case
metrics.FilterNode(option, "computed class ineligible")
continue
case EvalComputedClassEligible:
// Fast path the eligible case
if w.available(option) {
return option
}
// We match the class but are temporarily unavailable
continue OUTER
case EvalComputedClassEscaped:
tgEscaped = true
case EvalComputedClassUnknown:
tgUnknown = true
}
// Run the task group feasibility checks.
for _, check := range w.tgCheckers {
feasible := check.Feasible(option)
if !feasible {
// If the task group hasn't escaped, set it to be ineligible
// since it failed a check.
if !tgEscaped {
evalElig.SetTaskGroupEligibility(false, w.tg, option.ComputedClass)
}
continue OUTER
}
}
// Set the task group eligibility if the constraints weren't escaped and
// it hasn't been set before.
if !tgEscaped && tgUnknown {
evalElig.SetTaskGroupEligibility(true, w.tg, option.ComputedClass)
}
// tgAvailable handlers are available transiently, so we test them without
// affecting the computed class
if !w.available(option) {
continue OUTER
}
return option
}
}
// available checks transient feasibility checkers which depend on changing conditions,
// e.g. the health status of a plugin or driver, or that are not considered in node
// computed class, e.g. host volumes.
func (w *FeasibilityWrapper) available(option *structs.Node) bool {
// If we don't have any availability checks, we're available
if len(w.tgAvailable) == 0 {
return true
}
for _, check := range w.tgAvailable {
if !check.Feasible(option) {
return false
}
}
return true
}
// DeviceChecker is a FeasibilityChecker which returns whether a node has the
// devices necessary to schedule a task group.
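// For example, a group whose tasks request two instances of an "nvidia/gpu"
// device is only feasible on nodes that expose at least two healthy instances
// of a device matching that ID and the request's constraints.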
type DeviceChecker struct {
ctx Context
// required is the set of requested devices that must exist on the node
required []*structs.RequestedDevice
// requiresDevices indicates if the task group requires devices
requiresDevices bool
}
// NewDeviceChecker creates a DeviceChecker
func NewDeviceChecker(ctx Context) *DeviceChecker {
return &DeviceChecker{
ctx: ctx,
}
}
func (c *DeviceChecker) SetTaskGroup(tg *structs.TaskGroup) {
c.required = nil
for _, task := range tg.Tasks {
c.required = append(c.required, task.Resources.Devices...)
}
c.requiresDevices = len(c.required) != 0
}
func (c *DeviceChecker) Feasible(option *structs.Node) bool {
if c.hasDevices(option) {
return true
}
c.ctx.Metrics().FilterNode(option, FilterConstraintDevices)
return false
}
func (c *DeviceChecker) hasDevices(option *structs.Node) bool {
if !c.requiresDevices {
return true
}
// COMPAT(0.11): Remove in 0.11
// The node does not have the new resources object so it can not have any
// devices
if option.NodeResources == nil {
return false
}
// Check if the node has any devices
nodeDevs := option.NodeResources.Devices
if len(nodeDevs) == 0 {
return false
}
// Create a mapping of node devices to the remaining count
available := make(map[*structs.NodeDeviceResource]uint64, len(nodeDevs))
for _, d := range nodeDevs {
var healthy uint64 = 0
for _, instance := range d.Instances {
if instance.Healthy {
healthy++
}
}
if healthy != 0 {
available[d] = healthy
}
}
// Go through the required devices trying to find matches
OUTER:
for _, req := range c.required {
// Determine how many there are to place
desiredCount := req.Count
// Go through the device resources and see if we have a match
for d, unused := range available {
if unused == 0 {
// Depleted
continue
}
// First check we have enough instances of the device since this is
// cheaper than checking the constraints
if unused < desiredCount {
continue
}
// Check the constraints
if nodeDeviceMatches(c.ctx, d, req) {
// Consume the instances
available[d] -= desiredCount
// Move on to the next request
continue OUTER
}
}
// We couldn't match the request for the device
return false
}
// Only satisfied if there are no more devices to place
return true
}
// nodeDeviceMatches checks if the device matches the request and its
// constraints. It doesn't check the count.
func nodeDeviceMatches(ctx Context, d *structs.NodeDeviceResource, req *structs.RequestedDevice) bool {
if !d.ID().Matches(req.ID()) {
return false
}
// There are no constraints to consider
if len(req.Constraints) == 0 {
return true
}
for _, c := range req.Constraints {
// Resolve the targets
lVal, lOk := resolveDeviceTarget(c.LTarget, d)
rVal, rOk := resolveDeviceTarget(c.RTarget, d)
// Check if satisfied
if !checkAttributeConstraint(ctx, c.Operand, lVal, rVal, lOk, rOk) {
return false
}
}
return true
}
// resolveDeviceTarget is used to resolve the LTarget and RTarget of a Constraint
// when used on a device
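// For example, "${device.vendor}" resolves to the device's Vendor field and
// "${device.attr.memory}" is looked up in the device's Attributes map, while a
// target without the "${" prefix is parsed directly as a literal attribute.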
func resolveDeviceTarget(target string, d *structs.NodeDeviceResource) (*psstructs.Attribute, bool) {
// If no prefix, this must be a literal value
if !strings.HasPrefix(target, "${") {
return psstructs.ParseAttribute(target), true
}
// Handle the interpolations
switch {
case "${device.ids}" == target:
ids := make([]string, len(d.Instances))
for i, device := range d.Instances {
ids[i] = device.ID
}
return psstructs.NewStringAttribute(strings.Join(ids, ",")), true
case "${device.model}" == target:
return psstructs.NewStringAttribute(d.Name), true
case "${device.vendor}" == target:
return psstructs.NewStringAttribute(d.Vendor), true
case "${device.type}" == target:
return psstructs.NewStringAttribute(d.Type), true
case strings.HasPrefix(target, "${device.attr."):
attr := strings.TrimPrefix(target, "${device.attr.")
attr = strings.TrimSuffix(attr, "}")
val, ok := d.Attributes[attr]
return val, ok
default:
return nil, false
}
}
// checkAttributeConstraint checks if a constraint is satisfied. nil equality
// comparisons are considered to be false.
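// For the "!=" operand a value present on exactly one side satisfies the
// constraint (nil != some), while two missing values do not (nil != nil is
// treated as false).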
func checkAttributeConstraint(ctx Context, operand string, lVal, rVal *psstructs.Attribute, lFound, rFound bool) bool {
// Check for constraints not handled by this checker.
switch operand {
case structs.ConstraintDistinctHosts, structs.ConstraintDistinctProperty:
return true
default:
break
}
switch operand {
case "!=", "not":
// Neither value was provided, nil != nil == false
if !(lFound || rFound) {
return false
}
// Only 1 value was provided, therefore nil != some == true
if lFound != rFound {
return true
}
// Both values were provided, so actually compare them
v, ok := lVal.Compare(rVal)
if !ok {
return false
}
return v != 0
case "<", "<=", ">", ">=", "=", "==", "is":
if !(lFound && rFound) {
return false
}
v, ok := lVal.Compare(rVal)
if !ok {
return false
}
switch operand {
case "is", "==", "=":
return v == 0
case "<":
return v == -1
case "<=":
return v != 1
case ">":
return v == 1
case ">=":
return v != -1
default:
return false
}
case structs.ConstraintVersion:
if !(lFound && rFound) {
return false
}
parser := newVersionConstraintParser(ctx)
return checkAttributeVersionMatch(ctx, parser, lVal, rVal)
case structs.ConstraintSemver:
if !(lFound && rFound) {
return false
}
parser := newSemverConstraintParser(ctx)
return checkAttributeVersionMatch(ctx, parser, lVal, rVal)
case structs.ConstraintRegex:
if !(lFound && rFound) {
return false
}
ls, ok := lVal.GetString()
rs, ok2 := rVal.GetString()
if !ok || !ok2 {
return false
}
return checkRegexpMatch(ctx, ls, rs)
case structs.ConstraintSetContains, structs.ConstraintSetContainsAll:
if !(lFound && rFound) {
return false
}
ls, ok := lVal.GetString()
rs, ok2 := rVal.GetString()
if !ok || !ok2 {
return false
}
return checkSetContainsAll(ctx, ls, rs)
case structs.ConstraintSetContainsAny:
if !(lFound && rFound) {
return false
}
ls, ok := lVal.GetString()
rs, ok2 := rVal.GetString()
if !ok || !ok2 {
return false
}
return checkSetContainsAny(ls, rs)
case structs.ConstraintAttributeIsSet:
return lFound
case structs.ConstraintAttributeIsNotSet:
return !lFound
default:
return false
}
}
// VerConstraints is the interface implemented by both go-version constraints
// and semver constraints.
type VerConstraints interface {
Check(v *version.Version) bool
String() string
}
// verConstraintParser returns a version constraints implementation (go-version
// or semver).
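// Parsers returned by newVersionConstraintParser and newSemverConstraintParser
// cache parsed constraints in the scheduling context, keyed by the raw
// constraint string, so the same expression is not re-parsed for every node.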
type verConstraintParser func(verConstraint string) VerConstraints
func newVersionConstraintParser(ctx Context) verConstraintParser {
cache := ctx.VersionConstraintCache()
return func(cstr string) VerConstraints {
if c := cache[cstr]; c != nil {
return c
}
constraint, err := version.NewConstraint(cstr)
if err != nil {
return nil
}
cache[cstr] = constraint
return constraint
}
}
func newSemverConstraintParser(ctx Context) verConstraintParser {
cache := ctx.SemverConstraintCache()
return func(cstr string) VerConstraints {
if c := cache[cstr]; c != nil {
return c
}
constraint, err := semver.NewConstraint(cstr)
if err != nil {
return nil
}
cache[cstr] = constraint
return constraint
}
}