backport of commit 0ccf942b26f8c47582f18f324114d02d0bb03a43 (#18684)

Co-authored-by: Luiz Aoqui <luiz@hashicorp.com>
Authored by hc-github-team-nomad-core on 2023-10-06 11:19:52 -05:00; committed by GitHub
parent bfc15e5aa0
commit 102e31bf3c
3 changed files with 105 additions and 5 deletions


@@ -1237,7 +1237,8 @@ OUTER:
 }
 
 // available checks transient feasibility checkers which depend on changing conditions,
-// e.g. the health status of a plugin or driver
+// e.g. the health status of a plugin or driver, or that are not considered in node
+// computed class, e.g. host volumes.
 func (w *FeasibilityWrapper) available(option *structs.Node) bool {
 	// If we don't have any availability checks, we're available
 	if len(w.tgAvailable) == 0 {

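To make the distinction concrete, here is a minimal, self-contained sketch of the two groups of checks the wrapper distinguishes. The node, checker, and wrapper types below are simplified stand-ins invented for illustration, not Nomad's structs.Node, FeasibilityChecker, or FeasibilityWrapper; the point is only why a check whose input (such as host volumes) is excluded from the computed node class must run per node instead of relying on a cached per-class verdict.

package main

import "fmt"

// node is a pared-down stand-in for structs.Node. ComputedClass hashes a
// subset of node fields; HostVolumes is deliberately not part of that hash.
type node struct {
	ID            string
	ComputedClass string
	HostVolumes   map[string]string // volume name -> host path
}

// checker is a stand-in for a FeasibilityChecker.
type checker func(n *node) bool

// wrapper mimics the split: classChecks results may be memoized per computed
// class, availChecks must be evaluated for every node because their inputs
// (plugin health, host volumes, ...) are not captured by the class.
type wrapper struct {
	classChecks []checker
	availChecks []checker
	classCache  map[string]bool // computed class -> eligible?
}

func (w *wrapper) feasible(n *node) bool {
	ok, seen := w.classCache[n.ComputedClass]
	if !seen {
		ok = true
		for _, c := range w.classChecks {
			if !c(n) {
				ok = false
				break
			}
		}
		w.classCache[n.ComputedClass] = ok
	}
	if !ok {
		return false
	}
	for _, c := range w.availChecks { // never cached, checked per node
		if !c(n) {
			return false
		}
	}
	return true
}

func main() {
	// Two nodes share a computed class but differ in host volumes.
	n1 := &node{ID: "n1", ComputedClass: "c1", HostVolumes: map[string]string{"data": "/srv/data"}}
	n2 := &node{ID: "n2", ComputedClass: "c1"}
	wantsVolume := func(n *node) bool { _, ok := n.HostVolumes["data"]; return ok }

	// Treated as class-cacheable, n1's verdict leaks to n2 via the shared class.
	cached := &wrapper{classChecks: []checker{wantsVolume}, classCache: map[string]bool{}}
	fmt.Println(cached.feasible(n1), cached.feasible(n2)) // true true (n2 wrongly accepted)

	// Treated as an availability check, it is re-evaluated on each node.
	perNode := &wrapper{availChecks: []checker{wantsVolume}, classCache: map[string]bool{}}
	fmt.Println(perNode.feasible(n1), perNode.feasible(n2)) // true false
}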

@@ -259,11 +259,13 @@ func NewSystemStack(sysbatch bool, ctx Context) *SystemStack {
 	tgs := []FeasibilityChecker{
 		s.taskGroupDrivers,
 		s.taskGroupConstraint,
-		s.taskGroupHostVolumes,
 		s.taskGroupDevices,
 		s.taskGroupNetwork,
 	}
-	avail := []FeasibilityChecker{s.taskGroupCSIVolumes}
+	avail := []FeasibilityChecker{
+		s.taskGroupHostVolumes,
+		s.taskGroupCSIVolumes,
+	}
 	s.wrappedChecks = NewFeasibilityWrapper(ctx, s.source, jobs, tgs, avail)
 
 	// Filter on distinct property constraints.
@@ -406,11 +408,13 @@ func NewGenericStack(batch bool, ctx Context) *GenericStack {
 	tgs := []FeasibilityChecker{
 		s.taskGroupDrivers,
 		s.taskGroupConstraint,
-		s.taskGroupHostVolumes,
 		s.taskGroupDevices,
 		s.taskGroupNetwork,
 	}
-	avail := []FeasibilityChecker{s.taskGroupCSIVolumes}
+	avail := []FeasibilityChecker{
+		s.taskGroupHostVolumes,
+		s.taskGroupCSIVolumes,
+	}
 	s.wrappedChecks = NewFeasibilityWrapper(ctx, s.source, jobs, tgs, avail)
 
 	// Filter on distinct host constraints.


@@ -12,6 +12,7 @@ import (
 	"github.com/hashicorp/nomad/ci"
 	"github.com/hashicorp/nomad/nomad/mock"
 	"github.com/hashicorp/nomad/nomad/structs"
+	"github.com/shoenig/test/must"
 	"github.com/stretchr/testify/require"
 )
 
@@ -247,6 +248,100 @@ func TestServiceStack_Select_DriverFilter(t *testing.T) {
 	}
 }
 
+func TestServiceStack_Select_HostVolume(t *testing.T) {
+	ci.Parallel(t)
+
+	_, ctx := testContext(t)
+
+	// Create nodes with host volumes and one without.
+	node0 := mock.Node()
+
+	node1 := mock.Node()
+	node1.HostVolumes = map[string]*structs.ClientHostVolumeConfig{
+		"unique": {
+			Name: "unique",
+			Path: "/tmp/unique",
+		},
+		"per_alloc[0]": {
+			Name: "per_alloc[0]",
+			Path: "/tmp/per_alloc_0",
+		},
+	}
+	node1.ComputeClass()
+
+	node2 := mock.Node()
+	node2.HostVolumes = map[string]*structs.ClientHostVolumeConfig{
+		"per_alloc[1]": {
+			Name: "per_alloc[1]",
+			Path: "/tmp/per_alloc_1",
+		},
+	}
+	node2.ComputeClass()
+
+	// Create stack with nodes.
+	stack := NewGenericStack(false, ctx)
+	stack.SetNodes([]*structs.Node{node0, node1, node2})
+
+	job := mock.Job()
+	job.TaskGroups[0].Count = 1
+	job.TaskGroups[0].Volumes = map[string]*structs.VolumeRequest{"unique": {
+		Name:     "unique",
+		Type:     structs.VolumeTypeHost,
+		Source:   "unique",
+		PerAlloc: false,
+	}}
+	stack.SetJob(job)
+
+	// Alloc selects node with host volume 'unique'.
+	selectOptions := &SelectOptions{
+		AllocName: structs.AllocName(job.Name, job.TaskGroups[0].Name, 0),
+	}
+	option := stack.Select(job.TaskGroups[0], selectOptions)
+	must.NotNil(t, option)
+	must.Eq(t, option.Node.ID, node1.ID)
+
+	// Recreate the stack and select volumes per alloc.
+	stack = NewGenericStack(false, ctx)
+	stack.SetNodes([]*structs.Node{node0, node1, node2})
+
+	job.TaskGroups[0].Count = 3
+	job.TaskGroups[0].Volumes = map[string]*structs.VolumeRequest{"per_alloc": {
+		Name:     "per_alloc",
+		Type:     structs.VolumeTypeHost,
+		Source:   "per_alloc",
+		PerAlloc: true,
+	}}
+	stack.SetJob(job)
+
+	// First alloc selects node with host volume 'per_alloc[0]'.
+	selectOptions = &SelectOptions{
+		AllocName: structs.AllocName(job.Name, job.TaskGroups[0].Name, 0),
+	}
+	option = stack.Select(job.TaskGroups[0], selectOptions)
+	must.NotNil(t, option)
+	must.Eq(t, option.Node.ID, node1.ID)
+
+	// Second alloc selects node with host volume 'per_alloc[1]'.
+	selectOptions = &SelectOptions{
+		AllocName: structs.AllocName(job.Name, job.TaskGroups[0].Name, 1),
+	}
+	option = stack.Select(job.TaskGroups[0], selectOptions)
+	must.NotNil(t, option)
+	must.Eq(t, option.Node.ID, node2.ID)
+
+	// Third alloc must select node with host volume 'per_alloc[2]', but none
+	// of the nodes available can fulfil this requirement.
+	selectOptions = &SelectOptions{
+		AllocName: structs.AllocName(job.Name, job.TaskGroups[0].Name, 2),
+	}
+	option = stack.Select(job.TaskGroups[0], selectOptions)
+	must.Nil(t, option)
+
+	metrics := ctx.Metrics()
+	must.MapLen(t, 1, metrics.ConstraintFiltered)
+	must.Eq(t, metrics.ConstraintFiltered[FilterConstraintHostVolumes], 3)
+}
+
 func TestServiceStack_Select_CSI(t *testing.T) {
 	ci.Parallel(t)
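As background for the per_alloc assertions above: Nomad allocation names follow the "<job>.<group>[<index>]" pattern produced by structs.AllocName, and a volume request with PerAlloc set is matched against a host volume carrying the same index suffix. The helpers below are hypothetical re-statements of that convention, inferred from the test's expectations rather than copied from Nomad's implementation.

package main

import "fmt"

// allocName mirrors the "<job>.<group>[<index>]" shape that structs.AllocName
// produces (an assumption inferred from the test above).
func allocName(job, group string, idx uint) string {
	return fmt.Sprintf("%s.%s[%d]", job, group, idx)
}

// perAllocVolumeName appends the same "[<index>]" suffix to the requested
// source, which is why the "per_alloc" request with PerAlloc set to true can
// only be placed on nodes exposing "per_alloc[0]", "per_alloc[1]", and so on.
func perAllocVolumeName(source string, idx uint) string {
	return fmt.Sprintf("%s[%d]", source, idx)
}

func main() {
	for idx := uint(0); idx < 3; idx++ {
		fmt.Printf("alloc %-20s needs host volume %q\n",
			allocName("job", "web", idx), perAllocVolumeName("per_alloc", idx))
	}
	// Only per_alloc[0] (node1) and per_alloc[1] (node2) exist in the test,
	// so the third allocation cannot be placed and is constraint-filtered.
}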