// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: MPL-2.0

package scheduler

import (
	"testing"

	"github.com/hashicorp/nomad/ci"
	"github.com/hashicorp/nomad/helper/uuid"
	"github.com/hashicorp/nomad/nomad/mock"
	"github.com/hashicorp/nomad/nomad/structs"
	psstructs "github.com/hashicorp/nomad/plugins/shared/structs"
	"github.com/shoenig/test/must"
	"github.com/stretchr/testify/require"
)

// deviceRequest takes the name, count and potential constraints and affinities
// and returns a device request.
func deviceRequest(name string, count uint64,
	constraints []*structs.Constraint, affinities []*structs.Affinity) *structs.RequestedDevice {
	return &structs.RequestedDevice{
		Name:        name,
		Count:       count,
		Constraints: constraints,
		Affinities:  affinities,
	}
}

// devNode returns a node containing two devices, an nvidia gpu and an intel
// FPGA.
func devNode() *structs.Node {
	n := mock.NvidiaNode()
	n.NodeResources.Devices = append(n.NodeResources.Devices, &structs.NodeDeviceResource{
		Type:   "fpga",
		Vendor: "intel",
		Name:   "F100",
		Attributes: map[string]*psstructs.Attribute{
			"memory": psstructs.NewIntAttribute(4, psstructs.UnitGiB),
		},
		Instances: []*structs.NodeDevice{
			{
				ID:      uuid.Generate(),
				Healthy: true,
			},
			{
				ID:      uuid.Generate(),
				Healthy: false,
			},
		},
	})
	return n
}

// multipleNvidiaNode returns a node containing multiple nvidia device types.
func multipleNvidiaNode() *structs.Node {
	n := mock.NvidiaNode()
	n.NodeResources.Devices = append(n.NodeResources.Devices, &structs.NodeDeviceResource{
		Type:   "gpu",
		Vendor: "nvidia",
		Name:   "2080ti",
		Attributes: map[string]*psstructs.Attribute{
			"memory":           psstructs.NewIntAttribute(11, psstructs.UnitGiB),
			"cuda_cores":       psstructs.NewIntAttribute(4352, ""),
			"graphics_clock":   psstructs.NewIntAttribute(1350, psstructs.UnitMHz),
			"memory_bandwidth": psstructs.NewIntAttribute(14, psstructs.UnitGBPerS),
		},
		Instances: []*structs.NodeDevice{
			{
				ID:      uuid.Generate(),
				Healthy: true,
			},
			{
				ID:      uuid.Generate(),
				Healthy: true,
			},
		},
	})
	return n
}

// collectInstanceIDs returns the IDs of the device instances
func collectInstanceIDs(devices ...*structs.NodeDeviceResource) []string {
	var out []string
	for _, d := range devices {
		for _, i := range d.Instances {
			out = append(out, i.ID)
		}
	}
	return out
}

// Test that asking for a device that isn't fully specified works.
func TestDeviceAllocator_Allocate_GenericRequest(t *testing.T) {
	ci.Parallel(t)

	require := require.New(t)
	_, ctx := testContext(t)
	n := devNode()
	d := newDeviceAllocator(ctx, n)
	require.NotNil(d)

	// Build the request
	ask := deviceRequest("gpu", 1, nil, nil)

	out, score, err := d.AssignDevice(ask)
	require.NotNil(out)
	require.Zero(score)
	require.NoError(err)

	// Check that we got the nvidia device
	require.Len(out.DeviceIDs, 1)
	require.Contains(collectInstanceIDs(n.NodeResources.Devices[0]), out.DeviceIDs[0])
}

// Test that asking for a device that is fully specified works.
func TestDeviceAllocator_Allocate_FullyQualifiedRequest(t *testing.T) {
	ci.Parallel(t)

	require := require.New(t)
	_, ctx := testContext(t)
	n := devNode()
	d := newDeviceAllocator(ctx, n)
	require.NotNil(d)

	// Build the request
	ask := deviceRequest("intel/fpga/F100", 1, nil, nil)

	out, score, err := d.AssignDevice(ask)
	require.NotNil(out)
	require.Zero(score)
	require.NoError(err)

	// Check that we got the intel FPGA device
	require.Len(out.DeviceIDs, 1)
	require.Contains(collectInstanceIDs(n.NodeResources.Devices[1]), out.DeviceIDs[0])
}

// Test that asking for a device count greater than the available instances
// doesn't place.
func TestDeviceAllocator_Allocate_NotEnoughInstances(t *testing.T) {
	ci.Parallel(t)

	require := require.New(t)
	_, ctx := testContext(t)
	n := devNode()
	d := newDeviceAllocator(ctx, n)
	require.NotNil(d)

	// Build the request
	ask := deviceRequest("gpu", 4, nil, nil)

	out, _, err := d.AssignDevice(ask)
	require.Nil(out)
	require.Error(err)
	require.Contains(err.Error(), "no devices match request")
}

// Test that asking for a device with constraints works
func TestDeviceAllocator_Allocate_Constraints(t *testing.T) {
	ci.Parallel(t)

	n := multipleNvidiaNode()
	nvidia0 := n.NodeResources.Devices[0]
	nvidia1 := n.NodeResources.Devices[1]

	cases := []struct {
		Name              string
		Note              string
		Constraints       []*structs.Constraint
		ExpectedDevice    *structs.NodeDeviceResource
		ExpectedDeviceIDs []string
		NoPlacement       bool
	}{
		{
			Name: "gpu",
			Note: "-gt",
			Constraints: []*structs.Constraint{
				{
					LTarget: "${device.attr.cuda_cores}",
					Operand: ">",
					RTarget: "4000",
				},
			},
			ExpectedDevice:    nvidia1,
			ExpectedDeviceIDs: collectInstanceIDs(nvidia1),
		},
		{
			Name: "gpu",
			Note: "-lt",
			Constraints: []*structs.Constraint{
				{
					LTarget: "${device.attr.cuda_cores}",
					Operand: "<",
					RTarget: "4000",
				},
			},
			ExpectedDevice:    nvidia0,
			ExpectedDeviceIDs: collectInstanceIDs(nvidia0),
		},
		{
			Name: "nvidia/gpu",
			Constraints: []*structs.Constraint{
				// First two are shared across both devices
				{
					LTarget: "${device.attr.memory_bandwidth}",
					Operand: ">",
					RTarget: "10 GB/s",
				},
				{
					LTarget: "${device.attr.memory}",
					Operand: "is",
					RTarget: "11264 MiB",
				},
				{
					LTarget: "${device.attr.graphics_clock}",
					Operand: ">",
					RTarget: "1.4 GHz",
				},
			},
			ExpectedDevice:    nvidia0,
			ExpectedDeviceIDs: collectInstanceIDs(nvidia0),
		},
		{
			Name:        "intel/gpu",
			NoPlacement: true,
		},
		{
			Name: "nvidia/gpu",
			Note: "-no-placement",
			Constraints: []*structs.Constraint{
				{
					LTarget: "${device.attr.memory_bandwidth}",
					Operand: ">",
					RTarget: "10 GB/s",
				},
				{
					LTarget: "${device.attr.memory}",
					Operand: "is",
					RTarget: "11264 MiB",
				},
				// Rules both out
				{
					LTarget: "${device.attr.graphics_clock}",
					Operand: ">",
					RTarget: "2.4 GHz",
				},
			},
			NoPlacement: true,
		},
		{
			Name: "nvidia/gpu",
			Note: "-contains-id",
			Constraints: []*structs.Constraint{
				{
					LTarget: "${device.ids}",
					Operand: "set_contains",
					RTarget: nvidia0.Instances[1].ID,
				},
			},
			ExpectedDevice:    nvidia0,
			ExpectedDeviceIDs: []string{nvidia0.Instances[1].ID},
		},
	}

	for _, c := range cases {
		t.Run(c.Name+c.Note, func(t *testing.T) {
			_, ctx := testContext(t)
			d := newDeviceAllocator(ctx, n)
			must.NotNil(t, d)

			// Build the request
			ask := deviceRequest(c.Name, 1, c.Constraints, nil)

			out, score, err := d.AssignDevice(ask)
			if c.NoPlacement {
				require.Nil(t, out)
			} else {
				must.NotNil(t, out)
				must.Zero(t, score)
				must.NoError(t, err)

				// Check that we got the right nvidia device instance, and
				// specific device instance IDs if required
				must.Len(t, 1, out.DeviceIDs)
				must.SliceContains(t, collectInstanceIDs(c.ExpectedDevice), out.DeviceIDs[0])
				must.SliceContainsSubset(t, c.ExpectedDeviceIDs, out.DeviceIDs)
			}
		})
	}
}

// Test that asking for a device with affinities works
func TestDeviceAllocator_Allocate_Affinities(t *testing.T) {
	ci.Parallel(t)

	n := multipleNvidiaNode()
	nvidia0 := n.NodeResources.Devices[0]
	nvidia1 := n.NodeResources.Devices[1]

	cases := []struct {
		Name           string
		Affinities     []*structs.Affinity
		ExpectedDevice *structs.NodeDeviceResource
		ZeroScore      bool
	}{
		{
			Name: "gpu",
			Affinities: []*structs.Affinity{
				{
					LTarget: "${device.attr.cuda_cores}",
					Operand: ">",
					RTarget: "4000",
					Weight:  60,
				},
			},
			ExpectedDevice: nvidia1,
		},
		{
			Name: "gpu",
			Affinities: []*structs.Affinity{
				{
					LTarget: "${device.attr.cuda_cores}",
					Operand: "<",
					RTarget: "4000",
					Weight:  10,
				},
			},
			ExpectedDevice: nvidia0,
		},
		{
			Name: "gpu",
			Affinities: []*structs.Affinity{
				{
					LTarget: "${device.attr.cuda_cores}",
					Operand: ">",
					RTarget: "4000",
					Weight:  -20,
				},
			},
			ZeroScore:      true,
			ExpectedDevice: nvidia0,
		},
		{
			Name: "nvidia/gpu",
			Affinities: []*structs.Affinity{
				// First two are shared across both devices
				{
					LTarget: "${device.attr.memory_bandwidth}",
					Operand: ">",
					RTarget: "10 GB/s",
					Weight:  20,
				},
				{
					LTarget: "${device.attr.memory}",
					Operand: "is",
					RTarget: "11264 MiB",
					Weight:  20,
				},
				{
					LTarget: "${device.attr.graphics_clock}",
					Operand: ">",
					RTarget: "1.4 GHz",
					Weight:  90,
				},
			},
			ExpectedDevice: nvidia0,
		},
	}

	for _, c := range cases {
		t.Run(c.Name, func(t *testing.T) {
			require := require.New(t)
			_, ctx := testContext(t)
			d := newDeviceAllocator(ctx, n)
			require.NotNil(d)

			// Build the request
			ask := deviceRequest(c.Name, 1, nil, c.Affinities)

			out, score, err := d.AssignDevice(ask)
			require.NotNil(out)
			require.NoError(err)
			if c.ZeroScore {
				require.Zero(score)
			} else {
				require.NotZero(score)
			}

			// Check that we got the expected device
			require.Len(out.DeviceIDs, 1)
			require.Contains(collectInstanceIDs(c.ExpectedDevice), out.DeviceIDs[0])
		})
	}
}