open-nomad/client/allocrunner/taskrunner/volume_hook_test.go
Tim Gross aa8927abb4
volumes: return better error messages for unsupported task drivers (#8030)
When an allocation runs a task whose driver can't support volume mounts,
the mounting fails in a way that can be hard to understand. With host
volumes this usually means failing silently, whereas with CSI the operator
gets inscrutable internals exposed in the `nomad alloc status` output.

This changeset adds a MountConfigs field to the task driver Capabilities
response. We validate this when the `csi_hook` or `volume_hook` fires and
return a user-friendly error.
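
A minimal sketch of the shape of that validation, based on the test cases in
the file below (the helper name checkDriverSupportsCSI is hypothetical and not
the actual hook code; the MountConfigs field, the MountConfigSupportNone value,
and the error text mirror the "unsupported driver" test case):

// Sketch only: gate CSI mounting on the driver's reported mount capability.
func checkDriverSupportsCSI(d drivers.DriverPlugin, task *structs.Task) error {
	caps, err := d.Capabilities()
	if err != nil {
		return err
	}
	if caps.MountConfigs == drivers.MountConfigSupportNone {
		return fmt.Errorf("task driver %q for %q does not support CSI", task.Driver, task.Name)
	}
	return nil
}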

Note that we don't currently have a way to get driver capabilities up to the
server, except through attributes. Validating this when the user initially
submits the jobspec would be even better than what we're doing here (and could
be useful for all our other capabilities), but that's out of scope for this
changeset.

Also note that the MountConfig enum starts with "supports all" in order to
support community plugins in a backwards compatible way, rather than cutting
them off from volume mounting unexpectedly.
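
To make the backwards-compatibility point concrete, here is a sketch of an enum
whose zero value means "supports all" (the type name MountConfigSupport is an
assumption; only the two constants used in the test below are taken from the
source):

// Sketch only: the zero value must be "supports all" so community plugins
// that never set the field keep their existing mount behaviour.
type MountConfigSupport int32

const (
	MountConfigSupportAll MountConfigSupport = iota // zero value: backwards-compatible default
	MountConfigSupportNone
)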
2020-05-21 09:18:02 -04:00


package taskrunner

import (
	"testing"

	"github.com/hashicorp/nomad/client/allocrunner/interfaces"
	"github.com/hashicorp/nomad/client/pluginmanager/csimanager"
	cstructs "github.com/hashicorp/nomad/client/structs"
	"github.com/hashicorp/nomad/client/taskenv"
	"github.com/hashicorp/nomad/helper/testlog"
	"github.com/hashicorp/nomad/nomad/mock"
	"github.com/hashicorp/nomad/nomad/structs"
	"github.com/hashicorp/nomad/plugins/drivers"
	dtu "github.com/hashicorp/nomad/plugins/drivers/testutils"
	"github.com/stretchr/testify/require"
)
func TestVolumeHook_PartitionMountsByVolume_Works(t *testing.T) {
	mounts := []*structs.VolumeMount{
		{
			Volume:      "foo",
			Destination: "/tmp",
			ReadOnly:    false,
		},
		{
			Volume:      "foo",
			Destination: "/bar",
			ReadOnly:    false,
		},
		{
			Volume:      "baz",
			Destination: "/baz",
			ReadOnly:    false,
		},
	}

	expected := map[string][]*structs.VolumeMount{
		"foo": {
			{
				Volume:      "foo",
				Destination: "/tmp",
				ReadOnly:    false,
			},
			{
				Volume:      "foo",
				Destination: "/bar",
				ReadOnly:    false,
			},
		},
		"baz": {
			{
				Volume:      "baz",
				Destination: "/baz",
				ReadOnly:    false,
			},
		},
	}

	// Test with a real collection
	partitioned := partitionMountsByVolume(mounts)
	require.Equal(t, expected, partitioned)

	// Test with nil/empty list
	partitioned = partitionMountsByVolume(nil)
	require.Equal(t, map[string][]*structs.VolumeMount{}, partitioned)
}
func TestVolumeHook_prepareCSIVolumes(t *testing.T) {
	req := &interfaces.TaskPrestartRequest{
		Task: &structs.Task{
			Name:   "test",
			Driver: "mock",
			VolumeMounts: []*structs.VolumeMount{
				{
					Volume:      "foo",
					Destination: "/bar",
				},
			},
		},
	}

	volumes := map[string]*structs.VolumeRequest{
		"foo": {
			Type:   "csi",
			Source: "my-test-volume",
		},
	}

	cases := []struct {
		Name          string
		Driver        drivers.DriverPlugin
		Expected      []*drivers.MountConfig
		ExpectedError string
	}{
		{
			Name: "supported driver",
			Driver: &dtu.MockDriver{
				CapabilitiesF: func() (*drivers.Capabilities, error) {
					return &drivers.Capabilities{
						MountConfigs: drivers.MountConfigSupportAll,
					}, nil
				},
			},
			Expected: []*drivers.MountConfig{
				{
					HostPath: "/mnt/my-test-volume",
					TaskPath: "/bar",
				},
			},
		},
		{
			Name: "unsupported driver",
			Driver: &dtu.MockDriver{
				CapabilitiesF: func() (*drivers.Capabilities, error) {
					return &drivers.Capabilities{
						MountConfigs: drivers.MountConfigSupportNone,
					}, nil
				},
			},
			ExpectedError: "task driver \"mock\" for \"test\" does not support CSI",
		},
	}

	for _, tc := range cases {
		t.Run(tc.Name, func(t *testing.T) {
			tr := &TaskRunner{
				task:   req.Task,
				driver: tc.Driver,
				allocHookResources: &cstructs.AllocHookResources{
					CSIMounts: map[string]*csimanager.MountInfo{
						"foo": {
							Source: "/mnt/my-test-volume",
						},
					},
				},
			}

			hook := &volumeHook{
				logger: testlog.HCLogger(t),
				alloc:  structs.MockAlloc(),
				runner: tr,
			}
			mounts, err := hook.prepareCSIVolumes(req, volumes)

			if tc.ExpectedError != "" {
				require.EqualError(t, err, tc.ExpectedError)
			} else {
				require.NoError(t, err)
			}
			require.Equal(t, tc.Expected, mounts)
		})
	}
}
func TestVolumeHook_Interpolation(t *testing.T) {
	alloc := mock.Alloc()
	task := alloc.Job.TaskGroups[0].Tasks[0]
	taskEnv := taskenv.NewBuilder(mock.Node(), alloc, task, "global").SetHookEnv("volume",
		map[string]string{
			"PROPAGATION_MODE": "private",
			"VOLUME_ID":        "my-other-volume",
		},
	).Build()

	mounts := []*structs.VolumeMount{
		{
			Volume:          "foo",
			Destination:     "/tmp",
			ReadOnly:        false,
			PropagationMode: "bidirectional",
		},
		{
			Volume:          "foo",
			Destination:     "/bar-${NOMAD_JOB_NAME}",
			ReadOnly:        false,
			PropagationMode: "bidirectional",
		},
		{
			Volume:          "${VOLUME_ID}",
			Destination:     "/baz",
			ReadOnly:        false,
			PropagationMode: "bidirectional",
		},
		{
			Volume:          "foo",
			Destination:     "/quux",
			ReadOnly:        false,
			PropagationMode: "${PROPAGATION_MODE}",
		},
	}

	expected := []*structs.VolumeMount{
		{
			Volume:          "foo",
			Destination:     "/tmp",
			ReadOnly:        false,
			PropagationMode: "bidirectional",
		},
		{
			Volume:          "foo",
			Destination:     "/bar-my-job",
			ReadOnly:        false,
			PropagationMode: "bidirectional",
		},
		{
			Volume:          "my-other-volume",
			Destination:     "/baz",
			ReadOnly:        false,
			PropagationMode: "bidirectional",
		},
		{
			Volume:          "foo",
			Destination:     "/quux",
			ReadOnly:        false,
			PropagationMode: "private",
		},
	}

	interpolateVolumeMounts(mounts, taskEnv)
	require.Equal(t, expected, mounts)
}