open-nomad/client/pluginmanager/csimanager/volume_test.go
Tim Gross 5a3b45864d csi: fix unpublish workflow ID mismatches
The CSI plugins use the external volume ID for all operations, but the
Client CSI RPCs use the Nomad volume ID (human-friendly) for the
mount paths. Pass the external ID as an arg in the RPC call so that
the unpublish workflows have it without calling back to the server to
find the external ID.

The controller CSI plugins need the CSI node ID (in other words, the
storage provider's view of the node ID, such as the EC2 instance ID),
not the Nomad node ID, to determine how to detach the external volume.
2020-04-06 10:15:55 -04:00
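For context on the ID mismatch this commit fixes, here is a minimal, self-contained sketch of the two-ID plumbing: the human-friendly Nomad volume ID keys the host mount paths, while the external (storage provider) ID is what the node plugin receives. The stagingDirFor and nodeUnstage helpers and the literal IDs are hypothetical stand-ins for stagingDirForVolume and the CSI NodeUnstageVolume RPC; the real signatures are exercised in the tests below (see unstageVolume and unpublishVolume, which take both IDs).

package main

import (
	"fmt"
	"path/filepath"
)

// stagingDirFor mirrors how the volume manager keys host paths by the
// human-friendly Nomad volume ID (a hypothetical simplification of
// stagingDirForVolume in this package).
func stagingDirFor(mountRoot, nomadVolID string) string {
	return filepath.Join(mountRoot, "staging", nomadVolID)
}

// nodeUnstage stands in for the node plugin call, which needs the
// external (storage provider) volume ID, not the Nomad ID.
func nodeUnstage(externalID, stagingPath string) {
	fmt.Printf("NodeUnstageVolume(volume_id=%q, staging_target_path=%q)\n",
		externalID, stagingPath)
}

func main() {
	nomadVolID := "mysql-vol"    // human-friendly Nomad volume ID
	externalID := "vol-0123abcd" // the provider's external volume ID

	// Both IDs travel together in the client RPC, so the unpublish
	// workflow never has to call back to the server to look one up.
	path := stagingDirFor("/csi", nomadVolID)
	nodeUnstage(externalID, path)
}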

package csimanager

import (
	"context"
	"errors"
	"io/ioutil"
	"os"
	"runtime"
	"testing"

	"github.com/hashicorp/nomad/helper/testlog"
	"github.com/hashicorp/nomad/nomad/mock"
	"github.com/hashicorp/nomad/nomad/structs"
	"github.com/hashicorp/nomad/plugins/csi"
	csifake "github.com/hashicorp/nomad/plugins/csi/fake"
	"github.com/stretchr/testify/require"
)

func tmpDir(t testing.TB) string {
	t.Helper()
	dir, err := ioutil.TempDir("", "nomad")
	require.NoError(t, err)
	return dir
}
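
// TestVolumeManager_ensureStagingDir checks that ensureStagingDir creates
// the per-volume staging directory when it is missing, tolerates a
// pre-existing directory, and reports whether a mount already exists at
// the path.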
func TestVolumeManager_ensureStagingDir(t *testing.T) {
	t.Parallel()

	cases := []struct {
		Name                 string
		Volume               *structs.CSIVolume
		UsageOptions         *UsageOptions
		CreateDirAheadOfTime bool
		MountDirAheadOfTime  bool

		ExpectedErr        error
		ExpectedMountState bool
	}{
		{
			Name:         "Creates a directory when one does not exist",
			Volume:       &structs.CSIVolume{ID: "foo"},
			UsageOptions: &UsageOptions{},
		},
		{
			Name:                 "Does not fail because of a pre-existing directory",
			Volume:               &structs.CSIVolume{ID: "foo"},
			UsageOptions:         &UsageOptions{},
			CreateDirAheadOfTime: true,
		},
		{
			Name:         "Returns negative mount info",
			UsageOptions: &UsageOptions{},
			Volume:       &structs.CSIVolume{ID: "foo"},
		},
		{
			Name:                 "Returns positive mount info",
			Volume:               &structs.CSIVolume{ID: "foo"},
			UsageOptions:         &UsageOptions{},
			CreateDirAheadOfTime: true,
			MountDirAheadOfTime:  true,
			ExpectedMountState:   true,
		},
	}

	for _, tc := range cases {
		t.Run(tc.Name, func(t *testing.T) {
			// Step 1: Validate that the test case makes sense
			if !tc.CreateDirAheadOfTime && tc.MountDirAheadOfTime {
				require.Fail(t, "Cannot Mount without creating a dir")
			}

			if tc.MountDirAheadOfTime {
				// These tests could be enabled by mounting a fake device on
				// Linux, e.g. shipping a small ext4 image file and using it
				// as a loopback device, but there's no convenient way to
				// implement this.
				t.Skip("TODO: Skipped because we don't detect bind mounts")
			}

			// Step 2: Test Setup
			tmpPath := tmpDir(t)
			defer os.RemoveAll(tmpPath)

			csiFake := &csifake.Client{}
			eventer := func(e *structs.NodeEvent) {}
			manager := newVolumeManager(testlog.HCLogger(t), eventer, csiFake, tmpPath, tmpPath, true)
			expectedStagingPath := manager.stagingDirForVolume(tmpPath, tc.Volume.ID, tc.UsageOptions)

			if tc.CreateDirAheadOfTime {
				err := os.MkdirAll(expectedStagingPath, 0700)
				require.NoError(t, err)
			}

			// Step 3: Now we can do some testing
			path, detectedMount, testErr := manager.ensureStagingDir(tc.Volume, tc.UsageOptions)
			if tc.ExpectedErr != nil {
				require.EqualError(t, testErr, tc.ExpectedErr.Error())
				return // We don't perform extra validation if an error was detected.
			}

			require.NoError(t, testErr)
			require.Equal(t, tc.ExpectedMountState, detectedMount)

			// If the ensureStagingDir call had to create the directory
			// itself, validate that the directory exists and check its
			// permissions.
			if !tc.CreateDirAheadOfTime {
				file, err := os.Lstat(path)
				require.NoError(t, err)
				require.True(t, file.IsDir())

				// TODO: Figure out a windows equivalent of this test
				if runtime.GOOS != "windows" {
					require.Equal(t, os.FileMode(0700), file.Mode().Perm())
				}
			}
		})
	}
}
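
// TestVolumeManager_stageVolume covers validation of the volume's
// attachment and access modes, propagation of plugin errors, and the
// happy path of staging through the fake CSI client.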
func TestVolumeManager_stageVolume(t *testing.T) {
	t.Parallel()

	cases := []struct {
		Name         string
		Volume       *structs.CSIVolume
		UsageOptions *UsageOptions
		PluginErr    error
		ExpectedErr  error
	}{
		{
			Name: "Returns an error when an invalid AttachmentMode is provided",
			Volume: &structs.CSIVolume{
				ID:             "foo",
				AttachmentMode: "nonsense",
			},
			UsageOptions: &UsageOptions{},
			ExpectedErr:  errors.New("Unknown volume attachment mode: nonsense"),
		},
		{
			Name: "Returns an error when an invalid AccessMode is provided",
			Volume: &structs.CSIVolume{
				ID:             "foo",
				AttachmentMode: structs.CSIVolumeAttachmentModeBlockDevice,
				AccessMode:     "nonsense",
			},
			UsageOptions: &UsageOptions{},
			ExpectedErr:  errors.New("Unknown volume access mode: nonsense"),
		},
		{
			Name: "Returns an error when the plugin returns an error",
			Volume: &structs.CSIVolume{
				ID:             "foo",
				AttachmentMode: structs.CSIVolumeAttachmentModeBlockDevice,
				AccessMode:     structs.CSIVolumeAccessModeMultiNodeMultiWriter,
			},
			UsageOptions: &UsageOptions{},
			PluginErr:    errors.New("Some Unknown Error"),
			ExpectedErr:  errors.New("Some Unknown Error"),
		},
		{
			Name: "Happy Path",
			Volume: &structs.CSIVolume{
				ID:             "foo",
				AttachmentMode: structs.CSIVolumeAttachmentModeBlockDevice,
				AccessMode:     structs.CSIVolumeAccessModeMultiNodeMultiWriter,
			},
			UsageOptions: &UsageOptions{},
			PluginErr:    nil,
			ExpectedErr:  nil,
		},
	}

	for _, tc := range cases {
		t.Run(tc.Name, func(t *testing.T) {
			tmpPath := tmpDir(t)
			defer os.RemoveAll(tmpPath)

			csiFake := &csifake.Client{}
			csiFake.NextNodeStageVolumeErr = tc.PluginErr

			eventer := func(e *structs.NodeEvent) {}
			manager := newVolumeManager(testlog.HCLogger(t), eventer, csiFake, tmpPath, tmpPath, true)
			ctx := context.Background()

			err := manager.stageVolume(ctx, tc.Volume, tc.UsageOptions, nil)

			if tc.ExpectedErr != nil {
				require.EqualError(t, err, tc.ExpectedErr.Error())
			} else {
				require.NoError(t, err)
			}
		})
	}
}
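
// TestVolumeManager_unstageVolume verifies NodeUnstageVolume error
// propagation and call counts. unstageVolume receives both the Nomad
// volume ID (used for mount paths) and the external RemoteID (used by
// the plugin), so the workflow needs no server lookup.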
func TestVolumeManager_unstageVolume(t *testing.T) {
	t.Parallel()

	cases := []struct {
		Name                 string
		Volume               *structs.CSIVolume
		UsageOptions         *UsageOptions
		PluginErr            error
		ExpectedErr          error
		ExpectedCSICallCount int64
	}{
		{
			Name: "Returns an error when the plugin returns an error",
			Volume: &structs.CSIVolume{
				ID: "foo",
			},
			UsageOptions:         &UsageOptions{},
			PluginErr:            errors.New("Some Unknown Error"),
			ExpectedErr:          errors.New("Some Unknown Error"),
			ExpectedCSICallCount: 1,
		},
		{
			Name: "Happy Path",
			Volume: &structs.CSIVolume{
				ID: "foo",
			},
			UsageOptions:         &UsageOptions{},
			PluginErr:            nil,
			ExpectedErr:          nil,
			ExpectedCSICallCount: 1,
		},
	}

	for _, tc := range cases {
		t.Run(tc.Name, func(t *testing.T) {
			tmpPath := tmpDir(t)
			defer os.RemoveAll(tmpPath)

			csiFake := &csifake.Client{}
			csiFake.NextNodeUnstageVolumeErr = tc.PluginErr

			eventer := func(e *structs.NodeEvent) {}
			manager := newVolumeManager(testlog.HCLogger(t), eventer, csiFake, tmpPath, tmpPath, true)
			ctx := context.Background()

			err := manager.unstageVolume(ctx,
				tc.Volume.ID, tc.Volume.RemoteID(), tc.UsageOptions)

			if tc.ExpectedErr != nil {
				require.EqualError(t, err, tc.ExpectedErr.Error())
			} else {
				require.NoError(t, err)
			}

			require.Equal(t, tc.ExpectedCSICallCount, csiFake.NodeUnstageVolumeCallCount)
		})
	}
}
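
// TestVolumeManager_publishVolume covers NodePublishVolume error
// propagation and call counts, and asserts that mount options set on
// the volume are passed through to the plugin and can be overridden
// per-request via UsageOptions.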
func TestVolumeManager_publishVolume(t *testing.T) {
	t.Parallel()

	cases := []struct {
		Name                     string
		Allocation               *structs.Allocation
		Volume                   *structs.CSIVolume
		UsageOptions             *UsageOptions
		PluginErr                error
		ExpectedErr              error
		ExpectedCSICallCount     int64
		ExpectedVolumeCapability *csi.VolumeCapability
	}{
		{
			Name:       "Returns an error when the plugin returns an error",
			Allocation: structs.MockAlloc(),
			Volume: &structs.CSIVolume{
				ID:             "foo",
				AttachmentMode: structs.CSIVolumeAttachmentModeBlockDevice,
				AccessMode:     structs.CSIVolumeAccessModeMultiNodeMultiWriter,
			},
			UsageOptions:         &UsageOptions{},
			PluginErr:            errors.New("Some Unknown Error"),
			ExpectedErr:          errors.New("Some Unknown Error"),
			ExpectedCSICallCount: 1,
		},
		{
			Name:       "Happy Path",
			Allocation: structs.MockAlloc(),
			Volume: &structs.CSIVolume{
				ID:             "foo",
				AttachmentMode: structs.CSIVolumeAttachmentModeBlockDevice,
				AccessMode:     structs.CSIVolumeAccessModeMultiNodeMultiWriter,
			},
			UsageOptions:         &UsageOptions{},
			PluginErr:            nil,
			ExpectedErr:          nil,
			ExpectedCSICallCount: 1,
		},
		{
			Name:       "Mount options in the volume",
			Allocation: structs.MockAlloc(),
			Volume: &structs.CSIVolume{
				ID:             "foo",
				AttachmentMode: structs.CSIVolumeAttachmentModeFilesystem,
				AccessMode:     structs.CSIVolumeAccessModeMultiNodeMultiWriter,
				MountOptions: &structs.CSIMountOptions{
					MountFlags: []string{"ro"},
				},
			},
			UsageOptions:         &UsageOptions{},
			PluginErr:            nil,
			ExpectedErr:          nil,
			ExpectedCSICallCount: 1,
			ExpectedVolumeCapability: &csi.VolumeCapability{
				AccessType: csi.VolumeAccessTypeMount,
				AccessMode: csi.VolumeAccessModeMultiNodeMultiWriter,
				MountVolume: &structs.CSIMountOptions{
					MountFlags: []string{"ro"},
				},
			},
		},
		{
			Name:       "Mount options override in the request",
			Allocation: structs.MockAlloc(),
			Volume: &structs.CSIVolume{
				ID:             "foo",
				AttachmentMode: structs.CSIVolumeAttachmentModeFilesystem,
				AccessMode:     structs.CSIVolumeAccessModeMultiNodeMultiWriter,
				MountOptions: &structs.CSIMountOptions{
					MountFlags: []string{"ro"},
				},
			},
			UsageOptions: &UsageOptions{
				MountOptions: &structs.CSIMountOptions{
					MountFlags: []string{"rw"},
				},
			},
			PluginErr:            nil,
			ExpectedErr:          nil,
			ExpectedCSICallCount: 1,
			ExpectedVolumeCapability: &csi.VolumeCapability{
				AccessType: csi.VolumeAccessTypeMount,
				AccessMode: csi.VolumeAccessModeMultiNodeMultiWriter,
				MountVolume: &structs.CSIMountOptions{
					MountFlags: []string{"rw"},
				},
			},
		},
	}

	for _, tc := range cases {
		t.Run(tc.Name, func(t *testing.T) {
			tmpPath := tmpDir(t)
			defer os.RemoveAll(tmpPath)

			csiFake := &csifake.Client{}
			csiFake.NextNodePublishVolumeErr = tc.PluginErr

			eventer := func(e *structs.NodeEvent) {}
			manager := newVolumeManager(testlog.HCLogger(t), eventer, csiFake, tmpPath, tmpPath, true)
			ctx := context.Background()

			_, err := manager.publishVolume(ctx, tc.Volume, tc.Allocation, tc.UsageOptions, nil)

			if tc.ExpectedErr != nil {
				require.EqualError(t, err, tc.ExpectedErr.Error())
			} else {
				require.NoError(t, err)
			}

			require.Equal(t, tc.ExpectedCSICallCount, csiFake.NodePublishVolumeCallCount)

			if tc.ExpectedVolumeCapability != nil {
				require.Equal(t, tc.ExpectedVolumeCapability, csiFake.PrevVolumeCapability)
			}
		})
	}
}
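
// TestVolumeManager_unpublishVolume verifies NodeUnpublishVolume error
// propagation and call counts. Like unstageVolume, it takes the Nomad
// volume ID, the external RemoteID, and the allocation ID as arguments.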
func TestVolumeManager_unpublishVolume(t *testing.T) {
	t.Parallel()

	cases := []struct {
		Name                 string
		Allocation           *structs.Allocation
		Volume               *structs.CSIVolume
		UsageOptions         *UsageOptions
		PluginErr            error
		ExpectedErr          error
		ExpectedCSICallCount int64
	}{
		{
			Name:       "Returns an error when the plugin returns an error",
			Allocation: structs.MockAlloc(),
			Volume: &structs.CSIVolume{
				ID: "foo",
			},
			UsageOptions:         &UsageOptions{},
			PluginErr:            errors.New("Some Unknown Error"),
			ExpectedErr:          errors.New("Some Unknown Error"),
			ExpectedCSICallCount: 1,
		},
		{
			Name:       "Happy Path",
			Allocation: structs.MockAlloc(),
			Volume: &structs.CSIVolume{
				ID: "foo",
			},
			UsageOptions:         &UsageOptions{},
			PluginErr:            nil,
			ExpectedErr:          nil,
			ExpectedCSICallCount: 1,
		},
	}

	for _, tc := range cases {
		t.Run(tc.Name, func(t *testing.T) {
			tmpPath := tmpDir(t)
			defer os.RemoveAll(tmpPath)

			csiFake := &csifake.Client{}
			csiFake.NextNodeUnpublishVolumeErr = tc.PluginErr

			eventer := func(e *structs.NodeEvent) {}
			manager := newVolumeManager(testlog.HCLogger(t), eventer, csiFake, tmpPath, tmpPath, true)
			ctx := context.Background()

			err := manager.unpublishVolume(ctx,
				tc.Volume.ID, tc.Volume.RemoteID(), tc.Allocation.ID, tc.UsageOptions)

			if tc.ExpectedErr != nil {
				require.EqualError(t, err, tc.ExpectedErr.Error())
			} else {
				require.NoError(t, err)
			}

			require.Equal(t, tc.ExpectedCSICallCount, csiFake.NodeUnpublishVolumeCallCount)
		})
	}
}
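
// TestVolumeManager_MountVolumeEvents checks that MountVolume and
// UnmountVolume emit Storage node events recording the volume ID,
// success or failure, and the error message when an operation fails.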
func TestVolumeManager_MountVolumeEvents(t *testing.T) {
	t.Parallel()

	tmpPath := tmpDir(t)
	defer os.RemoveAll(tmpPath)

	csiFake := &csifake.Client{}

	var events []*structs.NodeEvent
	eventer := func(e *structs.NodeEvent) {
		events = append(events, e)
	}

	manager := newVolumeManager(testlog.HCLogger(t), eventer, csiFake, tmpPath, tmpPath, true)
	ctx := context.Background()
	vol := &structs.CSIVolume{
		ID:         "vol",
		Namespace:  "ns",
		AccessMode: structs.CSIVolumeAccessModeMultiNodeMultiWriter,
	}
	alloc := mock.Alloc()
	usage := &UsageOptions{}
	pubCtx := map[string]string{}

	_, err := manager.MountVolume(ctx, vol, alloc, usage, pubCtx)
	// The empty AttachmentMode should fail validation; assert on the exact
	// error message rather than passing it as a log-only argument to
	// require.Error.
	require.EqualError(t, err, "Unknown volume attachment mode: ")
	require.Equal(t, 1, len(events))
	e := events[0]
	require.Equal(t, "Mount volume", e.Message)
	require.Equal(t, "Storage", e.Subsystem)
	require.Equal(t, "vol", e.Details["volume_id"])
	require.Equal(t, "false", e.Details["success"])
	require.Equal(t, "Unknown volume attachment mode: ", e.Details["error"])
	events = events[1:]

	vol.AttachmentMode = structs.CSIVolumeAttachmentModeFilesystem
	_, err = manager.MountVolume(ctx, vol, alloc, usage, pubCtx)
	require.NoError(t, err)

	require.Equal(t, 1, len(events))
	e = events[0]
	require.Equal(t, "Mount volume", e.Message)
	require.Equal(t, "Storage", e.Subsystem)
	require.Equal(t, "vol", e.Details["volume_id"])
	require.Equal(t, "true", e.Details["success"])
	events = events[1:]

	err = manager.UnmountVolume(ctx, vol.ID, vol.RemoteID(), alloc.ID, usage)
	require.NoError(t, err)

	require.Equal(t, 1, len(events))
	e = events[0]
	require.Equal(t, "Unmount volume", e.Message)
	require.Equal(t, "Storage", e.Subsystem)
	require.Equal(t, "vol", e.Details["volume_id"])
	require.Equal(t, "true", e.Details["success"])
}