package csimanager

import (
	"context"
	"errors"
	"fmt"
	"os"
	"path/filepath"
	"strings"
	"time"

	grpc_retry "github.com/grpc-ecosystem/go-grpc-middleware/retry"
	"github.com/hashicorp/go-hclog"
	"github.com/hashicorp/go-multierror"
	"github.com/hashicorp/nomad/helper/mount"
	"github.com/hashicorp/nomad/nomad/structs"
	"github.com/hashicorp/nomad/plugins/csi"
)

var _ VolumeMounter = &volumeManager{}

const (
	DefaultMountActionTimeout = 2 * time.Minute
	StagingDirName            = "staging"
	AllocSpecificDirName      = "per-alloc"
)

// volumeManager handles the state of attached volumes for a given CSI Plugin.
//
// volumeManagers outlive the lifetime of a given allocation as volumes may be
// shared by multiple allocations on the same node.
//
// Volumes are stored in an enriched volume usage struct, as the CSI spec
// requires slightly different handling for each usage mode.
type volumeManager struct {
	logger  hclog.Logger
	eventer TriggerNodeEvent
	plugin  csi.CSIPlugin

	usageTracker *volumeUsageTracker

	// mountRoot is the root of where plugin directories and mounts may be
	// created, e.g. /opt/nomad.d/statedir/csi/my-csi-plugin/
	mountRoot string

	// containerMountPoint is the location _inside_ the plugin container that
	// the `mountRoot` is bound in to.
	containerMountPoint string

	// requiresStaging indicates whether the plugin requires that the volume
	// manager call the NodeStageVolume and NodeUnstageVolume RPCs during setup
	// and teardown.
	requiresStaging bool
}
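
// newVolumeManager returns a volumeManager for the given plugin, rooted at
// rootDir on the host and at containerRootDir inside the plugin container.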
func newVolumeManager(logger hclog.Logger, eventer TriggerNodeEvent, plugin csi.CSIPlugin, rootDir, containerRootDir string, requiresStaging bool) *volumeManager {
	return &volumeManager{
		logger:              logger.Named("volume_manager"),
		eventer:             eventer,
		plugin:              plugin,
		mountRoot:           rootDir,
		containerMountPoint: containerRootDir,
		requiresStaging:     requiresStaging,
		usageTracker:        newVolumeUsageTracker(),
	}
}
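
// stagingDirForVolume returns the path at which a volume is staged for a
// given usage mode: {root}/staging/{volID}/{usage.ToFS()}.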
func (v *volumeManager) stagingDirForVolume(root string, volID string, usage *UsageOptions) string {
	return filepath.Join(root, StagingDirName, volID, usage.ToFS())
}
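
// allocDirForVolume returns the per-allocation directory for a volume:
// {root}/per-alloc/{allocID}/{volID}.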
func (v *volumeManager) allocDirForVolume(root string, volID, allocID string) string {
	return filepath.Join(root, AllocSpecificDirName, allocID, volID)
}
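
// targetForVolume returns the publish target path for a volume and usage mode
// within an allocation: {root}/per-alloc/{allocID}/{volID}/{usage.ToFS()}.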
func (v *volumeManager) targetForVolume(root string, volID, allocID string, usage *UsageOptions) string {
	return filepath.Join(root, AllocSpecificDirName, allocID, volID, usage.ToFS())
}

// ensureStagingDir attempts to create a directory for use when staging a volume
// and then validates that the path is not already a mount point, e.g. from an
// existing volume stage.
//
// Returns the staging path, whether the directory is a pre-existing mount
// point, and any error that occurred.
func (v *volumeManager) ensureStagingDir(vol *structs.CSIVolume, usage *UsageOptions) (string, bool, error) {
	stagingPath := v.stagingDirForVolume(v.mountRoot, vol.ID, usage)

	// Make the staging path, owned by the Nomad user
	if err := os.MkdirAll(stagingPath, 0700); err != nil && !os.IsExist(err) {
		return "", false, fmt.Errorf("failed to create staging directory for volume (%s): %v", vol.ID, err)
	}

	// Validate that it is not already a mount point
	m := mount.New()
	isNotMount, err := m.IsNotAMountPoint(stagingPath)
	if err != nil {
		return "", false, fmt.Errorf("mount point detection failed for volume (%s): %v", vol.ID, err)
	}

	return stagingPath, !isNotMount, nil
}

// ensureAllocDir attempts to create a directory for use when publishing a volume
// and then validates that the path is not already a mount point, e.g. when
// reattaching to existing allocs.
//
// Returns the publish target path, whether it is a pre-existing mount point,
// and any error that occurred.
func (v *volumeManager) ensureAllocDir(vol *structs.CSIVolume, alloc *structs.Allocation, usage *UsageOptions) (string, bool, error) {
	allocPath := v.allocDirForVolume(v.mountRoot, vol.ID, alloc.ID)

	// Make the alloc path, owned by the Nomad user
	if err := os.MkdirAll(allocPath, 0700); err != nil && !os.IsExist(err) {
		return "", false, fmt.Errorf("failed to create allocation directory for volume (%s): %v", vol.ID, err)
	}

	// Validate that the target is not already a mount point
	targetPath := v.targetForVolume(v.mountRoot, vol.ID, alloc.ID, usage)

	m := mount.New()
	isNotMount, err := m.IsNotAMountPoint(targetPath)

	switch {
	case errors.Is(err, os.ErrNotExist):
		// ignore; path does not exist and as such is not a mount
	case err != nil:
		return "", false, fmt.Errorf("mount point detection failed for volume (%s): %v", vol.ID, err)
	}

	return targetPath, !isNotMount, nil
}
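
// volumeCapability merges the volume's mount options with any usage-specific
// overrides and converts the result into the CSI VolumeCapability for the
// requested attachment and access modes.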
func volumeCapability(vol *structs.CSIVolume, usage *UsageOptions) (*csi.VolumeCapability, error) {
	var opts *structs.CSIMountOptions
	if vol.MountOptions == nil {
		opts = usage.MountOptions
	} else {
		opts = vol.MountOptions.Copy()
		opts.Merge(usage.MountOptions)
	}

	capability, err := csi.VolumeCapabilityFromStructs(usage.AttachmentMode, usage.AccessMode, opts)
	if err != nil {
		return nil, err
	}

	return capability, nil
}

// stageVolume prepares a volume for use by allocations. When a plugin exposes
// the STAGE_UNSTAGE_VOLUME capability it MUST be called once-per-volume for a
// given usage mode before the volume can be NodePublish-ed.
func (v *volumeManager) stageVolume(ctx context.Context, vol *structs.CSIVolume, usage *UsageOptions, publishContext map[string]string) error {
	logger := hclog.FromContext(ctx)
	logger.Trace("Preparing volume staging environment")
	hostStagingPath, isMount, err := v.ensureStagingDir(vol, usage)
	if err != nil {
		return err
	}
	pluginStagingPath := v.stagingDirForVolume(v.containerMountPoint, vol.ID, usage)

	logger.Trace("Volume staging environment", "pre-existing_mount", isMount, "host_staging_path", hostStagingPath, "plugin_staging_path", pluginStagingPath)

	if isMount {
		logger.Debug("re-using existing staging mount for volume", "staging_path", hostStagingPath)
		return nil
	}

	capability, err := volumeCapability(vol, usage)
	if err != nil {
		return err
	}

	req := &csi.NodeStageVolumeRequest{
		ExternalID:        vol.RemoteID(),
		PublishContext:    publishContext,
		StagingTargetPath: pluginStagingPath,
		VolumeCapability:  capability,
		Secrets:           vol.Secrets,
		VolumeContext:     vol.Context,
	}

	// CSI NodeStageVolume errors for timeout, codes.Unavailable and
	// codes.ResourceExhausted are retried; all other errors are fatal.
	return v.plugin.NodeStageVolume(ctx, req,
		grpc_retry.WithPerRetryTimeout(DefaultMountActionTimeout),
		grpc_retry.WithMax(3),
		grpc_retry.WithBackoff(grpc_retry.BackoffExponential(100*time.Millisecond)),
	)
}
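
// publishVolume makes the volume available at its allocation-specific target
// path via the NodePublishVolume RPC and returns a MountInfo whose Source is
// the host-side target path.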
func (v *volumeManager) publishVolume(ctx context.Context, vol *structs.CSIVolume, alloc *structs.Allocation, usage *UsageOptions, publishContext map[string]string) (*MountInfo, error) {
	logger := hclog.FromContext(ctx)
	var pluginStagingPath string
	if v.requiresStaging {
		pluginStagingPath = v.stagingDirForVolume(v.containerMountPoint, vol.ID, usage)
	}

	hostTargetPath, isMount, err := v.ensureAllocDir(vol, alloc, usage)
	if err != nil {
		return nil, err
	}
	pluginTargetPath := v.targetForVolume(v.containerMountPoint, vol.ID, alloc.ID, usage)

	if isMount {
		logger.Debug("Re-using existing published volume for allocation")
		return &MountInfo{Source: hostTargetPath}, nil
	}

	capabilities, err := volumeCapability(vol, usage)
	if err != nil {
		return nil, err
	}

	// CSI NodePublishVolume errors for timeout, codes.Unavailable and
	// codes.ResourceExhausted are retried; all other errors are fatal.
	err = v.plugin.NodePublishVolume(ctx, &csi.NodePublishVolumeRequest{
		ExternalID:        vol.RemoteID(),
		PublishContext:    publishContext,
		StagingTargetPath: pluginStagingPath,
		TargetPath:        pluginTargetPath,
		VolumeCapability:  capabilities,
		Readonly:          usage.ReadOnly,
		Secrets:           vol.Secrets,
		VolumeContext:     vol.Context,
	},
		grpc_retry.WithPerRetryTimeout(DefaultMountActionTimeout),
		grpc_retry.WithMax(3),
		grpc_retry.WithBackoff(grpc_retry.BackoffExponential(100*time.Millisecond)),
	)

	return &MountInfo{Source: hostTargetPath}, err
}

// MountVolume performs the steps required for using a given volume
// configuration for the provided allocation.
// It is passed the publishContext from remote attachment, and specific usage
// modes from the CSI Hook.
// It then uses this state to stage and publish the volume as required for use
// by the given allocation.
func (v *volumeManager) MountVolume(ctx context.Context, vol *structs.CSIVolume, alloc *structs.Allocation, usage *UsageOptions, publishContext map[string]string) (mountInfo *MountInfo, err error) {
	logger := v.logger.With("volume_id", vol.ID, "alloc_id", alloc.ID)
	ctx = hclog.WithContext(ctx, logger)

	if v.requiresStaging {
		err = v.stageVolume(ctx, vol, usage, publishContext)
	}

	if err == nil {
		mountInfo, err = v.publishVolume(ctx, vol, alloc, usage, publishContext)
	}

	if err == nil {
		v.usageTracker.Claim(alloc.ID, vol.ID, usage)
	}

	event := structs.NewNodeEvent().
		SetSubsystem(structs.NodeEventSubsystemStorage).
		SetMessage("Mount volume").
		AddDetail("volume_id", vol.ID)
	if err == nil {
		event.AddDetail("success", "true")
	} else {
		event.AddDetail("success", "false")
		event.AddDetail("error", err.Error())
	}

	v.eventer(event)

	return mountInfo, err
}

// unstageVolume is the inverse operation of `stageVolume` and must be called
// once for each staging path that a volume has been staged under.
// It is safe to call multiple times and a plugin is required to return OK if
// the volume has been unstaged or was never staged on the node.
func (v *volumeManager) unstageVolume(ctx context.Context, volID, remoteID string, usage *UsageOptions) error {
	logger := hclog.FromContext(ctx)
	logger.Trace("Unstaging volume")
	stagingPath := v.stagingDirForVolume(v.containerMountPoint, volID, usage)

	// CSI NodeUnstageVolume errors for timeout, codes.Unavailable and
	// codes.ResourceExhausted are retried; all other errors are fatal.
	return v.plugin.NodeUnstageVolume(ctx,
		remoteID,
		stagingPath,
		grpc_retry.WithPerRetryTimeout(DefaultMountActionTimeout),
		grpc_retry.WithMax(3),
		grpc_retry.WithBackoff(grpc_retry.BackoffExponential(100*time.Millisecond)),
	)
}
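
// combineErrors flattens the given errors into a single multierror, skipping
// nil values; it returns nil if no non-nil errors were passed.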
func combineErrors(maybeErrs ...error) error {
	var result *multierror.Error
	for _, err := range maybeErrs {
		if err == nil {
			continue
		}

		result = multierror.Append(result, err)
	}

	return result.ErrorOrNil()
}
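
// unpublishVolume is the inverse of publishVolume: it calls NodeUnpublishVolume
// for the allocation's target path and then cleans up the host-side target
// directory, wrapping "already gone" conditions as ignorable errors.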
func (v *volumeManager) unpublishVolume(ctx context.Context, volID, remoteID, allocID string, usage *UsageOptions) error {
	pluginTargetPath := v.targetForVolume(v.containerMountPoint, volID, allocID, usage)

	// CSI NodeUnpublishVolume errors for timeout, codes.Unavailable and
	// codes.ResourceExhausted are retried; all other errors are fatal.
	rpcErr := v.plugin.NodeUnpublishVolume(ctx, remoteID, pluginTargetPath,
		grpc_retry.WithPerRetryTimeout(DefaultMountActionTimeout),
		grpc_retry.WithMax(3),
		grpc_retry.WithBackoff(grpc_retry.BackoffExponential(100*time.Millisecond)),
	)

	hostTargetPath := v.targetForVolume(v.mountRoot, volID, allocID, usage)
	if _, err := os.Stat(hostTargetPath); os.IsNotExist(err) {
		if rpcErr != nil && strings.Contains(rpcErr.Error(), "no mount point") {
			// host target path was already destroyed, nothing to do here.
			// this helps us in the case that a previous GC attempt cleaned
			// up the volume on the node but the controller RPCs failed
			rpcErr = fmt.Errorf("%w: %v", structs.ErrCSIClientRPCIgnorable, rpcErr)
		}
		return rpcErr
	}

	// Host target path was not cleaned up, attempt to do so here. If it's still
	// a mount then removing the dir will fail and we'll return both the rpcErr
	// and the file error.
	rmErr := os.Remove(hostTargetPath)
	if rmErr != nil {
		return combineErrors(rpcErr, rmErr)
	}

	// We successfully removed the directory, so return any rpcErr that was
	// encountered, but because we got here it was probably flaky or the mount
	// was cleaned up externally; mark it as ignorable.
	return fmt.Errorf("%w: %v", structs.ErrCSIClientRPCIgnorable, rpcErr)
}
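
// UnmountVolume unpublishes the volume for the given allocation and, if the
// plugin requires staging and the usage tracker reports no remaining claims,
// unstages it. Ignorable RPC errors are logged and discarded, and a node event
// is emitted with the result.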
func (v *volumeManager) UnmountVolume(ctx context.Context, volID, remoteID, allocID string, usage *UsageOptions) (err error) {
	logger := v.logger.With("volume_id", volID, "alloc_id", allocID)
	ctx = hclog.WithContext(ctx, logger)

	err = v.unpublishVolume(ctx, volID, remoteID, allocID, usage)

	if err == nil || errors.Is(err, structs.ErrCSIClientRPCIgnorable) {
		canRelease := v.usageTracker.Free(allocID, volID, usage)
		if v.requiresStaging && canRelease {
			err = v.unstageVolume(ctx, volID, remoteID, usage)
		}
	}

	if errors.Is(err, structs.ErrCSIClientRPCIgnorable) {
		logger.Trace("unmounting volume failed with ignorable error", "error", err)
		err = nil
	}

	event := structs.NewNodeEvent().
		SetSubsystem(structs.NodeEventSubsystemStorage).
		SetMessage("Unmount volume").
		AddDetail("volume_id", volID)
	if err == nil {
		event.AddDetail("success", "true")
	} else {
		event.AddDetail("success", "false")
		event.AddDetail("error", err.Error())
	}

	v.eventer(event)

	return err
}