// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: MPL-2.0

package allocrunner

import (
	"context"
	"fmt"
	"strings"
	"sync"
	"time"

	hclog "github.com/hashicorp/go-hclog"
	multierror "github.com/hashicorp/go-multierror"
	"github.com/hashicorp/nomad/client/allocrunner/state"
	"github.com/hashicorp/nomad/client/config"
	"github.com/hashicorp/nomad/client/dynamicplugins"
	"github.com/hashicorp/nomad/client/pluginmanager/csimanager"
	cstructs "github.com/hashicorp/nomad/client/structs"
	"github.com/hashicorp/nomad/helper"
	"github.com/hashicorp/nomad/nomad/structs"
	"github.com/hashicorp/nomad/plugins/drivers"
)

// csiHook will wait for remote CSI volumes to be attached to the host
// before continuing.
//
// It is a no-op for allocs that do not depend on CSI volumes.
type csiHook struct {
	alloc      *structs.Allocation
	logger     hclog.Logger
	csimanager csimanager.Manager

	// interfaces implemented by the allocRunner
	rpcClient       config.RPCer
	allocRunnerShim allocRunnerShim
	hookResources   *cstructs.AllocHookResources

	nodeSecret         string
	minBackoffInterval time.Duration
	maxBackoffInterval time.Duration
	maxBackoffDuration time.Duration

	volumeResultsLock sync.Mutex
	volumeResults     map[string]*volumePublishResult // alias -> volumePublishResult

	shutdownCtx      context.Context
	shutdownCancelFn context.CancelFunc
}

// allocRunnerShim is the narrow set of allocRunner methods this hook
// needs; it is implemented by the allocRunner.
type allocRunnerShim interface {
	GetTaskDriverCapabilities(string) (*drivers.Capabilities, error)
	SetCSIVolumes(vols map[string]*state.CSIVolumeStub) error
	GetCSIVolumes() (map[string]*state.CSIVolumeStub, error)
}

func newCSIHook(alloc *structs.Allocation, logger hclog.Logger, csi csimanager.Manager, rpcClient config.RPCer, arShim allocRunnerShim, hookResources *cstructs.AllocHookResources, nodeSecret string) *csiHook {
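	// derive a cancellable context so that Shutdown and Destroy can
	// interrupt any in-flight CSI RPC retry loops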
	shutdownCtx, shutdownCancelFn := context.WithCancel(context.Background())

	return &csiHook{
		alloc:              alloc,
		logger:             logger.Named("csi_hook"),
		csimanager:         csi,
		rpcClient:          rpcClient,
		allocRunnerShim:    arShim,
		hookResources:      hookResources,
		nodeSecret:         nodeSecret,
		volumeResults:      map[string]*volumePublishResult{},
		minBackoffInterval: time.Second,
		maxBackoffInterval: time.Minute,
		maxBackoffDuration: time.Hour * 24,
		shutdownCtx:        shutdownCtx,
		shutdownCancelFn:   shutdownCancelFn,
	}
}

func (c *csiHook) Name() string {
	return "csi_hook"
}

func (c *csiHook) Prerun() error {
	if !c.shouldRun() {
		return nil
	}

	tg := c.alloc.Job.LookupTaskGroup(c.alloc.TaskGroup)
	if err := c.validateTasksSupportCSI(tg); err != nil {
		return err
	}

	// Because operations on CSI volumes are expensive and can error, we do each
	// step for all volumes before proceeding to the next step so we have to
	// unwind less work. In practice, most allocations with volumes will only
	// have one or a few at most. We lock the results so that if an update/stop
	// comes in while we're running we can assert we'll safely tear down
	// everything that's been done so far.

	c.volumeResultsLock.Lock()
	defer c.volumeResultsLock.Unlock()

	// Initially, populate the result map with all of the requests
	for alias, volumeRequest := range tg.Volumes {
		if volumeRequest.Type == structs.VolumeTypeCSI {
			c.volumeResults[alias] = &volumePublishResult{
				request: volumeRequest,
				stub: &state.CSIVolumeStub{
					VolumeID: volumeRequest.VolumeID(c.alloc.Name)},
			}
		}
	}

	err := c.restoreMounts(c.volumeResults)
	if err != nil {
		return fmt.Errorf("restoring mounts: %w", err)
	}

	err = c.claimVolumes(c.volumeResults)
	if err != nil {
		return fmt.Errorf("claiming volumes: %w", err)
	}

	err = c.mountVolumes(c.volumeResults)
	if err != nil {
		return fmt.Errorf("mounting volumes: %w", err)
	}

	// make the mounts available to the taskrunner's volume_hook
	mounts := helper.ConvertMap(c.volumeResults,
		func(result *volumePublishResult) *csimanager.MountInfo {
			return result.stub.MountInfo
		})
	c.hookResources.SetCSIMounts(mounts)

	// persist the published mount info so we can restore on client restarts
	stubs := helper.ConvertMap(c.volumeResults,
		func(result *volumePublishResult) *state.CSIVolumeStub {
			return result.stub
		})
	c.allocRunnerShim.SetCSIVolumes(stubs)

	return nil
}

// Postrun sends an RPC to the server to unpublish the volume. This may
// forward client RPCs to the node plugins or to the controller plugins,
// depending on whether other allocations on this node have claims on this
// volume.
func (c *csiHook) Postrun() error {
	if !c.shouldRun() {
		return nil
	}

	c.volumeResultsLock.Lock()
	defer c.volumeResultsLock.Unlock()

	var wg sync.WaitGroup
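	// buffered so the per-volume goroutines never block when sending errors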
	errs := make(chan error, len(c.volumeResults))

	for _, result := range c.volumeResults {
		wg.Add(1)

		// CSI RPCs can potentially take a long time. Split the work
		// across goroutines so that a slow volume doesn't delay
		// releasing the others for reuse by operators.
		go func(result *volumePublishResult) {
			defer wg.Done()
			err := c.unmountImpl(result)
			if err != nil {
				// we can recover an unmount failure if the operator
				// brings the plugin back up, so retry every few minutes
				// but eventually give up. Don't block shutdown so that
				// we don't block shutting down the client in -dev mode
				go func(result *volumePublishResult) {
					err := c.unmountWithRetry(result)
					if err != nil {
						c.logger.Error("volume could not be unmounted")
					}
					err = c.unpublish(result)
					if err != nil {
						c.logger.Error("volume could not be unpublished")
					}
				}(result)
			}

			// we can't recover from this RPC error client-side; the
			// volume claim GC job will have to clean up for us once
			// the allocation is marked terminal
			errs <- c.unpublish(result)
		}(result)
	}

	wg.Wait()
	close(errs) // so we don't block waiting if there were no errors

	var mErr *multierror.Error
	for err := range errs {
		mErr = multierror.Append(mErr, err)
	}

	return mErr.ErrorOrNil()
}

type volumePublishResult struct {
	request        *structs.VolumeRequest // the request from the jobspec
	volume         *structs.CSIVolume     // the volume we get back from the server
	publishContext map[string]string      // populated after claim if provided by plugin
	stub           *state.CSIVolumeStub   // populated from volume, plugin, or stub
}

// validateTasksSupportCSI verifies that at least one task in the group uses a
// task driver that supports CSI. This prevents us from publishing CSI volumes
// only to find out once we get to the taskrunner/volume_hook that no task can
// mount them.
func (c *csiHook) validateTasksSupportCSI(tg *structs.TaskGroup) error {

	for _, task := range tg.Tasks {
		caps, err := c.allocRunnerShim.GetTaskDriverCapabilities(task.Name)
		if err != nil {
			return fmt.Errorf("could not validate task driver capabilities: %v", err)
		}

		if caps.MountConfigs == drivers.MountConfigSupportNone {
			continue
		}

		return nil
	}

	return fmt.Errorf("no task supports CSI")
}

// restoreMounts tries to restore the mount info from the local client state and
// then verifies it with the plugin. If the volume is already mounted, we don't
// want to re-run the claim and mount workflow again. This lets us tolerate
// restarting clients even on disconnected nodes.
func (c *csiHook) restoreMounts(results map[string]*volumePublishResult) error {
	stubs, err := c.allocRunnerShim.GetCSIVolumes()
	if err != nil {
		return err
	}
	if stubs == nil {
		return nil // no previous volumes
	}
	for _, result := range results {
		stub := stubs[result.request.Name]
		if stub == nil {
			continue
		}

		result.stub = stub

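		// only verify volumes this client previously mounted; anything
		// without restored mount info goes through the regular claim and
		// mount workflow in Prerun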
		if result.stub.MountInfo != nil && result.stub.PluginID != "" {

			// make sure the plugin is ready or becomes so quickly.
			plugin := result.stub.PluginID
			pType := dynamicplugins.PluginTypeCSINode
			if err := c.csimanager.WaitForPlugin(c.shutdownCtx, pType, plugin); err != nil {
				return err
			}
			c.logger.Debug("found CSI plugin", "type", pType, "name", plugin)

			manager, err := c.csimanager.ManagerForPlugin(c.shutdownCtx, plugin)
			if err != nil {
				return err
			}

			isMounted, err := manager.HasMount(c.shutdownCtx, result.stub.MountInfo)
			if err != nil {
				return err
			}
			if !isMounted {
				// the mount is gone, so clear this from our result state so
				// that we can try to remount it with the plugin ID we have
				result.stub.MountInfo = nil
			}
		}
	}

	return nil
}

// claimVolumes sends a claim to the server for each volume to mark it in use
// and kick off the controller publish workflow, if one is needed
func (c *csiHook) claimVolumes(results map[string]*volumePublishResult) error {

	for _, result := range results {
		if result.stub.MountInfo != nil {
			continue // already mounted
		}

		request := result.request

		claimType := structs.CSIVolumeClaimWrite
		if request.ReadOnly {
			claimType = structs.CSIVolumeClaimRead
		}

		req := &structs.CSIVolumeClaimRequest{
			VolumeID:       result.stub.VolumeID,
			AllocationID:   c.alloc.ID,
			NodeID:         c.alloc.NodeID,
			ExternalNodeID: result.stub.ExternalNodeID,
			Claim:          claimType,
			AccessMode:     request.AccessMode,
			AttachmentMode: request.AttachmentMode,
			WriteRequest: structs.WriteRequest{
				Region:    c.alloc.Job.Region,
				Namespace: c.alloc.Job.Namespace,
				AuthToken: c.nodeSecret,
			},
		}

		resp, err := c.claimWithRetry(req)
		if err != nil {
			return fmt.Errorf("could not claim volume %s: %w", req.VolumeID, err)
		}
		if resp.Volume == nil {
			return fmt.Errorf("unexpected nil volume returned for ID: %v", request.Source)
		}

		result.volume = resp.Volume

		// populate data we'll write later to disk
		result.stub.VolumeID = resp.Volume.ID
		result.stub.VolumeExternalID = resp.Volume.RemoteID()
		result.stub.PluginID = resp.Volume.PluginID
		result.publishContext = resp.PublishContext
	}

	return nil
}

func (c *csiHook) mountVolumes(results map[string]*volumePublishResult) error {

	for _, result := range results {
		if result.stub.MountInfo != nil {
			continue // already mounted
		}
		if result.volume == nil {
			return fmt.Errorf("volume not available from claim for mounting volume request %q",
				result.request.Name) // should be unreachable
		}

		// make sure the plugin is ready or becomes so quickly.
		plugin := result.volume.PluginID
		pType := dynamicplugins.PluginTypeCSINode
		if err := c.csimanager.WaitForPlugin(c.shutdownCtx, pType, plugin); err != nil {
			return err
		}
		c.logger.Debug("found CSI plugin", "type", pType, "name", plugin)

		manager, err := c.csimanager.ManagerForPlugin(c.shutdownCtx, plugin)
		if err != nil {
			return err
		}

		usageOpts := &csimanager.UsageOptions{
			ReadOnly:       result.request.ReadOnly,
			AttachmentMode: result.request.AttachmentMode,
			AccessMode:     result.request.AccessMode,
			MountOptions:   result.request.MountOptions,
		}

		mountInfo, err := manager.MountVolume(
			c.shutdownCtx, result.volume, c.alloc, usageOpts, result.publishContext)
		if err != nil {
			return err
		}
		result.stub.MountInfo = mountInfo
	}

	return nil
}

// claimWithRetry tries to claim the volume on the server, retrying
// with exponential backoff capped to a maximum interval
func (c *csiHook) claimWithRetry(req *structs.CSIVolumeClaimRequest) (*structs.CSIVolumeClaimResponse, error) {

	ctx, cancel := context.WithTimeout(c.shutdownCtx, c.maxBackoffDuration)
	defer cancel()

	var resp structs.CSIVolumeClaimResponse
	var err error
	backoff := c.minBackoffInterval
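	// the timer starts at zero so the first claim attempt fires immediately;
	// each later iteration waits for the reset backoff interval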
	t, stop := helper.NewSafeTimer(0)
	defer stop()
	for {
		select {
		case <-ctx.Done():
			return nil, err
		case <-t.C:
		}

		err = c.rpcClient.RPC("CSIVolume.Claim", req, &resp)
		if err == nil {
			break
		}

		if !isRetryableClaimRPCError(err) {
			break
		}

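		// double the backoff on each retryable failure, capping it at
		// maxBackoffInterval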
		if backoff < c.maxBackoffInterval {
			backoff = backoff * 2
			if backoff > c.maxBackoffInterval {
				backoff = c.maxBackoffInterval
			}
		}
		c.logger.Debug(
			"volume could not be claimed because it is in use", "retry_in", backoff)
		t.Reset(backoff)
	}
	return &resp, err
}

// isRetryableClaimRPCError looks for errors where we need to retry
// with backoff because we expect them to be eventually resolved.
func isRetryableClaimRPCError(err error) bool {

	// note: because these errors are returned via RPC which breaks error
	// wrapping, we can't check with errors.Is and need to read the string
	errMsg := err.Error()
	if strings.Contains(errMsg, structs.ErrCSIVolumeMaxClaims.Error()) {
		return true
	}
	if strings.Contains(errMsg, structs.ErrCSIClientRPCRetryable.Error()) {
		return true
	}
	if strings.Contains(errMsg, "no servers") {
		return true
	}
	if strings.Contains(errMsg, structs.ErrNoLeader.Error()) {
		return true
	}
	return false
}

func (c *csiHook) shouldRun() bool {
	tg := c.alloc.Job.LookupTaskGroup(c.alloc.TaskGroup)
	for _, vol := range tg.Volumes {
		if vol.Type == structs.VolumeTypeCSI {
			return true
		}
	}

	return false
}

func (c *csiHook) unpublish(result *volumePublishResult) error {

	mode := structs.CSIVolumeClaimRead
	if !result.request.ReadOnly {
		mode = structs.CSIVolumeClaimWrite
	}

	source := result.request.Source
	if result.request.PerAlloc {
		// NOTE: PerAlloc can't be set if we have canaries
		source = source + structs.AllocSuffix(c.alloc.Name)
	}

	req := &structs.CSIVolumeUnpublishRequest{
		VolumeID: source,
		Claim: &structs.CSIVolumeClaim{
			AllocationID: c.alloc.ID,
			NodeID:       c.alloc.NodeID,
			Mode:         mode,
			State:        structs.CSIVolumeClaimStateUnpublishing,
		},
		WriteRequest: structs.WriteRequest{
			Region:    c.alloc.Job.Region,
			Namespace: c.alloc.Job.Namespace,
			AuthToken: c.nodeSecret,
		},
	}

	return c.rpcClient.RPC("CSIVolume.Unpublish",
		req, &structs.CSIVolumeUnpublishResponse{})
}

// unmountWithRetry tries to unmount/unstage the volume, retrying with
// exponential backoff capped to a maximum interval
func (c *csiHook) unmountWithRetry(result *volumePublishResult) error {
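	// retry until the volume unmounts, the hook is shut down, or
	// maxBackoffDuration elapses, whichever comes first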
	ctx, cancel := context.WithTimeout(c.shutdownCtx, c.maxBackoffDuration)
	defer cancel()
	var err error
	backoff := c.minBackoffInterval
	t, stop := helper.NewSafeTimer(0)
	defer stop()
	for {
		select {
		case <-ctx.Done():
			return err
		case <-t.C:
		}

		err = c.unmountImpl(result)
		if err == nil {
			break
		}

		if backoff < c.maxBackoffInterval {
			backoff = backoff * 2
			if backoff > c.maxBackoffInterval {
				backoff = c.maxBackoffInterval
			}
		}
		c.logger.Debug("volume could not be unmounted", "retry_in", backoff)
		t.Reset(backoff)
	}
	return nil
}

// unmountImpl implements the call to the CSI plugin manager to
// unmount the volume. Each retry will write an "Unmount volume"
// NodeEvent
func (c *csiHook) unmountImpl(result *volumePublishResult) error {

	manager, err := c.csimanager.ManagerForPlugin(c.shutdownCtx, result.stub.PluginID)
	if err != nil {
		return err
	}

	usageOpts := &csimanager.UsageOptions{
		ReadOnly:       result.request.ReadOnly,
		AttachmentMode: result.request.AttachmentMode,
		AccessMode:     result.request.AccessMode,
		MountOptions:   result.request.MountOptions,
	}

	return manager.UnmountVolume(c.shutdownCtx,
		result.stub.VolumeID, result.stub.VolumeExternalID, c.alloc.ID, usageOpts)
}

// Shutdown will get called when the client is gracefully
// stopping. Cancel our shutdown context so that we don't block client
// shutdown while in the CSI RPC retry loop.
func (c *csiHook) Shutdown() {
	c.logger.Trace("shutting down hook")
	c.shutdownCancelFn()
}

// Destroy will get called when an allocation gets GC'd on the client
// or when a -dev mode client is stopped. Cancel our shutdown context
// so that we don't block client shutdown while in the CSI RPC retry
// loop.
func (c *csiHook) Destroy() {
	c.logger.Trace("destroying hook")
	c.shutdownCancelFn()
}