CSI: allow updates to volumes on re-registration (#12167)
The CSI `CreateVolume` RPC is idempotent so long as the topology, capabilities, and parameters are unchanged. CSI volumes have many user-defined fields that are immutable once set, and many fields that are not user-settable at all. This change updates the `Register` RPC so that updating a volume via the API merges the request onto any existing volume without touching Nomad-controlled fields, while validating the result with the same strict requirements expected of idempotent `CreateVolume` RPCs. The state store method is also renamed to `UpsertCSIVolume` to clarify that it is used for all volume writes, not just the `Register` RPC.
This commit is contained in:
parent 09a7612150
commit 2dafe46fe3
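As background for the new behavior, here is a hedged sketch of the client-side flow this commit enables, using the `github.com/hashicorp/nomad/api` package. The volume IDs and field values are illustrative only; the point is that re-registering an existing, unused volume now updates its mutable fields instead of failing:

```go
package main

import (
	"log"

	"github.com/hashicorp/nomad/api"
)

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}

	vol := &api.CSIVolume{
		ID:         "ebs-vol0", // same ID as the already-registered volume
		Name:       "database",
		ExternalID: "vol-0123456789", // identity fields must not change
		PluginID:   "aws-ebs0",
		// Mutable fields such as Secrets (and MountOptions, while the
		// volume is not in use) may differ from the registered volume;
		// they are merged onto the existing volume.
		Secrets: api.CSISecrets{"password": "rotated"},
	}

	// Before this change, registering an existing volume ID failed with
	// "volume exists"; now the server validates and applies the update.
	if err := client.CSIVolumes().Register(vol, nil); err != nil {
		log.Fatal(err)
	}
}
```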
@@ -0,0 +1,3 @@
+```release-note:improvement
+csi: Allow volumes to be re-registered to be updated while not in use
+```
@@ -216,6 +216,10 @@ func (c *CSI) ControllerCreateVolume(req *structs.ClientCSIControllerCreateVolum
 	resp.CapacityBytes = cresp.Volume.CapacityBytes
 	resp.VolumeContext = cresp.Volume.VolumeContext
+
+	// Note: we safely throw away cresp.Volume.ContentSource here
+	// because it's just round-tripping the value set by the user in
+	// the server RPC call
 
 	resp.Topologies = make([]*nstructs.CSITopology, len(cresp.Volume.AccessibleTopology))
 	for _, topo := range cresp.Volume.AccessibleTopology {
 		resp.Topologies = append(resp.Topologies,
@@ -479,7 +479,7 @@ func TestAllocStatusCommand_CSIVolumes(t *testing.T) {
 			Segments: map[string]string{"foo": "bar"},
 		}},
 	}}
-	err = state.CSIVolumeRegister(1002, vols)
+	err = state.UpsertCSIVolume(1002, vols)
 	require.NoError(t, err)
 
 	// Upsert the job and alloc
@@ -24,6 +24,7 @@ func (c *VolumeRegisterCommand) csiRegister(client *api.Client, ast *ast.File) i
 		return 1
 	}
 
+	c.Ui.Output(fmt.Sprintf("Volume %q registered", vol.ID))
 	return 0
 }
 
@@ -46,7 +46,7 @@ func TestCSIVolumeStatusCommand_AutocompleteArgs(t *testing.T) {
 		PluginID: "glade",
 	}
 
-	require.NoError(t, state.CSIVolumeRegister(1000, []*structs.CSIVolume{vol}))
+	require.NoError(t, state.UpsertCSIVolume(1000, []*structs.CSIVolume{vol}))
 
 	prefix := vol.ID[:len(vol.ID)-5]
 	args := complete.Args{Last: prefix}
@@ -240,6 +240,7 @@ func (v *CSIVolume) pluginValidateVolume(req *structs.CSIVolumeRegisterRequest,
 
 	vol.Provider = plugin.Provider
 	vol.ProviderVersion = plugin.Version
+
 	return plugin, nil
 }
 
@@ -265,7 +266,15 @@ func (v *CSIVolume) controllerValidateVolume(req *structs.CSIVolumeRegisterReque
 	return v.srv.RPC(method, cReq, cResp)
 }
 
-// Register registers a new volume
+// Register registers a new volume or updates an existing volume. Note
+// that most user-defined CSIVolume fields are immutable once the
+// volume has been created.
+//
+// If the user needs to change fields because they've misconfigured
+// the registration of the external volume, we expect that claims
+// won't work either, and the user can deregister the volume and try
+// again with the right settings. This lets us be as strict with
+// validation here as the CreateVolume CSI RPC is expected to be.
 func (v *CSIVolume) Register(args *structs.CSIVolumeRegisterRequest, reply *structs.CSIVolumeRegisterResponse) error {
 	if done, err := v.srv.forward("CSIVolume.Register", args, args, reply); done {
 		return err
@@ -291,11 +300,50 @@ func (v *CSIVolume) Register(args *structs.CSIVolumeRegisterRequest, reply *stru
 	// We also validate that the plugin exists for each plugin, and validate the
 	// capabilities when the plugin has a controller.
 	for _, vol := range args.Volumes {
-		vol.Namespace = args.RequestNamespace()
+
+		snap, err := v.srv.State().Snapshot()
+		if err != nil {
+			return err
+		}
+		// TODO: allow volume spec file to set namespace
+		// https://github.com/hashicorp/nomad/issues/11196
+		if vol.Namespace == "" {
+			vol.Namespace = args.RequestNamespace()
+		}
 		if err = vol.Validate(); err != nil {
 			return err
 		}
+
+		ws := memdb.NewWatchSet()
+		existingVol, err := snap.CSIVolumeByID(ws, vol.Namespace, vol.ID)
+		if err != nil {
+			return err
+		}
+
+		// CSIVolume has many user-defined fields which are immutable
+		// once set, and many fields that are controlled by Nomad and
+		// are not user-settable. We merge onto a copy of the existing
+		// volume to allow a user to submit a volume spec for `volume
+		// create` and reuse it for updates in `volume register`
+		// without having to manually remove the fields unused by
+		// register (and similar use cases with API consumers such as
+		// Terraform).
+		if existingVol != nil {
+			existingVol = existingVol.Copy()
+			err = existingVol.Merge(vol)
+			if err != nil {
+				return err
+			}
+			*vol = *existingVol
+		} else if vol.Topologies == nil || len(vol.Topologies) == 0 {
+			// The topologies for the volume have already been set
+			// when it was created, so for newly register volumes
+			// we accept the user's description of that topology
+			if vol.RequestedTopologies != nil {
+				vol.Topologies = vol.RequestedTopologies.Required
+			}
+		}
 
 		plugin, err := v.pluginValidateVolume(args, vol)
 		if err != nil {
 			return err
@@ -303,14 +351,6 @@ func (v *CSIVolume) Register(args *structs.CSIVolumeRegisterRequest, reply *stru
 		if err := v.controllerValidateVolume(args, vol, plugin); err != nil {
 			return err
 		}
-
-		// The topologies for the volume have already been set when it was
-		// created, so we accept the user's description of that topology
-		if vol.Topologies == nil || len(vol.Topologies) == 0 {
-			if vol.RequestedTopologies != nil {
-				vol.Topologies = vol.RequestedTopologies.Required
-			}
-		}
 	}
 
 	resp, index, err := v.srv.raftApply(structs.CSIVolumeRegisterRequestType, args)
@@ -45,7 +45,7 @@ func TestCSIVolumeEndpoint_Get(t *testing.T) {
 			AttachmentMode: structs.CSIVolumeAttachmentModeFilesystem,
 		}},
 	}}
-	err := state.CSIVolumeRegister(999, vols)
+	err := state.UpsertCSIVolume(999, vols)
 	require.NoError(t, err)
 
 	// Create the register request
@@ -95,7 +95,7 @@ func TestCSIVolumeEndpoint_Get_ACL(t *testing.T) {
 			AttachmentMode: structs.CSIVolumeAttachmentModeFilesystem,
 		}},
 	}}
-	err := state.CSIVolumeRegister(999, vols)
+	err := state.UpsertCSIVolume(999, vols)
 	require.NoError(t, err)
 
 	// Create the register request
@@ -145,7 +145,6 @@ func TestCSIVolumeEndpoint_Register(t *testing.T) {
 	// Create the volume
 	vols := []*structs.CSIVolume{{
 		ID:        id0,
-		Namespace: "notTheNamespace",
 		PluginID:  "minnie",
 		AccessMode:     structs.CSIVolumeAccessModeSingleNodeReader, // legacy field ignored
 		AttachmentMode: structs.CSIVolumeAttachmentModeBlockDevice,  // legacy field ignored
@@ -286,7 +285,7 @@ func TestCSIVolumeEndpoint_Claim(t *testing.T) {
 		}},
 	}}
 	index++
-	err = state.CSIVolumeRegister(index, vols)
+	err = state.UpsertCSIVolume(index, vols)
 	require.NoError(t, err)
 
 	// Verify that the volume exists, and is healthy
@@ -425,7 +424,7 @@ func TestCSIVolumeEndpoint_ClaimWithController(t *testing.T) {
 			AttachmentMode: structs.CSIVolumeAttachmentModeFilesystem,
 		}},
 	}}
-	err = state.CSIVolumeRegister(1003, vols)
+	err = state.UpsertCSIVolume(1003, vols)
 	require.NoError(t, err)
 
 	alloc := mock.BatchAlloc()
@@ -535,7 +534,7 @@ func TestCSIVolumeEndpoint_Unpublish(t *testing.T) {
 	}
 
 	index++
-	err = state.CSIVolumeRegister(index, []*structs.CSIVolume{vol})
+	err = state.UpsertCSIVolume(index, []*structs.CSIVolume{vol})
 	require.NoError(t, err)
 
 	// setup: create an alloc that will claim our volume
@@ -642,7 +641,7 @@ func TestCSIVolumeEndpoint_List(t *testing.T) {
 			AttachmentMode: structs.CSIVolumeAttachmentModeFilesystem,
 		}},
 	}}
-	err = state.CSIVolumeRegister(1002, vols)
+	err = state.UpsertCSIVolume(1002, vols)
 	require.NoError(t, err)
 
 	// Query everything in the namespace
@@ -721,7 +720,7 @@ func TestCSIVolumeEndpoint_ListAllNamespaces(t *testing.T) {
 			}},
 		},
 	}
-	err = state.CSIVolumeRegister(1001, vols)
+	err = state.UpsertCSIVolume(1001, vols)
 	require.NoError(t, err)
 
 	// Lookup volumes in all namespaces
@@ -972,7 +971,7 @@ func TestCSIVolumeEndpoint_Delete(t *testing.T) {
 		Secrets: structs.CSISecrets{"mysecret": "secretvalue"},
 	}}
 	index++
-	err = state.CSIVolumeRegister(index, vols)
+	err = state.UpsertCSIVolume(index, vols)
 	require.NoError(t, err)
 
 	// Delete volumes
@@ -1191,7 +1190,7 @@ func TestCSIVolumeEndpoint_CreateSnapshot(t *testing.T) {
 		ExternalID: "vol-12345",
 	}}
 	index++
-	require.NoError(t, state.CSIVolumeRegister(index, vols))
+	require.NoError(t, state.UpsertCSIVolume(index, vols))
 
 	// Create the snapshot request
 	req1 := &structs.CSISnapshotCreateRequest{
@@ -1665,7 +1664,7 @@ func TestCSI_RPCVolumeAndPluginLookup(t *testing.T) {
 			ControllerRequired: false,
 		},
 	}
-	err = state.CSIVolumeRegister(1002, vols)
+	err = state.UpsertCSIVolume(1002, vols)
 	require.NoError(t, err)
 
 	// has controller
@@ -1260,7 +1260,7 @@ func (n *nomadFSM) applyCSIVolumeRegister(buf []byte, index uint64) interface{}
 	}
 	defer metrics.MeasureSince([]string{"nomad", "fsm", "apply_csi_volume_register"}, time.Now())
 
-	if err := n.state.CSIVolumeRegister(index, req.Volumes); err != nil {
+	if err := n.state.UpsertCSIVolume(index, req.Volumes); err != nil {
 		n.logger.Error("CSIVolumeRegister failed", "error", err)
 		return err
 	}
@@ -728,7 +728,7 @@ func TestSearch_PrefixSearch_CSIVolume(t *testing.T) {
 	testutil.WaitForLeader(t, s.RPC)
 
 	id := uuid.Generate()
-	err := s.fsm.State().CSIVolumeRegister(1000, []*structs.CSIVolume{{
+	err := s.fsm.State().UpsertCSIVolume(1000, []*structs.CSIVolume{{
 		ID:        id,
 		Namespace: structs.DefaultNamespace,
 		PluginID:  "glade",
@@ -1348,7 +1348,7 @@ func TestSearch_FuzzySearch_CSIVolume(t *testing.T) {
 	testutil.WaitForLeader(t, s.RPC)
 
 	id := uuid.Generate()
-	err := s.fsm.State().CSIVolumeRegister(1000, []*structs.CSIVolume{{
+	err := s.fsm.State().UpsertCSIVolume(1000, []*structs.CSIVolume{{
 		ID:        id,
 		Namespace: structs.DefaultNamespace,
 		PluginID:  "glade",
@@ -2143,8 +2143,8 @@ func (s *StateStore) JobSummaryByPrefix(ws memdb.WatchSet, namespace, id string)
 	return iter, nil
 }
 
-// CSIVolumeRegister adds a volume to the server store, failing if it already exists
-func (s *StateStore) CSIVolumeRegister(index uint64, volumes []*structs.CSIVolume) error {
+// UpsertCSIVolume inserts a volume in the state store.
+func (s *StateStore) UpsertCSIVolume(index uint64, volumes []*structs.CSIVolume) error {
 	txn := s.db.WriteTxn(index)
 	defer txn.Abort()
 
@@ -2155,7 +2155,6 @@ func (s *StateStore) CSIVolumeRegister(index uint64, volumes []*structs.CSIVolum
 			return fmt.Errorf("volume %s is in nonexistent namespace %s", v.ID, v.Namespace)
 		}
 
-		// Check for volume existence
 		obj, err := txn.First("csi_volumes", "id", v.Namespace, v.ID)
 		if err != nil {
 			return fmt.Errorf("volume existence check error: %v", err)
@@ -2164,17 +2163,20 @@ func (s *StateStore) CSIVolumeRegister(index uint64, volumes []*structs.CSIVolum
 			// Allow some properties of a volume to be updated in place, but
 			// prevent accidentally overwriting important properties, or
 			// overwriting a volume in use
-			old, ok := obj.(*structs.CSIVolume)
-			if ok &&
-				old.InUse() ||
-				old.ExternalID != v.ExternalID ||
+			old := obj.(*structs.CSIVolume)
+			if old.ExternalID != v.ExternalID ||
 				old.PluginID != v.PluginID ||
 				old.Provider != v.Provider {
-				return fmt.Errorf("volume exists: %s", v.ID)
+				return fmt.Errorf("volume identity cannot be updated: %s", v.ID)
 			}
+			s.CSIVolumeDenormalize(nil, old.Copy())
+			if old.InUse() {
+				return fmt.Errorf("volume cannot be updated while in use")
+			}
 		}
 
-		if v.CreateIndex == 0 {
+			v.CreateIndex = old.CreateIndex
+			v.ModifyIndex = index
+		} else {
 			v.CreateIndex = index
 			v.ModifyIndex = index
 		}
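In sum, the state store now behaves as an upsert with identity and in-use guards. A hedged sketch of the resulting semantics, written in the style of the state store tests below (the index values and secret contents are illustrative, not from the real tests):

```go
vol := mock.CSIVolume(plugin)

// First upsert creates the volume.
require.NoError(t, state.UpsertCSIVolume(1000, []*structs.CSIVolume{vol}))

// Re-upserting the same ID while the volume is unused succeeds; per the
// index bookkeeping above, the original CreateIndex is carried forward.
update := vol.Copy()
update.Secrets = structs.CSISecrets{"mysecret": "rotated"}
require.NoError(t, state.UpsertCSIVolume(1001, []*structs.CSIVolume{update}))

// Changing identity fields (ExternalID, PluginID, Provider) is rejected,
// as is any update while the volume has active claims.
bad := vol.Copy()
bad.PluginID = "some-other-plugin"
require.Error(t, state.UpsertCSIVolume(1002, []*structs.CSIVolume{bad}))
```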
@@ -2684,6 +2684,10 @@ func TestStateStore_CSIVolume(t *testing.T) {
 	v0.Schedulable = true
 	v0.AccessMode = structs.CSIVolumeAccessModeMultiNodeSingleWriter
 	v0.AttachmentMode = structs.CSIVolumeAttachmentModeFilesystem
+	v0.RequestedCapabilities = []*structs.CSIVolumeCapability{{
+		AccessMode:     structs.CSIVolumeAccessModeMultiNodeSingleWriter,
+		AttachmentMode: structs.CSIVolumeAttachmentModeFilesystem,
+	}}
 
 	index++
 	v1 := structs.NewCSIVolume("foo", index)
@@ -2693,20 +2697,24 @@
 	v1.Schedulable = true
 	v1.AccessMode = structs.CSIVolumeAccessModeMultiNodeSingleWriter
 	v1.AttachmentMode = structs.CSIVolumeAttachmentModeFilesystem
+	v1.RequestedCapabilities = []*structs.CSIVolumeCapability{{
+		AccessMode:     structs.CSIVolumeAccessModeMultiNodeSingleWriter,
+		AttachmentMode: structs.CSIVolumeAttachmentModeFilesystem,
+	}}
 
 	index++
-	err = state.CSIVolumeRegister(index, []*structs.CSIVolume{v0, v1})
+	err = state.UpsertCSIVolume(index, []*structs.CSIVolume{v0, v1})
 	require.NoError(t, err)
 
 	// volume registration is idempotent, unless identies are changed
 	index++
-	err = state.CSIVolumeRegister(index, []*structs.CSIVolume{v0, v1})
+	err = state.UpsertCSIVolume(index, []*structs.CSIVolume{v0, v1})
 	require.NoError(t, err)
 
 	index++
 	v2 := v0.Copy()
 	v2.PluginID = "new-id"
-	err = state.CSIVolumeRegister(index, []*structs.CSIVolume{v2})
+	err = state.UpsertCSIVolume(index, []*structs.CSIVolume{v2})
 	require.Error(t, err, fmt.Sprintf("volume exists: %s", v0.ID))
 
 	ws := memdb.NewWatchSet()
@@ -2786,7 +2794,7 @@ func TestStateStore_CSIVolume(t *testing.T) {
 
 	// registration is an error when the volume is in use
 	index++
-	err = state.CSIVolumeRegister(index, []*structs.CSIVolume{v0})
+	err = state.UpsertCSIVolume(index, []*structs.CSIVolume{v0})
 	require.Error(t, err, "volume re-registered while in use")
 	// as is deregistration
 	index++
@@ -3126,7 +3134,7 @@ func TestStateStore_CSIPlugin_Lifecycle(t *testing.T) {
 		Namespace: structs.DefaultNamespace,
 		PluginID:  plugID,
 	}
-	err = store.CSIVolumeRegister(nextIndex(store), []*structs.CSIVolume{vol})
+	err = store.UpsertCSIVolume(nextIndex(store), []*structs.CSIVolume{vol})
 	require.NoError(t, err)
 
 	err = store.DeleteJob(nextIndex(store), structs.DefaultNamespace, controllerJobID)
@@ -305,7 +305,7 @@ func TestBadCSIState(t testing.TB, store *StateStore) error {
 	}
 	vol = vol.Copy() // canonicalize
 
-	err = store.CSIVolumeRegister(index, []*structs.CSIVolume{vol})
+	err = store.UpsertCSIVolume(index, []*structs.CSIVolume{vol})
 	if err != nil {
 		return err
 	}
@@ -1,6 +1,7 @@
 package structs
 
 import (
+	"errors"
 	"fmt"
 	"strings"
 	"time"
@@ -180,6 +181,22 @@ func (o *CSIMountOptions) Merge(p *CSIMountOptions) {
 	}
 }
 
+func (o *CSIMountOptions) Equal(p *CSIMountOptions) bool {
+	if o == nil && p == nil {
+		return true
+	}
+	if o == nil || p == nil {
+		return false
+	}
+
+	if o.FSType != p.FSType {
+		return false
+	}
+
+	return helper.CompareSliceSetString(
+		o.MountFlags, p.MountFlags)
+}
+
 // CSIMountOptions implements the Stringer and GoStringer interfaces to prevent
 // accidental leakage of sensitive mount flags via logs.
 var _ fmt.Stringer = &CSIMountOptions{}
@@ -707,6 +724,103 @@ func (v *CSIVolume) Validate() error {
 	return nil
 }
 
+// Merge updates the mutable fields of a volume with those from
+// another volume. CSIVolume has many user-defined fields which are
+// immutable once set, and many fields that are not
+// user-settable. Merge will return an error if we try to mutate the
+// user-defined immutable fields after they're set, but silently
+// ignore fields that are controlled by Nomad.
+func (v *CSIVolume) Merge(other *CSIVolume) error {
+	if other == nil {
+		return nil
+	}
+
+	var errs *multierror.Error
+
+	if v.Name != other.Name && other.Name != "" {
+		errs = multierror.Append(errs, errors.New("volume name cannot be updated"))
+	}
+	if v.ExternalID != other.ExternalID && other.ExternalID != "" {
+		errs = multierror.Append(errs, errors.New(
+			"volume external ID cannot be updated"))
+	}
+	if v.PluginID != other.PluginID {
+		errs = multierror.Append(errs, errors.New(
+			"volume plugin ID cannot be updated"))
+	}
+	if v.CloneID != other.CloneID && other.CloneID != "" {
+		errs = multierror.Append(errs, errors.New(
+			"volume clone ID cannot be updated"))
+	}
+	if v.SnapshotID != other.SnapshotID && other.SnapshotID != "" {
+		errs = multierror.Append(errs, errors.New(
+			"volume snapshot ID cannot be updated"))
+	}
+
+	// must be compatible with capacity range
+	// TODO: when ExpandVolume is implemented we'll need to update
+	// this logic https://github.com/hashicorp/nomad/issues/10324
+	if v.Capacity != 0 {
+		if other.RequestedCapacityMax < v.Capacity ||
+			other.RequestedCapacityMin > v.Capacity {
+			errs = multierror.Append(errs, errors.New(
+				"volume requested capacity update was not compatible with existing capacity"))
+		} else {
+			v.RequestedCapacityMin = other.RequestedCapacityMin
+			v.RequestedCapacityMax = other.RequestedCapacityMax
+		}
+	}
+
+	// must be compatible with volume_capabilities
+	if v.AccessMode != CSIVolumeAccessModeUnknown ||
+		v.AttachmentMode != CSIVolumeAttachmentModeUnknown {
+		var ok bool
+		for _, cap := range other.RequestedCapabilities {
+			if cap.AccessMode == v.AccessMode &&
+				cap.AttachmentMode == v.AttachmentMode {
+				ok = true
+				break
+			}
+		}
+		if ok {
+			v.RequestedCapabilities = other.RequestedCapabilities
+		} else {
+			errs = multierror.Append(errs, errors.New(
+				"volume requested capabilities update was not compatible with existing capability in use"))
+		}
+	} else {
+		v.RequestedCapabilities = other.RequestedCapabilities
+	}
+
+	// topologies are immutable, so topology request changes must be
+	// compatible with the existing topology, if any
+	if len(v.Topologies) > 0 {
+		if !v.RequestedTopologies.Equal(other.RequestedTopologies) {
+			errs = multierror.Append(errs, errors.New(
+				"volume topology request update was not compatible with existing topology"))
+		}
+	}
+
+	// MountOptions can be updated so long as the volume isn't in use,
+	// but the caller will reject updating an in-use volume
+	v.MountOptions = other.MountOptions
+
+	// Secrets can be updated freely
+	v.Secrets = other.Secrets
+
+	// must be compatible with parameters set by from CreateVolumeResponse
+
+	if len(other.Parameters) != 0 && !helper.CompareMapStringString(v.Parameters, other.Parameters) {
+		errs = multierror.Append(errs, errors.New(
+			"volume parameters cannot be updated"))
+	}
+
+	// Context is mutable and will be used during controller
+	// validation
+	v.Context = other.Context
+	return errs.ErrorOrNil()
+}
+
 // Request and response wrappers
 type CSIVolumeRegisterRequest struct {
 	Volumes []*CSIVolume
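The Merge method above boils down to a reusable pattern: reject changes to immutable fields, collect every violation with `go-multierror`, and copy the mutable fields through. A minimal, self-contained sketch of that pattern with simplified stand-in types (not the real CSIVolume):

```go
package main

import (
	"errors"
	"fmt"

	multierror "github.com/hashicorp/go-multierror"
)

// Volume is a simplified stand-in for structs.CSIVolume.
type Volume struct {
	Name    string            // immutable once set
	Secrets map[string]string // freely mutable
}

// Merge applies the mutable fields of other onto v, accumulating an
// error for each attempted change to an immutable field.
func (v *Volume) Merge(other *Volume) error {
	if other == nil {
		return nil
	}
	var errs *multierror.Error
	if v.Name != other.Name && other.Name != "" {
		errs = multierror.Append(errs, errors.New("volume name cannot be updated"))
	}
	v.Secrets = other.Secrets // mutable fields are copied even on error
	return errs.ErrorOrNil()
}

func main() {
	v := &Volume{Name: "database"}
	err := v.Merge(&Volume{Name: "other", Secrets: map[string]string{"k": "v"}})
	fmt.Println(err) // reports the immutable-field violation
}
```

Note that, like the real Merge, the sketch mutates the receiver even when it returns an error; that is safe because the caller (Register, above) merges onto a copy and discards it on error.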
@@ -569,6 +569,175 @@ func TestCSIVolume_Validate(t *testing.T) {
 
 }
 
+func TestCSIVolume_Merge(t *testing.T) {
+
+	testCases := []struct {
+		name     string
+		v        *CSIVolume
+		update   *CSIVolume
+		expected string
+		expectFn func(t *testing.T, v *CSIVolume)
+	}{
+		{
+			name: "invalid capacity update",
+			v:    &CSIVolume{Capacity: 100},
+			update: &CSIVolume{
+				RequestedCapacityMax: 300, RequestedCapacityMin: 200},
+			expected: "volume requested capacity update was not compatible with existing capacity",
+			expectFn: func(t *testing.T, v *CSIVolume) {
+				require.NotEqual(t, 300, v.RequestedCapacityMax)
+				require.NotEqual(t, 200, v.RequestedCapacityMin)
+			},
+		},
+		{
+			name: "invalid capability update",
+			v: &CSIVolume{
+				AccessMode:     CSIVolumeAccessModeMultiNodeReader,
+				AttachmentMode: CSIVolumeAttachmentModeFilesystem,
+			},
+			update: &CSIVolume{
+				RequestedCapabilities: []*CSIVolumeCapability{
+					{
+						AccessMode:     CSIVolumeAccessModeSingleNodeWriter,
+						AttachmentMode: CSIVolumeAttachmentModeFilesystem,
+					},
+				},
+			},
+			expected: "volume requested capabilities update was not compatible with existing capability in use",
+		},
+		{
+			name: "invalid topology update - removed",
+			v: &CSIVolume{
+				RequestedTopologies: &CSITopologyRequest{
+					Required: []*CSITopology{
+						{Segments: map[string]string{"rack": "R1"}},
+					},
+				},
+				Topologies: []*CSITopology{
+					{Segments: map[string]string{"rack": "R1"}},
+				},
+			},
+			update:   &CSIVolume{},
+			expected: "volume topology request update was not compatible with existing topology",
+			expectFn: func(t *testing.T, v *CSIVolume) {
+				require.Len(t, v.Topologies, 1)
+			},
+		},
+		{
+			name: "invalid topology requirement added",
+			v: &CSIVolume{
+				Topologies: []*CSITopology{
+					{Segments: map[string]string{"rack": "R1"}},
+				},
+			},
+			update: &CSIVolume{
+				RequestedTopologies: &CSITopologyRequest{
+					Required: []*CSITopology{
+						{Segments: map[string]string{"rack": "R1"}},
+						{Segments: map[string]string{"rack": "R3"}},
+					},
+				},
+			},
+			expected: "volume topology request update was not compatible with existing topology",
+			expectFn: func(t *testing.T, v *CSIVolume) {
+				require.Len(t, v.Topologies, 1)
+				require.Equal(t, "R1", v.Topologies[0].Segments["rack"])
+			},
+		},
+		{
+			name: "invalid topology preference removed",
+			v: &CSIVolume{
+				Topologies: []*CSITopology{
+					{Segments: map[string]string{"rack": "R1"}},
+				},
+				RequestedTopologies: &CSITopologyRequest{
+					Preferred: []*CSITopology{
+						{Segments: map[string]string{"rack": "R1"}},
+						{Segments: map[string]string{"rack": "R3"}},
+					},
+				},
+			},
+			update: &CSIVolume{
+				Topologies: []*CSITopology{
+					{Segments: map[string]string{"rack": "R1"}},
+				},
+				RequestedTopologies: &CSITopologyRequest{
+					Preferred: []*CSITopology{
+						{Segments: map[string]string{"rack": "R3"}},
+					},
+				},
+			},
+			expected: "volume topology request update was not compatible with existing topology",
+		},
+		{
+			name: "valid update",
+			v: &CSIVolume{
+				Topologies: []*CSITopology{
+					{Segments: map[string]string{"rack": "R1"}},
+					{Segments: map[string]string{"rack": "R2"}},
+				},
+				AccessMode:     CSIVolumeAccessModeMultiNodeReader,
+				AttachmentMode: CSIVolumeAttachmentModeFilesystem,
+				MountOptions: &CSIMountOptions{
+					FSType:     "ext4",
+					MountFlags: []string{"noatime"},
+				},
+				RequestedTopologies: &CSITopologyRequest{
+					Required: []*CSITopology{
+						{Segments: map[string]string{"rack": "R1"}},
+					},
+					Preferred: []*CSITopology{
+						{Segments: map[string]string{"rack": "R2"}},
+					},
+				},
+			},
+			update: &CSIVolume{
+				Topologies: []*CSITopology{
+					{Segments: map[string]string{"rack": "R1"}},
+					{Segments: map[string]string{"rack": "R2"}},
+				},
+				MountOptions: &CSIMountOptions{
+					FSType:     "ext4",
+					MountFlags: []string{"noatime"},
+				},
+				RequestedTopologies: &CSITopologyRequest{
+					Required: []*CSITopology{
+						{Segments: map[string]string{"rack": "R1"}},
+					},
+					Preferred: []*CSITopology{
+						{Segments: map[string]string{"rack": "R2"}},
+					},
+				},
+				RequestedCapabilities: []*CSIVolumeCapability{
+					{
+						AccessMode:     CSIVolumeAccessModeMultiNodeReader,
+						AttachmentMode: CSIVolumeAttachmentModeFilesystem,
+					},
+					{
+						AccessMode:     CSIVolumeAccessModeMultiNodeReader,
+						AttachmentMode: CSIVolumeAttachmentModeFilesystem,
+					},
+				},
+			},
+		},
+	}
+	for _, tc := range testCases {
+		tc = tc
+		t.Run(tc.name, func(t *testing.T) {
+			err := tc.v.Merge(tc.update)
+			if tc.expected == "" {
+				require.NoError(t, err)
+			} else {
+				if tc.expectFn != nil {
+					tc.expectFn(t, tc.v)
+				}
+				require.Error(t, err, tc.expected)
+				require.Contains(t, err.Error(), tc.expected)
+			}
+		})
+	}
+}
+
 func TestCSIPluginJobs(t *testing.T) {
 	plug := NewCSIPlugin("foo", 1000)
 	controller := &Job{
@@ -62,6 +62,19 @@ func (t *CSITopology) Equal(o *CSITopology) bool {
 	return helper.CompareMapStringString(t.Segments, o.Segments)
 }
 
+func (t *CSITopology) MatchFound(o []*CSITopology) bool {
+	if t == nil || o == nil || len(o) == 0 {
+		return false
+	}
+
+	for _, other := range o {
+		if t.Equal(other) {
+			return true
+		}
+	}
+	return false
+}
+
 // CSITopologyRequest are the topologies submitted as options to the
 // storage provider at the time the volume was created. The storage
 // provider will return a single topology.
@@ -70,6 +83,29 @@ type CSITopologyRequest struct {
 	Preferred []*CSITopology
 }
 
+func (tr *CSITopologyRequest) Equal(o *CSITopologyRequest) bool {
+	if tr == nil && o == nil {
+		return true
+	}
+	if tr == nil && o != nil || tr != nil && o == nil {
+		return false
+	}
+	if len(tr.Required) != len(o.Required) || len(tr.Preferred) != len(o.Preferred) {
+		return false
+	}
+	for i, topo := range tr.Required {
+		if !topo.Equal(o.Required[i]) {
+			return false
+		}
+	}
+	for i, topo := range tr.Preferred {
+		if !topo.Equal(o.Preferred[i]) {
+			return false
+		}
+	}
+	return true
+}
+
 // CSINodeInfo is the fingerprinted data from a CSI Plugin that is specific to
 // the Node API.
 type CSINodeInfo struct {
@@ -32,7 +32,7 @@ func TestVolumeWatch_EnableDisable(t *testing.T) {
 	vol := testVolume(plugin, alloc, node.ID)
 
 	index++
-	err := srv.State().CSIVolumeRegister(index, []*structs.CSIVolume{vol})
+	err := srv.State().UpsertCSIVolume(index, []*structs.CSIVolume{vol})
 	require.NoError(err)
 
 	claim := &structs.CSIVolumeClaim{
@@ -78,7 +78,7 @@ func TestVolumeWatch_LeadershipTransition(t *testing.T) {
 	watcher.SetEnabled(true, srv.State(), "")
 
 	index++
-	err = srv.State().CSIVolumeRegister(index, []*structs.CSIVolume{vol})
+	err = srv.State().UpsertCSIVolume(index, []*structs.CSIVolume{vol})
 	require.NoError(err)
 
 	// we should get or start up a watcher when we get an update for
@@ -167,7 +167,7 @@ func TestVolumeWatch_StartStop(t *testing.T) {
 	// register a volume
 	vol := testVolume(plugin, alloc1, node.ID)
 	index++
-	err = srv.State().CSIVolumeRegister(index, []*structs.CSIVolume{vol})
+	err = srv.State().UpsertCSIVolume(index, []*structs.CSIVolume{vol})
 	require.NoError(err)
 
 	// assert we get a watcher; there are no claims so it should immediately stop
@@ -254,7 +254,7 @@ func TestVolumeWatch_RegisterDeregister(t *testing.T) {
 	// register a volume without claims
 	vol := mock.CSIVolume(plugin)
 	index++
-	err := srv.State().CSIVolumeRegister(index, []*structs.CSIVolume{vol})
+	err := srv.State().UpsertCSIVolume(index, []*structs.CSIVolume{vol})
 	require.NoError(err)
 
 	// watcher should be started but immediately stopped
@@ -318,14 +318,7 @@ func (c *CSIVolumeChecker) isFeasible(n *structs.Node) (bool, string) {
 		// volume MUST be accessible from at least one of the
 		// requisite topologies."
 		if len(vol.Topologies) > 0 {
-			var ok bool
-			for _, requiredTopo := range vol.Topologies {
-				if requiredTopo.Equal(plugin.NodeInfo.AccessibleTopology) {
-					ok = true
-					break
-				}
-			}
-			if !ok {
+			if !plugin.NodeInfo.AccessibleTopology.MatchFound(vol.Topologies) {
 				return false, FilterConstraintsCSIPluginTopology
 			}
 		}
@@ -342,7 +342,7 @@ func TestCSIVolumeChecker(t *testing.T) {
 		{Segments: map[string]string{"rack": "R1"}},
 		{Segments: map[string]string{"rack": "R2"}},
 	}
-	err := state.CSIVolumeRegister(index, []*structs.CSIVolume{vol})
+	err := state.UpsertCSIVolume(index, []*structs.CSIVolume{vol})
 	require.NoError(t, err)
 	index++
 
@@ -353,14 +353,14 @@ func TestCSIVolumeChecker(t *testing.T) {
 	vol2.Namespace = structs.DefaultNamespace
 	vol2.AccessMode = structs.CSIVolumeAccessModeMultiNodeSingleWriter
 	vol2.AttachmentMode = structs.CSIVolumeAttachmentModeFilesystem
-	err = state.CSIVolumeRegister(index, []*structs.CSIVolume{vol2})
+	err = state.UpsertCSIVolume(index, []*structs.CSIVolume{vol2})
 	require.NoError(t, err)
 	index++
 
 	vid3 := "volume-id[0]"
 	vol3 := vol.Copy()
 	vol3.ID = vid3
-	err = state.CSIVolumeRegister(index, []*structs.CSIVolume{vol3})
+	err = state.UpsertCSIVolume(index, []*structs.CSIVolume{vol3})
 	require.NoError(t, err)
 	index++
 
@@ -6133,7 +6133,7 @@ func TestServiceSched_CSIVolumesPerAlloc(t *testing.T) {
 	// once its been fixed
 	shared.AccessMode = structs.CSIVolumeAccessModeMultiNodeReader
 
-	require.NoError(h.State.CSIVolumeRegister(
+	require.NoError(h.State.UpsertCSIVolume(
 		h.NextIndex(), []*structs.CSIVolume{shared, vol0, vol1, vol2}))
 
 	// Create a job that uses both
@@ -6226,7 +6226,7 @@ func TestServiceSched_CSIVolumesPerAlloc(t *testing.T) {
 	vol4.ID = "volume-unique[3]"
 	vol5 := vol0.Copy()
 	vol5.ID = "volume-unique[4]"
-	require.NoError(h.State.CSIVolumeRegister(
+	require.NoError(h.State.UpsertCSIVolume(
 		h.NextIndex(), []*structs.CSIVolume{vol4, vol5}))
 
 	// Process again with failure fixed. It should create a new plan
@@ -245,7 +245,7 @@ func TestServiceStack_Select_CSI(t *testing.T) {
 	v.AccessMode = structs.CSIVolumeAccessModeMultiNodeSingleWriter
 	v.AttachmentMode = structs.CSIVolumeAttachmentModeFilesystem
 	v.PluginID = "bar"
-	err := state.CSIVolumeRegister(999, []*structs.CSIVolume{v})
+	err := state.UpsertCSIVolume(999, []*structs.CSIVolume{v})
 	require.NoError(t, err)
 
 	// Create a node with healthy fingerprints for both controller and node plugins